src/target/target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 ***************************************************************************/
41
42 #ifdef HAVE_CONFIG_H
43 #include "config.h"
44 #endif
45
46 #include <helper/time_support.h>
47 #include <jtag/jtag.h>
48 #include <flash/nor/core.h>
49
50 #include "target.h"
51 #include "target_type.h"
52 #include "target_request.h"
53 #include "breakpoints.h"
54 #include "register.h"
55 #include "trace.h"
56 #include "image.h"
57 #include "rtos/rtos.h"
58 #include "transport/transport.h"
59
60 /* default halt wait timeout (ms) */
61 #define DEFAULT_HALT_TIMEOUT 5000
62
63 static int target_read_buffer_default(struct target *target, uint32_t address,
64 uint32_t count, uint8_t *buffer);
65 static int target_write_buffer_default(struct target *target, uint32_t address,
66 uint32_t count, const uint8_t *buffer);
67 static int target_array2mem(Jim_Interp *interp, struct target *target,
68 int argc, Jim_Obj * const *argv);
69 static int target_mem2array(Jim_Interp *interp, struct target *target,
70 int argc, Jim_Obj * const *argv);
71 static int target_register_user_commands(struct command_context *cmd_ctx);
72 static int target_get_gdb_fileio_info_default(struct target *target,
73 struct gdb_fileio_info *fileio_info);
74 static int target_gdb_fileio_end_default(struct target *target, int retcode,
75 int fileio_errno, bool ctrl_c);
76 static int target_profiling_default(struct target *target, uint32_t *samples,
77 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);
78
79 /* targets */
80 extern struct target_type arm7tdmi_target;
81 extern struct target_type arm720t_target;
82 extern struct target_type arm9tdmi_target;
83 extern struct target_type arm920t_target;
84 extern struct target_type arm966e_target;
85 extern struct target_type arm946e_target;
86 extern struct target_type arm926ejs_target;
87 extern struct target_type fa526_target;
88 extern struct target_type feroceon_target;
89 extern struct target_type dragonite_target;
90 extern struct target_type xscale_target;
91 extern struct target_type cortexm_target;
92 extern struct target_type cortexa_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type mips_m4k_target;
96 extern struct target_type avr_target;
97 extern struct target_type dsp563xx_target;
98 extern struct target_type dsp5680xx_target;
99 extern struct target_type testee_target;
100 extern struct target_type avr32_ap7k_target;
101 extern struct target_type hla_target;
102 extern struct target_type nds32_v2_target;
103 extern struct target_type nds32_v3_target;
104 extern struct target_type nds32_v3m_target;
105 extern struct target_type or1k_target;
106 extern struct target_type quark_x10xx_target;
107
108 static struct target_type *target_types[] = {
109 &arm7tdmi_target,
110 &arm9tdmi_target,
111 &arm920t_target,
112 &arm720t_target,
113 &arm966e_target,
114 &arm946e_target,
115 &arm926ejs_target,
116 &fa526_target,
117 &feroceon_target,
118 &dragonite_target,
119 &xscale_target,
120 &cortexm_target,
121 &cortexa_target,
122 &cortexr4_target,
123 &arm11_target,
124 &mips_m4k_target,
125 &avr_target,
126 &dsp563xx_target,
127 &dsp5680xx_target,
128 &testee_target,
129 &avr32_ap7k_target,
130 &hla_target,
131 &nds32_v2_target,
132 &nds32_v3_target,
133 &nds32_v3m_target,
134 &or1k_target,
135 &quark_x10xx_target,
136 NULL,
137 };
138
139 struct target *all_targets;
140 static struct target_event_callback *target_event_callbacks;
141 static struct target_timer_callback *target_timer_callbacks;
142 LIST_HEAD(target_reset_callback_list);
143 LIST_HEAD(target_trace_callback_list);
144 static const int polling_interval = 100;
145
146 static const Jim_Nvp nvp_assert[] = {
147 { .name = "assert", NVP_ASSERT },
148 { .name = "deassert", NVP_DEASSERT },
149 { .name = "T", NVP_ASSERT },
150 { .name = "F", NVP_DEASSERT },
151 { .name = "t", NVP_ASSERT },
152 { .name = "f", NVP_DEASSERT },
153 { .name = NULL, .value = -1 }
154 };
155
156 static const Jim_Nvp nvp_error_target[] = {
157 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
158 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
159 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
160 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
161 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
162 { .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
163 { .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
164 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
165 { .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
166 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
167 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
168 { .value = -1, .name = NULL }
169 };
170
171 static const char *target_strerror_safe(int err)
172 {
173 const Jim_Nvp *n;
174
175 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
176 if (n->name == NULL)
177 return "unknown";
178 else
179 return n->name;
180 }
181
182 static const Jim_Nvp nvp_target_event[] = {
183
184 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
185 { .value = TARGET_EVENT_HALTED, .name = "halted" },
186 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
187 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
188 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
189
190 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
191 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
192
193 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
194 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
195 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
196 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
197 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
198 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
199 { .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
200 { .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
201 { .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
202 { .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
203 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
204 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
205
206 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
207 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
208
209 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
210 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
211
212 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
213 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
214
215 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
216 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
217
218 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
219 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
220
221 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
222
223 { .name = NULL, .value = -1 }
224 };
225
226 static const Jim_Nvp nvp_target_state[] = {
227 { .name = "unknown", .value = TARGET_UNKNOWN },
228 { .name = "running", .value = TARGET_RUNNING },
229 { .name = "halted", .value = TARGET_HALTED },
230 { .name = "reset", .value = TARGET_RESET },
231 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
232 { .name = NULL, .value = -1 },
233 };
234
235 static const Jim_Nvp nvp_target_debug_reason[] = {
236 { .name = "debug-request" , .value = DBG_REASON_DBGRQ },
237 { .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
238 { .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
239 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
240 { .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
241 { .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
242 { .name = "program-exit" , .value = DBG_REASON_EXIT },
243 { .name = "undefined" , .value = DBG_REASON_UNDEFINED },
244 { .name = NULL, .value = -1 },
245 };
246
247 static const Jim_Nvp nvp_target_endian[] = {
248 { .name = "big", .value = TARGET_BIG_ENDIAN },
249 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
250 { .name = "be", .value = TARGET_BIG_ENDIAN },
251 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
252 { .name = NULL, .value = -1 },
253 };
254
255 static const Jim_Nvp nvp_reset_modes[] = {
256 { .name = "unknown", .value = RESET_UNKNOWN },
257 { .name = "run" , .value = RESET_RUN },
258 { .name = "halt" , .value = RESET_HALT },
259 { .name = "init" , .value = RESET_INIT },
260 { .name = NULL , .value = -1 },
261 };
262
263 const char *debug_reason_name(struct target *t)
264 {
265 const char *cp;
266
267 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
268 t->debug_reason)->name;
269 if (!cp) {
270 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
271 cp = "(*BUG*unknown*BUG*)";
272 }
273 return cp;
274 }
275
276 const char *target_state_name(struct target *t)
277 {
278 const char *cp;
279 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
280 if (!cp) {
281 LOG_ERROR("Invalid target state: %d", (int)(t->state));
282 cp = "(*BUG*unknown*BUG*)";
283 }
284 return cp;
285 }
286
287 const char *target_event_name(enum target_event event)
288 {
289 const char *cp;
290 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
291 if (!cp) {
292 LOG_ERROR("Invalid target event: %d", (int)(event));
293 cp = "(*BUG*unknown*BUG*)";
294 }
295 return cp;
296 }
297
298 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
299 {
300 const char *cp;
301 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
302 if (!cp) {
303 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
304 cp = "(*BUG*unknown*BUG*)";
305 }
306 return cp;
307 }
308
309 /* determine the number of the new target */
310 static int new_target_number(void)
311 {
312 struct target *t;
313 int x;
314
315 /* number is 0 based */
316 x = -1;
317 t = all_targets;
318 while (t) {
319 if (x < t->target_number)
320 x = t->target_number;
321 t = t->next;
322 }
323 return x + 1;
324 }
325
326 /* read a uint64_t from a buffer in target memory endianness */
327 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
328 {
329 if (target->endianness == TARGET_LITTLE_ENDIAN)
330 return le_to_h_u64(buffer);
331 else
332 return be_to_h_u64(buffer);
333 }
334
335 /* read a uint32_t from a buffer in target memory endianness */
336 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
337 {
338 if (target->endianness == TARGET_LITTLE_ENDIAN)
339 return le_to_h_u32(buffer);
340 else
341 return be_to_h_u32(buffer);
342 }
343
344 /* read a uint24_t from a buffer in target memory endianness */
345 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
346 {
347 if (target->endianness == TARGET_LITTLE_ENDIAN)
348 return le_to_h_u24(buffer);
349 else
350 return be_to_h_u24(buffer);
351 }
352
353 /* read a uint16_t from a buffer in target memory endianness */
354 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
355 {
356 if (target->endianness == TARGET_LITTLE_ENDIAN)
357 return le_to_h_u16(buffer);
358 else
359 return be_to_h_u16(buffer);
360 }
361
362 /* read a uint8_t from a buffer in target memory endianness */
363 static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
364 {
365 return *buffer & 0x0ff;
366 }
367
368 /* write a uint64_t to a buffer in target memory endianness */
369 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
370 {
371 if (target->endianness == TARGET_LITTLE_ENDIAN)
372 h_u64_to_le(buffer, value);
373 else
374 h_u64_to_be(buffer, value);
375 }
376
377 /* write a uint32_t to a buffer in target memory endianness */
378 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
379 {
380 if (target->endianness == TARGET_LITTLE_ENDIAN)
381 h_u32_to_le(buffer, value);
382 else
383 h_u32_to_be(buffer, value);
384 }
385
386 /* write a uint24_t to a buffer in target memory endianness */
387 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
388 {
389 if (target->endianness == TARGET_LITTLE_ENDIAN)
390 h_u24_to_le(buffer, value);
391 else
392 h_u24_to_be(buffer, value);
393 }
394
395 /* write a uint16_t to a buffer in target memory endianness */
396 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
397 {
398 if (target->endianness == TARGET_LITTLE_ENDIAN)
399 h_u16_to_le(buffer, value);
400 else
401 h_u16_to_be(buffer, value);
402 }
403
404 /* write a uint8_t to a buffer in target memory endianness */
405 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
406 {
407 *buffer = value;
408 }
409
410 /* read a uint64_t array from a buffer in target memory endianness */
411 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
412 {
413 uint32_t i;
414 for (i = 0; i < count; i++)
415 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
416 }
417
418 /* read a uint32_t array from a buffer in target memory endianness */
419 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
420 {
421 uint32_t i;
422 for (i = 0; i < count; i++)
423 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
424 }
425
426 /* read a uint16_t array from a buffer in target memory endianness */
427 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
428 {
429 uint32_t i;
430 for (i = 0; i < count; i++)
431 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
432 }
433
434 /* write a uint64_t array to a buffer in target memory endianness */
435 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
436 {
437 uint32_t i;
438 for (i = 0; i < count; i++)
439 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
440 }
441
442 /* write a uint32_t array to a buffer in target memory endianness */
443 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
444 {
445 uint32_t i;
446 for (i = 0; i < count; i++)
447 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
448 }
449
450 /* write a uint16_t array to a buffer in target memory endianness */
451 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
452 {
453 uint32_t i;
454 for (i = 0; i < count; i++)
455 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
456 }
457
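/* Example (sketch): converting a raw block read with target_read_memory()
 * into host-order words using the helpers above; a generic `target` handle
 * and `address` are assumed and error handling is abbreviated.
 *
 *	uint8_t raw[4 * 8];
 *	uint32_t words[8];
 *	int retval = target_read_memory(target, address, 4, 8, raw);
 *	if (retval == ERROR_OK)
 *		target_buffer_get_u32_array(target, raw, 8, words);
 */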
458 /* return a pointer to a configured target; id is name or number */
459 struct target *get_target(const char *id)
460 {
461 struct target *target;
462
463 /* try as Tcl target name */
464 for (target = all_targets; target; target = target->next) {
465 if (target_name(target) == NULL)
466 continue;
467 if (strcmp(id, target_name(target)) == 0)
468 return target;
469 }
470
471 /* It's OK to remove this fallback sometime after August 2010 or so */
472
473 /* no match, try as number */
474 unsigned num;
475 if (parse_uint(id, &num) != ERROR_OK)
476 return NULL;
477
478 for (target = all_targets; target; target = target->next) {
479 if (target->target_number == (int)num) {
480 LOG_WARNING("use '%s' as target identifier, not '%u'",
481 target_name(target), num);
482 return target;
483 }
484 }
485
486 return NULL;
487 }
488
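/* Example (sketch): looking up a target by the name it was given in the
 * configuration scripts; "stm32f1x.cpu" is only an illustrative name.
 *
 *	struct target *t = get_target("stm32f1x.cpu");
 *	if (t == NULL)
 *		LOG_ERROR("target not found");
 */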
489 /* returns a pointer to the n-th configured target */
490 struct target *get_target_by_num(int num)
491 {
492 struct target *target = all_targets;
493
494 while (target) {
495 if (target->target_number == num)
496 return target;
497 target = target->next;
498 }
499
500 return NULL;
501 }
502
503 struct target *get_current_target(struct command_context *cmd_ctx)
504 {
505 struct target *target = get_target_by_num(cmd_ctx->current_target);
506
507 if (target == NULL) {
508 LOG_ERROR("BUG: current_target out of bounds");
509 exit(-1);
510 }
511
512 return target;
513 }
514
515 int target_poll(struct target *target)
516 {
517 int retval;
518
519 /* We can't poll until after examine */
520 if (!target_was_examined(target)) {
521 /* Fail silently lest we pollute the log */
522 return ERROR_FAIL;
523 }
524
525 retval = target->type->poll(target);
526 if (retval != ERROR_OK)
527 return retval;
528
529 if (target->halt_issued) {
530 if (target->state == TARGET_HALTED)
531 target->halt_issued = false;
532 else {
533 long long t = timeval_ms() - target->halt_issued_time;
534 if (t > DEFAULT_HALT_TIMEOUT) {
535 target->halt_issued = false;
536 LOG_INFO("Halt timed out, wake up GDB.");
537 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
538 }
539 }
540 }
541
542 return ERROR_OK;
543 }
544
545 int target_halt(struct target *target)
546 {
547 int retval;
548 /* We can't poll until after examine */
549 if (!target_was_examined(target)) {
550 LOG_ERROR("Target not examined yet");
551 return ERROR_FAIL;
552 }
553
554 retval = target->type->halt(target);
555 if (retval != ERROR_OK)
556 return retval;
557
558 target->halt_issued = true;
559 target->halt_issued_time = timeval_ms();
560
561 return ERROR_OK;
562 }
563
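/* Example (sketch): requesting a halt and polling until the target actually
 * reaches TARGET_HALTED or DEFAULT_HALT_TIMEOUT expires; error handling is
 * abbreviated.
 *
 *	long long then = timeval_ms();
 *	target_halt(target);
 *	while (target->state != TARGET_HALTED) {
 *		target_poll(target);
 *		if (timeval_ms() - then > DEFAULT_HALT_TIMEOUT)
 *			break;
 *		alive_sleep(10);
 *	}
 */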
564 /**
565 * Make the target (re)start executing using its saved execution
566 * context (possibly with some modifications).
567 *
568 * @param target Which target should start executing.
569 * @param current True to use the target's saved program counter instead
570 * of the address parameter
571 * @param address Optionally used as the program counter.
572 * @param handle_breakpoints True iff breakpoints at the resumption PC
573 * should be skipped. (For example, maybe execution was stopped by
574 * such a breakpoint, in which case it would be counterproductive to
575 * let it re-trigger.)
576 * @param debug_execution False if all working areas allocated by OpenOCD
577 * should be released and/or restored to their original contents.
578 * (This would for example be true to run some downloaded "helper"
579 * algorithm code, which resides in one such working buffer and uses
580 * another for data storage.)
581 *
582 * @todo Resolve the ambiguity about what the "debug_execution" flag
583 * signifies. For example, Target implementations don't agree on how
584 * it relates to invalidation of the register cache, or to whether
585 * breakpoints and watchpoints should be enabled. (It would seem wrong
586 * to enable breakpoints when running downloaded "helper" algorithms
587 * (debug_execution true), since the breakpoints would be set to match
588 * target firmware being debugged, not the helper algorithm.... and
589 * enabling them could cause such helpers to malfunction (for example,
590 * by overwriting data with a breakpoint instruction). On the other
591 * hand the infrastructure for running such helpers might use this
592 * procedure but rely on a hardware breakpoint to detect termination.)
593 */
594 int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
595 {
596 int retval;
597
598 /* We can't poll until after examine */
599 if (!target_was_examined(target)) {
600 LOG_ERROR("Target not examined yet");
601 return ERROR_FAIL;
602 }
603
604 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
605
606 /* note that resume *must* be asynchronous. The CPU can halt before
607 * we poll. The CPU can even halt at the current PC as a result of
608 * a software breakpoint being inserted by the application (possibly a bug).
609 */
610 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
611 if (retval != ERROR_OK)
612 return retval;
613
614 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
615
616 return retval;
617 }
618
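/* Example (sketch): the two common ways this is called; resuming at the
 * current PC with breakpoint handling enabled, and resuming at an explicit
 * address. `resume_addr` is a placeholder.
 *
 *	target_resume(target, 1, 0, 1, 0);            // from current PC, normal execution
 *	target_resume(target, 0, resume_addr, 1, 0);  // from an explicit address
 */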
619 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
620 {
621 char buf[100];
622 int retval;
623 Jim_Nvp *n;
624 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
625 if (n->name == NULL) {
626 LOG_ERROR("invalid reset mode");
627 return ERROR_FAIL;
628 }
629
630 struct target *target;
631 for (target = all_targets; target; target = target->next)
632 target_call_reset_callbacks(target, reset_mode);
633
634 /* disable polling during reset to make reset event scripts
635 * more predictable, i.e. dr/irscan & pathmove in events will
636 * not have JTAG operations injected into the middle of a sequence.
637 */
638 bool save_poll = jtag_poll_get_enabled();
639
640 jtag_poll_set_enabled(false);
641
642 sprintf(buf, "ocd_process_reset %s", n->name);
643 retval = Jim_Eval(cmd_ctx->interp, buf);
644
645 jtag_poll_set_enabled(save_poll);
646
647 if (retval != JIM_OK) {
648 Jim_MakeErrorMessage(cmd_ctx->interp);
649 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
650 return ERROR_FAIL;
651 }
652
653 /* We want any events to be processed before the prompt */
654 retval = target_call_timer_callbacks_now();
655
656 for (target = all_targets; target; target = target->next) {
657 target->type->check_reset(target);
658 target->running_alg = false;
659 }
660
661 return retval;
662 }
663
664 static int identity_virt2phys(struct target *target,
665 uint32_t virtual, uint32_t *physical)
666 {
667 *physical = virtual;
668 return ERROR_OK;
669 }
670
671 static int no_mmu(struct target *target, int *enabled)
672 {
673 *enabled = 0;
674 return ERROR_OK;
675 }
676
677 static int default_examine(struct target *target)
678 {
679 target_set_examined(target);
680 return ERROR_OK;
681 }
682
683 /* no check by default */
684 static int default_check_reset(struct target *target)
685 {
686 return ERROR_OK;
687 }
688
689 int target_examine_one(struct target *target)
690 {
691 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
692
693 int retval = target->type->examine(target);
694 if (retval != ERROR_OK)
695 return retval;
696
697 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
698
699 return ERROR_OK;
700 }
701
702 static int jtag_enable_callback(enum jtag_event event, void *priv)
703 {
704 struct target *target = priv;
705
706 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
707 return ERROR_OK;
708
709 jtag_unregister_event_callback(jtag_enable_callback, target);
710
711 return target_examine_one(target);
712 }
713
714 /* Targets that correctly implement init + examine, i.e.
715 * no communication with target during init:
716 *
717 * XScale
718 */
719 int target_examine(void)
720 {
721 int retval = ERROR_OK;
722 struct target *target;
723
724 for (target = all_targets; target; target = target->next) {
725 /* defer examination, but don't skip it */
726 if (!target->tap->enabled) {
727 jtag_register_event_callback(jtag_enable_callback,
728 target);
729 continue;
730 }
731
732 retval = target_examine_one(target);
733 if (retval != ERROR_OK)
734 return retval;
735 }
736 return retval;
737 }
738
739 const char *target_type_name(struct target *target)
740 {
741 return target->type->name;
742 }
743
744 static int target_soft_reset_halt(struct target *target)
745 {
746 if (!target_was_examined(target)) {
747 LOG_ERROR("Target not examined yet");
748 return ERROR_FAIL;
749 }
750 if (!target->type->soft_reset_halt) {
751 LOG_ERROR("Target %s does not support soft_reset_halt",
752 target_name(target));
753 return ERROR_FAIL;
754 }
755 return target->type->soft_reset_halt(target);
756 }
757
758 /**
759 * Downloads a target-specific native code algorithm to the target,
760 * and executes it. Note that some targets may need to set up, enable,
761 * and tear down a breakpoint (hard or soft) to detect algorithm
762 * termination, while others may support lower overhead schemes where
763 * soft breakpoints embedded in the algorithm automatically terminate the
764 * algorithm.
765 *
766 * @param target used to run the algorithm
767 * @param arch_info target-specific description of the algorithm.
768 */
769 int target_run_algorithm(struct target *target,
770 int num_mem_params, struct mem_param *mem_params,
771 int num_reg_params, struct reg_param *reg_param,
772 uint32_t entry_point, uint32_t exit_point,
773 int timeout_ms, void *arch_info)
774 {
775 int retval = ERROR_FAIL;
776
777 if (!target_was_examined(target)) {
778 LOG_ERROR("Target not examined yet");
779 goto done;
780 }
781 if (!target->type->run_algorithm) {
782 LOG_ERROR("Target type '%s' does not support %s",
783 target_type_name(target), __func__);
784 goto done;
785 }
786
787 target->running_alg = true;
788 retval = target->type->run_algorithm(target,
789 num_mem_params, mem_params,
790 num_reg_params, reg_param,
791 entry_point, exit_point, timeout_ms, arch_info);
792 target->running_alg = false;
793
794 done:
795 return retval;
796 }
797
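/* Example (sketch): running a small downloaded routine with one register
 * parameter, in the style of the flash drivers. The working-area handle
 * `algorithm_wa`, the value `some_argument` and the arch-specific
 * `arch_info` structure are placeholders set up by the caller.
 *
 *	struct reg_param reg_params[1];
 *	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
 *	buf_set_u32(reg_params[0].value, 0, 32, some_argument);
 *	int retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *			algorithm_wa->address, 0, 1000, arch_info);
 *	destroy_reg_param(&reg_params[0]);
 */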
798 /**
799 * Downloads a target-specific native code algorithm to the target,
800 * executes and leaves it running.
801 *
802 * @param target used to run the algorithm
803 * @param arch_info target-specific description of the algorithm.
804 */
805 int target_start_algorithm(struct target *target,
806 int num_mem_params, struct mem_param *mem_params,
807 int num_reg_params, struct reg_param *reg_params,
808 uint32_t entry_point, uint32_t exit_point,
809 void *arch_info)
810 {
811 int retval = ERROR_FAIL;
812
813 if (!target_was_examined(target)) {
814 LOG_ERROR("Target not examined yet");
815 goto done;
816 }
817 if (!target->type->start_algorithm) {
818 LOG_ERROR("Target type '%s' does not support %s",
819 target_type_name(target), __func__);
820 goto done;
821 }
822 if (target->running_alg) {
823 LOG_ERROR("Target is already running an algorithm");
824 goto done;
825 }
826
827 target->running_alg = true;
828 retval = target->type->start_algorithm(target,
829 num_mem_params, mem_params,
830 num_reg_params, reg_params,
831 entry_point, exit_point, arch_info);
832
833 done:
834 return retval;
835 }
836
837 /**
838 * Waits for an algorithm started with target_start_algorithm() to complete.
839 *
840 * @param target used to run the algorithm
841 * @param arch_info target-specific description of the algorithm.
842 */
843 int target_wait_algorithm(struct target *target,
844 int num_mem_params, struct mem_param *mem_params,
845 int num_reg_params, struct reg_param *reg_params,
846 uint32_t exit_point, int timeout_ms,
847 void *arch_info)
848 {
849 int retval = ERROR_FAIL;
850
851 if (!target->type->wait_algorithm) {
852 LOG_ERROR("Target type '%s' does not support %s",
853 target_type_name(target), __func__);
854 goto done;
855 }
856 if (!target->running_alg) {
857 LOG_ERROR("Target is not running an algorithm");
858 goto done;
859 }
860
861 retval = target->type->wait_algorithm(target,
862 num_mem_params, mem_params,
863 num_reg_params, reg_params,
864 exit_point, timeout_ms, arch_info);
865 if (retval != ERROR_TARGET_TIMEOUT)
866 target->running_alg = false;
867
868 done:
869 return retval;
870 }
871
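/* Example (sketch): the start/wait pair lets the caller feed data to the
 * target while the algorithm runs; the parameters mirror those of
 * target_run_algorithm() and are assumed to be set up as in the example
 * above.
 *
 *	retval = target_start_algorithm(target, 0, NULL, 1, reg_params,
 *			entry_point, exit_point, arch_info);
 *	// ... stream data to the target here ...
 *	if (retval == ERROR_OK)
 *		retval = target_wait_algorithm(target, 0, NULL, 1, reg_params,
 *				exit_point, 10000, arch_info);
 */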
872 /**
873 * Executes a target-specific native code algorithm in the target.
874 * It differs from target_run_algorithm in that the algorithm is asynchronous.
875 * Because of this it requires a compliant algorithm:
876 * see contrib/loaders/flash/stm32f1x.S for example.
877 *
878 * @param target used to run the algorithm
879 */
880
881 int target_run_flash_async_algorithm(struct target *target,
882 const uint8_t *buffer, uint32_t count, int block_size,
883 int num_mem_params, struct mem_param *mem_params,
884 int num_reg_params, struct reg_param *reg_params,
885 uint32_t buffer_start, uint32_t buffer_size,
886 uint32_t entry_point, uint32_t exit_point, void *arch_info)
887 {
888 int retval;
889 int timeout = 0;
890
891 const uint8_t *buffer_orig = buffer;
892
893 /* Set up working area. First word is write pointer, second word is read pointer,
894 * rest is fifo data area. */
895 uint32_t wp_addr = buffer_start;
896 uint32_t rp_addr = buffer_start + 4;
897 uint32_t fifo_start_addr = buffer_start + 8;
898 uint32_t fifo_end_addr = buffer_start + buffer_size;
899
900 uint32_t wp = fifo_start_addr;
901 uint32_t rp = fifo_start_addr;
902
903 /* validate block_size is 2^n */
904 assert(!block_size || !(block_size & (block_size - 1)));
905
906 retval = target_write_u32(target, wp_addr, wp);
907 if (retval != ERROR_OK)
908 return retval;
909 retval = target_write_u32(target, rp_addr, rp);
910 if (retval != ERROR_OK)
911 return retval;
912
913 /* Start up algorithm on target and let it idle while writing the first chunk */
914 retval = target_start_algorithm(target, num_mem_params, mem_params,
915 num_reg_params, reg_params,
916 entry_point,
917 exit_point,
918 arch_info);
919
920 if (retval != ERROR_OK) {
921 LOG_ERROR("error starting target flash write algorithm");
922 return retval;
923 }
924
925 while (count > 0) {
926
927 retval = target_read_u32(target, rp_addr, &rp);
928 if (retval != ERROR_OK) {
929 LOG_ERROR("failed to get read pointer");
930 break;
931 }
932
933 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
934 (size_t) (buffer - buffer_orig), count, wp, rp);
935
936 if (rp == 0) {
937 LOG_ERROR("flash write algorithm aborted by target");
938 retval = ERROR_FLASH_OPERATION_FAILED;
939 break;
940 }
941
942 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
943 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
944 break;
945 }
946
947 /* Count the number of bytes available in the fifo without
948 * crossing the wrap around. Make sure to not fill it completely,
949 * because that would make wp == rp and that's the empty condition. */
950 uint32_t thisrun_bytes;
951 if (rp > wp)
952 thisrun_bytes = rp - wp - block_size;
953 else if (rp > fifo_start_addr)
954 thisrun_bytes = fifo_end_addr - wp;
955 else
956 thisrun_bytes = fifo_end_addr - wp - block_size;
957
958 if (thisrun_bytes == 0) {
959 /* Throttle polling a bit if transfer is (much) faster than flash
960 * programming. The exact delay shouldn't matter as long as it's
961 * less than buffer size / flash speed. This is very unlikely to
962 * run when using high latency connections such as USB. */
963 alive_sleep(10);
964
965 /* To stop an infinite loop on some targets, check and increment a timeout.
966 * This issue was observed on a Stellaris using the new ICDI interface. */
967 if (timeout++ >= 500) {
968 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
969 return ERROR_FLASH_OPERATION_FAILED;
970 }
971 continue;
972 }
973
974 /* reset our timeout */
975 timeout = 0;
976
977 /* Limit to the amount of data we actually want to write */
978 if (thisrun_bytes > count * block_size)
979 thisrun_bytes = count * block_size;
980
981 /* Write data to fifo */
982 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
983 if (retval != ERROR_OK)
984 break;
985
986 /* Update counters and wrap write pointer */
987 buffer += thisrun_bytes;
988 count -= thisrun_bytes / block_size;
989 wp += thisrun_bytes;
990 if (wp >= fifo_end_addr)
991 wp = fifo_start_addr;
992
993 /* Store updated write pointer to target */
994 retval = target_write_u32(target, wp_addr, wp);
995 if (retval != ERROR_OK)
996 break;
997 }
998
999 if (retval != ERROR_OK) {
1000 /* abort flash write algorithm on target */
1001 target_write_u32(target, wp_addr, 0);
1002 }
1003
1004 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1005 num_reg_params, reg_params,
1006 exit_point,
1007 10000,
1008 arch_info);
1009
1010 if (retval2 != ERROR_OK) {
1011 LOG_ERROR("error waiting for target flash write algorithm");
1012 retval = retval2;
1013 }
1014
1015 return retval;
1016 }
1017
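/* Example (sketch): how a flash driver might invoke the async algorithm,
 * with a FIFO working area `source` allocated beforehand and
 * `write_algorithm` holding the downloaded loader; the halfword block size
 * (2), the register parameters and `armv7m_info` are driver-specific
 * placeholders.
 *
 *	retval = target_run_flash_async_algorithm(target, buffer, count, 2,
 *			0, NULL, ARRAY_SIZE(reg_params), reg_params,
 *			source->address, source->size,
 *			write_algorithm->address, 0, &armv7m_info);
 */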
1018 int target_read_memory(struct target *target,
1019 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1020 {
1021 if (!target_was_examined(target)) {
1022 LOG_ERROR("Target not examined yet");
1023 return ERROR_FAIL;
1024 }
1025 if (!target->type->read_memory) {
1026 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1027 return ERROR_FAIL;
1028 }
1029 return target->type->read_memory(target, address, size, count, buffer);
1030 }
1031
1032 int target_read_phys_memory(struct target *target,
1033 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1034 {
1035 if (!target_was_examined(target)) {
1036 LOG_ERROR("Target not examined yet");
1037 return ERROR_FAIL;
1038 }
1039 if (!target->type->read_phys_memory) {
1040 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1041 return ERROR_FAIL;
1042 }
1043 return target->type->read_phys_memory(target, address, size, count, buffer);
1044 }
1045
1046 int target_write_memory(struct target *target,
1047 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1048 {
1049 if (!target_was_examined(target)) {
1050 LOG_ERROR("Target not examined yet");
1051 return ERROR_FAIL;
1052 }
1053 if (!target->type->write_memory) {
1054 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1055 return ERROR_FAIL;
1056 }
1057 return target->type->write_memory(target, address, size, count, buffer);
1058 }
1059
1060 int target_write_phys_memory(struct target *target,
1061 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1062 {
1063 if (!target_was_examined(target)) {
1064 LOG_ERROR("Target not examined yet");
1065 return ERROR_FAIL;
1066 }
1067 if (!target->type->write_phys_memory) {
1068 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1069 return ERROR_FAIL;
1070 }
1071 return target->type->write_phys_memory(target, address, size, count, buffer);
1072 }
1073
1074 int target_add_breakpoint(struct target *target,
1075 struct breakpoint *breakpoint)
1076 {
1077 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1078 LOG_WARNING("target %s is not halted", target_name(target));
1079 return ERROR_TARGET_NOT_HALTED;
1080 }
1081 return target->type->add_breakpoint(target, breakpoint);
1082 }
1083
1084 int target_add_context_breakpoint(struct target *target,
1085 struct breakpoint *breakpoint)
1086 {
1087 if (target->state != TARGET_HALTED) {
1088 LOG_WARNING("target %s is not halted", target_name(target));
1089 return ERROR_TARGET_NOT_HALTED;
1090 }
1091 return target->type->add_context_breakpoint(target, breakpoint);
1092 }
1093
1094 int target_add_hybrid_breakpoint(struct target *target,
1095 struct breakpoint *breakpoint)
1096 {
1097 if (target->state != TARGET_HALTED) {
1098 LOG_WARNING("target %s is not halted", target_name(target));
1099 return ERROR_TARGET_NOT_HALTED;
1100 }
1101 return target->type->add_hybrid_breakpoint(target, breakpoint);
1102 }
1103
1104 int target_remove_breakpoint(struct target *target,
1105 struct breakpoint *breakpoint)
1106 {
1107 return target->type->remove_breakpoint(target, breakpoint);
1108 }
1109
1110 int target_add_watchpoint(struct target *target,
1111 struct watchpoint *watchpoint)
1112 {
1113 if (target->state != TARGET_HALTED) {
1114 LOG_WARNING("target %s is not halted", target_name(target));
1115 return ERROR_TARGET_NOT_HALTED;
1116 }
1117 return target->type->add_watchpoint(target, watchpoint);
1118 }
1119 int target_remove_watchpoint(struct target *target,
1120 struct watchpoint *watchpoint)
1121 {
1122 return target->type->remove_watchpoint(target, watchpoint);
1123 }
1124 int target_hit_watchpoint(struct target *target,
1125 struct watchpoint **hit_watchpoint)
1126 {
1127 if (target->state != TARGET_HALTED) {
1128 LOG_WARNING("target %s is not halted", target->cmd_name);
1129 return ERROR_TARGET_NOT_HALTED;
1130 }
1131
1132 if (target->type->hit_watchpoint == NULL) {
1133 /* For backward compatibility: if hit_watchpoint is not implemented,
1134 * return ERROR_FAIL so that gdb_server does not act on bogus
1135 * watchpoint information. */
1136 return ERROR_FAIL;
1137 }
1138
1139 return target->type->hit_watchpoint(target, hit_watchpoint);
1140 }
1141
1142 int target_get_gdb_reg_list(struct target *target,
1143 struct reg **reg_list[], int *reg_list_size,
1144 enum target_register_class reg_class)
1145 {
1146 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1147 }
1148 int target_step(struct target *target,
1149 int current, uint32_t address, int handle_breakpoints)
1150 {
1151 return target->type->step(target, current, address, handle_breakpoints);
1152 }
1153
1154 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1155 {
1156 if (target->state != TARGET_HALTED) {
1157 LOG_WARNING("target %s is not halted", target->cmd_name);
1158 return ERROR_TARGET_NOT_HALTED;
1159 }
1160 return target->type->get_gdb_fileio_info(target, fileio_info);
1161 }
1162
1163 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1164 {
1165 if (target->state != TARGET_HALTED) {
1166 LOG_WARNING("target %s is not halted", target->cmd_name);
1167 return ERROR_TARGET_NOT_HALTED;
1168 }
1169 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1170 }
1171
1172 int target_profiling(struct target *target, uint32_t *samples,
1173 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1174 {
1175 if (target->state != TARGET_HALTED) {
1176 LOG_WARNING("target %s is not halted", target->cmd_name);
1177 return ERROR_TARGET_NOT_HALTED;
1178 }
1179 return target->type->profiling(target, samples, max_num_samples,
1180 num_samples, seconds);
1181 }
1182
1183 /**
1184 * Reset the @c examined flag for the given target.
1185 * Pure paranoia -- targets are zeroed on allocation.
1186 */
1187 static void target_reset_examined(struct target *target)
1188 {
1189 target->examined = false;
1190 }
1191
1192 static int handle_target(void *priv);
1193
1194 static int target_init_one(struct command_context *cmd_ctx,
1195 struct target *target)
1196 {
1197 target_reset_examined(target);
1198
1199 struct target_type *type = target->type;
1200 if (type->examine == NULL)
1201 type->examine = default_examine;
1202
1203 if (type->check_reset == NULL)
1204 type->check_reset = default_check_reset;
1205
1206 assert(type->init_target != NULL);
1207
1208 int retval = type->init_target(cmd_ctx, target);
1209 if (ERROR_OK != retval) {
1210 LOG_ERROR("target '%s' init failed", target_name(target));
1211 return retval;
1212 }
1213
1214 /* Sanity-check MMU support ... stub in what we must, to help
1215 * implement it in stages, but warn if we need to do so.
1216 */
1217 if (type->mmu) {
1218 if (type->virt2phys == NULL) {
1219 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1220 type->virt2phys = identity_virt2phys;
1221 }
1222 } else {
1223 /* Make sure no-MMU targets all behave the same: make no
1224 * distinction between physical and virtual addresses, and
1225 * ensure that virt2phys() is always an identity mapping.
1226 */
1227 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1228 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1229
1230 type->mmu = no_mmu;
1231 type->write_phys_memory = type->write_memory;
1232 type->read_phys_memory = type->read_memory;
1233 type->virt2phys = identity_virt2phys;
1234 }
1235
1236 if (target->type->read_buffer == NULL)
1237 target->type->read_buffer = target_read_buffer_default;
1238
1239 if (target->type->write_buffer == NULL)
1240 target->type->write_buffer = target_write_buffer_default;
1241
1242 if (target->type->get_gdb_fileio_info == NULL)
1243 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1244
1245 if (target->type->gdb_fileio_end == NULL)
1246 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1247
1248 if (target->type->profiling == NULL)
1249 target->type->profiling = target_profiling_default;
1250
1251 return ERROR_OK;
1252 }
1253
1254 static int target_init(struct command_context *cmd_ctx)
1255 {
1256 struct target *target;
1257 int retval;
1258
1259 for (target = all_targets; target; target = target->next) {
1260 retval = target_init_one(cmd_ctx, target);
1261 if (ERROR_OK != retval)
1262 return retval;
1263 }
1264
1265 if (!all_targets)
1266 return ERROR_OK;
1267
1268 retval = target_register_user_commands(cmd_ctx);
1269 if (ERROR_OK != retval)
1270 return retval;
1271
1272 retval = target_register_timer_callback(&handle_target,
1273 polling_interval, 1, cmd_ctx->interp);
1274 if (ERROR_OK != retval)
1275 return retval;
1276
1277 return ERROR_OK;
1278 }
1279
1280 COMMAND_HANDLER(handle_target_init_command)
1281 {
1282 int retval;
1283
1284 if (CMD_ARGC != 0)
1285 return ERROR_COMMAND_SYNTAX_ERROR;
1286
1287 static bool target_initialized;
1288 if (target_initialized) {
1289 LOG_INFO("'target init' has already been called");
1290 return ERROR_OK;
1291 }
1292 target_initialized = true;
1293
1294 retval = command_run_line(CMD_CTX, "init_targets");
1295 if (ERROR_OK != retval)
1296 return retval;
1297
1298 retval = command_run_line(CMD_CTX, "init_target_events");
1299 if (ERROR_OK != retval)
1300 return retval;
1301
1302 retval = command_run_line(CMD_CTX, "init_board");
1303 if (ERROR_OK != retval)
1304 return retval;
1305
1306 LOG_DEBUG("Initializing targets...");
1307 return target_init(CMD_CTX);
1308 }
1309
1310 int target_register_event_callback(int (*callback)(struct target *target,
1311 enum target_event event, void *priv), void *priv)
1312 {
1313 struct target_event_callback **callbacks_p = &target_event_callbacks;
1314
1315 if (callback == NULL)
1316 return ERROR_COMMAND_SYNTAX_ERROR;
1317
1318 if (*callbacks_p) {
1319 while ((*callbacks_p)->next)
1320 callbacks_p = &((*callbacks_p)->next);
1321 callbacks_p = &((*callbacks_p)->next);
1322 }
1323
1324 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1325 (*callbacks_p)->callback = callback;
1326 (*callbacks_p)->priv = priv;
1327 (*callbacks_p)->next = NULL;
1328
1329 return ERROR_OK;
1330 }
1331
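/* Example (sketch): a callback that logs halt events; `my_event_handler`
 * and the registration site are illustrative only.
 *
 *	static int my_event_handler(struct target *target,
 *			enum target_event event, void *priv)
 *	{
 *		if (event == TARGET_EVENT_HALTED)
 *			LOG_INFO("%s halted", target_name(target));
 *		return ERROR_OK;
 *	}
 *
 *	target_register_event_callback(my_event_handler, NULL);
 */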
1332 int target_register_reset_callback(int (*callback)(struct target *target,
1333 enum target_reset_mode reset_mode, void *priv), void *priv)
1334 {
1335 struct target_reset_callback *entry;
1336
1337 if (callback == NULL)
1338 return ERROR_COMMAND_SYNTAX_ERROR;
1339
1340 entry = malloc(sizeof(struct target_reset_callback));
1341 if (entry == NULL) {
1342 LOG_ERROR("error allocating buffer for reset callback entry");
1343 return ERROR_COMMAND_SYNTAX_ERROR;
1344 }
1345
1346 entry->callback = callback;
1347 entry->priv = priv;
1348 list_add(&entry->list, &target_reset_callback_list);
1349
1350
1351 return ERROR_OK;
1352 }
1353
1354 int target_register_trace_callback(int (*callback)(struct target *target,
1355 size_t len, uint8_t *data, void *priv), void *priv)
1356 {
1357 struct target_trace_callback *entry;
1358
1359 if (callback == NULL)
1360 return ERROR_COMMAND_SYNTAX_ERROR;
1361
1362 entry = malloc(sizeof(struct target_trace_callback));
1363 if (entry == NULL) {
1364 LOG_ERROR("error allocating buffer for trace callback entry");
1365 return ERROR_COMMAND_SYNTAX_ERROR;
1366 }
1367
1368 entry->callback = callback;
1369 entry->priv = priv;
1370 list_add(&entry->list, &target_trace_callback_list);
1371
1372
1373 return ERROR_OK;
1374 }
1375
1376 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1377 {
1378 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1379 struct timeval now;
1380
1381 if (callback == NULL)
1382 return ERROR_COMMAND_SYNTAX_ERROR;
1383
1384 if (*callbacks_p) {
1385 while ((*callbacks_p)->next)
1386 callbacks_p = &((*callbacks_p)->next);
1387 callbacks_p = &((*callbacks_p)->next);
1388 }
1389
1390 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1391 (*callbacks_p)->callback = callback;
1392 (*callbacks_p)->periodic = periodic;
1393 (*callbacks_p)->time_ms = time_ms;
1394 (*callbacks_p)->removed = false;
1395
1396 gettimeofday(&now, NULL);
1397 (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
1398 time_ms -= (time_ms % 1000);
1399 (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
1400 if ((*callbacks_p)->when.tv_usec > 1000000) {
1401 (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
1402 (*callbacks_p)->when.tv_sec += 1;
1403 }
1404
1405 (*callbacks_p)->priv = priv;
1406 (*callbacks_p)->next = NULL;
1407
1408 return ERROR_OK;
1409 }
1410
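/* Example (sketch): registering a periodic 100 ms callback, similar to the
 * way handle_target() is hooked up in target_init(); `my_tick` is an
 * illustrative name.
 *
 *	static int my_tick(void *priv)
 *	{
 *		// runs roughly every 100 ms from the main event loop
 *		return ERROR_OK;
 *	}
 *
 *	target_register_timer_callback(my_tick, 100, 1, NULL);
 */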
1411 int target_unregister_event_callback(int (*callback)(struct target *target,
1412 enum target_event event, void *priv), void *priv)
1413 {
1414 struct target_event_callback **p = &target_event_callbacks;
1415 struct target_event_callback *c = target_event_callbacks;
1416
1417 if (callback == NULL)
1418 return ERROR_COMMAND_SYNTAX_ERROR;
1419
1420 while (c) {
1421 struct target_event_callback *next = c->next;
1422 if ((c->callback == callback) && (c->priv == priv)) {
1423 *p = next;
1424 free(c);
1425 return ERROR_OK;
1426 } else
1427 p = &(c->next);
1428 c = next;
1429 }
1430
1431 return ERROR_OK;
1432 }
1433
1434 int target_unregister_reset_callback(int (*callback)(struct target *target,
1435 enum target_reset_mode reset_mode, void *priv), void *priv)
1436 {
1437 struct target_reset_callback *entry;
1438
1439 if (callback == NULL)
1440 return ERROR_COMMAND_SYNTAX_ERROR;
1441
1442 list_for_each_entry(entry, &target_reset_callback_list, list) {
1443 if (entry->callback == callback && entry->priv == priv) {
1444 list_del(&entry->list);
1445 free(entry);
1446 break;
1447 }
1448 }
1449
1450 return ERROR_OK;
1451 }
1452
1453 int target_unregister_trace_callback(int (*callback)(struct target *target,
1454 size_t len, uint8_t *data, void *priv), void *priv)
1455 {
1456 struct target_trace_callback *entry;
1457
1458 if (callback == NULL)
1459 return ERROR_COMMAND_SYNTAX_ERROR;
1460
1461 list_for_each_entry(entry, &target_trace_callback_list, list) {
1462 if (entry->callback == callback && entry->priv == priv) {
1463 list_del(&entry->list);
1464 free(entry);
1465 break;
1466 }
1467 }
1468
1469 return ERROR_OK;
1470 }
1471
1472 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1473 {
1474 if (callback == NULL)
1475 return ERROR_COMMAND_SYNTAX_ERROR;
1476
1477 for (struct target_timer_callback *c = target_timer_callbacks;
1478 c; c = c->next) {
1479 if ((c->callback == callback) && (c->priv == priv)) {
1480 c->removed = true;
1481 return ERROR_OK;
1482 }
1483 }
1484
1485 return ERROR_FAIL;
1486 }
1487
1488 int target_call_event_callbacks(struct target *target, enum target_event event)
1489 {
1490 struct target_event_callback *callback = target_event_callbacks;
1491 struct target_event_callback *next_callback;
1492
1493 if (event == TARGET_EVENT_HALTED) {
1494 /* deliver the gdb-halt event before the generic halted callbacks */
1495 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1496 }
1497
1498 LOG_DEBUG("target event %i (%s)", event,
1499 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1500
1501 target_handle_event(target, event);
1502
1503 while (callback) {
1504 next_callback = callback->next;
1505 callback->callback(target, event, callback->priv);
1506 callback = next_callback;
1507 }
1508
1509 return ERROR_OK;
1510 }
1511
1512 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1513 {
1514 struct target_reset_callback *callback;
1515
1516 LOG_DEBUG("target reset %i (%s)", reset_mode,
1517 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1518
1519 list_for_each_entry(callback, &target_reset_callback_list, list)
1520 callback->callback(target, reset_mode, callback->priv);
1521
1522 return ERROR_OK;
1523 }
1524
1525 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1526 {
1527 struct target_trace_callback *callback;
1528
1529 list_for_each_entry(callback, &target_trace_callback_list, list)
1530 callback->callback(target, len, data, callback->priv);
1531
1532 return ERROR_OK;
1533 }
1534
1535 static int target_timer_callback_periodic_restart(
1536 struct target_timer_callback *cb, struct timeval *now)
1537 {
1538 int time_ms = cb->time_ms;
1539 cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
1540 time_ms -= (time_ms % 1000);
1541 cb->when.tv_sec = now->tv_sec + time_ms / 1000;
1542 if (cb->when.tv_usec > 1000000) {
1543 cb->when.tv_usec = cb->when.tv_usec - 1000000;
1544 cb->when.tv_sec += 1;
1545 }
1546 return ERROR_OK;
1547 }
1548
1549 static int target_call_timer_callback(struct target_timer_callback *cb,
1550 struct timeval *now)
1551 {
1552 cb->callback(cb->priv);
1553
1554 if (cb->periodic)
1555 return target_timer_callback_periodic_restart(cb, now);
1556
1557 return target_unregister_timer_callback(cb->callback, cb->priv);
1558 }
1559
1560 static int target_call_timer_callbacks_check_time(int checktime)
1561 {
1562 static bool callback_processing;
1563
1564 /* Do not allow nesting */
1565 if (callback_processing)
1566 return ERROR_OK;
1567
1568 callback_processing = true;
1569
1570 keep_alive();
1571
1572 struct timeval now;
1573 gettimeofday(&now, NULL);
1574
1575 /* Store an address of the place containing a pointer to the
1576 * next item; initially, that's a standalone "root of the
1577 * list" variable. */
1578 struct target_timer_callback **callback = &target_timer_callbacks;
1579 while (*callback) {
1580 if ((*callback)->removed) {
1581 struct target_timer_callback *p = *callback;
1582 *callback = (*callback)->next;
1583 free(p);
1584 continue;
1585 }
1586
1587 bool call_it = (*callback)->callback &&
1588 ((!checktime && (*callback)->periodic) ||
1589 now.tv_sec > (*callback)->when.tv_sec ||
1590 (now.tv_sec == (*callback)->when.tv_sec &&
1591 now.tv_usec >= (*callback)->when.tv_usec));
1592
1593 if (call_it)
1594 target_call_timer_callback(*callback, &now);
1595
1596 callback = &(*callback)->next;
1597 }
1598
1599 callback_processing = false;
1600 return ERROR_OK;
1601 }
1602
1603 int target_call_timer_callbacks(void)
1604 {
1605 return target_call_timer_callbacks_check_time(1);
1606 }
1607
1608 /* invoke periodic callbacks immediately */
1609 int target_call_timer_callbacks_now(void)
1610 {
1611 return target_call_timer_callbacks_check_time(0);
1612 }
1613
1614 /* Prints the working area layout for debug purposes */
1615 static void print_wa_layout(struct target *target)
1616 {
1617 struct working_area *c = target->working_areas;
1618
1619 while (c) {
1620 LOG_DEBUG("%c%c 0x%08"PRIx32"-0x%08"PRIx32" (%"PRIu32" bytes)",
1621 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1622 c->address, c->address + c->size - 1, c->size);
1623 c = c->next;
1624 }
1625 }
1626
1627 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1628 static void target_split_working_area(struct working_area *area, uint32_t size)
1629 {
1630 assert(area->free); /* Shouldn't split an allocated area */
1631 assert(size <= area->size); /* Caller should guarantee this */
1632
1633 /* Split only if not already the right size */
1634 if (size < area->size) {
1635 struct working_area *new_wa = malloc(sizeof(*new_wa));
1636
1637 if (new_wa == NULL)
1638 return;
1639
1640 new_wa->next = area->next;
1641 new_wa->size = area->size - size;
1642 new_wa->address = area->address + size;
1643 new_wa->backup = NULL;
1644 new_wa->user = NULL;
1645 new_wa->free = true;
1646
1647 area->next = new_wa;
1648 area->size = size;
1649
1650 /* If backup memory was allocated to this area, it has the wrong size
1651 * now so free it and it will be reallocated if/when needed */
1652 if (area->backup) {
1653 free(area->backup);
1654 area->backup = NULL;
1655 }
1656 }
1657 }
1658
1659 /* Merge all adjacent free areas into one */
1660 static void target_merge_working_areas(struct target *target)
1661 {
1662 struct working_area *c = target->working_areas;
1663
1664 while (c && c->next) {
1665 assert(c->next->address == c->address + c->size); /* This is an invariant */
1666
1667 /* Find two adjacent free areas */
1668 if (c->free && c->next->free) {
1669 /* Merge the last into the first */
1670 c->size += c->next->size;
1671
1672 /* Remove the last */
1673 struct working_area *to_be_freed = c->next;
1674 c->next = c->next->next;
1675 if (to_be_freed->backup)
1676 free(to_be_freed->backup);
1677 free(to_be_freed);
1678
1679 /* If backup memory was allocated to the remaining area, it now has
1680 * the wrong size */
1681 if (c->backup) {
1682 free(c->backup);
1683 c->backup = NULL;
1684 }
1685 } else {
1686 c = c->next;
1687 }
1688 }
1689 }
1690
1691 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1692 {
1693 /* Reevaluate working area address based on MMU state*/
1694 if (target->working_areas == NULL) {
1695 int retval;
1696 int enabled;
1697
1698 retval = target->type->mmu(target, &enabled);
1699 if (retval != ERROR_OK)
1700 return retval;
1701
1702 if (!enabled) {
1703 if (target->working_area_phys_spec) {
1704 LOG_DEBUG("MMU disabled, using physical "
1705 "address for working memory 0x%08"PRIx32,
1706 target->working_area_phys);
1707 target->working_area = target->working_area_phys;
1708 } else {
1709 LOG_ERROR("No working memory available. "
1710 "Specify -work-area-phys to target.");
1711 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1712 }
1713 } else {
1714 if (target->working_area_virt_spec) {
1715 LOG_DEBUG("MMU enabled, using virtual "
1716 "address for working memory 0x%08"PRIx32,
1717 target->working_area_virt);
1718 target->working_area = target->working_area_virt;
1719 } else {
1720 LOG_ERROR("No working memory available. "
1721 "Specify -work-area-virt to target.");
1722 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1723 }
1724 }
1725
1726 /* Set up initial working area on first call */
1727 struct working_area *new_wa = malloc(sizeof(*new_wa));
1728 if (new_wa) {
1729 new_wa->next = NULL;
1730 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
1731 new_wa->address = target->working_area;
1732 new_wa->backup = NULL;
1733 new_wa->user = NULL;
1734 new_wa->free = true;
1735 }
1736
1737 target->working_areas = new_wa;
1738 }
1739
1740 /* only allocate in multiples of 4 bytes */
1741 if (size % 4)
1742 size = (size + 3) & (~3UL);
1743
1744 struct working_area *c = target->working_areas;
1745
1746 /* Find the first large enough working area */
1747 while (c) {
1748 if (c->free && c->size >= size)
1749 break;
1750 c = c->next;
1751 }
1752
1753 if (c == NULL)
1754 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1755
1756 /* Split the working area into the requested size */
1757 target_split_working_area(c, size);
1758
1759 LOG_DEBUG("allocated new working area of %"PRIu32" bytes at address 0x%08"PRIx32, size, c->address);
1760
1761 if (target->backup_working_area) {
1762 if (c->backup == NULL) {
1763 c->backup = malloc(c->size);
1764 if (c->backup == NULL)
1765 return ERROR_FAIL;
1766 }
1767
1768 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
1769 if (retval != ERROR_OK)
1770 return retval;
1771 }
1772
1773 /* mark as used, and return the new (reused) area */
1774 c->free = false;
1775 *area = c;
1776
1777 /* user pointer */
1778 c->user = area;
1779
1780 print_wa_layout(target);
1781
1782 return ERROR_OK;
1783 }
1784
1785 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1786 {
1787 int retval;
1788
1789 retval = target_alloc_working_area_try(target, size, area);
1790 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1791 LOG_WARNING("not enough working area available (requested %"PRIu32")", size);
1792 return retval;
1793
1794 }
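
/* Illustrative usage sketch (not taken from a specific caller): a typical
 * user of the working area allocator reserves a scratch area, uses its
 * target address, and frees it again; the 256-byte size and the write of a
 * hypothetical "data" buffer are only examples.
 *
 *     struct working_area *wa = NULL;
 *     if (target_alloc_working_area(target, 256, &wa) != ERROR_OK)
 *         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *     retval = target_write_buffer(target, wa->address, 256, data);
 *     target_free_working_area(target, wa);
 *
 * Note that freeing also clears the caller's pointer through the c->user
 * back-reference stored at allocation time.
 */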
1795
1796 static int target_restore_working_area(struct target *target, struct working_area *area)
1797 {
1798 int retval = ERROR_OK;
1799
1800 if (target->backup_working_area && area->backup != NULL) {
1801 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1802 if (retval != ERROR_OK)
1803 LOG_ERROR("failed to restore %"PRIu32" bytes of working area at address 0x%08"PRIx32,
1804 area->size, area->address);
1805 }
1806
1807 return retval;
1808 }
1809
1810 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1811 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1812 {
1813 int retval = ERROR_OK;
1814
1815 if (area->free)
1816 return retval;
1817
1818 if (restore) {
1819 retval = target_restore_working_area(target, area);
1820 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1821 if (retval != ERROR_OK)
1822 return retval;
1823 }
1824
1825 area->free = true;
1826
1827 LOG_DEBUG("freed %"PRIu32" bytes of working area at address 0x%08"PRIx32,
1828 area->size, area->address);
1829
1830 /* mark user pointer invalid */
1831 /* TODO: Is this really safe? It points to some previous caller's memory.
1832 * How could we know that the area pointer is still in that place and not
1833 * some other vital data? What's the purpose of this, anyway? */
1834 *area->user = NULL;
1835 area->user = NULL;
1836
1837 target_merge_working_areas(target);
1838
1839 print_wa_layout(target);
1840
1841 return retval;
1842 }
1843
1844 int target_free_working_area(struct target *target, struct working_area *area)
1845 {
1846 return target_free_working_area_restore(target, area, 1);
1847 }
1848
1849 void target_quit(void)
1850 {
1851 struct target_event_callback *pe = target_event_callbacks;
1852 while (pe) {
1853 struct target_event_callback *t = pe->next;
1854 free(pe);
1855 pe = t;
1856 }
1857 target_event_callbacks = NULL;
1858
1859 struct target_timer_callback *pt = target_timer_callbacks;
1860 while (pt) {
1861 struct target_timer_callback *t = pt->next;
1862 free(pt);
1863 pt = t;
1864 }
1865 target_timer_callbacks = NULL;
1866
1867 for (struct target *target = all_targets;
1868 target; target = target->next) {
1869 if (target->type->deinit_target)
1870 target->type->deinit_target(target);
1871 }
1872 }
1873
1874 /* Free resources and restore memory; if restoring memory fails,
1875 * free up the resources anyway.
1876 */
1877 static void target_free_all_working_areas_restore(struct target *target, int restore)
1878 {
1879 struct working_area *c = target->working_areas;
1880
1881 LOG_DEBUG("freeing all working areas");
1882
1883 /* Loop through all areas, restoring the allocated ones and marking them as free */
1884 while (c) {
1885 if (!c->free) {
1886 if (restore)
1887 target_restore_working_area(target, c);
1888 c->free = true;
1889 *c->user = NULL; /* Same as above */
1890 c->user = NULL;
1891 }
1892 c = c->next;
1893 }
1894
1895 /* Run a merge pass to combine all areas into one */
1896 target_merge_working_areas(target);
1897
1898 print_wa_layout(target);
1899 }
1900
1901 void target_free_all_working_areas(struct target *target)
1902 {
1903 target_free_all_working_areas_restore(target, 1);
1904 }
1905
1906 /* Find the largest number of bytes that can be allocated */
1907 uint32_t target_get_working_area_avail(struct target *target)
1908 {
1909 struct working_area *c = target->working_areas;
1910 uint32_t max_size = 0;
1911
1912 if (c == NULL)
1913 return target->working_area_size;
1914
1915 while (c) {
1916 if (c->free && max_size < c->size)
1917 max_size = c->size;
1918
1919 c = c->next;
1920 }
1921
1922 return max_size;
1923 }
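
/* Illustrative sketch only: a flash or algorithm driver might size its
 * scratch buffer from the largest free area before allocating it; the
 * 16 KiB cap is a made-up, algorithm-specific limit.
 *
 *     uint32_t buf_size = target_get_working_area_avail(target);
 *     if (buf_size > 16384)
 *         buf_size = 16384;
 *     struct working_area *wa = NULL;
 *     int retval = target_alloc_working_area_try(target, buf_size, &wa);
 *
 * The return value still has to be checked, since the answer is only a
 * snapshot of the current layout.
 */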
1924
1925 int target_arch_state(struct target *target)
1926 {
1927 int retval;
1928 if (target == NULL) {
1929 LOG_USER("No target has been configured");
1930 return ERROR_OK;
1931 }
1932
1933 LOG_USER("target state: %s", target_state_name(target));
1934
1935 if (target->state != TARGET_HALTED)
1936 return ERROR_OK;
1937
1938 retval = target->type->arch_state(target);
1939 return retval;
1940 }
1941
1942 static int target_get_gdb_fileio_info_default(struct target *target,
1943 struct gdb_fileio_info *fileio_info)
1944 {
1945 /* If the target does not support semi-hosting functions, it has
1946 no need to provide a .get_gdb_fileio_info callback.
1947 Just returning ERROR_FAIL here makes gdb_server report "Txx"
1948 (target halted) every time. */
1949 return ERROR_FAIL;
1950 }
1951
1952 static int target_gdb_fileio_end_default(struct target *target,
1953 int retcode, int fileio_errno, bool ctrl_c)
1954 {
1955 return ERROR_OK;
1956 }
1957
1958 static int target_profiling_default(struct target *target, uint32_t *samples,
1959 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1960 {
1961 struct timeval timeout, now;
1962
1963 gettimeofday(&timeout, NULL);
1964 timeval_add_time(&timeout, seconds, 0);
1965
1966 LOG_INFO("Starting profiling. Halting and resuming the"
1967 " target as often as we can...");
1968
1969 uint32_t sample_count = 0;
1970 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
1971 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
1972
1973 int retval = ERROR_OK;
1974 for (;;) {
1975 target_poll(target);
1976 if (target->state == TARGET_HALTED) {
1977 uint32_t t = buf_get_u32(reg->value, 0, 32);
1978 samples[sample_count++] = t;
1979 /* current pc, addr = 0, do not handle breakpoints, not debugging */
1980 retval = target_resume(target, 1, 0, 0, 0);
1981 target_poll(target);
1982 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
1983 } else if (target->state == TARGET_RUNNING) {
1984 /* We want to quickly sample the PC. */
1985 retval = target_halt(target);
1986 } else {
1987 LOG_INFO("Target not halted or running");
1988 retval = ERROR_OK;
1989 break;
1990 }
1991
1992 if (retval != ERROR_OK)
1993 break;
1994
1995 gettimeofday(&now, NULL);
1996 if ((sample_count >= max_num_samples) || (now.tv_sec > timeout.tv_sec) ||
1997 ((now.tv_sec == timeout.tv_sec) && (now.tv_usec >= timeout.tv_usec))) {
1998 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
1999 break;
2000 }
2001 }
2002
2003 *num_samples = sample_count;
2004 return retval;
2005 }
2006
2007 /* A single aligned half-word or word is guaranteed to use a 16 or
2008 * 32 bit access respectively; otherwise data is transferred as
2009 * quickly as possible.
2010 */
2011 int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
2012 {
2013 LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
2014 (int)size, (unsigned)address);
2015
2016 if (!target_was_examined(target)) {
2017 LOG_ERROR("Target not examined yet");
2018 return ERROR_FAIL;
2019 }
2020
2021 if (size == 0)
2022 return ERROR_OK;
2023
2024 if ((address + size - 1) < address) {
2025 /* GDB can request this when e.g. PC is 0xfffffffc */
2026 LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
2027 (unsigned)address,
2028 (unsigned)size);
2029 return ERROR_FAIL;
2030 }
2031
2032 return target->type->write_buffer(target, address, size, buffer);
2033 }
2034
2035 static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t count, const uint8_t *buffer)
2036 {
2037 uint32_t size;
2038
2039 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2040 * will have something to do with the size we leave to it. */
2041 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2042 if (address & size) {
2043 int retval = target_write_memory(target, address, size, 1, buffer);
2044 if (retval != ERROR_OK)
2045 return retval;
2046 address += size;
2047 count -= size;
2048 buffer += size;
2049 }
2050 }
2051
2052 /* Write the data with as large access size as possible. */
2053 for (; size > 0; size /= 2) {
2054 uint32_t aligned = count - count % size;
2055 if (aligned > 0) {
2056 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2057 if (retval != ERROR_OK)
2058 return retval;
2059 address += aligned;
2060 count -= aligned;
2061 buffer += aligned;
2062 }
2063 }
2064
2065 return ERROR_OK;
2066 }
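
/* Worked example (illustrative): for a hypothetical write of 11 bytes
 * starting at address 0x2001, the head-alignment loop above issues
 *
 *     1 x 1-byte access at 0x2001   (aligns the address to 2)
 *     1 x 2-byte access at 0x2002   (aligns the address to 4)
 *
 * and the second loop then writes 2 x 4-byte words at 0x2004; the trailing
 * passes with size 2 and 1 find no remaining data.
 */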
2067
2068 /* A single aligned half-word or word is guaranteed to use a 16 or
2069 * 32 bit access respectively; otherwise data is transferred as
2070 * quickly as possible.
2071 */
2072 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
2073 {
2074 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
2075 (int)size, (unsigned)address);
2076
2077 if (!target_was_examined(target)) {
2078 LOG_ERROR("Target not examined yet");
2079 return ERROR_FAIL;
2080 }
2081
2082 if (size == 0)
2083 return ERROR_OK;
2084
2085 if ((address + size - 1) < address) {
2086 /* GDB can request this when e.g. PC is 0xfffffffc */
2087 LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
2088 address,
2089 size);
2090 return ERROR_FAIL;
2091 }
2092
2093 return target->type->read_buffer(target, address, size, buffer);
2094 }
2095
2096 static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t count, uint8_t *buffer)
2097 {
2098 uint32_t size;
2099
2100 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2101 * will have something to do with the size we leave to it. */
2102 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2103 if (address & size) {
2104 int retval = target_read_memory(target, address, size, 1, buffer);
2105 if (retval != ERROR_OK)
2106 return retval;
2107 address += size;
2108 count -= size;
2109 buffer += size;
2110 }
2111 }
2112
2113 /* Read the data with as large access size as possible. */
2114 for (; size > 0; size /= 2) {
2115 uint32_t aligned = count - count % size;
2116 if (aligned > 0) {
2117 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2118 if (retval != ERROR_OK)
2119 return retval;
2120 address += aligned;
2121 count -= aligned;
2122 buffer += aligned;
2123 }
2124 }
2125
2126 return ERROR_OK;
2127 }
2128
2129 int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
2130 {
2131 uint8_t *buffer;
2132 int retval;
2133 uint32_t i;
2134 uint32_t checksum = 0;
2135 if (!target_was_examined(target)) {
2136 LOG_ERROR("Target not examined yet");
2137 return ERROR_FAIL;
2138 }
2139
2140 retval = target->type->checksum_memory(target, address, size, &checksum);
2141 if (retval != ERROR_OK) {
2142 buffer = malloc(size);
2143 if (buffer == NULL) {
2144 LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
2145 return ERROR_FAIL;
2146 }
2147 retval = target_read_buffer(target, address, size, buffer);
2148 if (retval != ERROR_OK) {
2149 free(buffer);
2150 return retval;
2151 }
2152
2153 /* convert to target endianness */
2154 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2155 uint32_t target_data;
2156 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2157 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2158 }
2159
2160 retval = image_calculate_checksum(buffer, size, &checksum);
2161 free(buffer);
2162 }
2163
2164 *crc = checksum;
2165
2166 return retval;
2167 }
2168
2169 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
2170 {
2171 int retval;
2172 if (!target_was_examined(target)) {
2173 LOG_ERROR("Target not examined yet");
2174 return ERROR_FAIL;
2175 }
2176
2177 if (target->type->blank_check_memory == 0)
2178 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2179
2180 retval = target->type->blank_check_memory(target, address, size, blank);
2181
2182 return retval;
2183 }
2184
2185 int target_read_u64(struct target *target, uint64_t address, uint64_t *value)
2186 {
2187 uint8_t value_buf[8];
2188 if (!target_was_examined(target)) {
2189 LOG_ERROR("Target not examined yet");
2190 return ERROR_FAIL;
2191 }
2192
2193 int retval = target_read_memory(target, address, 8, 1, value_buf);
2194
2195 if (retval == ERROR_OK) {
2196 *value = target_buffer_get_u64(target, value_buf);
2197 LOG_DEBUG("address: 0x%" PRIx64 ", value: 0x%16.16" PRIx64 "",
2198 address,
2199 *value);
2200 } else {
2201 *value = 0x0;
2202 LOG_DEBUG("address: 0x%" PRIx64 " failed",
2203 address);
2204 }
2205
2206 return retval;
2207 }
2208
2209 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
2210 {
2211 uint8_t value_buf[4];
2212 if (!target_was_examined(target)) {
2213 LOG_ERROR("Target not examined yet");
2214 return ERROR_FAIL;
2215 }
2216
2217 int retval = target_read_memory(target, address, 4, 1, value_buf);
2218
2219 if (retval == ERROR_OK) {
2220 *value = target_buffer_get_u32(target, value_buf);
2221 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
2222 address,
2223 *value);
2224 } else {
2225 *value = 0x0;
2226 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2227 address);
2228 }
2229
2230 return retval;
2231 }
2232
2233 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
2234 {
2235 uint8_t value_buf[2];
2236 if (!target_was_examined(target)) {
2237 LOG_ERROR("Target not examined yet");
2238 return ERROR_FAIL;
2239 }
2240
2241 int retval = target_read_memory(target, address, 2, 1, value_buf);
2242
2243 if (retval == ERROR_OK) {
2244 *value = target_buffer_get_u16(target, value_buf);
2245 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
2246 address,
2247 *value);
2248 } else {
2249 *value = 0x0;
2250 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2251 address);
2252 }
2253
2254 return retval;
2255 }
2256
2257 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
2258 {
2259 if (!target_was_examined(target)) {
2260 LOG_ERROR("Target not examined yet");
2261 return ERROR_FAIL;
2262 }
2263
2264 int retval = target_read_memory(target, address, 1, 1, value);
2265
2266 if (retval == ERROR_OK) {
2267 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
2268 address,
2269 *value);
2270 } else {
2271 *value = 0x0;
2272 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2273 address);
2274 }
2275
2276 return retval;
2277 }
2278
2279 int target_write_u64(struct target *target, uint64_t address, uint64_t value)
2280 {
2281 int retval;
2282 uint8_t value_buf[8];
2283 if (!target_was_examined(target)) {
2284 LOG_ERROR("Target not examined yet");
2285 return ERROR_FAIL;
2286 }
2287
2288 LOG_DEBUG("address: 0x%" PRIx64 ", value: 0x%16.16" PRIx64 "",
2289 address,
2290 value);
2291
2292 target_buffer_set_u64(target, value_buf, value);
2293 retval = target_write_memory(target, address, 8, 1, value_buf);
2294 if (retval != ERROR_OK)
2295 LOG_DEBUG("failed: %i", retval);
2296
2297 return retval;
2298 }
2299
2300 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
2301 {
2302 int retval;
2303 uint8_t value_buf[4];
2304 if (!target_was_examined(target)) {
2305 LOG_ERROR("Target not examined yet");
2306 return ERROR_FAIL;
2307 }
2308
2309 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
2310 address,
2311 value);
2312
2313 target_buffer_set_u32(target, value_buf, value);
2314 retval = target_write_memory(target, address, 4, 1, value_buf);
2315 if (retval != ERROR_OK)
2316 LOG_DEBUG("failed: %i", retval);
2317
2318 return retval;
2319 }
2320
2321 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
2322 {
2323 int retval;
2324 uint8_t value_buf[2];
2325 if (!target_was_examined(target)) {
2326 LOG_ERROR("Target not examined yet");
2327 return ERROR_FAIL;
2328 }
2329
2330 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
2331 address,
2332 value);
2333
2334 target_buffer_set_u16(target, value_buf, value);
2335 retval = target_write_memory(target, address, 2, 1, value_buf);
2336 if (retval != ERROR_OK)
2337 LOG_DEBUG("failed: %i", retval);
2338
2339 return retval;
2340 }
2341
2342 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
2343 {
2344 int retval;
2345 if (!target_was_examined(target)) {
2346 LOG_ERROR("Target not examined yet");
2347 return ERROR_FAIL;
2348 }
2349
2350 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
2351 address, value);
2352
2353 retval = target_write_memory(target, address, 1, 1, &value);
2354 if (retval != ERROR_OK)
2355 LOG_DEBUG("failed: %i", retval);
2356
2357 return retval;
2358 }
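
/* Illustrative sketch: the u8/u16/u32/u64 helpers above are convenient for
 * read-modify-write of memory-mapped registers; the register address and
 * bit mask below are hypothetical.
 *
 *     uint32_t ctrl;
 *     int retval = target_read_u32(target, 0x40021000, &ctrl);
 *     if (retval == ERROR_OK)
 *         retval = target_write_u32(target, 0x40021000, ctrl | 0x1);
 *
 * All of these helpers return ERROR_FAIL if the target has not been
 * examined yet.
 */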
2359
2360 static int find_target(struct command_context *cmd_ctx, const char *name)
2361 {
2362 struct target *target = get_target(name);
2363 if (target == NULL) {
2364 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2365 return ERROR_FAIL;
2366 }
2367 if (!target->tap->enabled) {
2368 LOG_USER("Target: TAP %s is disabled, "
2369 "can't be the current target\n",
2370 target->tap->dotted_name);
2371 return ERROR_FAIL;
2372 }
2373
2374 cmd_ctx->current_target = target->target_number;
2375 return ERROR_OK;
2376 }
2377
2378
2379 COMMAND_HANDLER(handle_targets_command)
2380 {
2381 int retval = ERROR_OK;
2382 if (CMD_ARGC == 1) {
2383 retval = find_target(CMD_CTX, CMD_ARGV[0]);
2384 if (retval == ERROR_OK) {
2385 /* we're done! */
2386 return retval;
2387 }
2388 }
2389
2390 struct target *target = all_targets;
2391 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
2392 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
2393 while (target) {
2394 const char *state;
2395 char marker = ' ';
2396
2397 if (target->tap->enabled)
2398 state = target_state_name(target);
2399 else
2400 state = "tap-disabled";
2401
2402 if (CMD_CTX->current_target == target->target_number)
2403 marker = '*';
2404
2405 /* keep columns lined up to match the headers above */
2406 command_print(CMD_CTX,
2407 "%2d%c %-18s %-10s %-6s %-18s %s",
2408 target->target_number,
2409 marker,
2410 target_name(target),
2411 target_type_name(target),
2412 Jim_Nvp_value2name_simple(nvp_target_endian,
2413 target->endianness)->name,
2414 target->tap->dotted_name,
2415 state);
2416 target = target->next;
2417 }
2418
2419 return retval;
2420 }
2421
2422 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2423
2424 static int powerDropout;
2425 static int srstAsserted;
2426
2427 static int runPowerRestore;
2428 static int runPowerDropout;
2429 static int runSrstAsserted;
2430 static int runSrstDeasserted;
2431
2432 static int sense_handler(void)
2433 {
2434 static int prevSrstAsserted;
2435 static int prevPowerdropout;
2436
2437 int retval = jtag_power_dropout(&powerDropout);
2438 if (retval != ERROR_OK)
2439 return retval;
2440
2441 int powerRestored;
2442 powerRestored = prevPowerdropout && !powerDropout;
2443 if (powerRestored)
2444 runPowerRestore = 1;
2445
2446 long long current = timeval_ms();
2447 static long long lastPower;
2448 int waitMore = lastPower + 2000 > current;
2449 if (powerDropout && !waitMore) {
2450 runPowerDropout = 1;
2451 lastPower = current;
2452 }
2453
2454 retval = jtag_srst_asserted(&srstAsserted);
2455 if (retval != ERROR_OK)
2456 return retval;
2457
2458 int srstDeasserted;
2459 srstDeasserted = prevSrstAsserted && !srstAsserted;
2460
2461 static long long lastSrst;
2462 waitMore = lastSrst + 2000 > current;
2463 if (srstDeasserted && !waitMore) {
2464 runSrstDeasserted = 1;
2465 lastSrst = current;
2466 }
2467
2468 if (!prevSrstAsserted && srstAsserted)
2469 runSrstAsserted = 1;
2470
2471 prevSrstAsserted = srstAsserted;
2472 prevPowerdropout = powerDropout;
2473
2474 if (srstDeasserted || powerRestored) {
2475 /* Other than logging the event we can't do anything here.
2476 * Issuing a reset is a particularly bad idea as we might
2477 * be inside a reset already.
2478 */
2479 }
2480
2481 return ERROR_OK;
2482 }
2483
2484 /* process target state changes */
2485 static int handle_target(void *priv)
2486 {
2487 Jim_Interp *interp = (Jim_Interp *)priv;
2488 int retval = ERROR_OK;
2489
2490 if (!is_jtag_poll_safe()) {
2491 /* polling is disabled currently */
2492 return ERROR_OK;
2493 }
2494
2495 /* we do not want to recurse here... */
2496 static int recursive;
2497 if (!recursive) {
2498 recursive = 1;
2499 sense_handler();
2500 /* danger! running these procedures can trigger srst assertions and power dropouts.
2501 * We need to avoid an infinite loop/recursion here and we do that by
2502 * clearing the flags after running these events.
2503 */
2504 int did_something = 0;
2505 if (runSrstAsserted) {
2506 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2507 Jim_Eval(interp, "srst_asserted");
2508 did_something = 1;
2509 }
2510 if (runSrstDeasserted) {
2511 Jim_Eval(interp, "srst_deasserted");
2512 did_something = 1;
2513 }
2514 if (runPowerDropout) {
2515 LOG_INFO("Power dropout detected, running power_dropout proc.");
2516 Jim_Eval(interp, "power_dropout");
2517 did_something = 1;
2518 }
2519 if (runPowerRestore) {
2520 Jim_Eval(interp, "power_restore");
2521 did_something = 1;
2522 }
2523
2524 if (did_something) {
2525 /* clear detect flags */
2526 sense_handler();
2527 }
2528
2529 /* clear action flags */
2530
2531 runSrstAsserted = 0;
2532 runSrstDeasserted = 0;
2533 runPowerRestore = 0;
2534 runPowerDropout = 0;
2535
2536 recursive = 0;
2537 }
2538
2539 /* Poll targets for state changes unless that's globally disabled.
2540 * Skip targets that are currently disabled.
2541 */
2542 for (struct target *target = all_targets;
2543 is_jtag_poll_safe() && target;
2544 target = target->next) {
2545
2546 if (!target_was_examined(target))
2547 continue;
2548
2549 if (!target->tap->enabled)
2550 continue;
2551
2552 if (target->backoff.times > target->backoff.count) {
2553 /* do not poll this time as we failed previously */
2554 target->backoff.count++;
2555 continue;
2556 }
2557 target->backoff.count = 0;
2558
2559 /* only poll target if we've got power and srst isn't asserted */
2560 if (!powerDropout && !srstAsserted) {
2561 /* polling may fail silently until the target has been examined */
2562 retval = target_poll(target);
2563 if (retval != ERROR_OK) {
2564 /* 100ms polling interval. Increase the interval between polls up to 5000ms */
2565 if (target->backoff.times * polling_interval < 5000) {
2566 target->backoff.times *= 2;
2567 target->backoff.times++;
2568 }
2569
2570 /* Tell GDB to halt the debugger. This allows the user to
2571 * run monitor commands to handle the situation.
2572 */
2573 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2574 }
2575 if (target->backoff.times > 0) {
2576 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
2577 target_reset_examined(target);
2578 retval = target_examine_one(target);
2579 /* Target examination could have failed due to unstable connection,
2580 * but we set the examined flag anyway to repoll it later */
2581 if (retval != ERROR_OK) {
2582 target->examined = true;
2583 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
2584 target->backoff.times * polling_interval);
2585 return retval;
2586 }
2587 }
2588
2589 /* Since we succeeded, we reset backoff count */
2590 target->backoff.times = 0;
2591 }
2592 }
2593
2594 return retval;
2595 }
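
/* Backoff arithmetic, for reference: on consecutive poll failures
 * backoff.times grows as times = times * 2 + 1 (1, 3, 7, 15, 31, 63, ...)
 * and stops growing once times * polling_interval reaches 5000ms, so with
 * the usual 100ms polling interval a persistently failing target is polled
 * again only after roughly 0.1s, 0.3s, 0.7s, 1.5s, 3.1s and then about
 * every 6.3s.
 */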
2596
2597 COMMAND_HANDLER(handle_reg_command)
2598 {
2599 struct target *target;
2600 struct reg *reg = NULL;
2601 unsigned count = 0;
2602 char *value;
2603
2604 LOG_DEBUG("-");
2605
2606 target = get_current_target(CMD_CTX);
2607
2608 /* list all available registers for the current target */
2609 if (CMD_ARGC == 0) {
2610 struct reg_cache *cache = target->reg_cache;
2611
2612 count = 0;
2613 while (cache) {
2614 unsigned i;
2615
2616 command_print(CMD_CTX, "===== %s", cache->name);
2617
2618 for (i = 0, reg = cache->reg_list;
2619 i < cache->num_regs;
2620 i++, reg++, count++) {
2621 /* only print cached values if they are valid */
2622 if (reg->valid) {
2623 value = buf_to_str(reg->value,
2624 reg->size, 16);
2625 command_print(CMD_CTX,
2626 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2627 count, reg->name,
2628 reg->size, value,
2629 reg->dirty
2630 ? " (dirty)"
2631 : "");
2632 free(value);
2633 } else {
2634 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2635 count, reg->name,
2636 reg->size);
2637 }
2638 }
2639 cache = cache->next;
2640 }
2641
2642 return ERROR_OK;
2643 }
2644
2645 /* access a single register by its ordinal number */
2646 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2647 unsigned num;
2648 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2649
2650 struct reg_cache *cache = target->reg_cache;
2651 count = 0;
2652 while (cache) {
2653 unsigned i;
2654 for (i = 0; i < cache->num_regs; i++) {
2655 if (count++ == num) {
2656 reg = &cache->reg_list[i];
2657 break;
2658 }
2659 }
2660 if (reg)
2661 break;
2662 cache = cache->next;
2663 }
2664
2665 if (!reg) {
2666 command_print(CMD_CTX, "%i is out of bounds, the current target "
2667 "has only %i registers (0 - %i)", num, count, count - 1);
2668 return ERROR_OK;
2669 }
2670 } else {
2671 /* access a single register by its name */
2672 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2673
2674 if (!reg) {
2675 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2676 return ERROR_OK;
2677 }
2678 }
2679
2680 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2681
2682 /* display a register */
2683 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2684 && (CMD_ARGV[1][0] <= '9')))) {
2685 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2686 reg->valid = 0;
2687
2688 if (reg->valid == 0)
2689 reg->type->get(reg);
2690 value = buf_to_str(reg->value, reg->size, 16);
2691 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2692 free(value);
2693 return ERROR_OK;
2694 }
2695
2696 /* set register value */
2697 if (CMD_ARGC == 2) {
2698 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2699 if (buf == NULL)
2700 return ERROR_FAIL;
2701 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2702
2703 reg->type->set(reg, buf);
2704
2705 value = buf_to_str(reg->value, reg->size, 16);
2706 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2707 free(value);
2708
2709 free(buf);
2710
2711 return ERROR_OK;
2712 }
2713
2714 return ERROR_COMMAND_SYNTAX_ERROR;
2715 }
2716
2717 COMMAND_HANDLER(handle_poll_command)
2718 {
2719 int retval = ERROR_OK;
2720 struct target *target = get_current_target(CMD_CTX);
2721
2722 if (CMD_ARGC == 0) {
2723 command_print(CMD_CTX, "background polling: %s",
2724 jtag_poll_get_enabled() ? "on" : "off");
2725 command_print(CMD_CTX, "TAP: %s (%s)",
2726 target->tap->dotted_name,
2727 target->tap->enabled ? "enabled" : "disabled");
2728 if (!target->tap->enabled)
2729 return ERROR_OK;
2730 retval = target_poll(target);
2731 if (retval != ERROR_OK)
2732 return retval;
2733 retval = target_arch_state(target);
2734 if (retval != ERROR_OK)
2735 return retval;
2736 } else if (CMD_ARGC == 1) {
2737 bool enable;
2738 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2739 jtag_poll_set_enabled(enable);
2740 } else
2741 return ERROR_COMMAND_SYNTAX_ERROR;
2742
2743 return retval;
2744 }
2745
2746 COMMAND_HANDLER(handle_wait_halt_command)
2747 {
2748 if (CMD_ARGC > 1)
2749 return ERROR_COMMAND_SYNTAX_ERROR;
2750
2751 unsigned ms = DEFAULT_HALT_TIMEOUT;
2752 if (1 == CMD_ARGC) {
2753 int retval = parse_uint(CMD_ARGV[0], &ms);
2754 if (ERROR_OK != retval)
2755 return ERROR_COMMAND_SYNTAX_ERROR;
2756 }
2757
2758 struct target *target = get_current_target(CMD_CTX);
2759 return target_wait_state(target, TARGET_HALTED, ms);
2760 }
2761
2762 /* wait for target state to change. The trick here is to have a low
2763 * latency for short waits and not to suck up all the CPU time
2764 * on longer waits.
2765 *
2766 * After 500ms, keep_alive() is invoked
2767 */
2768 int target_wait_state(struct target *target, enum target_state state, int ms)
2769 {
2770 int retval;
2771 long long then = 0, cur;
2772 int once = 1;
2773
2774 for (;;) {
2775 retval = target_poll(target);
2776 if (retval != ERROR_OK)
2777 return retval;
2778 if (target->state == state)
2779 break;
2780 cur = timeval_ms();
2781 if (once) {
2782 once = 0;
2783 then = timeval_ms();
2784 LOG_DEBUG("waiting for target %s...",
2785 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2786 }
2787
2788 if (cur-then > 500)
2789 keep_alive();
2790
2791 if ((cur-then) > ms) {
2792 LOG_ERROR("timed out while waiting for target %s",
2793 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2794 return ERROR_FAIL;
2795 }
2796 }
2797
2798 return ERROR_OK;
2799 }
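
/* Illustrative caller sketch: a synchronous halt is typically the pairing
 * of target_halt() with a bounded wait, as the handle_wait_halt_command /
 * handle_halt_command handlers nearby do.
 *
 *     int retval = target_halt(target);
 *     if (retval == ERROR_OK)
 *         retval = target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 */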
2800
2801 COMMAND_HANDLER(handle_halt_command)
2802 {
2803 LOG_DEBUG("-");
2804
2805 struct target *target = get_current_target(CMD_CTX);
2806 int retval = target_halt(target);
2807 if (ERROR_OK != retval)
2808 return retval;
2809
2810 if (CMD_ARGC == 1) {
2811 unsigned wait_local;
2812 retval = parse_uint(CMD_ARGV[0], &wait_local);
2813 if (ERROR_OK != retval)
2814 return ERROR_COMMAND_SYNTAX_ERROR;
2815 if (!wait_local)
2816 return ERROR_OK;
2817 }
2818
2819 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2820 }
2821
2822 COMMAND_HANDLER(handle_soft_reset_halt_command)
2823 {
2824 struct target *target = get_current_target(CMD_CTX);
2825
2826 LOG_USER("requesting target halt and executing a soft reset");
2827
2828 target_soft_reset_halt(target);
2829
2830 return ERROR_OK;
2831 }
2832
2833 COMMAND_HANDLER(handle_reset_command)
2834 {
2835 if (CMD_ARGC > 1)
2836 return ERROR_COMMAND_SYNTAX_ERROR;
2837
2838 enum target_reset_mode reset_mode = RESET_RUN;
2839 if (CMD_ARGC == 1) {
2840 const Jim_Nvp *n;
2841 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2842 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
2843 return ERROR_COMMAND_SYNTAX_ERROR;
2844 reset_mode = n->value;
2845 }
2846
2847 /* reset *all* targets */
2848 return target_process_reset(CMD_CTX, reset_mode);
2849 }
2850
2851
2852 COMMAND_HANDLER(handle_resume_command)
2853 {
2854 int current = 1;
2855 if (CMD_ARGC > 1)
2856 return ERROR_COMMAND_SYNTAX_ERROR;
2857
2858 struct target *target = get_current_target(CMD_CTX);
2859
2860 /* with no CMD_ARGV, resume from current pc, addr = 0,
2861 * with one argument, addr = CMD_ARGV[0],
2862 * handle breakpoints, not debugging */
2863 uint32_t addr = 0;
2864 if (CMD_ARGC == 1) {
2865 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2866 current = 0;
2867 }
2868
2869 return target_resume(target, current, addr, 1, 0);
2870 }
2871
2872 COMMAND_HANDLER(handle_step_command)
2873 {
2874 if (CMD_ARGC > 1)
2875 return ERROR_COMMAND_SYNTAX_ERROR;
2876
2877 LOG_DEBUG("-");
2878
2879 /* with no CMD_ARGV, step from current pc, addr = 0,
2880 * with one argument addr = CMD_ARGV[0],
2881 * handle breakpoints, debugging */
2882 uint32_t addr = 0;
2883 int current_pc = 1;
2884 if (CMD_ARGC == 1) {
2885 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2886 current_pc = 0;
2887 }
2888
2889 struct target *target = get_current_target(CMD_CTX);
2890
2891 return target->type->step(target, current_pc, addr, 1);
2892 }
2893
2894 static void handle_md_output(struct command_context *cmd_ctx,
2895 struct target *target, uint32_t address, unsigned size,
2896 unsigned count, const uint8_t *buffer)
2897 {
2898 const unsigned line_bytecnt = 32;
2899 unsigned line_modulo = line_bytecnt / size;
2900
2901 char output[line_bytecnt * 4 + 1];
2902 unsigned output_len = 0;
2903
2904 const char *value_fmt;
2905 switch (size) {
2906 case 4:
2907 value_fmt = "%8.8x ";
2908 break;
2909 case 2:
2910 value_fmt = "%4.4x ";
2911 break;
2912 case 1:
2913 value_fmt = "%2.2x ";
2914 break;
2915 default:
2916 /* "can't happen", caller checked */
2917 LOG_ERROR("invalid memory read size: %u", size);
2918 return;
2919 }
2920
2921 for (unsigned i = 0; i < count; i++) {
2922 if (i % line_modulo == 0) {
2923 output_len += snprintf(output + output_len,
2924 sizeof(output) - output_len,
2925 "0x%8.8x: ",
2926 (unsigned)(address + (i*size)));
2927 }
2928
2929 uint32_t value = 0;
2930 const uint8_t *value_ptr = buffer + i * size;
2931 switch (size) {
2932 case 4:
2933 value = target_buffer_get_u32(target, value_ptr);
2934 break;
2935 case 2:
2936 value = target_buffer_get_u16(target, value_ptr);
2937 break;
2938 case 1:
2939 value = *value_ptr;
2940 }
2941 output_len += snprintf(output + output_len,
2942 sizeof(output) - output_len,
2943 value_fmt, value);
2944
2945 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
2946 command_print(cmd_ctx, "%s", output);
2947 output_len = 0;
2948 }
2949 }
2950 }
2951
2952 COMMAND_HANDLER(handle_md_command)
2953 {
2954 if (CMD_ARGC < 1)
2955 return ERROR_COMMAND_SYNTAX_ERROR;
2956
2957 unsigned size = 0;
2958 switch (CMD_NAME[2]) {
2959 case 'w':
2960 size = 4;
2961 break;
2962 case 'h':
2963 size = 2;
2964 break;
2965 case 'b':
2966 size = 1;
2967 break;
2968 default:
2969 return ERROR_COMMAND_SYNTAX_ERROR;
2970 }
2971
2972 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2973 int (*fn)(struct target *target,
2974 uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
2975 if (physical) {
2976 CMD_ARGC--;
2977 CMD_ARGV++;
2978 fn = target_read_phys_memory;
2979 } else
2980 fn = target_read_memory;
2981 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2982 return ERROR_COMMAND_SYNTAX_ERROR;
2983
2984 uint32_t address;
2985 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2986
2987 unsigned count = 1;
2988 if (CMD_ARGC == 2)
2989 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2990
2991 uint8_t *buffer = calloc(count, size);
/* fail early rather than passing a NULL buffer to the read callback */
if (buffer == NULL) {
LOG_ERROR("Failed to allocate md read buffer");
return ERROR_FAIL;
}
2992
2993 struct target *target = get_current_target(CMD_CTX);
2994 int retval = fn(target, address, size, count, buffer);
2995 if (ERROR_OK == retval)
2996 handle_md_output(CMD_CTX, target, address, size, count, buffer);
2997
2998 free(buffer);
2999
3000 return retval;
3001 }
3002
3003 typedef int (*target_write_fn)(struct target *target,
3004 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3005
3006 static int target_fill_mem(struct target *target,
3007 uint32_t address,
3008 target_write_fn fn,
3009 unsigned data_size,
3010 /* value */
3011 uint32_t b,
3012 /* count */
3013 unsigned c)
3014 {
3015 /* We have to write in reasonably large chunks to be able
3016 * to fill large memory areas with any sane speed */
3017 const unsigned chunk_size = 16384;
3018 uint8_t *target_buf = malloc(chunk_size * data_size);
3019 if (target_buf == NULL) {
3020 LOG_ERROR("Out of memory");
3021 return ERROR_FAIL;
3022 }
3023
3024 for (unsigned i = 0; i < chunk_size; i++) {
3025 switch (data_size) {
3026 case 4:
3027 target_buffer_set_u32(target, target_buf + i * data_size, b);
3028 break;
3029 case 2:
3030 target_buffer_set_u16(target, target_buf + i * data_size, b);
3031 break;
3032 case 1:
3033 target_buffer_set_u8(target, target_buf + i * data_size, b);
3034 break;
3035 default:
3036 exit(-1);
3037 }
3038 }
3039
3040 int retval = ERROR_OK;
3041
3042 for (unsigned x = 0; x < c; x += chunk_size) {
3043 unsigned current;
3044 current = c - x;
3045 if (current > chunk_size)
3046 current = chunk_size;
3047 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3048 if (retval != ERROR_OK)
3049 break;
3050 /* avoid GDB timeouts */
3051 keep_alive();
3052 }
3053 free(target_buf);
3054
3055 return retval;
3056 }
3057
3058
3059 COMMAND_HANDLER(handle_mw_command)
3060 {
3061 if (CMD_ARGC < 2)
3062 return ERROR_COMMAND_SYNTAX_ERROR;
3063 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3064 target_write_fn fn;
3065 if (physical) {
3066 CMD_ARGC--;
3067 CMD_ARGV++;
3068 fn = target_write_phys_memory;
3069 } else
3070 fn = target_write_memory;
3071 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3072 return ERROR_COMMAND_SYNTAX_ERROR;
3073
3074 uint32_t address;
3075 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
3076
3077 uint32_t value;
3078 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3079
3080 unsigned count = 1;
3081 if (CMD_ARGC == 3)
3082 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3083
3084 struct target *target = get_current_target(CMD_CTX);
3085 unsigned wordsize;
3086 switch (CMD_NAME[2]) {
3087 case 'w':
3088 wordsize = 4;
3089 break;
3090 case 'h':
3091 wordsize = 2;
3092 break;
3093 case 'b':
3094 wordsize = 1;
3095 break;
3096 default:
3097 return ERROR_COMMAND_SYNTAX_ERROR;
3098 }
3099
3100 return target_fill_mem(target, address, fn, wordsize, value, count);
3101 }
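
/* Usage note (illustrative): the mww/mwh/mwb commands registered on top of
 * this handler take an address, a value and an optional repeat count, with
 * an optional leading "phys" keyword to bypass the MMU. For example,
 * "mww 0x20000000 0xdeadbeef 16" fills 16 words and
 * "mwb phys 0x00000000 0xff 4" writes 4 bytes through physical addressing.
 * The addresses and values shown are arbitrary.
 */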
3102
3103 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3104 uint32_t *min_address, uint32_t *max_address)
3105 {
3106 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3107 return ERROR_COMMAND_SYNTAX_ERROR;
3108
3109 /* a base address isn't always necessary,
3110 * default to 0x0 (i.e. don't relocate) */
3111 if (CMD_ARGC >= 2) {
3112 uint32_t addr;
3113 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
3114 image->base_address = addr;
3115 image->base_address_set = 1;
3116 } else
3117 image->base_address_set = 0;
3118
3119 image->start_address_set = 0;
3120
3121 if (CMD_ARGC >= 4)
3122 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
3123 if (CMD_ARGC == 5) {
3124 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
3125 /* use size (given) to find max (required) */
3126 *max_address += *min_address;
3127 }
3128
3129 if (*min_address > *max_address)
3130 return ERROR_COMMAND_SYNTAX_ERROR;
3131
3132 return ERROR_OK;
3133 }
3134
3135 COMMAND_HANDLER(handle_load_image_command)
3136 {
3137 uint8_t *buffer;
3138 size_t buf_cnt;
3139 uint32_t image_size;
3140 uint32_t min_address = 0;
3141 uint32_t max_address = 0xffffffff;
3142 int i;
3143 struct image image;
3144
3145 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3146 &image, &min_address, &max_address);
3147 if (ERROR_OK != retval)
3148 return retval;
3149
3150 struct target *target = get_current_target(CMD_CTX);
3151
3152 struct duration bench;
3153 duration_start(&bench);
3154
3155 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3156 return ERROR_OK;
3157
3158 image_size = 0x0;
3159 retval = ERROR_OK;
3160 for (i = 0; i < image.num_sections; i++) {
3161 buffer = malloc(image.sections[i].size);
3162 if (buffer == NULL) {
3163 command_print(CMD_CTX,
3164 "error allocating buffer for section (%d bytes)",
3165 (int)(image.sections[i].size));
3166 break;
3167 }
3168
3169 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3170 if (retval != ERROR_OK) {
3171 free(buffer);
3172 break;
3173 }
3174
3175 uint32_t offset = 0;
3176 uint32_t length = buf_cnt;
3177
3178 /* DANGER!!! beware of unsigned comparison here!!! */
3179
3180 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3181 (image.sections[i].base_address < max_address)) {
3182
3183 if (image.sections[i].base_address < min_address) {
3184 /* clip addresses below */
3185 offset += min_address-image.sections[i].base_address;
3186 length -= offset;
3187 }
3188
3189 if (image.sections[i].base_address + buf_cnt > max_address)
3190 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3191
3192 retval = target_write_buffer(target,
3193 image.sections[i].base_address + offset, length, buffer + offset);
3194 if (retval != ERROR_OK) {
3195 free(buffer);
3196 break;
3197 }
3198 image_size += length;
3199 command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
3200 (unsigned int)length,
3201 image.sections[i].base_address + offset);
3202 }
3203
3204 free(buffer);
3205 }
3206
3207 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3208 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
3209 "in %fs (%0.3f KiB/s)", image_size,
3210 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3211 }
3212
3213 image_close(&image);
3214
3215 return retval;
3216
3217 }
3218
3219 COMMAND_HANDLER(handle_dump_image_command)
3220 {
3221 struct fileio fileio;
3222 uint8_t *buffer;
3223 int retval, retvaltemp;
3224 uint32_t address, size;
3225 struct duration bench;
3226 struct target *target = get_current_target(CMD_CTX);
3227
3228 if (CMD_ARGC != 3)
3229 return ERROR_COMMAND_SYNTAX_ERROR;
3230
3231 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
3232 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
3233
3234 uint32_t buf_size = (size > 4096) ? 4096 : size;
3235 buffer = malloc(buf_size);
3236 if (!buffer)
3237 return ERROR_FAIL;
3238
3239 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3240 if (retval != ERROR_OK) {
3241 free(buffer);
3242 return retval;
3243 }
3244
3245 duration_start(&bench);
3246
3247 while (size > 0) {
3248 size_t size_written;
3249 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3250 retval = target_read_buffer(target, address, this_run_size, buffer);
3251 if (retval != ERROR_OK)
3252 break;
3253
3254 retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
3255 if (retval != ERROR_OK)
3256 break;
3257
3258 size -= this_run_size;
3259 address += this_run_size;
3260 }
3261
3262 free(buffer);
3263
3264 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3265 int filesize;
3266 retval = fileio_size(&fileio, &filesize);
3267 if (retval != ERROR_OK)
3268 return retval;
3269 command_print(CMD_CTX,
3270 "dumped %ld bytes in %fs (%0.3f KiB/s)", (long)filesize,
3271 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3272 }
3273
3274 retvaltemp = fileio_close(&fileio);
3275 if (retvaltemp != ERROR_OK)
3276 return retvaltemp;
3277
3278 return retval;
3279 }
3280
3281 static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
3282 {
3283 uint8_t *buffer;
3284 size_t buf_cnt;
3285 uint32_t image_size;
3286 int i;
3287 int retval;
3288 uint32_t checksum = 0;
3289 uint32_t mem_checksum = 0;
3290
3291 struct image image;
3292
3293 struct target *target = get_current_target(CMD_CTX);
3294
3295 if (CMD_ARGC < 1)
3296 return ERROR_COMMAND_SYNTAX_ERROR;
3297
3298 if (!target) {
3299 LOG_ERROR("no target selected");
3300 return ERROR_FAIL;
3301 }
3302
3303 struct duration bench;
3304 duration_start(&bench);
3305
3306 if (CMD_ARGC >= 2) {
3307 uint32_t addr;
3308 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
3309 image.base_address = addr;
3310 image.base_address_set = 1;
3311 } else {
3312 image.base_address_set = 0;
3313 image.base_address = 0x0;
3314 }
3315
3316 image.start_address_set = 0;
3317
3318 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3319 if (retval != ERROR_OK)
3320 return retval;
3321
3322 image_size = 0x0;
3323 int diffs = 0;
3324 retval = ERROR_OK;
3325 for (i = 0; i < image.num_sections; i++) {
3326 buffer = malloc(image.sections[i].size);
3327 if (buffer == NULL) {
3328 command_print(CMD_CTX,
3329 "error allocating buffer for section (%d bytes)",
3330 (int)(image.sections[i].size));
3331 break;
3332 }
3333 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3334 if (retval != ERROR_OK) {
3335 free(buffer);
3336 break;
3337 }
3338
3339 if (verify) {
3340 /* calculate checksum of image */
3341 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3342 if (retval != ERROR_OK) {
3343 free(buffer);
3344 break;
3345 }
3346
3347 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3348 if (retval != ERROR_OK) {
3349 free(buffer);
3350 break;
3351 }
3352
3353 if (checksum != mem_checksum) {
3354 /* failed crc checksum, fall back to a binary compare */
3355 uint8_t *data;
3356
3357 if (diffs == 0)
3358 LOG_ERROR("checksum mismatch - attempting binary compare");
3359
3360 data = malloc(buf_cnt);
3361
3362 /* Can we use 32bit word accesses? */
3363 int size = 1;
3364 int count = buf_cnt;
3365 if ((count % 4) == 0) {
3366 size *= 4;
3367 count /= 4;
3368 }
3369 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
3370 if (retval == ERROR_OK) {
3371 uint32_t t;
3372 for (t = 0; t < buf_cnt; t++) {
3373 if (data[t] != buffer[t]) {
3374 command_print(CMD_CTX,
3375 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3376 diffs,
3377 (unsigned)(t + image.sections[i].base_address),
3378 data[t],
3379 buffer[t]);
3380 if (diffs++ >= 127) {
3381 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
3382 free(data);
3383 free(buffer);
3384 goto done;
3385 }
3386 }
3387 keep_alive();
3388 }
3389 }
3390 free(data);
3391 }
3392 } else {
3393 command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
3394 image.sections[i].base_address,
3395 buf_cnt);
3396 }
3397
3398 free(buffer);
3399 image_size += buf_cnt;
3400 }
3401 if (diffs > 0)
3402 command_print(CMD_CTX, "No more differences found.");
3403 done:
3404 if (diffs > 0)
3405 retval = ERROR_FAIL;
3406 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3407 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
3408 "in %fs (%0.3f KiB/s)", image_size,
3409 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3410 }
3411
3412 image_close(&image);
3413
3414 return retval;
3415 }
3416
3417 COMMAND_HANDLER(handle_verify_image_command)
3418 {
3419 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
3420 }
3421
3422 COMMAND_HANDLER(handle_test_image_command)
3423 {
3424 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
3425 }
3426
3427 static int handle_bp_command_list(struct command_context *cmd_ctx)
3428 {
3429 struct target *target = get_current_target(cmd_ctx);
3430 struct breakpoint *breakpoint = target->breakpoints;
3431 while (breakpoint) {
3432 if (breakpoint->type == BKPT_SOFT) {
3433 char *buf = buf_to_str(breakpoint->orig_instr,
3434 breakpoint->length, 16);
3435 command_print(cmd_ctx, "IVA breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
3436 breakpoint->address,
3437 breakpoint->length,
3438 breakpoint->set, buf);
3439 free(buf);
3440 } else {
3441 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3442 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3443 breakpoint->asid,
3444 breakpoint->length, breakpoint->set);
3445 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3446 command_print(cmd_ctx, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3447 breakpoint->address,
3448 breakpoint->length, breakpoint->set);
3449 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3450 breakpoint->asid);
3451 } else
3452 command_print(cmd_ctx, "Breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3453 breakpoint->address,
3454 breakpoint->length, breakpoint->set);
3455 }
3456
3457 breakpoint = breakpoint->next;
3458 }
3459 return ERROR_OK;
3460 }
3461
3462 static int handle_bp_command_set(struct command_context *cmd_ctx,
3463 uint32_t addr, uint32_t asid, uint32_t length, int hw)
3464 {
3465 struct target *target = get_current_target(cmd_ctx);
3466 int retval;
3467
3468 if (asid == 0) {
3469 retval = breakpoint_add(target, addr, length, hw);
3470 if (ERROR_OK == retval)
3471 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
3472 else {
3473 LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
3474 return retval;
3475 }
3476 } else if (addr == 0) {
3477 if (target->type->add_context_breakpoint == NULL) {
3478 LOG_WARNING("Context breakpoint not available");
3479 return ERROR_OK;
3480 }
3481 retval = context_breakpoint_add(target, asid, length, hw);
3482 if (ERROR_OK == retval)
3483 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3484 else {
3485 LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used");
3486 return retval;
3487 }
3488 } else {
3489 if (target->type->add_hybrid_breakpoint == NULL) {
3490 LOG_WARNING("Hybrid breakpoint not available");
3491 return ERROR_OK;
3492 }
3493 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3494 if (ERROR_OK == retval)
3495 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3496 else {
3497 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3498 return retval;
3499 }
3500 }
3501 return ERROR_OK;
3502 }
3503
3504 COMMAND_HANDLER(handle_bp_command)
3505 {
3506 uint32_t addr;
3507 uint32_t asid;
3508 uint32_t length;
3509 int hw = BKPT_SOFT;
3510
3511 switch (CMD_ARGC) {
3512 case 0:
3513 return handle_bp_command_list(CMD_CTX);
3514
3515 case 2:
3516 asid = 0;
3517 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3518 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3519 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3520
3521 case 3:
3522 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3523 hw = BKPT_HARD;
3524 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3525
3526 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3527
3528 asid = 0;
3529 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3530 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3531 hw = BKPT_HARD;
3532 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3533 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3534 addr = 0;
3535 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3536 }
3537
3538 case 4:
3539 hw = BKPT_HARD;
3540 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3541 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3542 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3543 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3544
3545 default:
3546 return ERROR_COMMAND_SYNTAX_ERROR;
3547 }
3548 }
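
/* Usage note (illustrative): the bp command built on this handler accepts,
 * for example, "bp 0x08000100 2" for a software breakpoint,
 * "bp 0x08000100 2 hw" for a hardware breakpoint and
 * "bp 0x10 4 hw_ctx" for a context breakpoint on CONTEXTID 0x10,
 * matching the 2/3/4-argument cases parsed above. The addresses are
 * arbitrary examples.
 */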
3549
3550 COMMAND_HANDLER(handle_rbp_command)
3551 {
3552 if (CMD_ARGC != 1)
3553 return ERROR_COMMAND_SYNTAX_ERROR;
3554
3555 uint32_t addr;
3556 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3557
3558 struct target *target = get_current_target(CMD_CTX);
3559 breakpoint_remove(target, addr);
3560
3561 return ERROR_OK;
3562 }
3563
3564 COMMAND_HANDLER(handle_wp_command)
3565 {
3566 struct target *target = get_current_target(CMD_CTX);
3567
3568 if (CMD_ARGC == 0) {
3569 struct watchpoint *watchpoint = target->watchpoints;
3570
3571 while (watchpoint) {
3572 command_print(CMD_CTX, "address: 0x%8.8" PRIx32
3573 ", len: 0x%8.8" PRIx32
3574 ", r/w/a: %i, value: 0x%8.8" PRIx32
3575 ", mask: 0x%8.8" PRIx32,
3576 watchpoint->address,
3577 watchpoint->length,
3578 (int)watchpoint->rw,
3579 watchpoint->value,
3580 watchpoint->mask);
3581 watchpoint = watchpoint->next;
3582 }
3583 return ERROR_OK;
3584 }
3585
3586 enum watchpoint_rw type = WPT_ACCESS;
3587 uint32_t addr = 0;
3588 uint32_t length = 0;
3589 uint32_t data_value = 0x0;
3590 uint32_t data_mask = 0xffffffff;
3591
3592 switch (CMD_ARGC) {
3593 case 5:
3594 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3595 /* fall through */
3596 case 4:
3597 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3598 /* fall through */
3599 case 3:
3600 switch (CMD_ARGV[2][0]) {
3601 case 'r':
3602 type = WPT_READ;
3603 break;
3604 case 'w':
3605 type = WPT_WRITE;
3606 break;
3607 case 'a':
3608 type = WPT_ACCESS;
3609 break;
3610 default:
3611 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3612 return ERROR_COMMAND_SYNTAX_ERROR;
3613 }
3614 /* fall through */
3615 case 2:
3616 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3617 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3618 break;
3619
3620 default:
3621 return ERROR_COMMAND_SYNTAX_ERROR;
3622 }
3623
3624 int retval = watchpoint_add(target, addr, length, type,
3625 data_value, data_mask);
3626 if (ERROR_OK != retval)
3627 LOG_ERROR("Failure setting watchpoints");
3628
3629 return retval;
3630 }
3631
3632 COMMAND_HANDLER(handle_rwp_command)
3633 {
3634 if (CMD_ARGC != 1)
3635 return ERROR_COMMAND_SYNTAX_ERROR;
3636
3637 uint32_t addr;
3638 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3639
3640 struct target *target = get_current_target(CMD_CTX);
3641 watchpoint_remove(target, addr);
3642
3643 return ERROR_OK;
3644 }
3645
3646 /**
3647 * Translate a virtual address to a physical address.
3648 *
3649 * The low-level target implementation must have logged a detailed error
3650 * which is forwarded to telnet/GDB session.
3651 */
3652 COMMAND_HANDLER(handle_virt2phys_command)
3653 {
3654 if (CMD_ARGC != 1)
3655 return ERROR_COMMAND_SYNTAX_ERROR;
3656
3657 uint32_t va;
3658 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], va);
3659 uint32_t pa;
3660
3661 struct target *target = get_current_target(CMD_CTX);
3662 int retval = target->type->virt2phys(target, va, &pa);
3663 if (retval == ERROR_OK)
3664 command_print(CMD_CTX, "Physical address 0x%08" PRIx32 "", pa);
3665
3666 return retval;
3667 }
3668
3669 static void writeData(FILE *f, const void *data, size_t len)
3670 {
3671 size_t written = fwrite(data, 1, len, f);
3672 if (written != len)
3673 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
3674 }
3675
3676 static void writeLong(FILE *f, int l, struct target *target)
3677 {
3678 uint8_t val[4];
3679
3680 target_buffer_set_u32(target, val, l);
3681 writeData(f, val, 4);
3682 }
3683
3684 static void writeString(FILE *f, char *s)
3685 {
3686 writeData(f, s, strlen(s));
3687 }
3688
3689 typedef unsigned char UNIT[2]; /* unit of profiling */
3690
3691 /* Dump a gmon.out histogram file. */
3692 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
3693 uint32_t start_address, uint32_t end_address, struct target *target)
3694 {
3695 uint32_t i;
3696 FILE *f = fopen(filename, "w");
3697 if (f == NULL)
3698 return;
3699 writeString(f, "gmon");
3700 writeLong(f, 0x00000001, target); /* Version */
3701 writeLong(f, 0, target); /* padding */
3702 writeLong(f, 0, target); /* padding */
3703 writeLong(f, 0, target); /* padding */
3704
3705 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
3706 writeData(f, &zero, 1);
3707
3708 /* figure out bucket size */
3709 uint32_t min;
3710 uint32_t max;
3711 if (with_range) {
3712 min = start_address;
3713 max = end_address;
3714 } else {
3715 min = samples[0];
3716 max = samples[0];
3717 for (i = 0; i < sampleNum; i++) {
3718 if (min > samples[i])
3719 min = samples[i];
3720 if (max < samples[i])
3721 max = samples[i];
3722 }
3723
3724 /* max should be (largest sample + 1)
3725 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
3726 max++;
3727 }
3728
3729 int addressSpace = max - min;
3730 assert(addressSpace >= 2);
3731
3732 /* FIXME: What is the reasonable number of buckets?
3733 * The profiling result will be more accurate if there are enough buckets. */
3734 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
3735 uint32_t numBuckets = addressSpace / sizeof(UNIT);
3736 if (numBuckets > maxBuckets)
3737 numBuckets = maxBuckets;
3738 int *buckets = malloc(sizeof(int) * numBuckets);
3739 if (buckets == NULL) {
3740 fclose(f);
3741 return;
3742 }
3743 memset(buckets, 0, sizeof(int) * numBuckets);
3744 for (i = 0; i < sampleNum; i++) {
3745 uint32_t address = samples[i];
3746
3747 if ((address < min) || (max <= address))
3748 continue;
3749
3750 long long a = address - min;
3751 long long b = numBuckets;
3752 long long c = addressSpace;
3753 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
3754 buckets[index_t]++;
3755 }
3756
3757 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
3758 writeLong(f, min, target); /* low_pc */
3759 writeLong(f, max, target); /* high_pc */
3760 writeLong(f, numBuckets, target); /* # of buckets */
3761 writeLong(f, 100, target); /* KLUDGE! We lie, ca. 100Hz best case. */
3762 writeString(f, "seconds");
3763 for (i = 0; i < (15-strlen("seconds")); i++)
3764 writeData(f, &zero, 1);
3765 writeString(f, "s");
3766
3767 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
3768
3769 char *data = malloc(2 * numBuckets);
3770 if (data != NULL) {
3771 for (i = 0; i < numBuckets; i++) {
3772 int val;
3773 val = buckets[i];
3774 if (val > 65535)
3775 val = 65535;
3776 data[i * 2] = val&0xff;
3777 data[i * 2 + 1] = (val >> 8) & 0xff;
3778 }
3779 free(buckets);
3780 writeData(f, data, numBuckets * 2);
3781 free(data);
3782 } else
3783 free(buckets);
3784
3785 fclose(f);
3786 }
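
/* The file produced above follows the gmon.out histogram layout, so it can
 * normally be post-processed with the toolchain's gprof against the ELF
 * that was profiled, e.g. "arm-none-eabi-gprof firmware.elf gmon.out"
 * (file names are hypothetical).
 */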
3787
3788 /* profiling samples the CPU PC as quickly as OpenOCD is able,
3789 * which will be used as a random sampling of PC */
3790 COMMAND_HANDLER(handle_profile_command)
3791 {
3792 struct target *target = get_current_target(CMD_CTX);
3793
3794 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
3795 return ERROR_COMMAND_SYNTAX_ERROR;
3796
3797 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
3798 uint32_t offset;
3799 uint32_t num_of_samples;
3800 int retval = ERROR_OK;
3801
3802 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
3803
3804 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
3805 if (samples == NULL) {
3806 LOG_ERROR("No memory to store samples.");
3807 return ERROR_FAIL;
3808 }
3809
3810 /**
3811 * Some cores let us sample the PC without the
3812 * annoying halt/resume step; for example, ARMv7 PCSR.
3813 * Provide a way to use that more efficient mechanism.
3814 */
3815 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
3816 &num_of_samples, offset);
3817 if (retval != ERROR_OK) {
3818 free(samples);
3819 return retval;
3820 }
3821
3822 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
3823
3824 retval = target_poll(target);
3825 if (retval != ERROR_OK) {
3826 free(samples);
3827 return retval;
3828 }
3829 if (target->state == TARGET_RUNNING) {
3830 retval = target_halt(target);
3831 if (retval != ERROR_OK) {
3832 free(samples);
3833 return retval;
3834 }
3835 }
3836
3837 retval = target_poll(target);
3838 if (retval != ERROR_OK) {
3839 free(samples);
3840 return retval;
3841 }
3842
3843 uint32_t start_address = 0;
3844 uint32_t end_address = 0;
3845 bool with_range = false;
3846 if (CMD_ARGC == 4) {
3847 with_range = true;
3848 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
3849 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
3850 }
3851
3852 write_gmon(samples, num_of_samples, CMD_ARGV[1],
3853 with_range, start_address, end_address, target);
3854 command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]);
3855
3856 free(samples);
3857 return retval;
3858 }
3859
3860 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
3861 {
3862 char *namebuf;
3863 Jim_Obj *nameObjPtr, *valObjPtr;
3864 int result;
3865
3866 namebuf = alloc_printf("%s(%d)", varname, idx);
3867 if (!namebuf)
3868 return JIM_ERR;
3869
3870 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
3871 valObjPtr = Jim_NewIntObj(interp, val);
3872 if (!nameObjPtr || !valObjPtr) {
3873 free(namebuf);
3874 return JIM_ERR;
3875 }
3876
3877 Jim_IncrRefCount(nameObjPtr);
3878 Jim_IncrRefCount(valObjPtr);
3879 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
3880 Jim_DecrRefCount(interp, nameObjPtr);
3881 Jim_DecrRefCount(interp, valObjPtr);
3882 free(namebuf);
3883 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
3884 return result;
3885 }
3886
3887 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
3888 {
3889 struct command_context *context;
3890 struct target *target;
3891
3892 context = current_command_context(interp);
3893 assert(context != NULL);
3894
3895 target = get_current_target(context);
3896 if (target == NULL) {
3897 LOG_ERROR("mem2array: no current target");
3898 return JIM_ERR;
3899 }
3900
3901 return target_mem2array(interp, target, argc - 1, argv + 1);
3902 }
3903
3904 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
3905 {
3906 long l;
3907 uint32_t width;
3908 int len;
3909 uint32_t addr;
3910 uint32_t count;
3911 uint32_t v;
3912 const char *varname;
3913 int n, e, retval;
3914 uint32_t i;
3915
3916 /* argv[1] = name of array to receive the data
3917 * argv[2] = desired width
3918 * argv[3] = memory address
3919 * argv[4] = count of times to read
3920 */
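/* Illustrative Tcl usage (hypothetical target command "mytarget"):
 *   mytarget mem2array readings 32 0x20000000 8
 * fills readings(0) .. readings(7) with eight 32-bit words read from 0x20000000. */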
3921 if (argc != 4) {
3922 Jim_WrongNumArgs(interp, 1, argv, "varname width addr nelems");
3923 return JIM_ERR;
3924 }
3925 varname = Jim_GetString(argv[0], &len);
3926 /* given "foo", get space for the worst case "foo(%d)" .. add 20 */
3927
3928 e = Jim_GetLong(interp, argv[1], &l);
3929 width = l;
3930 if (e != JIM_OK)
3931 return e;
3932
3933 e = Jim_GetLong(interp, argv[2], &l);
3934 addr = l;
3935 if (e != JIM_OK)
3936 return e;
3937 e = Jim_GetLong(interp, argv[3], &l);
3938 len = l;
3939 if (e != JIM_OK)
3940 return e;
3941 switch (width) {
3942 case 8:
3943 width = 1;
3944 break;
3945 case 16:
3946 width = 2;
3947 break;
3948 case 32:
3949 width = 4;
3950 break;
3951 default:
3952 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3953 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
3954 return JIM_ERR;
3955 }
3956 if (len == 0) {
3957 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3958 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero-length read?", NULL);
3959 return JIM_ERR;
3960 }
3961 if ((addr + (len * width)) < addr) {
3962 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3963 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len wraps to zero?", NULL);
3964 return JIM_ERR;
3965 }
3966 /* absurd transfer size? */
3967 if (len > 65536) {
3968 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3969 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
3970 return JIM_ERR;
3971 }
3972
3973 if ((width == 1) ||
3974 ((width == 2) && ((addr & 1) == 0)) ||
3975 ((width == 4) && ((addr & 3) == 0))) {
3976 /* all is well */
3977 } else {
3978 char buf[100];
3979 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
3980 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte reads",
3981 addr,
3982 width);
3983 Jim_AppendStrings(interp, Jim_GetResult(interp), buf , NULL);
3984 return JIM_ERR;
3985 }
3986
3987 /* Transfer loop */
3988
3989 /* index counter */
3990 n = 0;
3991
3992 size_t buffersize = 4096;
3993 uint8_t *buffer = malloc(buffersize);
3994 if (buffer == NULL)
3995 return JIM_ERR;
3996
3997 /* assume ok */
3998 e = JIM_OK;
3999 while (len) {
4000 /* Slurp... in buffer size chunks */
4001
4002 count = len; /* in objects.. */
4003 if (count > (buffersize / width))
4004 count = (buffersize / width);
4005
4006 retval = target_read_memory(target, addr, width, count, buffer);
4007 if (retval != ERROR_OK) {
4008 /* BOO !*/
4009 LOG_ERROR("mem2array: Read @ 0x%08x, w=%d, cnt=%d, failed",
4010 (unsigned int)addr,
4011 (int)width,
4012 (int)count);
4013 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4014 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4015 e = JIM_ERR;
4016 break;
4017 } else {
4018 v = 0; /* shut up gcc */
4019 for (i = 0; i < count ; i++, n++) {
4020 switch (width) {
4021 case 4:
4022 v = target_buffer_get_u32(target, &buffer[i*width]);
4023 break;
4024 case 2:
4025 v = target_buffer_get_u16(target, &buffer[i*width]);
4026 break;
4027 case 1:
4028 v = buffer[i] & 0x0ff;
4029 break;
4030 }
4031 new_int_array_element(interp, varname, n, v);
4032 }
4033 len -= count;
4034 addr += count * width;
4035 }
4036 }
4037
4038 free(buffer);
4039
4040 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4041
4042 return e;
4043 }
4044
4045 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4046 {
4047 char *namebuf;
4048 Jim_Obj *nameObjPtr, *valObjPtr;
4049 int result;
4050 long l;
4051
4052 namebuf = alloc_printf("%s(%d)", varname, idx);
4053 if (!namebuf)
4054 return JIM_ERR;
4055
4056 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4057 if (!nameObjPtr) {
4058 free(namebuf);
4059 return JIM_ERR;
4060 }
4061
4062 Jim_IncrRefCount(nameObjPtr);
4063 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4064 Jim_DecrRefCount(interp, nameObjPtr);
4065 free(namebuf);
4066 if (valObjPtr == NULL)
4067 return JIM_ERR;
4068
4069 result = Jim_GetLong(interp, valObjPtr, &l);
4070 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4071 *val = l;
4072 return result;
4073 }
4074
4075 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4076 {
4077 struct command_context *context;
4078 struct target *target;
4079
4080 context = current_command_context(interp);
4081 assert(context != NULL);
4082
4083 target = get_current_target(context);
4084 if (target == NULL) {
4085 LOG_ERROR("array2mem: no current target");
4086 return JIM_ERR;
4087 }
4088
4089 return target_array2mem(interp, target, argc-1, argv + 1);
4090 }
4091
4092 static int target_array2mem(Jim_Interp *interp, struct target *target,
4093 int argc, Jim_Obj *const *argv)
4094 {
4095 long l;
4096 uint32_t width;
4097 int len;
4098 uint32_t addr;
4099 uint32_t count;
4100 uint32_t v;
4101 const char *varname;
4102 int n, e, retval;
4103 uint32_t i;
4104
4105 /* argv[1] = name of array to get the data
4106 * argv[2] = desired width
4107 * argv[3] = memory address
4108 * argv[4] = count to write
4109 */
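/* Illustrative Tcl usage (hypothetical target command "mytarget"):
 *   mytarget array2mem wdata 32 0x20000000 8
 * writes wdata(0) .. wdata(7) as eight 32-bit words starting at 0x20000000. */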
4110 if (argc != 4) {
4111 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems");
4112 return JIM_ERR;
4113 }
4114 varname = Jim_GetString(argv[0], &len);
4115 /* given "foo", get space for the worst case "foo(%d)" .. add 20 */
4116
4117 e = Jim_GetLong(interp, argv[1], &l);
4118 width = l;
4119 if (e != JIM_OK)
4120 return e;
4121
4122 e = Jim_GetLong(interp, argv[2], &l);
4123 addr = l;
4124 if (e != JIM_OK)
4125 return e;
4126 e = Jim_GetLong(interp, argv[3], &l);
4127 len = l;
4128 if (e != JIM_OK)
4129 return e;
4130 switch (width) {
4131 case 8:
4132 width = 1;
4133 break;
4134 case 16:
4135 width = 2;
4136 break;
4137 case 32:
4138 width = 4;
4139 break;
4140 default:
4141 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4142 Jim_AppendStrings(interp, Jim_GetResult(interp),
4143 "Invalid width param, must be 8/16/32", NULL);
4144 return JIM_ERR;
4145 }
4146 if (len == 0) {
4147 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4148 Jim_AppendStrings(interp, Jim_GetResult(interp),
4149 "array2mem: zero width read?", NULL);
4150 return JIM_ERR;
4151 }
4152 if ((addr + (len * width)) < addr) {
4153 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4154 Jim_AppendStrings(interp, Jim_GetResult(interp),
4155 "array2mem: addr + len - wraps to zero?", NULL);
4156 return JIM_ERR;
4157 }
4158 /* absurd transfer size? */
4159 if (len > 65536) {
4160 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4161 Jim_AppendStrings(interp, Jim_GetResult(interp),
4162 "array2mem: absurd > 64K item request", NULL);
4163 return JIM_ERR;
4164 }
4165
4166 if ((width == 1) ||
4167 ((width == 2) && ((addr & 1) == 0)) ||
4168 ((width == 4) && ((addr & 3) == 0))) {
4169 /* all is well */
4170 } else {
4171 char buf[100];
4172 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4173 sprintf(buf, "array2mem address: 0x%08x is not aligned for %d byte writes",
4174 (unsigned int)addr,
4175 (int)width);
4176 Jim_AppendStrings(interp, Jim_GetResult(interp), buf , NULL);
4177 return JIM_ERR;
4178 }
4179
4180 /* Transfer loop */
4181
4182 /* index counter */
4183 n = 0;
4184 /* assume ok */
4185 e = JIM_OK;
4186
4187 size_t buffersize = 4096;
4188 uint8_t *buffer = malloc(buffersize);
4189 if (buffer == NULL)
4190 return JIM_ERR;
4191
4192 while (len) {
4193 /* Slurp... in buffer size chunks */
4194
4195 count = len; /* in objects.. */
4196 if (count > (buffersize / width))
4197 count = (buffersize / width);
4198
4199 v = 0; /* shut up gcc */
4200 for (i = 0; i < count; i++, n++) {
4201 get_int_array_element(interp, varname, n, &v);
4202 switch (width) {
4203 case 4:
4204 target_buffer_set_u32(target, &buffer[i * width], v);
4205 break;
4206 case 2:
4207 target_buffer_set_u16(target, &buffer[i * width], v);
4208 break;
4209 case 1:
4210 buffer[i] = v & 0x0ff;
4211 break;
4212 }
4213 }
4214 len -= count;
4215
4216 retval = target_write_memory(target, addr, width, count, buffer);
4217 if (retval != ERROR_OK) {
4218 /* BOO !*/
4219 LOG_ERROR("array2mem: Write @ 0x%08x, w=%d, cnt=%d, failed",
4220 (unsigned int)addr,
4221 (int)width,
4222 (int)count);
4223 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4224 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4225 e = JIM_ERR;
4226 break;
4227 }
4228 addr += count * width;
4229 }
4230
4231 free(buffer);
4232
4233 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4234
4235 return e;
4236 }
4237
4238 /* FIX? should we propagate errors here rather than printing them
4239 * and continuing?
4240 */
4241 void target_handle_event(struct target *target, enum target_event e)
4242 {
4243 struct target_event_action *teap;
4244
4245 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4246 if (teap->event == e) {
4247 LOG_DEBUG("target: (%d) %s (%s) event: %d (%s) action: %s",
4248 target->target_number,
4249 target_name(target),
4250 target_type_name(target),
4251 e,
4252 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4253 Jim_GetString(teap->body, NULL));
4254 if (Jim_EvalObj(teap->interp, teap->body) != JIM_OK) {
4255 Jim_MakeErrorMessage(teap->interp);
4256 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(teap->interp), NULL));
4257 }
4258 }
4259 }
4260 }
4261
4262 /**
4263 * Returns true only if the target has a handler for the specified event.
4264 */
4265 bool target_has_event_action(struct target *target, enum target_event event)
4266 {
4267 struct target_event_action *teap;
4268
4269 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4270 if (teap->event == event)
4271 return true;
4272 }
4273 return false;
4274 }
4275
4276 enum target_cfg_param {
4277 TCFG_TYPE,
4278 TCFG_EVENT,
4279 TCFG_WORK_AREA_VIRT,
4280 TCFG_WORK_AREA_PHYS,
4281 TCFG_WORK_AREA_SIZE,
4282 TCFG_WORK_AREA_BACKUP,
4283 TCFG_ENDIAN,
4284 TCFG_COREID,
4285 TCFG_CHAIN_POSITION,
4286 TCFG_DBGBASE,
4287 TCFG_RTOS,
4288 };
4289
4290 static Jim_Nvp nvp_config_opts[] = {
4291 { .name = "-type", .value = TCFG_TYPE },
4292 { .name = "-event", .value = TCFG_EVENT },
4293 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4294 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4295 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4296 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4297 { .name = "-endian" , .value = TCFG_ENDIAN },
4298 { .name = "-coreid", .value = TCFG_COREID },
4299 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4300 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4301 { .name = "-rtos", .value = TCFG_RTOS },
4302 { .name = NULL, .value = -1 }
4303 };
4304
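/* Illustrative Tcl usage of the options parsed below (hypothetical target
 * command "mytarget"; values and event name are examples only):
 *   mytarget configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   mytarget configure -event reset-init { echo "running reset-init handler" }
 *   mytarget cget -endian
 */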
4305 static int target_configure(Jim_GetOptInfo *goi, struct target *target)
4306 {
4307 Jim_Nvp *n;
4308 Jim_Obj *o;
4309 jim_wide w;
4310 int e;
4311
4312 /* parse config or cget options ... */
4313 while (goi->argc > 0) {
4314 Jim_SetEmptyResult(goi->interp);
4315 /* Jim_GetOpt_Debug(goi); */
4316
4317 if (target->type->target_jim_configure) {
4318 /* target defines a configure function */
4319 /* target gets first dibs on parameters */
4320 e = (*(target->type->target_jim_configure))(target, goi);
4321 if (e == JIM_OK) {
4322 /* more? */
4323 continue;
4324 }
4325 if (e == JIM_ERR) {
4326 /* An error */
4327 return e;
4328 }
4329 /* otherwise we 'continue' below */
4330 }
4331 e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n);
4332 if (e != JIM_OK) {
4333 Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0);
4334 return e;
4335 }
4336 switch (n->value) {
4337 case TCFG_TYPE:
4338 /* not settable */
4339 if (goi->isconfigure) {
4340 Jim_SetResultFormatted(goi->interp,
4341 "not settable: %s", n->name);
4342 return JIM_ERR;
4343 } else {
4344 no_params:
4345 if (goi->argc != 0) {
4346 Jim_WrongNumArgs(goi->interp,
4347 goi->argc, goi->argv,
4348 "NO PARAMS");
4349 return JIM_ERR;
4350 }
4351 }
4352 Jim_SetResultString(goi->interp,
4353 target_type_name(target), -1);
4354 /* loop for more */
4355 break;
4356 case TCFG_EVENT:
4357 if (goi->argc == 0) {
4358 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4359 return JIM_ERR;
4360 }
4361
4362 e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n);
4363 if (e != JIM_OK) {
4364 Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1);
4365 return e;
4366 }
4367
4368 if (goi->isconfigure) {
4369 if (goi->argc != 1) {
4370 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4371 return JIM_ERR;
4372 }
4373 } else {
4374 if (goi->argc != 0) {
4375 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4376 return JIM_ERR;
4377 }
4378 }
4379
4380 {
4381 struct target_event_action *teap;
4382
4383 teap = target->event_action;
4384 /* replace existing? */
4385 while (teap) {
4386 if (teap->event == (enum target_event)n->value)
4387 break;
4388 teap = teap->next;
4389 }
4390
4391 if (goi->isconfigure) {
4392 bool replace = true;
4393 if (teap == NULL) {
4394 /* create new */
4395 teap = calloc(1, sizeof(*teap));
4396 replace = false;
4397 }
4398 teap->event = n->value;
4399 teap->interp = goi->interp;
4400 Jim_GetOpt_Obj(goi, &o);
4401 if (teap->body)
4402 Jim_DecrRefCount(teap->interp, teap->body);
4403 teap->body = Jim_DuplicateObj(goi->interp, o);
4404 /*
4405 * FIXME:
4406 * Tcl/TK - "tk events" have a nice feature.
4407 * See the "BIND" command.
4408 * We should support that here.
4409 * You can specify %X and %Y in the event code.
4410 * The idea is: %T - target name.
4411 * The idea is: %N - target number
4412 * The idea is: %E - event name.
4413 */
4414 Jim_IncrRefCount(teap->body);
4415
4416 if (!replace) {
4417 /* add to head of event list */
4418 teap->next = target->event_action;
4419 target->event_action = teap;
4420 }
4421 Jim_SetEmptyResult(goi->interp);
4422 } else {
4423 /* get */
4424 if (teap == NULL)
4425 Jim_SetEmptyResult(goi->interp);
4426 else
4427 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
4428 }
4429 }
4430 /* loop for more */
4431 break;
4432
4433 case TCFG_WORK_AREA_VIRT:
4434 if (goi->isconfigure) {
4435 target_free_all_working_areas(target);
4436 e = Jim_GetOpt_Wide(goi, &w);
4437 if (e != JIM_OK)
4438 return e;
4439 target->working_area_virt = w;
4440 target->working_area_virt_spec = true;
4441 } else {
4442 if (goi->argc != 0)
4443 goto no_params;
4444 }
4445 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
4446 /* loop for more */
4447 break;
4448
4449 case TCFG_WORK_AREA_PHYS:
4450 if (goi->isconfigure) {
4451 target_free_all_working_areas(target);
4452 e = Jim_GetOpt_Wide(goi, &w);
4453 if (e != JIM_OK)
4454 return e;
4455 target->working_area_phys = w;
4456 target->working_area_phys_spec = true;
4457 } else {
4458 if (goi->argc != 0)
4459 goto no_params;
4460 }
4461 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
4462 /* loop for more */
4463 break;
4464
4465 case TCFG_WORK_AREA_SIZE:
4466 if (goi->isconfigure) {
4467 target_free_all_working_areas(target);
4468 e = Jim_GetOpt_Wide(goi, &w);
4469 if (e != JIM_OK)
4470 return e;
4471 target->working_area_size = w;
4472 } else {
4473 if (goi->argc != 0)
4474 goto no_params;
4475 }
4476 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
4477 /* loop for more */
4478 break;
4479
4480 case TCFG_WORK_AREA_BACKUP:
4481 if (goi->isconfigure) {
4482 target_free_all_working_areas(target);
4483 e = Jim_GetOpt_Wide(goi, &w);
4484 if (e != JIM_OK)
4485 return e;
4486 /* make this exactly 1 or 0 */
4487 target->backup_working_area = (!!w);
4488 } else {
4489 if (goi->argc != 0)
4490 goto no_params;
4491 }
4492 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
4493 /* loop for more */
4494 break;
4495
4496
4497 case TCFG_ENDIAN:
4498 if (goi->isconfigure) {
4499 e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n);
4500 if (e != JIM_OK) {
4501 Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1);
4502 return e;
4503 }
4504 target->endianness = n->value;
4505 } else {
4506 if (goi->argc != 0)
4507 goto no_params;
4508 }
4509 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4510 if (n->name == NULL) {
4511 target->endianness = TARGET_LITTLE_ENDIAN;
4512 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4513 }
4514 Jim_SetResultString(goi->interp, n->name, -1);
4515 /* loop for more */
4516 break;
4517
4518 case TCFG_COREID:
4519 if (goi->isconfigure) {
4520 e = Jim_GetOpt_Wide(goi, &w);
4521 if (e != JIM_OK)
4522 return e;
4523 target->coreid = (int32_t)w;
4524 } else {
4525 if (goi->argc != 0)
4526 goto no_params;
4527 }
4528 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
4529 /* loop for more */
4530 break;
4531
4532 case TCFG_CHAIN_POSITION:
4533 if (goi->isconfigure) {
4534 Jim_Obj *o_t;
4535 struct jtag_tap *tap;
4536 target_free_all_working_areas(target);
4537 e = Jim_GetOpt_Obj(goi, &o_t);
4538 if (e != JIM_OK)
4539 return e;
4540 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
4541 if (tap == NULL)
4542 return JIM_ERR;
4543 /* remember which TAP this target hangs off of */
4544 target->tap = tap;
4545 } else {
4546 if (goi->argc != 0)
4547 goto no_params;
4548 }
4549 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
4550 /* loop for more */
4551 break;
4552 case TCFG_DBGBASE:
4553 if (goi->isconfigure) {
4554 e = Jim_GetOpt_Wide(goi, &w);
4555 if (e != JIM_OK)
4556 return e;
4557 target->dbgbase = (uint32_t)w;
4558 target->dbgbase_set = true;
4559 } else {
4560 if (goi->argc != 0)
4561 goto no_params;
4562 }
4563 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
4564 /* loop for more */
4565 break;
4566
4567 case TCFG_RTOS:
4568 /* RTOS */
4569 {
4570 int result = rtos_create(goi, target);
4571 if (result != JIM_OK)
4572 return result;
4573 }
4574 /* loop for more */
4575 break;
4576 }
4577 } /* while (goi->argc) */
4578
4579
4580 /* done - we return */
4581 return JIM_OK;
4582 }
4583
4584 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
4585 {
4586 Jim_GetOptInfo goi;
4587
4588 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4589 goi.isconfigure = !strcmp(Jim_GetString(argv[0], NULL), "configure");
4590 int need_args = 1 + goi.isconfigure;
4591 if (goi.argc < need_args) {
4592 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
4593 goi.isconfigure
4594 ? "missing: -option VALUE ..."
4595 : "missing: -option ...");
4596 return JIM_ERR;
4597 }
4598 struct target *target = Jim_CmdPrivData(goi.interp);
4599 return target_configure(&goi, target);
4600 }
4601
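/* Handler shared by the per-target mww/mwh/mwb commands.
 * Illustrative Tcl usage (hypothetical target command "mytarget"; values are examples):
 *   mytarget mww 0x20000000 0xdeadbeef        ; write one 32-bit word
 *   mytarget mwb phys 0x20000100 0x55 16      ; fill 16 bytes using physical addressing
 */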
4602 static int jim_target_mw(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4603 {
4604 const char *cmd_name = Jim_GetString(argv[0], NULL);
4605
4606 Jim_GetOptInfo goi;
4607 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4608
4609 if (goi.argc < 2 || goi.argc > 4) {
4610 Jim_SetResultFormatted(goi.interp,
4611 "usage: %s [phys] <address> <data> [<count>]", cmd_name);
4612 return JIM_ERR;
4613 }
4614
4615 target_write_fn fn;
4616 fn = target_write_memory;
4617
4618 int e;
4619 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
4620 /* consume it */
4621 struct Jim_Obj *obj;
4622 e = Jim_GetOpt_Obj(&goi, &obj);
4623 if (e != JIM_OK)
4624 return e;
4625
4626 fn = target_write_phys_memory;
4627 }
4628
4629 jim_wide a;
4630 e = Jim_GetOpt_Wide(&goi, &a);
4631 if (e != JIM_OK)
4632 return e;
4633
4634 jim_wide b;
4635 e = Jim_GetOpt_Wide(&goi, &b);
4636 if (e != JIM_OK)
4637 return e;
4638
4639 jim_wide c = 1;
4640 if (goi.argc == 1) {
4641 e = Jim_GetOpt_Wide(&goi, &c);
4642 if (e != JIM_OK)
4643 return e;
4644 }
4645
4646 /* all args must be consumed */
4647 if (goi.argc != 0)
4648 return JIM_ERR;
4649
4650 struct target *target = Jim_CmdPrivData(goi.interp);
4651 unsigned data_size;
4652 if (strcasecmp(cmd_name, "mww") == 0)
4653 data_size = 4;
4654 else if (strcasecmp(cmd_name, "mwh") == 0)
4655 data_size = 2;
4656 else if (strcasecmp(cmd_name, "mwb") == 0)
4657 data_size = 1;
4658 else {
4659 LOG_ERROR("command '%s' unknown", cmd_name);
4660 return JIM_ERR;
4661 }
4662
4663 return (target_fill_mem(target, a, fn, data_size, b, c) == ERROR_OK) ? JIM_OK : JIM_ERR;
4664 }
4665
4666 /**
4667 * @brief Reads an array of words/halfwords/bytes from target memory starting at specified address.
4668 *
4669 * Usage: mdw [phys] <address> [<count>] - for 32 bit reads
4670 * mdh [phys] <address> [<count>] - for 16 bit reads
4671 * mdb [phys] <address> [<count>] - for 8 bit reads
4672 *
4673 * Count defaults to 1.
4674 *
4675 * Calls target_read_memory or target_read_phys_memory depending on
4676 * the presence of the "phys" argument
4677 * Reads the target memory in blocks of at most 32 bytes and returns the values as an array of
4678 * integers formatted in base 16.
4679 * Also outputs the read data in a human-readable form using command_print.
4680 *
4681 * @param phys if present target_read_phys_memory will be used instead of target_read_memory
4682 * @param address address where to start the read. May be specified in decimal or hex using the standard "0x" prefix
4683 * @param count optional count parameter to read an array of values. If not specified, defaults to 1.
4684 * @returns JIM_ERR on error, or JIM_OK on success; on success the result string is set to an array of
4685 * ASCII-formatted numbers with [<count>] elements.
4686 *
4687 * In case of little endian target:
4688 * Example1: "mdw 0x00000000" returns "10123456"
4689 * Example2: "mdh 0x00000000 1" returns "3456"
4690 * Example3: "mdb 0x00000000" returns "56"
4691 * Example4: "mdh 0x00000000 2" returns "3456 1012"
4692 * Example5: "mdb 0x00000000 3" returns "56 34 12"
4693 **/
4694 static int jim_target_md(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4695 {
4696 const char *cmd_name = Jim_GetString(argv[0], NULL);
4697
4698 Jim_GetOptInfo goi;
4699 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4700
4701 if ((goi.argc < 1) || (goi.argc > 3)) {
4702 Jim_SetResultFormatted(goi.interp,
4703 "usage: %s [phys] <address> [<count>]", cmd_name);
4704 return JIM_ERR;
4705 }
4706
4707 int (*fn)(struct target *target,
4708 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
4709 fn = target_read_memory;
4710
4711 int e;
4712 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
4713 /* consume it */
4714 struct Jim_Obj *obj;
4715 e = Jim_GetOpt_Obj(&goi, &obj);
4716 if (e != JIM_OK)
4717 return e;
4718
4719 fn = target_read_phys_memory;
4720 }
4721
4722 /* Read address parameter */
4723 jim_wide addr;
4724 e = Jim_GetOpt_Wide(&goi, &addr);
4725 if (e != JIM_OK)
4726 return JIM_ERR;
4727
4728 /* If next parameter exists, read it out as the count parameter, if not, set it to 1 (default) */
4729 jim_wide count;
4730 if (goi.argc == 1) {
4731 e = Jim_GetOpt_Wide(&goi, &count);
4732 if (e != JIM_OK)
4733 return JIM_ERR;
4734 } else
4735 count = 1;
4736
4737 /* all args must be consumed */
4738 if (goi.argc != 0)
4739 return JIM_ERR;
4740
4741 jim_wide dwidth = 1; /* shut up gcc */
4742 if (strcasecmp(cmd_name, "mdw") == 0)
4743 dwidth = 4;
4744 else if (strcasecmp(cmd_name, "mdh") == 0)
4745 dwidth = 2;
4746 else if (strcasecmp(cmd_name, "mdb") == 0)
4747 dwidth = 1;
4748 else {
4749 LOG_ERROR("command '%s' unknown", cmd_name);
4750 return JIM_ERR;
4751 }
4752
4753 /* convert count to "bytes" */
4754 int bytes = count * dwidth;
4755
4756 struct target *target = Jim_CmdPrivData(goi.interp);
4757 uint8_t target_buf[32];
4758 jim_wide x, y, z;
4759 while (bytes > 0) {
4760 y = (bytes < 16) ? bytes : 16; /* y = min(bytes, 16); */
4761
4762 /* Try to read out next block */
4763 e = fn(target, addr, dwidth, y / dwidth, target_buf);
4764
4765 if (e != ERROR_OK) {
4766 Jim_SetResultFormatted(interp, "error reading target @ 0x%08lx", (long)addr);
4767 return JIM_ERR;
4768 }
4769
4770 command_print_sameline(NULL, "0x%08x ", (int)(addr));
4771 switch (dwidth) {
4772 case 4:
4773 for (x = 0; x < 16 && x < y; x += 4) {
4774 z = target_buffer_get_u32(target, &(target_buf[x]));
4775 command_print_sameline(NULL, "%08x ", (int)(z));
4776 }
4777 for (; (x < 16) ; x += 4)
4778 command_print_sameline(NULL, " ");
4779 break;
4780 case 2:
4781 for (x = 0; x < 16 && x < y; x += 2) {
4782 z = target_buffer_get_u16(target, &(target_buf[x]));
4783 command_print_sameline(NULL, "%04x ", (int)(z));
4784 }
4785 for (; (x < 16) ; x += 2)
4786 command_print_sameline(NULL, " ");
4787 break;
4788 case 1:
4789 default:
4790 for (x = 0 ; (x < 16) && (x < y) ; x += 1) {
4791 z = target_buffer_get_u8(target, &(target_buf[x]));
4792 command_print_sameline(NULL, "%02x ", (int)(z));
4793 }
4794 for (; (x < 16) ; x += 1)
4795 command_print_sameline(NULL, " ");
4796 break;
4797 }
4798 /* ascii-ify the bytes */
4799 for (x = 0 ; x < y ; x++) {
4800 if ((target_buf[x] >= 0x20) &&
4801 (target_buf[x] <= 0x7e)) {
4802 /* good */
4803 } else {
4804 /* smack it */
4805 target_buf[x] = '.';
4806 }
4807 }
4808 /* space pad */
4809 while (x < 16) {
4810 target_buf[x] = ' ';
4811 x++;
4812 }
4813 /* terminate */
4814 target_buf[16] = 0;
4815 /* print - with a newline */
4816 command_print_sameline(NULL, "%s\n", target_buf);
4817 /* NEXT... */
4818 bytes -= 16;
4819 addr += 16;
4820 }
4821 return JIM_OK;
4822 }
4823
4824 static int jim_target_mem2array(Jim_Interp *interp,
4825 int argc, Jim_Obj *const *argv)
4826 {
4827 struct target *target = Jim_CmdPrivData(interp);
4828 return target_mem2array(interp, target, argc - 1, argv + 1);
4829 }
4830
4831 static int jim_target_array2mem(Jim_Interp *interp,
4832 int argc, Jim_Obj *const *argv)
4833 {
4834 struct target *target = Jim_CmdPrivData(interp);
4835 return target_array2mem(interp, target, argc - 1, argv + 1);
4836 }
4837
4838 static int jim_target_tap_disabled(Jim_Interp *interp)
4839 {
4840 Jim_SetResultFormatted(interp, "[TAP is disabled]");
4841 return JIM_ERR;
4842 }
4843
4844 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4845 {
4846 if (argc != 1) {
4847 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
4848 return JIM_ERR;
4849 }
4850 struct target *target = Jim_CmdPrivData(interp);
4851 if (!target->tap->enabled)
4852 return jim_target_tap_disabled(interp);
4853
4854 int e = target->type->examine(target);
4855 if (e != ERROR_OK)
4856 return JIM_ERR;
4857 return JIM_OK;
4858 }
4859
4860 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4861 {
4862 if (argc != 1) {
4863 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
4864 return JIM_ERR;
4865 }
4866 struct target *target = Jim_CmdPrivData(interp);
4867
4868 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
4869 return JIM_ERR;
4870
4871 return JIM_OK;
4872 }
4873
4874 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4875 {
4876 if (argc != 1) {
4877 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
4878 return JIM_ERR;
4879 }
4880 struct target *target = Jim_CmdPrivData(interp);
4881 if (!target->tap->enabled)
4882 return jim_target_tap_disabled(interp);
4883
4884 int e;
4885 if (!(target_was_examined(target)))
4886 e = ERROR_TARGET_NOT_EXAMINED;
4887 else
4888 e = target->type->poll(target);
4889 if (e != ERROR_OK)
4890 return JIM_ERR;
4891 return JIM_OK;
4892 }
4893
4894 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4895 {
4896 Jim_GetOptInfo goi;
4897 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4898
4899 if (goi.argc != 2) {
4900 Jim_WrongNumArgs(interp, 0, argv,
4901 "([tT]|[fF]|assert|deassert) BOOL");
4902 return JIM_ERR;
4903 }
4904
4905 Jim_Nvp *n;
4906 int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n);
4907 if (e != JIM_OK) {
4908 Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1);
4909 return e;
4910 }
4911 /* the halt or not param */
4912 jim_wide a;
4913 e = Jim_GetOpt_Wide(&goi, &a);
4914 if (e != JIM_OK)
4915 return e;
4916
4917 struct target *target = Jim_CmdPrivData(goi.interp);
4918 if (!target->tap->enabled)
4919 return jim_target_tap_disabled(interp);
4920 if (!(target_was_examined(target))) {
4921 LOG_ERROR("Target not examined yet");
4922 return ERROR_TARGET_NOT_EXAMINED;
4923 }
4924 if (!target->type->assert_reset || !target->type->deassert_reset) {
4925 Jim_SetResultFormatted(interp,
4926 "No target-specific reset for %s",
4927 target_name(target));
4928 return JIM_ERR;
4929 }
4930 /* determine if we should halt or not. */
4931 target->reset_halt = !!a;
4932 /* When this happens - all workareas are invalid. */
4933 target_free_all_working_areas_restore(target, 0);
4934
4935 /* do the assert */
4936 if (n->value == NVP_ASSERT)
4937 e = target->type->assert_reset(target);
4938 else
4939 e = target->type->deassert_reset(target);
4940 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
4941 }
4942
4943 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4944 {
4945 if (argc != 1) {
4946 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
4947 return JIM_ERR;
4948 }
4949 struct target *target = Jim_CmdPrivData(interp);
4950 if (!target->tap->enabled)
4951 return jim_target_tap_disabled(interp);
4952 int e = target->type->halt(target);
4953 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
4954 }
4955
4956 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4957 {
4958 Jim_GetOptInfo goi;
4959 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4960
4961 /* params: <name> statename timeoutmsecs */
4962 if (goi.argc != 2) {
4963 const char *cmd_name = Jim_GetString(argv[0], NULL);
4964 Jim_SetResultFormatted(goi.interp,
4965 "%s <state_name> <timeout_in_msec>", cmd_name);
4966 return JIM_ERR;
4967 }
4968
4969 Jim_Nvp *n;
4970 int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n);
4971 if (e != JIM_OK) {
4972 Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1);
4973 return e;
4974 }
4975 jim_wide a;
4976 e = Jim_GetOpt_Wide(&goi, &a);
4977 if (e != JIM_OK)
4978 return e;
4979 struct target *target = Jim_CmdPrivData(interp);
4980 if (!target->tap->enabled)
4981 return jim_target_tap_disabled(interp);
4982
4983 e = target_wait_state(target, n->value, a);
4984 if (e != ERROR_OK) {
4985 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
4986 Jim_SetResultFormatted(goi.interp,
4987 "target: %s wait %s fails (%#s) %s",
4988 target_name(target), n->name,
4989 eObj, target_strerror_safe(e));
4990 Jim_FreeNewObj(interp, eObj);
4991 return JIM_ERR;
4992 }
4993 return JIM_OK;
4994 }
4995 /* List for human, Events defined for this target.
4996 * scripts/programs should use 'name cget -event NAME'
4997 */
4998 static int jim_target_event_list(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4999 {
5000 struct command_context *cmd_ctx = current_command_context(interp);
5001 assert(cmd_ctx != NULL);
5002
5003 struct target *target = Jim_CmdPrivData(interp);
5004 struct target_event_action *teap = target->event_action;
5005 command_print(cmd_ctx, "Event actions for target (%d) %s\n",
5006 target->target_number,
5007 target_name(target));
5008 command_print(cmd_ctx, "%-25s | Body", "Event");
5009 command_print(cmd_ctx, "------------------------- | "
5010 "----------------------------------------");
5011 while (teap) {
5012 Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event);
5013 command_print(cmd_ctx, "%-25s | %s",
5014 opt->name, Jim_GetString(teap->body, NULL));
5015 teap = teap->next;
5016 }
5017 command_print(cmd_ctx, "***END***");
5018 return JIM_OK;
5019 }
5020 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5021 {
5022 if (argc != 1) {
5023 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5024 return JIM_ERR;
5025 }
5026 struct target *target = Jim_CmdPrivData(interp);
5027 Jim_SetResultString(interp, target_state_name(target), -1);
5028 return JIM_OK;
5029 }
5030 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5031 {
5032 Jim_GetOptInfo goi;
5033 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5034 if (goi.argc != 1) {
5035 const char *cmd_name = Jim_GetString(argv[0], NULL);
5036 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5037 return JIM_ERR;
5038 }
5039 Jim_Nvp *n;
5040 int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n);
5041 if (e != JIM_OK) {
5042 Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1);
5043 return e;
5044 }
5045 struct target *target = Jim_CmdPrivData(interp);
5046 target_handle_event(target, n->value);
5047 return JIM_OK;
5048 }
5049
5050 static const struct command_registration target_instance_command_handlers[] = {
5051 {
5052 .name = "configure",
5053 .mode = COMMAND_CONFIG,
5054 .jim_handler = jim_target_configure,
5055 .help = "configure a new target for use",
5056 .usage = "[target_attribute ...]",
5057 },
5058 {
5059 .name = "cget",
5060 .mode = COMMAND_ANY,
5061 .jim_handler = jim_target_configure,
5062 .help = "returns the specified target attribute",
5063 .usage = "target_attribute",
5064 },
5065 {
5066 .name = "mww",
5067 .mode = COMMAND_EXEC,
5068 .jim_handler = jim_target_mw,
5069 .help = "Write 32-bit word(s) to target memory",
5070 .usage = "address data [count]",
5071 },
5072 {
5073 .name = "mwh",
5074 .mode = COMMAND_EXEC,
5075 .jim_handler = jim_target_mw,
5076 .help = "Write 16-bit half-word(s) to target memory",
5077 .usage = "address data [count]",
5078 },
5079 {
5080 .name = "mwb",
5081 .mode = COMMAND_EXEC,
5082 .jim_handler = jim_target_mw,
5083 .help = "Write byte(s) to target memory",
5084 .usage = "address data [count]",
5085 },
5086 {
5087 .name = "mdw",
5088 .mode = COMMAND_EXEC,
5089 .jim_handler = jim_target_md,
5090 .help = "Display target memory as 32-bit words",
5091 .usage = "address [count]",
5092 },
5093 {
5094 .name = "mdh",
5095 .mode = COMMAND_EXEC,
5096 .jim_handler = jim_target_md,
5097 .help = "Display target memory as 16-bit half-words",
5098 .usage = "address [count]",
5099 },
5100 {
5101 .name = "mdb",
5102 .mode = COMMAND_EXEC,
5103 .jim_handler = jim_target_md,
5104 .help = "Display target memory as 8-bit bytes",
5105 .usage = "address [count]",
5106 },
5107 {
5108 .name = "array2mem",
5109 .mode = COMMAND_EXEC,
5110 .jim_handler = jim_target_array2mem,
5111 .help = "Writes Tcl array of 8/16/32 bit numbers "
5112 "to target memory",
5113 .usage = "arrayname bitwidth address count",
5114 },
5115 {
5116 .name = "mem2array",
5117 .mode = COMMAND_EXEC,
5118 .jim_handler = jim_target_mem2array,
5119 .help = "Loads Tcl array of 8/16/32 bit numbers "
5120 "from target memory",
5121 .usage = "arrayname bitwidth address count",
5122 },
5123 {
5124 .name = "eventlist",
5125 .mode = COMMAND_EXEC,
5126 .jim_handler = jim_target_event_list,
5127 .help = "displays a table of events defined for this target",
5128 },
5129 {
5130 .name = "curstate",
5131 .mode = COMMAND_EXEC,
5132 .jim_handler = jim_target_current_state,
5133 .help = "displays the current state of this target",
5134 },
5135 {
5136 .name = "arp_examine",
5137 .mode = COMMAND_EXEC,
5138 .jim_handler = jim_target_examine,
5139 .help = "used internally for reset processing",
5140 },
5141 {
5142 .name = "arp_halt_gdb",
5143 .mode = COMMAND_EXEC,
5144 .jim_handler = jim_target_halt_gdb,
5145 .help = "used internally for reset processing to halt GDB",
5146 },
5147 {
5148 .name = "arp_poll",
5149 .mode = COMMAND_EXEC,
5150 .jim_handler = jim_target_poll,
5151 .help = "used internally for reset processing",
5152 },
5153 {
5154 .name = "arp_reset",
5155 .mode = COMMAND_EXEC,
5156 .jim_handler = jim_target_reset,
5157 .help = "used internally for reset processing",
5158 },
5159 {
5160 .name = "arp_halt",
5161 .mode = COMMAND_EXEC,
5162 .jim_handler = jim_target_halt,
5163 .help = "used internally for reset processing",
5164 },
5165 {
5166 .name = "arp_waitstate",
5167 .mode = COMMAND_EXEC,
5168 .jim_handler = jim_target_wait_state,
5169 .help = "used internally for reset processing",
5170 },
5171 {
5172 .name = "invoke-event",
5173 .mode = COMMAND_EXEC,
5174 .jim_handler = jim_target_invoke_event,
5175 .help = "invoke handler for specified event",
5176 .usage = "event_name",
5177 },
5178 COMMAND_REGISTRATION_DONE
5179 };
5180
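/* Illustrative configuration-stage usage (names and target type are hypothetical):
 *   target create mychip.cpu cortex_m3 -chain-position mychip.cpu -endian little
 */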
5181 static int target_create(Jim_GetOptInfo *goi)
5182 {
5183 Jim_Obj *new_cmd;
5184 Jim_Cmd *cmd;
5185 const char *cp;
5186 char *cp2;
5187 int e;
5188 int x;
5189 struct target *target;
5190 struct command_context *cmd_ctx;
5191
5192 cmd_ctx = current_command_context(goi->interp);
5193 assert(cmd_ctx != NULL);
5194
5195 if (goi->argc < 3) {
5196 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5197 return JIM_ERR;
5198 }
5199
5200 /* COMMAND */
5201 Jim_GetOpt_Obj(goi, &new_cmd);
5202 /* does this command exist? */
5203 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5204 if (cmd) {
5205 cp = Jim_GetString(new_cmd, NULL);
5206 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5207 return JIM_ERR;
5208 }
5209
5210 /* TYPE */
5211 e = Jim_GetOpt_String(goi, &cp2, NULL);
5212 if (e != JIM_OK)
5213 return e;
5214 cp = cp2;
5215 struct transport *tr = get_current_transport();
5216 if (tr->override_target) {
5217 e = tr->override_target(&cp);
5218 if (e != ERROR_OK) {
5219 LOG_ERROR("The selected transport doesn't support this target");
5220 return JIM_ERR;
5221 }
5222 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5223 }
5224 /* now does target type exist */
5225 for (x = 0 ; target_types[x] ; x++) {
5226 if (0 == strcmp(cp, target_types[x]->name)) {
5227 /* found */
5228 break;
5229 }
5230
5231 /* check for deprecated name */
5232 if (target_types[x]->deprecated_name) {
5233 if (0 == strcmp(cp, target_types[x]->deprecated_name)) {
5234 /* found */
5235 LOG_WARNING("target name is deprecated, use: \'%s\'", target_types[x]->name);
5236 break;
5237 }
5238 }
5239 }
5240 if (target_types[x] == NULL) {
5241 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5242 for (x = 0 ; target_types[x] ; x++) {
5243 if (target_types[x + 1]) {
5244 Jim_AppendStrings(goi->interp,
5245 Jim_GetResult(goi->interp),
5246 target_types[x]->name,
5247 ", ", NULL);
5248 } else {
5249 Jim_AppendStrings(goi->interp,
5250 Jim_GetResult(goi->interp),
5251 " or ",
5252 target_types[x]->name, NULL);
5253 }
5254 }
5255 return JIM_ERR;
5256 }
5257
5258 /* Create it */
5259 target = calloc(1, sizeof(struct target));
5260 /* set target number */
5261 target->target_number = new_target_number();
5262 cmd_ctx->current_target = target->target_number;
5263
5264 /* allocate memory for each unique target type */
5265 target->type = calloc(1, sizeof(struct target_type));
5266
5267 memcpy(target->type, target_types[x], sizeof(struct target_type));
5268
5269 /* will be set by "-endian" */
5270 target->endianness = TARGET_ENDIAN_UNKNOWN;
5271
5272 /* default to first core, override with -coreid */
5273 target->coreid = 0;
5274
5275 target->working_area = 0x0;
5276 target->working_area_size = 0x0;
5277 target->working_areas = NULL;
5278 target->backup_working_area = 0;
5279
5280 target->state = TARGET_UNKNOWN;
5281 target->debug_reason = DBG_REASON_UNDEFINED;
5282 target->reg_cache = NULL;
5283 target->breakpoints = NULL;
5284 target->watchpoints = NULL;
5285 target->next = NULL;
5286 target->arch_info = NULL;
5287
5288 target->display = 1;
5289
5290 target->halt_issued = false;
5291
5292 /* initialize trace information */
5293 target->trace_info = malloc(sizeof(struct trace));
5294 target->trace_info->num_trace_points = 0;
5295 target->trace_info->trace_points_size = 0;
5296 target->trace_info->trace_points = NULL;
5297 target->trace_info->trace_history_size = 0;
5298 target->trace_info->trace_history = NULL;
5299 target->trace_info->trace_history_pos = 0;
5300 target->trace_info->trace_history_overflowed = 0;
5301
5302 target->dbgmsg = NULL;
5303 target->dbg_msg_enabled = 0;
5304
5305 target->endianness = TARGET_ENDIAN_UNKNOWN;
5306
5307 target->rtos = NULL;
5308 target->rtos_auto_detect = false;
5309
5310 /* Do the rest as "configure" options */
5311 goi->isconfigure = 1;
5312 e = target_configure(goi, target);
5313
5314 if (target->tap == NULL) {
5315 Jim_SetResultString(goi->interp, "-chain-position required when creating target", -1);
5316 e = JIM_ERR;
5317 }
5318
5319 if (e != JIM_OK) {
5320 free(target->type);
5321 free(target);
5322 return e;
5323 }
5324
5325 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5326 /* default endian to little if not specified */
5327 target->endianness = TARGET_LITTLE_ENDIAN;
5328 }
5329
5330 cp = Jim_GetString(new_cmd, NULL);
5331 target->cmd_name = strdup(cp);
5332
5333 /* create the target specific commands */
5334 if (target->type->commands) {
5335 e = register_commands(cmd_ctx, NULL, target->type->commands);
5336 if (ERROR_OK != e)
5337 LOG_ERROR("unable to register '%s' commands", cp);
5338 }
5339 if (target->type->target_create)
5340 (*(target->type->target_create))(target, goi->interp);
5341
5342 /* append to end of list */
5343 {
5344 struct target **tpp;
5345 tpp = &(all_targets);
5346 while (*tpp)
5347 tpp = &((*tpp)->next);
5348 *tpp = target;
5349 }
5350
5351 /* now - create the new target name command */
5352 const struct command_registration target_subcommands[] = {
5353 {
5354 .chain = target_instance_command_handlers,
5355 },
5356 {
5357 .chain = target->type->commands,
5358 },
5359 COMMAND_REGISTRATION_DONE
5360 };
5361 const struct command_registration target_commands[] = {
5362 {
5363 .name = cp,
5364 .mode = COMMAND_ANY,
5365 .help = "target command group",
5366 .usage = "",
5367 .chain = target_subcommands,
5368 },
5369 COMMAND_REGISTRATION_DONE
5370 };
5371 e = register_commands(cmd_ctx, NULL, target_commands);
5372 if (ERROR_OK != e)
5373 return JIM_ERR;
5374
5375 struct command *c = command_find_in_context(cmd_ctx, cp);
5376 assert(c);
5377 command_set_handler_data(c, target);
5378
5379 return (ERROR_OK == e) ? JIM_OK : JIM_ERR;
5380 }
5381
5382 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5383 {
5384 if (argc != 1) {
5385 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5386 return JIM_ERR;
5387 }
5388 struct command_context *cmd_ctx = current_command_context(interp);
5389 assert(cmd_ctx != NULL);
5390
5391 Jim_SetResultString(interp, target_name(get_current_target(cmd_ctx)), -1);
5392 return JIM_OK;
5393 }
5394
5395 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5396 {
5397 if (argc != 1) {
5398 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5399 return JIM_ERR;
5400 }
5401 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5402 for (unsigned x = 0; NULL != target_types[x]; x++) {
5403 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5404 Jim_NewStringObj(interp, target_types[x]->name, -1));
5405 }
5406 return JIM_OK;
5407 }
5408
5409 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5410 {
5411 if (argc != 1) {
5412 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5413 return JIM_ERR;
5414 }
5415 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5416 struct target *target = all_targets;
5417 while (target) {
5418 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5419 Jim_NewStringObj(interp, target_name(target), -1));
5420 target = target->next;
5421 }
5422 return JIM_OK;
5423 }
5424
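/* Illustrative usage (hypothetical target names):
 *   target smp mychip.cpu0 mychip.cpu1
 * marks the listed targets as members of one SMP group. */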
5425 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5426 {
5427 int i;
5428 const char *targetname;
5429 int retval, len;
5430 struct target *target = (struct target *) NULL;
5431 struct target_list *head, *curr, *new;
5432 curr = (struct target_list *) NULL;
5433 head = (struct target_list *) NULL;
5434
5435 retval = 0;
5436 LOG_DEBUG("%d", argc);
5437 /* argv[1] = target to associate in smp
5438 * argv[2] = target to associate in smp
5439 * argv[3] ...
5440 */
5441
5442 for (i = 1; i < argc; i++) {
5443
5444 targetname = Jim_GetString(argv[i], &len);
5445 target = get_target(targetname);
5446 LOG_DEBUG("%s ", targetname);
5447 if (target) {
5448 new = malloc(sizeof(struct target_list));
5449 new->target = target;
5450 new->next = (struct target_list *)NULL;
5451 if (head == (struct target_list *)NULL) {
5452 head = new;
5453 curr = head;
5454 } else {
5455 curr->next = new;
5456 curr = new;
5457 }
5458 }
5459 }
5460 /* now parse the list of CPUs and put the targets in SMP mode */
5461 curr = head;
5462
5463 while (curr != (struct target_list *)NULL) {
5464 target = curr->target;
5465 target->smp = 1;
5466 target->head = head;
5467 curr = curr->next;
5468 }
5469
5470 if (target && target->rtos)
5471 retval = rtos_smp_init(head->target);
5472
5473 return retval;
5474 }
5475
5476
5477 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5478 {
5479 Jim_GetOptInfo goi;
5480 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5481 if (goi.argc < 3) {
5482 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5483 "<name> <target_type> [<target_options> ...]");
5484 return JIM_ERR;
5485 }
5486 return target_create(&goi);
5487 }
5488
5489 static const struct command_registration target_subcommand_handlers[] = {
5490 {
5491 .name = "init",
5492 .mode = COMMAND_CONFIG,
5493 .handler = handle_target_init_command,
5494 .help = "initialize targets",
5495 },
5496 {
5497 .name = "create",
5498 /* REVISIT this should be COMMAND_CONFIG ... */
5499 .mode = COMMAND_ANY,
5500 .jim_handler = jim_target_create,
5501 .usage = "name type '-chain-position' name [options ...]",
5502 .help = "Creates and selects a new target",
5503 },
5504 {
5505 .name = "current",
5506 .mode = COMMAND_ANY,
5507 .jim_handler = jim_target_current,
5508 .help = "Returns the currently selected target",
5509 },
5510 {
5511 .name = "types",
5512 .mode = COMMAND_ANY,
5513 .jim_handler = jim_target_types,
5514 .help = "Returns the available target types as "
5515 "a list of strings",
5516 },
5517 {
5518 .name = "names",
5519 .mode = COMMAND_ANY,
5520 .jim_handler = jim_target_names,
5521 .help = "Returns the names of all targets as a list of strings",
5522 },
5523 {
5524 .name = "smp",
5525 .mode = COMMAND_ANY,
5526 .jim_handler = jim_target_smp,
5527 .usage = "targetname1 targetname2 ...",
5528 .help = "gather several targets into an SMP list"
5529 },
5530
5531 COMMAND_REGISTRATION_DONE
5532 };
5533
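/* One staged chunk of an image: handle_fast_load_image_command fills an array
 * of these from the image file, and handle_fast_load_command later writes them
 * to the target. */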
5534 struct FastLoad {
5535 uint32_t address;
5536 uint8_t *data;
5537 int length;
5538
5539 };
5540
5541 static int fastload_num;
5542 static struct FastLoad *fastload;
5543
5544 static void free_fastload(void)
5545 {
5546 if (fastload != NULL) {
5547 int i;
5548 for (i = 0; i < fastload_num; i++) {
5549 if (fastload[i].data)
5550 free(fastload[i].data);
5551 }
5552 free(fastload);
5553 fastload = NULL;
5554 }
5555 }
5556
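/* Illustrative usage (file name and base address are hypothetical):
 *   fast_load_image firmware.bin 0x08000000
 *   fast_load
 * fast_load_image only stages the image in host memory; fast_load performs
 * the actual writes to the target. */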
5557 COMMAND_HANDLER(handle_fast_load_image_command)
5558 {
5559 uint8_t *buffer;
5560 size_t buf_cnt;
5561 uint32_t image_size;
5562 uint32_t min_address = 0;
5563 uint32_t max_address = 0xffffffff;
5564 int i;
5565
5566 struct image image;
5567
5568 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
5569 &image, &min_address, &max_address);
5570 if (ERROR_OK != retval)
5571 return retval;
5572
5573 struct duration bench;
5574 duration_start(&bench);
5575
5576 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
5577 if (retval != ERROR_OK)
5578 return retval;
5579
5580 image_size = 0x0;
5581 retval = ERROR_OK;
5582 fastload_num = image.num_sections;
5583 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
5584 if (fastload == NULL) {
5585 command_print(CMD_CTX, "out of memory");
5586 image_close(&image);
5587 return ERROR_FAIL;
5588 }
5589 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
5590 for (i = 0; i < image.num_sections; i++) {
5591 buffer = malloc(image.sections[i].size);
5592 if (buffer == NULL) {
5593 command_print(CMD_CTX, "error allocating buffer for section (%d bytes)",
5594 (int)(image.sections[i].size));
5595 retval = ERROR_FAIL;
5596 break;
5597 }
5598
5599 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
5600 if (retval != ERROR_OK) {
5601 free(buffer);
5602 break;
5603 }
5604
5605 uint32_t offset = 0;
5606 uint32_t length = buf_cnt;
5607
5608 /* DANGER!!! beware of unsigned comparison here!!! */
5609
5610 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
5611 (image.sections[i].base_address < max_address)) {
5612 if (image.sections[i].base_address < min_address) {
5613 /* clip addresses below */
5614 offset += min_address-image.sections[i].base_address;
5615 length -= offset;
5616 }
5617
5618 if (image.sections[i].base_address + buf_cnt > max_address)
5619 length -= (image.sections[i].base_address + buf_cnt)-max_address;
5620
5621 fastload[i].address = image.sections[i].base_address + offset;
5622 fastload[i].data = malloc(length);
5623 if (fastload[i].data == NULL) {
5624 free(buffer);
5625 command_print(CMD_CTX, "error allocating buffer for section (%" PRIu32 " bytes)",
5626 length);
5627 retval = ERROR_FAIL;
5628 break;
5629 }
5630 memcpy(fastload[i].data, buffer + offset, length);
5631 fastload[i].length = length;
5632
5633 image_size += length;
5634 command_print(CMD_CTX, "%u bytes written at address 0x%8.8x",
5635 (unsigned int)length,
5636 ((unsigned int)(image.sections[i].base_address + offset)));
5637 }
5638
5639 free(buffer);
5640 }
5641
5642 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
5643 command_print(CMD_CTX, "Loaded %" PRIu32 " bytes "
5644 "in %fs (%0.3f KiB/s)", image_size,
5645 duration_elapsed(&bench), duration_kbps(&bench, image_size));
5646
5647 command_print(CMD_CTX,
5648 "WARNING: image has not been loaded to target!"
5649 "You can issue a 'fast_load' to finish loading.");
5650 }
5651
5652 image_close(&image);
5653
5654 if (retval != ERROR_OK)
5655 free_fastload();
5656
5657 return retval;
5658 }
5659
5660 COMMAND_HANDLER(handle_fast_load_command)
5661 {
5662 if (CMD_ARGC > 0)
5663 return ERROR_COMMAND_SYNTAX_ERROR;
5664 if (fastload == NULL) {
5665 LOG_ERROR("No image in memory");
5666 return ERROR_FAIL;
5667 }
5668 int i;
5669 int ms = timeval_ms();
5670 int size = 0;
5671 int retval = ERROR_OK;
5672 for (i = 0; i < fastload_num; i++) {
5673 struct target *target = get_current_target(CMD_CTX);
5674 command_print(CMD_CTX, "Write to 0x%08x, length 0x%08x",
5675 (unsigned int)(fastload[i].address),
5676 (unsigned int)(fastload[i].length));
5677 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
5678 if (retval != ERROR_OK)
5679 break;
5680 size += fastload[i].length;
5681 }
5682 if (retval == ERROR_OK) {
5683 int after = timeval_ms();
5684 command_print(CMD_CTX, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
5685 }
5686 return retval;
5687 }
5688
5689 static const struct command_registration target_command_handlers[] = {
5690 {
5691 .name = "targets",
5692 .handler = handle_targets_command,
5693 .mode = COMMAND_ANY,
5694 .help = "change current default target (one parameter) "
5695 "or prints table of all targets (no parameters)",
5696 .usage = "[target]",
5697 },
5698 {
5699 .name = "target",
5700 .mode = COMMAND_CONFIG,
5701 .help = "configure target",
5702
5703 .chain = target_subcommand_handlers,
5704 },
5705 COMMAND_REGISTRATION_DONE
5706 };
5707
5708 int target_register_commands(struct command_context *cmd_ctx)
5709 {
5710 return register_commands(cmd_ctx, NULL, target_command_handlers);
5711 }
5712
5713 static bool target_reset_nag = true;
5714
5715 bool get_target_reset_nag(void)
5716 {
5717 return target_reset_nag;
5718 }
5719
5720 COMMAND_HANDLER(handle_target_reset_nag)
5721 {
5722 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
5723 &target_reset_nag, "Nag after each reset about options to improve "
5724 "performance");
5725 }
5726
5727 COMMAND_HANDLER(handle_ps_command)
5728 {
5729 struct target *target = get_current_target(CMD_CTX);
5730 char *display;
5731 if (target->state != TARGET_HALTED) {
5732 LOG_INFO("target not halted !!");
5733 return ERROR_OK;
5734 }
5735
5736 if ((target->rtos) && (target->rtos->type)
5737 && (target->rtos->type->ps_command)) {
5738 display = target->rtos->type->ps_command(target);
5739 command_print(CMD_CTX, "%s", display);
5740 free(display);
5741 return ERROR_OK;
5742 } else {
5743 LOG_INFO("failed");
5744 return ERROR_TARGET_FAILURE;
5745 }
5746 }
5747
5748 static void binprint(struct command_context *cmd_ctx, const char *text, const uint8_t *buf, int size)
5749 {
5750 if (text != NULL)
5751 command_print_sameline(cmd_ctx, "%s", text);
5752 for (int i = 0; i < size; i++)
5753 command_print_sameline(cmd_ctx, " %02x", buf[i]);
5754 command_print(cmd_ctx, " ");
5755 }
5756
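/* Exercises target reads/writes of 1/2/4-byte width at all four address
 * offsets, from both aligned and unaligned host buffers.
 * Illustrative usage, assuming this handler is registered as
 * "test_mem_access" (the size argument is an example):
 *   test_mem_access 0x400
 */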
5757 COMMAND_HANDLER(handle_test_mem_access_command)
5758 {
5759 struct target *target = get_current_target(CMD_CTX);
5760 uint32_t test_size;
5761 int retval = ERROR_OK;
5762
5763 if (target->state != TARGET_HALTED) {
5764 LOG_INFO("target not halted !!");
5765 return ERROR_FAIL;
5766 }
5767
5768 if (CMD_ARGC != 1)
5769 return ERROR_COMMAND_SYNTAX_ERROR;
5770
5771 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
5772
5773 /* Test reads */
5774 size_t num_bytes = test_size + 4;
5775
5776 struct working_area *wa = NULL;
5777 retval = target_alloc_working_area(target, num_bytes, &wa);
5778 if (retval != ERROR_OK) {
5779 LOG_ERROR("Not enough working area");
5780 return ERROR_FAIL;
5781 }
5782
5783 uint8_t *test_pattern = malloc(num_bytes);
5784
5785 for (size_t i = 0; i < num_bytes; i++)
5786 test_pattern[i] = rand();
5787
5788 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
5789 if (retval != ERROR_OK) {
5790 LOG_ERROR("Test pattern write failed");
5791 goto out;
5792 }
5793
5794 for (int host_offset = 0; host_offset <= 1; host_offset++) {
5795 for (int size = 1; size <= 4; size *= 2) {
5796 for (int offset = 0; offset < 4; offset++) {
5797 uint32_t count = test_size / size;
5798 size_t host_bufsiz = (count + 2) * size + host_offset;
5799 uint8_t *read_ref = malloc(host_bufsiz);
5800 uint8_t *read_buf = malloc(host_bufsiz);
5801
5802 for (size_t i = 0; i < host_bufsiz; i++) {
5803 read_ref[i] = rand();
5804 read_buf[i] = read_ref[i];
5805 }
5806 command_print_sameline(CMD_CTX,
5807 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
5808 size, offset, host_offset ? "un" : "");
5809
5810 struct duration bench;
5811 duration_start(&bench);
5812
5813 retval = target_read_memory(target, wa->address + offset, size, count,
5814 read_buf + size + host_offset);
5815
5816 duration_measure(&bench);
5817
5818 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
5819 command_print(CMD_CTX, "Unsupported alignment");
5820 goto next;
5821 } else if (retval != ERROR_OK) {
5822 command_print(CMD_CTX, "Memory read failed");
5823 goto next;
5824 }
5825
5826 /* replay on host */
5827 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
5828
5829 /* check result */
5830 int result = memcmp(read_ref, read_buf, host_bufsiz);
5831 if (result == 0) {
5832 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
5833 duration_elapsed(&bench),
5834 duration_kbps(&bench, count * size));
5835 } else {
5836 command_print(CMD_CTX, "Compare failed");
5837 binprint(CMD_CTX, "ref:", read_ref, host_bufsiz);
5838 binprint(CMD_CTX, "buf:", read_buf, host_bufsiz);
5839 }
5840 next:
5841 free(read_ref);
5842 free(read_buf);
5843 }
5844 }
5845 }
5846
5847 out:
5848 free(test_pattern);
5849
5850 if (wa != NULL)
5851 target_free_working_area(target, wa);
5852
5853 /* Test writes */
5854 num_bytes = test_size + 4 + 4 + 4;
5855
5856 retval = target_alloc_working_area(target, num_bytes, &wa);
5857 if (retval != ERROR_OK) {
5858 LOG_ERROR("Not enough working area");
5859 return ERROR_FAIL;
5860 }
5861
5862 test_pattern = malloc(num_bytes);
5863
5864 for (size_t i = 0; i < num_bytes; i++)
5865 test_pattern[i] = rand();
5866
5867 for (int host_offset = 0; host_offset <= 1; host_offset++) {
5868 for (int size = 1; size <= 4; size *= 2) {
5869 for (int offset = 0; offset < 4; offset++) {
5870 uint32_t count = test_size / size;
5871 size_t host_bufsiz = count * size + host_offset;
5872 uint8_t *read_ref = malloc(num_bytes);
5873 uint8_t *read_buf = malloc(num_bytes);
5874 uint8_t *write_buf = malloc(host_bufsiz);
5875
5876 for (size_t i = 0; i < host_bufsiz; i++)
5877 write_buf[i] = rand();
5878 command_print_sameline(CMD_CTX,
5879 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
5880 size, offset, host_offset ? "un" : "");
5881
5882 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
5883 if (retval != ERROR_OK) {
5884 command_print(CMD_CTX, "Test pattern write failed");
5885 goto nextw;
5886 }
5887
5888 /* replay on host */
5889 memcpy(read_ref, test_pattern, num_bytes);
5890 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
5891
5892 struct duration bench;
5893 duration_start(&bench);
5894
5895 retval = target_write_memory(target, wa->address + size + offset, size, count,
5896 write_buf + host_offset);
5897
5898 duration_measure(&bench);
5899
5900 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
5901 command_print(CMD_CTX, "Unsupported alignment");
5902 goto nextw;
5903 } else if (retval != ERROR_OK) {
5904 command_print(CMD_CTX, "Memory write failed");
5905 goto nextw;
5906 }
5907
5908 /* read back */
5909 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
5910 if (retval != ERROR_OK) {
5911 command_print(CMD_CTX, "Test pattern write failed");
5912 goto nextw;
5913 }
5914
5915 /* check result */
5916 int result = memcmp(read_ref, read_buf, num_bytes);
5917 if (result == 0) {
5918 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
5919 duration_elapsed(&bench),
5920 duration_kbps(&bench, count * size));
5921 } else {
5922 command_print(CMD_CTX, "Compare failed");
5923 binprint(CMD_CTX, "ref:", read_ref, num_bytes);
5924 binprint(CMD_CTX, "buf:", read_buf, num_bytes);
5925 }
5926 nextw:
5927 free(read_ref);
5928 free(read_buf);
free(write_buf);
5929 }
5930 }
5931 }
5932
5933 free(test_pattern);
5934
5935 if (wa != NULL)
5936 target_free_working_area(target, wa);
5937 return retval;
5938 }
5939
5940 static const struct command_registration target_exec_command_handlers[] = {
5941 {
5942 .name = "fast_load_image",
5943 .handler = handle_fast_load_image_command,
5944 .mode = COMMAND_ANY,
5945 .help = "Load image into server memory for later use by "
5946 "fast_load; primarily for profiling",
5947 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
5948 "[min_address [max_length]]",
5949 },
5950 {
5951 .name = "fast_load",
5952 .handler = handle_fast_load_command,
5953 .mode = COMMAND_EXEC,
5954 .help = "loads active fast load image to current target "
5955 "- mainly for profiling purposes",
5956 .usage = "",
5957 },
5958 {
5959 .name = "profile",
5960 .handler = handle_profile_command,
5961 .mode = COMMAND_EXEC,
5962 .usage = "seconds filename [start end]",
5963 .help = "profiling samples the CPU PC",
5964 },
5965 /** @todo don't register virt2phys() unless target supports it */
5966 {
5967 .name = "virt2phys",
5968 .handler = handle_virt2phys_command,
5969 .mode = COMMAND_ANY,
5970 .help = "translate a virtual address into a physical address",
5971 .usage = "virtual_address",
5972 },
5973 {
5974 .name = "reg",
5975 .handler = handle_reg_command,
5976 .mode = COMMAND_EXEC,
5977 .help = "display (reread from target with \"force\") or set a register; "
5978 "with no arguments, displays all registers and their values",
5979 .usage = "[(register_number|register_name) [(value|'force')]]",
5980 },
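/* Illustrative "reg" invocations (register names depend on the target):
 *   reg                  -- list all registers and their cached values
 *   reg pc               -- show one register
 *   reg pc force         -- re-read it from the target
 *   reg r0 0x12345678    -- set a register
 */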
5981 {
5982 .name = "poll",
5983 .handler = handle_poll_command,
5984 .mode = COMMAND_EXEC,
5985 .help = "poll target state; or reconfigure background polling",
5986 .usage = "['on'|'off']",
5987 },
5988 {
5989 .name = "wait_halt",
5990 .handler = handle_wait_halt_command,
5991 .mode = COMMAND_EXEC,
5992 .help = "wait up to the specified number of milliseconds "
5993 "(default 5000) for a previously requested halt",
5994 .usage = "[milliseconds]",
5995 },
5996 {
5997 .name = "halt",
5998 .handler = handle_halt_command,
5999 .mode = COMMAND_EXEC,
6000 .help = "request target to halt, then wait up to the specified"
6001 "number of milliseconds (default 5000) for it to complete",
6002 .usage = "[milliseconds]",
6003 },
6004 {
6005 .name = "resume",
6006 .handler = handle_resume_command,
6007 .mode = COMMAND_EXEC,
6008 .help = "resume target execution from current PC or address",
6009 .usage = "[address]",
6010 },
6011 {
6012 .name = "reset",
6013 .handler = handle_reset_command,
6014 .mode = COMMAND_EXEC,
6015 .usage = "[run|halt|init]",
6016 .help = "Reset all targets into the specified mode."
6017 "Default reset mode is run, if not given.",
6018 },
6019 {
6020 .name = "soft_reset_halt",
6021 .handler = handle_soft_reset_halt_command,
6022 .mode = COMMAND_EXEC,
6023 .usage = "",
6024 .help = "halt the target and do a soft reset",
6025 },
6026 {
6027 .name = "step",
6028 .handler = handle_step_command,
6029 .mode = COMMAND_EXEC,
6030 .help = "step one instruction from current PC or address",
6031 .usage = "[address]",
6032 },
6033 {
6034 .name = "mdw",
6035 .handler = handle_md_command,
6036 .mode = COMMAND_EXEC,
6037 .help = "display memory words",
6038 .usage = "['phys'] address [count]",
6039 },
6040 {
6041 .name = "mdh",
6042 .handler = handle_md_command,
6043 .mode = COMMAND_EXEC,
6044 .help = "display memory half-words",
6045 .usage = "['phys'] address [count]",
6046 },
6047 {
6048 .name = "mdb",
6049 .handler = handle_md_command,
6050 .mode = COMMAND_EXEC,
6051 .help = "display memory bytes",
6052 .usage = "['phys'] address [count]",
6053 },
6054 {
6055 .name = "mww",
6056 .handler = handle_mw_command,
6057 .mode = COMMAND_EXEC,
6058 .help = "write memory word",
6059 .usage = "['phys'] address value [count]",
6060 },
6061 {
6062 .name = "mwh",
6063 .handler = handle_mw_command,
6064 .mode = COMMAND_EXEC,
6065 .help = "write memory half-word",
6066 .usage = "['phys'] address value [count]",
6067 },
6068 {
6069 .name = "mwb",
6070 .handler = handle_mw_command,
6071 .mode = COMMAND_EXEC,
6072 .help = "write memory byte",
6073 .usage = "['phys'] address value [count]",
6074 },
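/* Illustrative memory display/write invocations (addresses are made up):
 *   mdw 0x20000000 8          -- display 8 words
 *   mdb phys 0x20000000 16    -- display 16 bytes using physical addressing
 *   mww 0x20000000 0xdeadbeef -- write one word
 *   mwh 0x20000000 0xbeef 4   -- write the half-word value 4 times
 */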
6075 {
6076 .name = "bp",
6077 .handler = handle_bp_command,
6078 .mode = COMMAND_EXEC,
6079 .help = "list or set hardware or software breakpoint",
6080 .usage = "<address> [<asid>]<length> ['hw'|'hw_ctx']",
6081 },
6082 {
6083 .name = "rbp",
6084 .handler = handle_rbp_command,
6085 .mode = COMMAND_EXEC,
6086 .help = "remove breakpoint",
6087 .usage = "address",
6088 },
6089 {
6090 .name = "wp",
6091 .handler = handle_wp_command,
6092 .mode = COMMAND_EXEC,
6093 .help = "list (no params) or create watchpoints",
6094 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6095 },
6096 {
6097 .name = "rwp",
6098 .handler = handle_rwp_command,
6099 .mode = COMMAND_EXEC,
6100 .help = "remove watchpoint",
6101 .usage = "address",
6102 },
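/* Illustrative breakpoint/watchpoint invocations (addresses are made up):
 *   bp 0x00008000 2 hw        -- set a 2-byte hardware breakpoint
 *   rbp 0x00008000            -- remove it again
 *   wp 0x20000100 4 w         -- watch 4 bytes for write accesses
 *   rwp 0x20000100            -- remove the watchpoint
 */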
6103 {
6104 .name = "load_image",
6105 .handler = handle_load_image_command,
6106 .mode = COMMAND_EXEC,
6107 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6108 "[min_address] [max_length]",
6109 },
6110 {
6111 .name = "dump_image",
6112 .handler = handle_dump_image_command,
6113 .mode = COMMAND_EXEC,
6114 .usage = "filename address size",
6115 },
6116 {
6117 .name = "verify_image",
6118 .handler = handle_verify_image_command,
6119 .mode = COMMAND_EXEC,
6120 .usage = "filename [offset [type]]",
6121 },
6122 {
6123 .name = "test_image",
6124 .handler = handle_test_image_command,
6125 .mode = COMMAND_EXEC,
6126 .usage = "filename [offset [type]]",
6127 },
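/* Illustrative image invocations (file names and addresses are made up):
 *   load_image   app.elf 0x0               -- download an image
 *   verify_image app.elf 0x0               -- compare it against target memory
 *   dump_image   ram.bin 0x20000000 0x1000 -- save 4 KiB of target memory
 */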
6128 {
6129 .name = "mem2array",
6130 .mode = COMMAND_EXEC,
6131 .jim_handler = jim_mem2array,
6132 .help = "read 8/16/32 bit memory and return as a TCL array "
6133 "for script processing",
6134 .usage = "arrayname bitwidth address count",
6135 },
6136 {
6137 .name = "array2mem",
6138 .mode = COMMAND_EXEC,
6139 .jim_handler = jim_array2mem,
6140 .help = "convert a TCL array to memory locations "
6141 "and write the 8/16/32 bit values",
6142 .usage = "arrayname bitwidth address count",
6143 },
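/* Illustrative Tcl script usage of mem2array/array2mem
 * (address and count are made up):
 *
 *   mem2array readings 32 0x20000000 16   ;# read 16 words into $readings
 *   set readings(0) 0x12345678
 *   array2mem readings 32 0x20000000 16   ;# write them back
 */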
6144 {
6145 .name = "reset_nag",
6146 .handler = handle_target_reset_nag,
6147 .mode = COMMAND_ANY,
6148 .help = "Nag after each reset about options that could have been "
6149 "enabled to improve performance. ",
6150 .usage = "['enable'|'disable']",
6151 },
6152 {
6153 .name = "ps",
6154 .handler = handle_ps_command,
6155 .mode = COMMAND_EXEC,
6156 .help = "list all tasks ",
6157 .usage = " ",
6158 },
6159 {
6160 .name = "test_mem_access",
6161 .handler = handle_test_mem_access_command,
6162 .mode = COMMAND_EXEC,
6163 .help = "Test the target's memory access functions",
6164 .usage = "size",
6165 },
6166
6167 COMMAND_REGISTRATION_DONE
6168 };
6169 static int target_register_user_commands(struct command_context *cmd_ctx)
6170 {
6171 int retval = ERROR_OK;
6172 retval = target_request_register_commands(cmd_ctx);
6173 if (retval != ERROR_OK)
6174 return retval;
6175
6176 retval = trace_register_commands(cmd_ctx);
6177 if (retval != ERROR_OK)
6178 return retval;
6179
6180
6181 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6182 }
