target: enable TARGET_EVENT_RESUME_* events
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 √ėyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
40 ***************************************************************************/
41
42 #ifdef HAVE_CONFIG_H
43 #include "config.h"
44 #endif
45
46 #include <helper/time_support.h>
47 #include <jtag/jtag.h>
48 #include <flash/nor/core.h>
49
50 #include "target.h"
51 #include "target_type.h"
52 #include "target_request.h"
53 #include "breakpoints.h"
54 #include "register.h"
55 #include "trace.h"
56 #include "image.h"
57 #include "rtos/rtos.h"
58
59 static int target_read_buffer_default(struct target *target, uint32_t address,
60 uint32_t size, uint8_t *buffer);
61 static int target_write_buffer_default(struct target *target, uint32_t address,
62 uint32_t size, const uint8_t *buffer);
63 static int target_array2mem(Jim_Interp *interp, struct target *target,
64 int argc, Jim_Obj * const *argv);
65 static int target_mem2array(Jim_Interp *interp, struct target *target,
66 int argc, Jim_Obj * const *argv);
67 static int target_register_user_commands(struct command_context *cmd_ctx);
68
/* targets: every target driver compiled into this build exposes its
 * "struct target_type" descriptor; they are collected in target_types[]
 * below so a target can be selected by name at configuration time */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type cortexm3_target;
extern struct target_type cortexa8_target;
extern struct target_type arm11_target;
extern struct target_type mips_m4k_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type stm32_stlink_target;

/* NULL-terminated lookup table of all supported target drivers */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm3_target,
	&cortexa8_target,
	&arm11_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&stm32_stlink_target,
	NULL,
};
116
/* head of the singly-linked list of all configured targets */
struct target *all_targets;
/* registered target event notification callbacks */
static struct target_event_callback *target_event_callbacks;
/* registered timer callbacks (periodic and one-shot) */
static struct target_timer_callback *target_timer_callbacks;
/* default background polling period, in milliseconds */
static const int polling_interval = 100;

/* accepted spellings for boolean assert/deassert command arguments */
static const Jim_Nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

/* printable names for the ERROR_TARGET_* error codes */
static const Jim_Nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
146
147 static const char *target_strerror_safe(int err)
148 {
149 const Jim_Nvp *n;
150
151 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
152 if (n->name == NULL)
153 return "unknown";
154 else
155 return n->name;
156 }
157
/* event names usable with "$target configure -event ..." and scripts */
static const Jim_Nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	/* reset sequence hook points, in the order they fire */
	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
	{ .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
	{ .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
	{ .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .name = NULL, .value = -1 }
};

/* printable names for enum target_state values */
static const Jim_Nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

/* printable names for enum target_debug_reason values */
static const Jim_Nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

/* accepted spellings when configuring target endianness */
static const Jim_Nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

/* reset modes accepted by the "reset" command */
static const Jim_Nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
235
236 const char *debug_reason_name(struct target *t)
237 {
238 const char *cp;
239
240 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
241 t->debug_reason)->name;
242 if (!cp) {
243 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
244 cp = "(*BUG*unknown*BUG*)";
245 }
246 return cp;
247 }
248
249 const char *target_state_name(struct target *t)
250 {
251 const char *cp;
252 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
253 if (!cp) {
254 LOG_ERROR("Invalid target state: %d", (int)(t->state));
255 cp = "(*BUG*unknown*BUG*)";
256 }
257 return cp;
258 }
259
260 /* determine the number of the new target */
261 static int new_target_number(void)
262 {
263 struct target *t;
264 int x;
265
266 /* number is 0 based */
267 x = -1;
268 t = all_targets;
269 while (t) {
270 if (x < t->target_number)
271 x = t->target_number;
272 t = t->next;
273 }
274 return x + 1;
275 }
276
277 /* read a uint32_t from a buffer in target memory endianness */
278 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
279 {
280 if (target->endianness == TARGET_LITTLE_ENDIAN)
281 return le_to_h_u32(buffer);
282 else
283 return be_to_h_u32(buffer);
284 }
285
286 /* read a uint24_t from a buffer in target memory endianness */
287 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
288 {
289 if (target->endianness == TARGET_LITTLE_ENDIAN)
290 return le_to_h_u24(buffer);
291 else
292 return be_to_h_u24(buffer);
293 }
294
295 /* read a uint16_t from a buffer in target memory endianness */
296 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
297 {
298 if (target->endianness == TARGET_LITTLE_ENDIAN)
299 return le_to_h_u16(buffer);
300 else
301 return be_to_h_u16(buffer);
302 }
303
/* read a uint8_t from a buffer; single bytes have no endianness */
static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
{
	/* the uint8_t return type already truncates to 8 bits, so the
	 * original "& 0x0ff" mask was redundant */
	return *buffer;
}
309
310 /* write a uint32_t to a buffer in target memory endianness */
311 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
312 {
313 if (target->endianness == TARGET_LITTLE_ENDIAN)
314 h_u32_to_le(buffer, value);
315 else
316 h_u32_to_be(buffer, value);
317 }
318
319 /* write a uint24_t to a buffer in target memory endianness */
320 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
321 {
322 if (target->endianness == TARGET_LITTLE_ENDIAN)
323 h_u24_to_le(buffer, value);
324 else
325 h_u24_to_be(buffer, value);
326 }
327
328 /* write a uint16_t to a buffer in target memory endianness */
329 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
330 {
331 if (target->endianness == TARGET_LITTLE_ENDIAN)
332 h_u16_to_le(buffer, value);
333 else
334 h_u16_to_be(buffer, value);
335 }
336
337 /* write a uint8_t to a buffer in target memory endianness */
338 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
339 {
340 *buffer = value;
341 }
342
/* read a uint32_t array from a target-endian buffer into host order */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, buffer + 4 * i);
}

/* read a uint16_t array from a target-endian buffer into host order */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, buffer + 2 * i);
}

/* write a uint32_t array into a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, uint32_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u32(target, buffer + 4 * i, srcbuf[i]);
}

/* write a uint16_t array into a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, uint16_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u16(target, buffer + 2 * i, srcbuf[i]);
}
374
375 /* return a pointer to a configured target; id is name or number */
376 struct target *get_target(const char *id)
377 {
378 struct target *target;
379
380 /* try as tcltarget name */
381 for (target = all_targets; target; target = target->next) {
382 if (target->cmd_name == NULL)
383 continue;
384 if (strcmp(id, target->cmd_name) == 0)
385 return target;
386 }
387
388 /* It's OK to remove this fallback sometime after August 2010 or so */
389
390 /* no match, try as number */
391 unsigned num;
392 if (parse_uint(id, &num) != ERROR_OK)
393 return NULL;
394
395 for (target = all_targets; target; target = target->next) {
396 if (target->target_number == (int)num) {
397 LOG_WARNING("use '%s' as target identifier, not '%u'",
398 target->cmd_name, num);
399 return target;
400 }
401 }
402
403 return NULL;
404 }
405
406 /* returns a pointer to the n-th configured target */
407 static struct target *get_target_by_num(int num)
408 {
409 struct target *target = all_targets;
410
411 while (target) {
412 if (target->target_number == num)
413 return target;
414 target = target->next;
415 }
416
417 return NULL;
418 }
419
420 struct target *get_current_target(struct command_context *cmd_ctx)
421 {
422 struct target *target = get_target_by_num(cmd_ctx->current_target);
423
424 if (target == NULL) {
425 LOG_ERROR("BUG: current_target out of bounds");
426 exit(-1);
427 }
428
429 return target;
430 }
431
432 int target_poll(struct target *target)
433 {
434 int retval;
435
436 /* We can't poll until after examine */
437 if (!target_was_examined(target)) {
438 /* Fail silently lest we pollute the log */
439 return ERROR_FAIL;
440 }
441
442 retval = target->type->poll(target);
443 if (retval != ERROR_OK)
444 return retval;
445
446 if (target->halt_issued) {
447 if (target->state == TARGET_HALTED)
448 target->halt_issued = false;
449 else {
450 long long t = timeval_ms() - target->halt_issued_time;
451 if (t > 1000) {
452 target->halt_issued = false;
453 LOG_INFO("Halt timed out, wake up GDB.");
454 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
455 }
456 }
457 }
458
459 return ERROR_OK;
460 }
461
462 int target_halt(struct target *target)
463 {
464 int retval;
465 /* We can't poll until after examine */
466 if (!target_was_examined(target)) {
467 LOG_ERROR("Target not examined yet");
468 return ERROR_FAIL;
469 }
470
471 retval = target->type->halt(target);
472 if (retval != ERROR_OK)
473 return retval;
474
475 target->halt_issued = true;
476 target->halt_issued_time = timeval_ms();
477
478 return ERROR_OK;
479 }
480
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 * of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 * should be skipped. (For example, maybe execution was stopped by
 * such a breakpoint, in which case it would be counterproductive to
 * let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 * should be released and/or restored to their original contents.
 * (This would for example be true to run some downloaded "helper"
 * algorithm code, which resides in one such working buffer and uses
 * another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies. For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled. (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction. On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't resume until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	/* notify observers that a resume sequence is starting */
	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	if (retval != ERROR_OK)
		return retval;

	/* NOTE(review): RESUME_END fires even though the CPU may already
	 * have halted again (resume is asynchronous, see above) */
	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
535
536 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
537 {
538 char buf[100];
539 int retval;
540 Jim_Nvp *n;
541 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
542 if (n->name == NULL) {
543 LOG_ERROR("invalid reset mode");
544 return ERROR_FAIL;
545 }
546
547 /* disable polling during reset to make reset event scripts
548 * more predictable, i.e. dr/irscan & pathmove in events will
549 * not have JTAG operations injected into the middle of a sequence.
550 */
551 bool save_poll = jtag_poll_get_enabled();
552
553 jtag_poll_set_enabled(false);
554
555 sprintf(buf, "ocd_process_reset %s", n->name);
556 retval = Jim_Eval(cmd_ctx->interp, buf);
557
558 jtag_poll_set_enabled(save_poll);
559
560 if (retval != JIM_OK) {
561 Jim_MakeErrorMessage(cmd_ctx->interp);
562 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
563 return ERROR_FAIL;
564 }
565
566 /* We want any events to be processed before the prompt */
567 retval = target_call_timer_callbacks_now();
568
569 struct target *target;
570 for (target = all_targets; target; target = target->next)
571 target->type->check_reset(target);
572
573 return retval;
574 }
575
/* virt2phys fallback: treat virtual addresses as physical (no translation) */
static int identity_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}

/* mmu() fallback for cores without an MMU: always reports "disabled" */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}

/* examine() fallback: nothing to probe, just mark the target examined */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}

/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}

/* Run the driver's examine() hook for a single target. */
int target_examine_one(struct target *target)
{
	return target->type->examine(target);
}
605
606 static int jtag_enable_callback(enum jtag_event event, void *priv)
607 {
608 struct target *target = priv;
609
610 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
611 return ERROR_OK;
612
613 jtag_unregister_event_callback(jtag_enable_callback, target);
614 return target_examine_one(target);
615 }
616
617
618 /* Targets that correctly implement init + examine, i.e.
619 * no communication with target during init:
620 *
621 * XScale
622 */
623 int target_examine(void)
624 {
625 int retval = ERROR_OK;
626 struct target *target;
627
628 for (target = all_targets; target; target = target->next) {
629 /* defer examination, but don't skip it */
630 if (!target->tap->enabled) {
631 jtag_register_event_callback(jtag_enable_callback,
632 target);
633 continue;
634 }
635 retval = target_examine_one(target);
636 if (retval != ERROR_OK)
637 return retval;
638 }
639 return retval;
640 }
/* Name of the target's driver (e.g. "cortex_m3"). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
645
646 static int target_write_memory_imp(struct target *target, uint32_t address,
647 uint32_t size, uint32_t count, const uint8_t *buffer)
648 {
649 if (!target_was_examined(target)) {
650 LOG_ERROR("Target not examined yet");
651 return ERROR_FAIL;
652 }
653 return target->type->write_memory_imp(target, address, size, count, buffer);
654 }
655
656 static int target_read_memory_imp(struct target *target, uint32_t address,
657 uint32_t size, uint32_t count, uint8_t *buffer)
658 {
659 if (!target_was_examined(target)) {
660 LOG_ERROR("Target not examined yet");
661 return ERROR_FAIL;
662 }
663 return target->type->read_memory_imp(target, address, size, count, buffer);
664 }
665
666 static int target_soft_reset_halt_imp(struct target *target)
667 {
668 if (!target_was_examined(target)) {
669 LOG_ERROR("Target not examined yet");
670 return ERROR_FAIL;
671 }
672 if (!target->type->soft_reset_halt_imp) {
673 LOG_ERROR("Target %s does not support soft_reset_halt",
674 target_name(target));
675 return ERROR_FAIL;
676 }
677 return target->type->soft_reset_halt_imp(target);
678 }
679
/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	/* algorithms need a fully examined target to run on */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* running_alg guards against starting a second algorithm while
	 * one is in flight (see target_start_algorithm) */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
719
720 /**
721 * Downloads a target-specific native code algorithm to the target,
722 * executes and leaves it running.
723 *
724 * @param target used to run the algorithm
725 * @param arch_info target-specific description of the algorithm.
726 */
727 int target_start_algorithm(struct target *target,
728 int num_mem_params, struct mem_param *mem_params,
729 int num_reg_params, struct reg_param *reg_params,
730 uint32_t entry_point, uint32_t exit_point,
731 void *arch_info)
732 {
733 int retval = ERROR_FAIL;
734
735 if (!target_was_examined(target)) {
736 LOG_ERROR("Target not examined yet");
737 goto done;
738 }
739 if (!target->type->start_algorithm) {
740 LOG_ERROR("Target type '%s' does not support %s",
741 target_type_name(target), __func__);
742 goto done;
743 }
744 if (target->running_alg) {
745 LOG_ERROR("Target is already running an algorithm");
746 goto done;
747 }
748
749 target->running_alg = true;
750 retval = target->type->start_algorithm(target,
751 num_mem_params, mem_params,
752 num_reg_params, reg_params,
753 entry_point, exit_point, arch_info);
754
755 done:
756 return retval;
757 }
758
759 /**
760 * Waits for an algorithm started with target_start_algorithm() to complete.
761 *
762 * @param target used to run the algorithm
763 * @param arch_info target-specific description of the algorithm.
764 */
765 int target_wait_algorithm(struct target *target,
766 int num_mem_params, struct mem_param *mem_params,
767 int num_reg_params, struct reg_param *reg_params,
768 uint32_t exit_point, int timeout_ms,
769 void *arch_info)
770 {
771 int retval = ERROR_FAIL;
772
773 if (!target->type->wait_algorithm) {
774 LOG_ERROR("Target type '%s' does not support %s",
775 target_type_name(target), __func__);
776 goto done;
777 }
778 if (!target->running_alg) {
779 LOG_ERROR("Target is not running an algorithm");
780 goto done;
781 }
782
783 retval = target->type->wait_algorithm(target,
784 num_mem_params, mem_params,
785 num_reg_params, reg_params,
786 exit_point, timeout_ms, arch_info);
787 if (retval != ERROR_TARGET_TIMEOUT)
788 target->running_alg = false;
789
790 done:
791 return retval;
792 }
793
/**
 * Executes a target-specific native code algorithm in the target.
 * It differs from target_run_algorithm in that the algorithm is asynchronous.
 * Because of this it requires a compliant algorithm:
 * see contrib/loaders/flash/stm32f1x.S for example.
 *
 * The host streams data into a circular FIFO in target memory while the
 * algorithm drains it; a write pointer (host-owned) and read pointer
 * (target-owned) in the first two words of the working area coordinate the
 * two sides.
 *
 * @param target used to run the algorithm
 */

int target_run_flash_async_algorithm(struct target *target,
		uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(!block_size || !(block_size & (block_size - 1)));

	/* initialize both pointers to "FIFO empty" before starting the target */
	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32, count, wp, rp);

		/* rp == 0 is the agreed-upon abort signal from the algorithm */
		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		/* sanity-check alignment and range before trusting the pointer */
		if ((rp & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(10);
			continue;
		}

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target */
		target_write_u32(target, wp_addr, 0);
	}

	/* always wait for the algorithm to finish, even after an error,
	 * so the target is left in a consistent state */
	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	return retval;
}
925
/* Read memory via the driver's normal (possibly MMU-translated) path. */
int target_read_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_memory(target, address, size, count, buffer);
}

/* Read memory from the physical address space, bypassing translation. */
static int target_read_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	return target->type->read_phys_memory(target, address, size, count, buffer);
}

/* Write memory via the driver's normal (possibly MMU-translated) path. */
int target_write_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_memory(target, address, size, count, buffer);
}

/* Write memory to the physical address space, bypassing translation. */
static int target_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target->type->write_phys_memory(target, address, size, count, buffer);
}

/* Optimized bulk byte write; semantics are up to the driver's hook. */
int target_bulk_write_memory(struct target *target,
		uint32_t address, uint32_t count, const uint8_t *buffer)
{
	return target->type->bulk_write_memory(target, address, count, buffer);
}
955
956 int target_add_breakpoint(struct target *target,
957 struct breakpoint *breakpoint)
958 {
959 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
960 LOG_WARNING("target %s is not halted", target->cmd_name);
961 return ERROR_TARGET_NOT_HALTED;
962 }
963 return target->type->add_breakpoint(target, breakpoint);
964 }
965
966 int target_add_context_breakpoint(struct target *target,
967 struct breakpoint *breakpoint)
968 {
969 if (target->state != TARGET_HALTED) {
970 LOG_WARNING("target %s is not halted", target->cmd_name);
971 return ERROR_TARGET_NOT_HALTED;
972 }
973 return target->type->add_context_breakpoint(target, breakpoint);
974 }
975
976 int target_add_hybrid_breakpoint(struct target *target,
977 struct breakpoint *breakpoint)
978 {
979 if (target->state != TARGET_HALTED) {
980 LOG_WARNING("target %s is not halted", target->cmd_name);
981 return ERROR_TARGET_NOT_HALTED;
982 }
983 return target->type->add_hybrid_breakpoint(target, breakpoint);
984 }
985
986 int target_remove_breakpoint(struct target *target,
987 struct breakpoint *breakpoint)
988 {
989 return target->type->remove_breakpoint(target, breakpoint);
990 }
991
992 int target_add_watchpoint(struct target *target,
993 struct watchpoint *watchpoint)
994 {
995 if (target->state != TARGET_HALTED) {
996 LOG_WARNING("target %s is not halted", target->cmd_name);
997 return ERROR_TARGET_NOT_HALTED;
998 }
999 return target->type->add_watchpoint(target, watchpoint);
1000 }
1001 int target_remove_watchpoint(struct target *target,
1002 struct watchpoint *watchpoint)
1003 {
1004 return target->type->remove_watchpoint(target, watchpoint);
1005 }
1006
1007 int target_get_gdb_reg_list(struct target *target,
1008 struct reg **reg_list[], int *reg_list_size)
1009 {
1010 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size);
1011 }
1012 int target_step(struct target *target,
1013 int current, uint32_t address, int handle_breakpoints)
1014 {
1015 return target->type->step(target, current, address, handle_breakpoints);
1016 }
1017
1018 /**
1019 * Reset the @c examined flag for the given target.
1020 * Pure paranoia -- targets are zeroed on allocation.
1021 */
1022 static void target_reset_examined(struct target *target)
1023 {
1024 target->examined = false;
1025 }
1026
1027 static int err_read_phys_memory(struct target *target, uint32_t address,
1028 uint32_t size, uint32_t count, uint8_t *buffer)
1029 {
1030 LOG_ERROR("Not implemented: %s", __func__);
1031 return ERROR_FAIL;
1032 }
1033
1034 static int err_write_phys_memory(struct target *target, uint32_t address,
1035 uint32_t size, uint32_t count, const uint8_t *buffer)
1036 {
1037 LOG_ERROR("Not implemented: %s", __func__);
1038 return ERROR_FAIL;
1039 }
1040
1041 static int handle_target(void *priv);
1042
/**
 * Per-target one-time initialization: install fallback handlers for
 * optional target_type hooks, run the type's init_target() callback,
 * wrap the read/write_memory methods with the *_imp indirection, and
 * normalize the MMU / physical-memory method table.
 *
 * NOTE(review): the type struct appears to be patched in place, so if
 * several targets share one target_type the patches apply to all of
 * them -- confirm against how target types are registered.
 *
 * Returns ERROR_OK, or the error from init_target() on failure.
 */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	/* paranoia only: targets are zeroed on allocation */
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (type->examine == NULL)
		type->examine = default_examine;

	if (type->check_reset == NULL)
		type->check_reset = default_check_reset;

	/* init_target is mandatory for every target type */
	assert(type->init_target != NULL);

	int retval = type->init_target(cmd_ctx, target);
	if (ERROR_OK != retval) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/**
	 * @todo get rid of those *memory_imp() methods, now that all
	 * callers are using target_*_memory() accessors ... and make
	 * sure the "physical" paths handle the same issues.
	 */
	/* a non-invasive way(in terms of patches) to add some code that
	 * runs before the type->write/read_memory implementation
	 */
	type->write_memory_imp = target->type->write_memory;
	type->write_memory = target_write_memory_imp;

	type->read_memory_imp = target->type->read_memory;
	type->read_memory = target_read_memory_imp;

	type->soft_reset_halt_imp = target->type->soft_reset_halt;
	type->soft_reset_halt = target_soft_reset_halt_imp;

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		/* MMU-capable: every physical-access hook must exist; log and
		 * stub in error/identity handlers for anything missing */
		if (type->write_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing write_phys_memory",
					type->name);
			type->write_phys_memory = err_write_phys_memory;
		}
		if (type->read_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing read_phys_memory",
					type->name);
			type->read_phys_memory = err_read_phys_memory;
		}
		if (type->virt2phys == NULL) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* default buffer accessors are built on top of read/write_memory */
	if (target->type->read_buffer == NULL)
		target->type->read_buffer = target_read_buffer_default;

	if (target->type->write_buffer == NULL)
		target->type->write_buffer = target_write_buffer_default;

	return ERROR_OK;
}
1120
1121 static int target_init(struct command_context *cmd_ctx)
1122 {
1123 struct target *target;
1124 int retval;
1125
1126 for (target = all_targets; target; target = target->next) {
1127 retval = target_init_one(cmd_ctx, target);
1128 if (ERROR_OK != retval)
1129 return retval;
1130 }
1131
1132 if (!all_targets)
1133 return ERROR_OK;
1134
1135 retval = target_register_user_commands(cmd_ctx);
1136 if (ERROR_OK != retval)
1137 return retval;
1138
1139 retval = target_register_timer_callback(&handle_target,
1140 polling_interval, 1, cmd_ctx->interp);
1141 if (ERROR_OK != retval)
1142 return retval;
1143
1144 return ERROR_OK;
1145 }
1146
1147 COMMAND_HANDLER(handle_target_init_command)
1148 {
1149 int retval;
1150
1151 if (CMD_ARGC != 0)
1152 return ERROR_COMMAND_SYNTAX_ERROR;
1153
1154 static bool target_initialized;
1155 if (target_initialized) {
1156 LOG_INFO("'target init' has already been called");
1157 return ERROR_OK;
1158 }
1159 target_initialized = true;
1160
1161 retval = command_run_line(CMD_CTX, "init_targets");
1162 if (ERROR_OK != retval)
1163 return retval;
1164
1165 retval = command_run_line(CMD_CTX, "init_board");
1166 if (ERROR_OK != retval)
1167 return retval;
1168
1169 LOG_DEBUG("Initializing targets...");
1170 return target_init(CMD_CTX);
1171 }
1172
1173 int target_register_event_callback(int (*callback)(struct target *target,
1174 enum target_event event, void *priv), void *priv)
1175 {
1176 struct target_event_callback **callbacks_p = &target_event_callbacks;
1177
1178 if (callback == NULL)
1179 return ERROR_COMMAND_SYNTAX_ERROR;
1180
1181 if (*callbacks_p) {
1182 while ((*callbacks_p)->next)
1183 callbacks_p = &((*callbacks_p)->next);
1184 callbacks_p = &((*callbacks_p)->next);
1185 }
1186
1187 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1188 (*callbacks_p)->callback = callback;
1189 (*callbacks_p)->priv = priv;
1190 (*callbacks_p)->next = NULL;
1191
1192 return ERROR_OK;
1193 }
1194
1195 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1196 {
1197 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1198 struct timeval now;
1199
1200 if (callback == NULL)
1201 return ERROR_COMMAND_SYNTAX_ERROR;
1202
1203 if (*callbacks_p) {
1204 while ((*callbacks_p)->next)
1205 callbacks_p = &((*callbacks_p)->next);
1206 callbacks_p = &((*callbacks_p)->next);
1207 }
1208
1209 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1210 (*callbacks_p)->callback = callback;
1211 (*callbacks_p)->periodic = periodic;
1212 (*callbacks_p)->time_ms = time_ms;
1213
1214 gettimeofday(&now, NULL);
1215 (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
1216 time_ms -= (time_ms % 1000);
1217 (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
1218 if ((*callbacks_p)->when.tv_usec > 1000000) {
1219 (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
1220 (*callbacks_p)->when.tv_sec += 1;
1221 }
1222
1223 (*callbacks_p)->priv = priv;
1224 (*callbacks_p)->next = NULL;
1225
1226 return ERROR_OK;
1227 }
1228
1229 int target_unregister_event_callback(int (*callback)(struct target *target,
1230 enum target_event event, void *priv), void *priv)
1231 {
1232 struct target_event_callback **p = &target_event_callbacks;
1233 struct target_event_callback *c = target_event_callbacks;
1234
1235 if (callback == NULL)
1236 return ERROR_COMMAND_SYNTAX_ERROR;
1237
1238 while (c) {
1239 struct target_event_callback *next = c->next;
1240 if ((c->callback == callback) && (c->priv == priv)) {
1241 *p = next;
1242 free(c);
1243 return ERROR_OK;
1244 } else
1245 p = &(c->next);
1246 c = next;
1247 }
1248
1249 return ERROR_OK;
1250 }
1251
1252 static int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1253 {
1254 struct target_timer_callback **p = &target_timer_callbacks;
1255 struct target_timer_callback *c = target_timer_callbacks;
1256
1257 if (callback == NULL)
1258 return ERROR_COMMAND_SYNTAX_ERROR;
1259
1260 while (c) {
1261 struct target_timer_callback *next = c->next;
1262 if ((c->callback == callback) && (c->priv == priv)) {
1263 *p = next;
1264 free(c);
1265 return ERROR_OK;
1266 } else
1267 p = &(c->next);
1268 c = next;
1269 }
1270
1271 return ERROR_OK;
1272 }
1273
1274 int target_call_event_callbacks(struct target *target, enum target_event event)
1275 {
1276 struct target_event_callback *callback = target_event_callbacks;
1277 struct target_event_callback *next_callback;
1278
1279 if (event == TARGET_EVENT_HALTED) {
1280 /* execute early halted first */
1281 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1282 }
1283
1284 LOG_DEBUG("target event %i (%s)", event,
1285 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1286
1287 target_handle_event(target, event);
1288
1289 while (callback) {
1290 next_callback = callback->next;
1291 callback->callback(target, event, callback->priv);
1292 callback = next_callback;
1293 }
1294
1295 return ERROR_OK;
1296 }
1297
1298 static int target_timer_callback_periodic_restart(
1299 struct target_timer_callback *cb, struct timeval *now)
1300 {
1301 int time_ms = cb->time_ms;
1302 cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
1303 time_ms -= (time_ms % 1000);
1304 cb->when.tv_sec = now->tv_sec + time_ms / 1000;
1305 if (cb->when.tv_usec > 1000000) {
1306 cb->when.tv_usec = cb->when.tv_usec - 1000000;
1307 cb->when.tv_sec += 1;
1308 }
1309 return ERROR_OK;
1310 }
1311
1312 static int target_call_timer_callback(struct target_timer_callback *cb,
1313 struct timeval *now)
1314 {
1315 cb->callback(cb->priv);
1316
1317 if (cb->periodic)
1318 return target_timer_callback_periodic_restart(cb, now);
1319
1320 return target_unregister_timer_callback(cb->callback, cb->priv);
1321 }
1322
/* Run every timer callback that is due.  With checktime == 0, every
 * periodic callback runs regardless of its deadline (used by
 * target_call_timer_callbacks_now()).  Also services the keep-alive. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	keep_alive();

	struct timeval now;
	gettimeofday(&now, NULL);

	struct target_timer_callback *callback = target_timer_callbacks;
	while (callback) {
		/* cleaning up may unregister and free this callback */
		struct target_timer_callback *next_callback = callback->next;

		/* due when: forced periodic run, or the deadline has passed */
		bool call_it = callback->callback &&
			((!checktime && callback->periodic) ||
			now.tv_sec > callback->when.tv_sec ||
			(now.tv_sec == callback->when.tv_sec &&
			now.tv_usec >= callback->when.tv_usec));

		if (call_it) {
			int retval = target_call_timer_callback(callback, &now);
			if (retval != ERROR_OK)
				return retval;
		}

		callback = next_callback;
	}

	return ERROR_OK;
}
1352
/* Run the timer callbacks that have reached their deadline. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1357
/* Run every periodic callback immediately, ignoring deadlines. */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1363
1364 /* Prints the working area layout for debug purposes */
1365 static void print_wa_layout(struct target *target)
1366 {
1367 struct working_area *c = target->working_areas;
1368
1369 while (c) {
1370 LOG_DEBUG("%c%c 0x%08"PRIx32"-0x%08"PRIx32" (%"PRIu32" bytes)",
1371 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1372 c->address, c->address + c->size - 1, c->size);
1373 c = c->next;
1374 }
1375 }
1376
1377 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1378 static void target_split_working_area(struct working_area *area, uint32_t size)
1379 {
1380 assert(area->free); /* Shouldn't split an allocated area */
1381 assert(size <= area->size); /* Caller should guarantee this */
1382
1383 /* Split only if not already the right size */
1384 if (size < area->size) {
1385 struct working_area *new_wa = malloc(sizeof(*new_wa));
1386
1387 if (new_wa == NULL)
1388 return;
1389
1390 new_wa->next = area->next;
1391 new_wa->size = area->size - size;
1392 new_wa->address = area->address + size;
1393 new_wa->backup = NULL;
1394 new_wa->user = NULL;
1395 new_wa->free = true;
1396
1397 area->next = new_wa;
1398 area->size = size;
1399
1400 /* If backup memory was allocated to this area, it has the wrong size
1401 * now so free it and it will be reallocated if/when needed */
1402 if (area->backup) {
1403 free(area->backup);
1404 area->backup = NULL;
1405 }
1406 }
1407 }
1408
1409 /* Merge all adjacent free areas into one */
1410 static void target_merge_working_areas(struct target *target)
1411 {
1412 struct working_area *c = target->working_areas;
1413
1414 while (c && c->next) {
1415 assert(c->next->address == c->address + c->size); /* This is an invariant */
1416
1417 /* Find two adjacent free areas */
1418 if (c->free && c->next->free) {
1419 /* Merge the last into the first */
1420 c->size += c->next->size;
1421
1422 /* Remove the last */
1423 struct working_area *to_be_freed = c->next;
1424 c->next = c->next->next;
1425 if (to_be_freed->backup)
1426 free(to_be_freed->backup);
1427 free(to_be_freed);
1428
1429 /* If backup memory was allocated to the remaining area, it's has
1430 * the wrong size now */
1431 if (c->backup) {
1432 free(c->backup);
1433 c->backup = NULL;
1434 }
1435 } else {
1436 c = c->next;
1437 }
1438 }
1439 }
1440
/* Allocate @a size bytes (rounded up to a 4-byte multiple) from the
 * target working area, storing the handle in *@a area.  Unlike
 * target_alloc_working_area() this logs no warning on exhaustion.
 * On first use the base address is chosen from the MMU state
 * (physical vs. virtual) and the region is set up as a single free
 * area; it is then managed as a list of free/used sub-areas. */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (target->working_areas == NULL) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory 0x%08"PRIx32,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory 0x%08"PRIx32,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE(review): on malloc failure this stays NULL and the
		 * search below reports RESOURCE_NOT_AVAILABLE */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (c == NULL)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %"PRIu32" bytes at address 0x%08"PRIx32, size, c->address);

	/* snapshot the original target memory so it can be restored on free */
	if (target->backup_working_area) {
		if (c->backup == NULL) {
			c->backup = malloc(c->size);
			if (c->backup == NULL)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
1534
1535 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1536 {
1537 int retval;
1538
1539 retval = target_alloc_working_area_try(target, size, area);
1540 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1541 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
1542 return retval;
1543
1544 }
1545
1546 static int target_restore_working_area(struct target *target, struct working_area *area)
1547 {
1548 int retval = ERROR_OK;
1549
1550 if (target->backup_working_area && area->backup != NULL) {
1551 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1552 if (retval != ERROR_OK)
1553 LOG_ERROR("failed to restore %"PRIu32" bytes of working area at address 0x%08"PRIx32,
1554 area->size, area->address);
1555 }
1556
1557 return retval;
1558 }
1559
1560 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1561 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1562 {
1563 int retval = ERROR_OK;
1564
1565 if (area->free)
1566 return retval;
1567
1568 if (restore) {
1569 retval = target_restore_working_area(target, area);
1570 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1571 if (retval != ERROR_OK)
1572 return retval;
1573 }
1574
1575 area->free = true;
1576
1577 LOG_DEBUG("freed %"PRIu32" bytes of working area at address 0x%08"PRIx32,
1578 area->size, area->address);
1579
1580 /* mark user pointer invalid */
1581 /* TODO: Is this really safe? It points to some previous caller's memory.
1582 * How could we know that the area pointer is still in that place and not
1583 * some other vital data? What's the purpose of this, anyway? */
1584 *area->user = NULL;
1585 area->user = NULL;
1586
1587 target_merge_working_areas(target);
1588
1589 print_wa_layout(target);
1590
1591 return retval;
1592 }
1593
/* Free @a area, restoring its backed-up contents. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
1598
1599 /* free resources and restore memory, if restoring memory fails,
1600 * free up resources anyway
1601 */
1602 static void target_free_all_working_areas_restore(struct target *target, int restore)
1603 {
1604 struct working_area *c = target->working_areas;
1605
1606 LOG_DEBUG("freeing all working areas");
1607
1608 /* Loop through all areas, restoring the allocated ones and marking them as free */
1609 while (c) {
1610 if (!c->free) {
1611 if (restore)
1612 target_restore_working_area(target, c);
1613 c->free = true;
1614 *c->user = NULL; /* Same as above */
1615 c->user = NULL;
1616 }
1617 c = c->next;
1618 }
1619
1620 /* Run a merge pass to combine all areas into one */
1621 target_merge_working_areas(target);
1622
1623 print_wa_layout(target);
1624 }
1625
/* Free all working areas, restoring their backed-up contents. */
void target_free_all_working_areas(struct target *target)
{
	target_free_all_working_areas_restore(target, 1);
}
1630
1631 /* Find the largest number of bytes that can be allocated */
1632 uint32_t target_get_working_area_avail(struct target *target)
1633 {
1634 struct working_area *c = target->working_areas;
1635 uint32_t max_size = 0;
1636
1637 if (c == NULL)
1638 return target->working_area_size;
1639
1640 while (c) {
1641 if (c->free && max_size < c->size)
1642 max_size = c->size;
1643
1644 c = c->next;
1645 }
1646
1647 return max_size;
1648 }
1649
1650 int target_arch_state(struct target *target)
1651 {
1652 int retval;
1653 if (target == NULL) {
1654 LOG_USER("No target has been configured");
1655 return ERROR_OK;
1656 }
1657
1658 LOG_USER("target state: %s", target_state_name(target));
1659
1660 if (target->state != TARGET_HALTED)
1661 return ERROR_OK;
1662
1663 retval = target->type->arch_state(target);
1664 return retval;
1665 }
1666
1667 /* Single aligned words are guaranteed to use 16 or 32 bit access
1668 * mode respectively, otherwise data is handled as quickly as
1669 * possible
1670 */
1671 int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
1672 {
1673 LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
1674 (int)size, (unsigned)address);
1675
1676 if (!target_was_examined(target)) {
1677 LOG_ERROR("Target not examined yet");
1678 return ERROR_FAIL;
1679 }
1680
1681 if (size == 0)
1682 return ERROR_OK;
1683
1684 if ((address + size - 1) < address) {
1685 /* GDB can request this when e.g. PC is 0xfffffffc*/
1686 LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
1687 (unsigned)address,
1688 (unsigned)size);
1689 return ERROR_FAIL;
1690 }
1691
1692 return target->type->write_buffer(target, address, size, buffer);
1693 }
1694
/* Fallback write_buffer implementation built on target_write_memory():
 * unaligned head bytes are written byte-wise, the aligned middle
 * word-wise (via bulk_write_memory above a 128-byte threshold), and
 * any tail byte-wise. */
static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
{
	int retval = ERROR_OK;

	/* single aligned halfword: guarantee a 16-bit access */
	if (((address % 2) == 0) && (size == 2))
		return target_write_memory(target, address, 2, 1, buffer);

	/* handle unaligned head bytes */
	if (address % 4) {
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		retval = target_write_memory(target, address, 1, unaligned, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4) {
		int aligned = size - (size % 4);

		/* use bulk writes above a certain limit. This may have to be changed */
		if (aligned > 128) {
			retval = target->type->bulk_write_memory(target, address, aligned / 4, buffer);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target, address, 4, aligned / 4, buffer);
			if (retval != ERROR_OK)
				return retval;
		}

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/* handle tail writes of less than 4 bytes */
	if (size > 0) {
		retval = target_write_memory(target, address, 1, size, buffer);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1747
1748 /* Single aligned words are guaranteed to use 16 or 32 bit access
1749 * mode respectively, otherwise data is handled as quickly as
1750 * possible
1751 */
1752 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1753 {
1754 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
1755 (int)size, (unsigned)address);
1756
1757 if (!target_was_examined(target)) {
1758 LOG_ERROR("Target not examined yet");
1759 return ERROR_FAIL;
1760 }
1761
1762 if (size == 0)
1763 return ERROR_OK;
1764
1765 if ((address + size - 1) < address) {
1766 /* GDB can request this when e.g. PC is 0xfffffffc*/
1767 LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
1768 address,
1769 size);
1770 return ERROR_FAIL;
1771 }
1772
1773 return target->type->read_buffer(target, address, size, buffer);
1774 }
1775
/* Fallback read_buffer implementation built on target_read_memory():
 * unaligned head bytes are read byte-wise, the aligned middle
 * word-wise, a remaining even-sized span halfword-wise, and any final
 * byte byte-wise. */
static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
{
	int retval = ERROR_OK;

	/* single aligned halfword: guarantee a 16-bit access */
	if (((address % 2) == 0) && (size == 2))
		return target_read_memory(target, address, 2, 1, buffer);

	/* handle unaligned head bytes */
	if (address % 4) {
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		retval = target_read_memory(target, address, 1, unaligned, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4) {
		int aligned = size - (size % 4);

		retval = target_read_memory(target, address, 4, aligned / 4, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/*prevent byte access when possible (avoid AHB access limitations in some cases)*/
	if (size >= 2) {
		int aligned = size - (size % 2);
		retval = target_read_memory(target, address, 2, aligned / 2, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}
	/* handle the at-most-one remaining tail byte */
	if (size > 0) {
		retval = target_read_memory(target, address, 1, size, buffer);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1832
/* Compute a checksum over target memory into *@a crc.  Prefers the
 * target type's own checksum_memory implementation; if that fails,
 * falls back to reading the whole range back to the host and
 * checksumming it with image_calculate_checksum(). */
int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* host-side fallback */
		buffer = malloc(size);
		if (buffer == NULL) {
			LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
1872
1873 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
1874 {
1875 int retval;
1876 if (!target_was_examined(target)) {
1877 LOG_ERROR("Target not examined yet");
1878 return ERROR_FAIL;
1879 }
1880
1881 if (target->type->blank_check_memory == 0)
1882 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1883
1884 retval = target->type->blank_check_memory(target, address, size, blank);
1885
1886 return retval;
1887 }
1888
1889 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
1890 {
1891 uint8_t value_buf[4];
1892 if (!target_was_examined(target)) {
1893 LOG_ERROR("Target not examined yet");
1894 return ERROR_FAIL;
1895 }
1896
1897 int retval = target_read_memory(target, address, 4, 1, value_buf);
1898
1899 if (retval == ERROR_OK) {
1900 *value = target_buffer_get_u32(target, value_buf);
1901 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1902 address,
1903 *value);
1904 } else {
1905 *value = 0x0;
1906 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1907 address);
1908 }
1909
1910 return retval;
1911 }
1912
1913 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
1914 {
1915 uint8_t value_buf[2];
1916 if (!target_was_examined(target)) {
1917 LOG_ERROR("Target not examined yet");
1918 return ERROR_FAIL;
1919 }
1920
1921 int retval = target_read_memory(target, address, 2, 1, value_buf);
1922
1923 if (retval == ERROR_OK) {
1924 *value = target_buffer_get_u16(target, value_buf);
1925 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
1926 address,
1927 *value);
1928 } else {
1929 *value = 0x0;
1930 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1931 address);
1932 }
1933
1934 return retval;
1935 }
1936
1937 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
1938 {
1939 int retval = target_read_memory(target, address, 1, 1, value);
1940 if (!target_was_examined(target)) {
1941 LOG_ERROR("Target not examined yet");
1942 return ERROR_FAIL;
1943 }
1944
1945 if (retval == ERROR_OK) {
1946 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1947 address,
1948 *value);
1949 } else {
1950 *value = 0x0;
1951 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1952 address);
1953 }
1954
1955 return retval;
1956 }
1957
1958 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
1959 {
1960 int retval;
1961 uint8_t value_buf[4];
1962 if (!target_was_examined(target)) {
1963 LOG_ERROR("Target not examined yet");
1964 return ERROR_FAIL;
1965 }
1966
1967 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1968 address,
1969 value);
1970
1971 target_buffer_set_u32(target, value_buf, value);
1972 retval = target_write_memory(target, address, 4, 1, value_buf);
1973 if (retval != ERROR_OK)
1974 LOG_DEBUG("failed: %i", retval);
1975
1976 return retval;
1977 }
1978
1979 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
1980 {
1981 int retval;
1982 uint8_t value_buf[2];
1983 if (!target_was_examined(target)) {
1984 LOG_ERROR("Target not examined yet");
1985 return ERROR_FAIL;
1986 }
1987
1988 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
1989 address,
1990 value);
1991
1992 target_buffer_set_u16(target, value_buf, value);
1993 retval = target_write_memory(target, address, 2, 1, value_buf);
1994 if (retval != ERROR_OK)
1995 LOG_DEBUG("failed: %i", retval);
1996
1997 return retval;
1998 }
1999
2000 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
2001 {
2002 int retval;
2003 if (!target_was_examined(target)) {
2004 LOG_ERROR("Target not examined yet");
2005 return ERROR_FAIL;
2006 }
2007
2008 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
2009 address, value);
2010
2011 retval = target_write_memory(target, address, 1, 1, &value);
2012 if (retval != ERROR_OK)
2013 LOG_DEBUG("failed: %i", retval);
2014
2015 return retval;
2016 }
2017
2018 static int find_target(struct command_context *cmd_ctx, const char *name)
2019 {
2020 struct target *target = get_target(name);
2021 if (target == NULL) {
2022 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2023 return ERROR_FAIL;
2024 }
2025 if (!target->tap->enabled) {
2026 LOG_USER("Target: TAP %s is disabled, "
2027 "can't be the current target\n",
2028 target->tap->dotted_name);
2029 return ERROR_FAIL;
2030 }
2031
2032 cmd_ctx->current_target = target->target_number;
2033 return ERROR_OK;
2034 }
2035
2036
/* 'targets' command: with one argument, select that target as current
 * (done if the lookup succeeds); otherwise print a table of all
 * configured targets, marking the current one with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD_CTX, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD_CTX, " TargetName Type Endian TapName State ");
	command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		if (CMD_CTX->current_target == target->target_number)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD_CTX,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				Jim_Nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2079
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* latest sampled line states, updated by sense_handler() and also read
 * by handle_target() to decide whether polling is safe */
static int powerDropout;
static int srstAsserted;

/* edge-triggered "run this Tcl proc" flags: set by sense_handler(),
 * consumed and cleared by handle_target() */
static int runPowerRestore;
static int runPowerDropout;
static int runSrstAsserted;
static int runSrstDeasserted;
2089
/* Sample the power-dropout and srst lines and derive edge events.
 *
 * Updates the file-scope state (powerDropout, srstAsserted) and sets the
 * run* flags for handle_target() when an edge is detected.  Dropout and
 * srst-deassert events are rate-limited to one per 2000 ms.
 * Returns ERROR_OK, or the error from the jtag_* query functions.
 */
static int sense_handler(void)
{
	/* previous samples, kept across calls for edge detection */
	static int prevSrstAsserted;
	static int prevPowerdropout;

	int retval = jtag_power_dropout(&powerDropout);
	if (retval != ERROR_OK)
		return retval;

	/* power restored = was dropped out last time, not any more */
	int powerRestored;
	powerRestored = prevPowerdropout && !powerDropout;
	if (powerRestored)
		runPowerRestore = 1;

	long long current = timeval_ms();
	static long long lastPower;
	/* rate-limit: ignore dropouts within 2000 ms of the last one handled */
	int waitMore = lastPower + 2000 > current;
	if (powerDropout && !waitMore) {
		runPowerDropout = 1;
		lastPower = current;
	}

	retval = jtag_srst_asserted(&srstAsserted);
	if (retval != ERROR_OK)
		return retval;

	/* srst deasserted = was asserted last time, not any more */
	int srstDeasserted;
	srstDeasserted = prevSrstAsserted && !srstAsserted;

	static long long lastSrst;
	/* rate-limit srst-deassert handling the same way */
	waitMore = lastSrst + 2000 > current;
	if (srstDeasserted && !waitMore) {
		runSrstDeasserted = 1;
		lastSrst = current;
	}

	if (!prevSrstAsserted && srstAsserted)
		runSrstAsserted = 1;

	prevSrstAsserted = srstAsserted;
	prevPowerdropout = powerDropout;

	if (srstDeasserted || powerRestored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2141
/* polling back-off: after a failed poll, handle_target() skips
 * backoff_times polling periods, counting them in backoff_count */
static int backoff_times;
static int backoff_count;
2144
/* process target state changes */
/* Periodic timer callback: dispatch power/srst event procs detected by
 * sense_handler(), then poll every enabled target for state changes.
 *
 * @priv is the Jim interpreter used to run the Tcl event procs.
 * Failed polls trigger exponential back-off (capped at 5000 ms worth of
 * polling periods) and a TARGET_EVENT_GDB_HALT so the user can intervene.
 */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (runSrstAsserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (runSrstDeasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (runPowerDropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (runPowerRestore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		runSrstAsserted = 0;
		runSrstDeasserted = 0;
		runPowerRestore = 0;
		runPowerDropout = 0;

		recursive = 0;
	}

	/* back-off after a previous poll failure: skip this tick */
	if (backoff_times > backoff_count) {
		/* do not poll this time as we failed previously */
		backoff_count++;
		return ERROR_OK;
	}
	backoff_count = 0;

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {
		if (!target->tap->enabled)
			continue;

		/* only poll target if we've got power and srst isn't asserted */
		if (!powerDropout && !srstAsserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (backoff_times * polling_interval < 5000) {
					backoff_times *= 2;
					backoff_times++;
				}
				LOG_USER("Polling target failed, GDB will be halted. Polling again in %dms",
						backoff_times * polling_interval);

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
				return retval;
			}
			/* Since we succeeded, we reset backoff count */
			if (backoff_times > 0)
				LOG_USER("Polling succeeded again");
			backoff_times = 0;
		}
	}

	return retval;
}
2244
2245 COMMAND_HANDLER(handle_reg_command)
2246 {
2247 struct target *target;
2248 struct reg *reg = NULL;
2249 unsigned count = 0;
2250 char *value;
2251
2252 LOG_DEBUG("-");
2253
2254 target = get_current_target(CMD_CTX);
2255
2256 /* list all available registers for the current target */
2257 if (CMD_ARGC == 0) {
2258 struct reg_cache *cache = target->reg_cache;
2259
2260 count = 0;
2261 while (cache) {
2262 unsigned i;
2263
2264 command_print(CMD_CTX, "===== %s", cache->name);
2265
2266 for (i = 0, reg = cache->reg_list;
2267 i < cache->num_regs;
2268 i++, reg++, count++) {
2269 /* only print cached values if they are valid */
2270 if (reg->valid) {
2271 value = buf_to_str(reg->value,
2272 reg->size, 16);
2273 command_print(CMD_CTX,
2274 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2275 count, reg->name,
2276 reg->size, value,
2277 reg->dirty
2278 ? " (dirty)"
2279 : "");
2280 free(value);
2281 } else {
2282 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2283 count, reg->name,
2284 reg->size) ;
2285 }
2286 }
2287 cache = cache->next;
2288 }
2289
2290 return ERROR_OK;
2291 }
2292
2293 /* access a single register by its ordinal number */
2294 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2295 unsigned num;
2296 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2297
2298 struct reg_cache *cache = target->reg_cache;
2299 count = 0;
2300 while (cache) {
2301 unsigned i;
2302 for (i = 0; i < cache->num_regs; i++) {
2303 if (count++ == num) {
2304 reg = &cache->reg_list[i];
2305 break;
2306 }
2307 }
2308 if (reg)
2309 break;
2310 cache = cache->next;
2311 }
2312
2313 if (!reg) {
2314 command_print(CMD_CTX, "%i is out of bounds, the current target "
2315 "has only %i registers (0 - %i)", num, count, count - 1);
2316 return ERROR_OK;
2317 }
2318 } else {
2319 /* access a single register by its name */
2320 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2321
2322 if (!reg) {
2323 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2324 return ERROR_OK;
2325 }
2326 }
2327
2328 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2329
2330 /* display a register */
2331 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2332 && (CMD_ARGV[1][0] <= '9')))) {
2333 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2334 reg->valid = 0;
2335
2336 if (reg->valid == 0)
2337 reg->type->get(reg);
2338 value = buf_to_str(reg->value, reg->size, 16);
2339 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2340 free(value);
2341 return ERROR_OK;
2342 }
2343
2344 /* set register value */
2345 if (CMD_ARGC == 2) {
2346 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2347 if (buf == NULL)
2348 return ERROR_FAIL;
2349 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2350
2351 reg->type->set(reg, buf);
2352
2353 value = buf_to_str(reg->value, reg->size, 16);
2354 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2355 free(value);
2356
2357 free(buf);
2358
2359 return ERROR_OK;
2360 }
2361
2362 return ERROR_COMMAND_SYNTAX_ERROR;
2363 }
2364
2365 COMMAND_HANDLER(handle_poll_command)
2366 {
2367 int retval = ERROR_OK;
2368 struct target *target = get_current_target(CMD_CTX);
2369
2370 if (CMD_ARGC == 0) {
2371 command_print(CMD_CTX, "background polling: %s",
2372 jtag_poll_get_enabled() ? "on" : "off");
2373 command_print(CMD_CTX, "TAP: %s (%s)",
2374 target->tap->dotted_name,
2375 target->tap->enabled ? "enabled" : "disabled");
2376 if (!target->tap->enabled)
2377 return ERROR_OK;
2378 retval = target_poll(target);
2379 if (retval != ERROR_OK)
2380 return retval;
2381 retval = target_arch_state(target);
2382 if (retval != ERROR_OK)
2383 return retval;
2384 } else if (CMD_ARGC == 1) {
2385 bool enable;
2386 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2387 jtag_poll_set_enabled(enable);
2388 } else
2389 return ERROR_COMMAND_SYNTAX_ERROR;
2390
2391 return retval;
2392 }
2393
2394 COMMAND_HANDLER(handle_wait_halt_command)
2395 {
2396 if (CMD_ARGC > 1)
2397 return ERROR_COMMAND_SYNTAX_ERROR;
2398
2399 unsigned ms = 5000;
2400 if (1 == CMD_ARGC) {
2401 int retval = parse_uint(CMD_ARGV[0], &ms);
2402 if (ERROR_OK != retval)
2403 return ERROR_COMMAND_SYNTAX_ERROR;
2404 /* convert seconds (given) to milliseconds (needed) */
2405 ms *= 1000;
2406 }
2407
2408 struct target *target = get_current_target(CMD_CTX);
2409 return target_wait_state(target, TARGET_HALTED, ms);
2410 }
2411
2412 /* wait for target state to change. The trick here is to have a low
2413 * latency for short waits and not to suck up all the CPU time
2414 * on longer waits.
2415 *
2416 * After 500ms, keep_alive() is invoked
2417 */
2418 int target_wait_state(struct target *target, enum target_state state, int ms)
2419 {
2420 int retval;
2421 long long then = 0, cur;
2422 int once = 1;
2423
2424 for (;;) {
2425 retval = target_poll(target);
2426 if (retval != ERROR_OK)
2427 return retval;
2428 if (target->state == state)
2429 break;
2430 cur = timeval_ms();
2431 if (once) {
2432 once = 0;
2433 then = timeval_ms();
2434 LOG_DEBUG("waiting for target %s...",
2435 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2436 }
2437
2438 if (cur-then > 500)
2439 keep_alive();
2440
2441 if ((cur-then) > ms) {
2442 LOG_ERROR("timed out while waiting for target %s",
2443 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2444 return ERROR_FAIL;
2445 }
2446 }
2447
2448 return ERROR_OK;
2449 }
2450
2451 COMMAND_HANDLER(handle_halt_command)
2452 {
2453 LOG_DEBUG("-");
2454
2455 struct target *target = get_current_target(CMD_CTX);
2456 int retval = target_halt(target);
2457 if (ERROR_OK != retval)
2458 return retval;
2459
2460 if (CMD_ARGC == 1) {
2461 unsigned wait_local;
2462 retval = parse_uint(CMD_ARGV[0], &wait_local);
2463 if (ERROR_OK != retval)
2464 return ERROR_COMMAND_SYNTAX_ERROR;
2465 if (!wait_local)
2466 return ERROR_OK;
2467 }
2468
2469 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2470 }
2471
2472 COMMAND_HANDLER(handle_soft_reset_halt_command)
2473 {
2474 struct target *target = get_current_target(CMD_CTX);
2475
2476 LOG_USER("requesting target halt and executing a soft reset");
2477
2478 target->type->soft_reset_halt(target);
2479
2480 return ERROR_OK;
2481 }
2482
2483 COMMAND_HANDLER(handle_reset_command)
2484 {
2485 if (CMD_ARGC > 1)
2486 return ERROR_COMMAND_SYNTAX_ERROR;
2487
2488 enum target_reset_mode reset_mode = RESET_RUN;
2489 if (CMD_ARGC == 1) {
2490 const Jim_Nvp *n;
2491 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2492 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
2493 return ERROR_COMMAND_SYNTAX_ERROR;
2494 reset_mode = n->value;
2495 }
2496
2497 /* reset *all* targets */
2498 return target_process_reset(CMD_CTX, reset_mode);
2499 }
2500
2501
2502 COMMAND_HANDLER(handle_resume_command)
2503 {
2504 int current = 1;
2505 if (CMD_ARGC > 1)
2506 return ERROR_COMMAND_SYNTAX_ERROR;
2507
2508 struct target *target = get_current_target(CMD_CTX);
2509
2510 /* with no CMD_ARGV, resume from current pc, addr = 0,
2511 * with one arguments, addr = CMD_ARGV[0],
2512 * handle breakpoints, not debugging */
2513 uint32_t addr = 0;
2514 if (CMD_ARGC == 1) {
2515 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2516 current = 0;
2517 }
2518
2519 return target_resume(target, current, addr, 1, 0);
2520 }
2521
2522 COMMAND_HANDLER(handle_step_command)
2523 {
2524 if (CMD_ARGC > 1)
2525 return ERROR_COMMAND_SYNTAX_ERROR;
2526
2527 LOG_DEBUG("-");
2528
2529 /* with no CMD_ARGV, step from current pc, addr = 0,
2530 * with one argument addr = CMD_ARGV[0],
2531 * handle breakpoints, debugging */
2532 uint32_t addr = 0;
2533 int current_pc = 1;
2534 if (CMD_ARGC == 1) {
2535 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2536 current_pc = 0;
2537 }
2538
2539 struct target *target = get_current_target(CMD_CTX);
2540
2541 return target->type->step(target, current_pc, addr, 1);
2542 }
2543
/* Pretty-print a memory dump for the md* commands: 32 bytes per output
 * line, each line prefixed with its start address, values decoded in
 * target byte order at the given element size (1, 2 or 4 bytes). */
static void handle_md_output(struct command_context *cmd_ctx,
		struct target *target, uint32_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	const char *value_fmt;
	switch (size) {
	case 4:
		value_fmt = "%8.8x ";
		break;
	case 2:
		value_fmt = "%4.4x ";
		break;
	case 1:
		value_fmt = "%2.2x ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* line start: emit the address prefix */
		if (i % line_modulo == 0)
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					"0x%8.8x: ",
					(unsigned)(address + (i * size)));

		const uint8_t *value_ptr = buffer + i * size;
		uint32_t value;
		if (size == 4)
			value = target_buffer_get_u32(target, value_ptr);
		else if (size == 2)
			value = target_buffer_get_u16(target, value_ptr);
		else
			value = *value_ptr;
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at end of line and at end of data */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd_ctx, "%s", output);
			output_len = 0;
		}
	}
}
2601
2602 COMMAND_HANDLER(handle_md_command)
2603 {
2604 if (CMD_ARGC < 1)
2605 return ERROR_COMMAND_SYNTAX_ERROR;
2606
2607 unsigned size = 0;
2608 switch (CMD_NAME[2]) {
2609 case 'w':
2610 size = 4;
2611 break;
2612 case 'h':
2613 size = 2;
2614 break;
2615 case 'b':
2616 size = 1;
2617 break;
2618 default:
2619 return ERROR_COMMAND_SYNTAX_ERROR;
2620 }
2621
2622 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2623 int (*fn)(struct target *target,
2624 uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
2625 if (physical) {
2626 CMD_ARGC--;
2627 CMD_ARGV++;
2628 fn = target_read_phys_memory;
2629 } else
2630 fn = target_read_memory;
2631 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2632 return ERROR_COMMAND_SYNTAX_ERROR;
2633
2634 uint32_t address;
2635 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2636
2637 unsigned count = 1;
2638 if (CMD_ARGC == 2)
2639 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2640
2641 uint8_t *buffer = calloc(count, size);
2642
2643 struct target *target = get_current_target(CMD_CTX);
2644 int retval = fn(target, address, size, count, buffer);
2645 if (ERROR_OK == retval)
2646 handle_md_output(CMD_CTX, target, address, size, count, buffer);
2647
2648 free(buffer);
2649
2650 return retval;
2651 }
2652
/* Write-callback signature used by target_fill_mem() and handle_mw_command()
 * to select between physical-memory writes and the buffered fast path. */
typedef int (*target_write_fn)(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
2655
/* target_write_fn adapter: forward to target_write_buffer(), which
 * takes a total byte count rather than element size and count. */
static int target_write_memory_fast(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	uint32_t byte_count = size * count;
	return target_write_buffer(target, address, byte_count, buffer);
}
2661
2662 static int target_fill_mem(struct target *target,
2663 uint32_t address,
2664 target_write_fn fn,
2665 unsigned data_size,
2666 /* value */
2667 uint32_t b,
2668 /* count */
2669 unsigned c)
2670 {
2671 /* We have to write in reasonably large chunks to be able
2672 * to fill large memory areas with any sane speed */
2673 const unsigned chunk_size = 16384;
2674 uint8_t *target_buf = malloc(chunk_size * data_size);
2675 if (target_buf == NULL) {
2676 LOG_ERROR("Out of memory");
2677 return ERROR_FAIL;
2678 }
2679
2680 for (unsigned i = 0; i < chunk_size; i++) {
2681 switch (data_size) {
2682 case 4:
2683 target_buffer_set_u32(target, target_buf + i * data_size, b);
2684 break;
2685 case 2:
2686 target_buffer_set_u16(target, target_buf + i * data_size, b);
2687 break;
2688 case 1:
2689 target_buffer_set_u8(target, target_buf + i * data_size, b);
2690 break;
2691 default:
2692 exit(-1);
2693 }
2694 }
2695
2696 int retval = ERROR_OK;
2697
2698 for (unsigned x = 0; x < c; x += chunk_size) {
2699 unsigned current;
2700 current = c - x;
2701 if (current > chunk_size)
2702 current = chunk_size;
2703 retval = fn(target, address + x * data_size, data_size, current, target_buf);
2704 if (retval != ERROR_OK)
2705 break;
2706 /* avoid GDB timeouts */
2707 keep_alive();
2708 }
2709 free(target_buf);
2710
2711 return retval;
2712 }
2713
2714
2715 COMMAND_HANDLER(handle_mw_command)
2716 {
2717 if (CMD_ARGC < 2)
2718 return ERROR_COMMAND_SYNTAX_ERROR;
2719 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2720 target_write_fn fn;
2721 if (physical) {
2722 CMD_ARGC--;
2723 CMD_ARGV++;
2724 fn = target_write_phys_memory;
2725 } else
2726 fn = target_write_memory_fast;
2727 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
2728 return ERROR_COMMAND_SYNTAX_ERROR;
2729
2730 uint32_t address;
2731 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2732
2733 uint32_t value;
2734 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2735
2736 unsigned count = 1;
2737 if (CMD_ARGC == 3)
2738 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
2739
2740 struct target *target = get_current_target(CMD_CTX);
2741 unsigned wordsize;
2742 switch (CMD_NAME[2]) {
2743 case 'w':
2744 wordsize = 4;
2745 break;
2746 case 'h':
2747 wordsize = 2;
2748 break;
2749 case 'b':
2750 wordsize = 1;
2751 break;
2752 default:
2753 return ERROR_COMMAND_SYNTAX_ERROR;
2754 }
2755
2756 return target_fill_mem(target, address, fn, wordsize, value, count);
2757 }
2758
2759 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
2760 uint32_t *min_address, uint32_t *max_address)
2761 {
2762 if (CMD_ARGC < 1 || CMD_ARGC > 5)
2763 return ERROR_COMMAND_SYNTAX_ERROR;
2764
2765 /* a base address isn't always necessary,
2766 * default to 0x0 (i.e. don't relocate) */
2767 if (CMD_ARGC >= 2) {
2768 uint32_t addr;
2769 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2770 image->base_address = addr;
2771 image->base_address_set = 1;
2772 } else
2773 image->base_address_set = 0;
2774
2775 image->start_address_set = 0;
2776
2777 if (CMD_ARGC >= 4)
2778 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
2779 if (CMD_ARGC == 5) {
2780 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
2781 /* use size (given) to find max (required) */
2782 *max_address += *min_address;
2783 }
2784
2785 if (*min_address > *max_address)
2786 return ERROR_COMMAND_SYNTAX_ERROR;
2787
2788 return ERROR_OK;
2789 }
2790
2791 COMMAND_HANDLER(handle_load_image_command)
2792 {
2793 uint8_t *buffer;
2794 size_t buf_cnt;
2795 uint32_t image_size;
2796 uint32_t min_address = 0;
2797 uint32_t max_address = 0xffffffff;
2798 int i;
2799 struct image image;
2800
2801 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
2802 &image, &min_address, &max_address);
2803 if (ERROR_OK != retval)
2804 return retval;
2805
2806 struct target *target = get_current_target(CMD_CTX);
2807
2808 struct duration bench;
2809 duration_start(&bench);
2810
2811 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
2812 return ERROR_OK;
2813
2814 image_size = 0x0;
2815 retval = ERROR_OK;
2816 for (i = 0; i < image.num_sections; i++) {
2817 buffer = malloc(image.sections[i].size);
2818 if (buffer == NULL) {
2819 command_print(CMD_CTX,
2820 "error allocating buffer for section (%d bytes)",
2821 (int)(image.sections[i].size));
2822 break;
2823 }
2824
2825 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
2826 if (retval != ERROR_OK) {
2827 free(buffer);
2828 break;
2829 }
2830
2831 uint32_t offset = 0;
2832 uint32_t length = buf_cnt;
2833
2834 /* DANGER!!! beware of unsigned comparision here!!! */
2835
2836 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
2837 (image.sections[i].base_address < max_address)) {
2838
2839 if (image.sections[i].base_address < min_address) {
2840 /* clip addresses below */
2841 offset += min_address-image.sections[i].base_address;
2842 length -= offset;
2843 }
2844
2845 if (image.sections[i].base_address + buf_cnt > max_address)
2846 length -= (image.sections[i].base_address + buf_cnt)-max_address;
2847
2848 retval = target_write_buffer(target,
2849 image.sections[i].base_address + offset, length, buffer + offset);
2850 if (retval != ERROR_OK) {
2851 free(buffer);
2852 break;
2853 }
2854 image_size += length;
2855 command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
2856 (unsigned int)length,
2857 image.sections[i].base_address + offset);
2858 }
2859
2860 free(buffer);
2861 }
2862
2863 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
2864 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
2865 "in %fs (%0.3f KiB/s)", image_size,
2866 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2867 }
2868
2869 image_close(&image);
2870
2871 return retval;
2872
2873 }
2874
2875 COMMAND_HANDLER(handle_dump_image_command)
2876 {
2877 struct fileio fileio;
2878 uint8_t *buffer;
2879 int retval, retvaltemp;
2880 uint32_t address, size;
2881 struct duration bench;
2882 struct target *target = get_current_target(CMD_CTX);
2883
2884 if (CMD_ARGC != 3)
2885 return ERROR_COMMAND_SYNTAX_ERROR;
2886
2887 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
2888 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
2889
2890 uint32_t buf_size = (size > 4096) ? 4096 : size;
2891 buffer = malloc(buf_size);
2892 if (!buffer)
2893 return ERROR_FAIL;
2894
2895 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
2896 if (retval != ERROR_OK) {
2897 free(buffer);
2898 return retval;
2899 }
2900
2901 duration_start(&bench);
2902
2903 while (size > 0) {
2904 size_t size_written;
2905 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
2906 retval = target_read_buffer(target, address, this_run_size, buffer);
2907 if (retval != ERROR_OK)
2908 break;
2909
2910 retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
2911 if (retval != ERROR_OK)
2912 break;
2913
2914 size -= this_run_size;
2915 address += this_run_size;
2916 }
2917
2918 free(buffer);
2919
2920 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
2921 int filesize;
2922 retval = fileio_size(&fileio, &filesize);
2923 if (retval != ERROR_OK)
2924 return retval;
2925 command_print(CMD_CTX,
2926 "dumped %ld bytes in %fs (%0.3f KiB/s)", (long)filesize,
2927 duration_elapsed(&bench), duration_kbps(&bench, filesize));
2928 }
2929
2930 retvaltemp = fileio_close(&fileio);
2931 if (retvaltemp != ERROR_OK)
2932 return retvaltemp;
2933
2934 return retval;
2935 }
2936
2937 static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
2938 {
2939 uint8_t *buffer;
2940 size_t buf_cnt;
2941 uint32_t image_size;
2942 int i;
2943 int retval;
2944 uint32_t checksum = 0;
2945 uint32_t mem_checksum = 0;
2946
2947 struct image image;
2948
2949 struct target *target = get_current_target(CMD_CTX);
2950
2951 if (CMD_ARGC < 1)
2952 return ERROR_COMMAND_SYNTAX_ERROR;
2953
2954 if (!target) {
2955 LOG_ERROR("no target selected");
2956 return ERROR_FAIL;
2957 }
2958
2959 struct duration bench;
2960 duration_start(&bench);
2961
2962 if (CMD_ARGC >= 2) {
2963 uint32_t addr;
2964 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2965 image.base_address = addr;
2966 image.base_address_set = 1;
2967 } else {
2968 image.base_address_set = 0;
2969 image.base_address = 0x0;
2970 }
2971
2972 image.start_address_set = 0;
2973
2974 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
2975 if (retval != ERROR_OK)
2976 return retval;
2977
2978 image_size = 0x0;
2979 int diffs = 0;
2980 retval = ERROR_OK;
2981 for (i = 0; i < image.num_sections; i++) {
2982 buffer = malloc(image.sections[i].size);
2983 if (buffer == NULL) {
2984 command_print(CMD_CTX,
2985 "error allocating buffer for section (%d bytes)",
2986 (int)(image.sections[i].size));
2987 break;
2988 }
2989 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
2990 if (retval != ERROR_OK) {
2991 free(buffer);
2992 break;
2993 }
2994
2995 if (verify) {
2996 /* calculate checksum of image */
2997 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
2998 if (retval != ERROR_OK) {
2999 free(buffer);
3000 break;
3001 }
3002
3003 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3004 if (retval != ERROR_OK) {
3005 free(buffer);
3006 break;
3007 }
3008
3009 if (checksum != mem_checksum) {
3010 /* failed crc checksum, fall back to a binary compare */
3011 uint8_t *data;
3012
3013 if (diffs == 0)
3014 LOG_ERROR("checksum mismatch - attempting binary compare");
3015
3016 data = (uint8_t *)malloc(buf_cnt);
3017
3018 /* Can we use 32bit word accesses? */
3019 int size = 1;
3020 int count = buf_cnt;
3021 if ((count % 4) == 0) {
3022 size *= 4;
3023 count /= 4;
3024 }
3025 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
3026 if (retval == ERROR_OK) {
3027 uint32_t t;
3028 for (t = 0; t < buf_cnt; t++) {
3029 if (data[t] != buffer[t]) {
3030 command_print(CMD_CTX,
3031 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3032 diffs,
3033 (unsigned)(t + image.sections[i].base_address),
3034 data[t],
3035 buffer[t]);
3036 if (diffs++ >= 127) {
3037 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
3038 free(data);
3039 free(buffer);
3040 goto done;
3041 }
3042 }
3043 keep_alive();
3044 }
3045 }
3046 free(data);
3047 }
3048 } else {
3049 command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
3050 image.sections[i].base_address,
3051 buf_cnt);
3052 }
3053
3054 free(buffer);
3055 image_size += buf_cnt;
3056 }
3057 if (diffs > 0)
3058 command_print(CMD_CTX, "No more differences found.");
3059 done:
3060 if (diffs > 0)
3061 retval = ERROR_FAIL;
3062 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3063 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
3064 "in %fs (%0.3f KiB/s)", image_size,
3065 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3066 }
3067
3068 image_close(&image);
3069
3070 return retval;
3071 }
3072
/* "verify_image" command: compare an image file against target memory
 * (checksum with binary-compare fallback). */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
}
3077
/* "test_image" command: parse an image file and print its section
 * layout without touching target memory. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
}
3082
3083 static int handle_bp_command_list(struct command_context *cmd_ctx)
3084 {
3085 struct target *target = get_current_target(cmd_ctx);
3086 struct breakpoint *breakpoint = target->breakpoints;
3087 while (breakpoint) {
3088 if (breakpoint->type == BKPT_SOFT) {
3089 char *buf = buf_to_str(breakpoint->orig_instr,
3090 breakpoint->length, 16);
3091 command_print(cmd_ctx, "IVA breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
3092 breakpoint->address,
3093 breakpoint->length,
3094 breakpoint->set, buf);
3095 free(buf);
3096 } else {
3097 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3098 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3099 breakpoint->asid,
3100 breakpoint->length, breakpoint->set);
3101 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3102 command_print(cmd_ctx, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3103 breakpoint->address,
3104 breakpoint->length, breakpoint->set);
3105 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3106 breakpoint->asid);
3107 } else
3108 command_print(cmd_ctx, "Breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3109 breakpoint->address,
3110 breakpoint->length, breakpoint->set);
3111 }
3112
3113 breakpoint = breakpoint->next;
3114 }
3115 return ERROR_OK;
3116 }
3117
3118 static int handle_bp_command_set(struct command_context *cmd_ctx,
3119 uint32_t addr, uint32_t asid, uint32_t length, int hw)
3120 {
3121 struct target *target = get_current_target(cmd_ctx);
3122
3123 if (asid == 0) {
3124 int retval = breakpoint_add(target, addr, length, hw);
3125 if (ERROR_OK == retval)
3126 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
3127 else {
3128 LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
3129 return retval;
3130 }
3131 } else if (addr == 0) {
3132 int retval = context_breakpoint_add(target, asid, length, hw);
3133 if (ERROR_OK == retval)
3134 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3135 else {
3136 LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used");
3137 return retval;
3138 }
3139 } else {
3140 int retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3141 if (ERROR_OK == retval)
3142 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3143 else {
3144 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3145 return retval;
3146 }
3147 }
3148 return ERROR_OK;
3149 }
3150
3151 COMMAND_HANDLER(handle_bp_command)
3152 {
3153 uint32_t addr;
3154 uint32_t asid;
3155 uint32_t length;
3156 int hw = BKPT_SOFT;
3157
3158 switch (CMD_ARGC) {
3159 case 0:
3160 return handle_bp_command_list(CMD_CTX);
3161
3162 case 2:
3163 asid = 0;
3164 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3165 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3166 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3167
3168 case 3:
3169 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3170 hw = BKPT_HARD;
3171 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr