RTOS Thread awareness support wip
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 √ėyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * This program is free software; you can redistribute it and/or modify *
21 * it under the terms of the GNU General Public License as published by *
22 * the Free Software Foundation; either version 2 of the License, or *
23 * (at your option) any later version. *
24 * *
25 * This program is distributed in the hope that it will be useful, *
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
28 * GNU General Public License for more details. *
29 * *
30 * You should have received a copy of the GNU General Public License *
31 * along with this program; if not, write to the *
32 * Free Software Foundation, Inc., *
33 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include <helper/time_support.h>
40 #include <jtag/jtag.h>
41 #include <flash/nor/core.h>
42
43 #include "target.h"
44 #include "target_type.h"
45 #include "target_request.h"
46 #include "breakpoints.h"
47 #include "register.h"
48 #include "trace.h"
49 #include "image.h"
50 #include "rtos/rtos.h"
51
52
53 static int target_read_buffer_default(struct target *target, uint32_t address,
54 uint32_t size, uint8_t *buffer);
55 static int target_write_buffer_default(struct target *target, uint32_t address,
56 uint32_t size, const uint8_t *buffer);
57 static int target_array2mem(Jim_Interp *interp, struct target *target,
58 int argc, Jim_Obj *const *argv);
59 static int target_mem2array(Jim_Interp *interp, struct target *target,
60 int argc, Jim_Obj *const *argv);
61 static int target_register_user_commands(struct command_context *cmd_ctx);
62
63 /* targets */
64 extern struct target_type arm7tdmi_target;
65 extern struct target_type arm720t_target;
66 extern struct target_type arm9tdmi_target;
67 extern struct target_type arm920t_target;
68 extern struct target_type arm966e_target;
69 extern struct target_type arm946e_target;
70 extern struct target_type arm926ejs_target;
71 extern struct target_type fa526_target;
72 extern struct target_type feroceon_target;
73 extern struct target_type dragonite_target;
74 extern struct target_type xscale_target;
75 extern struct target_type cortexm3_target;
76 extern struct target_type cortexa8_target;
77 extern struct target_type arm11_target;
78 extern struct target_type mips_m4k_target;
79 extern struct target_type avr_target;
80 extern struct target_type dsp563xx_target;
81 extern struct target_type testee_target;
82 extern struct target_type avr32_ap7k_target;
83
/* NULL-terminated registry of every target type compiled into this build;
 * scanned by the "target create" handling to resolve a type by name. */
static struct target_type *target_types[] =
{
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm3_target,
	&cortexa8_target,
	&arm11_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&testee_target,
	&avr32_ap7k_target,
	NULL,
};
107
/* Head of the singly-linked list of all configured targets. */
struct target *all_targets = NULL;
/* Heads of the registered event/timer callback lists. */
static struct target_event_callback *target_event_callbacks = NULL;
static struct target_timer_callback *target_timer_callbacks = NULL;
/* Period (ms) of the background handle_target() polling timer. */
static const int polling_interval = 100;
112
/* Accepted Tcl spellings for asserting/deasserting a reset line. */
static const Jim_Nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
122
/* Maps ERROR_TARGET_* codes to short symbolic names for scripts/logs. */
static const Jim_Nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
137
138 static const char *target_strerror_safe(int err)
139 {
140 const Jim_Nvp *n;
141
142 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
143 if (n->name == NULL) {
144 return "unknown";
145 } else {
146 return n->name;
147 }
148 }
149
150 static const Jim_Nvp nvp_target_event[] = {
151 { .value = TARGET_EVENT_OLD_gdb_program_config , .name = "old-gdb_program_config" },
152 { .value = TARGET_EVENT_OLD_pre_resume , .name = "old-pre_resume" },
153
154 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
155 { .value = TARGET_EVENT_HALTED, .name = "halted" },
156 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
157 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
158 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
159
160 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
161 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
162
163 /* historical name */
164
165 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
166
167 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
168 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
169 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
170 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
171 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
172 { .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
173 { .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
174 { .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
175 { .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
176 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
177 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
178
179 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
180 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
181
182 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
183 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
184
185 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
186 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
187
188 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
189 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
190
191 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
192 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
193
194 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
195 { .value = TARGET_EVENT_RESUMED , .name = "resume-ok" },
196 { .value = TARGET_EVENT_RESUME_END , .name = "resume-end" },
197
198 { .name = NULL, .value = -1 }
199 };
200
/* Printable names for the enum target_state values. */
static const Jim_Nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted",  .value = TARGET_HALTED },
	{ .name = "reset",   .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
209
/* Printable names for the enum target_debug_reason values. */
static const Jim_Nvp nvp_target_debug_reason [] = {
	{ .name = "debug-request"             , .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint"                , .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint"                , .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint" , .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step"               , .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted"         , .value = DBG_REASON_NOTHALTED  },
	{ .name = "undefined"                 , .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
220
/* Accepted Tcl spellings for target endianness ("be"/"le" are aliases). */
static const Jim_Nvp nvp_target_endian[] = {
	{ .name = "big",    .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be",     .value = TARGET_BIG_ENDIAN },
	{ .name = "le",     .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL,     .value = -1 },
};
228
/* Printable names for the enum target_reset_mode values. */
static const Jim_Nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run"    , .value = RESET_RUN },
	{ .name = "halt"   , .value = RESET_HALT },
	{ .name = "init"   , .value = RESET_INIT },
	{ .name = NULL     , .value = -1 },
};
236
237 const char *debug_reason_name(struct target *t)
238 {
239 const char *cp;
240
241 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
242 t->debug_reason)->name;
243 if (!cp) {
244 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
245 cp = "(*BUG*unknown*BUG*)";
246 }
247 return cp;
248 }
249
250 const char *
251 target_state_name( struct target *t )
252 {
253 const char *cp;
254 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
255 if( !cp ){
256 LOG_ERROR("Invalid target state: %d", (int)(t->state));
257 cp = "(*BUG*unknown*BUG*)";
258 }
259 return cp;
260 }
261
262 /* determine the number of the new target */
263 static int new_target_number(void)
264 {
265 struct target *t;
266 int x;
267
268 /* number is 0 based */
269 x = -1;
270 t = all_targets;
271 while (t) {
272 if (x < t->target_number) {
273 x = t->target_number;
274 }
275 t = t->next;
276 }
277 return x + 1;
278 }
279
280 /* read a uint32_t from a buffer in target memory endianness */
281 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
282 {
283 if (target->endianness == TARGET_LITTLE_ENDIAN)
284 return le_to_h_u32(buffer);
285 else
286 return be_to_h_u32(buffer);
287 }
288
289 /* read a uint24_t from a buffer in target memory endianness */
290 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
291 {
292 if (target->endianness == TARGET_LITTLE_ENDIAN)
293 return le_to_h_u24(buffer);
294 else
295 return be_to_h_u24(buffer);
296 }
297
298 /* read a uint16_t from a buffer in target memory endianness */
299 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
300 {
301 if (target->endianness == TARGET_LITTLE_ENDIAN)
302 return le_to_h_u16(buffer);
303 else
304 return be_to_h_u16(buffer);
305 }
306
/* read a uint8_t from a buffer in target memory endianness */
static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
{
	/* A single byte has no endianness; the former "& 0x0ff" mask was a
	 * no-op on a uint8_t value and has been dropped. */
	return buffer[0];
}
312
313 /* write a uint32_t to a buffer in target memory endianness */
314 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
315 {
316 if (target->endianness == TARGET_LITTLE_ENDIAN)
317 h_u32_to_le(buffer, value);
318 else
319 h_u32_to_be(buffer, value);
320 }
321
322 /* write a uint24_t to a buffer in target memory endianness */
323 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
324 {
325 if (target->endianness == TARGET_LITTLE_ENDIAN)
326 h_u24_to_le(buffer, value);
327 else
328 h_u24_to_be(buffer, value);
329 }
330
331 /* write a uint16_t to a buffer in target memory endianness */
332 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
333 {
334 if (target->endianness == TARGET_LITTLE_ENDIAN)
335 h_u16_to_le(buffer, value);
336 else
337 h_u16_to_be(buffer, value);
338 }
339
/* write a uint8_t to a buffer; single bytes have no endianness */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	buffer[0] = value;
}
345
346 /* return a pointer to a configured target; id is name or number */
347 struct target *get_target(const char *id)
348 {
349 struct target *target;
350
351 /* try as tcltarget name */
352 for (target = all_targets; target; target = target->next) {
353 if (target->cmd_name == NULL)
354 continue;
355 if (strcmp(id, target->cmd_name) == 0)
356 return target;
357 }
358
359 /* It's OK to remove this fallback sometime after August 2010 or so */
360
361 /* no match, try as number */
362 unsigned num;
363 if (parse_uint(id, &num) != ERROR_OK)
364 return NULL;
365
366 for (target = all_targets; target; target = target->next) {
367 if (target->target_number == (int)num) {
368 LOG_WARNING("use '%s' as target identifier, not '%u'",
369 target->cmd_name, num);
370 return target;
371 }
372 }
373
374 return NULL;
375 }
376
377 /* returns a pointer to the n-th configured target */
378 static struct target *get_target_by_num(int num)
379 {
380 struct target *target = all_targets;
381
382 while (target) {
383 if (target->target_number == num) {
384 return target;
385 }
386 target = target->next;
387 }
388
389 return NULL;
390 }
391
392 struct target* get_current_target(struct command_context *cmd_ctx)
393 {
394 struct target *target = get_target_by_num(cmd_ctx->current_target);
395
396 if (target == NULL)
397 {
398 LOG_ERROR("BUG: current_target out of bounds");
399 exit(-1);
400 }
401
402 return target;
403 }
404
405 int target_poll(struct target *target)
406 {
407 int retval;
408
409 /* We can't poll until after examine */
410 if (!target_was_examined(target))
411 {
412 /* Fail silently lest we pollute the log */
413 return ERROR_FAIL;
414 }
415
416 retval = target->type->poll(target);
417 if (retval != ERROR_OK)
418 return retval;
419
420 if (target->halt_issued)
421 {
422 if (target->state == TARGET_HALTED)
423 {
424 target->halt_issued = false;
425 } else
426 {
427 long long t = timeval_ms() - target->halt_issued_time;
428 if (t>1000)
429 {
430 target->halt_issued = false;
431 LOG_INFO("Halt timed out, wake up GDB.");
432 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
433 }
434 }
435 }
436
437 return ERROR_OK;
438 }
439
440 int target_halt(struct target *target)
441 {
442 int retval;
443 /* We can't poll until after examine */
444 if (!target_was_examined(target))
445 {
446 LOG_ERROR("Target not examined yet");
447 return ERROR_FAIL;
448 }
449
450 retval = target->type->halt(target);
451 if (retval != ERROR_OK)
452 return retval;
453
454 target->halt_issued = true;
455 target->halt_issued_time = timeval_ms();
456
457 return ERROR_OK;
458 }
459
460 /**
461 * Make the target (re)start executing using its saved execution
462 * context (possibly with some modifications).
463 *
464 * @param target Which target should start executing.
465 * @param current True to use the target's saved program counter instead
466 * of the address parameter
467 * @param address Optionally used as the program counter.
468 * @param handle_breakpoints True iff breakpoints at the resumption PC
469 * should be skipped. (For example, maybe execution was stopped by
470 * such a breakpoint, in which case it would be counterprodutive to
471 * let it re-trigger.
472 * @param debug_execution False if all working areas allocated by OpenOCD
473 * should be released and/or restored to their original contents.
474 * (This would for example be true to run some downloaded "helper"
475 * algorithm code, which resides in one such working buffer and uses
476 * another for data storage.)
477 *
478 * @todo Resolve the ambiguity about what the "debug_execution" flag
479 * signifies. For example, Target implementations don't agree on how
480 * it relates to invalidation of the register cache, or to whether
481 * breakpoints and watchpoints should be enabled. (It would seem wrong
482 * to enable breakpoints when running downloaded "helper" algorithms
483 * (debug_execution true), since the breakpoints would be set to match
484 * target firmware being debugged, not the helper algorithm.... and
485 * enabling them could cause such helpers to malfunction (for example,
486 * by overwriting data with a breakpoint instruction. On the other
487 * hand the infrastructure for running such helpers might use this
488 * procedure but rely on hardware breakpoint to detect termination.)
489 */
490 int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
491 {
492 int retval;
493
494 /* We can't poll until after examine */
495 if (!target_was_examined(target))
496 {
497 LOG_ERROR("Target not examined yet");
498 return ERROR_FAIL;
499 }
500
501 /* note that resume *must* be asynchronous. The CPU can halt before
502 * we poll. The CPU can even halt at the current PC as a result of
503 * a software breakpoint being inserted by (a bug?) the application.
504 */
505 if ((retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution)) != ERROR_OK)
506 return retval;
507
508 return retval;
509 }
510
511 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
512 {
513 char buf[100];
514 int retval;
515 Jim_Nvp *n;
516 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
517 if (n->name == NULL) {
518 LOG_ERROR("invalid reset mode");
519 return ERROR_FAIL;
520 }
521
522 /* disable polling during reset to make reset event scripts
523 * more predictable, i.e. dr/irscan & pathmove in events will
524 * not have JTAG operations injected into the middle of a sequence.
525 */
526 bool save_poll = jtag_poll_get_enabled();
527
528 jtag_poll_set_enabled(false);
529
530 sprintf(buf, "ocd_process_reset %s", n->name);
531 retval = Jim_Eval(cmd_ctx->interp, buf);
532
533 jtag_poll_set_enabled(save_poll);
534
535 if (retval != JIM_OK) {
536 Jim_MakeErrorMessage(cmd_ctx->interp);
537 command_print(NULL,"%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
538 return ERROR_FAIL;
539 }
540
541 /* We want any events to be processed before the prompt */
542 retval = target_call_timer_callbacks_now();
543
544 struct target *target;
545 for (target = all_targets; target; target = target->next) {
546 target->type->check_reset(target);
547 }
548
549 return retval;
550 }
551
552 static int identity_virt2phys(struct target *target,
553 uint32_t virtual, uint32_t *physical)
554 {
555 *physical = virtual;
556 return ERROR_OK;
557 }
558
559 static int no_mmu(struct target *target, int *enabled)
560 {
561 *enabled = 0;
562 return ERROR_OK;
563 }
564
/* Default examine() hook for target types that provide none: simply
 * marks the target as examined without touching the hardware. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
570
/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
576
/* Run the type-specific examine() hook for a single target. */
int target_examine_one(struct target *target)
{
	return target->type->examine(target);
}
581
/* One-shot JTAG event hook: once this target's TAP becomes enabled,
 * unregister ourselves and run the examination that target_examine()
 * deferred.  Other events, or a still-disabled TAP, are ignored. */
static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	/* Unregister first so the callback cannot fire again. */
	jtag_unregister_event_callback(jtag_enable_callback, target);
	return target_examine_one(target);
}
592
593
594 /* Targets that correctly implement init + examine, i.e.
595 * no communication with target during init:
596 *
597 * XScale
598 */
599 int target_examine(void)
600 {
601 int retval = ERROR_OK;
602 struct target *target;
603
604 for (target = all_targets; target; target = target->next)
605 {
606 /* defer examination, but don't skip it */
607 if (!target->tap->enabled) {
608 jtag_register_event_callback(jtag_enable_callback,
609 target);
610 continue;
611 }
612 if ((retval = target_examine_one(target)) != ERROR_OK)
613 return retval;
614 }
615 return retval;
616 }
617 const char *target_type_name(struct target *target)
618 {
619 return target->type->name;
620 }
621
/* Wrapper installed over the type's write_memory hook by
 * target_init_one(); enforces the examined-first precondition before
 * delegating to the original implementation saved in write_memory_imp. */
static int target_write_memory_imp(struct target *target, uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->write_memory_imp(target, address, size, count, buffer);
}
631
/* Wrapper installed over the type's read_memory hook by
 * target_init_one(); enforces the examined-first precondition before
 * delegating to the original implementation saved in read_memory_imp. */
static int target_read_memory_imp(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->read_memory_imp(target, address, size, count, buffer);
}
641
/* Wrapper installed over the type's soft_reset_halt hook by
 * target_init_one(); checks that the target was examined and that the
 * type actually provides the hook before delegating. */
static int target_soft_reset_halt_imp(struct target *target)
{
	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	/* soft_reset_halt is optional; fail loudly if unsupported. */
	if (!target->type->soft_reset_halt_imp) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt_imp(target);
}
656
/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  * Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or * soft) to detect algorithm
 * termination, while others may support  lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	/* run_algorithm is optional; not every target type provides it. */
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* Flag the window during which the algorithm owns the target, so
	 * other code (e.g. pollers) can tell an algorithm is in flight. */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
697
698
699 int target_read_memory(struct target *target,
700 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
701 {
702 return target->type->read_memory(target, address, size, count, buffer);
703 }
704
705 static int target_read_phys_memory(struct target *target,
706 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
707 {
708 return target->type->read_phys_memory(target, address, size, count, buffer);
709 }
710
711 int target_write_memory(struct target *target,
712 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
713 {
714 return target->type->write_memory(target, address, size, count, buffer);
715 }
716
717 static int target_write_phys_memory(struct target *target,
718 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
719 {
720 return target->type->write_phys_memory(target, address, size, count, buffer);
721 }
722
723 int target_bulk_write_memory(struct target *target,
724 uint32_t address, uint32_t count, const uint8_t *buffer)
725 {
726 return target->type->bulk_write_memory(target, address, count, buffer);
727 }
728
729 int target_add_breakpoint(struct target *target,
730 struct breakpoint *breakpoint)
731 {
732 if (target->state != TARGET_HALTED) {
733 LOG_WARNING("target %s is not halted", target->cmd_name);
734 return ERROR_TARGET_NOT_HALTED;
735 }
736 return target->type->add_breakpoint(target, breakpoint);
737 }
738 int target_remove_breakpoint(struct target *target,
739 struct breakpoint *breakpoint)
740 {
741 return target->type->remove_breakpoint(target, breakpoint);
742 }
743
744 int target_add_watchpoint(struct target *target,
745 struct watchpoint *watchpoint)
746 {
747 if (target->state != TARGET_HALTED) {
748 LOG_WARNING("target %s is not halted", target->cmd_name);
749 return ERROR_TARGET_NOT_HALTED;
750 }
751 return target->type->add_watchpoint(target, watchpoint);
752 }
753 int target_remove_watchpoint(struct target *target,
754 struct watchpoint *watchpoint)
755 {
756 return target->type->remove_watchpoint(target, watchpoint);
757 }
758
759 int target_get_gdb_reg_list(struct target *target,
760 struct reg **reg_list[], int *reg_list_size)
761 {
762 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size);
763 }
764 int target_step(struct target *target,
765 int current, uint32_t address, int handle_breakpoints)
766 {
767 return target->type->step(target, current, address, handle_breakpoints);
768 }
769
770
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static void target_reset_examined(struct target *target)
{
	target->examined = false;
}
779
/* Stub installed by target_init_one() when an MMU-capable target type
 * failed to provide read_phys_memory; always fails loudly. */
static int
err_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}
787
/* Stub installed by target_init_one() when an MMU-capable target type
 * failed to provide write_phys_memory; always fails loudly. */
static int
err_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}
795
796 static int handle_target(void *priv);
797
/* Initialize one target: install default hooks, call the type's
 * init_target(), wrap the memory/soft-reset hooks with precondition
 * checks, and sanity-check the MMU-related hook set.
 *
 * NOTE: the hook re-wiring below is order-sensitive — the original
 * implementations must be saved into the *_imp slots BEFORE the public
 * slots are pointed at the wrappers, or the wrappers would recurse. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	/* Optional hooks get safe defaults so callers need not NULL-check. */
	if (type->examine == NULL)
		type->examine = default_examine;

	if (type->check_reset== NULL)
		type->check_reset = default_check_reset;

	int retval = type->init_target(cmd_ctx, target);
	if (ERROR_OK != retval)
	{
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/**
	 * @todo get rid of those *memory_imp() methods, now that all
	 * callers are using target_*_memory() accessors ... and make
	 * sure the "physical" paths handle the same issues.
	 */
	/* a non-invasive way(in terms of patches) to add some code that
	 * runs before the type->write/read_memory implementation
	 */
	type->write_memory_imp = target->type->write_memory;
	type->write_memory = target_write_memory_imp;

	type->read_memory_imp = target->type->read_memory;
	type->read_memory = target_read_memory_imp;

	type->soft_reset_halt_imp = target->type->soft_reset_halt;
	type->soft_reset_halt = target_soft_reset_halt_imp;

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu)
	{
		/* MMU-capable types must provide physical accessors and a
		 * translation hook; stub any gaps with loud failures. */
		if (type->write_phys_memory == NULL)
		{
			LOG_ERROR("type '%s' is missing write_phys_memory",
					type->name);
			type->write_phys_memory = err_write_phys_memory;
		}
		if (type->read_phys_memory == NULL)
		{
			LOG_ERROR("type '%s' is missing read_phys_memory",
					type->name);
			type->read_phys_memory = err_read_phys_memory;
		}
		if (type->virt2phys == NULL)
		{
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	}
	else
	{
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory
				|| type->virt2phys)
		{
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);
		}

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* Optional buffer accessors fall back to the generic versions. */
	if (target->type->read_buffer == NULL)
		target->type->read_buffer = target_read_buffer_default;

	if (target->type->write_buffer == NULL)
		target->type->write_buffer = target_write_buffer_default;

	return ERROR_OK;
}
883
884 static int target_init(struct command_context *cmd_ctx)
885 {
886 struct target *target;
887 int retval;
888
889 for (target = all_targets; target; target = target->next)
890 {
891 retval = target_init_one(cmd_ctx, target);
892 if (ERROR_OK != retval)
893 return retval;
894 }
895
896 if (!all_targets)
897 return ERROR_OK;
898
899 retval = target_register_user_commands(cmd_ctx);
900 if (ERROR_OK != retval)
901 return retval;
902
903 retval = target_register_timer_callback(&handle_target,
904 polling_interval, 1, cmd_ctx->interp);
905 if (ERROR_OK != retval)
906 return retval;
907
908 return ERROR_OK;
909 }
910
/* "target init" command handler: takes no arguments and runs
 * target_init() at most once per session (guarded by a static flag). */
COMMAND_HANDLER(handle_target_init_command)
{
	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Idempotence guard: repeated "target init" is a harmless no-op. */
	static bool target_initialized = false;
	if (target_initialized)
	{
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}
927
928 int target_register_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv)
929 {
930 struct target_event_callback **callbacks_p = &target_event_callbacks;
931
932 if (callback == NULL)
933 {
934 return ERROR_INVALID_ARGUMENTS;
935 }
936
937 if (*callbacks_p)
938 {
939 while ((*callbacks_p)->next)
940 callbacks_p = &((*callbacks_p)->next);
941 callbacks_p = &((*callbacks_p)->next);
942 }
943
944 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
945 (*callbacks_p)->callback = callback;
946 (*callbacks_p)->priv = priv;
947 (*callbacks_p)->next = NULL;
948
949 return ERROR_OK;
950 }
951
952 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
953 {
954 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
955 struct timeval now;
956
957 if (callback == NULL)
958 {
959 return ERROR_INVALID_ARGUMENTS;
960 }
961
962 if (*callbacks_p)
963 {
964 while ((*callbacks_p)->next)
965 callbacks_p = &((*callbacks_p)->next);
966 callbacks_p = &((*callbacks_p)->next);
967 }
968
969 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
970 (*callbacks_p)->callback = callback;
971 (*callbacks_p)->periodic = periodic;
972 (*callbacks_p)->time_ms = time_ms;
973
974 gettimeofday(&now, NULL);
975 (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
976 time_ms -= (time_ms % 1000);
977 (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
978 if ((*callbacks_p)->when.tv_usec > 1000000)
979 {
980 (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
981 (*callbacks_p)->when.tv_sec += 1;
982 }
983
984 (*callbacks_p)->priv = priv;
985 (*callbacks_p)->next = NULL;
986
987 return ERROR_OK;
988 }
989
990 int target_unregister_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv)
991 {
992 struct target_event_callback **p = &target_event_callbacks;
993 struct target_event_callback *c = target_event_callbacks;
994
995 if (callback == NULL)
996 {
997 return ERROR_INVALID_ARGUMENTS;
998 }
999
1000 while (c)
1001 {
1002 struct target_event_callback *next = c->next;
1003 if ((c->callback == callback) && (c->priv == priv))
1004 {
1005 *p = next;
1006 free(c);
1007 return ERROR_OK;
1008 }
1009 else
1010 p = &(c->next);
1011 c = next;
1012 }
1013
1014 return ERROR_OK;
1015 }
1016
1017 static int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1018 {
1019 struct target_timer_callback **p = &target_timer_callbacks;
1020 struct target_timer_callback *c = target_timer_callbacks;
1021
1022 if (callback == NULL)
1023 {
1024 return ERROR_INVALID_ARGUMENTS;
1025 }
1026
1027 while (c)
1028 {
1029 struct target_timer_callback *next = c->next;
1030 if ((c->callback == callback) && (c->priv == priv))
1031 {
1032 *p = next;
1033 free(c);
1034 return ERROR_OK;
1035 }
1036 else
1037 p = &(c->next);
1038 c = next;
1039 }
1040
1041 return ERROR_OK;
1042 }
1043
1044 int target_call_event_callbacks(struct target *target, enum target_event event)
1045 {
1046 struct target_event_callback *callback = target_event_callbacks;
1047 struct target_event_callback *next_callback;
1048
1049 if (event == TARGET_EVENT_HALTED)
1050 {
1051 /* execute early halted first */
1052 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1053 }
1054
1055 LOG_DEBUG("target event %i (%s)",
1056 event,
1057 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1058
1059 target_handle_event(target, event);
1060
1061 while (callback)
1062 {
1063 next_callback = callback->next;
1064 callback->callback(target, event, callback->priv);
1065 callback = next_callback;
1066 }
1067
1068 return ERROR_OK;
1069 }
1070
1071 static int target_timer_callback_periodic_restart(
1072 struct target_timer_callback *cb, struct timeval *now)
1073 {
1074 int time_ms = cb->time_ms;
1075 cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
1076 time_ms -= (time_ms % 1000);
1077 cb->when.tv_sec = now->tv_sec + time_ms / 1000;
1078 if (cb->when.tv_usec > 1000000)
1079 {
1080 cb->when.tv_usec = cb->when.tv_usec - 1000000;
1081 cb->when.tv_sec += 1;
1082 }
1083 return ERROR_OK;
1084 }
1085
/* Run one timer callback.  Periodic callbacks are re-armed for their
 * next interval; one-shot callbacks are unregistered (which frees the
 * callback node) after they have fired. */
static int target_call_timer_callback(struct target_timer_callback *cb,
		struct timeval *now)
{
	cb->callback(cb->priv);

	if (cb->periodic)
		return target_timer_callback_periodic_restart(cb, now);

	/* one-shot: remove it so it never fires again */
	return target_unregister_timer_callback(cb->callback, cb->priv);
}
1096
1097 static int target_call_timer_callbacks_check_time(int checktime)
1098 {
1099 keep_alive();
1100
1101 struct timeval now;
1102 gettimeofday(&now, NULL);
1103
1104 struct target_timer_callback *callback = target_timer_callbacks;
1105 while (callback)
1106 {
1107 // cleaning up may unregister and free this callback
1108 struct target_timer_callback *next_callback = callback->next;
1109
1110 bool call_it = callback->callback &&
1111 ((!checktime && callback->periodic) ||
1112 now.tv_sec > callback->when.tv_sec ||
1113 (now.tv_sec == callback->when.tv_sec &&
1114 now.tv_usec >= callback->when.tv_usec));
1115
1116 if (call_it)
1117 {
1118 int retval = target_call_timer_callback(callback, &now);
1119 if (retval != ERROR_OK)
1120 return retval;
1121 }
1122
1123 callback = next_callback;
1124 }
1125
1126 return ERROR_OK;
1127 }
1128
/* Run timer callbacks whose deadlines have passed. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1133
/* invoke periodic callbacks immediately, without waiting for their
 * deadline; one-shot callbacks still only fire once expired */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1139
1140 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1141 {
1142 struct working_area *c = target->working_areas;
1143 struct working_area *new_wa = NULL;
1144
1145 /* Reevaluate working area address based on MMU state*/
1146 if (target->working_areas == NULL)
1147 {
1148 int retval;
1149 int enabled;
1150
1151 retval = target->type->mmu(target, &enabled);
1152 if (retval != ERROR_OK)
1153 {
1154 return retval;
1155 }
1156
1157 if (!enabled) {
1158 if (target->working_area_phys_spec) {
1159 LOG_DEBUG("MMU disabled, using physical "
1160 "address for working memory 0x%08x",
1161 (unsigned)target->working_area_phys);
1162 target->working_area = target->working_area_phys;
1163 } else {
1164 LOG_ERROR("No working memory available. "
1165 "Specify -work-area-phys to target.");
1166 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1167 }
1168 } else {
1169 if (target->working_area_virt_spec) {
1170 LOG_DEBUG("MMU enabled, using virtual "
1171 "address for working memory 0x%08x",
1172 (unsigned)target->working_area_virt);
1173 target->working_area = target->working_area_virt;
1174 } else {
1175 LOG_ERROR("No working memory available. "
1176 "Specify -work-area-virt to target.");
1177 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1178 }
1179 }
1180 }
1181
1182 /* only allocate multiples of 4 byte */
1183 if (size % 4)
1184 {
1185 LOG_ERROR("BUG: code tried to allocate unaligned number of bytes (0x%08x), padding", ((unsigned)(size)));
1186 size = (size + 3) & (~3);
1187 }
1188
1189 /* see if there's already a matching working area */
1190 while (c)
1191 {
1192 if ((c->free) && (c->size == size))
1193 {
1194 new_wa = c;
1195 break;
1196 }
1197 c = c->next;
1198 }
1199
1200 /* if not, allocate a new one */
1201 if (!new_wa)
1202 {
1203 struct working_area **p = &target->working_areas;
1204 uint32_t first_free = target->working_area;
1205 uint32_t free_size = target->working_area_size;
1206
1207 c = target->working_areas;
1208 while (c)
1209 {
1210 first_free += c->size;
1211 free_size -= c->size;
1212 p = &c->next;
1213 c = c->next;
1214 }
1215
1216 if (free_size < size)
1217 {
1218 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1219 }
1220
1221 LOG_DEBUG("allocated new working area at address 0x%08x", (unsigned)first_free);
1222
1223 new_wa = malloc(sizeof(struct working_area));
1224 new_wa->next = NULL;
1225 new_wa->size = size;
1226 new_wa->address = first_free;
1227
1228 if (target->backup_working_area)
1229 {
1230 int retval;
1231 new_wa->backup = malloc(new_wa->size);
1232 if ((retval = target_read_memory(target, new_wa->address, 4, new_wa->size / 4, new_wa->backup)) != ERROR_OK)
1233 {
1234 free(new_wa->backup);
1235 free(new_wa);
1236 return retval;
1237 }
1238 }
1239 else
1240 {
1241 new_wa->backup = NULL;
1242 }
1243
1244 /* put new entry in list */
1245 *p = new_wa;
1246 }
1247
1248 /* mark as used, and return the new (reused) area */
1249 new_wa->free = false;
1250 *area = new_wa;
1251
1252 /* user pointer */
1253 new_wa->user = area;
1254
1255 return ERROR_OK;
1256 }
1257
1258 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1259 {
1260 int retval;
1261
1262 retval = target_alloc_working_area_try(target, size, area);
1263 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1264 {
1265 LOG_WARNING("not enough working area available(requested %u)", (unsigned)(size));
1266 }
1267 return retval;
1268
1269 }
1270
1271 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1272 {
1273 if (area->free)
1274 return ERROR_OK;
1275
1276 if (restore && target->backup_working_area)
1277 {
1278 int retval;
1279 if ((retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup)) != ERROR_OK)
1280 return retval;
1281 }
1282
1283 area->free = true;
1284
1285 /* mark user pointer invalid */
1286 *area->user = NULL;
1287 area->user = NULL;
1288
1289 return ERROR_OK;
1290 }
1291
/* Free a working area and restore the memory it occupied
 * (when the target was configured to back it up). */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
1296
1297 /* free resources and restore memory, if restoring memory fails,
1298 * free up resources anyway
1299 */
1300 static void target_free_all_working_areas_restore(struct target *target, int restore)
1301 {
1302 struct working_area *c = target->working_areas;
1303
1304 while (c)
1305 {
1306 struct working_area *next = c->next;
1307 target_free_working_area_restore(target, c, restore);
1308
1309 if (c->backup)
1310 free(c->backup);
1311
1312 free(c);
1313
1314 c = next;
1315 }
1316
1317 target->working_areas = NULL;
1318 }
1319
/* Free every working area of the target, restoring the saved
 * memory contents where backups exist. */
void target_free_all_working_areas(struct target *target)
{
	target_free_all_working_areas_restore(target, 1);
}
1324
1325 int target_arch_state(struct target *target)
1326 {
1327 int retval;
1328 if (target == NULL)
1329 {
1330 LOG_USER("No target has been configured");
1331 return ERROR_OK;
1332 }
1333
1334 LOG_USER("target state: %s", target_state_name( target ));
1335
1336 if (target->state != TARGET_HALTED)
1337 return ERROR_OK;
1338
1339 retval = target->type->arch_state(target);
1340 return retval;
1341 }
1342
1343 /* Single aligned words are guaranteed to use 16 or 32 bit access
1344 * mode respectively, otherwise data is handled as quickly as
1345 * possible
1346 */
1347 int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
1348 {
1349 LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
1350 (int)size, (unsigned)address);
1351
1352 if (!target_was_examined(target))
1353 {
1354 LOG_ERROR("Target not examined yet");
1355 return ERROR_FAIL;
1356 }
1357
1358 if (size == 0) {
1359 return ERROR_OK;
1360 }
1361
1362 if ((address + size - 1) < address)
1363 {
1364 /* GDB can request this when e.g. PC is 0xfffffffc*/
1365 LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
1366 (unsigned)address,
1367 (unsigned)size);
1368 return ERROR_FAIL;
1369 }
1370
1371 return target->type->write_buffer(target, address, size, buffer);
1372 }
1373
/* Default write_buffer implementation: split the transfer into an
 * unaligned head (byte writes), an aligned middle (word writes,
 * bulk transfer above 128 bytes) and an unaligned tail (byte
 * writes).  A single aligned halfword takes a 16-bit fast path. */
static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
{
	int retval = ERROR_OK;

	/* fast path: one aligned halfword goes out as a 16-bit access */
	if (((address % 2) == 0) && (size == 2))
	{
		return target_write_memory(target, address, 2, 1, buffer);
	}

	/* handle unaligned head bytes */
	if (address % 4)
	{
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		if ((retval = target_write_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4)
	{
		int aligned = size - (size % 4);

		/* use bulk writes above a certain limit. This may have to be changed */
		if (aligned > 128)
		{
			if ((retval = target->type->bulk_write_memory(target, address, aligned / 4, buffer)) != ERROR_OK)
				return retval;
		}
		else
		{
			if ((retval = target_write_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
				return retval;
		}

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/* handle tail writes of less than 4 bytes */
	if (size > 0)
	{
		if ((retval = target_write_memory(target, address, 1, size, buffer)) != ERROR_OK)
			return retval;
	}

	return retval;
}
1430
1431 /* Single aligned words are guaranteed to use 16 or 32 bit access
1432 * mode respectively, otherwise data is handled as quickly as
1433 * possible
1434 */
1435 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1436 {
1437 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
1438 (int)size, (unsigned)address);
1439
1440 if (!target_was_examined(target))
1441 {
1442 LOG_ERROR("Target not examined yet");
1443 return ERROR_FAIL;
1444 }
1445
1446 if (size == 0) {
1447 return ERROR_OK;
1448 }
1449
1450 if ((address + size - 1) < address)
1451 {
1452 /* GDB can request this when e.g. PC is 0xfffffffc*/
1453 LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
1454 address,
1455 size);
1456 return ERROR_FAIL;
1457 }
1458
1459 return target->type->read_buffer(target, address, size, buffer);
1460 }
1461
/* Default read_buffer implementation: split the transfer into an
 * unaligned head (byte reads), an aligned middle (word reads), a
 * halfword pass, and finally single-byte reads for whatever is
 * left.  A single aligned halfword takes a 16-bit fast path. */
static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
{
	int retval = ERROR_OK;

	/* fast path: one aligned halfword comes in as a 16-bit access */
	if (((address % 2) == 0) && (size == 2))
	{
		return target_read_memory(target, address, 2, 1, buffer);
	}

	/* handle unaligned head bytes */
	if (address % 4)
	{
		uint32_t unaligned = 4 - (address % 4);

		if (unaligned > size)
			unaligned = size;

		if ((retval = target_read_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
			return retval;

		buffer += unaligned;
		address += unaligned;
		size -= unaligned;
	}

	/* handle aligned words */
	if (size >= 4)
	{
		int aligned = size - (size % 4);

		if ((retval = target_read_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}

	/*prevent byte access when possible (avoid AHB access limitations in some cases)*/
	if(size >=2)
	{
		int aligned = size - (size%2);
		retval = target_read_memory(target, address, 2, aligned / 2, buffer);
		if (retval != ERROR_OK)
			return retval;

		buffer += aligned;
		address += aligned;
		size -= aligned;
	}
	/* handle tail reads of the remaining single byte, if any */
	if (size > 0)
	{
		if ((retval = target_read_memory(target, address, 1, size, buffer)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1521
/* Compute a CRC over a region of target memory.  Preferably run on
 * the target itself (type->checksum_memory); when that fails, fall
 * back to reading the memory to the host and checksumming here. */
int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target))
	{
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	if ((retval = target->type->checksum_memory(target, address,
		size, &checksum)) != ERROR_OK)
	{
		/* host-side fallback */
		buffer = malloc(size);
		if (buffer == NULL)
		{
			LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
			return ERROR_INVALID_ARGUMENTS;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK)
		{
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 immediately followed by set_u32 on the
		 * same target looks like a round-trip no-op -- confirm whether
		 * a real byte-order conversion was intended here */
		for (i = 0; i < (size/sizeof(uint32_t)); i++)
		{
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
1566
1567 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
1568 {
1569 int retval;
1570 if (!target_was_examined(target))
1571 {
1572 LOG_ERROR("Target not examined yet");
1573 return ERROR_FAIL;
1574 }
1575
1576 if (target->type->blank_check_memory == 0)
1577 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1578
1579 retval = target->type->blank_check_memory(target, address, size, blank);
1580
1581 return retval;
1582 }
1583
1584 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
1585 {
1586 uint8_t value_buf[4];
1587 if (!target_was_examined(target))
1588 {
1589 LOG_ERROR("Target not examined yet");
1590 return ERROR_FAIL;
1591 }
1592
1593 int retval = target_read_memory(target, address, 4, 1, value_buf);
1594
1595 if (retval == ERROR_OK)
1596 {
1597 *value = target_buffer_get_u32(target, value_buf);
1598 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1599 address,
1600 *value);
1601 }
1602 else
1603 {
1604 *value = 0x0;
1605 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1606 address);
1607 }
1608
1609 return retval;
1610 }
1611
1612 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
1613 {
1614 uint8_t value_buf[2];
1615 if (!target_was_examined(target))
1616 {
1617 LOG_ERROR("Target not examined yet");
1618 return ERROR_FAIL;
1619 }
1620
1621 int retval = target_read_memory(target, address, 2, 1, value_buf);
1622
1623 if (retval == ERROR_OK)
1624 {
1625 *value = target_buffer_get_u16(target, value_buf);
1626 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
1627 address,
1628 *value);
1629 }
1630 else
1631 {
1632 *value = 0x0;
1633 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1634 address);
1635 }
1636
1637 return retval;
1638 }
1639
1640 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
1641 {
1642 int retval = target_read_memory(target, address, 1, 1, value);
1643 if (!target_was_examined(target))
1644 {
1645 LOG_ERROR("Target not examined yet");
1646 return ERROR_FAIL;
1647 }
1648
1649 if (retval == ERROR_OK)
1650 {
1651 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1652 address,
1653 *value);
1654 }
1655 else
1656 {
1657 *value = 0x0;
1658 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1659 address);
1660 }
1661
1662 return retval;
1663 }
1664
1665 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
1666 {
1667 int retval;
1668 uint8_t value_buf[4];
1669 if (!target_was_examined(target))
1670 {
1671 LOG_ERROR("Target not examined yet");
1672 return ERROR_FAIL;
1673 }
1674
1675 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1676 address,
1677 value);
1678
1679 target_buffer_set_u32(target, value_buf, value);
1680 if ((retval = target_write_memory(target, address, 4, 1, value_buf)) != ERROR_OK)
1681 {
1682 LOG_DEBUG("failed: %i", retval);
1683 }
1684
1685 return retval;
1686 }
1687
1688 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
1689 {
1690 int retval;
1691 uint8_t value_buf[2];
1692 if (!target_was_examined(target))
1693 {
1694 LOG_ERROR("Target not examined yet");
1695 return ERROR_FAIL;
1696 }
1697
1698 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
1699 address,
1700 value);
1701
1702 target_buffer_set_u16(target, value_buf, value);
1703 if ((retval = target_write_memory(target, address, 2, 1, value_buf)) != ERROR_OK)
1704 {
1705 LOG_DEBUG("failed: %i", retval);
1706 }
1707
1708 return retval;
1709 }
1710
1711 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
1712 {
1713 int retval;
1714 if (!target_was_examined(target))
1715 {
1716 LOG_ERROR("Target not examined yet");
1717 return ERROR_FAIL;
1718 }
1719
1720 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1721 address, value);
1722
1723 if ((retval = target_write_memory(target, address, 1, 1, &value)) != ERROR_OK)
1724 {
1725 LOG_DEBUG("failed: %i", retval);
1726 }
1727
1728 return retval;
1729 }
1730
/* "targets" command: with one argument, select that target as the
 * current target (refused if its TAP is disabled); with no arguments,
 * or an unknown name, print a table of all configured targets. */
COMMAND_HANDLER(handle_targets_command)
{
	struct target *target = all_targets;

	if (CMD_ARGC == 1)
	{
		target = get_target(CMD_ARGV[0]);
		if (target == NULL) {
			command_print(CMD_CTX,"Target: %s is unknown, try one of:\n", CMD_ARGV[0]);
			goto DumpTargets;
		}
		if (!target->tap->enabled) {
			command_print(CMD_CTX,"Target: TAP %s is disabled, "
					"can't be the current target\n",
					target->tap->dotted_name);
			return ERROR_FAIL;
		}

		/* make this target the current one */
		CMD_CTX->current_target = target->target_number;
		return ERROR_OK;
	}
DumpTargets:

	target = all_targets;
	command_print(CMD_CTX, " TargetName Type Endian TapName State ");
	command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target)
	{
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name( target );
		else
			state = "tap-disabled";

		/* '*' marks the current target in the listing */
		if (CMD_CTX->current_target == target->target_number)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD_CTX, "%2d%c %-18s %-10s %-6s %-18s %s",
					  target->target_number,
					  marker,
					  target_name(target),
					  target_type_name(target),
					  Jim_Nvp_value2name_simple(nvp_target_endian,
								target->endianness)->name,
					  target->tap->dotted_name,
					  state);
		target = target->next;
	}

	return ERROR_OK;
}
1785
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* latest sensed line state, updated by sense_handler() */
static int powerDropout;
static int srstAsserted;

/* edge-triggered action flags: set by sense_handler(), consumed
 * (and cleared) by handle_target() */
static int runPowerRestore;
static int runPowerDropout;
static int runSrstAsserted;
static int runSrstDeasserted;
1795
/* Sample the power/SRST sense lines via the JTAG layer and latch
 * edge events into the run* action flags.  Power-dropout and
 * SRST-deassert events are rate-limited to one per 2000ms. */
static int sense_handler(void)
{
	/* previous samples, kept across calls for edge detection */
	static int prevSrstAsserted = 0;
	static int prevPowerdropout = 0;

	int retval;
	if ((retval = jtag_power_dropout(&powerDropout)) != ERROR_OK)
		return retval;

	/* falling edge of powerDropout = power came back */
	int powerRestored;
	powerRestored = prevPowerdropout && !powerDropout;
	if (powerRestored)
	{
		runPowerRestore = 1;
	}

	long long current = timeval_ms();
	static long long lastPower = 0;
	/* rate-limit dropout handling to once per 2000ms */
	int waitMore = lastPower + 2000 > current;
	if (powerDropout && !waitMore)
	{
		runPowerDropout = 1;
		lastPower = current;
	}

	if ((retval = jtag_srst_asserted(&srstAsserted)) != ERROR_OK)
		return retval;

	/* falling edge of srstAsserted = SRST was released */
	int srstDeasserted;
	srstDeasserted = prevSrstAsserted && !srstAsserted;

	static long long lastSrst = 0;
	/* rate-limit SRST-deassert handling to once per 2000ms */
	waitMore = lastSrst + 2000 > current;
	if (srstDeasserted && !waitMore)
	{
		runSrstDeasserted = 1;
		lastSrst = current;
	}

	/* rising edge of srstAsserted */
	if (!prevSrstAsserted && srstAsserted)
	{
		runSrstAsserted = 1;
	}

	prevSrstAsserted = srstAsserted;
	prevPowerdropout = powerDropout;

	if (srstDeasserted || powerRestored)
	{
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
1853
/* exponential backoff state for polling after a failed target_poll():
 * backoff_times is the number of poll cycles to skip, backoff_count
 * counts the cycles skipped so far */
static int backoff_times = 0;
static int backoff_count = 0;
1856
1857 /* process target state changes */
/* Periodic poll handler (timer callback; priv is the Jim interpreter):
 * senses power/SRST events and runs the corresponding Tcl event procs,
 * then polls every enabled target.  A failed poll triggers exponential
 * backoff of the polling interval, capped near 5000ms. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe())
	{
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive = 0;
	if (! recursive)
	{
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (runSrstAsserted)
		{
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (runSrstDeasserted)
		{
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (runPowerDropout)
		{
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (runPowerRestore)
		{
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something)
		{
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		runSrstAsserted = 0;
		runSrstDeasserted = 0;
		runPowerRestore = 0;
		runPowerDropout = 0;

		recursive = 0;
	}

	if (backoff_times > backoff_count)
	{
		/* do not poll this time as we failed previously */
		backoff_count++;
		return ERROR_OK;
	}
	backoff_count = 0;

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next)
	{
		if (!target->tap->enabled)
			continue;

		/* only poll target if we've got power and srst isn't asserted */
		if (!powerDropout && !srstAsserted)
		{
			/* polling may fail silently until the target has been examined */
			if ((retval = target_poll(target)) != ERROR_OK)
			{
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (backoff_times * polling_interval < 5000)
				{
					backoff_times *= 2;
					backoff_times++;
				}
				LOG_USER("Polling target failed, GDB will be halted. Polling again in %dms", backoff_times * polling_interval);

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
				return retval;
			}
			/* Since we succeeded, we reset backoff count */
			if (backoff_times > 0)
			{
				LOG_USER("Polling succeeded again");
			}
			backoff_times = 0;
		}
	}

	return retval;
}
1968
/* "reg" command: with no arguments, list every register of the current
 * target (cached values only when valid); with a register number or
 * name, display it (re-reading the target when the cache is invalid or
 * "force" is given); with a name/number plus a numeric value, set it. */
COMMAND_HANDLER(handle_reg_command)
{
	struct target *target;
	struct reg *reg = NULL;
	unsigned count = 0;
	char *value;

	LOG_DEBUG("-");

	target = get_current_target(CMD_CTX);

	/* list all available registers for the current target */
	if (CMD_ARGC == 0)
	{
		struct reg_cache *cache = target->reg_cache;

		count = 0;
		while (cache)
		{
			unsigned i;

			command_print(CMD_CTX, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++)
			{
				/* only print cached values if they are valid */
				if (reg->valid) {
					value = buf_to_str(reg->value,
							reg->size, 16);
					command_print(CMD_CTX,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size) ;
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9'))
	{
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* linear scan of all caches for the num'th register */
		struct reg_cache *cache = target->reg_cache;
		count = 0;
		while (cache)
		{
			unsigned i;
			for (i = 0; i < cache->num_regs; i++)
			{
				if (count++ == num)
				{
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg)
		{
			command_print(CMD_CTX, "%i is out of bounds, the current target has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else /* access a single register by its name */
	{
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);

		if (!reg)
		{
			command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
			return ERROR_OK;
		}
	}

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0') && (CMD_ARGV[1][0] <= '9'))))
	{
		/* "force" discards the cached value and re-reads the target */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0)
		{
			reg->type->get(reg);
		}
		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2)
	{
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		reg->type->set(reg, buf);

		/* echo back what the register now holds */
		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);

		free(buf);

		return ERROR_OK;
	}

	command_print(CMD_CTX, "usage: reg <#|name> [value]");

	return ERROR_OK;
}
2097
2098 COMMAND_HANDLER(handle_poll_command)
2099 {
2100 int retval = ERROR_OK;
2101 struct target *target = get_current_target(CMD_CTX);
2102
2103 if (CMD_ARGC == 0)
2104 {
2105 command_print(CMD_CTX, "background polling: %s",
2106 jtag_poll_get_enabled() ? "on" : "off");
2107 command_print(CMD_CTX, "TAP: %s (%s)",
2108 target->tap->dotted_name,
2109 target->tap->enabled ? "enabled" : "disabled");
2110 if (!target->tap->enabled)
2111 return ERROR_OK;
2112 if ((retval = target_poll(target)) != ERROR_OK)
2113 return retval;
2114 if ((retval = target_arch_state(target)) != ERROR_OK)
2115 return retval;
2116 }
2117 else if (CMD_ARGC == 1)
2118 {
2119 bool enable;
2120 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2121 jtag_poll_set_enabled(enable);
2122 }
2123 else
2124 {
2125 return ERROR_COMMAND_SYNTAX_ERROR;
2126 }
2127
2128 return retval;
2129 }
2130
2131 COMMAND_HANDLER(handle_wait_halt_command)
2132 {
2133 if (CMD_ARGC > 1)
2134 return ERROR_COMMAND_SYNTAX_ERROR;
2135
2136 unsigned ms = 5000;
2137 if (1 == CMD_ARGC)
2138 {
2139 int retval = parse_uint(CMD_ARGV[0], &ms);
2140 if (ERROR_OK != retval)
2141 {
2142 command_print(CMD_CTX, "usage: %s [seconds]", CMD_NAME);
2143 return ERROR_COMMAND_SYNTAX_ERROR;
2144 }
2145 // convert seconds (given) to milliseconds (needed)
2146 ms *= 1000;
2147 }
2148
2149 struct target *target = get_current_target(CMD_CTX);
2150 return target_wait_state(target, TARGET_HALTED, ms);
2151 }
2152
/* wait for target state to change. The trick here is to have a low
 * latency for short waits and not to suck up all the CPU time
 * on longer waits.
 *
 * After 500ms, keep_alive() is invoked
 *
 * Returns the first polling error, ERROR_FAIL after ms milliseconds
 * without reaching the state, or ERROR_OK once the state is reached.
 */
int target_wait_state(struct target *target, enum target_state state, int ms)
{
	int retval;
	long long then = 0, cur;
	int once = 1;

	for (;;)
	{
		if ((retval = target_poll(target)) != ERROR_OK)
			return retval;
		if (target->state == state)
		{
			break;
		}
		cur = timeval_ms();
		if (once)
		{
			/* start the timeout clock on the first unsuccessful poll */
			once = 0;
			then = timeval_ms();
			LOG_DEBUG("waiting for target %s...",
				Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
		}

		if (cur-then > 500)
		{
			keep_alive();
		}

		if ((cur-then) > ms)
		{
			LOG_ERROR("timed out while waiting for target %s",
				Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
2197
2198 COMMAND_HANDLER(handle_halt_command)
2199 {
2200 LOG_DEBUG("-");
2201
2202 struct target *target = get_current_target(CMD_CTX);
2203 int retval = target_halt(target);
2204 if (ERROR_OK != retval)
2205 return retval;
2206
2207 if (CMD_ARGC == 1)
2208 {
2209 unsigned wait_local;
2210 retval = parse_uint(CMD_ARGV[0], &wait_local);
2211 if (ERROR_OK != retval)
2212 return ERROR_COMMAND_SYNTAX_ERROR;
2213 if (!wait_local)
2214 return ERROR_OK;
2215 }
2216
2217 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2218 }
2219
2220 COMMAND_HANDLER(handle_soft_reset_halt_command)
2221 {
2222 struct target *target = get_current_target(CMD_CTX);
2223
2224 LOG_USER("requesting target halt and executing a soft reset");
2225
2226 target->type->soft_reset_halt(target);
2227
2228 return ERROR_OK;
2229 }
2230
2231 COMMAND_HANDLER(handle_reset_command)
2232 {
2233 if (CMD_ARGC > 1)
2234 return ERROR_COMMAND_SYNTAX_ERROR;
2235
2236 enum target_reset_mode reset_mode = RESET_RUN;
2237 if (CMD_ARGC == 1)
2238 {
2239 const Jim_Nvp *n;
2240 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2241 if ((n->name == NULL) || (n->value == RESET_UNKNOWN)) {
2242 return ERROR_COMMAND_SYNTAX_ERROR;
2243 }
2244 reset_mode = n->value;
2245 }
2246
2247 /* reset *all* targets */
2248 return target_process_reset(CMD_CTX, reset_mode);
2249 }
2250
2251
2252 COMMAND_HANDLER(handle_resume_command)
2253 {
2254 int current = 1;
2255 if (CMD_ARGC > 1)
2256 return ERROR_COMMAND_SYNTAX_ERROR;
2257
2258 struct target *target = get_current_target(CMD_CTX);
2259 target_handle_event(target, TARGET_EVENT_OLD_pre_resume);
2260
2261 /* with no CMD_ARGV, resume from current pc, addr = 0,
2262 * with one arguments, addr = CMD_ARGV[0],
2263 * handle breakpoints, not debugging */
2264 uint32_t addr = 0;
2265 if (CMD_ARGC == 1)
2266 {
2267 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2268 current = 0;
2269 }
2270
2271 return target_resume(target, current, addr, 1, 0);
2272 }
2273
2274 COMMAND_HANDLER(handle_step_command)
2275 {
2276 if (CMD_ARGC > 1)
2277 return ERROR_COMMAND_SYNTAX_ERROR;
2278
2279 LOG_DEBUG("-");
2280
2281 /* with no CMD_ARGV, step from current pc, addr = 0,
2282 * with one argument addr = CMD_ARGV[0],
2283 * handle breakpoints, debugging */
2284 uint32_t addr = 0;
2285 int current_pc = 1;
2286 if (CMD_ARGC == 1)
2287 {
2288 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2289 current_pc = 0;
2290 }
2291
2292 struct target *target = get_current_target(CMD_CTX);
2293
2294 return target->type->step(target, current_pc, addr, 1);
2295 }
2296
/* Pretty-print memory previously read from the target as "md"-style
 * hex dump lines (address prefix, then up to 32 bytes worth of values
 * per line), emitting each completed line via command_print().
 *
 * cmd_ctx  command context used for output
 * target   used only to decode values with the target's endianness
 * address  target address of buffer[0], printed in the line prefix
 * size     access width in bytes: 1, 2 or 4 (caller has validated)
 * count    number of elements of 'size' bytes in buffer
 * buffer   raw bytes read from the target, in target byte order
 */
static void handle_md_output(struct command_context *cmd_ctx,
		struct target *target, uint32_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	/* one output line covers 32 bytes, i.e. line_bytecnt/size values */
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	/* accumulate one line at a time; output_len tracks the fill level */
	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	const char *value_fmt;
	switch (size) {
	case 4: value_fmt = "%8.8x "; break;
	case 2: value_fmt = "%4.4x "; break;
	case 1: value_fmt = "%2.2x "; break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++)
	{
		/* start of a line: print the target address of this element */
		if (i % line_modulo == 0)
		{
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					"0x%8.8x: ",
					(unsigned)(address + (i*size)));
		}

		/* decode the value honoring the target's endianness */
		uint32_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 4: value = target_buffer_get_u32(target, value_ptr); break;
		case 2: value = target_buffer_get_u16(target, value_ptr); break;
		case 1: value = *value_ptr; /* last case: no break needed */
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush on a full line or after the final element */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1))
		{
			command_print(cmd_ctx, "%s", output);
			output_len = 0;
		}
	}
}
2346
2347 COMMAND_HANDLER(handle_md_command)
2348 {
2349 if (CMD_ARGC < 1)
2350 return ERROR_COMMAND_SYNTAX_ERROR;
2351
2352 unsigned size = 0;
2353 switch (CMD_NAME[2]) {
2354 case 'w': size = 4; break;
2355 case 'h': size = 2; break;
2356 case 'b': size = 1; break;
2357 default: return ERROR_COMMAND_SYNTAX_ERROR;
2358 }
2359
2360 bool physical=strcmp(CMD_ARGV[0], "phys")==0;
2361 int (*fn)(struct target *target,
2362 uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
2363 if (physical)
2364 {
2365 CMD_ARGC--;
2366 CMD_ARGV++;
2367 fn=target_read_phys_memory;
2368 } else
2369 {
2370 fn=target_read_memory;
2371 }
2372 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2373 {
2374 return ERROR_COMMAND_SYNTAX_ERROR;
2375 }
2376
2377 uint32_t address;
2378 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2379
2380 unsigned count = 1;
2381 if (CMD_ARGC == 2)
2382 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2383
2384 uint8_t *buffer = calloc(count, size);
2385
2386 struct target *target = get_current_target(CMD_CTX);
2387 int retval = fn(target, address, size, count, buffer);
2388 if (ERROR_OK == retval)
2389 handle_md_output(CMD_CTX, target, address, size, count, buffer);
2390
2391 free(buffer);
2392
2393 return retval;
2394 }
2395
/* Signature shared by the memory-write back-ends that target_fill_mem()
 * can drive (plain, physical, or buffered writes). */
typedef int (*target_write_fn)(struct target *target,
	uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);

/* target_write_fn adapter over target_write_buffer(): writes
 * size*count bytes in one bulk operation, which lets the target use
 * its fastest transfer path. */
static int target_write_memory_fast(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	return target_write_buffer(target, address, size * count, buffer);
}
2404
2405 static int target_fill_mem(struct target *target,
2406 uint32_t address,
2407 target_write_fn fn,
2408 unsigned data_size,
2409 /* value */
2410 uint32_t b,
2411 /* count */
2412 unsigned c)
2413 {
2414 /* We have to write in reasonably large chunks to be able
2415 * to fill large memory areas with any sane speed */
2416 const unsigned chunk_size = 16384;
2417 uint8_t *target_buf = malloc(chunk_size * data_size);
2418 if (target_buf == NULL)
2419 {
2420 LOG_ERROR("Out of memory");
2421 return ERROR_FAIL;
2422 }
2423
2424 for (unsigned i = 0; i < chunk_size; i ++)
2425 {
2426 switch (data_size)
2427 {
2428 case 4:
2429 target_buffer_set_u32(target, target_buf + i*data_size, b);
2430 break;
2431 case 2:
2432 target_buffer_set_u16(target, target_buf + i*data_size, b);
2433 break;
2434 case 1:
2435 target_buffer_set_u8(target, target_buf + i*data_size, b);
2436 break;
2437 default:
2438 exit(-1);
2439 }
2440 }
2441
2442 int retval = ERROR_OK;
2443
2444 for (unsigned x = 0; x < c; x += chunk_size)
2445 {
2446 unsigned current;
2447 current = c - x;
2448 if (current > chunk_size)
2449 {
2450 current = chunk_size;
2451 }
2452 retval = fn(target, address + x * data_size, data_size, current, target_buf);
2453 if (retval != ERROR_OK)
2454 {
2455 break;
2456 }
2457 /* avoid GDB timeouts */
2458 keep_alive();
2459 }
2460 free(target_buf);
2461
2462 return retval;
2463 }
2464
2465
2466 COMMAND_HANDLER(handle_mw_command)
2467 {
2468 if (CMD_ARGC < 2)
2469 {
2470 return ERROR_COMMAND_SYNTAX_ERROR;
2471 }
2472 bool physical=strcmp(CMD_ARGV[0], "phys")==0;
2473 target_write_fn fn;
2474 if (physical)
2475 {
2476 CMD_ARGC--;
2477 CMD_ARGV++;
2478 fn=target_write_phys_memory;
2479 } else
2480 {
2481 fn = target_write_memory_fast;
2482 }
2483 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
2484 return ERROR_COMMAND_SYNTAX_ERROR;
2485
2486 uint32_t address;
2487 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2488
2489 uint32_t value;
2490 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2491
2492 unsigned count = 1;
2493 if (CMD_ARGC == 3)
2494 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
2495
2496 struct target *target = get_current_target(CMD_CTX);
2497 unsigned wordsize;
2498 switch (CMD_NAME[2])
2499 {
2500 case 'w':
2501 wordsize = 4;
2502 break;
2503 case 'h':
2504 wordsize = 2;
2505 break;
2506 case 'b':
2507 wordsize = 1;
2508 break;
2509 default:
2510 return ERROR_COMMAND_SYNTAX_ERROR;
2511 }
2512
2513 return target_fill_mem(target, address, fn, wordsize, value, count);
2514 }
2515
/* Parse the shared argument list of the "load_image" family:
 *     filename [base-address [type [min-address [size]]]]
 * Fills image->base_address{,_set} from the optional second argument,
 * and *min_address / *max_address from the optional fourth and fifth
 * arguments (the fifth is a size, converted here to an end address).
 * Callers pre-initialize *min_address and *max_address to the full
 * range.  Returns ERROR_COMMAND_SYNTAX_ERROR on bad argc, unparsable
 * numbers, or an empty/negative address window. */
static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
		uint32_t *min_address, uint32_t *max_address)
{
	if (CMD_ARGC < 1 || CMD_ARGC > 5)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* a base address isn't always necessary,
	 * default to 0x0 (i.e. don't relocate) */
	if (CMD_ARGC >= 2)
	{
		uint32_t addr;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
		image->base_address = addr;
		image->base_address_set = 1;
	}
	else
		image->base_address_set = 0;

	image->start_address_set = 0;

	if (CMD_ARGC >= 4)
	{
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
	}
	if (CMD_ARGC == 5)
	{
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
		// use size (given) to find max (required)
		*max_address += *min_address;
	}

	if (*min_address > *max_address)
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
2552
2553 COMMAND_HANDLER(handle_load_image_command)
2554 {
2555 uint8_t *buffer;
2556 size_t buf_cnt;
2557 uint32_t image_size;
2558 uint32_t min_address = 0;
2559 uint32_t max_address = 0xffffffff;
2560 int i;
2561 struct image image;
2562
2563 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
2564 &image, &min_address, &max_address);
2565 if (ERROR_OK != retval)
2566 return retval;
2567
2568 struct target *target = get_current_target(CMD_CTX);
2569
2570 struct duration bench;
2571 duration_start(&bench);
2572
2573 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
2574 {
2575 return ERROR_OK;
2576 }
2577
2578 image_size = 0x0;
2579 retval = ERROR_OK;
2580 for (i = 0; i < image.num_sections; i++)
2581 {
2582 buffer = malloc(image.sections[i].size);
2583 if (buffer == NULL)
2584 {
2585 command_print(CMD_CTX,
2586 "error allocating buffer for section (%d bytes)",
2587 (int)(image.sections[i].size));
2588 break;
2589 }
2590
2591 if ((retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt)) != ERROR_OK)
2592 {
2593 free(buffer);
2594 break;
2595 }
2596
2597 uint32_t offset = 0;
2598 uint32_t length = buf_cnt;
2599
2600 /* DANGER!!! beware of unsigned comparision here!!! */
2601
2602 if ((image.sections[i].base_address + buf_cnt >= min_address)&&
2603 (image.sections[i].base_address < max_address))
2604 {
2605 if (image.sections[i].base_address < min_address)
2606 {
2607 /* clip addresses below */
2608 offset += min_address-image.sections[i].base_address;
2609 length -= offset;
2610 }
2611
2612 if (image.sections[i].base_address + buf_cnt > max_address)
2613 {
2614 length -= (image.sections[i].base_address + buf_cnt)-max_address;
2615 }
2616
2617 if ((retval = target_write_buffer(target, image.sections[i].base_address + offset, length, buffer + offset)) != ERROR_OK)
2618 {
2619 free(buffer);
2620 break;
2621 }
2622 image_size += length;
2623 command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
2624 (unsigned int)length,
2625 image.sections[i].base_address + offset);
2626 }
2627
2628 free(buffer);
2629 }
2630
2631 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2632 {
2633 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
2634 "in %fs (%0.3f KiB/s)", image_size,
2635 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2636 }
2637
2638 image_close(&image);
2639
2640 return retval;
2641
2642 }
2643
2644 COMMAND_HANDLER(handle_dump_image_command)
2645 {
2646 struct fileio fileio;
2647 uint8_t buffer[560];
2648 int retval, retvaltemp;
2649 uint32_t address, size;
2650 struct duration bench;
2651 struct target *target = get_current_target(CMD_CTX);
2652
2653 if (CMD_ARGC != 3)
2654 return ERROR_COMMAND_SYNTAX_ERROR;
2655
2656 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
2657 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
2658
2659 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
2660 if (retval != ERROR_OK)
2661 return retval;
2662
2663 duration_start(&bench);
2664
2665 retval = ERROR_OK;
2666 while (size > 0)
2667 {
2668 size_t size_written;
2669 uint32_t this_run_size = (size > 560) ? 560 : size;
2670 retval = target_read_buffer(target, address, this_run_size, buffer);
2671 if (retval != ERROR_OK)
2672 {
2673 break;
2674 }
2675
2676 retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
2677 if (retval != ERROR_OK)
2678 {
2679 break;
2680 }
2681
2682 size -= this_run_size;
2683 address += this_run_size;
2684 }
2685
2686 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2687 {
2688 int filesize;
2689 retval = fileio_size(&fileio, &filesize);
2690 if (retval != ERROR_OK)
2691 return retval;
2692 command_print(CMD_CTX,
2693 "dumped %ld bytes in %fs (%0.3f KiB/s)", (long)filesize,
2694 duration_elapsed(&bench), duration_kbps(&bench, filesize));
2695 }
2696
2697 if ((retvaltemp = fileio_close(&fileio)) != ERROR_OK)
2698 return retvaltemp;
2699
2700 return retval;
2701 }
2702
2703 static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
2704 {
2705 uint8_t *buffer;
2706 size_t buf_cnt;
2707 uint32_t image_size;
2708 int i;
2709 int retval;
2710 uint32_t checksum = 0;
2711 uint32_t mem_checksum = 0;
2712
2713 struct image image;
2714
2715 struct target *target = get_current_target(CMD_CTX);
2716
2717 if (CMD_ARGC < 1)
2718 {
2719 return ERROR_COMMAND_SYNTAX_ERROR;
2720 }
2721
2722 if (!target)
2723 {
2724 LOG_ERROR("no target selected");
2725 return ERROR_FAIL;
2726 }
2727
2728 struct duration bench;
2729 duration_start(&bench);
2730
2731 if (CMD_ARGC >= 2)
2732 {
2733 uint32_t addr;
2734 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2735 image.base_address = addr;
2736 image.base_address_set = 1;
2737 }
2738 else
2739 {
2740 image.base_address_set = 0;
2741 image.base_address = 0x0;
2742 }
2743
2744 image.start_address_set = 0;
2745
2746 if ((retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL)) != ERROR_OK)
2747 {
2748 return retval;
2749 }
2750
2751 image_size = 0x0;
2752 int diffs = 0;
2753 retval = ERROR_OK;
2754 for (i = 0; i < image.num_sections; i++)
2755 {
2756 buffer = malloc(image.sections[i].size);
2757 if (buffer == NULL)
2758 {
2759 command_print(CMD_CTX,
2760 "error allocating buffer for section (%d bytes)",
2761 (int)(image.sections[i].size));
2762 break;
2763 }
2764 if ((retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt)) != ERROR_OK)
2765 {
2766 free(buffer);
2767 break;
2768 }
2769
2770 if (verify)
2771 {
2772 /* calculate checksum of image */
2773 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
2774 if (retval != ERROR_OK)
2775 {
2776 free(buffer);
2777 break;
2778 }
2779
2780 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
2781 if (retval != ERROR_OK)
2782 {
2783 free(buffer);
2784 break;
2785 }
2786
2787 if (checksum != mem_checksum)
2788 {
2789 /* failed crc checksum, fall back to a binary compare */
2790 uint8_t *data;
2791
2792 if (diffs == 0)
2793 {
2794 LOG_ERROR("checksum mismatch - attempting binary compare");
2795 }
2796
2797 data = (uint8_t*)malloc(buf_cnt);
2798
2799 /* Can we use 32bit word accesses? */
2800 int size = 1;
2801 int count = buf_cnt;
2802 if ((count % 4) == 0)
2803 {
2804 size *= 4;
2805 count /= 4;
2806 }
2807 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
2808 if (retval == ERROR_OK)
2809 {
2810 uint32_t t;
2811 for (t = 0; t < buf_cnt; t++)
2812 {
2813 if (data[t] != buffer[t])
2814 {
2815 command_print(CMD_CTX,
2816 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
2817 diffs,
2818 (unsigned)(t + image.sections[i].base_address),
2819 data[t],
2820 buffer[t]);
2821 if (diffs++ >= 127)
2822 {
2823 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
2824 free(data);
2825 free(buffer);
2826 goto done;
2827 }
2828 }
2829 keep_alive();
2830 }
2831 }
2832 free(data);
2833 }
2834 } else
2835 {
2836 command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
2837 image.sections[i].base_address,
2838 buf_cnt);
2839 }
2840
2841 free(buffer);
2842 image_size += buf_cnt;
2843 }
2844 if (diffs > 0)
2845 {
2846 command_print(CMD_CTX, "No more differences found.");
2847 }
2848 done:
2849 if (diffs > 0)
2850 {
2851 retval = ERROR_FAIL;
2852 }
2853 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2854 {
2855 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
2856 "in %fs (%0.3f KiB/s)", image_size,
2857 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2858 }
2859
2860 image_close(&image);
2861
2862 return retval;
2863 }
2864
/* "verify_image" command: compare an image file against target memory
 * (checksum first, byte compare on mismatch). */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
}
2869
/* "test_image" command: parse an image file and print each section's
 * address and length without comparing against target memory. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
}
2874
2875 static int handle_bp_command_list(struct command_context *cmd_ctx)
2876 {
2877 struct target *target = get_current_target(cmd_ctx);
2878 struct breakpoint *breakpoint = target->breakpoints;
2879 while (breakpoint)
2880 {
2881 if (breakpoint->type == BKPT_SOFT)
2882 {
2883 char* buf = buf_to_str(breakpoint->orig_instr,
2884 breakpoint->length, 16);
2885 command_print(cmd_ctx, "0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
2886 breakpoint->address,
2887 breakpoint->length,
2888 breakpoint->set, buf);
2889 free(buf);
2890 }
2891 else
2892 {
2893 command_print(cmd_ctx, "0x%8.8" PRIx32 ", 0x%x, %i",
2894 breakpoint->address,
2895 breakpoint->length, breakpoint->set);
2896 }
2897
2898 breakpoint = breakpoint->next;
2899 }
2900 return ERROR_OK;
2901 }
2902
2903 static int handle_bp_command_set(struct command_context *cmd_ctx,
2904 uint32_t addr, uint32_t length, int hw)
2905 {
2906 struct target *target = get_current_target(cmd_ctx);
2907 int retval = breakpoint_add(target, addr, length, hw);
2908 if (ERROR_OK == retval)
2909 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
2910 else
2911 LOG_ERROR("Failure setting breakpoint");
2912 return retval;
2913 }
2914
2915 COMMAND_HANDLER(handle_bp_command)
2916 {
2917 if (CMD_ARGC == 0)
2918 return handle_bp_command_list(CMD_CTX);
2919
2920 if (CMD_ARGC < 2 || CMD_ARGC > 3)
2921 {
2922 command_print(CMD_CTX, "usage: bp <address> <length> ['hw']");
2923 return ERROR_COMMAND_SYNTAX_ERROR;
2924 }
2925
2926 uint32_t addr;
2927 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2928 uint32_t length;
2929 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
2930
2931 int hw = BKPT_SOFT;
2932 if (CMD_ARGC == 3)
2933 {
2934 if (strcmp(CMD_ARGV[2], "hw") == 0)
2935 hw = BKPT_HARD;
2936 else
2937 return ERROR_COMMAND_SYNTAX_ERROR;
2938 }
2939
2940 return handle_bp_command_set(CMD_CTX, addr, length, hw);
2941 }
2942
2943 COMMAND_HANDLER(handle_rbp_command)
2944 {
2945 if (CMD_ARGC != 1)
2946 return ERROR_COMMAND_SYNTAX_ERROR;
2947
2948 uint32_t addr;
2949 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2950
2951 struct target *target = get_current_target(CMD_CTX);
2952 breakpoint_remove(target, addr);
2953
2954 return ERROR_OK;
2955 }
2956
/* "wp" command: list watchpoints (no arguments) or set one.
 * Usage: wp [address length [(r|w|a) [value [mask]]]]
 * The argument switch below deliberately falls through from the most
 * specific argc downward, so each optional argument is parsed exactly
 * once before the mandatory address/length pair. */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	/* no arguments: print the current watchpoint list and return */
	if (CMD_ARGC == 0)
	{
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint)
		{
			command_print(CMD_CTX, "address: 0x%8.8" PRIx32
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	/* defaults for the optional arguments */
	enum watchpoint_rw type = WPT_ACCESS;
	uint32_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	switch (CMD_ARGC)
	{
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		// fall through
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		// fall through
	case 3:
		/* third argument selects the access type to watch */
		switch (CMD_ARGV[2][0])
		{
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		// fall through
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
		break;

	default:
		command_print(CMD_CTX, "usage: wp [address length "
				"[(r|w|a) [value [mask]]]]");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (ERROR_OK != retval)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
3030
3031 COMMAND_HANDLER(handle_rwp_command)
3032 {
3033 if (CMD_ARGC != 1)
3034 return ERROR_COMMAND_SYNTAX_ERROR;
3035
3036 uint32_t addr;
3037 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3038
3039 struct target *target = get_current_target(CMD_CTX);
3040 watchpoint_remove(target, addr);
3041
3042 return ERROR_OK;
3043 }
3044
3045
3046 /**
3047 * Translate a virtual address to a physical address.
3048 *
3049 * The low-level target implementation must have logged a detailed error
3050 * which is forwarded to telnet/GDB session.
3051 */
3052 COMMAND_HANDLER(handle_virt2phys_command)
3053 {
3054 if (CMD_ARGC != 1)
3055 return ERROR_COMMAND_SYNTAX_ERROR;
3056
3057 uint32_t va;
3058 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], va);
3059 uint32_t pa;
3060
3061 struct target *target = get_current_target(CMD_CTX);
3062 int retval = target->type->virt2phys(target, va, &pa);
3063 if (retval == ERROR_OK)
3064 command_print(CMD_CTX, "Physical address 0x%08" PRIx32 "", pa);
3065
3066 return retval;
3067 }
3068
/* fwrite() wrapper used by the gmon.out writer: writes len bytes to f,
 * logging (but not otherwise handling) a short or failed write. */
static void writeData(FILE *f, const void *data, size_t len)
{
	size_t written = fwrite(data, 1, len, f);
	if (written != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
3075
/* Emit a 32-bit value to the gmon.out stream, least significant byte
 * first (the byte order gprof expects on little-endian hosts). */
static void writeLong(FILE *f, int l)
{
	for (int shift = 0; shift < 32; shift += 8) {
		char c = (char)((l >> shift) & 0xff);
		writeData(f, &c, 1);
	}
}
3086
/* Emit a string to the gmon.out stream WITHOUT its NUL terminator. */
static void writeString(FILE *f, char *s)
{
	writeData(f, s, strlen(s));
}
3091
/* Dump a gmon.out histogram file for the collected PC samples, in the
 * format consumed by gprof: "gmon" magic, version, padding, then one
 * GMON_TAG_TIME_HIST record whose buckets cover [min, max] in at most
 * 16K bins.  Best effort: I/O and allocation failures are silent.
 *
 * Fixes: with sampleNum == 0 the original read samples[0]; and when
 * every sample shared one pc (addressSpace == 1) the bucket index
 * computation divided by zero.  Both cases are now guarded. */
static void writeGmon(uint32_t *samples, uint32_t sampleNum, const char *filename)
{
	uint32_t i;

	if ((samples == NULL) || (sampleNum == 0))
		return;

	FILE *f = fopen(filename, "w");
	if (f == NULL)
		return;
	writeString(f, "gmon");
	writeLong(f, 0x00000001); /* Version */
	writeLong(f, 0); /* padding */
	writeLong(f, 0); /* padding */
	writeLong(f, 0); /* padding */

	uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
	writeData(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min = samples[0];
	uint32_t max = samples[0];
	for (i = 0; i < sampleNum; i++)
	{
		if (min > samples[i])
		{
			min = samples[i];
		}
		if (max < samples[i])
		{
			max = samples[i];
		}
	}

	int addressSpace = (max-min + 1);

	static const uint32_t maxBuckets = 16 * 1024; /* maximum buckets. */
	uint32_t length = addressSpace;
	if (length > maxBuckets)
	{
		length = maxBuckets;
	}
	int *buckets = malloc(sizeof(int)*length);
	if (buckets == NULL)
	{
		fclose(f);
		return;
	}
	memset(buckets, 0, sizeof(int)*length);
	for (i = 0; i < sampleNum;i++)
	{
		uint32_t address = samples[i];
		long long a = address-min;
		long long b = length-1;
		long long c = addressSpace-1;
		/* c == 0 when all samples share one pc; everything lands in
		 * bucket 0.  Math in long long to avoid int32 overflow. */
		int index_t = (c > 0) ? (int)((a*b)/c) : 0;
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	writeLong(f, min); /* low_pc */
	writeLong(f, max); /* high_pc */
	writeLong(f, length); /* # of samples */
	writeLong(f, 100); /* KLUDGE! We lie, ca. 100Hz best case. */
	writeString(f, "seconds");
	for (i = 0; i < (15-strlen("seconds")); i++)
		writeData(f, &zero, 1);
	writeString(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	char *data = malloc(2*length);
	if (data != NULL)
	{
		for (i = 0; i < length;i++)
		{
			int val;
			val = buckets[i];
			if (val > 65535)
			{
				val = 65535;
			}
			data[i*2]=val&0xff;
			data[i*2 + 1]=(val >> 8)&0xff;
		}
		free(buckets);
		writeData(f, data, length * 2);
		free(data);
	} else
	{
		free(buckets);
	}

	fclose(f);
}
3184
3185 /* profiling samples the CPU PC as quickly as OpenOCD is able,
3186 * which will be used as a random sampling of PC */
3187 COMMAND_HANDLER(handle_profile_command)
3188 {
3189 struct target *target = get_current_target(CMD_CTX);
3190 struct timeval timeout, now;
3191
3192 gettimeofday(&timeout, NULL);
3193 if (CMD_ARGC != 2)
3194 {
3195 return ERROR_COMMAND_SYNTAX_ERROR;
3196 }
3197 unsigned offset;
3198 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], offset);
3199
3200 timeval_add_time(&timeout, offset, 0);
3201
3202 /**
3203 * @todo: Some cores let us sample the PC without the
3204 * annoying halt/resume step; for example, ARMv7 PCSR.
3205 * Provide a way to use that more efficient mechanism.
3206 */
3207
3208 command_print(CMD_CTX, "Starting profiling. Halting and resuming the target as often as we can...");
3209
3210 static const int maxSample = 10000;
3211 uint32_t *samples = malloc(sizeof(uint32_t)*maxSample);
3212 if (samples == NULL)
3213 return ERROR_OK;
3214
3215 int numSamples = 0;
3216 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
3217 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
3218
3219 for (;;)
3220 {
3221 int retval<