[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2009 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 ***************************************************************************/
32 #ifdef HAVE_CONFIG_H
33 #include "config.h"
34 #endif
35
36 #include "target.h"
37 #include "target_type.h"
38 #include "target_request.h"
39 #include "breakpoints.h"
40 #include "time_support.h"
41 #include "register.h"
42 #include "trace.h"
43 #include "image.h"
44 #include "jtag.h"
45
46
47 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj *const *argv);
48
49 static int target_array2mem(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv);
50 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv);
51
52 /* targets */
53 extern struct target_type arm7tdmi_target;
54 extern struct target_type arm720t_target;
55 extern struct target_type arm9tdmi_target;
56 extern struct target_type arm920t_target;
57 extern struct target_type arm966e_target;
58 extern struct target_type arm926ejs_target;
59 extern struct target_type fa526_target;
60 extern struct target_type feroceon_target;
61 extern struct target_type dragonite_target;
62 extern struct target_type xscale_target;
63 extern struct target_type cortexm3_target;
64 extern struct target_type cortexa8_target;
65 extern struct target_type arm11_target;
66 extern struct target_type mips_m4k_target;
67 extern struct target_type avr_target;
68
69 struct target_type *target_types[] =
70 {
71 &arm7tdmi_target,
72 &arm9tdmi_target,
73 &arm920t_target,
74 &arm720t_target,
75 &arm966e_target,
76 &arm926ejs_target,
77 &fa526_target,
78 &feroceon_target,
79 &dragonite_target,
80 &xscale_target,
81 &cortexm3_target,
82 &cortexa8_target,
83 &arm11_target,
84 &mips_m4k_target,
85 &avr_target,
86 NULL,
87 };
88
89 struct target *all_targets = NULL;
90 struct target_event_callback *target_event_callbacks = NULL;
91 struct target_timer_callback *target_timer_callbacks = NULL;
92
93 const Jim_Nvp nvp_assert[] = {
94 { .name = "assert", NVP_ASSERT },
95 { .name = "deassert", NVP_DEASSERT },
96 { .name = "T", NVP_ASSERT },
97 { .name = "F", NVP_DEASSERT },
98 { .name = "t", NVP_ASSERT },
99 { .name = "f", NVP_DEASSERT },
100 { .name = NULL, .value = -1 }
101 };
102
103 const Jim_Nvp nvp_error_target[] = {
104 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
105 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
106 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
107 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
108 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
109 { .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
110 { .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
111 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
112 { .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
113 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
114 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
115 { .value = -1, .name = NULL }
116 };
117
118 const char *target_strerror_safe(int err)
119 {
120 const Jim_Nvp *n;
121
122 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
123 if (n->name == NULL) {
124 return "unknown";
125 } else {
126 return n->name;
127 }
128 }
129
130 static const Jim_Nvp nvp_target_event[] = {
131 { .value = TARGET_EVENT_OLD_gdb_program_config , .name = "old-gdb_program_config" },
132 { .value = TARGET_EVENT_OLD_pre_resume , .name = "old-pre_resume" },
133
134 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
135 { .value = TARGET_EVENT_HALTED, .name = "halted" },
136 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
137 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
138 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
139
140 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
141 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
142
143 /* historical name */
144
145 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
146
147 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
148 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
149 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
150 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
151 { .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
152 { .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
153 { .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
154 { .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
155 { .value = TARGET_EVENT_RESET_INIT , .name = "reset-init" },
156 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
157
158 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
159 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
160
161 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
162 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
163
164 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
165 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
166
167 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
168 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
169
170 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
171 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
172
173 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
174 { .value = TARGET_EVENT_RESUMED , .name = "resume-ok" },
175 { .value = TARGET_EVENT_RESUME_END , .name = "resume-end" },
176
177 { .name = NULL, .value = -1 }
178 };
179
180 const Jim_Nvp nvp_target_state[] = {
181 { .name = "unknown", .value = TARGET_UNKNOWN },
182 { .name = "running", .value = TARGET_RUNNING },
183 { .name = "halted", .value = TARGET_HALTED },
184 { .name = "reset", .value = TARGET_RESET },
185 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
186 { .name = NULL, .value = -1 },
187 };
188
189 const Jim_Nvp nvp_target_debug_reason [] = {
190 { .name = "debug-request" , .value = DBG_REASON_DBGRQ },
191 { .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
192 { .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
193 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
194 { .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
195 { .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
196 { .name = "undefined" , .value = DBG_REASON_UNDEFINED },
197 { .name = NULL, .value = -1 },
198 };
199
200 const Jim_Nvp nvp_target_endian[] = {
201 { .name = "big", .value = TARGET_BIG_ENDIAN },
202 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
203 { .name = "be", .value = TARGET_BIG_ENDIAN },
204 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
205 { .name = NULL, .value = -1 },
206 };
207
208 const Jim_Nvp nvp_reset_modes[] = {
209 { .name = "unknown", .value = RESET_UNKNOWN },
210 { .name = "run" , .value = RESET_RUN },
211 { .name = "halt" , .value = RESET_HALT },
212 { .name = "init" , .value = RESET_INIT },
213 { .name = NULL , .value = -1 },
214 };
215
216 const char *
217 target_state_name( struct target *t )
218 {
219 const char *cp;
220 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
221 if( !cp ){
222 LOG_ERROR("Invalid target state: %d", (int)(t->state));
223 cp = "(*BUG*unknown*BUG*)";
224 }
225 return cp;
226 }
227
228 /* determine the number of the new target */
229 static int new_target_number(void)
230 {
231 struct target *t;
232 int x;
233
234 /* number is 0 based */
235 x = -1;
236 t = all_targets;
237 while (t) {
238 if (x < t->target_number) {
239 x = t->target_number;
240 }
241 t = t->next;
242 }
243 return x + 1;
244 }
245
246 /* read a uint32_t from a buffer in target memory endianness */
247 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
248 {
249 if (target->endianness == TARGET_LITTLE_ENDIAN)
250 return le_to_h_u32(buffer);
251 else
252 return be_to_h_u32(buffer);
253 }
254
255 /* read a uint16_t from a buffer in target memory endianness */
256 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
257 {
258 if (target->endianness == TARGET_LITTLE_ENDIAN)
259 return le_to_h_u16(buffer);
260 else
261 return be_to_h_u16(buffer);
262 }
263
264 /* read a uint8_t from a buffer in target memory endianness */
265 uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
266 {
267 return *buffer & 0x0ff;
268 }
269
270 /* write a uint32_t to a buffer in target memory endianness */
271 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
272 {
273 if (target->endianness == TARGET_LITTLE_ENDIAN)
274 h_u32_to_le(buffer, value);
275 else
276 h_u32_to_be(buffer, value);
277 }
278
279 /* write a uint16_t to a buffer in target memory endianness */
280 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
281 {
282 if (target->endianness == TARGET_LITTLE_ENDIAN)
283 h_u16_to_le(buffer, value);
284 else
285 h_u16_to_be(buffer, value);
286 }
287
288 /* write a uint8_t to a buffer in target memory endianness */
289 void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
290 {
291 *buffer = value;
292 }
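/* Illustrative sketch (not part of the build): round-tripping a word through
 * target byte order with the helpers above.  'target' stands for any valid
 * struct target pointer.
 *
 *	uint8_t buf[4];
 *	target_buffer_set_u32(target, buf, 0xdeadbeef);	// host order -> target order
 *	uint32_t value = target_buffer_get_u32(target, buf);	// target order -> host order
 *	// value == 0xdeadbeef regardless of target->endianness
 */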
293
294 /* return a pointer to a configured target; id is name or number */
295 struct target *get_target(const char *id)
296 {
297 struct target *target;
298
299 /* try as Tcl target name */
300 for (target = all_targets; target; target = target->next) {
301 if (target->cmd_name == NULL)
302 continue;
303 if (strcmp(id, target->cmd_name) == 0)
304 return target;
305 }
306
307 /* It's OK to remove this fallback sometime after August 2010 or so */
308
309 /* no match, try as number */
310 unsigned num;
311 if (parse_uint(id, &num) != ERROR_OK)
312 return NULL;
313
314 for (target = all_targets; target; target = target->next) {
315 if (target->target_number == (int)num) {
316 LOG_WARNING("use '%s' as target identifier, not '%u'",
317 target->cmd_name, num);
318 return target;
319 }
320 }
321
322 return NULL;
323 }
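/* Illustrative usage (sketch only; "mychip.cpu" is a hypothetical target name):
 *
 *	struct target *t = get_target("mychip.cpu");
 *	if (t == NULL)
 *		LOG_ERROR("target 'mychip.cpu' is not configured");
 */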
324
325 /* returns a pointer to the n-th configured target */
326 static struct target *get_target_by_num(int num)
327 {
328 struct target *target = all_targets;
329
330 while (target) {
331 if (target->target_number == num) {
332 return target;
333 }
334 target = target->next;
335 }
336
337 return NULL;
338 }
339
340 struct target* get_current_target(struct command_context *cmd_ctx)
341 {
342 struct target *target = get_target_by_num(cmd_ctx->current_target);
343
344 if (target == NULL)
345 {
346 LOG_ERROR("BUG: current_target out of bounds");
347 exit(-1);
348 }
349
350 return target;
351 }
352
353 int target_poll(struct target *target)
354 {
355 int retval;
356
357 /* We can't poll until after examine */
358 if (!target_was_examined(target))
359 {
360 /* Fail silently lest we pollute the log */
361 return ERROR_FAIL;
362 }
363
364 retval = target->type->poll(target);
365 if (retval != ERROR_OK)
366 return retval;
367
368 if (target->halt_issued)
369 {
370 if (target->state == TARGET_HALTED)
371 {
372 target->halt_issued = false;
373 } else
374 {
375 long long t = timeval_ms() - target->halt_issued_time;
376 if (t>1000)
377 {
378 target->halt_issued = false;
379 LOG_INFO("Halt timed out, wake up GDB.");
380 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
381 }
382 }
383 }
384
385 return ERROR_OK;
386 }
387
388 int target_halt(struct target *target)
389 {
390 int retval;
391 /* We can't poll until after examine */
392 if (!target_was_examined(target))
393 {
394 LOG_ERROR("Target not examined yet");
395 return ERROR_FAIL;
396 }
397
398 retval = target->type->halt(target);
399 if (retval != ERROR_OK)
400 return retval;
401
402 target->halt_issued = true;
403 target->halt_issued_time = timeval_ms();
404
405 return ERROR_OK;
406 }
407
408 int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
409 {
410 int retval;
411
412 /* We can't poll until after examine */
413 if (!target_was_examined(target))
414 {
415 LOG_ERROR("Target not examined yet");
416 return ERROR_FAIL;
417 }
418
419 /* note that resume *must* be asynchronous. The CPU can halt before we poll. The CPU can
420 * even halt at the current PC as a result of a software breakpoint inserted by the
421 * application (possibly a bug in it).
422 */
423 if ((retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution)) != ERROR_OK)
424 return retval;
425
426 return retval;
427 }
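/* Illustrative sketch of the halt/wait/resume pattern built on the calls above
 * (assumes a valid 'target'; error handling abbreviated):
 *
 *	int retval = target_halt(target);
 *	if (retval == ERROR_OK)
 *		retval = target_wait_state(target, TARGET_HALTED, 500);
 *	if (retval == ERROR_OK)
 *		retval = target_resume(target, 1, 0, 1, 0);	// resume from current PC
 */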
428
429 int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
430 {
431 char buf[100];
432 int retval;
433 Jim_Nvp *n;
434 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
435 if (n->name == NULL) {
436 LOG_ERROR("invalid reset mode");
437 return ERROR_FAIL;
438 }
439
440 /* disable polling during reset to make reset event scripts
441 * more predictable, i.e. dr/irscan & pathmove in events will
442 * not have JTAG operations injected into the middle of a sequence.
443 */
444 bool save_poll = jtag_poll_get_enabled();
445
446 jtag_poll_set_enabled(false);
447
448 sprintf(buf, "ocd_process_reset %s", n->name);
449 retval = Jim_Eval(interp, buf);
450
451 jtag_poll_set_enabled(save_poll);
452
453 if (retval != JIM_OK) {
454 Jim_PrintErrorMessage(interp);
455 return ERROR_FAIL;
456 }
457
458 /* We want any events to be processed before the prompt */
459 retval = target_call_timer_callbacks_now();
460
461 return retval;
462 }
463
464 static int identity_virt2phys(struct target *target,
465 uint32_t virtual, uint32_t *physical)
466 {
467 *physical = virtual;
468 return ERROR_OK;
469 }
470
471 static int no_mmu(struct target *target, int *enabled)
472 {
473 *enabled = 0;
474 return ERROR_OK;
475 }
476
477 static int default_examine(struct target *target)
478 {
479 target_set_examined(target);
480 return ERROR_OK;
481 }
482
483 int target_examine_one(struct target *target)
484 {
485 return target->type->examine(target);
486 }
487
488 static int jtag_enable_callback(enum jtag_event event, void *priv)
489 {
490 struct target *target = priv;
491
492 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
493 return ERROR_OK;
494
495 jtag_unregister_event_callback(jtag_enable_callback, target);
496 return target_examine_one(target);
497 }
498
499
500 /* Targets that correctly implement init + examine, i.e.
501 * no communication with target during init:
502 *
503 * XScale
504 */
505 int target_examine(void)
506 {
507 int retval = ERROR_OK;
508 struct target *target;
509
510 for (target = all_targets; target; target = target->next)
511 {
512 /* defer examination, but don't skip it */
513 if (!target->tap->enabled) {
514 jtag_register_event_callback(jtag_enable_callback,
515 target);
516 continue;
517 }
518 if ((retval = target_examine_one(target)) != ERROR_OK)
519 return retval;
520 }
521 return retval;
522 }
523 const char *target_get_name(struct target *target)
524 {
525 return target->type->name;
526 }
527
528 static int target_write_memory_imp(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
529 {
530 if (!target_was_examined(target))
531 {
532 LOG_ERROR("Target not examined yet");
533 return ERROR_FAIL;
534 }
535 return target->type->write_memory_imp(target, address, size, count, buffer);
536 }
537
538 static int target_read_memory_imp(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
539 {
540 if (!target_was_examined(target))
541 {
542 LOG_ERROR("Target not examined yet");
543 return ERROR_FAIL;
544 }
545 return target->type->read_memory_imp(target, address, size, count, buffer);
546 }
547
548 static int target_soft_reset_halt_imp(struct target *target)
549 {
550 if (!target_was_examined(target))
551 {
552 LOG_ERROR("Target not examined yet");
553 return ERROR_FAIL;
554 }
555 if (!target->type->soft_reset_halt_imp) {
556 LOG_ERROR("Target %s does not support soft_reset_halt",
557 target->cmd_name);
558 return ERROR_FAIL;
559 }
560 return target->type->soft_reset_halt_imp(target);
561 }
562
563 static int target_run_algorithm_imp(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_param, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
564 {
565 if (!target_was_examined(target))
566 {
567 LOG_ERROR("Target not examined yet");
568 return ERROR_FAIL;
569 }
570 return target->type->run_algorithm_imp(target, num_mem_params, mem_params, num_reg_params, reg_param, entry_point, exit_point, timeout_ms, arch_info);
571 }
572
573 int target_read_memory(struct target *target,
574 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
575 {
576 return target->type->read_memory(target, address, size, count, buffer);
577 }
578
579 int target_read_phys_memory(struct target *target,
580 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
581 {
582 return target->type->read_phys_memory(target, address, size, count, buffer);
583 }
584
585 int target_write_memory(struct target *target,
586 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
587 {
588 return target->type->write_memory(target, address, size, count, buffer);
589 }
590
591 int target_write_phys_memory(struct target *target,
592 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
593 {
594 return target->type->write_phys_memory(target, address, size, count, buffer);
595 }
596
597 int target_bulk_write_memory(struct target *target,
598 uint32_t address, uint32_t count, uint8_t *buffer)
599 {
600 return target->type->bulk_write_memory(target, address, count, buffer);
601 }
602
603 int target_add_breakpoint(struct target *target,
604 struct breakpoint *breakpoint)
605 {
606 return target->type->add_breakpoint(target, breakpoint);
607 }
608 int target_remove_breakpoint(struct target *target,
609 struct breakpoint *breakpoint)
610 {
611 return target->type->remove_breakpoint(target, breakpoint);
612 }
613
614 int target_add_watchpoint(struct target *target,
615 struct watchpoint *watchpoint)
616 {
617 return target->type->add_watchpoint(target, watchpoint);
618 }
619 int target_remove_watchpoint(struct target *target,
620 struct watchpoint *watchpoint)
621 {
622 return target->type->remove_watchpoint(target, watchpoint);
623 }
624
625 int target_get_gdb_reg_list(struct target *target,
626 struct reg **reg_list[], int *reg_list_size)
627 {
628 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size);
629 }
630 int target_step(struct target *target,
631 int current, uint32_t address, int handle_breakpoints)
632 {
633 return target->type->step(target, current, address, handle_breakpoints);
634 }
635
636
637 int target_run_algorithm(struct target *target,
638 int num_mem_params, struct mem_param *mem_params,
639 int num_reg_params, struct reg_param *reg_param,
640 uint32_t entry_point, uint32_t exit_point,
641 int timeout_ms, void *arch_info)
642 {
643 return target->type->run_algorithm(target,
644 num_mem_params, mem_params, num_reg_params, reg_param,
645 entry_point, exit_point, timeout_ms, arch_info);
646 }
647
648 /**
649 * Reset the @c examined flag for the given target.
650 * Pure paranoia -- targets are zeroed on allocation.
651 */
652 static void target_reset_examined(struct target *target)
653 {
654 target->examined = false;
655 }
656
657
658
659 static int default_mrc(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t *value)
660 {
661 LOG_ERROR("Not implemented: %s", __func__);
662 return ERROR_FAIL;
663 }
664
665 static int default_mcr(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t value)
666 {
667 LOG_ERROR("Not implemented: %s", __func__);
668 return ERROR_FAIL;
669 }
670
671 static int arm_cp_check(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm)
672 {
673 /* basic check */
674 if (!target_was_examined(target))
675 {
676 LOG_ERROR("Target not examined yet");
677 return ERROR_FAIL;
678 }
679
680 if ((cpnum <0) || (cpnum > 15))
681 {
682 LOG_ERROR("Illegal co-processor %d", cpnum);
683 return ERROR_FAIL;
684 }
685
686 if (op1 > 7)
687 {
688 LOG_ERROR("Illegal op1");
689 return ERROR_FAIL;
690 }
691
692 if (op2 > 7)
693 {
694 LOG_ERROR("Illegal op2");
695 return ERROR_FAIL;
696 }
697
698 if (CRn > 15)
699 {
700 LOG_ERROR("Illegal CRn");
701 return ERROR_FAIL;
702 }
703
704 if (CRm > 15)
705 {
706 LOG_ERROR("Illegal CRm");
707 return ERROR_FAIL;
708 }
709
710 return ERROR_OK;
711 }
712
713 int target_mrc(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t *value)
714 {
715 int retval;
716
717 retval = arm_cp_check(target, cpnum, op1, op2, CRn, CRm);
718 if (retval != ERROR_OK)
719 return retval;
720
721 return target->type->mrc(target, cpnum, op1, op2, CRn, CRm, value);
722 }
723
724 int target_mcr(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t value)
725 {
726 int retval;
727
728 retval = arm_cp_check(target, cpnum, op1, op2, CRn, CRm);
729 if (retval != ERROR_OK)
730 return retval;
731
732 return target->type->mcr(target, cpnum, op1, op2, CRn, CRm, value);
733 }
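/* Illustrative sketch: reading the ARM Main ID register (CP15, op1=0, c0, c0, op2=0)
 * through the checked accessor above; only meaningful on targets whose mrc hook
 * is implemented.
 *
 *	uint32_t midr;
 *	if (target_mrc(target, 15, 0, 0, 0, 0, &midr) == ERROR_OK)
 *		LOG_DEBUG("MIDR: 0x%8.8" PRIx32, midr);
 */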
734
735 static int
736 err_read_phys_memory(struct target *target, uint32_t address,
737 uint32_t size, uint32_t count, uint8_t *buffer)
738 {
739 LOG_ERROR("Not implemented: %s", __func__);
740 return ERROR_FAIL;
741 }
742
743 static int
744 err_write_phys_memory(struct target *target, uint32_t address,
745 uint32_t size, uint32_t count, uint8_t *buffer)
746 {
747 LOG_ERROR("Not implemented: %s", __func__);
748 return ERROR_FAIL;
749 }
750
751 int target_init(struct command_context *cmd_ctx)
752 {
753 struct target *target;
754 int retval;
755
756 for (target = all_targets; target; target = target->next) {
757 struct target_type *type = target->type;
758
759 target_reset_examined(target);
760 if (target->type->examine == NULL)
761 {
762 target->type->examine = default_examine;
763 }
764
765 if ((retval = target->type->init_target(cmd_ctx, target)) != ERROR_OK)
766 {
767 LOG_ERROR("target '%s' init failed", target_get_name(target));
768 return retval;
769 }
770
771 /**
772 * @todo MCR/MRC are ARM-specific; don't require them in
773 * all targets, or for ARMs without coprocessors.
774 */
775 if (target->type->mcr == NULL)
776 {
777 target->type->mcr = default_mcr;
778 } else
779 {
780 /* FIX! multiple targets will generally register global commands
781 * multiple times. Only register this one if *one* of the
782 * targets need the command. Hmm... make it a command on the
783 * Jim Tcl target object?
784 */
785 register_jim(cmd_ctx, "mcr", jim_mcrmrc, "write coprocessor <cpnum> <op1> <op2> <CRn> <CRm> <value>");
786 }
787
788 if (target->type->mrc == NULL)
789 {
790 target->type->mrc = default_mrc;
791 } else
792 {
793 register_jim(cmd_ctx, "mrc", jim_mcrmrc, "read coprocessor <cpnum> <op1> <op2> <CRn> <CRm>");
794 }
795
796
797 /**
798 * @todo get rid of those *memory_imp() methods, now that all
799 * callers are using target_*_memory() accessors ... and make
800 * sure the "physical" paths handle the same issues.
801 */
802
803 /* a non-invasive way(in terms of patches) to add some code that
804 * runs before the type->write/read_memory implementation
805 */
806 target->type->write_memory_imp = target->type->write_memory;
807 target->type->write_memory = target_write_memory_imp;
808 target->type->read_memory_imp = target->type->read_memory;
809 target->type->read_memory = target_read_memory_imp;
810 target->type->soft_reset_halt_imp = target->type->soft_reset_halt;
811 target->type->soft_reset_halt = target_soft_reset_halt_imp;
812 target->type->run_algorithm_imp = target->type->run_algorithm;
813 target->type->run_algorithm = target_run_algorithm_imp;
814
815 /* Sanity-check MMU support ... stub in what we must, to help
816 * implement it in stages, but warn if we need to do so.
817 */
818 if (type->mmu) {
819 if (type->write_phys_memory == NULL) {
820 LOG_ERROR("type '%s' is missing %s",
821 type->name,
822 "write_phys_memory");
823 type->write_phys_memory = err_write_phys_memory;
824 }
825 if (type->read_phys_memory == NULL) {
826 LOG_ERROR("type '%s' is missing %s",
827 type->name,
828 "read_phys_memory");
829 type->read_phys_memory = err_read_phys_memory;
830 }
831 if (type->virt2phys == NULL) {
832 LOG_ERROR("type '%s' is missing %s",
833 type->name,
834 "virt2phys");
835 type->virt2phys = identity_virt2phys;
836 }
837
838 /* Make sure no-MMU targets all behave the same: make no
839 * distinction between physical and virtual addresses, and
840 * ensure that virt2phys() is always an identity mapping.
841 */
842 } else {
843 if (type->write_phys_memory
844 || type->read_phys_memory
845 || type->virt2phys)
846 LOG_WARNING("type '%s' has broken MMU hooks",
847 type->name);
848
849 type->mmu = no_mmu;
850 type->write_phys_memory = type->write_memory;
851 type->read_phys_memory = type->read_memory;
852 type->virt2phys = identity_virt2phys;
853 }
854 }
855
856 if (all_targets)
857 {
858 if ((retval = target_register_user_commands(cmd_ctx)) != ERROR_OK)
859 return retval;
860 if ((retval = target_register_timer_callback(handle_target, 100, 1, NULL)) != ERROR_OK)
861 return retval;
862 }
863
864 return ERROR_OK;
865 }
866
867 int target_register_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv)
868 {
869 struct target_event_callback **callbacks_p = &target_event_callbacks;
870
871 if (callback == NULL)
872 {
873 return ERROR_INVALID_ARGUMENTS;
874 }
875
876 if (*callbacks_p)
877 {
878 while ((*callbacks_p)->next)
879 callbacks_p = &((*callbacks_p)->next);
880 callbacks_p = &((*callbacks_p)->next);
881 }
882
883 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
884 (*callbacks_p)->callback = callback;
885 (*callbacks_p)->priv = priv;
886 (*callbacks_p)->next = NULL;
887
888 return ERROR_OK;
889 }
890
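/* Register a callback to run after time_ms milliseconds.  If 'periodic' is
 * non-zero it keeps firing at that interval until unregistered; otherwise it
 * runs once and is removed.  The target poll loop itself is driven this way,
 * e.g. target_register_timer_callback(handle_target, 100, 1, NULL) in
 * target_init() above.
 */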
891 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
892 {
893 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
894 struct timeval now;
895
896 if (callback == NULL)
897 {
898 return ERROR_INVALID_ARGUMENTS;
899 }
900
901 if (*callbacks_p)
902 {
903 while ((*callbacks_p)->next)
904 callbacks_p = &((*callbacks_p)->next);
905 callbacks_p = &((*callbacks_p)->next);
906 }
907
908 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
909 (*callbacks_p)->callback = callback;
910 (*callbacks_p)->periodic = periodic;
911 (*callbacks_p)->time_ms = time_ms;
912
913 gettimeofday(&now, NULL);
914 (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
915 time_ms -= (time_ms % 1000);
916 (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
917 if ((*callbacks_p)->when.tv_usec >= 1000000)
918 {
919 (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
920 (*callbacks_p)->when.tv_sec += 1;
921 }
922
923 (*callbacks_p)->priv = priv;
924 (*callbacks_p)->next = NULL;
925
926 return ERROR_OK;
927 }
928
929 int target_unregister_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv)
930 {
931 struct target_event_callback **p = &target_event_callbacks;
932 struct target_event_callback *c = target_event_callbacks;
933
934 if (callback == NULL)
935 {
936 return ERROR_INVALID_ARGUMENTS;
937 }
938
939 while (c)
940 {
941 struct target_event_callback *next = c->next;
942 if ((c->callback == callback) && (c->priv == priv))
943 {
944 *p = next;
945 free(c);
946 return ERROR_OK;
947 }
948 else
949 p = &(c->next);
950 c = next;
951 }
952
953 return ERROR_OK;
954 }
955
956 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
957 {
958 struct target_timer_callback **p = &target_timer_callbacks;
959 struct target_timer_callback *c = target_timer_callbacks;
960
961 if (callback == NULL)
962 {
963 return ERROR_INVALID_ARGUMENTS;
964 }
965
966 while (c)
967 {
968 struct target_timer_callback *next = c->next;
969 if ((c->callback == callback) && (c->priv == priv))
970 {
971 *p = next;
972 free(c);
973 return ERROR_OK;
974 }
975 else
976 p = &(c->next);
977 c = next;
978 }
979
980 return ERROR_OK;
981 }
982
983 int target_call_event_callbacks(struct target *target, enum target_event event)
984 {
985 struct target_event_callback *callback = target_event_callbacks;
986 struct target_event_callback *next_callback;
987
988 if (event == TARGET_EVENT_HALTED)
989 {
990 /* execute early halted first */
991 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
992 }
993
994 LOG_DEBUG("target event %i (%s)",
995 event,
996 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
997
998 target_handle_event(target, event);
999
1000 while (callback)
1001 {
1002 next_callback = callback->next;
1003 callback->callback(target, event, callback->priv);
1004 callback = next_callback;
1005 }
1006
1007 return ERROR_OK;
1008 }
1009
1010 static int target_timer_callback_periodic_restart(
1011 struct target_timer_callback *cb, struct timeval *now)
1012 {
1013 int time_ms = cb->time_ms;
1014 cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
1015 time_ms -= (time_ms % 1000);
1016 cb->when.tv_sec = now->tv_sec + time_ms / 1000;
1017 if (cb->when.tv_usec >= 1000000)
1018 {
1019 cb->when.tv_usec = cb->when.tv_usec - 1000000;
1020 cb->when.tv_sec += 1;
1021 }
1022 return ERROR_OK;
1023 }
1024
1025 static int target_call_timer_callback(struct target_timer_callback *cb,
1026 struct timeval *now)
1027 {
1028 cb->callback(cb->priv);
1029
1030 if (cb->periodic)
1031 return target_timer_callback_periodic_restart(cb, now);
1032
1033 return target_unregister_timer_callback(cb->callback, cb->priv);
1034 }
1035
1036 static int target_call_timer_callbacks_check_time(int checktime)
1037 {
1038 keep_alive();
1039
1040 struct timeval now;
1041 gettimeofday(&now, NULL);
1042
1043 struct target_timer_callback *callback = target_timer_callbacks;
1044 while (callback)
1045 {
1046 // cleaning up may unregister and free this callback
1047 struct target_timer_callback *next_callback = callback->next;
1048
1049 bool call_it = callback->callback &&
1050 ((!checktime && callback->periodic) ||
1051 now.tv_sec > callback->when.tv_sec ||
1052 (now.tv_sec == callback->when.tv_sec &&
1053 now.tv_usec >= callback->when.tv_usec));
1054
1055 if (call_it)
1056 {
1057 int retval = target_call_timer_callback(callback, &now);
1058 if (retval != ERROR_OK)
1059 return retval;
1060 }
1061
1062 callback = next_callback;
1063 }
1064
1065 return ERROR_OK;
1066 }
1067
1068 int target_call_timer_callbacks(void)
1069 {
1070 return target_call_timer_callbacks_check_time(1);
1071 }
1072
1073 /* invoke periodic callbacks immediately */
1074 int target_call_timer_callbacks_now(void)
1075 {
1076 return target_call_timer_callbacks_check_time(0);
1077 }
1078
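/* Illustrative sketch of the working-area life cycle (error handling abbreviated;
 * 'code' is a hypothetical host-side array of bytes to stage on the target):
 *
 *	struct working_area *wa;
 *	if (target_alloc_working_area(target, sizeof(code), &wa) == ERROR_OK)
 *	{
 *		target_write_buffer(target, wa->address, sizeof(code), code);
 *		... use the area, e.g. as scratch space for an algorithm ...
 *		target_free_working_area(target, wa);	// restores backed-up memory if enabled
 *	}
 */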
1079 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1080 {
1081 struct working_area *c = target->working_areas;
1082 struct working_area *new_wa = NULL;
1083
1084 /* Reevaluate working area address based on MMU state*/
1085 if (target->working_areas == NULL)
1086 {
1087 int retval;
1088 int enabled;
1089
1090 retval = target->type->mmu(target, &enabled);
1091 if (retval != ERROR_OK)
1092 {
1093 return retval;
1094 }
1095
1096 if (!enabled) {
1097 if (target->working_area_phys_spec) {
1098 LOG_DEBUG("MMU disabled, using physical "
1099 "address for working memory 0x%08x",
1100 (unsigned)target->working_area_phys);
1101 target->working_area = target->working_area_phys;
1102 } else {
1103 LOG_ERROR("No working memory available. "
1104 "Specify -work-area-phys to target.");
1105 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1106 }
1107 } else {
1108 if (target->working_area_virt_spec) {
1109 LOG_DEBUG("MMU enabled, using virtual "
1110 "address for working memory 0x%08x",
1111 (unsigned)target->working_area_virt);
1112 target->working_area = target->working_area_virt;
1113 } else {
1114 LOG_ERROR("No working memory available. "
1115 "Specify -work-area-virt to target.");
1116 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1117 }
1118 }
1119 }
1120
1121 /* only allocate multiples of 4 bytes */
1122 if (size % 4)
1123 {
1124 LOG_ERROR("BUG: code tried to allocate unaligned number of bytes (0x%08x), padding", ((unsigned)(size)));
1125 size = (size + 3) & (~3);
1126 }
1127
1128 /* see if there's already a matching working area */
1129 while (c)
1130 {
1131 if ((c->free) && (c->size == size))
1132 {
1133 new_wa = c;
1134 break;
1135 }
1136 c = c->next;
1137 }
1138
1139 /* if not, allocate a new one */
1140 if (!new_wa)
1141 {
1142 struct working_area **p = &target->working_areas;
1143 uint32_t first_free = target->working_area;
1144 uint32_t free_size = target->working_area_size;
1145
1146 c = target->working_areas;
1147 while (c)
1148 {
1149 first_free += c->size;
1150 free_size -= c->size;
1151 p = &c->next;
1152 c = c->next;
1153 }
1154
1155 if (free_size < size)
1156 {
1157 LOG_WARNING("not enough working area available(requested %u, free %u)",
1158 (unsigned)(size), (unsigned)(free_size));
1159 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1160 }
1161
1162 LOG_DEBUG("allocated new working area at address 0x%08x", (unsigned)first_free);
1163
1164 new_wa = malloc(sizeof(struct working_area));
1165 new_wa->next = NULL;
1166 new_wa->size = size;
1167 new_wa->address = first_free;
1168
1169 if (target->backup_working_area)
1170 {
1171 int retval;
1172 new_wa->backup = malloc(new_wa->size);
1173 if ((retval = target_read_memory(target, new_wa->address, 4, new_wa->size / 4, new_wa->backup)) != ERROR_OK)
1174 {
1175 free(new_wa->backup);
1176 free(new_wa);
1177 return retval;
1178 }
1179 }
1180 else
1181 {
1182 new_wa->backup = NULL;
1183 }
1184
1185 /* put new entry in list */
1186 *p = new_wa;
1187 }
1188
1189 /* mark as used, and return the new (reused) area */
1190 new_wa->free = 0;
1191 *area = new_wa;
1192
1193 /* user pointer */
1194 new_wa->user = area;
1195
1196 return ERROR_OK;
1197 }
1198
1199 int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1200 {
1201 if (area->free)
1202 return ERROR_OK;
1203
1204 if (restore && target->backup_working_area)
1205 {
1206 int retval;
1207 if ((retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup)) != ERROR_OK)
1208 return retval;
1209 }
1210
1211 area->free = 1;
1212
1213 /* mark user pointer invalid */
1214 *area->user = NULL;
1215 area->user = NULL;
1216
1217 return ERROR_OK;
1218 }
1219
1220 int target_free_working_area(struct target *target, struct working_area *area)
1221 {
1222 return target_free_working_area_restore(target, area, 1);
1223 }
1224
1225 /* free resources and restore memory; if restoring memory fails,
1226 * free up the resources anyway
1227 */
1228 void target_free_all_working_areas_restore(struct target *target, int restore)
1229 {
1230 struct working_area *c = target->working_areas;
1231
1232 while (c)
1233 {
1234 struct working_area *next = c->next;
1235 target_free_working_area_restore(target, c, restore);
1236
1237 if (c->backup)
1238 free(c->backup);
1239
1240 free(c);
1241
1242 c = next;
1243 }
1244
1245 target->working_areas = NULL;
1246 }
1247
1248 void target_free_all_working_areas(struct target *target)
1249 {
1250 target_free_all_working_areas_restore(target, 1);
1251 }
1252
1253 int target_arch_state(struct target *target)
1254 {
1255 int retval;
1256 if (target == NULL)
1257 {
1258 LOG_USER("No target has been configured");
1259 return ERROR_OK;
1260 }
1261
1262 LOG_USER("target state: %s", target_state_name( target ));
1263
1264 if (target->state != TARGET_HALTED)
1265 return ERROR_OK;
1266
1267 retval = target->type->arch_state(target);
1268 return retval;
1269 }
1270
1271 /* Write a buffer to target memory.  A single aligned 16- or 32-bit word is
1272 * guaranteed to use a 16- or 32-bit access respectively; anything else is
1273 * transferred as quickly as possible.
1274 */
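/* Illustrative sketch ('data' and 'len' are hypothetical caller-supplied values,
 * and 0x20000000 is just an example address):
 *
 *	int retval = target_write_buffer(target, 0x20000000, len, data);
 *	if (retval != ERROR_OK)
 *		LOG_ERROR("write to target memory failed");
 */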
1275 int target_write_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1276 {
1277 int retval;
1278 LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
1279 (int)size, (unsigned)address);
1280
1281 if (!target_was_examined(target))
1282 {
1283 LOG_ERROR("Target not examined yet");
1284 return ERROR_FAIL;
1285 }
1286
1287 if (size == 0) {
1288 return ERROR_OK;
1289 }
1290
1291 if ((address + size - 1) < address)
1292 {
1293 /* GDB can request this when e.g. PC is 0xfffffffc*/
1294 LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
1295 (unsigned)address,
1296 (unsigned)size);
1297 return ERROR_FAIL;
1298 }
1299
1300 if (((address % 2) == 0) && (size == 2))
1301 {
1302 return target_write_memory(target, address, 2, 1, buffer);
1303 }
1304
1305 /* handle unaligned head bytes */
1306 if (address % 4)
1307 {
1308 uint32_t unaligned = 4 - (address % 4);
1309
1310 if (unaligned > size)
1311 unaligned = size;
1312
1313 if ((retval = target_write_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
1314 return retval;
1315
1316 buffer += unaligned;
1317 address += unaligned;
1318 size -= unaligned;
1319 }
1320
1321 /* handle aligned words */
1322 if (size >= 4)
1323 {
1324 int aligned = size - (size % 4);
1325
1326 /* use bulk writes above a certain limit. This may have to be changed */
1327 if (aligned > 128)
1328 {
1329 if ((retval = target->type->bulk_write_memory(target, address, aligned / 4, buffer)) != ERROR_OK)
1330 return retval;
1331 }
1332 else
1333 {
1334 if ((retval = target_write_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
1335 return retval;
1336 }
1337
1338 buffer += aligned;
1339 address += aligned;
1340 size -= aligned;
1341 }
1342
1343 /* handle tail writes of less than 4 bytes */
1344 if (size > 0)
1345 {
1346 if ((retval = target_write_memory(target, address, 1, size, buffer)) != ERROR_OK)
1347 return retval;
1348 }
1349
1350 return ERROR_OK;
1351 }
1352
1353 /* Read a buffer from target memory.  A single aligned 16- or 32-bit word is
1354 * guaranteed to use a 16- or 32-bit access respectively; anything else is
1355 * transferred as quickly as possible.
1356 */
1357 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1358 {
1359 int retval;
1360 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
1361 (int)size, (unsigned)address);
1362
1363 if (!target_was_examined(target))
1364 {
1365 LOG_ERROR("Target not examined yet");
1366 return ERROR_FAIL;
1367 }
1368
1369 if (size == 0) {
1370 return ERROR_OK;
1371 }
1372
1373 if ((address + size - 1) < address)
1374 {
1375 /* GDB can request this when e.g. PC is 0xfffffffc*/
1376 LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
1377 address,
1378 size);
1379 return ERROR_FAIL;
1380 }
1381
1382 if (((address % 2) == 0) && (size == 2))
1383 {
1384 return target_read_memory(target, address, 2, 1, buffer);
1385 }
1386
1387 /* handle unaligned head bytes */
1388 if (address % 4)
1389 {
1390 uint32_t unaligned = 4 - (address % 4);
1391
1392 if (unaligned > size)
1393 unaligned = size;
1394
1395 if ((retval = target_read_memory(target, address, 1, unaligned, buffer)) != ERROR_OK)
1396 return retval;
1397
1398 buffer += unaligned;
1399 address += unaligned;
1400 size -= unaligned;
1401 }
1402
1403 /* handle aligned words */
1404 if (size >= 4)
1405 {
1406 int aligned = size - (size % 4);
1407
1408 if ((retval = target_read_memory(target, address, 4, aligned / 4, buffer)) != ERROR_OK)
1409 return retval;
1410
1411 buffer += aligned;
1412 address += aligned;
1413 size -= aligned;
1414 }
1415
1416 /* prevent byte access when possible (avoid AHB access limitations in some cases) */
1417 if (size >= 2)
1418 {
1419 int aligned = size - (size % 2);
1420 retval = target_read_memory(target, address, 2, aligned / 2, buffer);
1421 if (retval != ERROR_OK)
1422 return retval;
1423
1424 buffer += aligned;
1425 address += aligned;
1426 size -= aligned;
1427 }
1428 /* handle tail reads of less than 4 bytes */
1429 if (size > 0)
1430 {
1431 if ((retval = target_read_memory(target, address, 1, size, buffer)) != ERROR_OK)
1432 return retval;
1433 }
1434
1435 return ERROR_OK;
1436 }
1437
1438 int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
1439 {
1440 uint8_t *buffer;
1441 int retval;
1442 uint32_t i;
1443 uint32_t checksum = 0;
1444 if (!target_was_examined(target))
1445 {
1446 LOG_ERROR("Target not examined yet");
1447 return ERROR_FAIL;
1448 }
1449
1450 if ((retval = target->type->checksum_memory(target, address,
1451 size, &checksum)) != ERROR_OK)
1452 {
1453 buffer = malloc(size);
1454 if (buffer == NULL)
1455 {
1456 LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
1457 return ERROR_INVALID_ARGUMENTS;
1458 }
1459 retval = target_read_buffer(target, address, size, buffer);
1460 if (retval != ERROR_OK)
1461 {
1462 free(buffer);
1463 return retval;
1464 }
1465
1466 /* convert to target endianness */
1467 for (i = 0; i < (size/sizeof(uint32_t)); i++)
1468 {
1469 uint32_t target_data;
1470 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
1471 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
1472 }
1473
1474 retval = image_calculate_checksum(buffer, size, &checksum);
1475 free(buffer);
1476 }
1477
1478 *crc = checksum;
1479
1480 return retval;
1481 }
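/* Illustrative sketch: verifying a region against a host-side buffer using the
 * same checksum routine as the fallback above ('addr', 'len' and 'host_buf' are
 * placeholders for caller-supplied values):
 *
 *	uint32_t target_crc, host_crc;
 *	if (target_checksum_memory(target, addr, len, &target_crc) == ERROR_OK
 *			&& image_calculate_checksum(host_buf, len, &host_crc) == ERROR_OK)
 *		LOG_DEBUG("checksums %s", (target_crc == host_crc) ? "match" : "differ");
 */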
1482
1483 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
1484 {
1485 int retval;
1486 if (!target_was_examined(target))
1487 {
1488 LOG_ERROR("Target not examined yet");
1489 return ERROR_FAIL;
1490 }
1491
1492 if (target->type->blank_check_memory == 0)
1493 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1494
1495 retval = target->type->blank_check_memory(target, address, size, blank);
1496
1497 return retval;
1498 }
1499
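/* Illustrative read-modify-write sketch using the convenience accessors below
 * (the register address 0x40000000 is hypothetical):
 *
 *	uint32_t reg;
 *	if (target_read_u32(target, 0x40000000, &reg) == ERROR_OK)
 *		target_write_u32(target, 0x40000000, reg | 0x1);
 */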
1500 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
1501 {
1502 uint8_t value_buf[4];
1503 if (!target_was_examined(target))
1504 {
1505 LOG_ERROR("Target not examined yet");
1506 return ERROR_FAIL;
1507 }
1508
1509 int retval = target_read_memory(target, address, 4, 1, value_buf);
1510
1511 if (retval == ERROR_OK)
1512 {
1513 *value = target_buffer_get_u32(target, value_buf);
1514 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1515 address,
1516 *value);
1517 }
1518 else
1519 {
1520 *value = 0x0;
1521 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1522 address);
1523 }
1524
1525 return retval;
1526 }
1527
1528 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
1529 {
1530 uint8_t value_buf[2];
1531 if (!target_was_examined(target))
1532 {
1533 LOG_ERROR("Target not examined yet");
1534 return ERROR_FAIL;
1535 }
1536
1537 int retval = target_read_memory(target, address, 2, 1, value_buf);
1538
1539 if (retval == ERROR_OK)
1540 {
1541 *value = target_buffer_get_u16(target, value_buf);
1542 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
1543 address,
1544 *value);
1545 }
1546 else
1547 {
1548 *value = 0x0;
1549 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1550 address);
1551 }
1552
1553 return retval;
1554 }
1555
1556 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
1557 {
1558 int retval = target_read_memory(target, address, 1, 1, value);
1559 if (!target_was_examined(target))
1560 {
1561 LOG_ERROR("Target not examined yet");
1562 return ERROR_FAIL;
1563 }
1564
1565 if (retval == ERROR_OK)
1566 {
1567 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1568 address,
1569 *value);
1570 }
1571 else
1572 {
1573 *value = 0x0;
1574 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
1575 address);
1576 }
1577
1578 return retval;
1579 }
1580
1581 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
1582 {
1583 int retval;
1584 uint8_t value_buf[4];
1585 if (!target_was_examined(target))
1586 {
1587 LOG_ERROR("Target not examined yet");
1588 return ERROR_FAIL;
1589 }
1590
1591 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
1592 address,
1593 value);
1594
1595 target_buffer_set_u32(target, value_buf, value);
1596 if ((retval = target_write_memory(target, address, 4, 1, value_buf)) != ERROR_OK)
1597 {
1598 LOG_DEBUG("failed: %i", retval);
1599 }
1600
1601 return retval;
1602 }
1603
1604 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
1605 {
1606 int retval;
1607 uint8_t value_buf[2];
1608 if (!target_was_examined(target))
1609 {
1610 LOG_ERROR("Target not examined yet");
1611 return ERROR_FAIL;
1612 }
1613
1614 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
1615 address,
1616 value);
1617
1618 target_buffer_set_u16(target, value_buf, value);
1619 if ((retval = target_write_memory(target, address, 2, 1, value_buf)) != ERROR_OK)
1620 {
1621 LOG_DEBUG("failed: %i", retval);
1622 }
1623
1624 return retval;
1625 }
1626
1627 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
1628 {
1629 int retval;
1630 if (!target_was_examined(target))
1631 {
1632 LOG_ERROR("Target not examined yet");
1633 return ERROR_FAIL;
1634 }
1635
1636 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
1637 address, value);
1638
1639 if ((retval = target_write_memory(target, address, 1, 1, &value)) != ERROR_OK)
1640 {
1641 LOG_DEBUG("failed: %i", retval);
1642 }
1643
1644 return retval;
1645 }
1646
1647 COMMAND_HANDLER(handle_targets_command)
1648 {
1649 struct target *target = all_targets;
1650
1651 if (argc == 1)
1652 {
1653 target = get_target(args[0]);
1654 if (target == NULL) {
1655 command_print(cmd_ctx,"Target: %s is unknown, try one of:\n", args[0]);
1656 goto DumpTargets;
1657 }
1658 if (!target->tap->enabled) {
1659 command_print(cmd_ctx,"Target: TAP %s is disabled, "
1660 "can't be the current target\n",
1661 target->tap->dotted_name);
1662 return ERROR_FAIL;
1663 }
1664
1665 cmd_ctx->current_target = target->target_number;
1666 return ERROR_OK;
1667 }
1668 DumpTargets:
1669
1670 target = all_targets;
1671 command_print(cmd_ctx, " TargetName Type Endian TapName State ");
1672 command_print(cmd_ctx, "-- ------------------ ---------- ------ ------------------ ------------");
1673 while (target)
1674 {
1675 const char *state;
1676 char marker = ' ';
1677
1678 if (target->tap->enabled)
1679 state = target_state_name( target );
1680 else
1681 state = "tap-disabled";
1682
1683 if (cmd_ctx->current_target == target->target_number)
1684 marker = '*';
1685
1686 /* keep columns lined up to match the headers above */
1687 command_print(cmd_ctx, "%2d%c %-18s %-10s %-6s %-18s %s",
1688 target->target_number,
1689 marker,
1690 target->cmd_name,
1691 target_get_name(target),
1692 Jim_Nvp_value2name_simple(nvp_target_endian,
1693 target->endianness)->name,
1694 target->tap->dotted_name,
1695 state);
1696 target = target->next;
1697 }
1698
1699 return ERROR_OK;
1700 }
1701
1702 /* handle_target() runs periodically (registered at 100ms in target_init); it checks for srst and power-dropout transitions and runs the corresponding event handlers (srst_asserted, srst_deasserted, power_dropout, power_restore). */
1703
1704 static int powerDropout;
1705 static int srstAsserted;
1706
1707 static int runPowerRestore;
1708 static int runPowerDropout;
1709 static int runSrstAsserted;
1710 static int runSrstDeasserted;
1711
1712 static int sense_handler(void)
1713 {
1714 static int prevSrstAsserted = 0;
1715 static int prevPowerdropout = 0;
1716
1717 int retval;
1718 if ((retval = jtag_power_dropout(&powerDropout)) != ERROR_OK)
1719 return retval;
1720
1721 int powerRestored;
1722 powerRestored = prevPowerdropout && !powerDropout;
1723 if (powerRestored)
1724 {
1725 runPowerRestore = 1;
1726 }
1727
1728 long long current = timeval_ms();
1729 static long long lastPower = 0;
1730 int waitMore = lastPower + 2000 > current;
1731 if (powerDropout && !waitMore)
1732 {
1733 runPowerDropout = 1;
1734 lastPower = current;
1735 }
1736
1737 if ((retval = jtag_srst_asserted(&srstAsserted)) != ERROR_OK)
1738 return retval;
1739
1740 int srstDeasserted;
1741 srstDeasserted = prevSrstAsserted && !srstAsserted;
1742
1743 static long long lastSrst = 0;
1744 waitMore = lastSrst + 2000 > current;
1745 if (srstDeasserted && !waitMore)
1746 {
1747 runSrstDeasserted = 1;
1748 lastSrst = current;
1749 }
1750
1751 if (!prevSrstAsserted && srstAsserted)
1752 {
1753 runSrstAsserted = 1;
1754 }
1755
1756 prevSrstAsserted = srstAsserted;
1757 prevPowerdropout = powerDropout;
1758
1759 if (srstDeasserted || powerRestored)
1760 {
1761 /* Other than logging the event we can't do anything here.
1762 * Issuing a reset is a particularly bad idea as we might
1763 * be inside a reset already.
1764 */
1765 }
1766
1767 return ERROR_OK;
1768 }
1769
1770 static void target_call_event_callbacks_all(enum target_event e) {
1771 struct target *target;
1772 target = all_targets;
1773 while (target) {
1774 target_call_event_callbacks(target, e);
1775 target = target->next;
1776 }
1777 }
1778
1779 /* process target state changes */
1780 int handle_target(void *priv)
1781 {
1782 int retval = ERROR_OK;
1783
1784 /* we do not want to recurse here... */
1785 static int recursive = 0;
1786 if (! recursive)
1787 {
1788 recursive = 1;
1789 sense_handler();
1790 /* danger! running these procedures can trigger srst assertions and power dropouts.
1791 * We need to avoid an infinite loop/recursion here and we do that by
1792 * clearing the flags after running these events.
1793 */
1794 int did_something = 0;
1795 if (runSrstAsserted)
1796 {
1797 target_call_event_callbacks_all(TARGET_EVENT_GDB_HALT);
1798 Jim_Eval(interp, "srst_asserted");
1799 did_something = 1;
1800 }
1801 if (runSrstDeasserted)
1802 {
1803 Jim_Eval(interp, "srst_deasserted");
1804 did_something = 1;
1805 }
1806 if (runPowerDropout)
1807 {
1808 target_call_event_callbacks_all(TARGET_EVENT_GDB_HALT);
1809 Jim_Eval(interp, "power_dropout");
1810 did_something = 1;
1811 }
1812 if (runPowerRestore)
1813 {
1814 Jim_Eval(interp, "power_restore");
1815 did_something = 1;
1816 }
1817
1818 if (did_something)
1819 {
1820 /* clear detect flags */
1821 sense_handler();
1822 }
1823
1824 /* clear action flags */
1825
1826 runSrstAsserted = 0;
1827 runSrstDeasserted = 0;
1828 runPowerRestore = 0;
1829 runPowerDropout = 0;
1830
1831 recursive = 0;
1832 }
1833
1834 /* Poll targets for state changes unless that's globally disabled.
1835 * Skip targets that are currently disabled.
1836 */
1837 for (struct target *target = all_targets;
1838 is_jtag_poll_safe() && target;
1839 target = target->next)
1840 {
1841 if (!target->tap->enabled)
1842 continue;
1843
1844 /* only poll target if we've got power and srst isn't asserted */
1845 if (!powerDropout && !srstAsserted)
1846 {
1847 /* polling may fail silently until the target has been examined */
1848 if ((retval = target_poll(target)) != ERROR_OK)
1849 {
1850 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1851 return retval;
1852 }
1853 }
1854 }
1855
1856 return retval;
1857 }
1858
1859 COMMAND_HANDLER(handle_reg_command)
1860 {
1861 struct target *target;
1862 struct reg *reg = NULL;
1863 int count = 0;
1864 char *value;
1865
1866 LOG_DEBUG("-");
1867
1868 target = get_current_target(cmd_ctx);
1869
1870 /* list all available registers for the current target */
1871 if (argc == 0)
1872 {
1873 struct reg_cache *cache = target->reg_cache;
1874
1875 count = 0;
1876 while (cache)
1877 {
1878 int i;
1879
1880 command_print(cmd_ctx, "===== %s", cache->name);
1881
1882 for (i = 0, reg = cache->reg_list;
1883 i < cache->num_regs;
1884 i++, reg++, count++)
1885 {
1886 /* only print cached values if they are valid */
1887 if (reg->valid) {
1888 value = buf_to_str(reg->value,
1889 reg->size, 16);
1890 command_print(cmd_ctx,
1891 "(%i) %s (/%" PRIu32 "): 0x%s%s",
1892 count, reg->name,
1893 reg->size, value,
1894 reg->dirty
1895 ? " (dirty)"
1896 : "");
1897 free(value);
1898 } else {
1899 command_print(cmd_ctx, "(%i) %s (/%" PRIu32 ")",
1900 count, reg->name,
1901 reg->size) ;
1902 }
1903 }
1904 cache = cache->next;
1905 }
1906
1907 return ERROR_OK;
1908 }
1909
1910 /* access a single register by its ordinal number */
1911 if ((args[0][0] >= '0') && (args[0][0] <= '9'))
1912 {
1913 unsigned num;
1914 COMMAND_PARSE_NUMBER(uint, args[0], num);
1915
1916 struct reg_cache *cache = target->reg_cache;
1917 count = 0;
1918 while (cache)
1919 {
1920 int i;
1921 for (i = 0; i < cache->num_regs; i++)
1922 {
1923 if (count++ == (int)num)
1924 {
1925 reg = &cache->reg_list[i];
1926 break;
1927 }
1928 }
1929 if (reg)
1930 break;
1931 cache = cache->next;
1932 }
1933
1934 if (!reg)
1935 {
1936 command_print(cmd_ctx, "%i is out of bounds, the current target has only %i registers (0 - %i)", num, count, count - 1);
1937 return ERROR_OK;
1938 }
1939 } else /* access a single register by its name */
1940 {
1941 reg = register_get_by_name(target->reg_cache, args[0], 1);
1942
1943 if (!reg)
1944 {
1945 command_print(cmd_ctx, "register %s not found in current target", args[0]);
1946 return ERROR_OK;
1947 }
1948 }
1949
1950 /* display a register */
1951 if ((argc == 1) || ((argc == 2) && !((args[1][0] >= '0') && (args[1][0] <= '9'))))
1952 {
1953 if ((argc == 2) && (strcmp(args[1], "force") == 0))
1954 reg->valid = 0;
1955
1956 if (reg->valid == 0)
1957 {
1958 struct reg_arch_type *arch_type = register_get_arch_type(reg->arch_type);
1959 arch_type->get(reg);
1960 }
1961 value = buf_to_str(reg->value, reg->size, 16);
1962 command_print(cmd_ctx, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
1963 free(value);
1964 return ERROR_OK;
1965 }
1966
1967 /* set register value */
1968 if (argc == 2)
1969 {
1970 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
1971 str_to_buf(args[1], strlen(args[1]), buf, reg->size, 0);
1972
1973 struct reg_arch_type *arch_type = register_get_arch_type(reg->arch_type);
1974 arch_type->set(reg, buf);
1975
1976 value = buf_to_str(reg->value, reg->size, 16);
1977 command_print(cmd_ctx, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
1978 free(value);
1979
1980 free(buf);
1981
1982 return ERROR_OK;
1983 }
1984
1985 command_print(cmd_ctx, "usage: reg <#|name> [value]");
1986
1987 return ERROR_OK;
1988 }
1989
1990 COMMAND_HANDLER(handle_poll_command)
1991 {
1992 int retval = ERROR_OK;
1993 struct target *target = get_current_target(cmd_ctx);
1994
1995 if (argc == 0)
1996 {
1997 command_print(cmd_ctx, "background polling: %s",
1998 jtag_poll_get_enabled() ? "on" : "off");
1999 command_print(cmd_ctx, "TAP: %s (%s)",
2000 target->tap->dotted_name,
2001 target->tap->enabled ? "enabled" : "disabled");
2002 if (!target->tap->enabled)
2003 return ERROR_OK;
2004 if ((retval = target_poll(target)) != ERROR_OK)
2005 return retval;
2006 if ((retval = target_arch_state(target)) != ERROR_OK)
2007 return retval;
2008
2009 }
2010 else if (argc == 1)
2011 {
2012 if (strcmp(args[0], "on") == 0)
2013 {
2014 jtag_poll_set_enabled(true);
2015 }
2016 else if (strcmp(args[0], "off") == 0)
2017 {
2018 jtag_poll_set_enabled(false);
2019 }
2020 else
2021 {
2022 command_print(cmd_ctx, "arg is \"on\" or \"off\"");
2023 }
2024 } else
2025 {
2026 return ERROR_COMMAND_SYNTAX_ERROR;
2027 }
2028
2029 return retval;
2030 }
2031
2032 COMMAND_HANDLER(handle_wait_halt_command)
2033 {
2034 if (argc > 1)
2035 return ERROR_COMMAND_SYNTAX_ERROR;
2036
2037 unsigned ms = 5000;
2038 if (1 == argc)
2039 {
2040 int retval = parse_uint(args[0], &ms);
2041 if (ERROR_OK != retval)
2042 {
2043 command_print(cmd_ctx, "usage: %s [seconds]", CMD_NAME);
2044 return ERROR_COMMAND_SYNTAX_ERROR;
2045 }
2046 // convert seconds (given) to milliseconds (needed)
2047 ms *= 1000;
2048 }
2049
2050 struct target *target = get_current_target(cmd_ctx);
2051 return target_wait_state(target, TARGET_HALTED, ms);
2052 }
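
/* Illustrative usage (editor's example): the optional argument is given in
 * seconds and converted to milliseconds above, e.g.
 *   wait_halt 10   # wait up to 10 seconds for the target to halt
 */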
2053
2054 /* wait for target state to change. The trick here is to have a low
2055 * latency for short waits and not to suck up all the CPU time
2056 * on longer waits.
2057 *
2058 * After 500ms, keep_alive() is invoked
2059 */
2060 int target_wait_state(struct target *target, enum target_state state, int ms)
2061 {
2062 int retval;
2063 long long then = 0, cur;
2064 int once = 1;
2065
2066 for (;;)
2067 {
2068 if ((retval = target_poll(target)) != ERROR_OK)
2069 return retval;
2070 if (target->state == state)
2071 {
2072 break;
2073 }
2074 cur = timeval_ms();
2075 if (once)
2076 {
2077 once = 0;
2078 then = timeval_ms();
2079 LOG_DEBUG("waiting for target %s...",
2080 Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
2081 }
2082
2083 if (cur-then > 500)
2084 {
2085 keep_alive();
2086 }
2087
2088 if ((cur-then) > ms)
2089 {
2090 LOG_ERROR("timed out while waiting for target %s",
2091 Jim_Nvp_value2name_simple(nvp_target_state,state)->name);
2092 return ERROR_FAIL;
2093 }
2094 }
2095
2096 return ERROR_OK;
2097 }
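
/* Minimal usage sketch for target_wait_state() (editor's example): callers
 * typically wait for a halt with a bounded timeout, as the wait_halt handler
 * above does, e.g.
 *
 *   int retval = target_wait_state(target, TARGET_HALTED, 500);
 *   if (retval != ERROR_OK)
 *       return retval;
 */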
2098
2099 COMMAND_HANDLER(handle_halt_command)
2100 {
2101 LOG_DEBUG("-");
2102
2103 struct target *target = get_current_target(cmd_ctx);
2104 int retval = target_halt(target);
2105 if (ERROR_OK != retval)
2106 return retval;
2107
2108 if (argc == 1)
2109 {
2110 unsigned wait;
2111 retval = parse_uint(args[0], &wait);
2112 if (ERROR_OK != retval)
2113 return ERROR_COMMAND_SYNTAX_ERROR;
2114 if (!wait)
2115 return ERROR_OK;
2116 }
2117
2118 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2119 }
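
/* Illustrative usage (editor's example):
 *   halt       # request a halt, then wait (default 5000 ms) for TARGET_HALTED
 *   halt 0     # request a halt but do not wait
 *   halt 10    # request a halt and wait up to 10 seconds
 */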
2120
2121 COMMAND_HANDLER(handle_soft_reset_halt_command)
2122 {
2123 struct target *target = get_current_target(cmd_ctx);
2124
2125 LOG_USER("requesting target halt and executing a soft reset");
2126
2127 int retval = target->type->soft_reset_halt(target);
2128
2129 return retval;
2130 }
2131
2132 COMMAND_HANDLER(handle_reset_command)
2133 {
2134 if (argc > 1)
2135 return ERROR_COMMAND_SYNTAX_ERROR;
2136
2137 enum target_reset_mode reset_mode = RESET_RUN;
2138 if (argc == 1)
2139 {
2140 const Jim_Nvp *n;
2141 n = Jim_Nvp_name2value_simple(nvp_reset_modes, args[0]);
2142 if ((n->name == NULL) || (n->value == RESET_UNKNOWN)) {
2143 return ERROR_COMMAND_SYNTAX_ERROR;
2144 }
2145 reset_mode = n->value;
2146 }
2147
2148 /* reset *all* targets */
2149 return target_process_reset(cmd_ctx, reset_mode);
2150 }
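
/* Illustrative usage (editor's example; the accepted mode names come from
 * nvp_reset_modes, defined earlier in this file - "halt" and "init" are the
 * usual ones besides the default):
 *   reset          # same as "reset run": reset and let all targets run
 *   reset halt     # reset and immediately halt
 */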
2151
2152
2153 COMMAND_HANDLER(handle_resume_command)
2154 {
2155 int current = 1;
2156 if (argc > 1)
2157 return ERROR_COMMAND_SYNTAX_ERROR;
2158
2159 struct target *target = get_current_target(cmd_ctx);
2160 target_handle_event(target, TARGET_EVENT_OLD_pre_resume);
2161
2162 /* with no args, resume from current pc, addr = 0,
2163 * with one argument, addr = args[0],
2164 * handle breakpoints, not debugging */
2165 uint32_t addr = 0;
2166 if (argc == 1)
2167 {
2168 COMMAND_PARSE_NUMBER(u32, args[0], addr);
2169 current = 0;
2170 }
2171
2172 return target_resume(target, current, addr, 1, 0);
2173 }
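
/* Illustrative usage (editor's example):
 *   resume               # resume from the current pc
 *   resume 0x00008000    # resume execution at address 0x00008000
 */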
2174
2175 COMMAND_HANDLER(handle_step_command)
2176 {
2177 if (argc > 1)
2178 return ERROR_COMMAND_SYNTAX_ERROR;
2179
2180 LOG_DEBUG("-");
2181
2182 /* with no args, step from current pc, addr = 0,
2183 * with one argument addr = args[0],
2184 * handle breakpoints, debugging */
2185 uint32_t addr = 0;
2186 int current_pc = 1;
2187 if (argc == 1)
2188 {
2189 COMMAND_PARSE_NUMBER(u32, args[0], addr);
2190 current_pc = 0;
2191 }
2192
2193 struct target *target = get_current_target(cmd_ctx);
2194
2195 return target->type->step(target, current_pc, addr, 1);
2196 }
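
/* Illustrative usage (editor's example):
 *   step                 # single-step from the current pc
 *   step 0x00008004      # single-step the instruction at address 0x00008004
 */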
2197
2198 static void handle_md_output(struct command_context *cmd_ctx,
2199 struct target *target, uint32_t address, unsigned size,
2200 unsigned count, const uint8_t *buffer)
2201 {
2202 const unsigned line_bytecnt = 32;
2203 unsigned line_modulo = line_bytecnt / size;
2204
2205 char output[line_bytecnt * 4 + 1];
2206 unsigned output_len = 0;
2207
2208 const char *value_fmt;
2209 switch (size) {
2210 case 4: value_fmt = "%8.8x "; break;
2211 case 2: value_fmt = "%4.4x "; break;
2212 case 1: value_fmt = "%2.2x "; break;
2213 default:
2214 LOG_ERROR("invalid memory read size: %u", size);
2215 exit(-1);
2216 }
2217
2218 for (unsigned i = 0; i < count; i++)
2219 {
2220 if (i % line_modulo == 0)
2221 {
2222 output_len += snprintf(output + output_len,
2223 sizeof(output) - output_len,
2224 "0x%8.8x: ",
2225 (unsigned)(address + (i*size)));
2226 }
2227
2228 uint32_t value = 0;
2229 const uint8_t *value_ptr = buffer + i * size;
2230 switch (size) {
2231 case 4: value = target_buffer_get_u32(target, value_ptr); break;
2232 case 2: value = target_buffer_get_u16(target, value_ptr); break;
2233 case 1: value = *value_ptr;
2234 }
2235 output_len += snprintf(output + output_len,
2236 sizeof(output) - output_len,
2237 value_fmt, value);
2238
2239 if ((i % line_modulo == line_modulo - 1) || (i == count - 1))
2240 {
2241 command_print(cmd_ctx, "%s", output);
2242 output_len = 0;
2243 }
2244 }
2245 }
2246
2247 COMMAND_HANDLER(handle_md_command)
2248 {
2249 if (argc < 1)
2250 return ERROR_COMMAND_SYNTAX_ERROR;
2251
2252 unsigned size = 0;
2253 const char *cmd_name = CMD_NAME;
2254 switch (cmd_name[6]) {
2255 case 'w': size = 4; break;
2256 case 'h': size = 2; break;
2257 case 'b': size = 1; break;
2258 default: return ERROR_COMMAND_SYNTAX_ERROR;
2259 }
2260
2261 bool physical = strcmp(args[0], "phys") == 0;
2262 int (*fn)(struct target *target,
2263 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
2264 if (physical)
2265 {
2266 argc--;
2267 args++;
2268 fn = target_read_phys_memory;
2269 } else
2270 {
2271 fn = target_read_memory;
2272 }
2273 if ((argc < 1) || (argc > 2))
2274 {
2275 return ERROR_COMMAND_SYNTAX_ERROR;
2276 }
2277
2278 uint32_t address;
2279 COMMAND_PARSE_NUMBER(u32, args[0], address);
2280
2281 unsigned count = 1;
2282 if (argc == 2)
2283 COMMAND_PARSE_NUMBER(uint, args[1], count);
2284
2285 uint8_t *buffer = calloc(count, size);
2286
2287 struct target *target = get_current_target(cmd_ctx);
2288 int retval = fn(target, address, size, count, buffer);
2289 if (ERROR_OK == retval)
2290 handle_md_output(cmd_ctx, target, address, size, count, buffer);
2291
2292 free(buffer);
2293
2294 return retval;
2295 }
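
/* Illustrative usage (editor's example; the access size is chosen above from
 * the command name, and "phys" requests a physical-address read):
 *   mdw 0x00000000 8        # display 8 32-bit words
 *   mdh 0x00000000 4        # display 4 16-bit halfwords
 *   mdb phys 0x00000000 16  # display 16 bytes at a physical address
 */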
2296
2297 COMMAND_HANDLER(handle_mw_command)
2298 {
2299 if (argc < 2)
2300 {
2301 return ERROR_COMMAND_SYNTAX_ERROR;
2302 }
2303 bool physical = strcmp(args[0], "phys") == 0;
2304 int (*fn)(struct target *target,
2305 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
2306 const char *cmd_name = CMD_NAME;
2307 if (physical)
2308 {
2309 argc--;
2310 args++;
2311 fn = target_write_phys_memory;
2312 } else
2313 {
2314 fn = target_write_memory;
2315 }
2316 if ((argc < 2) || (argc > 3))
2317 return ERROR_COMMAND_SYNTAX_ERROR;
2318
2319 uint32_t address;
2320 COMMAND_PARSE_NUMBER(u32, args[0], address);
2321
2322 uint32_t value;
2323 COMMAND_PARSE_NUMBER(u32, args[1], value);
2324
2325 unsigned count = 1;
2326 if (argc == 3)
2327 COMMAND_PARSE_NUMBER(uint, args[2], count);
2328
2329 struct target *target = get_current_target(cmd_ctx);
2330 unsigned wordsize;
2331 uint8_t value_buf[4];
2332 switch (cmd_name[6])
2333 {
2334 case 'w':
2335 wordsize = 4;
2336 target_buffer_set_u32(target, value_buf, value);
2337 break;
2338 case 'h':
2339 wordsize = 2;
2340 target_buffer_set_u16(target, value_buf, value);
2341 break;
2342 case 'b':
2343 wordsize = 1;
2344 value_buf[0] = value;
2345 break;
2346 default:
2347 return ERROR_COMMAND_SYNTAX_ERROR;
2348 }
2349 for (unsigned i = 0; i < count; i++)
2350 {
2351 int retval = fn(target,
2352 address + i * wordsize, wordsize, 1, value_buf);
2353 if (ERROR_OK != retval)
2354 return retval;
2355 keep_alive();
2356 }
2357
2358 return ERROR_OK;
2359
2360 }
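
/* Illustrative usage (editor's example):
 *   mww 0x20000000 0xdeadbeef     # write one 32-bit word
 *   mwh 0x20000000 0xbeef 4       # write the halfword value to 4 consecutive halfwords
 *   mwb phys 0x20000000 0xff      # write one byte at a physical address
 */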
2361
2362 static COMMAND_HELPER(parse_load_image_command_args, struct image *image,
2363 uint32_t *min_address, uint32_t *max_address)
2364 {
2365 if (argc < 1 || argc > 5)
2366 return ERROR_COMMAND_SYNTAX_ERROR;
2367
2368 /* a base address isn't always necessary,
2369 * default to 0x0 (i.e. don't relocate) */
2370 if (argc >= 2)
2371 {
2372 uint32_t addr;
2373 COMMAND_PARSE_NUMBER(u32, args[1], addr);
2374 image->base_address = addr;
2375 image->base_address_set = 1;
2376 }
2377 else
2378 image->base_address_set = 0;
2379
2380 image->start_address_set = 0;
2381
2382 if (argc >= 4)
2383 {
2384 COMMAND_PARSE_NUMBER(u32, args[3], *min_address);
2385 }
2386 if (argc == 5)
2387 {
2388 COMMAND_PARSE_NUMBER(u32, args[4], *max_address);
2389 // use size (given) to find max (required)
2390 *max_address += *min_address;
2391 }
2392
2393 if (*min_address > *max_address)
2394 return ERROR_COMMAND_SYNTAX_ERROR;
2395
2396 return ERROR_OK;
2397 }
2398
2399 COMMAND_HANDLER(handle_load_image_command)
2400 {
2401 uint8_t *buffer;
2402 uint32_t buf_cnt;
2403 uint32_t image_size;
2404 uint32_t min_address = 0;
2405 uint32_t max_address = 0xffffffff;
2406 int i;
2407 struct image image;
2408
2409 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_args,
2410 &image, &min_address, &max_address);
2411 if (ERROR_OK != retval)
2412 return retval;
2413
2414 struct target *target = get_current_target(cmd_ctx);
2415
2416 struct duration bench;
2417 duration_start(&bench);
2418
2419 if (image_open(&image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
2420 {
2421 return ERROR_OK;
2422 }
2423
2424 image_size = 0x0;
2425 retval = ERROR_OK;
2426 for (i = 0; i < image.num_sections; i++)
2427 {
2428 buffer = malloc(image.sections[i].size);
2429 if (buffer == NULL)
2430 {
2431 command_print(cmd_ctx,
2432 "error allocating buffer for section (%d bytes)",
2433 (int)(image.sections[i].size));
2434 break;
2435 }
2436
2437 if ((retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt)) != ERROR_OK)
2438 {
2439 free(buffer);
2440 break;
2441 }
2442
2443 uint32_t offset = 0;
2444 uint32_t length = buf_cnt;
2445
2446 /* DANGER!!! beware of unsigned comparison here!!! */
2447
2448 if ((image.sections[i].base_address + buf_cnt >= min_address)&&
2449 (image.sections[i].base_address < max_address))
2450 {
2451 if (image.sections[i].base_address < min_address)
2452 {
2453 /* clip addresses below */
2454 offset += min_address-image.sections[i].base_address;
2455 length -= offset;
2456 }
2457
2458 if (image.sections[i].base_address + buf_cnt > max_address)
2459 {
2460 length -= (image.sections[i].base_address + buf_cnt)-max_address;
2461 }
2462
2463 if ((retval = target_write_buffer(target, image.sections[i].base_address + offset, length, buffer + offset)) != ERROR_OK)
2464 {
2465 free(buffer);
2466 break;
2467 }
2468 image_size += length;
2469 command_print(cmd_ctx, "%u bytes written at address 0x%8.8" PRIx32 "",
2470 (unsigned int)length,
2471 image.sections[i].base_address + offset);
2472 }
2473
2474 free(buffer);
2475 }
2476
2477 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2478 {
2479 command_print(cmd_ctx, "downloaded %" PRIu32 " bytes "
2480 "in %fs (%0.3f kb/s)", image_size,
2481 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2482 }
2483
2484 image_close(&image);
2485
2486 return retval;
2487
2488 }
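
/* Illustrative usage (editor's example; the third argument is the image type
 * passed to image_open(), e.g. "bin" for a raw binary, and the optional
 * min_address/size pair restricts which part of the image is written):
 *   load_image firmware.elf
 *   load_image firmware.bin 0x20000000 bin
 *   load_image firmware.bin 0x20000000 bin 0x20000000 0x1000
 */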
2489
2490 COMMAND_HANDLER(handle_dump_image_command)
2491 {
2492 struct fileio fileio;
2493
2494 uint8_t buffer[560];
2495 int retvaltemp;
2496
2497
2498 struct target *target = get_current_target(cmd_ctx);
2499
2500 if (argc != 3)
2501 {
2502 command_print(cmd_ctx, "usage: dump_image <filename> <address> <size>");
2503 return ERROR_OK;
2504 }
2505
2506 uint32_t address;
2507 COMMAND_PARSE_NUMBER(u32, args[1], address);
2508 uint32_t size;
2509 COMMAND_PARSE_NUMBER(u32, args[2], size);
2510
2511 if (fileio_open(&fileio, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
2512 {
2513 return ERROR_OK;
2514 }
2515
2516 struct duration bench;
2517 duration_start(&bench);
2518
2519 int retval = ERROR_OK;
2520 while (size > 0)
2521 {
2522 uint32_t size_written;
2523 uint32_t this_run_size = (size > 560) ? 560 : size;
2524 retval = target_read_buffer(target, address, this_run_size, buffer);
2525 if (retval != ERROR_OK)
2526 {
2527 break;
2528 }
2529
2530 retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
2531 if (retval != ERROR_OK)
2532 {
2533 break;
2534 }
2535
2536 size -= this_run_size;
2537 address += this_run_size;
2538 }
2539
2540 if ((retvaltemp = fileio_close(&fileio)) != ERROR_OK)
2541 return retvaltemp;
2542
2543 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2544 {
2545 command_print(cmd_ctx,
2546 "dumped %zu bytes in %fs (%0.3f kb/s)", fileio.size,
2547 duration_elapsed(&bench), duration_kbps(&bench, fileio.size));
2548 }
2549
2550 return retval;
2551 }
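
/* Illustrative usage (editor's example), matching the usage message above:
 *   dump_image ram.bin 0x20000000 0x1000   # save 4 KiB of target memory to ram.bin
 */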
2552
2553 static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
2554 {
2555 uint8_t *buffer;
2556 uint32_t buf_cnt;
2557 uint32_t image_size;
2558 int i;
2559 int retval;
2560 uint32_t checksum = 0;
2561 uint32_t mem_checksum = 0;
2562
2563 struct image image;
2564
2565 struct target *target = get_current_target(cmd_ctx);
2566
2567 if (argc < 1)
2568 {
2569 return ERROR_COMMAND_SYNTAX_ERROR;
2570 }
2571
2572 if (!target)
2573 {
2574 LOG_ERROR("no target selected");
2575 return ERROR_FAIL;
2576 }
2577
2578 struct duration bench;
2579 duration_start(&bench);
2580
2581 if (argc >= 2)
2582 {
2583 uint32_t addr;
2584 COMMAND_PARSE_NUMBER(u32, args[1], addr);
2585 image.base_address = addr;
2586 image.base_address_set = 1;
2587 }
2588 else
2589 {
2590 image.base_address_set = 0;
2591 image.base_address = 0x0;
2592 }
2593
2594 image.start_address_set = 0;
2595
2596 if ((retval = image_open(&image, args[0], (argc == 3) ? args[2] : NULL)) != ERROR_OK)
2597 {
2598 return retval;
2599 }
2600
2601 image_size = 0x0;
2602 retval = ERROR_OK;
2603 for (i = 0; i < image.num_sections; i++)
2604 {
2605 buffer = malloc(image.sections[i].size);
2606 if (buffer == NULL)
2607 {
2608 command_print(cmd_ctx,
2609 "error allocating buffer for section (%d bytes)",
2610 (int)(image.sections[i].size));
2611 break;
2612 }
2613 if ((retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt)) != ERROR_OK)
2614 {
2615 free(buffer);
2616 break;
2617 }
2618
2619 if (verify)
2620 {
2621 /* calculate checksum of image */
2622 image_calculate_checksum(buffer, buf_cnt, &checksum);
2623
2624 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
2625 if (retval != ERROR_OK)
2626 {
2627 free(buffer);
2628 break;
2629 }
2630
2631 if (checksum != mem_checksum)
2632 {
2633 /* failed crc checksum, fall back to a binary compare */
2634 uint8_t *data;
2635
2636 command_print(cmd_ctx, "checksum mismatch - attempting binary compare");
2637
2638 data = (uint8_t*)malloc(buf_cnt);
2639
2640 /* Can we use 32bit word accesses? */
2641 int size = 1;
2642 int count = buf_cnt;
2643 if ((count % 4) == 0)
2644 {
2645 size *= 4;
2646 count /= 4;
2647 }
2648 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
2649 if (retval == ERROR_OK)
2650 {
2651 uint32_t t;
2652 for (t = 0; t < buf_cnt; t++)
2653 {
2654 if (data[t] != buffer[t])
2655 {
2656 command_print(cmd_ctx,
2657 "Verify operation failed address 0x%08x. Was 0x%02x instead of 0x%02x\n",
2658 (unsigned)(t + image.sections[i].base_address),
2659 data[t],
2660 buffer[t]);
2661 free(data);
2662 free(buffer);
2663 retval = ERROR_FAIL;
2664 goto done;
2665 }
2666 if ((t%16384) == 0)
2667 {
2668 keep_alive();
2669 }
2670 }
2671 }
2672
2673 free(data);
2674 }
2675 } else
2676 {
2677 command_print(cmd_ctx, "address 0x%08" PRIx32 " length 0x%08" PRIx32 "",
2678 image.sections[i].base_address,
2679 buf_cnt);
2680 }
2681
2682 free(buffer);
2683 image_size += buf_cnt;
2684 }
2685 done:
2686 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK))
2687 {
2688 command_print(cmd_ctx, "verified %" PRIu32 " bytes "
2689 "in %fs (%0.3f kb/s)", image_size,
2690 duration_elapsed(&bench), duration_kbps(&bench, image_size));
2691 }
2692
2693 image_close(&image);
2694
2695 return retval;
2696 }
2697
2698 COMMAND_HANDLER(handle_verify_image_command)
2699 {
2700 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
2701 }
2702
2703 COMMAND_HANDLER(handle_test_image_command)
2704 {
2705 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
2706 }
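
/* Illustrative usage (editor's example):
 *   verify_image firmware.bin 0x20000000 bin   # checksum/compare against target memory
 *   test_image firmware.bin 0x20000000         # only parse the image and list its sections
 */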
2707
2708 static int handle_bp_command_list(struct command_context *cmd_ctx)
2709 {
2710 struct target *target = get_current_target(cmd_ctx);
2711 struct breakpoint *breakpoint = target->breakpoints;
2712 while (breakpoint)
2713 {
2714 if (breakpoint->type == BKPT_SOFT)
2715 {
2716 char* buf = buf_to_str(breakpoint->orig_instr,
2717 breakpoint->length, 16);
2718 command_print(cmd_ctx, "0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
2719 breakpoint->address,
2720 breakpoint->length,
2721 breakpoint->set, buf);
2722 free(buf);
2723 }
2724 else
2725 {
2726 command_print(cmd_ctx, "0x%8.8" PRIx32 ", 0x%x, %i",
2727 breakpoint->address,
2728 breakpoint->length, breakpoint->set);
2729 }
2730
2731 breakpoint = breakpoint->next;
2732 }
2733 return ERROR_OK;
2734 }
2735
2736 static int handle_bp_command_set(struct command_context *cmd_ctx,
2737 uint32_t addr, uint32_t length, int hw)
2738 {
2739 struct target *target = get_current_target(cmd_ctx);
2740 int retval = breakpoint_add(target, addr, length, hw);
2741 if (ERROR_OK == retval)
2742 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
2743 else
2744 LOG_ERROR("Failure setting breakpoint");
2745 return retval;
2746 }
2747
2748 COMMAND_HANDLER(handle_bp_command)
2749 {
2750 if (argc == 0)
2751 return handle_bp_command_list(cmd_ctx);
2752
2753 if (argc < 2 || argc > 3)
2754 {
2755 command_print(cmd_ctx, "usage: bp <address> <length> ['hw']");
2756 return ERROR_COMMAND_SYNTAX_ERROR;
2757 }
2758
2759 uint32_t addr;
2760 COMMAND_PARSE_NUMBER(u32, args[0], addr);
2761 uint32_t length;
2762 COMMAND_PARSE_NUMBER(u32, args[1], length);
2763
2764 int hw = BKPT_SOFT;
2765 if (argc == 3)
2766 {
2767 if (strcmp(args[2], "hw") == 0)
2768 hw = BKPT_HARD;
2769 else
2770 return ERROR_COMMAND_SYNTAX_ERROR;
2771 }
2772
2773 return handle_bp_command_set(cmd_ctx, addr, length, hw);
2774 }
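
/* Illustrative usage (editor's example):
 *   bp                   # list the current breakpoints
 *   bp 0x00008000 4      # set a software breakpoint
 *   bp 0x00008000 4 hw   # set a hardware breakpoint
 * (see handle_rbp_command below for removing a breakpoint again)
 */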
2775
2776 COMMAND_HANDLER(handle_rbp_command)
2777 {
2778 if (argc != 1)
2779 return ERROR_COMMAND_SYNTAX_ERROR;
2780
2781 uint32_t addr;
2782 COMMAND_PARSE_NUMBER(u32, args[0], addr);
2783
2784 struct target *target = get_current_target(cmd_ctx);
2785 breakpoint_remove(target, addr);
2786
2787 return ERROR_OK;
2788 }
2789
2790 COMMAND_HANDLER(handle_wp_command)
2791 {
2792 struct target *target = get_current_target(cmd_ctx);
2793
2794 if (argc == 0)
2795 {
2796 struct watchpoint *watchpoint = target->watchpoints;
2797
2798 while (watchpoint)
2799 {
2800 command_print(cmd_ctx, "address: 0x%8.8" PRIx32
2801 ", len: 0x%8.8" PRIx32
2802 ", r/w/a: %i, value: 0x%8.8" PRIx32
2803 ", mask: 0x%8.8" PRIx32,
2804 watchpoint->address,
2805 watchpoint->length,
2806 (int)watchpoint->rw,
2807 watchpoint->value,
2808 watchpoint->mask);
2809 watchpoint = watchpoint->next;
2810 }
2811 return ERROR_OK;
2812 }
2813
2814 enum watchpoint_rw type = WPT_ACCESS;
2815 uint32_t addr = 0;
2816 uint32_t length = 0;
2817 uint32_t data_value = 0x0;
2818 uint32_t data_mask = 0xffffffff;
2819
2820 switch (argc)
2821 {
2822 case 5:
2823 COMMAND_PARSE_NUMBER(u32, args[4], data_mask);
2824 // fall through
2825 case 4:
2826 COMMAND_PARSE_NUMBER(u32, args[3], data_value);
2827 // fall through
2828 case 3:
2829 switch (args[2][0])
2830 {
2831 case 'r':
2832 type = WPT_READ;
2833 break;
2834 case 'w':
2835 type = WPT_WRITE;
2836 break;
2837 case 'a':
2838 type = WPT_ACCESS;
2839 break;
2840 default:
2841 LOG_ERROR("invalid watchpoint mode ('%c')", args[2][0]);
2842 return ERROR_COMMAND_SYNTAX_ERROR;
2843 }
2844 // fall through
2845 case 2:
2846 COMMAND_PARSE_NUMBER(u32, args[1], length);
2847 COMMAND_PARSE_NUMBER(u32, args[0], addr);
2848 break;
2849
2850 default:
2851 command_print(cmd_ctx, "usage: wp [address length "
2852 "[(r|w|a) [value [mask]]]]");
2853 return ERROR_COMMAND_SYNTAX_ERROR;
2854 }
2855
2856 int retval = watchpoint_add(target, addr, length, type,
2857 data_value, data_mask);
2858 if (ERROR_OK != retval)
2859 LOG_ERROR("Failure setting watchpoints");
2860
2861 return retval;
2862 }
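
/* Illustrative usage (editor's example), following the usage string above:
 *   wp                                # list watchpoints
 *   wp 0x20000000 4                   # access (read/write) watchpoint, 4 bytes
 *   wp 0x20000000 4 w                 # trigger on writes only
 *   wp 0x20000000 4 r 0x1234 0xffff   # trigger on reads matching value/mask
 */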
2863
2864 COMMAND_HANDLER(handle_rwp_command)
2865 {
2866 if (argc != 1)
2867 return ERROR_COMMAND_SYNTAX_ERROR;
2868
2869 uint32_t addr;
2870 COMMAND_PARSE_NUMBER(u32, args[0], addr);
2871
2872 struct target *target = get_current_target(cmd_ctx);
2873 watchpoint_remove(target, addr);
2874
2875 return ERROR_OK;
2876 }
2877
2878
2879 /**
2880 * Translate a virtual address to a physical address.
2881 *
2882 * On failure, the low-level target implementation must have logged a
2883 * detailed error, which is forwarded to the telnet/GDB session.
2884 */
2885 COMMAND_HANDLER(handle_virt2phys_command)
2886 {
2887 if (argc != 1)
2888 return ERROR_COMMAND_SYNTAX_ERROR;
2889
2890 uint32_t va;
2891 COMMAND_PARSE_NUMBER(u32, args[0], va);
2892 uint32_t pa;
2893
2894 struct target *target = get_current_target(cmd_ctx);
2895 int retval = target->type->virt2phys(target, va, &pa);
2896 if (retval == ERROR_OK)
2897 command_print(cmd_ctx, "Physical address 0x%08" PRIx32 "", pa);
2898
2899 return retval;
2900 }
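
/* Illustrative usage (editor's example):
 *   virt2phys 0xc0008000   # print the physical address behind this virtual address
 */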
2901
2902 static void writeData(FILE *f, const void *data, size_t len)
2903 {
2904 size_t written = fwrite(data, 1, len, f);
2905 if (written != len)
2906 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
2907 }
2908
2909 static void writeLong(FILE *f, int l)
2910 {
2911 int i;
2912 for (i = 0; i < 4; i++)
2913 {
2914 char c = (l >> (i * 8)) & 0xff;
2915 writeData(f, &c, 1);
2916 }
2917
2918 }
2919
2920 static void writeString(FILE *f, const char *s)
2921 {
2922 writeData(f, s, strlen(s));
2923 }
2924
2925 /* Dump a gmon.out histogram file. */
2926 static void writeGmon(uint32_t *samples, uint32_t sampleNum, const char *filename)
2927 {
2928 uint32_t i;
2929 FILE *f = fopen(filename, "w");
2930 if (f == NULL)
2931 return;
2932 writeString(f, "gmon");
2933 writeLong(f, 0x00000001); /* Version */
2934 writeLong(f, 0); /* padding */
2935 writeLong(f, 0); /* padding */
2936 writeLong(f, 0); /* padding */
2937
2938 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
2939 writeData(f, &zero, 1);
2940
2941 /* figure out bucket size */
2942 uint32_t min = samples[0];
2943 uint32_t max = samples[0];
2944 for (i = 0; i < sampleNum; i++)
2945 {
2946 if (min > samples[i])
2947 {
2948 min = samples[i];
2949 }
2950 if (max < samples[i])
2951 {
2952 max = samples[i];
2953 }
2954 }
2955
2956 int addressSpace = (max - min + 1);
2957
2958 static const uint32_t maxBuckets = 256 * 1024; /* maximum number of histogram buckets */
2959 uint32_t length = addressSpace;
2960 if (length > maxBuckets)
2961 {
2962 length = maxBuckets;
2963 }
2964 int *buckets = malloc(sizeof(int)*length);
2965 if (buckets == NULL)
2966 {
2967 fclose(f);
2968 return;
2969 }
2970 memset(buckets, 0, sizeof(int)*length);
2971 for (i = 0; i < sampleNum; i++)
2972 {
2973 uint32_t address = samples[i];
2974 long long a = address - min;
2975 long long b = length - 1;
2976 long long c = addressSpace - 1;
2977 int index = (a * b) / c; /* long long intermediates avoid int32 overflow */
2978 buckets[index]++;
2979 }
2980
2981 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
2982 writeLong(f, min); /* low_pc */
2983 writeLong(f, max); /* high_pc */
2984 writeLong(f, length); /* # of samples */
2985 writeLong(f, 64000000); /* 64MHz */
2986 writeString(f, "seconds");
2987 for (i = 0; i < (15-strlen("seconds")); i++)
2988 writeData(f, &zero, 1);
2989 writeString(f, "s");
2990
2991 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
2992
2993 char *data = malloc(2*length);
2994 if (data != NULL)
2995 {
2996 for (i = 0; i < length; i++)
2997 {
2998 int val;
2999 val = buckets[i];
3000 if (val > 65535)
3001 {
3002 val = 65535;
3003 }
3004 data[i * 2] = val & 0xff;
3005 data[i * 2 + 1] = (val >> 8) & 0xff;
3006 }
3007 free(buckets);
3008 writeData(f, data, length * 2);
3009 free(data);
3010 } else
3011 {
3012 free(buckets);
3013 }
3014
3015 fclose(f);
3016 }
3017
3018 /* Profiling samples the CPU PC as quickly as OpenOCD is able, which gives an effectively random sampling of the PC. */
3019 COMMAND_HANDLER(handle_profile_command)
3020 {
3021 struct target *target = get_current_target(cmd_ctx);
3022 struct timeval timeout, now;
3023
3024 gettimeofday(&timeout, NULL);
3025 if (argc != 2)
3026 {
3027 return ERROR_COMMAND_SYNTAX_ERROR;
3028 }
3029 unsigned offset;
3030 COMMAND_PARSE_NUMBER(uint, args[0], offset);
3031
3032 timeval_add_time(&timeout, offset, 0);
3033
3034 command_print(cmd_ctx, "Starting profiling. Halting and resuming the target as often as we can...");
3035
3036 static const int maxSample = 10000;
3037 uint32_t *samples = malloc(sizeof(uint32_t)*maxSample);
3038 if (samples == NULL)
3039 return ERROR_OK;
3040
3041 int numSamples = 0;
3042 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
3043 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
3044
3045 for (;;)
3046 {
3047 int retval;
3048 target_poll(target);
3049 if (target->state == TARGET_HALTED)
3050 {
3051 uint32_t t = *((uint32_t *)reg->value);
3052 samples[numSamples++] = t;
3053 retval = target_resume(target, 1, 0, 0, 0); /* current pc, addr = 0, do not handle breakpoints, not debugging */
3054 target_poll(target);
3055 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
3056 } else if (target->state == TARGET_RUNNING)
3057 {
3058 /* We want to quickly sample the PC. */
3059 if ((retval = target_halt(target)) != ERROR_OK)
3060 {
3061 free(samples);
3062 return retval;
3063 }
3064 } else
3065 {
3066 command_print(cmd_ctx, "Target not halted or running");
3067 retval = ERROR_OK;
3068 break;
3069 }
3070 if (retval != ERROR_OK)
3071 {
3072 break;
3073 }
3074
3075 gettimeofday(&now, NULL);
3076 if ((numSamples >= maxSample) || (now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec >= timeout.tv_usec)))
3077 {
3078 command_print(cmd_ctx, "Profiling completed. %d samples.", numSamples);
3079 if ((retval = target_poll(target)) != ERROR_OK)
3080 {
3081 free(samples);
3082 return retval;
3083 }
3084 if (target->state == TARGET_HALTED)
3085 {
3086 target_resume(target, 1, 0, 0, 0); /* current pc, addr = 0, do not handle breakpoints, not debugging */
3087 }
3088 if ((retval = target_poll(target)) != ERROR_OK)
3089 {
3090 free(samples);
3091 return retval;
3092 }
3093 writeGmon(samples, numSamples, args[1]);
3094 command_print(cmd_ctx, "Wrote %s", args[1]);
3095 break;
3096 }
3097 }
3098 free(samples);
3099
3100 return ERROR_OK;
3101 }
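
/* Illustrative usage (editor's example): sample for 30 seconds and write a
 * gmon.out histogram that gprof can analyse together with the ELF image:
 *   profile 30 gmon.out
 */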
3102
3103 static int new_int_array_element(Jim_Interp * interp, const char *varname, int idx, uint32_t val)
3104 {
3105 char *namebuf;
3106 Jim_Obj *nameObjPtr, *valObjPtr;
3107 int result;
3108
3109 namebuf = alloc_printf("%s(%d)", varname, idx);
3110 if (!namebuf)
3111 return JIM_ERR;
3112
3113 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
3114 valObjPtr = Jim_NewIntObj(interp, val);
3115 if (!nameObjPtr || !valObjPtr)
3116 {
3117 free(namebuf);
3118 return JIM_ERR;
3119 }
3120
3121 Jim_IncrRefCount(nameObjPtr);
3122 Jim_IncrRefCount(valObjPtr);
3123 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
3124 Jim_DecrRefCount(interp, nameObjPtr);
3125 Jim_DecrRefCount(interp, valObjPtr);
3126 free(namebuf);
3127 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
3128 return result;
3129 }
3130
3131 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
3132 {
3133 struct command_context *context;
3134 struct target *target;
3135
3136 context = Jim_GetAssocData(interp, "context");
3137 if (context == NULL)
3138 {
3139 LOG_ERROR("mem2array: no command context");
3140 return JIM_ERR;
3141 }
3142 target = get_current_target(context);
3143 if (target == NULL)
3144 {
3145 LOG_ERROR("mem2array: no current target");
3146 return JIM_ERR;
3147 }
3148
3149 return target_mem2array(interp, target, argc-1, argv + 1);
3150 }
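
/* Illustrative Tcl usage (editor's example), matching the argument check in
 * target_mem2array() below:
 *   mem2array readings 32 0x20000000 16   ;# 16 32-bit reads into the array "readings"
 */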
3151
3152 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
3153 {
3154 long l;
3155 uint32_t width;
3156 int len;
3157 uint32_t addr;
3158 uint32_t count;
3159 uint32_t v;
3160 const char *varname;
3161 uint8_t buffer[4096];
3162 int n, e, retval;
3163 uint32_t i;
3164
3165 /* argv[1] = name of array to receive the data
3166 * argv[2] = desired width
3167 * argv[3] = memory address
3168 * argv[4] = count of times to read
3169 */
3170 if (argc != 4) {
3171 Jim_WrongNumArgs(interp, 1, argv, "varname width addr nelems");
3172 return JIM_ERR;
3173 }
3174 varname = Jim_GetString(argv[0], &len);
3175 /* given "foo" get space for worse case "foo(%d)" .. add 20 */
3176
3177 e = Jim_GetLong(interp, argv[1], &l);
3178 width = l;
3179 if (e != JIM_OK) {
3180 return e;
3181 }
3182
3183 e = Jim_GetLong(interp, argv[2], &l);
3184 addr = l;
3185 if (e != JIM_OK) {
3186 return e;
3187 }
3188 e = Jim_GetLong(interp, argv[3], &l);
3189 len = l;
3190 if (e != JIM_OK) {
3191 return e;
3192 }
3193 switch (width) {
3194 case 8:
3195 width = 1;
3196 break;
3197 case 16:
3198 width = 2;
3199 break;
3200 case 32:
3201 width = 4