target/target: call event handlers around examine when polling resumes
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 √ėyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 ***************************************************************************/
41
42 #ifdef HAVE_CONFIG_H
43 #include "config.h"
44 #endif
45
46 #include <helper/time_support.h>
47 #include <jtag/jtag.h>
48 #include <flash/nor/core.h>
49
50 #include "target.h"
51 #include "target_type.h"
52 #include "target_request.h"
53 #include "breakpoints.h"
54 #include "register.h"
55 #include "trace.h"
56 #include "image.h"
57 #include "rtos/rtos.h"
58 #include "transport/transport.h"
59
60 /* default halt wait timeout (ms) */
61 #define DEFAULT_HALT_TIMEOUT 5000
62
63 static int target_read_buffer_default(struct target *target, uint32_t address,
64 uint32_t count, uint8_t *buffer);
65 static int target_write_buffer_default(struct target *target, uint32_t address,
66 uint32_t count, const uint8_t *buffer);
67 static int target_array2mem(Jim_Interp *interp, struct target *target,
68 int argc, Jim_Obj * const *argv);
69 static int target_mem2array(Jim_Interp *interp, struct target *target,
70 int argc, Jim_Obj * const *argv);
71 static int target_register_user_commands(struct command_context *cmd_ctx);
72 static int target_get_gdb_fileio_info_default(struct target *target,
73 struct gdb_fileio_info *fileio_info);
74 static int target_gdb_fileio_end_default(struct target *target, int retcode,
75 int fileio_errno, bool ctrl_c);
76 static int target_profiling_default(struct target *target, uint32_t *samples,
77 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);
78
79 /* targets */
80 extern struct target_type arm7tdmi_target;
81 extern struct target_type arm720t_target;
82 extern struct target_type arm9tdmi_target;
83 extern struct target_type arm920t_target;
84 extern struct target_type arm966e_target;
85 extern struct target_type arm946e_target;
86 extern struct target_type arm926ejs_target;
87 extern struct target_type fa526_target;
88 extern struct target_type feroceon_target;
89 extern struct target_type dragonite_target;
90 extern struct target_type xscale_target;
91 extern struct target_type cortexm_target;
92 extern struct target_type cortexa_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type mips_m4k_target;
96 extern struct target_type avr_target;
97 extern struct target_type dsp563xx_target;
98 extern struct target_type dsp5680xx_target;
99 extern struct target_type testee_target;
100 extern struct target_type avr32_ap7k_target;
101 extern struct target_type hla_target;
102 extern struct target_type nds32_v2_target;
103 extern struct target_type nds32_v3_target;
104 extern struct target_type nds32_v3m_target;
105 extern struct target_type or1k_target;
106 extern struct target_type quark_x10xx_target;
107
/* NULL-terminated registry of every target driver compiled into this
 * build; the "target create" command resolves its type-name argument
 * against the ->name field of each entry in this list. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&or1k_target,
	&quark_x10xx_target,
	NULL,
};
138
/* Head of the singly-linked list of all configured targets. */
struct target *all_targets;
/* Registered callbacks fired on target events (halted, resumed, ...). */
static struct target_event_callback *target_event_callbacks;
/* Periodic callbacks serviced from the timer-callback machinery. */
static struct target_timer_callback *target_timer_callbacks;
/* Callbacks invoked at the start of every reset sequence. */
LIST_HEAD(target_reset_callback_list);
/* Background polling period, in milliseconds. */
static const int polling_interval = 100;
144
/* Accepted spellings for assert/deassert arguments in Tcl commands;
 * terminated by a NULL name. */
static const Jim_Nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
154
/* Symbolic names for ERROR_TARGET_* codes, used by target_strerror_safe(). */
static const Jim_Nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
169
170 static const char *target_strerror_safe(int err)
171 {
172 const Jim_Nvp *n;
173
174 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
175 if (n->name == NULL)
176 return "unknown";
177 else
178 return n->name;
179 }
180
/* Tcl-visible names for every TARGET_EVENT_* value; used when parsing
 * "configure -event" handlers and when reporting events. */
static const Jim_Nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START,         .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE,    .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT,        .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST,   .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE,  .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_HALT_PRE,      .name = "reset-halt-pre" },
	{ .value = TARGET_EVENT_RESET_HALT_POST,     .name = "reset-halt-post" },
	{ .value = TARGET_EVENT_RESET_WAIT_PRE,      .name = "reset-wait-pre" },
	{ .value = TARGET_EVENT_RESET_WAIT_POST,     .name = "reset-wait-post" },
	{ .value = TARGET_EVENT_RESET_INIT,          .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END,           .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },

	{ .name = NULL, .value = -1 }
};
222
/* Human-readable names for the TARGET_* run states. */
static const Jim_Nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
231
/* Human-readable names for the DBG_REASON_* halt causes. */
static const Jim_Nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request" , .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit" , .value = DBG_REASON_EXIT },
	{ .name = "undefined" , .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
243
/* Accepted spellings for the target endianness configure option. */
static const Jim_Nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};
251
/* Names for the RESET_* modes accepted by the "reset" command. */
static const Jim_Nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run" , .value = RESET_RUN },
	{ .name = "halt" , .value = RESET_HALT },
	{ .name = "init" , .value = RESET_INIT },
	{ .name = NULL , .value = -1 },
};
259
260 const char *debug_reason_name(struct target *t)
261 {
262 const char *cp;
263
264 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
265 t->debug_reason)->name;
266 if (!cp) {
267 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
268 cp = "(*BUG*unknown*BUG*)";
269 }
270 return cp;
271 }
272
273 const char *target_state_name(struct target *t)
274 {
275 const char *cp;
276 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
277 if (!cp) {
278 LOG_ERROR("Invalid target state: %d", (int)(t->state));
279 cp = "(*BUG*unknown*BUG*)";
280 }
281 return cp;
282 }
283
284 const char *target_event_name(enum target_event event)
285 {
286 const char *cp;
287 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
288 if (!cp) {
289 LOG_ERROR("Invalid target event: %d", (int)(event));
290 cp = "(*BUG*unknown*BUG*)";
291 }
292 return cp;
293 }
294
295 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
296 {
297 const char *cp;
298 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
299 if (!cp) {
300 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
301 cp = "(*BUG*unknown*BUG*)";
302 }
303 return cp;
304 }
305
306 /* determine the number of the new target */
307 static int new_target_number(void)
308 {
309 struct target *t;
310 int x;
311
312 /* number is 0 based */
313 x = -1;
314 t = all_targets;
315 while (t) {
316 if (x < t->target_number)
317 x = t->target_number;
318 t = t->next;
319 }
320 return x + 1;
321 }
322
323 /* read a uint64_t from a buffer in target memory endianness */
324 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
325 {
326 if (target->endianness == TARGET_LITTLE_ENDIAN)
327 return le_to_h_u64(buffer);
328 else
329 return be_to_h_u64(buffer);
330 }
331
332 /* read a uint32_t from a buffer in target memory endianness */
333 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
334 {
335 if (target->endianness == TARGET_LITTLE_ENDIAN)
336 return le_to_h_u32(buffer);
337 else
338 return be_to_h_u32(buffer);
339 }
340
341 /* read a uint24_t from a buffer in target memory endianness */
342 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
343 {
344 if (target->endianness == TARGET_LITTLE_ENDIAN)
345 return le_to_h_u24(buffer);
346 else
347 return be_to_h_u24(buffer);
348 }
349
350 /* read a uint16_t from a buffer in target memory endianness */
351 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
352 {
353 if (target->endianness == TARGET_LITTLE_ENDIAN)
354 return le_to_h_u16(buffer);
355 else
356 return be_to_h_u16(buffer);
357 }
358
/* read a uint8_t from a buffer; endianness is irrelevant for one byte
 * (the target parameter is kept for signature symmetry with the wider
 * accessors) */
static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
{
	return buffer[0];
}
364
365 /* write a uint64_t to a buffer in target memory endianness */
366 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
367 {
368 if (target->endianness == TARGET_LITTLE_ENDIAN)
369 h_u64_to_le(buffer, value);
370 else
371 h_u64_to_be(buffer, value);
372 }
373
374 /* write a uint32_t to a buffer in target memory endianness */
375 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
376 {
377 if (target->endianness == TARGET_LITTLE_ENDIAN)
378 h_u32_to_le(buffer, value);
379 else
380 h_u32_to_be(buffer, value);
381 }
382
383 /* write a uint24_t to a buffer in target memory endianness */
384 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
385 {
386 if (target->endianness == TARGET_LITTLE_ENDIAN)
387 h_u24_to_le(buffer, value);
388 else
389 h_u24_to_be(buffer, value);
390 }
391
392 /* write a uint16_t to a buffer in target memory endianness */
393 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
394 {
395 if (target->endianness == TARGET_LITTLE_ENDIAN)
396 h_u16_to_le(buffer, value);
397 else
398 h_u16_to_be(buffer, value);
399 }
400
/* write a uint8_t to a buffer; endianness is irrelevant for one byte */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	buffer[0] = value;
}
406
/* read an array of uint64_t values out of a raw buffer held in
 * target memory endianness (note: this reads, despite the historical
 * naming symmetry with the set_* writers) */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	for (uint32_t n = 0; n < count; n++)
		dstbuf[n] = target_buffer_get_u64(target, &buffer[n * 8]);
}
414
/* read an array of uint32_t values out of a raw buffer held in
 * target memory endianness */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	for (uint32_t n = 0; n < count; n++)
		dstbuf[n] = target_buffer_get_u32(target, &buffer[n * 4]);
}
422
/* read an array of uint16_t values out of a raw buffer held in
 * target memory endianness */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	for (uint32_t n = 0; n < count; n++)
		dstbuf[n] = target_buffer_get_u16(target, &buffer[n * 2]);
}
430
/* write a uint64_t array to a buffer in target memory endianness */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	for (uint32_t n = 0; n < count; n++)
		target_buffer_set_u64(target, &buffer[n * 8], srcbuf[n]);
}
438
/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	for (uint32_t n = 0; n < count; n++)
		target_buffer_set_u32(target, &buffer[n * 4], srcbuf[n]);
}
446
/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	for (uint32_t n = 0; n < count; n++)
		target_buffer_set_u16(target, &buffer[n * 2], srcbuf[n]);
}
454
455 /* return a pointer to a configured target; id is name or number */
456 struct target *get_target(const char *id)
457 {
458 struct target *target;
459
460 /* try as tcltarget name */
461 for (target = all_targets; target; target = target->next) {
462 if (target_name(target) == NULL)
463 continue;
464 if (strcmp(id, target_name(target)) == 0)
465 return target;
466 }
467
468 /* It's OK to remove this fallback sometime after August 2010 or so */
469
470 /* no match, try as number */
471 unsigned num;
472 if (parse_uint(id, &num) != ERROR_OK)
473 return NULL;
474
475 for (target = all_targets; target; target = target->next) {
476 if (target->target_number == (int)num) {
477 LOG_WARNING("use '%s' as target identifier, not '%u'",
478 target_name(target), num);
479 return target;
480 }
481 }
482
483 return NULL;
484 }
485
486 /* returns a pointer to the n-th configured target */
487 static struct target *get_target_by_num(int num)
488 {
489 struct target *target = all_targets;
490
491 while (target) {
492 if (target->target_number == num)
493 return target;
494 target = target->next;
495 }
496
497 return NULL;
498 }
499
500 struct target *get_current_target(struct command_context *cmd_ctx)
501 {
502 struct target *target = get_target_by_num(cmd_ctx->current_target);
503
504 if (target == NULL) {
505 LOG_ERROR("BUG: current_target out of bounds");
506 exit(-1);
507 }
508
509 return target;
510 }
511
512 int target_poll(struct target *target)
513 {
514 int retval;
515
516 /* We can't poll until after examine */
517 if (!target_was_examined(target)) {
518 /* Fail silently lest we pollute the log */
519 return ERROR_FAIL;
520 }
521
522 retval = target->type->poll(target);
523 if (retval != ERROR_OK)
524 return retval;
525
526 if (target->halt_issued) {
527 if (target->state == TARGET_HALTED)
528 target->halt_issued = false;
529 else {
530 long long t = timeval_ms() - target->halt_issued_time;
531 if (t > DEFAULT_HALT_TIMEOUT) {
532 target->halt_issued = false;
533 LOG_INFO("Halt timed out, wake up GDB.");
534 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
535 }
536 }
537 }
538
539 return ERROR_OK;
540 }
541
542 int target_halt(struct target *target)
543 {
544 int retval;
545 /* We can't poll until after examine */
546 if (!target_was_examined(target)) {
547 LOG_ERROR("Target not examined yet");
548 return ERROR_FAIL;
549 }
550
551 retval = target->type->halt(target);
552 if (retval != ERROR_OK)
553 return retval;
554
555 target->halt_issued = true;
556 target->halt_issued_time = timeval_ms();
557
558 return ERROR_OK;
559 }
560
561 /**
562 * Make the target (re)start executing using its saved execution
563 * context (possibly with some modifications).
564 *
565 * @param target Which target should start executing.
566 * @param current True to use the target's saved program counter instead
567 * of the address parameter
568 * @param address Optionally used as the program counter.
569 * @param handle_breakpoints True iff breakpoints at the resumption PC
570 * should be skipped. (For example, maybe execution was stopped by
571 * such a breakpoint, in which case it would be counterprodutive to
572 * let it re-trigger.
573 * @param debug_execution False if all working areas allocated by OpenOCD
574 * should be released and/or restored to their original contents.
575 * (This would for example be true to run some downloaded "helper"
576 * algorithm code, which resides in one such working buffer and uses
577 * another for data storage.)
578 *
579 * @todo Resolve the ambiguity about what the "debug_execution" flag
580 * signifies. For example, Target implementations don't agree on how
581 * it relates to invalidation of the register cache, or to whether
582 * breakpoints and watchpoints should be enabled. (It would seem wrong
583 * to enable breakpoints when running downloaded "helper" algorithms
584 * (debug_execution true), since the breakpoints would be set to match
585 * target firmware being debugged, not the helper algorithm.... and
586 * enabling them could cause such helpers to malfunction (for example,
587 * by overwriting data with a breakpoint instruction. On the other
588 * hand the infrastructure for running such helpers might use this
589 * procedure but rely on hardware breakpoint to detect termination.)
590 */
591 int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
592 {
593 int retval;
594
595 /* We can't poll until after examine */
596 if (!target_was_examined(target)) {
597 LOG_ERROR("Target not examined yet");
598 return ERROR_FAIL;
599 }
600
601 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
602
603 /* note that resume *must* be asynchronous. The CPU can halt before
604 * we poll. The CPU can even halt at the current PC as a result of
605 * a software breakpoint being inserted by (a bug?) the application.
606 */
607 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
608 if (retval != ERROR_OK)
609 return retval;
610
611 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
612
613 return retval;
614 }
615
616 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
617 {
618 char buf[100];
619 int retval;
620 Jim_Nvp *n;
621 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
622 if (n->name == NULL) {
623 LOG_ERROR("invalid reset mode");
624 return ERROR_FAIL;
625 }
626
627 struct target *target;
628 for (target = all_targets; target; target = target->next)
629 target_call_reset_callbacks(target, reset_mode);
630
631 /* disable polling during reset to make reset event scripts
632 * more predictable, i.e. dr/irscan & pathmove in events will
633 * not have JTAG operations injected into the middle of a sequence.
634 */
635 bool save_poll = jtag_poll_get_enabled();
636
637 jtag_poll_set_enabled(false);
638
639 sprintf(buf, "ocd_process_reset %s", n->name);
640 retval = Jim_Eval(cmd_ctx->interp, buf);
641
642 jtag_poll_set_enabled(save_poll);
643
644 if (retval != JIM_OK) {
645 Jim_MakeErrorMessage(cmd_ctx->interp);
646 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
647 return ERROR_FAIL;
648 }
649
650 /* We want any events to be processed before the prompt */
651 retval = target_call_timer_callbacks_now();
652
653 for (target = all_targets; target; target = target->next) {
654 target->type->check_reset(target);
655 target->running_alg = false;
656 }
657
658 return retval;
659 }
660
/* virt2phys fallback for targets without an MMU: physical == virtual */
static int identity_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
667
/* mmu() fallback for targets without an MMU: always reports disabled */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
673
/* examine() fallback: nothing to probe, just mark the target examined */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
679
/* no check by default: check_reset() fallback that always succeeds */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
685
/* Examine a single target: fires TARGET_EVENT_EXAMINE_START, runs the
 * driver's examine() method, and fires TARGET_EVENT_EXAMINE_END only
 * when examination succeeded. */
int target_examine_one(struct target *target)
{
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target->type->examine(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return ERROR_OK;
}
698
/* Deferred-examination hook: registered by target_examine() for targets
 * whose TAP is disabled; once the TAP is enabled this runs the deferred
 * examination exactly once and unregisters itself. */
static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	/* wait until this target's TAP is actually enabled */
	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	/* one-shot: examination only needs to happen once */
	jtag_unregister_event_callback(jtag_enable_callback, target);

	return target_examine_one(target);
}
710
711 /* Targets that correctly implement init + examine, i.e.
712 * no communication with target during init:
713 *
714 * XScale
715 */
716 int target_examine(void)
717 {
718 int retval = ERROR_OK;
719 struct target *target;
720
721 for (target = all_targets; target; target = target->next) {
722 /* defer examination, but don't skip it */
723 if (!target->tap->enabled) {
724 jtag_register_event_callback(jtag_enable_callback,
725 target);
726 continue;
727 }
728
729 retval = target_examine_one(target);
730 if (retval != ERROR_OK)
731 return retval;
732 }
733 return retval;
734 }
735
/* Name of the target's driver type (e.g. "cortex_m"). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
740
741 static int target_soft_reset_halt(struct target *target)
742 {
743 if (!target_was_examined(target)) {
744 LOG_ERROR("Target not examined yet");
745 return ERROR_FAIL;
746 }
747 if (!target->type->soft_reset_halt) {
748 LOG_ERROR("Target %s does not support soft_reset_halt",
749 target_name(target));
750 return ERROR_FAIL;
751 }
752 return target->type->soft_reset_halt(target);
753 }
754
755 /**
756 * Downloads a target-specific native code algorithm to the target,
757 * and executes it. * Note that some targets may need to set up, enable,
758 * and tear down a breakpoint (hard or * soft) to detect algorithm
759 * termination, while others may support lower overhead schemes where
760 * soft breakpoints embedded in the algorithm automatically terminate the
761 * algorithm.
762 *
763 * @param target used to run the algorithm
764 * @param arch_info target-specific description of the algorithm.
765 */
766 int target_run_algorithm(struct target *target,
767 int num_mem_params, struct mem_param *mem_params,
768 int num_reg_params, struct reg_param *reg_param,
769 uint32_t entry_point, uint32_t exit_point,
770 int timeout_ms, void *arch_info)
771 {
772 int retval = ERROR_FAIL;
773
774 if (!target_was_examined(target)) {
775 LOG_ERROR("Target not examined yet");
776 goto done;
777 }
778 if (!target->type->run_algorithm) {
779 LOG_ERROR("Target type '%s' does not support %s",
780 target_type_name(target), __func__);
781 goto done;
782 }
783
784 target->running_alg = true;
785 retval = target->type->run_algorithm(target,
786 num_mem_params, mem_params,
787 num_reg_params, reg_param,
788 entry_point, exit_point, timeout_ms, arch_info);
789 target->running_alg = false;
790
791 done:
792 return retval;
793 }
794
795 /**
796 * Downloads a target-specific native code algorithm to the target,
797 * executes and leaves it running.
798 *
799 * @param target used to run the algorithm
800 * @param arch_info target-specific description of the algorithm.
801 */
802 int target_start_algorithm(struct target *target,
803 int num_mem_params, struct mem_param *mem_params,
804 int num_reg_params, struct reg_param *reg_params,
805 uint32_t entry_point, uint32_t exit_point,
806 void *arch_info)
807 {
808 int retval = ERROR_FAIL;
809
810 if (!target_was_examined(target)) {
811 LOG_ERROR("Target not examined yet");
812 goto done;
813 }
814 if (!target->type->start_algorithm) {
815 LOG_ERROR("Target type '%s' does not support %s",
816 target_type_name(target), __func__);
817 goto done;
818 }
819 if (target->running_alg) {
820 LOG_ERROR("Target is already running an algorithm");
821 goto done;
822 }
823
824 target->running_alg = true;
825 retval = target->type->start_algorithm(target,
826 num_mem_params, mem_params,
827 num_reg_params, reg_params,
828 entry_point, exit_point, arch_info);
829
830 done:
831 return retval;
832 }
833
834 /**
835 * Waits for an algorithm started with target_start_algorithm() to complete.
836 *
837 * @param target used to run the algorithm
838 * @param arch_info target-specific description of the algorithm.
839 */
840 int target_wait_algorithm(struct target *target,
841 int num_mem_params, struct mem_param *mem_params,
842 int num_reg_params, struct reg_param *reg_params,
843 uint32_t exit_point, int timeout_ms,
844 void *arch_info)
845 {
846 int retval = ERROR_FAIL;
847
848 if (!target->type->wait_algorithm) {
849 LOG_ERROR("Target type '%s' does not support %s",
850 target_type_name(target), __func__);
851 goto done;
852 }
853 if (!target->running_alg) {
854 LOG_ERROR("Target is not running an algorithm");
855 goto done;
856 }
857
858 retval = target->type->wait_algorithm(target,
859 num_mem_params, mem_params,
860 num_reg_params, reg_params,
861 exit_point, timeout_ms, arch_info);
862 if (retval != ERROR_TARGET_TIMEOUT)
863 target->running_alg = false;
864
865 done:
866 return retval;
867 }
868
869 /**
870 * Executes a target-specific native code algorithm in the target.
871 * It differs from target_run_algorithm in that the algorithm is asynchronous.
872 * Because of this it requires an compliant algorithm:
873 * see contrib/loaders/flash/stm32f1x.S for example.
874 *
875 * @param target used to run the algorithm
876 */
877
878 int target_run_flash_async_algorithm(struct target *target,
879 const uint8_t *buffer, uint32_t count, int block_size,
880 int num_mem_params, struct mem_param *mem_params,
881 int num_reg_params, struct reg_param *reg_params,
882 uint32_t buffer_start, uint32_t buffer_size,
883 uint32_t entry_point, uint32_t exit_point, void *arch_info)
884 {
885 int retval;
886 int timeout = 0;
887
888 const uint8_t *buffer_orig = buffer;
889
890 /* Set up working area. First word is write pointer, second word is read pointer,
891 * rest is fifo data area. */
892 uint32_t wp_addr = buffer_start;
893 uint32_t rp_addr = buffer_start + 4;
894 uint32_t fifo_start_addr = buffer_start + 8;
895 uint32_t fifo_end_addr = buffer_start + buffer_size;
896
897 uint32_t wp = fifo_start_addr;
898 uint32_t rp = fifo_start_addr;
899
900 /* validate block_size is 2^n */
901 assert(!block_size || !(block_size & (block_size - 1)));
902
903 retval = target_write_u32(target, wp_addr, wp);
904 if (retval != ERROR_OK)
905 return retval;
906 retval = target_write_u32(target, rp_addr, rp);
907 if (retval != ERROR_OK)
908 return retval;
909
910 /* Start up algorithm on target and let it idle while writing the first chunk */
911 retval = target_start_algorithm(target, num_mem_params, mem_params,
912 num_reg_params, reg_params,
913 entry_point,
914 exit_point,
915 arch_info);
916
917 if (retval != ERROR_OK) {
918 LOG_ERROR("error starting target flash write algorithm");
919 return retval;
920 }
921
922 while (count > 0) {
923
924 retval = target_read_u32(target, rp_addr, &rp);
925 if (retval != ERROR_OK) {
926 LOG_ERROR("failed to get read pointer");
927 break;
928 }
929
930 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
931 (size_t) (buffer - buffer_orig), count, wp, rp);
932
933 if (rp == 0) {
934 LOG_ERROR("flash write algorithm aborted by target");
935 retval = ERROR_FLASH_OPERATION_FAILED;
936 break;
937 }
938
939 if ((rp & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
940 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
941 break;
942 }
943
944 /* Count the number of bytes available in the fifo without
945 * crossing the wrap around. Make sure to not fill it completely,
946 * because that would make wp == rp and that's the empty condition. */
947 uint32_t thisrun_bytes;
948 if (rp > wp)
949 thisrun_bytes = rp - wp - block_size;
950 else if (rp > fifo_start_addr)
951 thisrun_bytes = fifo_end_addr - wp;
952 else
953 thisrun_bytes = fifo_end_addr - wp - block_size;
954
955 if (thisrun_bytes == 0) {
956 /* Throttle polling a bit if transfer is (much) faster than flash
957 * programming. The exact delay shouldn't matter as long as it's
958 * less than buffer size / flash speed. This is very unlikely to
959 * run when using high latency connections such as USB. */
960 alive_sleep(10);
961
962 /* to stop an infinite loop on some targets check and increment a timeout
963 * this issue was observed on a stellaris using the new ICDI interface */
964 if (timeout++ >= 500) {
965 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
966 return ERROR_FLASH_OPERATION_FAILED;
967 }
968 continue;
969 }
970
971 /* reset our timeout */
972 timeout = 0;
973
974 /* Limit to the amount of data we actually want to write */
975 if (thisrun_bytes > count * block_size)
976 thisrun_bytes = count * block_size;
977
978 /* Write data to fifo */
979 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
980 if (retval != ERROR_OK)
981 break;
982
983 /* Update counters and wrap write pointer */
984 buffer += thisrun_bytes;
985 count -= thisrun_bytes / block_size;
986 wp += thisrun_bytes;
987 if (wp >= fifo_end_addr)
988 wp = fifo_start_addr;
989
990 /* Store updated write pointer to target */
991 retval = target_write_u32(target, wp_addr, wp);
992 if (retval != ERROR_OK)
993 break;
994 }
995
996 if (retval != ERROR_OK) {
997 /* abort flash write algorithm on target */
998 target_write_u32(target, wp_addr, 0);
999 }
1000
1001 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1002 num_reg_params, reg_params,
1003 exit_point,
1004 10000,
1005 arch_info);
1006
1007 if (retval2 != ERROR_OK) {
1008 LOG_ERROR("error waiting for target flash write algorithm");
1009 retval = retval2;
1010 }
1011
1012 return retval;
1013 }
1014
1015 int target_read_memory(struct target *target,
1016 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1017 {
1018 if (!target_was_examined(target)) {
1019 LOG_ERROR("Target not examined yet");
1020 return ERROR_FAIL;
1021 }
1022 return target->type->read_memory(target, address, size, count, buffer);
1023 }
1024
1025 int target_read_phys_memory(struct target *target,
1026 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1027 {
1028 if (!target_was_examined(target)) {
1029 LOG_ERROR("Target not examined yet");
1030 return ERROR_FAIL;
1031 }
1032 return target->type->read_phys_memory(target, address, size, count, buffer);
1033 }
1034
1035 int target_write_memory(struct target *target,
1036 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1037 {
1038 if (!target_was_examined(target)) {
1039 LOG_ERROR("Target not examined yet");
1040 return ERROR_FAIL;
1041 }
1042 return target->type->write_memory(target, address, size, count, buffer);
1043 }
1044
1045 int target_write_phys_memory(struct target *target,
1046 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1047 {
1048 if (!target_was_examined(target)) {
1049 LOG_ERROR("Target not examined yet");
1050 return ERROR_FAIL;
1051 }
1052 return target->type->write_phys_memory(target, address, size, count, buffer);
1053 }
1054
1055 int target_add_breakpoint(struct target *target,
1056 struct breakpoint *breakpoint)
1057 {
1058 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1059 LOG_WARNING("target %s is not halted", target_name(target));
1060 return ERROR_TARGET_NOT_HALTED;
1061 }
1062 return target->type->add_breakpoint(target, breakpoint);
1063 }
1064
1065 int target_add_context_breakpoint(struct target *target,
1066 struct breakpoint *breakpoint)
1067 {
1068 if (target->state != TARGET_HALTED) {
1069 LOG_WARNING("target %s is not halted", target_name(target));
1070 return ERROR_TARGET_NOT_HALTED;
1071 }
1072 return target->type->add_context_breakpoint(target, breakpoint);
1073 }
1074
1075 int target_add_hybrid_breakpoint(struct target *target,
1076 struct breakpoint *breakpoint)
1077 {
1078 if (target->state != TARGET_HALTED) {
1079 LOG_WARNING("target %s is not halted", target_name(target));
1080 return ERROR_TARGET_NOT_HALTED;
1081 }
1082 return target->type->add_hybrid_breakpoint(target, breakpoint);
1083 }
1084
1085 int target_remove_breakpoint(struct target *target,
1086 struct breakpoint *breakpoint)
1087 {
1088 return target->type->remove_breakpoint(target, breakpoint);
1089 }
1090
1091 int target_add_watchpoint(struct target *target,
1092 struct watchpoint *watchpoint)
1093 {
1094 if (target->state != TARGET_HALTED) {
1095 LOG_WARNING("target %s is not halted", target_name(target));
1096 return ERROR_TARGET_NOT_HALTED;
1097 }
1098 return target->type->add_watchpoint(target, watchpoint);
1099 }
1100 int target_remove_watchpoint(struct target *target,
1101 struct watchpoint *watchpoint)
1102 {
1103 return target->type->remove_watchpoint(target, watchpoint);
1104 }
1105 int target_hit_watchpoint(struct target *target,
1106 struct watchpoint **hit_watchpoint)
1107 {
1108 if (target->state != TARGET_HALTED) {
1109 LOG_WARNING("target %s is not halted", target->cmd_name);
1110 return ERROR_TARGET_NOT_HALTED;
1111 }
1112
1113 if (target->type->hit_watchpoint == NULL) {
1114 /* For backward compatible, if hit_watchpoint is not implemented,
1115 * return ERROR_FAIL such that gdb_server will not take the nonsense
1116 * information. */
1117 return ERROR_FAIL;
1118 }
1119
1120 return target->type->hit_watchpoint(target, hit_watchpoint);
1121 }
1122
1123 int target_get_gdb_reg_list(struct target *target,
1124 struct reg **reg_list[], int *reg_list_size,
1125 enum target_register_class reg_class)
1126 {
1127 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1128 }
1129 int target_step(struct target *target,
1130 int current, uint32_t address, int handle_breakpoints)
1131 {
1132 return target->type->step(target, current, address, handle_breakpoints);
1133 }
1134
1135 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1136 {
1137 if (target->state != TARGET_HALTED) {
1138 LOG_WARNING("target %s is not halted", target->cmd_name);
1139 return ERROR_TARGET_NOT_HALTED;
1140 }
1141 return target->type->get_gdb_fileio_info(target, fileio_info);
1142 }
1143
1144 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1145 {
1146 if (target->state != TARGET_HALTED) {
1147 LOG_WARNING("target %s is not halted", target->cmd_name);
1148 return ERROR_TARGET_NOT_HALTED;
1149 }
1150 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1151 }
1152
1153 int target_profiling(struct target *target, uint32_t *samples,
1154 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1155 {
1156 if (target->state != TARGET_HALTED) {
1157 LOG_WARNING("target %s is not halted", target->cmd_name);
1158 return ERROR_TARGET_NOT_HALTED;
1159 }
1160 return target->type->profiling(target, samples, max_num_samples,
1161 num_samples, seconds);
1162 }
1163
1164 /**
1165 * Reset the @c examined flag for the given target.
1166 * Pure paranoia -- targets are zeroed on allocation.
1167 */
1168 static void target_reset_examined(struct target *target)
1169 {
1170 target->examined = false;
1171 }
1172
/* Stub installed by target_init_one() when a driver declares an MMU
 * but provides no physical-memory read hook; always fails loudly. */
static int err_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}
1179
/* Stub installed by target_init_one() when a driver declares an MMU
 * but provides no physical-memory write hook; always fails loudly. */
static int err_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}
1186
1187 static int handle_target(void *priv);
1188
/* Initialize a single target: install default implementations for any
 * optional target_type hooks the driver left NULL, then run the
 * driver's mandatory init_target() callback. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	/* start from a known not-examined state */
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (type->examine == NULL)
		type->examine = default_examine;

	if (type->check_reset == NULL)
		type->check_reset = default_check_reset;

	/* every driver must supply init_target */
	assert(type->init_target != NULL);

	int retval = type->init_target(cmd_ctx, target);
	if (ERROR_OK != retval) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (type->write_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing write_phys_memory",
					type->name);
			type->write_phys_memory = err_write_phys_memory;
		}
		if (type->read_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing read_phys_memory",
					type->name);
			type->read_phys_memory = err_read_phys_memory;
		}
		if (type->virt2phys == NULL) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* install generic fallbacks for the remaining optional hooks */
	if (target->type->read_buffer == NULL)
		target->type->read_buffer = target_read_buffer_default;

	if (target->type->write_buffer == NULL)
		target->type->write_buffer = target_write_buffer_default;

	if (target->type->get_gdb_fileio_info == NULL)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (target->type->gdb_fileio_end == NULL)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (target->type->profiling == NULL)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}
1258
1259 static int target_init(struct command_context *cmd_ctx)
1260 {
1261 struct target *target;
1262 int retval;
1263
1264 for (target = all_targets; target; target = target->next) {
1265 retval = target_init_one(cmd_ctx, target);
1266 if (ERROR_OK != retval)
1267 return retval;
1268 }
1269
1270 if (!all_targets)
1271 return ERROR_OK;
1272
1273 retval = target_register_user_commands(cmd_ctx);
1274 if (ERROR_OK != retval)
1275 return retval;
1276
1277 retval = target_register_timer_callback(&handle_target,
1278 polling_interval, 1, cmd_ctx->interp);
1279 if (ERROR_OK != retval)
1280 return retval;
1281
1282 return ERROR_OK;
1283 }
1284
/* 'target init' command: run the user-overridable setup procs in a
 * fixed order, then initialize all targets. Idempotent: a second
 * invocation is a logged no-op. */
COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* guard so the init sequence below runs at most once per session */
	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	/* run the overridable Tcl procs in order: targets, then their
	 * event handlers, then board-level setup */
	retval = command_run_line(CMD_CTX, "init_targets");
	if (ERROR_OK != retval)
		return retval;

	retval = command_run_line(CMD_CTX, "init_target_events");
	if (ERROR_OK != retval)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (ERROR_OK != retval)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}
1314
1315 int target_register_event_callback(int (*callback)(struct target *target,
1316 enum target_event event, void *priv), void *priv)
1317 {
1318 struct target_event_callback **callbacks_p = &target_event_callbacks;
1319
1320 if (callback == NULL)
1321 return ERROR_COMMAND_SYNTAX_ERROR;
1322
1323 if (*callbacks_p) {
1324 while ((*callbacks_p)->next)
1325 callbacks_p = &((*callbacks_p)->next);
1326 callbacks_p = &((*callbacks_p)->next);
1327 }
1328
1329 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1330 (*callbacks_p)->callback = callback;
1331 (*callbacks_p)->priv = priv;
1332 (*callbacks_p)->next = NULL;
1333
1334 return ERROR_OK;
1335 }
1336
1337 int target_register_reset_callback(int (*callback)(struct target *target,
1338 enum target_reset_mode reset_mode, void *priv), void *priv)
1339 {
1340 struct target_reset_callback *entry;
1341
1342 if (callback == NULL)
1343 return ERROR_COMMAND_SYNTAX_ERROR;
1344
1345 entry = malloc(sizeof(struct target_reset_callback));
1346 if (entry == NULL) {
1347 LOG_ERROR("error allocating buffer for reset callback entry");
1348 return ERROR_COMMAND_SYNTAX_ERROR;
1349 }
1350
1351 entry->callback = callback;
1352 entry->priv = priv;
1353 list_add(&entry->list, &target_reset_callback_list);
1354
1355
1356 return ERROR_OK;
1357 }
1358
1359 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1360 {
1361 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1362 struct timeval now;
1363
1364 if (callback == NULL)
1365 return ERROR_COMMAND_SYNTAX_ERROR;
1366
1367 if (*callbacks_p) {
1368 while ((*callbacks_p)->next)
1369 callbacks_p = &((*callbacks_p)->next);
1370 callbacks_p = &((*callbacks_p)->next);
1371 }
1372
1373 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1374 (*callbacks_p)->callback = callback;
1375 (*callbacks_p)->periodic = periodic;
1376 (*callbacks_p)->time_ms = time_ms;
1377 (*callbacks_p)->removed = false;
1378
1379 gettimeofday(&now, NULL);
1380 (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
1381 time_ms -= (time_ms % 1000);
1382 (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
1383 if ((*callbacks_p)->when.tv_usec > 1000000) {
1384 (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
1385 (*callbacks_p)->when.tv_sec += 1;
1386 }
1387
1388 (*callbacks_p)->priv = priv;
1389 (*callbacks_p)->next = NULL;
1390
1391 return ERROR_OK;
1392 }
1393
1394 int target_unregister_event_callback(int (*callback)(struct target *target,
1395 enum target_event event, void *priv), void *priv)
1396 {
1397 struct target_event_callback **p = &target_event_callbacks;
1398 struct target_event_callback *c = target_event_callbacks;
1399
1400 if (callback == NULL)
1401 return ERROR_COMMAND_SYNTAX_ERROR;
1402
1403 while (c) {
1404 struct target_event_callback *next = c->next;
1405 if ((c->callback == callback) && (c->priv == priv)) {
1406 *p = next;
1407 free(c);
1408 return ERROR_OK;
1409 } else
1410 p = &(c->next);
1411 c = next;
1412 }
1413
1414 return ERROR_OK;
1415 }
1416
1417 int target_unregister_reset_callback(int (*callback)(struct target *target,
1418 enum target_reset_mode reset_mode, void *priv), void *priv)
1419 {
1420 struct target_reset_callback *entry;
1421
1422 if (callback == NULL)
1423 return ERROR_COMMAND_SYNTAX_ERROR;
1424
1425 list_for_each_entry(entry, &target_reset_callback_list, list) {
1426 if (entry->callback == callback && entry->priv == priv) {
1427 list_del(&entry->list);
1428 free(entry);
1429 break;
1430 }
1431 }
1432
1433 return ERROR_OK;
1434 }
1435
1436 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1437 {
1438 if (callback == NULL)
1439 return ERROR_COMMAND_SYNTAX_ERROR;
1440
1441 for (struct target_timer_callback *c = target_timer_callbacks;
1442 c; c = c->next) {
1443 if ((c->callback == callback) && (c->priv == priv)) {
1444 c->removed = true;
1445 return ERROR_OK;
1446 }
1447 }
1448
1449 return ERROR_FAIL;
1450 }
1451
/* Dispatch a target event: a TARGET_EVENT_HALTED is preceded by a
 * recursive TARGET_EVENT_GDB_HALT dispatch, then the Tcl-level handler
 * runs, then every registered C callback is invoked in order. */
int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED) {
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s)", event,
		Jim_Nvp_value2name_simple(nvp_target_event, event)->name);

	/* run any configured Tcl event handler before the C callbacks */
	target_handle_event(target, event);

	/* next_callback is saved up front so a callback may safely
	 * unregister (and free) its own list entry while running */
	while (callback) {
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}
1475
1476 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1477 {
1478 struct target_reset_callback *callback;
1479
1480 LOG_DEBUG("target reset %i (%s)", reset_mode,
1481 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1482
1483 list_for_each_entry(callback, &target_reset_callback_list, list)
1484 callback->callback(target, reset_mode, callback->priv);
1485
1486 return ERROR_OK;
1487 }
1488
1489 static int target_timer_callback_periodic_restart(
1490 struct target_timer_callback *cb, struct timeval *now)
1491 {
1492 int time_ms = cb->time_ms;
1493 cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
1494 time_ms -= (time_ms % 1000);
1495 cb->when.tv_sec = now->tv_sec + time_ms / 1000;
1496 if (cb->when.tv_usec > 1000000) {
1497 cb->when.tv_usec = cb->when.tv_usec - 1000000;
1498 cb->when.tv_sec += 1;
1499 }
1500 return ERROR_OK;
1501 }
1502
1503 static int target_call_timer_callback(struct target_timer_callback *cb,
1504 struct timeval *now)
1505 {
1506 cb->callback(cb->priv);
1507
1508 if (cb->periodic)
1509 return target_timer_callback_periodic_restart(cb, now);
1510
1511 return target_unregister_timer_callback(cb->callback, cb->priv);
1512 }
1513
/* Walk the timer-callback list once: reap entries flagged for removal,
 * and fire every callback that is due (or, when checktime is 0, every
 * periodic one unconditionally). Re-entrant calls are suppressed so a
 * callback cannot recursively re-run the list. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	struct timeval now;
	gettimeofday(&now, NULL);

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (*callback) {
		/* entries are only flagged 'removed' elsewhere; this is the
		 * one place where they are safely unlinked and freed */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		/* due when the deadline has passed, or unconditionally for
		 * periodic callbacks when checktime is 0 */
		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->periodic) ||
			now.tv_sec > (*callback)->when.tv_sec ||
			(now.tv_sec == (*callback)->when.tv_sec &&
			now.tv_usec >= (*callback)->when.tv_usec));

		if (call_it)
			target_call_timer_callback(*callback, &now);

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1556
/* Fire only the timer callbacks whose deadline has passed. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1561
/* invoke periodic callbacks immediately, regardless of their deadline */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1567
1568 /* Prints the working area layout for debug purposes */
1569 static void print_wa_layout(struct target *target)
1570 {
1571 struct working_area *c = target->working_areas;
1572
1573 while (c) {
1574 LOG_DEBUG("%c%c 0x%08"PRIx32"-0x%08"PRIx32" (%"PRIu32" bytes)",
1575 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1576 c->address, c->address + c->size - 1, c->size);
1577 c = c->next;
1578 }
1579 }
1580
/* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
static void target_split_working_area(struct working_area *area, uint32_t size)
{
	assert(area->free);	/* Shouldn't split an allocated area */
	assert(size <= area->size);	/* Caller should guarantee this */

	/* Split only if not already the right size */
	if (size < area->size) {
		struct working_area *new_wa = malloc(sizeof(*new_wa));

		/* On allocation failure the area is silently left unsplit;
		 * the caller then hands out the oversized area, which wastes
		 * space but stays correct. */
		if (new_wa == NULL)
			return;

		/* the new free area covers the tail of the original one */
		new_wa->next = area->next;
		new_wa->size = area->size - size;
		new_wa->address = area->address + size;
		new_wa->backup = NULL;
		new_wa->user = NULL;
		new_wa->free = true;

		area->next = new_wa;
		area->size = size;

		/* If backup memory was allocated to this area, it has the wrong size
		 * now so free it and it will be reallocated if/when needed */
		if (area->backup) {
			free(area->backup);
			area->backup = NULL;
		}
	}
}
1612
1613 /* Merge all adjacent free areas into one */
1614 static void target_merge_working_areas(struct target *target)
1615 {
1616 struct working_area *c = target->working_areas;
1617
1618 while (c && c->next) {
1619 assert(c->next->address == c->address + c->size); /* This is an invariant */
1620
1621 /* Find two adjacent free areas */
1622 if (c->free && c->next->free) {
1623 /* Merge the last into the first */
1624 c->size += c->next->size;
1625
1626 /* Remove the last */
1627 struct working_area *to_be_freed = c->next;
1628 c->next = c->next->next;
1629 if (to_be_freed->backup)
1630 free(to_be_freed->backup);
1631 free(to_be_freed);
1632
1633 /* If backup memory was allocated to the remaining area, it's has
1634 * the wrong size now */
1635 if (c->backup) {
1636 free(c->backup);
1637 c->backup = NULL;
1638 }
1639 } else {
1640 c = c->next;
1641 }
1642 }
1643 }
1644
/* Try to allocate 'size' bytes (rounded up to a multiple of 4) from
 * the target's working area. On first use the area list is created,
 * choosing the physical or virtual base address depending on the MMU
 * state. If backup is enabled, the original target memory is saved
 * before the area is handed out. Returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no large-enough free area
 * exists; does not log a warning (see target_alloc_working_area). */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (target->working_areas == NULL) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			/* MMU off: a physical base address must be configured */
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory 0x%08"PRIx32,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			/* MMU on: a virtual base address must be configured */
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory 0x%08"PRIx32,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* on malloc failure this stores NULL; the search below then
		 * finds no area and reports resource-not-available */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (c == NULL)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %"PRIu32" bytes at address 0x%08"PRIx32, size, c->address);

	if (target->backup_working_area) {
		/* save the current target memory so it can be restored on free */
		if (c->backup == NULL) {
			c->backup = malloc(c->size);
			if (c->backup == NULL)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
1738
1739 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1740 {
1741 int retval;
1742
1743 retval = target_alloc_working_area_try(target, size, area);
1744 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1745 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
1746 return retval;
1747
1748 }
1749
1750 static int target_restore_working_area(struct target *target, struct working_area *area)
1751 {
1752 int retval = ERROR_OK;
1753
1754 if (target->backup_working_area && area->backup != NULL) {
1755 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1756 if (retval != ERROR_OK)
1757 LOG_ERROR("failed to restore %"PRIu32" bytes of working area at address 0x%08"PRIx32,
1758 area->size, area->address);
1759 }
1760
1761 return retval;
1762 }
1763
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	int retval = ERROR_OK;

	/* freeing an already-free area is a no-op */
	if (area->free)
		return retval;

	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %"PRIu32" bytes of working area at address 0x%08"PRIx32,
			area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* coalesce adjacent free areas for future allocations */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
1797
/* Free a working area, restoring the backed-up target memory first. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
1802
/* free resources and restore memory, if restoring memory fails,
 * free up resources anyway
 */
static void target_free_all_working_areas_restore(struct target *target, int restore)
{
	struct working_area *c = target->working_areas;

	LOG_DEBUG("freeing all working areas");

	/* Loop through all areas, restoring the allocated ones and marking them as free */
	while (c) {
		if (!c->free) {
			if (restore)
				target_restore_working_area(target, c);
			c->free = true;
			/* invalidate the caller's handle; see the TODO in
			 * target_free_working_area_restore() */
			*c->user = NULL; /* Same as above */
			c->user = NULL;
		}
		c = c->next;
	}

	/* Run a merge pass to combine all areas into one */
	target_merge_working_areas(target);

	print_wa_layout(target);
}
1829
/* Free every working area, restoring backed-up target memory first. */
void target_free_all_working_areas(struct target *target)
{
	target_free_all_working_areas_restore(target, 1);
}
1834
1835 /* Find the largest number of bytes that can be allocated */
1836 uint32_t target_get_working_area_avail(struct target *target)
1837 {
1838 struct working_area *c = target->working_areas;
1839 uint32_t max_size = 0;
1840
1841 if (c == NULL)
1842 return target->working_area_size;
1843
1844 while (c) {
1845 if (c->free && max_size < c->size)
1846 max_size = c->size;
1847
1848 c = c->next;
1849 }
1850
1851 return max_size;
1852 }
1853
1854 int target_arch_state(struct target *target)
1855 {
1856 int retval;
1857 if (target == NULL) {
1858 LOG_USER("No target has been configured");
1859 return ERROR_OK;
1860 }
1861
1862 LOG_USER("target state: %s", target_state_name(target));
1863
1864 if (target->state != TARGET_HALTED)
1865 return ERROR_OK;
1866
1867 retval = target->type->arch_state(target);
1868 return retval;
1869 }
1870
/* Default .get_gdb_fileio_info hook for targets without semihosting. */
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info)
{
	/* A target without semihosting support has no need to provide a
	 * .get_gdb_fileio_info callback; returning ERROR_FAIL makes
	 * gdb_server send a plain "Txx" (target halted) reply every time. */
	return ERROR_FAIL;
}
1880
/* Default .gdb_fileio_end hook: nothing to clean up, always succeeds. */
static int target_gdb_fileio_end_default(struct target *target,
		int retcode, int fileio_errno, bool ctrl_c)
{
	return ERROR_OK;
}
1886
1887 static int target_profiling_default(struct target *target, uint32_t *samples,
1888 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1889 {
1890 struct timeval timeout, now;
1891
1892 gettimeofday(&timeout, NULL);
1893 timeval_add_time(&timeout, seconds, 0);
1894
1895 LOG_INFO("Starting profiling. Halting and resuming the"
1896 " target as often as we can...");
1897
1898 uint32_t sample_count = 0;
1899 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
1900 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
1901
1902 int retval = ERROR_OK;
1903 for (;;) {
1904 target_poll(target);
1905 if (target->state == TARGET_HALTED) {
1906 uint32_t t = buf_get_u32(reg->value, 0, 32);
1907 samples[sample_count++] = t;
1908 /* current pc, addr = 0, do not handle breakpoints, not debugging */
1909 retval = target_resume(target, 1, 0, 0, 0);
1910 target_poll(target);
1911 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
1912 } else if (target->state == TARGET_RUNNING) {
1913 /* We want to quickly sample the PC. */
1914 retval = target_halt(target);
1915 } else {
1916 LOG_INFO("Target not halted or running");
1917 retval = ERROR_OK;
1918 break;
1919 }
1920
1921 if (retval != ERROR_OK)
1922 break;
1923
1924 gettimeofday(&now, NULL);
1925 if ((sample_count >= max_num_samples) ||
1926 ((now.tv_sec >= timeout.tv_sec) && (now.tv_usec >= timeout.tv_usec))) {
1927 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
1928 break;
1929 }
1930 }
1931
1932 *num_samples = sample_count;
1933 return retval;
1934 }
1935
/* Single aligned words are guaranteed to use 16 or 32 bit access
 * mode respectively, otherwise data is handled as quickly as
 * possible
 */
int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
{
	LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
			(int)size, (unsigned)address);

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	/* a zero-length write trivially succeeds */
	if (size == 0)
		return ERROR_OK;

	/* reject ranges that wrap past the end of the address space */
	if ((address + size - 1) < address) {
		/* GDB can request this when e.g. PC is 0xfffffffc*/
		LOG_ERROR("address + size wrapped(0x%08x, 0x%08x)",
				(unsigned)address,
				(unsigned)size);
		return ERROR_FAIL;
	}

	return target->type->write_buffer(target, address, size, buffer);
}
1963
1964 static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t count, const uint8_t *buffer)
1965 {
1966 uint32_t size;
1967
1968 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
1969 * will have something to do with the size we leave to it. */
1970 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
1971 if (address & size) {
1972 int retval = target_write_memory(target, address, size, 1, buffer);
1973 if (retval != ERROR_OK)
1974 return retval;
1975 address += size;
1976 count -= size;
1977 buffer += size;
1978 }
1979 }
1980
1981 /* Write the data with as large access size as possible. */
1982 for (; size > 0; size /= 2) {
1983 uint32_t aligned = count - count % size;
1984 if (aligned > 0) {
1985 int retval = target_write_memory(target, address, size, aligned / size, buffer);
1986 if (retval != ERROR_OK)
1987 return retval;
1988 address += aligned;
1989 count -= aligned;
1990 buffer += aligned;
1991 }
1992 }
1993
1994 return ERROR_OK;
1995 }
1996
/* Single aligned words are guaranteed to use 16 or 32 bit access
 * mode respectively, otherwise data is handled as quickly as
 * possible
 */
int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
{
	LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
			(int)size, (unsigned)address);

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	/* a zero-length read trivially succeeds */
	if (size == 0)
		return ERROR_OK;

	/* reject ranges that wrap past the end of the address space */
	if ((address + size - 1) < address) {
		/* GDB can request this when e.g. PC is 0xfffffffc*/
		LOG_ERROR("address + size wrapped(0x%08" PRIx32 ", 0x%08" PRIx32 ")",
				address,
				size);
		return ERROR_FAIL;
	}

	return target->type->read_buffer(target, address, size, buffer);
}
2024
/* Default read_buffer implementation: splits an arbitrary byte count
 * into naturally aligned accesses so the largest legal access size is
 * used for the bulk of the transfer. */
static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;

	/* Align up to maximum 4 bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
		/* (address & size) is nonzero iff 'address' is misaligned with
		 * respect to the next larger access width. */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* number of remaining bytes transferable at this width */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2057
/* Compute a CRC over 'size' bytes of target memory at 'address'.
 * Prefers the target type's own checksum_memory implementation; if
 * that fails, falls back to reading the memory back to the host and
 * checksumming it there. */
int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* fallback: read the region and checksum it host-side */
		buffer = malloc(size);
		if (buffer == NULL) {
			LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): only whole 32-bit words are converted; any
		 * trailing size % 4 bytes are checksummed as read — confirm
		 * callers always pass word-multiple sizes. */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2097
2098 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
2099 {
2100 int retval;
2101 if (!target_was_examined(target)) {
2102 LOG_ERROR("Target not examined yet");
2103 return ERROR_FAIL;
2104 }
2105
2106 if (target->type->blank_check_memory == 0)
2107 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2108
2109 retval = target->type->blank_check_memory(target, address, size, blank);
2110
2111 return retval;
2112 }
2113
2114 int target_read_u64(struct target *target, uint64_t address, uint64_t *value)
2115 {
2116 uint8_t value_buf[8];
2117 if (!target_was_examined(target)) {
2118 LOG_ERROR("Target not examined yet");
2119 return ERROR_FAIL;
2120 }
2121
2122 int retval = target_read_memory(target, address, 8, 1, value_buf);
2123
2124 if (retval == ERROR_OK) {
2125 *value = target_buffer_get_u64(target, value_buf);
2126 LOG_DEBUG("address: 0x%" PRIx64 ", value: 0x%16.16" PRIx64 "",
2127 address,
2128 *value);
2129 } else {
2130 *value = 0x0;
2131 LOG_DEBUG("address: 0x%" PRIx64 " failed",
2132 address);
2133 }
2134
2135 return retval;
2136 }
2137
2138 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
2139 {
2140 uint8_t value_buf[4];
2141 if (!target_was_examined(target)) {
2142 LOG_ERROR("Target not examined yet");
2143 return ERROR_FAIL;
2144 }
2145
2146 int retval = target_read_memory(target, address, 4, 1, value_buf);
2147
2148 if (retval == ERROR_OK) {
2149 *value = target_buffer_get_u32(target, value_buf);
2150 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
2151 address,
2152 *value);
2153 } else {
2154 *value = 0x0;
2155 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2156 address);
2157 }
2158
2159 return retval;
2160 }
2161
2162 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
2163 {
2164 uint8_t value_buf[2];
2165 if (!target_was_examined(target)) {
2166 LOG_ERROR("Target not examined yet");
2167 return ERROR_FAIL;
2168 }
2169
2170 int retval = target_read_memory(target, address, 2, 1, value_buf);
2171
2172 if (retval == ERROR_OK) {
2173 *value = target_buffer_get_u16(target, value_buf);
2174 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
2175 address,
2176 *value);
2177 } else {
2178 *value = 0x0;
2179 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2180 address);
2181 }
2182
2183 return retval;
2184 }
2185
2186 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
2187 {
2188 if (!target_was_examined(target)) {
2189 LOG_ERROR("Target not examined yet");
2190 return ERROR_FAIL;
2191 }
2192
2193 int retval = target_read_memory(target, address, 1, 1, value);
2194
2195 if (retval == ERROR_OK) {
2196 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
2197 address,
2198 *value);
2199 } else {
2200 *value = 0x0;
2201 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2202 address);
2203 }
2204
2205 return retval;
2206 }
2207
2208 int target_write_u64(struct target *target, uint64_t address, uint64_t value)
2209 {
2210 int retval;
2211 uint8_t value_buf[8];
2212 if (!target_was_examined(target)) {
2213 LOG_ERROR("Target not examined yet");
2214 return ERROR_FAIL;
2215 }
2216
2217 LOG_DEBUG("address: 0x%" PRIx64 ", value: 0x%16.16" PRIx64 "",
2218 address,
2219 value);
2220
2221 target_buffer_set_u64(target, value_buf, value);
2222 retval = target_write_memory(target, address, 8, 1, value_buf);
2223 if (retval != ERROR_OK)
2224 LOG_DEBUG("failed: %i", retval);
2225
2226 return retval;
2227 }
2228
2229 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
2230 {
2231 int retval;
2232 uint8_t value_buf[4];
2233 if (!target_was_examined(target)) {
2234 LOG_ERROR("Target not examined yet");
2235 return ERROR_FAIL;
2236 }
2237
2238 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
2239 address,
2240 value);
2241
2242 target_buffer_set_u32(target, value_buf, value);
2243 retval = target_write_memory(target, address, 4, 1, value_buf);
2244 if (retval != ERROR_OK)
2245 LOG_DEBUG("failed: %i", retval);
2246
2247 return retval;
2248 }
2249
2250 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
2251 {
2252 int retval;
2253 uint8_t value_buf[2];
2254 if (!target_was_examined(target)) {
2255 LOG_ERROR("Target not examined yet");
2256 return ERROR_FAIL;
2257 }
2258
2259 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
2260 address,
2261 value);
2262
2263 target_buffer_set_u16(target, value_buf, value);
2264 retval = target_write_memory(target, address, 2, 1, value_buf);
2265 if (retval != ERROR_OK)
2266 LOG_DEBUG("failed: %i", retval);
2267
2268 return retval;
2269 }
2270
2271 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
2272 {
2273 int retval;
2274 if (!target_was_examined(target)) {
2275 LOG_ERROR("Target not examined yet");
2276 return ERROR_FAIL;
2277 }
2278
2279 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
2280 address, value);
2281
2282 retval = target_write_memory(target, address, 1, 1, &value);
2283 if (retval != ERROR_OK)
2284 LOG_DEBUG("failed: %i", retval);
2285
2286 return retval;
2287 }
2288
2289 static int find_target(struct command_context *cmd_ctx, const char *name)
2290 {
2291 struct target *target = get_target(name);
2292 if (target == NULL) {
2293 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2294 return ERROR_FAIL;
2295 }
2296 if (!target->tap->enabled) {
2297 LOG_USER("Target: TAP %s is disabled, "
2298 "can't be the current target\n",
2299 target->tap->dotted_name);
2300 return ERROR_FAIL;
2301 }
2302
2303 cmd_ctx->current_target = target->target_number;
2304 return ERROR_OK;
2305 }
2306
2307
/* "targets" command: with one argument, make the named target current;
 * with none (or after a failed lookup, so the valid names are shown),
 * print a table of all configured targets. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD_CTX, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD_CTX, " TargetName Type Endian TapName State ");
	command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the current target */
		if (CMD_CTX->current_target == target->target_number)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD_CTX,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				Jim_Nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2350
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* latest sensed line state, updated by sense_handler() */
static int powerDropout;
static int srstAsserted;

/* pending-action flags: set by sense_handler() when an edge is
 * detected, consumed and cleared by handle_target() */
static int runPowerRestore;
static int runPowerDropout;
static int runSrstAsserted;
static int runSrstDeasserted;
2360
/* Sample the power-dropout and srst sense inputs and latch edge events
 * into the run* flags for handle_target() to act upon.  Dropout and
 * srst-deassert events are rate-limited to one per 2000ms so a bouncing
 * line doesn't fire their handlers repeatedly. */
static int sense_handler(void)
{
	/* previous samples, kept across calls for edge detection */
	static int prevSrstAsserted;
	static int prevPowerdropout;

	int retval = jtag_power_dropout(&powerDropout);
	if (retval != ERROR_OK)
		return retval;

	int powerRestored;
	powerRestored = prevPowerdropout && !powerDropout;
	if (powerRestored)
		runPowerRestore = 1;

	long long current = timeval_ms();
	static long long lastPower;
	/* suppress a new dropout event within 2000ms of the previous one */
	int waitMore = lastPower + 2000 > current;
	if (powerDropout && !waitMore) {
		runPowerDropout = 1;
		lastPower = current;
	}

	retval = jtag_srst_asserted(&srstAsserted);
	if (retval != ERROR_OK)
		return retval;

	int srstDeasserted;
	srstDeasserted = prevSrstAsserted && !srstAsserted;

	/* likewise rate-limit srst-deassert events to one per 2000ms */
	static long long lastSrst;
	waitMore = lastSrst + 2000 > current;
	if (srstDeasserted && !waitMore) {
		runSrstDeasserted = 1;
		lastSrst = current;
	}

	if (!prevSrstAsserted && srstAsserted)
		runSrstAsserted = 1;

	prevSrstAsserted = srstAsserted;
	prevPowerdropout = powerDropout;

	if (srstDeasserted || powerRestored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2412
2413 /* process target state changes */
2414 static int handle_target(void *priv)
2415 {
2416 Jim_Interp *interp = (Jim_Interp *)priv;
2417 int retval = ERROR_OK;
2418
2419 if (!is_jtag_poll_safe()) {
2420 /* polling is disabled currently */
2421 return ERROR_OK;
2422 }
2423
2424 /* we do not want to recurse here... */
2425 static int recursive;
2426 if (!recursive) {
2427 recursive = 1;
2428 sense_handler();
2429 /* danger! running these procedures can trigger srst assertions and power dropouts.
2430 * We need to avoid an infinite loop/recursion here and we do that by
2431 * clearing the flags after running these events.
2432 */
2433 int did_something = 0;
2434 if (runSrstAsserted) {
2435 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2436 Jim_Eval(interp, "srst_asserted");
2437 did_something = 1;
2438 }
2439 if (runSrstDeasserted) {
2440 Jim_Eval(interp, "srst_deasserted");
2441 did_something = 1;
2442 }
2443 if (runPowerDropout) {
2444 LOG_INFO("Power dropout detected, running power_dropout proc.");
2445 Jim_Eval(interp, "power_dropout");
2446 did_something = 1;
2447 }
2448 if (runPowerRestore) {
2449 Jim_Eval(interp, "power_restore");
2450 did_something = 1;
2451 }
2452
2453 if (did_something) {
2454 /* clear detect flags */
2455 sense_handler();
2456 }
2457
2458 /* clear action flags */
2459
2460 runSrstAsserted = 0;
2461 runSrstDeasserted = 0;
2462 runPowerRestore = 0;
2463 runPowerDropout = 0;
2464
2465 recursive = 0;
2466 }
2467
2468 /* Poll targets for state changes unless that's globally disabled.
2469 * Skip targets that are currently disabled.
2470 */
2471 for (struct target *target = all_targets;
2472 is_jtag_poll_safe() && target;
2473 target = target->next) {
2474
2475 if (!target_was_examined(target))
2476 continue;
2477
2478 if (!target->tap->enabled)
2479 continue;
2480
2481 if (target->backoff.times > target->backoff.count) {
2482 /* do not poll this time as we failed previously */
2483 target->backoff.count++;
2484 continue;
2485 }
2486 target->backoff.count = 0;
2487
2488 /* only poll target if we've got power and srst isn't asserted */
2489 if (!powerDropout && !srstAsserted) {
2490 /* polling may fail silently until the target has been examined */
2491 retval = target_poll(target);
2492 if (retval != ERROR_OK) {
2493 /* 100ms polling interval. Increase interval between polling up to 5000ms */
2494 if (target->backoff.times * polling_interval < 5000) {
2495 target->backoff.times *= 2;
2496 target->backoff.times++;
2497 }
2498 LOG_USER("Polling target %s failed, GDB will be halted. Polling again in %dms",
2499 target_name(target),
2500 target->backoff.times * polling_interval);
2501
2502 /* Tell GDB to halt the debugger. This allows the user to
2503 * run monitor commands to handle the situation.
2504 */
2505 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2506 return retval;
2507 }
2508 /* Since we succeeded, we reset backoff count */
2509 if (target->backoff.times > 0) {
2510 LOG_USER("Polling target %s succeeded again, trying to reexamine", target_name(target));
2511 target_reset_examined(target);
2512 retval = target_examine_one(target);
2513 /* Target examination could have failed due to unstable connection,
2514 * but we set the examined flag anyway to repoll it later */
2515 if (retval != ERROR_OK) {
2516 target->examined = true;
2517 return retval;
2518 }
2519 }
2520
2521 target->backoff.times = 0;
2522 }
2523 }
2524
2525 return retval;
2526 }
2527
/* "reg" command:
 *   reg                      - list all registers of the current target
 *   reg <num|name> [force]   - display one register (force = re-read)
 *   reg <num|name> <value>   - set one register */
COMMAND_HANDLER(handle_reg_command)
{
	struct target *target;
	struct reg *reg = NULL;
	unsigned count = 0;
	char *value;

	LOG_DEBUG("-");

	target = get_current_target(CMD_CTX);

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD_CTX, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* only print cached values if they are valid */
				if (reg->valid) {
					value = buf_to_str(reg->value,
							reg->size, 16);
					command_print(CMD_CTX,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size) ;
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk the caches counting registers until the num'th is found */
		struct reg_cache *cache = target->reg_cache;
		count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD_CTX, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);

		if (!reg) {
			command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
			return ERROR_OK;
		}
	}

	assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" discards the cached value so it is re-read below */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0)
			reg->type->get(reg);
		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (buf == NULL)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		reg->type->set(reg, buf);

		/* echo the value written back to the user */
		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);

		free(buf);

		return ERROR_OK;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;
}
2647
2648 COMMAND_HANDLER(handle_poll_command)
2649 {
2650 int retval = ERROR_OK;
2651 struct target *target = get_current_target(CMD_CTX);
2652
2653 if (CMD_ARGC == 0) {
2654 command_print(CMD_CTX, "background polling: %s",
2655 jtag_poll_get_enabled() ? "on" : "off");
2656 command_print(CMD_CTX, "TAP: %s (%s)",
2657 target->tap->dotted_name,
2658 target->tap->enabled ? "enabled" : "disabled");
2659 if (!target->tap->enabled)
2660 return ERROR_OK;
2661 retval = target_poll(target);
2662 if (retval != ERROR_OK)
2663 return retval;
2664 retval = target_arch_state(target);
2665 if (retval != ERROR_OK)
2666 return retval;
2667 } else if (CMD_ARGC == 1) {
2668 bool enable;
2669 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2670 jtag_poll_set_enabled(enable);
2671 } else
2672 return ERROR_COMMAND_SYNTAX_ERROR;
2673
2674 return retval;
2675 }
2676
2677 COMMAND_HANDLER(handle_wait_halt_command)
2678 {
2679 if (CMD_ARGC > 1)
2680 return ERROR_COMMAND_SYNTAX_ERROR;
2681
2682 unsigned ms = DEFAULT_HALT_TIMEOUT;
2683 if (1 == CMD_ARGC) {
2684 int retval = parse_uint(CMD_ARGV[0], &ms);
2685 if (ERROR_OK != retval)
2686 return ERROR_COMMAND_SYNTAX_ERROR;
2687 }
2688
2689 struct target *target = get_current_target(CMD_CTX);
2690 return target_wait_state(target, TARGET_HALTED, ms);
2691 }
2692
2693 /* wait for target state to change. The trick here is to have a low
2694 * latency for short waits and not to suck up all the CPU time
2695 * on longer waits.
2696 *
2697 * After 500ms, keep_alive() is invoked
2698 */
2699 int target_wait_state(struct target *target, enum target_state state, int ms)
2700 {
2701 int retval;
2702 long long then = 0, cur;
2703 int once = 1;
2704
2705 for (;;) {
2706 retval = target_poll(target);
2707 if (retval != ERROR_OK)
2708 return retval;
2709 if (target->state == state)
2710 break;
2711 cur = timeval_ms();
2712 if (once) {
2713 once = 0;
2714 then = timeval_ms();
2715 LOG_DEBUG("waiting for target %s...",
2716 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2717 }
2718
2719 if (cur-then > 500)
2720 keep_alive();
2721
2722 if ((cur-then) > ms) {
2723 LOG_ERROR("timed out while waiting for target %s",
2724 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2725 return ERROR_FAIL;
2726 }
2727 }
2728
2729 return ERROR_OK;
2730 }
2731
2732 COMMAND_HANDLER(handle_halt_command)
2733 {
2734 LOG_DEBUG("-");
2735
2736 struct target *target = get_current_target(CMD_CTX);
2737 int retval = target_halt(target);
2738 if (ERROR_OK != retval)
2739 return retval;
2740
2741 if (CMD_ARGC == 1) {
2742 unsigned wait_local;
2743 retval = parse_uint(CMD_ARGV[0], &wait_local);
2744 if (ERROR_OK != retval)
2745 return ERROR_COMMAND_SYNTAX_ERROR;
2746 if (!wait_local)
2747 return ERROR_OK;
2748 }
2749
2750 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2751 }
2752
2753 COMMAND_HANDLER(handle_soft_reset_halt_command)
2754 {
2755 struct target *target = get_current_target(CMD_CTX);
2756
2757 LOG_USER("requesting target halt and executing a soft reset");
2758
2759 target_soft_reset_halt(target);
2760
2761 return ERROR_OK;
2762 }
2763
2764 COMMAND_HANDLER(handle_reset_command)
2765 {
2766 if (CMD_ARGC > 1)
2767 return ERROR_COMMAND_SYNTAX_ERROR;
2768
2769 enum target_reset_mode reset_mode = RESET_RUN;
2770 if (CMD_ARGC == 1) {
2771 const Jim_Nvp *n;
2772 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2773 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
2774 return ERROR_COMMAND_SYNTAX_ERROR;
2775 reset_mode = n->value;
2776 }
2777
2778 /* reset *all* targets */
2779 return target_process_reset(CMD_CTX, reset_mode);
2780 }
2781
2782
2783 COMMAND_HANDLER(handle_resume_command)
2784 {
2785 int current = 1;
2786 if (CMD_ARGC > 1)
2787 return ERROR_COMMAND_SYNTAX_ERROR;
2788
2789 struct target *target = get_current_target(CMD_CTX);
2790
2791 /* with no CMD_ARGV, resume from current pc, addr = 0,
2792 * with one arguments, addr = CMD_ARGV[0],
2793 * handle breakpoints, not debugging */
2794 uint32_t addr = 0;
2795 if (CMD_ARGC == 1) {
2796 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2797 current = 0;
2798 }
2799
2800 return target_resume(target, current, addr, 1, 0);
2801 }
2802
2803 COMMAND_HANDLER(handle_step_command)
2804 {
2805 if (CMD_ARGC > 1)
2806 return ERROR_COMMAND_SYNTAX_ERROR;
2807
2808 LOG_DEBUG("-");
2809
2810 /* with no CMD_ARGV, step from current pc, addr = 0,
2811 * with one argument addr = CMD_ARGV[0],
2812 * handle breakpoints, debugging */
2813 uint32_t addr = 0;
2814 int current_pc = 1;
2815 if (CMD_ARGC == 1) {
2816 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2817 current_pc = 0;
2818 }
2819
2820 struct target *target = get_current_target(CMD_CTX);
2821
2822 return target->type->step(target, current_pc, addr, 1);
2823 }
2824
/* Pretty-print a buffer of memory for handle_md_command: up to 32 bytes
 * per line, each item rendered at the requested access size after
 * conversion from target endianness. */
static void handle_md_output(struct command_context *cmd_ctx,
		struct target *target, uint32_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	const char *value_fmt;
	switch (size) {
		case 4:
			value_fmt = "%8.8x ";
			break;
		case 2:
			value_fmt = "%4.4x ";
			break;
		case 1:
			value_fmt = "%2.2x ";
			break;
		default:
			/* "can't happen", caller checked */
			LOG_ERROR("invalid memory read size: %u", size);
			return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* at the start of each line, emit the address prefix */
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					"0x%8.8x: ",
					(unsigned)(address + (i*size)));
		}

		uint32_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		/* decode the item honoring the target's byte order */
		switch (size) {
			case 4:
				value = target_buffer_get_u32(target, value_ptr);
				break;
			case 2:
				value = target_buffer_get_u16(target, value_ptr);
				break;
			case 1:
				value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at the end of a line or of the buffer */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd_ctx, "%s", output);
			output_len = 0;
		}
	}
}
2882
2883 COMMAND_HANDLER(handle_md_command)
2884 {
2885 if (CMD_ARGC < 1)
2886 return ERROR_COMMAND_SYNTAX_ERROR;
2887
2888 unsigned size = 0;
2889 switch (CMD_NAME[2]) {
2890 case 'w':
2891 size = 4;
2892 break;
2893 case 'h':
2894 size = 2;
2895 break;
2896 case 'b':
2897 size = 1;
2898 break;
2899 default:
2900 return ERROR_COMMAND_SYNTAX_ERROR;
2901 }
2902
2903 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2904 int (*fn)(struct target *target,
2905 uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
2906 if (physical) {
2907 CMD_ARGC--;
2908 CMD_ARGV++;
2909 fn = target_read_phys_memory;
2910 } else
2911 fn = target_read_memory;
2912 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2913 return ERROR_COMMAND_SYNTAX_ERROR;
2914
2915 uint32_t address;
2916 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2917
2918 unsigned count = 1;
2919 if (CMD_ARGC == 2)
2920 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2921
2922 uint8_t *buffer = calloc(count, size);
2923
2924 struct target *target = get_current_target(CMD_CTX);
2925 int retval = fn(target, address, size, count, buffer);
2926 if (ERROR_OK == retval)
2927 handle_md_output(CMD_CTX, target, address, size, count, buffer);
2928
2929 free(buffer);
2930
2931 return retval;
2932 }
2933
/* Signature shared by target_write_memory() and target_write_phys_memory(),
 * so helpers like target_fill_mem() can be parameterized over either. */
typedef int (*target_write_fn)(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
2936
/* Fill 'c' items of 'data_size' bytes, each equal to 'b', starting at
 * 'address', using the supplied write function.  The pattern is staged
 * in a large host-side chunk so big fills run at a sane speed. */
static int target_fill_mem(struct target *target,
		uint32_t address,
		target_write_fn fn,
		unsigned data_size,
		/* value */
		uint32_t b,
		/* count */
		unsigned c)
{
	/* We have to write in reasonably large chunks to be able
	 * to fill large memory areas with any sane speed */
	const unsigned chunk_size = 16384;
	uint8_t *target_buf = malloc(chunk_size * data_size);
	if (target_buf == NULL) {
		LOG_ERROR("Out of memory");
		return ERROR_FAIL;
	}

	/* pre-fill one chunk with the pattern, in target endianness */
	for (unsigned i = 0; i < chunk_size; i++) {
		switch (data_size) {
		case 4:
			target_buffer_set_u32(target, target_buf + i * data_size, b);
			break;
		case 2:
			target_buffer_set_u16(target, target_buf + i * data_size, b);
			break;
		case 1:
			target_buffer_set_u8(target, target_buf + i * data_size, b);
			break;
		default:
			/* programming error: callers only pass 1, 2 or 4 */
			exit(-1);
		}
	}

	int retval = ERROR_OK;

	/* write the chunk repeatedly until 'c' items have been emitted */
	for (unsigned x = 0; x < c; x += chunk_size) {
		unsigned current;
		current = c - x;
		if (current > chunk_size)
			current = chunk_size;
		retval = fn(target, address + x * data_size, data_size, current, target_buf);
		if (retval != ERROR_OK)
			break;
		/* avoid GDB timeouts */
		keep_alive();
	}
	free(target_buf);

	return retval;
}
2988
2989
2990 COMMAND_HANDLER(handle_mw_command)
2991 {
2992 if (CMD_ARGC < 2)
2993 return ERROR_COMMAND_SYNTAX_ERROR;
2994 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2995 target_write_fn fn;
2996 if (physical) {
2997 CMD_ARGC--;
2998 CMD_ARGV++;
2999 fn = target_write_phys_memory;
3000 } else
3001 fn = target_write_memory;
3002 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3003 return ERROR_COMMAND_SYNTAX_ERROR;
3004
3005 uint32_t address;
3006 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
3007
3008 uint32_t value;
3009 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3010
3011 unsigned count = 1;
3012 if (CMD_ARGC == 3)
3013 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3014
3015 struct target *target = get_current_target(CMD_CTX);
3016 unsigned wordsize;
3017 switch (CMD_NAME[2]) {
3018 case 'w':
3019 wordsize = 4;
3020 break;
3021 case 'h':
3022 wordsize = 2;
3023 break;
3024 case 'b':
3025 wordsize = 1;
3026 break;
3027 default:
3028 return ERROR_COMMAND_SYNTAX_ERROR;
3029 }
3030
3031 return target_fill_mem(target, address, fn, wordsize, value, count);
3032 }
3033
/* Parse the shared arguments of load_image-style commands:
 * CMD_ARGV[1] optional base address, CMD_ARGV[3] optional min_address,
 * CMD_ARGV[4] optional size (folded into *max_address = min + size).
 * CMD_ARGV[0] (file name) and CMD_ARGV[2] (format) are consumed by
 * the caller. */
static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
		uint32_t *min_address, uint32_t *max_address)
{
	if (CMD_ARGC < 1 || CMD_ARGC > 5)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* a base address isn't always necessary,
	 * default to 0x0 (i.e. don't relocate) */
	if (CMD_ARGC >= 2) {
		uint32_t addr;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
		image->base_address = addr;
		image->base_address_set = 1;
	} else
		image->base_address_set = 0;

	image->start_address_set = 0;

	if (CMD_ARGC >= 4)
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
	if (CMD_ARGC == 5) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
		/* use size (given) to find max (required) */
		*max_address += *min_address;
	}

	/* an empty or inverted window is a usage error */
	if (*min_address > *max_address)
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
3065
/* "load_image" command: open an image file and write its sections into
 * target memory, optionally relocated and clipped to the
 * [min_address, max_address) window parsed above. */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	uint32_t min_address = 0;
	uint32_t max_address = 0xffffffff;
	int i;
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
			&image, &min_address, &max_address);
	if (ERROR_OK != retval)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	/* NOTE(review): open failure is reported as ERROR_OK — presumably
	 * image_open() already logged the problem; confirm callers don't
	 * rely on an error code here. */
	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_OK;

	image_size = 0x0;
	retval = ERROR_OK;
	for (i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (buffer == NULL) {
			command_print(CMD_CTX,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparision here!!! */

		/* only write the part of the section that overlaps the window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	/* report the transfer rate on success */
	if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3149
3150 COMMAND_HANDLER(handle_dump_image_command)
3151 {
3152 struct fileio fileio;
3153 uint8_t *buffer;
3154 int retval, retvaltemp;
3155 uint32_t address, size;
3156 struct duration bench;
3157 struct target *target = get_current_target(CMD_CTX);
3158
3159 if (CMD_ARGC != 3)
3160 return ERROR_COMMAND_SYNTAX_ERROR;
3161
3162 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
3163 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
3164
3165 uint32_t buf_size = (size > 4096) ? 4096 : size;
3166 buffer = malloc(buf_size);
3167 if (!buffer)
3168 return ERROR_FAIL;
3169
3170 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3171 if (retval != ERROR_OK) {
3172 free(buffer);
3173 return retval;
3174 }
3175
3176 duration_start(&bench);