/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2007-2010 Øyvind Harboe                                 *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) 2008, Duane Ellis                                       *
 *   openocd@duaneellis.com                                                *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2008 by Rick Altherr                                    *
 *   kc8apf@kc8apf.net>                                                    *
 *                                                                         *
 *   Copyright (C) 2011 by Broadcom Corporation                            *
 *   Evan Hunter - ehunter@broadcom.com                                    *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011                                     *
 *   michel.jaouen@stericsson.com : smp minimum support                    *
 *                                                                         *
 *   Copyright (C) 2011 Andreas Fritiofson                                 *
 *   andreas.fritiofson@gmail.com                                          *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 *   GNU General Public License for more details.                         *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program; if not, write to the                        *
 *   Free Software Foundation, Inc.,                                      *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.          *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/time_support.h>
#include <jtag/jtag.h>
#include <flash/nor/core.h>

#include "target.h"
#include "target_type.h"
#include "target_request.h"
#include "breakpoints.h"
#include "register.h"
#include "trace.h"
#include "image.h"
#include "rtos/rtos.h"
#include "transport/transport.h"

/* default halt wait timeout (ms) */
#define DEFAULT_HALT_TIMEOUT 5000

static int target_read_buffer_default(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, uint32_t address,
		uint32_t count, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info);
static int target_gdb_fileio_end_default(struct target *target, int retcode,
		int fileio_errno, bool ctrl_c);
static int target_profiling_default(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);

/* targets */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type cortexm_target;
extern struct target_type cortexa_target;
extern struct target_type cortexr4_target;
extern struct target_type arm11_target;
extern struct target_type mips_m4k_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type hla_target;
extern struct target_type nds32_v2_target;
extern struct target_type nds32_v3_target;
extern struct target_type nds32_v3m_target;
extern struct target_type or1k_target;
extern struct target_type quark_x10xx_target;

static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&or1k_target,
	&quark_x10xx_target,
	NULL,
};

struct target *all_targets;
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
static const int polling_interval = 100;

static const Jim_Nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

static const Jim_Nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};

static const char *target_strerror_safe(int err)
{
	const Jim_Nvp *n;

	n = Jim_Nvp_value2name_simple(nvp_error_target, err);
	if (n->name == NULL)
		return "unknown";
	else
		return n->name;
}

static const Jim_Nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
	{ .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
	{ .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
	{ .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .name = NULL, .value = -1 }
};

static const Jim_Nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};

const char *debug_reason_name(struct target *t)
{
	const char *cp;

	cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
			t->debug_reason)->name;
	if (!cp) {
		LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

const char *target_state_name(struct target *t)
{
	const char *cp;
	cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
	if (!cp) {
		LOG_ERROR("Invalid target state: %d", (int)(t->state));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

/* determine the number of the new target */
static int new_target_number(void)
{
	struct target *t;
	int x;

	/* number is 0 based */
	x = -1;
	t = all_targets;
	while (t) {
		if (x < t->target_number)
			x = t->target_number;
		t = t->next;
	}
	return x + 1;
}

/* read a uint64_t from a buffer in target memory endianness */
uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u64(buffer);
	else
		return be_to_h_u64(buffer);
}

/* read a uint32_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u32(buffer);
	else
		return be_to_h_u32(buffer);
}

/* read a uint24_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u24(buffer);
	else
		return be_to_h_u24(buffer);
}

/* read a uint16_t from a buffer in target memory endianness */
uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u16(buffer);
	else
		return be_to_h_u16(buffer);
}

/* read a uint8_t from a buffer in target memory endianness */
static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
{
	return *buffer & 0x0ff;
}

/* write a uint64_t to a buffer in target memory endianness */
void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u64_to_le(buffer, value);
	else
		h_u64_to_be(buffer, value);
}

/* write a uint32_t to a buffer in target memory endianness */
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u32_to_le(buffer, value);
	else
		h_u32_to_be(buffer, value);
}

/* write a uint24_t to a buffer in target memory endianness */
void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u24_to_le(buffer, value);
	else
		h_u24_to_be(buffer, value);
}

/* write a uint16_t to a buffer in target memory endianness */
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u16_to_le(buffer, value);
	else
		h_u16_to_be(buffer, value);
}

/* write a uint8_t to a buffer in target memory endianness */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}

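/* Illustrative sketch (not part of the OpenOCD API): round-tripping a
 * word through a scratch buffer with the accessors above.  The buffer
 * is interpreted in the target's endianness, not the host's, so the
 * same code works for big- and little-endian targets.
 *
 *	uint8_t scratch[4];
 *	target_buffer_set_u32(target, scratch, 0x12345678);
 *	uint32_t value = target_buffer_get_u32(target, scratch);
 *	assert(value == 0x12345678);
 */
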
/* read a uint64_t array from a buffer in target memory endianness */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
}

/* read a uint32_t array from a buffer in target memory endianness */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}

/* read a uint16_t array from a buffer in target memory endianness */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}

/* write a uint64_t array to a buffer in target memory endianness */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
}

/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
}

/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
}

/* return a pointer to a configured target; id is name or number */
struct target *get_target(const char *id)
{
	struct target *target;

	/* try as Tcl target name */
	for (target = all_targets; target; target = target->next) {
		if (target_name(target) == NULL)
			continue;
		if (strcmp(id, target_name(target)) == 0)
			return target;
	}

	/* It's OK to remove this fallback sometime after August 2010 or so */

	/* no match, try as number */
	unsigned num;
	if (parse_uint(id, &num) != ERROR_OK)
		return NULL;

	for (target = all_targets; target; target = target->next) {
		if (target->target_number == (int)num) {
			LOG_WARNING("use '%s' as target identifier, not '%u'",
					target_name(target), num);
			return target;
		}
	}

	return NULL;
}

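/* Illustrative sketch (hypothetical target name): callers normally
 * resolve targets by their configured name; the numeric fallback above
 * is kept only for backwards compatibility.
 *
 *	struct target *t = get_target("mychip.cpu");
 *	if (t == NULL)
 *		LOG_ERROR("target 'mychip.cpu' not found");
 */
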
/* returns a pointer to the n-th configured target */
static struct target *get_target_by_num(int num)
{
	struct target *target = all_targets;

	while (target) {
		if (target->target_number == num)
			return target;
		target = target->next;
	}

	return NULL;
}

struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_target_by_num(cmd_ctx->current_target);

	if (target == NULL) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}

int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			long long t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}

int target_halt(struct target *target)
{
	int retval;
	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->halt(target);
	if (retval != ERROR_OK)
		return retval;

	target->halt_issued = true;
	target->halt_issued_time = timeval_ms();

	return ERROR_OK;
}

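/* Illustrative sketch (simplified, no error handling of its own):
 * target_halt() only issues the halt request, so callers poll until the
 * state becomes TARGET_HALTED; target_poll() itself gives up after
 * DEFAULT_HALT_TIMEOUT and wakes GDB.
 *
 *	if (target_halt(target) == ERROR_OK) {
 *		while (target->state != TARGET_HALTED)
 *			target_poll(target);
 *	}
 */
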
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped.  (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm; and
 * enabling them could cause such helpers to malfunction, for example
 * by overwriting data with a breakpoint instruction.  On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}

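/* Illustrative sketch (hypothetical address): the two common calling
 * patterns, resuming at the saved PC versus at an explicit address.
 *
 *	target_resume(target, 1, 0, 1, 0);           // continue at current PC
 *	target_resume(target, 0, 0x08000000, 1, 0);  // jump to given address
 */
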
static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
{
	char buf[100];
	int retval;
	Jim_Nvp *n;
	n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
	if (n->name == NULL) {
		LOG_ERROR("invalid reset mode");
		return ERROR_FAIL;
	}

	/* disable polling during reset to make reset event scripts
	 * more predictable, i.e. dr/irscan & pathmove in events will
	 * not have JTAG operations injected into the middle of a sequence.
	 */
	bool save_poll = jtag_poll_get_enabled();

	jtag_poll_set_enabled(false);

	sprintf(buf, "ocd_process_reset %s", n->name);
	retval = Jim_Eval(cmd_ctx->interp, buf);

	jtag_poll_set_enabled(save_poll);

	if (retval != JIM_OK) {
		Jim_MakeErrorMessage(cmd_ctx->interp);
		command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
		return ERROR_FAIL;
	}

	/* We want any events to be processed before the prompt */
	retval = target_call_timer_callbacks_now();

	struct target *target;
	for (target = all_targets; target; target = target->next) {
		target->type->check_reset(target);
		target->running_alg = false;
	}

	return retval;
}

static int identity_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}

static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}

static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}

/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}

int target_examine_one(struct target *target)
{
	return target->type->examine(target);
}

static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	jtag_unregister_event_callback(jtag_enable_callback, target);

	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target_examine_one(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return retval;
}

/* Targets that correctly implement init + examine, i.e.
 * no communication with target during init:
 *
 * XScale
 */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}

		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

		retval = target_examine_one(target);
		if (retval != ERROR_OK)
			return retval;

		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
	}
	return retval;
}

const char *target_type_name(struct target *target)
{
	return target->type->name;
}

static int target_soft_reset_halt(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt(target);
}

/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}

/**
 * Downloads a target-specific native code algorithm to the target,
 * executes and leaves it running.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}

/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}

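/* Illustrative sketch (entry/exit addresses, register parameters and
 * the 10 s timeout are assumptions): the start/wait pair lets the host
 * do other work, e.g. fill a buffer, while the algorithm runs.
 *
 *	retval = target_start_algorithm(target, 0, NULL, num_regs, regs,
 *			entry_point, exit_point, arch_info);
 *	if (retval == ERROR_OK) {
 *		// ... stream data to the target here ...
 *		retval = target_wait_algorithm(target, 0, NULL, num_regs, regs,
 *				exit_point, 10000, arch_info);
 *	}
 */
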
/**
 * Executes a target-specific native code algorithm in the target.
 * It differs from target_run_algorithm in that the algorithm is asynchronous.
 * Because of this it requires a compliant algorithm:
 * see contrib/loaders/flash/stm32f1x.S for example.
 *
 * @param target used to run the algorithm
 */
int target_run_flash_async_algorithm(struct target *target,
		const uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(!block_size || !(block_size & (block_size - 1)));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32, count, wp, rp);

		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		if ((rp & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(10);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	return retval;
}

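/* Working area layout used by the FIFO above, for reference:
 *
 *	buffer_start + 0 : write pointer (host-updated)
 *	buffer_start + 4 : read pointer (target-updated; 0 = abort)
 *	buffer_start + 8 : FIFO data ... up to buffer_start + buffer_size
 *
 * A flash driver typically calls this as sketched below; the working
 * area and register parameter names are hypothetical.
 *
 *	retval = target_run_flash_async_algorithm(target, buffer, count,
 *			block_size, 0, NULL, num_regs, regs,
 *			fifo_wa->address, fifo_wa->size,
 *			algo_wa->address, 0, arch_info);
 */
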
int target_read_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->read_memory(target, address, size, count, buffer);
}

int target_read_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->read_phys_memory(target, address, size, count, buffer);
}

int target_write_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->write_memory(target, address, size, count, buffer);
}

int target_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	return target->type->write_phys_memory(target, address, size, count, buffer);
}

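/* Illustrative sketch: reading one 32-bit word through the generic
 * accessor and converting it from target endianness (the target_read_u32()
 * helper elsewhere in this file wraps this pattern).
 *
 *	uint8_t raw[4];
 *	retval = target_read_memory(target, address, 4, 1, raw);
 *	if (retval == ERROR_OK)
 *		value = target_buffer_get_u32(target, raw);
 */
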
int target_add_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
		LOG_WARNING("target %s is not halted", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_breakpoint(target, breakpoint);
}

int target_add_context_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_context_breakpoint(target, breakpoint);
}

int target_add_hybrid_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_hybrid_breakpoint(target, breakpoint);
}

int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}

int target_add_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_watchpoint(target, watchpoint);
}
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
int target_hit_watchpoint(struct target *target,
		struct watchpoint **hit_watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}

	if (target->type->hit_watchpoint == NULL) {
		/* For backward compatibility, if hit_watchpoint is not implemented,
		 * return ERROR_FAIL so that gdb_server does not pass on the nonsense
		 * information. */
		return ERROR_FAIL;
	}

	return target->type->hit_watchpoint(target, hit_watchpoint);
}

int target_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
{
	return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
}
int target_step(struct target *target,
		int current, uint32_t address, int handle_breakpoints)
{
	return target->type->step(target, current, address, handle_breakpoints);
}

int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->get_gdb_fileio_info(target, fileio_info);
}

int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
}

int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}

/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static void target_reset_examined(struct target *target)
{
	target->examined = false;
}

static int err_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}

static int err_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	LOG_ERROR("Not implemented: %s", __func__);
	return ERROR_FAIL;
}

static int handle_target(void *priv);

static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (type->examine == NULL)
		type->examine = default_examine;

	if (type->check_reset == NULL)
		type->check_reset = default_check_reset;

	assert(type->init_target != NULL);

	int retval = type->init_target(cmd_ctx, target);
	if (ERROR_OK != retval) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (type->write_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing write_phys_memory",
					type->name);
			type->write_phys_memory = err_write_phys_memory;
		}
		if (type->read_phys_memory == NULL) {
			LOG_ERROR("type '%s' is missing read_phys_memory",
					type->name);
			type->read_phys_memory = err_read_phys_memory;
		}
		if (type->virt2phys == NULL) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	if (target->type->read_buffer == NULL)
		target->type->read_buffer = target_read_buffer_default;

	if (target->type->write_buffer == NULL)
		target->type->write_buffer = target_write_buffer_default;

	if (target->type->get_gdb_fileio_info == NULL)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (target->type->gdb_fileio_end == NULL)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (target->type->profiling == NULL)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}

static int target_init(struct command_context *cmd_ctx)
{
	struct target *target;
	int retval;

	for (target = all_targets; target; target = target->next) {
		retval = target_init_one(cmd_ctx, target);
		if (ERROR_OK != retval)
			return retval;
	}

	if (!all_targets)
		return ERROR_OK;

	retval = target_register_user_commands(cmd_ctx);
	if (ERROR_OK != retval)
		return retval;

	retval = target_register_timer_callback(&handle_target,
			polling_interval, 1, cmd_ctx->interp);
	if (ERROR_OK != retval)
		return retval;

	return ERROR_OK;
}

COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	retval = command_run_line(CMD_CTX, "init_targets");
	if (ERROR_OK != retval)
		return retval;

	retval = command_run_line(CMD_CTX, "init_target_events");
	if (ERROR_OK != retval)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (ERROR_OK != retval)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}

int target_register_event_callback(int (*callback)(struct target *target,
		enum target_event event, void *priv), void *priv)
{
	struct target_event_callback **callbacks_p = &target_event_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (*callbacks_p) {
		while ((*callbacks_p)->next)
			callbacks_p = &((*callbacks_p)->next);
		callbacks_p = &((*callbacks_p)->next);
	}

	(*callbacks_p) = malloc(sizeof(struct target_event_callback));
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->priv = priv;
	(*callbacks_p)->next = NULL;

	return ERROR_OK;
}

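/* Illustrative sketch (hypothetical callback): event callbacks receive
 * every event for every target and filter what they care about.
 *
 *	static int my_event_handler(struct target *target,
 *			enum target_event event, void *priv)
 *	{
 *		if (event == TARGET_EVENT_HALTED)
 *			LOG_INFO("%s halted", target_name(target));
 *		return ERROR_OK;
 *	}
 *
 *	target_register_event_callback(my_event_handler, NULL);
 */
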
int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
{
	struct target_timer_callback **callbacks_p = &target_timer_callbacks;
	struct timeval now;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (*callbacks_p) {
		while ((*callbacks_p)->next)
			callbacks_p = &((*callbacks_p)->next);
		callbacks_p = &((*callbacks_p)->next);
	}

	(*callbacks_p) = malloc(sizeof(struct target_timer_callback));
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->periodic = periodic;
	(*callbacks_p)->time_ms = time_ms;

	gettimeofday(&now, NULL);
	(*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
	time_ms -= (time_ms % 1000);
	(*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
	if ((*callbacks_p)->when.tv_usec >= 1000000) {
		(*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
		(*callbacks_p)->when.tv_sec += 1;
	}

	(*callbacks_p)->priv = priv;
	(*callbacks_p)->next = NULL;

	return ERROR_OK;
}

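/* Illustrative sketch (hypothetical callback, 100 ms period): a
 * periodic timer callback is re-armed automatically after each call;
 * a one-shot (periodic == 0) is unregistered after it fires.
 *
 *	static int my_tick(void *priv)
 *	{
 *		return ERROR_OK;
 *	}
 *
 *	target_register_timer_callback(my_tick, 100, 1, NULL);
 */
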
int target_unregister_event_callback(int (*callback)(struct target *target,
		enum target_event event, void *priv), void *priv)
{
	struct target_event_callback **p = &target_event_callbacks;
	struct target_event_callback *c = target_event_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	while (c) {
		struct target_event_callback *next = c->next;
		if ((c->callback == callback) && (c->priv == priv)) {
			*p = next;
			free(c);
			return ERROR_OK;
		} else
			p = &(c->next);
		c = next;
	}

	return ERROR_OK;
}

int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
{
	struct target_timer_callback **p = &target_timer_callbacks;
	struct target_timer_callback *c = target_timer_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	while (c) {
		struct target_timer_callback *next = c->next;
		if ((c->callback == callback) && (c->priv == priv)) {
			*p = next;
			free(c);
			return ERROR_OK;
		} else
			p = &(c->next);
		c = next;
	}

	return ERROR_OK;
}

int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED) {
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s)", event,
			Jim_Nvp_value2name_simple(nvp_target_event, event)->name);

	target_handle_event(target, event);

	while (callback) {
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}

static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, struct timeval *now)
{
	int time_ms = cb->time_ms;
	cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
	time_ms -= (time_ms % 1000);
	cb->when.tv_sec = now->tv_sec + time_ms / 1000;
	if (cb->when.tv_usec >= 1000000) {
		cb->when.tv_usec = cb->when.tv_usec - 1000000;
		cb->when.tv_sec += 1;
	}
	return ERROR_OK;
}

static int target_call_timer_callback(struct target_timer_callback *cb,
		struct timeval *now)
{
	cb->callback(cb->priv);

	if (cb->periodic)
		return target_timer_callback_periodic_restart(cb, now);

	return target_unregister_timer_callback(cb->callback, cb->priv);
}

static int target_call_timer_callbacks_check_time(int checktime)
{
	keep_alive();

	struct timeval now;
	gettimeofday(&now, NULL);

	struct target_timer_callback *callback = target_timer_callbacks;
	while (callback) {
		/* cleaning up may unregister and free this callback */
		struct target_timer_callback *next_callback = callback->next;

		bool call_it = callback->callback &&
			((!checktime && callback->periodic) ||
			 now.tv_sec > callback->when.tv_sec ||
			 (now.tv_sec == callback->when.tv_sec &&
			  now.tv_usec >= callback->when.tv_usec));

		if (call_it) {
			int retval = target_call_timer_callback(callback, &now);
			if (retval != ERROR_OK)
				return retval;
		}

		callback = next_callback;
	}

	return ERROR_OK;
}

int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}

/* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}

/* Prints the working area layout for debug purposes */
static void print_wa_layout(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c) {
		LOG_DEBUG("%c%c 0x%08"PRIx32"-0x%08"PRIx32" (%"PRIu32" bytes)",
			c->backup ? 'b' : ' ', c->free ? ' ' : '*',
			c->address, c->address + c->size - 1, c->size);
		c = c->next;
	}
}

/* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
static void target_split_working_area(struct working_area *area, uint32_t size)
{
	assert(area->free); /* Shouldn't split an allocated area */
	assert(size <= area->size); /* Caller should guarantee this */

	/* Split only if not already the right size */
	if (size < area->size) {
		struct working_area *new_wa = malloc(sizeof(*new_wa));

		if (new_wa == NULL)
			return;

		new_wa->next = area->next;
		new_wa->size = area->size - size;
		new_wa->address = area->address + size;
		new_wa->backup = NULL;
		new_wa->user = NULL;
		new_wa->free = true;

		area->next = new_wa;
		area->size = size;

		/* If backup memory was allocated to this area, it has the wrong size
		 * now so free it and it will be reallocated if/when needed */
		if (area->backup) {
			free(area->backup);
			area->backup = NULL;
		}
	}
}

/* Merge all adjacent free areas into one */
static void target_merge_working_areas(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c && c->next) {
		assert(c->next->address == c->address + c->size); /* This is an invariant */

		/* Find two adjacent free areas */
		if (c->free && c->next->free) {
			/* Merge the last into the first */
			c->size += c->next->size;

			/* Remove the last */
			struct working_area *to_be_freed = c->next;
			c->next = c->next->next;
			if (to_be_freed->backup)
				free(to_be_freed->backup);
			free(to_be_freed);

			/* If backup memory was allocated to the remaining area, it has
			 * the wrong size now */
			if (c->backup) {
				free(c->backup);
				c->backup = NULL;
			}
		} else {
			c = c->next;
		}
	}
}

int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state */
	if (target->working_areas == NULL) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory 0x%08"PRIx32,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory 0x%08"PRIx32,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 bytes */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (c == NULL)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %"PRIu32" bytes at address 0x%08"PRIx32, size, c->address);

	if (target->backup_working_area) {
		if (c->backup == NULL) {
			c->backup = malloc(c->size);
			if (c->backup == NULL)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}

int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
{
	int retval;

	retval = target_alloc_working_area_try(target, size, area);
	if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		LOG_WARNING("not enough working area available (requested %"PRIu32")", size);
	return retval;
}

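/* Illustrative sketch: the usual allocate/use/free cycle.  Freeing
 * restores the backed-up target memory (if backup_working_area is set)
 * and returns the block to the pool.
 *
 *	struct working_area *wa = NULL;
 *	if (target_alloc_working_area(target, 256, &wa) != ERROR_OK)
 *		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *	// ... use wa->address ...
 *	target_free_working_area(target, wa);
 */
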
static int target_restore_working_area(struct target *target, struct working_area *area)
{
	int retval = ERROR_OK;

	if (target->backup_working_area && area->backup != NULL) {
		retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
		if (retval != ERROR_OK)
			LOG_ERROR("failed to restore %"PRIu32" bytes of working area at address 0x%08"PRIx32,
					area->size, area->address);
	}

	return retval;
}

/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	int retval = ERROR_OK;

	if (area->free)
		return retval;

	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %"PRIu32" bytes of working area at address 0x%08"PRIx32,
			area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}

int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}

/* free resources and restore memory; if restoring memory fails,
 * free up resources anyway
 */
static void target_free_all_working_areas_restore(struct target *target, int restore)
{
	struct working_area *c = target->working_areas;

	LOG_DEBUG("freeing all working areas");

	/* Loop through all areas, restoring the allocated ones and marking them as free */
	while (c) {
		if (!c->free) {
			if (restore)
				target_restore_working_area(target, c);
			c->free = true;
			*c->user = NULL; /* Same as above */
			c->user = NULL;
		}
		c = c->next;
	}

	/* Run a merge pass to combine all areas into one */
	target_merge_working_areas(target);

	print_wa_layout(target);
}

void target_free_all_working_areas(struct target *target)
{
	target_free_all_working_areas_restore(target, 1);
}

/* Find the largest number of bytes that can be allocated */
uint32_t target_get_working_area_avail(struct target *target)
{
	struct working_area *c = target->working_areas;
	uint32_t max_size = 0;

	if (c == NULL)
		return target->working_area_size;

	while (c) {
		if (c->free && max_size < c->size)
			max_size = c->size;

		c = c->next;
	}

	return max_size;
}

1767 int target_arch_state(struct target *target)
1768 {
1769 int retval;
1770 if (target == NULL) {
1771 LOG_USER("No target has been configured");
1772 return ERROR_OK;
1773 }
1774
1775 LOG_USER("target state: %s", target_state_name(target));
1776
1777 if (target->state != TARGET_HALTED)
1778 return ERROR_OK;
1779
1780 retval = target->type->arch_state(target);
1781 return retval;
1782 }
1783
1784 static int target_get_gdb_fileio_info_default(struct target *target,
1785 struct gdb_fileio_info *fileio_info)
1786 {
1787 /* If the target does not support semihosting functions, it does
1788 not need to provide a .get_gdb_fileio_info callback; returning
1789 ERROR_FAIL here makes gdb_server report "Txx" (target halted)
1790 every time. */
1791 return ERROR_FAIL;
1792 }
1793
1794 static int target_gdb_fileio_end_default(struct target *target,
1795 int retcode, int fileio_errno, bool ctrl_c)
1796 {
1797 return ERROR_OK;
1798 }
1799
1800 static int target_profiling_default(struct target *target, uint32_t *samples,
1801 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1802 {
1803 struct timeval timeout, now;
1804
1805 gettimeofday(&timeout, NULL);
1806 timeval_add_time(&timeout, seconds, 0);
1807
1808 LOG_INFO("Starting profiling. Halting and resuming the"
1809 " target as often as we can...");
1810
1811 uint32_t sample_count = 0;
1812 /* Look up the PC register once and cache the pointer; we want to stop/restart as quickly as possible. */
1813 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
1814
1815 int retval = ERROR_OK;
1816 for (;;) {
1817 target_poll(target);
1818 if (target->state == TARGET_HALTED) {
1819 uint32_t t = *((uint32_t *)reg->value);
1820 samples[sample_count++] = t;
1821 /* current pc, addr = 0, do not handle breakpoints, not debugging */
1822 retval = target_resume(target, 1, 0, 0, 0);
1823 target_poll(target);
1824 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
1825 } else if (target->state == TARGET_RUNNING) {
1826 /* We want to quickly sample the PC. */
1827 retval = target_halt(target);
1828 } else {
1829 LOG_INFO("Target not halted or running");
1830 retval = ERROR_OK;
1831 break;
1832 }
1833
1834 if (retval != ERROR_OK)
1835 break;
1836
1837 gettimeofday(&now, NULL);
1838 if ((sample_count >= max_num_samples) ||
1839 ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec >= timeout.tv_usec)))) {
1840 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
1841 break;
1842 }
1843 }
1844
1845 *num_samples = sample_count;
1846 return retval;
1847 }
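/* Caller sketch (illustrative; handle_profile_command below is the real
 * caller): sample the PC for 10 seconds into a caller-provided buffer.
 *
 *   uint32_t samples[10000];
 *   uint32_t count;
 *   retval = target_profiling(target, samples, 10000, &count, 10);
 */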
1848
1849 /* A single aligned halfword or word is guaranteed to use a 16 or
1850 * 32 bit access respectively; otherwise data is transferred as
1851 * quickly as possible
1852 */
1853 int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer)
1854 {
1855 LOG_DEBUG("writing buffer of %i byte at 0x%8.8x",
1856 (int)size, (unsigned)address);
1857
1858 if (!target_was_examined(target)) {
1859 LOG_ERROR("Target not examined yet");
1860 return ERROR_FAIL;
1861 }
1862
1863 if (size == 0)
1864 return ERROR_OK;
1865
1866 if ((address + size - 1) < address) {
1867 /* GDB can request this when e.g. PC is 0xfffffffc */
1868 LOG_ERROR("address + size wrapped (0x%08x, 0x%08x)",
1869 (unsigned)address,
1870 (unsigned)size);
1871 return ERROR_FAIL;
1872 }
1873
1874 return target->type->write_buffer(target, address, size, buffer);
1875 }
1876
1877 static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t count, const uint8_t *buffer)
1878 {
1879 uint32_t size;
1880
1881 /* Align the address up to a maximum of 4 bytes. The loop condition ensures
1882 * the next pass will still have enough data left for the larger access size. */
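/* Worked example (illustrative): a 7-byte transfer starting at 0x1001 is
 * issued as one byte at 0x1001 and one halfword at 0x1002 by this loop;
 * the loop below then writes a single word at 0x1004. */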
1883 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
1884 if (address & size) {
1885 int retval = target_write_memory(target, address, size, 1, buffer);
1886 if (retval != ERROR_OK)
1887 return retval;
1888 address += size;
1889 count -= size;
1890 buffer += size;
1891 }
1892 }
1893
1894 /* Write the data with as large an access size as possible. */
1895 for (; size > 0; size /= 2) {
1896 uint32_t aligned = count - count % size;
1897 if (aligned > 0) {
1898 int retval = target_write_memory(target, address, size, aligned / size, buffer);
1899 if (retval != ERROR_OK)
1900 return retval;
1901 address += aligned;
1902 count -= aligned;
1903 buffer += aligned;
1904 }
1905 }
1906
1907 return ERROR_OK;
1908 }
1909
1910 /* A single aligned halfword or word is guaranteed to use a 16 or
1911 * 32 bit access respectively; otherwise data is transferred as
1912 * quickly as possible
1913 */
1914 int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer)
1915 {
1916 LOG_DEBUG("reading buffer of %i byte at 0x%8.8x",
1917 (int)size, (unsigned)address);
1918
1919 if (!target_was_examined(target)) {
1920 LOG_ERROR("Target not examined yet");
1921 return ERROR_FAIL;
1922 }
1923
1924 if (size == 0)
1925 return ERROR_OK;
1926
1927 if ((address + size - 1) < address) {
1928 /* GDB can request this when e.g. PC is 0xfffffffc */
1929 LOG_ERROR("address + size wrapped (0x%08" PRIx32 ", 0x%08" PRIx32 ")",
1930 address,
1931 size);
1932 return ERROR_FAIL;
1933 }
1934
1935 return target->type->read_buffer(target, address, size, buffer);
1936 }
1937
1938 static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t count, uint8_t *buffer)
1939 {
1940 uint32_t size;
1941
1942 /* Align the address up to a maximum of 4 bytes. The loop condition ensures
1943 * the next pass will still have enough data left for the larger access size. */
1944 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
1945 if (address & size) {
1946 int retval = target_read_memory(target, address, size, 1, buffer);
1947 if (retval != ERROR_OK)
1948 return retval;
1949 address += size;
1950 count -= size;
1951 buffer += size;
1952 }
1953 }
1954
1955 /* Read the data with as large an access size as possible. */
1956 for (; size > 0; size /= 2) {
1957 uint32_t aligned = count - count % size;
1958 if (aligned > 0) {
1959 int retval = target_read_memory(target, address, size, aligned / size, buffer);
1960 if (retval != ERROR_OK)
1961 return retval;
1962 address += aligned;
1963 count -= aligned;
1964 buffer += aligned;
1965 }
1966 }
1967
1968 return ERROR_OK;
1969 }
1970
1971 int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc)
1972 {
1973 uint8_t *buffer;
1974 int retval;
1975 uint32_t i;
1976 uint32_t checksum = 0;
1977 if (!target_was_examined(target)) {
1978 LOG_ERROR("Target not examined yet");
1979 return ERROR_FAIL;
1980 }
1981
1982 retval = target->type->checksum_memory(target, address, size, &checksum);
1983 if (retval != ERROR_OK) {
1984 buffer = malloc(size);
1985 if (buffer == NULL) {
1986 LOG_ERROR("error allocating buffer for section (%d bytes)", (int)size);
1987 return ERROR_FAIL;
1988 }
1989 retval = target_read_buffer(target, address, size, buffer);
1990 if (retval != ERROR_OK) {
1991 free(buffer);
1992 return retval;
1993 }
1994
1995 /* convert to target endianness */
1996 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
1997 uint32_t target_data;
1998 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
1999 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2000 }
2001
2002 retval = image_calculate_checksum(buffer, size, &checksum);
2003 free(buffer);
2004 }
2005
2006 *crc = checksum;
2007
2008 return retval;
2009 }
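/* Usage sketch (illustrative): this is how the verify_image handler below
 * checks a region against an image section:
 *
 *   uint32_t crc;
 *   retval = target_checksum_memory(target, address, length, &crc);
 *   ... compare crc with the value from image_calculate_checksum() ...
 */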
2010
2011 int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank)
2012 {
2013 int retval;
2014 if (!target_was_examined(target)) {
2015 LOG_ERROR("Target not examined yet");
2016 return ERROR_FAIL;
2017 }
2018
2019 if (target->type->blank_check_memory == NULL)
2020 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2021
2022 retval = target->type->blank_check_memory(target, address, size, blank);
2023
2024 return retval;
2025 }
2026
2027 int target_read_u64(struct target *target, uint64_t address, uint64_t *value)
2028 {
2029 uint8_t value_buf[8];
2030 if (!target_was_examined(target)) {
2031 LOG_ERROR("Target not examined yet");
2032 return ERROR_FAIL;
2033 }
2034
2035 int retval = target_read_memory(target, address, 8, 1, value_buf);
2036
2037 if (retval == ERROR_OK) {
2038 *value = target_buffer_get_u64(target, value_buf);
2039 LOG_DEBUG("address: 0x%" PRIx64 ", value: 0x%16.16" PRIx64 "",
2040 address,
2041 *value);
2042 } else {
2043 *value = 0x0;
2044 LOG_DEBUG("address: 0x%" PRIx64 " failed",
2045 address);
2046 }
2047
2048 return retval;
2049 }
2050
2051 int target_read_u32(struct target *target, uint32_t address, uint32_t *value)
2052 {
2053 uint8_t value_buf[4];
2054 if (!target_was_examined(target)) {
2055 LOG_ERROR("Target not examined yet");
2056 return ERROR_FAIL;
2057 }
2058
2059 int retval = target_read_memory(target, address, 4, 1, value_buf);
2060
2061 if (retval == ERROR_OK) {
2062 *value = target_buffer_get_u32(target, value_buf);
2063 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
2064 address,
2065 *value);
2066 } else {
2067 *value = 0x0;
2068 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2069 address);
2070 }
2071
2072 return retval;
2073 }
2074
2075 int target_read_u16(struct target *target, uint32_t address, uint16_t *value)
2076 {
2077 uint8_t value_buf[2];
2078 if (!target_was_examined(target)) {
2079 LOG_ERROR("Target not examined yet");
2080 return ERROR_FAIL;
2081 }
2082
2083 int retval = target_read_memory(target, address, 2, 1, value_buf);
2084
2085 if (retval == ERROR_OK) {
2086 *value = target_buffer_get_u16(target, value_buf);
2087 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4x",
2088 address,
2089 *value);
2090 } else {
2091 *value = 0x0;
2092 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2093 address);
2094 }
2095
2096 return retval;
2097 }
2098
2099 int target_read_u8(struct target *target, uint32_t address, uint8_t *value)
2100 {
2101 if (!target_was_examined(target)) {
2102 LOG_ERROR("Target not examined yet");
2103 return ERROR_FAIL;
2104 }
2105
2106 int retval = target_read_memory(target, address, 1, 1, value);
2107
2108 if (retval == ERROR_OK) {
2109 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
2110 address,
2111 *value);
2112 } else {
2113 *value = 0x0;
2114 LOG_DEBUG("address: 0x%8.8" PRIx32 " failed",
2115 address);
2116 }
2117
2118 return retval;
2119 }
2120
2121 int target_write_u64(struct target *target, uint64_t address, uint64_t value)
2122 {
2123 int retval;
2124 uint8_t value_buf[8];
2125 if (!target_was_examined(target)) {
2126 LOG_ERROR("Target not examined yet");
2127 return ERROR_FAIL;
2128 }
2129
2130 LOG_DEBUG("address: 0x%" PRIx64 ", value: 0x%16.16" PRIx64 "",
2131 address,
2132 value);
2133
2134 target_buffer_set_u64(target, value_buf, value);
2135 retval = target_write_memory(target, address, 8, 1, value_buf);
2136 if (retval != ERROR_OK)
2137 LOG_DEBUG("failed: %i", retval);
2138
2139 return retval;
2140 }
2141
2142 int target_write_u32(struct target *target, uint32_t address, uint32_t value)
2143 {
2144 int retval;
2145 uint8_t value_buf[4];
2146 if (!target_was_examined(target)) {
2147 LOG_ERROR("Target not examined yet");
2148 return ERROR_FAIL;
2149 }
2150
2151 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "",
2152 address,
2153 value);
2154
2155 target_buffer_set_u32(target, value_buf, value);
2156 retval = target_write_memory(target, address, 4, 1, value_buf);
2157 if (retval != ERROR_OK)
2158 LOG_DEBUG("failed: %i", retval);
2159
2160 return retval;
2161 }
2162
2163 int target_write_u16(struct target *target, uint32_t address, uint16_t value)
2164 {
2165 int retval;
2166 uint8_t value_buf[2];
2167 if (!target_was_examined(target)) {
2168 LOG_ERROR("Target not examined yet");
2169 return ERROR_FAIL;
2170 }
2171
2172 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8x",
2173 address,
2174 value);
2175
2176 target_buffer_set_u16(target, value_buf, value);
2177 retval = target_write_memory(target, address, 2, 1, value_buf);
2178 if (retval != ERROR_OK)
2179 LOG_DEBUG("failed: %i", retval);
2180
2181 return retval;
2182 }
2183
2184 int target_write_u8(struct target *target, uint32_t address, uint8_t value)
2185 {
2186 int retval;
2187 if (!target_was_examined(target)) {
2188 LOG_ERROR("Target not examined yet");
2189 return ERROR_FAIL;
2190 }
2191
2192 LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2x",
2193 address, value);
2194
2195 retval = target_write_memory(target, address, 1, 1, &value);
2196 if (retval != ERROR_OK)
2197 LOG_DEBUG("failed: %i", retval);
2198
2199 return retval;
2200 }
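/* Usage sketch (illustrative; the address is made up): a read-modify-write
 * of a 32-bit memory-mapped register via the convenience accessors above.
 *
 *   uint32_t val;
 *   if (target_read_u32(target, 0x40000000, &val) == ERROR_OK) {
 *       val |= 0x1;
 *       retval = target_write_u32(target, 0x40000000, val);
 *   }
 */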
2201
2202 static int find_target(struct command_context *cmd_ctx, const char *name)
2203 {
2204 struct target *target = get_target(name);
2205 if (target == NULL) {
2206 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2207 return ERROR_FAIL;
2208 }
2209 if (!target->tap->enabled) {
2210 LOG_USER("Target: TAP %s is disabled, "
2211 "can't be the current target\n",
2212 target->tap->dotted_name);
2213 return ERROR_FAIL;
2214 }
2215
2216 cmd_ctx->current_target = target->target_number;
2217 return ERROR_OK;
2218 }
2219 
2221 COMMAND_HANDLER(handle_targets_command)
2222 {
2223 int retval = ERROR_OK;
2224 if (CMD_ARGC == 1) {
2225 retval = find_target(CMD_CTX, CMD_ARGV[0]);
2226 if (retval == ERROR_OK) {
2227 /* we're done! */
2228 return retval;
2229 }
2230 }
2231
2232 struct target *target = all_targets;
2233 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
2234 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
2235 while (target) {
2236 const char *state;
2237 char marker = ' ';
2238
2239 if (target->tap->enabled)
2240 state = target_state_name(target);
2241 else
2242 state = "tap-disabled";
2243
2244 if (CMD_CTX->current_target == target->target_number)
2245 marker = '*';
2246
2247 /* keep columns lined up to match the headers above */
2248 command_print(CMD_CTX,
2249 "%2d%c %-18s %-10s %-6s %-18s %s",
2250 target->target_number,
2251 marker,
2252 target_name(target),
2253 target_type_name(target),
2254 Jim_Nvp_value2name_simple(nvp_target_endian,
2255 target->endianness)->name,
2256 target->tap->dotted_name,
2257 state);
2258 target = target->next;
2259 }
2260
2261 return retval;
2262 }
2263
2264 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2265
2266 static int powerDropout;
2267 static int srstAsserted;
2268
2269 static int runPowerRestore;
2270 static int runPowerDropout;
2271 static int runSrstAsserted;
2272 static int runSrstDeasserted;
2273
2274 static int sense_handler(void)
2275 {
2276 static int prevSrstAsserted;
2277 static int prevPowerdropout;
2278
2279 int retval = jtag_power_dropout(&powerDropout);
2280 if (retval != ERROR_OK)
2281 return retval;
2282
2283 int powerRestored;
2284 powerRestored = prevPowerdropout && !powerDropout;
2285 if (powerRestored)
2286 runPowerRestore = 1;
2287
2288 long long current = timeval_ms();
2289 static long long lastPower;
2290 int waitMore = lastPower + 2000 > current;
2291 if (powerDropout && !waitMore) {
2292 runPowerDropout = 1;
2293 lastPower = current;
2294 }
2295
2296 retval = jtag_srst_asserted(&srstAsserted);
2297 if (retval != ERROR_OK)
2298 return retval;
2299
2300 int srstDeasserted;
2301 srstDeasserted = prevSrstAsserted && !srstAsserted;
2302
2303 static long long lastSrst;
2304 waitMore = lastSrst + 2000 > current;
2305 if (srstDeasserted && !waitMore) {
2306 runSrstDeasserted = 1;
2307 lastSrst = current;
2308 }
2309
2310 if (!prevSrstAsserted && srstAsserted)
2311 runSrstAsserted = 1;
2312
2313 prevSrstAsserted = srstAsserted;
2314 prevPowerdropout = powerDropout;
2315
2316 if (srstDeasserted || powerRestored) {
2317 /* Other than logging the event we can't do anything here.
2318 * Issuing a reset is a particularly bad idea as we might
2319 * be inside a reset already.
2320 */
2321 }
2322
2323 return ERROR_OK;
2324 }
2325
2326 /* process target state changes */
2327 static int handle_target(void *priv)
2328 {
2329 Jim_Interp *interp = (Jim_Interp *)priv;
2330 int retval = ERROR_OK;
2331
2332 if (!is_jtag_poll_safe()) {
2333 /* polling is disabled currently */
2334 return ERROR_OK;
2335 }
2336
2337 /* we do not want to recurse here... */
2338 static int recursive;
2339 if (!recursive) {
2340 recursive = 1;
2341 sense_handler();
2342 /* danger! running these procedures can trigger srst assertions and power dropouts.
2343 * We need to avoid an infinite loop/recursion here and we do that by
2344 * clearing the flags after running these events.
2345 */
2346 int did_something = 0;
2347 if (runSrstAsserted) {
2348 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2349 Jim_Eval(interp, "srst_asserted");
2350 did_something = 1;
2351 }
2352 if (runSrstDeasserted) {
2353 Jim_Eval(interp, "srst_deasserted");
2354 did_something = 1;
2355 }
2356 if (runPowerDropout) {
2357 LOG_INFO("Power dropout detected, running power_dropout proc.");
2358 Jim_Eval(interp, "power_dropout");
2359 did_something = 1;
2360 }
2361 if (runPowerRestore) {
2362 Jim_Eval(interp, "power_restore");
2363 did_something = 1;
2364 }
2365
2366 if (did_something) {
2367 /* clear detect flags */
2368 sense_handler();
2369 }
2370
2371 /* clear action flags */
2372
2373 runSrstAsserted = 0;
2374 runSrstDeasserted = 0;
2375 runPowerRestore = 0;
2376 runPowerDropout = 0;
2377
2378 recursive = 0;
2379 }
2380
2381 /* Poll targets for state changes unless that's globally disabled.
2382 * Skip targets that are currently disabled.
2383 */
2384 for (struct target *target = all_targets;
2385 is_jtag_poll_safe() && target;
2386 target = target->next) {
2387
2388 if (!target_was_examined(target))
2389 continue;
2390
2391 if (!target->tap->enabled)
2392 continue;
2393
2394 if (target->backoff.times > target->backoff.count) {
2395 /* do not poll this time as we failed previously */
2396 target->backoff.count++;
2397 continue;
2398 }
2399 target->backoff.count = 0;
2400
2401 /* only poll target if we've got power and srst isn't asserted */
2402 if (!powerDropout && !srstAsserted) {
2403 /* polling may fail silently until the target has been examined */
2404 retval = target_poll(target);
2405 if (retval != ERROR_OK) {
2406 /* Exponential backoff: with the 100ms polling interval, keep increasing the delay between polls until it exceeds 5000ms */
2407 if (target->backoff.times * polling_interval < 5000) {
2408 target->backoff.times *= 2;
2409 target->backoff.times++;
2410 }
2411 LOG_USER("Polling target %s failed, GDB will be halted. Polling again in %dms",
2412 target_name(target),
2413 target->backoff.times * polling_interval);
2414
2415 /* Tell GDB to halt the debugger. This allows the user to
2416 * run monitor commands to handle the situation.
2417 */
2418 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2419 return retval;
2420 }
2421 /* Since we succeeded, we reset backoff count */
2422 if (target->backoff.times > 0) {
2423 LOG_USER("Polling target %s succeeded again, trying to reexamine", target_name(target));
2424 target_reset_examined(target);
2425 target_examine_one(target);
2426 }
2427
2428 target->backoff.times = 0;
2429 }
2430 }
2431
2432 return retval;
2433 }
2434
2435 COMMAND_HANDLER(handle_reg_command)
2436 {
2437 struct target *target;
2438 struct reg *reg = NULL;
2439 unsigned count = 0;
2440 char *value;
2441
2442 LOG_DEBUG("-");
2443
2444 target = get_current_target(CMD_CTX);
2445
2446 /* list all available registers for the current target */
2447 if (CMD_ARGC == 0) {
2448 struct reg_cache *cache = target->reg_cache;
2449
2450 count = 0;
2451 while (cache) {
2452 unsigned i;
2453
2454 command_print(CMD_CTX, "===== %s", cache->name);
2455
2456 for (i = 0, reg = cache->reg_list;
2457 i < cache->num_regs;
2458 i++, reg++, count++) {
2459 /* only print cached values if they are valid */
2460 if (reg->valid) {
2461 value = buf_to_str(reg->value,
2462 reg->size, 16);
2463 command_print(CMD_CTX,
2464 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2465 count, reg->name,
2466 reg->size, value,
2467 reg->dirty
2468 ? " (dirty)"
2469 : "");
2470 free(value);
2471 } else {
2472 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2473 count, reg->name,
2474 reg->size);
2475 }
2476 }
2477 cache = cache->next;
2478 }
2479
2480 return ERROR_OK;
2481 }
2482
2483 /* access a single register by its ordinal number */
2484 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2485 unsigned num;
2486 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2487
2488 struct reg_cache *cache = target->reg_cache;
2489 count = 0;
2490 while (cache) {
2491 unsigned i;
2492 for (i = 0; i < cache->num_regs; i++) {
2493 if (count++ == num) {
2494 reg = &cache->reg_list[i];
2495 break;
2496 }
2497 }
2498 if (reg)
2499 break;
2500 cache = cache->next;
2501 }
2502
2503 if (!reg) {
2504 command_print(CMD_CTX, "%i is out of bounds, the current target "
2505 "has only %i registers (0 - %i)", num, count, count - 1);
2506 return ERROR_OK;
2507 }
2508 } else {
2509 /* access a single register by its name */
2510 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2511
2512 if (!reg) {
2513 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2514 return ERROR_OK;
2515 }
2516 }
2517
2518 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2519
2520 /* display a register */
2521 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2522 && (CMD_ARGV[1][0] <= '9')))) {
2523 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2524 reg->valid = 0;
2525
2526 if (reg->valid == 0)
2527 reg->type->get(reg);
2528 value = buf_to_str(reg->value, reg->size, 16);
2529 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2530 free(value);
2531 return ERROR_OK;
2532 }
2533
2534 /* set register value */
2535 if (CMD_ARGC == 2) {
2536 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2537 if (buf == NULL)
2538 return ERROR_FAIL;
2539 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2540
2541 reg->type->set(reg, buf);
2542
2543 value = buf_to_str(reg->value, reg->size, 16);
2544 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2545 free(value);
2546
2547 free(buf);
2548
2549 return ERROR_OK;
2550 }
2551
2552 return ERROR_COMMAND_SYNTAX_ERROR;
2553 }
2554
2555 COMMAND_HANDLER(handle_poll_command)
2556 {
2557 int retval = ERROR_OK;
2558 struct target *target = get_current_target(CMD_CTX);
2559
2560 if (CMD_ARGC == 0) {
2561 command_print(CMD_CTX, "background polling: %s",
2562 jtag_poll_get_enabled() ? "on" : "off");
2563 command_print(CMD_CTX, "TAP: %s (%s)",
2564 target->tap->dotted_name,
2565 target->tap->enabled ? "enabled" : "disabled");
2566 if (!target->tap->enabled)
2567 return ERROR_OK;
2568 retval = target_poll(target);
2569 if (retval != ERROR_OK)
2570 return retval;
2571 retval = target_arch_state(target);
2572 if (retval != ERROR_OK)
2573 return retval;
2574 } else if (CMD_ARGC == 1) {
2575 bool enable;
2576 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2577 jtag_poll_set_enabled(enable);
2578 } else
2579 return ERROR_COMMAND_SYNTAX_ERROR;
2580
2581 return retval;
2582 }
2583
2584 COMMAND_HANDLER(handle_wait_halt_command)
2585 {
2586 if (CMD_ARGC > 1)
2587 return ERROR_COMMAND_SYNTAX_ERROR;
2588
2589 unsigned ms = DEFAULT_HALT_TIMEOUT;
2590 if (1 == CMD_ARGC) {
2591 int retval = parse_uint(CMD_ARGV[0], &ms);
2592 if (ERROR_OK != retval)
2593 return ERROR_COMMAND_SYNTAX_ERROR;
2594 }
2595
2596 struct target *target = get_current_target(CMD_CTX);
2597 return target_wait_state(target, TARGET_HALTED, ms);
2598 }
2599
2600 /* wait for target state to change. The trick here is to have a low
2601 * latency for short waits and not to suck up all the CPU time
2602 * on longer waits.
2603 *
2604 * After 500ms, keep_alive() is invoked
2605 */
2606 int target_wait_state(struct target *target, enum target_state state, int ms)
2607 {
2608 int retval;
2609 long long then = 0, cur;
2610 int once = 1;
2611
2612 for (;;) {
2613 retval = target_poll(target);
2614 if (retval != ERROR_OK)
2615 return retval;
2616 if (target->state == state)
2617 break;
2618 cur = timeval_ms();
2619 if (once) {
2620 once = 0;
2621 then = timeval_ms();
2622 LOG_DEBUG("waiting for target %s...",
2623 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2624 }
2625
2626 if (cur - then > 500)
2627 keep_alive();
2628 
2629 if ((cur - then) > ms) {
2630 LOG_ERROR("timed out while waiting for target %s",
2631 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2632 return ERROR_FAIL;
2633 }
2634 }
2635
2636 return ERROR_OK;
2637 }
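/* Usage sketch (illustrative): request a halt and block until it takes
 * effect, which is what the halt command handler below effectively does:
 *
 *   retval = target_halt(target);
 *   if (retval == ERROR_OK)
 *       retval = target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 */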
2638
2639 COMMAND_HANDLER(handle_halt_command)
2640 {
2641 LOG_DEBUG("-");
2642
2643 struct target *target = get_current_target(CMD_CTX);
2644 int retval = target_halt(target);
2645 if (ERROR_OK != retval)
2646 return retval;
2647
2648 if (CMD_ARGC == 1) {
2649 unsigned wait_local;
2650 retval = parse_uint(CMD_ARGV[0], &wait_local);
2651 if (ERROR_OK != retval)
2652 return ERROR_COMMAND_SYNTAX_ERROR;
2653 if (!wait_local)
2654 return ERROR_OK;
2655 }
2656
2657 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2658 }
2659
2660 COMMAND_HANDLER(handle_soft_reset_halt_command)
2661 {
2662 struct target *target = get_current_target(CMD_CTX);
2663
2664 LOG_USER("requesting target halt and executing a soft reset");
2665
2666 target_soft_reset_halt(target);
2667
2668 return ERROR_OK;
2669 }
2670
2671 COMMAND_HANDLER(handle_reset_command)
2672 {
2673 if (CMD_ARGC > 1)
2674 return ERROR_COMMAND_SYNTAX_ERROR;
2675
2676 enum target_reset_mode reset_mode = RESET_RUN;
2677 if (CMD_ARGC == 1) {
2678 const Jim_Nvp *n;
2679 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
2680 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
2681 return ERROR_COMMAND_SYNTAX_ERROR;
2682 reset_mode = n->value;
2683 }
2684
2685 /* reset *all* targets */
2686 return target_process_reset(CMD_CTX, reset_mode);
2687 }
2688 
2690 COMMAND_HANDLER(handle_resume_command)
2691 {
2692 int current = 1;
2693 if (CMD_ARGC > 1)
2694 return ERROR_COMMAND_SYNTAX_ERROR;
2695
2696 struct target *target = get_current_target(CMD_CTX);
2697
2698 /* with no CMD_ARGV, resume from current pc, addr = 0,
2699 * with one argument, addr = CMD_ARGV[0],
2700 * handle breakpoints, not debugging */
2701 uint32_t addr = 0;
2702 if (CMD_ARGC == 1) {
2703 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2704 current = 0;
2705 }
2706
2707 return target_resume(target, current, addr, 1, 0);
2708 }
2709
2710 COMMAND_HANDLER(handle_step_command)
2711 {
2712 if (CMD_ARGC > 1)
2713 return ERROR_COMMAND_SYNTAX_ERROR;
2714
2715 LOG_DEBUG("-");
2716
2717 /* with no CMD_ARGV, step from current pc, addr = 0,
2718 * with one argument addr = CMD_ARGV[0],
2719 * handle breakpoints, debugging */
2720 uint32_t addr = 0;
2721 int current_pc = 1;
2722 if (CMD_ARGC == 1) {
2723 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
2724 current_pc = 0;
2725 }
2726
2727 struct target *target = get_current_target(CMD_CTX);
2728
2729 return target->type->step(target, current_pc, addr, 1);
2730 }
2731
2732 static void handle_md_output(struct command_context *cmd_ctx,
2733 struct target *target, uint32_t address, unsigned size,
2734 unsigned count, const uint8_t *buffer)
2735 {
2736 const unsigned line_bytecnt = 32;
2737 unsigned line_modulo = line_bytecnt / size;
2738
2739 char output[line_bytecnt * 4 + 1];
2740 unsigned output_len = 0;
2741
2742 const char *value_fmt;
2743 switch (size) {
2744 case 4:
2745 value_fmt = "%8.8x ";
2746 break;
2747 case 2:
2748 value_fmt = "%4.4x ";
2749 break;
2750 case 1:
2751 value_fmt = "%2.2x ";
2752 break;
2753 default:
2754 /* "can't happen", caller checked */
2755 LOG_ERROR("invalid memory read size: %u", size);
2756 return;
2757 }
2758
2759 for (unsigned i = 0; i < count; i++) {
2760 if (i % line_modulo == 0) {
2761 output_len += snprintf(output + output_len,
2762 sizeof(output) - output_len,
2763 "0x%8.8x: ",
2764 (unsigned)(address + (i*size)));
2765 }
2766
2767 uint32_t value = 0;
2768 const uint8_t *value_ptr = buffer + i * size;
2769 switch (size) {
2770 case 4:
2771 value = target_buffer_get_u32(target, value_ptr);
2772 break;
2773 case 2:
2774 value = target_buffer_get_u16(target, value_ptr);
2775 break;
2776 case 1:
2777 value = *value_ptr;
2778 }
2779 output_len += snprintf(output + output_len,
2780 sizeof(output) - output_len,
2781 value_fmt, value);
2782
2783 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
2784 command_print(cmd_ctx, "%s", output);
2785 output_len = 0;
2786 }
2787 }
2788 }
2789
2790 COMMAND_HANDLER(handle_md_command)
2791 {
2792 if (CMD_ARGC < 1)
2793 return ERROR_COMMAND_SYNTAX_ERROR;
2794
2795 unsigned size = 0;
2796 switch (CMD_NAME[2]) {
2797 case 'w':
2798 size = 4;
2799 break;
2800 case 'h':
2801 size = 2;
2802 break;
2803 case 'b':
2804 size = 1;
2805 break;
2806 default:
2807 return ERROR_COMMAND_SYNTAX_ERROR;
2808 }
2809
2810 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2811 int (*fn)(struct target *target,
2812 uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
2813 if (physical) {
2814 CMD_ARGC--;
2815 CMD_ARGV++;
2816 fn = target_read_phys_memory;
2817 } else
2818 fn = target_read_memory;
2819 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
2820 return ERROR_COMMAND_SYNTAX_ERROR;
2821
2822 uint32_t address;
2823 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2824
2825 unsigned count = 1;
2826 if (CMD_ARGC == 2)
2827 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
2828
2829 uint8_t *buffer = calloc(count, size);
if (buffer == NULL) {
LOG_ERROR("error allocating buffer for md (%u bytes)", count * size);
return ERROR_FAIL;
}
2830
2831 struct target *target = get_current_target(CMD_CTX);
2832 int retval = fn(target, address, size, count, buffer);
2833 if (ERROR_OK == retval)
2834 handle_md_output(CMD_CTX, target, address, size, count, buffer);
2835
2836 free(buffer);
2837
2838 return retval;
2839 }
2840
2841 typedef int (*target_write_fn)(struct target *target,
2842 uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
2843
2844 static int target_fill_mem(struct target *target,
2845 uint32_t address,
2846 target_write_fn fn,
2847 unsigned data_size,
2848 /* value */
2849 uint32_t b,
2850 /* count */
2851 unsigned c)
2852 {
2853 /* We have to write in reasonably large chunks to be able
2854 * to fill large memory areas with any sane speed */
2855 const unsigned chunk_size = 16384;
2856 uint8_t *target_buf = malloc(chunk_size * data_size);
2857 if (target_buf == NULL) {
2858 LOG_ERROR("Out of memory");
2859 return ERROR_FAIL;
2860 }
2861
2862 for (unsigned i = 0; i < chunk_size; i++) {
2863 switch (data_size) {
2864 case 4:
2865 target_buffer_set_u32(target, target_buf + i * data_size, b);
2866 break;
2867 case 2:
2868 target_buffer_set_u16(target, target_buf + i * data_size, b);
2869 break;
2870 case 1:
2871 target_buffer_set_u8(target, target_buf + i * data_size, b);
2872 break;
2873 default:
2874 exit(-1);
2875 }
2876 }
2877
2878 int retval = ERROR_OK;
2879
2880 for (unsigned x = 0; x < c; x += chunk_size) {
2881 unsigned current;
2882 current = c - x;
2883 if (current > chunk_size)
2884 current = chunk_size;
2885 retval = fn(target, address + x * data_size, data_size, current, target_buf);
2886 if (retval != ERROR_OK)
2887 break;
2888 /* avoid GDB timeouts */
2889 keep_alive();
2890 }
2891 free(target_buf);
2892
2893 return retval;
2894 }
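/* Usage sketch (illustrative; address and pattern are made up): fill 256
 * words through the virtual-address write path, as the mw command does:
 *
 *   retval = target_fill_mem(target, 0x20000000, target_write_memory,
 *           4, 0xdeadbeef, 256);
 */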
2895 
2897 COMMAND_HANDLER(handle_mw_command)
2898 {
2899 if (CMD_ARGC < 2)
2900 return ERROR_COMMAND_SYNTAX_ERROR;
2901 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
2902 target_write_fn fn;
2903 if (physical) {
2904 CMD_ARGC--;
2905 CMD_ARGV++;
2906 fn = target_write_phys_memory;
2907 } else
2908 fn = target_write_memory;
2909 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
2910 return ERROR_COMMAND_SYNTAX_ERROR;
2911
2912 uint32_t address;
2913 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2914
2915 uint32_t value;
2916 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2917
2918 unsigned count = 1;
2919 if (CMD_ARGC == 3)
2920 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
2921
2922 struct target *target = get_current_target(CMD_CTX);
2923 unsigned wordsize;
2924 switch (CMD_NAME[2]) {
2925 case 'w':
2926 wordsize = 4;
2927 break;
2928 case 'h':
2929 wordsize = 2;
2930 break;
2931 case 'b':
2932 wordsize = 1;
2933 break;
2934 default:
2935 return ERROR_COMMAND_SYNTAX_ERROR;
2936 }
2937
2938 return target_fill_mem(target, address, fn, wordsize, value, count);
2939 }
2940
2941 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
2942 uint32_t *min_address, uint32_t *max_address)
2943 {
2944 if (CMD_ARGC < 1 || CMD_ARGC > 5)
2945 return ERROR_COMMAND_SYNTAX_ERROR;
2946
2947 /* a base address isn't always necessary,
2948 * default to 0x0 (i.e. don't relocate) */
2949 if (CMD_ARGC >= 2) {
2950 uint32_t addr;
2951 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
2952 image->base_address = addr;
2953 image->base_address_set = 1;
2954 } else
2955 image->base_address_set = 0;
2956
2957 image->start_address_set = 0;
2958
2959 if (CMD_ARGC >= 4)
2960 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address);
2961 if (CMD_ARGC == 5) {
2962 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address);
2963 /* use size (given) to find max (required) */
2964 *max_address += *min_address;
2965 }
2966
2967 if (*min_address > *max_address)
2968 return ERROR_COMMAND_SYNTAX_ERROR;
2969
2970 return ERROR_OK;
2971 }
2972
2973 COMMAND_HANDLER(handle_load_image_command)
2974 {
2975 uint8_t *buffer;
2976 size_t buf_cnt;
2977 uint32_t image_size;
2978 uint32_t min_address = 0;
2979 uint32_t max_address = 0xffffffff;
2980 int i;
2981 struct image image;
2982
2983 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
2984 &image, &min_address, &max_address);
2985 if (ERROR_OK != retval)
2986 return retval;
2987
2988 struct target *target = get_current_target(CMD_CTX);
2989
2990 struct duration bench;
2991 duration_start(&bench);
2992
2993 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
2994 return ERROR_FAIL;
2995
2996 image_size = 0x0;
2997 retval = ERROR_OK;
2998 for (i = 0; i < image.num_sections; i++) {
2999 buffer = malloc(image.sections[i].size);
3000 if (buffer == NULL) {
3001 command_print(CMD_CTX,
3002 "error allocating buffer for section (%d bytes)",
3003 (int)(image.sections[i].size));
3004 break;
3005 }
3006
3007 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3008 if (retval != ERROR_OK) {
3009 free(buffer);
3010 break;
3011 }
3012
3013 uint32_t offset = 0;
3014 uint32_t length = buf_cnt;
3015
3016 /* DANGER!!! beware of unsigned comparison here!!! */
3017
3018 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3019 (image.sections[i].base_address < max_address)) {
3020
3021 if (image.sections[i].base_address < min_address) {
3022 /* clip addresses below */
3023 offset += min_address-image.sections[i].base_address;
3024 length -= offset;
3025 }
3026
3027 if (image.sections[i].base_address + buf_cnt > max_address)
3028 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3029
3030 retval = target_write_buffer(target,
3031 image.sections[i].base_address + offset, length, buffer + offset);
3032 if (retval != ERROR_OK) {
3033 free(buffer);
3034 break;
3035 }
3036 image_size += length;
3037 command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "",
3038 (unsigned int)length,
3039 image.sections[i].base_address + offset);
3040 }
3041
3042 free(buffer);
3043 }
3044
3045 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3046 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
3047 "in %fs (%0.3f KiB/s)", image_size,
3048 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3049 }
3050
3051 image_close(&image);
3052
3053 return retval;
3055 }
3056
3057 COMMAND_HANDLER(handle_dump_image_command)
3058 {
3059 struct fileio fileio;
3060 uint8_t *buffer;
3061 int retval, retvaltemp;
3062 uint32_t address, size;
3063 struct duration bench;
3064 struct target *target = get_current_target(CMD_CTX);
3065
3066 if (CMD_ARGC != 3)
3067 return ERROR_COMMAND_SYNTAX_ERROR;
3068
3069 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address);
3070 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size);
3071
3072 uint32_t buf_size = (size > 4096) ? 4096 : size;
3073 buffer = malloc(buf_size);
3074 if (!buffer)
3075 return ERROR_FAIL;
3076
3077 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3078 if (retval != ERROR_OK) {
3079 free(buffer);
3080 return retval;
3081 }
3082
3083 duration_start(&bench);
3084
3085 while (size > 0) {
3086 size_t size_written;
3087 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3088 retval = target_read_buffer(target, address, this_run_size, buffer);
3089 if (retval != ERROR_OK)
3090 break;
3091
3092 retval = fileio_write(&fileio, this_run_size, buffer, &size_written);
3093 if (retval != ERROR_OK)
3094 break;
3095
3096 size -= this_run_size;
3097 address += this_run_size;
3098 }
3099
3100 free(buffer);
3101
3102 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3103 int filesize;
3104 retval = fileio_size(&fileio, &filesize);
3105 if (retval != ERROR_OK) {
fileio_close(&fileio);
3106 return retval;
}
3107 command_print(CMD_CTX,
3108 "dumped %ld bytes in %fs (%0.3f KiB/s)", (long)filesize,
3109 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3110 }
3111
3112 retvaltemp = fileio_close(&fileio);
3113 if (retvaltemp != ERROR_OK)
3114 return retvaltemp;
3115
3116 return retval;
3117 }
3118
3119 static COMMAND_HELPER(handle_verify_image_command_internal, int verify)
3120 {
3121 uint8_t *buffer;
3122 size_t buf_cnt;
3123 uint32_t image_size;
3124 int i;
3125 int retval;
3126 uint32_t checksum = 0;
3127 uint32_t mem_checksum = 0;
3128
3129 struct image image;
3130
3131 struct target *target = get_current_target(CMD_CTX);
3132
3133 if (CMD_ARGC < 1)
3134 return ERROR_COMMAND_SYNTAX_ERROR;
3135
3136 if (!target) {
3137 LOG_ERROR("no target selected");
3138 return ERROR_FAIL;
3139 }
3140
3141 struct duration bench;
3142 duration_start(&bench);
3143
3144 if (CMD_ARGC >= 2) {
3145 uint32_t addr;
3146 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr);
3147 image.base_address = addr;
3148 image.base_address_set = 1;
3149 } else {
3150 image.base_address_set = 0;
3151 image.base_address = 0x0;
3152 }
3153
3154 image.start_address_set = 0;
3155
3156 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3157 if (retval != ERROR_OK)
3158 return retval;
3159
3160 image_size = 0x0;
3161 int diffs = 0;
3162 retval = ERROR_OK;
3163 for (i = 0; i < image.num_sections; i++) {
3164 buffer = malloc(image.sections[i].size);
3165 if (buffer == NULL) {
3166 command_print(CMD_CTX,
3167 "error allocating buffer for section (%d bytes)",
3168 (int)(image.sections[i].size));
3169 break;
3170 }
3171 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3172 if (retval != ERROR_OK) {
3173 free(buffer);
3174 break;
3175 }
3176
3177 if (verify) {
3178 /* calculate checksum of image */
3179 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3180 if (retval != ERROR_OK) {
3181 free(buffer);
3182 break;
3183 }
3184
3185 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3186 if (retval != ERROR_OK) {
3187 free(buffer);
3188 break;
3189 }
3190
3191 if (checksum != mem_checksum) {
3192 /* failed crc checksum, fall back to a binary compare */
3193 uint8_t *data;
3194
3195 if (diffs == 0)
3196 LOG_ERROR("checksum mismatch - attempting binary compare");
3197
3198 data = malloc(buf_cnt);
if (data == NULL) {
free(buffer);
retval = ERROR_FAIL;
break;
}
3199
3200 /* Can we use 32bit word accesses? */
3201 int size = 1;
3202 int count = buf_cnt;
3203 if ((count % 4) == 0) {
3204 size *= 4;
3205 count /= 4;
3206 }
3207 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
3208 if (retval == ERROR_OK) {
3209 uint32_t t;
3210 for (t = 0; t < buf_cnt; t++) {
3211 if (data[t] != buffer[t]) {
3212 command_print(CMD_CTX,
3213 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3214 diffs,
3215 (unsigned)(t + image.sections[i].base_address),
3216 data[t],
3217 buffer[t]);
3218 if (diffs++ >= 127) {
3219 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
3220 free(data);
3221 free(buffer);
3222 goto done;
3223 }
3224 }
3225 keep_alive();
3226 }
3227 }
3228 free(data);
3229 }
3230 } else {
3231 command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx",
3232 image.sections[i].base_address,
3233 buf_cnt);
3234 }
3235
3236 free(buffer);
3237 image_size += buf_cnt;
3238 }
3239 if (diffs > 0)
3240 command_print(CMD_CTX, "No more differences found.");
3241 done:
3242 if (diffs > 0)
3243 retval = ERROR_FAIL;
3244 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3245 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
3246 "in %fs (%0.3f KiB/s)", image_size,
3247 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3248 }
3249
3250 image_close(&image);
3251
3252 return retval;
3253 }
3254
3255 COMMAND_HANDLER(handle_verify_image_command)
3256 {
3257 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 1);
3258 }
3259
3260 COMMAND_HANDLER(handle_test_image_command)
3261 {
3262 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, 0);
3263 }
3264
3265 static int handle_bp_command_list(struct command_context *cmd_ctx)
3266 {
3267 struct target *target = get_current_target(cmd_ctx);
3268 struct breakpoint *breakpoint = target->breakpoints;
3269 while (breakpoint) {
3270 if (breakpoint->type == BKPT_SOFT) {
3271 char *buf = buf_to_str(breakpoint->orig_instr,
3272 breakpoint->length, 16);
3273 command_print(cmd_ctx, "IVA breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i, 0x%s",
3274 breakpoint->address,
3275 breakpoint->length,
3276 breakpoint->set, buf);
3277 free(buf);
3278 } else {
3279 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3280 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3281 breakpoint->asid,
3282 breakpoint->length, breakpoint->set);
3283 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3284 command_print(cmd_ctx, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3285 breakpoint->address,
3286 breakpoint->length, breakpoint->set);
3287 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3288 breakpoint->asid);
3289 } else
3290 command_print(cmd_ctx, "Breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i",
3291 breakpoint->address,
3292 breakpoint->length, breakpoint->set);
3293 }
3294
3295 breakpoint = breakpoint->next;
3296 }
3297 return ERROR_OK;
3298 }
3299
3300 static int handle_bp_command_set(struct command_context *cmd_ctx,
3301 uint32_t addr, uint32_t asid, uint32_t length, int hw)
3302 {
3303 struct target *target = get_current_target(cmd_ctx);
3304
3305 if (asid == 0) {
3306 int retval = breakpoint_add(target, addr, length, hw);
3307 if (ERROR_OK == retval)
3308 command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr);
3309 else {
3310 LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
3311 return retval;
3312 }
3313 } else if (addr == 0) {
3314 int retval = context_breakpoint_add(target, asid, length, hw);
3315 if (ERROR_OK == retval)
3316 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3317 else {
3318 LOG_ERROR("Failure setting breakpoint, the same address (CONTEXTID) is already used");
3319 return retval;
3320 }
3321 } else {
3322 int retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3323 if (ERROR_OK == retval)
3324 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3325 else {
3326 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3327 return retval;
3328 }
3329 }
3330 return ERROR_OK;
3331 }
3332
3333 COMMAND_HANDLER(handle_bp_command)
3334 {
3335 uint32_t addr;
3336 uint32_t asid;
3337 uint32_t length;
3338 int hw = BKPT_SOFT;
3339
3340 switch (CMD_ARGC) {
3341 case 0:
3342 return handle_bp_command_list(CMD_CTX);
3343
3344 case 2:
3345 asid = 0;
3346 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3347 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3348 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3349
3350 case 3:
3351 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3352 hw = BKPT_HARD;
3353 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3354
3355 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3356
3357 asid = 0;
3358 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3359 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3360 hw = BKPT_HARD;
3361 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3362 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3363 addr = 0;
3364 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3365 }
/* neither "hw" nor "hw_ctx": reject rather than falling through to the 4-argument form */
return ERROR_COMMAND_SYNTAX_ERROR;
3366
3367 case 4:
3368 hw = BKPT_HARD;
3369 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3370 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3371 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3372 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3373
3374 default:
3375 return ERROR_COMMAND_SYNTAX_ERROR;
3376 }
3377 }
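/* Forms accepted above (addresses and lengths are example values):
 *   bp                    list all breakpoints
 *   bp 0x8000 4           software breakpoint at an address (IVA)
 *   bp 0x8000 4 hw        hardware breakpoint at an address
 *   bp 0x12 4 hw_ctx      hardware breakpoint on a context ID (ASID)
 *   bp 0x8000 0x12 4      hybrid breakpoint (address + context ID)
 */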
3378
3379 COMMAND_HANDLER(handle_rbp_command)
3380 {
3381 if (CMD_ARGC != 1)
3382 return ERROR_COMMAND_SYNTAX_ERROR;
3383
3384 uint32_t addr;
3385 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3386
3387 struct target *target = get_current_target(CMD_CTX);
3388 breakpoint_remove(target, addr);
3389
3390 return ERROR_OK;
3391 }
3392
3393 COMMAND_HANDLER(handle_wp_command)
3394 {
3395 struct target *target = get_current_target(CMD_CTX);
3396
3397 if (CMD_ARGC == 0) {
3398 struct watchpoint *watchpoint = target->watchpoints;
3399
3400 while (watchpoint) {
3401 command_print(CMD_CTX, "address: 0x%8.8" PRIx32
3402 ", len: 0x%8.8" PRIx32
3403 ", r/w/a: %i, value: 0x%8.8" PRIx32
3404 ", mask: 0x%8.8" PRIx32,
3405 watchpoint->address,
3406 watchpoint->length,
3407 (int)watchpoint->rw,
3408 watchpoint->value,
3409 watchpoint->mask);
3410 watchpoint = watchpoint->next;
3411 }
3412 return ERROR_OK;
3413 }
3414
3415 enum watchpoint_rw type = WPT_ACCESS;
3416 uint32_t addr = 0;
3417 uint32_t length = 0;
3418 uint32_t data_value = 0x0;
3419 uint32_t data_mask = 0xffffffff;
3420
3421 switch (CMD_ARGC) {
3422 case 5:
3423 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3424 /* fall through */
3425 case 4:
3426 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3427 /* fall through */
3428 case 3:
3429 switch (CMD_ARGV[2][0]) {
3430 case 'r':
3431 type = WPT_READ;
3432 break;
3433 case 'w':
3434 type = WPT_WRITE;
3435 break;
3436 case 'a':
3437 type = WPT_ACCESS;
3438 break;
3439 default:
3440 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3441 return ERROR_COMMAND_SYNTAX_ERROR;
3442 }
3443 /* fall through */
3444 case 2:
3445 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3446 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3447 break;
3448
3449 default:
3450 return ERROR_COMMAND_SYNTAX_ERROR;
3451 }
3452
3453 int retval = watchpoint_add(target, addr, length, type,
3454 data_value, data_mask);
3455 if (ERROR_OK != retval)
3456 LOG_ERROR("Failure setting watchpoints");
3457
3458 return retval;
3459 }
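/* Forms accepted above (values are examples):
 *   wp                            list all watchpoints
 *   wp 0x20000000 4               access watchpoint on a 4-byte range
 *   wp 0x20000000 4 w             write-only (or r / a) watchpoint
 *   wp 0x20000000 4 a 0x55 0xff   additionally match data value/mask
 */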
3460
3461 COMMAND_HANDLER(handle_rwp_command)
3462 {
3463 if (CMD_ARGC != 1)
3464 return ERROR_COMMAND_SYNTAX_ERROR;
3465
3466 uint32_t addr;
3467 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3468
3469 struct target *target = get_current_target(CMD_CTX);
3470 watchpoint_remove(target, addr);
3471
3472 return ERROR_OK;
3473 }
3474
3475 /**
3476 * Translate a virtual address to a physical address.
3477 *
3478 * The low-level target implementation must have logged a detailed error
3479 * which is forwarded to telnet/GDB session.
3480 */
3481 COMMAND_HANDLER(handle_virt2phys_command)
3482 {
3483 if (CMD_ARGC != 1)
3484 return ERROR_COMMAND_SYNTAX_ERROR;
3485
3486 uint32_t va;
3487 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], va);
3488 uint32_t pa;
3489
3490 struct target *target = get_current_target(CMD_CTX);
3491 int retval = target->type->virt2phys(target, va, &pa);
3492 if (retval == ERROR_OK)
3493 command_print(CMD_CTX, "Physical address 0x%08" PRIx32 "", pa);
3494
3495 return retval;
3496 }
3497
3498 static void writeData(FILE *f, const void *data, size_t len)
3499 {
3500 size_t written = fwrite(data, 1, len, f);
3501 if (written != len)
3502 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
3503 }
3504
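/* Write a 32-bit value as four little-endian bytes. */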
3505 static void writeLong(FILE *f, int l)
3506 {
3507 int i;
3508 for (i = 0; i < 4; i++) {
3509 char c = (l >> (i * 8)) & 0xff;
3510 writeData(f, &c, 1);
3511 }
3513 }
3514
3515 static void writeString(FILE *f, char *s)
3516 {
3517 writeData(f, s, strlen(s));
3518 }
3519
3520 typedef unsigned char UNIT[2]; /* unit of profiling */
3521
3522 /* Dump a gmon.out histogram file. */
3523 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename,
3524 bool with_range, uint32_t start_address, uint32_t end_address)
3525 {
3526 uint32_t i;
3527 FILE *f = fopen(filename, "w");
3528 if (f == NULL)
3529 return;
3530 writeString(f, "gmon");
3531 writeLong(f, 0x00000001); /* Version */
3532 writeLong(f, 0); /* padding */
3533 writeLong(f, 0); /* padding */
3534 writeLong(f, 0); /* padding */
3535
3536 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
3537 writeData(f, &zero, 1);
3538
3539 /* figure out bucket size */
3540 uint32_t min;
3541 uint32_t max;
3542 if (with_range) {
3543 min = start_address;
3544 max = end_address;
3545 } else {
3546 min = samples[0];
3547 max = samples[0];
3548 for (i = 0; i < sampleNum; i++) {
3549 if (min > samples[i])
3550 min = samples[i];
3551 if (max < samples[i])
3552 max = samples[i];
3553 }
3554
3555 /* max should be (largest sample + 1)
3556 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
3557 max++;
3558 }
3559
3560 int addressSpace = max - min;
3561 assert(addressSpace >= 2);
3562
3563 /* FIXME: What is the reasonable number of buckets?
3564 * The profiling result will be more accurate if there are enough buckets. */
3565 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
3566 uint32_t numBuckets = addressSpace / sizeof(UNIT);
3567 if (numBuckets > maxBuckets)
3568 numBuckets = maxBuckets;
3569 int *buckets = malloc(sizeof(int) * numBuckets);
3570 if (buckets == NULL) {
3571 fclose(f);
3572 return;
3573 }
3574 memset(buckets, 0, sizeof(int) * numBuckets);
3575 for (i = 0; i < sampleNum; i++) {
3576 uint32_t address = samples[i];
3577
3578 if ((address < min) || (max <= address))
3579 continue;
3580
3581 long long a = address - min;
3582 long long b = numBuckets;
3583 long long c = addressSpace;
3584 int index_t = (a * b) / c; /* 64-bit math: a * b would overflow a 32-bit int */
3585 buckets[index_t]++;
3586 }
3587
3588 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
3589 writeLong(f, min); /* low_pc */
3590 writeLong(f, max); /* high_pc */
3591 writeLong(f, numBuckets); /* # of buckets */
3592 writeLong(f, 100); /* KLUDGE! We lie, ca. 100Hz best case. */
3593 writeString(f, "seconds");
3594 for (i = 0; i < (15-strlen("seconds")); i++)
3595 writeData(f, &zero, 1);
3596 writeString(f, "s");
3597
3598 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
3599
3600 char *data = malloc(2 * numBuckets);
3601 if (data != NULL) {
3602 for (i = 0; i < numBuckets; i++) {
3603 int val;
3604 val = buckets[i];
3605 if (val > 65535)
3606 val = 65535;
3607 data[i * 2] = val&0xff;
3608 data[i * 2 + 1] = (val >> 8) & 0xff;
3609 }
3610 free(buckets);
3611 writeData(f, data, numBuckets * 2);
3612 free(data);
3613 } else
3614 free(buckets);
3615
3616 fclose(f);
3617 }
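/* The file written above follows the gmon.out histogram layout, so it can
 * be inspected with e.g. "gprof <elf-with-symbols> gmon.out" (illustrative
 * invocation; the ELF must match the code that was sampled). */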
3618
3619 /* Profiling samples the CPU PC as quickly as OpenOCD is able,
3620 * which serves as a statistical (random) sampling of the PC */
3621 COMMAND_HANDLER(handle_profile_command)
3622 {
3623 struct target *target = get_current_target(CMD_CTX);
3624
3625 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
3626 return ERROR_COMMAND_SYNTAX_ERROR;
3627
3628 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
3629 uint32_t offset;
3630 uint32_t num_of_samples;
3631 int retval = ERROR_OK;
3632
3633 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
3634
3635 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
3636 if (samples == NULL) {
3637 LOG_ERROR("No memory to store samples.");
3638 return ERROR_FAIL;
3639 }
3640
3641 /**
3642 * Some cores let us sample the PC without the
3643 * annoying halt/resume step; for example, ARMv7 PCSR.
3644 * Provide a way to use that more efficient mechanism.
3645 */
3646 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
3647 &num_of_samples, offset);