target: check return value of register get/set callbacks
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75
76 /* targets */
77 extern struct target_type arm7tdmi_target;
78 extern struct target_type arm720t_target;
79 extern struct target_type arm9tdmi_target;
80 extern struct target_type arm920t_target;
81 extern struct target_type arm966e_target;
82 extern struct target_type arm946e_target;
83 extern struct target_type arm926ejs_target;
84 extern struct target_type fa526_target;
85 extern struct target_type feroceon_target;
86 extern struct target_type dragonite_target;
87 extern struct target_type xscale_target;
88 extern struct target_type cortexm_target;
89 extern struct target_type cortexa_target;
90 extern struct target_type aarch64_target;
91 extern struct target_type cortexr4_target;
92 extern struct target_type arm11_target;
93 extern struct target_type ls1_sap_target;
94 extern struct target_type mips_m4k_target;
95 extern struct target_type mips_mips64_target;
96 extern struct target_type avr_target;
97 extern struct target_type dsp563xx_target;
98 extern struct target_type dsp5680xx_target;
99 extern struct target_type testee_target;
100 extern struct target_type avr32_ap7k_target;
101 extern struct target_type hla_target;
102 extern struct target_type nds32_v2_target;
103 extern struct target_type nds32_v3_target;
104 extern struct target_type nds32_v3m_target;
105 extern struct target_type or1k_target;
106 extern struct target_type quark_x10xx_target;
107 extern struct target_type quark_d20xx_target;
108 extern struct target_type stm8_target;
109 extern struct target_type riscv_target;
110 extern struct target_type mem_ap_target;
111 extern struct target_type esirisc_target;
112 extern struct target_type arcv2_target;
113
114 static struct target_type *target_types[] = {
115 &arm7tdmi_target,
116 &arm9tdmi_target,
117 &arm920t_target,
118 &arm720t_target,
119 &arm966e_target,
120 &arm946e_target,
121 &arm926ejs_target,
122 &fa526_target,
123 &feroceon_target,
124 &dragonite_target,
125 &xscale_target,
126 &cortexm_target,
127 &cortexa_target,
128 &cortexr4_target,
129 &arm11_target,
130 &ls1_sap_target,
131 &mips_m4k_target,
132 &avr_target,
133 &dsp563xx_target,
134 &dsp5680xx_target,
135 &testee_target,
136 &avr32_ap7k_target,
137 &hla_target,
138 &nds32_v2_target,
139 &nds32_v3_target,
140 &nds32_v3m_target,
141 &or1k_target,
142 &quark_x10xx_target,
143 &quark_d20xx_target,
144 &stm8_target,
145 &riscv_target,
146 &mem_ap_target,
147 &esirisc_target,
148 &arcv2_target,
149 &aarch64_target,
150 &mips_mips64_target,
151 NULL,
152 };
153
154 struct target *all_targets;
155 static struct target_event_callback *target_event_callbacks;
156 static struct target_timer_callback *target_timer_callbacks;
157 static LIST_HEAD(target_reset_callback_list);
158 static LIST_HEAD(target_trace_callback_list);
159 static const int polling_interval = 100;
160
161 static const struct jim_nvp nvp_assert[] = {
162 { .name = "assert", NVP_ASSERT },
163 { .name = "deassert", NVP_DEASSERT },
164 { .name = "T", NVP_ASSERT },
165 { .name = "F", NVP_DEASSERT },
166 { .name = "t", NVP_ASSERT },
167 { .name = "f", NVP_DEASSERT },
168 { .name = NULL, .value = -1 }
169 };
170
171 static const struct jim_nvp nvp_error_target[] = {
172 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
173 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
174 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
175 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
176 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
177 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
178 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
179 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
180 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
181 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
182 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
183 { .value = -1, .name = NULL }
184 };
185
186 static const char *target_strerror_safe(int err)
187 {
188 const struct jim_nvp *n;
189
190 n = jim_nvp_value2name_simple(nvp_error_target, err);
191 if (n->name == NULL)
192 return "unknown";
193 else
194 return n->name;
195 }
196
197 static const struct jim_nvp nvp_target_event[] = {
198
199 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
200 { .value = TARGET_EVENT_HALTED, .name = "halted" },
201 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
202 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
203 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
204 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
205 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
206
207 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
208 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
209
210 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
211 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
212 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
213 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
214 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
215 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
216 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
217 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
218
219 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
220 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
221 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
222
223 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
224 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
225
226 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
227 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
228
229 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
230 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
231
232 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
233 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
234
235 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
236
237 { .name = NULL, .value = -1 }
238 };
239
240 static const struct jim_nvp nvp_target_state[] = {
241 { .name = "unknown", .value = TARGET_UNKNOWN },
242 { .name = "running", .value = TARGET_RUNNING },
243 { .name = "halted", .value = TARGET_HALTED },
244 { .name = "reset", .value = TARGET_RESET },
245 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
246 { .name = NULL, .value = -1 },
247 };
248
249 static const struct jim_nvp nvp_target_debug_reason[] = {
250 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
251 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
252 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
253 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
254 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
255 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
256 { .name = "program-exit", .value = DBG_REASON_EXIT },
257 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
258 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
259 { .name = NULL, .value = -1 },
260 };
261
262 static const struct jim_nvp nvp_target_endian[] = {
263 { .name = "big", .value = TARGET_BIG_ENDIAN },
264 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
265 { .name = "be", .value = TARGET_BIG_ENDIAN },
266 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
267 { .name = NULL, .value = -1 },
268 };
269
270 static const struct jim_nvp nvp_reset_modes[] = {
271 { .name = "unknown", .value = RESET_UNKNOWN },
272 { .name = "run", .value = RESET_RUN },
273 { .name = "halt", .value = RESET_HALT },
274 { .name = "init", .value = RESET_INIT },
275 { .name = NULL, .value = -1 },
276 };
277
278 const char *debug_reason_name(struct target *t)
279 {
280 const char *cp;
281
282 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
283 t->debug_reason)->name;
284 if (!cp) {
285 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
286 cp = "(*BUG*unknown*BUG*)";
287 }
288 return cp;
289 }
290
291 const char *target_state_name(struct target *t)
292 {
293 const char *cp;
294 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
295 if (!cp) {
296 LOG_ERROR("Invalid target state: %d", (int)(t->state));
297 cp = "(*BUG*unknown*BUG*)";
298 }
299
300 if (!target_was_examined(t) && t->defer_examine)
301 cp = "examine deferred";
302
303 return cp;
304 }
305
306 const char *target_event_name(enum target_event event)
307 {
308 const char *cp;
309 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
310 if (!cp) {
311 LOG_ERROR("Invalid target event: %d", (int)(event));
312 cp = "(*BUG*unknown*BUG*)";
313 }
314 return cp;
315 }
316
317 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
318 {
319 const char *cp;
320 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
321 if (!cp) {
322 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
323 cp = "(*BUG*unknown*BUG*)";
324 }
325 return cp;
326 }
327
328 /* determine the number of the new target */
329 static int new_target_number(void)
330 {
331 struct target *t;
332 int x;
333
334 /* number is 0 based */
335 x = -1;
336 t = all_targets;
337 while (t) {
338 if (x < t->target_number)
339 x = t->target_number;
340 t = t->next;
341 }
342 return x + 1;
343 }
344
345 static void append_to_list_all_targets(struct target *target)
346 {
347 struct target **t = &all_targets;
348
349 while (*t)
350 t = &((*t)->next);
351 *t = target;
352 }
353
354 /* read a uint64_t from a buffer in target memory endianness */
355 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
356 {
357 if (target->endianness == TARGET_LITTLE_ENDIAN)
358 return le_to_h_u64(buffer);
359 else
360 return be_to_h_u64(buffer);
361 }
362
363 /* read a uint32_t from a buffer in target memory endianness */
364 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
365 {
366 if (target->endianness == TARGET_LITTLE_ENDIAN)
367 return le_to_h_u32(buffer);
368 else
369 return be_to_h_u32(buffer);
370 }
371
372 /* read a uint24_t from a buffer in target memory endianness */
373 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 return le_to_h_u24(buffer);
377 else
378 return be_to_h_u24(buffer);
379 }
380
381 /* read a uint16_t from a buffer in target memory endianness */
382 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 return le_to_h_u16(buffer);
386 else
387 return be_to_h_u16(buffer);
388 }
389
390 /* write a uint64_t to a buffer in target memory endianness */
391 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 h_u64_to_le(buffer, value);
395 else
396 h_u64_to_be(buffer, value);
397 }
398
399 /* write a uint32_t to a buffer in target memory endianness */
400 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
401 {
402 if (target->endianness == TARGET_LITTLE_ENDIAN)
403 h_u32_to_le(buffer, value);
404 else
405 h_u32_to_be(buffer, value);
406 }
407
408 /* write a uint24_t to a buffer in target memory endianness */
409 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
410 {
411 if (target->endianness == TARGET_LITTLE_ENDIAN)
412 h_u24_to_le(buffer, value);
413 else
414 h_u24_to_be(buffer, value);
415 }
416
417 /* write a uint16_t to a buffer in target memory endianness */
418 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
419 {
420 if (target->endianness == TARGET_LITTLE_ENDIAN)
421 h_u16_to_le(buffer, value);
422 else
423 h_u16_to_be(buffer, value);
424 }
425
426 /* write a uint8_t to a buffer in target memory endianness */
427 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
428 {
429 *buffer = value;
430 }
431
432 /* read a uint64_t array from a buffer in target memory endianness */
433 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
434 {
435 uint32_t i;
436 for (i = 0; i < count; i++)
437 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
438 }
439
440 /* read a uint32_t array from a buffer in target memory endianness */
441 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
442 {
443 uint32_t i;
444 for (i = 0; i < count; i++)
445 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
446 }
447
448 /* read a uint16_t array from a buffer in target memory endianness */
449 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
450 {
451 uint32_t i;
452 for (i = 0; i < count; i++)
453 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
454 }
455
456 /* write a uint64_t array to a buffer in target memory endianness */
457 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
458 {
459 uint32_t i;
460 for (i = 0; i < count; i++)
461 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
462 }
463
464 /* write a uint32_t array to a buffer in target memory endianness */
465 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
466 {
467 uint32_t i;
468 for (i = 0; i < count; i++)
469 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
470 }
471
472 /* write a uint16_t array to a buffer in target memory endianness */
473 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
474 {
475 uint32_t i;
476 for (i = 0; i < count; i++)
477 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
478 }
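/* Editor's note: illustrative sketch only, not part of the original file.
 * It shows how the endianness-aware helpers above convert between raw
 * target-memory buffers and host-order values; the address and value used
 * here are assumptions for the example. Guarded by #if 0 so it is never
 * compiled. */
#if 0
static int example_buffer_helpers(struct target *target)
{
	uint8_t raw[4];

	/* encode a host-order value in the target's byte order ... */
	target_buffer_set_u32(target, raw, 0xdeadbeef);
	int retval = target_write_memory(target, 0x20000000, 4, 1, raw);
	if (retval != ERROR_OK)
		return retval;

	/* ... and decode a value read back from target memory */
	retval = target_read_memory(target, 0x20000000, 4, 1, raw);
	if (retval != ERROR_OK)
		return retval;
	uint32_t value = target_buffer_get_u32(target, raw);
	LOG_DEBUG("read back 0x%08" PRIx32, value);
	return ERROR_OK;
}
#endif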
479
480 /* return a pointer to a configured target; id is name or number */
481 struct target *get_target(const char *id)
482 {
483 struct target *target;
484
485 /* try as Tcl target name */
486 for (target = all_targets; target; target = target->next) {
487 if (target_name(target) == NULL)
488 continue;
489 if (strcmp(id, target_name(target)) == 0)
490 return target;
491 }
492
493 /* It's OK to remove this fallback sometime after August 2010 or so */
494
495 /* no match, try as number */
496 unsigned num;
497 if (parse_uint(id, &num) != ERROR_OK)
498 return NULL;
499
500 for (target = all_targets; target; target = target->next) {
501 if (target->target_number == (int)num) {
502 LOG_WARNING("use '%s' as target identifier, not '%u'",
503 target_name(target), num);
504 return target;
505 }
506 }
507
508 return NULL;
509 }
510
511 /* returns a pointer to the n-th configured target */
512 struct target *get_target_by_num(int num)
513 {
514 struct target *target = all_targets;
515
516 while (target) {
517 if (target->target_number == num)
518 return target;
519 target = target->next;
520 }
521
522 return NULL;
523 }
524
525 struct target *get_current_target(struct command_context *cmd_ctx)
526 {
527 struct target *target = get_current_target_or_null(cmd_ctx);
528
529 if (target == NULL) {
530 LOG_ERROR("BUG: current_target out of bounds");
531 exit(-1);
532 }
533
534 return target;
535 }
536
537 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
538 {
539 return cmd_ctx->current_target_override
540 ? cmd_ctx->current_target_override
541 : cmd_ctx->current_target;
542 }
543
544 int target_poll(struct target *target)
545 {
546 int retval;
547
548 /* We can't poll until after examine */
549 if (!target_was_examined(target)) {
550 /* Fail silently lest we pollute the log */
551 return ERROR_FAIL;
552 }
553
554 retval = target->type->poll(target);
555 if (retval != ERROR_OK)
556 return retval;
557
558 if (target->halt_issued) {
559 if (target->state == TARGET_HALTED)
560 target->halt_issued = false;
561 else {
562 int64_t t = timeval_ms() - target->halt_issued_time;
563 if (t > DEFAULT_HALT_TIMEOUT) {
564 target->halt_issued = false;
565 LOG_INFO("Halt timed out, wake up GDB.");
566 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
567 }
568 }
569 }
570
571 return ERROR_OK;
572 }
573
574 int target_halt(struct target *target)
575 {
576 int retval;
577 /* We can't poll until after examine */
578 if (!target_was_examined(target)) {
579 LOG_ERROR("Target not examined yet");
580 return ERROR_FAIL;
581 }
582
583 retval = target->type->halt(target);
584 if (retval != ERROR_OK)
585 return retval;
586
587 target->halt_issued = true;
588 target->halt_issued_time = timeval_ms();
589
590 return ERROR_OK;
591 }
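/* Editor's note: illustrative sketch, not part of the original file. It shows
 * how a caller might pair target_halt() with target_poll() and the
 * DEFAULT_HALT_TIMEOUT mechanism above. The helper name and the 10 ms sleep
 * are assumptions for the example. Guarded by #if 0 so it is never compiled. */
#if 0
static int example_halt_and_wait(struct target *target)
{
	int retval = target_halt(target);
	if (retval != ERROR_OK)
		return retval;

	int64_t start = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() - start > DEFAULT_HALT_TIMEOUT)
			return ERROR_TARGET_TIMEOUT;
		alive_sleep(10);
	}
	return ERROR_OK;
}
#endif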
592
593 /**
594 * Make the target (re)start executing using its saved execution
595 * context (possibly with some modifications).
596 *
597 * @param target Which target should start executing.
598 * @param current True to use the target's saved program counter instead
599 * of the address parameter
600 * @param address Optionally used as the program counter.
601 * @param handle_breakpoints True iff breakpoints at the resumption PC
602 * should be skipped. (For example, maybe execution was stopped by
603 * such a breakpoint, in which case it would be counterproductive to
604 * let it re-trigger.)
605 * @param debug_execution False if all working areas allocated by OpenOCD
606 * should be released and/or restored to their original contents.
607 * (This would for example be true to run some downloaded "helper"
608 * algorithm code, which resides in one such working buffer and uses
609 * another for data storage.)
610 *
611 * @todo Resolve the ambiguity about what the "debug_execution" flag
612 * signifies. For example, Target implementations don't agree on how
613 * it relates to invalidation of the register cache, or to whether
614 * breakpoints and watchpoints should be enabled. (It would seem wrong
615 * to enable breakpoints when running downloaded "helper" algorithms
616 * (debug_execution true), since the breakpoints would be set to match
617 * target firmware being debugged, not the helper algorithm.... and
618 * enabling them could cause such helpers to malfunction (for example,
619 * by overwriting data with a breakpoint instruction.) On the other
620 * hand the infrastructure for running such helpers might use this
621 * procedure but rely on hardware breakpoint to detect termination.)
622 */
623 int target_resume(struct target *target, int current, target_addr_t address,
624 int handle_breakpoints, int debug_execution)
625 {
626 int retval;
627
628 /* We can't poll until after examine */
629 if (!target_was_examined(target)) {
630 LOG_ERROR("Target not examined yet");
631 return ERROR_FAIL;
632 }
633
634 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
635
636 /* note that resume *must* be asynchronous. The CPU can halt before
637 * we poll. The CPU can even halt at the current PC as a result of
638 * a software breakpoint being inserted by (a bug?) the application.
639 */
640 /*
641 * resume() triggers the event 'resumed'. The execution of TCL commands
642 * in the event handler causes the polling of targets. If the target has
643 * already halted for a breakpoint, polling will run the 'halted' event
644 * handler before the pending 'resumed' handler.
645 * Disable polling during resume() to guarantee the execution of handlers
646 * in the correct order.
647 */
648 bool save_poll = jtag_poll_get_enabled();
649 jtag_poll_set_enabled(false);
650 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
651 jtag_poll_set_enabled(save_poll);
652 if (retval != ERROR_OK)
653 return retval;
654
655 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
656
657 return retval;
658 }
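/* Editor's note: illustrative sketch, not part of the original file. It shows
 * the two common ways to call target_resume() documented above: resuming at
 * the saved program counter, and resuming at an explicit address while
 * skipping a breakpoint at that address. The address value is an assumption
 * for the example. Guarded by #if 0 so it is never compiled. */
#if 0
static int example_resume(struct target *target)
{
	/* current = 1: continue from the saved PC, honour breakpoints */
	int retval = target_resume(target, 1, 0, 0, 0);
	if (retval != ERROR_OK)
		return retval;

	/* current = 0: start at 0x08000000, skip a breakpoint at that PC */
	return target_resume(target, 0, 0x08000000, 1, 0);
}
#endif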
659
660 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
661 {
662 char buf[100];
663 int retval;
664 struct jim_nvp *n;
665 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
666 if (n->name == NULL) {
667 LOG_ERROR("invalid reset mode");
668 return ERROR_FAIL;
669 }
670
671 struct target *target;
672 for (target = all_targets; target; target = target->next)
673 target_call_reset_callbacks(target, reset_mode);
674
675 /* disable polling during reset to make reset event scripts
676 * more predictable, i.e. dr/irscan & pathmove in events will
677 * not have JTAG operations injected into the middle of a sequence.
678 */
679 bool save_poll = jtag_poll_get_enabled();
680
681 jtag_poll_set_enabled(false);
682
683 sprintf(buf, "ocd_process_reset %s", n->name);
684 retval = Jim_Eval(cmd->ctx->interp, buf);
685
686 jtag_poll_set_enabled(save_poll);
687
688 if (retval != JIM_OK) {
689 Jim_MakeErrorMessage(cmd->ctx->interp);
690 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
691 return ERROR_FAIL;
692 }
693
694 /* We want any events to be processed before the prompt */
695 retval = target_call_timer_callbacks_now();
696
697 for (target = all_targets; target; target = target->next) {
698 target->type->check_reset(target);
699 target->running_alg = false;
700 }
701
702 return retval;
703 }
704
705 static int identity_virt2phys(struct target *target,
706 target_addr_t virtual, target_addr_t *physical)
707 {
708 *physical = virtual;
709 return ERROR_OK;
710 }
711
712 static int no_mmu(struct target *target, int *enabled)
713 {
714 *enabled = 0;
715 return ERROR_OK;
716 }
717
718 static int default_examine(struct target *target)
719 {
720 target_set_examined(target);
721 return ERROR_OK;
722 }
723
724 /* no check by default */
725 static int default_check_reset(struct target *target)
726 {
727 return ERROR_OK;
728 }
729
730 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
731 * Keep in sync */
732 int target_examine_one(struct target *target)
733 {
734 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
735
736 int retval = target->type->examine(target);
737 if (retval != ERROR_OK) {
738 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
739 return retval;
740 }
741
742 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
743
744 return ERROR_OK;
745 }
746
747 static int jtag_enable_callback(enum jtag_event event, void *priv)
748 {
749 struct target *target = priv;
750
751 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
752 return ERROR_OK;
753
754 jtag_unregister_event_callback(jtag_enable_callback, target);
755
756 return target_examine_one(target);
757 }
758
759 /* Targets that correctly implement init + examine, i.e.
760 * no communication with target during init:
761 *
762 * XScale
763 */
764 int target_examine(void)
765 {
766 int retval = ERROR_OK;
767 struct target *target;
768
769 for (target = all_targets; target; target = target->next) {
770 /* defer examination, but don't skip it */
771 if (!target->tap->enabled) {
772 jtag_register_event_callback(jtag_enable_callback,
773 target);
774 continue;
775 }
776
777 if (target->defer_examine)
778 continue;
779
780 int retval2 = target_examine_one(target);
781 if (retval2 != ERROR_OK) {
782 LOG_WARNING("target %s examination failed", target_name(target));
783 retval = retval2;
784 }
785 }
786 return retval;
787 }
788
789 const char *target_type_name(struct target *target)
790 {
791 return target->type->name;
792 }
793
794 static int target_soft_reset_halt(struct target *target)
795 {
796 if (!target_was_examined(target)) {
797 LOG_ERROR("Target not examined yet");
798 return ERROR_FAIL;
799 }
800 if (!target->type->soft_reset_halt) {
801 LOG_ERROR("Target %s does not support soft_reset_halt",
802 target_name(target));
803 return ERROR_FAIL;
804 }
805 return target->type->soft_reset_halt(target);
806 }
807
808 /**
809 * Downloads a target-specific native code algorithm to the target,
810 * and executes it. Note that some targets may need to set up, enable,
811 * and tear down a breakpoint (hard or soft) to detect algorithm
812 * termination, while others may support lower overhead schemes where
813 * soft breakpoints embedded in the algorithm automatically terminate the
814 * algorithm.
815 *
816 * @param target used to run the algorithm
817 * @param num_mem_params
818 * @param mem_params
819 * @param num_reg_params
820 * @param reg_param
821 * @param entry_point
822 * @param exit_point
823 * @param timeout_ms
824 * @param arch_info target-specific description of the algorithm.
825 */
826 int target_run_algorithm(struct target *target,
827 int num_mem_params, struct mem_param *mem_params,
828 int num_reg_params, struct reg_param *reg_param,
829 uint32_t entry_point, uint32_t exit_point,
830 int timeout_ms, void *arch_info)
831 {
832 int retval = ERROR_FAIL;
833
834 if (!target_was_examined(target)) {
835 LOG_ERROR("Target not examined yet");
836 goto done;
837 }
838 if (!target->type->run_algorithm) {
839 LOG_ERROR("Target type '%s' does not support %s",
840 target_type_name(target), __func__);
841 goto done;
842 }
843
844 target->running_alg = true;
845 retval = target->type->run_algorithm(target,
846 num_mem_params, mem_params,
847 num_reg_params, reg_param,
848 entry_point, exit_point, timeout_ms, arch_info);
849 target->running_alg = false;
850
851 done:
852 return retval;
853 }
854
855 /**
856 * Executes a target-specific native code algorithm and leaves it running.
857 *
858 * @param target used to run the algorithm
859 * @param num_mem_params
860 * @param mem_params
861 * @param num_reg_params
862 * @param reg_params
863 * @param entry_point
864 * @param exit_point
865 * @param arch_info target-specific description of the algorithm.
866 */
867 int target_start_algorithm(struct target *target,
868 int num_mem_params, struct mem_param *mem_params,
869 int num_reg_params, struct reg_param *reg_params,
870 uint32_t entry_point, uint32_t exit_point,
871 void *arch_info)
872 {
873 int retval = ERROR_FAIL;
874
875 if (!target_was_examined(target)) {
876 LOG_ERROR("Target not examined yet");
877 goto done;
878 }
879 if (!target->type->start_algorithm) {
880 LOG_ERROR("Target type '%s' does not support %s",
881 target_type_name(target), __func__);
882 goto done;
883 }
884 if (target->running_alg) {
885 LOG_ERROR("Target is already running an algorithm");
886 goto done;
887 }
888
889 target->running_alg = true;
890 retval = target->type->start_algorithm(target,
891 num_mem_params, mem_params,
892 num_reg_params, reg_params,
893 entry_point, exit_point, arch_info);
894
895 done:
896 return retval;
897 }
898
899 /**
900 * Waits for an algorithm started with target_start_algorithm() to complete.
901 *
902 * @param target used to run the algorithm
903 * @param num_mem_params
904 * @param mem_params
905 * @param num_reg_params
906 * @param reg_params
907 * @param exit_point
908 * @param timeout_ms
909 * @param arch_info target-specific description of the algorithm.
910 */
911 int target_wait_algorithm(struct target *target,
912 int num_mem_params, struct mem_param *mem_params,
913 int num_reg_params, struct reg_param *reg_params,
914 uint32_t exit_point, int timeout_ms,
915 void *arch_info)
916 {
917 int retval = ERROR_FAIL;
918
919 if (!target->type->wait_algorithm) {
920 LOG_ERROR("Target type '%s' does not support %s",
921 target_type_name(target), __func__);
922 goto done;
923 }
924 if (!target->running_alg) {
925 LOG_ERROR("Target is not running an algorithm");
926 goto done;
927 }
928
929 retval = target->type->wait_algorithm(target,
930 num_mem_params, mem_params,
931 num_reg_params, reg_params,
932 exit_point, timeout_ms, arch_info);
933 if (retval != ERROR_TARGET_TIMEOUT)
934 target->running_alg = false;
935
936 done:
937 return retval;
938 }
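/* Editor's note: illustrative sketch, not part of the original file. It shows
 * the intended pairing of target_start_algorithm() and target_wait_algorithm()
 * for a helper that has already been loaded; the parameter-free call, the
 * entry/exit addresses and the 1000 ms timeout are assumptions for the
 * example. Guarded by #if 0 so it is never compiled. */
#if 0
static int example_start_and_wait(struct target *target,
	uint32_t entry_point, uint32_t exit_point)
{
	int retval = target_start_algorithm(target, 0, NULL, 0, NULL,
			entry_point, exit_point, NULL);
	if (retval != ERROR_OK)
		return retval;

	/* ... interact with the running helper here ... */

	return target_wait_algorithm(target, 0, NULL, 0, NULL,
			exit_point, 1000, NULL);
}
#endif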
939
940 /**
941 * Streams data to a circular buffer on target intended for consumption by code
942 * running asynchronously on target.
943 *
944 * This is intended for applications where target-specific native code runs
945 * on the target, receives data from the circular buffer, does something with
946 * it (most likely writing it to a flash memory), and advances the circular
947 * buffer pointer.
948 *
949 * This assumes that the helper algorithm has already been loaded to the target,
950 * but has not been started yet. Given memory and register parameters are passed
951 * to the algorithm.
952 *
953 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
954 * following format:
955 *
956 * [buffer_start + 0, buffer_start + 4):
957 * Write Pointer address (aka head). Written and updated by this
958 * routine when new data is written to the circular buffer.
959 * [buffer_start + 4, buffer_start + 8):
960 * Read Pointer address (aka tail). Updated by code running on the
961 * target after it consumes data.
962 * [buffer_start + 8, buffer_start + buffer_size):
963 * Circular buffer contents.
964 *
965 * See contrib/loaders/flash/stm32f1x.S for an example.
966 *
967 * @param target used to run the algorithm
968 * @param buffer address on the host where data to be sent is located
969 * @param count number of blocks to send
970 * @param block_size size in bytes of each block
971 * @param num_mem_params count of memory-based params to pass to algorithm
972 * @param mem_params memory-based params to pass to algorithm
973 * @param num_reg_params count of register-based params to pass to algorithm
974 * @param reg_params register-based params to pass to algorithm
975 * @param buffer_start address on the target of the circular buffer structure
976 * @param buffer_size size of the circular buffer structure
977 * @param entry_point address on the target to execute to start the algorithm
978 * @param exit_point address at which to set a breakpoint to catch the
979 * end of the algorithm; can be 0 if target triggers a breakpoint itself
980 * @param arch_info
981 */
982
983 int target_run_flash_async_algorithm(struct target *target,
984 const uint8_t *buffer, uint32_t count, int block_size,
985 int num_mem_params, struct mem_param *mem_params,
986 int num_reg_params, struct reg_param *reg_params,
987 uint32_t buffer_start, uint32_t buffer_size,
988 uint32_t entry_point, uint32_t exit_point, void *arch_info)
989 {
990 int retval;
991 int timeout = 0;
992
993 const uint8_t *buffer_orig = buffer;
994
995 /* Set up working area. First word is write pointer, second word is read pointer,
996 * rest is fifo data area. */
997 uint32_t wp_addr = buffer_start;
998 uint32_t rp_addr = buffer_start + 4;
999 uint32_t fifo_start_addr = buffer_start + 8;
1000 uint32_t fifo_end_addr = buffer_start + buffer_size;
1001
1002 uint32_t wp = fifo_start_addr;
1003 uint32_t rp = fifo_start_addr;
1004
1005 /* validate block_size is 2^n */
1006 assert(!block_size || !(block_size & (block_size - 1)));
1007
1008 retval = target_write_u32(target, wp_addr, wp);
1009 if (retval != ERROR_OK)
1010 return retval;
1011 retval = target_write_u32(target, rp_addr, rp);
1012 if (retval != ERROR_OK)
1013 return retval;
1014
1015 /* Start up algorithm on target and let it idle while writing the first chunk */
1016 retval = target_start_algorithm(target, num_mem_params, mem_params,
1017 num_reg_params, reg_params,
1018 entry_point,
1019 exit_point,
1020 arch_info);
1021
1022 if (retval != ERROR_OK) {
1023 LOG_ERROR("error starting target flash write algorithm");
1024 return retval;
1025 }
1026
1027 while (count > 0) {
1028
1029 retval = target_read_u32(target, rp_addr, &rp);
1030 if (retval != ERROR_OK) {
1031 LOG_ERROR("failed to get read pointer");
1032 break;
1033 }
1034
1035 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1036 (size_t) (buffer - buffer_orig), count, wp, rp);
1037
1038 if (rp == 0) {
1039 LOG_ERROR("flash write algorithm aborted by target");
1040 retval = ERROR_FLASH_OPERATION_FAILED;
1041 break;
1042 }
1043
1044 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1045 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1046 break;
1047 }
1048
1049 /* Count the number of bytes available in the fifo without
1050 * crossing the wrap around. Make sure to not fill it completely,
1051 * because that would make wp == rp and that's the empty condition. */
1052 uint32_t thisrun_bytes;
1053 if (rp > wp)
1054 thisrun_bytes = rp - wp - block_size;
1055 else if (rp > fifo_start_addr)
1056 thisrun_bytes = fifo_end_addr - wp;
1057 else
1058 thisrun_bytes = fifo_end_addr - wp - block_size;
1059
1060 if (thisrun_bytes == 0) {
1061 /* Throttle polling a bit if transfer is (much) faster than flash
1062 * programming. The exact delay shouldn't matter as long as it's
1063 * less than buffer size / flash speed. This is very unlikely to
1064 * run when using high latency connections such as USB. */
1065 alive_sleep(2);
1066
1067 /* to stop an infinite loop on some targets, check and increment a timeout;
1068 * this issue was observed on a Stellaris using the new ICDI interface */
1069 if (timeout++ >= 2500) {
1070 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1071 return ERROR_FLASH_OPERATION_FAILED;
1072 }
1073 continue;
1074 }
1075
1076 /* reset our timeout */
1077 timeout = 0;
1078
1079 /* Limit to the amount of data we actually want to write */
1080 if (thisrun_bytes > count * block_size)
1081 thisrun_bytes = count * block_size;
1082
1083 /* Force end of large blocks to be word aligned */
1084 if (thisrun_bytes >= 16)
1085 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1086
1087 /* Write data to fifo */
1088 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1089 if (retval != ERROR_OK)
1090 break;
1091
1092 /* Update counters and wrap write pointer */
1093 buffer += thisrun_bytes;
1094 count -= thisrun_bytes / block_size;
1095 wp += thisrun_bytes;
1096 if (wp >= fifo_end_addr)
1097 wp = fifo_start_addr;
1098
1099 /* Store updated write pointer to target */
1100 retval = target_write_u32(target, wp_addr, wp);
1101 if (retval != ERROR_OK)
1102 break;
1103
1104 /* Avoid GDB timeouts */
1105 keep_alive();
1106 }
1107
1108 if (retval != ERROR_OK) {
1109 /* abort flash write algorithm on target */
1110 target_write_u32(target, wp_addr, 0);
1111 }
1112
1113 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1114 num_reg_params, reg_params,
1115 exit_point,
1116 10000,
1117 arch_info);
1118
1119 if (retval2 != ERROR_OK) {
1120 LOG_ERROR("error waiting for target flash write algorithm");
1121 retval = retval2;
1122 }
1123
1124 if (retval == ERROR_OK) {
1125 /* check if algorithm set rp = 0 after fifo writer loop finished */
1126 retval = target_read_u32(target, rp_addr, &rp);
1127 if (retval == ERROR_OK && rp == 0) {
1128 LOG_ERROR("flash write algorithm aborted by target");
1129 retval = ERROR_FLASH_OPERATION_FAILED;
1130 }
1131 }
1132
1133 return retval;
1134 }
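/* Editor's note: illustrative sketch, not part of the original file. It
 * restates the circular buffer layout documented above as code: a working
 * area whose first two words hold the write and read pointers and whose
 * remainder is the data area. The 4 KiB working-area size, the 4-byte block
 * size and the absence of extra mem/reg params are assumptions for the
 * example. Guarded by #if 0 so it is never compiled. */
#if 0
static int example_flash_async(struct target *target, const uint8_t *data,
	uint32_t count, uint32_t entry_point, uint32_t exit_point)
{
	struct working_area *fifo;
	const uint32_t buffer_size = 4096;	/* 8-byte header + 4088-byte data area */

	int retval = target_alloc_working_area(target, buffer_size, &fifo);
	if (retval != ERROR_OK)
		return retval;

	/* stream 'count' blocks of 4 bytes each through the on-target FIFO */
	retval = target_run_flash_async_algorithm(target, data, count, 4,
			0, NULL, 0, NULL,
			fifo->address, fifo->size,
			entry_point, exit_point, NULL);

	target_free_working_area(target, fifo);
	return retval;
}
#endif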
1135
1136 int target_run_read_async_algorithm(struct target *target,
1137 uint8_t *buffer, uint32_t count, int block_size,
1138 int num_mem_params, struct mem_param *mem_params,
1139 int num_reg_params, struct reg_param *reg_params,
1140 uint32_t buffer_start, uint32_t buffer_size,
1141 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1142 {
1143 int retval;
1144 int timeout = 0;
1145
1146 const uint8_t *buffer_orig = buffer;
1147
1148 /* Set up working area. First word is write pointer, second word is read pointer,
1149 * rest is fifo data area. */
1150 uint32_t wp_addr = buffer_start;
1151 uint32_t rp_addr = buffer_start + 4;
1152 uint32_t fifo_start_addr = buffer_start + 8;
1153 uint32_t fifo_end_addr = buffer_start + buffer_size;
1154
1155 uint32_t wp = fifo_start_addr;
1156 uint32_t rp = fifo_start_addr;
1157
1158 /* validate block_size is 2^n */
1159 assert(!block_size || !(block_size & (block_size - 1)));
1160
1161 retval = target_write_u32(target, wp_addr, wp);
1162 if (retval != ERROR_OK)
1163 return retval;
1164 retval = target_write_u32(target, rp_addr, rp);
1165 if (retval != ERROR_OK)
1166 return retval;
1167
1168 /* Start up algorithm on target */
1169 retval = target_start_algorithm(target, num_mem_params, mem_params,
1170 num_reg_params, reg_params,
1171 entry_point,
1172 exit_point,
1173 arch_info);
1174
1175 if (retval != ERROR_OK) {
1176 LOG_ERROR("error starting target flash read algorithm");
1177 return retval;
1178 }
1179
1180 while (count > 0) {
1181 retval = target_read_u32(target, wp_addr, &wp);
1182 if (retval != ERROR_OK) {
1183 LOG_ERROR("failed to get write pointer");
1184 break;
1185 }
1186
1187 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1188 (size_t)(buffer - buffer_orig), count, wp, rp);
1189
1190 if (wp == 0) {
1191 LOG_ERROR("flash read algorithm aborted by target");
1192 retval = ERROR_FLASH_OPERATION_FAILED;
1193 break;
1194 }
1195
1196 if (((wp - fifo_start_addr) & (block_size - 1)) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1197 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1198 break;
1199 }
1200
1201 /* Count the number of bytes available in the fifo without
1202 * crossing the wrap around. */
1203 uint32_t thisrun_bytes;
1204 if (wp >= rp)
1205 thisrun_bytes = wp - rp;
1206 else
1207 thisrun_bytes = fifo_end_addr - rp;
1208
1209 if (thisrun_bytes == 0) {
1210 /* Throttle polling a bit if transfer is (much) faster than flash
1211 * reading. The exact delay shouldn't matter as long as it's
1212 * less than buffer size / flash speed. This is very unlikely to
1213 * run when using high latency connections such as USB. */
1214 alive_sleep(2);
1215
1216 /* to stop an infinite loop on some targets, check and increment a timeout;
1217 * this issue was observed on a Stellaris using the new ICDI interface */
1218 if (timeout++ >= 2500) {
1219 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1220 return ERROR_FLASH_OPERATION_FAILED;
1221 }
1222 continue;
1223 }
1224
1225 /* Reset our timeout */
1226 timeout = 0;
1227
1228 /* Limit to the amount of data we actually want to read */
1229 if (thisrun_bytes > count * block_size)
1230 thisrun_bytes = count * block_size;
1231
1232 /* Force end of large blocks to be word aligned */
1233 if (thisrun_bytes >= 16)
1234 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1235
1236 /* Read data from fifo */
1237 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1238 if (retval != ERROR_OK)
1239 break;
1240
1241 /* Update counters and wrap read pointer */
1242 buffer += thisrun_bytes;
1243 count -= thisrun_bytes / block_size;
1244 rp += thisrun_bytes;
1245 if (rp >= fifo_end_addr)
1246 rp = fifo_start_addr;
1247
1248 /* Store updated read pointer to target */
1249 retval = target_write_u32(target, rp_addr, rp);
1250 if (retval != ERROR_OK)
1251 break;
1252
1253 /* Avoid GDB timeouts */
1254 keep_alive();
1255
1256 }
1257
1258 if (retval != ERROR_OK) {
1259 /* abort flash read algorithm on target */
1260 target_write_u32(target, rp_addr, 0);
1261 }
1262
1263 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1264 num_reg_params, reg_params,
1265 exit_point,
1266 10000,
1267 arch_info);
1268
1269 if (retval2 != ERROR_OK) {
1270 LOG_ERROR("error waiting for target flash write algorithm");
1271 retval = retval2;
1272 }
1273
1274 if (retval == ERROR_OK) {
1275 /* check if algorithm set wp = 0 after fifo reader loop finished */
1276 retval = target_read_u32(target, wp_addr, &wp);
1277 if (retval == ERROR_OK && wp == 0) {
1278 LOG_ERROR("flash read algorithm aborted by target");
1279 retval = ERROR_FLASH_OPERATION_FAILED;
1280 }
1281 }
1282
1283 return retval;
1284 }
1285
1286 int target_read_memory(struct target *target,
1287 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1288 {
1289 if (!target_was_examined(target)) {
1290 LOG_ERROR("Target not examined yet");
1291 return ERROR_FAIL;
1292 }
1293 if (!target->type->read_memory) {
1294 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1295 return ERROR_FAIL;
1296 }
1297 return target->type->read_memory(target, address, size, count, buffer);
1298 }
1299
1300 int target_read_phys_memory(struct target *target,
1301 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1302 {
1303 if (!target_was_examined(target)) {
1304 LOG_ERROR("Target not examined yet");
1305 return ERROR_FAIL;
1306 }
1307 if (!target->type->read_phys_memory) {
1308 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1309 return ERROR_FAIL;
1310 }
1311 return target->type->read_phys_memory(target, address, size, count, buffer);
1312 }
1313
1314 int target_write_memory(struct target *target,
1315 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1316 {
1317 if (!target_was_examined(target)) {
1318 LOG_ERROR("Target not examined yet");
1319 return ERROR_FAIL;
1320 }
1321 if (!target->type->write_memory) {
1322 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1323 return ERROR_FAIL;
1324 }
1325 return target->type->write_memory(target, address, size, count, buffer);
1326 }
1327
1328 int target_write_phys_memory(struct target *target,
1329 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1330 {
1331 if (!target_was_examined(target)) {
1332 LOG_ERROR("Target not examined yet");
1333 return ERROR_FAIL;
1334 }
1335 if (!target->type->write_phys_memory) {
1336 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1337 return ERROR_FAIL;
1338 }
1339 return target->type->write_phys_memory(target, address, size, count, buffer);
1340 }
1341
1342 int target_add_breakpoint(struct target *target,
1343 struct breakpoint *breakpoint)
1344 {
1345 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1346 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1347 return ERROR_TARGET_NOT_HALTED;
1348 }
1349 return target->type->add_breakpoint(target, breakpoint);
1350 }
1351
1352 int target_add_context_breakpoint(struct target *target,
1353 struct breakpoint *breakpoint)
1354 {
1355 if (target->state != TARGET_HALTED) {
1356 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1357 return ERROR_TARGET_NOT_HALTED;
1358 }
1359 return target->type->add_context_breakpoint(target, breakpoint);
1360 }
1361
1362 int target_add_hybrid_breakpoint(struct target *target,
1363 struct breakpoint *breakpoint)
1364 {
1365 if (target->state != TARGET_HALTED) {
1366 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1367 return ERROR_TARGET_NOT_HALTED;
1368 }
1369 return target->type->add_hybrid_breakpoint(target, breakpoint);
1370 }
1371
1372 int target_remove_breakpoint(struct target *target,
1373 struct breakpoint *breakpoint)
1374 {
1375 return target->type->remove_breakpoint(target, breakpoint);
1376 }
1377
1378 int target_add_watchpoint(struct target *target,
1379 struct watchpoint *watchpoint)
1380 {
1381 if (target->state != TARGET_HALTED) {
1382 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1383 return ERROR_TARGET_NOT_HALTED;
1384 }
1385 return target->type->add_watchpoint(target, watchpoint);
1386 }
1387 int target_remove_watchpoint(struct target *target,
1388 struct watchpoint *watchpoint)
1389 {
1390 return target->type->remove_watchpoint(target, watchpoint);
1391 }
1392 int target_hit_watchpoint(struct target *target,
1393 struct watchpoint **hit_watchpoint)
1394 {
1395 if (target->state != TARGET_HALTED) {
1396 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1397 return ERROR_TARGET_NOT_HALTED;
1398 }
1399
1400 if (target->type->hit_watchpoint == NULL) {
1401 /* For backward compatibility, if hit_watchpoint is not implemented,
1402 * return ERROR_FAIL so that gdb_server does not rely on bogus
1403 * information. */
1404 return ERROR_FAIL;
1405 }
1406
1407 return target->type->hit_watchpoint(target, hit_watchpoint);
1408 }
1409
1410 const char *target_get_gdb_arch(struct target *target)
1411 {
1412 if (target->type->get_gdb_arch == NULL)
1413 return NULL;
1414 return target->type->get_gdb_arch(target);
1415 }
1416
1417 int target_get_gdb_reg_list(struct target *target,
1418 struct reg **reg_list[], int *reg_list_size,
1419 enum target_register_class reg_class)
1420 {
1421 int result = ERROR_FAIL;
1422
1423 if (!target_was_examined(target)) {
1424 LOG_ERROR("Target not examined yet");
1425 goto done;
1426 }
1427
1428 result = target->type->get_gdb_reg_list(target, reg_list,
1429 reg_list_size, reg_class);
1430
1431 done:
1432 if (result != ERROR_OK) {
1433 *reg_list = NULL;
1434 *reg_list_size = 0;
1435 }
1436 return result;
1437 }
1438
1439 int target_get_gdb_reg_list_noread(struct target *target,
1440 struct reg **reg_list[], int *reg_list_size,
1441 enum target_register_class reg_class)
1442 {
1443 if (target->type->get_gdb_reg_list_noread &&
1444 target->type->get_gdb_reg_list_noread(target, reg_list,
1445 reg_list_size, reg_class) == ERROR_OK)
1446 return ERROR_OK;
1447 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1448 }
1449
1450 bool target_supports_gdb_connection(struct target *target)
1451 {
1452 /*
1453 * exclude all the targets that don't provide get_gdb_reg_list
1454 * or that have explicit gdb_max_connection == 0
1455 */
1456 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1457 }
1458
1459 int target_step(struct target *target,
1460 int current, target_addr_t address, int handle_breakpoints)
1461 {
1462 int retval;
1463
1464 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1465
1466 retval = target->type->step(target, current, address, handle_breakpoints);
1467 if (retval != ERROR_OK)
1468 return retval;
1469
1470 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1471
1472 return retval;
1473 }
1474
1475 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1476 {
1477 if (target->state != TARGET_HALTED) {
1478 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1479 return ERROR_TARGET_NOT_HALTED;
1480 }
1481 return target->type->get_gdb_fileio_info(target, fileio_info);
1482 }
1483
1484 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1485 {
1486 if (target->state != TARGET_HALTED) {
1487 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1488 return ERROR_TARGET_NOT_HALTED;
1489 }
1490 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1491 }
1492
1493 target_addr_t target_address_max(struct target *target)
1494 {
1495 unsigned bits = target_address_bits(target);
1496 if (sizeof(target_addr_t) * 8 == bits)
1497 return (target_addr_t) -1;
1498 else
1499 return (((target_addr_t) 1) << bits) - 1;
1500 }
1501
1502 unsigned target_address_bits(struct target *target)
1503 {
1504 if (target->type->address_bits)
1505 return target->type->address_bits(target);
1506 return 32;
1507 }
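/* Editor's note: illustrative sketch, not part of the original file. It shows
 * the relationship between target_address_bits() and target_address_max():
 * for the default 32-bit case the maximum address is (1 << 32) - 1 =
 * 0xffffffff, while a target whose address width equals the width of
 * target_addr_t yields all-ones. Guarded by #if 0 so it is never compiled. */
#if 0
static void example_address_range(struct target *target)
{
	unsigned int bits = target_address_bits(target);
	target_addr_t max = target_address_max(target);
	LOG_DEBUG("%u address bits, highest address " TARGET_ADDR_FMT, bits, max);
}
#endif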
1508
1509 unsigned int target_data_bits(struct target *target)
1510 {
1511 if (target->type->data_bits)
1512 return target->type->data_bits(target);
1513 return 32;
1514 }
1515
1516 static int target_profiling(struct target *target, uint32_t *samples,
1517 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1518 {
1519 return target->type->profiling(target, samples, max_num_samples,
1520 num_samples, seconds);
1521 }
1522
1523 /**
1524 * Reset the @c examined flag for the given target.
1525 * Pure paranoia -- targets are zeroed on allocation.
1526 */
1527 static void target_reset_examined(struct target *target)
1528 {
1529 target->examined = false;
1530 }
1531
1532 static int handle_target(void *priv);
1533
1534 static int target_init_one(struct command_context *cmd_ctx,
1535 struct target *target)
1536 {
1537 target_reset_examined(target);
1538
1539 struct target_type *type = target->type;
1540 if (type->examine == NULL)
1541 type->examine = default_examine;
1542
1543 if (type->check_reset == NULL)
1544 type->check_reset = default_check_reset;
1545
1546 assert(type->init_target != NULL);
1547
1548 int retval = type->init_target(cmd_ctx, target);
1549 if (ERROR_OK != retval) {
1550 LOG_ERROR("target '%s' init failed", target_name(target));
1551 return retval;
1552 }
1553
1554 /* Sanity-check MMU support ... stub in what we must, to help
1555 * implement it in stages, but warn if we need to do so.
1556 */
1557 if (type->mmu) {
1558 if (type->virt2phys == NULL) {
1559 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1560 type->virt2phys = identity_virt2phys;
1561 }
1562 } else {
1563 /* Make sure no-MMU targets all behave the same: make no
1564 * distinction between physical and virtual addresses, and
1565 * ensure that virt2phys() is always an identity mapping.
1566 */
1567 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1568 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1569
1570 type->mmu = no_mmu;
1571 type->write_phys_memory = type->write_memory;
1572 type->read_phys_memory = type->read_memory;
1573 type->virt2phys = identity_virt2phys;
1574 }
1575
1576 if (target->type->read_buffer == NULL)
1577 target->type->read_buffer = target_read_buffer_default;
1578
1579 if (target->type->write_buffer == NULL)
1580 target->type->write_buffer = target_write_buffer_default;
1581
1582 if (target->type->get_gdb_fileio_info == NULL)
1583 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1584
1585 if (target->type->gdb_fileio_end == NULL)
1586 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1587
1588 if (target->type->profiling == NULL)
1589 target->type->profiling = target_profiling_default;
1590
1591 return ERROR_OK;
1592 }
1593
1594 static int target_init(struct command_context *cmd_ctx)
1595 {
1596 struct target *target;
1597 int retval;
1598
1599 for (target = all_targets; target; target = target->next) {
1600 retval = target_init_one(cmd_ctx, target);
1601 if (ERROR_OK != retval)
1602 return retval;
1603 }
1604
1605 if (!all_targets)
1606 return ERROR_OK;
1607
1608 retval = target_register_user_commands(cmd_ctx);
1609 if (ERROR_OK != retval)
1610 return retval;
1611
1612 retval = target_register_timer_callback(&handle_target,
1613 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1614 if (ERROR_OK != retval)
1615 return retval;
1616
1617 return ERROR_OK;
1618 }
1619
1620 COMMAND_HANDLER(handle_target_init_command)
1621 {
1622 int retval;
1623
1624 if (CMD_ARGC != 0)
1625 return ERROR_COMMAND_SYNTAX_ERROR;
1626
1627 static bool target_initialized;
1628 if (target_initialized) {
1629 LOG_INFO("'target init' has already been called");
1630 return ERROR_OK;
1631 }
1632 target_initialized = true;
1633
1634 retval = command_run_line(CMD_CTX, "init_targets");
1635 if (ERROR_OK != retval)
1636 return retval;
1637
1638 retval = command_run_line(CMD_CTX, "init_target_events");
1639 if (ERROR_OK != retval)
1640 return retval;
1641
1642 retval = command_run_line(CMD_CTX, "init_board");
1643 if (ERROR_OK != retval)
1644 return retval;
1645
1646 LOG_DEBUG("Initializing targets...");
1647 return target_init(CMD_CTX);
1648 }
1649
1650 int target_register_event_callback(int (*callback)(struct target *target,
1651 enum target_event event, void *priv), void *priv)
1652 {
1653 struct target_event_callback **callbacks_p = &target_event_callbacks;
1654
1655 if (callback == NULL)
1656 return ERROR_COMMAND_SYNTAX_ERROR;
1657
1658 if (*callbacks_p) {
1659 while ((*callbacks_p)->next)
1660 callbacks_p = &((*callbacks_p)->next);
1661 callbacks_p = &((*callbacks_p)->next);
1662 }
1663
1664 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1665 (*callbacks_p)->callback = callback;
1666 (*callbacks_p)->priv = priv;
1667 (*callbacks_p)->next = NULL;
1668
1669 return ERROR_OK;
1670 }
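/* Editor's note: illustrative sketch, not part of the original file. It shows
 * a minimal event callback and its registration with
 * target_register_event_callback(); the callback body is an assumption for
 * the example. Guarded by #if 0 so it is never compiled. */
#if 0
static int example_event_handler(struct target *target,
	enum target_event event, void *priv)
{
	if (event == TARGET_EVENT_HALTED)
		LOG_DEBUG("target %s halted", target_name(target));
	return ERROR_OK;
}

static void example_register_event_handler(void)
{
	target_register_event_callback(example_event_handler, NULL);
}
#endif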
1671
1672 int target_register_reset_callback(int (*callback)(struct target *target,
1673 enum target_reset_mode reset_mode, void *priv), void *priv)
1674 {
1675 struct target_reset_callback *entry;
1676
1677 if (callback == NULL)
1678 return ERROR_COMMAND_SYNTAX_ERROR;
1679
1680 entry = malloc(sizeof(struct target_reset_callback));
1681 if (entry == NULL) {
1682 LOG_ERROR("error allocating buffer for reset callback entry");
1683 return ERROR_COMMAND_SYNTAX_ERROR;
1684 }
1685
1686 entry->callback = callback;
1687 entry->priv = priv;
1688 list_add(&entry->list, &target_reset_callback_list);
1689
1690
1691 return ERROR_OK;
1692 }
1693
1694 int target_register_trace_callback(int (*callback)(struct target *target,
1695 size_t len, uint8_t *data, void *priv), void *priv)
1696 {
1697 struct target_trace_callback *entry;
1698
1699 if (callback == NULL)
1700 return ERROR_COMMAND_SYNTAX_ERROR;
1701
1702 entry = malloc(sizeof(struct target_trace_callback));
1703 if (entry == NULL) {
1704 LOG_ERROR("error allocating buffer for trace callback entry");
1705 return ERROR_COMMAND_SYNTAX_ERROR;
1706 }
1707
1708 entry->callback = callback;
1709 entry->priv = priv;
1710 list_add(&entry->list, &target_trace_callback_list);
1711
1712
1713 return ERROR_OK;
1714 }
1715
1716 int target_register_timer_callback(int (*callback)(void *priv),
1717 unsigned int time_ms, enum target_timer_type type, void *priv)
1718 {
1719 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1720
1721 if (callback == NULL)
1722 return ERROR_COMMAND_SYNTAX_ERROR;
1723
1724 if (*callbacks_p) {
1725 while ((*callbacks_p)->next)
1726 callbacks_p = &((*callbacks_p)->next);
1727 callbacks_p = &((*callbacks_p)->next);
1728 }
1729
1730 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1731 (*callbacks_p)->callback = callback;
1732 (*callbacks_p)->type = type;
1733 (*callbacks_p)->time_ms = time_ms;
1734 (*callbacks_p)->removed = false;
1735
1736 gettimeofday(&(*callbacks_p)->when, NULL);
1737 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1738
1739 (*callbacks_p)->priv = priv;
1740 (*callbacks_p)->next = NULL;
1741
1742 return ERROR_OK;
1743 }
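
/* Illustrative sketch, not part of the original file: registering a periodic
 * timer callback, mirroring how handle_target() is registered with
 * polling_interval above. The handler name and the 100 ms period are
 * hypothetical. */
static int example_periodic_tick(void *priv)
{
	/* invoked from target_call_timer_callbacks() roughly every 100 ms */
	return ERROR_OK;
}

static int example_install_tick(void)
{
	int retval = target_register_timer_callback(example_periodic_tick,
			100, TARGET_TIMER_TYPE_PERIODIC, NULL);
	if (retval != ERROR_OK)
		return retval;
	/* periodic callbacks stay installed until removed with
	 * target_unregister_timer_callback(); one-shot callbacks are
	 * dropped automatically after they fire (see below) */
	return ERROR_OK;
}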
1744
1745 int target_unregister_event_callback(int (*callback)(struct target *target,
1746 enum target_event event, void *priv), void *priv)
1747 {
1748 struct target_event_callback **p = &target_event_callbacks;
1749 struct target_event_callback *c = target_event_callbacks;
1750
1751 if (callback == NULL)
1752 return ERROR_COMMAND_SYNTAX_ERROR;
1753
1754 while (c) {
1755 struct target_event_callback *next = c->next;
1756 if ((c->callback == callback) && (c->priv == priv)) {
1757 *p = next;
1758 free(c);
1759 return ERROR_OK;
1760 } else
1761 p = &(c->next);
1762 c = next;
1763 }
1764
1765 return ERROR_OK;
1766 }
1767
1768 int target_unregister_reset_callback(int (*callback)(struct target *target,
1769 enum target_reset_mode reset_mode, void *priv), void *priv)
1770 {
1771 struct target_reset_callback *entry;
1772
1773 if (callback == NULL)
1774 return ERROR_COMMAND_SYNTAX_ERROR;
1775
1776 list_for_each_entry(entry, &target_reset_callback_list, list) {
1777 if (entry->callback == callback && entry->priv == priv) {
1778 list_del(&entry->list);
1779 free(entry);
1780 break;
1781 }
1782 }
1783
1784 return ERROR_OK;
1785 }
1786
1787 int target_unregister_trace_callback(int (*callback)(struct target *target,
1788 size_t len, uint8_t *data, void *priv), void *priv)
1789 {
1790 struct target_trace_callback *entry;
1791
1792 if (callback == NULL)
1793 return ERROR_COMMAND_SYNTAX_ERROR;
1794
1795 list_for_each_entry(entry, &target_trace_callback_list, list) {
1796 if (entry->callback == callback && entry->priv == priv) {
1797 list_del(&entry->list);
1798 free(entry);
1799 break;
1800 }
1801 }
1802
1803 return ERROR_OK;
1804 }
1805
1806 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1807 {
1808 if (callback == NULL)
1809 return ERROR_COMMAND_SYNTAX_ERROR;
1810
1811 for (struct target_timer_callback *c = target_timer_callbacks;
1812 c; c = c->next) {
1813 if ((c->callback == callback) && (c->priv == priv)) {
1814 c->removed = true;
1815 return ERROR_OK;
1816 }
1817 }
1818
1819 return ERROR_FAIL;
1820 }
1821
1822 int target_call_event_callbacks(struct target *target, enum target_event event)
1823 {
1824 struct target_event_callback *callback = target_event_callbacks;
1825 struct target_event_callback *next_callback;
1826
1827 if (event == TARGET_EVENT_HALTED) {
1828 /* deliver the GDB halt event before the regular halted callbacks */
1829 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1830 }
1831
1832 LOG_DEBUG("target event %i (%s) for core %s", event,
1833 jim_nvp_value2name_simple(nvp_target_event, event)->name,
1834 target_name(target));
1835
1836 target_handle_event(target, event);
1837
1838 while (callback) {
1839 next_callback = callback->next;
1840 callback->callback(target, event, callback->priv);
1841 callback = next_callback;
1842 }
1843
1844 return ERROR_OK;
1845 }
1846
1847 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1848 {
1849 struct target_reset_callback *callback;
1850
1851 LOG_DEBUG("target reset %i (%s)", reset_mode,
1852 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1853
1854 list_for_each_entry(callback, &target_reset_callback_list, list)
1855 callback->callback(target, reset_mode, callback->priv);
1856
1857 return ERROR_OK;
1858 }
1859
1860 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1861 {
1862 struct target_trace_callback *callback;
1863
1864 list_for_each_entry(callback, &target_trace_callback_list, list)
1865 callback->callback(target, len, data, callback->priv);
1866
1867 return ERROR_OK;
1868 }
1869
1870 static int target_timer_callback_periodic_restart(
1871 struct target_timer_callback *cb, struct timeval *now)
1872 {
1873 cb->when = *now;
1874 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1875 return ERROR_OK;
1876 }
1877
1878 static int target_call_timer_callback(struct target_timer_callback *cb,
1879 struct timeval *now)
1880 {
1881 cb->callback(cb->priv);
1882
1883 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1884 return target_timer_callback_periodic_restart(cb, now);
1885
1886 return target_unregister_timer_callback(cb->callback, cb->priv);
1887 }
1888
1889 static int target_call_timer_callbacks_check_time(int checktime)
1890 {
1891 static bool callback_processing;
1892
1893 /* Do not allow nesting */
1894 if (callback_processing)
1895 return ERROR_OK;
1896
1897 callback_processing = true;
1898
1899 keep_alive();
1900
1901 struct timeval now;
1902 gettimeofday(&now, NULL);
1903
1904 /* Store an address of the place containing a pointer to the
1905 * next item; initially, that's a standalone "root of the
1906 * list" variable. */
1907 struct target_timer_callback **callback = &target_timer_callbacks;
1908 while (callback && *callback) {
1909 if ((*callback)->removed) {
1910 struct target_timer_callback *p = *callback;
1911 *callback = (*callback)->next;
1912 free(p);
1913 continue;
1914 }
1915
1916 bool call_it = (*callback)->callback &&
1917 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1918 timeval_compare(&now, &(*callback)->when) >= 0);
1919
1920 if (call_it)
1921 target_call_timer_callback(*callback, &now);
1922
1923 callback = &(*callback)->next;
1924 }
1925
1926 callback_processing = false;
1927 return ERROR_OK;
1928 }
1929
1930 int target_call_timer_callbacks(void)
1931 {
1932 return target_call_timer_callbacks_check_time(1);
1933 }
1934
1935 /* invoke periodic callbacks immediately */
1936 int target_call_timer_callbacks_now(void)
1937 {
1938 return target_call_timer_callbacks_check_time(0);
1939 }
1940
1941 /* Prints the working area layout for debug purposes */
1942 static void print_wa_layout(struct target *target)
1943 {
1944 struct working_area *c = target->working_areas;
1945
1946 while (c) {
1947 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1948 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1949 c->address, c->address + c->size - 1, c->size);
1950 c = c->next;
1951 }
1952 }
1953
1954 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1955 static void target_split_working_area(struct working_area *area, uint32_t size)
1956 {
1957 assert(area->free); /* Shouldn't split an allocated area */
1958 assert(size <= area->size); /* Caller should guarantee this */
1959
1960 /* Split only if not already the right size */
1961 if (size < area->size) {
1962 struct working_area *new_wa = malloc(sizeof(*new_wa));
1963
1964 if (new_wa == NULL)
1965 return;
1966
1967 new_wa->next = area->next;
1968 new_wa->size = area->size - size;
1969 new_wa->address = area->address + size;
1970 new_wa->backup = NULL;
1971 new_wa->user = NULL;
1972 new_wa->free = true;
1973
1974 area->next = new_wa;
1975 area->size = size;
1976
1977 /* If backup memory was allocated to this area, it has the wrong size
1978 * now so free it and it will be reallocated if/when needed */
1979 free(area->backup);
1980 area->backup = NULL;
1981 }
1982 }
1983
1984 /* Merge all adjacent free areas into one */
1985 static void target_merge_working_areas(struct target *target)
1986 {
1987 struct working_area *c = target->working_areas;
1988
1989 while (c && c->next) {
1990 assert(c->next->address == c->address + c->size); /* This is an invariant */
1991
1992 /* Find two adjacent free areas */
1993 if (c->free && c->next->free) {
1994 /* Merge the last into the first */
1995 c->size += c->next->size;
1996
1997 /* Remove the last */
1998 struct working_area *to_be_freed = c->next;
1999 c->next = c->next->next;
2000 free(to_be_freed->backup);
2001 free(to_be_freed);
2002
2003 /* If backup memory was allocated to the remaining area, it has
2004 * the wrong size now */
2005 free(c->backup);
2006 c->backup = NULL;
2007 } else {
2008 c = c->next;
2009 }
2010 }
2011 }
2012
2013 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2014 {
2015 /* Reevaluate working area address based on MMU state */
2016 if (target->working_areas == NULL) {
2017 int retval;
2018 int enabled;
2019
2020 retval = target->type->mmu(target, &enabled);
2021 if (retval != ERROR_OK)
2022 return retval;
2023
2024 if (!enabled) {
2025 if (target->working_area_phys_spec) {
2026 LOG_DEBUG("MMU disabled, using physical "
2027 "address for working memory " TARGET_ADDR_FMT,
2028 target->working_area_phys);
2029 target->working_area = target->working_area_phys;
2030 } else {
2031 LOG_ERROR("No working memory available. "
2032 "Specify -work-area-phys to target.");
2033 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2034 }
2035 } else {
2036 if (target->working_area_virt_spec) {
2037 LOG_DEBUG("MMU enabled, using virtual "
2038 "address for working memory " TARGET_ADDR_FMT,
2039 target->working_area_virt);
2040 target->working_area = target->working_area_virt;
2041 } else {
2042 LOG_ERROR("No working memory available. "
2043 "Specify -work-area-virt to target.");
2044 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2045 }
2046 }
2047
2048 /* Set up initial working area on first call */
2049 struct working_area *new_wa = malloc(sizeof(*new_wa));
2050 if (new_wa) {
2051 new_wa->next = NULL;
2052 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2053 new_wa->address = target->working_area;
2054 new_wa->backup = NULL;
2055 new_wa->user = NULL;
2056 new_wa->free = true;
2057 }
2058
2059 target->working_areas = new_wa;
2060 }
2061
2062 /* only allocate multiples of 4 bytes */
2063 if (size % 4)
2064 size = (size + 3) & (~3UL);
2065
2066 struct working_area *c = target->working_areas;
2067
2068 /* Find the first large enough working area */
2069 while (c) {
2070 if (c->free && c->size >= size)
2071 break;
2072 c = c->next;
2073 }
2074
2075 if (c == NULL)
2076 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2077
2078 /* Split the working area into the requested size */
2079 target_split_working_area(c, size);
2080
2081 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2082 size, c->address);
2083
2084 if (target->backup_working_area) {
2085 if (c->backup == NULL) {
2086 c->backup = malloc(c->size);
2087 if (c->backup == NULL)
2088 return ERROR_FAIL;
2089 }
2090
2091 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2092 if (retval != ERROR_OK)
2093 return retval;
2094 }
2095
2096 /* mark as used, and return the new (reused) area */
2097 c->free = false;
2098 *area = c;
2099
2100 /* user pointer */
2101 c->user = area;
2102
2103 print_wa_layout(target);
2104
2105 return ERROR_OK;
2106 }
2107
2108 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2109 {
2110 int retval;
2111
2112 retval = target_alloc_working_area_try(target, size, area);
2113 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2114 LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
2115 return retval;
2116
2117 }
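
/* Illustrative sketch, not part of the original file: the usual
 * allocate/use/free pattern for a scratch working area, e.g. a download
 * buffer for a flash algorithm. The function name and the caller-supplied
 * data buffer are hypothetical; the calls are the APIs implemented here. */
static int example_use_working_area(struct target *target,
		const uint8_t *data, uint32_t len)
{
	struct working_area *wa = NULL;

	int retval = target_alloc_working_area(target, len, &wa);
	if (retval != ERROR_OK)
		return retval;	/* typically ERROR_TARGET_RESOURCE_NOT_AVAILABLE */

	retval = target_write_buffer(target, wa->address, len, data);

	/* always hand the area back; adjacent free areas get merged again */
	target_free_working_area(target, wa);
	return retval;
}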
2118
2119 static int target_restore_working_area(struct target *target, struct working_area *area)
2120 {
2121 int retval = ERROR_OK;
2122
2123 if (target->backup_working_area && area->backup != NULL) {
2124 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2125 if (retval != ERROR_OK)
2126 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2127 area->size, area->address);
2128 }
2129
2130 return retval;
2131 }
2132
2133 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2134 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2135 {
2136 int retval = ERROR_OK;
2137
2138 if (area->free)
2139 return retval;
2140
2141 if (restore) {
2142 retval = target_restore_working_area(target, area);
2143 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2144 if (retval != ERROR_OK)
2145 return retval;
2146 }
2147
2148 area->free = true;
2149
2150 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2151 area->size, area->address);
2152
2153 /* mark user pointer invalid */
2154 /* TODO: Is this really safe? It points to some previous caller's memory.
2155 * How could we know that the area pointer is still in that place and not
2156 * some other vital data? What's the purpose of this, anyway? */
2157 *area->user = NULL;
2158 area->user = NULL;
2159
2160 target_merge_working_areas(target);
2161
2162 print_wa_layout(target);
2163
2164 return retval;
2165 }
2166
2167 int target_free_working_area(struct target *target, struct working_area *area)
2168 {
2169 return target_free_working_area_restore(target, area, 1);
2170 }
2171
2172 /* free resources and restore memory; if restoring memory fails,
2173 * free up the resources anyway
2174 */
2175 static void target_free_all_working_areas_restore(struct target *target, int restore)
2176 {
2177 struct working_area *c = target->working_areas;
2178
2179 LOG_DEBUG("freeing all working areas");
2180
2181 /* Loop through all areas, restoring the allocated ones and marking them as free */
2182 while (c) {
2183 if (!c->free) {
2184 if (restore)
2185 target_restore_working_area(target, c);
2186 c->free = true;
2187 *c->user = NULL; /* Same as above */
2188 c->user = NULL;
2189 }
2190 c = c->next;
2191 }
2192
2193 /* Run a merge pass to combine all areas into one */
2194 target_merge_working_areas(target);
2195
2196 print_wa_layout(target);
2197 }
2198
2199 void target_free_all_working_areas(struct target *target)
2200 {
2201 target_free_all_working_areas_restore(target, 1);
2202
2203 /* Now we have none or only one working area marked as free */
2204 if (target->working_areas) {
2205 /* Free the last one to allow on-the-fly moving and resizing */
2206 free(target->working_areas->backup);
2207 free(target->working_areas);
2208 target->working_areas = NULL;
2209 }
2210 }
2211
2212 /* Find the largest number of bytes that can be allocated */
2213 uint32_t target_get_working_area_avail(struct target *target)
2214 {
2215 struct working_area *c = target->working_areas;
2216 uint32_t max_size = 0;
2217
2218 if (c == NULL)
2219 return target->working_area_size;
2220
2221 while (c) {
2222 if (c->free && max_size < c->size)
2223 max_size = c->size;
2224
2225 c = c->next;
2226 }
2227
2228 return max_size;
2229 }
2230
2231 static void target_destroy(struct target *target)
2232 {
2233 if (target->type->deinit_target)
2234 target->type->deinit_target(target);
2235
2236 free(target->semihosting);
2237
2238 jtag_unregister_event_callback(jtag_enable_callback, target);
2239
2240 struct target_event_action *teap = target->event_action;
2241 while (teap) {
2242 struct target_event_action *next = teap->next;
2243 Jim_DecrRefCount(teap->interp, teap->body);
2244 free(teap);
2245 teap = next;
2246 }
2247
2248 target_free_all_working_areas(target);
2249
2250 /* release the target's SMP list */
2251 if (target->smp) {
2252 struct target_list *head = target->head;
2253 while (head != NULL) {
2254 struct target_list *pos = head->next;
2255 head->target->smp = 0;
2256 free(head);
2257 head = pos;
2258 }
2259 target->smp = 0;
2260 }
2261
2262 rtos_destroy(target);
2263
2264 free(target->gdb_port_override);
2265 free(target->type);
2266 free(target->trace_info);
2267 free(target->fileio_info);
2268 free(target->cmd_name);
2269 free(target);
2270 }
2271
2272 void target_quit(void)
2273 {
2274 struct target_event_callback *pe = target_event_callbacks;
2275 while (pe) {
2276 struct target_event_callback *t = pe->next;
2277 free(pe);
2278 pe = t;
2279 }
2280 target_event_callbacks = NULL;
2281
2282 struct target_timer_callback *pt = target_timer_callbacks;
2283 while (pt) {
2284 struct target_timer_callback *t = pt->next;
2285 free(pt);
2286 pt = t;
2287 }
2288 target_timer_callbacks = NULL;
2289
2290 for (struct target *target = all_targets; target;) {
2291 struct target *tmp;
2292
2293 tmp = target->next;
2294 target_destroy(target);
2295 target = tmp;
2296 }
2297
2298 all_targets = NULL;
2299 }
2300
2301 int target_arch_state(struct target *target)
2302 {
2303 int retval;
2304 if (target == NULL) {
2305 LOG_WARNING("No target has been configured");
2306 return ERROR_OK;
2307 }
2308
2309 if (target->state != TARGET_HALTED)
2310 return ERROR_OK;
2311
2312 retval = target->type->arch_state(target);
2313 return retval;
2314 }
2315
2316 static int target_get_gdb_fileio_info_default(struct target *target,
2317 struct gdb_fileio_info *fileio_info)
2318 {
2319 /* If the target does not support semihosting functions, it has
2320 no need to provide a .get_gdb_fileio_info callback.
2321 This default just returns ERROR_FAIL, and gdb_server will report "Txx"
2322 (target halted) every time. */
2323 return ERROR_FAIL;
2324 }
2325
2326 static int target_gdb_fileio_end_default(struct target *target,
2327 int retcode, int fileio_errno, bool ctrl_c)
2328 {
2329 return ERROR_OK;
2330 }
2331
2332 int target_profiling_default(struct target *target, uint32_t *samples,
2333 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2334 {
2335 struct timeval timeout, now;
2336
2337 gettimeofday(&timeout, NULL);
2338 timeval_add_time(&timeout, seconds, 0);
2339
2340 LOG_INFO("Starting profiling. Halting and resuming the"
2341 " target as often as we can...");
2342
2343 uint32_t sample_count = 0;
2344 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2345 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2346
2347 int retval = ERROR_OK;
2348 for (;;) {
2349 target_poll(target);
2350 if (target->state == TARGET_HALTED) {
2351 uint32_t t = buf_get_u32(reg->value, 0, 32);
2352 samples[sample_count++] = t;
2353 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2354 retval = target_resume(target, 1, 0, 0, 0);
2355 target_poll(target);
2356 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2357 } else if (target->state == TARGET_RUNNING) {
2358 /* We want to quickly sample the PC. */
2359 retval = target_halt(target);
2360 } else {
2361 LOG_INFO("Target not halted or running");
2362 retval = ERROR_OK;
2363 break;
2364 }
2365
2366 if (retval != ERROR_OK)
2367 break;
2368
2369 gettimeofday(&now, NULL);
2370 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2371 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2372 break;
2373 }
2374 }
2375
2376 *num_samples = sample_count;
2377 return retval;
2378 }
2379
2380 /* Writes of single aligned words are guaranteed to use a 16 or 32 bit
2381 * access of the matching size; otherwise data is transferred as
2382 * quickly as possible
2383 */
2384 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2385 {
2386 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2387 size, address);
2388
2389 if (!target_was_examined(target)) {
2390 LOG_ERROR("Target not examined yet");
2391 return ERROR_FAIL;
2392 }
2393
2394 if (size == 0)
2395 return ERROR_OK;
2396
2397 if ((address + size - 1) < address) {
2398 /* GDB can request this when e.g. PC is 0xfffffffc */
2399 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2400 address,
2401 size);
2402 return ERROR_FAIL;
2403 }
2404
2405 return target->type->write_buffer(target, address, size, buffer);
2406 }
2407
2408 static int target_write_buffer_default(struct target *target,
2409 target_addr_t address, uint32_t count, const uint8_t *buffer)
2410 {
2411 uint32_t size;
2412 unsigned int data_bytes = target_data_bits(target) / 8;
2413
2414 /* Align up to the maximum access size. The loop condition makes sure the
2415 * next pass still has data left to transfer at the size we leave to it. */
2416 for (size = 1;
2417 size < data_bytes && count >= size * 2 + (address & size);
2418 size *= 2) {
2419 if (address & size) {
2420 int retval = target_write_memory(target, address, size, 1, buffer);
2421 if (retval != ERROR_OK)
2422 return retval;
2423 address += size;
2424 count -= size;
2425 buffer += size;
2426 }
2427 }
2428
2429 /* Write the data with as large access size as possible. */
2430 for (; size > 0; size /= 2) {
2431 uint32_t aligned = count - count % size;
2432 if (aligned > 0) {
2433 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2434 if (retval != ERROR_OK)
2435 return retval;
2436 address += aligned;
2437 count -= aligned;
2438 buffer += aligned;
2439 }
2440 }
2441
2442 return ERROR_OK;
2443 }
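
/* Worked example (illustrative, not part of the original file) of how the
 * default above splits an unaligned transfer on a target with a 32-bit data
 * bus (target_data_bits() == 32, i.e. data_bytes == 4):
 *
 *   target_write_buffer_default(target, 0x1001, 7, buf);
 *
 *   first loop ("align up"):  1-byte access at 0x1001, 2-byte access at 0x1002
 *   second loop ("bulk"):     one 4-byte access at 0x1004
 *
 * The head is written with progressively wider accesses until the address is
 * aligned to the bus width, the middle goes out in the widest possible
 * chunks, and any remaining tail is written with narrower accesses again. */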
2444
2445 /* Reads of single aligned words are guaranteed to use a 16 or 32 bit
2446 * access of the matching size; otherwise data is transferred as
2447 * quickly as possible
2448 */
2449 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2450 {
2451 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2452 size, address);
2453
2454 if (!target_was_examined(target)) {
2455 LOG_ERROR("Target not examined yet");
2456 return ERROR_FAIL;
2457 }
2458
2459 if (size == 0)
2460 return ERROR_OK;
2461
2462 if ((address + size - 1) < address) {
2463 /* GDB can request this when e.g. PC is 0xfffffffc */
2464 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2465 address,
2466 size);
2467 return ERROR_FAIL;
2468 }
2469
2470 return target->type->read_buffer(target, address, size, buffer);
2471 }
2472
2473 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2474 {
2475 uint32_t size;
2476 unsigned int data_bytes = target_data_bits(target) / 8;
2477
2478 /* Align up to the maximum access size. The loop condition makes sure the
2479 * next pass still has data left to transfer at the size we leave to it. */
2480 for (size = 1;
2481 size < data_bytes && count >= size * 2 + (address & size);
2482 size *= 2) {
2483 if (address & size) {
2484 int retval = target_read_memory(target, address, size, 1, buffer);
2485 if (retval != ERROR_OK)
2486 return retval;
2487 address += size;
2488 count -= size;
2489 buffer += size;
2490 }
2491 }
2492
2493 /* Read the data with as large access size as possible. */
2494 for (; size > 0; size /= 2) {
2495 uint32_t aligned = count - count % size;
2496 if (aligned > 0) {
2497 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2498 if (retval != ERROR_OK)
2499 return retval;
2500 address += aligned;
2501 count -= aligned;
2502 buffer += aligned;
2503 }
2504 }
2505
2506 return ERROR_OK;
2507 }
2508
2509 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2510 {
2511 uint8_t *buffer;
2512 int retval;
2513 uint32_t i;
2514 uint32_t checksum = 0;
2515 if (!target_was_examined(target)) {
2516 LOG_ERROR("Target not examined yet");
2517 return ERROR_FAIL;
2518 }
2519
2520 retval = target->type->checksum_memory(target, address, size, &checksum);
2521 if (retval != ERROR_OK) {
2522 buffer = malloc(size);
2523 if (buffer == NULL) {
2524 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2525 return ERROR_COMMAND_SYNTAX_ERROR;
2526 }
2527 retval = target_read_buffer(target, address, size, buffer);
2528 if (retval != ERROR_OK) {
2529 free(buffer);
2530 return retval;
2531 }
2532
2533 /* convert to target endianness */
2534 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2535 uint32_t target_data;
2536 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2537 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2538 }
2539
2540 retval = image_calculate_checksum(buffer, size, &checksum);
2541 free(buffer);
2542 }
2543
2544 *crc = checksum;
2545
2546 return retval;
2547 }
2548
2549 int target_blank_check_memory(struct target *target,
2550 struct target_memory_check_block *blocks, int num_blocks,
2551 uint8_t erased_value)
2552 {
2553 if (!target_was_examined(target)) {
2554 LOG_ERROR("Target not examined yet");
2555 return ERROR_FAIL;
2556 }
2557
2558 if (target->type->blank_check_memory == NULL)
2559 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2560
2561 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2562 }
2563
2564 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2565 {
2566 uint8_t value_buf[8];
2567 if (!target_was_examined(target)) {
2568 LOG_ERROR("Target not examined yet");
2569 return ERROR_FAIL;
2570 }
2571
2572 int retval = target_read_memory(target, address, 8, 1, value_buf);
2573
2574 if (retval == ERROR_OK) {
2575 *value = target_buffer_get_u64(target, value_buf);
2576 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2577 address,
2578 *value);
2579 } else {
2580 *value = 0x0;
2581 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2582 address);
2583 }
2584
2585 return retval;
2586 }
2587
2588 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2589 {
2590 uint8_t value_buf[4];
2591 if (!target_was_examined(target)) {
2592 LOG_ERROR("Target not examined yet");
2593 return ERROR_FAIL;
2594 }
2595
2596 int retval = target_read_memory(target, address, 4, 1, value_buf);
2597
2598 if (retval == ERROR_OK) {
2599 *value = target_buffer_get_u32(target, value_buf);
2600 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2601 address,
2602 *value);
2603 } else {
2604 *value = 0x0;
2605 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2606 address);
2607 }
2608
2609 return retval;
2610 }
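
/* Illustrative sketch, not part of the original file: the expected calling
 * pattern for the fixed-size accessors. The 0x40000000 register address and
 * the read-modify-write purpose are hypothetical. */
static int example_read_modify_write(struct target *target)
{
	uint32_t value;

	int retval = target_read_u32(target, 0x40000000, &value);
	if (retval != ERROR_OK)
		return retval;	/* on failure *value is forced to 0, do not use it */

	value |= 0x1;
	return target_write_u32(target, 0x40000000, value);
}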
2611
2612 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2613 {
2614 uint8_t value_buf[2];
2615 if (!target_was_examined(target)) {
2616 LOG_ERROR("Target not examined yet");
2617 return ERROR_FAIL;
2618 }
2619
2620 int retval = target_read_memory(target, address, 2, 1, value_buf);
2621
2622 if (retval == ERROR_OK) {
2623 *value = target_buffer_get_u16(target, value_buf);
2624 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2625 address,
2626 *value);
2627 } else {
2628 *value = 0x0;
2629 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2630 address);
2631 }
2632
2633 return retval;
2634 }
2635
2636 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2637 {
2638 if (!target_was_examined(target)) {
2639 LOG_ERROR("Target not examined yet");
2640 return ERROR_FAIL;
2641 }
2642
2643 int retval = target_read_memory(target, address, 1, 1, value);
2644
2645 if (retval == ERROR_OK) {
2646 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2647 address,
2648 *value);
2649 } else {
2650 *value = 0x0;
2651 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2652 address);
2653 }
2654
2655 return retval;
2656 }
2657
2658 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2659 {
2660 int retval;
2661 uint8_t value_buf[8];
2662 if (!target_was_examined(target)) {
2663 LOG_ERROR("Target not examined yet");
2664 return ERROR_FAIL;
2665 }
2666
2667 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2668 address,
2669 value);
2670
2671 target_buffer_set_u64(target, value_buf, value);
2672 retval = target_write_memory(target, address, 8, 1, value_buf);
2673 if (retval != ERROR_OK)
2674 LOG_DEBUG("failed: %i", retval);
2675
2676 return retval;
2677 }
2678
2679 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2680 {
2681 int retval;
2682 uint8_t value_buf[4];
2683 if (!target_was_examined(target)) {
2684 LOG_ERROR("Target not examined yet");
2685 return ERROR_FAIL;
2686 }
2687
2688 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2689 address,
2690 value);
2691
2692 target_buffer_set_u32(target, value_buf, value);
2693 retval = target_write_memory(target, address, 4, 1, value_buf);
2694 if (retval != ERROR_OK)
2695 LOG_DEBUG("failed: %i", retval);
2696
2697 return retval;
2698 }
2699
2700 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2701 {
2702 int retval;
2703 uint8_t value_buf[2];
2704 if (!target_was_examined(target)) {
2705 LOG_ERROR("Target not examined yet");
2706 return ERROR_FAIL;
2707 }
2708
2709 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2710 address,
2711 value);
2712
2713 target_buffer_set_u16(target, value_buf, value);
2714 retval = target_write_memory(target, address, 2, 1, value_buf);
2715 if (retval != ERROR_OK)
2716 LOG_DEBUG("failed: %i", retval);
2717
2718 return retval;
2719 }
2720
2721 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2722 {
2723 int retval;
2724 if (!target_was_examined(target)) {
2725 LOG_ERROR("Target not examined yet");
2726 return ERROR_FAIL;
2727 }
2728
2729 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2730 address, value);
2731
2732 retval = target_write_memory(target, address, 1, 1, &value);
2733 if (retval != ERROR_OK)
2734 LOG_DEBUG("failed: %i", retval);
2735
2736 return retval;
2737 }
2738
2739 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2740 {
2741 int retval;
2742 uint8_t value_buf[8];
2743 if (!target_was_examined(target)) {
2744 LOG_ERROR("Target not examined yet");
2745 return ERROR_FAIL;
2746 }
2747
2748 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2749 address,
2750 value);
2751
2752 target_buffer_set_u64(target, value_buf, value);
2753 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2754 if (retval != ERROR_OK)
2755 LOG_DEBUG("failed: %i", retval);
2756
2757 return retval;
2758 }
2759
2760 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2761 {
2762 int retval;
2763 uint8_t value_buf[4];
2764 if (!target_was_examined(target)) {
2765 LOG_ERROR("Target not examined yet");
2766 return ERROR_FAIL;
2767 }
2768
2769 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2770 address,
2771 value);
2772
2773 target_buffer_set_u32(target, value_buf, value);
2774 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2775 if (retval != ERROR_OK)
2776 LOG_DEBUG("failed: %i", retval);
2777
2778 return retval;
2779 }
2780
2781 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2782 {
2783 int retval;
2784 uint8_t value_buf[2];
2785 if (!target_was_examined(target)) {
2786 LOG_ERROR("Target not examined yet");
2787 return ERROR_FAIL;
2788 }
2789
2790 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2791 address,
2792 value);
2793
2794 target_buffer_set_u16(target, value_buf, value);
2795 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2796 if (retval != ERROR_OK)
2797 LOG_DEBUG("failed: %i", retval);
2798
2799 return retval;
2800 }
2801
2802 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2803 {
2804 int retval;
2805 if (!target_was_examined(target)) {
2806 LOG_ERROR("Target not examined yet");
2807 return ERROR_FAIL;
2808 }
2809
2810 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2811 address, value);
2812
2813 retval = target_write_phys_memory(target, address, 1, 1, &value);
2814 if (retval != ERROR_OK)
2815 LOG_DEBUG("failed: %i", retval);
2816
2817 return retval;
2818 }
2819
2820 static int find_target(struct command_invocation *cmd, const char *name)
2821 {
2822 struct target *target = get_target(name);
2823 if (target == NULL) {
2824 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2825 return ERROR_FAIL;
2826 }
2827 if (!target->tap->enabled) {
2828 command_print(cmd, "Target: TAP %s is disabled, "
2829 "can't be the current target\n",
2830 target->tap->dotted_name);
2831 return ERROR_FAIL;
2832 }
2833
2834 cmd->ctx->current_target = target;
2835 if (cmd->ctx->current_target_override)
2836 cmd->ctx->current_target_override = target;
2837
2838 return ERROR_OK;
2839 }
2840
2841
2842 COMMAND_HANDLER(handle_targets_command)
2843 {
2844 int retval = ERROR_OK;
2845 if (CMD_ARGC == 1) {
2846 retval = find_target(CMD, CMD_ARGV[0]);
2847 if (retval == ERROR_OK) {
2848 /* we're done! */
2849 return retval;
2850 }
2851 }
2852
2853 struct target *target = all_targets;
2854 command_print(CMD, " TargetName Type Endian TapName State ");
2855 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2856 while (target) {
2857 const char *state;
2858 char marker = ' ';
2859
2860 if (target->tap->enabled)
2861 state = target_state_name(target);
2862 else
2863 state = "tap-disabled";
2864
2865 if (CMD_CTX->current_target == target)
2866 marker = '*';
2867
2868 /* keep columns lined up to match the headers above */
2869 command_print(CMD,
2870 "%2d%c %-18s %-10s %-6s %-18s %s",
2871 target->target_number,
2872 marker,
2873 target_name(target),
2874 target_type_name(target),
2875 jim_nvp_value2name_simple(nvp_target_endian,
2876 target->endianness)->name,
2877 target->tap->dotted_name,
2878 state);
2879 target = target->next;
2880 }
2881
2882 return retval;
2883 }
2884
2885 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2886
2887 static int powerDropout;
2888 static int srstAsserted;
2889
2890 static int runPowerRestore;
2891 static int runPowerDropout;
2892 static int runSrstAsserted;
2893 static int runSrstDeasserted;
2894
2895 static int sense_handler(void)
2896 {
2897 static int prevSrstAsserted;
2898 static int prevPowerdropout;
2899
2900 int retval = jtag_power_dropout(&powerDropout);
2901 if (retval != ERROR_OK)
2902 return retval;
2903
2904 int powerRestored;
2905 powerRestored = prevPowerdropout && !powerDropout;
2906 if (powerRestored)
2907 runPowerRestore = 1;
2908
2909 int64_t current = timeval_ms();
2910 static int64_t lastPower;
2911 bool waitMore = lastPower + 2000 > current;
2912 if (powerDropout && !waitMore) {
2913 runPowerDropout = 1;
2914 lastPower = current;
2915 }
2916
2917 retval = jtag_srst_asserted(&srstAsserted);
2918 if (retval != ERROR_OK)
2919 return retval;
2920
2921 int srstDeasserted;
2922 srstDeasserted = prevSrstAsserted && !srstAsserted;
2923
2924 static int64_t lastSrst;
2925 waitMore = lastSrst + 2000 > current;
2926 if (srstDeasserted && !waitMore) {
2927 runSrstDeasserted = 1;
2928 lastSrst = current;
2929 }
2930
2931 if (!prevSrstAsserted && srstAsserted)
2932 runSrstAsserted = 1;
2933
2934 prevSrstAsserted = srstAsserted;
2935 prevPowerdropout = powerDropout;
2936
2937 if (srstDeasserted || powerRestored) {
2938 /* Other than logging the event we can't do anything here.
2939 * Issuing a reset is a particularly bad idea as we might
2940 * be inside a reset already.
2941 */
2942 }
2943
2944 return ERROR_OK;
2945 }
2946
2947 /* process target state changes */
2948 static int handle_target(void *priv)
2949 {
2950 Jim_Interp *interp = (Jim_Interp *)priv;
2951 int retval = ERROR_OK;
2952
2953 if (!is_jtag_poll_safe()) {
2954 /* polling is disabled currently */
2955 return ERROR_OK;
2956 }
2957
2958 /* we do not want to recurse here... */
2959 static int recursive;
2960 if (!recursive) {
2961 recursive = 1;
2962 sense_handler();
2963 /* danger! running these procedures can trigger srst assertions and power dropouts.
2964 * We need to avoid an infinite loop/recursion here and we do that by
2965 * clearing the flags after running these events.
2966 */
2967 int did_something = 0;
2968 if (runSrstAsserted) {
2969 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2970 Jim_Eval(interp, "srst_asserted");
2971 did_something = 1;
2972 }
2973 if (runSrstDeasserted) {
2974 Jim_Eval(interp, "srst_deasserted");
2975 did_something = 1;
2976 }
2977 if (runPowerDropout) {
2978 LOG_INFO("Power dropout detected, running power_dropout proc.");
2979 Jim_Eval(interp, "power_dropout");
2980 did_something = 1;
2981 }
2982 if (runPowerRestore) {
2983 Jim_Eval(interp, "power_restore");
2984 did_something = 1;
2985 }
2986
2987 if (did_something) {
2988 /* clear detect flags */
2989 sense_handler();
2990 }
2991
2992 /* clear action flags */
2993
2994 runSrstAsserted = 0;
2995 runSrstDeasserted = 0;
2996 runPowerRestore = 0;
2997 runPowerDropout = 0;
2998
2999 recursive = 0;
3000 }
3001
3002 /* Poll targets for state changes unless that's globally disabled.
3003 * Skip targets that are currently disabled.
3004 */
3005 for (struct target *target = all_targets;
3006 is_jtag_poll_safe() && target;
3007 target = target->next) {
3008
3009 if (!target_was_examined(target))
3010 continue;
3011
3012 if (!target->tap->enabled)
3013 continue;
3014
3015 if (target->backoff.times > target->backoff.count) {
3016 /* do not poll this time as we failed previously */
3017 target->backoff.count++;
3018 continue;
3019 }
3020 target->backoff.count = 0;
3021
3022 /* only poll target if we've got power and srst isn't asserted */
3023 if (!powerDropout && !srstAsserted) {
3024 /* polling may fail silently until the target has been examined */
3025 retval = target_poll(target);
3026 if (retval != ERROR_OK) {
3027 /* 100ms polling interval. Increase the interval between polls up to 5000ms */
3028 if (target->backoff.times * polling_interval < 5000) {
3029 target->backoff.times *= 2;
3030 target->backoff.times++;
3031 }
3032
3033 /* Tell GDB to halt. This allows the user to
3034 * run monitor commands to handle the situation.
3035 */
3036 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3037 }
3038 if (target->backoff.times > 0) {
3039 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3040 target_reset_examined(target);
3041 retval = target_examine_one(target);
3042 /* Target examination could have failed due to an unstable connection,
3043 * but we set the examined flag anyway to repoll it later */
3044 if (retval != ERROR_OK) {
3045 target->examined = true;
3046 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3047 target->backoff.times * polling_interval);
3048 return retval;
3049 }
3050 }
3051
3052 /* Since we succeeded, we reset backoff count */
3053 target->backoff.times = 0;
3054 }
3055 }
3056
3057 return retval;
3058 }
3059
3060 COMMAND_HANDLER(handle_reg_command)
3061 {
3062 LOG_DEBUG("-");
3063
3064 struct target *target = get_current_target(CMD_CTX);
3065 struct reg *reg = NULL;
3066
3067 /* list all available registers for the current target */
3068 if (CMD_ARGC == 0) {
3069 struct reg_cache *cache = target->reg_cache;
3070
3071 unsigned int count = 0;
3072 while (cache) {
3073 unsigned i;
3074
3075 command_print(CMD, "===== %s", cache->name);
3076
3077 for (i = 0, reg = cache->reg_list;
3078 i < cache->num_regs;
3079 i++, reg++, count++) {
3080 if (reg->exist == false || reg->hidden)
3081 continue;
3082 /* only print cached values if they are valid */
3083 if (reg->valid) {
3084 char *value = buf_to_hex_str(reg->value,
3085 reg->size);
3086 command_print(CMD,
3087 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3088 count, reg->name,
3089 reg->size, value,
3090 reg->dirty
3091 ? " (dirty)"
3092 : "");
3093 free(value);
3094 } else {
3095 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3096 count, reg->name,
3097 reg->size);
3098 }
3099 }
3100 cache = cache->next;
3101 }
3102
3103 return ERROR_OK;
3104 }
3105
3106 /* access a single register by its ordinal number */
3107 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3108 unsigned num;
3109 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3110
3111 struct reg_cache *cache = target->reg_cache;
3112 unsigned int count = 0;
3113 while (cache) {
3114 unsigned i;
3115 for (i = 0; i < cache->num_regs; i++) {
3116 if (count++ == num) {
3117 reg = &cache->reg_list[i];
3118 break;
3119 }
3120 }
3121 if (reg)
3122 break;
3123 cache = cache->next;
3124 }
3125
3126 if (!reg) {
3127 command_print(CMD, "%i is out of bounds, the current target "
3128 "has only %i registers (0 - %i)", num, count, count - 1);
3129 return ERROR_OK;
3130 }
3131 } else {
3132 /* access a single register by its name */
3133 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3134
3135 if (!reg)
3136 goto not_found;
3137 }
3138
3139 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
3140
3141 if (!reg->exist)
3142 goto not_found;
3143
3144 /* display a register */
3145 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3146 && (CMD_ARGV[1][0] <= '9')))) {
3147 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3148 reg->valid = 0;
3149
3150 if (reg->valid == 0) {
3151 int retval = reg->type->get(reg);
3152 if (retval != ERROR_OK) {
3153 LOG_ERROR("Could not read register '%s'", reg->name);
3154 return retval;
3155 }
3156 }
3157 char *value = buf_to_hex_str(reg->value, reg->size);
3158 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3159 free(value);
3160 return ERROR_OK;
3161 }
3162
3163 /* set register value */
3164 if (CMD_ARGC == 2) {
3165 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3166 if (buf == NULL)
3167 return ERROR_FAIL;
3168 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3169
3170 int retval = reg->type->set(reg, buf);
3171 if (retval != ERROR_OK) {
3172 LOG_ERROR("Could not write to register '%s'", reg->name);
3173 } else {
3174 char *value = buf_to_hex_str(reg->value, reg->size);
3175 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3176 free(value);
3177 }
3178
3179 free(buf);
3180
3181 return retval;
3182 }
3183
3184 return ERROR_COMMAND_SYNTAX_ERROR;
3185
3186 not_found:
3187 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3188 return ERROR_OK;
3189 }
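
/* Usage examples for the handler above, as typed at the OpenOCD console
 * (illustrative; the register name "pc" and the value are only examples):
 *
 *   reg                  list all registers of the current target
 *   reg pc               print one register, reading it if not cached
 *   reg pc force         invalidate the cache and re-read from the target
 *   reg pc 0x20000000    write a new value through reg->type->set()
 *
 * Both the get() and set() paths propagate the callback's return value
 * instead of silently ignoring failures. */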
3190
3191 COMMAND_HANDLER(handle_poll_command)
3192 {
3193 int retval = ERROR_OK;
3194 struct target *target = get_current_target(CMD_CTX);
3195
3196 if (CMD_ARGC == 0) {
3197 command_print(CMD, "background polling: %s",
3198 jtag_poll_get_enabled() ? "on" : "off");
3199 command_print(CMD, "TAP: %s (%s)",
3200 target->tap->dotted_name,
3201 target->tap->enabled ? "enabled" : "disabled");
3202 if (!target->tap->enabled)
3203 return ERROR_OK;
3204 retval = target_poll(target);
3205 if (retval != ERROR_OK)
3206 return retval;
3207 retval = target_arch_state(target);
3208 if (retval != ERROR_OK)
3209 return retval;
3210 } else if (CMD_ARGC == 1) {
3211 bool enable;
3212 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3213 jtag_poll_set_enabled(enable);
3214 } else
3215 return ERROR_COMMAND_SYNTAX_ERROR;
3216
3217 return retval;
3218 }
3219
3220 COMMAND_HANDLER(handle_wait_halt_command)
3221 {
3222 if (CMD_ARGC > 1)
3223 return ERROR_COMMAND_SYNTAX_ERROR;
3224
3225 unsigned ms = DEFAULT_HALT_TIMEOUT;
3226 if (1 == CMD_ARGC) {
3227 int retval = parse_uint(CMD_ARGV[0], &ms);
3228 if (ERROR_OK != retval)
3229 return ERROR_COMMAND_SYNTAX_ERROR;
3230 }
3231
3232 struct target *target = get_current_target(CMD_CTX);
3233 return target_wait_state(target, TARGET_HALTED, ms);
3234 }
3235
3236 /* wait for target state to change. The trick here is to have a low
3237 * latency for short waits and not to suck up all the CPU time
3238 * on longer waits.
3239 *
3240 * After 500ms, keep_alive() is invoked
3241 */
3242 int target_wait_state(struct target *target, enum target_state state, int ms)
3243 {
3244 int retval;
3245 int64_t then = 0, cur;
3246 bool once = true;
3247
3248 for (;;) {
3249 retval = target_poll(target);
3250 if (retval != ERROR_OK)
3251 return retval;
3252 if (target->state == state)
3253 break;
3254 cur = timeval_ms();
3255 if (once) {
3256 once = false;
3257 then = timeval_ms();
3258 LOG_DEBUG("waiting for target %s...",
3259 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3260 }
3261
3262 if (cur-then > 500)
3263 keep_alive();
3264
3265 if ((cur-then) > ms) {
3266 LOG_ERROR("timed out while waiting for target %s",
3267 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3268 return ERROR_FAIL;
3269 }
3270 }
3271
3272 return ERROR_OK;
3273 }
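
/* Illustrative sketch, not part of the original file: halting a target and
 * waiting for the halt the same way the halt/wait_halt command handlers in
 * this file do, using the default 5000 ms timeout. */
static int example_halt_and_wait(struct target *target)
{
	int retval = target_halt(target);
	if (retval != ERROR_OK)
		return retval;

	/* polls the target, calling keep_alive() after 500 ms, until it
	 * reaches TARGET_HALTED or the timeout expires */
	return target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
}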
3274
3275 COMMAND_HANDLER(handle_halt_command)
3276 {
3277 LOG_DEBUG("-");
3278
3279 struct target *target = get_current_target(CMD_CTX);
3280
3281 target->verbose_halt_msg = true;
3282
3283 int retval = target_halt(target);
3284 if (ERROR_OK != retval)
3285 return retval;
3286
3287 if (CMD_ARGC == 1) {
3288 unsigned wait_local;
3289 retval = parse_uint(CMD_ARGV[0], &wait_local);
3290 if (ERROR_OK != retval)
3291 return ERROR_COMMAND_SYNTAX_ERROR;
3292 if (!wait_local)
3293 return ERROR_OK;
3294 }
3295
3296 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3297 }
3298
3299 COMMAND_HANDLER(handle_soft_reset_halt_command)
3300 {
3301 struct target *target = get_current_target(CMD_CTX);
3302
3303 LOG_USER("requesting target halt and executing a soft reset");
3304
3305 target_soft_reset_halt(target);
3306
3307 return ERROR_OK;
3308 }
3309
3310 COMMAND_HANDLER(handle_reset_command)
3311 {
3312 if (CMD_ARGC > 1)
3313 return ERROR_COMMAND_SYNTAX_ERROR;
3314
3315 enum target_reset_mode reset_mode = RESET_RUN;
3316 if (CMD_ARGC == 1) {
3317 const struct jim_nvp *n;
3318 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3319 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3320 return ERROR_COMMAND_SYNTAX_ERROR;
3321 reset_mode = n->value;
3322 }
3323
3324 /* reset *all* targets */
3325 return target_process_reset(CMD, reset_mode);
3326 }
3327
3328
3329 COMMAND_HANDLER(handle_resume_command)
3330 {
3331 int current = 1;
3332 if (CMD_ARGC > 1)
3333 return ERROR_COMMAND_SYNTAX_ERROR;
3334
3335 struct target *target = get_current_target(CMD_CTX);
3336
3337 /* with no CMD_ARGV, resume from current pc, addr = 0,
3338 * with one argument, addr = CMD_ARGV[0],
3339 * handle breakpoints, not debugging */
3340 target_addr_t addr = 0;
3341 if (CMD_ARGC == 1) {
3342 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3343 current = 0;
3344 }
3345
3346 return target_resume(target, current, addr, 1, 0);
3347 }
3348
3349 COMMAND_HANDLER(handle_step_command)
3350 {
3351 if (CMD_ARGC > 1)
3352 return ERROR_COMMAND_SYNTAX_ERROR;
3353
3354 LOG_DEBUG("-");
3355
3356 /* with no CMD_ARGV, step from current pc, addr = 0,
3357 * with one argument addr = CMD_ARGV[0],
3358 * handle breakpoints, debugging */
3359 target_addr_t addr = 0;
3360 int current_pc = 1;
3361 if (CMD_ARGC == 1) {
3362 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3363 current_pc = 0;
3364 }
3365
3366 struct target *target = get_current_target(CMD_CTX);
3367
3368 return target_step(target, current_pc, addr, 1);
3369 }
3370
3371 void target_handle_md_output(struct command_invocation *cmd,
3372 struct target *target, target_addr_t address, unsigned size,
3373 unsigned count, const uint8_t *buffer)
3374 {
3375 const unsigned line_bytecnt = 32;
3376 unsigned line_modulo = line_bytecnt / size;
3377
3378 char output[line_bytecnt * 4 + 1];
3379 unsigned output_len = 0;
3380
3381 const char *value_fmt;
3382 switch (size) {
3383 case 8:
3384 value_fmt = "%16.16"PRIx64" ";
3385 break;
3386 case 4:
3387 value_fmt = "%8.8"PRIx64" ";
3388 break;
3389 case 2:
3390 value_fmt = "%4.4"PRIx64" ";
3391 break;
3392 case 1:
3393 value_fmt = "%2.2"PRIx64" ";
3394 break;
3395 default:
3396 /* "can't happen", caller checked */
3397 LOG_ERROR("invalid memory read size: %u", size);
3398 return;
3399 }
3400
3401 for (unsigned i = 0; i < count; i++) {
3402 if (i % line_modulo == 0) {
3403 output_len += snprintf(output + output_len,
3404 sizeof(output) - output_len,
3405 TARGET_ADDR_FMT ": ",
3406 (address + (i * size)));
3407 }
3408
3409 uint64_t value = 0;
3410 const uint8_t *value_ptr = buffer + i * size;
3411 switch (size) {
3412 case 8:
3413 value = target_buffer_get_u64(target, value_ptr);
3414 break;
3415 case 4:
3416 value = target_buffer_get_u32(target, value_ptr);
3417 break;
3418 case 2:
3419 value = target_buffer_get_u16(target, value_ptr);
3420 break;
3421 case 1:
3422 value = *value_ptr;
3423 }
3424 output_len += snprintf(output + output_len,
3425 sizeof(output) - output_len,
3426 value_fmt, value);
3427
3428 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3429 command_print(cmd, "%s", output);
3430 output_len = 0;
3431 }
3432 }
3433 }
3434
3435 COMMAND_HANDLER(handle_md_command)
3436 {
3437 if (CMD_ARGC < 1)
3438 return ERROR_COMMAND_SYNTAX_ERROR;
3439
3440 unsigned size = 0;
3441 switch (CMD_NAME[2]) {
3442 case 'd':
3443 size = 8;
3444 break;
3445 case 'w':
3446 size = 4;
3447 break;
3448 case 'h':
3449 size = 2;
3450 break;
3451 case 'b':
3452 size = 1;
3453 break;
3454 default:
3455 return ERROR_COMMAND_SYNTAX_ERROR;
3456 }
3457
3458 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3459 int (*fn)(struct target *target,
3460 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3461 if (physical) {
3462 CMD_ARGC--;
3463 CMD_ARGV++;
3464 fn = target_read_phys_memory;
3465 } else
3466 fn = target_read_memory;
3467 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3468 return ERROR_COMMAND_SYNTAX_ERROR;
3469
3470 target_addr_t address;
3471 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3472
3473 unsigned count = 1;
3474 if (CMD_ARGC == 2)
3475 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3476
3477 uint8_t *buffer = calloc(count, size);
3478 if (buffer == NULL) {
3479 LOG_ERROR("Failed to allocate md read buffer");
3480 return ERROR_FAIL;
3481 }
3482
3483 struct target *target = get_current_target(CMD_CTX);
3484 int retval = fn(target, address, size, count, buffer);
3485 if (ERROR_OK == retval)
3486 target_handle_md_output(CMD, target, address, size, count, buffer);
3487
3488 free(buffer);
3489
3490 return retval;
3491 }
3492
3493 typedef int (*target_write_fn)(struct target *target,
3494 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3495
3496 static int target_fill_mem(struct target *target,
3497 target_addr_t address,
3498 target_write_fn fn,
3499 unsigned data_size,
3500 /* value */
3501 uint64_t b,
3502 /* count */
3503 unsigned c)
3504 {
3505 /* We have to write in reasonably large chunks to be able
3506 * to fill large memory areas with any sane speed */
3507 const unsigned chunk_size = 16384;
3508 uint8_t *target_buf = malloc(chunk_size * data_size);
3509 if (target_buf == NULL) {
3510 LOG_ERROR("Out of memory");
3511 return ERROR_FAIL;
3512 }
3513
3514 for (unsigned i = 0; i < chunk_size; i++) {
3515 switch (data_size) {
3516 case 8:
3517 target_buffer_set_u64(target, target_buf + i * data_size, b);
3518 break;
3519 case 4:
3520 target_buffer_set_u32(target, target_buf + i * data_size, b);
3521 break;
3522 case 2:
3523 target_buffer_set_u16(target, target_buf + i * data_size, b);
3524 break;
3525 case 1:
3526 target_buffer_set_u8(target, target_buf + i * data_size, b);
3527 break;
3528 default:
3529 exit(-1);
3530 }
3531 }
3532
3533 int retval = ERROR_OK;
3534
3535 for (unsigned x = 0; x < c; x += chunk_size) {
3536 unsigned current;
3537 current = c - x;
3538 if (current > chunk_size)
3539 current = chunk_size;
3540 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3541 if (retval != ERROR_OK)
3542 break;
3543 /* avoid GDB timeouts */
3544 keep_alive();
3545 }
3546 free(target_buf);
3547
3548 return retval;
3549 }
3550
3551
3552 COMMAND_HANDLER(handle_mw_command)
3553 {
3554 if (CMD_ARGC < 2)
3555 return ERROR_COMMAND_SYNTAX_ERROR;
3556 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3557 target_write_fn fn;
3558 if (physical) {
3559 CMD_ARGC--;
3560 CMD_ARGV++;
3561 fn = target_write_phys_memory;
3562 } else
3563 fn = target_write_memory;
3564 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3565 return ERROR_COMMAND_SYNTAX_ERROR;
3566
3567 target_addr_t address;
3568 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3569
3570 uint64_t value;
3571 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3572
3573 unsigned count = 1;
3574 if (CMD_ARGC == 3)
3575 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3576
3577 struct target *target = get_current_target(CMD_CTX);
3578 unsigned wordsize;
3579 switch (CMD_NAME[2]) {
3580 case 'd':
3581 wordsize = 8;
3582 break;
3583 case 'w':
3584 wordsize = 4;
3585 break;
3586 case 'h':
3587 wordsize = 2;
3588 break;
3589 case 'b':
3590 wordsize = 1;
3591 break;
3592 default:
3593 return ERROR_COMMAND_SYNTAX_ERROR;
3594 }
3595
3596 return target_fill_mem(target, address, fn, wordsize, value, count);
3597 }
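
/* Usage examples for the handler above, as typed at the OpenOCD console
 * (illustrative addresses and values):
 *
 *   mww 0x20000000 0xdeadbeef        write one 32-bit word
 *   mww 0x20000000 0x00000000 64     fill 64 words via target_fill_mem()
 *   mwb phys 0x00000000 0xff         byte write through the physical map
 *
 * mwd and mwh behave the same way with 64- and 16-bit word sizes. */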
3598
3599 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3600 target_addr_t *min_address, target_addr_t *max_address)
3601 {
3602 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3603 return ERROR_COMMAND_SYNTAX_ERROR;
3604
3605 /* a base address isn't always necessary,
3606 * default to 0x0 (i.e. don't relocate) */
3607 if (CMD_ARGC >= 2) {
3608 target_addr_t addr;
3609 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3610 image->base_address = addr;
3611 image->base_address_set = true;
3612 } else
3613 image->base_address_set = false;
3614
3615 image->start_address_set = false;
3616
3617 if (CMD_ARGC >= 4)
3618 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3619 if (CMD_ARGC == 5) {
3620 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3621 /* use size (given) to find max (required) */
3622 *max_address += *min_address;
3623 }
3624
3625 if (*min_address > *max_address)
3626 return ERROR_COMMAND_SYNTAX_ERROR;
3627
3628 return ERROR_OK;
3629 }
3630
3631 COMMAND_HANDLER(handle_load_image_command)
3632 {
3633 uint8_t *buffer;
3634 size_t buf_cnt;
3635 uint32_t image_size;
3636 target_addr_t min_address = 0;
3637 target_addr_t max_address = -1;
3638 struct image image;
3639
3640 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3641 &image, &min_address, &max_address);
3642 if (ERROR_OK != retval)
3643 return retval;
3644
3645 struct target *target = get_current_target(CMD_CTX);
3646
3647 struct duration bench;
3648 duration_start(&bench);
3649
3650 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3651 return ERROR_FAIL;
3652
3653 image_size = 0x0;
3654 retval = ERROR_OK;
3655 for (unsigned int i = 0; i < image.num_sections; i++) {
3656 buffer = malloc(image.sections[i].size);
3657 if (buffer == NULL) {
3658 command_print(CMD,
3659 "error allocating buffer for section (%d bytes)",
3660 (int)(image.sections[i].size));
3661 retval = ERROR_FAIL;
3662 break;
3663 }
3664
3665 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3666 if (retval != ERROR_OK) {
3667 free(buffer);
3668 break;
3669 }
3670
3671 uint32_t offset = 0;
3672 uint32_t length = buf_cnt;
3673
3674 /* DANGER!!! beware of unsigned comparison here!!! */
3675
3676 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3677 (image.sections[i].base_address < max_address)) {
3678
3679 if (image.sections[i].base_address < min_address) {
3680 /* clip addresses below */
3681 offset += min_address-image.sections[i].base_address;
3682 length -= offset;
3683 }
3684
3685 if (image.sections[i].base_address + buf_cnt > max_address)
3686 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3687
3688 retval = target_write_buffer(target,
3689 image.sections[i].base_address + offset, length, buffer + offset);
3690 if (retval != ERROR_OK) {
3691 free(buffer);
3692 break;
3693 }
3694 image_size += length;
3695 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3696 (unsigned int)length,
3697 image.sections[i].base_address + offset);
3698 }
3699
3700 free(buffer);
3701 }
3702
3703 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3704 command_print(CMD, "downloaded %" PRIu32 " bytes "
3705 "in %fs (%0.3f KiB/s)", image_size,
3706 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3707 }
3708
3709 image_close(&image);
3710
3711 return retval;
3712
3713 }
3714
3715 COMMAND_HANDLER(handle_dump_image_command)
3716 {
3717 struct fileio *fileio;
3718 uint8_t *buffer;
3719 int retval, retvaltemp;
3720 target_addr_t address, size;
3721 struct duration bench;
3722 struct target *target = get_current_target(CMD_CTX);
3723
3724 if (CMD_ARGC != 3)
3725 return ERROR_COMMAND_SYNTAX_ERROR;
3726
3727 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3728 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3729
3730 uint32_t buf_size = (size > 4096) ? 4096 : size;
3731 buffer = malloc(buf_size);
3732 if (!buffer)
3733 return ERROR_FAIL;
3734
3735 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3736 if (retval != ERROR_OK) {
3737 free(buffer);
3738 return retval;
3739 }
3740
3741 duration_start(&bench);
3742
3743 while (size > 0) {
3744 size_t size_written;
3745 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3746 retval = target_read_buffer(target, address, this_run_size, buffer);
3747 if (retval != ERROR_OK)
3748 break;
3749
3750 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3751 if (retval != ERROR_OK)
3752 break;
3753
3754 size -= this_run_size;
3755 address += this_run_size;
3756 }
3757
3758 free(buffer);
3759
3760 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3761 size_t filesize;
3762 retval = fileio_size(fileio, &filesize);
3763 if (retval != ERROR_OK)
3764 return retval;
3765 command_print(CMD,
3766 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3767 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3768 }
3769
3770 retvaltemp = fileio_close(fileio);
3771 if (retvaltemp != ERROR_OK)
3772 return retvaltemp;
3773
3774 return retval;
3775 }
3776
3777 enum verify_mode {
3778 IMAGE_TEST = 0,
3779 IMAGE_VERIFY = 1,
3780 IMAGE_CHECKSUM_ONLY = 2
3781 };
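/*
 * IMAGE_TEST only lists the sections that would be checked,
 * IMAGE_VERIFY compares a CRC per section and falls back to a byte-by-byte
 * compare on mismatch, and IMAGE_CHECKSUM_ONLY fails on the first CRC
 * mismatch without reading the memory back for comparison.
 */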
3782
3783 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3784 {
3785 uint8_t *buffer;
3786 size_t buf_cnt;
3787 uint32_t image_size;
3788 int retval;
3789 uint32_t checksum = 0;
3790 uint32_t mem_checksum = 0;
3791
3792 struct image image;
3793
3794 struct target *target = get_current_target(CMD_CTX);
3795
3796 if (CMD_ARGC < 1)
3797 return ERROR_COMMAND_SYNTAX_ERROR;
3798
3799 if (!target) {
3800 LOG_ERROR("no target selected");
3801 return ERROR_FAIL;
3802 }
3803
3804 struct duration bench;
3805 duration_start(&bench);
3806
3807 if (CMD_ARGC >= 2) {
3808 target_addr_t addr;
3809 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3810 image.base_address = addr;
3811 image.base_address_set = true;
3812 } else {
3813 image.base_address_set = false;
3814 image.base_address = 0x0;
3815 }
3816
3817 image.start_address_set = false;
3818
3819 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3820 if (retval != ERROR_OK)
3821 return retval;
3822
3823 image_size = 0x0;
3824 int diffs = 0;
3825 retval = ERROR_OK;
3826 for (unsigned int i = 0; i < image.num_sections; i++) {
3827 buffer = malloc(image.sections[i].size);
if (buffer == NULL) {
	command_print(CMD,
		"error allocating buffer for section (%" PRIu32 " bytes)",
		image.sections[i].size);
	retval = ERROR_FAIL;
	break;
}
3834 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3835 if (retval != ERROR_OK) {
3836 free(buffer);
3837 break;
3838 }
3839
3840 if (verify >= IMAGE_VERIFY) {
3841 /* calculate checksum of image */
3842 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3843 if (retval != ERROR_OK) {
3844 free(buffer);
3845 break;
3846 }
3847
3848 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3849 if (retval != ERROR_OK) {
3850 free(buffer);
3851 break;
3852 }
3853 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3854 LOG_ERROR("checksum mismatch");
3855 free(buffer);
3856 retval = ERROR_FAIL;
3857 goto done;
3858 }
3859 if (checksum != mem_checksum) {
3860 /* failed crc checksum, fall back to a binary compare */
uint8_t *data;

if (diffs == 0)
	LOG_ERROR("checksum mismatch - attempting binary compare");

data = malloc(buf_cnt);
if (data == NULL) {
	free(buffer);
	retval = ERROR_FAIL;
	break;
}

retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3869 if (retval == ERROR_OK) {
3870 uint32_t t;
3871 for (t = 0; t < buf_cnt; t++) {
3872 if (data[t] != buffer[t]) {
3873 command_print(CMD,
3874 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3875 diffs,
3876 (unsigned)(t + image.sections[i].base_address),
3877 data[t],
3878 buffer[t]);
3879 if (diffs++ >= 127) {
3880 command_print(CMD, "More than 128 errors, the rest are not printed.");
3881 free(data);
3882 free(buffer);
3883 goto done;
3884 }
3885 }
3886 keep_alive();
3887 }
3888 }
3889 free(data);
3890 }
3891 } else {
3892 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3893 image.sections[i].base_address,
3894 buf_cnt);
3895 }
3896
3897 free(buffer);
3898 image_size += buf_cnt;
3899 }
3900 if (diffs > 0)
3901 command_print(CMD, "No more differences found.");
3902 done:
3903 if (diffs > 0)
3904 retval = ERROR_FAIL;
3905 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3906 command_print(CMD, "verified %" PRIu32 " bytes "
3907 "in %fs (%0.3f KiB/s)", image_size,
3908 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3909 }
3910
3911 image_close(&image);
3912
3913 return retval;
3914 }
3915
3916 COMMAND_HANDLER(handle_verify_image_checksum_command)
3917 {
3918 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3919 }
3920
3921 COMMAND_HANDLER(handle_verify_image_command)
3922 {
3923 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3924 }
3925
3926 COMMAND_HANDLER(handle_test_image_command)
3927 {
3928 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3929 }
3930
3931 static int handle_bp_command_list(struct command_invocation *cmd)
3932 {
3933 struct target *target = get_current_target(cmd->ctx);
3934 struct breakpoint *breakpoint = target->breakpoints;
3935 while (breakpoint) {
3936 if (breakpoint->type == BKPT_SOFT) {
3937 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3938 breakpoint->length);
3939 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3940 breakpoint->address,
3941 breakpoint->length,
3942 breakpoint->set, buf);
3943 free(buf);
3944 } else {
3945 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3946 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3947 breakpoint->asid,
3948 breakpoint->length, breakpoint->set);
3949 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3950 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3951 breakpoint->address,
3952 breakpoint->length, breakpoint->set);
3953 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3954 breakpoint->asid);
3955 } else
3956 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3957 breakpoint->address,
3958 breakpoint->length, breakpoint->set);
3959 }
3960
3961 breakpoint = breakpoint->next;
3962 }
3963 return ERROR_OK;
3964 }
3965
3966 static int handle_bp_command_set(struct command_invocation *cmd,
3967 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3968 {
3969 struct target *target = get_current_target(cmd->ctx);
3970 int retval;
3971
3972 if (asid == 0) {
3973 retval = breakpoint_add(target, addr, length, hw);
3974 /* error is always logged in breakpoint_add(), do not print it again */
3975 if (ERROR_OK == retval)
3976 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3977
3978 } else if (addr == 0) {
3979 if (target->type->add_context_breakpoint == NULL) {
3980 LOG_ERROR("Context breakpoint not available");
3981 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3982 }
3983 retval = context_breakpoint_add(target, asid, length, hw);
3984 /* error is always logged in context_breakpoint_add(), do not print it again */
3985 if (ERROR_OK == retval)
3986 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3987
3988 } else {
3989 if (target->type->add_hybrid_breakpoint == NULL) {
3990 LOG_ERROR("Hybrid breakpoint not available");
3991 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3992 }
3993 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3994 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3995 if (ERROR_OK == retval)
3996 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3997 }
3998 return retval;
3999 }
4000
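/*
 * Argument forms handled by the switch below:
 *   bp                                  - list all breakpoints
 *   bp <address> <length>               - software breakpoint
 *   bp <address> <length> hw            - hardware breakpoint
 *   bp <asid> <length> hw_ctx           - context (ASID) breakpoint
 *   bp <address> <asid> <length> ['hw'] - hybrid breakpoint (always hardware)
 */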
4001 COMMAND_HANDLER(handle_bp_command)
4002 {
4003 target_addr_t addr;
4004 uint32_t asid;
4005 uint32_t length;
4006 int hw = BKPT_SOFT;
4007
4008 switch (CMD_ARGC) {
4009 case 0:
4010 return handle_bp_command_list(CMD);
4011
4012 case 2:
4013 asid = 0;
4014 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4015 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4016 return handle_bp_command_set(CMD, addr, asid, length, hw);
4017
4018 case 3:
4019 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4020 hw = BKPT_HARD;
4021 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4022 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4023 asid = 0;
4024 return handle_bp_command_set(CMD, addr, asid, length, hw);
4025 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4026 hw = BKPT_HARD;
4027 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4028 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4029 addr = 0;
4030 return handle_bp_command_set(CMD, addr, asid, length, hw);
4031 }
4032 /* fallthrough */
4033 case 4:
4034 hw = BKPT_HARD;
4035 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4036 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4037 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4038 return handle_bp_command_set(CMD, addr, asid, length, hw);
4039
4040 default:
4041 return ERROR_COMMAND_SYNTAX_ERROR;
4042 }
4043 }
4044
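/* rbp <address>|all - remove the breakpoint at <address>, or all of them */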
4045 COMMAND_HANDLER(handle_rbp_command)
4046 {
4047 if (CMD_ARGC != 1)
4048 return ERROR_COMMAND_SYNTAX_ERROR;
4049
4050 struct target *target = get_current_target(CMD_CTX);
4051
4052 if (!strcmp(CMD_ARGV[0], "all")) {
4053 breakpoint_remove_all(target);
4054 } else {
4055 target_addr_t addr;
4056 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4057
4058 breakpoint_remove(target, addr);
4059 }
4060
4061 return ERROR_OK;
4062 }
4063
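/*
 * With no arguments, list the current watchpoints. Otherwise the forms
 * accepted by the switch below are:
 *
 *   wp <address> <length> [r|w|a [value [mask]]]
 *
 * The access type defaults to 'a' (access) and the mask to 0xffffffff.
 */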
4064 COMMAND_HANDLER(handle_wp_command)
4065 {
4066 struct target *target = get_current_target(CMD_CTX);
4067
4068 if (CMD_ARGC == 0) {
4069 struct watchpoint *watchpoint = target->watchpoints;
4070
4071 while (watchpoint) {
4072 command_print(CMD, "address: " TARGET_ADDR_FMT
4073 ", len: 0x%8.8" PRIx32
4074 ", r/w/a: %i, value: 0x%8.8" PRIx32
4075 ", mask: 0x%8.8" PRIx32,
4076 watchpoint->address,
4077 watchpoint->length,
4078 (int)watchpoint->rw,
4079 watchpoint->value,
4080 watchpoint->mask);
4081 watchpoint = watchpoint->next;
4082 }
4083 return ERROR_OK;
4084 }
4085
4086 enum watchpoint_rw type = WPT_ACCESS;
4087 target_addr_t addr = 0;
4088 uint32_t length = 0;
4089 uint32_t data_value = 0x0;
4090 uint32_t data_mask = 0xffffffff;
4091
4092 switch (CMD_ARGC) {
4093 case 5:
4094 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4095 /* fall through */
4096 case 4:
4097 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4098 /* fall through */
4099 case 3:
4100 switch (CMD_ARGV[2][0]) {
4101 case 'r':
4102 type = WPT_READ;
4103 break;
4104 case 'w':
4105 type = WPT_WRITE;
4106 break;
4107 case 'a':
4108 type = WPT_ACCESS;
4109 break;
4110 default:
4111 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4112 return ERROR_COMMAND_SYNTAX_ERROR;
4113 }
4114 /* fall through */
4115 case 2:
4116 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4117 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4118 break;
4119
4120 default:
4121 return ERROR_COMMAND_SYNTAX_ERROR;
4122 }
4123
4124 int retval = watchpoint_add(target, addr, length, type,
4125 data_value, data_mask);
4126 if (ERROR_OK != retval)
4127 LOG_ERROR("Failure setting watchpoints");
4128
4129 return retval;
4130 }
4131
4132 COMMAND_HANDLER(handle_rwp_command)
4133 {
4134 if (CMD_ARGC != 1)
4135 return ERROR_COMMAND_SYNTAX_ERROR;
4136
4137 target_addr_t addr;
4138 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4139
4140 struct target *target = get_current_target(CMD_CTX);
4141 watchpoint_remove(target, addr);
4142
4143 return ERROR_OK;
4144 }
4145
4146 /**
4147 * Translate a virtual address to a physical address.
4148 *
 * On failure, the low-level target implementation is expected to have
 * logged a detailed error, which is forwarded to the telnet/GDB session.
4151 */
4152 COMMAND_HANDLER(handle_virt2phys_command)
4153 {
4154 if (CMD_ARGC != 1)
4155 return ERROR_COMMAND_SYNTAX_ERROR;
4156
4157 target_addr_t va;
4158 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4159 target_addr_t pa;
4160
4161 struct target *target = get_current_target(CMD_CTX);
4162 int retval = target->type->virt2phys(target, va, &pa);
4163 if (retval == ERROR_OK)
4164 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4165
4166 return retval;
4167 }
4168
4169 static void writeData(FILE *f, const void *data, size_t len)
4170 {
4171 size_t written = fwrite(data, 1, len, f);
4172 if (written != len)
4173 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4174 }
4175
4176 static void writeLong(FILE *f, int l, struct target *target)
4177 {
4178 uint8_t val[4];
4179
4180 target_buffer_set_u32(target, val, l);
4181 writeData(f, val, 4);
4182 }
4183
4184 static void writeString(FILE *f, char *s)
4185 {
4186 writeData(f, s, strlen(s));
4187 }
4188
4189 typedef unsigned char UNIT[2]; /* unit of profiling */
4190
4191 /* Dump a gmon.out histogram file. */
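/*
 * For reference, the layout produced below is the classic gmon histogram
 * format: the "gmon" magic, version and padding words, a GMON_TAG_TIME_HIST
 * (0) record, a histogram header (low_pc, high_pc, bucket count, sample
 * rate, dimension string and abbreviation), followed by one 16-bit counter
 * per bucket (low byte first), saturated at 65535.
 */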
4192 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
4193 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4194 {
4195 uint32_t i;
4196 FILE *f = fopen(filename, "w");
4197 if (f == NULL)
4198 return;
4199 writeString(f, "gmon");
4200 writeLong(f, 0x00000001, target); /* Version */
4201 writeLong(f, 0, target); /* padding */
4202 writeLong(f, 0, target); /* padding */
4203 writeLong(f, 0, target); /* padding */
4204
4205 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4206 writeData(f, &zero, 1);
4207
4208 /* figure out bucket size */
4209 uint32_t min;
4210 uint32_t max;
4211 if (with_range) {
4212 min = start_address;
4213 max = end_address;
4214 } else {
4215 min = samples[0];
4216 max = samples[0];
4217 for (i = 0; i < sampleNum; i++) {
4218 if (min > samples[i])
4219 min = samples[i];
4220 if (max < samples[i])
4221 max = samples[i];
4222 }
4223
4224 /* max should be (largest sample + 1)
4225 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4226 max++;
4227 }
4228
4229 int addressSpace = max - min;
4230 assert(addressSpace >= 2);
4231
4232 /* FIXME: What is the reasonable number of buckets?
4233 * The profiling result will be more accurate if there are enough buckets. */
4234 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
4235 uint32_t numBuckets = addressSpace / sizeof(UNIT);
4236 if (numBuckets > maxBuckets)
4237 numBuckets = maxBuckets;
4238 int *buckets = malloc(sizeof(int) * numBuckets);
4239 if (buckets == NULL) {
4240 fclose(f);
4241 return;
4242 }
4243 memset(buckets, 0, sizeof(int) * numBuckets);
4244 for (i = 0; i < sampleNum; i++) {
4245 uint32_t address = samples[i];
4246
4247 if ((address < min) || (max <= address))
4248 continue;
4249
4250 long long a = address - min;
4251 long long b = numBuckets;
4252 long long c = addressSpace;
int index_t = (a * b) / c; /* 64-bit intermediates: a 32-bit (a * b) could overflow */
4254 buckets[index_t]++;
4255 }
4256
4257 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4258 writeLong(f, min, target); /* low_pc */
4259 writeLong(f, max, target); /* high_pc */
4260 writeLong(f, numBuckets, target); /* # of buckets */
4261 float sample_rate = sampleNum / (duration_ms / 1000.0);
4262 writeLong(f, sample_rate, target);
4263 writeString(f, "seconds");
4264 for (i = 0; i < (15-strlen("seconds")); i++)
4265 writeData(f, &zero, 1);
4266 writeString(f, "s");
4267
4268 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4269
4270 char *data = malloc(2 * numBuckets);
4271 if (data != NULL) {
4272 for (i = 0; i < numBuckets; i++) {
4273 int val;
4274 val = buckets[i];
4275 if (val > 65535)
4276 val = 65535;
4277 data[i * 2] = val&0xff;
4278 data[i * 2 + 1] = (val >> 8) & 0xff;
4279 }
4280 free(buckets);
4281 writeData(f, data, numBuckets * 2);
4282 free(data);
4283 } else
4284 free(buckets);
4285
4286 fclose(f);
4287 }
4288
4289 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4290 * which will be used as a random sampling of PC */
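/*
 * Illustrative invocation (argument meanings taken from the parsing below;
 * the values themselves are hypothetical):
 *
 *   profile 30 gmon.out 0x08000000 0x08020000
 *
 * CMD_ARGV[0] is parsed and passed to target_profiling(), CMD_ARGV[1]
 * names the gmon output file, and the optional address pair restricts the
 * histogram to that range.
 */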
4291 COMMAND_HANDLER(handle_profile_command)
4292 {
4293 struct target *target = get_current_target(CMD_CTX);
4294
4295 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4296 return ERROR_COMMAND_SYNTAX_ERROR;
4297
4298 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4299 uint32_t offset;
4300 uint32_t num_of_samples;
4301 int retval = ERROR_OK;
4302 bool halted_before_profiling = target->state == TARGET_HALTED;
4303
4304 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4305
4306 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4307 if (samples == NULL) {
4308 LOG_ERROR("No memory to store samples.");
4309 return ERROR_FAIL;
4310 }
4311
4312 uint64_t timestart_ms = timeval_ms();
4313 /**
4314 * Some cores let us sample the PC without the
4315 * annoying halt/resume step; for example, ARMv7 PCSR.
4316 * Provide a way to use that more efficient mechanism.
4317 */
4318 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4319 &num_of_samples, offset);
4320 if (retval != ERROR_OK) {
4321 free(samples);
4322 return retval;
4323 }
4324 uint32_t duration_ms = timeval_ms() - timestart_ms;
4325
4326 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4327
4328 retval = target_poll(target);
4329 if (retval != ERROR_OK) {
4330 free(samples);
4331 return retval;
4332 }
4333
4334 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4335 /* The target was halted before we started and is running now. Halt it,
4336 * for consistency. */
4337 retval = target_halt(target);
4338 if (retval != ERROR_OK) {
4339 free(samples);
4340 return retval;
4341 }
4342 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4343 /* The target was running before we started and is halted now. Resume
4344 * it, for consistency. */
4345 retval = target_resume(target, 1, 0, 0, 0);
4346 if (retval != ERROR_OK) {
4347 free(samples);
4348 return retval;
4349 }
4350 }
4351
4352 retval = target_poll(target);
4353 if (retval != ERROR_OK) {
4354 free(samples);
4355 return retval;
4356 }
4357
4358 uint32_t start_address = 0;
4359 uint32_t end_address = 0;
4360 bool with_range = false;
4361 if (CMD_ARGC == 4) {
4362 with_range = true;
4363 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4364 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4365 }
4366
4367 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4368 with_range, start_address, end_address, target, duration_ms);
4369 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4370
4371 free(samples);
4372 return retval;
4373 }
4374
4375 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
4376 {
4377 char *namebuf;
4378 Jim_Obj *nameObjPtr, *valObjPtr;
4379 int result;
4380
4381 namebuf = alloc_printf("%s(%d)", varname, idx);
4382 if (!namebuf)
4383 return JIM_ERR;
4384
4385 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4386 valObjPtr = Jim_NewIntObj(interp, val);
4387 if (!nameObjPtr || !valObjPtr) {
4388 free(namebuf);
4389 return JIM_ERR;
4390 }
4391
4392 Jim_IncrRefCount(nameObjPtr);
4393 Jim_IncrRefCount(valObjPtr);
4394 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
4395 Jim_DecrRefCount(interp, nameObjPtr);
4396 Jim_DecrRefCount(interp, valObjPtr);
4397 free(namebuf);
4398 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4399 return result;
4400 }
4401
4402 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4403 {
4404 struct command_context *context;
4405 struct target *target;
4406
4407 context = current_command_context(interp);
4408 assert(context != NULL);
4409
4410 target = get_current_target(context);
4411 if (target == NULL) {
4412 LOG_ERROR("mem2array: no current target");
4413 return JIM_ERR;
4414 }
4415
4416 return target_mem2array(interp, target, argc - 1, argv + 1);
4417 }
4418
4419 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4420 {
4421 long l;
4422 uint32_t width;
4423 int len;
4424 uint32_t addr;
4425 uint32_t count;
4426 uint32_t v;
4427 const char *varname;
4428 const char *phys;
4429 bool is_phys;
4430 int n, e, retval;
4431 uint32_t i;
4432
4433 /* argv[1] = name of array to receive the data
4434 * argv[2] = desired width
4435 * argv[3] = memory address
4436 * argv[4] = count of times to read
4437 */
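/* Example Tcl usage (values are illustrative):
 *   mem2array readings 32 0x20000000 16
 * fills readings(0) .. readings(15) with sixteen 32-bit words read from
 * address 0x20000000; append "phys" to read physical memory.
 */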
4438
4439 if (argc < 4 || argc > 5) {
4440 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4441 return JIM_ERR;
4442 }
4443 varname = Jim_GetString(argv[0], &len);
/* given "foo" get space for the worst case "foo(%d)" .. add 20 */
4445
4446 e = Jim_GetLong(interp, argv[1], &l);
4447 width = l;
4448 if (e != JIM_OK)
4449 return e;
4450
4451 e = Jim_GetLong(interp, argv[2], &l);
4452 addr = l;
4453 if (e != JIM_OK)
4454 return e;
4455 e = Jim_GetLong(interp, argv[3], &l);
4456 len = l;
4457 if (e != JIM_OK)
4458 return e;
4459 is_phys = false;
4460 if (argc > 4) {
4461 phys = Jim_GetString(argv[4], &n);
4462 if (!strncmp(phys, "phys", n))
4463 is_phys = true;
4464 else
4465 return JIM_ERR;
4466 }
4467 switch (width) {
4468 case 8:
4469 width = 1;
4470 break;
4471 case 16:
4472 width = 2;
4473 break;
4474 case 32:
4475 width = 4;
4476 break;
4477 default:
4478 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4479 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
4480 return JIM_ERR;
4481 }
4482 if (len == 0) {
4483 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero length read?", NULL);
4485 return JIM_ERR;
4486 }
4487 if ((addr + (len * width)) < addr) {
4488 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4489 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4490 return JIM_ERR;
4491 }
4492 /* absurd transfer size? */
4493 if (len > 65536) {
4494 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4495 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
4496 return JIM_ERR;
4497 }
4498
4499 if ((width == 1) ||
4500 ((width == 2) && ((addr & 1) == 0)) ||
4501 ((width == 4) && ((addr & 3) == 0))) {
4502 /* all is well */
4503 } else {
4504 char buf[100];
4505 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4506 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte reads",
4507 addr,
4508 width);
4509 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4510 return JIM_ERR;
4511 }
4512
4513 /* Transfer loop */
4514
4515 /* index counter */
4516 n = 0;
4517
4518 size_t buffersize = 4096;
4519 uint8_t *buffer = malloc(buffersize);
4520 if (buffer == NULL)
4521 return JIM_ERR;
4522
4523 /* assume ok */
4524 e = JIM_OK;
4525 while (len) {
4526 /* Slurp... in buffer size chunks */
4527
4528 count = len; /* in objects.. */
4529 if (count > (buffersize / width))
4530 count = (buffersize / width);
4531
4532 if (is_phys)
4533 retval = target_read_phys_memory(target, addr, width, count, buffer);
4534 else
4535 retval = target_read_memory(target, addr, width, count, buffer);
4536 if (retval != ERROR_OK) {
4537 /* BOO !*/
4538 LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4539 addr,
4540 width,
4541 count);
4542 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4543 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4544 e = JIM_ERR;
4545 break;
4546 } else {
4547 v = 0; /* shut up gcc */
4548 for (i = 0; i < count ; i++, n++) {
4549 switch (width) {
4550 case 4:
4551 v = target_buffer_get_u32(target, &buffer[i*width]);
4552 break;
4553 case 2:
4554 v = target_buffer_get_u16(target, &buffer[i*width]);
4555 break;
4556 case 1:
4557 v = buffer[i] & 0x0ff;
4558 break;
4559 }
4560 new_int_array_element(interp, varname, n, v);
4561 }
4562 len -= count;
4563 addr += count * width;
4564 }
4565 }
4566
4567 free(buffer);
4568
4569 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4570
4571 return e;
4572 }
4573
4574 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4575 {
4576 char *namebuf;
4577 Jim_Obj *nameObjPtr, *valObjPtr;
4578 int result;
4579 long l;
4580
4581 namebuf = alloc_printf("%s(%d)", varname, idx);
4582 if (!namebuf)
4583 return JIM_ERR;
4584
4585 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4586 if (!nameObjPtr) {
4587 free(namebuf);
4588 return JIM_ERR;
4589 }
4590
4591 Jim_IncrRefCount(nameObjPtr);
4592 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4593 Jim_DecrRefCount(interp, nameObjPtr);
4594 free(namebuf);
4595 if (valObjPtr == NULL)
4596 return JIM_ERR;
4597
4598 result = Jim_GetLong(interp, valObjPtr, &l);
4599 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4600 *val = l;
4601 return result;
4602 }
4603
4604 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4605 {
4606 struct command_context *context;
4607 struct target *target;
4608
4609 context = current_command_context(interp);
4610 assert(context != NULL);
4611
4612 target = get_current_target(context);
4613 if (target == NULL) {
4614 LOG_ERROR("array2mem: no current target");
4615 return JIM_ERR;
4616 }
4617
4618 return target_array2mem(interp, target, argc-1, argv + 1);
4619 }
4620
4621 static int target_array2mem(Jim_Interp *interp, struct target *target,
4622 int argc, Jim_Obj *const *argv)
4623 {
4624 long l;
4625 uint32_t width;
4626 int len;
4627 uint32_t addr;
4628 uint32_t count;
4629 uint32_t v;
4630 const char *varname;
4631 const char *phys;
4632 bool is_phys;
4633 int n, e, retval;
4634 uint32_t i;
4635
4636 /* argv[1] = name of array to get the data
4637 * argv[2] = desired width
4638 * argv[3] = memory address
4639 * argv[4] = count to write
4640 */
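/* Example Tcl usage (values are illustrative):
 *   array2mem readings 32 0x20000000 16
 * writes readings(0) .. readings(15) as sixteen 32-bit words starting at
 * address 0x20000000; append "phys" to write physical memory.
 */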
4641 if (argc < 4 || argc > 5) {
4642 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4643 return JIM_ERR;
4644 }
4645 varname = Jim_GetString(argv[0], &len);
/* given "foo" get space for the worst case "foo(%d)" .. add 20 */
4647
4648 e = Jim_GetLong(interp, argv[1], &l);
4649 width = l;
4650 if (e != JIM_OK)
4651 return e;
4652
4653 e = Jim_GetLong(interp, argv[2], &l);
4654 addr = l;
4655 if (e != JIM_OK)
4656 return e;
4657 e = Jim_GetLong(interp, argv[3], &l);
4658 len = l;
4659 if (e != JIM_OK)
4660 return e;
4661 is_phys = false;
4662 if (argc > 4) {
4663 phys = Jim_GetString(argv[4], &n);
4664 if (!strncmp(phys, "phys", n))
4665 is_phys = true;
4666 else
4667 return JIM_ERR;
4668 }
4669 switch (width) {
4670 case 8:
4671 width = 1;
4672 break;
4673 case 16:
4674 width = 2;
4675 break;
4676 case 32:
4677 width = 4;
4678 break;
4679 default:
4680 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4681 Jim_AppendStrings(interp, Jim_GetResult(interp),
4682 "Invalid width param, must be 8/16/32", NULL);
4683 return JIM_ERR;
4684 }
4685 if (len == 0) {
4686 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4687 Jim_AppendStrings(interp, Jim_GetResult(interp),
4688 "array2mem: zero width read?", NULL);
4689 return JIM_ERR;
4690 }
4691 if ((addr + (len * width)) < addr) {
4692 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4693 Jim_AppendStrings(interp, Jim_GetResult(interp),
4694 "array2mem: addr + len - wraps to zero?", NULL);
4695 return JIM_ERR;
4696 }
4697 /* absurd transfer size? */
4698 if (len > 65536) {
4699 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4700 Jim_AppendStrings(interp, Jim_GetResult(interp),
4701 "array2mem: absurd > 64K item request", NULL);
4702 return JIM_ERR;
4703 }
4704
4705 if ((width == 1) ||
4706 ((width == 2) && ((addr & 1) == 0)) ||
4707 ((width == 4) && ((addr & 3) == 0))) {
4708 /* all is well */
4709 } else {
4710 char buf[100];
4711 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte writes",
4713 addr,
4714 width);
4715 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4716 return JIM_ERR;
4717 }
4718
4719 /* Transfer loop */
4720
4721 /* index counter */
4722 n = 0;
4723 /* assume ok */
4724 e = JIM_OK;
4725
4726 size_t buffersize = 4096;
4727 uint8_t *buffer = malloc(buffersize);
4728 if (buffer == NULL)
4729 return JIM_ERR;
4730
4731 while (len) {
4732 /* Slurp... in buffer size chunks */
4733
4734 count = len; /* in objects.. */
4735 if (count > (buffersize / width))
4736 count = (buffersize / width);
4737
4738 v = 0; /* shut up gcc */
4739 for (i = 0; i < count; i++, n++) {
4740 get_int_array_element(interp, varname, n, &v);
4741 switch (width) {
4742 case 4:
4743 target_buffer_set_u32(target, &buffer[i * width], v);
4744 break;
4745 case 2:
4746 target_buffer_set_u16(target, &buffer[i * width], v);
4747 break;
4748 case 1:
4749 buffer[i] = v & 0x0ff;
4750 break;
4751 }
4752 }
4753 len -= count;
4754
4755 if (is_phys)
4756 retval = target_write_phys_memory(target, addr, width, count, buffer);
4757 else
4758 retval = target_write_memory(target, addr, width, count, buffer);
4759 if (retval != ERROR_OK) {
4760 /* BOO !*/
4761 LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4762 addr,
4763 width,
4764 count);
4765 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4767 e = JIM_ERR;
4768 break;
4769 }
4770 addr += count * width;
4771 }
4772
4773 free(buffer);
4774
4775 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4776
4777 return e;
4778 }
4779
4780 /* FIX? should we propagate errors here rather than printing them
4781 * and continuing?
4782 */
4783 void target_handle_event(struct target *target, enum target_event e)
4784 {
4785 struct target_event_action *teap;
4786 int retval;
4787
4788 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4789 if (teap->event == e) {
4790 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4791 target->target_number,
4792 target_name(target),
4793 target_type_name(target),
4794 e,
4795 jim_nvp_value2name_simple(nvp_target_event, e)->name,
4796 Jim_GetString(teap->body, NULL));
4797
/* Override the current target with the target the event is
 * issued from (many scripts rely on this).
 * Restore the previous override as soon as the event
 * handler has finished. */
4802 struct command_context *cmd_ctx = current_command_context(teap->interp);
4803 struct target *saved_target_override = cmd_ctx->current_target_override;
4804 cmd_ctx->current_target_override = target;
4805
4806 retval = Jim_EvalObj(teap->interp, teap->body);
4807
4808 cmd_ctx->current_target_override = saved_target_override;
4809
4810 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
4811 return;
4812
4813 if (retval == JIM_RETURN)
4814 retval = teap->interp->returnCode;
4815
4816 if (retval != JIM_OK) {
4817 Jim_MakeErrorMessage(teap->interp);
4818 LOG_USER("Error executing event %s on target %s:\n%s",
4819 jim_nvp_value2name_simple(nvp_target_event, e)->name,
4820 target_name(target),
4821 Jim_GetString(Jim_GetResult(teap->interp), NULL));
4822 /* clean both error code and stacktrace before return */
4823 Jim_Eval(teap->interp, "error \"\" \"\"");
4824 }
4825 }
4826 }
4827 }
4828
4829 /**
4830 * Returns true only if the target has a handler for the specified event.
4831 */
4832 bool target_has_event_action(struct target *target, enum target_event event)
4833 {
4834 struct target_event_action *teap;
4835
4836 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4837 if (teap->event == event)
4838 return true;
4839 }
4840 return false;
4841 }
4842
4843 enum target_cfg_param {
4844 TCFG_TYPE,
4845 TCFG_EVENT,
4846 TCFG_WORK_AREA_VIRT,
4847 TCFG_WORK_AREA_PHYS,
4848 TCFG_WORK_AREA_SIZE,
4849 TCFG_WORK_AREA_BACKUP,
4850 TCFG_ENDIAN,
4851 TCFG_COREID,
4852 TCFG_CHAIN_POSITION,
4853 TCFG_DBGBASE,
4854 TCFG_RTOS,
4855 TCFG_DEFER_EXAMINE,
4856 TCFG_GDB_PORT,
4857 TCFG_GDB_MAX_CONNECTIONS,
4858 };
4859
4860 static struct jim_nvp nvp_config_opts[] = {
4861 { .name = "-type", .value = TCFG_TYPE },
4862 { .name = "-event", .value = TCFG_EVENT },
4863 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4864 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4865 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4866 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4867 { .name = "-endian", .value = TCFG_ENDIAN },
4868 { .name = "-coreid", .value = TCFG_COREID },
4869 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4870 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4871 { .name = "-rtos", .value = TCFG_RTOS },
4872 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4873 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
4874 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
4875 { .name = NULL, .value = -1 }
4876 };
4877
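/*
 * Parses -option [value] pairs for the per-target 'configure' (set) and
 * 'cget' (get) subcommands, and for the trailing options of
 * 'target create'. Illustrative Tcl usage (target name and event body
 * are hypothetical):
 *
 *   $_TARGETNAME configure -event reset-init { adapter speed 4000 }
 *   $_TARGETNAME cget -work-area-size
 */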
4878 static int target_configure(struct jim_getopt_info *goi, struct target *target)
4879 {
4880 struct jim_nvp *n;
4881 Jim_Obj *o;
4882 jim_wide w;
4883 int e;
4884
4885 /* parse config or cget options ... */
4886 while (goi->argc > 0) {
4887 Jim_SetEmptyResult(goi->interp);
4888 /* jim_getopt_debug(goi); */
4889
4890 if (target->type->target_jim_configure) {
4891 /* target defines a configure function */
4892 /* target gets first dibs on parameters */
4893 e = (*(target->type->target_jim_configure))(target, goi);
4894 if (e == JIM_OK) {
4895 /* more? */
4896 continue;
4897 }
4898 if (e == JIM_ERR) {
4899 /* An error */
4900 return e;
4901 }
4902 /* otherwise we 'continue' below */
4903 }
4904 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
4905 if (e != JIM_OK) {
4906 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
4907 return e;
4908 }
4909 switch (n->value) {
4910 case TCFG_TYPE:
4911 /* not settable */
4912 if (goi->isconfigure) {
4913 Jim_SetResultFormatted(goi->interp,
4914 "not settable: %s", n->name);
4915 return JIM_ERR;
4916 } else {
4917 no_params:
4918 if (goi->argc != 0) {
4919 Jim_WrongNumArgs(goi->interp,
4920 goi->argc, goi->argv,
4921 "NO PARAMS");
4922 return JIM_ERR;
4923 }
4924 }
4925 Jim_SetResultString(goi->interp,
4926 target_type_name(target), -1);
4927 /* loop for more */
4928 break;
4929 case TCFG_EVENT:
4930 if (goi->argc == 0) {
4931 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4932 return JIM_ERR;
4933 }
4934
4935 e = jim_getopt_nvp(goi, nvp_target_event, &n);
4936 if (e != JIM_OK) {
4937 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
4938 return e;
4939 }
4940
4941 if (goi->isconfigure) {
4942 if (goi->argc != 1) {
4943 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4944 return JIM_ERR;
4945 }
4946 } else {
4947 if (goi->argc != 0) {
4948 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4949 return JIM_ERR;
4950 }
4951 }
4952
4953 {
4954 struct target_event_action *teap;
4955
4956 teap = target->event_action;
4957 /* replace existing? */
4958 while (teap) {
4959 if (teap->event == (enum target_event)n->value)
4960 break;
4961 teap = teap->next;
4962 }
4963
4964 if (goi->isconfigure) {
4965 /* START_DEPRECATED_TPIU */
4966 if (n->value == TARGET_EVENT_TRACE_CONFIG)
4967 LOG_INFO("DEPRECATED target event %s", n->name);
4968 /* END_DEPRECATED_TPIU */
4969
4970 bool replace = true;
4971 if (teap == NULL) {
4972 /* create new */
4973 teap = calloc(1, sizeof(*teap));
4974 replace = false;
4975 }
4976 teap->event = n->value;
4977 teap->interp = goi->interp;
4978 jim_getopt_obj(goi, &o);
4979 if (teap->body)
4980 Jim_DecrRefCount(teap->interp, teap->body);
4981 teap->body = Jim_DuplicateObj(goi->interp, o);
/*
 * FIXME:
 * Tcl/Tk's "bind" command has a nice feature: you can specify
 * %-substitutions (such as %X and %Y) in the event body. We should
 * support something similar here, for example:
 *   %T - target name
 *   %N - target number
 *   %E - event name
 */
4992 Jim_IncrRefCount(teap->body);
4993
4994 if (!replace) {
4995 /* add to head of event list */
4996 teap->next = target->event_action;
4997 target->event_action = teap;
4998 }
4999 Jim_SetEmptyResult(goi->interp);
5000 } else {
5001 /* get */
5002 if (teap == NULL)
5003 Jim_SetEmptyResult(goi->interp);
5004 else
5005 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5006 }
5007 }
5008 /* loop for more */
5009 break;
5010
5011 case TCFG_WORK_AREA_VIRT:
5012 if (goi->isconfigure) {
5013 target_free_all_working_areas(target);
5014 e = jim_getopt_wide(goi, &w);
5015 if (e != JIM_OK)
5016 return e;
5017 target->working_area_virt = w;
5018 target->working_area_virt_spec = true;
5019 } else {
5020 if (goi->argc != 0)
5021 goto no_params;
5022 }
5023 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5024 /* loop for more */
5025 break;
5026
5027 case TCFG_WORK_AREA_PHYS:
5028 if (goi->isconfigure) {
5029 target_free_all_working_areas(target);
5030 e = jim_getopt_wide(goi, &w);
5031 if (e != JIM_OK)
5032 return e;
5033 target->working_area_phys = w;
5034 target->working_area_phys_spec = true;
5035 } else {
5036 if (goi->argc != 0)
5037 goto no_params;
5038 }
5039 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5040 /* loop for more */
5041 break;
5042
5043 case TCFG_WORK_AREA_SIZE:
5044 if (goi->isconfigure) {
5045 target_free_all_working_areas(target);
5046 e = jim_getopt_wide(goi, &w);
5047 if (e != JIM_OK)
5048 return e;
5049 target->working_area_size = w;
5050 } else {
5051 if (goi->argc != 0)
5052 goto no_params;
5053 }
5054 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5055 /* loop for more */
5056 break;
5057
5058 case TCFG_WORK_AREA_BACKUP:
5059 if (goi->isconfigure) {
5060 target_free_all_working_areas(target);
5061 e = jim_getopt_wide(goi, &w);
5062 if (e != JIM_OK)
5063 return e;
5064 /* make this exactly 1 or 0 */
5065 target->backup_working_area = (!!w);
5066 } else {
5067 if (goi->argc != 0)
5068 goto no_params;
5069 }
5070 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
/* loop for more */
5072 break;
5073
5074
5075 case TCFG_ENDIAN:
5076 if (goi->isconfigure) {
5077 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5078 if (e != JIM_OK) {
5079 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5080 return e;
5081 }
5082 target->endianness = n->value;
5083 } else {
5084 if (goi->argc != 0)
5085 goto no_params;
5086 }
5087 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5088 if (n->name == NULL) {
5089 target->endianness = TARGET_LITTLE_ENDIAN;
5090 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5091 }
5092 Jim_SetResultString(goi->interp, n->name, -1);
5093 /* loop for more */
5094 break;
5095
5096 case TCFG_COREID:
5097 if (goi->isconfigure) {
5098 e = jim_getopt_wide(goi, &w);
5099 if (e != JIM_OK)
5100 return e;
5101 target->coreid = (int32_t)w;
5102 } else {
5103 if (goi->argc != 0)
5104 goto no_params;
5105 }
5106 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5107 /* loop for more */
5108 break;
5109
5110 case TCFG_CHAIN_POSITION:
5111 if (goi->isconfigure) {
5112 Jim_Obj *o_t;
5113 struct jtag_tap *tap;
5114
5115 if (target->has_dap) {
5116 Jim_SetResultString(goi->interp,
5117 "target requires -dap parameter instead of -chain-position!", -1);
5118 return JIM_ERR;
5119 }
5120
5121 target_free_all_working_areas(target);
5122 e = jim_getopt_obj(goi, &o_t);
5123 if (e != JIM_OK)
5124 return e;
5125 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5126 if (tap == NULL)
5127 return JIM_ERR;
5128 target->tap = tap;
5129 target->tap_configured = true;
5130 } else {
5131 if (goi->argc != 0)
5132 goto no_params;
5133 }
5134 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
/* loop for more */
5136 break;
5137 case TCFG_DBGBASE:
5138 if (goi->isconfigure) {
5139 e = jim_getopt_wide(goi, &w);
5140 if (e != JIM_OK)
5141 return e;
5142 target->dbgbase = (uint32_t)w;
5143 target->dbgbase_set = true;
5144 } else {
5145 if (goi->argc != 0)
5146 goto no_params;
5147 }
5148 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5149 /* loop for more */
5150 break;
5151 case TCFG_RTOS:
5152 /* RTOS */
5153 {
5154 int result = rtos_create(goi, target);
5155 if (result != JIM_OK)
5156 return result;
5157 }
5158 /* loop for more */
5159 break;
5160
5161 case TCFG_DEFER_EXAMINE:
5162 /* DEFER_EXAMINE */
5163 target->defer_examine = true;
5164 /* loop for more */
5165 break;
5166
5167 case TCFG_GDB_PORT:
5168 if (goi->isconfigure) {
5169 struct command_context *cmd_ctx = current_command_context(goi->interp);
5170 if (cmd_ctx->mode != COMMAND_CONFIG) {
5171 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5172 return JIM_ERR;
5173 }
5174
5175 const char *s;
5176 e = jim_getopt_string(goi, &s, NULL);
5177 if (e != JIM_OK)
5178 return e;
5179 free(target->gdb_port_override);
5180 target->gdb_port_override = strdup(s);
5181 } else {
5182 if (goi->argc != 0)
5183 goto no_params;
5184 }
5185 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5186 /* loop for more */
5187 break;
5188
5189 case TCFG_GDB_MAX_CONNECTIONS:
5190 if (goi->isconfigure) {
5191 struct command_context *cmd_ctx = current_command_context(goi->interp);
5192 if (cmd_ctx->mode != COMMAND_CONFIG) {
5193 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5194 return JIM_ERR;
5195 }
5196
5197 e = jim_getopt_wide(goi, &w);
5198 if (e != JIM_OK)
5199 return e;
5200 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5201 } else {
5202 if (goi->argc != 0)
5203 goto no_params;
5204 }
5205 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5206 break;
5207 }
5208 } /* while (goi->argc) */
5209
5210
5211 /* done - we return */
5212 return JIM_OK;
5213 }
5214
5215 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5216 {
5217 struct command *c = jim_to_command(interp);
5218 struct jim_getopt_info goi;
5219
5220 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5221 goi.isconfigure = !strcmp(c->name, "configure");
5222 if (goi.argc < 1) {
5223 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5224 "missing: -option ...");
5225 return JIM_ERR;
5226 }
5227 struct command_context *cmd_ctx = current_command_context(interp);
5228 assert(cmd_ctx);
5229 struct target *target = get_current_target(cmd_ctx);
5230 return target_configure(&goi, target);
5231 }
5232
5233 static int jim_target_mem2array(Jim_Interp *interp,
5234 int argc, Jim_Obj *const *argv)
5235 {
5236 struct command_context *cmd_ctx = current_command_context(interp);
5237 assert(cmd_ctx);
5238 struct target *target = get_current_target(cmd_ctx);
5239 return target_mem2array(interp, target, argc - 1, argv + 1);
5240 }
5241
5242 static int jim_target_array2mem(Jim_Interp *interp,
5243 int argc, Jim_Obj *const *argv)
5244 {
5245 struct command_context *cmd_ctx = current_command_context(interp);
5246 assert(cmd_ctx);
5247 struct target *target = get_current_target(cmd_ctx);
5248 return target_array2mem(interp, target, argc - 1, argv + 1);
5249 }
5250
5251 static int jim_target_tap_disabled(Jim_Interp *interp)
5252 {
5253 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5254 return JIM_ERR;
5255 }
5256
5257 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5258 {
5259 bool allow_defer = false;
5260
5261 struct jim_getopt_info goi;
5262 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5263 if (goi.argc > 1) {
5264 const char *cmd_name = Jim_GetString(argv[0], NULL);
5265 Jim_SetResultFormatted(goi.interp,
5266 "usage: %s ['allow-defer']", cmd_name);
5267 return JIM_ERR;
5268 }
5269 if (goi.argc > 0 &&
5270 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5271 /* consume it */
5272 Jim_Obj *obj;
5273 int e = jim_getopt_obj(&goi, &obj);
5274 if (e != JIM_OK)
5275 return e;
5276 allow_defer = true;
5277 }
5278
5279 struct command_context *cmd_ctx = current_command_context(interp);
5280 assert(cmd_ctx);
5281 struct target *target = get_current_target(cmd_ctx);
5282 if (!target->tap->enabled)
5283 return jim_target_tap_disabled(interp);
5284
5285 if (allow_defer && target->defer_examine) {
5286 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5287 LOG_INFO("Use arp_examine command to examine it manually!");
5288 return JIM_OK;
5289 }
5290
5291 int e = target->type->examine(target);
5292 if (e != ERROR_OK)
5293 return JIM_ERR;
5294 return JIM_OK;
5295 }
5296
5297 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5298 {
5299 struct command_context *cmd_ctx = current_command_context(interp);
5300 assert(cmd_ctx);
5301 struct target *target = get_current_target(cmd_ctx);
5302
5303 Jim_SetResultBool(interp, target_was_examined(target));
5304 return JIM_OK;
5305 }
5306
5307 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5308 {
5309 struct command_context *cmd_ctx = current_command_context(interp);
5310 assert(cmd_ctx);
5311 struct target *target = get_current_target(cmd_ctx);
5312
5313 Jim_SetResultBool(interp, target->defer_examine);
5314 return JIM_OK;
5315 }
5316
5317 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5318 {
5319 if (argc != 1) {
5320 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5321 return JIM_ERR;
5322 }
5323 struct command_context *cmd_ctx = current_command_context(interp);
5324 assert(cmd_ctx);
5325 struct target *target = get_current_target(cmd_ctx);
5326
5327 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5328 return JIM_ERR;
5329
5330 return JIM_OK;
5331 }
5332
5333 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5334 {
5335 if (argc != 1) {
5336 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5337 return JIM_ERR;
5338 }
5339 struct command_context *cmd_ctx = current_command_context(interp);
5340 assert(cmd_ctx);
5341 struct target *target = get_current_target(cmd_ctx);
5342 if (!target->tap->enabled)
5343 return jim_target_tap_disabled(interp);
5344
5345 int e;
5346 if (!(target_was_examined(target)))
5347 e = ERROR_TARGET_NOT_EXAMINED;
5348 else
5349 e = target->type->poll(target);
5350 if (e != ERROR_OK)
5351 return JIM_ERR;
5352 return JIM_OK;
5353 }
5354
5355 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5356 {
5357 struct jim_getopt_info goi;
5358 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5359
5360 if (goi.argc != 2) {
5361 Jim_WrongNumArgs(interp, 0, argv,
5362 "([tT]|[fF]|assert|deassert) BOOL");
5363 return JIM_ERR;
5364 }
5365
5366 struct jim_nvp *n;
5367 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5368 if (e != JIM_OK) {
5369 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5370 return e;
5371 }
5372 /* the halt or not param */
5373 jim_wide a;
5374 e = jim_getopt_wide(&goi, &a);
5375 if (e != JIM_OK)
5376 return e;
5377
5378 struct command_context *cmd_ctx = current_command_context(interp);
5379 assert(cmd_ctx);
5380 struct target *target = get_current_target(cmd_ctx);
5381 if (!target->tap->enabled)
5382 return jim_target_tap_disabled(interp);
5383
5384 if (!target->type->assert_reset || !target->type->deassert_reset) {
5385 Jim_SetResultFormatted(interp,
5386 "No target-specific reset for %s",
5387 target_name(target));
5388 return JIM_ERR;
5389 }
5390
5391 if (target->defer_examine)
5392 target_reset_examined(target);
5393
5394 /* determine if we should halt or not. */
5395 target->reset_halt = (a != 0);
5396 /* When this happens - all workareas are invalid. */
5397 target_free_all_working_areas_restore(target, 0);
5398
5399 /* do the assert */
5400 if (n->value == NVP_ASSERT)
5401 e = target->type->assert_reset(target);
5402 else
5403 e = target->type->deassert_reset(target);
5404 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5405 }
5406
5407 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5408 {
5409 if (argc != 1) {
5410 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5411 return JIM_ERR;
5412 }
5413 struct command_context *cmd_ctx = current_command_context(interp);
5414 assert(cmd_ctx);
5415 struct target *target = get_current_target(cmd_ctx);
5416 if (!target->tap->enabled)
5417 return jim_target_tap_disabled(interp);
5418 int e = target->type->halt(target);
5419 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5420 }
5421
5422 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5423 {
5424 struct jim_getopt_info goi;
5425 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5426
5427 /* params: <name> statename timeoutmsecs */
5428 if (goi.argc != 2) {
5429 const char *cmd_name = Jim_GetString(argv[0], NULL);
5430 Jim_SetResultFormatted(goi.interp,
5431 "%s <state_name> <timeout_in_msec>", cmd_name);
5432 return JIM_ERR;
5433 }
5434
5435 struct jim_nvp *n;
5436 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5437 if (e != JIM_OK) {
5438 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5439 return e;
5440 }
5441 jim_wide a;
5442 e = jim_getopt_wide(&goi, &a);
5443 if (e != JIM_OK)
5444 return e;
5445 struct command_context *cmd_ctx = current_command_context(interp);
5446 assert(cmd_ctx);
5447 struct target *target = get_current_target(cmd_ctx);
5448 if (!target->tap->enabled)
5449 return jim_target_tap_disabled(interp);
5450
5451 e = target_wait_state(target, n->value, a);
5452 if (e != ERROR_OK) {
5453 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
5454 Jim_SetResultFormatted(goi.interp,
5455 "target: %s wait %s fails (%#s) %s",
5456 target_name(target), n->name,
5457 eObj, target_strerror_safe(e));
5458 return JIM_ERR;
5459 }
5460 return JIM_OK;
5461 }
/* List, for humans, the events defined for this target.
 * Scripts and programs should use 'name cget -event NAME' instead.
 */
5465 COMMAND_HANDLER(handle_target_event_list)
5466 {
5467 struct target *target = get_current_target(CMD_CTX);
5468 struct target_event_action *teap = target->event_action;
5469
5470 command_print(CMD, "Event actions for target (%d) %s\n",
5471 target->target_number,
5472 target_name(target));
5473 command_print(CMD, "%-25s | Body", "Event");
5474 command_print(CMD, "------------------------- | "
5475 "----------------------------------------");
5476 while (teap) {
5477 struct jim_nvp *opt = jim_nvp_value2name_simple(nvp_target_event, teap->event);
5478 command_print(CMD, "%-25s | %s",
5479 opt->name, Jim_GetString(teap->body, NULL));
5480 teap = teap->next;
5481 }
5482 command_print(CMD, "***END***");
5483 return ERROR_OK;
5484 }
5485 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5486 {
5487 if (argc != 1) {
5488 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5489 return JIM_ERR;
5490 }
5491 struct command_context *cmd_ctx = current_command_context(interp);
5492 assert(cmd_ctx);
5493 struct target *target = get_current_target(cmd_ctx);
5494 Jim_SetResultString(interp, target_state_name(target), -1);
5495 return JIM_OK;
5496 }
5497 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5498 {
5499 struct jim_getopt_info goi;
5500 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5501 if (goi.argc != 1) {
5502 const char *cmd_name = Jim_GetString(argv[0], NULL);
5503 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5504 return JIM_ERR;
5505 }
5506 struct jim_nvp *n;
5507 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5508 if (e != JIM_OK) {
5509 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5510 return e;
5511 }
5512 struct command_context *cmd_ctx = current_command_context(interp);
5513 assert(cmd_ctx);
5514 struct target *target = get_current_target(cmd_ctx);
5515 target_handle_event(target, n->value);
5516 return JIM_OK;
5517 }
5518
5519 static const struct command_registration target_instance_command_handlers[] = {
5520 {
5521 .name = "configure",
5522 .mode = COMMAND_ANY,
5523 .jim_handler = jim_target_configure,
5524 .help = "configure a new target for use",
5525 .usage = "[target_attribute ...]",
5526 },
5527 {
5528 .name = "cget",
5529 .mode = COMMAND_ANY,
5530 .jim_handler = jim_target_configure,
5531 .help = "returns the specified target attribute",
5532 .usage = "target_attribute",
5533 },
5534 {
5535 .name = "mwd",
5536 .handler = handle_mw_command,
5537 .mode = COMMAND_EXEC,
5538 .help = "Write 64-bit word(s) to target memory",
5539 .usage = "address data [count]",
5540 },
5541 {
5542 .name = "mww",
5543 .handler = handle_mw_command,
5544 .mode = COMMAND_EXEC,
5545 .help = "Write 32-bit word(s) to target memory",
5546 .usage = "address data [count]",
5547 },
5548 {
5549 .name = "mwh",
5550 .handler = handle_mw_command,
5551 .mode = COMMAND_EXEC,
5552 .help = "Write 16-bit half-word(s) to target memory",
5553 .usage = "address data [count]",
5554 },
5555 {
5556 .name = "mwb",
5557 .handler = handle_mw_command,
5558 .mode = COMMAND_EXEC,
5559 .help = "Write byte(s) to target memory",
5560 .usage = "address data [count]",
5561 },
5562 {
5563 .name = "mdd",
5564 .handler = handle_md_command,
5565 .mode = COMMAND_EXEC,
5566 .help = "Display target memory as 64-bit words",
5567 .usage = "address [count]",
5568 },
5569 {
5570 .name = "mdw",
5571 .handler = handle_md_command,
5572 .mode = COMMAND_EXEC,
5573 .help = "Display target memory as 32-bit words",
5574 .usage = "address [count]",
5575 },
5576 {
5577 .name = "mdh",
5578 .handler = handle_md_command,
5579 .mode = COMMAND_EXEC,
5580 .help = "Display target memory as 16-bit half-words",
5581 .usage = "address [count]",
5582 },
5583 {
5584 .name = "mdb",
5585 .handler = handle_md_command,
5586 .mode = COMMAND_EXEC,
5587 .help = "Display target memory as 8-bit bytes",
5588 .usage = "address [count]",
5589 },
5590 {
5591 .name = "array2mem",
5592 .mode = COMMAND_EXEC,
5593 .jim_handler = jim_target_array2mem,
5594 .help = "Writes Tcl array of 8/16/32 bit numbers "
5595 "to target memory",
5596 .usage = "arrayname bitwidth address count",
5597 },
5598 {
5599 .name = "mem2array",
5600 .mode = COMMAND_EXEC,
5601 .jim_handler = jim_target_mem2array,
5602 .help = "Loads Tcl array of 8/16/32 bit numbers "
5603 "from target memory",
5604 .usage = "arrayname bitwidth address count",
5605 },
5606 {
5607 .name = "eventlist",
5608 .handler = handle_target_event_list,
5609 .mode = COMMAND_EXEC,
5610 .help = "displays a table of events defined for this target",
5611 .usage = "",
5612 },
5613 {
5614 .name = "curstate",
5615 .mode = COMMAND_EXEC,
5616 .jim_handler = jim_target_current_state,
5617 .help = "displays the current state of this target",
5618 },
5619 {
5620 .name = "arp_examine",
5621 .mode = COMMAND_EXEC,
5622 .jim_handler = jim_target_examine,
5623 .help = "used internally for reset processing",
5624 .usage = "['allow-defer']",
5625 },
5626 {
5627 .name = "was_examined",
5628 .mode = COMMAND_EXEC,
5629 .jim_handler = jim_target_was_examined,
5630 .help = "used internally for reset processing",
5631 },
5632 {
5633 .name = "examine_deferred",
5634 .mode = COMMAND_EXEC,
5635 .jim_handler = jim_target_examine_deferred,
5636 .help = "used internally for reset processing",
5637 },
5638 {
5639 .name = "arp_halt_gdb",
5640 .mode = COMMAND_EXEC,
5641 .jim_handler = jim_target_halt_gdb,
5642 .help = "used internally for reset processing to halt GDB",
5643 },
5644 {
5645 .name = "arp_poll",
5646 .mode = COMMAND_EXEC,
5647 .jim_handler = jim_target_poll,
5648 .help = "used internally for reset processing",
5649 },
5650 {
5651 .name = "arp_reset",
5652 .mode = COMMAND_EXEC,
5653 .jim_handler = jim_target_reset,
5654 .help = "used internally for reset processing",
5655 },
5656 {
5657 .name = "arp_halt",
5658 .mode = COMMAND_EXEC,
5659 .jim_handler = jim_target_halt,
5660 .help = "used internally for reset processing",
5661 },
5662 {
5663 .name = "arp_waitstate",
5664 .mode = COMMAND_EXEC,
5665 .jim_handler = jim_target_wait_state,
5666 .help = "used internally for reset processing",
5667 },
5668 {
5669 .name = "invoke-event",
5670 .mode = COMMAND_EXEC,
5671 .jim_handler = jim_target_invoke_event,
5672 .help = "invoke handler for specified event",
5673 .usage = "event_name",
5674 },
5675 COMMAND_REGISTRATION_DONE
5676 };
5677
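/*
 * Implements 'target create'. An illustrative invocation from a target
 * config script (the names used are hypothetical):
 *
 *   target create $_CHIPNAME.cpu cortex_m -chain-position $_CHIPNAME.cpu
 *
 * Everything after the type name is handed to target_configure() as
 * ordinary -option arguments.
 */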
5678 static int target_create(struct jim_getopt_info *goi)
5679 {
5680 Jim_Obj *new_cmd;
5681 Jim_Cmd *cmd;
5682 const char *cp;
5683 int e;
5684 int x;
5685 struct target *target;
5686 struct command_context *cmd_ctx;
5687
5688 cmd_ctx = current_command_context(goi->interp);
5689 assert(cmd_ctx != NULL);
5690
5691 if (goi->argc < 3) {
5692 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5693 return JIM_ERR;
5694 }
5695
5696 /* COMMAND */
5697 jim_getopt_obj(goi, &new_cmd);
5698 /* does this command exist? */
5699 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5700 if (cmd) {
5701 cp = Jim_GetString(new_cmd, NULL);
5702 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5703 return JIM_ERR;
5704 }
5705
5706 /* TYPE */
5707 e = jim_getopt_string(goi, &cp, NULL);
5708 if (e != JIM_OK)
5709 return e;
5710 struct transport *tr = get_current_transport();
5711 if (tr->override_target) {
5712 e = tr->override_target(&cp);
5713 if (e != ERROR_OK) {
5714 LOG_ERROR("The selected transport doesn't support this target");
5715 return JIM_ERR;
5716 }
5717 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5718 }
5719 /* now does target type exist */
5720 for (x = 0 ; target_types[x] ; x++) {
5721 if (0 == strcmp(cp, target_types[x]->name)) {
5722 /* found */
5723 break;
5724 }
5725 }
5726 if (target_types[x] == NULL) {
5727 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5728 for (x = 0 ; target_types[x] ; x++) {
5729 if (target_types[x + 1]) {
5730 Jim_AppendStrings(goi->interp,
5731 Jim_GetResult(goi->interp),
5732 target_types[x]->name,
5733 ", ", NULL);
5734 } else {
5735 Jim_AppendStrings(goi->interp,
5736 Jim_GetResult(goi->interp),
5737 " or ",
5738 target_types[x]->name, NULL);
5739 }
5740 }
5741 return JIM_ERR;
5742 }
5743
5744 /* Create it */
5745 target = calloc(1, sizeof(struct target));
5746 if (!target) {
5747 LOG_ERROR("Out of memory");
5748 return JIM_ERR;
5749 }
5750
5751 /* set target number */
5752 target->target_number = new_target_number();
5753
5754 /* allocate memory for each unique target type */
5755 target->type = malloc(sizeof(struct target_type));
5756 if (!target->type) {
5757 LOG_ERROR("Out of memory");
5758 free(target);
5759 return JIM_ERR;
5760 }
5761
5762 memcpy(target->type, target_types[x], sizeof(struct target_type));
5763
5764 /* default to first core, override with -coreid */
5765 target->coreid = 0;
5766
5767 target->working_area = 0x0;
5768 target->working_area_size = 0x0;
5769 target->working_areas = NULL;
5770 target->backup_working_area = 0;
5771
5772 target->state = TARGET_UNKNOWN;
5773 target->debug_reason = DBG_REASON_UNDEFINED;
5774 target->reg_cache = NULL;
5775 target->breakpoints = NULL;
5776 target->watchpoints = NULL;
5777 target->next = NULL;
5778 target->arch_info = NULL;
5779
5780 target->verbose_halt_msg = true;
5781
5782 target->halt_issued = false;
5783
5784 /* initialize trace information */
5785 target->trace_info = calloc(1, sizeof(struct trace));
5786 if (!target->trace_info) {
5787 LOG_ERROR("Out of memory");
5788 free(target->type);
5789 free(target);
5790 return JIM_ERR;
5791 }
5792
5793 target->dbgmsg = NULL;
5794 target->dbg_msg_enabled = 0;
5795
5796 target->endianness = TARGET_ENDIAN_UNKNOWN;
5797
5798 target->rtos = NULL;
5799 target->rtos_auto_detect = false;
5800
5801 target->gdb_port_override = NULL;
5802 target->gdb_max_connections = 1;
5803
5804 /* Do the rest as "configure" options */
5805 goi->isconfigure = 1;
5806 e = target_configure(goi, target);
5807
5808 if (e == JIM_OK) {
5809 if (target->has_dap) {
5810 if (!target->dap_configured) {
5811 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5812 e = JIM_ERR;
5813 }
5814 } else {
5815 if (!target->tap_configured) {
5816 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5817 e = JIM_ERR;
5818 }
5819 }
5820 /* tap must be set after target was configured */
5821 if (target->tap == NULL)
5822 e = JIM_ERR;
5823 }
5824
5825 if (e != JIM_OK) {
5826 rtos_destroy(target);
5827 free(target->gdb_port_override);
5828 free(target->trace_info);
5829 free(target->type);
5830 free(target);
5831 return e;
5832 }
5833
5834 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5835 /* default endian to little if not specified */
5836 target->endianness = TARGET_LITTLE_ENDIAN;
5837 }
5838
5839 cp = Jim_GetString(new_cmd, NULL);
5840 target->cmd_name = strdup(cp);
5841 if (!target->cmd_name) {
5842 LOG_ERROR("Out of memory");
5843 rtos_destroy(target);
5844 free(target->gdb_port_override);
5845 free(target->trace_info);
5846 free(target->type);
5847 free(target);
5848 return JIM_ERR;
5849 }
5850
5851 if (target->type->target_create) {
5852 e = (*(target->type->target_create))(target, goi->interp);
5853 if (e != ERROR_OK) {
5854 LOG_DEBUG("target_create failed");
5855 free(target->cmd_name);
5856 rtos_destroy(target);
5857 free(target->gdb_port_override);
5858 free(target->trace_info);
5859 free(target->type);
5860 free(target);
5861 return JIM_ERR;
5862 }
5863 }
5864
5865 /* create the target specific commands */
5866 if (target->type->commands) {
5867 e = register_commands(cmd_ctx, NULL, target->type->commands);
5868 if (ERROR_OK != e)
5869 LOG_ERROR("unable to register '%s' commands", cp);
5870 }
5871
5872 /* now - create the new target name command */
5873 const struct command_registration target_subcommands[] = {
5874 {
5875 .chain = target_instance_command_handlers,
5876 },
5877 {
5878 .chain = target->type->commands,
5879 },
5880 COMMAND_REGISTRATION_DONE
5881 };
5882 const struct command_registration target_commands[] = {
5883 {
5884 .name = cp,
5885 .mode = COMMAND_ANY,
5886 .help = "target command group",
5887 .usage = "",
5888 .chain = target_subcommands,
5889 },
5890 COMMAND_REGISTRATION_DONE
5891 };
5892 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
5893 if (e != ERROR_OK) {
5894 if (target->type->deinit_target)
5895 target->type->deinit_target(target);
5896 free(target->cmd_name);
5897 rtos_destroy(target);
5898 free(target->gdb_port_override);
5899 free(target->trace_info);
5900 free(target->type);
5901 free(target);
5902 return JIM_ERR;
5903 }
5904
5905 /* append to end of list */
5906 append_to_list_all_targets(target);
5907
5908 cmd_ctx->current_target = target;
5909 return JIM_OK;
5910 }
5911
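/* 'target current': return the name of the currently selected target,
 * or an empty result if no target has been selected yet. */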
5912 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5913 {
5914 if (argc != 1) {
5915 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5916 return JIM_ERR;
5917 }
5918 struct command_context *cmd_ctx = current_command_context(interp);
5919 assert(cmd_ctx != NULL);
5920
5921 struct target *target = get_current_target_or_null(cmd_ctx);
5922 if (target)
5923 Jim_SetResultString(interp, target_name(target), -1);
5924 return JIM_OK;
5925 }
5926
5927 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5928 {
5929 if (argc != 1) {
5930 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5931 return JIM_ERR;
5932 }
5933 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5934 for (unsigned x = 0; NULL != target_types[x]; x++) {
5935 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5936 Jim_NewStringObj(interp, target_types[x]->name, -1));
5937 }
5938 return JIM_OK;
5939 }
5940
5941 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5942 {
5943 if (argc != 1) {
5944 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5945 return JIM_ERR;
5946 }
5947 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5948 struct target *target = all_targets;
5949 while (target) {
5950 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5951 Jim_NewStringObj(interp, target_name(target), -1));
5952 target = target->next;
5953 }
5954 return JIM_OK;
5955 }
5956
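/*
 * 'target smp targetname1 targetname2 ...': build a shared target_list from
 * the named targets and mark each of them as part of the SMP group (smp = 1,
 * head pointing at the shared list). If the last named target has an RTOS,
 * rtos_smp_init() is invoked on the first target of the list.
 * Example (names are illustrative): target smp chip.cpu0 chip.cpu1
 */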
5957 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5958 {
5959 int i;
5960 const char *targetname;
5961 int retval, len;
5962 struct target *target = (struct target *) NULL;
5963 struct target_list *head, *curr, *new;
5964 curr = (struct target_list *) NULL;
5965 head = (struct target_list *) NULL;
5966
5967 retval = 0;
5968 LOG_DEBUG("%d", argc);
5969 /* argv[1] = target to associate in smp
5970 * argv[2] = target to associate in smp
5971 * argv[3] ...
5972 */
5973
5974 for (i = 1; i < argc; i++) {
5975
5976 targetname = Jim_GetString(argv[i], &len);
5977 target = get_target(targetname);
5978 LOG_DEBUG("%s ", targetname);
5979 if (target) {
5980 new = malloc(sizeof(struct target_list));
if (!new) {
LOG_ERROR("Out of memory");
return JIM_ERR;
}
5981 new->target = target;
5982 new->next = (struct target_list *)NULL;
5983 if (head == (struct target_list *)NULL) {
5984 head = new;
5985 curr = head;
5986 } else {
5987 curr->next = new;
5988 curr = new;
5989 }
5990 }
5991 }
5992 /* now walk the list of CPUs and put each target into SMP mode */
5993 curr = head;
5994
5995 while (curr != (struct target_list *)NULL) {
5996 target = curr->target;
5997 target->smp = 1;
5998 target->head = head;
5999 curr = curr->next;
6000 }
6001
6002 if (target && target->rtos)
6003 retval = rtos_smp_init(head->target);
6004
6005 return retval;
6006 }
6007
6008
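/* 'target create': validate the argument count, then hand off to
 * target_create() above. */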
6009 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6010 {
6011 struct jim_getopt_info goi;
6012 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6013 if (goi.argc < 3) {
6014 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6015 "<name> <target_type> [<target_options> ...]");
6016 return JIM_ERR;
6017 }
6018 return target_create(&goi);
6019 }
6020
6021 static const struct command_registration target_subcommand_handlers[] = {
6022 {
6023 .name = "init",
6024 .mode = COMMAND_CONFIG,
6025 .handler = handle_target_init_command,
6026 .help = "initialize targets",
6027 .usage = "",
6028 },
6029 {
6030 .name = "create",
6031 .mode = COMMAND_CONFIG,
6032 .jim_handler = jim_target_create,
6033 .usage = "name type '-chain-position' name [options ...]",
6034 .help = "Creates and selects a new target",
6035 },
6036 {
6037 .name = "current",
6038 .mode = COMMAND_ANY,
6039 .jim_handler = jim_target_current,
6040 .help = "Returns the currently selected target",
6041 },
6042 {
6043 .name = "types",
6044 .mode = COMMAND_ANY,
6045 .jim_handler = jim_target_types,
6046 .help = "Returns the available target types as "
6047 "a list of strings",
6048 },
6049 {
6050 .name = "names",
6051 .mode = COMMAND_ANY,
6052 .jim_handler = jim_target_names,
6053 .help = "Returns the names of all targets as a list of strings",
6054 },
6055 {
6056 .name = "smp",
6057 .mode = COMMAND_ANY,
6058 .jim_handler = jim_target_smp,
6059 .usage = "targetname1 targetname2 ...",
6060 .help = "gather several target in a smp list"
6061 },
6062
6063 COMMAND_REGISTRATION_DONE
6064 };
6065
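/*
 * Support for 'fast_load_image' / 'fast_load': the image sections are first
 * cached in host memory as an array of FastLoad entries, and a later
 * 'fast_load' writes the cached data to the target so that only the actual
 * transfer is timed (mainly useful for profiling).
 */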
6066 struct FastLoad {
6067 target_addr_t address;
6068 uint8_t *data;
6069 int length;
6070
6071 };
6072
6073 static int fastload_num;
6074 static struct FastLoad *fastload;
6075
6076 static void free_fastload(void)
6077 {
6078 if (fastload != NULL) {
6079 for (int i = 0; i < fastload_num; i++)
6080 free(fastload[i].data);
6081 free(fastload);
6082 fastload = NULL;
6083 }
6084 }
6085
6086 COMMAND_HANDLER(handle_fast_load_image_command)
6087 {
6088 uint8_t *buffer;
6089 size_t buf_cnt;
6090 uint32_t image_size;
6091 target_addr_t min_address = 0;
6092 target_addr_t max_address = -1;
6093
6094 struct image image;
6095
6096 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
6097 &image, &min_address, &max_address);
6098 if (ERROR_OK != retval)
6099 return retval;
6100
6101 struct duration bench;
6102 duration_start(&bench);
6103
6104 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6105 if (retval != ERROR_OK)
6106 return retval;
6107
6108 image_size = 0x0;
6109 retval = ERROR_OK;
6110 fastload_num = image.num_sections;
6111 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
6112 if (fastload == NULL) {
6113 command_print(CMD, "out of memory");
6114 image_close(&image);
6115 return ERROR_FAIL;
6116 }
6117 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
6118 for (unsigned int i = 0; i < image.num_sections; i++) {
6119 buffer = malloc(image.sections[i].size);
6120 if (buffer == NULL) {
6121 command_print(CMD, "error allocating buffer for section (%d bytes)",
6122 (int)(image.sections[i].size));
6123 retval = ERROR_FAIL;
6124 break;
6125 }
6126
6127 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6128 if (retval != ERROR_OK) {
6129 free(buffer);
6130 break;
6131 }
6132
6133 uint32_t offset = 0;
6134 uint32_t length = buf_cnt;
6135
6136 /* DANGER!!! beware of unsigned comparison here!!! */
6137
6138 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6139 (image.sections[i].base_address < max_address)) {
6140 if (image.sections[i].base_address < min_address) {
6141 /* clip addresses below */
6142 offset += min_address-image.sections[i].base_address;
6143 length -= offset;
6144 }
6145
6146 if (image.sections[i].base_address + buf_cnt > max_address)
6147 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6148
6149 fastload[i].address = image.sections[i].base_address + offset;
6150 fastload[i].data = malloc(length);
6151 if (fastload[i].data == NULL) {
6152 free(buffer);
6153 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6154 length);
6155 retval = ERROR_FAIL;
6156 break;
6157 }
6158 memcpy(fastload[i].data, buffer + offset, length);
6159 fastload[i].length = length;
6160
6161 image_size += length;
6162 command_print(CMD, "%u bytes written at address 0x%8.8x",
6163 (unsigned int)length,
6164 ((unsigned int)(image.sections[i].base_address + offset)));
6165 }
6166
6167 free(buffer);
6168 }
6169
6170 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
6171 command_print(CMD, "Loaded %" PRIu32 " bytes "
6172 "in %fs (%0.3f KiB/s)", image_size,
6173 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6174
6175 command_print(CMD,
6176 "WARNING: image has not been loaded to target!"
6177 "You can issue a 'fast_load' to finish loading.");
6178 }
6179
6180 image_close(&image);
6181
6182 if (retval != ERROR_OK)
6183 free_fastload();
6184
6185 return retval;
6186 }
6187
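/* 'fast_load': replay the image sections cached by 'fast_load_image' onto the
 * current target via target_write_buffer() and report the transfer rate. */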
6188 COMMAND_HANDLER(handle_fast_load_command)
6189 {
6190 if (CMD_ARGC > 0)
6191 return ERROR_COMMAND_SYNTAX_ERROR;
6192 if (fastload == NULL) {
6193 LOG_ERROR("No image in memory");
6194 return ERROR_FAIL;
6195 }
6196 int i;
6197 int64_t ms = timeval_ms();
6198 int size = 0;
6199 int retval = ERROR_OK;
6200 for (i = 0; i < fastload_num; i++) {
6201 struct target *target = get_current_target(CMD_CTX);
6202 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6203 (unsigned int)(fastload[i].address),
6204 (unsigned int)(fastload[i].length));
6205 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6206 if (retval != ERROR_OK)
6207 break;
6208 size += fastload[i].length;
6209 }
6210 if (retval == ERROR_OK) {
6211 int64_t after = timeval_ms();
6212 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6213 }
6214 return retval;
6215 }
6216
6217 static const struct command_registration target_command_handlers[] = {
6218 {
6219 .name = "targets",
6220 .handler = handle_targets_command,
6221 .mode = COMMAND_ANY,
6222 .help = "change current default target (one parameter) "
6223 "or prints table of all targets (no parameters)",
6224 .usage = "[target]",
6225 },
6226 {
6227 .name = "target",
6228 .mode = COMMAND_CONFIG,
6229 .help = "configure target",
6230 .chain = target_subcommand_handlers,
6231 .usage = "",
6232 },
6233 COMMAND_REGISTRATION_DONE
6234 };
6235
6236 int target_register_commands(struct command_context *cmd_ctx)
6237 {
6238 return register_commands(cmd_ctx, NULL, target_command_handlers);
6239 }
6240
6241 static bool target_reset_nag = true;
6242
6243 bool get_target_reset_nag(void)
6244 {
6245 return target_reset_nag;
6246 }
6247
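/* 'reset_nag enable|disable': control whether a reminder about options that
 * could improve performance is printed after each reset. */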
6248 COMMAND_HANDLER(handle_target_reset_nag)
6249 {
6250 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6251 &target_reset_nag, "Nag after each reset about options to improve "
6252 "performance");
6253 }
6254
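/* 'ps': list the tasks of the current target's RTOS by delegating to the
 * RTOS type's ps_command() hook, when one is available. */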
6255 COMMAND_HANDLER(handle_ps_command)
6256 {
6257 struct target *target = get_current_target(CMD_CTX);
6258 char *display;
6259 if (target->state != TARGET_HALTED) {
6260 LOG_INFO("target not halted !!");
6261 return ERROR_OK;
6262 }
6263
6264 if ((target->rtos) && (target->rtos->type)
6265 && (target->rtos->type->ps_command)) {
6266 display = target->rtos->type->ps_command(target);
6267 command_print(CMD, "%s", display);
6268 free(display);
6269 return ERROR_OK;
6270 } else {
6271 LOG_INFO("failed");
6272 return ERROR_TARGET_FAILURE;
6273 }
6274 }
6275
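/* Print an optional label followed by a hex dump of 'buf', then terminate the line. */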
6276 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6277 {
6278 if (text != NULL)
6279 command_print_sameline(cmd, "%s", text);
6280 for (int i = 0; i < size; i++)
6281 command_print_sameline(cmd, " %02x", buf[i]);
6282 command_print(cmd, " ");
6283 }
6284
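/*
 * 'test_mem_access <size>': allocate a working area, fill it with a random
 * pattern and exercise target_read_memory()/target_write_memory() for every
 * combination of access size (1/2/4 bytes), target offset (0..3) and host
 * buffer alignment, comparing each result against a host-side replay of the
 * same operation and reporting the throughput.
 */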
6285 COMMAND_HANDLER(handle_test_mem_access_command)
6286 {
6287 struct target *target = get_current_target(CMD_CTX);
6288 uint32_t test_size;
6289 int retval = ERROR_OK;
6290
6291 if (target->state != TARGET_HALTED) {
6292 LOG_INFO("target not halted !!");
6293 return ERROR_FAIL;
6294 }
6295
6296 if (CMD_ARGC != 1)
6297 return ERROR_COMMAND_SYNTAX_ERROR;
6298
6299 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6300
6301 /* Test reads */
6302 size_t num_bytes = test_size + 4;
6303
6304 struct working_area *wa = NULL;
6305 retval = target_alloc_working_area(target, num_bytes, &wa);
6306 if (retval != ERROR_OK) {
6307 LOG_ERROR("Not enough working area");
6308 return ERROR_FAIL;
6309 }
6310
6311 uint8_t *test_pattern = malloc(num_bytes);
if (test_pattern == NULL) {
LOG_ERROR("Out of memory");
target_free_working_area(target, wa);
return ERROR_FAIL;
}
6312 
6313 for (size_t i = 0; i < num_bytes; i++)
6314 test_pattern[i] = rand();
6315
6316 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6317 if (retval != ERROR_OK) {
6318 LOG_ERROR("Test pattern write failed");
6319 goto out;
6320 }
6321
6322 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6323 for (int size = 1; size <= 4; size *= 2) {
6324 for (int offset = 0; offset < 4; offset++) {
6325 uint32_t count = test_size / size;
6326 size_t host_bufsiz = (count + 2) * size + host_offset;
6327 uint8_t *read_ref = malloc(host_bufsiz);
6328 uint8_t *read_buf = malloc(host_bufsiz);
6329
6330 for (size_t i = 0; i < host_bufsiz; i++) {
6331 read_ref[i] = rand();
6332 read_buf[i] = read_ref[i];
6333 }
6334 command_print_sameline(CMD,
6335 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6336 size, offset, host_offset ? "un" : "");
6337
6338 struct duration bench;
6339 duration_start(&bench);
6340
6341 retval = target_read_memory(target, wa->address + offset, size, count,
6342 read_buf + size + host_offset);
6343
6344 duration_measure(&bench);
6345
6346 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6347 command_print(CMD, "Unsupported alignment");
6348 goto next;
6349 } else if (retval != ERROR_OK) {
6350 command_print(CMD, "Memory read failed");
6351 goto next;
6352 }
6353
6354 /* replay on host */
6355 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6356
6357 /* check result */
6358 int result = memcmp(read_ref, read_buf, host_bufsiz);
6359 if (result == 0) {
6360 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6361 duration_elapsed(&bench),
6362 duration_kbps(&bench, count * size));
6363 } else {
6364 command_print(CMD, "Compare failed");
6365 binprint(CMD, "ref:", read_ref, host_bufsiz);
6366 binprint(CMD, "buf:", read_buf, host_bufsiz);
6367 }
6368 next:
6369 free(read_ref);
6370 free(read_buf);
6371 }
6372 }
6373 }
6374
6375 out:
6376 free(test_pattern);
6377
6378 if (wa != NULL)
6379 target_free_working_area(target, wa);
6380
6381 /* Test writes */
6382 num_bytes = test_size + 4 + 4 + 4;
6383
6384 retval = target_alloc_working_area(target, num_bytes, &wa);
6385 if (retval != ERROR_OK) {
6386 LOG_ERROR("Not enough working area");
6387 return ERROR_FAIL;
6388 }
6389
6390 test_pattern = malloc(num_bytes);
if (test_pattern == NULL) {
LOG_ERROR("Out of memory");
target_free_working_area(target, wa);
return ERROR_FAIL;
}
6391 
6392 for (size_t i = 0; i < num_bytes; i++)
6393 test_pattern[i] = rand();
6394
6395 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6396 for (int size = 1; size <= 4; size *= 2) {
6397 for (int offset = 0; offset < 4; offset++) {
6398 uint32_t count = test_size / size;
6399 size_t host_bufsiz = count * size + host_offset;
6400 uint8_t *read_ref = malloc(num_bytes);
6401 uint8_t *read_buf = malloc(num_bytes);
6402 uint8_t *write_buf = malloc(host_bufsiz);
6403
6404 for (size_t i = 0; i < host_bufsiz; i++)
6405 write_buf[i] = rand();
6406 command_print_sameline(CMD,
6407 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6408 size, offset, host_offset ? "un" : "");
6409
6410 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6411 if (retval != ERROR_OK) {
6412 command_print(CMD, "Test pattern write failed");
6413 goto nextw;
6414 }
6415
6416 /* replay on host */
6417 memcpy(read_ref, test_pattern, num_bytes);
6418 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6419
6420 struct duration bench;
6421 duration_start(&bench);
6422
6423 retval = target_write_memory(target, wa->address + size + offset, size, count,
6424 write_buf + host_offset);
6425
6426 duration_measure(&bench);
6427
6428 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6429 command_print(CMD, "Unsupported alignment");
6430 goto nextw;
6431 } else if (retval != ERROR_OK) {
6432 command_print(CMD, "Memory write failed");
6433 goto nextw;
6434 }
6435
6436 /* read back */
6437 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6438 if (retval != ERROR_OK) {
6439 command_print(CMD, "Test pattern write failed");
6440 goto nextw;
6441 }
6442
6443 /* check result */
6444 int result = memcmp(read_ref, read_buf, num_bytes);
6445 if (result == 0) {
6446 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6447 duration_elapsed(&bench),
6448 duration_kbps(&bench, count * size));
6449 } else {
6450 command_print(CMD, "Compare failed");
6451 binprint(CMD, "ref:", read_ref, num_bytes);
6452 binprint(CMD, "buf:", read_buf, num_bytes);
6453 }
6454 nextw:
6455 free(read_ref);
6456 free(read_buf);
6457 }
6458 }
6459 }
6460
6461 free(test_pattern);
6462
6463 if (wa != NULL)
6464 target_free_working_area(target, wa);
6465 return retval;
6466 }
6467
6468 static const struct command_registration target_exec_command_handlers[] = {
6469 {
6470 .name = "fast_load_image",
6471 .handler = handle_fast_load_image_command,
6472 .mode = COMMAND_ANY,
6473 .help = "Load image into server memory for later use by "
6474 "fast_load; primarily for profiling",
6475 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6476 "[min_address [max_length]]",
6477 },
6478 {
6479 .name = "fast_load",
6480 .handler = handle_fast_load_command,
6481 .mode = COMMAND_EXEC,
6482 .help = "loads active fast load image to current target "
6483 "- mainly for profiling purposes",
6484 .usage = "",
6485 },
6486 {
6487 .name = "profile",
6488 .handler = handle_profile_command,
6489 .mode = COMMAND_EXEC,
6490 .usage = "seconds filename [start end]",
6491 .help = "profiling samples the CPU PC",
6492 },
6493 /** @todo don't register virt2phys() unless target supports it */
6494 {
6495 .name = "virt2phys",
6496 .handler = handle_virt2phys_command,
6497 .mode = COMMAND_ANY,
6498 .help = "translate a virtual address into a physical address",
6499 .usage = "virtual_address",
6500 },
6501 {
6502 .name = "reg",
6503 .handler = handle_reg_command,
6504 .mode = COMMAND_EXEC,
6505 .help = "display (reread from target with \"force\") or set a register; "
6506 "with no arguments, displays all registers and their values",
6507 .usage = "[(register_number|register_name) [(value|'force')]]",
6508 },
6509 {
6510 .name = "poll",
6511 .handler = handle_poll_command,
6512 .mode = COMMAND_EXEC,
6513 .help = "poll target state; or reconfigure background polling",
6514 .usage = "['on'|'off']",
6515 },
6516 {
6517 .name = "wait_halt",
6518 .handler = handle_wait_halt_command,
6519 .mode = COMMAND_EXEC,
6520 .help = "wait up to the specified number of milliseconds "
6521 "(default 5000) for a previously requested halt",
6522 .usage = "[milliseconds]",
6523 },
6524 {
6525 .name = "halt",
6526 .handler = handle_halt_command,
6527 .mode = COMMAND_EXEC,
6528 .help = "request target to halt, then wait up to the specified "
6529 "number of milliseconds (default 5000) for it to complete",
6530 .usage = "[milliseconds]",
6531 },
6532 {
6533 .name = "resume",
6534 .handler = handle_resume_command,
6535 .mode = COMMAND_EXEC,
6536 .help = "resume target execution from current PC or address",
6537 .usage = "[address]",
6538 },
6539 {
6540 .name = "reset",
6541 .handler = handle_reset_command,
6542 .mode = COMMAND_EXEC,
6543 .usage = "[run|halt|init]",
6544 .help = "Reset all targets into the specified mode. "
6545 "Default reset mode is run, if not given.",
6546 },
6547 {
6548 .name = "soft_reset_halt",
6549 .handler = handle_soft_reset_halt_command,
6550 .mode = COMMAND_EXEC,
6551 .usage = "",
6552 .help = "halt the target and do a soft reset",
6553 },
6554 {
6555 .name = "step",
6556 .handler = handle_step_command,
6557 .mode = COMMAND_EXEC,
6558 .help = "step one instruction from current PC or address",
6559 .usage = "[address]",
6560 },
6561 {
6562 .name = "mdd",
6563 .handler = handle_md_command,
6564 .mode = COMMAND_EXEC,
6565 .help = "display memory double-words",
6566 .usage = "['phys'] address [count]",
6567 },
6568 {
6569 .name = "mdw",
6570 .handler = handle_md_command,
6571 .mode = COMMAND_EXEC,
6572 .help = "display memory words",
6573 .usage = "['phys'] address [count]",
6574 },
6575 {
6576 .name = "mdh",
6577 .handler = handle_md_command,
6578 .mode = COMMAND_EXEC,
6579 .help = "display memory half-words",
6580 .usage = "['phys'] address [count]",
6581 },
6582 {
6583 .name = "mdb",
6584 .handler = handle_md_command,
6585 .mode = COMMAND_EXEC,
6586 .help = "display memory bytes",
6587 .usage = "['phys'] address [count]",
6588 },
6589 {
6590 .name = "mwd",
6591 .handler = handle_mw_command,
6592 .mode = COMMAND_EXEC,
6593 .help = "write memory double-word",
6594 .usage = "['phys'] address value [count]",
6595 },
6596 {
6597 .name = "mww",
6598 .handler = handle_mw_command,
6599 .mode = COMMAND_EXEC,
6600 .help = "write memory word",
6601 .usage = "['phys'] address value [count]",
6602 },
6603 {
6604 .name = "mwh",
6605 .handler = handle_mw_command,
6606 .mode = COMMAND_EXEC,
6607 .help = "write memory half-word",
6608 .usage = "['phys'] address value [count]",
6609 },
6610 {
6611 .name = "mwb",
6612 .handler = handle_mw_command,
6613 .mode = COMMAND_EXEC,
6614 .help = "write memory byte",
6615 .usage = "['phys'] address value [count]",
6616 },
6617 {
6618 .name = "bp",
6619 .handler = handle_bp_command,
6620 .mode = COMMAND_EXEC,
6621 .help = "list or set hardware or software breakpoint",
6622 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6623 },
6624 {
6625 .name = "rbp",
6626 .handler = handle_rbp_command,
6627 .mode = COMMAND_EXEC,
6628 .help = "remove breakpoint",
6629 .usage = "'all' | address",
6630 },
6631 {
6632 .name = "wp",
6633 .handler = handle_wp_command,
6634 .mode = COMMAND_EXEC,
6635 .help = "list (no params) or create watchpoints",
6636 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6637 },
6638 {
6639 .name = "rwp",
6640 .handler = handle_rwp_command,
6641 .mode = COMMAND_EXEC,
6642 .help = "remove watchpoint",
6643 .usage = "address",
6644 },
6645 {
6646 .name = "load_image",
6647 .handler = handle_load_image_command,
6648 .mode = COMMAND_EXEC,
6649 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6650 "[min_address] [max_length]",
6651 },
6652 {
6653 .name = "dump_image",
6654 .handler = handle_dump_image_command,
6655 .mode = COMMAND_EXEC,
6656 .usage = "filename address size",
6657 },
6658 {
6659 .name = "verify_image_checksum",
6660 .handler = handle_verify_image_checksum_command,
6661 .mode = COMMAND_EXEC,
6662 .usage = "filename [offset [type]]",
6663 },
6664 {
6665 .name = "verify_image",
6666 .handler = handle_verify_image_command,
6667 .mode = COMMAND_EXEC,
6668 .usage = "filename [offset [type]]",
6669 },
6670 {
6671 .name = "test_image",
6672 .handler = handle_test_image_command,
6673 .mode = COMMAND_EXEC,
6674 .usage = "filename [offset [type]]",
6675 },
6676 {
6677 .name = "mem2array",
6678 .mode = COMMAND_EXEC,
6679 .jim_handler = jim_mem2array,
6680 .help = "read 8/16/32 bit memory and return as a TCL array "
6681 "for script processing",
6682 .usage = "arrayname bitwidth address count",
6683 },
6684 {
6685 .name = "array2mem",
6686 .mode = COMMAND_EXEC,
6687 .jim_handler = jim_array2mem,
6688 .help = "convert a TCL array to memory locations "
6689 "and write the 8/16/32 bit values",
6690 .usage = "arrayname bitwidth address count",
6691 },
6692 {
6693 .name = "reset_nag",
6694 .handler = handle_target_reset_nag,
6695 .mode = COMMAND_ANY,
6696 .help = "Nag after each reset about options that could have been "
6697 "enabled to improve performance.",
6698 .usage = "['enable'|'disable']",
6699 },
6700 {
6701 .name = "ps",
6702 .handler = handle_ps_command,
6703 .mode = COMMAND_EXEC,
6704 .help = "list all tasks",
6705 .usage = "",
6706 },
6707 {
6708 .name = "test_mem_access",
6709 .handler = handle_test_mem_access_command,
6710 .mode = COMMAND_EXEC,
6711 .help = "Test the target's memory access functions",
6712 .usage = "size",
6713 },
6714
6715 COMMAND_REGISTRATION_DONE
6716 };
6717 static int target_register_user_commands(struct command_context *cmd_ctx)
6718 {
6719 int retval = ERROR_OK;
6720 retval = target_request_register_commands(cmd_ctx);
6721 if (retval != ERROR_OK)
6722 return retval;
6723
6724 retval = trace_register_commands(cmd_ctx);
6725 if (retval != ERROR_OK)
6726 return retval;
6727
6728
6729 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6730 }
