target: use target_event_name()
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59
60 /* default halt wait timeout (ms) */
61 #define DEFAULT_HALT_TIMEOUT 5000
62
63 static int target_read_buffer_default(struct target *target, target_addr_t address,
64 uint32_t count, uint8_t *buffer);
65 static int target_write_buffer_default(struct target *target, target_addr_t address,
66 uint32_t count, const uint8_t *buffer);
67 static int target_array2mem(Jim_Interp *interp, struct target *target,
68 int argc, Jim_Obj * const *argv);
69 static int target_mem2array(Jim_Interp *interp, struct target *target,
70 int argc, Jim_Obj * const *argv);
71 static int target_register_user_commands(struct command_context *cmd_ctx);
72 static int target_get_gdb_fileio_info_default(struct target *target,
73 struct gdb_fileio_info *fileio_info);
74 static int target_gdb_fileio_end_default(struct target *target, int retcode,
75 int fileio_errno, bool ctrl_c);
76
77 /* targets */
78 extern struct target_type arm7tdmi_target;
79 extern struct target_type arm720t_target;
80 extern struct target_type arm9tdmi_target;
81 extern struct target_type arm920t_target;
82 extern struct target_type arm966e_target;
83 extern struct target_type arm946e_target;
84 extern struct target_type arm926ejs_target;
85 extern struct target_type fa526_target;
86 extern struct target_type feroceon_target;
87 extern struct target_type dragonite_target;
88 extern struct target_type xscale_target;
89 extern struct target_type cortexm_target;
90 extern struct target_type cortexa_target;
91 extern struct target_type aarch64_target;
92 extern struct target_type cortexr4_target;
93 extern struct target_type arm11_target;
94 extern struct target_type ls1_sap_target;
95 extern struct target_type mips_m4k_target;
96 extern struct target_type mips_mips64_target;
97 extern struct target_type avr_target;
98 extern struct target_type dsp563xx_target;
99 extern struct target_type dsp5680xx_target;
100 extern struct target_type testee_target;
101 extern struct target_type avr32_ap7k_target;
102 extern struct target_type hla_target;
103 extern struct target_type nds32_v2_target;
104 extern struct target_type nds32_v3_target;
105 extern struct target_type nds32_v3m_target;
106 extern struct target_type or1k_target;
107 extern struct target_type quark_x10xx_target;
108 extern struct target_type quark_d20xx_target;
109 extern struct target_type stm8_target;
110 extern struct target_type riscv_target;
111 extern struct target_type mem_ap_target;
112 extern struct target_type esirisc_target;
113 extern struct target_type arcv2_target;
114
/* All target drivers compiled into this build, looked up by name when a
 * target is created with the 'target create' command. NULL-terminated.
 * Note: the order here intentionally differs from the extern declaration
 * order above; new entries are appended at the end. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};
154
155 struct target *all_targets;
156 static struct target_event_callback *target_event_callbacks;
157 static struct target_timer_callback *target_timer_callbacks;
158 static int64_t target_timer_next_event_value;
159 static LIST_HEAD(target_reset_callback_list);
160 static LIST_HEAD(target_trace_callback_list);
161 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
162
/* Tcl spellings accepted for reset-line assert/deassert arguments;
 * "T"/"t" assert, "F"/"f" deassert. NULL name terminates the table. */
static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
172
/* Maps ERROR_TARGET_* codes to short printable names; used by
 * target_strerror_safe(). NULL name terminates the table. */
static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
187
188 static const char *target_strerror_safe(int err)
189 {
190 const struct jim_nvp *n;
191
192 n = jim_nvp_value2name_simple(nvp_error_target, err);
193 if (!n->name)
194 return "unknown";
195 else
196 return n->name;
197 }
198
/* Maps TARGET_EVENT_* values to the event names visible in Tcl event
 * handlers (e.g. "$target configure -event reset-init { ... }").
 * NULL name terminates the table. */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START,         .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE,    .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT,        .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST,   .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE,  .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT,          .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END,           .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .name = NULL, .value = -1 }
};
241
/* Printable names for the TARGET_* run states; see target_state_name(). */
static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted",  .value = TARGET_HALTED },
	{ .name = "reset",   .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
250
/* Printable names for DBG_REASON_* codes; see debug_reason_name(). */
static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
263
/* Endianness keywords accepted by configuration commands; both the long
 * ("big"/"little") and short ("be"/"le") spellings are recognized. */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big",    .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be",     .value = TARGET_BIG_ENDIAN },
	{ .name = "le",     .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL,     .value = -1 },
};
271
/* Reset mode keywords for the 'reset' command; the name is forwarded to
 * the Tcl proc ocd_process_reset by target_process_reset(). */
static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run",     .value = RESET_RUN },
	{ .name = "halt",    .value = RESET_HALT },
	{ .name = "init",    .value = RESET_INIT },
	{ .name = NULL,      .value = -1 },
};
279
280 const char *debug_reason_name(struct target *t)
281 {
282 const char *cp;
283
284 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
285 t->debug_reason)->name;
286 if (!cp) {
287 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
288 cp = "(*BUG*unknown*BUG*)";
289 }
290 return cp;
291 }
292
293 const char *target_state_name(struct target *t)
294 {
295 const char *cp;
296 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
297 if (!cp) {
298 LOG_ERROR("Invalid target state: %d", (int)(t->state));
299 cp = "(*BUG*unknown*BUG*)";
300 }
301
302 if (!target_was_examined(t) && t->defer_examine)
303 cp = "examine deferred";
304
305 return cp;
306 }
307
308 const char *target_event_name(enum target_event event)
309 {
310 const char *cp;
311 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
312 if (!cp) {
313 LOG_ERROR("Invalid target event: %d", (int)(event));
314 cp = "(*BUG*unknown*BUG*)";
315 }
316 return cp;
317 }
318
319 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
320 {
321 const char *cp;
322 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
323 if (!cp) {
324 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
325 cp = "(*BUG*unknown*BUG*)";
326 }
327 return cp;
328 }
329
330 /* determine the number of the new target */
331 static int new_target_number(void)
332 {
333 struct target *t;
334 int x;
335
336 /* number is 0 based */
337 x = -1;
338 t = all_targets;
339 while (t) {
340 if (x < t->target_number)
341 x = t->target_number;
342 t = t->next;
343 }
344 return x + 1;
345 }
346
347 static void append_to_list_all_targets(struct target *target)
348 {
349 struct target **t = &all_targets;
350
351 while (*t)
352 t = &((*t)->next);
353 *t = target;
354 }
355
356 /* read a uint64_t from a buffer in target memory endianness */
357 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
358 {
359 if (target->endianness == TARGET_LITTLE_ENDIAN)
360 return le_to_h_u64(buffer);
361 else
362 return be_to_h_u64(buffer);
363 }
364
365 /* read a uint32_t from a buffer in target memory endianness */
366 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
367 {
368 if (target->endianness == TARGET_LITTLE_ENDIAN)
369 return le_to_h_u32(buffer);
370 else
371 return be_to_h_u32(buffer);
372 }
373
374 /* read a uint24_t from a buffer in target memory endianness */
375 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
376 {
377 if (target->endianness == TARGET_LITTLE_ENDIAN)
378 return le_to_h_u24(buffer);
379 else
380 return be_to_h_u24(buffer);
381 }
382
383 /* read a uint16_t from a buffer in target memory endianness */
384 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
385 {
386 if (target->endianness == TARGET_LITTLE_ENDIAN)
387 return le_to_h_u16(buffer);
388 else
389 return be_to_h_u16(buffer);
390 }
391
392 /* write a uint64_t to a buffer in target memory endianness */
393 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
394 {
395 if (target->endianness == TARGET_LITTLE_ENDIAN)
396 h_u64_to_le(buffer, value);
397 else
398 h_u64_to_be(buffer, value);
399 }
400
401 /* write a uint32_t to a buffer in target memory endianness */
402 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
403 {
404 if (target->endianness == TARGET_LITTLE_ENDIAN)
405 h_u32_to_le(buffer, value);
406 else
407 h_u32_to_be(buffer, value);
408 }
409
410 /* write a uint24_t to a buffer in target memory endianness */
411 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
412 {
413 if (target->endianness == TARGET_LITTLE_ENDIAN)
414 h_u24_to_le(buffer, value);
415 else
416 h_u24_to_be(buffer, value);
417 }
418
419 /* write a uint16_t to a buffer in target memory endianness */
420 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
421 {
422 if (target->endianness == TARGET_LITTLE_ENDIAN)
423 h_u16_to_le(buffer, value);
424 else
425 h_u16_to_be(buffer, value);
426 }
427
/* write a uint8_t to a buffer; single bytes have no endianness, so the
 * target argument is unused */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	buffer[0] = value;
}
433
/* read a uint64_t array from a buffer in target memory endianness
 * (the original comment said "write ... to a buffer", which was wrong:
 * this decodes target-endian bytes from 'buffer' into host-order 'dstbuf') */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
}
441
/* read a uint32_t array from a buffer in target memory endianness
 * (corrected comment: this decodes from 'buffer' into 'dstbuf') */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}
449
/* read a uint16_t array from a buffer in target memory endianness
 * (corrected comment: this decodes from 'buffer' into 'dstbuf') */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}
457
/* write a uint64_t array to a buffer in target memory endianness */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
}
465
/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
}
473
/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
}
481
482 /* return a pointer to a configured target; id is name or number */
483 struct target *get_target(const char *id)
484 {
485 struct target *target;
486
487 /* try as tcltarget name */
488 for (target = all_targets; target; target = target->next) {
489 if (!target_name(target))
490 continue;
491 if (strcmp(id, target_name(target)) == 0)
492 return target;
493 }
494
495 /* It's OK to remove this fallback sometime after August 2010 or so */
496
497 /* no match, try as number */
498 unsigned num;
499 if (parse_uint(id, &num) != ERROR_OK)
500 return NULL;
501
502 for (target = all_targets; target; target = target->next) {
503 if (target->target_number == (int)num) {
504 LOG_WARNING("use '%s' as target identifier, not '%u'",
505 target_name(target), num);
506 return target;
507 }
508 }
509
510 return NULL;
511 }
512
513 /* returns a pointer to the n-th configured target */
514 struct target *get_target_by_num(int num)
515 {
516 struct target *target = all_targets;
517
518 while (target) {
519 if (target->target_number == num)
520 return target;
521 target = target->next;
522 }
523
524 return NULL;
525 }
526
/* Return the current target; a missing current target indicates internal
 * state corruption, so abort the process rather than continue. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *current = get_current_target_or_null(cmd_ctx);

	if (!current) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return current;
}
538
539 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
540 {
541 return cmd_ctx->current_target_override
542 ? cmd_ctx->current_target_override
543 : cmd_ctx->current_target;
544 }
545
546 int target_poll(struct target *target)
547 {
548 int retval;
549
550 /* We can't poll until after examine */
551 if (!target_was_examined(target)) {
552 /* Fail silently lest we pollute the log */
553 return ERROR_FAIL;
554 }
555
556 retval = target->type->poll(target);
557 if (retval != ERROR_OK)
558 return retval;
559
560 if (target->halt_issued) {
561 if (target->state == TARGET_HALTED)
562 target->halt_issued = false;
563 else {
564 int64_t t = timeval_ms() - target->halt_issued_time;
565 if (t > DEFAULT_HALT_TIMEOUT) {
566 target->halt_issued = false;
567 LOG_INFO("Halt timed out, wake up GDB.");
568 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
569 }
570 }
571 }
572
573 return ERROR_OK;
574 }
575
576 int target_halt(struct target *target)
577 {
578 int retval;
579 /* We can't poll until after examine */
580 if (!target_was_examined(target)) {
581 LOG_ERROR("Target not examined yet");
582 return ERROR_FAIL;
583 }
584
585 retval = target->type->halt(target);
586 if (retval != ERROR_OK)
587 return retval;
588
589 target->halt_issued = true;
590 target->halt_issued_time = timeval_ms();
591
592 return ERROR_OK;
593 }
594
595 /**
596 * Make the target (re)start executing using its saved execution
597 * context (possibly with some modifications).
598 *
599 * @param target Which target should start executing.
600 * @param current True to use the target's saved program counter instead
601 * of the address parameter
602 * @param address Optionally used as the program counter.
603 * @param handle_breakpoints True iff breakpoints at the resumption PC
604 * should be skipped. (For example, maybe execution was stopped by
605 * such a breakpoint, in which case it would be counterproductive to
606 * let it re-trigger.
607 * @param debug_execution False if all working areas allocated by OpenOCD
608 * should be released and/or restored to their original contents.
609 * (This would for example be true to run some downloaded "helper"
610 * algorithm code, which resides in one such working buffer and uses
611 * another for data storage.)
612 *
613 * @todo Resolve the ambiguity about what the "debug_execution" flag
614 * signifies. For example, Target implementations don't agree on how
615 * it relates to invalidation of the register cache, or to whether
616 * breakpoints and watchpoints should be enabled. (It would seem wrong
617 * to enable breakpoints when running downloaded "helper" algorithms
618 * (debug_execution true), since the breakpoints would be set to match
619 * target firmware being debugged, not the helper algorithm.... and
620 * enabling them could cause such helpers to malfunction (for example,
621 * by overwriting data with a breakpoint instruction. On the other
622 * hand the infrastructure for running such helpers might use this
623 * procedure but rely on hardware breakpoint to detect termination.)
624 */
625 int target_resume(struct target *target, int current, target_addr_t address,
626 int handle_breakpoints, int debug_execution)
627 {
628 int retval;
629
630 /* We can't poll until after examine */
631 if (!target_was_examined(target)) {
632 LOG_ERROR("Target not examined yet");
633 return ERROR_FAIL;
634 }
635
636 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
637
638 /* note that resume *must* be asynchronous. The CPU can halt before
639 * we poll. The CPU can even halt at the current PC as a result of
640 * a software breakpoint being inserted by (a bug?) the application.
641 */
642 /*
643 * resume() triggers the event 'resumed'. The execution of TCL commands
644 * in the event handler causes the polling of targets. If the target has
645 * already halted for a breakpoint, polling will run the 'halted' event
646 * handler before the pending 'resumed' handler.
647 * Disable polling during resume() to guarantee the execution of handlers
648 * in the correct order.
649 */
650 bool save_poll = jtag_poll_get_enabled();
651 jtag_poll_set_enabled(false);
652 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
653 jtag_poll_set_enabled(save_poll);
654 if (retval != ERROR_OK)
655 return retval;
656
657 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
658
659 return retval;
660 }
661
662 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
663 {
664 char buf[100];
665 int retval;
666 struct jim_nvp *n;
667 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
668 if (!n->name) {
669 LOG_ERROR("invalid reset mode");
670 return ERROR_FAIL;
671 }
672
673 struct target *target;
674 for (target = all_targets; target; target = target->next)
675 target_call_reset_callbacks(target, reset_mode);
676
677 /* disable polling during reset to make reset event scripts
678 * more predictable, i.e. dr/irscan & pathmove in events will
679 * not have JTAG operations injected into the middle of a sequence.
680 */
681 bool save_poll = jtag_poll_get_enabled();
682
683 jtag_poll_set_enabled(false);
684
685 sprintf(buf, "ocd_process_reset %s", n->name);
686 retval = Jim_Eval(cmd->ctx->interp, buf);
687
688 jtag_poll_set_enabled(save_poll);
689
690 if (retval != JIM_OK) {
691 Jim_MakeErrorMessage(cmd->ctx->interp);
692 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
693 return ERROR_FAIL;
694 }
695
696 /* We want any events to be processed before the prompt */
697 retval = target_call_timer_callbacks_now();
698
699 for (target = all_targets; target; target = target->next) {
700 target->type->check_reset(target);
701 target->running_alg = false;
702 }
703
704 return retval;
705 }
706
/* Default virt2phys hook for targets without address translation:
 * the physical address equals the virtual address. */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
713
/* Default mmu-query hook for targets without an MMU: always reports
 * the MMU as disabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
719
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	/* also used by target_examine_one() to roll back on examine failure */
	target->examined = false;
}
728
/* Default examine hook: nothing to probe, just mark the target examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
734
/* no check by default */
static int default_check_reset(struct target *target)
{
	/* default check_reset hook: nothing to verify after a reset */
	return ERROR_OK;
}
740
741 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
742 * Keep in sync */
743 int target_examine_one(struct target *target)
744 {
745 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
746
747 int retval = target->type->examine(target);
748 if (retval != ERROR_OK) {
749 target_reset_examined(target);
750 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
751 return retval;
752 }
753
754 target_set_examined(target);
755 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
756
757 return ERROR_OK;
758 }
759
760 static int jtag_enable_callback(enum jtag_event event, void *priv)
761 {
762 struct target *target = priv;
763
764 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
765 return ERROR_OK;
766
767 jtag_unregister_event_callback(jtag_enable_callback, target);
768
769 return target_examine_one(target);
770 }
771
772 /* Targets that correctly implement init + examine, i.e.
773 * no communication with target during init:
774 *
775 * XScale
776 */
777 int target_examine(void)
778 {
779 int retval = ERROR_OK;
780 struct target *target;
781
782 for (target = all_targets; target; target = target->next) {
783 /* defer examination, but don't skip it */
784 if (!target->tap->enabled) {
785 jtag_register_event_callback(jtag_enable_callback,
786 target);
787 continue;
788 }
789
790 if (target->defer_examine)
791 continue;
792
793 int retval2 = target_examine_one(target);
794 if (retval2 != ERROR_OK) {
795 LOG_WARNING("target %s examination failed", target_name(target));
796 retval = retval2;
797 }
798 }
799 return retval;
800 }
801
/* Return the name string of this target's type (driver). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
806
/* Issue a soft reset-and-halt, if the target type provides the hook.
 * Requires a prior successful examine. */
static int target_soft_reset_halt(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt(target);
}
820
821 /**
822 * Downloads a target-specific native code algorithm to the target,
823 * and executes it. * Note that some targets may need to set up, enable,
824 * and tear down a breakpoint (hard or * soft) to detect algorithm
825 * termination, while others may support lower overhead schemes where
826 * soft breakpoints embedded in the algorithm automatically terminate the
827 * algorithm.
828 *
829 * @param target used to run the algorithm
830 * @param num_mem_params
831 * @param mem_params
832 * @param num_reg_params
833 * @param reg_param
834 * @param entry_point
835 * @param exit_point
836 * @param timeout_ms
837 * @param arch_info target-specific description of the algorithm.
838 */
839 int target_run_algorithm(struct target *target,
840 int num_mem_params, struct mem_param *mem_params,
841 int num_reg_params, struct reg_param *reg_param,
842 target_addr_t entry_point, target_addr_t exit_point,
843 int timeout_ms, void *arch_info)
844 {
845 int retval = ERROR_FAIL;
846
847 if (!target_was_examined(target)) {
848 LOG_ERROR("Target not examined yet");
849 goto done;
850 }
851 if (!target->type->run_algorithm) {
852 LOG_ERROR("Target type '%s' does not support %s",
853 target_type_name(target), __func__);
854 goto done;
855 }
856
857 target->running_alg = true;
858 retval = target->type->run_algorithm(target,
859 num_mem_params, mem_params,
860 num_reg_params, reg_param,
861 entry_point, exit_point, timeout_ms, arch_info);
862 target->running_alg = false;
863
864 done:
865 return retval;
866 }
867
868 /**
869 * Executes a target-specific native code algorithm and leaves it running.
870 *
871 * @param target used to run the algorithm
872 * @param num_mem_params
873 * @param mem_params
874 * @param num_reg_params
875 * @param reg_params
876 * @param entry_point
877 * @param exit_point
878 * @param arch_info target-specific description of the algorithm.
879 */
880 int target_start_algorithm(struct target *target,
881 int num_mem_params, struct mem_param *mem_params,
882 int num_reg_params, struct reg_param *reg_params,
883 target_addr_t entry_point, target_addr_t exit_point,
884 void *arch_info)
885 {
886 int retval = ERROR_FAIL;
887
888 if (!target_was_examined(target)) {
889 LOG_ERROR("Target not examined yet");
890 goto done;
891 }
892 if (!target->type->start_algorithm) {
893 LOG_ERROR("Target type '%s' does not support %s",
894 target_type_name(target), __func__);
895 goto done;
896 }
897 if (target->running_alg) {
898 LOG_ERROR("Target is already running an algorithm");
899 goto done;
900 }
901
902 target->running_alg = true;
903 retval = target->type->start_algorithm(target,
904 num_mem_params, mem_params,
905 num_reg_params, reg_params,
906 entry_point, exit_point, arch_info);
907
908 done:
909 return retval;
910 }
911
912 /**
913 * Waits for an algorithm started with target_start_algorithm() to complete.
914 *
915 * @param target used to run the algorithm
916 * @param num_mem_params
917 * @param mem_params
918 * @param num_reg_params
919 * @param reg_params
920 * @param exit_point
921 * @param timeout_ms
922 * @param arch_info target-specific description of the algorithm.
923 */
924 int target_wait_algorithm(struct target *target,
925 int num_mem_params, struct mem_param *mem_params,
926 int num_reg_params, struct reg_param *reg_params,
927 target_addr_t exit_point, int timeout_ms,
928 void *arch_info)
929 {
930 int retval = ERROR_FAIL;
931
932 if (!target->type->wait_algorithm) {
933 LOG_ERROR("Target type '%s' does not support %s",
934 target_type_name(target), __func__);
935 goto done;
936 }
937 if (!target->running_alg) {
938 LOG_ERROR("Target is not running an algorithm");
939 goto done;
940 }
941
942 retval = target->type->wait_algorithm(target,
943 num_mem_params, mem_params,
944 num_reg_params, reg_params,
945 exit_point, timeout_ms, arch_info);
946 if (retval != ERROR_TARGET_TIMEOUT)
947 target->running_alg = false;
948
949 done:
950 return retval;
951 }
952
953 /**
954 * Streams data to a circular buffer on target intended for consumption by code
955 * running asynchronously on target.
956 *
957 * This is intended for applications where target-specific native code runs
958 * on the target, receives data from the circular buffer, does something with
959 * it (most likely writing it to a flash memory), and advances the circular
960 * buffer pointer.
961 *
962 * This assumes that the helper algorithm has already been loaded to the target,
963 * but has not been started yet. Given memory and register parameters are passed
964 * to the algorithm.
965 *
966 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
967 * following format:
968 *
969 * [buffer_start + 0, buffer_start + 4):
970 * Write Pointer address (aka head). Written and updated by this
971 * routine when new data is written to the circular buffer.
972 * [buffer_start + 4, buffer_start + 8):
973 * Read Pointer address (aka tail). Updated by code running on the
974 * target after it consumes data.
975 * [buffer_start + 8, buffer_start + buffer_size):
976 * Circular buffer contents.
977 *
978 * See contrib/loaders/flash/stm32f1x.S for an example.
979 *
980 * @param target used to run the algorithm
981 * @param buffer address on the host where data to be sent is located
982 * @param count number of blocks to send
983 * @param block_size size in bytes of each block
984 * @param num_mem_params count of memory-based params to pass to algorithm
985 * @param mem_params memory-based params to pass to algorithm
986 * @param num_reg_params count of register-based params to pass to algorithm
987 * @param reg_params memory-based params to pass to algorithm
988 * @param buffer_start address on the target of the circular buffer structure
989 * @param buffer_size size of the circular buffer structure
990 * @param entry_point address on the target to execute to start the algorithm
991 * @param exit_point address at which to set a breakpoint to catch the
992 * end of the algorithm; can be 0 if target triggers a breakpoint itself
993 * @param arch_info
994 */
995
996 int target_run_flash_async_algorithm(struct target *target,
997 const uint8_t *buffer, uint32_t count, int block_size,
998 int num_mem_params, struct mem_param *mem_params,
999 int num_reg_params, struct reg_param *reg_params,
1000 uint32_t buffer_start, uint32_t buffer_size,
1001 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1002 {
1003 int retval;
1004 int timeout = 0;
1005
1006 const uint8_t *buffer_orig = buffer;
1007
1008 /* Set up working area. First word is write pointer, second word is read pointer,
1009 * rest is fifo data area. */
1010 uint32_t wp_addr = buffer_start;
1011 uint32_t rp_addr = buffer_start + 4;
1012 uint32_t fifo_start_addr = buffer_start + 8;
1013 uint32_t fifo_end_addr = buffer_start + buffer_size;
1014
1015 uint32_t wp = fifo_start_addr;
1016 uint32_t rp = fifo_start_addr;
1017
1018 /* validate block_size is 2^n */
1019 assert(IS_PWR_OF_2(block_size));
1020
1021 retval = target_write_u32(target, wp_addr, wp);
1022 if (retval != ERROR_OK)
1023 return retval;
1024 retval = target_write_u32(target, rp_addr, rp);
1025 if (retval != ERROR_OK)
1026 return retval;
1027
1028 /* Start up algorithm on target and let it idle while writing the first chunk */
1029 retval = target_start_algorithm(target, num_mem_params, mem_params,
1030 num_reg_params, reg_params,
1031 entry_point,
1032 exit_point,
1033 arch_info);
1034
1035 if (retval != ERROR_OK) {
1036 LOG_ERROR("error starting target flash write algorithm");
1037 return retval;
1038 }
1039
1040 while (count > 0) {
1041
1042 retval = target_read_u32(target, rp_addr, &rp);
1043 if (retval != ERROR_OK) {
1044 LOG_ERROR("failed to get read pointer");
1045 break;
1046 }
1047
1048 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1049 (size_t) (buffer - buffer_orig), count, wp, rp);
1050
1051 if (rp == 0) {
1052 LOG_ERROR("flash write algorithm aborted by target");
1053 retval = ERROR_FLASH_OPERATION_FAILED;
1054 break;
1055 }
1056
1057 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1058 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1059 break;
1060 }
1061
1062 /* Count the number of bytes available in the fifo without
1063 * crossing the wrap around. Make sure to not fill it completely,
1064 * because that would make wp == rp and that's the empty condition. */
1065 uint32_t thisrun_bytes;
1066 if (rp > wp)
1067 thisrun_bytes = rp - wp - block_size;
1068 else if (rp > fifo_start_addr)
1069 thisrun_bytes = fifo_end_addr - wp;
1070 else
1071 thisrun_bytes = fifo_end_addr - wp - block_size;
1072
1073 if (thisrun_bytes == 0) {
1074 /* Throttle polling a bit if transfer is (much) faster than flash
1075 * programming. The exact delay shouldn't matter as long as it's
1076 * less than buffer size / flash speed. This is very unlikely to
1077 * run when using high latency connections such as USB. */
1078 alive_sleep(2);
1079
1080 /* to stop an infinite loop on some targets check and increment a timeout
1081 * this issue was observed on a stellaris using the new ICDI interface */
1082 if (timeout++ >= 2500) {
1083 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1084 return ERROR_FLASH_OPERATION_FAILED;
1085 }
1086 continue;
1087 }
1088
1089 /* reset our timeout */
1090 timeout = 0;
1091
1092 /* Limit to the amount of data we actually want to write */
1093 if (thisrun_bytes > count * block_size)
1094 thisrun_bytes = count * block_size;
1095
1096 /* Force end of large blocks to be word aligned */
1097 if (thisrun_bytes >= 16)
1098 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1099
1100 /* Write data to fifo */
1101 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1102 if (retval != ERROR_OK)
1103 break;
1104
1105 /* Update counters and wrap write pointer */
1106 buffer += thisrun_bytes;
1107 count -= thisrun_bytes / block_size;
1108 wp += thisrun_bytes;
1109 if (wp >= fifo_end_addr)
1110 wp = fifo_start_addr;
1111
1112 /* Store updated write pointer to target */
1113 retval = target_write_u32(target, wp_addr, wp);
1114 if (retval != ERROR_OK)
1115 break;
1116
1117 /* Avoid GDB timeouts */
1118 keep_alive();
1119 }
1120
1121 if (retval != ERROR_OK) {
1122 /* abort flash write algorithm on target */
1123 target_write_u32(target, wp_addr, 0);
1124 }
1125
1126 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1127 num_reg_params, reg_params,
1128 exit_point,
1129 10000,
1130 arch_info);
1131
1132 if (retval2 != ERROR_OK) {
1133 LOG_ERROR("error waiting for target flash write algorithm");
1134 retval = retval2;
1135 }
1136
1137 if (retval == ERROR_OK) {
1138 /* check if algorithm set rp = 0 after fifo writer loop finished */
1139 retval = target_read_u32(target, rp_addr, &rp);
1140 if (retval == ERROR_OK && rp == 0) {
1141 LOG_ERROR("flash write algorithm aborted by target");
1142 retval = ERROR_FLASH_OPERATION_FAILED;
1143 }
1144 }
1145
1146 return retval;
1147 }
1148
1149 int target_run_read_async_algorithm(struct target *target,
1150 uint8_t *buffer, uint32_t count, int block_size,
1151 int num_mem_params, struct mem_param *mem_params,
1152 int num_reg_params, struct reg_param *reg_params,
1153 uint32_t buffer_start, uint32_t buffer_size,
1154 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1155 {
1156 int retval;
1157 int timeout = 0;
1158
1159 const uint8_t *buffer_orig = buffer;
1160
1161 /* Set up working area. First word is write pointer, second word is read pointer,
1162 * rest is fifo data area. */
1163 uint32_t wp_addr = buffer_start;
1164 uint32_t rp_addr = buffer_start + 4;
1165 uint32_t fifo_start_addr = buffer_start + 8;
1166 uint32_t fifo_end_addr = buffer_start + buffer_size;
1167
1168 uint32_t wp = fifo_start_addr;
1169 uint32_t rp = fifo_start_addr;
1170
1171 /* validate block_size is 2^n */
1172 assert(IS_PWR_OF_2(block_size));
1173
1174 retval = target_write_u32(target, wp_addr, wp);
1175 if (retval != ERROR_OK)
1176 return retval;
1177 retval = target_write_u32(target, rp_addr, rp);
1178 if (retval != ERROR_OK)
1179 return retval;
1180
1181 /* Start up algorithm on target */
1182 retval = target_start_algorithm(target, num_mem_params, mem_params,
1183 num_reg_params, reg_params,
1184 entry_point,
1185 exit_point,
1186 arch_info);
1187
1188 if (retval != ERROR_OK) {
1189 LOG_ERROR("error starting target flash read algorithm");
1190 return retval;
1191 }
1192
1193 while (count > 0) {
1194 retval = target_read_u32(target, wp_addr, &wp);
1195 if (retval != ERROR_OK) {
1196 LOG_ERROR("failed to get write pointer");
1197 break;
1198 }
1199
1200 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1201 (size_t)(buffer - buffer_orig), count, wp, rp);
1202
1203 if (wp == 0) {
1204 LOG_ERROR("flash read algorithm aborted by target");
1205 retval = ERROR_FLASH_OPERATION_FAILED;
1206 break;
1207 }
1208
1209 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1210 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1211 break;
1212 }
1213
1214 /* Count the number of bytes available in the fifo without
1215 * crossing the wrap around. */
1216 uint32_t thisrun_bytes;
1217 if (wp >= rp)
1218 thisrun_bytes = wp - rp;
1219 else
1220 thisrun_bytes = fifo_end_addr - rp;
1221
1222 if (thisrun_bytes == 0) {
1223 /* Throttle polling a bit if transfer is (much) faster than flash
1224 * reading. The exact delay shouldn't matter as long as it's
1225 * less than buffer size / flash speed. This is very unlikely to
1226 * run when using high latency connections such as USB. */
1227 alive_sleep(2);
1228
1229 /* to stop an infinite loop on some targets check and increment a timeout
1230 * this issue was observed on a stellaris using the new ICDI interface */
1231 if (timeout++ >= 2500) {
1232 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1233 return ERROR_FLASH_OPERATION_FAILED;
1234 }
1235 continue;
1236 }
1237
1238 /* Reset our timeout */
1239 timeout = 0;
1240
1241 /* Limit to the amount of data we actually want to read */
1242 if (thisrun_bytes > count * block_size)
1243 thisrun_bytes = count * block_size;
1244
1245 /* Force end of large blocks to be word aligned */
1246 if (thisrun_bytes >= 16)
1247 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1248
1249 /* Read data from fifo */
1250 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1251 if (retval != ERROR_OK)
1252 break;
1253
1254 /* Update counters and wrap write pointer */
1255 buffer += thisrun_bytes;
1256 count -= thisrun_bytes / block_size;
1257 rp += thisrun_bytes;
1258 if (rp >= fifo_end_addr)
1259 rp = fifo_start_addr;
1260
1261 /* Store updated write pointer to target */
1262 retval = target_write_u32(target, rp_addr, rp);
1263 if (retval != ERROR_OK)
1264 break;
1265
1266 /* Avoid GDB timeouts */
1267 keep_alive();
1268
1269 }
1270
1271 if (retval != ERROR_OK) {
1272 /* abort flash write algorithm on target */
1273 target_write_u32(target, rp_addr, 0);
1274 }
1275
1276 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1277 num_reg_params, reg_params,
1278 exit_point,
1279 10000,
1280 arch_info);
1281
1282 if (retval2 != ERROR_OK) {
1283 LOG_ERROR("error waiting for target flash write algorithm");
1284 retval = retval2;
1285 }
1286
1287 if (retval == ERROR_OK) {
1288 /* check if algorithm set wp = 0 after fifo writer loop finished */
1289 retval = target_read_u32(target, wp_addr, &wp);
1290 if (retval == ERROR_OK && wp == 0) {
1291 LOG_ERROR("flash read algorithm aborted by target");
1292 retval = ERROR_FLASH_OPERATION_FAILED;
1293 }
1294 }
1295
1296 return retval;
1297 }
1298
1299 int target_read_memory(struct target *target,
1300 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1301 {
1302 if (!target_was_examined(target)) {
1303 LOG_ERROR("Target not examined yet");
1304 return ERROR_FAIL;
1305 }
1306 if (!target->type->read_memory) {
1307 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1308 return ERROR_FAIL;
1309 }
1310 return target->type->read_memory(target, address, size, count, buffer);
1311 }
1312
1313 int target_read_phys_memory(struct target *target,
1314 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1315 {
1316 if (!target_was_examined(target)) {
1317 LOG_ERROR("Target not examined yet");
1318 return ERROR_FAIL;
1319 }
1320 if (!target->type->read_phys_memory) {
1321 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1322 return ERROR_FAIL;
1323 }
1324 return target->type->read_phys_memory(target, address, size, count, buffer);
1325 }
1326
1327 int target_write_memory(struct target *target,
1328 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1329 {
1330 if (!target_was_examined(target)) {
1331 LOG_ERROR("Target not examined yet");
1332 return ERROR_FAIL;
1333 }
1334 if (!target->type->write_memory) {
1335 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1336 return ERROR_FAIL;
1337 }
1338 return target->type->write_memory(target, address, size, count, buffer);
1339 }
1340
1341 int target_write_phys_memory(struct target *target,
1342 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1343 {
1344 if (!target_was_examined(target)) {
1345 LOG_ERROR("Target not examined yet");
1346 return ERROR_FAIL;
1347 }
1348 if (!target->type->write_phys_memory) {
1349 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1350 return ERROR_FAIL;
1351 }
1352 return target->type->write_phys_memory(target, address, size, count, buffer);
1353 }
1354
1355 int target_add_breakpoint(struct target *target,
1356 struct breakpoint *breakpoint)
1357 {
1358 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1359 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1360 return ERROR_TARGET_NOT_HALTED;
1361 }
1362 return target->type->add_breakpoint(target, breakpoint);
1363 }
1364
1365 int target_add_context_breakpoint(struct target *target,
1366 struct breakpoint *breakpoint)
1367 {
1368 if (target->state != TARGET_HALTED) {
1369 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1370 return ERROR_TARGET_NOT_HALTED;
1371 }
1372 return target->type->add_context_breakpoint(target, breakpoint);
1373 }
1374
1375 int target_add_hybrid_breakpoint(struct target *target,
1376 struct breakpoint *breakpoint)
1377 {
1378 if (target->state != TARGET_HALTED) {
1379 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1380 return ERROR_TARGET_NOT_HALTED;
1381 }
1382 return target->type->add_hybrid_breakpoint(target, breakpoint);
1383 }
1384
1385 int target_remove_breakpoint(struct target *target,
1386 struct breakpoint *breakpoint)
1387 {
1388 return target->type->remove_breakpoint(target, breakpoint);
1389 }
1390
1391 int target_add_watchpoint(struct target *target,
1392 struct watchpoint *watchpoint)
1393 {
1394 if (target->state != TARGET_HALTED) {
1395 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1396 return ERROR_TARGET_NOT_HALTED;
1397 }
1398 return target->type->add_watchpoint(target, watchpoint);
1399 }
1400 int target_remove_watchpoint(struct target *target,
1401 struct watchpoint *watchpoint)
1402 {
1403 return target->type->remove_watchpoint(target, watchpoint);
1404 }
1405 int target_hit_watchpoint(struct target *target,
1406 struct watchpoint **hit_watchpoint)
1407 {
1408 if (target->state != TARGET_HALTED) {
1409 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1410 return ERROR_TARGET_NOT_HALTED;
1411 }
1412
1413 if (!target->type->hit_watchpoint) {
1414 /* For backward compatible, if hit_watchpoint is not implemented,
1415 * return ERROR_FAIL such that gdb_server will not take the nonsense
1416 * information. */
1417 return ERROR_FAIL;
1418 }
1419
1420 return target->type->hit_watchpoint(target, hit_watchpoint);
1421 }
1422
1423 const char *target_get_gdb_arch(struct target *target)
1424 {
1425 if (!target->type->get_gdb_arch)
1426 return NULL;
1427 return target->type->get_gdb_arch(target);
1428 }
1429
1430 int target_get_gdb_reg_list(struct target *target,
1431 struct reg **reg_list[], int *reg_list_size,
1432 enum target_register_class reg_class)
1433 {
1434 int result = ERROR_FAIL;
1435
1436 if (!target_was_examined(target)) {
1437 LOG_ERROR("Target not examined yet");
1438 goto done;
1439 }
1440
1441 result = target->type->get_gdb_reg_list(target, reg_list,
1442 reg_list_size, reg_class);
1443
1444 done:
1445 if (result != ERROR_OK) {
1446 *reg_list = NULL;
1447 *reg_list_size = 0;
1448 }
1449 return result;
1450 }
1451
1452 int target_get_gdb_reg_list_noread(struct target *target,
1453 struct reg **reg_list[], int *reg_list_size,
1454 enum target_register_class reg_class)
1455 {
1456 if (target->type->get_gdb_reg_list_noread &&
1457 target->type->get_gdb_reg_list_noread(target, reg_list,
1458 reg_list_size, reg_class) == ERROR_OK)
1459 return ERROR_OK;
1460 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1461 }
1462
1463 bool target_supports_gdb_connection(struct target *target)
1464 {
1465 /*
1466 * exclude all the targets that don't provide get_gdb_reg_list
1467 * or that have explicit gdb_max_connection == 0
1468 */
1469 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1470 }
1471
1472 int target_step(struct target *target,
1473 int current, target_addr_t address, int handle_breakpoints)
1474 {
1475 int retval;
1476
1477 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1478
1479 retval = target->type->step(target, current, address, handle_breakpoints);
1480 if (retval != ERROR_OK)
1481 return retval;
1482
1483 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1484
1485 return retval;
1486 }
1487
1488 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1489 {
1490 if (target->state != TARGET_HALTED) {
1491 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1492 return ERROR_TARGET_NOT_HALTED;
1493 }
1494 return target->type->get_gdb_fileio_info(target, fileio_info);
1495 }
1496
1497 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1498 {
1499 if (target->state != TARGET_HALTED) {
1500 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1501 return ERROR_TARGET_NOT_HALTED;
1502 }
1503 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1504 }
1505
1506 target_addr_t target_address_max(struct target *target)
1507 {
1508 unsigned bits = target_address_bits(target);
1509 if (sizeof(target_addr_t) * 8 == bits)
1510 return (target_addr_t) -1;
1511 else
1512 return (((target_addr_t) 1) << bits) - 1;
1513 }
1514
1515 unsigned target_address_bits(struct target *target)
1516 {
1517 if (target->type->address_bits)
1518 return target->type->address_bits(target);
1519 return 32;
1520 }
1521
1522 unsigned int target_data_bits(struct target *target)
1523 {
1524 if (target->type->data_bits)
1525 return target->type->data_bits(target);
1526 return 32;
1527 }
1528
/* Collect up to max_num_samples PC samples over 'seconds' seconds.
 * Delegates to the target type's hook; target_init_one() guarantees
 * the hook is non-NULL (stubbed with target_profiling_default). */
static int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}
1535
1536 static int handle_target(void *priv);
1537
1538 static int target_init_one(struct command_context *cmd_ctx,
1539 struct target *target)
1540 {
1541 target_reset_examined(target);
1542
1543 struct target_type *type = target->type;
1544 if (!type->examine)
1545 type->examine = default_examine;
1546
1547 if (!type->check_reset)
1548 type->check_reset = default_check_reset;
1549
1550 assert(type->init_target);
1551
1552 int retval = type->init_target(cmd_ctx, target);
1553 if (retval != ERROR_OK) {
1554 LOG_ERROR("target '%s' init failed", target_name(target));
1555 return retval;
1556 }
1557
1558 /* Sanity-check MMU support ... stub in what we must, to help
1559 * implement it in stages, but warn if we need to do so.
1560 */
1561 if (type->mmu) {
1562 if (!type->virt2phys) {
1563 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1564 type->virt2phys = identity_virt2phys;
1565 }
1566 } else {
1567 /* Make sure no-MMU targets all behave the same: make no
1568 * distinction between physical and virtual addresses, and
1569 * ensure that virt2phys() is always an identity mapping.
1570 */
1571 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1572 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1573
1574 type->mmu = no_mmu;
1575 type->write_phys_memory = type->write_memory;
1576 type->read_phys_memory = type->read_memory;
1577 type->virt2phys = identity_virt2phys;
1578 }
1579
1580 if (!target->type->read_buffer)
1581 target->type->read_buffer = target_read_buffer_default;
1582
1583 if (!target->type->write_buffer)
1584 target->type->write_buffer = target_write_buffer_default;
1585
1586 if (!target->type->get_gdb_fileio_info)
1587 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1588
1589 if (!target->type->gdb_fileio_end)
1590 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1591
1592 if (!target->type->profiling)
1593 target->type->profiling = target_profiling_default;
1594
1595 return ERROR_OK;
1596 }
1597
1598 static int target_init(struct command_context *cmd_ctx)
1599 {
1600 struct target *target;
1601 int retval;
1602
1603 for (target = all_targets; target; target = target->next) {
1604 retval = target_init_one(cmd_ctx, target);
1605 if (retval != ERROR_OK)
1606 return retval;
1607 }
1608
1609 if (!all_targets)
1610 return ERROR_OK;
1611
1612 retval = target_register_user_commands(cmd_ctx);
1613 if (retval != ERROR_OK)
1614 return retval;
1615
1616 retval = target_register_timer_callback(&handle_target,
1617 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1618 if (retval != ERROR_OK)
1619 return retval;
1620
1621 return ERROR_OK;
1622 }
1623
1624 COMMAND_HANDLER(handle_target_init_command)
1625 {
1626 int retval;
1627
1628 if (CMD_ARGC != 0)
1629 return ERROR_COMMAND_SYNTAX_ERROR;
1630
1631 static bool target_initialized;
1632 if (target_initialized) {
1633 LOG_INFO("'target init' has already been called");
1634 return ERROR_OK;
1635 }
1636 target_initialized = true;
1637
1638 retval = command_run_line(CMD_CTX, "init_targets");
1639 if (retval != ERROR_OK)
1640 return retval;
1641
1642 retval = command_run_line(CMD_CTX, "init_target_events");
1643 if (retval != ERROR_OK)
1644 return retval;
1645
1646 retval = command_run_line(CMD_CTX, "init_board");
1647 if (retval != ERROR_OK)
1648 return retval;
1649
1650 LOG_DEBUG("Initializing targets...");
1651 return target_init(CMD_CTX);
1652 }
1653
1654 int target_register_event_callback(int (*callback)(struct target *target,
1655 enum target_event event, void *priv), void *priv)
1656 {
1657 struct target_event_callback **callbacks_p = &target_event_callbacks;
1658
1659 if (!callback)
1660 return ERROR_COMMAND_SYNTAX_ERROR;
1661
1662 if (*callbacks_p) {
1663 while ((*callbacks_p)->next)
1664 callbacks_p = &((*callbacks_p)->next);
1665 callbacks_p = &((*callbacks_p)->next);
1666 }
1667
1668 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1669 (*callbacks_p)->callback = callback;
1670 (*callbacks_p)->priv = priv;
1671 (*callbacks_p)->next = NULL;
1672
1673 return ERROR_OK;
1674 }
1675
1676 int target_register_reset_callback(int (*callback)(struct target *target,
1677 enum target_reset_mode reset_mode, void *priv), void *priv)
1678 {
1679 struct target_reset_callback *entry;
1680
1681 if (!callback)
1682 return ERROR_COMMAND_SYNTAX_ERROR;
1683
1684 entry = malloc(sizeof(struct target_reset_callback));
1685 if (!entry) {
1686 LOG_ERROR("error allocating buffer for reset callback entry");
1687 return ERROR_COMMAND_SYNTAX_ERROR;
1688 }
1689
1690 entry->callback = callback;
1691 entry->priv = priv;
1692 list_add(&entry->list, &target_reset_callback_list);
1693
1694
1695 return ERROR_OK;
1696 }
1697
1698 int target_register_trace_callback(int (*callback)(struct target *target,
1699 size_t len, uint8_t *data, void *priv), void *priv)
1700 {
1701 struct target_trace_callback *entry;
1702
1703 if (!callback)
1704 return ERROR_COMMAND_SYNTAX_ERROR;
1705
1706 entry = malloc(sizeof(struct target_trace_callback));
1707 if (!entry) {
1708 LOG_ERROR("error allocating buffer for trace callback entry");
1709 return ERROR_COMMAND_SYNTAX_ERROR;
1710 }
1711
1712 entry->callback = callback;
1713 entry->priv = priv;
1714 list_add(&entry->list, &target_trace_callback_list);
1715
1716
1717 return ERROR_OK;
1718 }
1719
1720 int target_register_timer_callback(int (*callback)(void *priv),
1721 unsigned int time_ms, enum target_timer_type type, void *priv)
1722 {
1723 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1724
1725 if (!callback)
1726 return ERROR_COMMAND_SYNTAX_ERROR;
1727
1728 if (*callbacks_p) {
1729 while ((*callbacks_p)->next)
1730 callbacks_p = &((*callbacks_p)->next);
1731 callbacks_p = &((*callbacks_p)->next);
1732 }
1733
1734 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1735 (*callbacks_p)->callback = callback;
1736 (*callbacks_p)->type = type;
1737 (*callbacks_p)->time_ms = time_ms;
1738 (*callbacks_p)->removed = false;
1739
1740 (*callbacks_p)->when = timeval_ms() + time_ms;
1741 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1742
1743 (*callbacks_p)->priv = priv;
1744 (*callbacks_p)->next = NULL;
1745
1746 return ERROR_OK;
1747 }
1748
1749 int target_unregister_event_callback(int (*callback)(struct target *target,
1750 enum target_event event, void *priv), void *priv)
1751 {
1752 struct target_event_callback **p = &target_event_callbacks;
1753 struct target_event_callback *c = target_event_callbacks;
1754
1755 if (!callback)
1756 return ERROR_COMMAND_SYNTAX_ERROR;
1757
1758 while (c) {
1759 struct target_event_callback *next = c->next;
1760 if ((c->callback == callback) && (c->priv == priv)) {
1761 *p = next;
1762 free(c);
1763 return ERROR_OK;
1764 } else
1765 p = &(c->next);
1766 c = next;
1767 }
1768
1769 return ERROR_OK;
1770 }
1771
1772 int target_unregister_reset_callback(int (*callback)(struct target *target,
1773 enum target_reset_mode reset_mode, void *priv), void *priv)
1774 {
1775 struct target_reset_callback *entry;
1776
1777 if (!callback)
1778 return ERROR_COMMAND_SYNTAX_ERROR;
1779
1780 list_for_each_entry(entry, &target_reset_callback_list, list) {
1781 if (entry->callback == callback && entry->priv == priv) {
1782 list_del(&entry->list);
1783 free(entry);
1784 break;
1785 }
1786 }
1787
1788 return ERROR_OK;
1789 }
1790
1791 int target_unregister_trace_callback(int (*callback)(struct target *target,
1792 size_t len, uint8_t *data, void *priv), void *priv)
1793 {
1794 struct target_trace_callback *entry;
1795
1796 if (!callback)
1797 return ERROR_COMMAND_SYNTAX_ERROR;
1798
1799 list_for_each_entry(entry, &target_trace_callback_list, list) {
1800 if (entry->callback == callback && entry->priv == priv) {
1801 list_del(&entry->list);
1802 free(entry);
1803 break;
1804 }
1805 }
1806
1807 return ERROR_OK;
1808 }
1809
1810 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1811 {
1812 if (!callback)
1813 return ERROR_COMMAND_SYNTAX_ERROR;
1814
1815 for (struct target_timer_callback *c = target_timer_callbacks;
1816 c; c = c->next) {
1817 if ((c->callback == callback) && (c->priv == priv)) {
1818 c->removed = true;
1819 return ERROR_OK;
1820 }
1821 }
1822
1823 return ERROR_FAIL;
1824 }
1825
1826 int target_call_event_callbacks(struct target *target, enum target_event event)
1827 {
1828 struct target_event_callback *callback = target_event_callbacks;
1829 struct target_event_callback *next_callback;
1830
1831 if (event == TARGET_EVENT_HALTED) {
1832 /* execute early halted first */
1833 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1834 }
1835
1836 LOG_DEBUG("target event %i (%s) for core %s", event,
1837 target_event_name(event),
1838 target_name(target));
1839
1840 target_handle_event(target, event);
1841
1842 while (callback) {
1843 next_callback = callback->next;
1844 callback->callback(target, event, callback->priv);
1845 callback = next_callback;
1846 }
1847
1848 return ERROR_OK;
1849 }
1850
/* Notify every registered reset callback that 'target' is being reset
 * with the given mode. Callback return values are ignored; always
 * returns ERROR_OK. */
int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
{
	struct target_reset_callback *callback;

	LOG_DEBUG("target reset %i (%s)", reset_mode,
			jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);

	list_for_each_entry(callback, &target_reset_callback_list, list)
		callback->callback(target, reset_mode, callback->priv);

	return ERROR_OK;
}
1863
/* Deliver 'len' bytes of trace data from 'target' to every registered
 * trace callback. Callback return values are ignored; always returns
 * ERROR_OK. */
int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
{
	struct target_trace_callback *callback;

	list_for_each_entry(callback, &target_trace_callback_list, list)
		callback->callback(target, len, data, callback->priv);

	return ERROR_OK;
}
1873
/* Re-arm a periodic timer callback: schedule its next expiry time_ms
 * milliseconds after 'now' (the timestamp of the current dispatch pass). */
static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, int64_t *now)
{
	cb->when = *now + cb->time_ms;
	return ERROR_OK;
}
1880
1881 static int target_call_timer_callback(struct target_timer_callback *cb,
1882 int64_t *now)
1883 {
1884 cb->callback(cb->priv);
1885
1886 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1887 return target_timer_callback_periodic_restart(cb, now);
1888
1889 return target_unregister_timer_callback(cb->callback, cb->priv);
1890 }
1891
/* Dispatch due timer callbacks and garbage-collect removed entries.
 *
 * With checktime non-zero, only callbacks whose deadline has passed are
 * fired; with checktime zero, every periodic callback fires immediately.
 * Also recomputes target_timer_next_event_value, the earliest upcoming
 * deadline. Re-entrant calls (a callback polling and dispatching again)
 * are detected and turned into no-ops. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		/* Lazily unlink entries flagged by
		 * target_unregister_timer_callback(). */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* One-shot callbacks that just fired are now 'removed' and
		 * must not contribute to the next-event deadline. */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1939
/* Invoke the timer callbacks whose deadline has passed.
 * Fix: use a (void) prototype; empty parentheses in a definition are an
 * obsolescent C feature. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1944
/* Invoke all periodic timer callbacks immediately, regardless of their
 * deadlines.
 * Fix: use a (void) prototype; empty parentheses in a definition are an
 * obsolescent C feature. */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1950
/* Return the earliest upcoming timer-callback deadline (in the same
 * millisecond timebase as timeval_ms()), as computed by the last
 * dispatch pass. Lets the main loop sleep until something is due. */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1955
1956 /* Prints the working area layout for debug purposes */
1957 static void print_wa_layout(struct target *target)
1958 {
1959 struct working_area *c = target->working_areas;
1960
1961 while (c) {
1962 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1963 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1964 c->address, c->address + c->size - 1, c->size);
1965 c = c->next;
1966 }
1967 }
1968
1969 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1970 static void target_split_working_area(struct working_area *area, uint32_t size)
1971 {
1972 assert(area->free); /* Shouldn't split an allocated area */
1973 assert(size <= area->size); /* Caller should guarantee this */
1974
1975 /* Split only if not already the right size */
1976 if (size < area->size) {
1977 struct working_area *new_wa = malloc(sizeof(*new_wa));
1978
1979 if (!new_wa)
1980 return;
1981
1982 new_wa->next = area->next;
1983 new_wa->size = area->size - size;
1984 new_wa->address = area->address + size;
1985 new_wa->backup = NULL;
1986 new_wa->user = NULL;
1987 new_wa->free = true;
1988
1989 area->next = new_wa;
1990 area->size = size;
1991
1992 /* If backup memory was allocated to this area, it has the wrong size
1993 * now so free it and it will be reallocated if/when needed */
1994 free(area->backup);
1995 area->backup = NULL;
1996 }
1997 }
1998
/* Merge all adjacent free areas into one */
static void target_merge_working_areas(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c && c->next) {
		/* Areas are kept contiguous and in address order */
		assert(c->next->address == c->address + c->size); /* This is an invariant */

		/* Find two adjacent free areas */
		if (c->free && c->next->free) {
			/* Merge the last into the first */
			c->size += c->next->size;

			/* Remove the last */
			struct working_area *to_be_freed = c->next;
			c->next = c->next->next;
			free(to_be_freed->backup);
			free(to_be_freed);

			/* If backup memory was allocated to the remaining area, it has
			 * the wrong size now */
			free(c->backup);
			c->backup = NULL;
			/* Stay on 'c': it may now be adjacent to another free area */
		} else {
			c = c->next;
		}
	}
}
2027
/* Try to allocate 'size' bytes (rounded up to a multiple of 4) from the
 * target's working area pool. On success *area points at the allocated
 * area and ERROR_OK is returned; the area's original contents are saved
 * first when backup_working_area is enabled. Returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no sufficiently large free
 * area exists (no warning is logged — see target_alloc_working_area()). */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		/* Pick the physical or virtual base address depending on whether
		 * the MMU is currently translating addresses */
		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NULL on malloc failure; the search below then fails with
		 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	/* Preserve the area's current contents so they can be restored when
	 * the area is freed */
	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer: remembered so the caller's handle can be cleared on free */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2122
2123 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2124 {
2125 int retval;
2126
2127 retval = target_alloc_working_area_try(target, size, area);
2128 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2129 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2130 return retval;
2131
2132 }
2133
2134 static int target_restore_working_area(struct target *target, struct working_area *area)
2135 {
2136 int retval = ERROR_OK;
2137
2138 if (target->backup_working_area && area->backup) {
2139 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2140 if (retval != ERROR_OK)
2141 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2142 area->size, area->address);
2143 }
2144
2145 return retval;
2146 }
2147
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	/* Freeing NULL or an already-free area is a no-op */
	if (!area || area->free)
		return ERROR_OK;

	int retval = ERROR_OK;
	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
		area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* Coalesce adjacent free areas so large requests can succeed later */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
2180
/* Free a working area, restoring its backed-up contents first (when the
 * target has backup_working_area enabled). Returns ERROR_OK, or the
 * restore error, in which case the area is NOT freed. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
2185
2186 /* free resources and restore memory, if restoring memory fails,
2187 * free up resources anyway
2188 */
2189 static void target_free_all_working_areas_restore(struct target *target, int restore)
2190 {
2191 struct working_area *c = target->working_areas;
2192
2193 LOG_DEBUG("freeing all working areas");
2194
2195 /* Loop through all areas, restoring the allocated ones and marking them as free */
2196 while (c) {
2197 if (!c->free) {
2198 if (restore)
2199 target_restore_working_area(target, c);
2200 c->free = true;
2201 *c->user = NULL; /* Same as above */
2202 c->user = NULL;
2203 }
2204 c = c->next;
2205 }
2206
2207 /* Run a merge pass to combine all areas into one */
2208 target_merge_working_areas(target);
2209
2210 print_wa_layout(target);
2211 }
2212
2213 void target_free_all_working_areas(struct target *target)
2214 {
2215 target_free_all_working_areas_restore(target, 1);
2216
2217 /* Now we have none or only one working area marked as free */
2218 if (target->working_areas) {
2219 /* Free the last one to allow on-the-fly moving and resizing */
2220 free(target->working_areas->backup);
2221 free(target->working_areas);
2222 target->working_areas = NULL;
2223 }
2224 }
2225
2226 /* Find the largest number of bytes that can be allocated */
2227 uint32_t target_get_working_area_avail(struct target *target)
2228 {
2229 struct working_area *c = target->working_areas;
2230 uint32_t max_size = 0;
2231
2232 if (!c)
2233 return target->working_area_size;
2234
2235 while (c) {
2236 if (c->free && max_size < c->size)
2237 max_size = c->size;
2238
2239 c = c->next;
2240 }
2241
2242 return max_size;
2243 }
2244
/* Release every resource owned by a single target: type-specific state,
 * event handlers, working areas, the SMP list, RTOS support and finally
 * the target structure itself. Used during shutdown (see target_quit()). */
static void target_destroy(struct target *target)
{
	/* give the target type a chance to tear down its private state first */
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* release the Tcl event handler bodies registered for this target */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head = target->head;
		while (head) {
			struct target_list *pos = head->next;
			head->target->smp = 0;
			free(head);
			head = pos;
		}
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2285
2286 void target_quit(void)
2287 {
2288 struct target_event_callback *pe = target_event_callbacks;
2289 while (pe) {
2290 struct target_event_callback *t = pe->next;
2291 free(pe);
2292 pe = t;
2293 }
2294 target_event_callbacks = NULL;
2295
2296 struct target_timer_callback *pt = target_timer_callbacks;
2297 while (pt) {
2298 struct target_timer_callback *t = pt->next;
2299 free(pt);
2300 pt = t;
2301 }
2302 target_timer_callbacks = NULL;
2303
2304 for (struct target *target = all_targets; target;) {
2305 struct target *tmp;
2306
2307 tmp = target->next;
2308 target_destroy(target);
2309 target = tmp;
2310 }
2311
2312 all_targets = NULL;
2313 }
2314
2315 int target_arch_state(struct target *target)
2316 {
2317 int retval;
2318 if (!target) {
2319 LOG_WARNING("No target has been configured");
2320 return ERROR_OK;
2321 }
2322
2323 if (target->state != TARGET_HALTED)
2324 return ERROR_OK;
2325
2326 retval = target->type->arch_state(target);
2327 return retval;
2328 }
2329
/* Default .get_gdb_fileio_info implementation. */
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info)
{
	/* If a target does not support semihosting functions, it has no need
	 * to provide a .get_gdb_fileio_info callback. Returning ERROR_FAIL
	 * here makes gdb_server report plain "Txx" (target halted) every
	 * time instead of a File-I/O request. */
	return ERROR_FAIL;
}
2339
/* Default .gdb_fileio_end implementation: nothing to clean up, accept
 * whatever result GDB reported. */
static int target_gdb_fileio_end_default(struct target *target,
		int retcode, int fileio_errno, bool ctrl_c)
{
	return ERROR_OK;
}
2345
2346 int target_profiling_default(struct target *target, uint32_t *samples,
2347 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2348 {
2349 struct timeval timeout, now;
2350
2351 gettimeofday(&timeout, NULL);
2352 timeval_add_time(&timeout, seconds, 0);
2353
2354 LOG_INFO("Starting profiling. Halting and resuming the"
2355 " target as often as we can...");
2356
2357 uint32_t sample_count = 0;
2358 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2359 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2360
2361 int retval = ERROR_OK;
2362 for (;;) {
2363 target_poll(target);
2364 if (target->state == TARGET_HALTED) {
2365 uint32_t t = buf_get_u32(reg->value, 0, 32);
2366 samples[sample_count++] = t;
2367 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2368 retval = target_resume(target, 1, 0, 0, 0);
2369 target_poll(target);
2370 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2371 } else if (target->state == TARGET_RUNNING) {
2372 /* We want to quickly sample the PC. */
2373 retval = target_halt(target);
2374 } else {
2375 LOG_INFO("Target not halted or running");
2376 retval = ERROR_OK;
2377 break;
2378 }
2379
2380 if (retval != ERROR_OK)
2381 break;
2382
2383 gettimeofday(&now, NULL);
2384 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2385 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2386 break;
2387 }
2388 }
2389
2390 *num_samples = sample_count;
2391 return retval;
2392 }
2393
2394 /* Single aligned words are guaranteed to use 16 or 32 bit access
2395 * mode respectively, otherwise data is handled as quickly as
2396 * possible
2397 */
2398 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2399 {
2400 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2401 size, address);
2402
2403 if (!target_was_examined(target)) {
2404 LOG_ERROR("Target not examined yet");
2405 return ERROR_FAIL;
2406 }
2407
2408 if (size == 0)
2409 return ERROR_OK;
2410
2411 if ((address + size - 1) < address) {
2412 /* GDB can request this when e.g. PC is 0xfffffffc */
2413 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2414 address,
2415 size);
2416 return ERROR_FAIL;
2417 }
2418
2419 return target->type->write_buffer(target, address, size, buffer);
2420 }
2421
/* Default .write_buffer implementation: write 'count' bytes using the
 * widest aligned accesses available, up to the target's data bus width. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* a set 'size' bit in the address means it is misaligned for 2*size */
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* largest multiple of 'size' that still fits in the remainder */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2458
2459 /* Single aligned words are guaranteed to use 16 or 32 bit access
2460 * mode respectively, otherwise data is handled as quickly as
2461 * possible
2462 */
2463 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2464 {
2465 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2466 size, address);
2467
2468 if (!target_was_examined(target)) {
2469 LOG_ERROR("Target not examined yet");
2470 return ERROR_FAIL;
2471 }
2472
2473 if (size == 0)
2474 return ERROR_OK;
2475
2476 if ((address + size - 1) < address) {
2477 /* GDB can request this when e.g. PC is 0xfffffffc */
2478 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2479 address,
2480 size);
2481 return ERROR_FAIL;
2482 }
2483
2484 return target->type->read_buffer(target, address, size, buffer);
2485 }
2486
/* Default .read_buffer implementation: read 'count' bytes using the
 * widest aligned accesses available, up to the target's data bus width. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* a set 'size' bit in the address means it is misaligned for 2*size */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* largest multiple of 'size' that still fits in the remainder */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2522
/* Compute a checksum over 'size' bytes of target memory at 'address',
 * storing it in *crc. Tries the target's own checksum_memory hook first;
 * if that fails, falls back to reading the memory and checksumming on the
 * host with image_calculate_checksum(). */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* host-side fallback */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 immediately followed by set_u32 of the same
		 * value looks like a byte-for-byte no-op — confirm intended effect */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2566
/* Check whether the given memory blocks contain only 'erased_value' bytes,
 * delegating to the target's blank_check_memory hook.
 * Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE when the target provides no
 * such hook, so callers may fall back to reading the memory themselves. */
int target_blank_check_memory(struct target *target,
		struct target_memory_check_block *blocks, int num_blocks,
		uint8_t erased_value)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	if (!target->type->blank_check_memory)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
}
2581
2582 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2583 {
2584 uint8_t value_buf[8];
2585 if (!target_was_examined(target)) {
2586 LOG_ERROR("Target not examined yet");
2587 return ERROR_FAIL;
2588 }
2589
2590 int retval = target_read_memory(target, address, 8, 1, value_buf);
2591
2592 if (retval == ERROR_OK) {
2593 *value = target_buffer_get_u64(target, value_buf);
2594 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2595 address,
2596 *value);
2597 } else {
2598 *value = 0x0;
2599 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2600 address);
2601 }
2602
2603 return retval;
2604 }
2605
2606 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2607 {
2608 uint8_t value_buf[4];
2609 if (!target_was_examined(target)) {
2610 LOG_ERROR("Target not examined yet");
2611 return ERROR_FAIL;
2612 }
2613
2614 int retval = target_read_memory(target, address, 4, 1, value_buf);
2615
2616 if (retval == ERROR_OK) {
2617 *value = target_buffer_get_u32(target, value_buf);
2618 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2619 address,
2620 *value);
2621 } else {
2622 *value = 0x0;
2623 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2624 address);
2625 }
2626
2627 return retval;
2628 }
2629
2630 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2631 {
2632 uint8_t value_buf[2];
2633 if (!target_was_examined(target)) {
2634 LOG_ERROR("Target not examined yet");
2635 return ERROR_FAIL;
2636 }
2637
2638 int retval = target_read_memory(target, address, 2, 1, value_buf);
2639
2640 if (retval == ERROR_OK) {
2641 *value = target_buffer_get_u16(target, value_buf);
2642 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2643 address,
2644 *value);
2645 } else {
2646 *value = 0x0;
2647 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2648 address);
2649 }
2650
2651 return retval;
2652 }
2653
2654 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2655 {
2656 if (!target_was_examined(target)) {
2657 LOG_ERROR("Target not examined yet");
2658 return ERROR_FAIL;
2659 }
2660
2661 int retval = target_read_memory(target, address, 1, 1, value);
2662
2663 if (retval == ERROR_OK) {
2664 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2665 address,
2666 *value);
2667 } else {
2668 *value = 0x0;
2669 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2670 address);
2671 }
2672
2673 return retval;
2674 }
2675
2676 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2677 {
2678 int retval;
2679 uint8_t value_buf[8];
2680 if (!target_was_examined(target)) {
2681 LOG_ERROR("Target not examined yet");
2682 return ERROR_FAIL;
2683 }
2684
2685 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2686 address,
2687 value);
2688
2689 target_buffer_set_u64(target, value_buf, value);
2690 retval = target_write_memory(target, address, 8, 1, value_buf);
2691 if (retval != ERROR_OK)
2692 LOG_DEBUG("failed: %i", retval);
2693
2694 return retval;
2695 }
2696
2697 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2698 {
2699 int retval;
2700 uint8_t value_buf[4];
2701 if (!target_was_examined(target)) {
2702 LOG_ERROR("Target not examined yet");
2703 return ERROR_FAIL;
2704 }
2705
2706 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2707 address,
2708 value);
2709
2710 target_buffer_set_u32(target, value_buf, value);
2711 retval = target_write_memory(target, address, 4, 1, value_buf);
2712 if (retval != ERROR_OK)
2713 LOG_DEBUG("failed: %i", retval);
2714
2715 return retval;
2716 }
2717
2718 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2719 {
2720 int retval;
2721 uint8_t value_buf[2];
2722 if (!target_was_examined(target)) {
2723 LOG_ERROR("Target not examined yet");
2724 return ERROR_FAIL;
2725 }
2726
2727 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2728 address,
2729 value);
2730
2731 target_buffer_set_u16(target, value_buf, value);
2732 retval = target_write_memory(target, address, 2, 1, value_buf);
2733 if (retval != ERROR_OK)
2734 LOG_DEBUG("failed: %i", retval);
2735
2736 return retval;
2737 }
2738
2739 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2740 {
2741 int retval;
2742 if (!target_was_examined(target)) {
2743 LOG_ERROR("Target not examined yet");
2744 return ERROR_FAIL;
2745 }
2746
2747 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2748 address, value);
2749
2750 retval = target_write_memory(target, address, 1, 1, &value);
2751 if (retval != ERROR_OK)
2752 LOG_DEBUG("failed: %i", retval);
2753
2754 return retval;
2755 }
2756
2757 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2758 {
2759 int retval;
2760 uint8_t value_buf[8];
2761 if (!target_was_examined(target)) {
2762 LOG_ERROR("Target not examined yet");
2763 return ERROR_FAIL;
2764 }
2765
2766 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2767 address,
2768 value);
2769
2770 target_buffer_set_u64(target, value_buf, value);
2771 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2772 if (retval != ERROR_OK)
2773 LOG_DEBUG("failed: %i", retval);
2774
2775 return retval;
2776 }
2777
2778 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2779 {
2780 int retval;
2781 uint8_t value_buf[4];
2782 if (!target_was_examined(target)) {
2783 LOG_ERROR("Target not examined yet");
2784 return ERROR_FAIL;
2785 }
2786
2787 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2788 address,
2789 value);
2790
2791 target_buffer_set_u32(target, value_buf, value);
2792 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2793 if (retval != ERROR_OK)
2794 LOG_DEBUG("failed: %i", retval);
2795
2796 return retval;
2797 }
2798
2799 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2800 {
2801 int retval;
2802 uint8_t value_buf[2];
2803 if (!target_was_examined(target)) {
2804 LOG_ERROR("Target not examined yet");
2805 return ERROR_FAIL;
2806 }
2807
2808 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2809 address,
2810 value);
2811
2812 target_buffer_set_u16(target, value_buf, value);
2813 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2814 if (retval != ERROR_OK)
2815 LOG_DEBUG("failed: %i", retval);
2816
2817 return retval;
2818 }
2819
2820 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2821 {
2822 int retval;
2823 if (!target_was_examined(target)) {
2824 LOG_ERROR("Target not examined yet");
2825 return ERROR_FAIL;
2826 }
2827
2828 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2829 address, value);
2830
2831 retval = target_write_phys_memory(target, address, 1, 1, &value);
2832 if (retval != ERROR_OK)
2833 LOG_DEBUG("failed: %i", retval);
2834
2835 return retval;
2836 }
2837
/* Make the target named 'name' the command context's current target.
 * Fails (with a message to the user) when the name is unknown or the
 * target's TAP is disabled. */
static int find_target(struct command_invocation *cmd, const char *name)
{
	struct target *target = get_target(name);
	if (!target) {
		command_print(cmd, "Target: %s is unknown, try one of:\n", name);
		return ERROR_FAIL;
	}
	if (!target->tap->enabled) {
		command_print(cmd, "Target: TAP %s is disabled, "
			"can't be the current target\n",
			target->tap->dotted_name);
		return ERROR_FAIL;
	}

	cmd->ctx->current_target = target;
	/* only redirect an already-set override; NOTE(review): presumably an
	 * unset override must remain unset — confirm against callers */
	if (cmd->ctx->current_target_override)
		cmd->ctx->current_target_override = target;

	return ERROR_OK;
}
2858
2859
/* 'targets' command: with one argument, select that target as current;
 * with no argument (or when the lookup fails) print a table of all
 * configured targets, marking the current one with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2902
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* most recent line states sampled from the debug adapter */
static int power_dropout;
static int srst_asserted;

/* edge events latched by sense_handler(), consumed by handle_target() */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2912
/* Sample the adapter's power and srst lines, and latch edge transitions
 * into the run_* action flags for handle_target() to act upon.
 * Power-dropout and srst-deassert events are rate-limited to one per
 * two seconds. */
static int sense_handler(void)
{
	/* previous samples, kept across calls for edge detection */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2964
/* process target state changes: periodic callback that runs the Tcl
 * handlers for power/srst events latched by sense_handler(), then polls
 * every enabled, examined target. Poll failures trigger exponential
 * backoff (capped at 5000ms worth of intervals) and a re-examination
 * attempt. 'priv' is the Jim interpreter. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3077
/* 'reg' command: with no arguments, list every register of the current
 * target; with one argument, read a single register (selected by ordinal
 * number or by name); with two arguments, either force a re-read of the
 * register ("force") or write a new value into it. */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		/* 'count' numbers the registers consecutively across all caches,
		 * so the printed ordinal matches the lookup-by-number path below */
		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* skip registers that don't exist on this part or are hidden */
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
								? " (dirty)"
								: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk the cache chain, counting registers until 'num' is reached */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "reg <name> force" discards the cached value and re-reads */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		/* NOTE(review): the str_to_buf() result is not checked here, so a
		 * value that fails to parse is not reported — confirm intended */
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			/* echo back the value actually latched in the cache */
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
3208
3209 COMMAND_HANDLER(handle_poll_command)
3210 {
3211 int retval = ERROR_OK;
3212 struct target *target = get_current_target(CMD_CTX);
3213
3214 if (CMD_ARGC == 0) {
3215 command_print(CMD, "background polling: %s",
3216 jtag_poll_get_enabled() ? "on" : "off");
3217 command_print(CMD, "TAP: %s (%s)",
3218 target->tap->dotted_name,
3219 target->tap->enabled ? "enabled" : "disabled");
3220 if (!target->tap->enabled)
3221 return ERROR_OK;
3222 retval = target_poll(target);
3223 if (retval != ERROR_OK)
3224 return retval;
3225 retval = target_arch_state(target);
3226 if (retval != ERROR_OK)
3227 return retval;
3228 } else if (CMD_ARGC == 1) {
3229 bool enable;
3230 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3231 jtag_poll_set_enabled(enable);
3232 } else
3233 return ERROR_COMMAND_SYNTAX_ERROR;
3234
3235 return retval;
3236 }
3237
3238 COMMAND_HANDLER(handle_wait_halt_command)
3239 {
3240 if (CMD_ARGC > 1)
3241 return ERROR_COMMAND_SYNTAX_ERROR;
3242
3243 unsigned ms = DEFAULT_HALT_TIMEOUT;
3244 if (1 == CMD_ARGC) {
3245 int retval = parse_uint(CMD_ARGV[0], &ms);
3246 if (retval != ERROR_OK)
3247 return ERROR_COMMAND_SYNTAX_ERROR;
3248 }
3249
3250 struct target *target = get_current_target(CMD_CTX);
3251 return target_wait_state(target, TARGET_HALTED, ms);
3252 }
3253
3254 /* wait for target state to change. The trick here is to have a low
3255 * latency for short waits and not to suck up all the CPU time
3256 * on longer waits.
3257 *
3258 * After 500ms, keep_alive() is invoked
3259 */
3260 int target_wait_state(struct target *target, enum target_state state, int ms)
3261 {
3262 int retval;
3263 int64_t then = 0, cur;
3264 bool once = true;
3265
3266 for (;;) {
3267 retval = target_poll(target);
3268 if (retval != ERROR_OK)
3269 return retval;
3270 if (target->state == state)
3271 break;
3272 cur = timeval_ms();
3273 if (once) {
3274 once = false;
3275 then = timeval_ms();
3276 LOG_DEBUG("waiting for target %s...",
3277 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3278 }
3279
3280 if (cur-then > 500)
3281 keep_alive();
3282
3283 if ((cur-then) > ms) {
3284 LOG_ERROR("timed out while waiting for target %s",
3285 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3286 return ERROR_FAIL;
3287 }
3288 }
3289
3290 return ERROR_OK;
3291 }
3292
3293 COMMAND_HANDLER(handle_halt_command)
3294 {
3295 LOG_DEBUG("-");
3296
3297 struct target *target = get_current_target(CMD_CTX);
3298
3299 target->verbose_halt_msg = true;
3300
3301 int retval = target_halt(target);
3302 if (retval != ERROR_OK)
3303 return retval;
3304
3305 if (CMD_ARGC == 1) {
3306 unsigned wait_local;
3307 retval = parse_uint(CMD_ARGV[0], &wait_local);
3308 if (retval != ERROR_OK)
3309 return ERROR_COMMAND_SYNTAX_ERROR;
3310 if (!wait_local)
3311 return ERROR_OK;
3312 }
3313
3314 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3315 }
3316
3317 COMMAND_HANDLER(handle_soft_reset_halt_command)
3318 {
3319 struct target *target = get_current_target(CMD_CTX);
3320
3321 LOG_USER("requesting target halt and executing a soft reset");
3322
3323 target_soft_reset_halt(target);
3324
3325 return ERROR_OK;
3326 }
3327
3328 COMMAND_HANDLER(handle_reset_command)
3329 {
3330 if (CMD_ARGC > 1)
3331 return ERROR_COMMAND_SYNTAX_ERROR;
3332
3333 enum target_reset_mode reset_mode = RESET_RUN;
3334 if (CMD_ARGC == 1) {
3335 const struct jim_nvp *n;
3336 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3337 if ((!n->name) || (n->value == RESET_UNKNOWN))
3338 return ERROR_COMMAND_SYNTAX_ERROR;
3339 reset_mode = n->value;
3340 }
3341
3342 /* reset *all* targets */
3343 return target_process_reset(CMD, reset_mode);
3344 }
3345
3346
3347 COMMAND_HANDLER(handle_resume_command)
3348 {
3349 int current = 1;
3350 if (CMD_ARGC > 1)
3351 return ERROR_COMMAND_SYNTAX_ERROR;
3352
3353 struct target *target = get_current_target(CMD_CTX);
3354
3355 /* with no CMD_ARGV, resume from current pc, addr = 0,
3356 * with one arguments, addr = CMD_ARGV[0],
3357 * handle breakpoints, not debugging */
3358 target_addr_t addr = 0;
3359 if (CMD_ARGC == 1) {
3360 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3361 current = 0;
3362 }
3363
3364 return target_resume(target, current, addr, 1, 0);
3365 }
3366
3367 COMMAND_HANDLER(handle_step_command)
3368 {
3369 if (CMD_ARGC > 1)
3370 return ERROR_COMMAND_SYNTAX_ERROR;
3371
3372 LOG_DEBUG("-");
3373
3374 /* with no CMD_ARGV, step from current pc, addr = 0,
3375 * with one argument addr = CMD_ARGV[0],
3376 * handle breakpoints, debugging */
3377 target_addr_t addr = 0;
3378 int current_pc = 1;
3379 if (CMD_ARGC == 1) {
3380 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3381 current_pc = 0;
3382 }
3383
3384 struct target *target = get_current_target(CMD_CTX);
3385
3386 return target_step(target, current_pc, addr, 1);
3387 }
3388
/* Pretty-print a buffer of words previously read from target memory.
 *
 * @param cmd     command invocation used for printing
 * @param target  target the data came from (determines byte order)
 * @param address address the buffer was read from (used for line labels)
 * @param size    word size in bytes: 1, 2, 4 or 8 (caller-validated)
 * @param count   number of words in @a buffer
 * @param buffer  raw bytes in target byte order, count * size bytes long
 */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	/* 32 bytes of data per printed line, regardless of word size */
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	/* pick a fixed-width hex format matching the word size */
	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* start of a new line: emit the address label */
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		/* decode one word, honoring the target's endianness */
		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush the line at end of row or end of buffer */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3452
3453 COMMAND_HANDLER(handle_md_command)
3454 {
3455 if (CMD_ARGC < 1)
3456 return ERROR_COMMAND_SYNTAX_ERROR;
3457
3458 unsigned size = 0;
3459 switch (CMD_NAME[2]) {
3460 case 'd':
3461 size = 8;
3462 break;
3463 case 'w':
3464 size = 4;
3465 break;
3466 case 'h':
3467 size = 2;
3468 break;
3469 case 'b':
3470 size = 1;
3471 break;
3472 default:
3473 return ERROR_COMMAND_SYNTAX_ERROR;
3474 }
3475
3476 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3477 int (*fn)(struct target *target,
3478 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3479 if (physical) {
3480 CMD_ARGC--;
3481 CMD_ARGV++;
3482 fn = target_read_phys_memory;
3483 } else
3484 fn = target_read_memory;
3485 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3486 return ERROR_COMMAND_SYNTAX_ERROR;
3487
3488 target_addr_t address;
3489 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3490
3491 unsigned count = 1;
3492 if (CMD_ARGC == 2)
3493 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3494
3495 uint8_t *buffer = calloc(count, size);
3496 if (!buffer) {
3497 LOG_ERROR("Failed to allocate md read buffer");
3498 return ERROR_FAIL;
3499 }
3500
3501 struct target *target = get_current_target(CMD_CTX);
3502 int retval = fn(target, address, size, count, buffer);
3503 if (retval == ERROR_OK)
3504 target_handle_md_output(CMD, target, address, size, count, buffer);
3505
3506 free(buffer);
3507
3508 return retval;
3509 }
3510
/* Signature shared by target_write_memory() and target_write_phys_memory(),
 * letting the mw/fill commands select the backend at run time. */
typedef int (*target_write_fn)(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3513
3514 static int target_fill_mem(struct target *target,
3515 target_addr_t address,
3516 target_write_fn fn,
3517 unsigned data_size,
3518 /* value */
3519 uint64_t b,
3520 /* count */
3521 unsigned c)
3522 {
3523 /* We have to write in reasonably large chunks to be able
3524 * to fill large memory areas with any sane speed */
3525 const unsigned chunk_size = 16384;
3526 uint8_t *target_buf = malloc(chunk_size * data_size);
3527 if (!target_buf) {
3528 LOG_ERROR("Out of memory");
3529 return ERROR_FAIL;
3530 }
3531
3532 for (unsigned i = 0; i < chunk_size; i++) {
3533 switch (data_size) {
3534 case 8:
3535 target_buffer_set_u64(target, target_buf + i * data_size, b);
3536 break;
3537 case 4:
3538 target_buffer_set_u32(target, target_buf + i * data_size, b);
3539 break;
3540 case 2:
3541 target_buffer_set_u16(target, target_buf + i * data_size, b);
3542 break;
3543 case 1:
3544 target_buffer_set_u8(target, target_buf + i * data_size, b);
3545 break;
3546 default:
3547 exit(-1);
3548 }
3549 }
3550
3551 int retval = ERROR_OK;
3552
3553 for (unsigned x = 0; x < c; x += chunk_size) {
3554 unsigned current;
3555 current = c - x;
3556 if (current > chunk_size)
3557 current = chunk_size;
3558 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3559 if (retval != ERROR_OK)
3560 break;
3561 /* avoid GDB timeouts */
3562 keep_alive();
3563 }
3564 free(target_buf);
3565
3566 return retval;
3567 }
3568
3569
3570 COMMAND_HANDLER(handle_mw_command)
3571 {
3572 if (CMD_ARGC < 2)
3573 return ERROR_COMMAND_SYNTAX_ERROR;
3574 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3575 target_write_fn fn;
3576 if (physical) {
3577 CMD_ARGC--;
3578 CMD_ARGV++;
3579 fn = target_write_phys_memory;
3580 } else
3581 fn = target_write_memory;
3582 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3583 return ERROR_COMMAND_SYNTAX_ERROR;
3584
3585 target_addr_t address;
3586 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3587
3588 uint64_t value;
3589 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3590
3591 unsigned count = 1;
3592 if (CMD_ARGC == 3)
3593 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3594
3595 struct target *target = get_current_target(CMD_CTX);
3596 unsigned wordsize;
3597 switch (CMD_NAME[2]) {
3598 case 'd':
3599 wordsize = 8;
3600 break;
3601 case 'w':
3602 wordsize = 4;
3603 break;
3604 case 'h':
3605 wordsize = 2;
3606 break;
3607 case 'b':
3608 wordsize = 1;
3609 break;
3610 default:
3611 return ERROR_COMMAND_SYNTAX_ERROR;
3612 }
3613
3614 return target_fill_mem(target, address, fn, wordsize, value, count);
3615 }
3616
3617 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3618 target_addr_t *min_address, target_addr_t *max_address)
3619 {
3620 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3621 return ERROR_COMMAND_SYNTAX_ERROR;
3622
3623 /* a base address isn't always necessary,
3624 * default to 0x0 (i.e. don't relocate) */
3625 if (CMD_ARGC >= 2) {
3626 target_addr_t addr;
3627 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3628 image->base_address = addr;
3629 image->base_address_set = true;
3630 } else
3631 image->base_address_set = false;
3632
3633 image->start_address_set = false;
3634
3635 if (CMD_ARGC >= 4)
3636 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3637 if (CMD_ARGC == 5) {
3638 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3639 /* use size (given) to find max (required) */
3640 *max_address += *min_address;
3641 }
3642
3643 if (*min_address > *max_address)
3644 return ERROR_COMMAND_SYNTAX_ERROR;
3645
3646 return ERROR_OK;
3647 }
3648
/* 'load_image' command: copy the sections of an image file into target
 * memory, optionally relocated and clipped to the [min_address, max_address)
 * window parsed by parse_load_image_command(). */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* write only the part of the section that overlaps the
		 * [min_address, max_address) window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above the window's end */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3732
3733 COMMAND_HANDLER(handle_dump_image_command)
3734 {
3735 struct fileio *fileio;
3736 uint8_t *buffer;
3737 int retval, retvaltemp;
3738 target_addr_t address, size;
3739 struct duration bench;
3740 struct target *target = get_current_target(CMD_CTX);
3741
3742 if (CMD_ARGC != 3)
3743 return ERROR_COMMAND_SYNTAX_ERROR;
3744
3745 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3746 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3747
3748 uint32_t buf_size = (size > 4096) ? 4096 : size;
3749 buffer = malloc(buf_size);
3750 if (!buffer)
3751 return ERROR_FAIL;
3752
3753 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3754 if (retval != ERROR_OK) {
3755 free(buffer);
3756 return retval;
3757 }
3758
3759 duration_start(&bench);
3760
3761 while (size > 0) {
3762 size_t size_written;
3763 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3764 retval = target_read_buffer(target, address, this_run_size, buffer);
3765 if (retval != ERROR_OK)
3766 break;
3767
3768 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3769 if (retval != ERROR_OK)
3770 break;
3771
3772 size -= this_run_size;
3773 address += this_run_size;
3774 }
3775
3776 free(buffer);
3777
3778 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3779 size_t filesize;
3780 retval = fileio_size(fileio, &filesize);
3781 if (retval != ERROR_OK)
3782 return retval;
3783 command_print(CMD,
3784 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3785 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3786 }
3787
3788 retvaltemp = fileio_close(fileio);
3789 if (retvaltemp != ERROR_OK)
3790 return retvaltemp;
3791
3792 return retval;
3793 }
3794
/* How thoroughly the verify_image-family commands compare image vs. memory. */
enum verify_mode {
	IMAGE_TEST = 0,		/* print section info only, no comparison */
	IMAGE_VERIFY = 1,	/* CRC check, byte-wise compare on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC check only; any mismatch is fatal */
};
3800
3801 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3802 {
3803 uint8_t *buffer;
3804 size_t buf_cnt;
3805 uint32_t image_size;
3806 int retval;
3807 uint32_t checksum = 0;
3808 uint32_t mem_checksum = 0;
3809
3810 struct image image;
3811
3812 struct target *target = get_current_target(CMD_CTX);
3813
3814 if (CMD_ARGC < 1)
3815 return ERROR_COMMAND_SYNTAX_ERROR;
3816
3817 if (!target) {
3818 LOG_ERROR("no target selected");
3819 return ERROR_FAIL;
3820 }
3821
3822 struct duration bench;
3823 duration_start(&bench);
3824
3825 if (CMD_ARGC >= 2) {
3826 target_addr_t addr;
3827 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3828 image.base_address = addr;
3829 image.base_address_set = true;
3830 } else {
3831 image.base_address_set = false;
3832 image.base_address = 0x0;
3833 }
3834
3835 image.start_address_set = false;
3836
3837 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3838 if (retval != ERROR_OK)
3839 return retval;
3840
3841 image_size = 0x0;
3842 int diffs = 0;
3843 retval = ERROR_OK;
3844 for (unsigned int i = 0; i < image.num_sections; i++) {
3845 buffer = malloc(image.sections[i].size);
3846 if (!buffer) {
3847 command_print(CMD,
3848 "error allocating buffer for section (%" PRIu32 " bytes)",
3849 image.sections[i].size);
3850 break;
3851 }
3852 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3853 if (retval != ERROR_OK) {
3854 free(buffer);
3855 break;
3856 }
3857
3858 if (verify >= IMAGE_VERIFY) {
3859 /* calculate checksum of image */
3860 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3861 if (retval != ERROR_OK) {
3862 free(buffer);
3863 break;
3864 }
3865
3866 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3867 if (retval != ERROR_OK) {
3868 free(buffer);
3869 break;
3870 }
3871 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3872 LOG_ERROR("checksum mismatch");
3873 free(buffer);
3874 retval = ERROR_FAIL;
3875 goto done;
3876 }
3877 if (checksum != mem_checksum) {
3878 /* failed crc checksum, fall back to a binary compare */
3879 uint8_t *data;
3880
3881 if (diffs == 0)
3882 LOG_ERROR("checksum mismatch - attempting binary compare");
3883
3884 data = malloc(buf_cnt);
3885
3886 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3887 if (retval == ERROR_OK) {
3888 uint32_t t;
3889 for (t = 0; t < buf_cnt; t++) {
3890 if (data[t] != buffer[t]) {
3891 command_print(CMD,
3892 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3893 diffs,
3894 (unsigned)(t + image.sections[i].base_address),
3895 data[t],
3896 buffer[t]);
3897 if (diffs++ >= 127) {
3898 command_print(CMD, "More than 128 errors, the rest are not printed.");
3899 free(data);
3900 free(buffer);
3901 goto done;
3902 }
3903 }
3904 keep_alive();
3905 }
3906 }
3907 free(data);
3908 }
3909 } else {
3910 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3911 image.sections[i].base_address,
3912 buf_cnt);
3913 }
3914
3915 free(buffer);
3916 image_size += buf_cnt;
3917 }
3918 if (diffs > 0)
3919 command_print(CMD, "No more differences found.");
3920 done:
3921 if (diffs > 0)
3922 retval = ERROR_FAIL;
3923 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3924 command_print(CMD, "verified %" PRIu32 " bytes "
3925 "in %fs (%0.3f KiB/s)", image_size,
3926 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3927 }
3928
3929 image_close(&image);
3930
3931 return retval;
3932 }
3933
/* 'verify_image_checksum' command: CRC-compare only; any mismatch fails. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3938
/* 'verify_image' command: CRC-compare with byte-wise fallback on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3943
/* 'test_image' command: parse the image and list its sections, no compare. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3948
3949 static int handle_bp_command_list(struct command_invocation *cmd)
3950 {
3951 struct target *target = get_current_target(cmd->ctx);
3952 struct breakpoint *breakpoint = target->breakpoints;
3953 while (breakpoint) {
3954 if (breakpoint->type == BKPT_SOFT) {
3955 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3956 breakpoint->length);
3957 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3958 breakpoint->address,
3959 breakpoint->length,
3960 breakpoint->set, buf);
3961 free(buf);
3962 } else {
3963 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3964 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3965 breakpoint->asid,
3966 breakpoint->length, breakpoint->set);
3967 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3968 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3969 breakpoint->address,
3970 breakpoint->length, breakpoint->set);
3971 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3972 breakpoint->asid);
3973 } else
3974 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3975 breakpoint->address,
3976 breakpoint->length, breakpoint->set);
3977 }
3978
3979 breakpoint = breakpoint->next;
3980 }
3981 return ERROR_OK;
3982 }
3983
3984 static int handle_bp_command_set(struct command_invocation *cmd,
3985 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3986 {
3987 struct target *target = get_current_target(cmd->ctx);
3988 int retval;
3989
3990 if (asid == 0) {
3991 retval = breakpoint_add(target, addr, length, hw);
3992 /* error is always logged in breakpoint_add(), do not print it again */
3993 if (retval == ERROR_OK)
3994 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3995
3996 } else if (addr == 0) {
3997 if (!target->type->add_context_breakpoint) {
3998 LOG_ERROR("Context breakpoint not available");
3999 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4000 }
4001 retval = context_breakpoint_add(target, asid, length, hw);
4002 /* error is always logged in context_breakpoint_add(), do not print it again */
4003 if (retval == ERROR_OK)
4004 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4005
4006 } else {
4007 if (!target->type->add_hybrid_breakpoint) {
4008 LOG_ERROR("Hybrid breakpoint not available");
4009 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4010 }
4011 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4012 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4013 if (retval == ERROR_OK)
4014 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4015 }
4016 return retval;
4017 }
4018
/* 'bp' command: with no arguments list breakpoints; otherwise set one.
 * Accepted forms:
 *   bp <address> <length>              software breakpoint
 *   bp <address> <length> hw           hardware breakpoint
 *   bp <asid> <length> hw_ctx          context (ASID-matched) breakpoint
 *   bp <address> <asid> <length> hw    hybrid breakpoint
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* three args without a recognized keyword fall through and are
		 * treated like the four-arg hybrid form */
		/* fallthrough */
	case 4:
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4062
4063 COMMAND_HANDLER(handle_rbp_command)
4064 {
4065 if (CMD_ARGC != 1)
4066 return ERROR_COMMAND_SYNTAX_ERROR;
4067
4068 struct target *target = get_current_target(CMD_CTX);
4069
4070 if (!strcmp(CMD_ARGV[0], "all")) {
4071 breakpoint_remove_all(target);
4072 } else {
4073 target_addr_t addr;
4074 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4075
4076 breakpoint_remove(target, addr);
4077 }
4078
4079 return ERROR_OK;
4080 }
4081
4082 COMMAND_HANDLER(handle_wp_command)
4083 {
4084 struct target *target = get_current_target(CMD_CTX);
4085
4086 if (CMD_ARGC == 0) {
4087 struct watchpoint *watchpoint = target->watchpoints;
4088
4089 while (watchpoint) {
4090 command_print(CMD, "address: " TARGET_ADDR_FMT
4091 ", len: 0x%8.8" PRIx32
4092 ", r/w/a: %i, value: 0x%8.8" PRIx32
4093 ", mask: 0x%8.8" PRIx32,
4094 watchpoint->address,
4095 watchpoint->length,
4096 (int)watchpoint->rw,
4097 watchpoint->value,
4098 watchpoint->mask);
4099 watchpoint = watchpoint->next;
4100 }
4101 return ERROR_OK;
4102 }
4103
4104 enum watchpoint_rw type = WPT_ACCESS;
4105 target_addr_t addr = 0;
4106 uint32_t length = 0;
4107 uint32_t data_value = 0x0;
4108 uint32_t data_mask = 0xffffffff;
4109
4110 switch (CMD_ARGC) {
4111 case 5:
4112 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4113 /* fall through */
4114 case 4:
4115 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4116 /* fall through */
4117 case 3:
4118 switch (CMD_ARGV[2][0]) {
4119 case 'r':
4120 type = WPT_READ;
4121 break;
4122 case 'w':
4123 type = WPT_WRITE;
4124 break;
4125 case 'a':
4126 type = WPT_ACCESS;
4127 break;
4128 default:
4129 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4130 return ERROR_COMMAND_SYNTAX_ERROR;
4131 }
4132 /* fall through */
4133 case 2:
4134 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4135 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4136 break;
4137
4138 default:
4139 return ERROR_COMMAND_SYNTAX_ERROR;
4140 }
4141
4142 int retval = watchpoint_add(target, addr, length, type,
4143 data_value, data_mask);
4144 if (retval != ERROR_OK)
4145 LOG_ERROR("Failure setting watchpoints");
4146
4147 return retval;
4148 }
4149
4150 COMMAND_HANDLER(handle_rwp_command)
4151 {
4152 if (CMD_ARGC != 1)
4153 return ERROR_COMMAND_SYNTAX_ERROR;
4154
4155 target_addr_t addr;
4156 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4157
4158 struct target *target = get_current_target(CMD_CTX);
4159 watchpoint_remove(target, addr);
4160
4161 return ERROR_OK;
4162 }
4163
4164 /**
4165 * Translate a virtual address to a physical address.
4166 *
4167 * The low-level target implementation must have logged a detailed error
4168 * which is forwarded to telnet/GDB session.
4169 */
4170 COMMAND_HANDLER(handle_virt2phys_command)
4171 {
4172 if (CMD_ARGC != 1)
4173 return ERROR_COMMAND_SYNTAX_ERROR;
4174
4175 target_addr_t va;
4176 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4177 target_addr_t pa;
4178
4179 struct target *target = get_current_target(CMD_CTX);
4180 int retval = target->type->virt2phys(target, va, &pa);
4181 if (retval == ERROR_OK)
4182 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4183
4184 return retval;
4185 }
4186
/* Write 'len' bytes to 'f', logging (but not propagating) short writes. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4193
/* Write a 32-bit value to 'f' in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4201
/* Write a string to 'f' without its terminating NUL. */
static void write_string(FILE *f, char *s)
{
	size_t len = strlen(s);
	write_data(f, s, len);
}
4206
4207 typedef unsigned char UNIT[2]; /* unit of profiling */
4208
4209 /* Dump a gmon.out histogram file. */
4210 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4211 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4212 {
4213 uint32_t i;
4214 FILE *f = fopen(filename, "w");
4215 if (!f)
4216 return;
4217 write_string(f, "gmon");
4218 write_long(f, 0x00000001, target); /* Version */
4219 write_long(f, 0, target); /* padding */
4220 write_long(f, 0, target); /* padding */
4221 write_long(f, 0, target); /* padding */
4222
4223 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4224 write_data(f, &zero, 1);
4225
4226 /* figure out bucket size */
4227 uint32_t min;
4228 uint32_t max;
4229 if (with_range) {
4230 min = start_address;
4231 max = end_address;
4232 } else {
4233 min = samples[0];
4234 max = samples[0];
4235 for (i = 0; i < sample_num; i++) {
4236 if (min > samples[i])
4237 min = samples[i];
4238 if (max < samples[i])
4239 max = samples[i];
4240 }
4241
4242 /* max should be (largest sample + 1)
4243 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4244 max++;
4245 }
4246
4247 int address_space = max - min;
4248 assert(address_space >= 2);
4249
4250 /* FIXME: What is the reasonable number of buckets?
4251 * The profiling result will be more accurate if there are enough buckets. */
4252 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4253 uint32_t num_buckets = address_space / sizeof(UNIT);
4254 if (num_buckets > max_buckets)
4255 num_buckets = max_buckets;
4256 int *buckets = malloc(sizeof(int) * num_buckets);
4257 if (!buckets) {
4258 fclose(f);
4259 return;
4260 }
4261 memset(buckets, 0, sizeof(int) * num_buckets);
4262 for (i = 0; i < sample_num; i++) {
4263 uint32_t address = samples[i];
4264
4265 if ((address < min) || (max <= address))
4266 continue;
4267
4268 long long a = address - min;
4269 long long b = num_buckets;
4270 long long c = address_space;
4271 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4272 buckets[index_t]++;
4273 }
4274
4275 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4276 write_long(f, min, target); /* low_pc */
4277 write_long(f, max, target); /* high_pc */
4278 write_long(f, num_buckets, target); /* # of buckets */
4279 float sample_rate = sample_num / (duration_ms / 1000.0);
4280 write_long(f, sample_rate, target);
4281 write_string(f, "seconds");
4282 for (i = 0; i < (15-strlen("seconds")); i++)
4283 write_data(f, &zero, 1);
4284 write_string(f, "s");
4285
4286 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4287
4288 char *data = malloc(2 * num_buckets);
4289 if (data) {
4290 for (i = 0; i < num_buckets; i++) {
4291 int val;
4292 val = buckets[i];
4293 if (val > 65535)
4294 val = 65535;
4295 data[i * 2] = val&0xff;
4296 data[i * 2 + 1] = (val >> 8) & 0xff;
4297 }
4298 free(buckets);
4299 write_data(f, data, num_buckets * 2);
4300 free(data);
4301 } else
4302 free(buckets);
4303
4304 fclose(f);
4305 }
4306
4307 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4308 * which will be used as a random sampling of PC */
4309 COMMAND_HANDLER(handle_profile_command)
4310 {
4311 struct target *target = get_current_target(CMD_CTX);
4312
4313 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4314 return ERROR_COMMAND_SYNTAX_ERROR;
4315
4316 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4317 uint32_t offset;
4318 uint32_t num_of_samples;
4319 int retval = ERROR_OK;
4320 bool halted_before_profiling = target->state == TARGET_HALTED;
4321
4322 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4323
4324 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4325 if (!samples) {
4326 LOG_ERROR("No memory to store samples.");
4327 return ERROR_FAIL;
4328 }
4329
4330 uint64_t timestart_ms = timeval_ms();
4331 /**
4332 * Some cores let us sample the PC without the
4333 * annoying halt/resume step; for example, ARMv7 PCSR.
4334 * Provide a way to use that more efficient mechanism.
4335 */
4336 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4337 &num_of_samples, offset);
4338 if (retval != ERROR_OK) {
4339 free(samples);
4340 return retval;
4341 }
4342 uint32_t duration_ms = timeval_ms() - timestart_ms;
4343
4344 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4345
4346 retval = target_poll(target);
4347 if (retval != ERROR_OK) {
4348 free(samples);
4349 return retval;
4350 }
4351
4352 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4353 /* The target was halted before we started and is running now. Halt it,
4354 * for consistency. */
4355 retval = target_halt(target);
4356 if (retval != ERROR_OK) {
4357 free(samples);
4358 return retval;
4359 }
4360 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4361 /* The target was running before we started and is halted now. Resume
4362 * it, for consistency. */
4363 retval = target_resume(target, 1, 0, 0, 0);
4364 if (retval != ERROR_OK) {
4365 free(samples);
4366 return retval;
4367 }
4368 }
4369
4370 retval = target_poll(target);
4371 if (retval != ERROR_OK) {
4372 free(samples);
4373 return retval;
4374 }
4375
4376 uint32_t start_address = 0;
4377 uint32_t end_address = 0;
4378 bool with_range = false;
4379 if (CMD_ARGC == 4) {
4380 with_range = true;
4381 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4382 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4383 }
4384
4385 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4386 with_range, start_address, end_address, target, duration_ms);
4387 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4388
4389 free(samples);
4390 return retval;
4391 }
4392
4393 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4394 {
4395 char *namebuf;
4396 Jim_Obj *obj_name, *obj_val;
4397 int result;
4398
4399 namebuf = alloc_printf("%s(%d)", varname, idx);
4400 if (!namebuf)
4401 return JIM_ERR;
4402
4403 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4404 jim_wide wide_val = val;
4405 obj_val = Jim_NewWideObj(interp, wide_val);
4406 if (!obj_name || !obj_val) {
4407 free(namebuf);
4408 return JIM_ERR;
4409 }
4410
4411 Jim_IncrRefCount(obj_name);
4412 Jim_IncrRefCount(obj_val);
4413 result = Jim_SetVariable(interp, obj_name, obj_val);
4414 Jim_DecrRefCount(interp, obj_name);
4415 Jim_DecrRefCount(interp, obj_val);
4416 free(namebuf);
4417 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4418 return result;
4419 }
4420
4421 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4422 {
4423 struct command_context *context;
4424 struct target *target;
4425
4426 context = current_command_context(interp);
4427 assert(context);
4428
4429 target = get_current_target(context);
4430 if (!target) {
4431 LOG_ERROR("mem2array: no current target");
4432 return JIM_ERR;
4433 }
4434
4435 return target_mem2array(interp, target, argc - 1, argv + 1);
4436 }
4437
/**
 * Body of the Tcl 'mem2array' command.
 *
 * Reads elements of the given bit width from target memory starting at
 * the given address, and stores them into the Tcl array variable named
 * by argv[0], one element per index starting at 0.  Reads are chunked
 * through a 4 KiB bounce buffer and byte-swapped per the target's
 * endianness.
 *
 * Returns JIM_OK on success; JIM_ERR with a message in the interp
 * result on bad arguments or read failure.
 */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
			width_bits != 16 &&
			width_bits != 32 &&
			width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	/* element width in bytes from here on */
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys — read via the physical address space */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* unsigned wrap check: reject reads whose end address overflows
	 * target_addr_t */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* address must be naturally aligned for the element width */
	if ((width == 1) ||
		((width == 2) && ((addr & 1) == 0)) ||
		((width == 4) && ((addr & 3) == 0)) ||
		((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					addr,
					width,
					chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* unpack each element per target byte order and store it
			 * into the Tcl array */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4593
4594 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4595 {
4596 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4597 if (!namebuf)
4598 return JIM_ERR;
4599
4600 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4601 if (!obj_name) {
4602 free(namebuf);
4603 return JIM_ERR;
4604 }
4605
4606 Jim_IncrRefCount(obj_name);
4607 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4608 Jim_DecrRefCount(interp, obj_name);
4609 free(namebuf);
4610 if (!obj_val)
4611 return JIM_ERR;
4612
4613 jim_wide wide_val;
4614 int result = Jim_GetWide(interp, obj_val, &wide_val);
4615 *val = wide_val;
4616 return result;
4617 }
4618
4619 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4620 {
4621 struct command_context *context;
4622 struct target *target;
4623
4624 context = current_command_context(interp);
4625 assert(context);
4626
4627 target = get_current_target(context);
4628 if (!target) {
4629 LOG_ERROR("array2mem: no current target");
4630 return JIM_ERR;
4631 }
4632
4633 return target_array2mem(interp, target, argc-1, argv + 1);
4634 }
4635
4636 static int target_array2mem(Jim_Interp *interp, struct target *target,
4637 int argc, Jim_Obj *const *argv)
4638 {
4639 int e;
4640
4641 /* argv[0] = name of array from which to read the data
4642 * argv[1] = desired element width in bits
4643 * argv[2] = memory address
4644 * argv[3] = number of elements to write
4645 * argv[4] = optional "phys"
4646 */
4647 if (argc < 4 || argc > 5) {
4648 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4649 return JIM_ERR;
4650 }
4651
4652 /* Arg 0: Name of the array variable */
4653 const char *varname = Jim_GetString(argv[0], NULL);
4654
4655 /* Arg 1: Bit width of one element */
4656 long l;
4657 e = Jim_GetLong(interp, argv[1], &l);
4658 if (e != JIM_OK)
4659 return e;
4660 const unsigned int width_bits = l;
4661
4662 if (width_bits != 8 &&
4663 width_bits != 16 &&
4664 width_bits != 32 &&
4665 width_bits != 64) {
4666 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4667 Jim_AppendStrings(interp, Jim_GetResult(interp),
4668 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4669 return JIM_ERR;
4670 }
4671 const unsigned int width = width_bits / 8;
4672
4673 /* Arg 2: Memory address */
4674 jim_wide wide_addr;
4675 e = Jim_GetWide(interp, argv[2], &wide_addr);
4676 if (e != JIM_OK)
4677 return e;
4678 target_addr_t addr = (target_addr_t)wide_addr;
4679
4680 /* Arg 3: Number of elements to write */
4681 e = Jim_GetLong(interp, argv[3], &l);
4682 if (e != JIM_OK)
4683 return e;
4684 size_t len = l;
4685
4686 /* Arg 4: Phys */
4687 bool is_phys = false;
4688 if (argc > 4) {
4689 int str_len = 0;
4690 const char *phys = Jim_GetString(argv[4], &str_len);
4691 if (!strncmp(phys, "phys", str_len))
4692 is_phys = true;
4693 else
4694 return JIM_ERR;
4695 }
4696
4697 /* Argument checks */
4698 if (len == 0) {
4699 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4700 Jim_AppendStrings(interp, Jim_GetResult(interp),
4701 "array2mem: zero width read?", NULL);
4702 return JIM_ERR;
4703 }
4704
4705 if ((addr + (len * width)) < addr) {
4706 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4707 Jim_AppendStrings(interp, Jim_GetResult(interp),
4708 "array2mem: addr + len - wraps to zero?", NULL);
4709 return JIM_ERR;
4710 }
4711
4712 if (len > 65536) {
4713 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4714 Jim_AppendStrings(interp, Jim_GetResult(interp),
4715 "array2mem: too large memory write request, exceeds 64K items", NULL);
4716 return JIM_ERR;
4717 }
4718
4719 if ((width == 1) ||
4720 ((width == 2) && ((addr & 1) == 0)) ||
4721 ((width == 4) && ((addr & 3) == 0)) ||
4722 ((width == 8) && ((addr & 7) == 0))) {
4723 /* alignment correct */
4724 } else {
4725 char buf[100];
4726 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4727 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4728 addr,
4729 width);
4730 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4731 return JIM_ERR;
4732 }
4733
4734 /* Transfer loop */
4735
4736 /* assume ok */
4737 e = JIM_OK;
4738
4739 const size_t buffersize = 4096;
4740 uint8_t *buffer = malloc(buffersize);
4741 if (!buffer)
4742 return JIM_ERR;
4743
4744 /* index counter */
4745 size_t idx = 0;
4746
4747 while (len) {
4748 /* Slurp... in buffer size chunks */
4749 const unsigned int max_chunk_len = buffersize / width;
4750
4751 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4752
4753 /* Fill the buffer */
4754 for (size_t i = 0; i < chunk_len; i++, idx++) {
4755 uint64_t v = 0;
4756 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4757 free(buffer);
4758 return JIM_ERR;
4759 }
4760 switch (width) {
4761 case 8:
4762 target_buffer_set_u64(target, &buffer[i * width], v);
4763 break;
4764 case 4:
4765 target_buffer_set_u32(target, &buffer[i * width], v);
4766 break;
4767 case 2:
4768 target_buffer_set_u16(target, &buffer[i * width], v);
4769 break;
4770 case 1:
4771 buffer[i] = v & 0x0ff;
4772 break;
4773 }
4774 }
4775 len -= chunk_len;
4776
4777 /* Write the buffer to memory */
4778 int retval;
4779 if (is_phys)
4780 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4781 else
4782 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4783 if (retval != ERROR_OK) {
4784 /* BOO !*/
4785 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4786 addr,
4787 width,
4788 chunk_len);
4789 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4790 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4791 e = JIM_ERR;
4792 break;
4793 }
4794 addr += chunk_len * width;
4795 }
4796
4797 free(buffer);
4798
4799 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4800
4801 return e;
4802 }
4803
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/**
 * Run every Tcl handler body registered on 'target' for event 'e'.
 *
 * While a handler runs, the command context's current target is
 * overridden to 'target' (many event scripts depend on this) and
 * restored afterwards.  Handler errors are reported to the user but
 * not propagated, with one exception: ERROR_COMMAND_CLOSE_CONNECTION
 * aborts processing of any remaining handlers for this event.
 */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					target->target_number,
					target_name(target),
					target_type_name(target),
					e,
					target_event_name(e),
					Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			/* NOTE(review): retval holds a Jim status here, but is also
			 * compared against OpenOCD's ERROR_COMMAND_CLOSE_CONNECTION */
			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* unwrap a 'return' inside the handler body to its real code */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						target_event_name(e),
						target_name(target),
						Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
4852
4853 /**
4854 * Returns true only if the target has a handler for the specified event.
4855 */
4856 bool target_has_event_action(struct target *target, enum target_event event)
4857 {
4858 struct target_event_action *teap;
4859
4860 for (teap = target->event_action; teap; teap = teap->next) {
4861 if (teap->event == event)
4862 return true;
4863 }
4864 return false;
4865 }
4866
/* Option keys handled by target_configure(); each value pairs with a
 * "-name" string in nvp_config_opts[]. */
enum target_cfg_param {
	TCFG_TYPE,              /* -type (read-only) */
	TCFG_EVENT,             /* -event <name> [<body>] */
	TCFG_WORK_AREA_VIRT,    /* -work-area-virt */
	TCFG_WORK_AREA_PHYS,    /* -work-area-phys */
	TCFG_WORK_AREA_SIZE,    /* -work-area-size */
	TCFG_WORK_AREA_BACKUP,  /* -work-area-backup (0/1) */
	TCFG_ENDIAN,            /* -endian */
	TCFG_COREID,            /* -coreid */
	TCFG_CHAIN_POSITION,    /* -chain-position (JTAG tap) */
	TCFG_DBGBASE,           /* -dbgbase */
	TCFG_RTOS,              /* -rtos */
	TCFG_DEFER_EXAMINE,     /* -defer-examine (flag, no value) */
	TCFG_GDB_PORT,          /* -gdb-port (config stage only) */
	TCFG_GDB_MAX_CONNECTIONS, /* -gdb-max-connections (config stage only) */
};
4883
/* Name/value table mapping "-option" strings to target_cfg_param keys;
 * terminated by a NULL-name entry as the jim_nvp helpers require. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",             .value = TCFG_TYPE },
	{ .name = "-event",            .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",           .value = TCFG_ENDIAN },
	{ .name = "-coreid",           .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",          .value = TCFG_DBGBASE },
	{ .name = "-rtos",             .value = TCFG_RTOS },
	{ .name = "-defer-examine",    .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",         .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
4901
/**
 * Process 'target configure'/'cget' style options from 'goi'.
 *
 * Consumes option (and, when configuring, value) tokens until none
 * remain.  When goi->isconfigure is true the options are applied to
 * 'target'; in 'cget' mode the current value is left in the Jim result
 * instead.  The target type gets first chance at each option via its
 * optional target_jim_configure hook.
 *
 * Returns JIM_OK when all options were processed, otherwise the Jim
 * error code of the first failing option.
 */
static int target_configure(struct jim_getopt_info *goi, struct target *target)
{
	struct jim_nvp *n;
	Jim_Obj *o;
	jim_wide w;
	int e;

	/* parse config or cget options ... */
	while (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);
		/* jim_getopt_debug(goi); */

		if (target->type->target_jim_configure) {
			/* target defines a configure function */
			/* target gets first dibs on parameters */
			e = (*(target->type->target_jim_configure))(target, goi);
			if (e == JIM_OK) {
				/* more? */
				continue;
			}
			if (e == JIM_ERR) {
				/* An error */
				return e;
			}
			/* otherwise we 'continue' below */
		}
		e = jim_getopt_nvp(goi, nvp_config_opts, &n);
		if (e != JIM_OK) {
			jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
			return e;
		}
		switch (n->value) {
		case TCFG_TYPE:
			/* not settable */
			if (goi->isconfigure) {
				Jim_SetResultFormatted(goi->interp,
						"not settable: %s", n->name);
				return JIM_ERR;
			} else {
				/* shared "no extra arguments" check; other cget-only
				 * cases jump here via 'goto no_params' */
no_params:
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}
			}
			Jim_SetResultString(goi->interp,
					target_type_name(target), -1);
			/* loop for more */
			break;
		case TCFG_EVENT:
			if (goi->argc == 0) {
				Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
				return JIM_ERR;
			}

			e = jim_getopt_nvp(goi, nvp_target_event, &n);
			if (e != JIM_OK) {
				jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
				return e;
			}

			if (goi->isconfigure) {
				if (goi->argc != 1) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
					return JIM_ERR;
				}
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
					return JIM_ERR;
				}
			}

			{
				struct target_event_action *teap;

				teap = target->event_action;
				/* replace existing? */
				while (teap) {
					if (teap->event == (enum target_event)n->value)
						break;
					teap = teap->next;
				}

				if (goi->isconfigure) {
					/* START_DEPRECATED_TPIU */
					if (n->value == TARGET_EVENT_TRACE_CONFIG)
						LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
					/* END_DEPRECATED_TPIU */

					bool replace = true;
					if (!teap) {
						/* create new */
						teap = calloc(1, sizeof(*teap));
						replace = false;
					}
					teap->event = n->value;
					teap->interp = goi->interp;
					jim_getopt_obj(goi, &o);
					if (teap->body)
						Jim_DecrRefCount(teap->interp, teap->body);
					teap->body = Jim_DuplicateObj(goi->interp, o);
					/*
					 * FIXME:
					 * Tcl/TK - "tk events" have a nice feature.
					 * See the "BIND" command.
					 * We should support that here.
					 * You can specify %X and %Y in the event code.
					 * The idea is: %T - target name.
					 * The idea is: %N - target number
					 * The idea is: %E - event name.
					 */
					Jim_IncrRefCount(teap->body);

					if (!replace) {
						/* add to head of event list */
						teap->next = target->event_action;
						target->event_action = teap;
					}
					Jim_SetEmptyResult(goi->interp);
				} else {
					/* get */
					if (!teap)
						Jim_SetEmptyResult(goi->interp);
					else
						Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
				}
			}
			/* loop for more */
			break;

		case TCFG_WORK_AREA_VIRT:
			if (goi->isconfigure) {
				/* changing the work area invalidates any allocations */
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_virt = w;
				target->working_area_virt_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_PHYS:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_phys = w;
				target->working_area_phys_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_SIZE:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_size = w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_BACKUP:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* make this exactly 1 or 0 */
				target->backup_working_area = (!!w);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
			/* loop for more e*/
			break;


		case TCFG_ENDIAN:
			if (goi->isconfigure) {
				e = jim_getopt_nvp(goi, nvp_target_endian, &n);
				if (e != JIM_OK) {
					jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
					return e;
				}
				target->endianness = n->value;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			/* an unknown stored value falls back to little-endian */
			n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			if (!n->name) {
				target->endianness = TARGET_LITTLE_ENDIAN;
				n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			}
			Jim_SetResultString(goi->interp, n->name, -1);
			/* loop for more */
			break;

		case TCFG_COREID:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->coreid = (int32_t)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
			/* loop for more */
			break;

		case TCFG_CHAIN_POSITION:
			if (goi->isconfigure) {
				Jim_Obj *o_t;
				struct jtag_tap *tap;

				if (target->has_dap) {
					Jim_SetResultString(goi->interp,
						"target requires -dap parameter instead of -chain-position!", -1);
					return JIM_ERR;
				}

				target_free_all_working_areas(target);
				e = jim_getopt_obj(goi, &o_t);
				if (e != JIM_OK)
					return e;
				tap = jtag_tap_by_jim_obj(goi->interp, o_t);
				if (!tap)
					return JIM_ERR;
				target->tap = tap;
				target->tap_configured = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
			/* loop for more e*/
			break;
		case TCFG_DBGBASE:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->dbgbase = (uint32_t)w;
				target->dbgbase_set = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
			/* loop for more */
			break;
		case TCFG_RTOS:
			/* RTOS */
			{
				int result = rtos_create(goi, target);
				if (result != JIM_OK)
					return result;
			}
			/* loop for more */
			break;

		case TCFG_DEFER_EXAMINE:
			/* DEFER_EXAMINE */
			target->defer_examine = true;
			/* loop for more */
			break;

		case TCFG_GDB_PORT:
			if (goi->isconfigure) {
				/* only settable before 'init' runs */
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
					return JIM_ERR;
				}

				const char *s;
				e = jim_getopt_string(goi, &s, NULL);
				if (e != JIM_OK)
					return e;
				free(target->gdb_port_override);
				target->gdb_port_override = strdup(s);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
			/* loop for more */
			break;

		case TCFG_GDB_MAX_CONNECTIONS:
			if (goi->isconfigure) {
				/* only settable before 'init' runs */
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
					return JIM_ERR;
				}

				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
			break;
		}
	} /* while (goi->argc) */


	/* done - we return */
	return JIM_OK;
}
5238
5239 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5240 {
5241 struct command *c = jim_to_command(interp);
5242 struct jim_getopt_info goi;
5243
5244 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5245 goi.isconfigure = !strcmp(c->name, "configure");
5246 if (goi.argc < 1) {
5247 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5248 "missing: -option ...");
5249 return JIM_ERR;
5250 }
5251 struct command_context *cmd_ctx = current_command_context(interp);
5252 assert(cmd_ctx);
5253 struct target *target = get_current_target(cmd_ctx);
5254 return target_configure(&goi, target);
5255 }
5256
5257 static int jim_target_mem2array(Jim_Interp *interp,
5258 int argc, Jim_Obj *const *argv)
5259 {
5260 struct command_context *cmd_ctx = current_command_context(interp);
5261 assert(cmd_ctx);
5262 struct target *target = get_current_target(cmd_ctx);
5263 return target_mem2array(interp, target, argc - 1, argv + 1);
5264 }
5265
5266 static int jim_target_array2mem(Jim_Interp *interp,
5267 int argc, Jim_Obj *const *argv)
5268 {
5269 struct command_context *cmd_ctx = current_command_context(interp);
5270 assert(cmd_ctx);
5271 struct target *target = get_current_target(cmd_ctx);
5272 return target_array2mem(interp, target, argc - 1, argv + 1);
5273 }
5274
/* Common error result used by subcommands that require the target's TAP
 * to be enabled. Always returns JIM_ERR. */
static int jim_target_tap_disabled(Jim_Interp *interp)
{
	Jim_SetResultFormatted(interp, "[TAP is disabled]");
	return JIM_ERR;
}
5280
5281 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5282 {
5283 bool allow_defer = false;
5284
5285 struct jim_getopt_info goi;
5286 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5287 if (goi.argc > 1) {
5288 const char *cmd_name = Jim_GetString(argv[0], NULL);
5289 Jim_SetResultFormatted(goi.interp,
5290 "usage: %s ['allow-defer']", cmd_name);
5291 return JIM_ERR;
5292 }
5293 if (goi.argc > 0 &&
5294 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5295 /* consume it */
5296 Jim_Obj *obj;
5297 int e = jim_getopt_obj(&goi, &obj);
5298 if (e != JIM_OK)
5299 return e;
5300 allow_defer = true;
5301 }
5302
5303 struct command_context *cmd_ctx = current_command_context(interp);
5304 assert(cmd_ctx);
5305 struct target *target = get_current_target(cmd_ctx);
5306 if (!target->tap->enabled)
5307 return jim_target_tap_disabled(interp);
5308
5309 if (allow_defer && target->defer_examine) {
5310 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5311 LOG_INFO("Use arp_examine command to examine it manually!");
5312 return JIM_OK;
5313 }
5314
5315 int e = target->type->examine(target);
5316 if (e != ERROR_OK) {
5317 target_reset_examined(target);
5318 return JIM_ERR;
5319 }
5320
5321 target_set_examined(target);
5322
5323 return JIM_OK;
5324 }
5325
5326 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5327 {
5328 struct command_context *cmd_ctx = current_command_context(interp);
5329 assert(cmd_ctx);
5330 struct target *target = get_current_target(cmd_ctx);
5331
5332 Jim_SetResultBool(interp, target_was_examined(target));
5333 return JIM_OK;
5334 }
5335
5336 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5337 {
5338 struct command_context *cmd_ctx = current_command_context(interp);
5339 assert(cmd_ctx);
5340 struct target *target = get_current_target(cmd_ctx);
5341
5342 Jim_SetResultBool(interp, target->defer_examine);
5343 return JIM_OK;
5344 }
5345
5346 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5347 {
5348 if (argc != 1) {
5349 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5350 return JIM_ERR;
5351 }
5352 struct command_context *cmd_ctx = current_command_context(interp);
5353 assert(cmd_ctx);
5354 struct target *target = get_current_target(cmd_ctx);
5355
5356 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5357 return JIM_ERR;
5358
5359 return JIM_OK;
5360 }
5361
5362 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5363 {
5364 if (argc != 1) {
5365 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5366 return JIM_ERR;
5367 }
5368 struct command_context *cmd_ctx = current_command_context(interp);
5369 assert(cmd_ctx);
5370 struct target *target = get_current_target(cmd_ctx);
5371 if (!target->tap->enabled)
5372 return jim_target_tap_disabled(interp);
5373
5374 int e;
5375 if (!(target_was_examined(target)))
5376 e = ERROR_TARGET_NOT_EXAMINED;
5377 else
5378 e = target->type->poll(target);
5379 if (e != ERROR_OK)
5380 return JIM_ERR;
5381 return JIM_OK;
5382 }
5383
/* 'NAME arp_reset (assert|deassert) HALT' implementation: drive the
 * target-specific assert/deassert reset handler.  Used internally by
 * the reset framework scripts. */
static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	if (goi.argc != 2) {
		Jim_WrongNumArgs(interp, 0, argv,
				"([tT]|[fF]|assert|deassert) BOOL");
		return JIM_ERR;
	}

	/* assert/deassert selector (accepts the t/f spellings of nvp_assert) */
	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_assert, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
		return e;
	}
	/* the halt or not param */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	if (!target->type->assert_reset || !target->type->deassert_reset) {
		Jim_SetResultFormatted(interp,
				"No target-specific reset for %s",
				target_name(target));
		return JIM_ERR;
	}

	/* a deferred-examine target loses its examined state across reset */
	if (target->defer_examine)
		target_reset_examined(target);

	/* determine if we should halt or not. */
	target->reset_halt = (a != 0);
	/* When this happens - all workareas are invalid. */
	target_free_all_working_areas_restore(target, 0);

	/* do the assert */
	if (n->value == NVP_ASSERT)
		e = target->type->assert_reset(target);
	else
		e = target->type->deassert_reset(target);
	return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
}
5435
5436 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5437 {
5438 if (argc != 1) {
5439 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5440 return JIM_ERR;
5441 }
5442 struct command_context *cmd_ctx = current_command_context(interp);
5443 assert(cmd_ctx);
5444 struct target *target = get_current_target(cmd_ctx);
5445 if (!target->tap->enabled)
5446 return jim_target_tap_disabled(interp);
5447 int e = target->type->halt(target);
5448 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5449 }
5450
/* 'NAME arp_waitstate statename timeoutmsecs' implementation: block
 * until the target reaches the given state or the timeout expires. */
static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	/* params: <name> statename timeoutmsecs */
	if (goi.argc != 2) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
				"%s <state_name> <timeout_in_msec>", cmd_name);
		return JIM_ERR;
	}

	/* translate the state name into its TARGET_* enum value */
	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
		return e;
	}
	/* timeout, in milliseconds */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	e = target_wait_state(target, n->value, a);
	if (e != ERROR_OK) {
		/* include the numeric error code and its string form in the result */
		Jim_Obj *obj = Jim_NewIntObj(interp, e);
		Jim_SetResultFormatted(goi.interp,
				"target: %s wait %s fails (%#s) %s",
				target_name(target), n->name,
				obj, target_strerror_safe(e));
		return JIM_ERR;
	}
	return JIM_OK;
}
5491 /* List for human, Events defined for this target.
5492 * scripts/programs should use 'name cget -event NAME'
5493 */
5494 COMMAND_HANDLER(handle_target_event_list)
5495 {
5496 struct target *target = get_current_target(CMD_CTX);
5497 struct target_event_action *teap = target->event_action;
5498
5499 command_print(CMD, "Event actions for target (%d) %s\n",
5500 target->target_number,
5501 target_name(target));
5502 command_print(CMD, "%-25s | Body", "Event");
5503 command_print(CMD, "------------------------- | "
5504 "----------------------------------------");
5505 while (teap) {
5506 command_print(CMD, "%-25s | %s",
5507 target_event_name(teap->event),
5508 Jim_GetString(teap->body, NULL));
5509 teap = teap->next;
5510 }
5511 command_print(CMD, "***END***");
5512 return ERROR_OK;
5513 }
5514 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5515 {
5516 if (argc != 1) {
5517 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5518 return JIM_ERR;
5519 }
5520 struct command_context *cmd_ctx = current_command_context(interp);
5521 assert(cmd_ctx);
5522 struct target *target = get_current_target(cmd_ctx);
5523 Jim_SetResultString(interp, target_state_name(target), -1);
5524 return JIM_OK;
5525 }
/* 'NAME invoke-event EVENTNAME' implementation: run the Tcl body (if
 * any) configured for the named target event. */
static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
	if (goi.argc != 1) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
		return JIM_ERR;
	}
	/* map the event name onto its TARGET_EVENT_* value */
	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
		return e;
	}
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	target_handle_event(target, n->value);
	return JIM_OK;
}
5547
5548 static const struct command_registration target_instance_command_handlers[] = {
5549 {
5550 .name = "configure",
5551 .mode = COMMAND_ANY,
5552 .jim_handler = jim_target_configure,
5553 .help = "configure a new target for use",
5554 .usage = "[target_attribute ...]",
5555 },
5556 {
5557 .name = "cget",
5558 .mode = COMMAND_ANY,
5559 .jim_handler = jim_target_configure,
5560 .help = "returns the specified target attribute",
5561 .usage = "target_attribute",
5562 },
5563 {
5564 .name = "mwd",
5565 .handler = handle_mw_command,
5566 .mode = COMMAND_EXEC,
5567 .help = "Write 64-bit word(s) to target memory",
5568 .usage = "address data [count]",
5569 },
5570 {
5571 .name = "mww",
5572 .handler = handle_mw_command,
5573 .mode = COMMAND_EXEC,
5574 .help = "Write 32-bit word(s) to target memory",
5575 .usage = "address data [count]",
5576 },
5577 {
5578 .name = "mwh",
5579 .handler = handle_mw_command,
5580 .mode = COMMAND_EXEC,
5581 .help = "Write 16-bit half-word(s) to target memory",
5582 .usage = "address data [count]",
5583 },
5584 {
5585 .name = "mwb",
5586 .handler = handle_mw_command,
5587 .mode = COMMAND_EXEC,
5588 .help = "Write byte(s) to target memory",
5589 .usage = "address data [count]",
5590 },
5591 {
5592 .name = "mdd",
5593 .handler = handle_md_command,
5594 .mode = COMMAND_EXEC,
5595 .help = "Display target memory as 64-bit words",
5596 .usage = "address [count]",
5597 },
5598 {
5599 .name = "mdw",
5600 .handler = handle_md_command,
5601 .mode = COMMAND_EXEC,
5602 .help = "Display target memory as 32-bit words",
5603 .usage = "address [count]",
5604 },
5605 {
5606 .name = "mdh",
5607 .handler = handle_md_command,
5608 .mode = COMMAND_EXEC,
5609 .help = "Display target memory as 16-bit half-words",
5610 .usage = "address [count]",
5611 },
5612 {
5613 .name = "mdb",
5614 .handler = handle_md_command,
5615 .mode = COMMAND_EXEC,
5616 .help = "Display target memory as 8-bit bytes",
5617 .usage = "address [count]",
5618 },
5619 {
5620 .name = "array2mem",
5621 .mode = COMMAND_EXEC,
5622 .jim_handler = jim_target_array2mem,
5623 .help = "Writes Tcl array of 8/16/32 bit numbers "
5624 "to target memory",
5625 .usage = "arrayname bitwidth address count",
5626 },
5627 {
5628 .name = "mem2array",
5629 .mode = COMMAND_EXEC,
5630 .jim_handler = jim_target_mem2array,
5631 .help = "Loads Tcl array of 8/16/32 bit numbers "
5632 "from target memory",
5633 .usage = "arrayname bitwidth address count",
5634 },
5635 {
5636 .name = "eventlist",
5637 .handler = handle_target_event_list,
5638 .mode = COMMAND_EXEC,
5639 .help = "displays a table of events defined for this target",
5640 .usage = "",
5641 },
5642 {
5643 .name = "curstate",
5644 .mode = COMMAND_EXEC,
5645 .jim_handler = jim_target_current_state,
5646 .help = "displays the current state of this target",
5647 },
5648 {
5649 .name = "arp_examine",
5650 .mode = COMMAND_EXEC,
5651 .jim_handler = jim_target_examine,
5652 .help = "used internally for reset processing",
5653 .usage = "['allow-defer']",
5654 },
5655 {
5656 .name = "was_examined",
5657 .mode = COMMAND_EXEC,
5658 .jim_handler = jim_target_was_examined,
5659 .help = "used internally for reset processing",
5660 },
5661 {
5662 .name = "examine_deferred",
5663 .mode = COMMAND_EXEC,
5664 .jim_handler = jim_target_examine_deferred,
5665 .help = "used internally for reset processing",
5666 },
5667 {
5668 .name = "arp_halt_gdb",
5669 .mode = COMMAND_EXEC,
5670 .jim_handler = jim_target_halt_gdb,
5671 .help = "used internally for reset processing to halt GDB",
5672 },
5673 {
5674 .name = "arp_poll",
5675 .mode = COMMAND_EXEC,
5676 .jim_handler = jim_target_poll,
5677 .help = "used internally for reset processing",
5678 },
5679 {
5680 .name = "arp_reset",
5681 .mode = COMMAND_EXEC,
5682 .jim_handler = jim_target_reset,
5683 .help = "used internally for reset processing",
5684 },
5685 {
5686 .name = "arp_halt",
5687 .mode = COMMAND_EXEC,
5688 .jim_handler = jim_target_halt,
5689 .help = "used internally for reset processing",
5690 },
5691 {
5692 .name = "arp_waitstate",
5693 .mode = COMMAND_EXEC,
5694 .jim_handler = jim_target_wait_state,
5695 .help = "used internally for reset processing",
5696 },
5697 {
5698 .name = "invoke-event",
5699 .mode = COMMAND_EXEC,
5700 .jim_handler = jim_target_invoke_event,
5701 .help = "invoke handler for specified event",
5702 .usage = "event_name",
5703 },
5704 COMMAND_REGISTRATION_DONE
5705 };
5706
5707 static int target_create(struct jim_getopt_info *goi)
5708 {
5709 Jim_Obj *new_cmd;
5710 Jim_Cmd *cmd;
5711 const char *cp;
5712 int e;
5713 int x;
5714 struct target *target;
5715 struct command_context *cmd_ctx;
5716
5717 cmd_ctx = current_command_context(goi->interp);
5718 assert(cmd_ctx);
5719
5720 if (goi->argc < 3) {
5721 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5722 return JIM_ERR;
5723 }
5724
5725 /* COMMAND */
5726 jim_getopt_obj(goi, &new_cmd);
5727 /* does this command exist? */
5728 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
5729 if (cmd) {
5730 cp = Jim_GetString(new_cmd, NULL);
5731 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5732 return JIM_ERR;
5733 }
5734
5735 /* TYPE */
5736 e = jim_getopt_string(goi, &cp, NULL);
5737 if (e != JIM_OK)
5738 return e;
5739 struct transport *tr = get_current_transport();
5740 if (tr->override_target) {
5741 e = tr->override_target(&cp);
5742 if (e != ERROR_OK) {
5743 LOG_ERROR("The selected transport doesn't support this target");
5744 return JIM_ERR;
5745 }
5746 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5747 }
5748 /* now does target type exist */
5749 for (x = 0 ; target_types[x] ; x++) {
5750 if (strcmp(cp, target_types[x]->name) == 0) {
5751 /* found */
5752 break;
5753 }
5754 }
5755 if (!target_types[x]) {
5756 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5757 for (x = 0 ; target_types[x] ; x++) {
5758 if (target_types[x + 1]) {
5759 Jim_AppendStrings(goi->interp,
5760 Jim_GetResult(goi->interp),
5761 target_types[x]->name,
5762 ", ", NULL);
5763 } else {
5764 Jim_AppendStrings(goi->interp,
5765 Jim_GetResult(goi->interp),
5766 " or ",
5767 target_types[x]->name, NULL);
5768 }
5769 }
5770 return JIM_ERR;
5771 }
5772
5773 /* Create it */
5774 target = calloc(1, sizeof(struct target));
5775 if (!target) {
5776 LOG_ERROR("Out of memory");
5777 return JIM_ERR;
5778 }
5779
5780 /* set target number */
5781 target->target_number = new_target_number();
5782
5783 /* allocate memory for each unique target type */
5784 target->type = malloc(sizeof(struct target_type));
5785 if (!target->type) {
5786 LOG_ERROR("Out of memory");
5787 free(target);
5788 return JIM_ERR;
5789 }
5790
5791 memcpy(target->type, target_types[x], sizeof(struct target_type));
5792
5793 /* default to first core, override with -coreid */
5794 target->coreid = 0;
5795
5796 target->working_area = 0x0;
5797 target->working_area_size = 0x0;
5798 target->working_areas = NULL;
5799 target->backup_working_area = 0;
5800
5801 target->state = TARGET_UNKNOWN;
5802 target->debug_reason = DBG_REASON_UNDEFINED;
5803 target->reg_cache = NULL;
5804 target->breakpoints = NULL;
5805 target->watchpoints = NULL;
5806 target->next = NULL;
5807 target->arch_info = NULL;
5808
5809 target->verbose_halt_msg = true;
5810
5811 target->halt_issued = false;
5812
5813 /* initialize trace information */
5814 target->trace_info = calloc(1, sizeof(struct trace));
5815 if (!target->trace_info) {
5816 LOG_ERROR("Out of memory");
5817 free(target->type);
5818 free(target);
5819 return JIM_ERR;
5820 }
5821
5822 target->dbgmsg = NULL;
5823 target->dbg_msg_enabled = 0;
5824
5825 target->endianness = TARGET_ENDIAN_UNKNOWN;
5826
5827 target->rtos = NULL;
5828 target->rtos_auto_detect = false;
5829
5830 target->gdb_port_override = NULL;
5831 target->gdb_max_connections = 1;
5832
5833 /* Do the rest as "configure" options */
5834 goi->isconfigure = 1;
5835 e = target_configure(goi, target);
5836
5837 if (e == JIM_OK) {
5838 if (target->has_dap) {
5839 if (!target->dap_configured) {
5840 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5841 e = JIM_ERR;
5842 }
5843 } else {
5844 if (!target->tap_configured) {
5845 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5846 e = JIM_ERR;
5847 }
5848 }
5849 /* tap must be set after target was configured */
5850 if (!target->tap)
5851 e = JIM_ERR;
5852 }
5853
5854 if (e != JIM_OK) {
5855 rtos_destroy(target);
5856 free(target->gdb_port_override);
5857 free(target->trace_info);
5858 free(target->type);
5859 free(target);
5860 return e;
5861 }
5862
5863 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5864 /* default endian to little if not specified */
5865 target->endianness = TARGET_LITTLE_ENDIAN;
5866 }
5867
5868 cp = Jim_GetString(new_cmd, NULL);
5869 target->cmd_name = strdup(cp);
5870 if (!target->cmd_name) {
5871 LOG_ERROR("Out of memory");
5872 rtos_destroy(target);
5873 free(target->gdb_port_override);
5874 free(target->trace_info);
5875 free(target->type);
5876 free(target);
5877 return JIM_ERR;
5878 }
5879
5880 if (target->type->target_create) {
5881 e = (*(target->type->target_create))(target, goi->interp);
5882 if (e != ERROR_OK) {
5883 LOG_DEBUG("target_create failed");
5884 free(target->cmd_name);
5885 rtos_destroy(target);
5886 free(target->gdb_port_override);
5887 free(target->trace_info);
5888 free(target->type);
5889 free(target);
5890 return JIM_ERR;
5891 }
5892 }
5893
5894 /* create the target specific commands */
5895 if (target->type->commands) {
5896 e = register_commands(cmd_ctx, NULL, target->type->commands);
5897 if (e != ERROR_OK)
5898 LOG_ERROR("unable to register '%s' commands", cp);
5899 }
5900
5901 /* now - create the new target name command */
5902 const struct command_registration target_subcommands[] = {
5903 {
5904 .chain = target_instance_command_handlers,
5905 },
5906 {
5907 .chain = target->type->commands,
5908 },
5909 COMMAND_REGISTRATION_DONE
5910 };
5911 const struct command_registration target_commands[] = {
5912 {
5913 .name = cp,
5914 .mode = COMMAND_ANY,
5915 .help = "target command group",
5916 .usage = "",
5917 .chain = target_subcommands,
5918 },
5919 COMMAND_REGISTRATION_DONE
5920 };
5921 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
5922 if (e != ERROR_OK) {
5923 if (target->type->deinit_target)
5924 target->type->deinit_target(target);
5925 free(target->cmd_name);
5926 rtos_destroy(target);
5927 free(target->gdb_port_override);
5928 free(target->trace_info);
5929 free(target->type);
5930 free(target);
5931 return JIM_ERR;
5932 }
5933
5934 /* append to end of list */
5935 append_to_list_all_targets(target);
5936
5937 cmd_ctx->current_target = target;
5938 return JIM_OK;
5939 }
5940
5941 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5942 {
5943 if (argc != 1) {
5944 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5945 return JIM_ERR;
5946 }
5947 struct command_context *cmd_ctx = current_command_context(interp);
5948 assert(cmd_ctx);
5949
5950 struct target *target = get_current_target_or_null(cmd_ctx);
5951 if (target)
5952 Jim_SetResultString(interp, target_name(target), -1);
5953 return JIM_OK;
5954 }
5955
5956 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5957 {
5958 if (argc != 1) {
5959 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5960 return JIM_ERR;
5961 }
5962 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5963 for (unsigned x = 0; target_types[x]; x++) {
5964 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5965 Jim_NewStringObj(interp, target_types[x]->name, -1));
5966 }
5967 return JIM_OK;
5968 }
5969
5970 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5971 {
5972 if (argc != 1) {
5973 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5974 return JIM_ERR;
5975 }
5976 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5977 struct target *target = all_targets;
5978 while (target) {
5979 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5980 Jim_NewStringObj(interp, target_name(target), -1));
5981 target = target->next;
5982 }
5983 return JIM_OK;
5984 }
5985
5986 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5987 {
5988 int i;
5989 const char *targetname;
5990 int retval, len;
5991 struct target *target = NULL;
5992 struct target_list *head, *curr, *new;
5993 curr = NULL;
5994 head = NULL;
5995
5996 retval = 0;
5997 LOG_DEBUG("%d", argc);
5998 /* argv[1] = target to associate in smp
5999 * argv[2] = target to associate in smp
6000 * argv[3] ...
6001 */
6002
6003 for (i = 1; i < argc; i++) {
6004
6005 targetname = Jim_GetString(argv[i], &len);
6006 target = get_target(targetname);
6007 LOG_DEBUG("%s ", targetname);
6008 if (target) {
6009 new = malloc(sizeof(struct target_list));
6010 new->target = target;
6011 new->next = NULL;
6012 if (!head) {
6013 head = new;
6014 curr = head;
6015 } else {
6016 curr->next = new;
6017 curr = new;
6018 }
6019 }
6020 }
6021 /* now parse the list of cpu and put the target in smp mode*/
6022 curr = head;
6023
6024 while (curr) {
6025 target = curr->target;
6026 target->smp = 1;
6027 target->head = head;
6028 curr = curr->next;
6029 }
6030
6031 if (target && target->rtos)
6032 retval = rtos_smp_init(head->target);
6033
6034 return retval;
6035 }
6036
6037
6038 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6039 {
6040 struct jim_getopt_info goi;
6041 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6042 if (goi.argc < 3) {
6043 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6044 "<name> <target_type> [<target_options> ...]");
6045 return JIM_ERR;
6046 }
6047 return target_create(&goi);
6048 }
6049
6050 static const struct command_registration target_subcommand_handlers[] = {
6051 {
6052 .name = "init",
6053 .mode = COMMAND_CONFIG,
6054 .handler = handle_target_init_command,
6055 .help = "initialize targets",
6056 .usage = "",
6057 },
6058 {
6059 .name = "create",
6060 .mode = COMMAND_CONFIG,
6061 .jim_handler = jim_target_create,
6062 .usage = "name type '-chain-position' name [options ...]",
6063 .help = "Creates and selects a new target",
6064 },
6065 {
6066 .name = "current",
6067 .mode = COMMAND_ANY,
6068 .jim_handler = jim_target_current,
6069 .help = "Returns the currently selected target",
6070 },
6071 {
6072 .name = "types",
6073 .mode = COMMAND_ANY,
6074 .jim_handler = jim_target_types,
6075 .help = "Returns the available target types as "
6076 "a list of strings",
6077 },
6078 {
6079 .name = "names",
6080 .mode = COMMAND_ANY,
6081 .jim_handler = jim_target_names,
6082 .help = "Returns the names of all targets as a list of strings",
6083 },
6084 {
6085 .name = "smp",
6086 .mode = COMMAND_ANY,
6087 .jim_handler = jim_target_smp,
6088 .usage = "targetname1 targetname2 ...",
6089 .help = "gather several target in a smp list"
6090 },
6091
6092 COMMAND_REGISTRATION_DONE
6093 };
6094
/* One image section staged in server RAM by 'fast_load_image', to be
 * written to the target later by 'fast_load'. */
struct fast_load {
	target_addr_t address;	/* target address the data belongs at */
	uint8_t *data;		/* malloc'd copy of the (clipped) section data */
	int length;		/* number of bytes in data */

};

/* number of entries in the fastload array below */
static int fastload_num;
/* staged image sections; NULL when no image has been fast-loaded */
static struct fast_load *fastload;
6104
6105 static void free_fastload(void)
6106 {
6107 if (fastload) {
6108 for (int i = 0; i < fastload_num; i++)
6109 free(fastload[i].data);
6110 free(fastload);
6111 fastload = NULL;
6112 }
6113 }
6114
/* 'fast_load_image' implementation: read an image file and stage a
 * copy of its sections in server RAM (the fastload array).  Nothing is
 * sent to the target here; 'fast_load' performs the actual download. */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;

	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct duration bench;
	duration_start(&bench);

	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct fast_load)*image.num_sections);
	if (!fastload) {
		command_print(CMD, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	/* zero-fill so unstaged sections keep NULL data / zero length */
	memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD, "error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* keep only the part of the section that overlaps
		 * [min_address, max_address], clipping both ends */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (!fastload[i].data) {
				free(buffer);
				command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
						length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			command_print(CMD, "%u bytes written at address 0x%8.8x",
					(unsigned int)length,
					((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "Loaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD,
				"WARNING: image has not been loaded to target!"
				"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* on any error, discard whatever was partially staged */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
6216
6217 COMMAND_HANDLER(handle_fast_load_command)
6218 {
6219 if (CMD_ARGC > 0)
6220 return ERROR_COMMAND_SYNTAX_ERROR;
6221 if (!fastload) {
6222 LOG_ERROR("No image in memory");
6223 return ERROR_FAIL;
6224 }
6225 int i;
6226 int64_t ms = timeval_ms();
6227 int size = 0;
6228 int retval = ERROR_OK;
6229 for (i = 0; i < fastload_num; i++) {
6230 struct target *target = get_current_target(CMD_CTX);
6231 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6232 (unsigned int)(fastload[i].address),
6233 (unsigned int)(fastload[i].length));
6234 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6235 if (retval != ERROR_OK)
6236 break;
6237 size += fastload[i].length;
6238 }
6239 if (retval == ERROR_OK) {
6240 int64_t after = timeval_ms();
6241 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6242 }
6243 return retval;
6244 }
6245
/* Top-level target commands: 'targets' plus the 'target' command group
 * (whose subcommands live in target_subcommand_handlers). */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6264
/* Register the top-level target commands ('target', 'targets'). */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6269
/* When true (the default), nag the user after each reset about
 * configuration options that could improve performance. */
static bool target_reset_nag = true;

bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6276
/* 'target_reset_nag' command: enable or disable the post-reset
 * performance-hint message. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
6283
6284 COMMAND_HANDLER(handle_ps_command)
6285 {
6286 struct target *target = get_current_target(CMD_CTX);
6287 char *display;
6288 if (target->state != TARGET_HALTED) {
6289 LOG_INFO("target not halted !!");
6290 return ERROR_OK;
6291 }
6292
6293 if ((target->rtos) && (target->rtos->type)
6294 && (target->rtos->type->ps_command)) {
6295 display = target->rtos->type->ps_command(target);
6296 command_print(CMD, "%s", display);
6297 free(display);
6298 return ERROR_OK;
6299 } else {
6300 LOG_INFO("failed");
6301 return ERROR_TARGET_FAILURE;
6302 }
6303 }
6304
/* Print an optional label followed by a hex dump of buf, then end the line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);
	int i = 0;
	while (i < size) {
		command_print_sameline(cmd, " %02x", buf[i]);
		i++;
	}
	command_print(cmd, " ");
}
6313
/* 'test_mem_access' command: exercise target_read_memory() and
 * target_write_memory() with every combination of access size (1/2/4),
 * target-side offset (0..3) and host buffer alignment, comparing the
 * result against a host-side replay of the same operation.
 * NOTE(review): the malloc() results in this debug command are not
 * checked before use; an OOM here would crash — acceptable for a test
 * command but worth knowing. */
COMMAND_HANDLER(handle_test_mem_access_command)
{
	struct target *target = get_current_target(CMD_CTX);
	uint32_t test_size;
	int retval = ERROR_OK;

	if (target->state != TARGET_HALTED) {
		LOG_INFO("target not halted !!");
		return ERROR_FAIL;
	}

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);

	/* Test reads */
	size_t num_bytes = test_size + 4;

	struct working_area *wa = NULL;
	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	uint8_t *test_pattern = malloc(num_bytes);

	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
	if (retval != ERROR_OK) {
		LOG_ERROR("Test pattern write failed");
		/* NOTE(review): 'goto out' skips only the read loop; the
		 * write test below still runs after this failure */
		goto out;
	}

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = (count + 2) * size + host_offset;
				uint8_t *read_ref = malloc(host_bufsiz);
				uint8_t *read_buf = malloc(host_bufsiz);

				/* fill both buffers identically so untouched
				 * bytes must still match after the read */
				for (size_t i = 0; i < host_bufsiz; i++) {
					read_ref[i] = rand();
					read_buf[i] = read_ref[i];
				}
				command_print_sameline(CMD,
						"Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				struct duration bench;
				duration_start(&bench);

				retval = target_read_memory(target, wa->address + offset, size, count,
						read_buf + size + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto next;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory read failed");
					goto next;
				}

				/* replay on host */
				memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);

				/* check result */
				int result = memcmp(read_ref, read_buf, host_bufsiz);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, host_bufsiz);
					binprint(CMD, "buf:", read_buf, host_bufsiz);
				}
next:
				free(read_ref);
				free(read_buf);
			}
		}
	}

out:
	free(test_pattern);

	target_free_working_area(target, wa);

	/* Test writes */
	num_bytes = test_size + 4 + 4 + 4;

	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	test_pattern = malloc(num_bytes);

	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = count * size + host_offset;
				uint8_t *read_ref = malloc(num_bytes);
				uint8_t *read_buf = malloc(num_bytes);
				uint8_t *write_buf = malloc(host_bufsiz);

				for (size_t i = 0; i < host_bufsiz; i++)
					write_buf[i] = rand();
				command_print_sameline(CMD,
						"Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				/* re-seed the target area with the known pattern
				 * before each write variant */
				retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
				if (retval != ERROR_OK) {
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* replay on host */
				memcpy(read_ref, test_pattern, num_bytes);
				memcpy(read_ref + size + offset, write_buf + host_offset, count * size);

				struct duration bench;
				duration_start(&bench);

				retval = target_write_memory(target, wa->address + size + offset, size, count,
						write_buf + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto nextw;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory write failed");
					goto nextw;
				}

				/* read back */
				retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
				if (retval != ERROR_OK) {
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* check result */
				int result = memcmp(read_ref, read_buf, num_bytes);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, num_bytes);
					binprint(CMD, "buf:", read_buf, num_bytes);
				}
nextw:
				free(read_ref);
				free(read_buf);
			}
		}
	}

	free(test_pattern);

	target_free_working_area(target, wa);
	return retval;
}
6494
/*
 * Top-level "exec"-mode target command table, registered by
 * target_register_user_commands() below.  Most entries use ordinary
 * command handlers; mem2array/array2mem are Jim-Tcl handlers because
 * they return/consume Tcl arrays directly.
 */
static const struct command_registration target_exec_command_handlers[] = {
	/* --- image preload / profiling helpers --- */
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	/* --- register access and run control --- */
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* --- memory display: shared handler, access width selected by name --- */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	/* --- memory write: shared handler, access width selected by name --- */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	/* --- breakpoints and watchpoints --- */
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	/* --- image load / dump / verify --- */
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	/* --- Tcl array <-> memory bridges (Jim handlers) --- */
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mem2array,
		.help = "read 8/16/32 bit memory and return as a TCL array "
			"for script processing",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_array2mem,
		.help = "convert a TCL array to memory locations "
			"and write the 8/16/32 bit values",
		.usage = "arrayname bitwidth address count",
	},
	/* --- miscellaneous --- */
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
6744 static int target_register_user_commands(struct command_context *cmd_ctx)
6745 {
6746 int retval = ERROR_OK;
6747 retval = target_request_register_commands(cmd_ctx);
6748 if (retval != ERROR_OK)
6749 return retval;
6750
6751 retval = trace_register_commands(cmd_ctx);
6752 if (retval != ERROR_OK)
6753 return retval;
6754
6755
6756 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6757 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to read https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)