helper/command: register full-name commands in jim
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75
76 /* targets */
77 extern struct target_type arm7tdmi_target;
78 extern struct target_type arm720t_target;
79 extern struct target_type arm9tdmi_target;
80 extern struct target_type arm920t_target;
81 extern struct target_type arm966e_target;
82 extern struct target_type arm946e_target;
83 extern struct target_type arm926ejs_target;
84 extern struct target_type fa526_target;
85 extern struct target_type feroceon_target;
86 extern struct target_type dragonite_target;
87 extern struct target_type xscale_target;
88 extern struct target_type cortexm_target;
89 extern struct target_type cortexa_target;
90 extern struct target_type aarch64_target;
91 extern struct target_type cortexr4_target;
92 extern struct target_type arm11_target;
93 extern struct target_type ls1_sap_target;
94 extern struct target_type mips_m4k_target;
95 extern struct target_type mips_mips64_target;
96 extern struct target_type avr_target;
97 extern struct target_type dsp563xx_target;
98 extern struct target_type dsp5680xx_target;
99 extern struct target_type testee_target;
100 extern struct target_type avr32_ap7k_target;
101 extern struct target_type hla_target;
102 extern struct target_type nds32_v2_target;
103 extern struct target_type nds32_v3_target;
104 extern struct target_type nds32_v3m_target;
105 extern struct target_type or1k_target;
106 extern struct target_type quark_x10xx_target;
107 extern struct target_type quark_d20xx_target;
108 extern struct target_type stm8_target;
109 extern struct target_type riscv_target;
110 extern struct target_type mem_ap_target;
111 extern struct target_type esirisc_target;
112 extern struct target_type arcv2_target;
113
114 static struct target_type *target_types[] = {
115 &arm7tdmi_target,
116 &arm9tdmi_target,
117 &arm920t_target,
118 &arm720t_target,
119 &arm966e_target,
120 &arm946e_target,
121 &arm926ejs_target,
122 &fa526_target,
123 &feroceon_target,
124 &dragonite_target,
125 &xscale_target,
126 &cortexm_target,
127 &cortexa_target,
128 &cortexr4_target,
129 &arm11_target,
130 &ls1_sap_target,
131 &mips_m4k_target,
132 &avr_target,
133 &dsp563xx_target,
134 &dsp5680xx_target,
135 &testee_target,
136 &avr32_ap7k_target,
137 &hla_target,
138 &nds32_v2_target,
139 &nds32_v3_target,
140 &nds32_v3m_target,
141 &or1k_target,
142 &quark_x10xx_target,
143 &quark_d20xx_target,
144 &stm8_target,
145 &riscv_target,
146 &mem_ap_target,
147 &esirisc_target,
148 &arcv2_target,
149 &aarch64_target,
150 &mips_mips64_target,
151 NULL,
152 };
153
154 struct target *all_targets;
155 static struct target_event_callback *target_event_callbacks;
156 static struct target_timer_callback *target_timer_callbacks;
157 static LIST_HEAD(target_reset_callback_list);
158 static LIST_HEAD(target_trace_callback_list);
159 static const int polling_interval = 100;
160
161 static const Jim_Nvp nvp_assert[] = {
162 { .name = "assert", NVP_ASSERT },
163 { .name = "deassert", NVP_DEASSERT },
164 { .name = "T", NVP_ASSERT },
165 { .name = "F", NVP_DEASSERT },
166 { .name = "t", NVP_ASSERT },
167 { .name = "f", NVP_DEASSERT },
168 { .name = NULL, .value = -1 }
169 };
170
171 static const Jim_Nvp nvp_error_target[] = {
172 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
173 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
174 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
175 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
176 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
177 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
178 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
179 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
180 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
181 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
182 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
183 { .value = -1, .name = NULL }
184 };
185
186 static const char *target_strerror_safe(int err)
187 {
188 const Jim_Nvp *n;
189
190 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
191 if (n->name == NULL)
192 return "unknown";
193 else
194 return n->name;
195 }
196
197 static const Jim_Nvp nvp_target_event[] = {
198
199 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
200 { .value = TARGET_EVENT_HALTED, .name = "halted" },
201 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
202 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
203 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
204 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
205 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
206
207 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
208 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
209
210 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
211 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
212 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
213 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
214 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
215 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
216 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
217 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
218
219 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
220 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
221 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
222
223 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
224 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
225
226 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
227 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
228
229 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
230 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
231
232 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
233 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
234
235 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
236
237 { .name = NULL, .value = -1 }
238 };
239
240 static const Jim_Nvp nvp_target_state[] = {
241 { .name = "unknown", .value = TARGET_UNKNOWN },
242 { .name = "running", .value = TARGET_RUNNING },
243 { .name = "halted", .value = TARGET_HALTED },
244 { .name = "reset", .value = TARGET_RESET },
245 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
246 { .name = NULL, .value = -1 },
247 };
248
249 static const Jim_Nvp nvp_target_debug_reason[] = {
250 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
251 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
252 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
253 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
254 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
255 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
256 { .name = "program-exit", .value = DBG_REASON_EXIT },
257 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
258 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
259 { .name = NULL, .value = -1 },
260 };
261
262 static const Jim_Nvp nvp_target_endian[] = {
263 { .name = "big", .value = TARGET_BIG_ENDIAN },
264 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
265 { .name = "be", .value = TARGET_BIG_ENDIAN },
266 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
267 { .name = NULL, .value = -1 },
268 };
269
270 static const Jim_Nvp nvp_reset_modes[] = {
271 { .name = "unknown", .value = RESET_UNKNOWN },
272 { .name = "run", .value = RESET_RUN },
273 { .name = "halt", .value = RESET_HALT },
274 { .name = "init", .value = RESET_INIT },
275 { .name = NULL, .value = -1 },
276 };
277
278 const char *debug_reason_name(struct target *t)
279 {
280 const char *cp;
281
282 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
283 t->debug_reason)->name;
284 if (!cp) {
285 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
286 cp = "(*BUG*unknown*BUG*)";
287 }
288 return cp;
289 }
290
291 const char *target_state_name(struct target *t)
292 {
293 const char *cp;
294 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
295 if (!cp) {
296 LOG_ERROR("Invalid target state: %d", (int)(t->state));
297 cp = "(*BUG*unknown*BUG*)";
298 }
299
300 if (!target_was_examined(t) && t->defer_examine)
301 cp = "examine deferred";
302
303 return cp;
304 }
305
306 const char *target_event_name(enum target_event event)
307 {
308 const char *cp;
309 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
310 if (!cp) {
311 LOG_ERROR("Invalid target event: %d", (int)(event));
312 cp = "(*BUG*unknown*BUG*)";
313 }
314 return cp;
315 }
316
317 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
318 {
319 const char *cp;
320 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
321 if (!cp) {
322 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
323 cp = "(*BUG*unknown*BUG*)";
324 }
325 return cp;
326 }
327
328 /* determine the number of the new target */
329 static int new_target_number(void)
330 {
331 struct target *t;
332 int x;
333
334 /* number is 0 based */
335 x = -1;
336 t = all_targets;
337 while (t) {
338 if (x < t->target_number)
339 x = t->target_number;
340 t = t->next;
341 }
342 return x + 1;
343 }
344
345 static void append_to_list_all_targets(struct target *target)
346 {
347 struct target **t = &all_targets;
348
349 while (*t)
350 t = &((*t)->next);
351 *t = target;
352 }
353
354 /* read a uint64_t from a buffer in target memory endianness */
355 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
356 {
357 if (target->endianness == TARGET_LITTLE_ENDIAN)
358 return le_to_h_u64(buffer);
359 else
360 return be_to_h_u64(buffer);
361 }
362
363 /* read a uint32_t from a buffer in target memory endianness */
364 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
365 {
366 if (target->endianness == TARGET_LITTLE_ENDIAN)
367 return le_to_h_u32(buffer);
368 else
369 return be_to_h_u32(buffer);
370 }
371
372 /* read a uint24_t from a buffer in target memory endianness */
373 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 return le_to_h_u24(buffer);
377 else
378 return be_to_h_u24(buffer);
379 }
380
381 /* read a uint16_t from a buffer in target memory endianness */
382 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 return le_to_h_u16(buffer);
386 else
387 return be_to_h_u16(buffer);
388 }
389
390 /* write a uint64_t to a buffer in target memory endianness */
391 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 h_u64_to_le(buffer, value);
395 else
396 h_u64_to_be(buffer, value);
397 }
398
399 /* write a uint32_t to a buffer in target memory endianness */
400 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
401 {
402 if (target->endianness == TARGET_LITTLE_ENDIAN)
403 h_u32_to_le(buffer, value);
404 else
405 h_u32_to_be(buffer, value);
406 }
407
408 /* write a uint24_t to a buffer in target memory endianness */
409 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
410 {
411 if (target->endianness == TARGET_LITTLE_ENDIAN)
412 h_u24_to_le(buffer, value);
413 else
414 h_u24_to_be(buffer, value);
415 }
416
417 /* write a uint16_t to a buffer in target memory endianness */
418 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
419 {
420 if (target->endianness == TARGET_LITTLE_ENDIAN)
421 h_u16_to_le(buffer, value);
422 else
423 h_u16_to_be(buffer, value);
424 }
425
426 /* write a uint8_t to a buffer in target memory endianness */
427 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
428 {
429 *buffer = value;
430 }
431
432 /* read a uint64_t array from a buffer in target memory endianness */
433 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
434 {
435 uint32_t i;
436 for (i = 0; i < count; i++)
437 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
438 }
439
440 /* read a uint32_t array from a buffer in target memory endianness */
441 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
442 {
443 uint32_t i;
444 for (i = 0; i < count; i++)
445 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
446 }
447
448 /* read a uint16_t array from a buffer in target memory endianness */
449 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
450 {
451 uint32_t i;
452 for (i = 0; i < count; i++)
453 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
454 }
455
456 /* write a uint64_t array to a buffer in target memory endianness */
457 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
458 {
459 uint32_t i;
460 for (i = 0; i < count; i++)
461 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
462 }
463
464 /* write a uint32_t array to a buffer in target memory endianness */
465 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
466 {
467 uint32_t i;
468 for (i = 0; i < count; i++)
469 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
470 }
471
472 /* write a uint16_t array to a buffer in target memory endianness */
473 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
474 {
475 uint32_t i;
476 for (i = 0; i < count; i++)
477 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
478 }
479
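/*
 * Illustrative sketch (not part of the build): the helpers above marshal
 * values into target byte order before raw memory writes and decode raw
 * reads back into host order. The RAM address used here is hypothetical.
 *
 *   uint8_t buf[4];
 *   target_buffer_set_u32(target, buf, 0xdeadbeef);      // host -> target order
 *   retval = target_write_buffer(target, 0x20000000, sizeof(buf), buf);
 *
 *   retval = target_read_buffer(target, 0x20000000, sizeof(buf), buf);
 *   uint32_t value = target_buffer_get_u32(target, buf); // target -> host order
 */
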
480 /* return a pointer to a configured target; id is name or number */
481 struct target *get_target(const char *id)
482 {
483 struct target *target;
484
485 /* try as Tcl target name */
486 for (target = all_targets; target; target = target->next) {
487 if (target_name(target) == NULL)
488 continue;
489 if (strcmp(id, target_name(target)) == 0)
490 return target;
491 }
492
493 /* It's OK to remove this fallback sometime after August 2010 or so */
494
495 /* no match, try as number */
496 unsigned num;
497 if (parse_uint(id, &num) != ERROR_OK)
498 return NULL;
499
500 for (target = all_targets; target; target = target->next) {
501 if (target->target_number == (int)num) {
502 LOG_WARNING("use '%s' as target identifier, not '%u'",
503 target_name(target), num);
504 return target;
505 }
506 }
507
508 return NULL;
509 }
510
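/*
 * Illustrative sketch (not part of the build): looking a target up by the
 * name it was given in the configuration script; the name is hypothetical.
 *
 *   struct target *t = get_target("stm32f1x.cpu");
 *   if (!t) {
 *       LOG_ERROR("target not found");
 *       return ERROR_FAIL;
 *   }
 */
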
511 /* returns a pointer to the n-th configured target */
512 struct target *get_target_by_num(int num)
513 {
514 struct target *target = all_targets;
515
516 while (target) {
517 if (target->target_number == num)
518 return target;
519 target = target->next;
520 }
521
522 return NULL;
523 }
524
525 struct target *get_current_target(struct command_context *cmd_ctx)
526 {
527 struct target *target = get_current_target_or_null(cmd_ctx);
528
529 if (target == NULL) {
530 LOG_ERROR("BUG: current_target out of bounds");
531 exit(-1);
532 }
533
534 return target;
535 }
536
537 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
538 {
539 return cmd_ctx->current_target_override
540 ? cmd_ctx->current_target_override
541 : cmd_ctx->current_target;
542 }
543
544 int target_poll(struct target *target)
545 {
546 int retval;
547
548 /* We can't poll until after examine */
549 if (!target_was_examined(target)) {
550 /* Fail silently lest we pollute the log */
551 return ERROR_FAIL;
552 }
553
554 retval = target->type->poll(target);
555 if (retval != ERROR_OK)
556 return retval;
557
558 if (target->halt_issued) {
559 if (target->state == TARGET_HALTED)
560 target->halt_issued = false;
561 else {
562 int64_t t = timeval_ms() - target->halt_issued_time;
563 if (t > DEFAULT_HALT_TIMEOUT) {
564 target->halt_issued = false;
565 LOG_INFO("Halt timed out, wake up GDB.");
566 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
567 }
568 }
569 }
570
571 return ERROR_OK;
572 }
573
574 int target_halt(struct target *target)
575 {
576 int retval;
577 /* We can't halt until after examine */
578 if (!target_was_examined(target)) {
579 LOG_ERROR("Target not examined yet");
580 return ERROR_FAIL;
581 }
582
583 retval = target->type->halt(target);
584 if (retval != ERROR_OK)
585 return retval;
586
587 target->halt_issued = true;
588 target->halt_issued_time = timeval_ms();
589
590 return ERROR_OK;
591 }
592
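/*
 * Illustrative sketch (not part of the build): target_halt() only issues the
 * request, so callers poll until the target actually reports the halted
 * state or a timeout expires (target_wait_state() wraps this pattern):
 *
 *   retval = target_halt(target);
 *   if (retval != ERROR_OK)
 *       return retval;
 *   int64_t start = timeval_ms();
 *   while (target->state != TARGET_HALTED) {
 *       retval = target_poll(target);
 *       if (retval != ERROR_OK)
 *           return retval;
 *       if (timeval_ms() - start > DEFAULT_HALT_TIMEOUT)
 *           return ERROR_TARGET_TIMEOUT;
 *       alive_sleep(10);
 *   }
 */
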
593 /**
594 * Make the target (re)start executing using its saved execution
595 * context (possibly with some modifications).
596 *
597 * @param target Which target should start executing.
598 * @param current True to use the target's saved program counter instead
599 * of the address parameter
600 * @param address Optionally used as the program counter.
601 * @param handle_breakpoints True iff breakpoints at the resumption PC
602 * should be skipped. (For example, maybe execution was stopped by
603 * such a breakpoint, in which case it would be counterproductive to
604 * let it re-trigger.)
605 * @param debug_execution False if all working areas allocated by OpenOCD
606 * should be released and/or restored to their original contents.
607 * (This would for example be true to run some downloaded "helper"
608 * algorithm code, which resides in one such working buffer and uses
609 * another for data storage.)
610 *
611 * @todo Resolve the ambiguity about what the "debug_execution" flag
612 * signifies. For example, Target implementations don't agree on how
613 * it relates to invalidation of the register cache, or to whether
614 * breakpoints and watchpoints should be enabled. (It would seem wrong
615 * to enable breakpoints when running downloaded "helper" algorithms
616 * (debug_execution true), since the breakpoints would be set to match
617 * target firmware being debugged, not the helper algorithm.... and
618 * enabling them could cause such helpers to malfunction (for example,
619 * by overwriting data with a breakpoint instruction. On the other
620 * hand the infrastructure for running such helpers might use this
621 * procedure but rely on hardware breakpoint to detect termination.)
622 */
623 int target_resume(struct target *target, int current, target_addr_t address,
624 int handle_breakpoints, int debug_execution)
625 {
626 int retval;
627
628 /* We can't resume until after examine */
629 if (!target_was_examined(target)) {
630 LOG_ERROR("Target not examined yet");
631 return ERROR_FAIL;
632 }
633
634 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
635
636 /* note that resume *must* be asynchronous. The CPU can halt before
637 * we poll. The CPU can even halt at the current PC as a result of
638 * a software breakpoint inserted by the application (possibly a bug).
639 */
640 /*
641 * resume() triggers the event 'resumed'. The execution of TCL commands
642 * in the event handler causes the polling of targets. If the target has
643 * already halted for a breakpoint, polling will run the 'halted' event
644 * handler before the pending 'resumed' handler.
645 * Disable polling during resume() to guarantee the execution of handlers
646 * in the correct order.
647 */
648 bool save_poll = jtag_poll_get_enabled();
649 jtag_poll_set_enabled(false);
650 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
651 jtag_poll_set_enabled(save_poll);
652 if (retval != ERROR_OK)
653 return retval;
654
655 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
656
657 return retval;
658 }
659
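/*
 * Illustrative sketch (not part of the build): the two common ways to call
 * target_resume(); the stub address is hypothetical.
 *
 *   // continue from the saved program counter, handling breakpoints
 *   retval = target_resume(target, 1, 0, 1, 0);
 *
 *   // "debug execution" of a downloaded helper at a fixed address
 *   retval = target_resume(target, 0, 0x20000000, 1, 1);
 */
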
660 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
661 {
662 char buf[100];
663 int retval;
664 Jim_Nvp *n;
665 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
666 if (n->name == NULL) {
667 LOG_ERROR("invalid reset mode");
668 return ERROR_FAIL;
669 }
670
671 struct target *target;
672 for (target = all_targets; target; target = target->next)
673 target_call_reset_callbacks(target, reset_mode);
674
675 /* disable polling during reset to make reset event scripts
676 * more predictable, i.e. dr/irscan & pathmove in events will
677 * not have JTAG operations injected into the middle of a sequence.
678 */
679 bool save_poll = jtag_poll_get_enabled();
680
681 jtag_poll_set_enabled(false);
682
683 sprintf(buf, "ocd_process_reset %s", n->name);
684 retval = Jim_Eval(cmd->ctx->interp, buf);
685
686 jtag_poll_set_enabled(save_poll);
687
688 if (retval != JIM_OK) {
689 Jim_MakeErrorMessage(cmd->ctx->interp);
690 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
691 return ERROR_FAIL;
692 }
693
694 /* We want any events to be processed before the prompt */
695 retval = target_call_timer_callbacks_now();
696
697 for (target = all_targets; target; target = target->next) {
698 target->type->check_reset(target);
699 target->running_alg = false;
700 }
701
702 return retval;
703 }
704
705 static int identity_virt2phys(struct target *target,
706 target_addr_t virtual, target_addr_t *physical)
707 {
708 *physical = virtual;
709 return ERROR_OK;
710 }
711
712 static int no_mmu(struct target *target, int *enabled)
713 {
714 *enabled = 0;
715 return ERROR_OK;
716 }
717
718 static int default_examine(struct target *target)
719 {
720 target_set_examined(target);
721 return ERROR_OK;
722 }
723
724 /* no check by default */
725 static int default_check_reset(struct target *target)
726 {
727 return ERROR_OK;
728 }
729
730 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
731 * Keep in sync */
732 int target_examine_one(struct target *target)
733 {
734 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
735
736 int retval = target->type->examine(target);
737 if (retval != ERROR_OK) {
738 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
739 return retval;
740 }
741
742 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
743
744 return ERROR_OK;
745 }
746
747 static int jtag_enable_callback(enum jtag_event event, void *priv)
748 {
749 struct target *target = priv;
750
751 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
752 return ERROR_OK;
753
754 jtag_unregister_event_callback(jtag_enable_callback, target);
755
756 return target_examine_one(target);
757 }
758
759 /* Targets that correctly implement init + examine, i.e.
760 * no communication with target during init:
761 *
762 * XScale
763 */
764 int target_examine(void)
765 {
766 int retval = ERROR_OK;
767 struct target *target;
768
769 for (target = all_targets; target; target = target->next) {
770 /* defer examination, but don't skip it */
771 if (!target->tap->enabled) {
772 jtag_register_event_callback(jtag_enable_callback,
773 target);
774 continue;
775 }
776
777 if (target->defer_examine)
778 continue;
779
780 int retval2 = target_examine_one(target);
781 if (retval2 != ERROR_OK) {
782 LOG_WARNING("target %s examination failed", target_name(target));
783 retval = retval2;
784 }
785 }
786 return retval;
787 }
788
789 const char *target_type_name(struct target *target)
790 {
791 return target->type->name;
792 }
793
794 static int target_soft_reset_halt(struct target *target)
795 {
796 if (!target_was_examined(target)) {
797 LOG_ERROR("Target not examined yet");
798 return ERROR_FAIL;
799 }
800 if (!target->type->soft_reset_halt) {
801 LOG_ERROR("Target %s does not support soft_reset_halt",
802 target_name(target));
803 return ERROR_FAIL;
804 }
805 return target->type->soft_reset_halt(target);
806 }
807
808 /**
809 * Downloads a target-specific native code algorithm to the target,
810 * and executes it. Note that some targets may need to set up, enable,
811 * and tear down a breakpoint (hard or soft) to detect algorithm
812 * termination, while others may support lower overhead schemes where
813 * soft breakpoints embedded in the algorithm automatically terminate the
814 * algorithm.
815 *
816 * @param target used to run the algorithm
817 * @param num_mem_params
818 * @param mem_params
819 * @param num_reg_params
820 * @param reg_param
821 * @param entry_point
822 * @param exit_point
823 * @param timeout_ms
824 * @param arch_info target-specific description of the algorithm.
825 */
826 int target_run_algorithm(struct target *target,
827 int num_mem_params, struct mem_param *mem_params,
828 int num_reg_params, struct reg_param *reg_param,
829 uint32_t entry_point, uint32_t exit_point,
830 int timeout_ms, void *arch_info)
831 {
832 int retval = ERROR_FAIL;
833
834 if (!target_was_examined(target)) {
835 LOG_ERROR("Target not examined yet");
836 goto done;
837 }
838 if (!target->type->run_algorithm) {
839 LOG_ERROR("Target type '%s' does not support %s",
840 target_type_name(target), __func__);
841 goto done;
842 }
843
844 target->running_alg = true;
845 retval = target->type->run_algorithm(target,
846 num_mem_params, mem_params,
847 num_reg_params, reg_param,
848 entry_point, exit_point, timeout_ms, arch_info);
849 target->running_alg = false;
850
851 done:
852 return retval;
853 }
854
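/*
 * Illustrative sketch (not part of the build): how a flash driver might call
 * target_run_algorithm(). The ARMv7-M arch_info, the register name, and the
 * code_addr/argument/result variables are assumptions for a Cortex-M target
 * (see src/target/armv7m.h); other architectures pass their own arch_info.
 *
 *   struct reg_param reg_params[1];
 *   struct armv7m_algorithm armv7m_info = {
 *       .common_magic = ARMV7M_COMMON_MAGIC,
 *       .core_mode = ARM_MODE_THREAD,
 *   };
 *
 *   init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
 *   buf_set_u32(reg_params[0].value, 0, 32, argument);
 *
 *   retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *           code_addr, 0, 1000, &armv7m_info);   // exit_point 0: stub ends in a breakpoint
 *   if (retval == ERROR_OK)
 *       result = buf_get_u32(reg_params[0].value, 0, 32);
 *   destroy_reg_param(&reg_params[0]);
 */
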
855 /**
856 * Executes a target-specific native code algorithm and leaves it running.
857 *
858 * @param target used to run the algorithm
859 * @param num_mem_params
860 * @param mem_params
861 * @param num_reg_params
862 * @param reg_params
863 * @param entry_point
864 * @param exit_point
865 * @param arch_info target-specific description of the algorithm.
866 */
867 int target_start_algorithm(struct target *target,
868 int num_mem_params, struct mem_param *mem_params,
869 int num_reg_params, struct reg_param *reg_params,
870 uint32_t entry_point, uint32_t exit_point,
871 void *arch_info)
872 {
873 int retval = ERROR_FAIL;
874
875 if (!target_was_examined(target)) {
876 LOG_ERROR("Target not examined yet");
877 goto done;
878 }
879 if (!target->type->start_algorithm) {
880 LOG_ERROR("Target type '%s' does not support %s",
881 target_type_name(target), __func__);
882 goto done;
883 }
884 if (target->running_alg) {
885 LOG_ERROR("Target is already running an algorithm");
886 goto done;
887 }
888
889 target->running_alg = true;
890 retval = target->type->start_algorithm(target,
891 num_mem_params, mem_params,
892 num_reg_params, reg_params,
893 entry_point, exit_point, arch_info);
894
895 done:
896 return retval;
897 }
898
899 /**
900 * Waits for an algorithm started with target_start_algorithm() to complete.
901 *
902 * @param target used to run the algorithm
903 * @param num_mem_params
904 * @param mem_params
905 * @param num_reg_params
906 * @param reg_params
907 * @param exit_point
908 * @param timeout_ms
909 * @param arch_info target-specific description of the algorithm.
910 */
911 int target_wait_algorithm(struct target *target,
912 int num_mem_params, struct mem_param *mem_params,
913 int num_reg_params, struct reg_param *reg_params,
914 uint32_t exit_point, int timeout_ms,
915 void *arch_info)
916 {
917 int retval = ERROR_FAIL;
918
919 if (!target->type->wait_algorithm) {
920 LOG_ERROR("Target type '%s' does not support %s",
921 target_type_name(target), __func__);
922 goto done;
923 }
924 if (!target->running_alg) {
925 LOG_ERROR("Target is not running an algorithm");
926 goto done;
927 }
928
929 retval = target->type->wait_algorithm(target,
930 num_mem_params, mem_params,
931 num_reg_params, reg_params,
932 exit_point, timeout_ms, arch_info);
933 if (retval != ERROR_TARGET_TIMEOUT)
934 target->running_alg = false;
935
936 done:
937 return retval;
938 }
939
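/*
 * Illustrative sketch (not part of the build): the split start/wait API lets
 * the host keep working (e.g. refilling a FIFO) while the algorithm runs;
 * num_reg_params, reg_params, entry_point, exit_point and arch_info are the
 * same kind of placeholders as in the target_run_algorithm() sketch above.
 *
 *   retval = target_start_algorithm(target, 0, NULL, num_reg_params, reg_params,
 *           entry_point, exit_point, arch_info);
 *   if (retval != ERROR_OK)
 *       return retval;
 *
 *   // ... stream data to the target while the algorithm executes ...
 *
 *   retval = target_wait_algorithm(target, 0, NULL, num_reg_params, reg_params,
 *           exit_point, 10000, arch_info);
 */
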
940 /**
941 * Streams data to a circular buffer on target intended for consumption by code
942 * running asynchronously on target.
943 *
944 * This is intended for applications where target-specific native code runs
945 * on the target, receives data from the circular buffer, does something with
946 * it (most likely writing it to a flash memory), and advances the circular
947 * buffer pointer.
948 *
949 * This assumes that the helper algorithm has already been loaded to the target,
950 * but has not been started yet. Given memory and register parameters are passed
951 * to the algorithm.
952 *
953 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
954 * following format:
955 *
956 * [buffer_start + 0, buffer_start + 4):
957 * Write Pointer address (aka head). Written and updated by this
958 * routine when new data is written to the circular buffer.
959 * [buffer_start + 4, buffer_start + 8):
960 * Read Pointer address (aka tail). Updated by code running on the
961 * target after it consumes data.
962 * [buffer_start + 8, buffer_start + buffer_size):
963 * Circular buffer contents.
964 *
965 * See contrib/loaders/flash/stm32f1x.S for an example.
966 *
967 * @param target used to run the algorithm
968 * @param buffer address on the host where data to be sent is located
969 * @param count number of blocks to send
970 * @param block_size size in bytes of each block
971 * @param num_mem_params count of memory-based params to pass to algorithm
972 * @param mem_params memory-based params to pass to algorithm
973 * @param num_reg_params count of register-based params to pass to algorithm
974 * @param reg_params register-based params to pass to algorithm
975 * @param buffer_start address on the target of the circular buffer structure
976 * @param buffer_size size of the circular buffer structure
977 * @param entry_point address on the target to execute to start the algorithm
978 * @param exit_point address at which to set a breakpoint to catch the
979 * end of the algorithm; can be 0 if target triggers a breakpoint itself
980 * @param arch_info
981 */
982
983 int target_run_flash_async_algorithm(struct target *target,
984 const uint8_t *buffer, uint32_t count, int block_size,
985 int num_mem_params, struct mem_param *mem_params,
986 int num_reg_params, struct reg_param *reg_params,
987 uint32_t buffer_start, uint32_t buffer_size,
988 uint32_t entry_point, uint32_t exit_point, void *arch_info)
989 {
990 int retval;
991 int timeout = 0;
992
993 const uint8_t *buffer_orig = buffer;
994
995 /* Set up working area. First word is write pointer, second word is read pointer,
996 * rest is fifo data area. */
997 uint32_t wp_addr = buffer_start;
998 uint32_t rp_addr = buffer_start + 4;
999 uint32_t fifo_start_addr = buffer_start + 8;
1000 uint32_t fifo_end_addr = buffer_start + buffer_size;
1001
1002 uint32_t wp = fifo_start_addr;
1003 uint32_t rp = fifo_start_addr;
1004
1005 /* validate block_size is 2^n */
1006 assert(!block_size || !(block_size & (block_size - 1)));
1007
1008 retval = target_write_u32(target, wp_addr, wp);
1009 if (retval != ERROR_OK)
1010 return retval;
1011 retval = target_write_u32(target, rp_addr, rp);
1012 if (retval != ERROR_OK)
1013 return retval;
1014
1015 /* Start up algorithm on target and let it idle while writing the first chunk */
1016 retval = target_start_algorithm(target, num_mem_params, mem_params,
1017 num_reg_params, reg_params,
1018 entry_point,
1019 exit_point,
1020 arch_info);
1021
1022 if (retval != ERROR_OK) {
1023 LOG_ERROR("error starting target flash write algorithm");
1024 return retval;
1025 }
1026
1027 while (count > 0) {
1028
1029 retval = target_read_u32(target, rp_addr, &rp);
1030 if (retval != ERROR_OK) {
1031 LOG_ERROR("failed to get read pointer");
1032 break;
1033 }
1034
1035 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1036 (size_t) (buffer - buffer_orig), count, wp, rp);
1037
1038 if (rp == 0) {
1039 LOG_ERROR("flash write algorithm aborted by target");
1040 retval = ERROR_FLASH_OPERATION_FAILED;
1041 break;
1042 }
1043
1044 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1045 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1046 break;
1047 }
1048
1049 /* Count the number of bytes available in the fifo without
1050 * crossing the wrap around. Make sure to not fill it completely,
1051 * because that would make wp == rp and that's the empty condition. */
1052 uint32_t thisrun_bytes;
1053 if (rp > wp)
1054 thisrun_bytes = rp - wp - block_size;
1055 else if (rp > fifo_start_addr)
1056 thisrun_bytes = fifo_end_addr - wp;
1057 else
1058 thisrun_bytes = fifo_end_addr - wp - block_size;
1059
1060 if (thisrun_bytes == 0) {
1061 /* Throttle polling a bit if transfer is (much) faster than flash
1062 * programming. The exact delay shouldn't matter as long as it's
1063 * less than buffer size / flash speed. This is very unlikely to
1064 * run when using high latency connections such as USB. */
1065 alive_sleep(2);
1066
1067 /* to stop an infinite loop on some targets check and increment a timeout
1068 * this issue was observed on a stellaris using the new ICDI interface */
1069 if (timeout++ >= 2500) {
1070 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1071 return ERROR_FLASH_OPERATION_FAILED;
1072 }
1073 continue;
1074 }
1075
1076 /* reset our timeout */
1077 timeout = 0;
1078
1079 /* Limit to the amount of data we actually want to write */
1080 if (thisrun_bytes > count * block_size)
1081 thisrun_bytes = count * block_size;
1082
1083 /* Force end of large blocks to be word aligned */
1084 if (thisrun_bytes >= 16)
1085 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1086
1087 /* Write data to fifo */
1088 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1089 if (retval != ERROR_OK)
1090 break;
1091
1092 /* Update counters and wrap write pointer */
1093 buffer += thisrun_bytes;
1094 count -= thisrun_bytes / block_size;
1095 wp += thisrun_bytes;
1096 if (wp >= fifo_end_addr)
1097 wp = fifo_start_addr;
1098
1099 /* Store updated write pointer to target */
1100 retval = target_write_u32(target, wp_addr, wp);
1101 if (retval != ERROR_OK)
1102 break;
1103
1104 /* Avoid GDB timeouts */
1105 keep_alive();
1106 }
1107
1108 if (retval != ERROR_OK) {
1109 /* abort flash write algorithm on target */
1110 target_write_u32(target, wp_addr, 0);
1111 }
1112
1113 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1114 num_reg_params, reg_params,
1115 exit_point,
1116 10000,
1117 arch_info);
1118
1119 if (retval2 != ERROR_OK) {
1120 LOG_ERROR("error waiting for target flash write algorithm");
1121 retval = retval2;
1122 }
1123
1124 if (retval == ERROR_OK) {
1125 /* check if algorithm set rp = 0 after fifo writer loop finished */
1126 retval = target_read_u32(target, rp_addr, &rp);
1127 if (retval == ERROR_OK && rp == 0) {
1128 LOG_ERROR("flash write algorithm aborted by target");
1129 retval = ERROR_FLASH_OPERATION_FAILED;
1130 }
1131 }
1132
1133 return retval;
1134 }
1135
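/*
 * Illustrative sketch (not part of the build): a NOR flash driver handing a
 * RAM-loaded programming stub plus a FIFO working area to the routine above.
 * write_algorithm, fifo, reg_params and armv7m_info are hypothetical driver
 * state; the working areas come from target_alloc_working_area().
 *
 *   retval = target_run_flash_async_algorithm(target,
 *           buffer, count, 4,                        // 4-byte program blocks
 *           0, NULL, ARRAY_SIZE(reg_params), reg_params,
 *           fifo->address, fifo->size,               // includes the 8-byte wp/rp header
 *           write_algorithm->address, 0, &armv7m_info);
 */
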
1136 int target_run_read_async_algorithm(struct target *target,
1137 uint8_t *buffer, uint32_t count, int block_size,
1138 int num_mem_params, struct mem_param *mem_params,
1139 int num_reg_params, struct reg_param *reg_params,
1140 uint32_t buffer_start, uint32_t buffer_size,
1141 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1142 {
1143 int retval;
1144 int timeout = 0;
1145
1146 const uint8_t *buffer_orig = buffer;
1147
1148 /* Set up working area. First word is write pointer, second word is read pointer,
1149 * rest is fifo data area. */
1150 uint32_t wp_addr = buffer_start;
1151 uint32_t rp_addr = buffer_start + 4;
1152 uint32_t fifo_start_addr = buffer_start + 8;
1153 uint32_t fifo_end_addr = buffer_start + buffer_size;
1154
1155 uint32_t wp = fifo_start_addr;
1156 uint32_t rp = fifo_start_addr;
1157
1158 /* validate block_size is 2^n */
1159 assert(!block_size || !(block_size & (block_size - 1)));
1160
1161 retval = target_write_u32(target, wp_addr, wp);
1162 if (retval != ERROR_OK)
1163 return retval;
1164 retval = target_write_u32(target, rp_addr, rp);
1165 if (retval != ERROR_OK)
1166 return retval;
1167
1168 /* Start up algorithm on target */
1169 retval = target_start_algorithm(target, num_mem_params, mem_params,
1170 num_reg_params, reg_params,
1171 entry_point,
1172 exit_point,
1173 arch_info);
1174
1175 if (retval != ERROR_OK) {
1176 LOG_ERROR("error starting target flash read algorithm");
1177 return retval;
1178 }
1179
1180 while (count > 0) {
1181 retval = target_read_u32(target, wp_addr, &wp);
1182 if (retval != ERROR_OK) {
1183 LOG_ERROR("failed to get write pointer");
1184 break;
1185 }
1186
1187 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1188 (size_t)(buffer - buffer_orig), count, wp, rp);
1189
1190 if (wp == 0) {
1191 LOG_ERROR("flash read algorithm aborted by target");
1192 retval = ERROR_FLASH_OPERATION_FAILED;
1193 break;
1194 }
1195
1196 if (((wp - fifo_start_addr) & (block_size - 1)) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1197 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1198 break;
1199 }
1200
1201 /* Count the number of bytes available in the fifo without
1202 * crossing the wrap around. */
1203 uint32_t thisrun_bytes;
1204 if (wp >= rp)
1205 thisrun_bytes = wp - rp;
1206 else
1207 thisrun_bytes = fifo_end_addr - rp;
1208
1209 if (thisrun_bytes == 0) {
1210 /* Throttle polling a bit if transfer is (much) faster than flash
1211 * reading. The exact delay shouldn't matter as long as it's
1212 * less than buffer size / flash speed. This is very unlikely to
1213 * run when using high latency connections such as USB. */
1214 alive_sleep(2);
1215
1216 /* to stop an infinite loop on some targets check and increment a timeout
1217 * this issue was observed on a stellaris using the new ICDI interface */
1218 if (timeout++ >= 2500) {
1219 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1220 return ERROR_FLASH_OPERATION_FAILED;
1221 }
1222 continue;
1223 }
1224
1225 /* Reset our timeout */
1226 timeout = 0;
1227
1228 /* Limit to the amount of data we actually want to read */
1229 if (thisrun_bytes > count * block_size)
1230 thisrun_bytes = count * block_size;
1231
1232 /* Force end of large blocks to be word aligned */
1233 if (thisrun_bytes >= 16)
1234 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1235
1236 /* Read data from fifo */
1237 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1238 if (retval != ERROR_OK)
1239 break;
1240
1241 /* Update counters and wrap read pointer */
1242 buffer += thisrun_bytes;
1243 count -= thisrun_bytes / block_size;
1244 rp += thisrun_bytes;
1245 if (rp >= fifo_end_addr)
1246 rp = fifo_start_addr;
1247
1248 /* Store updated read pointer to target */
1249 retval = target_write_u32(target, rp_addr, rp);
1250 if (retval != ERROR_OK)
1251 break;
1252
1253 /* Avoid GDB timeouts */
1254 keep_alive();
1255
1256 }
1257
1258 if (retval != ERROR_OK) {
1259 /* abort flash read algorithm on target */
1260 target_write_u32(target, rp_addr, 0);
1261 }
1262
1263 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1264 num_reg_params, reg_params,
1265 exit_point,
1266 10000,
1267 arch_info);
1268
1269 if (retval2 != ERROR_OK) {
1270 LOG_ERROR("error waiting for target flash read algorithm");
1271 retval = retval2;
1272 }
1273
1274 if (retval == ERROR_OK) {
1275 /* check if algorithm set wp = 0 after fifo writer loop finished */
1276 retval = target_read_u32(target, wp_addr, &wp);
1277 if (retval == ERROR_OK && wp == 0) {
1278 LOG_ERROR("flash read algorithm aborted by target");
1279 retval = ERROR_FLASH_OPERATION_FAILED;
1280 }
1281 }
1282
1283 return retval;
1284 }
1285
1286 int target_read_memory(struct target *target,
1287 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1288 {
1289 if (!target_was_examined(target)) {
1290 LOG_ERROR("Target not examined yet");
1291 return ERROR_FAIL;
1292 }
1293 if (!target->type->read_memory) {
1294 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1295 return ERROR_FAIL;
1296 }
1297 return target->type->read_memory(target, address, size, count, buffer);
1298 }
1299
1300 int target_read_phys_memory(struct target *target,
1301 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1302 {
1303 if (!target_was_examined(target)) {
1304 LOG_ERROR("Target not examined yet");
1305 return ERROR_FAIL;
1306 }
1307 if (!target->type->read_phys_memory) {
1308 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1309 return ERROR_FAIL;
1310 }
1311 return target->type->read_phys_memory(target, address, size, count, buffer);
1312 }
1313
1314 int target_write_memory(struct target *target,
1315 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1316 {
1317 if (!target_was_examined(target)) {
1318 LOG_ERROR("Target not examined yet");
1319 return ERROR_FAIL;
1320 }
1321 if (!target->type->write_memory) {
1322 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1323 return ERROR_FAIL;
1324 }
1325 return target->type->write_memory(target, address, size, count, buffer);
1326 }
1327
1328 int target_write_phys_memory(struct target *target,
1329 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1330 {
1331 if (!target_was_examined(target)) {
1332 LOG_ERROR("Target not examined yet");
1333 return ERROR_FAIL;
1334 }
1335 if (!target->type->write_phys_memory) {
1336 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1337 return ERROR_FAIL;
1338 }
1339 return target->type->write_phys_memory(target, address, size, count, buffer);
1340 }
1341
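/*
 * Illustrative sketch (not part of the build): the wrappers above expose the
 * raw size/count interface; a single aligned word read looks like this (the
 * flash address is hypothetical). The target_read_u32()/target_write_u32()
 * helpers further down in this file build on these same calls.
 *
 *   uint8_t buf[4];
 *   retval = target_read_memory(target, 0x08000000, 4, 1, buf);
 *   if (retval == ERROR_OK)
 *       value = target_buffer_get_u32(target, buf);
 */
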
1342 int target_add_breakpoint(struct target *target,
1343 struct breakpoint *breakpoint)
1344 {
1345 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1346 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1347 return ERROR_TARGET_NOT_HALTED;
1348 }
1349 return target->type->add_breakpoint(target, breakpoint);
1350 }
1351
1352 int target_add_context_breakpoint(struct target *target,
1353 struct breakpoint *breakpoint)
1354 {
1355 if (target->state != TARGET_HALTED) {
1356 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1357 return ERROR_TARGET_NOT_HALTED;
1358 }
1359 return target->type->add_context_breakpoint(target, breakpoint);
1360 }
1361
1362 int target_add_hybrid_breakpoint(struct target *target,
1363 struct breakpoint *breakpoint)
1364 {
1365 if (target->state != TARGET_HALTED) {
1366 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1367 return ERROR_TARGET_NOT_HALTED;
1368 }
1369 return target->type->add_hybrid_breakpoint(target, breakpoint);
1370 }
1371
1372 int target_remove_breakpoint(struct target *target,
1373 struct breakpoint *breakpoint)
1374 {
1375 return target->type->remove_breakpoint(target, breakpoint);
1376 }
1377
1378 int target_add_watchpoint(struct target *target,
1379 struct watchpoint *watchpoint)
1380 {
1381 if (target->state != TARGET_HALTED) {
1382 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1383 return ERROR_TARGET_NOT_HALTED;
1384 }
1385 return target->type->add_watchpoint(target, watchpoint);
1386 }
1387 int target_remove_watchpoint(struct target *target,
1388 struct watchpoint *watchpoint)
1389 {
1390 return target->type->remove_watchpoint(target, watchpoint);
1391 }
1392 int target_hit_watchpoint(struct target *target,
1393 struct watchpoint **hit_watchpoint)
1394 {
1395 if (target->state != TARGET_HALTED) {
1396 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1397 return ERROR_TARGET_NOT_HALTED;
1398 }
1399
1400 if (target->type->hit_watchpoint == NULL) {
1401 /* For backward compatibility, if hit_watchpoint is not implemented,
1402 * return ERROR_FAIL so that gdb_server does not use the bogus
1403 * information. */
1404 return ERROR_FAIL;
1405 }
1406
1407 return target->type->hit_watchpoint(target, hit_watchpoint);
1408 }
1409
1410 const char *target_get_gdb_arch(struct target *target)
1411 {
1412 if (target->type->get_gdb_arch == NULL)
1413 return NULL;
1414 return target->type->get_gdb_arch(target);
1415 }
1416
1417 int target_get_gdb_reg_list(struct target *target,
1418 struct reg **reg_list[], int *reg_list_size,
1419 enum target_register_class reg_class)
1420 {
1421 int result = ERROR_FAIL;
1422
1423 if (!target_was_examined(target)) {
1424 LOG_ERROR("Target not examined yet");
1425 goto done;
1426 }
1427
1428 result = target->type->get_gdb_reg_list(target, reg_list,
1429 reg_list_size, reg_class);
1430
1431 done:
1432 if (result != ERROR_OK) {
1433 *reg_list = NULL;
1434 *reg_list_size = 0;
1435 }
1436 return result;
1437 }
1438
1439 int target_get_gdb_reg_list_noread(struct target *target,
1440 struct reg **reg_list[], int *reg_list_size,
1441 enum target_register_class reg_class)
1442 {
1443 if (target->type->get_gdb_reg_list_noread &&
1444 target->type->get_gdb_reg_list_noread(target, reg_list,
1445 reg_list_size, reg_class) == ERROR_OK)
1446 return ERROR_OK;
1447 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1448 }
1449
1450 bool target_supports_gdb_connection(struct target *target)
1451 {
1452 /*
1453 * exclude all the targets that don't provide get_gdb_reg_list
1454 * or that have explicit gdb_max_connection == 0
1455 */
1456 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1457 }
1458
1459 int target_step(struct target *target,
1460 int current, target_addr_t address, int handle_breakpoints)
1461 {
1462 int retval;
1463
1464 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1465
1466 retval = target->type->step(target, current, address, handle_breakpoints);
1467 if (retval != ERROR_OK)
1468 return retval;
1469
1470 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1471
1472 return retval;
1473 }
1474
1475 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1476 {
1477 if (target->state != TARGET_HALTED) {
1478 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1479 return ERROR_TARGET_NOT_HALTED;
1480 }
1481 return target->type->get_gdb_fileio_info(target, fileio_info);
1482 }
1483
1484 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1485 {
1486 if (target->state != TARGET_HALTED) {
1487 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1488 return ERROR_TARGET_NOT_HALTED;
1489 }
1490 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1491 }
1492
1493 target_addr_t target_address_max(struct target *target)
1494 {
1495 unsigned bits = target_address_bits(target);
1496 if (sizeof(target_addr_t) * 8 == bits)
1497 return (target_addr_t) -1;
1498 else
1499 return (((target_addr_t) 1) << bits) - 1;
1500 }
1501
1502 unsigned target_address_bits(struct target *target)
1503 {
1504 if (target->type->address_bits)
1505 return target->type->address_bits(target);
1506 return 32;
1507 }
1508
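/*
 * Worked example: with the default 32-bit address space target_address_max()
 * returns 0xffffffff; an address_bits() hook returning 48 yields
 * 0x0000ffffffffffff; and a full 64-bit target takes the (target_addr_t)-1
 * path, avoiding the undefined 64-bit shift.
 */
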
1509 static int target_profiling(struct target *target, uint32_t *samples,
1510 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1511 {
1512 return target->type->profiling(target, samples, max_num_samples,
1513 num_samples, seconds);
1514 }
1515
1516 /**
1517 * Reset the @c examined flag for the given target.
1518 * Pure paranoia -- targets are zeroed on allocation.
1519 */
1520 static void target_reset_examined(struct target *target)
1521 {
1522 target->examined = false;
1523 }
1524
1525 static int handle_target(void *priv);
1526
1527 static int target_init_one(struct command_context *cmd_ctx,
1528 struct target *target)
1529 {
1530 target_reset_examined(target);
1531
1532 struct target_type *type = target->type;
1533 if (type->examine == NULL)
1534 type->examine = default_examine;
1535
1536 if (type->check_reset == NULL)
1537 type->check_reset = default_check_reset;
1538
1539 assert(type->init_target != NULL);
1540
1541 int retval = type->init_target(cmd_ctx, target);
1542 if (ERROR_OK != retval) {
1543 LOG_ERROR("target '%s' init failed", target_name(target));
1544 return retval;
1545 }
1546
1547 /* Sanity-check MMU support ... stub in what we must, to help
1548 * implement it in stages, but warn if we need to do so.
1549 */
1550 if (type->mmu) {
1551 if (type->virt2phys == NULL) {
1552 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1553 type->virt2phys = identity_virt2phys;
1554 }
1555 } else {
1556 /* Make sure no-MMU targets all behave the same: make no
1557 * distinction between physical and virtual addresses, and
1558 * ensure that virt2phys() is always an identity mapping.
1559 */
1560 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1561 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1562
1563 type->mmu = no_mmu;
1564 type->write_phys_memory = type->write_memory;
1565 type->read_phys_memory = type->read_memory;
1566 type->virt2phys = identity_virt2phys;
1567 }
1568
1569 if (target->type->read_buffer == NULL)
1570 target->type->read_buffer = target_read_buffer_default;
1571
1572 if (target->type->write_buffer == NULL)
1573 target->type->write_buffer = target_write_buffer_default;
1574
1575 if (target->type->get_gdb_fileio_info == NULL)
1576 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1577
1578 if (target->type->gdb_fileio_end == NULL)
1579 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1580
1581 if (target->type->profiling == NULL)
1582 target->type->profiling = target_profiling_default;
1583
1584 return ERROR_OK;
1585 }
1586
1587 static int target_init(struct command_context *cmd_ctx)
1588 {
1589 struct target *target;
1590 int retval;
1591
1592 for (target = all_targets; target; target = target->next) {
1593 retval = target_init_one(cmd_ctx, target);
1594 if (ERROR_OK != retval)
1595 return retval;
1596 }
1597
1598 if (!all_targets)
1599 return ERROR_OK;
1600
1601 retval = target_register_user_commands(cmd_ctx);
1602 if (ERROR_OK != retval)
1603 return retval;
1604
1605 retval = target_register_timer_callback(&handle_target,
1606 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1607 if (ERROR_OK != retval)
1608 return retval;
1609
1610 return ERROR_OK;
1611 }
1612
1613 COMMAND_HANDLER(handle_target_init_command)
1614 {
1615 int retval;
1616
1617 if (CMD_ARGC != 0)
1618 return ERROR_COMMAND_SYNTAX_ERROR;
1619
1620 static bool target_initialized;
1621 if (target_initialized) {
1622 LOG_INFO("'target init' has already been called");
1623 return ERROR_OK;
1624 }
1625 target_initialized = true;
1626
1627 retval = command_run_line(CMD_CTX, "init_targets");
1628 if (ERROR_OK != retval)
1629 return retval;
1630
1631 retval = command_run_line(CMD_CTX, "init_target_events");
1632 if (ERROR_OK != retval)
1633 return retval;
1634
1635 retval = command_run_line(CMD_CTX, "init_board");
1636 if (ERROR_OK != retval)
1637 return retval;
1638
1639 LOG_DEBUG("Initializing targets...");
1640 return target_init(CMD_CTX);
1641 }
1642
1643 int target_register_event_callback(int (*callback)(struct target *target,
1644 enum target_event event, void *priv), void *priv)
1645 {
1646 struct target_event_callback **callbacks_p = &target_event_callbacks;
1647
1648 if (callback == NULL)
1649 return ERROR_COMMAND_SYNTAX_ERROR;
1650
1651 if (*callbacks_p) {
1652 while ((*callbacks_p)->next)
1653 callbacks_p = &((*callbacks_p)->next);
1654 callbacks_p = &((*callbacks_p)->next);
1655 }
1656
1657 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1658 (*callbacks_p)->callback = callback;
1659 (*callbacks_p)->priv = priv;
1660 (*callbacks_p)->next = NULL;
1661
1662 return ERROR_OK;
1663 }
1664
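/*
 * Illustrative sketch (not part of the build): registering an event callback.
 * The callback is invoked for every target, so filter on the target and
 * event of interest; my_halt_notifier is a hypothetical name.
 *
 *   static int my_halt_notifier(struct target *target,
 *           enum target_event event, void *priv)
 *   {
 *       if (event == TARGET_EVENT_HALTED)
 *           LOG_INFO("%s halted", target_name(target));
 *       return ERROR_OK;
 *   }
 *
 *   target_register_event_callback(my_halt_notifier, NULL);
 */
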
1665 int target_register_reset_callback(int (*callback)(struct target *target,
1666 enum target_reset_mode reset_mode, void *priv), void *priv)
1667 {
1668 struct target_reset_callback *entry;
1669
1670 if (callback == NULL)
1671 return ERROR_COMMAND_SYNTAX_ERROR;
1672
1673 entry = malloc(sizeof(struct target_reset_callback));
1674 if (entry == NULL) {
1675 LOG_ERROR("error allocating buffer for reset callback entry");
1676 return ERROR_COMMAND_SYNTAX_ERROR;
1677 }
1678
1679 entry->callback = callback;
1680 entry->priv = priv;
1681 list_add(&entry->list, &target_reset_callback_list);
1682
1683
1684 return ERROR_OK;
1685 }
1686
1687 int target_register_trace_callback(int (*callback)(struct target *target,
1688 size_t len, uint8_t *data, void *priv), void *priv)
1689 {
1690 struct target_trace_callback *entry;
1691
1692 if (callback == NULL)
1693 return ERROR_COMMAND_SYNTAX_ERROR;
1694
1695 entry = malloc(sizeof(struct target_trace_callback));
1696 if (entry == NULL) {
1697 LOG_ERROR("error allocating buffer for trace callback entry");
1698 return ERROR_COMMAND_SYNTAX_ERROR;
1699 }
1700
1701 entry->callback = callback;
1702 entry->priv = priv;
1703 list_add(&entry->list, &target_trace_callback_list);
1704
1705
1706 return ERROR_OK;
1707 }
1708
1709 int target_register_timer_callback(int (*callback)(void *priv),
1710 unsigned int time_ms, enum target_timer_type type, void *priv)
1711 {
1712 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1713
1714 if (callback == NULL)
1715 return ERROR_COMMAND_SYNTAX_ERROR;
1716
1717 if (*callbacks_p) {
1718 while ((*callbacks_p)->next)
1719 callbacks_p = &((*callbacks_p)->next);
1720 callbacks_p = &((*callbacks_p)->next);
1721 }
1722
1723 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1724 (*callbacks_p)->callback = callback;
1725 (*callbacks_p)->type = type;
1726 (*callbacks_p)->time_ms = time_ms;
1727 (*callbacks_p)->removed = false;
1728
1729 gettimeofday(&(*callbacks_p)->when, NULL);
1730 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1731
1732 (*callbacks_p)->priv = priv;
1733 (*callbacks_p)->next = NULL;
1734
1735 return ERROR_OK;
1736 }
1737
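/* Illustrative sketch (not part of this file's build): registering a periodic
 * timer callback that fires roughly every 100 ms until it is unregistered.
 * The handler name and period are hypothetical.
 *
 *   static int my_tick(void *priv)
 *   {
 *       struct target *target = priv;
 *       LOG_DEBUG("tick for %s", target_name(target));
 *       return ERROR_OK;
 *   }
 *
 *   ...
 *   target_register_timer_callback(my_tick, 100, TARGET_TIMER_TYPE_PERIODIC, target);
 *   ...
 *   target_unregister_timer_callback(my_tick, target);
 */
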
1738 int target_unregister_event_callback(int (*callback)(struct target *target,
1739 enum target_event event, void *priv), void *priv)
1740 {
1741 struct target_event_callback **p = &target_event_callbacks;
1742 struct target_event_callback *c = target_event_callbacks;
1743
1744 if (callback == NULL)
1745 return ERROR_COMMAND_SYNTAX_ERROR;
1746
1747 while (c) {
1748 struct target_event_callback *next = c->next;
1749 if ((c->callback == callback) && (c->priv == priv)) {
1750 *p = next;
1751 free(c);
1752 return ERROR_OK;
1753 } else
1754 p = &(c->next);
1755 c = next;
1756 }
1757
1758 return ERROR_OK;
1759 }
1760
1761 int target_unregister_reset_callback(int (*callback)(struct target *target,
1762 enum target_reset_mode reset_mode, void *priv), void *priv)
1763 {
1764 struct target_reset_callback *entry;
1765
1766 if (callback == NULL)
1767 return ERROR_COMMAND_SYNTAX_ERROR;
1768
1769 list_for_each_entry(entry, &target_reset_callback_list, list) {
1770 if (entry->callback == callback && entry->priv == priv) {
1771 list_del(&entry->list);
1772 free(entry);
1773 break;
1774 }
1775 }
1776
1777 return ERROR_OK;
1778 }
1779
1780 int target_unregister_trace_callback(int (*callback)(struct target *target,
1781 size_t len, uint8_t *data, void *priv), void *priv)
1782 {
1783 struct target_trace_callback *entry;
1784
1785 if (callback == NULL)
1786 return ERROR_COMMAND_SYNTAX_ERROR;
1787
1788 list_for_each_entry(entry, &target_trace_callback_list, list) {
1789 if (entry->callback == callback && entry->priv == priv) {
1790 list_del(&entry->list);
1791 free(entry);
1792 break;
1793 }
1794 }
1795
1796 return ERROR_OK;
1797 }
1798
1799 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1800 {
1801 if (callback == NULL)
1802 return ERROR_COMMAND_SYNTAX_ERROR;
1803
1804 for (struct target_timer_callback *c = target_timer_callbacks;
1805 c; c = c->next) {
1806 if ((c->callback == callback) && (c->priv == priv)) {
1807 c->removed = true;
1808 return ERROR_OK;
1809 }
1810 }
1811
1812 return ERROR_FAIL;
1813 }
1814
1815 int target_call_event_callbacks(struct target *target, enum target_event event)
1816 {
1817 struct target_event_callback *callback = target_event_callbacks;
1818 struct target_event_callback *next_callback;
1819
1820 if (event == TARGET_EVENT_HALTED) {
1821 /* deliver the TARGET_EVENT_GDB_HALT callbacks first */
1822 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1823 }
1824
1825 LOG_DEBUG("target event %i (%s) for core %s", event,
1826 Jim_Nvp_value2name_simple(nvp_target_event, event)->name,
1827 target_name(target));
1828
1829 target_handle_event(target, event);
1830
1831 while (callback) {
1832 next_callback = callback->next;
1833 callback->callback(target, event, callback->priv);
1834 callback = next_callback;
1835 }
1836
1837 return ERROR_OK;
1838 }
1839
1840 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1841 {
1842 struct target_reset_callback *callback;
1843
1844 LOG_DEBUG("target reset %i (%s)", reset_mode,
1845 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1846
1847 list_for_each_entry(callback, &target_reset_callback_list, list)
1848 callback->callback(target, reset_mode, callback->priv);
1849
1850 return ERROR_OK;
1851 }
1852
1853 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1854 {
1855 struct target_trace_callback *callback;
1856
1857 list_for_each_entry(callback, &target_trace_callback_list, list)
1858 callback->callback(target, len, data, callback->priv);
1859
1860 return ERROR_OK;
1861 }
1862
1863 static int target_timer_callback_periodic_restart(
1864 struct target_timer_callback *cb, struct timeval *now)
1865 {
1866 cb->when = *now;
1867 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1868 return ERROR_OK;
1869 }
1870
1871 static int target_call_timer_callback(struct target_timer_callback *cb,
1872 struct timeval *now)
1873 {
1874 cb->callback(cb->priv);
1875
1876 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1877 return target_timer_callback_periodic_restart(cb, now);
1878
1879 return target_unregister_timer_callback(cb->callback, cb->priv);
1880 }
1881
1882 static int target_call_timer_callbacks_check_time(int checktime)
1883 {
1884 static bool callback_processing;
1885
1886 /* Do not allow nesting */
1887 if (callback_processing)
1888 return ERROR_OK;
1889
1890 callback_processing = true;
1891
1892 keep_alive();
1893
1894 struct timeval now;
1895 gettimeofday(&now, NULL);
1896
1897 /* Store an address of the place containing a pointer to the
1898 * next item; initially, that's a standalone "root of the
1899 * list" variable. */
1900 struct target_timer_callback **callback = &target_timer_callbacks;
1901 while (callback && *callback) {
1902 if ((*callback)->removed) {
1903 struct target_timer_callback *p = *callback;
1904 *callback = (*callback)->next;
1905 free(p);
1906 continue;
1907 }
1908
1909 bool call_it = (*callback)->callback &&
1910 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1911 timeval_compare(&now, &(*callback)->when) >= 0);
1912
1913 if (call_it)
1914 target_call_timer_callback(*callback, &now);
1915
1916 callback = &(*callback)->next;
1917 }
1918
1919 callback_processing = false;
1920 return ERROR_OK;
1921 }
1922
1923 int target_call_timer_callbacks(void)
1924 {
1925 return target_call_timer_callbacks_check_time(1);
1926 }
1927
1928 /* invoke periodic callbacks immediately */
1929 int target_call_timer_callbacks_now(void)
1930 {
1931 return target_call_timer_callbacks_check_time(0);
1932 }
1933
1934 /* Prints the working area layout for debug purposes */
1935 static void print_wa_layout(struct target *target)
1936 {
1937 struct working_area *c = target->working_areas;
1938
1939 while (c) {
1940 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1941 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1942 c->address, c->address + c->size - 1, c->size);
1943 c = c->next;
1944 }
1945 }
1946
1947 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1948 static void target_split_working_area(struct working_area *area, uint32_t size)
1949 {
1950 assert(area->free); /* Shouldn't split an allocated area */
1951 assert(size <= area->size); /* Caller should guarantee this */
1952
1953 /* Split only if not already the right size */
1954 if (size < area->size) {
1955 struct working_area *new_wa = malloc(sizeof(*new_wa));
1956
1957 if (new_wa == NULL)
1958 return;
1959
1960 new_wa->next = area->next;
1961 new_wa->size = area->size - size;
1962 new_wa->address = area->address + size;
1963 new_wa->backup = NULL;
1964 new_wa->user = NULL;
1965 new_wa->free = true;
1966
1967 area->next = new_wa;
1968 area->size = size;
1969
1970 /* If backup memory was allocated to this area, it has the wrong size
1971 * now so free it and it will be reallocated if/when needed */
1972 free(area->backup);
1973 area->backup = NULL;
1974 }
1975 }
1976
1977 /* Merge all adjacent free areas into one */
1978 static void target_merge_working_areas(struct target *target)
1979 {
1980 struct working_area *c = target->working_areas;
1981
1982 while (c && c->next) {
1983 assert(c->next->address == c->address + c->size); /* This is an invariant */
1984
1985 /* Find two adjacent free areas */
1986 if (c->free && c->next->free) {
1987 /* Merge the last into the first */
1988 c->size += c->next->size;
1989
1990 /* Remove the last */
1991 struct working_area *to_be_freed = c->next;
1992 c->next = c->next->next;
1993 free(to_be_freed->backup);
1994 free(to_be_freed);
1995
1996 /* If backup memory was allocated to the remaining area, it has
1997 * the wrong size now */
1998 free(c->backup);
1999 c->backup = NULL;
2000 } else {
2001 c = c->next;
2002 }
2003 }
2004 }
2005
2006 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2007 {
2008 /* Reevaluate the working area address based on MMU state */
2009 if (target->working_areas == NULL) {
2010 int retval;
2011 int enabled;
2012
2013 retval = target->type->mmu(target, &enabled);
2014 if (retval != ERROR_OK)
2015 return retval;
2016
2017 if (!enabled) {
2018 if (target->working_area_phys_spec) {
2019 LOG_DEBUG("MMU disabled, using physical "
2020 "address for working memory " TARGET_ADDR_FMT,
2021 target->working_area_phys);
2022 target->working_area = target->working_area_phys;
2023 } else {
2024 LOG_ERROR("No working memory available. "
2025 "Specify -work-area-phys to target.");
2026 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2027 }
2028 } else {
2029 if (target->working_area_virt_spec) {
2030 LOG_DEBUG("MMU enabled, using virtual "
2031 "address for working memory " TARGET_ADDR_FMT,
2032 target->working_area_virt);
2033 target->working_area = target->working_area_virt;
2034 } else {
2035 LOG_ERROR("No working memory available. "
2036 "Specify -work-area-virt to target.");
2037 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2038 }
2039 }
2040
2041 /* Set up initial working area on first call */
2042 struct working_area *new_wa = malloc(sizeof(*new_wa));
2043 if (new_wa) {
2044 new_wa->next = NULL;
2045 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2046 new_wa->address = target->working_area;
2047 new_wa->backup = NULL;
2048 new_wa->user = NULL;
2049 new_wa->free = true;
2050 }
2051
2052 target->working_areas = new_wa;
2053 }
2054
2055 /* only allocate multiples of 4 bytes */
2056 if (size % 4)
2057 size = (size + 3) & (~3UL);
2058
2059 struct working_area *c = target->working_areas;
2060
2061 /* Find the first large enough working area */
2062 while (c) {
2063 if (c->free && c->size >= size)
2064 break;
2065 c = c->next;
2066 }
2067
2068 if (c == NULL)
2069 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2070
2071 /* Split the working area into the requested size */
2072 target_split_working_area(c, size);
2073
2074 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2075 size, c->address);
2076
2077 if (target->backup_working_area) {
2078 if (c->backup == NULL) {
2079 c->backup = malloc(c->size);
2080 if (c->backup == NULL)
2081 return ERROR_FAIL;
2082 }
2083
2084 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2085 if (retval != ERROR_OK)
2086 return retval;
2087 }
2088
2089 /* mark as used, and return the new (reused) area */
2090 c->free = false;
2091 *area = c;
2092
2093 /* user pointer */
2094 c->user = area;
2095
2096 print_wa_layout(target);
2097
2098 return ERROR_OK;
2099 }
2100
2101 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2102 {
2103 int retval;
2104
2105 retval = target_alloc_working_area_try(target, size, area);
2106 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2107 LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
2108 return retval;
2109
2110 }
2111
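/* Illustrative sketch (not part of this file's build): the usual
 * allocate/use/free pattern for a scratch buffer in target RAM, as flash and
 * algorithm code typically does. The 256-byte size and "data" buffer are
 * hypothetical.
 *
 *   struct working_area *wa;
 *   int retval = target_alloc_working_area(target, 256, &wa);
 *   if (retval != ERROR_OK)
 *       return retval;
 *
 *   retval = target_write_buffer(target, wa->address, 256, data);
 *   ...
 *   target_free_working_area(target, wa);
 */
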
2112 static int target_restore_working_area(struct target *target, struct working_area *area)
2113 {
2114 int retval = ERROR_OK;
2115
2116 if (target->backup_working_area && area->backup != NULL) {
2117 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2118 if (retval != ERROR_OK)
2119 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2120 area->size, area->address);
2121 }
2122
2123 return retval;
2124 }
2125
2126 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2127 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2128 {
2129 int retval = ERROR_OK;
2130
2131 if (area->free)
2132 return retval;
2133
2134 if (restore) {
2135 retval = target_restore_working_area(target, area);
2136 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2137 if (retval != ERROR_OK)
2138 return retval;
2139 }
2140
2141 area->free = true;
2142
2143 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2144 area->size, area->address);
2145
2146 /* mark user pointer invalid */
2147 /* TODO: Is this really safe? It points to some previous caller's memory.
2148 * How could we know that the area pointer is still in that place and not
2149 * some other vital data? What's the purpose of this, anyway? */
2150 *area->user = NULL;
2151 area->user = NULL;
2152
2153 target_merge_working_areas(target);
2154
2155 print_wa_layout(target);
2156
2157 return retval;
2158 }
2159
2160 int target_free_working_area(struct target *target, struct working_area *area)
2161 {
2162 return target_free_working_area_restore(target, area, 1);
2163 }
2164
2165 /* Free resources and restore memory; if restoring memory fails,
2166 * free up the resources anyway
2167 */
2168 static void target_free_all_working_areas_restore(struct target *target, int restore)
2169 {
2170 struct working_area *c = target->working_areas;
2171
2172 LOG_DEBUG("freeing all working areas");
2173
2174 /* Loop through all areas, restoring the allocated ones and marking them as free */
2175 while (c) {
2176 if (!c->free) {
2177 if (restore)
2178 target_restore_working_area(target, c);
2179 c->free = true;
2180 *c->user = NULL; /* Same as above */
2181 c->user = NULL;
2182 }
2183 c = c->next;
2184 }
2185
2186 /* Run a merge pass to combine all areas into one */
2187 target_merge_working_areas(target);
2188
2189 print_wa_layout(target);
2190 }
2191
2192 void target_free_all_working_areas(struct target *target)
2193 {
2194 target_free_all_working_areas_restore(target, 1);
2195
2196 /* Now we have none or only one working area marked as free */
2197 if (target->working_areas) {
2198 /* Free the last one to allow on-the-fly moving and resizing */
2199 free(target->working_areas->backup);
2200 free(target->working_areas);
2201 target->working_areas = NULL;
2202 }
2203 }
2204
2205 /* Find the largest number of bytes that can be allocated */
2206 uint32_t target_get_working_area_avail(struct target *target)
2207 {
2208 struct working_area *c = target->working_areas;
2209 uint32_t max_size = 0;
2210
2211 if (c == NULL)
2212 return target->working_area_size;
2213
2214 while (c) {
2215 if (c->free && max_size < c->size)
2216 max_size = c->size;
2217
2218 c = c->next;
2219 }
2220
2221 return max_size;
2222 }
2223
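/* Illustrative sketch (not part of this file's build): callers that can cope
 * with a variable-sized buffer often size their request from the largest free
 * block; "wanted" is a hypothetical upper bound.
 *
 *   uint32_t avail = target_get_working_area_avail(target);
 *   uint32_t chunk = (avail < wanted) ? avail : wanted;
 *   struct working_area *wa;
 *   if (target_alloc_working_area_try(target, chunk, &wa) != ERROR_OK)
 *       ... fall back to a slower, bufferless path ...
 */
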
2224 static void target_destroy(struct target *target)
2225 {
2226 if (target->type->deinit_target)
2227 target->type->deinit_target(target);
2228
2229 free(target->semihosting);
2230
2231 jtag_unregister_event_callback(jtag_enable_callback, target);
2232
2233 struct target_event_action *teap = target->event_action;
2234 while (teap) {
2235 struct target_event_action *next = teap->next;
2236 Jim_DecrRefCount(teap->interp, teap->body);
2237 free(teap);
2238 teap = next;
2239 }
2240
2241 target_free_all_working_areas(target);
2242
2243 /* release the target's SMP list */
2244 if (target->smp) {
2245 struct target_list *head = target->head;
2246 while (head != NULL) {
2247 struct target_list *pos = head->next;
2248 head->target->smp = 0;
2249 free(head);
2250 head = pos;
2251 }
2252 target->smp = 0;
2253 }
2254
2255 rtos_destroy(target);
2256
2257 free(target->gdb_port_override);
2258 free(target->type);
2259 free(target->trace_info);
2260 free(target->fileio_info);
2261 free(target->cmd_name);
2262 free(target);
2263 }
2264
2265 void target_quit(void)
2266 {
2267 struct target_event_callback *pe = target_event_callbacks;
2268 while (pe) {
2269 struct target_event_callback *t = pe->next;
2270 free(pe);
2271 pe = t;
2272 }
2273 target_event_callbacks = NULL;
2274
2275 struct target_timer_callback *pt = target_timer_callbacks;
2276 while (pt) {
2277 struct target_timer_callback *t = pt->next;
2278 free(pt);
2279 pt = t;
2280 }
2281 target_timer_callbacks = NULL;
2282
2283 for (struct target *target = all_targets; target;) {
2284 struct target *tmp;
2285
2286 tmp = target->next;
2287 target_destroy(target);
2288 target = tmp;
2289 }
2290
2291 all_targets = NULL;
2292 }
2293
2294 int target_arch_state(struct target *target)
2295 {
2296 int retval;
2297 if (target == NULL) {
2298 LOG_WARNING("No target has been configured");
2299 return ERROR_OK;
2300 }
2301
2302 if (target->state != TARGET_HALTED)
2303 return ERROR_OK;
2304
2305 retval = target->type->arch_state(target);
2306 return retval;
2307 }
2308
2309 static int target_get_gdb_fileio_info_default(struct target *target,
2310 struct gdb_fileio_info *fileio_info)
2311 {
2312 /* If the target does not support semihosting, it has no need to
2313 provide a .get_gdb_fileio_info callback. This default just returns
2314 ERROR_FAIL, and gdb_server will report "Txx" (target halted)
2315 every time. */
2316 return ERROR_FAIL;
2317 }
2318
2319 static int target_gdb_fileio_end_default(struct target *target,
2320 int retcode, int fileio_errno, bool ctrl_c)
2321 {
2322 return ERROR_OK;
2323 }
2324
2325 int target_profiling_default(struct target *target, uint32_t *samples,
2326 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2327 {
2328 struct timeval timeout, now;
2329
2330 gettimeofday(&timeout, NULL);
2331 timeval_add_time(&timeout, seconds, 0);
2332
2333 LOG_INFO("Starting profiling. Halting and resuming the"
2334 " target as often as we can...");
2335
2336 uint32_t sample_count = 0;
2337 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2338 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
2339
2340 int retval = ERROR_OK;
2341 for (;;) {
2342 target_poll(target);
2343 if (target->state == TARGET_HALTED) {
2344 uint32_t t = buf_get_u32(reg->value, 0, 32);
2345 samples[sample_count++] = t;
2346 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2347 retval = target_resume(target, 1, 0, 0, 0);
2348 target_poll(target);
2349 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2350 } else if (target->state == TARGET_RUNNING) {
2351 /* We want to quickly sample the PC. */
2352 retval = target_halt(target);
2353 } else {
2354 LOG_INFO("Target is neither halted nor running");
2355 retval = ERROR_OK;
2356 break;
2357 }
2358
2359 if (retval != ERROR_OK)
2360 break;
2361
2362 gettimeofday(&now, NULL);
2363 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2364 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2365 break;
2366 }
2367 }
2368
2369 *num_samples = sample_count;
2370 return retval;
2371 }
2372
2373 /* Single aligned 16- or 32-bit words are guaranteed to use a matching
2374 * access size; otherwise the data is transferred with whatever access
2375 * sizes are fastest
2376 */
2377 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2378 {
2379 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2380 size, address);
2381
2382 if (!target_was_examined(target)) {
2383 LOG_ERROR("Target not examined yet");
2384 return ERROR_FAIL;
2385 }
2386
2387 if (size == 0)
2388 return ERROR_OK;
2389
2390 if ((address + size - 1) < address) {
2391 /* GDB can request this when e.g. PC is 0xfffffffc */
2392 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2393 address,
2394 size);
2395 return ERROR_FAIL;
2396 }
2397
2398 return target->type->write_buffer(target, address, size, buffer);
2399 }
2400
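/* Illustrative sketch (not part of this file's build): writing a small block
 * and reading it back with the buffer helpers; the address and payload are
 * arbitrary example values.
 *
 *   const uint8_t payload[8] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4 };
 *   uint8_t readback[8];
 *
 *   if (target_write_buffer(target, 0x20000000, sizeof(payload), payload) != ERROR_OK)
 *       return ERROR_FAIL;
 *   if (target_read_buffer(target, 0x20000000, sizeof(readback), readback) != ERROR_OK)
 *       return ERROR_FAIL;
 */
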
2401 static int target_write_buffer_default(struct target *target,
2402 target_addr_t address, uint32_t count, const uint8_t *buffer)
2403 {
2404 uint32_t size;
2405
2406 /* Align the start address using 1- and 2-byte accesses as needed. The loop
2407 * condition makes sure the larger accesses that follow still have data left. */
2408 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2409 if (address & size) {
2410 int retval = target_write_memory(target, address, size, 1, buffer);
2411 if (retval != ERROR_OK)
2412 return retval;
2413 address += size;
2414 count -= size;
2415 buffer += size;
2416 }
2417 }
2418
2419 /* Write the data with as large access size as possible. */
2420 for (; size > 0; size /= 2) {
2421 uint32_t aligned = count - count % size;
2422 if (aligned > 0) {
2423 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2424 if (retval != ERROR_OK)
2425 return retval;
2426 address += aligned;
2427 count -= aligned;
2428 buffer += aligned;
2429 }
2430 }
2431
2432 return ERROR_OK;
2433 }
2434
2435 /* Single aligned 16- or 32-bit words are guaranteed to use a matching
2436 * access size; otherwise the data is transferred with whatever access
2437 * sizes are fastest
2438 */
2439 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2440 {
2441 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2442 size, address);
2443
2444 if (!target_was_examined(target)) {
2445 LOG_ERROR("Target not examined yet");
2446 return ERROR_FAIL;
2447 }
2448
2449 if (size == 0)
2450 return ERROR_OK;
2451
2452 if ((address + size - 1) < address) {
2453 /* GDB can request this when e.g. PC is 0xfffffffc */
2454 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2455 address,
2456 size);
2457 return ERROR_FAIL;
2458 }
2459
2460 return target->type->read_buffer(target, address, size, buffer);
2461 }
2462
2463 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2464 {
2465 uint32_t size;
2466
2467 /* Align the start address using 1- and 2-byte accesses as needed. The loop
2468 * condition makes sure the larger accesses that follow still have data left. */
2469 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2470 if (address & size) {
2471 int retval = target_read_memory(target, address, size, 1, buffer);
2472 if (retval != ERROR_OK)
2473 return retval;
2474 address += size;
2475 count -= size;
2476 buffer += size;
2477 }
2478 }
2479
2480 /* Read the data with as large access size as possible. */
2481 for (; size > 0; size /= 2) {
2482 uint32_t aligned = count - count % size;
2483 if (aligned > 0) {
2484 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2485 if (retval != ERROR_OK)
2486 return retval;
2487 address += aligned;
2488 count -= aligned;
2489 buffer += aligned;
2490 }
2491 }
2492
2493 return ERROR_OK;
2494 }
2495
2496 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2497 {
2498 uint8_t *buffer;
2499 int retval;
2500 uint32_t i;
2501 uint32_t checksum = 0;
2502 if (!target_was_examined(target)) {
2503 LOG_ERROR("Target not examined yet");
2504 return ERROR_FAIL;
2505 }
2506
2507 retval = target->type->checksum_memory(target, address, size, &checksum);
2508 if (retval != ERROR_OK) {
2509 buffer = malloc(size);
2510 if (buffer == NULL) {
2511 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2512 return ERROR_FAIL;
2513 }
2514 retval = target_read_buffer(target, address, size, buffer);
2515 if (retval != ERROR_OK) {
2516 free(buffer);
2517 return retval;
2518 }
2519
2520 /* convert to target endianness */
2521 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2522 uint32_t target_data;
2523 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2524 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2525 }
2526
2527 retval = image_calculate_checksum(buffer, size, &checksum);
2528 free(buffer);
2529 }
2530
2531 *crc = checksum;
2532
2533 return retval;
2534 }
2535
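/* Illustrative sketch (not part of this file's build): comparing a host-side
 * buffer against target memory via CRC, the same approach
 * handle_verify_image_command_internal uses further below; "buffer", "length"
 * and "addr" are hypothetical.
 *
 *   uint32_t host_crc, target_crc;
 *   image_calculate_checksum(buffer, length, &host_crc);
 *   if (target_checksum_memory(target, addr, length, &target_crc) == ERROR_OK
 *           && host_crc != target_crc)
 *       LOG_ERROR("checksum mismatch");
 */
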
2536 int target_blank_check_memory(struct target *target,
2537 struct target_memory_check_block *blocks, int num_blocks,
2538 uint8_t erased_value)
2539 {
2540 if (!target_was_examined(target)) {
2541 LOG_ERROR("Target not examined yet");
2542 return ERROR_FAIL;
2543 }
2544
2545 if (target->type->blank_check_memory == NULL)
2546 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2547
2548 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2549 }
2550
2551 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2552 {
2553 uint8_t value_buf[8];
2554 if (!target_was_examined(target)) {
2555 LOG_ERROR("Target not examined yet");
2556 return ERROR_FAIL;
2557 }
2558
2559 int retval = target_read_memory(target, address, 8, 1, value_buf);
2560
2561 if (retval == ERROR_OK) {
2562 *value = target_buffer_get_u64(target, value_buf);
2563 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2564 address,
2565 *value);
2566 } else {
2567 *value = 0x0;
2568 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2569 address);
2570 }
2571
2572 return retval;
2573 }
2574
2575 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2576 {
2577 uint8_t value_buf[4];
2578 if (!target_was_examined(target)) {
2579 LOG_ERROR("Target not examined yet");
2580 return ERROR_FAIL;
2581 }
2582
2583 int retval = target_read_memory(target, address, 4, 1, value_buf);
2584
2585 if (retval == ERROR_OK) {
2586 *value = target_buffer_get_u32(target, value_buf);
2587 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2588 address,
2589 *value);
2590 } else {
2591 *value = 0x0;
2592 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2593 address);
2594 }
2595
2596 return retval;
2597 }
2598
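/* Illustrative sketch (not part of this file's build): the sized read/write
 * helpers are convenient for poking individual peripheral registers; the
 * addresses and value are arbitrary example values.
 *
 *   uint32_t id;
 *   if (target_read_u32(target, 0x40000000, &id) != ERROR_OK)
 *       return ERROR_FAIL;
 *   if (target_write_u32(target, 0x40000004, 0x1) != ERROR_OK)
 *       return ERROR_FAIL;
 */
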
2599 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2600 {
2601 uint8_t value_buf[2];
2602 if (!target_was_examined(target)) {
2603 LOG_ERROR("Target not examined yet");
2604 return ERROR_FAIL;
2605 }
2606
2607 int retval = target_read_memory(target, address, 2, 1, value_buf);
2608
2609 if (retval == ERROR_OK) {
2610 *value = target_buffer_get_u16(target, value_buf);
2611 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2612 address,
2613 *value);
2614 } else {
2615 *value = 0x0;
2616 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2617 address);
2618 }
2619
2620 return retval;
2621 }
2622
2623 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2624 {
2625 if (!target_was_examined(target)) {
2626 LOG_ERROR("Target not examined yet");
2627 return ERROR_FAIL;
2628 }
2629
2630 int retval = target_read_memory(target, address, 1, 1, value);
2631
2632 if (retval == ERROR_OK) {
2633 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2634 address,
2635 *value);
2636 } else {
2637 *value = 0x0;
2638 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2639 address);
2640 }
2641
2642 return retval;
2643 }
2644
2645 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2646 {
2647 int retval;
2648 uint8_t value_buf[8];
2649 if (!target_was_examined(target)) {
2650 LOG_ERROR("Target not examined yet");
2651 return ERROR_FAIL;
2652 }
2653
2654 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2655 address,
2656 value);
2657
2658 target_buffer_set_u64(target, value_buf, value);
2659 retval = target_write_memory(target, address, 8, 1, value_buf);
2660 if (retval != ERROR_OK)
2661 LOG_DEBUG("failed: %i", retval);
2662
2663 return retval;
2664 }
2665
2666 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2667 {
2668 int retval;
2669 uint8_t value_buf[4];
2670 if (!target_was_examined(target)) {
2671 LOG_ERROR("Target not examined yet");
2672 return ERROR_FAIL;
2673 }
2674
2675 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2676 address,
2677 value);
2678
2679 target_buffer_set_u32(target, value_buf, value);
2680 retval = target_write_memory(target, address, 4, 1, value_buf);
2681 if (retval != ERROR_OK)
2682 LOG_DEBUG("failed: %i", retval);
2683
2684 return retval;
2685 }
2686
2687 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2688 {
2689 int retval;
2690 uint8_t value_buf[2];
2691 if (!target_was_examined(target)) {
2692 LOG_ERROR("Target not examined yet");
2693 return ERROR_FAIL;
2694 }
2695
2696 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2697 address,
2698 value);
2699
2700 target_buffer_set_u16(target, value_buf, value);
2701 retval = target_write_memory(target, address, 2, 1, value_buf);
2702 if (retval != ERROR_OK)
2703 LOG_DEBUG("failed: %i", retval);
2704
2705 return retval;
2706 }
2707
2708 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2709 {
2710 int retval;
2711 if (!target_was_examined(target)) {
2712 LOG_ERROR("Target not examined yet");
2713 return ERROR_FAIL;
2714 }
2715
2716 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2717 address, value);
2718
2719 retval = target_write_memory(target, address, 1, 1, &value);
2720 if (retval != ERROR_OK)
2721 LOG_DEBUG("failed: %i", retval);
2722
2723 return retval;
2724 }
2725
2726 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2727 {
2728 int retval;
2729 uint8_t value_buf[8];
2730 if (!target_was_examined(target)) {
2731 LOG_ERROR("Target not examined yet");
2732 return ERROR_FAIL;
2733 }
2734
2735 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2736 address,
2737 value);
2738
2739 target_buffer_set_u64(target, value_buf, value);
2740 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2741 if (retval != ERROR_OK)
2742 LOG_DEBUG("failed: %i", retval);
2743
2744 return retval;
2745 }
2746
2747 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2748 {
2749 int retval;
2750 uint8_t value_buf[4];
2751 if (!target_was_examined(target)) {
2752 LOG_ERROR("Target not examined yet");
2753 return ERROR_FAIL;
2754 }
2755
2756 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2757 address,
2758 value);
2759
2760 target_buffer_set_u32(target, value_buf, value);
2761 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2762 if (retval != ERROR_OK)
2763 LOG_DEBUG("failed: %i", retval);
2764
2765 return retval;
2766 }
2767
2768 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2769 {
2770 int retval;
2771 uint8_t value_buf[2];
2772 if (!target_was_examined(target)) {
2773 LOG_ERROR("Target not examined yet");
2774 return ERROR_FAIL;
2775 }
2776
2777 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2778 address,
2779 value);
2780
2781 target_buffer_set_u16(target, value_buf, value);
2782 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2783 if (retval != ERROR_OK)
2784 LOG_DEBUG("failed: %i", retval);
2785
2786 return retval;
2787 }
2788
2789 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2790 {
2791 int retval;
2792 if (!target_was_examined(target)) {
2793 LOG_ERROR("Target not examined yet");
2794 return ERROR_FAIL;
2795 }
2796
2797 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2798 address, value);
2799
2800 retval = target_write_phys_memory(target, address, 1, 1, &value);
2801 if (retval != ERROR_OK)
2802 LOG_DEBUG("failed: %i", retval);
2803
2804 return retval;
2805 }
2806
2807 static int find_target(struct command_invocation *cmd, const char *name)
2808 {
2809 struct target *target = get_target(name);
2810 if (target == NULL) {
2811 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2812 return ERROR_FAIL;
2813 }
2814 if (!target->tap->enabled) {
2815 command_print(cmd, "Target: TAP %s is disabled, "
2816 "can't be the current target\n",
2817 target->tap->dotted_name);
2818 return ERROR_FAIL;
2819 }
2820
2821 cmd->ctx->current_target = target;
2822 if (cmd->ctx->current_target_override)
2823 cmd->ctx->current_target_override = target;
2824
2825 return ERROR_OK;
2826 }
2827
2828
2829 COMMAND_HANDLER(handle_targets_command)
2830 {
2831 int retval = ERROR_OK;
2832 if (CMD_ARGC == 1) {
2833 retval = find_target(CMD, CMD_ARGV[0]);
2834 if (retval == ERROR_OK) {
2835 /* we're done! */
2836 return retval;
2837 }
2838 }
2839
2840 struct target *target = all_targets;
2841 command_print(CMD, " TargetName Type Endian TapName State ");
2842 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2843 while (target) {
2844 const char *state;
2845 char marker = ' ';
2846
2847 if (target->tap->enabled)
2848 state = target_state_name(target);
2849 else
2850 state = "tap-disabled";
2851
2852 if (CMD_CTX->current_target == target)
2853 marker = '*';
2854
2855 /* keep columns lined up to match the headers above */
2856 command_print(CMD,
2857 "%2d%c %-18s %-10s %-6s %-18s %s",
2858 target->target_number,
2859 marker,
2860 target_name(target),
2861 target_type_name(target),
2862 Jim_Nvp_value2name_simple(nvp_target_endian,
2863 target->endianness)->name,
2864 target->tap->dotted_name,
2865 state);
2866 target = target->next;
2867 }
2868
2869 return retval;
2870 }
2871
2872 /* every 300ms we check for reset & power dropout and issue a "reset halt" if so. */
2873
2874 static int powerDropout;
2875 static int srstAsserted;
2876
2877 static int runPowerRestore;
2878 static int runPowerDropout;
2879 static int runSrstAsserted;
2880 static int runSrstDeasserted;
2881
2882 static int sense_handler(void)
2883 {
2884 static int prevSrstAsserted;
2885 static int prevPowerdropout;
2886
2887 int retval = jtag_power_dropout(&powerDropout);
2888 if (retval != ERROR_OK)
2889 return retval;
2890
2891 int powerRestored;
2892 powerRestored = prevPowerdropout && !powerDropout;
2893 if (powerRestored)
2894 runPowerRestore = 1;
2895
2896 int64_t current = timeval_ms();
2897 static int64_t lastPower;
2898 bool waitMore = lastPower + 2000 > current;
2899 if (powerDropout && !waitMore) {
2900 runPowerDropout = 1;
2901 lastPower = current;
2902 }
2903
2904 retval = jtag_srst_asserted(&srstAsserted);
2905 if (retval != ERROR_OK)
2906 return retval;
2907
2908 int srstDeasserted;
2909 srstDeasserted = prevSrstAsserted && !srstAsserted;
2910
2911 static int64_t lastSrst;
2912 waitMore = lastSrst + 2000 > current;
2913 if (srstDeasserted && !waitMore) {
2914 runSrstDeasserted = 1;
2915 lastSrst = current;
2916 }
2917
2918 if (!prevSrstAsserted && srstAsserted)
2919 runSrstAsserted = 1;
2920
2921 prevSrstAsserted = srstAsserted;
2922 prevPowerdropout = powerDropout;
2923
2924 if (srstDeasserted || powerRestored) {
2925 /* Other than logging the event we can't do anything here.
2926 * Issuing a reset is a particularly bad idea as we might
2927 * be inside a reset already.
2928 */
2929 }
2930
2931 return ERROR_OK;
2932 }
2933
2934 /* process target state changes */
2935 static int handle_target(void *priv)
2936 {
2937 Jim_Interp *interp = (Jim_Interp *)priv;
2938 int retval = ERROR_OK;
2939
2940 if (!is_jtag_poll_safe()) {
2941 /* polling is disabled currently */
2942 return ERROR_OK;
2943 }
2944
2945 /* we do not want to recurse here... */
2946 static int recursive;
2947 if (!recursive) {
2948 recursive = 1;
2949 sense_handler();
2950 /* danger! running these procedures can trigger srst assertions and power dropouts.
2951 * We need to avoid an infinite loop/recursion here and we do that by
2952 * clearing the flags after running these events.
2953 */
2954 int did_something = 0;
2955 if (runSrstAsserted) {
2956 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2957 Jim_Eval(interp, "srst_asserted");
2958 did_something = 1;
2959 }
2960 if (runSrstDeasserted) {
2961 Jim_Eval(interp, "srst_deasserted");
2962 did_something = 1;
2963 }
2964 if (runPowerDropout) {
2965 LOG_INFO("Power dropout detected, running power_dropout proc.");
2966 Jim_Eval(interp, "power_dropout");
2967 did_something = 1;
2968 }
2969 if (runPowerRestore) {
2970 Jim_Eval(interp, "power_restore");
2971 did_something = 1;
2972 }
2973
2974 if (did_something) {
2975 /* clear detect flags */
2976 sense_handler();
2977 }
2978
2979 /* clear action flags */
2980
2981 runSrstAsserted = 0;
2982 runSrstDeasserted = 0;
2983 runPowerRestore = 0;
2984 runPowerDropout = 0;
2985
2986 recursive = 0;
2987 }
2988
2989 /* Poll targets for state changes unless that's globally disabled.
2990 * Skip targets that are currently disabled.
2991 */
2992 for (struct target *target = all_targets;
2993 is_jtag_poll_safe() && target;
2994 target = target->next) {
2995
2996 if (!target_was_examined(target))
2997 continue;
2998
2999 if (!target->tap->enabled)
3000 continue;
3001
3002 if (target->backoff.times > target->backoff.count) {
3003 /* do not poll this time as we failed previously */
3004 target->backoff.count++;
3005 continue;
3006 }
3007 target->backoff.count = 0;
3008
3009 /* only poll target if we've got power and srst isn't asserted */
3010 if (!powerDropout && !srstAsserted) {
3011 /* polling may fail silently until the target has been examined */
3012 retval = target_poll(target);
3013 if (retval != ERROR_OK) {
3014 /* 100ms polling interval. Increase the interval between polls up to 5000ms */
3015 if (target->backoff.times * polling_interval < 5000) {
3016 target->backoff.times *= 2;
3017 target->backoff.times++;
3018 }
3019
3020 /* Tell GDB to halt the debugger. This allows the user to
3021 * run monitor commands to handle the situation.
3022 */
3023 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3024 }
3025 if (target->backoff.times > 0) {
3026 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3027 target_reset_examined(target);
3028 retval = target_examine_one(target);
3029 /* Target examination could have failed due to unstable connection,
3030 * but we set the examined flag anyway to repoll it later */
3031 if (retval != ERROR_OK) {
3032 target->examined = true;
3033 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3034 target->backoff.times * polling_interval);
3035 return retval;
3036 }
3037 }
3038
3039 /* Since we succeeded, we reset backoff count */
3040 target->backoff.times = 0;
3041 }
3042 }
3043
3044 return retval;
3045 }
3046
3047 COMMAND_HANDLER(handle_reg_command)
3048 {
3049 struct target *target;
3050 struct reg *reg = NULL;
3051 unsigned count = 0;
3052 char *value;
3053
3054 LOG_DEBUG("-");
3055
3056 target = get_current_target(CMD_CTX);
3057
3058 /* list all available registers for the current target */
3059 if (CMD_ARGC == 0) {
3060 struct reg_cache *cache = target->reg_cache;
3061
3062 count = 0;
3063 while (cache) {
3064 unsigned i;
3065
3066 command_print(CMD, "===== %s", cache->name);
3067
3068 for (i = 0, reg = cache->reg_list;
3069 i < cache->num_regs;
3070 i++, reg++, count++) {
3071 if (reg->exist == false || reg->hidden)
3072 continue;
3073 /* only print cached values if they are valid */
3074 if (reg->valid) {
3075 value = buf_to_hex_str(reg->value,
3076 reg->size);
3077 command_print(CMD,
3078 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3079 count, reg->name,
3080 reg->size, value,
3081 reg->dirty
3082 ? " (dirty)"
3083 : "");
3084 free(value);
3085 } else {
3086 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3087 count, reg->name,
3088 reg->size);
3089 }
3090 }
3091 cache = cache->next;
3092 }
3093
3094 return ERROR_OK;
3095 }
3096
3097 /* access a single register by its ordinal number */
3098 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3099 unsigned num;
3100 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3101
3102 struct reg_cache *cache = target->reg_cache;
3103 count = 0;
3104 while (cache) {
3105 unsigned i;
3106 for (i = 0; i < cache->num_regs; i++) {
3107 if (count++ == num) {
3108 reg = &cache->reg_list[i];
3109 break;
3110 }
3111 }
3112 if (reg)
3113 break;
3114 cache = cache->next;
3115 }
3116
3117 if (!reg) {
3118 command_print(CMD, "%i is out of bounds, the current target "
3119 "has only %i registers (0 - %i)", num, count, count - 1);
3120 return ERROR_OK;
3121 }
3122 } else {
3123 /* access a single register by its name */
3124 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
3125
3126 if (!reg)
3127 goto not_found;
3128 }
3129
3130 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
3131
3132 if (!reg->exist)
3133 goto not_found;
3134
3135 /* display a register */
3136 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3137 && (CMD_ARGV[1][0] <= '9')))) {
3138 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3139 reg->valid = 0;
3140
3141 if (reg->valid == 0)
3142 reg->type->get(reg);
3143 value = buf_to_hex_str(reg->value, reg->size);
3144 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3145 free(value);
3146 return ERROR_OK;
3147 }
3148
3149 /* set register value */
3150 if (CMD_ARGC == 2) {
3151 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3152 if (buf == NULL)
3153 return ERROR_FAIL;
3154 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3155
3156 reg->type->set(reg, buf);
3157
3158 value = buf_to_hex_str(reg->value, reg->size);
3159 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3160 free(value);
3161
3162 free(buf);
3163
3164 return ERROR_OK;
3165 }
3166
3167 return ERROR_COMMAND_SYNTAX_ERROR;
3168
3169 not_found:
3170 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3171 return ERROR_OK;
3172 }
3173
3174 COMMAND_HANDLER(handle_poll_command)
3175 {
3176 int retval = ERROR_OK;
3177 struct target *target = get_current_target(CMD_CTX);
3178
3179 if (CMD_ARGC == 0) {
3180 command_print(CMD, "background polling: %s",
3181 jtag_poll_get_enabled() ? "on" : "off");
3182 command_print(CMD, "TAP: %s (%s)",
3183 target->tap->dotted_name,
3184 target->tap->enabled ? "enabled" : "disabled");
3185 if (!target->tap->enabled)
3186 return ERROR_OK;
3187 retval = target_poll(target);
3188 if (retval != ERROR_OK)
3189 return retval;
3190 retval = target_arch_state(target);
3191 if (retval != ERROR_OK)
3192 return retval;
3193 } else if (CMD_ARGC == 1) {
3194 bool enable;
3195 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3196 jtag_poll_set_enabled(enable);
3197 } else
3198 return ERROR_COMMAND_SYNTAX_ERROR;
3199
3200 return retval;
3201 }
3202
3203 COMMAND_HANDLER(handle_wait_halt_command)
3204 {
3205 if (CMD_ARGC > 1)
3206 return ERROR_COMMAND_SYNTAX_ERROR;
3207
3208 unsigned ms = DEFAULT_HALT_TIMEOUT;
3209 if (1 == CMD_ARGC) {
3210 int retval = parse_uint(CMD_ARGV[0], &ms);
3211 if (ERROR_OK != retval)
3212 return ERROR_COMMAND_SYNTAX_ERROR;
3213 }
3214
3215 struct target *target = get_current_target(CMD_CTX);
3216 return target_wait_state(target, TARGET_HALTED, ms);
3217 }
3218
3219 /* wait for target state to change. The trick here is to have a low
3220 * latency for short waits and not to suck up all the CPU time
3221 * on longer waits.
3222 *
3223 * After 500ms, keep_alive() is invoked
3224 */
3225 int target_wait_state(struct target *target, enum target_state state, int ms)
3226 {
3227 int retval;
3228 int64_t then = 0, cur;
3229 bool once = true;
3230
3231 for (;;) {
3232 retval = target_poll(target);
3233 if (retval != ERROR_OK)
3234 return retval;
3235 if (target->state == state)
3236 break;
3237 cur = timeval_ms();
3238 if (once) {
3239 once = false;
3240 then = timeval_ms();
3241 LOG_DEBUG("waiting for target %s...",
3242 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
3243 }
3244
3245 if (cur-then > 500)
3246 keep_alive();
3247
3248 if ((cur-then) > ms) {
3249 LOG_ERROR("timed out while waiting for target %s",
3250 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
3251 return ERROR_FAIL;
3252 }
3253 }
3254
3255 return ERROR_OK;
3256 }
3257
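/* Illustrative sketch (not part of this file's build): the common
 * halt-and-wait pattern, roughly what "halt" with a timeout argument does
 * above.
 *
 *   int retval = target_halt(target);
 *   if (retval != ERROR_OK)
 *       return retval;
 *   return target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 */
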
3258 COMMAND_HANDLER(handle_halt_command)
3259 {
3260 LOG_DEBUG("-");
3261
3262 struct target *target = get_current_target(CMD_CTX);
3263
3264 target->verbose_halt_msg = true;
3265
3266 int retval = target_halt(target);
3267 if (ERROR_OK != retval)
3268 return retval;
3269
3270 if (CMD_ARGC == 1) {
3271 unsigned wait_local;
3272 retval = parse_uint(CMD_ARGV[0], &wait_local);
3273 if (ERROR_OK != retval)
3274 return ERROR_COMMAND_SYNTAX_ERROR;
3275 if (!wait_local)
3276 return ERROR_OK;
3277 }
3278
3279 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3280 }
3281
3282 COMMAND_HANDLER(handle_soft_reset_halt_command)
3283 {
3284 struct target *target = get_current_target(CMD_CTX);
3285
3286 LOG_USER("requesting target halt and executing a soft reset");
3287
3288 target_soft_reset_halt(target);
3289
3290 return ERROR_OK;
3291 }
3292
3293 COMMAND_HANDLER(handle_reset_command)
3294 {
3295 if (CMD_ARGC > 1)
3296 return ERROR_COMMAND_SYNTAX_ERROR;
3297
3298 enum target_reset_mode reset_mode = RESET_RUN;
3299 if (CMD_ARGC == 1) {
3300 const Jim_Nvp *n;
3301 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3302 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3303 return ERROR_COMMAND_SYNTAX_ERROR;
3304 reset_mode = n->value;
3305 }
3306
3307 /* reset *all* targets */
3308 return target_process_reset(CMD, reset_mode);
3309 }
3310
3311
3312 COMMAND_HANDLER(handle_resume_command)
3313 {
3314 int current = 1;
3315 if (CMD_ARGC > 1)
3316 return ERROR_COMMAND_SYNTAX_ERROR;
3317
3318 struct target *target = get_current_target(CMD_CTX);
3319
3320 /* with no CMD_ARGV, resume from current pc, addr = 0,
3321 * with one argument, addr = CMD_ARGV[0],
3322 * handle breakpoints, not debugging */
3323 target_addr_t addr = 0;
3324 if (CMD_ARGC == 1) {
3325 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3326 current = 0;
3327 }
3328
3329 return target_resume(target, current, addr, 1, 0);
3330 }
3331
3332 COMMAND_HANDLER(handle_step_command)
3333 {
3334 if (CMD_ARGC > 1)
3335 return ERROR_COMMAND_SYNTAX_ERROR;
3336
3337 LOG_DEBUG("-");
3338
3339 /* with no CMD_ARGV, step from current pc, addr = 0,
3340 * with one argument addr = CMD_ARGV[0],
3341 * handle breakpoints, debugging */
3342 target_addr_t addr = 0;
3343 int current_pc = 1;
3344 if (CMD_ARGC == 1) {
3345 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3346 current_pc = 0;
3347 }
3348
3349 struct target *target = get_current_target(CMD_CTX);
3350
3351 return target_step(target, current_pc, addr, 1);
3352 }
3353
3354 void target_handle_md_output(struct command_invocation *cmd,
3355 struct target *target, target_addr_t address, unsigned size,
3356 unsigned count, const uint8_t *buffer)
3357 {
3358 const unsigned line_bytecnt = 32;
3359 unsigned line_modulo = line_bytecnt / size;
3360
3361 char output[line_bytecnt * 4 + 1];
3362 unsigned output_len = 0;
3363
3364 const char *value_fmt;
3365 switch (size) {
3366 case 8:
3367 value_fmt = "%16.16"PRIx64" ";
3368 break;
3369 case 4:
3370 value_fmt = "%8.8"PRIx64" ";
3371 break;
3372 case 2:
3373 value_fmt = "%4.4"PRIx64" ";
3374 break;
3375 case 1:
3376 value_fmt = "%2.2"PRIx64" ";
3377 break;
3378 default:
3379 /* "can't happen", caller checked */
3380 LOG_ERROR("invalid memory read size: %u", size);
3381 return;
3382 }
3383
3384 for (unsigned i = 0; i < count; i++) {
3385 if (i % line_modulo == 0) {
3386 output_len += snprintf(output + output_len,
3387 sizeof(output) - output_len,
3388 TARGET_ADDR_FMT ": ",
3389 (address + (i * size)));
3390 }
3391
3392 uint64_t value = 0;
3393 const uint8_t *value_ptr = buffer + i * size;
3394 switch (size) {
3395 case 8:
3396 value = target_buffer_get_u64(target, value_ptr);
3397 break;
3398 case 4:
3399 value = target_buffer_get_u32(target, value_ptr);
3400 break;
3401 case 2:
3402 value = target_buffer_get_u16(target, value_ptr);
3403 break;
3404 case 1:
3405 value = *value_ptr;
3406 }
3407 output_len += snprintf(output + output_len,
3408 sizeof(output) - output_len,
3409 value_fmt, value);
3410
3411 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3412 command_print(cmd, "%s", output);
3413 output_len = 0;
3414 }
3415 }
3416 }
3417
3418 COMMAND_HANDLER(handle_md_command)
3419 {
3420 if (CMD_ARGC < 1)
3421 return ERROR_COMMAND_SYNTAX_ERROR;
3422
3423 unsigned size = 0;
3424 switch (CMD_NAME[2]) {
3425 case 'd':
3426 size = 8;
3427 break;
3428 case 'w':
3429 size = 4;
3430 break;
3431 case 'h':
3432 size = 2;
3433 break;
3434 case 'b':
3435 size = 1;
3436 break;
3437 default:
3438 return ERROR_COMMAND_SYNTAX_ERROR;
3439 }
3440
3441 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3442 int (*fn)(struct target *target,
3443 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3444 if (physical) {
3445 CMD_ARGC--;
3446 CMD_ARGV++;
3447 fn = target_read_phys_memory;
3448 } else
3449 fn = target_read_memory;
3450 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3451 return ERROR_COMMAND_SYNTAX_ERROR;
3452
3453 target_addr_t address;
3454 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3455
3456 unsigned count = 1;
3457 if (CMD_ARGC == 2)
3458 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3459
3460 uint8_t *buffer = calloc(count, size);
3461 if (buffer == NULL) {
3462 LOG_ERROR("Failed to allocate md read buffer");
3463 return ERROR_FAIL;
3464 }
3465
3466 struct target *target = get_current_target(CMD_CTX);
3467 int retval = fn(target, address, size, count, buffer);
3468 if (ERROR_OK == retval)
3469 target_handle_md_output(CMD, target, address, size, count, buffer);
3470
3471 free(buffer);
3472
3473 return retval;
3474 }
3475
3476 typedef int (*target_write_fn)(struct target *target,
3477 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3478
3479 static int target_fill_mem(struct target *target,
3480 target_addr_t address,
3481 target_write_fn fn,
3482 unsigned data_size,
3483 /* value */
3484 uint64_t b,
3485 /* count */
3486 unsigned c)
3487 {
3488 /* We have to write in reasonably large chunks to be able
3489 * to fill large memory areas with any sane speed */
3490 const unsigned chunk_size = 16384;
3491 uint8_t *target_buf = malloc(chunk_size * data_size);
3492 if (target_buf == NULL) {
3493 LOG_ERROR("Out of memory");
3494 return ERROR_FAIL;
3495 }
3496
3497 for (unsigned i = 0; i < chunk_size; i++) {
3498 switch (data_size) {
3499 case 8:
3500 target_buffer_set_u64(target, target_buf + i * data_size, b);
3501 break;
3502 case 4:
3503 target_buffer_set_u32(target, target_buf + i * data_size, b);
3504 break;
3505 case 2:
3506 target_buffer_set_u16(target, target_buf + i * data_size, b);
3507 break;
3508 case 1:
3509 target_buffer_set_u8(target, target_buf + i * data_size, b);
3510 break;
3511 default:
3512 exit(-1);
3513 }
3514 }
3515
3516 int retval = ERROR_OK;
3517
3518 for (unsigned x = 0; x < c; x += chunk_size) {
3519 unsigned current;
3520 current = c - x;
3521 if (current > chunk_size)
3522 current = chunk_size;
3523 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3524 if (retval != ERROR_OK)
3525 break;
3526 /* avoid GDB timeouts */
3527 keep_alive();
3528 }
3529 free(target_buf);
3530
3531 return retval;
3532 }
3533
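/* Illustrative sketch (not part of this file's build): filling 256 32-bit
 * words with a pattern, roughly what "mw <addr> <value> 256" resolves to
 * below; the address and pattern are arbitrary example values.
 *
 *   target_fill_mem(target, 0x20000000, target_write_memory, 4,
 *       0xdeadbeef, 256);
 */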
3534
3535 COMMAND_HANDLER(handle_mw_command)
3536 {
3537 if (CMD_ARGC < 2)
3538 return ERROR_COMMAND_SYNTAX_ERROR;
3539 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3540 target_write_fn fn;
3541 if (physical) {
3542 CMD_ARGC--;
3543 CMD_ARGV++;
3544 fn = target_write_phys_memory;
3545 } else
3546 fn = target_write_memory;
3547 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3548 return ERROR_COMMAND_SYNTAX_ERROR;
3549
3550 target_addr_t address;
3551 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3552
3553 uint64_t value;
3554 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3555
3556 unsigned count = 1;
3557 if (CMD_ARGC == 3)
3558 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3559
3560 struct target *target = get_current_target(CMD_CTX);
3561 unsigned wordsize;
3562 switch (CMD_NAME[2]) {
3563 case 'd':
3564 wordsize = 8;
3565 break;
3566 case 'w':
3567 wordsize = 4;
3568 break;
3569 case 'h':
3570 wordsize = 2;
3571 break;
3572 case 'b':
3573 wordsize = 1;
3574 break;
3575 default:
3576 return ERROR_COMMAND_SYNTAX_ERROR;
3577 }
3578
3579 return target_fill_mem(target, address, fn, wordsize, value, count);
3580 }
3581
3582 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3583 target_addr_t *min_address, target_addr_t *max_address)
3584 {
3585 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3586 return ERROR_COMMAND_SYNTAX_ERROR;
3587
3588 /* a base address isn't always necessary,
3589 * default to 0x0 (i.e. don't relocate) */
3590 if (CMD_ARGC >= 2) {
3591 target_addr_t addr;
3592 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3593 image->base_address = addr;
3594 image->base_address_set = true;
3595 } else
3596 image->base_address_set = false;
3597
3598 image->start_address_set = false;
3599
3600 if (CMD_ARGC >= 4)
3601 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3602 if (CMD_ARGC == 5) {
3603 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3604 /* use size (given) to find max (required) */
3605 *max_address += *min_address;
3606 }
3607
3608 if (*min_address > *max_address)
3609 return ERROR_COMMAND_SYNTAX_ERROR;
3610
3611 return ERROR_OK;
3612 }
3613
3614 COMMAND_HANDLER(handle_load_image_command)
3615 {
3616 uint8_t *buffer;
3617 size_t buf_cnt;
3618 uint32_t image_size;
3619 target_addr_t min_address = 0;
3620 target_addr_t max_address = -1;
3621 struct image image;
3622
3623 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3624 &image, &min_address, &max_address);
3625 if (ERROR_OK != retval)
3626 return retval;
3627
3628 struct target *target = get_current_target(CMD_CTX);
3629
3630 struct duration bench;
3631 duration_start(&bench);
3632
3633 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3634 return ERROR_FAIL;
3635
3636 image_size = 0x0;
3637 retval = ERROR_OK;
3638 for (unsigned int i = 0; i < image.num_sections; i++) {
3639 buffer = malloc(image.sections[i].size);
3640 if (buffer == NULL) {
3641 command_print(CMD,
3642 "error allocating buffer for section (%d bytes)",
3643 (int)(image.sections[i].size));
3644 retval = ERROR_FAIL;
3645 break;
3646 }
3647
3648 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3649 if (retval != ERROR_OK) {
3650 free(buffer);
3651 break;
3652 }
3653
3654 uint32_t offset = 0;
3655 uint32_t length = buf_cnt;
3656
3657 /* DANGER!!! beware of unsigned comparison here!!! */
3658
3659 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3660 (image.sections[i].base_address < max_address)) {
3661
3662 if (image.sections[i].base_address < min_address) {
3663 /* clip addresses below */
3664 offset += min_address-image.sections[i].base_address;
3665 length -= offset;
3666 }
3667
3668 if (image.sections[i].base_address + buf_cnt > max_address)
3669 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3670
3671 retval = target_write_buffer(target,
3672 image.sections[i].base_address + offset, length, buffer + offset);
3673 if (retval != ERROR_OK) {
3674 free(buffer);
3675 break;
3676 }
3677 image_size += length;
3678 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3679 (unsigned int)length,
3680 image.sections[i].base_address + offset);
3681 }
3682
3683 free(buffer);
3684 }
3685
3686 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3687 command_print(CMD, "downloaded %" PRIu32 " bytes "
3688 "in %fs (%0.3f KiB/s)", image_size,
3689 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3690 }
3691
3692 image_close(&image);
3693
3694 return retval;
3695
3696 }
3697
3698 COMMAND_HANDLER(handle_dump_image_command)
3699 {
3700 struct fileio *fileio;
3701 uint8_t *buffer;
3702 int retval, retvaltemp;
3703 target_addr_t address, size;
3704 struct duration bench;
3705 struct target *target = get_current_target(CMD_CTX);
3706
3707 if (CMD_ARGC != 3)
3708 return ERROR_COMMAND_SYNTAX_ERROR;
3709
3710 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3711 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3712
3713 uint32_t buf_size = (size > 4096) ? 4096 : size;
3714 buffer = malloc(buf_size);
3715 if (!buffer)
3716 return ERROR_FAIL;
3717
3718 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3719 if (retval != ERROR_OK) {
3720 free(buffer);
3721 return retval;
3722 }
3723
3724 duration_start(&bench);
3725
3726 while (size > 0) {
3727 size_t size_written;
3728 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3729 retval = target_read_buffer(target, address, this_run_size, buffer);
3730 if (retval != ERROR_OK)
3731 break;
3732
3733 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3734 if (retval != ERROR_OK)
3735 break;
3736
3737 size -= this_run_size;
3738 address += this_run_size;
3739 }
3740
3741 free(buffer);
3742
3743 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3744 size_t filesize;
3745 retval = fileio_size(fileio, &filesize);
3746 if (retval != ERROR_OK)
3747 return retval;
3748 command_print(CMD,
3749 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3750 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3751 }
3752
3753 retvaltemp = fileio_close(fileio);
3754 if (retvaltemp != ERROR_OK)
3755 return retvaltemp;
3756
3757 return retval;
3758 }
3759
3760 enum verify_mode {
3761 IMAGE_TEST = 0,
3762 IMAGE_VERIFY = 1,
3763 IMAGE_CHECKSUM_ONLY = 2
3764 };
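/* Behaviour implemented by the handler below: IMAGE_TEST only lists each
 * section's address and length without touching target memory,
 * IMAGE_CHECKSUM_ONLY fails on the first CRC mismatch, and IMAGE_VERIFY
 * falls back from the CRC to a byte-by-byte compare so that individual
 * differences can be reported. */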
3765
3766 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3767 {
3768 uint8_t *buffer;
3769 size_t buf_cnt;
3770 uint32_t image_size;
3771 int retval;
3772 uint32_t checksum = 0;
3773 uint32_t mem_checksum = 0;
3774
3775 struct image image;
3776
3777 struct target *target = get_current_target(CMD_CTX);
3778
3779 if (CMD_ARGC < 1)
3780 return ERROR_COMMAND_SYNTAX_ERROR;
3781
3782 if (!target) {
3783 LOG_ERROR("no target selected");
3784 return ERROR_FAIL;
3785 }
3786
3787 struct duration bench;
3788 duration_start(&bench);
3789
3790 if (CMD_ARGC >= 2) {
3791 target_addr_t addr;
3792 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3793 image.base_address = addr;
3794 image.base_address_set = true;
3795 } else {
3796 image.base_address_set = false;
3797 image.base_address = 0x0;
3798 }
3799
3800 image.start_address_set = false;
3801
3802 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3803 if (retval != ERROR_OK)
3804 return retval;
3805
3806 image_size = 0x0;
3807 int diffs = 0;
3808 retval = ERROR_OK;
3809 for (unsigned int i = 0; i < image.num_sections; i++) {
3810 buffer = malloc(image.sections[i].size);
3811 if (buffer == NULL) {
3812 command_print(CMD,
3813 "error allocating buffer for section (%" PRIu32 " bytes)",
3814 image.sections[i].size);
retval = ERROR_FAIL;
3815 break;
3816 }
3817 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3818 if (retval != ERROR_OK) {
3819 free(buffer);
3820 break;
3821 }
3822
3823 if (verify >= IMAGE_VERIFY) {
3824 /* calculate checksum of image */
3825 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3826 if (retval != ERROR_OK) {
3827 free(buffer);
3828 break;
3829 }
3830
3831 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3832 if (retval != ERROR_OK) {
3833 free(buffer);
3834 break;
3835 }
3836 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3837 LOG_ERROR("checksum mismatch");
3838 free(buffer);
3839 retval = ERROR_FAIL;
3840 goto done;
3841 }
3842 if (checksum != mem_checksum) {
3843 /* failed crc checksum, fall back to a binary compare */
3844 uint8_t *data;
3845
3846 if (diffs == 0)
3847 LOG_ERROR("checksum mismatch - attempting binary compare");
3848
3849 data = malloc(buf_cnt);
if (data == NULL) {
free(buffer);
retval = ERROR_FAIL;
break;
}
3850
3851 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3852 if (retval == ERROR_OK) {
3853 uint32_t t;
3854 for (t = 0; t < buf_cnt; t++) {
3855 if (data[t] != buffer[t]) {
3856 command_print(CMD,
3857 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3858 diffs,
3859 (unsigned)(t + image.sections[i].base_address),
3860 data[t],
3861 buffer[t]);
3862 if (diffs++ >= 127) {
3863 command_print(CMD, "More than 128 errors, the rest are not printed.");
3864 free(data);
3865 free(buffer);
3866 goto done;
3867 }
3868 }
3869 keep_alive();
3870 }
3871 }
3872 free(data);
3873 }
3874 } else {
3875 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3876 image.sections[i].base_address,
3877 buf_cnt);
3878 }
3879
3880 free(buffer);
3881 image_size += buf_cnt;
3882 }
3883 if (diffs > 0)
3884 command_print(CMD, "No more differences found.");
3885 done:
3886 if (diffs > 0)
3887 retval = ERROR_FAIL;
3888 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3889 command_print(CMD, "verified %" PRIu32 " bytes "
3890 "in %fs (%0.3f KiB/s)", image_size,
3891 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3892 }
3893
3894 image_close(&image);
3895
3896 return retval;
3897 }
3898
3899 COMMAND_HANDLER(handle_verify_image_checksum_command)
3900 {
3901 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3902 }
3903
3904 COMMAND_HANDLER(handle_verify_image_command)
3905 {
3906 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3907 }
3908
3909 COMMAND_HANDLER(handle_test_image_command)
3910 {
3911 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3912 }
3913
3914 static int handle_bp_command_list(struct command_invocation *cmd)
3915 {
3916 struct target *target = get_current_target(cmd->ctx);
3917 struct breakpoint *breakpoint = target->breakpoints;
3918 while (breakpoint) {
3919 if (breakpoint->type == BKPT_SOFT) {
3920 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3921 breakpoint->length);
3922 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3923 breakpoint->address,
3924 breakpoint->length,
3925 breakpoint->set, buf);
3926 free(buf);
3927 } else {
3928 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3929 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3930 breakpoint->asid,
3931 breakpoint->length, breakpoint->set);
3932 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3933 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3934 breakpoint->address,
3935 breakpoint->length, breakpoint->set);
3936 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3937 breakpoint->asid);
3938 } else
3939 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3940 breakpoint->address,
3941 breakpoint->length, breakpoint->set);
3942 }
3943
3944 breakpoint = breakpoint->next;
3945 }
3946 return ERROR_OK;
3947 }
3948
3949 static int handle_bp_command_set(struct command_invocation *cmd,
3950 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3951 {
3952 struct target *target = get_current_target(cmd->ctx);
3953 int retval;
3954
3955 if (asid == 0) {
3956 retval = breakpoint_add(target, addr, length, hw);
3957 /* error is always logged in breakpoint_add(), do not print it again */
3958 if (ERROR_OK == retval)
3959 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3960
3961 } else if (addr == 0) {
3962 if (target->type->add_context_breakpoint == NULL) {
3963 LOG_ERROR("Context breakpoint not available");
3964 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3965 }
3966 retval = context_breakpoint_add(target, asid, length, hw);
3967 /* error is always logged in context_breakpoint_add(), do not print it again */
3968 if (ERROR_OK == retval)
3969 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3970
3971 } else {
3972 if (target->type->add_hybrid_breakpoint == NULL) {
3973 LOG_ERROR("Hybrid breakpoint not available");
3974 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3975 }
3976 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3977 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3978 if (ERROR_OK == retval)
3979 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3980 }
3981 return retval;
3982 }
3983
3984 COMMAND_HANDLER(handle_bp_command)
3985 {
3986 target_addr_t addr;
3987 uint32_t asid;
3988 uint32_t length;
3989 int hw = BKPT_SOFT;
3990
3991 switch (CMD_ARGC) {
3992 case 0:
3993 return handle_bp_command_list(CMD);
3994
3995 case 2:
3996 asid = 0;
3997 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3998 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3999 return handle_bp_command_set(CMD, addr, asid, length, hw);
4000
4001 case 3:
4002 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4003 hw = BKPT_HARD;
4004 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4005 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4006 asid = 0;
4007 return handle_bp_command_set(CMD, addr, asid, length, hw);
4008 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4009 hw = BKPT_HARD;
4010 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4011 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4012 addr = 0;
4013 return handle_bp_command_set(CMD, addr, asid, length, hw);
4014 }
4015 /* fallthrough */
4016 case 4:
4017 hw = BKPT_HARD;
4018 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4019 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4020 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4021 return handle_bp_command_set(CMD, addr, asid, length, hw);
4022
4023 default:
4024 return ERROR_COMMAND_SYNTAX_ERROR;
4025 }
4026 }
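/* Illustrative invocations of the argument forms parsed above (addresses,
 * lengths and the ASID are arbitrary example values):
 *   bp                    - list breakpoints
 *   bp 0x00001000 2       - software breakpoint
 *   bp 0x00001000 2 hw    - hardware breakpoint
 *   bp 0x22 4 hw_ctx      - context (ASID) hardware breakpoint
 */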
4027
4028 COMMAND_HANDLER(handle_rbp_command)
4029 {
4030 if (CMD_ARGC != 1)
4031 return ERROR_COMMAND_SYNTAX_ERROR;
4032
4033 struct target *target = get_current_target(CMD_CTX);
4034
4035 if (!strcmp(CMD_ARGV[0], "all")) {
4036 breakpoint_remove_all(target);
4037 } else {
4038 target_addr_t addr;
4039 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4040
4041 breakpoint_remove(target, addr);
4042 }
4043
4044 return ERROR_OK;
4045 }
4046
4047 COMMAND_HANDLER(handle_wp_command)
4048 {
4049 struct target *target = get_current_target(CMD_CTX);
4050
4051 if (CMD_ARGC == 0) {
4052 struct watchpoint *watchpoint = target->watchpoints;
4053
4054 while (watchpoint) {
4055 command_print(CMD, "address: " TARGET_ADDR_FMT
4056 ", len: 0x%8.8" PRIx32
4057 ", r/w/a: %i, value: 0x%8.8" PRIx32
4058 ", mask: 0x%8.8" PRIx32,
4059 watchpoint->address,
4060 watchpoint->length,
4061 (int)watchpoint->rw,
4062 watchpoint->value,
4063 watchpoint->mask);
4064 watchpoint = watchpoint->next;
4065 }
4066 return ERROR_OK;
4067 }
4068
4069 enum watchpoint_rw type = WPT_ACCESS;
4070 target_addr_t addr = 0;
4071 uint32_t length = 0;
4072 uint32_t data_value = 0x0;
4073 uint32_t data_mask = 0xffffffff;
4074
4075 switch (CMD_ARGC) {
4076 case 5:
4077 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4078 /* fall through */
4079 case 4:
4080 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4081 /* fall through */
4082 case 3:
4083 switch (CMD_ARGV[2][0]) {
4084 case 'r':
4085 type = WPT_READ;
4086 break;
4087 case 'w':
4088 type = WPT_WRITE;
4089 break;
4090 case 'a':
4091 type = WPT_ACCESS;
4092 break;
4093 default:
4094 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4095 return ERROR_COMMAND_SYNTAX_ERROR;
4096 }
4097 /* fall through */
4098 case 2:
4099 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4100 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4101 break;
4102
4103 default:
4104 return ERROR_COMMAND_SYNTAX_ERROR;
4105 }
4106
4107 int retval = watchpoint_add(target, addr, length, type,
4108 data_value, data_mask);
4109 if (ERROR_OK != retval)
4110 LOG_ERROR("Failure setting watchpoint");
4111
4112 return retval;
4113 }
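/* Illustrative invocations of the forms parsed above (all values are
 * arbitrary examples):
 *   wp                             - list watchpoints
 *   wp 0x20000000 4                - access watchpoint on 4 bytes
 *   wp 0x20000000 4 w              - write watchpoint
 *   wp 0x20000000 4 r 0x55 0xff    - read watchpoint with value/mask match
 */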
4114
4115 COMMAND_HANDLER(handle_rwp_command)
4116 {
4117 if (CMD_ARGC != 1)
4118 return ERROR_COMMAND_SYNTAX_ERROR;
4119
4120 target_addr_t addr;
4121 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4122
4123 struct target *target = get_current_target(CMD_CTX);
4124 watchpoint_remove(target, addr);
4125
4126 return ERROR_OK;
4127 }
4128
4129 /**
4130 * Translate a virtual address to a physical address.
4131 *
4132 * The low-level target implementation must have logged a detailed error
4133 * which is forwarded to telnet/GDB session.
4134 */
4135 COMMAND_HANDLER(handle_virt2phys_command)
4136 {
4137 if (CMD_ARGC != 1)
4138 return ERROR_COMMAND_SYNTAX_ERROR;
4139
4140 target_addr_t va;
4141 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4142 target_addr_t pa;
4143
4144 struct target *target = get_current_target(CMD_CTX);
4145 int retval = target->type->virt2phys(target, va, &pa);
4146 if (retval == ERROR_OK)
4147 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4148
4149 return retval;
4150 }
4151
4152 static void writeData(FILE *f, const void *data, size_t len)
4153 {
4154 size_t written = fwrite(data, 1, len, f);
4155 if (written != len)
4156 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4157 }
4158
4159 static void writeLong(FILE *f, int l, struct target *target)
4160 {
4161 uint8_t val[4];
4162
4163 target_buffer_set_u32(target, val, l);
4164 writeData(f, val, 4);
4165 }
4166
4167 static void writeString(FILE *f, char *s)
4168 {
4169 writeData(f, s, strlen(s));
4170 }
4171
4172 typedef unsigned char UNIT[2]; /* unit of profiling */
4173
4174 /* Dump a gmon.out histogram file. */
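/* Rough sketch of the gmon.out layout emitted below: the magic "gmon", a
 * version word and three padding words, then a single GMON_TAG_TIME_HIST
 * record made of a histogram header (low_pc, high_pc, bucket count,
 * sampling rate, the 15-byte dimension string "seconds" and the
 * abbreviation 's') followed by one 16-bit counter per bucket, stored low
 * byte first and saturated at 65535. */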
4175 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
4176 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4177 {
4178 uint32_t i;
4179 FILE *f = fopen(filename, "w");
4180 if (f == NULL)
4181 return;
4182 writeString(f, "gmon");
4183 writeLong(f, 0x00000001, target); /* Version */
4184 writeLong(f, 0, target); /* padding */
4185 writeLong(f, 0, target); /* padding */
4186 writeLong(f, 0, target); /* padding */
4187
4188 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4189 writeData(f, &zero, 1);
4190
4191 /* figure out bucket size */
4192 uint32_t min;
4193 uint32_t max;
4194 if (with_range) {
4195 min = start_address;
4196 max = end_address;
4197 } else {
4198 min = samples[0];
4199 max = samples[0];
4200 for (i = 0; i < sampleNum; i++) {
4201 if (min > samples[i])
4202 min = samples[i];
4203 if (max < samples[i])
4204 max = samples[i];
4205 }
4206
4207 /* max should be (largest sample + 1)
4208 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4209 max++;
4210 }
4211
4212 int addressSpace = max - min;
4213 assert(addressSpace >= 2);
4214
4215 /* FIXME: What is the reasonable number of buckets?
4216 * The profiling result will be more accurate if there are enough buckets. */
4217 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
4218 uint32_t numBuckets = addressSpace / sizeof(UNIT);
4219 if (numBuckets > maxBuckets)
4220 numBuckets = maxBuckets;
4221 int *buckets = malloc(sizeof(int) * numBuckets);
4222 if (buckets == NULL) {
4223 fclose(f);
4224 return;
4225 }
4226 memset(buckets, 0, sizeof(int) * numBuckets);
4227 for (i = 0; i < sampleNum; i++) {
4228 uint32_t address = samples[i];
4229
4230 if ((address < min) || (max <= address))
4231 continue;
4232
4233 long long a = address - min;
4234 long long b = numBuckets;
4235 long long c = addressSpace;
4236 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4237 buckets[index_t]++;
4238 }
4239
4240 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4241 writeLong(f, min, target); /* low_pc */
4242 writeLong(f, max, target); /* high_pc */
4243 writeLong(f, numBuckets, target); /* # of buckets */
4244 float sample_rate = sampleNum / (duration_ms / 1000.0);
4245 writeLong(f, sample_rate, target);
4246 writeString(f, "seconds");
4247 for (i = 0; i < (15-strlen("seconds")); i++)
4248 writeData(f, &zero, 1);
4249 writeString(f, "s");
4250
4251 /* append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4252
4253 char *data = malloc(2 * numBuckets);
4254 if (data != NULL) {
4255 for (i = 0; i < numBuckets; i++) {
4256 int val;
4257 val = buckets[i];
4258 if (val > 65535)
4259 val = 65535;
4260 data[i * 2] = val&0xff;
4261 data[i * 2 + 1] = (val >> 8) & 0xff;
4262 }
4263 free(buckets);
4264 writeData(f, data, numBuckets * 2);
4265 free(data);
4266 } else
4267 free(buckets);
4268
4269 fclose(f);
4270 }
4271
4272 /* Profiling samples the CPU's program counter as quickly as OpenOCD can,
4273 * which serves as a statistical sampling of where the PC spends its time. */
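/* Illustrative invocations (values are arbitrary examples; the first
 * argument is handed straight to target_profiling() and the optional pair
 * restricts the histogram to an address range):
 *   profile 30 gmon.out
 *   profile 30 gmon.out 0x08000000 0x08020000
 */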
4274 COMMAND_HANDLER(handle_profile_command)
4275 {
4276 struct target *target = get_current_target(CMD_CTX);
4277
4278 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4279 return ERROR_COMMAND_SYNTAX_ERROR;
4280
4281 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4282 uint32_t offset;
4283 uint32_t num_of_samples;
4284 int retval = ERROR_OK;
4285 bool halted_before_profiling = target->state == TARGET_HALTED;
4286
4287 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4288
4289 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4290 if (samples == NULL) {
4291 LOG_ERROR("No memory to store samples.");
4292 return ERROR_FAIL;
4293 }
4294
4295 uint64_t timestart_ms = timeval_ms();
4296 /**
4297 * Some cores let us sample the PC without the
4298 * annoying halt/resume step; for example, ARMv7 PCSR.
4299 * Provide a way to use that more efficient mechanism.
4300 */
4301 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4302 &num_of_samples, offset);
4303 if (retval != ERROR_OK) {
4304 free(samples);
4305 return retval;
4306 }
4307 uint32_t duration_ms = timeval_ms() - timestart_ms;
4308
4309 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4310
4311 retval = target_poll(target);
4312 if (retval != ERROR_OK) {
4313 free(samples);
4314 return retval;
4315 }
4316
4317 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4318 /* The target was halted before we started and is running now. Halt it,
4319 * for consistency. */
4320 retval = target_halt(target);
4321 if (retval != ERROR_OK) {
4322 free(samples);
4323 return retval;
4324 }
4325 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4326 /* The target was running before we started and is halted now. Resume
4327 * it, for consistency. */
4328 retval = target_resume(target, 1, 0, 0, 0);
4329 if (retval != ERROR_OK) {
4330 free(samples);
4331 return retval;
4332 }
4333 }
4334
4335 retval = target_poll(target);
4336 if (retval != ERROR_OK) {
4337 free(samples);
4338 return retval;
4339 }
4340
4341 uint32_t start_address = 0;
4342 uint32_t end_address = 0;
4343 bool with_range = false;
4344 if (CMD_ARGC == 4) {
4345 with_range = true;
4346 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4347 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4348 }
4349
4350 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4351 with_range, start_address, end_address, target, duration_ms);
4352 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4353
4354 free(samples);
4355 return retval;
4356 }
4357
4358 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
4359 {
4360 char *namebuf;
4361 Jim_Obj *nameObjPtr, *valObjPtr;
4362 int result;
4363
4364 namebuf = alloc_printf("%s(%d)", varname, idx);
4365 if (!namebuf)
4366 return JIM_ERR;
4367
4368 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4369 valObjPtr = Jim_NewIntObj(interp, val);
4370 if (!nameObjPtr || !valObjPtr) {
4371 free(namebuf);
4372 return JIM_ERR;
4373 }
4374
4375 Jim_IncrRefCount(nameObjPtr);
4376 Jim_IncrRefCount(valObjPtr);
4377 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
4378 Jim_DecrRefCount(interp, nameObjPtr);
4379 Jim_DecrRefCount(interp, valObjPtr);
4380 free(namebuf);
4381 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4382 return result;
4383 }
4384
4385 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4386 {
4387 struct command_context *context;
4388 struct target *target;
4389
4390 context = current_command_context(interp);
4391 assert(context != NULL);
4392
4393 target = get_current_target(context);
4394 if (target == NULL) {
4395 LOG_ERROR("mem2array: no current target");
4396 return JIM_ERR;
4397 }
4398
4399 return target_mem2array(interp, target, argc - 1, argv + 1);
4400 }
4401
4402 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4403 {
4404 long l;
4405 uint32_t width;
4406 int len;
4407 uint32_t addr;
4408 uint32_t count;
4409 uint32_t v;
4410 const char *varname;
4411 const char *phys;
4412 bool is_phys;
4413 int n, e, retval;
4414 uint32_t i;
4415
4416 /* argv[1] = name of array to receive the data
4417 * argv[2] = desired width
4418 * argv[3] = memory address
4419 * argv[4] = count of times to read
4420 */
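/* Illustrative Tcl usage (array name and address are arbitrary examples):
 *   mem2array readback 32 0x20000000 4
 * leaves the four words in readback(0) .. readback(3). */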
4421
4422 if (argc < 4 || argc > 5) {
4423 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4424 return JIM_ERR;
4425 }
4426 varname = Jim_GetString(argv[0], &len);
4427 /* given "foo" get space for the worst case "foo(%d)" .. add 20 */
4428
4429 e = Jim_GetLong(interp, argv[1], &l);
4430 width = l;
4431 if (e != JIM_OK)
4432 return e;
4433
4434 e = Jim_GetLong(interp, argv[2], &l);
4435 addr = l;
4436 if (e != JIM_OK)
4437 return e;
4438 e = Jim_GetLong(interp, argv[3], &l);
4439 len = l;
4440 if (e != JIM_OK)
4441 return e;
4442 is_phys = false;
4443 if (argc > 4) {
4444 phys = Jim_GetString(argv[4], &n);
4445 if (!strncmp(phys, "phys", n))
4446 is_phys = true;
4447 else
4448 return JIM_ERR;
4449 }
4450 switch (width) {
4451 case 8:
4452 width = 1;
4453 break;
4454 case 16:
4455 width = 2;
4456 break;
4457 case 32:
4458 width = 4;
4459 break;
4460 default:
4461 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4462 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
4463 return JIM_ERR;
4464 }
4465 if (len == 0) {
4466 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4467 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero element count requested", NULL);
4468 return JIM_ERR;
4469 }
4470 if ((addr + (len * width)) < addr) {
4471 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4472 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4473 return JIM_ERR;
4474 }
4475 /* absurd transfer size? */
4476 if (len > 65536) {
4477 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4478 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
4479 return JIM_ERR;
4480 }
4481
4482 if ((width == 1) ||
4483 ((width == 2) && ((addr & 1) == 0)) ||
4484 ((width == 4) && ((addr & 3) == 0))) {
4485 /* all is well */
4486 } else {
4487 char buf[100];
4488 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4489 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte reads",
4490 addr,
4491 width);
4492 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4493 return JIM_ERR;
4494 }
4495
4496 /* Transfer loop */
4497
4498 /* index counter */
4499 n = 0;
4500
4501 size_t buffersize = 4096;
4502 uint8_t *buffer = malloc(buffersize);
4503 if (buffer == NULL)
4504 return JIM_ERR;
4505
4506 /* assume ok */
4507 e = JIM_OK;
4508 while (len) {
4509 /* Slurp... in buffer size chunks */
4510
4511 count = len; /* in objects.. */
4512 if (count > (buffersize / width))
4513 count = (buffersize / width);
4514
4515 if (is_phys)
4516 retval = target_read_phys_memory(target, addr, width, count, buffer);
4517 else
4518 retval = target_read_memory(target, addr, width, count, buffer);
4519 if (retval != ERROR_OK) {
4520 /* BOO !*/
4521 LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4522 addr,
4523 width,
4524 count);
4525 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4526 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4527 e = JIM_ERR;
4528 break;
4529 } else {
4530 v = 0; /* shut up gcc */
4531 for (i = 0; i < count ; i++, n++) {
4532 switch (width) {
4533 case 4:
4534 v = target_buffer_get_u32(target, &buffer[i*width]);
4535 break;
4536 case 2:
4537 v = target_buffer_get_u16(target, &buffer[i*width]);
4538 break;
4539 case 1:
4540 v = buffer[i] & 0x0ff;
4541 break;
4542 }
4543 new_int_array_element(interp, varname, n, v);
4544 }
4545 len -= count;
4546 addr += count * width;
4547 }
4548 }
4549
4550 free(buffer);
4551
4552 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4553
4554 return e;
4555 }
4556
4557 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4558 {
4559 char *namebuf;
4560 Jim_Obj *nameObjPtr, *valObjPtr;
4561 int result;
4562 long l;
4563
4564 namebuf = alloc_printf("%s(%d)", varname, idx);
4565 if (!namebuf)
4566 return JIM_ERR;
4567
4568 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4569 if (!nameObjPtr) {
4570 free(namebuf);
4571 return JIM_ERR;
4572 }
4573
4574 Jim_IncrRefCount(nameObjPtr);
4575 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4576 Jim_DecrRefCount(interp, nameObjPtr);
4577 free(namebuf);
4578 if (valObjPtr == NULL)
4579 return JIM_ERR;
4580
4581 result = Jim_GetLong(interp, valObjPtr, &l);
4582 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4583 *val = l;
4584 return result;
4585 }
4586
4587 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4588 {
4589 struct command_context *context;
4590 struct target *target;
4591
4592 context = current_command_context(interp);
4593 assert(context != NULL);
4594
4595 target = get_current_target(context);
4596 if (target == NULL) {
4597 LOG_ERROR("array2mem: no current target");
4598 return JIM_ERR;
4599 }
4600
4601 return target_array2mem(interp, target, argc-1, argv + 1);
4602 }
4603
4604 static int target_array2mem(Jim_Interp *interp, struct target *target,
4605 int argc, Jim_Obj *const *argv)
4606 {
4607 long l;
4608 uint32_t width;
4609 int len;
4610 uint32_t addr;
4611 uint32_t count;
4612 uint32_t v;
4613 const char *varname;
4614 const char *phys;
4615 bool is_phys;
4616 int n, e, retval;
4617 uint32_t i;
4618
4619 /* argv[1] = name of array to get the data
4620 * argv[2] = desired width
4621 * argv[3] = memory address
4622 * argv[4] = count to write
4623 */
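/* Illustrative Tcl usage (mirror of mem2array above; values are arbitrary
 * examples):
 *   set wdata(0) 0x12345678
 *   array2mem wdata 32 0x20000000 1
 * writes the array elements back to target memory. */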
4624 if (argc < 4 || argc > 5) {
4625 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4626 return JIM_ERR;
4627 }
4628 varname = Jim_GetString(argv[0], &len);
4629 /* given "foo" get space for the worst case "foo(%d)" .. add 20 */
4630
4631 e = Jim_GetLong(interp, argv[1], &l);
4632 width = l;
4633 if (e != JIM_OK)
4634 return e;
4635
4636 e = Jim_GetLong(interp, argv[2], &l);
4637 addr = l;
4638 if (e != JIM_OK)
4639 return e;
4640 e = Jim_GetLong(interp, argv[3], &l);
4641 len = l;
4642 if (e != JIM_OK)
4643 return e;
4644 is_phys = false;
4645 if (argc > 4) {
4646 phys = Jim_GetString(argv[4], &n);
4647 if (!strncmp(phys, "phys", n))
4648 is_phys = true;
4649 else
4650 return JIM_ERR;
4651 }
4652 switch (width) {
4653 case 8:
4654 width = 1;
4655 break;
4656 case 16:
4657 width = 2;
4658 break;
4659 case 32:
4660 width = 4;
4661 break;
4662 default:
4663 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4664 Jim_AppendStrings(interp, Jim_GetResult(interp),
4665 "Invalid width param, must be 8/16/32", NULL);
4666 return JIM_ERR;
4667 }
4668 if (len == 0) {
4669 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4670 Jim_AppendStrings(interp, Jim_GetResult(interp),
4671 "array2mem: zero width read?", NULL);
4672 return JIM_ERR;
4673 }
4674 if ((addr + (len * width)) < addr) {
4675 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4676 Jim_AppendStrings(interp, Jim_GetResult(interp),
4677 "array2mem: addr + len - wraps to zero?", NULL);
4678 return JIM_ERR;
4679 }
4680 /* absurd transfer size? */
4681 if (len > 65536) {
4682 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4683 Jim_AppendStrings(interp, Jim_GetResult(interp),
4684 "array2mem: absurd > 64K item request", NULL);
4685 return JIM_ERR;
4686 }
4687
4688 if ((width == 1) ||
4689 ((width == 2) && ((addr & 1) == 0)) ||
4690 ((width == 4) && ((addr & 3) == 0))) {
4691 /* all is well */
4692 } else {
4693 char buf[100];
4694 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4695 sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte writes",
4696 addr,
4697 width);
4698 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4699 return JIM_ERR;
4700 }
4701
4702 /* Transfer loop */
4703
4704 /* index counter */
4705 n = 0;
4706 /* assume ok */
4707 e = JIM_OK;
4708
4709 size_t buffersize = 4096;
4710 uint8_t *buffer = malloc(buffersize);
4711 if (buffer == NULL)
4712 return JIM_ERR;
4713
4714 while (len) {
4715 /* Slurp... in buffer size chunks */
4716
4717 count = len; /* in objects.. */
4718 if (count > (buffersize / width))
4719 count = (buffersize / width);
4720
4721 v = 0; /* shut up gcc */
4722 for (i = 0; i < count; i++, n++) {
4723 get_int_array_element(interp, varname, n, &v);
4724 switch (width) {
4725 case 4:
4726 target_buffer_set_u32(target, &buffer[i * width], v);
4727 break;
4728 case 2:
4729 target_buffer_set_u16(target, &buffer[i * width], v);
4730 break;
4731 case 1:
4732 buffer[i] = v & 0x0ff;
4733 break;
4734 }
4735 }
4736 len -= count;
4737
4738 if (is_phys)
4739 retval = target_write_phys_memory(target, addr, width, count, buffer);
4740 else
4741 retval = target_write_memory(target, addr, width, count, buffer);
4742 if (retval != ERROR_OK) {
4743 /* BOO !*/
4744 LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4745 addr,
4746 width,
4747 count);
4748 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4749 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4750 e = JIM_ERR;
4751 break;
4752 }
4753 addr += count * width;
4754 }
4755
4756 free(buffer);
4757
4758 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4759
4760 return e;
4761 }
4762
4763 /* FIX? should we propagate errors here rather than printing them
4764 * and continuing?
4765 */
4766 void target_handle_event(struct target *target, enum target_event e)
4767 {
4768 struct target_event_action *teap;
4769 int retval;
4770
4771 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4772 if (teap->event == e) {
4773 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4774 target->target_number,
4775 target_name(target),
4776 target_type_name(target),
4777 e,
4778 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4779 Jim_GetString(teap->body, NULL));
4780
4781 /* Override the current target with the target the event
4782 * is issued from (many scripts rely on this).
4783 * Restore the previous override as soon as
4784 * the handler has finished running. */
4785 struct command_context *cmd_ctx = current_command_context(teap->interp);
4786 struct target *saved_target_override = cmd_ctx->current_target_override;
4787 cmd_ctx->current_target_override = target;
4788
4789 retval = Jim_EvalObj(teap->interp, teap->body);
4790
4791 cmd_ctx->current_target_override = saved_target_override;
4792
4793 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
4794 return;
4795
4796 if (retval == JIM_RETURN)
4797 retval = teap->interp->returnCode;
4798
4799 if (retval != JIM_OK) {
4800 Jim_MakeErrorMessage(teap->interp);
4801 LOG_USER("Error executing event %s on target %s:\n%s",
4802 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4803 target_name(target),
4804 Jim_GetString(Jim_GetResult(teap->interp), NULL));
4805 /* clean both error code and stacktrace before return */
4806 Jim_Eval(teap->interp, "error \"\" \"\"");
4807 }
4808 }
4809 }
4810 }
4811
4812 /**
4813 * Returns true only if the target has a handler for the specified event.
4814 */
4815 bool target_has_event_action(struct target *target, enum target_event event)
4816 {
4817 struct target_event_action *teap;
4818
4819 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4820 if (teap->event == event)
4821 return true;
4822 }
4823 return false;
4824 }
4825
4826 enum target_cfg_param {
4827 TCFG_TYPE,
4828 TCFG_EVENT,
4829 TCFG_WORK_AREA_VIRT,
4830 TCFG_WORK_AREA_PHYS,
4831 TCFG_WORK_AREA_SIZE,
4832 TCFG_WORK_AREA_BACKUP,
4833 TCFG_ENDIAN,
4834 TCFG_COREID,
4835 TCFG_CHAIN_POSITION,
4836 TCFG_DBGBASE,
4837 TCFG_RTOS,
4838 TCFG_DEFER_EXAMINE,
4839 TCFG_GDB_PORT,
4840 TCFG_GDB_MAX_CONNECTIONS,
4841 };
4842
4843 static Jim_Nvp nvp_config_opts[] = {
4844 { .name = "-type", .value = TCFG_TYPE },
4845 { .name = "-event", .value = TCFG_EVENT },
4846 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4847 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4848 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4849 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4850 { .name = "-endian", .value = TCFG_ENDIAN },
4851 { .name = "-coreid", .value = TCFG_COREID },
4852 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4853 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4854 { .name = "-rtos", .value = TCFG_RTOS },
4855 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4856 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
4857 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
4858 { .name = NULL, .value = -1 }
4859 };
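/* Illustrative use of the options above from a target config script
 * ($_TARGETNAME and all values are arbitrary examples):
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { echo "reset-init handler" }
 *   $_TARGETNAME cget -endian
 */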
4860
4861 static int target_configure(Jim_GetOptInfo *goi, struct target *target)
4862 {
4863 Jim_Nvp *n;
4864 Jim_Obj *o;
4865 jim_wide w;
4866 int e;
4867
4868 /* parse config or cget options ... */
4869 while (goi->argc > 0) {
4870 Jim_SetEmptyResult(goi->interp);
4871 /* Jim_GetOpt_Debug(goi); */
4872
4873 if (target->type->target_jim_configure) {
4874 /* target defines a configure function */
4875 /* target gets first dibs on parameters */
4876 e = (*(target->type->target_jim_configure))(target, goi);
4877 if (e == JIM_OK) {
4878 /* more? */
4879 continue;
4880 }
4881 if (e == JIM_ERR) {
4882 /* An error */
4883 return e;
4884 }
4885 /* otherwise we 'continue' below */
4886 }
4887 e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n);
4888 if (e != JIM_OK) {
4889 Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0);
4890 return e;
4891 }
4892 switch (n->value) {
4893 case TCFG_TYPE:
4894 /* not settable */
4895 if (goi->isconfigure) {
4896 Jim_SetResultFormatted(goi->interp,
4897 "not settable: %s", n->name);
4898 return JIM_ERR;
4899 } else {
4900 no_params:
4901 if (goi->argc != 0) {
4902 Jim_WrongNumArgs(goi->interp,
4903 goi->argc, goi->argv,
4904 "NO PARAMS");
4905 return JIM_ERR;
4906 }
4907 }
4908 Jim_SetResultString(goi->interp,
4909 target_type_name(target), -1);
4910 /* loop for more */
4911 break;
4912 case TCFG_EVENT:
4913 if (goi->argc == 0) {
4914 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4915 return JIM_ERR;
4916 }
4917
4918 e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n);
4919 if (e != JIM_OK) {
4920 Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1);
4921 return e;
4922 }
4923
4924 if (goi->isconfigure) {
4925 if (goi->argc != 1) {
4926 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4927 return JIM_ERR;
4928 }
4929 } else {
4930 if (goi->argc != 0) {
4931 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4932 return JIM_ERR;
4933 }
4934 }
4935
4936 {
4937 struct target_event_action *teap;
4938
4939 teap = target->event_action;
4940 /* replace existing? */
4941 while (teap) {
4942 if (teap->event == (enum target_event)n->value)
4943 break;
4944 teap = teap->next;
4945 }
4946
4947 if (goi->isconfigure) {
4948 /* START_DEPRECATED_TPIU */
4949 if (n->value == TARGET_EVENT_TRACE_CONFIG)
4950 LOG_INFO("DEPRECATED target event %s", n->name);
4951 /* END_DEPRECATED_TPIU */
4952
4953 bool replace = true;
4954 if (teap == NULL) {
4955 /* create new */
4956 teap = calloc(1, sizeof(*teap));
4957 replace = false;
4958 }
4959 teap->event = n->value;
4960 teap->interp = goi->interp;
4961 Jim_GetOpt_Obj(goi, &o);
4962 if (teap->body)
4963 Jim_DecrRefCount(teap->interp, teap->body);
4964 teap->body = Jim_DuplicateObj(goi->interp, o);
4965 /*
4966 * FIXME:
4967 * Tcl/TK - "tk events" have a nice feature.
4968 * See the "BIND" command.
4969 * We should support that here.
4970 * You can specify %X and %Y in the event code.
4971 * The idea is: %T - target name.
4972 * The idea is: %N - target number
4973 * The idea is: %E - event name.
4974 */
4975 Jim_IncrRefCount(teap->body);
4976
4977 if (!replace) {
4978 /* add to head of event list */
4979 teap->next = target->event_action;
4980 target->event_action = teap;
4981 }
4982 Jim_SetEmptyResult(goi->interp);
4983 } else {
4984 /* get */
4985 if (teap == NULL)
4986 Jim_SetEmptyResult(goi->interp);
4987 else
4988 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
4989 }
4990 }
4991 /* loop for more */
4992 break;
4993
4994 case TCFG_WORK_AREA_VIRT:
4995 if (goi->isconfigure) {
4996 target_free_all_working_areas(target);
4997 e = Jim_GetOpt_Wide(goi, &w);
4998 if (e != JIM_OK)
4999 return e;
5000 target->working_area_virt = w;
5001 target->working_area_virt_spec = true;
5002 } else {
5003 if (goi->argc != 0)
5004 goto no_params;
5005 }
5006 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5007 /* loop for more */
5008 break;
5009
5010 case TCFG_WORK_AREA_PHYS:
5011 if (goi->isconfigure) {
5012 target_free_all_working_areas(target);
5013 e = Jim_GetOpt_Wide(goi, &w);
5014 if (e != JIM_OK)
5015 return e;
5016 target->working_area_phys = w;
5017 target->working_area_phys_spec = true;
5018 } else {
5019 if (goi->argc != 0)
5020 goto no_params;
5021 }
5022 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5023 /* loop for more */
5024 break;
5025
5026 case TCFG_WORK_AREA_SIZE:
5027 if (goi->isconfigure) {
5028 target_free_all_working_areas(target);
5029 e = Jim_GetOpt_Wide(goi, &w);
5030 if (e != JIM_OK)
5031 return e;
5032 target->working_area_size = w;
5033 } else {
5034 if (goi->argc != 0)
5035 goto no_params;
5036 }
5037 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5038 /* loop for more */
5039 break;
5040
5041 case TCFG_WORK_AREA_BACKUP:
5042 if (goi->isconfigure) {
5043 target_free_all_working_areas(target);
5044 e = Jim_GetOpt_Wide(goi, &w);
5045 if (e != JIM_OK)
5046 return e;
5047 /* make this exactly 1 or 0 */
5048 target->backup_working_area = (!!w);
5049 } else {
5050 if (goi->argc != 0)
5051 goto no_params;
5052 }
5053 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5054 /* loop for more */
5055 break;
5056
5057
5058 case TCFG_ENDIAN:
5059 if (goi->isconfigure) {
5060 e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n);
5061 if (e != JIM_OK) {
5062 Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1);
5063 return e;
5064 }
5065 target->endianness = n->value;
5066 } else {
5067 if (goi->argc != 0)
5068 goto no_params;
5069 }
5070 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
5071 if (n->name == NULL) {
5072 target->endianness = TARGET_LITTLE_ENDIAN;
5073 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
5074 }
5075 Jim_SetResultString(goi->interp, n->name, -1);
5076 /* loop for more */
5077 break;
5078
5079 case TCFG_COREID:
5080 if (goi->isconfigure) {
5081 e = Jim_GetOpt_Wide(goi, &w);
5082 if (e != JIM_OK)
5083 return e;
5084 target->coreid = (int32_t)w;
5085 } else {
5086 if (goi->argc != 0)
5087 goto no_params;
5088 }
5089 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5090 /* loop for more */
5091 break;
5092
5093 case TCFG_CHAIN_POSITION:
5094 if (goi->isconfigure) {
5095 Jim_Obj *o_t;
5096 struct jtag_tap *tap;
5097
5098 if (target->has_dap) {
5099 Jim_SetResultString(goi->interp,
5100 "target requires -dap parameter instead of -chain-position!", -1);
5101 return JIM_ERR;
5102 }
5103
5104 target_free_all_working_areas(target);
5105 e = Jim_GetOpt_Obj(goi, &o_t);
5106 if (e != JIM_OK)
5107 return e;
5108 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5109 if (tap == NULL)
5110 return JIM_ERR;
5111 target->tap = tap;
5112 target->tap_configured = true;
5113 } else {
5114 if (goi->argc != 0)
5115 goto no_params;
5116 }
5117 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5118 /* loop for more */
5119 break;
5120 case TCFG_DBGBASE:
5121 if (goi->isconfigure) {
5122 e = Jim_GetOpt_Wide(goi, &w);
5123 if (e != JIM_OK)
5124 return e;
5125 target->dbgbase = (uint32_t)w;
5126 target->dbgbase_set = true;
5127 } else {
5128 if (goi->argc != 0)
5129 goto no_params;
5130 }
5131 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5132 /* loop for more */
5133 break;
5134 case TCFG_RTOS:
5135 /* RTOS */
5136 {
5137 int result = rtos_create(goi, target);
5138 if (result != JIM_OK)
5139 return result;
5140 }
5141 /* loop for more */
5142 break;
5143
5144 case TCFG_DEFER_EXAMINE:
5145 /* DEFER_EXAMINE */
5146 target->defer_examine = true;
5147 /* loop for more */
5148 break;
5149
5150 case TCFG_GDB_PORT:
5151 if (goi->isconfigure) {
5152 struct command_context *cmd_ctx = current_command_context(goi->interp);
5153 if (cmd_ctx->mode != COMMAND_CONFIG) {
5154 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5155 return JIM_ERR;
5156 }
5157
5158 const char *s;
5159 e = Jim_GetOpt_String(goi, &s, NULL);
5160 if (e != JIM_OK)
5161 return e;
5162 free(target->gdb_port_override);
5163 target->gdb_port_override = strdup(s);
5164 } else {
5165 if (goi->argc != 0)
5166 goto no_params;
5167 }
5168 Jim_SetResultString(goi->interp, target->gdb_port_override ? : "undefined", -1);
5169 /* loop for more */
5170 break;
5171
5172 case TCFG_GDB_MAX_CONNECTIONS:
5173 if (goi->isconfigure) {
5174 struct command_context *cmd_ctx = current_command_context(goi->interp);
5175 if (cmd_ctx->mode != COMMAND_CONFIG) {
5176 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5177 return JIM_ERR;
5178 }
5179
5180 e = Jim_GetOpt_Wide(goi, &w);
5181 if (e != JIM_OK)
5182 return e;
5183 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5184 } else {
5185 if (goi->argc != 0)
5186 goto no_params;
5187 }
5188 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5189 break;
5190 }
5191 } /* while (goi->argc) */
5192
5193
5194 /* done - we return */
5195 return JIM_OK;
5196 }
5197
5198 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5199 {
5200 struct command *c = jim_to_command(interp);
5201 Jim_GetOptInfo goi;
5202
5203 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5204 goi.isconfigure = !strcmp(c->name, "configure");
5205 if (goi.argc < 1) {
5206 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5207 "missing: -option ...");
5208 return JIM_ERR;
5209 }
5210 struct command_context *cmd_ctx = current_command_context(interp);
5211 assert(cmd_ctx);
5212 struct target *target = get_current_target(cmd_ctx);
5213 return target_configure(&goi, target);
5214 }
5215
5216 static int jim_target_mem2array(Jim_Interp *interp,
5217 int argc, Jim_Obj *const *argv)
5218 {
5219 struct command_context *cmd_ctx = current_command_context(interp);
5220 assert(cmd_ctx);
5221 struct target *target = get_current_target(cmd_ctx);
5222 return target_mem2array(interp, target, argc - 1, argv + 1);
5223 }
5224
5225 static int jim_target_array2mem(Jim_Interp *interp,
5226 int argc, Jim_Obj *const *argv)
5227 {
5228 struct command_context *cmd_ctx = current_command_context(interp);
5229 assert(cmd_ctx);
5230 struct target *target = get_current_target(cmd_ctx);
5231 return target_array2mem(interp, target, argc - 1, argv + 1);
5232 }
5233
5234 static int jim_target_tap_disabled(Jim_Interp *interp)
5235 {
5236 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5237 return JIM_ERR;
5238 }
5239
5240 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5241 {
5242 bool allow_defer = false;
5243
5244 Jim_GetOptInfo goi;
5245 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5246 if (goi.argc > 1) {
5247 const char *cmd_name = Jim_GetString(argv[0], NULL);
5248 Jim_SetResultFormatted(goi.interp,
5249 "usage: %s ['allow-defer']", cmd_name);
5250 return JIM_ERR;
5251 }
5252 if (goi.argc > 0 &&
5253 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5254 /* consume it */
5255 Jim_Obj *obj;
5256 int e = Jim_GetOpt_Obj(&goi, &obj);
5257 if (e != JIM_OK)
5258 return e;
5259 allow_defer = true;
5260 }
5261
5262 struct command_context *cmd_ctx = current_command_context(interp);
5263 assert(cmd_ctx);
5264 struct target *target = get_current_target(cmd_ctx);
5265 if (!target->tap->enabled)
5266 return jim_target_tap_disabled(interp);
5267
5268 if (allow_defer && target->defer_examine) {
5269 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5270 LOG_INFO("Use arp_examine command to examine it manually!");
5271 return JIM_OK;
5272 }
5273
5274 int e = target->type->examine(target);
5275 if (e != ERROR_OK)
5276 return JIM_ERR;
5277 return JIM_OK;
5278 }
5279
5280 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5281 {
5282 struct command_context *cmd_ctx = current_command_context(interp);
5283 assert(cmd_ctx);
5284 struct target *target = get_current_target(cmd_ctx);
5285
5286 Jim_SetResultBool(interp, target_was_examined(target));
5287 return JIM_OK;
5288 }
5289
5290 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5291 {
5292 struct command_context *cmd_ctx = current_command_context(interp);
5293 assert(cmd_ctx);
5294 struct target *target = get_current_target(cmd_ctx);
5295
5296 Jim_SetResultBool(interp, target->defer_examine);
5297 return JIM_OK;
5298 }
5299
5300 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5301 {
5302 if (argc != 1) {
5303 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5304 return JIM_ERR;
5305 }
5306 struct command_context *cmd_ctx = current_command_context(interp);
5307 assert(cmd_ctx);
5308 struct target *target = get_current_target(cmd_ctx);
5309
5310 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5311 return JIM_ERR;
5312
5313 return JIM_OK;
5314 }
5315
5316 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5317 {
5318 if (argc != 1) {
5319 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5320 return JIM_ERR;
5321 }
5322 struct command_context *cmd_ctx = current_command_context(interp);
5323 assert(cmd_ctx);
5324 struct target *target = get_current_target(cmd_ctx);
5325 if (!target->tap->enabled)
5326 return jim_target_tap_disabled(interp);
5327
5328 int e;
5329 if (!(target_was_examined(target)))
5330 e = ERROR_TARGET_NOT_EXAMINED;
5331 else
5332 e = target->type->poll(target);
5333 if (e != ERROR_OK)
5334 return JIM_ERR;
5335 return JIM_OK;
5336 }
5337
5338 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5339 {
5340 Jim_GetOptInfo goi;
5341 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5342
5343 if (goi.argc != 2) {
5344 Jim_WrongNumArgs(interp, 0, argv,
5345 "([tT]|[fF]|assert|deassert) BOOL");
5346 return JIM_ERR;
5347 }
5348
5349 Jim_Nvp *n;
5350 int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n);
5351 if (e != JIM_OK) {
5352 Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1);
5353 return e;
5354 }
5355 /* the halt or not param */
5356 jim_wide a;
5357 e = Jim_GetOpt_Wide(&goi, &a);
5358 if (e != JIM_OK)
5359 return e;
5360
5361 struct command_context *cmd_ctx = current_command_context(interp);
5362 assert(cmd_ctx);
5363 struct target *target = get_current_target(cmd_ctx);
5364 if (!target->tap->enabled)
5365 return jim_target_tap_disabled(interp);
5366
5367 if (!target->type->assert_reset || !target->type->deassert_reset) {
5368 Jim_SetResultFormatted(interp,
5369 "No target-specific reset for %s",
5370 target_name(target));
5371 return JIM_ERR;
5372 }
5373
5374 if (target->defer_examine)
5375 target_reset_examined(target);
5376
5377 /* determine if we should halt or not. */
5378 target->reset_halt = !!a;
5379 /* When this happens - all workareas are invalid. */
5380 target_free_all_working_areas_restore(target, 0);
5381
5382 /* do the assert */
5383 if (n->value == NVP_ASSERT)
5384 e = target->type->assert_reset(target);
5385 else
5386 e = target->type->deassert_reset(target);
5387 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5388 }
5389
5390 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5391 {
5392 if (argc != 1) {
5393 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5394 return JIM_ERR;
5395 }
5396 struct command_context *cmd_ctx = current_command_context(interp);
5397 assert(cmd_ctx);
5398 struct target *target = get_current_target(cmd_ctx);
5399 if (!target->tap->enabled)
5400 return jim_target_tap_disabled(interp);
5401 int e = target->type->halt(target);
5402 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5403 }
5404
5405 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5406 {
5407 Jim_GetOptInfo goi;
5408 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5409
5410 /* params: <name> statename timeoutmsecs */
5411 if (goi.argc != 2) {
5412 const char *cmd_name = Jim_GetString(argv[0], NULL);
5413 Jim_SetResultFormatted(goi.interp,
5414 "%s <state_name> <timeout_in_msec>", cmd_name);
5415 return JIM_ERR;
5416 }
5417
5418 Jim_Nvp *n;
5419 int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n);
5420 if (e != JIM_OK) {
5421 Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1);
5422 return e;
5423 }
5424 jim_wide a;
5425 e = Jim_GetOpt_Wide(&goi, &a);
5426 if (e != JIM_OK)
5427 return e;
5428 struct command_context *cmd_ctx = current_command_context(interp);
5429 assert(cmd_ctx);
5430 struct target *target = get_current_target(cmd_ctx);
5431 if (!target->tap->enabled)
5432 return jim_target_tap_disabled(interp);
5433
5434 e = target_wait_state(target, n->value, a);
5435 if (e != ERROR_OK) {
5436 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
5437 Jim_SetResultFormatted(goi.interp,
5438 "target: %s wait %s fails (%#s) %s",
5439 target_name(target), n->name,
5440 eObj, target_strerror_safe(e));
5441 return JIM_ERR;
5442 }
5443 return JIM_OK;
5444 }
5445 /* List, for humans, the events defined for this target.
5446 * Scripts/programs should use 'name cget -event NAME' instead.
5447 */
5448 COMMAND_HANDLER(handle_target_event_list)
5449 {
5450 struct target *target = get_current_target(CMD_CTX);
5451 struct target_event_action *teap = target->event_action;
5452
5453 command_print(CMD, "Event actions for target (%d) %s\n",
5454 target->target_number,
5455 target_name(target));
5456 command_print(CMD, "%-25s | Body", "Event");
5457 command_print(CMD, "------------------------- | "
5458 "----------------------------------------");
5459 while (teap) {
5460 Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event);
5461 command_print(CMD, "%-25s | %s",
5462 opt->name, Jim_GetString(teap->body, NULL));
5463 teap = teap->next;
5464 }
5465 command_print(CMD, "***END***");
5466 return ERROR_OK;
5467 }
5468 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5469 {
5470 if (argc != 1) {
5471 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5472 return JIM_ERR;
5473 }
5474 struct command_context *cmd_ctx = current_command_context(interp);
5475 assert(cmd_ctx);
5476 struct target *target = get_current_target(cmd_ctx);
5477 Jim_SetResultString(interp, target_state_name(target), -1);
5478 return JIM_OK;
5479 }
5480 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5481 {
5482 Jim_GetOptInfo goi;
5483 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5484 if (goi.argc != 1) {
5485 const char *cmd_name = Jim_GetString(argv[0], NULL);
5486 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5487 return JIM_ERR;
5488 }
5489 Jim_Nvp *n;
5490 int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n);
5491 if (e != JIM_OK) {
5492 Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1);
5493 return e;
5494 }
5495 struct command_context *cmd_ctx = current_command_context(interp);
5496 assert(cmd_ctx);
5497 struct target *target = get_current_target(cmd_ctx);
5498 target_handle_event(target, n->value);
5499 return JIM_OK;
5500 }
5501
5502 static const struct command_registration target_instance_command_handlers[] = {
5503 {
5504 .name = "configure",
5505 .mode = COMMAND_ANY,
5506 .jim_handler = jim_target_configure,
5507 .help = "configure a new target for use",
5508 .usage = "[target_attribute ...]",
5509 },
5510 {
5511 .name = "cget",
5512 .mode = COMMAND_ANY,
5513 .jim_handler = jim_target_configure,
5514 .help = "returns the specified target attribute",
5515 .usage = "target_attribute",
5516 },
5517 {
5518 .name = "mwd",
5519 .handler = handle_mw_command,
5520 .mode = COMMAND_EXEC,
5521 .help = "Write 64-bit word(s) to target memory",
5522 .usage = "address data [count]",
5523 },
5524 {
5525 .name = "mww",
5526 .handler = handle_mw_command,
5527 .mode = COMMAND_EXEC,
5528 .help = "Write 32-bit word(s) to target memory",
5529 .usage = "address data [count]",
5530 },
5531 {
5532 .name = "mwh",
5533 .handler = handle_mw_command,
5534 .mode = COMMAND_EXEC,
5535 .help = "Write 16-bit half-word(s) to target memory",
5536 .usage = "address data [count]",
5537 },
5538 {
5539 .name = "mwb",
5540 .handler = handle_mw_command,
5541 .mode = COMMAND_EXEC,
5542 .help = "Write byte(s) to target memory",
5543 .usage = "address data [count]",
5544 },
5545 {
5546 .name = "mdd",
5547 .handler = handle_md_command,
5548 .mode = COMMAND_EXEC,
5549 .help = "Display target memory as 64-bit words",
5550 .usage = "address [count]",
5551 },
5552 {
5553 .name = "mdw",
5554 .handler = handle_md_command,
5555 .mode = COMMAND_EXEC,
5556 .help = "Display target memory as 32-bit words",
5557 .usage = "address [count]",
5558 },
5559 {
5560 .name = "mdh",
5561 .handler = handle_md_command,
5562 .mode = COMMAND_EXEC,
5563 .help = "Display target memory as 16-bit half-words",
5564 .usage = "address [count]",
5565 },
5566 {
5567 .name = "mdb",
5568 .handler = handle_md_command,
5569 .mode = COMMAND_EXEC,
5570 .help = "Display target memory as 8-bit bytes",
5571 .usage = "address [count]",
5572 },
5573 {
5574 .name = "array2mem",
5575 .mode = COMMAND_EXEC,
5576 .jim_handler = jim_target_array2mem,
5577 .help = "Writes Tcl array of 8/16/32 bit numbers "
5578 "to target memory",
5579 .usage = "arrayname bitwidth address count",
5580 },
5581 {
5582 .name = "mem2array",
5583 .mode = COMMAND_EXEC,
5584 .jim_handler = jim_target_mem2array,
5585 .help = "Loads Tcl array of 8/16/32 bit numbers "
5586 "from target memory",
5587 .usage = "arrayname bitwidth address count",
5588 },
5589 {
5590 .name = "eventlist",
5591 .handler = handle_target_event_list,
5592 .mode = COMMAND_EXEC,
5593 .help = "displays a table of events defined for this target",
5594 .usage = "",
5595 },
5596 {
5597 .name = "curstate",
5598 .mode = COMMAND_EXEC,
5599 .jim_handler = jim_target_current_state,
5600 .help = "displays the current state of this target",
5601 },
5602 {
5603 .name = "arp_examine",
5604 .mode = COMMAND_EXEC,
5605 .jim_handler = jim_target_examine,
5606 .help = "used internally for reset processing",
5607 .usage = "['allow-defer']",
5608 },
5609 {
5610 .name = "was_examined",
5611 .mode = COMMAND_EXEC,
5612 .jim_handler = jim_target_was_examined,
5613 .help = "used internally for reset processing",
5614 },
5615 {
5616 .name = "examine_deferred",
5617 .mode = COMMAND_EXEC,
5618 .jim_handler = jim_target_examine_deferred,
5619 .help = "used internally for reset processing",
5620 },
5621 {
5622 .name = "arp_halt_gdb",
5623 .mode = COMMAND_EXEC,
5624 .jim_handler = jim_target_halt_gdb,
5625 .help = "used internally for reset processing to halt GDB",
5626 },
5627 {
5628 .name = "arp_poll",
5629 .mode = COMMAND_EXEC,
5630 .jim_handler = jim_target_poll,
5631 .help = "used internally for reset processing",
5632 },
5633 {
5634 .name = "arp_reset",
5635 .mode = COMMAND_EXEC,
5636 .jim_handler = jim_target_reset,
5637 .help = "used internally for reset processing",
5638 },
5639 {
5640 .name = "arp_halt",
5641 .mode = COMMAND_EXEC,
5642 .jim_handler = jim_target_halt,
5643 .help = "used internally for reset processing",
5644 },
5645 {
5646 .name = "arp_waitstate",
5647 .mode = COMMAND_EXEC,
5648 .jim_handler = jim_target_wait_state,
5649 .help = "used internally for reset processing",
5650 },
5651 {
5652 .name = "invoke-event",
5653 .mode = COMMAND_EXEC,
5654 .jim_handler = jim_target_invoke_event,
5655 .help = "invoke handler for specified event",
5656 .usage = "event_name",
5657 },
5658 COMMAND_REGISTRATION_DONE
5659 };
5660
5661 static int target_create(Jim_GetOptInfo *goi)
5662 {
5663 Jim_Obj *new_cmd;
5664 Jim_Cmd *cmd;
5665 const char *cp;
5666 int e;
5667 int x;
5668 struct target *target;
5669 struct command_context *cmd_ctx;
5670
5671 cmd_ctx = current_command_context(goi->interp);
5672 assert(cmd_ctx != NULL);
5673
5674 if (goi->argc < 3) {
5675 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5676 return JIM_ERR;
5677 }
5678
5679 /* COMMAND */
5680 Jim_GetOpt_Obj(goi, &new_cmd);
5681 /* does this command exist? */
5682 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5683 if (cmd) {
5684 cp = Jim_GetString(new_cmd, NULL);
5685 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5686 return JIM_ERR;
5687 }
5688
5689 /* TYPE */
5690 e = Jim_GetOpt_String(goi, &cp, NULL);
5691 if (e != JIM_OK)
5692 return e;
5693 struct transport *tr = get_current_transport();
5694 if (tr->override_target) {
5695 e = tr->override_target(&cp);
5696 if (e != ERROR_OK) {
5697 LOG_ERROR("The selected transport doesn't support this target");
5698 return JIM_ERR;
5699 }
5700 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5701 }
5702 /* now does target type exist */
5703 for (x = 0 ; target_types[x] ; x++) {
5704 if (0 == strcmp(cp, target_types[x]->name)) {
5705 /* found */
5706 break;
5707 }
5708 }
5709 if (target_types[x] == NULL) {
5710 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5711 for (x = 0 ; target_types[x] ; x++) {
5712 if (target_types[x + 1]) {
5713 Jim_AppendStrings(goi->interp,
5714 Jim_GetResult(goi->interp),
5715 target_types[x]->name,
5716 ", ", NULL);
5717 } else {
5718 Jim_AppendStrings(goi->interp,
5719 Jim_GetResult(goi->interp),
5720 " or ",
5721 target_types[x]->name, NULL);
5722 }
5723 }
5724 return JIM_ERR;
5725 }
5726
5727 /* Create it */
5728 target = calloc(1, sizeof(struct target));
5729 if (!target) {
5730 LOG_ERROR("Out of memory");
5731 return JIM_ERR;
5732 }
5733
5734 /* set target number */
5735 target->target_number = new_target_number();
5736
5737 /* allocate memory for each unique target type */
5738 target->type = malloc(sizeof(struct target_type));
5739 if (!target->type) {
5740 LOG_ERROR("Out of memory");
5741 free(target);
5742 return JIM_ERR;
5743 }
5744
5745 memcpy(target->type, target_types[x], sizeof(struct target_type));
5746
5747 /* default to first core, override with -coreid */
5748 target->coreid = 0;
5749
5750 target->working_area = 0x0;
5751 target->working_area_size = 0x0;
5752 target->working_areas = NULL;
5753 target->backup_working_area = 0;
5754
5755 target->state = TARGET_UNKNOWN;
5756 target->debug_reason = DBG_REASON_UNDEFINED;
5757 target->reg_cache = NULL;
5758 target->breakpoints = NULL;
5759 target->watchpoints = NULL;
5760 target->next = NULL;
5761 target->arch_info = NULL;
5762
5763 target->verbose_halt_msg = true;
5764
5765 target->halt_issued = false;
5766
5767 /* initialize trace information */
5768 target->trace_info = calloc(1, sizeof(struct trace));
5769 if (!target->trace_info) {
5770 LOG_ERROR("Out of memory");
5771 free(target->type);
5772 free(target);
5773 return JIM_ERR;
5774 }
5775
5776 target->dbgmsg = NULL;
5777 target->dbg_msg_enabled = 0;
5778
5779 target->endianness = TARGET_ENDIAN_UNKNOWN;
5780
5781 target->rtos = NULL;
5782 target->rtos_auto_detect = false;
5783
5784 target->gdb_port_override = NULL;
5785 target->gdb_max_connections = 1;
5786
5787 /* Do the rest as "configure" options */
5788 goi->isconfigure = 1;
5789 e = target_configure(goi, target);
5790
5791 if (e == JIM_OK) {
5792 if (target->has_dap) {
5793 if (!target->dap_configured) {
5794 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5795 e = JIM_ERR;
5796 }
5797 } else {
5798 if (!target->tap_configured) {
5799 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5800 e = JIM_ERR;
5801 }
5802 }
5803 /* tap must be set after target was configured */
5804 if (target->tap == NULL)
5805 e = JIM_ERR;
5806 }
5807
5808 if (e != JIM_OK) {
5809 rtos_destroy(target);
5810 free(target->gdb_port_override);
5811 free(target->trace_info);
5812 free(target->type);
5813 free(target);
5814 return e;
5815 }
5816
5817 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5818 /* default endian to little if not specified */
5819 target->endianness = TARGET_LITTLE_ENDIAN;
5820 }
5821
5822 cp = Jim_GetString(new_cmd, NULL);
5823 target->cmd_name = strdup(cp);
5824 if (!target->cmd_name) {
5825 LOG_ERROR("Out of memory");
5826 rtos_destroy(target);
5827 free(target->gdb_port_override);
5828 free(target->trace_info);
5829 free(target->type);
5830 free(target);
5831 return JIM_ERR;
5832 }
5833
5834 if (target->type->target_create) {
5835 e = (*(target->type->target_create))(target, goi->interp);
5836 if (e != ERROR_OK) {
5837 LOG_DEBUG("target_create failed");
5838 free(target->cmd_name);
5839 rtos_destroy(target);
5840 free(target->gdb_port_override);
5841 free(target->trace_info);
5842 free(target->type);
5843 free(target);
5844 return JIM_ERR;
5845 }
5846 }
5847
5848 /* create the target specific commands */
5849 if (target->type->commands) {
5850 e = register_commands(cmd_ctx, NULL, target->type->commands);
5851 if (ERROR_OK != e)
5852 LOG_ERROR("unable to register '%s' commands", cp);
5853 }
5854
5855 /* now - create the new target name command */
5856 const struct command_registration target_subcommands[] = {
5857 {
5858 .chain = target_instance_command_handlers,
5859 },
5860 {
5861 .chain = target->type->commands,
5862 },
5863 COMMAND_REGISTRATION_DONE
5864 };
5865 const struct command_registration target_commands[] = {
5866 {
5867 .name = cp,
5868 .mode = COMMAND_ANY,
5869 .help = "target command group",
5870 .usage = "",
5871 .chain = target_subcommands,
5872 },
5873 COMMAND_REGISTRATION_DONE
5874 };
5875 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
5876 if (e != ERROR_OK) {
5877 if (target->type->deinit_target)
5878 target->type->deinit_target(target);
5879 free(target->cmd_name);
5880 rtos_destroy(target);
5881 free(target->gdb_port_override);
5882 free(target->trace_info);
5883 free(target->type);
5884 free(target);
5885 return JIM_ERR;
5886 }
5887
5888 /* append to end of list */
5889 append_to_list_all_targets(target);
5890
5891 cmd_ctx->current_target = target;
5892 return JIM_OK;
5893 }
5894
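/* jim_target_current: Tcl handler for "target current".  Returns the name of
 * the currently selected target, or an empty result if no target has been
 * selected yet. */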
5895 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5896 {
5897 if (argc != 1) {
5898 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5899 return JIM_ERR;
5900 }
5901 struct command_context *cmd_ctx = current_command_context(interp);
5902 assert(cmd_ctx != NULL);
5903
5904 struct target *target = get_current_target_or_null(cmd_ctx);
5905 if (target)
5906 Jim_SetResultString(interp, target_name(target), -1);
5907 return JIM_OK;
5908 }
5909
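/* jim_target_types: Tcl handler for "target types".  Returns the names of all
 * supported target types as a Tcl list. */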
5910 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5911 {
5912 if (argc != 1) {
5913 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5914 return JIM_ERR;
5915 }
5916 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5917 for (unsigned x = 0; NULL != target_types[x]; x++) {
5918 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5919 Jim_NewStringObj(interp, target_types[x]->name, -1));
5920 }
5921 return JIM_OK;
5922 }
5923
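/* jim_target_names: Tcl handler for "target names".  Returns the command
 * names of all configured targets as a Tcl list. */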
5924 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5925 {
5926 if (argc != 1) {
5927 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5928 return JIM_ERR;
5929 }
5930 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5931 struct target *target = all_targets;
5932 while (target) {
5933 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5934 Jim_NewStringObj(interp, target_name(target), -1));
5935 target = target->next;
5936 }
5937 return JIM_OK;
5938 }
5939
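/* jim_target_smp: Tcl handler for "target smp".  Builds a target_list from
 * the named targets, marks each of them as part of the SMP group, and
 * initializes RTOS SMP support when the targets use an RTOS.
 * Illustrative usage (placeholder names):
 *   target smp chip.cpu0 chip.cpu1
 */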
5940 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5941 {
5942 int i;
5943 const char *targetname;
5944 int retval, len;
5945 struct target *target = (struct target *) NULL;
5946 struct target_list *head, *curr, *new;
5947 curr = (struct target_list *) NULL;
5948 head = (struct target_list *) NULL;
5949
5950 retval = 0;
5951 LOG_DEBUG("%d", argc);
5952 /* argv[1] = target to associate in smp
5953 * argv[2] = target to associate in smp
5954 * argv[3] ...
5955 */
5956
5957 for (i = 1; i < argc; i++) {
5958
5959 targetname = Jim_GetString(argv[i], &len);
5960 target = get_target(targetname);
5961 LOG_DEBUG("%s ", targetname);
5962 if (target) {
5963 new = malloc(sizeof(struct target_list));
5964 new->target = target;
5965 new->next = (struct target_list *)NULL;
5966 if (head == (struct target_list *)NULL) {
5967 head = new;
5968 curr = head;
5969 } else {
5970 curr->next = new;
5971 curr = new;
5972 }
5973 }
5974 }
5975 /* now walk the list of CPUs and put each target in SMP mode */
5976 curr = head;
5977
5978 while (curr != (struct target_list *)NULL) {
5979 target = curr->target;
5980 target->smp = 1;
5981 target->head = head;
5982 curr = curr->next;
5983 }
5984
5985 if (target && target->rtos)
5986 retval = rtos_smp_init(head->target);
5987
5988 return retval;
5989 }
5990
5991
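/* jim_target_create: Tcl handler for "target create".  Checks the argument
 * count and delegates the actual work to target_create() above. */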
5992 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5993 {
5994 Jim_GetOptInfo goi;
5995 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5996 if (goi.argc < 3) {
5997 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5998 "<name> <target_type> [<target_options> ...]");
5999 return JIM_ERR;
6000 }
6001 return target_create(&goi);
6002 }
6003
6004 static const struct command_registration target_subcommand_handlers[] = {
6005 {
6006 .name = "init",
6007 .mode = COMMAND_CONFIG,
6008 .handler = handle_target_init_command,
6009 .help = "initialize targets",
6010 .usage = "",
6011 },
6012 {
6013 .name = "create",
6014 .mode = COMMAND_CONFIG,
6015 .jim_handler = jim_target_create,
6016 .usage = "name type '-chain-position' name [options ...]",
6017 .help = "Creates and selects a new target",
6018 },
6019 {
6020 .name = "current",
6021 .mode = COMMAND_ANY,
6022 .jim_handler = jim_target_current,
6023 .help = "Returns the currently selected target",
6024 },
6025 {
6026 .name = "types",
6027 .mode = COMMAND_ANY,
6028 .jim_handler = jim_target_types,
6029 .help = "Returns the available target types as "
6030 "a list of strings",
6031 },
6032 {
6033 .name = "names",
6034 .mode = COMMAND_ANY,
6035 .jim_handler = jim_target_names,
6036 .help = "Returns the names of all targets as a list of strings",
6037 },
6038 {
6039 .name = "smp",
6040 .mode = COMMAND_ANY,
6041 .jim_handler = jim_target_smp,
6042 .usage = "targetname1 targetname2 ...",
6043 .help = "gather several targets into an SMP list"
6044 },
6045
6046 COMMAND_REGISTRATION_DONE
6047 };
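/* Illustrative configuration-script usage of the "target" command group
 * registered above (the names are placeholders; whether -dap or
 * -chain-position is required depends on the target type, as enforced in
 * target_create()):
 *
 *   target create chip.cpu cortex_m -dap chip.dap
 *   target names
 *   target current
 */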
6048
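/* "Fast load" support: fast_load_image parses an image on the host and caches
 * its sections in the FastLoad array below; fast_load later replays the cached
 * data to the current target. */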
6049 struct FastLoad {
6050 target_addr_t address;
6051 uint8_t *data;
6052 int length;
6053
6054 };
6055
6056 static int fastload_num;
6057 static struct FastLoad *fastload;
6058
6059 static void free_fastload(void)
6060 {
6061 if (fastload != NULL) {
6062 for (int i = 0; i < fastload_num; i++)
6063 free(fastload[i].data);
6064 free(fastload);
6065 fastload = NULL;
6066 }
6067 }
6068
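/* handle_fast_load_image_command: reads the image file section by section,
 * clips each section against the optional min_address/max_address window and
 * stores the resulting chunks in the fastload array for later use by
 * "fast_load". */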
6069 COMMAND_HANDLER(handle_fast_load_image_command)
6070 {
6071 uint8_t *buffer;
6072 size_t buf_cnt;
6073 uint32_t image_size;
6074 target_addr_t min_address = 0;
6075 target_addr_t max_address = -1;
6076
6077 struct image image;
6078
6079 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
6080 &image, &min_address, &max_address);
6081 if (ERROR_OK != retval)
6082 return retval;
6083
6084 struct duration bench;
6085 duration_start(&bench);
6086
6087 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6088 if (retval != ERROR_OK)
6089 return retval;
6090
6091 image_size = 0x0;
6092 retval = ERROR_OK;
6093 fastload_num = image.num_sections;
6094 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
6095 if (fastload == NULL) {
6096 command_print(CMD, "out of memory");
6097 image_close(&image);
6098 return ERROR_FAIL;
6099 }
6100 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
6101 for (unsigned int i = 0; i < image.num_sections; i++) {
6102 buffer = malloc(image.sections[i].size);
6103 if (buffer == NULL) {
6104 command_print(CMD, "error allocating buffer for section (%d bytes)",
6105 (int)(image.sections[i].size));
6106 retval = ERROR_FAIL;
6107 break;
6108 }
6109
6110 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6111 if (retval != ERROR_OK) {
6112 free(buffer);
6113 break;
6114 }
6115
6116 uint32_t offset = 0;
6117 uint32_t length = buf_cnt;
6118
6119 /* DANGER!!! beware of unsigned comparison here!!! */
6120
6121 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6122 (image.sections[i].base_address < max_address)) {
6123 if (image.sections[i].base_address < min_address) {
6124 /* clip addresses below */
6125 offset += min_address-image.sections[i].base_address;
6126 length -= offset;
6127 }
6128
6129 if (image.sections[i].base_address + buf_cnt > max_address)
6130 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6131
6132 fastload[i].address = image.sections[i].base_address + offset;
6133 fastload[i].data = malloc(length);
6134 if (fastload[i].data == NULL) {
6135 free(buffer);
6136 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6137 length);
6138 retval = ERROR_FAIL;
6139 break;
6140 }
6141 memcpy(fastload[i].data, buffer + offset, length);
6142 fastload[i].length = length;
6143
6144 image_size += length;
6145 command_print(CMD, "%u bytes written at address 0x%8.8x",
6146 (unsigned int)length,
6147 ((unsigned int)(image.sections[i].base_address + offset)));
6148 }
6149
6150 free(buffer);
6151 }
6152
6153 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
6154 command_print(CMD, "Loaded %" PRIu32 " bytes "
6155 "in %fs (%0.3f KiB/s)", image_size,
6156 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6157
6158 command_print(CMD,
6159 "WARNING: image has not been loaded to target!"
6160 "You can issue a 'fast_load' to finish loading.");
6161 }
6162
6163 image_close(&image);
6164
6165 if (retval != ERROR_OK)
6166 free_fastload();
6167
6168 return retval;
6169 }
6170
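/* handle_fast_load_command: writes every cached fastload chunk to the current
 * target and reports the overall transfer rate. */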
6171 COMMAND_HANDLER(handle_fast_load_command)
6172 {
6173 if (CMD_ARGC > 0)
6174 return ERROR_COMMAND_SYNTAX_ERROR;
6175 if (fastload == NULL) {
6176 LOG_ERROR("No image in memory");
6177 return ERROR_FAIL;
6178 }
6179 int i;
6180 int64_t ms = timeval_ms();
6181 int size = 0;
6182 int retval = ERROR_OK;
6183 for (i = 0; i < fastload_num; i++) {
6184 struct target *target = get_current_target(CMD_CTX);
6185 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6186 (unsigned int)(fastload[i].address),
6187 (unsigned int)(fastload[i].length));
6188 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6189 if (retval != ERROR_OK)
6190 break;
6191 size += fastload[i].length;
6192 }
6193 if (retval == ERROR_OK) {
6194 int64_t after = timeval_ms();
6195 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6196 }
6197 return retval;
6198 }
6199
6200 static const struct command_registration target_command_handlers[] = {
6201 {
6202 .name = "targets",
6203 .handler = handle_targets_command,
6204 .mode = COMMAND_ANY,
6205 .help = "change current default target (one parameter) "
6206 "or prints table of all targets (no parameters)",
6207 .usage = "[target]",
6208 },
6209 {
6210 .name = "target",
6211 .mode = COMMAND_CONFIG,
6212 .help = "configure target",
6213 .chain = target_subcommand_handlers,
6214 .usage = "",
6215 },
6216 COMMAND_REGISTRATION_DONE
6217 };
6218
6219 int target_register_commands(struct command_context *cmd_ctx)
6220 {
6221 return register_commands(cmd_ctx, NULL, target_command_handlers);
6222 }
6223
6224 static bool target_reset_nag = true;
6225
6226 bool get_target_reset_nag(void)
6227 {
6228 return target_reset_nag;
6229 }
6230
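/* handle_target_reset_nag: implements "reset_nag ['enable'|'disable']"; parses
 * the boolean and stores it in target_reset_nag, which is queried through
 * get_target_reset_nag(). */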
6231 COMMAND_HANDLER(handle_target_reset_nag)
6232 {
6233 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6234 &target_reset_nag, "Nag after each reset about options to improve "
6235 "performance");
6236 }
6237
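/* handle_ps_command: "ps" lists the tasks of the configured RTOS by delegating
 * to the RTOS type's ps_command hook; requires a halted target and an RTOS
 * that provides such a hook. */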
6238 COMMAND_HANDLER(handle_ps_command)
6239 {
6240 struct target *target = get_current_target(CMD_CTX);
6241 char *display;
6242 if (target->state != TARGET_HALTED) {
6243 LOG_INFO("target not halted !!");
6244 return ERROR_OK;
6245 }
6246
6247 if ((target->rtos) && (target->rtos->type)
6248 && (target->rtos->type->ps_command)) {
6249 display = target->rtos->type->ps_command(target);
6250 command_print(CMD, "%s", display);
6251 free(display);
6252 return ERROR_OK;
6253 } else {
6254 LOG_INFO("failed");
6255 return ERROR_TARGET_FAILURE;
6256 }
6257 }
6258
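/* binprint: helper for test_mem_access; prints an optional label followed by
 * a hex dump of the given buffer on one line. */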
6259 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6260 {
6261 if (text != NULL)
6262 command_print_sameline(cmd, "%s", text);
6263 for (int i = 0; i < size; i++)
6264 command_print_sameline(cmd, " %02x", buf[i]);
6265 command_print(cmd, " ");
6266 }
6267
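/* handle_test_mem_access_command: exercises target_read_memory() and
 * target_write_memory() with every combination of access size (1/2/4 bytes),
 * target address offset and host buffer alignment, comparing the results
 * against a host-side reference copy and reporting throughput. */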
6268 COMMAND_HANDLER(handle_test_mem_access_command)
6269 {
6270 struct target *target = get_current_target(CMD_CTX);
6271 uint32_t test_size;
6272 int retval = ERROR_OK;
6273
6274 if (target->state != TARGET_HALTED) {
6275 LOG_INFO("target not halted !!");
6276 return ERROR_FAIL;
6277 }
6278
6279 if (CMD_ARGC != 1)
6280 return ERROR_COMMAND_SYNTAX_ERROR;
6281
6282 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6283
6284 /* Test reads */
6285 size_t num_bytes = test_size + 4;
6286
6287 struct working_area *wa = NULL;
6288 retval = target_alloc_working_area(target, num_bytes, &wa);
6289 if (retval != ERROR_OK) {
6290 LOG_ERROR("Not enough working area");
6291 return ERROR_FAIL;
6292 }
6293
6294 uint8_t *test_pattern = malloc(num_bytes);
6295
6296 for (size_t i = 0; i < num_bytes; i++)
6297 test_pattern[i] = rand();
6298
6299 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6300 if (retval != ERROR_OK) {
6301 LOG_ERROR("Test pattern write failed");
6302 goto out;
6303 }
6304
6305 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6306 for (int size = 1; size <= 4; size *= 2) {
6307 for (int offset = 0; offset < 4; offset++) {
6308 uint32_t count = test_size / size;
6309 size_t host_bufsiz = (count + 2) * size + host_offset;
6310 uint8_t *read_ref = malloc(host_bufsiz);
6311 uint8_t *read_buf = malloc(host_bufsiz);
6312
6313 for (size_t i = 0; i < host_bufsiz; i++) {
6314 read_ref[i] = rand();
6315 read_buf[i] = read_ref[i];
6316 }
6317 command_print_sameline(CMD,
6318 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6319 size, offset, host_offset ? "un" : "");
6320
6321 struct duration bench;
6322 duration_start(&bench);
6323
6324 retval = target_read_memory(target, wa->address + offset, size, count,
6325 read_buf + size + host_offset);
6326
6327 duration_measure(&bench);
6328
6329 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6330 command_print(CMD, "Unsupported alignment");
6331 goto next;
6332 } else if (retval != ERROR_OK) {
6333 command_print(CMD, "Memory read failed");
6334 goto next;
6335 }
6336
6337 /* replay on host */
6338 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6339
6340 /* check result */
6341 int result = memcmp(read_ref, read_buf, host_bufsiz);
6342 if (result == 0) {
6343 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6344 duration_elapsed(&bench),
6345 duration_kbps(&bench, count * size));
6346 } else {
6347 command_print(CMD, "Compare failed");
6348 binprint(CMD, "ref:", read_ref, host_bufsiz);
6349 binprint(CMD, "buf:", read_buf, host_bufsiz);
6350 }
6351 next:
6352 free(read_ref);
6353 free(read_buf);
6354 }
6355 }
6356 }
6357
6358 out:
6359 free(test_pattern);
6360
6361 if (wa != NULL)
6362 target_free_working_area(target, wa);
6363
6364 /* Test writes */
6365 num_bytes = test_size + 4 + 4 + 4;
6366
6367 retval = target_alloc_working_area(target, num_bytes, &wa);
6368 if (retval != ERROR_OK) {
6369 LOG_ERROR("Not enough working area");
6370 return ERROR_FAIL;
6371 }
6372
6373 test_pattern = malloc(num_bytes);
6374
6375 for (size_t i = 0; i < num_bytes; i++)
6376 test_pattern[i] = rand();
6377
6378 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6379 for (int size = 1; size <= 4; size *= 2) {
6380 for (int offset = 0; offset < 4; offset++) {
6381 uint32_t count = test_size / size;
6382 size_t host_bufsiz = count * size + host_offset;
6383 uint8_t *read_ref = malloc(num_bytes);
6384 uint8_t *read_buf = malloc(num_bytes);
6385 uint8_t *write_buf = malloc(host_bufsiz);
6386
6387 for (size_t i = 0; i < host_bufsiz; i++)
6388 write_buf[i] = rand();
6389 command_print_sameline(CMD,
6390 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6391 size, offset, host_offset ? "un" : "");
6392
6393 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6394 if (retval != ERROR_OK) {
6395 command_print(CMD, "Test pattern write failed");
6396 goto nextw;
6397 }
6398
6399 /* replay on host */
6400 memcpy(read_ref, test_pattern, num_bytes);
6401 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6402
6403 struct duration bench;
6404 duration_start(&bench);
6405
6406 retval = target_write_memory(target, wa->address + size + offset, size, count,
6407 write_buf + host_offset);
6408
6409 duration_measure(&bench);
6410
6411 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6412 command_print(CMD, "Unsupported alignment");
6413 goto nextw;
6414 } else if (retval != ERROR_OK) {
6415 command_print(CMD, "Memory write failed");
6416 goto nextw;
6417 }
6418
6419 /* read back */
6420 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6421 if (retval != ERROR_OK) {
6422 command_print(CMD, "Test pattern read back failed");
6423 goto nextw;
6424 }
6425
6426 /* check result */
6427 int result = memcmp(read_ref, read_buf, num_bytes);
6428 if (result == 0) {
6429 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6430 duration_elapsed(&bench),
6431 duration_kbps(&bench, count * size));
6432 } else {
6433 command_print(CMD, "Compare failed");
6434 binprint(CMD, "ref:", read_ref, num_bytes);
6435 binprint(CMD, "buf:", read_buf, num_bytes);
6436 }
6437 nextw:
6438 free(read_ref);
6439 free(read_buf);
6440 }
6441 }
6442 }
6443
6444 free(test_pattern);
6445
6446 if (wa != NULL)
6447 target_free_working_area(target, wa);
6448 return retval;
6449 }
6450
6451 static const struct command_registration target_exec_command_handlers[] = {
6452 {
6453 .name = "fast_load_image",
6454 .handler = handle_fast_load_image_command,
6455 .mode = COMMAND_ANY,
6456 .help = "Load image into server memory for later use by "
6457 "fast_load; primarily for profiling",
6458 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6459 "[min_address [max_length]]",
6460 },
6461 {
6462 .name = "fast_load",
6463 .handler = handle_fast_load_command,
6464 .mode = COMMAND_EXEC,
6465 .help = "loads active fast load image to current target "
6466 "- mainly for profiling purposes",
6467 .usage = "",
6468 },
6469 {
6470 .name = "profile",
6471 .handler = handle_profile_command,
6472 .mode = COMMAND_EXEC,
6473 .usage = "seconds filename [start end]",
6474 .help = "profiling samples the CPU PC",
6475 },
6476 /** @todo don't register virt2phys() unless target supports it */
6477 {
6478 .name = "virt2phys",
6479 .handler = handle_virt2phys_command,
6480 .mode = COMMAND_ANY,
6481 .help = "translate a virtual address into a physical address",
6482 .usage = "virtual_address",
6483 },
6484 {
6485 .name = "reg",
6486 .handler = handle_reg_command,
6487 .mode = COMMAND_EXEC,
6488 .help = "display (reread from target with \"force\") or set a register; "
6489 "with no arguments, displays all registers and their values",
6490 .usage = "[(register_number|register_name) [(value|'force')]]",
6491 },
6492 {
6493 .name = "poll",
6494 .handler = handle_poll_command,
6495 .mode = COMMAND_EXEC,
6496 .help = "poll target state; or reconfigure background polling",
6497 .usage = "['on'|'off']",
6498 },
6499 {
6500 .name = "wait_halt",
6501 .handler = handle_wait_halt_command,
6502 .mode = COMMAND_EXEC,
6503 .help = "wait up to the specified number of milliseconds "
6504 "(default 5000) for a previously requested halt",
6505 .usage = "[milliseconds]",
6506 },
6507 {
6508 .name = "halt",
6509 .handler = handle_halt_command,
6510 .mode = COMMAND_EXEC,
6511 .help = "request target to halt, then wait up to the specified "
6512 "number of milliseconds (default 5000) for it to complete",
6513 .usage = "[milliseconds]",
6514 },
6515 {
6516 .name = "resume",
6517 .handler = handle_resume_command,
6518 .mode = COMMAND_EXEC,
6519 .help = "resume target execution from current PC or address",
6520 .usage = "[address]",
6521 },
6522 {
6523 .name = "reset",
6524 .handler = handle_reset_command,
6525 .mode = COMMAND_EXEC,
6526 .usage = "[run|halt|init]",
6527 .help = "Reset all targets into the specified mode. "
6528 "Default reset mode is run, if not given.",
6529 },
6530 {
6531 .name = "soft_reset_halt",
6532 .handler = handle_soft_reset_halt_command,
6533 .mode = COMMAND_EXEC,
6534 .usage = "",
6535 .help = "halt the target and do a soft reset",
6536 },
6537 {
6538 .name = "step",
6539 .handler = handle_step_command,
6540 .mode = COMMAND_EXEC,
6541 .help = "step one instruction from current PC or address",
6542 .usage = "[address]",
6543 },
6544 {
6545 .name = "mdd",
6546 .handler = handle_md_command,
6547 .mode = COMMAND_EXEC,
6548 .help = "display memory double-words",
6549 .usage = "['phys'] address [count]",
6550 },
6551 {
6552 .name = "mdw",
6553 .handler = handle_md_command,
6554 .mode = COMMAND_EXEC,
6555 .help = "display memory words",
6556 .usage = "['phys'] address [count]",
6557 },
6558 {
6559 .name = "mdh",
6560 .handler = handle_md_command,
6561 .mode = COMMAND_EXEC,
6562 .help = "display memory half-words",
6563 .usage = "['phys'] address [count]",
6564 },
6565 {
6566 .name = "mdb",
6567 .handler = handle_md_command,
6568 .mode = COMMAND_EXEC,
6569 .help = "display memory bytes",
6570 .usage = "['phys'] address [count]",
6571 },
6572 {
6573 .name = "mwd",
6574 .handler = handle_mw_command,
6575 .mode = COMMAND_EXEC,
6576 .help = "write memory double-word",
6577 .usage = "['phys'] address value [count]",
6578 },
6579 {
6580 .name = "mww",
6581 .handler = handle_mw_command,
6582 .mode = COMMAND_EXEC,
6583 .help = "write memory word",
6584 .usage = "['phys'] address value [count]",
6585 },
6586 {
6587 .name = "mwh",
6588 .handler = handle_mw_command,
6589 .mode = COMMAND_EXEC,
6590 .help = "write memory half-word",
6591 .usage = "['phys'] address value [count]",
6592 },
6593 {
6594 .name = "mwb",
6595 .handler = handle_mw_command,
6596 .mode = COMMAND_EXEC,
6597 .help = "write memory byte",
6598 .usage = "['phys'] address value [count]",
6599 },
6600 {
6601 .name = "bp",
6602 .handler = handle_bp_command,
6603 .mode = COMMAND_EXEC,
6604 .help = "list or set hardware or software breakpoint",
6605 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6606 },
6607 {
6608 .name = "rbp",
6609 .handler = handle_rbp_command,
6610 .mode = COMMAND_EXEC,
6611 .help = "remove breakpoint",
6612 .usage = "'all' | address",
6613 },
6614 {
6615 .name = "wp",
6616 .handler = handle_wp_command,
6617 .mode = COMMAND_EXEC,
6618 .help = "list (no params) or create watchpoints",
6619 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6620 },
6621 {
6622 .name = "rwp",
6623 .handler = handle_rwp_command,
6624 .mode = COMMAND_EXEC,
6625 .help = "remove watchpoint",
6626 .usage = "address",
6627 },
6628 {
6629 .name = "load_image",
6630 .handler = handle_load_image_command,
6631 .mode = COMMAND_EXEC,
6632 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6633 "[min_address] [max_length]",
6634 },
6635 {
6636 .name = "dump_image",
6637 .handler = handle_dump_image_command,
6638 .mode = COMMAND_EXEC,
6639 .usage = "filename address size",
6640 },
6641 {
6642 .name = "verify_image_checksum",
6643 .handler = handle_verify_image_checksum_command,
6644 .mode = COMMAND_EXEC,
6645 .usage = "filename [offset [type]]",
6646 },
6647 {
6648 .name = "verify_image",
6649 .handler = handle_verify_image_command,
6650 .mode = COMMAND_EXEC,
6651 .usage = "filename [offset [type]]",
6652 },
6653 {
6654 .name = "test_image",
6655 .handler = handle_test_image_command,
6656 .mode = COMMAND_EXEC,
6657 .usage = "filename [offset [type]]",
6658 },
6659 {
6660 .name = "mem2array",
6661 .mode = COMMAND_EXEC,
6662 .jim_handler = jim_mem2array,
6663 .help = "read 8/16/32 bit memory and return as a TCL array "
6664 "for script processing",
6665 .usage = "arrayname bitwidth address count",
6666 },
6667 {
6668 .name = "array2mem",
6669 .mode = COMMAND_EXEC,
6670 .jim_handler = jim_array2mem,
6671 .help = "convert a TCL array to memory locations "
6672 "and write the 8/16/32 bit values",
6673 .usage = "arrayname bitwidth address count",
6674 },
6675 {
6676 .name = "reset_nag",
6677 .handler = handle_target_reset_nag,
6678 .mode = COMMAND_ANY,
6679 .help = "Nag after each reset about options that could have been "
6680 "enabled to improve performance. ",
6681 .usage = "['enable'|'disable']",
6682 },
6683 {
6684 .name = "ps",
6685 .handler = handle_ps_command,
6686 .mode = COMMAND_EXEC,
6687 .help = "list all tasks",
6688 .usage = "",
6689 },
6690 {
6691 .name = "test_mem_access",
6692 .handler = handle_test_mem_access_command,
6693 .mode = COMMAND_EXEC,
6694 .help = "Test the target's memory access functions",
6695 .usage = "size",
6696 },
6697
6698 COMMAND_REGISTRATION_DONE
6699 };
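/* Illustrative sketch, not part of this file: a minimal example of how another
 * module can plug an EXEC-mode command into the same registration machinery
 * used throughout this file.  "my_handler", "mycmd" and "my_command_handlers"
 * are placeholder names.
 *
 *   COMMAND_HANDLER(my_handler)
 *   {
 *       if (CMD_ARGC != 0)
 *           return ERROR_COMMAND_SYNTAX_ERROR;
 *       command_print(CMD, "hello");
 *       return ERROR_OK;
 *   }
 *
 *   static const struct command_registration my_command_handlers[] = {
 *       {
 *           .name = "mycmd",
 *           .handler = my_handler,
 *           .mode = COMMAND_EXEC,
 *           .help = "example command",
 *           .usage = "",
 *       },
 *       COMMAND_REGISTRATION_DONE
 *   };
 *
 * Registration would then mirror target_register_user_commands() below:
 *   register_commands(cmd_ctx, NULL, my_command_handlers);
 */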
6700 static int target_register_user_commands(struct command_context *cmd_ctx)
6701 {
6702 int retval = ERROR_OK;
6703 retval = target_request_register_commands(cmd_ctx);
6704 if (retval != ERROR_OK)
6705 return retval;
6706
6707 retval = trace_register_commands(cmd_ctx);
6708 if (retval != ERROR_OK)
6709 return retval;
6710
6711
6712 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6713 }
