1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75
76 /* targets */
77 extern struct target_type arm7tdmi_target;
78 extern struct target_type arm720t_target;
79 extern struct target_type arm9tdmi_target;
80 extern struct target_type arm920t_target;
81 extern struct target_type arm966e_target;
82 extern struct target_type arm946e_target;
83 extern struct target_type arm926ejs_target;
84 extern struct target_type fa526_target;
85 extern struct target_type feroceon_target;
86 extern struct target_type dragonite_target;
87 extern struct target_type xscale_target;
88 extern struct target_type cortexm_target;
89 extern struct target_type cortexa_target;
90 extern struct target_type aarch64_target;
91 extern struct target_type cortexr4_target;
92 extern struct target_type arm11_target;
93 extern struct target_type ls1_sap_target;
94 extern struct target_type mips_m4k_target;
95 extern struct target_type mips_mips64_target;
96 extern struct target_type avr_target;
97 extern struct target_type dsp563xx_target;
98 extern struct target_type dsp5680xx_target;
99 extern struct target_type testee_target;
100 extern struct target_type avr32_ap7k_target;
101 extern struct target_type hla_target;
102 extern struct target_type nds32_v2_target;
103 extern struct target_type nds32_v3_target;
104 extern struct target_type nds32_v3m_target;
105 extern struct target_type or1k_target;
106 extern struct target_type quark_x10xx_target;
107 extern struct target_type quark_d20xx_target;
108 extern struct target_type stm8_target;
109 extern struct target_type riscv_target;
110 extern struct target_type mem_ap_target;
111 extern struct target_type esirisc_target;
112 extern struct target_type arcv2_target;
113
114 static struct target_type *target_types[] = {
115 &arm7tdmi_target,
116 &arm9tdmi_target,
117 &arm920t_target,
118 &arm720t_target,
119 &arm966e_target,
120 &arm946e_target,
121 &arm926ejs_target,
122 &fa526_target,
123 &feroceon_target,
124 &dragonite_target,
125 &xscale_target,
126 &cortexm_target,
127 &cortexa_target,
128 &cortexr4_target,
129 &arm11_target,
130 &ls1_sap_target,
131 &mips_m4k_target,
132 &avr_target,
133 &dsp563xx_target,
134 &dsp5680xx_target,
135 &testee_target,
136 &avr32_ap7k_target,
137 &hla_target,
138 &nds32_v2_target,
139 &nds32_v3_target,
140 &nds32_v3m_target,
141 &or1k_target,
142 &quark_x10xx_target,
143 &quark_d20xx_target,
144 &stm8_target,
145 &riscv_target,
146 &mem_ap_target,
147 &esirisc_target,
148 &arcv2_target,
149 &aarch64_target,
150 &mips_mips64_target,
151 NULL,
152 };
153
154 struct target *all_targets;
155 static struct target_event_callback *target_event_callbacks;
156 static struct target_timer_callback *target_timer_callbacks;
157 static LIST_HEAD(target_reset_callback_list);
158 static LIST_HEAD(target_trace_callback_list);
159 static const int polling_interval = 100;
160
161 static const Jim_Nvp nvp_assert[] = {
162 { .name = "assert", NVP_ASSERT },
163 { .name = "deassert", NVP_DEASSERT },
164 { .name = "T", NVP_ASSERT },
165 { .name = "F", NVP_DEASSERT },
166 { .name = "t", NVP_ASSERT },
167 { .name = "f", NVP_DEASSERT },
168 { .name = NULL, .value = -1 }
169 };
170
171 static const Jim_Nvp nvp_error_target[] = {
172 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
173 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
174 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
175 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
176 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
177 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
178 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
179 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
180 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
181 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
182 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
183 { .value = -1, .name = NULL }
184 };
185
186 static const char *target_strerror_safe(int err)
187 {
188 const Jim_Nvp *n;
189
190 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
191 if (n->name == NULL)
192 return "unknown";
193 else
194 return n->name;
195 }
196
197 static const Jim_Nvp nvp_target_event[] = {
198
199 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
200 { .value = TARGET_EVENT_HALTED, .name = "halted" },
201 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
202 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
203 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
204 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
205 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
206
207 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
208 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
209
210 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
211 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
212 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
213 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
214 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
215 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
216 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
217 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
218
219 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
220 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
221 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
222
223 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
224 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
225
226 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
227 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
228
229 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
230 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
231
232 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
233 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
234
235 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
236
237 { .name = NULL, .value = -1 }
238 };
239
240 static const Jim_Nvp nvp_target_state[] = {
241 { .name = "unknown", .value = TARGET_UNKNOWN },
242 { .name = "running", .value = TARGET_RUNNING },
243 { .name = "halted", .value = TARGET_HALTED },
244 { .name = "reset", .value = TARGET_RESET },
245 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
246 { .name = NULL, .value = -1 },
247 };
248
249 static const Jim_Nvp nvp_target_debug_reason[] = {
250 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
251 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
252 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
253 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
254 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
255 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
256 { .name = "program-exit", .value = DBG_REASON_EXIT },
257 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
258 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
259 { .name = NULL, .value = -1 },
260 };
261
262 static const Jim_Nvp nvp_target_endian[] = {
263 { .name = "big", .value = TARGET_BIG_ENDIAN },
264 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
265 { .name = "be", .value = TARGET_BIG_ENDIAN },
266 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
267 { .name = NULL, .value = -1 },
268 };
269
270 static const Jim_Nvp nvp_reset_modes[] = {
271 { .name = "unknown", .value = RESET_UNKNOWN },
272 { .name = "run", .value = RESET_RUN },
273 { .name = "halt", .value = RESET_HALT },
274 { .name = "init", .value = RESET_INIT },
275 { .name = NULL, .value = -1 },
276 };
277
278 const char *debug_reason_name(struct target *t)
279 {
280 const char *cp;
281
282 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
283 t->debug_reason)->name;
284 if (!cp) {
285 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
286 cp = "(*BUG*unknown*BUG*)";
287 }
288 return cp;
289 }
290
291 const char *target_state_name(struct target *t)
292 {
293 const char *cp;
294 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
295 if (!cp) {
296 LOG_ERROR("Invalid target state: %d", (int)(t->state));
297 cp = "(*BUG*unknown*BUG*)";
298 }
299
300 if (!target_was_examined(t) && t->defer_examine)
301 cp = "examine deferred";
302
303 return cp;
304 }
305
306 const char *target_event_name(enum target_event event)
307 {
308 const char *cp;
309 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
310 if (!cp) {
311 LOG_ERROR("Invalid target event: %d", (int)(event));
312 cp = "(*BUG*unknown*BUG*)";
313 }
314 return cp;
315 }
316
317 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
318 {
319 const char *cp;
320 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
321 if (!cp) {
322 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
323 cp = "(*BUG*unknown*BUG*)";
324 }
325 return cp;
326 }
327
328 /* determine the number of the new target */
329 static int new_target_number(void)
330 {
331 struct target *t;
332 int x;
333
334 /* number is 0 based */
335 x = -1;
336 t = all_targets;
337 while (t) {
338 if (x < t->target_number)
339 x = t->target_number;
340 t = t->next;
341 }
342 return x + 1;
343 }
344
345 static void append_to_list_all_targets(struct target *target)
346 {
347 struct target **t = &all_targets;
348
349 while (*t)
350 t = &((*t)->next);
351 *t = target;
352 }
353
354 /* read a uint64_t from a buffer in target memory endianness */
355 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
356 {
357 if (target->endianness == TARGET_LITTLE_ENDIAN)
358 return le_to_h_u64(buffer);
359 else
360 return be_to_h_u64(buffer);
361 }
362
363 /* read a uint32_t from a buffer in target memory endianness */
364 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
365 {
366 if (target->endianness == TARGET_LITTLE_ENDIAN)
367 return le_to_h_u32(buffer);
368 else
369 return be_to_h_u32(buffer);
370 }
371
372 /* read a uint24_t from a buffer in target memory endianness */
373 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 return le_to_h_u24(buffer);
377 else
378 return be_to_h_u24(buffer);
379 }
380
381 /* read a uint16_t from a buffer in target memory endianness */
382 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 return le_to_h_u16(buffer);
386 else
387 return be_to_h_u16(buffer);
388 }
389
390 /* write a uint64_t to a buffer in target memory endianness */
391 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 h_u64_to_le(buffer, value);
395 else
396 h_u64_to_be(buffer, value);
397 }
398
399 /* write a uint32_t to a buffer in target memory endianness */
400 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
401 {
402 if (target->endianness == TARGET_LITTLE_ENDIAN)
403 h_u32_to_le(buffer, value);
404 else
405 h_u32_to_be(buffer, value);
406 }
407
408 /* write a uint24_t to a buffer in target memory endianness */
409 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
410 {
411 if (target->endianness == TARGET_LITTLE_ENDIAN)
412 h_u24_to_le(buffer, value);
413 else
414 h_u24_to_be(buffer, value);
415 }
416
417 /* write a uint16_t to a buffer in target memory endianness */
418 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
419 {
420 if (target->endianness == TARGET_LITTLE_ENDIAN)
421 h_u16_to_le(buffer, value);
422 else
423 h_u16_to_be(buffer, value);
424 }
425
426 /* write a uint8_t to a buffer in target memory endianness */
427 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
428 {
429 *buffer = value;
430 }
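/*
 * Minimal usage sketch (hypothetical caller; 'addr' and 'value' are
 * placeholders): the accessors above let callers convert between host and
 * target byte order before handing raw bytes to the memory routines
 * declared in target.h, instead of open-coding the swap.
 *
 *	uint8_t raw[4];
 *	target_buffer_set_u32(target, raw, value);
 *	int retval = target_write_memory(target, addr, 4, 1, raw);
 *
 *	uint8_t in[4];
 *	retval = target_read_memory(target, addr, 4, 1, in);
 *	if (retval == ERROR_OK)
 *		value = target_buffer_get_u32(target, in);
 */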
431
432 /* read a uint64_t array from a buffer in target memory endianness */
433 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
434 {
435 uint32_t i;
436 for (i = 0; i < count; i++)
437 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
438 }
439
440 /* read a uint32_t array from a buffer in target memory endianness */
441 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
442 {
443 uint32_t i;
444 for (i = 0; i < count; i++)
445 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
446 }
447
448 /* read a uint16_t array from a buffer in target memory endianness */
449 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
450 {
451 uint32_t i;
452 for (i = 0; i < count; i++)
453 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
454 }
455
456 /* write a uint64_t array to a buffer in target memory endianness */
457 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
458 {
459 uint32_t i;
460 for (i = 0; i < count; i++)
461 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
462 }
463
464 /* write a uint32_t array to a buffer in target memory endianness */
465 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
466 {
467 uint32_t i;
468 for (i = 0; i < count; i++)
469 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
470 }
471
472 /* write a uint16_t array to a buffer in target memory endianness */
473 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
474 {
475 uint32_t i;
476 for (i = 0; i < count; i++)
477 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
478 }
479
480 /* return a pointer to a configured target; id is name or number */
481 struct target *get_target(const char *id)
482 {
483 struct target *target;
484
485 /* try as tcl target name */
486 for (target = all_targets; target; target = target->next) {
487 if (target_name(target) == NULL)
488 continue;
489 if (strcmp(id, target_name(target)) == 0)
490 return target;
491 }
492
493 /* It's OK to remove this fallback sometime after August 2010 or so */
494
495 /* no match, try as number */
496 unsigned num;
497 if (parse_uint(id, &num) != ERROR_OK)
498 return NULL;
499
500 for (target = all_targets; target; target = target->next) {
501 if (target->target_number == (int)num) {
502 LOG_WARNING("use '%s' as target identifier, not '%u'",
503 target_name(target), num);
504 return target;
505 }
506 }
507
508 return NULL;
509 }
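/*
 * Minimal usage sketch (the target name "stm32f1x.cpu" is a placeholder):
 * callers normally resolve a target by its configured name; the numeric
 * fallback above is legacy behaviour only.
 *
 *	struct target *t = get_target("stm32f1x.cpu");
 *	if (!t)
 *		LOG_ERROR("target not found");
 */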
510
511 /* returns a pointer to the n-th configured target */
512 struct target *get_target_by_num(int num)
513 {
514 struct target *target = all_targets;
515
516 while (target) {
517 if (target->target_number == num)
518 return target;
519 target = target->next;
520 }
521
522 return NULL;
523 }
524
525 struct target *get_current_target(struct command_context *cmd_ctx)
526 {
527 struct target *target = get_current_target_or_null(cmd_ctx);
528
529 if (target == NULL) {
530 LOG_ERROR("BUG: current_target out of bounds");
531 exit(-1);
532 }
533
534 return target;
535 }
536
537 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
538 {
539 return cmd_ctx->current_target_override
540 ? cmd_ctx->current_target_override
541 : cmd_ctx->current_target;
542 }
543
544 int target_poll(struct target *target)
545 {
546 int retval;
547
548 /* We can't poll until after examine */
549 if (!target_was_examined(target)) {
550 /* Fail silently lest we pollute the log */
551 return ERROR_FAIL;
552 }
553
554 retval = target->type->poll(target);
555 if (retval != ERROR_OK)
556 return retval;
557
558 if (target->halt_issued) {
559 if (target->state == TARGET_HALTED)
560 target->halt_issued = false;
561 else {
562 int64_t t = timeval_ms() - target->halt_issued_time;
563 if (t > DEFAULT_HALT_TIMEOUT) {
564 target->halt_issued = false;
565 LOG_INFO("Halt timed out, wake up GDB.");
566 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
567 }
568 }
569 }
570
571 return ERROR_OK;
572 }
573
574 int target_halt(struct target *target)
575 {
576 int retval;
577 /* We can't poll until after examine */
578 if (!target_was_examined(target)) {
579 LOG_ERROR("Target not examined yet");
580 return ERROR_FAIL;
581 }
582
583 retval = target->type->halt(target);
584 if (retval != ERROR_OK)
585 return retval;
586
587 target->halt_issued = true;
588 target->halt_issued_time = timeval_ms();
589
590 return ERROR_OK;
591 }
592
593 /**
594 * Make the target (re)start executing using its saved execution
595 * context (possibly with some modifications).
596 *
597 * @param target Which target should start executing.
598 * @param current True to use the target's saved program counter instead
599 * of the address parameter
600 * @param address Optionally used as the program counter.
601 * @param handle_breakpoints True iff breakpoints at the resumption PC
602 * should be skipped. (For example, maybe execution was stopped by
603 * such a breakpoint, in which case it would be counterproductive to
604 * let it re-trigger.)
605 * @param debug_execution False if all working areas allocated by OpenOCD
606 * should be released and/or restored to their original contents.
607 * (This would for example be true to run some downloaded "helper"
608 * algorithm code, which resides in one such working buffer and uses
609 * another for data storage.)
610 *
611 * @todo Resolve the ambiguity about what the "debug_execution" flag
612 * signifies. For example, Target implementations don't agree on how
613 * it relates to invalidation of the register cache, or to whether
614 * breakpoints and watchpoints should be enabled. (It would seem wrong
615 * to enable breakpoints when running downloaded "helper" algorithms
616 * (debug_execution true), since the breakpoints would be set to match
617 * target firmware being debugged, not the helper algorithm.... and
618 * enabling them could cause such helpers to malfunction (for example,
619 * by overwriting data with a breakpoint instruction). On the other
620 * hand the infrastructure for running such helpers might use this
621 * procedure but rely on hardware breakpoint to detect termination.)
622 */
623 int target_resume(struct target *target, int current, target_addr_t address,
624 int handle_breakpoints, int debug_execution)
625 {
626 int retval;
627
628 /* We can't poll until after examine */
629 if (!target_was_examined(target)) {
630 LOG_ERROR("Target not examined yet");
631 return ERROR_FAIL;
632 }
633
634 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
635
636 /* note that resume *must* be asynchronous. The CPU can halt before
637 * we poll. The CPU can even halt at the current PC as a result of
638 * a software breakpoint being inserted by (a bug?) the application.
639 */
640 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
641 if (retval != ERROR_OK)
642 return retval;
643
644 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
645
646 return retval;
647 }
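/*
 * Minimal usage sketch of a halt/resume round trip, using only the API in
 * this file and target.h (error handling abbreviated; 'target' is assumed
 * to be examined already):
 *
 *	int retval = target_halt(target);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	retval = target_wait_state(target, TARGET_HALTED, 500);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	retval = target_resume(target, 1, 0, 1, 0);
 *
 * Here current = 1 resumes from the saved PC, handle_breakpoints = 1 skips
 * a breakpoint planted at that PC, and debug_execution = 0 is a normal run.
 */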
648
649 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
650 {
651 char buf[100];
652 int retval;
653 Jim_Nvp *n;
654 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
655 if (n->name == NULL) {
656 LOG_ERROR("invalid reset mode");
657 return ERROR_FAIL;
658 }
659
660 struct target *target;
661 for (target = all_targets; target; target = target->next)
662 target_call_reset_callbacks(target, reset_mode);
663
664 /* disable polling during reset to make reset event scripts
665 * more predictable, i.e. dr/irscan & pathmove in events will
666 * not have JTAG operations injected into the middle of a sequence.
667 */
668 bool save_poll = jtag_poll_get_enabled();
669
670 jtag_poll_set_enabled(false);
671
672 sprintf(buf, "ocd_process_reset %s", n->name);
673 retval = Jim_Eval(cmd->ctx->interp, buf);
674
675 jtag_poll_set_enabled(save_poll);
676
677 if (retval != JIM_OK) {
678 Jim_MakeErrorMessage(cmd->ctx->interp);
679 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
680 return ERROR_FAIL;
681 }
682
683 /* We want any events to be processed before the prompt */
684 retval = target_call_timer_callbacks_now();
685
686 for (target = all_targets; target; target = target->next) {
687 target->type->check_reset(target);
688 target->running_alg = false;
689 }
690
691 return retval;
692 }
693
694 static int identity_virt2phys(struct target *target,
695 target_addr_t virtual, target_addr_t *physical)
696 {
697 *physical = virtual;
698 return ERROR_OK;
699 }
700
701 static int no_mmu(struct target *target, int *enabled)
702 {
703 *enabled = 0;
704 return ERROR_OK;
705 }
706
707 static int default_examine(struct target *target)
708 {
709 target_set_examined(target);
710 return ERROR_OK;
711 }
712
713 /* no check by default */
714 static int default_check_reset(struct target *target)
715 {
716 return ERROR_OK;
717 }
718
719 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
720 * Keep in sync */
721 int target_examine_one(struct target *target)
722 {
723 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
724
725 int retval = target->type->examine(target);
726 if (retval != ERROR_OK) {
727 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
728 return retval;
729 }
730
731 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
732
733 return ERROR_OK;
734 }
735
736 static int jtag_enable_callback(enum jtag_event event, void *priv)
737 {
738 struct target *target = priv;
739
740 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
741 return ERROR_OK;
742
743 jtag_unregister_event_callback(jtag_enable_callback, target);
744
745 return target_examine_one(target);
746 }
747
748 /* Targets that correctly implement init + examine, i.e.
749 * no communication with target during init:
750 *
751 * XScale
752 */
753 int target_examine(void)
754 {
755 int retval = ERROR_OK;
756 struct target *target;
757
758 for (target = all_targets; target; target = target->next) {
759 /* defer examination, but don't skip it */
760 if (!target->tap->enabled) {
761 jtag_register_event_callback(jtag_enable_callback,
762 target);
763 continue;
764 }
765
766 if (target->defer_examine)
767 continue;
768
769 int retval2 = target_examine_one(target);
770 if (retval2 != ERROR_OK) {
771 LOG_WARNING("target %s examination failed", target_name(target));
772 retval = retval2;
773 }
774 }
775 return retval;
776 }
777
778 const char *target_type_name(struct target *target)
779 {
780 return target->type->name;
781 }
782
783 static int target_soft_reset_halt(struct target *target)
784 {
785 if (!target_was_examined(target)) {
786 LOG_ERROR("Target not examined yet");
787 return ERROR_FAIL;
788 }
789 if (!target->type->soft_reset_halt) {
790 LOG_ERROR("Target %s does not support soft_reset_halt",
791 target_name(target));
792 return ERROR_FAIL;
793 }
794 return target->type->soft_reset_halt(target);
795 }
796
797 /**
798 * Downloads a target-specific native code algorithm to the target,
799 * and executes it. Note that some targets may need to set up, enable,
800 * and tear down a breakpoint (hard or soft) to detect algorithm
801 * termination, while others may support lower overhead schemes where
802 * soft breakpoints embedded in the algorithm automatically terminate the
803 * algorithm.
804 *
805 * @param target used to run the algorithm
806 * @param arch_info target-specific description of the algorithm.
807 */
808 int target_run_algorithm(struct target *target,
809 int num_mem_params, struct mem_param *mem_params,
810 int num_reg_params, struct reg_param *reg_param,
811 uint32_t entry_point, uint32_t exit_point,
812 int timeout_ms, void *arch_info)
813 {
814 int retval = ERROR_FAIL;
815
816 if (!target_was_examined(target)) {
817 LOG_ERROR("Target not examined yet");
818 goto done;
819 }
820 if (!target->type->run_algorithm) {
821 LOG_ERROR("Target type '%s' does not support %s",
822 target_type_name(target), __func__);
823 goto done;
824 }
825
826 target->running_alg = true;
827 retval = target->type->run_algorithm(target,
828 num_mem_params, mem_params,
829 num_reg_params, reg_param,
830 entry_point, exit_point, timeout_ms, arch_info);
831 target->running_alg = false;
832
833 done:
834 return retval;
835 }
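/*
 * Minimal usage sketch (init_reg_param(), buf_set_u32() and friends come
 * from algorithm.h and binarybuffer.h; 'code_addr', 'exit_addr' and
 * 'arch_info' are placeholders for a stub the caller has already loaded):
 *
 *	struct reg_param reg_params[1];
 *	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
 *	buf_set_u32(reg_params[0].value, 0, 32, argument);
 *	retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *			code_addr, exit_addr, 1000, &arch_info);
 *	if (retval == ERROR_OK)
 *		result = buf_get_u32(reg_params[0].value, 0, 32);
 *	destroy_reg_param(&reg_params[0]);
 */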
836
837 /**
838 * Executes a target-specific native code algorithm and leaves it running.
839 *
840 * @param target used to run the algorithm
841 * @param arch_info target-specific description of the algorithm.
842 */
843 int target_start_algorithm(struct target *target,
844 int num_mem_params, struct mem_param *mem_params,
845 int num_reg_params, struct reg_param *reg_params,
846 uint32_t entry_point, uint32_t exit_point,
847 void *arch_info)
848 {
849 int retval = ERROR_FAIL;
850
851 if (!target_was_examined(target)) {
852 LOG_ERROR("Target not examined yet");
853 goto done;
854 }
855 if (!target->type->start_algorithm) {
856 LOG_ERROR("Target type '%s' does not support %s",
857 target_type_name(target), __func__);
858 goto done;
859 }
860 if (target->running_alg) {
861 LOG_ERROR("Target is already running an algorithm");
862 goto done;
863 }
864
865 target->running_alg = true;
866 retval = target->type->start_algorithm(target,
867 num_mem_params, mem_params,
868 num_reg_params, reg_params,
869 entry_point, exit_point, arch_info);
870
871 done:
872 return retval;
873 }
874
875 /**
876 * Waits for an algorithm started with target_start_algorithm() to complete.
877 *
878 * @param target used to run the algorithm
879 * @param arch_info target-specific description of the algorithm.
880 */
881 int target_wait_algorithm(struct target *target,
882 int num_mem_params, struct mem_param *mem_params,
883 int num_reg_params, struct reg_param *reg_params,
884 uint32_t exit_point, int timeout_ms,
885 void *arch_info)
886 {
887 int retval = ERROR_FAIL;
888
889 if (!target->type->wait_algorithm) {
890 LOG_ERROR("Target type '%s' does not support %s",
891 target_type_name(target), __func__);
892 goto done;
893 }
894 if (!target->running_alg) {
895 LOG_ERROR("Target is not running an algorithm");
896 goto done;
897 }
898
899 retval = target->type->wait_algorithm(target,
900 num_mem_params, mem_params,
901 num_reg_params, reg_params,
902 exit_point, timeout_ms, arch_info);
903 if (retval != ERROR_TARGET_TIMEOUT)
904 target->running_alg = false;
905
906 done:
907 return retval;
908 }
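/*
 * Minimal usage sketch of the split form ('entry_point', 'exit_point',
 * 'reg_params' and 'arch_info' as above): start the stub, stream data while
 * it runs, then collect the result. On ERROR_TARGET_TIMEOUT the algorithm
 * is still marked as running, so the caller may wait again or abort.
 *
 *	retval = target_start_algorithm(target, 0, NULL, 1, reg_params,
 *			entry_point, exit_point, &arch_info);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	(feed the target here, e.g. via target_write_buffer())
 *	retval = target_wait_algorithm(target, 0, NULL, 1, reg_params,
 *			exit_point, 10000, &arch_info);
 */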
909
910 /**
911 * Streams data to a circular buffer on target intended for consumption by code
912 * running asynchronously on target.
913 *
914 * This is intended for applications where target-specific native code runs
915 * on the target, receives data from the circular buffer, does something with
916 * it (most likely writing it to a flash memory), and advances the circular
917 * buffer pointer.
918 *
919 * This assumes that the helper algorithm has already been loaded to the target,
920 * but has not been started yet. Given memory and register parameters are passed
921 * to the algorithm.
922 *
923 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
924 * following format:
925 *
926 * [buffer_start + 0, buffer_start + 4):
927 * Write Pointer address (aka head). Written and updated by this
928 * routine when new data is written to the circular buffer.
929 * [buffer_start + 4, buffer_start + 8):
930 * Read Pointer address (aka tail). Updated by code running on the
931 * target after it consumes data.
932 * [buffer_start + 8, buffer_start + buffer_size):
933 * Circular buffer contents.
934 *
935 * See contrib/loaders/flash/stm32f1x.S for an example.
936 *
937 * @param target used to run the algorithm
938 * @param buffer address on the host where data to be sent is located
939 * @param count number of blocks to send
940 * @param block_size size in bytes of each block
941 * @param num_mem_params count of memory-based params to pass to algorithm
942 * @param mem_params memory-based params to pass to algorithm
943 * @param num_reg_params count of register-based params to pass to algorithm
944 * @param reg_params register-based params to pass to algorithm
945 * @param buffer_start address on the target of the circular buffer structure
946 * @param buffer_size size of the circular buffer structure
947 * @param entry_point address on the target to execute to start the algorithm
948 * @param exit_point address at which to set a breakpoint to catch the
949 * end of the algorithm; can be 0 if target triggers a breakpoint itself
950 */
951
952 int target_run_flash_async_algorithm(struct target *target,
953 const uint8_t *buffer, uint32_t count, int block_size,
954 int num_mem_params, struct mem_param *mem_params,
955 int num_reg_params, struct reg_param *reg_params,
956 uint32_t buffer_start, uint32_t buffer_size,
957 uint32_t entry_point, uint32_t exit_point, void *arch_info)
958 {
959 int retval;
960 int timeout = 0;
961
962 const uint8_t *buffer_orig = buffer;
963
964 /* Set up working area. First word is write pointer, second word is read pointer,
965 * rest is fifo data area. */
966 uint32_t wp_addr = buffer_start;
967 uint32_t rp_addr = buffer_start + 4;
968 uint32_t fifo_start_addr = buffer_start + 8;
969 uint32_t fifo_end_addr = buffer_start + buffer_size;
970
971 uint32_t wp = fifo_start_addr;
972 uint32_t rp = fifo_start_addr;
973
974 /* validate block_size is 2^n */
975 assert(!block_size || !(block_size & (block_size - 1)));
976
977 retval = target_write_u32(target, wp_addr, wp);
978 if (retval != ERROR_OK)
979 return retval;
980 retval = target_write_u32(target, rp_addr, rp);
981 if (retval != ERROR_OK)
982 return retval;
983
984 /* Start up algorithm on target and let it idle while writing the first chunk */
985 retval = target_start_algorithm(target, num_mem_params, mem_params,
986 num_reg_params, reg_params,
987 entry_point,
988 exit_point,
989 arch_info);
990
991 if (retval != ERROR_OK) {
992 LOG_ERROR("error starting target flash write algorithm");
993 return retval;
994 }
995
996 while (count > 0) {
997
998 retval = target_read_u32(target, rp_addr, &rp);
999 if (retval != ERROR_OK) {
1000 LOG_ERROR("failed to get read pointer");
1001 break;
1002 }
1003
1004 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1005 (size_t) (buffer - buffer_orig), count, wp, rp);
1006
1007 if (rp == 0) {
1008 LOG_ERROR("flash write algorithm aborted by target");
1009 retval = ERROR_FLASH_OPERATION_FAILED;
1010 break;
1011 }
1012
1013 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1014 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1015 break;
1016 }
1017
1018 /* Count the number of bytes available in the fifo without
1019 * crossing the wrap around. Make sure to not fill it completely,
1020 * because that would make wp == rp and that's the empty condition. */
1021 uint32_t thisrun_bytes;
1022 if (rp > wp)
1023 thisrun_bytes = rp - wp - block_size;
1024 else if (rp > fifo_start_addr)
1025 thisrun_bytes = fifo_end_addr - wp;
1026 else
1027 thisrun_bytes = fifo_end_addr - wp - block_size;
1028
1029 if (thisrun_bytes == 0) {
1030 /* Throttle polling a bit if transfer is (much) faster than flash
1031 * programming. The exact delay shouldn't matter as long as it's
1032 * less than buffer size / flash speed. This is very unlikely to
1033 * run when using high latency connections such as USB. */
1034 alive_sleep(2);
1035
1036 /* To stop an infinite loop on some targets, check and increment a timeout;
1037 * this issue was observed on a Stellaris using the new ICDI interface. */
1038 if (timeout++ >= 2500) {
1039 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1040 return ERROR_FLASH_OPERATION_FAILED;
1041 }
1042 continue;
1043 }
1044
1045 /* reset our timeout */
1046 timeout = 0;
1047
1048 /* Limit to the amount of data we actually want to write */
1049 if (thisrun_bytes > count * block_size)
1050 thisrun_bytes = count * block_size;
1051
1052 /* Force end of large blocks to be word aligned */
1053 if (thisrun_bytes >= 16)
1054 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1055
1056 /* Write data to fifo */
1057 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1058 if (retval != ERROR_OK)
1059 break;
1060
1061 /* Update counters and wrap write pointer */
1062 buffer += thisrun_bytes;
1063 count -= thisrun_bytes / block_size;
1064 wp += thisrun_bytes;
1065 if (wp >= fifo_end_addr)
1066 wp = fifo_start_addr;
1067
1068 /* Store updated write pointer to target */
1069 retval = target_write_u32(target, wp_addr, wp);
1070 if (retval != ERROR_OK)
1071 break;
1072
1073 /* Avoid GDB timeouts */
1074 keep_alive();
1075 }
1076
1077 if (retval != ERROR_OK) {
1078 /* abort flash write algorithm on target */
1079 target_write_u32(target, wp_addr, 0);
1080 }
1081
1082 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1083 num_reg_params, reg_params,
1084 exit_point,
1085 10000,
1086 arch_info);
1087
1088 if (retval2 != ERROR_OK) {
1089 LOG_ERROR("error waiting for target flash write algorithm");
1090 retval = retval2;
1091 }
1092
1093 if (retval == ERROR_OK) {
1094 /* check if algorithm set rp = 0 after fifo writer loop finished */
1095 retval = target_read_u32(target, rp_addr, &rp);
1096 if (retval == ERROR_OK && rp == 0) {
1097 LOG_ERROR("flash write algorithm aborted by target");
1098 retval = ERROR_FLASH_OPERATION_FAILED;
1099 }
1100 }
1101
1102 return retval;
1103 }
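/*
 * Minimal usage sketch from a flash driver's perspective (the working-area
 * and stub setup, 'reg_params', 'write_algorithm' and 'arch_info' are
 * assumed to exist; target_alloc_working_area() and
 * target_free_working_area() are declared in target.h):
 *
 *	struct working_area *fifo;
 *	if (target_alloc_working_area(target, fifo_size, &fifo) != ERROR_OK)
 *		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *	retval = target_run_flash_async_algorithm(target, buffer, count, 4,
 *			0, NULL, ARRAY_SIZE(reg_params), reg_params,
 *			fifo->address, fifo->size,
 *			write_algorithm->address, 0, &arch_info);
 *	target_free_working_area(target, fifo);
 *
 * The block size of 4 matches a stub that programs one 32-bit word per
 * iteration; exit_point 0 relies on the stub breaking on its own.
 */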
1104
1105 int target_run_read_async_algorithm(struct target *target,
1106 uint8_t *buffer, uint32_t count, int block_size,
1107 int num_mem_params, struct mem_param *mem_params,
1108 int num_reg_params, struct reg_param *reg_params,
1109 uint32_t buffer_start, uint32_t buffer_size,
1110 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1111 {
1112 int retval;
1113 int timeout = 0;
1114
1115 const uint8_t *buffer_orig = buffer;
1116
1117 /* Set up working area. First word is write pointer, second word is read pointer,
1118 * rest is fifo data area. */
1119 uint32_t wp_addr = buffer_start;
1120 uint32_t rp_addr = buffer_start + 4;
1121 uint32_t fifo_start_addr = buffer_start + 8;
1122 uint32_t fifo_end_addr = buffer_start + buffer_size;
1123
1124 uint32_t wp = fifo_start_addr;
1125 uint32_t rp = fifo_start_addr;
1126
1127 /* validate block_size is 2^n */
1128 assert(!block_size || !(block_size & (block_size - 1)));
1129
1130 retval = target_write_u32(target, wp_addr, wp);
1131 if (retval != ERROR_OK)
1132 return retval;
1133 retval = target_write_u32(target, rp_addr, rp);
1134 if (retval != ERROR_OK)
1135 return retval;
1136
1137 /* Start up algorithm on target */
1138 retval = target_start_algorithm(target, num_mem_params, mem_params,
1139 num_reg_params, reg_params,
1140 entry_point,
1141 exit_point,
1142 arch_info);
1143
1144 if (retval != ERROR_OK) {
1145 LOG_ERROR("error starting target flash read algorithm");
1146 return retval;
1147 }
1148
1149 while (count > 0) {
1150 retval = target_read_u32(target, wp_addr, &wp);
1151 if (retval != ERROR_OK) {
1152 LOG_ERROR("failed to get write pointer");
1153 break;
1154 }
1155
1156 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1157 (size_t)(buffer - buffer_orig), count, wp, rp);
1158
1159 if (wp == 0) {
1160 LOG_ERROR("flash read algorithm aborted by target");
1161 retval = ERROR_FLASH_OPERATION_FAILED;
1162 break;
1163 }
1164
1165 if (((wp - fifo_start_addr) & (block_size - 1)) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1166 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1167 break;
1168 }
1169
1170 /* Count the number of bytes available in the fifo without
1171 * crossing the wrap around. */
1172 uint32_t thisrun_bytes;
1173 if (wp >= rp)
1174 thisrun_bytes = wp - rp;
1175 else
1176 thisrun_bytes = fifo_end_addr - rp;
1177
1178 if (thisrun_bytes == 0) {
1179 /* Throttle polling a bit if transfer is (much) faster than flash
1180 * reading. The exact delay shouldn't matter as long as it's
1181 * less than buffer size / flash speed. This is very unlikely to
1182 * run when using high latency connections such as USB. */
1183 alive_sleep(2);
1184
1185 /* To stop an infinite loop on some targets, check and increment a timeout;
1186 * this issue was observed on a Stellaris using the new ICDI interface. */
1187 if (timeout++ >= 2500) {
1188 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1189 return ERROR_FLASH_OPERATION_FAILED;
1190 }
1191 continue;
1192 }
1193
1194 /* Reset our timeout */
1195 timeout = 0;
1196
1197 /* Limit to the amount of data we actually want to read */
1198 if (thisrun_bytes > count * block_size)
1199 thisrun_bytes = count * block_size;
1200
1201 /* Force end of large blocks to be word aligned */
1202 if (thisrun_bytes >= 16)
1203 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1204
1205 /* Read data from fifo */
1206 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1207 if (retval != ERROR_OK)
1208 break;
1209
1210 /* Update counters and wrap read pointer */
1211 buffer += thisrun_bytes;
1212 count -= thisrun_bytes / block_size;
1213 rp += thisrun_bytes;
1214 if (rp >= fifo_end_addr)
1215 rp = fifo_start_addr;
1216
1217 /* Store updated read pointer to target */
1218 retval = target_write_u32(target, rp_addr, rp);
1219 if (retval != ERROR_OK)
1220 break;
1221
1222 /* Avoid GDB timeouts */
1223 keep_alive();
1224
1225 }
1226
1227 if (retval != ERROR_OK) {
1228 /* abort flash read algorithm on target */
1229 target_write_u32(target, rp_addr, 0);
1230 }
1231
1232 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1233 num_reg_params, reg_params,
1234 exit_point,
1235 10000,
1236 arch_info);
1237
1238 if (retval2 != ERROR_OK) {
1239 LOG_ERROR("error waiting for target flash write algorithm");
1240 retval = retval2;
1241 }
1242
1243 if (retval == ERROR_OK) {
1244 /* check if algorithm set wp = 0 after fifo writer loop finished */
1245 retval = target_read_u32(target, wp_addr, &wp);
1246 if (retval == ERROR_OK && wp == 0) {
1247 LOG_ERROR("flash read algorithm aborted by target");
1248 retval = ERROR_FLASH_OPERATION_FAILED;
1249 }
1250 }
1251
1252 return retval;
1253 }
1254
1255 int target_read_memory(struct target *target,
1256 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1257 {
1258 if (!target_was_examined(target)) {
1259 LOG_ERROR("Target not examined yet");
1260 return ERROR_FAIL;
1261 }
1262 if (!target->type->read_memory) {
1263 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1264 return ERROR_FAIL;
1265 }
1266 return target->type->read_memory(target, address, size, count, buffer);
1267 }
1268
1269 int target_read_phys_memory(struct target *target,
1270 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1271 {
1272 if (!target_was_examined(target)) {
1273 LOG_ERROR("Target not examined yet");
1274 return ERROR_FAIL;
1275 }
1276 if (!target->type->read_phys_memory) {
1277 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1278 return ERROR_FAIL;
1279 }
1280 return target->type->read_phys_memory(target, address, size, count, buffer);
1281 }
1282
1283 int target_write_memory(struct target *target,
1284 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1285 {
1286 if (!target_was_examined(target)) {
1287 LOG_ERROR("Target not examined yet");
1288 return ERROR_FAIL;
1289 }
1290 if (!target->type->write_memory) {
1291 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1292 return ERROR_FAIL;
1293 }
1294 return target->type->write_memory(target, address, size, count, buffer);
1295 }
1296
1297 int target_write_phys_memory(struct target *target,
1298 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1299 {
1300 if (!target_was_examined(target)) {
1301 LOG_ERROR("Target not examined yet");
1302 return ERROR_FAIL;
1303 }
1304 if (!target->type->write_phys_memory) {
1305 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1306 return ERROR_FAIL;
1307 }
1308 return target->type->write_phys_memory(target, address, size, count, buffer);
1309 }
1310
1311 int target_add_breakpoint(struct target *target,
1312 struct breakpoint *breakpoint)
1313 {
1314 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1315 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1316 return ERROR_TARGET_NOT_HALTED;
1317 }
1318 return target->type->add_breakpoint(target, breakpoint);
1319 }
1320
1321 int target_add_context_breakpoint(struct target *target,
1322 struct breakpoint *breakpoint)
1323 {
1324 if (target->state != TARGET_HALTED) {
1325 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1326 return ERROR_TARGET_NOT_HALTED;
1327 }
1328 return target->type->add_context_breakpoint(target, breakpoint);
1329 }
1330
1331 int target_add_hybrid_breakpoint(struct target *target,
1332 struct breakpoint *breakpoint)
1333 {
1334 if (target->state != TARGET_HALTED) {
1335 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1336 return ERROR_TARGET_NOT_HALTED;
1337 }
1338 return target->type->add_hybrid_breakpoint(target, breakpoint);
1339 }
1340
1341 int target_remove_breakpoint(struct target *target,
1342 struct breakpoint *breakpoint)
1343 {
1344 return target->type->remove_breakpoint(target, breakpoint);
1345 }
1346
1347 int target_add_watchpoint(struct target *target,
1348 struct watchpoint *watchpoint)
1349 {
1350 if (target->state != TARGET_HALTED) {
1351 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1352 return ERROR_TARGET_NOT_HALTED;
1353 }
1354 return target->type->add_watchpoint(target, watchpoint);
1355 }
1356 int target_remove_watchpoint(struct target *target,
1357 struct watchpoint *watchpoint)
1358 {
1359 return target->type->remove_watchpoint(target, watchpoint);
1360 }
1361 int target_hit_watchpoint(struct target *target,
1362 struct watchpoint **hit_watchpoint)
1363 {
1364 if (target->state != TARGET_HALTED) {
1365 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1366 return ERROR_TARGET_NOT_HALTED;
1367 }
1368
1369 if (target->type->hit_watchpoint == NULL) {
1370 /* For backward compatibility: if hit_watchpoint is not implemented,
1371 * return ERROR_FAIL so that gdb_server will not rely on bogus
1372 * information. */
1373 return ERROR_FAIL;
1374 }
1375
1376 return target->type->hit_watchpoint(target, hit_watchpoint);
1377 }
1378
1379 const char *target_get_gdb_arch(struct target *target)
1380 {
1381 if (target->type->get_gdb_arch == NULL)
1382 return NULL;
1383 return target->type->get_gdb_arch(target);
1384 }
1385
1386 int target_get_gdb_reg_list(struct target *target,
1387 struct reg **reg_list[], int *reg_list_size,
1388 enum target_register_class reg_class)
1389 {
1390 int result = ERROR_FAIL;
1391
1392 if (!target_was_examined(target)) {
1393 LOG_ERROR("Target not examined yet");
1394 goto done;
1395 }
1396
1397 result = target->type->get_gdb_reg_list(target, reg_list,
1398 reg_list_size, reg_class);
1399
1400 done:
1401 if (result != ERROR_OK) {
1402 *reg_list = NULL;
1403 *reg_list_size = 0;
1404 }
1405 return result;
1406 }
1407
1408 int target_get_gdb_reg_list_noread(struct target *target,
1409 struct reg **reg_list[], int *reg_list_size,
1410 enum target_register_class reg_class)
1411 {
1412 if (target->type->get_gdb_reg_list_noread &&
1413 target->type->get_gdb_reg_list_noread(target, reg_list,
1414 reg_list_size, reg_class) == ERROR_OK)
1415 return ERROR_OK;
1416 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1417 }
1418
1419 bool target_supports_gdb_connection(struct target *target)
1420 {
1421 /*
1422 * exclude all the targets that don't provide get_gdb_reg_list
1423 * or that have explicit gdb_max_connections == 0
1424 */
1425 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1426 }
1427
1428 int target_step(struct target *target,
1429 int current, target_addr_t address, int handle_breakpoints)
1430 {
1431 int retval;
1432
1433 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1434
1435 retval = target->type->step(target, current, address, handle_breakpoints);
1436 if (retval != ERROR_OK)
1437 return retval;
1438
1439 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1440
1441 return retval;
1442 }
1443
1444 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1445 {
1446 if (target->state != TARGET_HALTED) {
1447 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1448 return ERROR_TARGET_NOT_HALTED;
1449 }
1450 return target->type->get_gdb_fileio_info(target, fileio_info);
1451 }
1452
1453 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1454 {
1455 if (target->state != TARGET_HALTED) {
1456 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1457 return ERROR_TARGET_NOT_HALTED;
1458 }
1459 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1460 }
1461
1462 target_addr_t target_address_max(struct target *target)
1463 {
1464 unsigned bits = target_address_bits(target);
1465 if (sizeof(target_addr_t) * 8 == bits)
1466 return (target_addr_t) -1;
1467 else
1468 return (((target_addr_t) 1) << bits) - 1;
1469 }
1470
1471 unsigned target_address_bits(struct target *target)
1472 {
1473 if (target->type->address_bits)
1474 return target->type->address_bits(target);
1475 return 32;
1476 }
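/*
 * Worked example: with the default 32-bit address_bits the helpers above
 * give target_address_max() == (((target_addr_t)1 << 32) - 1) == 0xffffffff,
 * while a target whose address_bits() hook reports the full width of
 * target_addr_t gets (target_addr_t)-1 directly, avoiding an undefined
 * shift by the type's full width.
 */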
1477
1478 static int target_profiling(struct target *target, uint32_t *samples,
1479 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1480 {
1481 return target->type->profiling(target, samples, max_num_samples,
1482 num_samples, seconds);
1483 }
1484
1485 /**
1486 * Reset the @c examined flag for the given target.
1487 * Pure paranoia -- targets are zeroed on allocation.
1488 */
1489 static void target_reset_examined(struct target *target)
1490 {
1491 target->examined = false;
1492 }
1493
1494 static int handle_target(void *priv);
1495
1496 static int target_init_one(struct command_context *cmd_ctx,
1497 struct target *target)
1498 {
1499 target_reset_examined(target);
1500
1501 struct target_type *type = target->type;
1502 if (type->examine == NULL)
1503 type->examine = default_examine;
1504
1505 if (type->check_reset == NULL)
1506 type->check_reset = default_check_reset;
1507
1508 assert(type->init_target != NULL);
1509
1510 int retval = type->init_target(cmd_ctx, target);
1511 if (ERROR_OK != retval) {
1512 LOG_ERROR("target '%s' init failed", target_name(target));
1513 return retval;
1514 }
1515
1516 /* Sanity-check MMU support ... stub in what we must, to help
1517 * implement it in stages, but warn if we need to do so.
1518 */
1519 if (type->mmu) {
1520 if (type->virt2phys == NULL) {
1521 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1522 type->virt2phys = identity_virt2phys;
1523 }
1524 } else {
1525 /* Make sure no-MMU targets all behave the same: make no
1526 * distinction between physical and virtual addresses, and
1527 * ensure that virt2phys() is always an identity mapping.
1528 */
1529 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1530 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1531
1532 type->mmu = no_mmu;
1533 type->write_phys_memory = type->write_memory;
1534 type->read_phys_memory = type->read_memory;
1535 type->virt2phys = identity_virt2phys;
1536 }
1537
1538 if (target->type->read_buffer == NULL)
1539 target->type->read_buffer = target_read_buffer_default;
1540
1541 if (target->type->write_buffer == NULL)
1542 target->type->write_buffer = target_write_buffer_default;
1543
1544 if (target->type->get_gdb_fileio_info == NULL)
1545 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1546
1547 if (target->type->gdb_fileio_end == NULL)
1548 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1549
1550 if (target->type->profiling == NULL)
1551 target->type->profiling = target_profiling_default;
1552
1553 return ERROR_OK;
1554 }
1555
1556 static int target_init(struct command_context *cmd_ctx)
1557 {
1558 struct target *target;
1559 int retval;
1560
1561 for (target = all_targets; target; target = target->next) {
1562 retval = target_init_one(cmd_ctx, target);
1563 if (ERROR_OK != retval)
1564 return retval;
1565 }
1566
1567 if (!all_targets)
1568 return ERROR_OK;
1569
1570 retval = target_register_user_commands(cmd_ctx);
1571 if (ERROR_OK != retval)
1572 return retval;
1573
1574 retval = target_register_timer_callback(&handle_target,
1575 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1576 if (ERROR_OK != retval)
1577 return retval;
1578
1579 return ERROR_OK;
1580 }
1581
1582 COMMAND_HANDLER(handle_target_init_command)
1583 {
1584 int retval;
1585
1586 if (CMD_ARGC != 0)
1587 return ERROR_COMMAND_SYNTAX_ERROR;
1588
1589 static bool target_initialized;
1590 if (target_initialized) {
1591 LOG_INFO("'target init' has already been called");
1592 return ERROR_OK;
1593 }
1594 target_initialized = true;
1595
1596 retval = command_run_line(CMD_CTX, "init_targets");
1597 if (ERROR_OK != retval)
1598 return retval;
1599
1600 retval = command_run_line(CMD_CTX, "init_target_events");
1601 if (ERROR_OK != retval)
1602 return retval;
1603
1604 retval = command_run_line(CMD_CTX, "init_board");
1605 if (ERROR_OK != retval)
1606 return retval;
1607
1608 LOG_DEBUG("Initializing targets...");
1609 return target_init(CMD_CTX);
1610 }
1611
1612 int target_register_event_callback(int (*callback)(struct target *target,
1613 enum target_event event, void *priv), void *priv)
1614 {
1615 struct target_event_callback **callbacks_p = &target_event_callbacks;
1616
1617 if (callback == NULL)
1618 return ERROR_COMMAND_SYNTAX_ERROR;
1619
1620 if (*callbacks_p) {
1621 while ((*callbacks_p)->next)
1622 callbacks_p = &((*callbacks_p)->next);
1623 callbacks_p = &((*callbacks_p)->next);
1624 }
1625
1626 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1627 (*callbacks_p)->callback = callback;
1628 (*callbacks_p)->priv = priv;
1629 (*callbacks_p)->next = NULL;
1630
1631 return ERROR_OK;
1632 }
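/*
 * Minimal usage sketch ('my_halt_handler' and 'my_priv' are placeholders):
 *
 *	static int my_halt_handler(struct target *target,
 *			enum target_event event, void *priv)
 *	{
 *		if (event == TARGET_EVENT_HALTED)
 *			LOG_INFO("%s halted", target_name(target));
 *		return ERROR_OK;
 *	}
 *
 *	target_register_event_callback(my_halt_handler, my_priv);
 */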
1633
1634 int target_register_reset_callback(int (*callback)(struct target *target,
1635 enum target_reset_mode reset_mode, void *priv), void *priv)
1636 {
1637 struct target_reset_callback *entry;
1638
1639 if (callback == NULL)
1640 return ERROR_COMMAND_SYNTAX_ERROR;
1641
1642 entry = malloc(sizeof(struct target_reset_callback));
1643 if (entry == NULL) {
1644 LOG_ERROR("error allocating buffer for reset callback entry");
1645 return ERROR_COMMAND_SYNTAX_ERROR;
1646 }
1647
1648 entry->callback = callback;
1649 entry->priv = priv;
1650 list_add(&entry->list, &target_reset_callback_list);
1651
1652
1653 return ERROR_OK;
1654 }
1655
1656 int target_register_trace_callback(int (*callback)(struct target *target,
1657 size_t len, uint8_t *data, void *priv), void *priv)
1658 {
1659 struct target_trace_callback *entry;
1660
1661 if (callback == NULL)
1662 return ERROR_COMMAND_SYNTAX_ERROR;
1663
1664 entry = malloc(sizeof(struct target_trace_callback));
1665 if (entry == NULL) {
1666 LOG_ERROR("error allocating buffer for trace callback entry");
1667 return ERROR_COMMAND_SYNTAX_ERROR;
1668 }
1669
1670 entry->callback = callback;
1671 entry->priv = priv;
1672 list_add(&entry->list, &target_trace_callback_list);
1673
1674
1675 return ERROR_OK;
1676 }
1677
1678 int target_register_timer_callback(int (*callback)(void *priv),
1679 unsigned int time_ms, enum target_timer_type type, void *priv)
1680 {
1681 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1682
1683 if (callback == NULL)
1684 return ERROR_COMMAND_SYNTAX_ERROR;
1685
1686 if (*callbacks_p) {
1687 while ((*callbacks_p)->next)
1688 callbacks_p = &((*callbacks_p)->next);
1689 callbacks_p = &((*callbacks_p)->next);
1690 }
1691
1692 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1693 (*callbacks_p)->callback = callback;
1694 (*callbacks_p)->type = type;
1695 (*callbacks_p)->time_ms = time_ms;
1696 (*callbacks_p)->removed = false;
1697
1698 gettimeofday(&(*callbacks_p)->when, NULL);
1699 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1700
1701 (*callbacks_p)->priv = priv;
1702 (*callbacks_p)->next = NULL;
1703
1704 return ERROR_OK;
1705 }
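/*
 * Minimal usage sketch ('my_tick' and 'my_priv' are placeholders), mirroring
 * how target_init() above registers handle_target() every polling_interval:
 *
 *	target_register_timer_callback(my_tick, 100,
 *			TARGET_TIMER_TYPE_PERIODIC, my_priv);
 *
 * A TARGET_TIMER_TYPE_ONESHOT callback is removed automatically after its
 * first invocation; see target_call_timer_callback() further down.
 */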
1706
1707 int target_unregister_event_callback(int (*callback)(struct target *target,
1708 enum target_event event, void *priv), void *priv)
1709 {
1710 struct target_event_callback **p = &target_event_callbacks;
1711 struct target_event_callback *c = target_event_callbacks;
1712
1713 if (callback == NULL)
1714 return ERROR_COMMAND_SYNTAX_ERROR;
1715
1716 while (c) {
1717 struct target_event_callback *next = c->next;
1718 if ((c->callback == callback) && (c->priv == priv)) {
1719 *p = next;
1720 free(c);
1721 return ERROR_OK;
1722 } else
1723 p = &(c->next);
1724 c = next;
1725 }
1726
1727 return ERROR_OK;
1728 }
1729
1730 int target_unregister_reset_callback(int (*callback)(struct target *target,
1731 enum target_reset_mode reset_mode, void *priv), void *priv)
1732 {
1733 struct target_reset_callback *entry;
1734
1735 if (callback == NULL)
1736 return ERROR_COMMAND_SYNTAX_ERROR;
1737
1738 list_for_each_entry(entry, &target_reset_callback_list, list) {
1739 if (entry->callback == callback && entry->priv == priv) {
1740 list_del(&entry->list);
1741 free(entry);
1742 break;
1743 }
1744 }
1745
1746 return ERROR_OK;
1747 }
1748
1749 int target_unregister_trace_callback(int (*callback)(struct target *target,
1750 size_t len, uint8_t *data, void *priv), void *priv)
1751 {
1752 struct target_trace_callback *entry;
1753
1754 if (callback == NULL)
1755 return ERROR_COMMAND_SYNTAX_ERROR;
1756
1757 list_for_each_entry(entry, &target_trace_callback_list, list) {
1758 if (entry->callback == callback && entry->priv == priv) {
1759 list_del(&entry->list);
1760 free(entry);
1761 break;
1762 }
1763 }
1764
1765 return ERROR_OK;
1766 }
1767
1768 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1769 {
1770 if (callback == NULL)
1771 return ERROR_COMMAND_SYNTAX_ERROR;
1772
1773 for (struct target_timer_callback *c = target_timer_callbacks;
1774 c; c = c->next) {
1775 if ((c->callback == callback) && (c->priv == priv)) {
1776 c->removed = true;
1777 return ERROR_OK;
1778 }
1779 }
1780
1781 return ERROR_FAIL;
1782 }
1783
1784 int target_call_event_callbacks(struct target *target, enum target_event event)
1785 {
1786 struct target_event_callback *callback = target_event_callbacks;
1787 struct target_event_callback *next_callback;
1788
1789 if (event == TARGET_EVENT_HALTED) {
1790 /* deliver the GDB halt event before the regular HALTED callbacks */
1791 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1792 }
1793
1794 LOG_DEBUG("target event %i (%s) for core %s", event,
1795 Jim_Nvp_value2name_simple(nvp_target_event, event)->name,
1796 target_name(target));
1797
1798 target_handle_event(target, event);
1799
1800 while (callback) {
1801 next_callback = callback->next;
1802 callback->callback(target, event, callback->priv);
1803 callback = next_callback;
1804 }
1805
1806 return ERROR_OK;
1807 }
1808
1809 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1810 {
1811 struct target_reset_callback *callback;
1812
1813 LOG_DEBUG("target reset %i (%s)", reset_mode,
1814 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1815
1816 list_for_each_entry(callback, &target_reset_callback_list, list)
1817 callback->callback(target, reset_mode, callback->priv);
1818
1819 return ERROR_OK;
1820 }
1821
1822 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1823 {
1824 struct target_trace_callback *callback;
1825
1826 list_for_each_entry(callback, &target_trace_callback_list, list)
1827 callback->callback(target, len, data, callback->priv);
1828
1829 return ERROR_OK;
1830 }
1831
1832 static int target_timer_callback_periodic_restart(
1833 struct target_timer_callback *cb, struct timeval *now)
1834 {
1835 cb->when = *now;
1836 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1837 return ERROR_OK;
1838 }
1839
1840 static int target_call_timer_callback(struct target_timer_callback *cb,
1841 struct timeval *now)
1842 {
1843 cb->callback(cb->priv);
1844
1845 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1846 return target_timer_callback_periodic_restart(cb, now);
1847
1848 return target_unregister_timer_callback(cb->callback, cb->priv);
1849 }
1850
1851 static int target_call_timer_callbacks_check_time(int checktime)
1852 {
1853 static bool callback_processing;
1854
1855 /* Do not allow nesting */
1856 if (callback_processing)
1857 return ERROR_OK;
1858
1859 callback_processing = true;
1860
1861 keep_alive();
1862
1863 struct timeval now;
1864 gettimeofday(&now, NULL);
1865
1866 /* Store an address of the place containing a pointer to the
1867 * next item; initially, that's a standalone "root of the
1868 * list" variable. */
1869 struct target_timer_callback **callback = &target_timer_callbacks;
1870 while (callback && *callback) {
1871 if ((*callback)->removed) {
1872 struct target_timer_callback *p = *callback;
1873 *callback = (*callback)->next;
1874 free(p);
1875 continue;
1876 }
1877
1878 bool call_it = (*callback)->callback &&
1879 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1880 timeval_compare(&now, &(*callback)->when) >= 0);
1881
1882 if (call_it)
1883 target_call_timer_callback(*callback, &now);
1884
1885 callback = &(*callback)->next;
1886 }
1887
1888 callback_processing = false;
1889 return ERROR_OK;
1890 }
1891
1892 int target_call_timer_callbacks(void)
1893 {
1894 return target_call_timer_callbacks_check_time(1);
1895 }
1896
1897 /* invoke periodic callbacks immediately */
1898 int target_call_timer_callbacks_now(void)
1899 {
1900 return target_call_timer_callbacks_check_time(0);
1901 }
1902
1903 /* Prints the working area layout for debug purposes */
1904 static void print_wa_layout(struct target *target)
1905 {
1906 struct working_area *c = target->working_areas;
1907
1908 while (c) {
1909 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1910 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1911 c->address, c->address + c->size - 1, c->size);
1912 c = c->next;
1913 }
1914 }
1915
1916 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1917 static void target_split_working_area(struct working_area *area, uint32_t size)
1918 {
1919 assert(area->free); /* Shouldn't split an allocated area */
1920 assert(size <= area->size); /* Caller should guarantee this */
1921
1922 /* Split only if not already the right size */
1923 if (size < area->size) {
1924 struct working_area *new_wa = malloc(sizeof(*new_wa));
1925
1926 if (new_wa == NULL)
1927 return;
1928
1929 new_wa->next = area->next;
1930 new_wa->size = area->size - size;
1931 new_wa->address = area->address + size;
1932 new_wa->backup = NULL;
1933 new_wa->user = NULL;
1934 new_wa->free = true;
1935
1936 area->next = new_wa;
1937 area->size = size;
1938
1939 /* If backup memory was allocated to this area, it has the wrong size
1940 * now so free it and it will be reallocated if/when needed */
1941 free(area->backup);
1942 area->backup = NULL;
1943 }
1944 }
1945
1946 /* Merge all adjacent free areas into one */
1947 static void target_merge_working_areas(struct target *target)
1948 {
1949 struct working_area *c = target->working_areas;
1950
1951 while (c && c->next) {
1952 assert(c->next->address == c->address + c->size); /* This is an invariant */
1953
1954 /* Find two adjacent free areas */
1955 if (c->free && c->next->free) {
1956 /* Merge the last into the first */
1957 c->size += c->next->size;
1958
1959 /* Remove the last */
1960 struct working_area *to_be_freed = c->next;
1961 c->next = c->next->next;
1962 free(to_be_freed->backup);
1963 free(to_be_freed);
1964
1965 /* If backup memory was allocated to the remaining area, it has
1966 * the wrong size now */
1967 free(c->backup);
1968 c->backup = NULL;
1969 } else {
1970 c = c->next;
1971 }
1972 }
1973 }
1974
1975 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1976 {
1977 /* Reevaluate working area address based on MMU state */
1978 if (target->working_areas == NULL) {
1979 int retval;
1980 int enabled;
1981
1982 retval = target->type->mmu(target, &enabled);
1983 if (retval != ERROR_OK)
1984 return retval;
1985
1986 if (!enabled) {
1987 if (target->working_area_phys_spec) {
1988 LOG_DEBUG("MMU disabled, using physical "
1989 "address for working memory " TARGET_ADDR_FMT,
1990 target->working_area_phys);
1991 target->working_area = target->working_area_phys;
1992 } else {
1993 LOG_ERROR("No working memory available. "
1994 "Specify -work-area-phys to target.");
1995 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1996 }
1997 } else {
1998 if (target->working_area_virt_spec) {
1999 LOG_DEBUG("MMU enabled, using virtual "
2000 "address for working memory " TARGET_ADDR_FMT,
2001 target->working_area_virt);
2002 target->working_area = target->working_area_virt;
2003 } else {
2004 LOG_ERROR("No working memory available. "
2005 "Specify -work-area-virt to target.");
2006 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2007 }
2008 }
2009
2010 /* Set up initial working area on first call */
2011 struct working_area *new_wa = malloc(sizeof(*new_wa));
2012 if (new_wa) {
2013 new_wa->next = NULL;
2014 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2015 new_wa->address = target->working_area;
2016 new_wa->backup = NULL;
2017 new_wa->user = NULL;
2018 new_wa->free = true;
2019 }
2020
2021 target->working_areas = new_wa;
2022 }
2023
2024 /* only allocate multiples of 4 bytes */
2025 if (size % 4)
2026 size = (size + 3) & (~3UL);
2027
2028 struct working_area *c = target->working_areas;
2029
2030 /* Find the first large enough working area */
2031 while (c) {
2032 if (c->free && c->size >= size)
2033 break;
2034 c = c->next;
2035 }
2036
2037 if (c == NULL)
2038 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2039
2040 /* Split the working area into the requested size */
2041 target_split_working_area(c, size);
2042
2043 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2044 size, c->address);
2045
2046 if (target->backup_working_area) {
2047 if (c->backup == NULL) {
2048 c->backup = malloc(c->size);
2049 if (c->backup == NULL)
2050 return ERROR_FAIL;
2051 }
2052
2053 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2054 if (retval != ERROR_OK)
2055 return retval;
2056 }
2057
2058 /* mark as used, and return the new (reused) area */
2059 c->free = false;
2060 *area = c;
2061
2062 /* user pointer */
2063 c->user = area;
2064
2065 print_wa_layout(target);
2066
2067 return ERROR_OK;
2068 }
2069
2070 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2071 {
2072 int retval;
2073
2074 retval = target_alloc_working_area_try(target, size, area);
2075 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2076 LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
2077 return retval;
2078
2079 }
2080
2081 static int target_restore_working_area(struct target *target, struct working_area *area)
2082 {
2083 int retval = ERROR_OK;
2084
2085 if (target->backup_working_area && area->backup != NULL) {
2086 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2087 if (retval != ERROR_OK)
2088 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2089 area->size, area->address);
2090 }
2091
2092 return retval;
2093 }
2094
2095 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2096 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2097 {
2098 int retval = ERROR_OK;
2099
2100 if (area->free)
2101 return retval;
2102
2103 if (restore) {
2104 retval = target_restore_working_area(target, area);
2105 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2106 if (retval != ERROR_OK)
2107 return retval;
2108 }
2109
2110 area->free = true;
2111
2112 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2113 area->size, area->address);
2114
2115 /* mark user pointer invalid */
2116 /* TODO: Is this really safe? It points to some previous caller's memory.
2117 * How could we know that the area pointer is still in that place and not
2118 * some other vital data? What's the purpose of this, anyway? */
2119 *area->user = NULL;
2120 area->user = NULL;
2121
2122 target_merge_working_areas(target);
2123
2124 print_wa_layout(target);
2125
2126 return retval;
2127 }
2128
2129 int target_free_working_area(struct target *target, struct working_area *area)
2130 {
2131 return target_free_working_area_restore(target, area, 1);
2132 }
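
/* Typical usage, e.g. when downloading a helper algorithm to the target
 * (sketch only; the 256 byte size and the code[] buffer are hypothetical):
 *
 *   struct working_area *wa = NULL;
 *   int retval = target_alloc_working_area(target, 256, &wa);
 *   if (retval == ERROR_OK) {
 *       retval = target_write_buffer(target, wa->address, 256, code);
 *       // ... run the algorithm ...
 *       target_free_working_area(target, wa);
 *   }
 *
 * If the target was configured with -work-area-backup 1, the original
 * memory contents are saved on allocation and restored on free.
 */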
2133
2134 /* free resources and restore memory, if restoring memory fails,
2135 * free up resources anyway
2136 */
2137 static void target_free_all_working_areas_restore(struct target *target, int restore)
2138 {
2139 struct working_area *c = target->working_areas;
2140
2141 LOG_DEBUG("freeing all working areas");
2142
2143 /* Loop through all areas, restoring the allocated ones and marking them as free */
2144 while (c) {
2145 if (!c->free) {
2146 if (restore)
2147 target_restore_working_area(target, c);
2148 c->free = true;
2149 *c->user = NULL; /* Same as above */
2150 c->user = NULL;
2151 }
2152 c = c->next;
2153 }
2154
2155 /* Run a merge pass to combine all areas into one */
2156 target_merge_working_areas(target);
2157
2158 print_wa_layout(target);
2159 }
2160
2161 void target_free_all_working_areas(struct target *target)
2162 {
2163 target_free_all_working_areas_restore(target, 1);
2164
2165 /* Now we have none or only one working area marked as free */
2166 if (target->working_areas) {
2167 /* Free the last one to allow on-the-fly moving and resizing */
2168 free(target->working_areas->backup);
2169 free(target->working_areas);
2170 target->working_areas = NULL;
2171 }
2172 }
2173
2174 /* Find the largest number of bytes that can be allocated */
2175 uint32_t target_get_working_area_avail(struct target *target)
2176 {
2177 struct working_area *c = target->working_areas;
2178 uint32_t max_size = 0;
2179
2180 if (c == NULL)
2181 return target->working_area_size;
2182
2183 while (c) {
2184 if (c->free && max_size < c->size)
2185 max_size = c->size;
2186
2187 c = c->next;
2188 }
2189
2190 return max_size;
2191 }
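
/* This is handy for sizing a buffer to whatever space is left, for
 * instance (sketch only; the 64 byte minimum is arbitrary):
 *
 *   uint32_t avail = target_get_working_area_avail(target);
 *   struct working_area *buf_wa = NULL;
 *   if (avail >= 64 &&
 *           target_alloc_working_area_try(target, avail, &buf_wa) == ERROR_OK) {
 *       // ... use buf_wa->address and buf_wa->size ...
 *       target_free_working_area(target, buf_wa);
 *   }
 */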
2192
2193 static void target_destroy(struct target *target)
2194 {
2195 if (target->type->deinit_target)
2196 target->type->deinit_target(target);
2197
2198 free(target->semihosting);
2199
2200 jtag_unregister_event_callback(jtag_enable_callback, target);
2201
2202 struct target_event_action *teap = target->event_action;
2203 while (teap) {
2204 struct target_event_action *next = teap->next;
2205 Jim_DecrRefCount(teap->interp, teap->body);
2206 free(teap);
2207 teap = next;
2208 }
2209
2210 target_free_all_working_areas(target);
2211
2212 /* release the target's SMP list */
2213 if (target->smp) {
2214 struct target_list *head = target->head;
2215 while (head != NULL) {
2216 struct target_list *pos = head->next;
2217 head->target->smp = 0;
2218 free(head);
2219 head = pos;
2220 }
2221 target->smp = 0;
2222 }
2223
2224 rtos_destroy(target);
2225
2226 free(target->gdb_port_override);
2227 free(target->type);
2228 free(target->trace_info);
2229 free(target->fileio_info);
2230 free(target->cmd_name);
2231 free(target);
2232 }
2233
2234 void target_quit(void)
2235 {
2236 struct target_event_callback *pe = target_event_callbacks;
2237 while (pe) {
2238 struct target_event_callback *t = pe->next;
2239 free(pe);
2240 pe = t;
2241 }
2242 target_event_callbacks = NULL;
2243
2244 struct target_timer_callback *pt = target_timer_callbacks;
2245 while (pt) {
2246 struct target_timer_callback *t = pt->next;
2247 free(pt);
2248 pt = t;
2249 }
2250 target_timer_callbacks = NULL;
2251
2252 for (struct target *target = all_targets; target;) {
2253 struct target *tmp;
2254
2255 tmp = target->next;
2256 target_destroy(target);
2257 target = tmp;
2258 }
2259
2260 all_targets = NULL;
2261 }
2262
2263 int target_arch_state(struct target *target)
2264 {
2265 int retval;
2266 if (target == NULL) {
2267 LOG_WARNING("No target has been configured");
2268 return ERROR_OK;
2269 }
2270
2271 if (target->state != TARGET_HALTED)
2272 return ERROR_OK;
2273
2274 retval = target->type->arch_state(target);
2275 return retval;
2276 }
2277
2278 static int target_get_gdb_fileio_info_default(struct target *target,
2279 struct gdb_fileio_info *fileio_info)
2280 {
2281 /* If the target does not support semihosting functions, it has
2282 no need to provide a .get_gdb_fileio_info callback.
2283 It just returns ERROR_FAIL and gdb_server will report "Txx"
2284 as target halted every time. */
2285 return ERROR_FAIL;
2286 }
2287
2288 static int target_gdb_fileio_end_default(struct target *target,
2289 int retcode, int fileio_errno, bool ctrl_c)
2290 {
2291 return ERROR_OK;
2292 }
2293
2294 int target_profiling_default(struct target *target, uint32_t *samples,
2295 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2296 {
2297 struct timeval timeout, now;
2298
2299 gettimeofday(&timeout, NULL);
2300 timeval_add_time(&timeout, seconds, 0);
2301
2302 LOG_INFO("Starting profiling. Halting and resuming the"
2303 " target as often as we can...");
2304
2305 uint32_t sample_count = 0;
2306 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2307 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
2308
2309 int retval = ERROR_OK;
2310 for (;;) {
2311 target_poll(target);
2312 if (target->state == TARGET_HALTED) {
2313 uint32_t t = buf_get_u32(reg->value, 0, 32);
2314 samples[sample_count++] = t;
2315 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2316 retval = target_resume(target, 1, 0, 0, 0);
2317 target_poll(target);
2318 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2319 } else if (target->state == TARGET_RUNNING) {
2320 /* We want to quickly sample the PC. */
2321 retval = target_halt(target);
2322 } else {
2323 LOG_INFO("Target not halted or running");
2324 retval = ERROR_OK;
2325 break;
2326 }
2327
2328 if (retval != ERROR_OK)
2329 break;
2330
2331 gettimeofday(&now, NULL);
2332 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2333 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2334 break;
2335 }
2336 }
2337
2338 *num_samples = sample_count;
2339 return retval;
2340 }
2341
2342 /* A single aligned halfword or word is guaranteed to use a 16 or 32 bit
2343 * access respectively; otherwise the data is handled as quickly as
2344 * possible
2345 */
2346 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2347 {
2348 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2349 size, address);
2350
2351 if (!target_was_examined(target)) {
2352 LOG_ERROR("Target not examined yet");
2353 return ERROR_FAIL;
2354 }
2355
2356 if (size == 0)
2357 return ERROR_OK;
2358
2359 if ((address + size - 1) < address) {
2360 /* GDB can request this when e.g. PC is 0xfffffffc */
2361 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2362 address,
2363 size);
2364 return ERROR_FAIL;
2365 }
2366
2367 return target->type->write_buffer(target, address, size, buffer);
2368 }
2369
2370 static int target_write_buffer_default(struct target *target,
2371 target_addr_t address, uint32_t count, const uint8_t *buffer)
2372 {
2373 uint32_t size;
2374
2375 /* Align up to the maximum access size of 4 bytes. The loop condition makes
2376 * sure the pass with the next larger access size still has something to do. */
2377 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2378 if (address & size) {
2379 int retval = target_write_memory(target, address, size, 1, buffer);
2380 if (retval != ERROR_OK)
2381 return retval;
2382 address += size;
2383 count -= size;
2384 buffer += size;
2385 }
2386 }
2387
2388 /* Write the data with as large an access size as possible. */
2389 for (; size > 0; size /= 2) {
2390 uint32_t aligned = count - count % size;
2391 if (aligned > 0) {
2392 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2393 if (retval != ERROR_OK)
2394 return retval;
2395 address += aligned;
2396 count -= aligned;
2397 buffer += aligned;
2398 }
2399 }
2400
2401 return ERROR_OK;
2402 }
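
/* For example, a write of 10 bytes to the unaligned address 0x20000001
 * (an arbitrary address, purely for illustration) is split into:
 * a 1 byte access at 0x20000001, a 2 byte access at 0x20000002,
 * one 4 byte access at 0x20000004, then a 2 byte access at 0x20000008
 * and a final 1 byte access at 0x2000000a.
 */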
2403
2404 /* A single aligned halfword or word is guaranteed to use a 16 or 32 bit
2405 * access respectively; otherwise the data is handled as quickly as
2406 * possible
2407 */
2408 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2409 {
2410 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2411 size, address);
2412
2413 if (!target_was_examined(target)) {
2414 LOG_ERROR("Target not examined yet");
2415 return ERROR_FAIL;
2416 }
2417
2418 if (size == 0)
2419 return ERROR_OK;
2420
2421 if ((address + size - 1) < address) {
2422 /* GDB can request this when e.g. PC is 0xfffffffc */
2423 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2424 address,
2425 size);
2426 return ERROR_FAIL;
2427 }
2428
2429 return target->type->read_buffer(target, address, size, buffer);
2430 }
2431
2432 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2433 {
2434 uint32_t size;
2435
2436 /* Align up to the maximum access size of 4 bytes. The loop condition makes
2437 * sure the pass with the next larger access size still has something to do. */
2438 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2439 if (address & size) {
2440 int retval = target_read_memory(target, address, size, 1, buffer);
2441 if (retval != ERROR_OK)
2442 return retval;
2443 address += size;
2444 count -= size;
2445 buffer += size;
2446 }
2447 }
2448
2449 /* Read the data with as large an access size as possible. */
2450 for (; size > 0; size /= 2) {
2451 uint32_t aligned = count - count % size;
2452 if (aligned > 0) {
2453 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2454 if (retval != ERROR_OK)
2455 return retval;
2456 address += aligned;
2457 count -= aligned;
2458 buffer += aligned;
2459 }
2460 }
2461
2462 return ERROR_OK;
2463 }
2464
2465 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2466 {
2467 uint8_t *buffer;
2468 int retval;
2469 uint32_t i;
2470 uint32_t checksum = 0;
2471 if (!target_was_examined(target)) {
2472 LOG_ERROR("Target not examined yet");
2473 return ERROR_FAIL;
2474 }
2475
2476 retval = target->type->checksum_memory(target, address, size, &checksum);
2477 if (retval != ERROR_OK) {
2478 buffer = malloc(size);
2479 if (buffer == NULL) {
2480 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2481 return ERROR_COMMAND_SYNTAX_ERROR;
2482 }
2483 retval = target_read_buffer(target, address, size, buffer);
2484 if (retval != ERROR_OK) {
2485 free(buffer);
2486 return retval;
2487 }
2488
2489 /* convert to target endianness */
2490 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2491 uint32_t target_data;
2492 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2493 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2494 }
2495
2496 retval = image_calculate_checksum(buffer, size, &checksum);
2497 free(buffer);
2498 }
2499
2500 *crc = checksum;
2501
2502 return retval;
2503 }
2504
2505 int target_blank_check_memory(struct target *target,
2506 struct target_memory_check_block *blocks, int num_blocks,
2507 uint8_t erased_value)
2508 {
2509 if (!target_was_examined(target)) {
2510 LOG_ERROR("Target not examined yet");
2511 return ERROR_FAIL;
2512 }
2513
2514 if (target->type->blank_check_memory == NULL)
2515 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2516
2517 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2518 }
2519
2520 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2521 {
2522 uint8_t value_buf[8];
2523 if (!target_was_examined(target)) {
2524 LOG_ERROR("Target not examined yet");
2525 return ERROR_FAIL;
2526 }
2527
2528 int retval = target_read_memory(target, address, 8, 1, value_buf);
2529
2530 if (retval == ERROR_OK) {
2531 *value = target_buffer_get_u64(target, value_buf);
2532 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2533 address,
2534 *value);
2535 } else {
2536 *value = 0x0;
2537 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2538 address);
2539 }
2540
2541 return retval;
2542 }
2543
2544 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2545 {
2546 uint8_t value_buf[4];
2547 if (!target_was_examined(target)) {
2548 LOG_ERROR("Target not examined yet");
2549 return ERROR_FAIL;
2550 }
2551
2552 int retval = target_read_memory(target, address, 4, 1, value_buf);
2553
2554 if (retval == ERROR_OK) {
2555 *value = target_buffer_get_u32(target, value_buf);
2556 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2557 address,
2558 *value);
2559 } else {
2560 *value = 0x0;
2561 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2562 address);
2563 }
2564
2565 return retval;
2566 }
2567
2568 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2569 {
2570 uint8_t value_buf[2];
2571 if (!target_was_examined(target)) {
2572 LOG_ERROR("Target not examined yet");
2573 return ERROR_FAIL;
2574 }
2575
2576 int retval = target_read_memory(target, address, 2, 1, value_buf);
2577
2578 if (retval == ERROR_OK) {
2579 *value = target_buffer_get_u16(target, value_buf);
2580 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2581 address,
2582 *value);
2583 } else {
2584 *value = 0x0;
2585 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2586 address);
2587 }
2588
2589 return retval;
2590 }
2591
2592 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2593 {
2594 if (!target_was_examined(target)) {
2595 LOG_ERROR("Target not examined yet");
2596 return ERROR_FAIL;
2597 }
2598
2599 int retval = target_read_memory(target, address, 1, 1, value);
2600
2601 if (retval == ERROR_OK) {
2602 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2603 address,
2604 *value);
2605 } else {
2606 *value = 0x0;
2607 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2608 address);
2609 }
2610
2611 return retval;
2612 }
2613
2614 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2615 {
2616 int retval;
2617 uint8_t value_buf[8];
2618 if (!target_was_examined(target)) {
2619 LOG_ERROR("Target not examined yet");
2620 return ERROR_FAIL;
2621 }
2622
2623 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2624 address,
2625 value);
2626
2627 target_buffer_set_u64(target, value_buf, value);
2628 retval = target_write_memory(target, address, 8, 1, value_buf);
2629 if (retval != ERROR_OK)
2630 LOG_DEBUG("failed: %i", retval);
2631
2632 return retval;
2633 }
2634
2635 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2636 {
2637 int retval;
2638 uint8_t value_buf[4];
2639 if (!target_was_examined(target)) {
2640 LOG_ERROR("Target not examined yet");
2641 return ERROR_FAIL;
2642 }
2643
2644 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2645 address,
2646 value);
2647
2648 target_buffer_set_u32(target, value_buf, value);
2649 retval = target_write_memory(target, address, 4, 1, value_buf);
2650 if (retval != ERROR_OK)
2651 LOG_DEBUG("failed: %i", retval);
2652
2653 return retval;
2654 }
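
/* The read/write helpers above pair naturally for read-modify-write
 * sequences (sketch only; the address and bit mask are made-up values):
 *
 *   uint32_t val;
 *   int retval = target_read_u32(target, 0x40021000, &val);
 *   if (retval == ERROR_OK) {
 *       val |= (1u << 3);
 *       retval = target_write_u32(target, 0x40021000, val);
 *   }
 */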
2655
2656 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2657 {
2658 int retval;
2659 uint8_t value_buf[2];
2660 if (!target_was_examined(target)) {
2661 LOG_ERROR("Target not examined yet");
2662 return ERROR_FAIL;
2663 }
2664
2665 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2666 address,
2667 value);
2668
2669 target_buffer_set_u16(target, value_buf, value);
2670 retval = target_write_memory(target, address, 2, 1, value_buf);
2671 if (retval != ERROR_OK)
2672 LOG_DEBUG("failed: %i", retval);
2673
2674 return retval;
2675 }
2676
2677 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2678 {
2679 int retval;
2680 if (!target_was_examined(target)) {
2681 LOG_ERROR("Target not examined yet");
2682 return ERROR_FAIL;
2683 }
2684
2685 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2686 address, value);
2687
2688 retval = target_write_memory(target, address, 1, 1, &value);
2689 if (retval != ERROR_OK)
2690 LOG_DEBUG("failed: %i", retval);
2691
2692 return retval;
2693 }
2694
2695 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2696 {
2697 int retval;
2698 uint8_t value_buf[8];
2699 if (!target_was_examined(target)) {
2700 LOG_ERROR("Target not examined yet");
2701 return ERROR_FAIL;
2702 }
2703
2704 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2705 address,
2706 value);
2707
2708 target_buffer_set_u64(target, value_buf, value);
2709 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2710 if (retval != ERROR_OK)
2711 LOG_DEBUG("failed: %i", retval);
2712
2713 return retval;
2714 }
2715
2716 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2717 {
2718 int retval;
2719 uint8_t value_buf[4];
2720 if (!target_was_examined(target)) {
2721 LOG_ERROR("Target not examined yet");
2722 return ERROR_FAIL;
2723 }
2724
2725 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2726 address,
2727 value);
2728
2729 target_buffer_set_u32(target, value_buf, value);
2730 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2731 if (retval != ERROR_OK)
2732 LOG_DEBUG("failed: %i", retval);
2733
2734 return retval;
2735 }
2736
2737 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2738 {
2739 int retval;
2740 uint8_t value_buf[2];
2741 if (!target_was_examined(target)) {
2742 LOG_ERROR("Target not examined yet");
2743 return ERROR_FAIL;
2744 }
2745
2746 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2747 address,
2748 value);
2749
2750 target_buffer_set_u16(target, value_buf, value);
2751 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2752 if (retval != ERROR_OK)
2753 LOG_DEBUG("failed: %i", retval);
2754
2755 return retval;
2756 }
2757
2758 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2759 {
2760 int retval;
2761 if (!target_was_examined(target)) {
2762 LOG_ERROR("Target not examined yet");
2763 return ERROR_FAIL;
2764 }
2765
2766 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2767 address, value);
2768
2769 retval = target_write_phys_memory(target, address, 1, 1, &value);
2770 if (retval != ERROR_OK)
2771 LOG_DEBUG("failed: %i", retval);
2772
2773 return retval;
2774 }
2775
2776 static int find_target(struct command_invocation *cmd, const char *name)
2777 {
2778 struct target *target = get_target(name);
2779 if (target == NULL) {
2780 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2781 return ERROR_FAIL;
2782 }
2783 if (!target->tap->enabled) {
2784 command_print(cmd, "Target: TAP %s is disabled, "
2785 "can't be the current target\n",
2786 target->tap->dotted_name);
2787 return ERROR_FAIL;
2788 }
2789
2790 cmd->ctx->current_target = target;
2791 if (cmd->ctx->current_target_override)
2792 cmd->ctx->current_target_override = target;
2793
2794 return ERROR_OK;
2795 }
2796
2797
2798 COMMAND_HANDLER(handle_targets_command)
2799 {
2800 int retval = ERROR_OK;
2801 if (CMD_ARGC == 1) {
2802 retval = find_target(CMD, CMD_ARGV[0]);
2803 if (retval == ERROR_OK) {
2804 /* we're done! */
2805 return retval;
2806 }
2807 }
2808
2809 struct target *target = all_targets;
2810 command_print(CMD, " TargetName Type Endian TapName State ");
2811 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2812 while (target) {
2813 const char *state;
2814 char marker = ' ';
2815
2816 if (target->tap->enabled)
2817 state = target_state_name(target);
2818 else
2819 state = "tap-disabled";
2820
2821 if (CMD_CTX->current_target == target)
2822 marker = '*';
2823
2824 /* keep columns lined up to match the headers above */
2825 command_print(CMD,
2826 "%2d%c %-18s %-10s %-6s %-18s %s",
2827 target->target_number,
2828 marker,
2829 target_name(target),
2830 target_type_name(target),
2831 Jim_Nvp_value2name_simple(nvp_target_endian,
2832 target->endianness)->name,
2833 target->tap->dotted_name,
2834 state);
2835 target = target->next;
2836 }
2837
2838 return retval;
2839 }
2840
2841 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2842
2843 static int powerDropout;
2844 static int srstAsserted;
2845
2846 static int runPowerRestore;
2847 static int runPowerDropout;
2848 static int runSrstAsserted;
2849 static int runSrstDeasserted;
2850
2851 static int sense_handler(void)
2852 {
2853 static int prevSrstAsserted;
2854 static int prevPowerdropout;
2855
2856 int retval = jtag_power_dropout(&powerDropout);
2857 if (retval != ERROR_OK)
2858 return retval;
2859
2860 int powerRestored;
2861 powerRestored = prevPowerdropout && !powerDropout;
2862 if (powerRestored)
2863 runPowerRestore = 1;
2864
2865 int64_t current = timeval_ms();
2866 static int64_t lastPower;
2867 bool waitMore = lastPower + 2000 > current;
2868 if (powerDropout && !waitMore) {
2869 runPowerDropout = 1;
2870 lastPower = current;
2871 }
2872
2873 retval = jtag_srst_asserted(&srstAsserted);
2874 if (retval != ERROR_OK)
2875 return retval;
2876
2877 int srstDeasserted;
2878 srstDeasserted = prevSrstAsserted && !srstAsserted;
2879
2880 static int64_t lastSrst;
2881 waitMore = lastSrst + 2000 > current;
2882 if (srstDeasserted && !waitMore) {
2883 runSrstDeasserted = 1;
2884 lastSrst = current;
2885 }
2886
2887 if (!prevSrstAsserted && srstAsserted)
2888 runSrstAsserted = 1;
2889
2890 prevSrstAsserted = srstAsserted;
2891 prevPowerdropout = powerDropout;
2892
2893 if (srstDeasserted || powerRestored) {
2894 /* Other than logging the event we can't do anything here.
2895 * Issuing a reset is a particularly bad idea as we might
2896 * be inside a reset already.
2897 */
2898 }
2899
2900 return ERROR_OK;
2901 }
2902
2903 /* process target state changes */
2904 static int handle_target(void *priv)
2905 {
2906 Jim_Interp *interp = (Jim_Interp *)priv;
2907 int retval = ERROR_OK;
2908
2909 if (!is_jtag_poll_safe()) {
2910 /* polling is disabled currently */
2911 return ERROR_OK;
2912 }
2913
2914 /* we do not want to recurse here... */
2915 static int recursive;
2916 if (!recursive) {
2917 recursive = 1;
2918 sense_handler();
2919 /* danger! running these procedures can trigger srst assertions and power dropouts.
2920 * We need to avoid an infinite loop/recursion here and we do that by
2921 * clearing the flags after running these events.
2922 */
2923 int did_something = 0;
2924 if (runSrstAsserted) {
2925 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2926 Jim_Eval(interp, "srst_asserted");
2927 did_something = 1;
2928 }
2929 if (runSrstDeasserted) {
2930 Jim_Eval(interp, "srst_deasserted");
2931 did_something = 1;
2932 }
2933 if (runPowerDropout) {
2934 LOG_INFO("Power dropout detected, running power_dropout proc.");
2935 Jim_Eval(interp, "power_dropout");
2936 did_something = 1;
2937 }
2938 if (runPowerRestore) {
2939 Jim_Eval(interp, "power_restore");
2940 did_something = 1;
2941 }
2942
2943 if (did_something) {
2944 /* clear detect flags */
2945 sense_handler();
2946 }
2947
2948 /* clear action flags */
2949
2950 runSrstAsserted = 0;
2951 runSrstDeasserted = 0;
2952 runPowerRestore = 0;
2953 runPowerDropout = 0;
2954
2955 recursive = 0;
2956 }
2957
2958 /* Poll targets for state changes unless that's globally disabled.
2959 * Skip targets that are currently disabled.
2960 */
2961 for (struct target *target = all_targets;
2962 is_jtag_poll_safe() && target;
2963 target = target->next) {
2964
2965 if (!target_was_examined(target))
2966 continue;
2967
2968 if (!target->tap->enabled)
2969 continue;
2970
2971 if (target->backoff.times > target->backoff.count) {
2972 /* do not poll this time as we failed previously */
2973 target->backoff.count++;
2974 continue;
2975 }
2976 target->backoff.count = 0;
2977
2978 /* only poll target if we've got power and srst isn't asserted */
2979 if (!powerDropout && !srstAsserted) {
2980 /* polling may fail silently until the target has been examined */
2981 retval = target_poll(target);
2982 if (retval != ERROR_OK) {
2983 /* 100ms polling interval. Increase the interval between polls up to 5000ms */
2984 if (target->backoff.times * polling_interval < 5000) {
2985 target->backoff.times *= 2;
2986 target->backoff.times++;
2987 }
2988
2989 /* Tell GDB to halt the debugger. This allows the user to
2990 * run monitor commands to handle the situation.
2991 */
2992 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2993 }
2994 if (target->backoff.times > 0) {
2995 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
2996 target_reset_examined(target);
2997 retval = target_examine_one(target);
2998 /* Target examination could have failed due to unstable connection,
2999 * but we set the examined flag anyway to repoll it later */
3000 if (retval != ERROR_OK) {
3001 target->examined = true;
3002 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3003 target->backoff.times * polling_interval);
3004 return retval;
3005 }
3006 }
3007
3008 /* Since we succeeded, we reset backoff count */
3009 target->backoff.times = 0;
3010 }
3011 }
3012
3013 return retval;
3014 }
3015
3016 COMMAND_HANDLER(handle_reg_command)
3017 {
3018 struct target *target;
3019 struct reg *reg = NULL;
3020 unsigned count = 0;
3021 char *value;
3022
3023 LOG_DEBUG("-");
3024
3025 target = get_current_target(CMD_CTX);
3026
3027 /* list all available registers for the current target */
3028 if (CMD_ARGC == 0) {
3029 struct reg_cache *cache = target->reg_cache;
3030
3031 count = 0;
3032 while (cache) {
3033 unsigned i;
3034
3035 command_print(CMD, "===== %s", cache->name);
3036
3037 for (i = 0, reg = cache->reg_list;
3038 i < cache->num_regs;
3039 i++, reg++, count++) {
3040 if (reg->exist == false)
3041 continue;
3042 /* only print cached values if they are valid */
3043 if (reg->valid) {
3044 value = buf_to_hex_str(reg->value,
3045 reg->size);
3046 command_print(CMD,
3047 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3048 count, reg->name,
3049 reg->size, value,
3050 reg->dirty
3051 ? " (dirty)"
3052 : "");
3053 free(value);
3054 } else {
3055 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3056 count, reg->name,
3057 reg->size);
3058 }
3059 }
3060 cache = cache->next;
3061 }
3062
3063 return ERROR_OK;
3064 }
3065
3066 /* access a single register by its ordinal number */
3067 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3068 unsigned num;
3069 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3070
3071 struct reg_cache *cache = target->reg_cache;
3072 count = 0;
3073 while (cache) {
3074 unsigned i;
3075 for (i = 0; i < cache->num_regs; i++) {
3076 if (count++ == num) {
3077 reg = &cache->reg_list[i];
3078 break;
3079 }
3080 }
3081 if (reg)
3082 break;
3083 cache = cache->next;
3084 }
3085
3086 if (!reg) {
3087 command_print(CMD, "%i is out of bounds, the current target "
3088 "has only %i registers (0 - %i)", num, count, count - 1);
3089 return ERROR_OK;
3090 }
3091 } else {
3092 /* access a single register by its name */
3093 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
3094
3095 if (!reg)
3096 goto not_found;
3097 }
3098
3099 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
3100
3101 if (!reg->exist)
3102 goto not_found;
3103
3104 /* display a register */
3105 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3106 && (CMD_ARGV[1][0] <= '9')))) {
3107 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3108 reg->valid = 0;
3109
3110 if (reg->valid == 0)
3111 reg->type->get(reg);
3112 value = buf_to_hex_str(reg->value, reg->size);
3113 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3114 free(value);
3115 return ERROR_OK;
3116 }
3117
3118 /* set register value */
3119 if (CMD_ARGC == 2) {
3120 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3121 if (buf == NULL)
3122 return ERROR_FAIL;
3123 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3124
3125 reg->type->set(reg, buf);
3126
3127 value = buf_to_hex_str(reg->value, reg->size);
3128 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3129 free(value);
3130
3131 free(buf);
3132
3133 return ERROR_OK;
3134 }
3135
3136 return ERROR_COMMAND_SYNTAX_ERROR;
3137
3138 not_found:
3139 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3140 return ERROR_OK;
3141 }
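
/* Examples of the resulting command syntax (register names depend on the
 * target; "pc" and the value below are just illustrations):
 *
 *   reg                  # list all registers of the current target
 *   reg pc               # display a register by name
 *   reg 0                # display a register by its ordinal number
 *   reg pc force         # re-read the register from the target
 *   reg pc 0x20000000    # set a register
 */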
3142
3143 COMMAND_HANDLER(handle_poll_command)
3144 {
3145 int retval = ERROR_OK;
3146 struct target *target = get_current_target(CMD_CTX);
3147
3148 if (CMD_ARGC == 0) {
3149 command_print(CMD, "background polling: %s",
3150 jtag_poll_get_enabled() ? "on" : "off");
3151 command_print(CMD, "TAP: %s (%s)",
3152 target->tap->dotted_name,
3153 target->tap->enabled ? "enabled" : "disabled");
3154 if (!target->tap->enabled)
3155 return ERROR_OK;
3156 retval = target_poll(target);
3157 if (retval != ERROR_OK)
3158 return retval;
3159 retval = target_arch_state(target);
3160 if (retval != ERROR_OK)
3161 return retval;
3162 } else if (CMD_ARGC == 1) {
3163 bool enable;
3164 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3165 jtag_poll_set_enabled(enable);
3166 } else
3167 return ERROR_COMMAND_SYNTAX_ERROR;
3168
3169 return retval;
3170 }
3171
3172 COMMAND_HANDLER(handle_wait_halt_command)
3173 {
3174 if (CMD_ARGC > 1)
3175 return ERROR_COMMAND_SYNTAX_ERROR;
3176
3177 unsigned ms = DEFAULT_HALT_TIMEOUT;
3178 if (1 == CMD_ARGC) {
3179 int retval = parse_uint(CMD_ARGV[0], &ms);
3180 if (ERROR_OK != retval)
3181 return ERROR_COMMAND_SYNTAX_ERROR;
3182 }
3183
3184 struct target *target = get_current_target(CMD_CTX);
3185 return target_wait_state(target, TARGET_HALTED, ms);
3186 }
3187
3188 /* wait for target state to change. The trick here is to have a low
3189 * latency for short waits and not to suck up all the CPU time
3190 * on longer waits.
3191 *
3192 * After 500ms, keep_alive() is invoked
3193 */
3194 int target_wait_state(struct target *target, enum target_state state, int ms)
3195 {
3196 int retval;
3197 int64_t then = 0, cur;
3198 bool once = true;
3199
3200 for (;;) {
3201 retval = target_poll(target);
3202 if (retval != ERROR_OK)
3203 return retval;
3204 if (target->state == state)
3205 break;
3206 cur = timeval_ms();
3207 if (once) {
3208 once = false;
3209 then = timeval_ms();
3210 LOG_DEBUG("waiting for target %s...",
3211 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
3212 }
3213
3214 if (cur-then > 500)
3215 keep_alive();
3216
3217 if ((cur-then) > ms) {
3218 LOG_ERROR("timed out while waiting for target %s",
3219 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
3220 return ERROR_FAIL;
3221 }
3222 }
3223
3224 return ERROR_OK;
3225 }
3226
3227 COMMAND_HANDLER(handle_halt_command)
3228 {
3229 LOG_DEBUG("-");
3230
3231 struct target *target = get_current_target(CMD_CTX);
3232
3233 target->verbose_halt_msg = true;
3234
3235 int retval = target_halt(target);
3236 if (ERROR_OK != retval)
3237 return retval;
3238
3239 if (CMD_ARGC == 1) {
3240 unsigned wait_local;
3241 retval = parse_uint(CMD_ARGV[0], &wait_local);
3242 if (ERROR_OK != retval)
3243 return ERROR_COMMAND_SYNTAX_ERROR;
3244 if (!wait_local)
3245 return ERROR_OK;
3246 }
3247
3248 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3249 }
3250
3251 COMMAND_HANDLER(handle_soft_reset_halt_command)
3252 {
3253 struct target *target = get_current_target(CMD_CTX);
3254
3255 LOG_USER("requesting target halt and executing a soft reset");
3256
3257 target_soft_reset_halt(target);
3258
3259 return ERROR_OK;
3260 }
3261
3262 COMMAND_HANDLER(handle_reset_command)
3263 {
3264 if (CMD_ARGC > 1)
3265 return ERROR_COMMAND_SYNTAX_ERROR;
3266
3267 enum target_reset_mode reset_mode = RESET_RUN;
3268 if (CMD_ARGC == 1) {
3269 const Jim_Nvp *n;
3270 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3271 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3272 return ERROR_COMMAND_SYNTAX_ERROR;
3273 reset_mode = n->value;
3274 }
3275
3276 /* reset *all* targets */
3277 return target_process_reset(CMD, reset_mode);
3278 }
3279
3280
3281 COMMAND_HANDLER(handle_resume_command)
3282 {
3283 int current = 1;
3284 if (CMD_ARGC > 1)
3285 return ERROR_COMMAND_SYNTAX_ERROR;
3286
3287 struct target *target = get_current_target(CMD_CTX);
3288
3289 /* with no CMD_ARGV, resume from current pc, addr = 0,
3290 * with one argument, addr = CMD_ARGV[0],
3291 * handle breakpoints, not debugging */
3292 target_addr_t addr = 0;
3293 if (CMD_ARGC == 1) {
3294 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3295 current = 0;
3296 }
3297
3298 return target_resume(target, current, addr, 1, 0);
3299 }
3300
3301 COMMAND_HANDLER(handle_step_command)
3302 {
3303 if (CMD_ARGC > 1)
3304 return ERROR_COMMAND_SYNTAX_ERROR;
3305
3306 LOG_DEBUG("-");
3307
3308 /* with no CMD_ARGV, step from current pc, addr = 0,
3309 * with one argument, addr = CMD_ARGV[0],
3310 * handle breakpoints, debugging */
3311 target_addr_t addr = 0;
3312 int current_pc = 1;
3313 if (CMD_ARGC == 1) {
3314 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3315 current_pc = 0;
3316 }
3317
3318 struct target *target = get_current_target(CMD_CTX);
3319
3320 return target_step(target, current_pc, addr, 1);
3321 }
3322
3323 void target_handle_md_output(struct command_invocation *cmd,
3324 struct target *target, target_addr_t address, unsigned size,
3325 unsigned count, const uint8_t *buffer)
3326 {
3327 const unsigned line_bytecnt = 32;
3328 unsigned line_modulo = line_bytecnt / size;
3329
3330 char output[line_bytecnt * 4 + 1];
3331 unsigned output_len = 0;
3332
3333 const char *value_fmt;
3334 switch (size) {
3335 case 8:
3336 value_fmt = "%16.16"PRIx64" ";
3337 break;
3338 case 4:
3339 value_fmt = "%8.8"PRIx64" ";
3340 break;
3341 case 2:
3342 value_fmt = "%4.4"PRIx64" ";
3343 break;
3344 case 1:
3345 value_fmt = "%2.2"PRIx64" ";
3346 break;
3347 default:
3348 /* "can't happen", caller checked */
3349 LOG_ERROR("invalid memory read size: %u", size);
3350 return;
3351 }
3352
3353 for (unsigned i = 0; i < count; i++) {
3354 if (i % line_modulo == 0) {
3355 output_len += snprintf(output + output_len,
3356 sizeof(output) - output_len,
3357 TARGET_ADDR_FMT ": ",
3358 (address + (i * size)));
3359 }
3360
3361 uint64_t value = 0;
3362 const uint8_t *value_ptr = buffer + i * size;
3363 switch (size) {
3364 case 8:
3365 value = target_buffer_get_u64(target, value_ptr);
3366 break;
3367 case 4:
3368 value = target_buffer_get_u32(target, value_ptr);
3369 break;
3370 case 2:
3371 value = target_buffer_get_u16(target, value_ptr);
3372 break;
3373 case 1:
3374 value = *value_ptr;
3375 }
3376 output_len += snprintf(output + output_len,
3377 sizeof(output) - output_len,
3378 value_fmt, value);
3379
3380 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3381 command_print(cmd, "%s", output);
3382 output_len = 0;
3383 }
3384 }
3385 }
3386
3387 COMMAND_HANDLER(handle_md_command)
3388 {
3389 if (CMD_ARGC < 1)
3390 return ERROR_COMMAND_SYNTAX_ERROR;
3391
3392 unsigned size = 0;
3393 switch (CMD_NAME[2]) {
3394 case 'd':
3395 size = 8;
3396 break;
3397 case 'w':
3398 size = 4;
3399 break;
3400 case 'h':
3401 size = 2;
3402 break;
3403 case 'b':
3404 size = 1;
3405 break;
3406 default:
3407 return ERROR_COMMAND_SYNTAX_ERROR;
3408 }
3409
3410 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3411 int (*fn)(struct target *target,
3412 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3413 if (physical) {
3414 CMD_ARGC--;
3415 CMD_ARGV++;
3416 fn = target_read_phys_memory;
3417 } else
3418 fn = target_read_memory;
3419 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3420 return ERROR_COMMAND_SYNTAX_ERROR;
3421
3422 target_addr_t address;
3423 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3424
3425 unsigned count = 1;
3426 if (CMD_ARGC == 2)
3427 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3428
3429 uint8_t *buffer = calloc(count, size);
3430 if (buffer == NULL) {
3431 LOG_ERROR("Failed to allocate md read buffer");
3432 return ERROR_FAIL;
3433 }
3434
3435 struct target *target = get_current_target(CMD_CTX);
3436 int retval = fn(target, address, size, count, buffer);
3437 if (ERROR_OK == retval)
3438 target_handle_md_output(CMD, target, address, size, count, buffer);
3439
3440 free(buffer);
3441
3442 return retval;
3443 }
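
/* Example invocations (the addresses and counts are arbitrary):
 *
 *   mdw 0x20000000 8        # dump 8 words via target_read_memory()
 *   mdh phys 0x20000000 4   # dump 4 halfwords via target_read_phys_memory()
 *
 * mdd, mdw, mdh and mdb select 64, 32, 16 and 8 bit accesses respectively,
 * as derived from CMD_NAME[2] above.
 */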
3444
3445 typedef int (*target_write_fn)(struct target *target,
3446 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3447
3448 static int target_fill_mem(struct target *target,
3449 target_addr_t address,
3450 target_write_fn fn,
3451 unsigned data_size,
3452 /* value */
3453 uint64_t b,
3454 /* count */
3455 unsigned c)
3456 {
3457 /* We have to write in reasonably large chunks to be able
3458 * to fill large memory areas with any sane speed */
3459 const unsigned chunk_size = 16384;
3460 uint8_t *target_buf = malloc(chunk_size * data_size);
3461 if (target_buf == NULL) {
3462 LOG_ERROR("Out of memory");
3463 return ERROR_FAIL;
3464 }
3465
3466 for (unsigned i = 0; i < chunk_size; i++) {
3467 switch (data_size) {
3468 case 8:
3469 target_buffer_set_u64(target, target_buf + i * data_size, b);
3470 break;
3471 case 4:
3472 target_buffer_set_u32(target, target_buf + i * data_size, b);
3473 break;
3474 case 2:
3475 target_buffer_set_u16(target, target_buf + i * data_size, b);
3476 break;
3477 case 1:
3478 target_buffer_set_u8(target, target_buf + i * data_size, b);
3479 break;
3480 default:
3481 exit(-1);
3482 }
3483 }
3484
3485 int retval = ERROR_OK;
3486
3487 for (unsigned x = 0; x < c; x += chunk_size) {
3488 unsigned current;
3489 current = c - x;
3490 if (current > chunk_size)
3491 current = chunk_size;
3492 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3493 if (retval != ERROR_OK)
3494 break;
3495 /* avoid GDB timeouts */
3496 keep_alive();
3497 }
3498 free(target_buf);
3499
3500 return retval;
3501 }
3502
3503
3504 COMMAND_HANDLER(handle_mw_command)
3505 {
3506 if (CMD_ARGC < 2)
3507 return ERROR_COMMAND_SYNTAX_ERROR;
3508 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3509 target_write_fn fn;
3510 if (physical) {
3511 CMD_ARGC--;
3512 CMD_ARGV++;
3513 fn = target_write_phys_memory;
3514 } else
3515 fn = target_write_memory;
3516 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3517 return ERROR_COMMAND_SYNTAX_ERROR;
3518
3519 target_addr_t address;
3520 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3521
3522 uint64_t value;
3523 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3524
3525 unsigned count = 1;
3526 if (CMD_ARGC == 3)
3527 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3528
3529 struct target *target = get_current_target(CMD_CTX);
3530 unsigned wordsize;
3531 switch (CMD_NAME[2]) {
3532 case 'd':
3533 wordsize = 8;
3534 break;
3535 case 'w':
3536 wordsize = 4;
3537 break;
3538 case 'h':
3539 wordsize = 2;
3540 break;
3541 case 'b':
3542 wordsize = 1;
3543 break;
3544 default:
3545 return ERROR_COMMAND_SYNTAX_ERROR;
3546 }
3547
3548 return target_fill_mem(target, address, fn, wordsize, value, count);
3549 }
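
/* Example invocations (addresses and values are arbitrary):
 *
 *   mww 0x20000000 0xdeadbeef      # write one 32 bit word
 *   mwb 0x20000000 0xff 16         # fill 16 bytes with 0xff
 *   mww phys 0x20000000 0x0 4      # write 4 words to physical memory
 *
 * Large counts are written through target_fill_mem() above in chunks of
 * 16384 elements to keep the transfer fast.
 */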
3550
3551 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3552 target_addr_t *min_address, target_addr_t *max_address)
3553 {
3554 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3555 return ERROR_COMMAND_SYNTAX_ERROR;
3556
3557 /* a base address isn't always necessary,
3558 * default to 0x0 (i.e. don't relocate) */
3559 if (CMD_ARGC >= 2) {
3560 target_addr_t addr;
3561 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3562 image->base_address = addr;
3563 image->base_address_set = true;
3564 } else
3565 image->base_address_set = false;
3566
3567 image->start_address_set = false;
3568
3569 if (CMD_ARGC >= 4)
3570 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3571 if (CMD_ARGC == 5) {
3572 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3573 /* use size (given) to find max (required) */
3574 *max_address += *min_address;
3575 }
3576
3577 if (*min_address > *max_address)
3578 return ERROR_COMMAND_SYNTAX_ERROR;
3579
3580 return ERROR_OK;
3581 }
3582
3583 COMMAND_HANDLER(handle_load_image_command)
3584 {
3585 uint8_t *buffer;
3586 size_t buf_cnt;
3587 uint32_t image_size;
3588 target_addr_t min_address = 0;
3589 target_addr_t max_address = -1;
3590 struct image image;
3591
3592 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3593 &image, &min_address, &max_address);
3594 if (ERROR_OK != retval)
3595 return retval;
3596
3597 struct target *target = get_current_target(CMD_CTX);
3598
3599 struct duration bench;
3600 duration_start(&bench);
3601
3602 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3603 return ERROR_FAIL;
3604
3605 image_size = 0x0;
3606 retval = ERROR_OK;
3607 for (unsigned int i = 0; i < image.num_sections; i++) {
3608 buffer = malloc(image.sections[i].size);
3609 if (buffer == NULL) {
3610 command_print(CMD,
3611 "error allocating buffer for section (%d bytes)",
3612 (int)(image.sections[i].size));
3613 retval = ERROR_FAIL;
3614 break;
3615 }
3616
3617 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3618 if (retval != ERROR_OK) {
3619 free(buffer);
3620 break;
3621 }
3622
3623 uint32_t offset = 0;
3624 uint32_t length = buf_cnt;
3625
3626 /* DANGER!!! beware of unsigned comparison here!!! */
3627
3628 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3629 (image.sections[i].base_address < max_address)) {
3630
3631 if (image.sections[i].base_address < min_address) {
3632 /* clip addresses below */
3633 offset += min_address-image.sections[i].base_address;
3634 length -= offset;
3635 }
3636
3637 if (image.sections[i].base_address + buf_cnt > max_address)
3638 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3639
3640 retval = target_write_buffer(target,
3641 image.sections[i].base_address + offset, length, buffer + offset);
3642 if (retval != ERROR_OK) {
3643 free(buffer);
3644 break;
3645 }
3646 image_size += length;
3647 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3648 (unsigned int)length,
3649 image.sections[i].base_address + offset);
3650 }
3651
3652 free(buffer);
3653 }
3654
3655 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3656 command_print(CMD, "downloaded %" PRIu32 " bytes "
3657 "in %fs (%0.3f KiB/s)", image_size,
3658 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3659 }
3660
3661 image_close(&image);
3662
3663 return retval;
3664
3665 }
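
/* The optional min_address/size arguments clip what gets written. For
 * example (all numbers are made up for illustration):
 *
 *   load_image firmware.bin 0x08000000 bin 0x08000800 0x800
 *
 * restricts the download to [0x08000800, 0x08001000). A section loaded at
 * 0x08000000 with size 0x1000 is then written with offset 0x800 and length
 * 0x800, i.e. only its second half reaches the target.
 */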
3666
3667 COMMAND_HANDLER(handle_dump_image_command)
3668 {
3669 struct fileio *fileio;
3670 uint8_t *buffer;
3671 int retval, retvaltemp;
3672 target_addr_t address, size;
3673 struct duration bench;
3674 struct target *target = get_current_target(CMD_CTX);
3675
3676 if (CMD_ARGC != 3)
3677 return ERROR_COMMAND_SYNTAX_ERROR;
3678
3679 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3680 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3681
3682 uint32_t buf_size = (size > 4096) ? 4096 : size;
3683 buffer = malloc(buf_size);
3684 if (!buffer)
3685 return ERROR_FAIL;
3686
3687 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3688 if (retval != ERROR_OK) {
3689 free(buffer);
3690 return retval;
3691 }
3692
3693 duration_start(&bench);
3694
3695 while (size > 0) {
3696 size_t size_written;
3697 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3698 retval = target_read_buffer(target, address, this_run_size, buffer);
3699 if (retval != ERROR_OK)
3700 break;
3701
3702 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3703 if (retval != ERROR_OK)
3704 break;
3705
3706 size -= this_run_size;
3707 address += this_run_size;
3708 }
3709
3710 free(buffer);
3711
3712 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3713 size_t filesize;
3714 retval = fileio_size(fileio, &filesize);
3715 if (retval != ERROR_OK)
3716 return retval;
3717 command_print(CMD,
3718 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3719 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3720 }
3721
3722 retvaltemp = fileio_close(fileio);
3723 if (retvaltemp != ERROR_OK)
3724 return retvaltemp;
3725
3726 return retval;
3727 }
3728
3729 enum verify_mode {
3730 IMAGE_TEST = 0,
3731 IMAGE_VERIFY = 1,
3732 IMAGE_CHECKSUM_ONLY = 2
3733 };
3734
3735 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3736 {
3737 uint8_t *buffer;
3738 size_t buf_cnt;
3739 uint32_t image_size;
3740 int retval;
3741 uint32_t checksum = 0;
3742 uint32_t mem_checksum = 0;
3743
3744 struct image image;
3745
3746 struct target *target = get_current_target(CMD_CTX);
3747
3748 if (CMD_ARGC < 1)
3749 return ERROR_COMMAND_SYNTAX_ERROR;
3750
3751 if (!target) {
3752 LOG_ERROR("no target selected");
3753 return ERROR_FAIL;
3754 }
3755
3756 struct duration bench;
3757 duration_start(&bench);
3758
3759 if (CMD_ARGC >= 2) {
3760 target_addr_t addr;
3761 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3762 image.base_address = addr;
3763 image.base_address_set = true;
3764 } else {
3765 image.base_address_set = false;
3766 image.base_address = 0x0;
3767 }
3768
3769 image.start_address_set = false;
3770
3771 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3772 if (retval != ERROR_OK)
3773 return retval;
3774
3775 image_size = 0x0;
3776 int diffs = 0;
3777 retval = ERROR_OK;
3778 for (unsigned int i = 0; i < image.num_sections; i++) {
3779 buffer = malloc(image.sections[i].size);
3780 if (buffer == NULL) {
3781 command_print(CMD,
3782 "error allocating buffer for section (%" PRIu32 " bytes)",
3783 image.sections[i].size);
3784 break;
3785 }
3786 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3787 if (retval != ERROR_OK) {
3788 free(buffer);
3789 break;
3790 }
3791
3792 if (verify >= IMAGE_VERIFY) {
3793 /* calculate checksum of image */
3794 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3795 if (retval != ERROR_OK) {
3796 free(buffer);
3797 break;
3798 }
3799
3800 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3801 if (retval != ERROR_OK) {
3802 free(buffer);
3803 break;
3804 }
3805 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3806 LOG_ERROR("checksum mismatch");
3807 free(buffer);
3808 retval = ERROR_FAIL;
3809 goto done;
3810 }
3811 if (checksum != mem_checksum) {
3812 /* failed crc checksum, fall back to a binary compare */
3813 uint8_t *data;
3814
3815 if (diffs == 0)
3816 LOG_ERROR("checksum mismatch - attempting binary compare");
3817
3818 data = malloc(buf_cnt);
if (data == NULL) {
LOG_ERROR("error allocating buffer for section (%zu bytes)", buf_cnt);
free(buffer);
retval = ERROR_FAIL;
break;
}
3819 
3820 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3821 if (retval == ERROR_OK) {
3822 uint32_t t;
3823 for (t = 0; t < buf_cnt; t++) {
3824 if (data[t] != buffer[t]) {
3825 command_print(CMD,
3826 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3827 diffs,
3828 (unsigned)(t + image.sections[i].base_address),
3829 data[t],
3830 buffer[t]);
3831 if (diffs++ >= 127) {
3832 command_print(CMD, "More than 128 errors, the rest are not printed.");
3833 free(data);
3834 free(buffer);
3835 goto done;
3836 }
3837 }
3838 keep_alive();
3839 }
3840 }
3841 free(data);
3842 }
3843 } else {
3844 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3845 image.sections[i].base_address,
3846 buf_cnt);
3847 }
3848
3849 free(buffer);
3850 image_size += buf_cnt;
3851 }
3852 if (diffs > 0)
3853 command_print(CMD, "No more differences found.");
3854 done:
3855 if (diffs > 0)
3856 retval = ERROR_FAIL;
3857 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3858 command_print(CMD, "verified %" PRIu32 " bytes "
3859 "in %fs (%0.3f KiB/s)", image_size,
3860 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3861 }
3862
3863 image_close(&image);
3864
3865 return retval;
3866 }
3867
3868 COMMAND_HANDLER(handle_verify_image_checksum_command)
3869 {
3870 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3871 }
3872
3873 COMMAND_HANDLER(handle_verify_image_command)
3874 {
3875 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3876 }
3877
3878 COMMAND_HANDLER(handle_test_image_command)
3879 {
3880 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3881 }
3882
3883 static int handle_bp_command_list(struct command_invocation *cmd)
3884 {
3885 struct target *target = get_current_target(cmd->ctx);
3886 struct breakpoint *breakpoint = target->breakpoints;
3887 while (breakpoint) {
3888 if (breakpoint->type == BKPT_SOFT) {
3889 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3890 breakpoint->length);
3891 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3892 breakpoint->address,
3893 breakpoint->length,
3894 breakpoint->set, buf);
3895 free(buf);
3896 } else {
3897 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3898 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3899 breakpoint->asid,
3900 breakpoint->length, breakpoint->set);
3901 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3902 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3903 breakpoint->address,
3904 breakpoint->length, breakpoint->set);
3905 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3906 breakpoint->asid);
3907 } else
3908 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3909 breakpoint->address,
3910 breakpoint->length, breakpoint->set);
3911 }
3912
3913 breakpoint = breakpoint->next;
3914 }
3915 return ERROR_OK;
3916 }
3917
3918 static int handle_bp_command_set(struct command_invocation *cmd,
3919 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3920 {
3921 struct target *target = get_current_target(cmd->ctx);
3922 int retval;
3923
3924 if (asid == 0) {
3925 retval = breakpoint_add(target, addr, length, hw);
3926 /* error is always logged in breakpoint_add(), do not print it again */
3927 if (ERROR_OK == retval)
3928 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3929
3930 } else if (addr == 0) {
3931 if (target->type->add_context_breakpoint == NULL) {
3932 LOG_ERROR("Context breakpoint not available");
3933 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3934 }
3935 retval = context_breakpoint_add(target, asid, length, hw);
3936 /* error is always logged in context_breakpoint_add(), do not print it again */
3937 if (ERROR_OK == retval)
3938 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3939
3940 } else {
3941 if (target->type->add_hybrid_breakpoint == NULL) {
3942 LOG_ERROR("Hybrid breakpoint not available");
3943 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3944 }
3945 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3946 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3947 if (ERROR_OK == retval)
3948 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3949 }
3950 return retval;
3951 }
3952
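/* Illustrative 'bp' invocations (hypothetical addresses), matching the
 * argument counts handled below:
 *
 *   bp                          ;# list all breakpoints
 *   bp 0x08001000 2             ;# software breakpoint, 2 bytes
 *   bp 0x08001000 2 hw          ;# hardware breakpoint
 *   bp 0x12 4 hw_ctx            ;# context breakpoint on ASID 0x12
 */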
3953 COMMAND_HANDLER(handle_bp_command)
3954 {
3955 target_addr_t addr;
3956 uint32_t asid;
3957 uint32_t length;
3958 int hw = BKPT_SOFT;
3959
3960 switch (CMD_ARGC) {
3961 case 0:
3962 return handle_bp_command_list(CMD);
3963
3964 case 2:
3965 asid = 0;
3966 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3967 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3968 return handle_bp_command_set(CMD, addr, asid, length, hw);
3969
3970 case 3:
3971 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3972 hw = BKPT_HARD;
3973 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3974 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3975 asid = 0;
3976 return handle_bp_command_set(CMD, addr, asid, length, hw);
3977 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3978 hw = BKPT_HARD;
3979 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3980 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3981 addr = 0;
3982 return handle_bp_command_set(CMD, addr, asid, length, hw);
3983 }
3984 /* fallthrough */
3985 case 4:
3986 hw = BKPT_HARD;
3987 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3988 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3989 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3990 return handle_bp_command_set(CMD, addr, asid, length, hw);
3991
3992 default:
3993 return ERROR_COMMAND_SYNTAX_ERROR;
3994 }
3995 }
3996
3997 COMMAND_HANDLER(handle_rbp_command)
3998 {
3999 if (CMD_ARGC != 1)
4000 return ERROR_COMMAND_SYNTAX_ERROR;
4001
4002 struct target *target = get_current_target(CMD_CTX);
4003
4004 if (!strcmp(CMD_ARGV[0], "all")) {
4005 breakpoint_remove_all(target);
4006 } else {
4007 target_addr_t addr;
4008 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4009
4010 breakpoint_remove(target, addr);
4011 }
4012
4013 return ERROR_OK;
4014 }
4015
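/* Illustrative 'wp' invocations (hypothetical values), matching the parsing
 * below: wp [<address> <length> [r|w|a] [value [mask]]]
 *
 *   wp                          ;# list all watchpoints
 *   wp 0x20000400 4 w           ;# break on writes to that word
 */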
4016 COMMAND_HANDLER(handle_wp_command)
4017 {
4018 struct target *target = get_current_target(CMD_CTX);
4019
4020 if (CMD_ARGC == 0) {
4021 struct watchpoint *watchpoint = target->watchpoints;
4022
4023 while (watchpoint) {
4024 command_print(CMD, "address: " TARGET_ADDR_FMT
4025 ", len: 0x%8.8" PRIx32
4026 ", r/w/a: %i, value: 0x%8.8" PRIx32
4027 ", mask: 0x%8.8" PRIx32,
4028 watchpoint->address,
4029 watchpoint->length,
4030 (int)watchpoint->rw,
4031 watchpoint->value,
4032 watchpoint->mask);
4033 watchpoint = watchpoint->next;
4034 }
4035 return ERROR_OK;
4036 }
4037
4038 enum watchpoint_rw type = WPT_ACCESS;
4039 uint32_t addr = 0;
4040 uint32_t length = 0;
4041 uint32_t data_value = 0x0;
4042 uint32_t data_mask = 0xffffffff;
4043
4044 switch (CMD_ARGC) {
4045 case 5:
4046 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4047 /* fall through */
4048 case 4:
4049 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4050 /* fall through */
4051 case 3:
4052 switch (CMD_ARGV[2][0]) {
4053 case 'r':
4054 type = WPT_READ;
4055 break;
4056 case 'w':
4057 type = WPT_WRITE;
4058 break;
4059 case 'a':
4060 type = WPT_ACCESS;
4061 break;
4062 default:
4063 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4064 return ERROR_COMMAND_SYNTAX_ERROR;
4065 }
4066 /* fall through */
4067 case 2:
4068 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4069 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
4070 break;
4071
4072 default:
4073 return ERROR_COMMAND_SYNTAX_ERROR;
4074 }
4075
4076 int retval = watchpoint_add(target, addr, length, type,
4077 data_value, data_mask);
4078 if (ERROR_OK != retval)
4079 LOG_ERROR("Failure setting watchpoints");
4080
4081 return retval;
4082 }
4083
4084 COMMAND_HANDLER(handle_rwp_command)
4085 {
4086 if (CMD_ARGC != 1)
4087 return ERROR_COMMAND_SYNTAX_ERROR;
4088
4089 uint32_t addr;
4090 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
4091
4092 struct target *target = get_current_target(CMD_CTX);
4093 watchpoint_remove(target, addr);
4094
4095 return ERROR_OK;
4096 }
4097
4098 /**
4099 * Translate a virtual address to a physical address.
4100 *
4101 * The low-level target implementation must have logged a detailed error
4102 * which is forwarded to telnet/GDB session.
4103 */
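/* Illustrative use (hypothetical address): "virt2phys 0xc0100000" prints the
 * physical address when the target's MMU state allows the translation. */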
4104 COMMAND_HANDLER(handle_virt2phys_command)
4105 {
4106 if (CMD_ARGC != 1)
4107 return ERROR_COMMAND_SYNTAX_ERROR;
4108
4109 target_addr_t va;
4110 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4111 target_addr_t pa;
4112
4113 struct target *target = get_current_target(CMD_CTX);
4114 int retval = target->type->virt2phys(target, va, &pa);
4115 if (retval == ERROR_OK)
4116 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4117
4118 return retval;
4119 }
4120
4121 static void writeData(FILE *f, const void *data, size_t len)
4122 {
4123 size_t written = fwrite(data, 1, len, f);
4124 if (written != len)
4125 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4126 }
4127
4128 static void writeLong(FILE *f, int l, struct target *target)
4129 {
4130 uint8_t val[4];
4131
4132 target_buffer_set_u32(target, val, l);
4133 writeData(f, val, 4);
4134 }
4135
4136 static void writeString(FILE *f, const char *s)
4137 {
4138 writeData(f, s, strlen(s));
4139 }
4140
4141 typedef unsigned char UNIT[2]; /* unit of profiling */
4142
4143 /* Dump a gmon.out histogram file. */
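/* A rough sketch of the file layout produced below: a "gmon" magic string and
 * version word, three padding words, then a GMON_TAG_TIME_HIST record with
 * low_pc, high_pc, bucket count and sampling rate, followed by one 16-bit
 * counter per bucket. With, say, min = 0x08000000 and max = 0x08010000 the
 * address space is 0x10000 bytes, giving 0x10000 / sizeof(UNIT) = 32768
 * two-byte buckets. */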
4144 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
4145 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4146 {
4147 uint32_t i;
4148 FILE *f = fopen(filename, "w");
4149 if (f == NULL)
4150 return;
4151 writeString(f, "gmon");
4152 writeLong(f, 0x00000001, target); /* Version */
4153 writeLong(f, 0, target); /* padding */
4154 writeLong(f, 0, target); /* padding */
4155 writeLong(f, 0, target); /* padding */
4156
4157 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4158 writeData(f, &zero, 1);
4159
4160 /* figure out bucket size */
4161 uint32_t min;
4162 uint32_t max;
4163 if (with_range) {
4164 min = start_address;
4165 max = end_address;
4166 } else {
4167 min = samples[0];
4168 max = samples[0];
4169 for (i = 0; i < sampleNum; i++) {
4170 if (min > samples[i])
4171 min = samples[i];
4172 if (max < samples[i])
4173 max = samples[i];
4174 }
4175
4176 /* max should be (largest sample + 1)
4177 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4178 max++;
4179 }
4180
4181 int addressSpace = max - min;
4182 assert(addressSpace >= 2);
4183
4184 /* FIXME: What is the reasonable number of buckets?
4185 * The profiling result will be more accurate if there are enough buckets. */
4186 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
4187 uint32_t numBuckets = addressSpace / sizeof(UNIT);
4188 if (numBuckets > maxBuckets)
4189 numBuckets = maxBuckets;
4190 int *buckets = malloc(sizeof(int) * numBuckets);
4191 if (buckets == NULL) {
4192 fclose(f);
4193 return;
4194 }
4195 memset(buckets, 0, sizeof(int) * numBuckets);
4196 for (i = 0; i < sampleNum; i++) {
4197 uint32_t address = samples[i];
4198
4199 if ((address < min) || (max <= address))
4200 continue;
4201
4202 long long a = address - min;
4203 long long b = numBuckets;
4204 long long c = addressSpace;
4205 int index_t = (a * b) / c; /* 64-bit math avoids overflow; the result is < numBuckets, so it fits in an int */
4206 buckets[index_t]++;
4207 }
4208
4209 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4210 writeLong(f, min, target); /* low_pc */
4211 writeLong(f, max, target); /* high_pc */
4212 writeLong(f, numBuckets, target); /* # of buckets */
4213 float sample_rate = sampleNum / (duration_ms / 1000.0);
4214 writeLong(f, sample_rate, target);
4215 writeString(f, "seconds");
4216 for (i = 0; i < (15-strlen("seconds")); i++)
4217 writeData(f, &zero, 1);
4218 writeString(f, "s");
4219
4220 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4221
4222 char *data = malloc(2 * numBuckets);
4223 if (data != NULL) {
4224 for (i = 0; i < numBuckets; i++) {
4225 int val;
4226 val = buckets[i];
4227 if (val > 65535)
4228 val = 65535;
4229 data[i * 2] = val & 0xff;
4230 data[i * 2 + 1] = (val >> 8) & 0xff;
4231 }
4232 free(buckets);
4233 writeData(f, data, numBuckets * 2);
4234 free(data);
4235 } else
4236 free(buckets);
4237
4238 fclose(f);
4239 }
4240
4241 /* Profiling samples the CPU's program counter as quickly as OpenOCD is able,
4242 * which is then used as an approximate statistical sampling of the PC. */
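/* Illustrative session (hypothetical values): "profile 10 /tmp/gmon.out"
 * collects PC samples (the first argument is passed through to
 * target_profiling() as-is) and writes a gmon histogram that can be inspected
 * with gprof against the matching ELF, e.g. "gprof app.elf /tmp/gmon.out".
 * The optional third and fourth arguments restrict the histogram to an
 * address range, e.g. "profile 10 /tmp/gmon.out 0x08000000 0x08010000". */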
4243 COMMAND_HANDLER(handle_profile_command)
4244 {
4245 struct target *target = get_current_target(CMD_CTX);
4246
4247 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4248 return ERROR_COMMAND_SYNTAX_ERROR;
4249
4250 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4251 uint32_t offset;
4252 uint32_t num_of_samples;
4253 int retval = ERROR_OK;
4254 bool halted_before_profiling = target->state == TARGET_HALTED;
4255
4256 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4257
4258 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4259 if (samples == NULL) {
4260 LOG_ERROR("No memory to store samples.");
4261 return ERROR_FAIL;
4262 }
4263
4264 uint64_t timestart_ms = timeval_ms();
4265 /**
4266 * Some cores let us sample the PC without the
4267 * annoying halt/resume step; for example, ARMv7 PCSR.
4268 * Provide a way to use that more efficient mechanism.
4269 */
4270 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4271 &num_of_samples, offset);
4272 if (retval != ERROR_OK) {
4273 free(samples);
4274 return retval;
4275 }
4276 uint32_t duration_ms = timeval_ms() - timestart_ms;
4277
4278 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4279
4280 retval = target_poll(target);
4281 if (retval != ERROR_OK) {
4282 free(samples);
4283 return retval;
4284 }
4285
4286 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4287 /* The target was halted before we started and is running now. Halt it,
4288 * for consistency. */
4289 retval = target_halt(target);
4290 if (retval != ERROR_OK) {
4291 free(samples);
4292 return retval;
4293 }
4294 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4295 /* The target was running before we started and is halted now. Resume
4296 * it, for consistency. */
4297 retval = target_resume(target, 1, 0, 0, 0);
4298 if (retval != ERROR_OK) {
4299 free(samples);
4300 return retval;
4301 }
4302 }
4303
4304 retval = target_poll(target);
4305 if (retval != ERROR_OK) {
4306 free(samples);
4307 return retval;
4308 }
4309
4310 uint32_t start_address = 0;
4311 uint32_t end_address = 0;
4312 bool with_range = false;
4313 if (CMD_ARGC == 4) {
4314 with_range = true;
4315 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4316 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4317 }
4318
4319 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4320 with_range, start_address, end_address, target, duration_ms);
4321 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4322
4323 free(samples);
4324 return retval;
4325 }
4326
4327 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
4328 {
4329 char *namebuf;
4330 Jim_Obj *nameObjPtr, *valObjPtr;
4331 int result;
4332
4333 namebuf = alloc_printf("%s(%d)", varname, idx);
4334 if (!namebuf)
4335 return JIM_ERR;
4336
4337 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4338 valObjPtr = Jim_NewIntObj(interp, val);
4339 if (!nameObjPtr || !valObjPtr) {
4340 free(namebuf);
4341 return JIM_ERR;
4342 }
4343
4344 Jim_IncrRefCount(nameObjPtr);
4345 Jim_IncrRefCount(valObjPtr);
4346 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
4347 Jim_DecrRefCount(interp, nameObjPtr);
4348 Jim_DecrRefCount(interp, valObjPtr);
4349 free(namebuf);
4350 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4351 return result;
4352 }
4353
4354 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4355 {
4356 struct command_context *context;
4357 struct target *target;
4358
4359 context = current_command_context(interp);
4360 assert(context != NULL);
4361
4362 target = get_current_target(context);
4363 if (target == NULL) {
4364 LOG_ERROR("mem2array: no current target");
4365 return JIM_ERR;
4366 }
4367
4368 return target_mem2array(interp, target, argc - 1, argv + 1);
4369 }
4370
4371 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4372 {
4373 long l;
4374 uint32_t width;
4375 int len;
4376 uint32_t addr;
4377 uint32_t count;
4378 uint32_t v;
4379 const char *varname;
4380 const char *phys;
4381 bool is_phys;
4382 int n, e, retval;
4383 uint32_t i;
4384
4385 /* The caller has already stripped the command name, so here:
4386 * argv[0] = name of array to receive the data
* argv[1] = desired width in bits (8/16/32)
4387 * argv[2] = memory address
4388 * argv[3] = count of elements to read
* argv[4] = optional "phys" to read via the physical address space
4389 */
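/* A hedged Tcl example (hypothetical address): "mem2array wdata 32 0x20000000 4"
 * reads four 32-bit words into $wdata(0) .. $wdata(3); appending "phys"
 * reads through the physical address space instead. */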
4390
4391 if (argc < 4 || argc > 5) {
4392 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4393 return JIM_ERR;
4394 }
4395 varname = Jim_GetString(argv[0], &len);
4396 /* given "foo", elements are stored as "foo(%d)" by new_int_array_element() */
4397
4398 e = Jim_GetLong(interp, argv[1], &l);
4399 width = l;
4400 if (e != JIM_OK)
4401 return e;
4402
4403 e = Jim_GetLong(interp, argv[2], &l);
4404 addr = l;
4405 if (e != JIM_OK)
4406 return e;
4407 e = Jim_GetLong(interp, argv[3], &l);
4408 len = l;
4409 if (e != JIM_OK)
4410 return e;
4411 is_phys = false;
4412 if (argc > 4) {
4413 phys = Jim_GetString(argv[4], &n);
4414 if (!strncmp(phys, "phys", n))
4415 is_phys = true;
4416 else
4417 return JIM_ERR;
4418 }
4419 switch (width) {
4420 case 8:
4421 width = 1;
4422 break;
4423 case 16:
4424 width = 2;
4425 break;
4426 case 32:
4427 width = 4;
4428 break;
4429 default:
4430 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4431 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
4432 return JIM_ERR;
4433 }
4434 if (len == 0) {
4435 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4436 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero element count?", NULL);
4437 return JIM_ERR;
4438 }
4439 if ((addr + (len * width)) < addr) {
4440 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4441 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4442 return JIM_ERR;
4443 }
4444 /* absurd transfer size? */
4445 if (len > 65536) {
4446 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4447 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
4448 return JIM_ERR;
4449 }
4450
4451 if ((width == 1) ||
4452 ((width == 2) && ((addr & 1) == 0)) ||
4453 ((width == 4) && ((addr & 3) == 0))) {
4454 /* all is well */
4455 } else {
4456 char buf[100];
4457 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4458 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte reads",
4459 addr,
4460 width);
4461 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4462 return JIM_ERR;
4463 }
4464
4465 /* Transfer loop */
4466
4467 /* index counter */
4468 n = 0;
4469
4470 size_t buffersize = 4096;
4471 uint8_t *buffer = malloc(buffersize);
4472 if (buffer == NULL)
4473 return JIM_ERR;
4474
4475 /* assume ok */
4476 e = JIM_OK;
4477 while (len) {
4478 /* Slurp... in buffer size chunks */
4479
4480 count = len; /* in objects.. */
4481 if (count > (buffersize / width))
4482 count = (buffersize / width);
4483
4484 if (is_phys)
4485 retval = target_read_phys_memory(target, addr, width, count, buffer);
4486 else
4487 retval = target_read_memory(target, addr, width, count, buffer);
4488 if (retval != ERROR_OK) {
4489 /* BOO !*/
4490 LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4491 addr,
4492 width,
4493 count);
4494 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4495 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4496 e = JIM_ERR;
4497 break;
4498 } else {
4499 v = 0; /* shut up gcc */
4500 for (i = 0; i < count ; i++, n++) {
4501 switch (width) {
4502 case 4:
4503 v = target_buffer_get_u32(target, &buffer[i*width]);
4504 break;
4505 case 2:
4506 v = target_buffer_get_u16(target, &buffer[i*width]);
4507 break;
4508 case 1:
4509 v = buffer[i] & 0x0ff;
4510 break;
4511 }
4512 new_int_array_element(interp, varname, n, v);
4513 }
4514 len -= count;
4515 addr += count * width;
4516 }
4517 }
4518
4519 free(buffer);
4520
4521 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4522
4523 return e;
4524 }
4525
4526 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4527 {
4528 char *namebuf;
4529 Jim_Obj *nameObjPtr, *valObjPtr;
4530 int result;
4531 long l;
4532
4533 namebuf = alloc_printf("%s(%d)", varname, idx);
4534 if (!namebuf)
4535 return JIM_ERR;
4536
4537 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4538 if (!nameObjPtr) {
4539 free(namebuf);
4540 return JIM_ERR;
4541 }
4542
4543 Jim_IncrRefCount(nameObjPtr);
4544 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4545 Jim_DecrRefCount(interp, nameObjPtr);
4546 free(namebuf);
4547 if (valObjPtr == NULL)
4548 return JIM_ERR;
4549
4550 result = Jim_GetLong(interp, valObjPtr, &l);
4551 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4552 *val = l;
4553 return result;
4554 }
4555
4556 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4557 {
4558 struct command_context *context;
4559 struct target *target;
4560
4561 context = current_command_context(interp);
4562 assert(context != NULL);
4563
4564 target = get_current_target(context);
4565 if (target == NULL) {
4566 LOG_ERROR("array2mem: no current target");
4567 return JIM_ERR;
4568 }
4569
4570 return target_array2mem(interp, target, argc-1, argv + 1);
4571 }
4572
4573 static int target_array2mem(Jim_Interp *interp, struct target *target,
4574 int argc, Jim_Obj *const *argv)
4575 {
4576 long l;
4577 uint32_t width;
4578 int len;
4579 uint32_t addr;
4580 uint32_t count;
4581 uint32_t v;
4582 const char *varname;
4583 const char *phys;
4584 bool is_phys;
4585 int n, e, retval;
4586 uint32_t i;
4587
4588 /* The caller has already stripped the command name, so here:
4589 * argv[0] = name of array holding the data to write
* argv[1] = desired width in bits (8/16/32)
4590 * argv[2] = memory address
4591 * argv[3] = count of elements to write
* argv[4] = optional "phys" to write via the physical address space
4592 */
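/* A hedged Tcl example (hypothetical address): after setting
 * $wdata(0) .. $wdata(3), "array2mem wdata 32 0x20000000 4" writes the four
 * 32-bit words back to target memory; appending "phys" writes through the
 * physical address space instead. */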
4593 if (argc < 4 || argc > 5) {
4594 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4595 return JIM_ERR;
4596 }
4597 varname = Jim_GetString(argv[0], &len);
4598 /* given "foo", elements are read back as "foo(%d)" via get_int_array_element() */
4599
4600 e = Jim_GetLong(interp, argv[1], &l);
4601 width = l;
4602 if (e != JIM_OK)
4603 return e;
4604
4605 e = Jim_GetLong(interp, argv[2], &l);
4606 addr = l;
4607 if (e != JIM_OK)
4608 return e;
4609 e = Jim_GetLong(interp, argv[3], &l);
4610 len = l;
4611 if (e != JIM_OK)
4612 return e;
4613 is_phys = false;
4614 if (argc > 4) {
4615 phys = Jim_GetString(argv[4], &n);
4616 if (!strncmp(phys, "phys", n))
4617 is_phys = true;
4618 else
4619 return JIM_ERR;
4620 }
4621 switch (width) {
4622 case 8:
4623 width = 1;
4624 break;
4625 case 16:
4626 width = 2;
4627 break;
4628 case 32:
4629 width = 4;
4630 break;
4631 default:
4632 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4633 Jim_AppendStrings(interp, Jim_GetResult(interp),
4634 "Invalid width param, must be 8/16/32", NULL);
4635 return JIM_ERR;
4636 }
4637 if (len == 0) {
4638 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4639 Jim_AppendStrings(interp, Jim_GetResult(interp),
4640 "array2mem: zero width read?", NULL);
4641 return JIM_ERR;
4642 }
4643 if ((addr + (len * width)) < addr) {
4644 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4645 Jim_AppendStrings(interp, Jim_GetResult(interp),
4646 "array2mem: addr + len - wraps to zero?", NULL);
4647 return JIM_ERR;
4648 }
4649 /* absurd transfer size? */
4650 if (len > 65536) {
4651 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4652 Jim_AppendStrings(interp, Jim_GetResult(interp),
4653 "array2mem: absurd > 64K item request", NULL);
4654 return JIM_ERR;
4655 }
4656
4657 if ((width == 1) ||
4658 ((width == 2) && ((addr & 1) == 0)) ||
4659 ((width == 4) && ((addr & 3) == 0))) {
4660 /* all is well */
4661 } else {
4662 char buf[100];
4663 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4664 sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte writes",
4665 addr,
4666 width);
4667 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4668 return JIM_ERR;
4669 }
4670
4671 /* Transfer loop */
4672
4673 /* index counter */
4674 n = 0;
4675 /* assume ok */
4676 e = JIM_OK;
4677
4678 size_t buffersize = 4096;
4679 uint8_t *buffer = malloc(buffersize);
4680 if (buffer == NULL)
4681 return JIM_ERR;
4682
4683 while (len) {
4684 /* Slurp... in buffer size chunks */
4685
4686 count = len; /* in objects.. */
4687 if (count > (buffersize / width))
4688 count = (buffersize / width);
4689
4690 v = 0; /* shut up gcc */
4691 for (i = 0; i < count; i++, n++) {
4692 get_int_array_element(interp, varname, n, &v);
4693 switch (width) {
4694 case 4:
4695 target_buffer_set_u32(target, &buffer[i * width], v);
4696 break;
4697 case 2:
4698 target_buffer_set_u16(target, &buffer[i * width], v);
4699 break;
4700 case 1:
4701 buffer[i] = v & 0x0ff;
4702 break;
4703 }
4704 }
4705 len -= count;
4706
4707 if (is_phys)
4708 retval = target_write_phys_memory(target, addr, width, count, buffer);
4709 else
4710 retval = target_write_memory(target, addr, width, count, buffer);
4711 if (retval != ERROR_OK) {
4712 /* BOO !*/
4713 LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4714 addr,
4715 width,
4716 count);
4717 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4718 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4719 e = JIM_ERR;
4720 break;
4721 }
4722 addr += count * width;
4723 }
4724
4725 free(buffer);
4726
4727 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4728
4729 return e;
4730 }
4731
4732 /* FIX? should we propagate errors here rather than printing them
4733 * and continuing?
4734 */
4735 void target_handle_event(struct target *target, enum target_event e)
4736 {
4737 struct target_event_action *teap;
4738 int retval;
4739
4740 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4741 if (teap->event == e) {
4742 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4743 target->target_number,
4744 target_name(target),
4745 target_type_name(target),
4746 e,
4747 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4748 Jim_GetString(teap->body, NULL));
4749
4750 /* Override the current target with the target the event
4751 * is issued from (many scripts rely on this).
4752 * Restore the previous override as soon
4753 * as the handler processing is done. */
4754 struct command_context *cmd_ctx = current_command_context(teap->interp);
4755 struct target *saved_target_override = cmd_ctx->current_target_override;
4756 cmd_ctx->current_target_override = target;
4757
4758 retval = Jim_EvalObj(teap->interp, teap->body);
4759
4760 cmd_ctx->current_target_override = saved_target_override;
4761
4762 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
4763 return;
4764
4765 if (retval == JIM_RETURN)
4766 retval = teap->interp->returnCode;
4767
4768 if (retval != JIM_OK) {
4769 Jim_MakeErrorMessage(teap->interp);
4770 LOG_USER("Error executing event %s on target %s:\n%s",
4771 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4772 target_name(target),
4773 Jim_GetString(Jim_GetResult(teap->interp), NULL));
4774 /* clean both error code and stacktrace before return */
4775 Jim_Eval(teap->interp, "error \"\" \"\"");
4776 }
4777 }
4778 }
4779 }
4780
4781 /**
4782 * Returns true only if the target has a handler for the specified event.
4783 */
4784 bool target_has_event_action(struct target *target, enum target_event event)
4785 {
4786 struct target_event_action *teap;
4787
4788 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4789 if (teap->event == event)
4790 return true;
4791 }
4792 return false;
4793 }
4794
4795 enum target_cfg_param {
4796 TCFG_TYPE,
4797 TCFG_EVENT,
4798 TCFG_WORK_AREA_VIRT,
4799 TCFG_WORK_AREA_PHYS,
4800 TCFG_WORK_AREA_SIZE,
4801 TCFG_WORK_AREA_BACKUP,
4802 TCFG_ENDIAN,
4803 TCFG_COREID,
4804 TCFG_CHAIN_POSITION,
4805 TCFG_DBGBASE,
4806 TCFG_RTOS,
4807 TCFG_DEFER_EXAMINE,
4808 TCFG_GDB_PORT,
4809 TCFG_GDB_MAX_CONNECTIONS,
4810 };
4811
4812 static Jim_Nvp nvp_config_opts[] = {
4813 { .name = "-type", .value = TCFG_TYPE },
4814 { .name = "-event", .value = TCFG_EVENT },
4815 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4816 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4817 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4818 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4819 { .name = "-endian", .value = TCFG_ENDIAN },
4820 { .name = "-coreid", .value = TCFG_COREID },
4821 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4822 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4823 { .name = "-rtos", .value = TCFG_RTOS },
4824 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4825 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
4826 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
4827 { .name = NULL, .value = -1 }
4828 };
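
/* Illustrative configure/cget usage (hypothetical names and values) for the
 * options listed above, typically run from a board or target script:
 *
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { echo "reset-init handled" }
 *   $_TARGETNAME cget -endian
 */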
4829
4830 static int target_configure(Jim_GetOptInfo *goi, struct target *target)
4831 {
4832 Jim_Nvp *n;
4833 Jim_Obj *o;
4834 jim_wide w;
4835 int e;
4836
4837 /* parse config or cget options ... */
4838 while (goi->argc > 0) {
4839 Jim_SetEmptyResult(goi->interp);
4840 /* Jim_GetOpt_Debug(goi); */
4841
4842 if (target->type->target_jim_configure) {
4843 /* target defines a configure function */
4844 /* target gets first dibs on parameters */
4845 e = (*(target->type->target_jim_configure))(target, goi);
4846 if (e == JIM_OK) {
4847 /* more? */
4848 continue;
4849 }
4850 if (e == JIM_ERR) {
4851 /* An error */
4852 return e;
4853 }
4854 /* otherwise we 'continue' below */
4855 }
4856 e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n);
4857 if (e != JIM_OK) {
4858 Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0);
4859 return e;
4860 }
4861 switch (n->value) {
4862 case TCFG_TYPE:
4863 /* not settable */
4864 if (goi->isconfigure) {
4865 Jim_SetResultFormatted(goi->interp,
4866 "not settable: %s", n->name);
4867 return JIM_ERR;
4868 } else {
4869 no_params:
4870 if (goi->argc != 0) {
4871 Jim_WrongNumArgs(goi->interp,
4872 goi->argc, goi->argv,
4873 "NO PARAMS");
4874 return JIM_ERR;
4875 }
4876 }
4877 Jim_SetResultString(goi->interp,
4878 target_type_name(target), -1);
4879 /* loop for more */
4880 break;
4881 case TCFG_EVENT:
4882 if (goi->argc == 0) {
4883 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4884 return JIM_ERR;
4885 }
4886
4887 e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n);
4888 if (e != JIM_OK) {
4889 Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1);
4890 return e;
4891 }
4892
4893 if (goi->isconfigure) {
4894 if (goi->argc != 1) {
4895 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4896 return JIM_ERR;
4897 }
4898 } else {
4899 if (goi->argc != 0) {
4900 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4901 return JIM_ERR;
4902 }
4903 }
4904
4905 {
4906 struct target_event_action *teap;
4907
4908 teap = target->event_action;
4909 /* replace existing? */
4910 while (teap) {
4911 if (teap->event == (enum target_event)n->value)
4912 break;
4913 teap = teap->next;
4914 }
4915
4916 if (goi->isconfigure) {
4917 bool replace = true;
4918 if (teap == NULL) {
4919 /* create new */
4920 teap = calloc(1, sizeof(*teap));
if (teap == NULL) {
LOG_ERROR("Out of memory");
return JIM_ERR;
}
4921 replace = false;
4922 }
4923 teap->event = n->value;
4924 teap->interp = goi->interp;
4925 Jim_GetOpt_Obj(goi, &o);
4926 if (teap->body)
4927 Jim_DecrRefCount(teap->interp, teap->body);
4928 teap->body = Jim_DuplicateObj(goi->interp, o);
4929 /*
4930 * FIXME:
4931 * Tcl/Tk "bind" events have a nice feature.
4932 * See the "bind" command.
4933 * We should support that here:
4934 * you can specify substitutions like %X and %Y in the event code.
4935 * The idea is: %T - target name,
4936 * %N - target number,
4937 * %E - event name.
4938 */
4939 Jim_IncrRefCount(teap->body);
4940
4941 if (!replace) {
4942 /* add to head of event list */
4943 teap->next = target->event_action;
4944 target->event_action = teap;
4945 }
4946 Jim_SetEmptyResult(goi->interp);
4947 } else {
4948 /* get */
4949 if (teap == NULL)
4950 Jim_SetEmptyResult(goi->interp);
4951 else
4952 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
4953 }
4954 }
4955 /* loop for more */
4956 break;
4957
4958 case TCFG_WORK_AREA_VIRT:
4959 if (goi->isconfigure) {
4960 target_free_all_working_areas(target);
4961 e = Jim_GetOpt_Wide(goi, &w);
4962 if (e != JIM_OK)
4963 return e;
4964 target->working_area_virt = w;
4965 target->working_area_virt_spec = true;
4966 } else {
4967 if (goi->argc != 0)
4968 goto no_params;
4969 }
4970 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
4971 /* loop for more */
4972 break;
4973
4974 case TCFG_WORK_AREA_PHYS:
4975 if (goi->isconfigure) {
4976 target_free_all_working_areas(target);
4977 e = Jim_GetOpt_Wide(goi, &w);
4978 if (e != JIM_OK)
4979 return e;
4980 target->working_area_phys = w;
4981 target->working_area_phys_spec = true;
4982 } else {
4983 if (goi->argc != 0)
4984 goto no_params;
4985 }
4986 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
4987 /* loop for more */
4988 break;
4989
4990 case TCFG_WORK_AREA_SIZE:
4991 if (goi->isconfigure) {
4992 target_free_all_working_areas(target);
4993 e = Jim_GetOpt_Wide(goi, &w);
4994 if (e != JIM_OK)
4995 return e;
4996 target->working_area_size = w;
4997 } else {
4998 if (goi->argc != 0)
4999 goto no_params;
5000 }
5001 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5002 /* loop for more */
5003 break;
5004
5005 case TCFG_WORK_AREA_BACKUP:
5006 if (goi->isconfigure) {
5007 target_free_all_working_areas(target);
5008 e = Jim_GetOpt_Wide(goi, &w);
5009 if (e != JIM_OK)
5010 return e;
5011 /* make this exactly 1 or 0 */
5012 target->backup_working_area = (!!w);
5013 } else {
5014 if (goi->argc != 0)
5015 goto no_params;
5016 }
5017 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5018 /* loop for more */
5019 break;
5020
5021
5022 case TCFG_ENDIAN:
5023 if (goi->isconfigure) {
5024 e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n);
5025 if (e != JIM_OK) {
5026 Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1);
5027 return e;
5028 }
5029 target->endianness = n->value;
5030 } else {
5031 if (goi->argc != 0)
5032 goto no_params;
5033 }
5034 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
5035 if (n->name == NULL) {
5036 target->endianness = TARGET_LITTLE_ENDIAN;
5037 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
5038 }
5039 Jim_SetResultString(goi->interp, n->name, -1);
5040 /* loop for more */
5041 break;
5042
5043 case TCFG_COREID:
5044 if (goi->isconfigure) {
5045 e = Jim_GetOpt_Wide(goi, &w);
5046 if (e != JIM_OK)
5047 return e;
5048 target->coreid = (int32_t)w;
5049 } else {
5050 if (goi->argc != 0)
5051 goto no_params;
5052 }
5053 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5054 /* loop for more */
5055 break;
5056
5057 case TCFG_CHAIN_POSITION:
5058 if (goi->isconfigure) {
5059 Jim_Obj *o_t;
5060 struct jtag_tap *tap;
5061
5062 if (target->has_dap) {
5063 Jim_SetResultString(goi->interp,
5064 "target requires -dap parameter instead of -chain-position!", -1);
5065 return JIM_ERR;
5066 }
5067
5068 target_free_all_working_areas(target);
5069 e = Jim_GetOpt_Obj(goi, &o_t);
5070 if (e != JIM_OK)
5071 return e;
5072 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5073 if (tap == NULL)
5074 return JIM_ERR;
5075 target->tap = tap;
5076 target->tap_configured = true;
5077 } else {
5078 if (goi->argc != 0)
5079 goto no_params;
5080 }
5081 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5082 /* loop for more */
5083 break;
5084 case TCFG_DBGBASE:
5085 if (goi->isconfigure) {
5086 e = Jim_GetOpt_Wide(goi, &w);
5087 if (e != JIM_OK)
5088 return e;
5089 target->dbgbase = (uint32_t)w;
5090 target->dbgbase_set = true;
5091 } else {
5092 if (goi->argc != 0)
5093 goto no_params;
5094 }
5095 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5096 /* loop for more */
5097 break;
5098 case TCFG_RTOS:
5099 /* RTOS */
5100 {
5101 int result = rtos_create(goi, target);
5102 if (result != JIM_OK)
5103 return result;
5104 }
5105 /* loop for more */
5106 break;
5107
5108 case TCFG_DEFER_EXAMINE:
5109 /* DEFER_EXAMINE */
5110 target->defer_examine = true;
5111 /* loop for more */
5112 break;
5113
5114 case TCFG_GDB_PORT:
5115 if (goi->isconfigure) {
5116 struct command_context *cmd_ctx = current_command_context(goi->interp);
5117 if (cmd_ctx->mode != COMMAND_CONFIG) {
5118 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5119 return JIM_ERR;
5120 }
5121
5122 const char *s;
5123 e = Jim_GetOpt_String(goi, &s, NULL);
5124 if (e != JIM_OK)
5125 return e;
5126 target->gdb_port_override = strdup(s);
5127 } else {
5128 if (goi->argc != 0)
5129 goto no_params;
5130 }
5131 Jim_SetResultString(goi->interp, target->gdb_port_override ? : "undefined", -1);
5132 /* loop for more */
5133 break;
5134
5135 case TCFG_GDB_MAX_CONNECTIONS:
5136 if (goi->isconfigure) {
5137 struct command_context *cmd_ctx = current_command_context(goi->interp);
5138 if (cmd_ctx->mode != COMMAND_CONFIG) {
5139 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5140 return JIM_ERR;
5141 }
5142
5143 e = Jim_GetOpt_Wide(goi, &w);
5144 if (e != JIM_OK)
5145 return e;
5146 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5147 } else {
5148 if (goi->argc != 0)
5149 goto no_params;
5150 }
5151 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5152 break;
5153 }
5154 } /* while (goi->argc) */
5155
5156
5157 /* done - we return */
5158 return JIM_OK;
5159 }
5160
5161 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5162 {
5163 Jim_GetOptInfo goi;
5164
5165 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5166 goi.isconfigure = !strcmp(Jim_GetString(argv[0], NULL), "configure");
5167 if (goi.argc < 1) {
5168 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5169 "missing: -option ...");
5170 return JIM_ERR;
5171 }
5172 struct target *target = Jim_CmdPrivData(goi.interp);
5173 return target_configure(&goi, target);
5174 }
5175
5176 static int jim_target_mem2array(Jim_Interp *interp,
5177 int argc, Jim_Obj *const *argv)
5178 {
5179 struct target *target = Jim_CmdPrivData(interp);
5180 return target_mem2array(interp, target, argc - 1, argv + 1);
5181 }
5182
5183 static int jim_target_array2mem(Jim_Interp *interp,
5184 int argc, Jim_Obj *const *argv)
5185 {
5186 struct target *target = Jim_CmdPrivData(interp);
5187 return target_array2mem(interp, target, argc - 1, argv + 1);
5188 }
5189
5190 static int jim_target_tap_disabled(Jim_Interp *interp)
5191 {
5192 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5193 return JIM_ERR;
5194 }
5195
5196 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5197 {
5198 bool allow_defer = false;
5199
5200 Jim_GetOptInfo goi;
5201 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5202 if (goi.argc > 1) {
5203 const char *cmd_name = Jim_GetString(argv[0], NULL);
5204 Jim_SetResultFormatted(goi.interp,
5205 "usage: %s ['allow-defer']", cmd_name);
5206 return JIM_ERR;
5207 }
5208 if (goi.argc > 0 &&
5209 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5210 /* consume it */
5211 Jim_Obj *obj;
5212 int e = Jim_GetOpt_Obj(&goi, &obj);
5213 if (e != JIM_OK)
5214 return e;
5215 allow_defer = true;
5216 }
5217
5218 struct target *target = Jim_CmdPrivData(interp);
5219 if (!target->tap->enabled)
5220 return jim_target_tap_disabled(interp);
5221
5222 if (allow_defer && target->defer_examine) {
5223 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5224 LOG_INFO("Use arp_examine command to examine it manually!");
5225 return JIM_OK;
5226 }
5227
5228 int e = target->type->examine(target);
5229 if (e != ERROR_OK)
5230 return JIM_ERR;
5231 return JIM_OK;
5232 }
5233
5234 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5235 {
5236 struct target *target = Jim_CmdPrivData(interp);
5237
5238 Jim_SetResultBool(interp, target_was_examined(target));
5239 return JIM_OK;
5240 }
5241
5242 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5243 {
5244 struct target *target = Jim_CmdPrivData(interp);
5245
5246 Jim_SetResultBool(interp, target->defer_examine);
5247 return JIM_OK;
5248 }
5249
5250 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5251 {
5252 if (argc != 1) {
5253 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5254 return JIM_ERR;
5255 }
5256 struct target *target = Jim_CmdPrivData(interp);
5257
5258 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5259 return JIM_ERR;
5260
5261 return JIM_OK;
5262 }
5263
5264 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5265 {
5266 if (argc != 1) {
5267 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5268 return JIM_ERR;
5269 }
5270 struct target *target = Jim_CmdPrivData(interp);
5271 if (!target->tap->enabled)
5272 return jim_target_tap_disabled(interp);
5273
5274 int e;
5275 if (!(target_was_examined(target)))
5276 e = ERROR_TARGET_NOT_EXAMINED;
5277 else
5278 e = target->type->poll(target);
5279 if (e != ERROR_OK)
5280 return JIM_ERR;
5281 return JIM_OK;
5282 }
5283
5284 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5285 {
5286 Jim_GetOptInfo goi;
5287 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5288
5289 if (goi.argc != 2) {
5290 Jim_WrongNumArgs(interp, 0, argv,
5291 "([tT]|[fF]|assert|deassert) BOOL");
5292 return JIM_ERR;
5293 }
5294
5295 Jim_Nvp *n;
5296 int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n);
5297 if (e != JIM_OK) {
5298 Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1);
5299 return e;
5300 }
5301 /* the halt or not param */
5302 jim_wide a;
5303 e = Jim_GetOpt_Wide(&goi, &a);
5304 if (e != JIM_OK)
5305 return e;
5306
5307 struct target *target = Jim_CmdPrivData(goi.interp);
5308 if (!target->tap->enabled)
5309 return jim_target_tap_disabled(interp);
5310
5311 if (!target->type->assert_reset || !target->type->deassert_reset) {
5312 Jim_SetResultFormatted(interp,
5313 "No target-specific reset for %s",
5314 target_name(target));
5315 return JIM_ERR;
5316 }
5317
5318 if (target->defer_examine)
5319 target_reset_examined(target);
5320
5321 /* determine if we should halt or not. */
5322 target->reset_halt = !!a;
5323 /* When this happens - all workareas are invalid. */
5324 target_free_all_working_areas_restore(target, 0);
5325
5326 /* do the assert */
5327 if (n->value == NVP_ASSERT)
5328 e = target->type->assert_reset(target);
5329 else
5330 e = target->type->deassert_reset(target);
5331 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5332 }
5333
5334 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5335 {
5336 if (argc != 1) {
5337 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5338 return JIM_ERR;
5339 }
5340 struct target *target = Jim_CmdPrivData(interp);
5341 if (!target->tap->enabled)
5342 return jim_target_tap_disabled(interp);
5343 int e = target->type->halt(target);
5344 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5345 }
5346
5347 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5348 {
5349 Jim_GetOptInfo goi;
5350 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5351
5352 /* params: <name> statename timeoutmsecs */
5353 if (goi.argc != 2) {
5354 const char *cmd_name = Jim_GetString(argv[0], NULL);
5355 Jim_SetResultFormatted(goi.interp,
5356 "%s <state_name> <timeout_in_msec>", cmd_name);
5357 return JIM_ERR;
5358 }
5359
5360 Jim_Nvp *n;
5361 int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n);
5362 if (e != JIM_OK) {
5363 Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1);
5364 return e;
5365 }
5366 jim_wide a;
5367 e = Jim_GetOpt_Wide(&goi, &a);
5368 if (e != JIM_OK)
5369 return e;
5370 struct target *target = Jim_CmdPrivData(interp);
5371 if (!target->tap->enabled)
5372 return jim_target_tap_disabled(interp);
5373
5374 e = target_wait_state(target, n->value, a);
5375 if (e != ERROR_OK) {
5376 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
5377 Jim_SetResultFormatted(goi.interp,
5378 "target: %s wait %s fails (%#s) %s",
5379 target_name(target), n->name,
5380 eObj, target_strerror_safe(e));
5381 return JIM_ERR;
5382 }
5383 return JIM_OK;
5384 }
5385 /* List, for humans, the events defined for this target.
5386 * Scripts/programs should use 'name cget -event NAME' instead.
5387 */
5388 COMMAND_HANDLER(handle_target_event_list)
5389 {
5390 struct target *target = get_current_target(CMD_CTX);
5391 struct target_event_action *teap = target->event_action;
5392
5393 command_print(CMD, "Event actions for target (%d) %s\n",
5394 target->target_number,
5395 target_name(target));
5396 command_print(CMD, "%-25s | Body", "Event");
5397 command_print(CMD, "------------------------- | "
5398 "----------------------------------------");
5399 while (teap) {
5400 Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event);
5401 command_print(CMD, "%-25s | %s",
5402 opt->name, Jim_GetString(teap->body, NULL));
5403 teap = teap->next;
5404 }
5405 command_print(CMD, "***END***");
5406 return ERROR_OK;
5407 }
5408 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5409 {
5410 if (argc != 1) {
5411 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5412 return JIM_ERR;
5413 }
5414 struct target *target = Jim_CmdPrivData(interp);
5415 Jim_SetResultString(interp, target_state_name(target), -1);
5416 return JIM_OK;
5417 }
5418 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5419 {
5420 Jim_GetOptInfo goi;
5421 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5422 if (goi.argc != 1) {
5423 const char *cmd_name = Jim_GetString(argv[0], NULL);
5424 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5425 return JIM_ERR;
5426 }
5427 Jim_Nvp *n;
5428 int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n);
5429 if (e != JIM_OK) {
5430 Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1);
5431 return e;
5432 }
5433 struct target *target = Jim_CmdPrivData(interp);
5434 target_handle_event(target, n->value);
5435 return JIM_OK;
5436 }
5437
5438 static const struct command_registration target_instance_command_handlers[] = {
5439 {
5440 .name = "configure",
5441 .mode = COMMAND_ANY,
5442 .jim_handler = jim_target_configure,
5443 .help = "configure a new target for use",
5444 .usage = "[target_attribute ...]",
5445 },
5446 {
5447 .name = "cget",
5448 .mode = COMMAND_ANY,
5449 .jim_handler = jim_target_configure,
5450 .help = "returns the specified target attribute",
5451 .usage = "target_attribute",
5452 },
5453 {
5454 .name = "mwd",
5455 .handler = handle_mw_command,
5456 .mode = COMMAND_EXEC,
5457 .help = "Write 64-bit word(s) to target memory",
5458 .usage = "address data [count]",
5459 },
5460 {
5461 .name = "mww",
5462 .handler = handle_mw_command,
5463 .mode = COMMAND_EXEC,
5464 .help = "Write 32-bit word(s) to target memory",
5465 .usage = "address data [count]",
5466 },
5467 {
5468 .name = "mwh",
5469 .handler = handle_mw_command,
5470 .mode = COMMAND_EXEC,
5471 .help = "Write 16-bit half-word(s) to target memory",
5472 .usage = "address data [count]",
5473 },
5474 {
5475 .name = "mwb",
5476 .handler = handle_mw_command,
5477 .mode = COMMAND_EXEC,
5478 .help = "Write byte(s) to target memory",
5479 .usage = "address data [count]",
5480 },
5481 {
5482 .name = "mdd",
5483 .handler = handle_md_command,
5484 .mode = COMMAND_EXEC,
5485 .help = "Display target memory as 64-bit words",
5486 .usage = "address [count]",
5487 },
5488 {
5489 .name = "mdw",
5490 .handler = handle_md_command,
5491 .mode = COMMAND_EXEC,
5492 .help = "Display target memory as 32-bit words",
5493 .usage = "address [count]",
5494 },
5495 {
5496 .name = "mdh",
5497 .handler = handle_md_command,
5498 .mode = COMMAND_EXEC,
5499 .help = "Display target memory as 16-bit half-words",
5500 .usage = "address [count]",
5501 },
5502 {
5503 .name = "mdb",
5504 .handler = handle_md_command,
5505 .mode = COMMAND_EXEC,
5506 .help = "Display target memory as 8-bit bytes",
5507 .usage = "address [count]",
5508 },
5509 {
5510 .name = "array2mem",
5511 .mode = COMMAND_EXEC,
5512 .jim_handler = jim_target_array2mem,
5513 .help = "Writes Tcl array of 8/16/32 bit numbers "
5514 "to target memory",
5515 .usage = "arrayname bitwidth address count",
5516 },
5517 {
5518 .name = "mem2array",
5519 .mode = COMMAND_EXEC,
5520 .jim_handler = jim_target_mem2array,
5521 .help = "Loads Tcl array of 8/16/32 bit numbers "
5522 "from target memory",
5523 .usage = "arrayname bitwidth address count",
5524 },
5525 {
5526 .name = "eventlist",
5527 .handler = handle_target_event_list,
5528 .mode = COMMAND_EXEC,
5529 .help = "displays a table of events defined for this target",
5530 .usage = "",
5531 },
5532 {
5533 .name = "curstate",
5534 .mode = COMMAND_EXEC,
5535 .jim_handler = jim_target_current_state,
5536 .help = "displays the current state of this target",
5537 },
5538 {
5539 .name = "arp_examine",
5540 .mode = COMMAND_EXEC,
5541 .jim_handler = jim_target_examine,
5542 .help = "used internally for reset processing",
5543 .usage = "['allow-defer']",
5544 },
5545 {
5546 .name = "was_examined",
5547 .mode = COMMAND_EXEC,
5548 .jim_handler = jim_target_was_examined,
5549 .help = "used internally for reset processing",
5550 },
5551 {
5552 .name = "examine_deferred",
5553 .mode = COMMAND_EXEC,
5554 .jim_handler = jim_target_examine_deferred,
5555 .help = "used internally for reset processing",
5556 },
5557 {
5558 .name = "arp_halt_gdb",
5559 .mode = COMMAND_EXEC,
5560 .jim_handler = jim_target_halt_gdb,
5561 .help = "used internally for reset processing to halt GDB",
5562 },
5563 {
5564 .name = "arp_poll",
5565 .mode = COMMAND_EXEC,
5566 .jim_handler = jim_target_poll,
5567 .help = "used internally for reset processing",
5568 },
5569 {
5570 .name = "arp_reset",
5571 .mode = COMMAND_EXEC,
5572 .jim_handler = jim_target_reset,
5573 .help = "used internally for reset processing",
5574 },
5575 {
5576 .name = "arp_halt",
5577 .mode = COMMAND_EXEC,
5578 .jim_handler = jim_target_halt,
5579 .help = "used internally for reset processing",
5580 },
5581 {
5582 .name = "arp_waitstate",
5583 .mode = COMMAND_EXEC,
5584 .jim_handler = jim_target_wait_state,
5585 .help = "used internally for reset processing",
5586 },
5587 {
5588 .name = "invoke-event",
5589 .mode = COMMAND_EXEC,
5590 .jim_handler = jim_target_invoke_event,
5591 .help = "invoke handler for specified event",
5592 .usage = "event_name",
5593 },
5594 COMMAND_REGISTRATION_DONE
5595 };
5596
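/* A hedged example of the command this implements (hypothetical names):
 *
 *   target create $_CHIPNAME.cpu cortex_m -endian little -dap $_CHIPNAME.dap
 *
 * i.e. ?name? ?type? followed by "configure"-style options handled by
 * target_configure() above; targets without a DAP use
 * "-chain-position $_CHIPNAME.tap" instead, as enforced at the end of this
 * function. */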
5597 static int target_create(Jim_GetOptInfo *goi)
5598 {
5599 Jim_Obj *new_cmd;
5600 Jim_Cmd *cmd;
5601 const char *cp;
5602 int e;
5603 int x;
5604 struct target *target;
5605 struct command_context *cmd_ctx;
5606
5607 cmd_ctx = current_command_context(goi->interp);
5608 assert(cmd_ctx != NULL);
5609
5610 if (goi->argc < 3) {
5611 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5612 return JIM_ERR;
5613 }
5614
5615 /* COMMAND */
5616 Jim_GetOpt_Obj(goi, &new_cmd);
5617 /* does this command exist? */
5618 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5619 if (cmd) {
5620 cp = Jim_GetString(new_cmd, NULL);
5621 Jim_SetResultFormatted(goi->interp, "Command/target: %s exists", cp);
5622 return JIM_ERR;
5623 }
5624
5625 /* TYPE */
5626 e = Jim_GetOpt_String(goi, &cp, NULL);
5627 if (e != JIM_OK)
5628 return e;
5629 struct transport *tr = get_current_transport();
5630 if (tr->override_target) {
5631 e = tr->override_target(&cp);
5632 if (e != ERROR_OK) {
5633 LOG_ERROR("The selected transport doesn't support this target");
5634 return JIM_ERR;
5635 }
5636 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5637 }
5638 /* now does target type exist */
5639 for (x = 0 ; target_types[x] ; x++) {
5640 if (0 == strcmp(cp, target_types[x]->name)) {
5641 /* found */
5642 break;
5643 }
5644
5645 /* check for deprecated name */
5646 if (target_types[x]->deprecated_name) {
5647 if (0 == strcmp(cp, target_types[x]->deprecated_name)) {
5648 /* found */
5649 LOG_WARNING("target name is deprecated, use: '%s'", target_types[x]->name);
5650 break;
5651 }
5652 }
5653 }
5654 if (target_types[x] == NULL) {
5655 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5656 for (x = 0 ; target_types[x] ; x++) {
5657 if (target_types[x + 1]) {
5658 Jim_AppendStrings(goi->interp,
5659 Jim_GetResult(goi->interp),
5660 target_types[x]->name,
5661 ", ", NULL);
5662 } else {
5663 Jim_AppendStrings(goi->interp,
5664 Jim_GetResult(goi->interp),
5665 " or ",
5666 target_types[x]->name, NULL);
5667 }
5668 }
5669 return JIM_ERR;
5670 }
5671
5672 /* Create it */
5673 target = calloc(1, sizeof(struct target));
5674 if (!target) {
5675 LOG_ERROR("Out of memory");
5676 return JIM_ERR;
5677 }
5678
5679 /* set target number */
5680 target->target_number = new_target_number();
5681
5682 /* allocate a private copy of the target_type for this target instance */
5683 target->type = malloc(sizeof(struct target_type));
5684 if (!target->type) {
5685 LOG_ERROR("Out of memory");
5686 free(target);
5687 return JIM_ERR;
5688 }
5689
5690 memcpy(target->type, target_types[x], sizeof(struct target_type));
5691
5692 /* will be set by "-endian" */
5693 target->endianness = TARGET_ENDIAN_UNKNOWN;
5694
5695 /* default to first core, override with -coreid */
5696 target->coreid = 0;
5697
5698 target->working_area = 0x0;
5699 target->working_area_size = 0x0;
5700 target->working_areas = NULL;
5701 target->backup_working_area = 0;
5702
5703 target->state = TARGET_UNKNOWN;
5704 target->debug_reason = DBG_REASON_UNDEFINED;
5705 target->reg_cache = NULL;
5706 target->breakpoints = NULL;
5707 target->watchpoints = NULL;
5708 target->next = NULL;
5709 target->arch_info = NULL;
5710
5711 target->verbose_halt_msg = true;
5712
5713 target->halt_issued = false;
5714
5715 /* initialize trace information */
5716 target->trace_info = calloc(1, sizeof(struct trace));
5717 if (!target->trace_info) {
5718 LOG_ERROR("Out of memory");
5719 free(target->type);
5720 free(target);
5721 return JIM_ERR;
5722 }
5723
5724 target->dbgmsg = NULL;
5725 target->dbg_msg_enabled = 0;
5726
5727 target->endianness = TARGET_ENDIAN_UNKNOWN;
5728
5729 target->rtos = NULL;
5730 target->rtos_auto_detect = false;
5731
5732 target->gdb_port_override = NULL;
5733 target->gdb_max_connections = 1;
5734
5735 /* Do the rest as "configure" options */
5736 goi->isconfigure = 1;
5737 e = target_configure(goi, target);
5738
5739 if (e == JIM_OK) {
5740 if (target->has_dap) {
5741 if (!target->dap_configured) {
5742 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5743 e = JIM_ERR;
5744 }
5745 } else {
5746 if (!target->tap_configured) {
5747 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5748 e = JIM_ERR;
5749 }
5750 }
5751 /* tap must be set after target was configured */
5752 if (target->tap == NULL)
5753 e = JIM_ERR;
5754 }
5755
5756 if (e != JIM_OK) {
5757 rtos_destroy(target);
5758 free(target->gdb_port_override);
5759 free(target->trace_info);
5760 free(target->type);
5761 free(target);
5762 return e;
5763 }
5764
5765 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5766 /* default endian to little if not specified */
5767 target->endianness = TARGET_LITTLE_ENDIAN;
5768 }
5769
5770 cp = Jim_GetString(new_cmd, NULL);
5771 target->cmd_name = strdup(cp);
5772 if (!target->cmd_name) {
5773 LOG_ERROR("Out of memory");
5774 rtos_destroy(target);
5775 free(target->gdb_port_override);
5776 free(target->trace_info);
5777 free(target->type);
5778 free(target);
5779 return JIM_ERR;
5780 }
5781
5782 if (target->type->target_create) {
5783 e = (*(target->type->target_create))(target, goi->interp);
5784 if (e != ERROR_OK) {
5785 LOG_DEBUG("target_create failed");
5786 free(target->cmd_name);
5787 rtos_destroy(target);
5788 free(target->gdb_port_override);
5789 free(target->trace_info);
5790 free(target->type);
5791 free(target);
5792 return JIM_ERR;
5793 }
5794 }
5795
5796 /* create the target specific commands */
5797 if (target->type->commands) {
5798 e = register_commands(cmd_ctx, NULL, target->type->commands);
5799 if (ERROR_OK != e)
5800 LOG_ERROR("unable to register '%s' commands", cp);
5801 }
5802
5803 /* now - create the new target name command */
5804 const struct command_registration target_subcommands[] = {
5805 {
5806 .chain = target_instance_command_handlers,
5807 },
5808 {
5809 .chain = target->type->commands,
5810 },
5811 COMMAND_REGISTRATION_DONE
5812 };
5813 const struct command_registration target_commands[] = {
5814 {
5815 .name = cp,
5816 .mode = COMMAND_ANY,
5817 .help = "target command group",
5818 .usage = "",
5819 .chain = target_subcommands,
5820 },
5821 COMMAND_REGISTRATION_DONE
5822 };
5823 e = register_commands(cmd_ctx, NULL, target_commands);
5824 if (e != ERROR_OK) {
5825 if (target->type->deinit_target)
5826 target->type->deinit_target(target);
5827 free(target->cmd_name);
5828 rtos_destroy(target);
5829 free(target->gdb_port_override);
5830 free(target->trace_info);
5831 free(target->type);
5832 free(target);
5833 return JIM_ERR;
5834 }
5835
5836 struct command *c = command_find_in_context(cmd_ctx, cp);
5837 assert(c);
5838 command_set_handler_data(c, target);
5839
5840 /* append to end of list */
5841 append_to_list_all_targets(target);
5842
5843 cmd_ctx->current_target = target;
5844 return JIM_OK;
5845 }
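/* Illustrative example of the creation flow above; the instance and DAP
 * names are placeholders, not taken from any particular config file:
 *
 *   target create mychip.cpu cortex_m -dap mychip.dap -endian little
 *
 * The command name must be unique, the type string must match an entry in
 * target_types[], the remaining arguments are applied as "configure"
 * options (DAP-based types require -dap, others -chain-position), and a
 * per-target command group named after the target is registered last.
 */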
5846
5847 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5848 {
5849 if (argc != 1) {
5850 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5851 return JIM_ERR;
5852 }
5853 struct command_context *cmd_ctx = current_command_context(interp);
5854 assert(cmd_ctx != NULL);
5855
5856 struct target *target = get_current_target_or_null(cmd_ctx);
5857 if (target)
5858 Jim_SetResultString(interp, target_name(target), -1);
5859 return JIM_OK;
5860 }
5861
5862 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5863 {
5864 if (argc != 1) {
5865 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5866 return JIM_ERR;
5867 }
5868 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5869 for (unsigned x = 0; NULL != target_types[x]; x++) {
5870 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5871 Jim_NewStringObj(interp, target_types[x]->name, -1));
5872 }
5873 return JIM_OK;
5874 }
5875
5876 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5877 {
5878 if (argc != 1) {
5879 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5880 return JIM_ERR;
5881 }
5882 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5883 struct target *target = all_targets;
5884 while (target) {
5885 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5886 Jim_NewStringObj(interp, target_name(target), -1));
5887 target = target->next;
5888 }
5889 return JIM_OK;
5890 }
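/* Example use from a Tcl script (illustrative; output depends on the
 * configuration): "target names" and "target types" return Tcl lists,
 * while "target current" returns a single name or an empty string.
 *
 *   foreach t [target names] { echo "defined target: $t" }
 */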
5891
5892 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5893 {
5894 int i;
5895 const char *targetname;
5896 int retval, len;
5897 struct target *target = NULL;
5898 struct target_list *head, *curr, *new;
5899 curr = NULL;
5900 head = NULL;
5901
5902 retval = 0;
5903 LOG_DEBUG("%d", argc);
5904 /* argv[1] = target to associate in smp
5905 * argv[2] = target to associate in smp
5906 * argv[3] ...
5907 */
5908
5909 for (i = 1; i < argc; i++) {
5910
5911 targetname = Jim_GetString(argv[i], &len);
5912 target = get_target(targetname);
5913 LOG_DEBUG("%s ", targetname);
5914 if (target) {
5915 new = malloc(sizeof(struct target_list));
5916 new->target = target;
5917 new->next = NULL;
5918 if (head == NULL) {
5919 head = new;
5920 curr = head;
5921 } else {
5922 curr->next = new;
5923 curr = new;
5924 }
5925 }
5926 }
5927 /* now walk the list of CPUs and put each target in SMP mode */
5928 curr = head;
5929
5930 while (curr != NULL) {
5931 target = curr->target;
5932 target->smp = 1;
5933 target->head = head;
5934 curr = curr->next;
5935 }
5936
5937 if (target && target->rtos)
5938 retval = rtos_smp_init(head->target);
5939
5940 return retval;
5941 }
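/* Illustrative use (the core names are placeholders): after creating the
 * individual cores, group them so OpenOCD treats them as one SMP set:
 *
 *   target smp mychip.cpu0 mychip.cpu1
 *
 * Every listed target gets ->smp set and shares the same ->head list; if
 * the last resolved target has an RTOS attached, rtos_smp_init() is run on
 * the head of the list.
 */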
5942
5943
5944 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5945 {
5946 Jim_GetOptInfo goi;
5947 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5948 if (goi.argc < 3) {
5949 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5950 "<name> <target_type> [<target_options> ...]");
5951 return JIM_ERR;
5952 }
5953 return target_create(&goi);
5954 }
5955
5956 static const struct command_registration target_subcommand_handlers[] = {
5957 {
5958 .name = "init",
5959 .mode = COMMAND_CONFIG,
5960 .handler = handle_target_init_command,
5961 .help = "initialize targets",
5962 .usage = "",
5963 },
5964 {
5965 .name = "create",
5966 .mode = COMMAND_CONFIG,
5967 .jim_handler = jim_target_create,
5968 .usage = "name type '-chain-position' name [options ...]",
5969 .help = "Creates and selects a new target",
5970 },
5971 {
5972 .name = "current",
5973 .mode = COMMAND_ANY,
5974 .jim_handler = jim_target_current,
5975 .help = "Returns the currently selected target",
5976 },
5977 {
5978 .name = "types",
5979 .mode = COMMAND_ANY,
5980 .jim_handler = jim_target_types,
5981 .help = "Returns the available target types as "
5982 "a list of strings",
5983 },
5984 {
5985 .name = "names",
5986 .mode = COMMAND_ANY,
5987 .jim_handler = jim_target_names,
5988 .help = "Returns the names of all targets as a list of strings",
5989 },
5990 {
5991 .name = "smp",
5992 .mode = COMMAND_ANY,
5993 .jim_handler = jim_target_smp,
5994 .usage = "targetname1 targetname2 ...",
5995 .help = "gather several targets into an SMP group",
5996 },
5997
5998 COMMAND_REGISTRATION_DONE
5999 };
6000
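/* fast_load_image caches image sections in host memory as an array of
 * FastLoad entries (address/data/length); fast_load later replays the
 * cached sections to the current target with target_write_buffer().
 */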
6001 struct FastLoad {
6002 target_addr_t address;
6003 uint8_t *data;
6004 int length;
6005
6006 };
6007
6008 static int fastload_num;
6009 static struct FastLoad *fastload;
6010
6011 static void free_fastload(void)
6012 {
6013 if (fastload != NULL) {
6014 for (int i = 0; i < fastload_num; i++)
6015 free(fastload[i].data);
6016 free(fastload);
6017 fastload = NULL;
6018 }
6019 }
6020
6021 COMMAND_HANDLER(handle_fast_load_image_command)
6022 {
6023 uint8_t *buffer;
6024 size_t buf_cnt;
6025 uint32_t image_size;
6026 target_addr_t min_address = 0;
6027 target_addr_t max_address = -1;
6028
6029 struct image image;
6030
6031 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
6032 &image, &min_address, &max_address);
6033 if (ERROR_OK != retval)
6034 return retval;
6035
6036 struct duration bench;
6037 duration_start(&bench);
6038
6039 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6040 if (retval != ERROR_OK)
6041 return retval;
6042
6043 image_size = 0x0;
6044 retval = ERROR_OK;
6045 fastload_num = image.num_sections;
6046 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
6047 if (fastload == NULL) {
6048 command_print(CMD, "out of memory");
6049 image_close(&image);
6050 return ERROR_FAIL;
6051 }
6052 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
6053 for (unsigned int i = 0; i < image.num_sections; i++) {
6054 buffer = malloc(image.sections[i].size);
6055 if (buffer == NULL) {
6056 command_print(CMD, "error allocating buffer for section (%d bytes)",
6057 (int)(image.sections[i].size));
6058 retval = ERROR_FAIL;
6059 break;
6060 }
6061
6062 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6063 if (retval != ERROR_OK) {
6064 free(buffer);
6065 break;
6066 }
6067
6068 uint32_t offset = 0;
6069 uint32_t length = buf_cnt;
6070
6071 /* DANGER!!! beware of unsigned comparison here!!! */
6072
6073 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6074 (image.sections[i].base_address < max_address)) {
6075 if (image.sections[i].base_address < min_address) {
6076 /* clip addresses below */
6077 offset += min_address-image.sections[i].base_address;
6078 length -= offset;
6079 }
6080
6081 if (image.sections[i].base_address + buf_cnt > max_address)
6082 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6083
6084 fastload[i].address = image.sections[i].base_address + offset;
6085 fastload[i].data = malloc(length);
6086 if (fastload[i].data == NULL) {
6087 free(buffer);
6088 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6089 length);
6090 retval = ERROR_FAIL;
6091 break;
6092 }
6093 memcpy(fastload[i].data, buffer + offset, length);
6094 fastload[i].length = length;
6095
6096 image_size += length;
6097 command_print(CMD, "%u bytes written at address 0x%8.8x",
6098 (unsigned int)length,
6099 ((unsigned int)(image.sections[i].base_address + offset)));
6100 }
6101
6102 free(buffer);
6103 }
6104
6105 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
6106 command_print(CMD, "Loaded %" PRIu32 " bytes "
6107 "in %fs (%0.3f KiB/s)", image_size,
6108 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6109
6110 command_print(CMD,
6111 "WARNING: image has not been loaded to target!"
6112 "You can issue a 'fast_load' to finish loading.");
6113 }
6114
6115 image_close(&image);
6116
6117 if (retval != ERROR_OK)
6118 free_fastload();
6119
6120 return retval;
6121 }
6122
6123 COMMAND_HANDLER(handle_fast_load_command)
6124 {
6125 if (CMD_ARGC > 0)
6126 return ERROR_COMMAND_SYNTAX_ERROR;
6127 if (fastload == NULL) {
6128 LOG_ERROR("No image in memory");
6129 return ERROR_FAIL;
6130 }
6131 int i;
6132 int64_t ms = timeval_ms();
6133 int size = 0;
6134 int retval = ERROR_OK;
6135 for (i = 0; i < fastload_num; i++) {
6136 struct target *target = get_current_target(CMD_CTX);
6137 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6138 (unsigned int)(fastload[i].address),
6139 (unsigned int)(fastload[i].length));
6140 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6141 if (retval != ERROR_OK)
6142 break;
6143 size += fastload[i].length;
6144 }
6145 if (retval == ERROR_OK) {
6146 int64_t after = timeval_ms();
6147 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6148 }
6149 return retval;
6150 }
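/* Illustrative two-step use (the file name is a placeholder):
 *
 *   fast_load_image firmware.elf 0x0 elf
 *   ...
 *   fast_load
 *
 * The first command only buffers the (optionally clipped) sections on the
 * host; nothing reaches the target until fast_load is issued.
 */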
6151
6152 static const struct command_registration target_command_handlers[] = {
6153 {
6154 .name = "targets",
6155 .handler = handle_targets_command,
6156 .mode = COMMAND_ANY,
6157 .help = "change current default target (one parameter) "
6158 "or prints table of all targets (no parameters)",
6159 .usage = "[target]",
6160 },
6161 {
6162 .name = "target",
6163 .mode = COMMAND_CONFIG,
6164 .help = "configure target",
6165 .chain = target_subcommand_handlers,
6166 .usage = "",
6167 },
6168 COMMAND_REGISTRATION_DONE
6169 };
6170
6171 int target_register_commands(struct command_context *cmd_ctx)
6172 {
6173 return register_commands(cmd_ctx, NULL, target_command_handlers);
6174 }
6175
6176 static bool target_reset_nag = true;
6177
6178 bool get_target_reset_nag(void)
6179 {
6180 return target_reset_nag;
6181 }
6182
6183 COMMAND_HANDLER(handle_target_reset_nag)
6184 {
6185 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6186 &target_reset_nag, "Nag after each reset about options to improve "
6187 "performance");
6188 }
6189
6190 COMMAND_HANDLER(handle_ps_command)
6191 {
6192 struct target *target = get_current_target(CMD_CTX);
6193 char *display;
6194 if (target->state != TARGET_HALTED) {
6195 LOG_INFO("target not halted");
6196 return ERROR_OK;
6197 }
6198
6199 if ((target->rtos) && (target->rtos->type)
6200 && (target->rtos->type->ps_command)) {
6201 display = target->rtos->type->ps_command(target);
6202 command_print(CMD, "%s", display);
6203 free(display);
6204 return ERROR_OK;
6205 } else {
6206 LOG_INFO("ps command not supported by the current target/RTOS");
6207 return ERROR_TARGET_FAILURE;
6208 }
6209 }
6210
6211 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6212 {
6213 if (text != NULL)
6214 command_print_sameline(cmd, "%s", text);
6215 for (int i = 0; i < size; i++)
6216 command_print_sameline(cmd, " %02x", buf[i]);
6217 command_print(cmd, " ");
6218 }
6219
6220 COMMAND_HANDLER(handle_test_mem_access_command)
6221 {
6222 struct target *target = get_current_target(CMD_CTX);
6223 uint32_t test_size;
6224 int retval = ERROR_OK;
6225
6226 if (target->state != TARGET_HALTED) {
6227 LOG_INFO("target not halted");
6228 return ERROR_FAIL;
6229 }
6230
6231 if (CMD_ARGC != 1)
6232 return ERROR_COMMAND_SYNTAX_ERROR;
6233
6234 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6235
6236 /* Test reads */
6237 size_t num_bytes = test_size + 4;
6238
6239 struct working_area *wa = NULL;
6240 retval = target_alloc_working_area(target, num_bytes, &wa);
6241 if (retval != ERROR_OK) {
6242 LOG_ERROR("Not enough working area");
6243 return ERROR_FAIL;
6244 }
6245
6246 uint8_t *test_pattern = malloc(num_bytes);
6247
6248 for (size_t i = 0; i < num_bytes; i++)
6249 test_pattern[i] = rand();
6250
6251 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6252 if (retval != ERROR_OK) {
6253 LOG_ERROR("Test pattern write failed");
6254 goto out;
6255 }
6256
6257 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6258 for (int size = 1; size <= 4; size *= 2) {
6259 for (int offset = 0; offset < 4; offset++) {
6260 uint32_t count = test_size / size;
6261 size_t host_bufsiz = (count + 2) * size + host_offset;
6262 uint8_t *read_ref = malloc(host_bufsiz);
6263 uint8_t *read_buf = malloc(host_bufsiz);
6264
6265 for (size_t i = 0; i < host_bufsiz; i++) {
6266 read_ref[i] = rand();
6267 read_buf[i] = read_ref[i];
6268 }
6269 command_print_sameline(CMD,
6270 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6271 size, offset, host_offset ? "un" : "");
6272
6273 struct duration bench;
6274 duration_start(&bench);
6275
6276 retval = target_read_memory(target, wa->address + offset, size, count,
6277 read_buf + size + host_offset);
6278
6279 duration_measure(&bench);
6280
6281 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6282 command_print(CMD, "Unsupported alignment");
6283 goto next;
6284 } else if (retval != ERROR_OK) {
6285 command_print(CMD, "Memory read failed");
6286 goto next;
6287 }
6288
6289 /* replay on host */
6290 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6291
6292 /* check result */
6293 int result = memcmp(read_ref, read_buf, host_bufsiz);
6294 if (result == 0) {
6295 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6296 duration_elapsed(&bench),
6297 duration_kbps(&bench, count * size));
6298 } else {
6299 command_print(CMD, "Compare failed");
6300 binprint(CMD, "ref:", read_ref, host_bufsiz);
6301 binprint(CMD, "buf:", read_buf, host_bufsiz);
6302 }
6303 next:
6304 free(read_ref);
6305 free(read_buf);
6306 }
6307 }
6308 }
6309
6310 out:
6311 free(test_pattern);
6312
6313 if (wa != NULL)
6314 target_free_working_area(target, wa);
6315
6316 /* Test writes */
6317 num_bytes = test_size + 4 + 4 + 4;
6318
6319 retval = target_alloc_working_area(target, num_bytes, &wa);
6320 if (retval != ERROR_OK) {
6321 LOG_ERROR("Not enough working area");
6322 return ERROR_FAIL;
6323 }
6324
6325 test_pattern = malloc(num_bytes);
6326
6327 for (size_t i = 0; i < num_bytes; i++)
6328 test_pattern[i] = rand();
6329
6330 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6331 for (int size = 1; size <= 4; size *= 2) {
6332 for (int offset = 0; offset < 4; offset++) {
6333 uint32_t count = test_size / size;
6334 size_t host_bufsiz = count * size + host_offset;
6335 uint8_t *read_ref = malloc(num_bytes);
6336 uint8_t *read_buf = malloc(num_bytes);
6337 uint8_t *write_buf = malloc(host_bufsiz);
6338
6339 for (size_t i = 0; i < host_bufsiz; i++)
6340 write_buf[i] = rand();
6341 command_print_sameline(CMD,
6342 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6343 size, offset, host_offset ? "un" : "");
6344
6345 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6346 if (retval != ERROR_OK) {
6347 command_print(CMD, "Test pattern write failed");
6348 goto nextw;
6349 }
6350
6351 /* replay on host */
6352 memcpy(read_ref, test_pattern, num_bytes);
6353 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6354
6355 struct duration bench;
6356 duration_start(&bench);
6357
6358 retval = target_write_memory(target, wa->address + size + offset, size, count,
6359 write_buf + host_offset);
6360
6361 duration_measure(&bench);
6362
6363 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6364 command_print(CMD, "Unsupported alignment");
6365 goto nextw;
6366 } else if (retval != ERROR_OK) {
6367 command_print(CMD, "Memory write failed");
6368 goto nextw;
6369 }
6370
6371 /* read back */
6372 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6373 if (retval != ERROR_OK) {
6374 command_print(CMD, "Test pattern read back failed");
6375 goto nextw;
6376 }
6377
6378 /* check result */
6379 int result = memcmp(read_ref, read_buf, num_bytes);
6380 if (result == 0) {
6381 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6382 duration_elapsed(&bench),
6383 duration_kbps(&bench, count * size));
6384 } else {
6385 command_print(CMD, "Compare failed");
6386 binprint(CMD, "ref:", read_ref, num_bytes);
6387 binprint(CMD, "buf:", read_buf, num_bytes);
6388 }
6389 nextw:
6390 free(read_ref);
6391 free(read_buf);
6392 }
6393 }
6394 }
6395
6396 free(test_pattern);
6397
6398 if (wa != NULL)
6399 target_free_working_area(target, wa);
6400 return retval;
6401 }
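/* Illustrative use (the size is arbitrary): "test_mem_access 0x100"
 * allocates a working area, then exercises reads and writes of 1, 2 and 4
 * byte elements at target offsets 0..3, from/to both aligned and unaligned
 * host buffers, comparing each transfer against a host-side reference copy.
 */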
6402
6403 static const struct command_registration target_exec_command_handlers[] = {
6404 {
6405 .name = "fast_load_image",
6406 .handler = handle_fast_load_image_command,
6407 .mode = COMMAND_ANY,
6408 .help = "Load image into server memory for later use by "
6409 "fast_load; primarily for profiling",
6410 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6411 "[min_address [max_length]]",
6412 },
6413 {
6414 .name = "fast_load",
6415 .handler = handle_fast_load_command,
6416 .mode = COMMAND_EXEC,
6417 .help = "loads active fast load image to current target "
6418 "- mainly for profiling purposes",
6419 .usage = "",
6420 },
6421 {
6422 .name = "profile",
6423 .handler = handle_profile_command,
6424 .mode = COMMAND_EXEC,
6425 .usage = "seconds filename [start end]",
6426 .help = "profiling samples the CPU PC",
6427 },
6428 /** @todo don't register virt2phys() unless target supports it */
6429 {
6430 .name = "virt2phys",
6431 .handler = handle_virt2phys_command,
6432 .mode = COMMAND_ANY,
6433 .help = "translate a virtual address into a physical address",
6434 .usage = "virtual_address",
6435 },
6436 {
6437 .name = "reg",
6438 .handler = handle_reg_command,
6439 .mode = COMMAND_EXEC,
6440 .help = "display (reread from target with \"force\") or set a register; "
6441 "with no arguments, displays all registers and their values",
6442 .usage = "[(register_number|register_name) [(value|'force')]]",
6443 },
6444 {
6445 .name = "poll",
6446 .handler = handle_poll_command,
6447 .mode = COMMAND_EXEC,
6448 .help = "poll target state; or reconfigure background polling",
6449 .usage = "['on'|'off']",
6450 },
6451 {
6452 .name = "wait_halt",
6453 .handler = handle_wait_halt_command,
6454 .mode = COMMAND_EXEC,
6455 .help = "wait up to the specified number of milliseconds "
6456 "(default 5000) for a previously requested halt",
6457 .usage = "[milliseconds]",
6458 },
6459 {
6460 .name = "halt",
6461 .handler = handle_halt_command,
6462 .mode = COMMAND_EXEC,
6463 .help = "request target to halt, then wait up to the specified "
6464 "number of milliseconds (default 5000) for it to complete",
6465 .usage = "[milliseconds]",
6466 },
6467 {
6468 .name = "resume",
6469 .handler = handle_resume_command,
6470 .mode = COMMAND_EXEC,
6471 .help = "resume target execution from current PC or address",
6472 .usage = "[address]",
6473 },
6474 {
6475 .name = "reset",
6476 .handler = handle_reset_command,
6477 .mode = COMMAND_EXEC,
6478 .usage = "[run|halt|init]",
6479 .help = "Reset all targets into the specified mode. "
6480 "Default reset mode is run, if not given.",
6481 },
6482 {
6483 .name = "soft_reset_halt",
6484 .handler = handle_soft_reset_halt_command,
6485 .mode = COMMAND_EXEC,
6486 .usage = "",
6487 .help = "halt the target and do a soft reset",
6488 },
6489 {
6490 .name = "step",
6491 .handler = handle_step_command,
6492 .mode = COMMAND_EXEC,
6493 .help = "step one instruction from current PC or address",
6494 .usage = "[address]",
6495 },
6496 {
6497 .name = "mdd",
6498 .handler = handle_md_command,
6499 .mode = COMMAND_EXEC,
6500 .help = "display memory double-words",
6501 .usage = "['phys'] address [count]",
6502 },
6503 {
6504 .name = "mdw",
6505 .handler = handle_md_command,
6506 .mode = COMMAND_EXEC,
6507 .help = "display memory words",
6508 .usage = "['phys'] address [count]",
6509 },
6510 {
6511 .name = "mdh",
6512 .handler = handle_md_command,
6513 .mode = COMMAND_EXEC,
6514 .help = "display memory half-words",
6515 .usage = "['phys'] address [count]",
6516 },
6517 {
6518 .name = "mdb",
6519 .handler = handle_md_command,
6520 .mode = COMMAND_EXEC,
6521 .help = "display memory bytes",
6522 .usage = "['phys'] address [count]",
6523 },
6524 {
6525 .name = "mwd",
6526 .handler = handle_mw_command,
6527 .mode = COMMAND_EXEC,
6528 .help = "write memory double-word",
6529 .usage = "['phys'] address value [count]",
6530 },
6531 {
6532 .name = "mww",
6533 .handler = handle_mw_command,
6534 .mode = COMMAND_EXEC,
6535 .help = "write memory word",
6536 .usage = "['phys'] address value [count]",
6537 },
6538 {
6539 .name = "mwh",
6540 .handler = handle_mw_command,
6541 .mode = COMMAND_EXEC,
6542 .help = "write memory half-word",
6543 .usage = "['phys'] address value [count]",
6544 },
6545 {
6546 .name = "mwb",
6547 .handler = handle_mw_command,
6548 .mode = COMMAND_EXEC,
6549 .help = "write memory byte",
6550 .usage = "['phys'] address value [count]",
6551 },
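/* Illustrative examples for the memory display/write commands above
 * (addresses and values are placeholders):
 *
 *   mdw 0x20000000 4                 ;# display 4 words
 *   mww phys 0x20000000 0xdeadbeef   ;# write one word via the physical address space
 */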
6552 {
6553 .name = "bp",
6554 .handler = handle_bp_command,
6555 .mode = COMMAND_EXEC,
6556 .help = "list or set hardware or software breakpoint",
6557 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6558 },
6559 {
6560 .name = "rbp",
6561 .handler = handle_rbp_command,
6562 .mode = COMMAND_EXEC,
6563 .help = "remove breakpoint",
6564 .usage = "'all' | address",
6565 },
6566 {
6567 .name = "wp",
6568 .handler = handle_wp_command,
6569 .mode = COMMAND_EXEC,
6570 .help = "list (no params) or create watchpoints",
6571 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6572 },
6573 {
6574 .name = "rwp",
6575 .handler = handle_rwp_command,
6576 .mode = COMMAND_EXEC,
6577 .help = "remove watchpoint",
6578 .usage = "address",
6579 },
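/* Illustrative examples for the breakpoint/watchpoint commands above
 * (addresses and lengths are placeholders):
 *
 *   bp 0x08000400 2 hw    ;# set a 2-byte hardware breakpoint
 *   rbp 0x08000400        ;# remove it again
 *   wp 0x20000100 4 w     ;# watch a 4-byte location for writes
 */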
6580 {
6581 .name = "load_image",
6582 .handler = handle_load_image_command,
6583 .mode = COMMAND_EXEC,
6584 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6585 "[min_address] [max_length]",
6586 },
6587 {
6588 .name = "dump_image",
6589 .handler = handle_dump_image_command,
6590 .mode = COMMAND_EXEC,
6591 .usage = "filename address size",
6592 },
6593 {
6594 .name = "verify_image_checksum",
6595 .handler = handle_verify_image_checksum_command,
6596 .mode = COMMAND_EXEC,
6597 .usage = "filename [offset [type]]",
6598 },
6599 {
6600 .name = "verify_image",
6601 .handler = handle_verify_image_command,
6602 .mode = COMMAND_EXEC,
6603 .usage = "filename [offset [type]]",
6604 },
6605 {
6606 .name = "test_image",
6607 .handler = handle_test_image_command,
6608 .mode = COMMAND_EXEC,
6609 .usage = "filename [offset [type]]",
6610 },
6611 {
6612 .name = "mem2array",
6613 .mode = COMMAND_EXEC,
6614 .jim_handler = jim_mem2array,
6615 .help = "read 8/16/32 bit memory and return as a TCL array "
6616 "for script processing",
6617 .usage = "arrayname bitwidth address count",
6618 },
6619 {
6620 .name = "array2mem",
6621 .mode = COMMAND_EXEC,
6622 .jim_handler = jim_array2mem,
6623 .help = "convert a TCL array to memory locations "
6624 "and write the 8/16/32 bit values",
6625 .usage = "arrayname bitwidth address count",
6626 },
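/* Illustrative use of the Tcl array helpers above (address and count are
 * placeholders):
 *
 *   mem2array data 32 0x20000000 8   ;# read 8 words into Tcl array "data"
 *   array2mem data 32 0x20000000 8   ;# write them back
 */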
6627 {
6628 .name = "reset_nag",
6629 .handler = handle_target_reset_nag,
6630 .mode = COMMAND_ANY,
6631 .help = "Nag after each reset about options that could have been "
6632 "enabled to improve performance. ",
6633 .usage = "['enable'|'disable']",
6634 },
6635 {
6636 .name = "ps",
6637 .handler = handle_ps_command,
6638 .mode = COMMAND_EXEC,
6639 .help = "list all tasks ",
6640 .usage = " ",
6641 },
6642 {
6643 .name = "test_mem_access",
6644 .handler = handle_test_mem_access_command,
6645 .mode = COMMAND_EXEC,
6646 .help = "Test the target's memory access functions",
6647 .usage = "size",
6648 },
6649
6650 COMMAND_REGISTRATION_DONE
6651 };
6652 static int target_register_user_commands(struct command_context *cmd_ctx)
6653 {
6654 int retval = ERROR_OK;
6655 retval = target_request_register_commands(cmd_ctx);
6656 if (retval != ERROR_OK)
6657 return retval;
6658
6659 retval = trace_register_commands(cmd_ctx);
6660 if (retval != ERROR_OK)
6661 return retval;
6662
6663
6664 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6665 }
