openocd: fix Yoda conditions with checkpatch
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75
76 /* targets */
77 extern struct target_type arm7tdmi_target;
78 extern struct target_type arm720t_target;
79 extern struct target_type arm9tdmi_target;
80 extern struct target_type arm920t_target;
81 extern struct target_type arm966e_target;
82 extern struct target_type arm946e_target;
83 extern struct target_type arm926ejs_target;
84 extern struct target_type fa526_target;
85 extern struct target_type feroceon_target;
86 extern struct target_type dragonite_target;
87 extern struct target_type xscale_target;
88 extern struct target_type cortexm_target;
89 extern struct target_type cortexa_target;
90 extern struct target_type aarch64_target;
91 extern struct target_type cortexr4_target;
92 extern struct target_type arm11_target;
93 extern struct target_type ls1_sap_target;
94 extern struct target_type mips_m4k_target;
95 extern struct target_type mips_mips64_target;
96 extern struct target_type avr_target;
97 extern struct target_type dsp563xx_target;
98 extern struct target_type dsp5680xx_target;
99 extern struct target_type testee_target;
100 extern struct target_type avr32_ap7k_target;
101 extern struct target_type hla_target;
102 extern struct target_type nds32_v2_target;
103 extern struct target_type nds32_v3_target;
104 extern struct target_type nds32_v3m_target;
105 extern struct target_type or1k_target;
106 extern struct target_type quark_x10xx_target;
107 extern struct target_type quark_d20xx_target;
108 extern struct target_type stm8_target;
109 extern struct target_type riscv_target;
110 extern struct target_type mem_ap_target;
111 extern struct target_type esirisc_target;
112 extern struct target_type arcv2_target;
113
114 static struct target_type *target_types[] = {
115 &arm7tdmi_target,
116 &arm9tdmi_target,
117 &arm920t_target,
118 &arm720t_target,
119 &arm966e_target,
120 &arm946e_target,
121 &arm926ejs_target,
122 &fa526_target,
123 &feroceon_target,
124 &dragonite_target,
125 &xscale_target,
126 &cortexm_target,
127 &cortexa_target,
128 &cortexr4_target,
129 &arm11_target,
130 &ls1_sap_target,
131 &mips_m4k_target,
132 &avr_target,
133 &dsp563xx_target,
134 &dsp5680xx_target,
135 &testee_target,
136 &avr32_ap7k_target,
137 &hla_target,
138 &nds32_v2_target,
139 &nds32_v3_target,
140 &nds32_v3m_target,
141 &or1k_target,
142 &quark_x10xx_target,
143 &quark_d20xx_target,
144 &stm8_target,
145 &riscv_target,
146 &mem_ap_target,
147 &esirisc_target,
148 &arcv2_target,
149 &aarch64_target,
150 &mips_mips64_target,
151 NULL,
152 };
153
154 struct target *all_targets;
155 static struct target_event_callback *target_event_callbacks;
156 static struct target_timer_callback *target_timer_callbacks;
157 static LIST_HEAD(target_reset_callback_list);
158 static LIST_HEAD(target_trace_callback_list);
159 static const int polling_interval = 100;
160
161 static const struct jim_nvp nvp_assert[] = {
162 { .name = "assert", NVP_ASSERT },
163 { .name = "deassert", NVP_DEASSERT },
164 { .name = "T", NVP_ASSERT },
165 { .name = "F", NVP_DEASSERT },
166 { .name = "t", NVP_ASSERT },
167 { .name = "f", NVP_DEASSERT },
168 { .name = NULL, .value = -1 }
169 };
170
171 static const struct jim_nvp nvp_error_target[] = {
172 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
173 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
174 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
175 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
176 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
177 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
178 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
179 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
180 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
181 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
182 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
183 { .value = -1, .name = NULL }
184 };
185
186 static const char *target_strerror_safe(int err)
187 {
188 const struct jim_nvp *n;
189
190 n = jim_nvp_value2name_simple(nvp_error_target, err);
191 if (!n->name)
192 return "unknown";
193 else
194 return n->name;
195 }
196
197 static const struct jim_nvp nvp_target_event[] = {
198
199 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
200 { .value = TARGET_EVENT_HALTED, .name = "halted" },
201 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
202 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
203 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
204 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
205 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
206
207 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
208 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
209
210 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
211 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
212 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
213 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
214 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
215 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
216 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
217 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
218
219 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
220 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
221 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
222
223 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
224 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
225
226 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
227 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
228
229 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
230 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
231
232 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
233 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
234
235 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
236
237 { .name = NULL, .value = -1 }
238 };
239
240 static const struct jim_nvp nvp_target_state[] = {
241 { .name = "unknown", .value = TARGET_UNKNOWN },
242 { .name = "running", .value = TARGET_RUNNING },
243 { .name = "halted", .value = TARGET_HALTED },
244 { .name = "reset", .value = TARGET_RESET },
245 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
246 { .name = NULL, .value = -1 },
247 };
248
249 static const struct jim_nvp nvp_target_debug_reason[] = {
250 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
251 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
252 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
253 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
254 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
255 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
256 { .name = "program-exit", .value = DBG_REASON_EXIT },
257 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
258 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
259 { .name = NULL, .value = -1 },
260 };
261
262 static const struct jim_nvp nvp_target_endian[] = {
263 { .name = "big", .value = TARGET_BIG_ENDIAN },
264 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
265 { .name = "be", .value = TARGET_BIG_ENDIAN },
266 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
267 { .name = NULL, .value = -1 },
268 };
269
270 static const struct jim_nvp nvp_reset_modes[] = {
271 { .name = "unknown", .value = RESET_UNKNOWN },
272 { .name = "run", .value = RESET_RUN },
273 { .name = "halt", .value = RESET_HALT },
274 { .name = "init", .value = RESET_INIT },
275 { .name = NULL, .value = -1 },
276 };
277
278 const char *debug_reason_name(struct target *t)
279 {
280 const char *cp;
281
282 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
283 t->debug_reason)->name;
284 if (!cp) {
285 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
286 cp = "(*BUG*unknown*BUG*)";
287 }
288 return cp;
289 }
290
291 const char *target_state_name(struct target *t)
292 {
293 const char *cp;
294 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
295 if (!cp) {
296 LOG_ERROR("Invalid target state: %d", (int)(t->state));
297 cp = "(*BUG*unknown*BUG*)";
298 }
299
300 if (!target_was_examined(t) && t->defer_examine)
301 cp = "examine deferred";
302
303 return cp;
304 }
305
306 const char *target_event_name(enum target_event event)
307 {
308 const char *cp;
309 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
310 if (!cp) {
311 LOG_ERROR("Invalid target event: %d", (int)(event));
312 cp = "(*BUG*unknown*BUG*)";
313 }
314 return cp;
315 }
316
317 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
318 {
319 const char *cp;
320 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
321 if (!cp) {
322 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
323 cp = "(*BUG*unknown*BUG*)";
324 }
325 return cp;
326 }
327
328 /* determine the number of the new target */
329 static int new_target_number(void)
330 {
331 struct target *t;
332 int x;
333
334 /* number is 0 based */
335 x = -1;
336 t = all_targets;
337 while (t) {
338 if (x < t->target_number)
339 x = t->target_number;
340 t = t->next;
341 }
342 return x + 1;
343 }
344
345 static void append_to_list_all_targets(struct target *target)
346 {
347 struct target **t = &all_targets;
348
349 while (*t)
350 t = &((*t)->next);
351 *t = target;
352 }
353
354 /* read a uint64_t from a buffer in target memory endianness */
355 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
356 {
357 if (target->endianness == TARGET_LITTLE_ENDIAN)
358 return le_to_h_u64(buffer);
359 else
360 return be_to_h_u64(buffer);
361 }
362
363 /* read a uint32_t from a buffer in target memory endianness */
364 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
365 {
366 if (target->endianness == TARGET_LITTLE_ENDIAN)
367 return le_to_h_u32(buffer);
368 else
369 return be_to_h_u32(buffer);
370 }
371
372 /* read a uint24_t from a buffer in target memory endianness */
373 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 return le_to_h_u24(buffer);
377 else
378 return be_to_h_u24(buffer);
379 }
380
381 /* read a uint16_t from a buffer in target memory endianness */
382 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 return le_to_h_u16(buffer);
386 else
387 return be_to_h_u16(buffer);
388 }
389
390 /* write a uint64_t to a buffer in target memory endianness */
391 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 h_u64_to_le(buffer, value);
395 else
396 h_u64_to_be(buffer, value);
397 }
398
399 /* write a uint32_t to a buffer in target memory endianness */
400 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
401 {
402 if (target->endianness == TARGET_LITTLE_ENDIAN)
403 h_u32_to_le(buffer, value);
404 else
405 h_u32_to_be(buffer, value);
406 }
407
408 /* write a uint24_t to a buffer in target memory endianness */
409 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
410 {
411 if (target->endianness == TARGET_LITTLE_ENDIAN)
412 h_u24_to_le(buffer, value);
413 else
414 h_u24_to_be(buffer, value);
415 }
416
417 /* write a uint16_t to a buffer in target memory endianness */
418 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
419 {
420 if (target->endianness == TARGET_LITTLE_ENDIAN)
421 h_u16_to_le(buffer, value);
422 else
423 h_u16_to_be(buffer, value);
424 }
425
426 /* write a uint8_t to a buffer in target memory endianness */
427 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
428 {
429 *buffer = value;
430 }
431
432 /* read a uint64_t array from a buffer in target memory endianness */
433 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
434 {
435 uint32_t i;
436 for (i = 0; i < count; i++)
437 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
438 }
439
440 /* read a uint32_t array from a buffer in target memory endianness */
441 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
442 {
443 uint32_t i;
444 for (i = 0; i < count; i++)
445 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
446 }
447
448 /* read a uint16_t array from a buffer in target memory endianness */
449 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
450 {
451 uint32_t i;
452 for (i = 0; i < count; i++)
453 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
454 }
455
456 /* write a uint64_t array to a buffer in target memory endianness */
457 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
458 {
459 uint32_t i;
460 for (i = 0; i < count; i++)
461 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
462 }
463
464 /* write a uint32_t array to a buffer in target memory endianness */
465 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
466 {
467 uint32_t i;
468 for (i = 0; i < count; i++)
469 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
470 }
471
472 /* write a uint16_t array to a buffer in target memory endianness */
473 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
474 {
475 uint32_t i;
476 for (i = 0; i < count; i++)
477 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
478 }
479
480 /* return a pointer to a configured target; id is name or number */
481 struct target *get_target(const char *id)
482 {
483 struct target *target;
484
485 /* try as tcl target name */
486 for (target = all_targets; target; target = target->next) {
487 if (!target_name(target))
488 continue;
489 if (strcmp(id, target_name(target)) == 0)
490 return target;
491 }
492
493 /* It's OK to remove this fallback sometime after August 2010 or so */
494
495 /* no match, try as number */
496 unsigned num;
497 if (parse_uint(id, &num) != ERROR_OK)
498 return NULL;
499
500 for (target = all_targets; target; target = target->next) {
501 if (target->target_number == (int)num) {
502 LOG_WARNING("use '%s' as target identifier, not '%u'",
503 target_name(target), num);
504 return target;
505 }
506 }
507
508 return NULL;
509 }
510
511 /* returns a pointer to the n-th configured target */
512 struct target *get_target_by_num(int num)
513 {
514 struct target *target = all_targets;
515
516 while (target) {
517 if (target->target_number == num)
518 return target;
519 target = target->next;
520 }
521
522 return NULL;
523 }
524
525 struct target *get_current_target(struct command_context *cmd_ctx)
526 {
527 struct target *target = get_current_target_or_null(cmd_ctx);
528
529 if (!target) {
530 LOG_ERROR("BUG: current_target out of bounds");
531 exit(-1);
532 }
533
534 return target;
535 }
536
537 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
538 {
539 return cmd_ctx->current_target_override
540 ? cmd_ctx->current_target_override
541 : cmd_ctx->current_target;
542 }
543
544 int target_poll(struct target *target)
545 {
546 int retval;
547
548 /* We can't poll until after examine */
549 if (!target_was_examined(target)) {
550 /* Fail silently lest we pollute the log */
551 return ERROR_FAIL;
552 }
553
554 retval = target->type->poll(target);
555 if (retval != ERROR_OK)
556 return retval;
557
558 if (target->halt_issued) {
559 if (target->state == TARGET_HALTED)
560 target->halt_issued = false;
561 else {
562 int64_t t = timeval_ms() - target->halt_issued_time;
563 if (t > DEFAULT_HALT_TIMEOUT) {
564 target->halt_issued = false;
565 LOG_INFO("Halt timed out, wake up GDB.");
566 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
567 }
568 }
569 }
570
571 return ERROR_OK;
572 }
573
574 int target_halt(struct target *target)
575 {
576 int retval;
577 /* We can't poll until after examine */
578 if (!target_was_examined(target)) {
579 LOG_ERROR("Target not examined yet");
580 return ERROR_FAIL;
581 }
582
583 retval = target->type->halt(target);
584 if (retval != ERROR_OK)
585 return retval;
586
587 target->halt_issued = true;
588 target->halt_issued_time = timeval_ms();
589
590 return ERROR_OK;
591 }
592
593 /**
594 * Make the target (re)start executing using its saved execution
595 * context (possibly with some modifications).
596 *
597 * @param target Which target should start executing.
598 * @param current True to use the target's saved program counter instead
599 * of the address parameter
600 * @param address Optionally used as the program counter.
601 * @param handle_breakpoints True iff breakpoints at the resumption PC
602 * should be skipped. (For example, maybe execution was stopped by
603 * such a breakpoint, in which case it would be counterproductive to
604 * let it re-trigger.)
605 * @param debug_execution False if all working areas allocated by OpenOCD
606 * should be released and/or restored to their original contents.
607 * (This would for example be true to run some downloaded "helper"
608 * algorithm code, which resides in one such working buffer and uses
609 * another for data storage.)
610 *
611 * @todo Resolve the ambiguity about what the "debug_execution" flag
612 * signifies. For example, Target implementations don't agree on how
613 * it relates to invalidation of the register cache, or to whether
614 * breakpoints and watchpoints should be enabled. (It would seem wrong
615 * to enable breakpoints when running downloaded "helper" algorithms
616 * (debug_execution true), since the breakpoints would be set to match
617 * target firmware being debugged, not the helper algorithm.... and
618 * enabling them could cause such helpers to malfunction (for example,
619 * by overwriting data with a breakpoint instruction). On the other
620 * hand the infrastructure for running such helpers might use this
621 * procedure but rely on hardware breakpoint to detect termination.)
622 */
623 int target_resume(struct target *target, int current, target_addr_t address,
624 int handle_breakpoints, int debug_execution)
625 {
626 int retval;
627
628 /* We can't poll until after examine */
629 if (!target_was_examined(target)) {
630 LOG_ERROR("Target not examined yet");
631 return ERROR_FAIL;
632 }
633
634 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
635
636 /* note that resume *must* be asynchronous. The CPU can halt before
637 * we poll. The CPU can even halt at the current PC as a result of
638 * a software breakpoint being inserted by (a bug?) the application.
639 */
640 /*
641 * resume() triggers the event 'resumed'. The execution of TCL commands
642 * in the event handler causes the polling of targets. If the target has
643 * already halted for a breakpoint, polling will run the 'halted' event
644 * handler before the pending 'resumed' handler.
645 * Disable polling during resume() to guarantee the execution of handlers
646 * in the correct order.
647 */
648 bool save_poll = jtag_poll_get_enabled();
649 jtag_poll_set_enabled(false);
650 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
651 jtag_poll_set_enabled(save_poll);
652 if (retval != ERROR_OK)
653 return retval;
654
655 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
656
657 return retval;
658 }
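/*
 * Illustrative sketch only (not part of the original file): a typical call to
 * target_resume() from a caller that already holds a halted "target". The
 * argument values and the surrounding error handling are assumptions.
 */
#if 0
	/* Resume at the saved PC; the address argument is ignored when current is set. */
	retval = target_resume(target,
			1,	/* current: use the saved program counter */
			0,	/* address: unused when current is set */
			1,	/* handle_breakpoints: skip a breakpoint at the resume PC */
			0);	/* debug_execution: normal (non-debug) execution */
	if (retval != ERROR_OK)
		LOG_ERROR("failed to resume target %s", target_name(target));
#endif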
659
660 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
661 {
662 char buf[100];
663 int retval;
664 struct jim_nvp *n;
665 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
666 if (!n->name) {
667 LOG_ERROR("invalid reset mode");
668 return ERROR_FAIL;
669 }
670
671 struct target *target;
672 for (target = all_targets; target; target = target->next)
673 target_call_reset_callbacks(target, reset_mode);
674
675 /* disable polling during reset to make reset event scripts
676 * more predictable, i.e. dr/irscan & pathmove in events will
677 * not have JTAG operations injected into the middle of a sequence.
678 */
679 bool save_poll = jtag_poll_get_enabled();
680
681 jtag_poll_set_enabled(false);
682
683 sprintf(buf, "ocd_process_reset %s", n->name);
684 retval = Jim_Eval(cmd->ctx->interp, buf);
685
686 jtag_poll_set_enabled(save_poll);
687
688 if (retval != JIM_OK) {
689 Jim_MakeErrorMessage(cmd->ctx->interp);
690 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
691 return ERROR_FAIL;
692 }
693
694 /* We want any events to be processed before the prompt */
695 retval = target_call_timer_callbacks_now();
696
697 for (target = all_targets; target; target = target->next) {
698 target->type->check_reset(target);
699 target->running_alg = false;
700 }
701
702 return retval;
703 }
704
705 static int identity_virt2phys(struct target *target,
706 target_addr_t virtual, target_addr_t *physical)
707 {
708 *physical = virtual;
709 return ERROR_OK;
710 }
711
712 static int no_mmu(struct target *target, int *enabled)
713 {
714 *enabled = 0;
715 return ERROR_OK;
716 }
717
718 static int default_examine(struct target *target)
719 {
720 target_set_examined(target);
721 return ERROR_OK;
722 }
723
724 /* no check by default */
725 static int default_check_reset(struct target *target)
726 {
727 return ERROR_OK;
728 }
729
730 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
731 * Keep in sync */
732 int target_examine_one(struct target *target)
733 {
734 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
735
736 int retval = target->type->examine(target);
737 if (retval != ERROR_OK) {
738 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
739 return retval;
740 }
741
742 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
743
744 return ERROR_OK;
745 }
746
747 static int jtag_enable_callback(enum jtag_event event, void *priv)
748 {
749 struct target *target = priv;
750
751 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
752 return ERROR_OK;
753
754 jtag_unregister_event_callback(jtag_enable_callback, target);
755
756 return target_examine_one(target);
757 }
758
759 /* Targets that correctly implement init + examine, i.e.
760 * no communication with target during init:
761 *
762 * XScale
763 */
764 int target_examine(void)
765 {
766 int retval = ERROR_OK;
767 struct target *target;
768
769 for (target = all_targets; target; target = target->next) {
770 /* defer examination, but don't skip it */
771 if (!target->tap->enabled) {
772 jtag_register_event_callback(jtag_enable_callback,
773 target);
774 continue;
775 }
776
777 if (target->defer_examine)
778 continue;
779
780 int retval2 = target_examine_one(target);
781 if (retval2 != ERROR_OK) {
782 LOG_WARNING("target %s examination failed", target_name(target));
783 retval = retval2;
784 }
785 }
786 return retval;
787 }
788
789 const char *target_type_name(struct target *target)
790 {
791 return target->type->name;
792 }
793
794 static int target_soft_reset_halt(struct target *target)
795 {
796 if (!target_was_examined(target)) {
797 LOG_ERROR("Target not examined yet");
798 return ERROR_FAIL;
799 }
800 if (!target->type->soft_reset_halt) {
801 LOG_ERROR("Target %s does not support soft_reset_halt",
802 target_name(target));
803 return ERROR_FAIL;
804 }
805 return target->type->soft_reset_halt(target);
806 }
807
808 /**
809 * Downloads a target-specific native code algorithm to the target,
810 * and executes it. Note that some targets may need to set up, enable,
811 * and tear down a breakpoint (hard or soft) to detect algorithm
812 * termination, while others may support lower overhead schemes where
813 * soft breakpoints embedded in the algorithm automatically terminate the
814 * algorithm.
815 *
816 * @param target used to run the algorithm
817 * @param num_mem_params
818 * @param mem_params
819 * @param num_reg_params
820 * @param reg_param
821 * @param entry_point
822 * @param exit_point
823 * @param timeout_ms
824 * @param arch_info target-specific description of the algorithm.
825 */
826 int target_run_algorithm(struct target *target,
827 int num_mem_params, struct mem_param *mem_params,
828 int num_reg_params, struct reg_param *reg_param,
829 uint32_t entry_point, uint32_t exit_point,
830 int timeout_ms, void *arch_info)
831 {
832 int retval = ERROR_FAIL;
833
834 if (!target_was_examined(target)) {
835 LOG_ERROR("Target not examined yet");
836 goto done;
837 }
838 if (!target->type->run_algorithm) {
839 LOG_ERROR("Target type '%s' does not support %s",
840 target_type_name(target), __func__);
841 goto done;
842 }
843
844 target->running_alg = true;
845 retval = target->type->run_algorithm(target,
846 num_mem_params, mem_params,
847 num_reg_params, reg_param,
848 entry_point, exit_point, timeout_ms, arch_info);
849 target->running_alg = false;
850
851 done:
852 return retval;
853 }
854
855 /**
856 * Executes a target-specific native code algorithm and leaves it running.
857 *
858 * @param target used to run the algorithm
859 * @param num_mem_params
860 * @param mem_params
861 * @param num_reg_params
862 * @param reg_params
863 * @param entry_point
864 * @param exit_point
865 * @param arch_info target-specific description of the algorithm.
866 */
867 int target_start_algorithm(struct target *target,
868 int num_mem_params, struct mem_param *mem_params,
869 int num_reg_params, struct reg_param *reg_params,
870 uint32_t entry_point, uint32_t exit_point,
871 void *arch_info)
872 {
873 int retval = ERROR_FAIL;
874
875 if (!target_was_examined(target)) {
876 LOG_ERROR("Target not examined yet");
877 goto done;
878 }
879 if (!target->type->start_algorithm) {
880 LOG_ERROR("Target type '%s' does not support %s",
881 target_type_name(target), __func__);
882 goto done;
883 }
884 if (target->running_alg) {
885 LOG_ERROR("Target is already running an algorithm");
886 goto done;
887 }
888
889 target->running_alg = true;
890 retval = target->type->start_algorithm(target,
891 num_mem_params, mem_params,
892 num_reg_params, reg_params,
893 entry_point, exit_point, arch_info);
894
895 done:
896 return retval;
897 }
898
899 /**
900 * Waits for an algorithm started with target_start_algorithm() to complete.
901 *
902 * @param target used to run the algorithm
903 * @param num_mem_params
904 * @param mem_params
905 * @param num_reg_params
906 * @param reg_params
907 * @param exit_point
908 * @param timeout_ms
909 * @param arch_info target-specific description of the algorithm.
910 */
911 int target_wait_algorithm(struct target *target,
912 int num_mem_params, struct mem_param *mem_params,
913 int num_reg_params, struct reg_param *reg_params,
914 uint32_t exit_point, int timeout_ms,
915 void *arch_info)
916 {
917 int retval = ERROR_FAIL;
918
919 if (!target->type->wait_algorithm) {
920 LOG_ERROR("Target type '%s' does not support %s",
921 target_type_name(target), __func__);
922 goto done;
923 }
924 if (!target->running_alg) {
925 LOG_ERROR("Target is not running an algorithm");
926 goto done;
927 }
928
929 retval = target->type->wait_algorithm(target,
930 num_mem_params, mem_params,
931 num_reg_params, reg_params,
932 exit_point, timeout_ms, arch_info);
933 if (retval != ERROR_TARGET_TIMEOUT)
934 target->running_alg = false;
935
936 done:
937 return retval;
938 }
939
940 /**
941 * Streams data to a circular buffer on target intended for consumption by code
942 * running asynchronously on target.
943 *
944 * This is intended for applications where target-specific native code runs
945 * on the target, receives data from the circular buffer, does something with
946 * it (most likely writing it to a flash memory), and advances the circular
947 * buffer pointer.
948 *
949 * This assumes that the helper algorithm has already been loaded to the target,
950 * but has not been started yet. Given memory and register parameters are passed
951 * to the algorithm.
952 *
953 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
954 * following format:
955 *
956 * [buffer_start + 0, buffer_start + 4):
957 * Write Pointer address (aka head). Written and updated by this
958 * routine when new data is written to the circular buffer.
959 * [buffer_start + 4, buffer_start + 8):
960 * Read Pointer address (aka tail). Updated by code running on the
961 * target after it consumes data.
962 * [buffer_start + 8, buffer_start + buffer_size):
963 * Circular buffer contents.
964 *
965 * See contrib/loaders/flash/stm32f1x.S for an example.
966 *
967 * @param target used to run the algorithm
968 * @param buffer address on the host where data to be sent is located
969 * @param count number of blocks to send
970 * @param block_size size in bytes of each block
971 * @param num_mem_params count of memory-based params to pass to algorithm
972 * @param mem_params memory-based params to pass to algorithm
973 * @param num_reg_params count of register-based params to pass to algorithm
974 * @param reg_params register-based params to pass to algorithm
975 * @param buffer_start address on the target of the circular buffer structure
976 * @param buffer_size size of the circular buffer structure
977 * @param entry_point address on the target to execute to start the algorithm
978 * @param exit_point address at which to set a breakpoint to catch the
979 * end of the algorithm; can be 0 if target triggers a breakpoint itself
980 * @param arch_info
981 */
982
983 int target_run_flash_async_algorithm(struct target *target,
984 const uint8_t *buffer, uint32_t count, int block_size,
985 int num_mem_params, struct mem_param *mem_params,
986 int num_reg_params, struct reg_param *reg_params,
987 uint32_t buffer_start, uint32_t buffer_size,
988 uint32_t entry_point, uint32_t exit_point, void *arch_info)
989 {
990 int retval;
991 int timeout = 0;
992
993 const uint8_t *buffer_orig = buffer;
994
995 /* Set up working area. First word is write pointer, second word is read pointer,
996 * rest is fifo data area. */
997 uint32_t wp_addr = buffer_start;
998 uint32_t rp_addr = buffer_start + 4;
999 uint32_t fifo_start_addr = buffer_start + 8;
1000 uint32_t fifo_end_addr = buffer_start + buffer_size;
1001
1002 uint32_t wp = fifo_start_addr;
1003 uint32_t rp = fifo_start_addr;
1004
1005 /* validate block_size is 2^n */
1006 assert(!block_size || !(block_size & (block_size - 1)));
1007
1008 retval = target_write_u32(target, wp_addr, wp);
1009 if (retval != ERROR_OK)
1010 return retval;
1011 retval = target_write_u32(target, rp_addr, rp);
1012 if (retval != ERROR_OK)
1013 return retval;
1014
1015 /* Start up algorithm on target and let it idle while writing the first chunk */
1016 retval = target_start_algorithm(target, num_mem_params, mem_params,
1017 num_reg_params, reg_params,
1018 entry_point,
1019 exit_point,
1020 arch_info);
1021
1022 if (retval != ERROR_OK) {
1023 LOG_ERROR("error starting target flash write algorithm");
1024 return retval;
1025 }
1026
1027 while (count > 0) {
1028
1029 retval = target_read_u32(target, rp_addr, &rp);
1030 if (retval != ERROR_OK) {
1031 LOG_ERROR("failed to get read pointer");
1032 break;
1033 }
1034
1035 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1036 (size_t) (buffer - buffer_orig), count, wp, rp);
1037
1038 if (rp == 0) {
1039 LOG_ERROR("flash write algorithm aborted by target");
1040 retval = ERROR_FLASH_OPERATION_FAILED;
1041 break;
1042 }
1043
1044 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1045 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1046 break;
1047 }
1048
1049 /* Count the number of bytes available in the fifo without
1050 * crossing the wrap around. Make sure to not fill it completely,
1051 * because that would make wp == rp and that's the empty condition. */
1052 uint32_t thisrun_bytes;
1053 if (rp > wp)
1054 thisrun_bytes = rp - wp - block_size;
1055 else if (rp > fifo_start_addr)
1056 thisrun_bytes = fifo_end_addr - wp;
1057 else
1058 thisrun_bytes = fifo_end_addr - wp - block_size;
1059
1060 if (thisrun_bytes == 0) {
1061 /* Throttle polling a bit if transfer is (much) faster than flash
1062 * programming. The exact delay shouldn't matter as long as it's
1063 * less than buffer size / flash speed. This is very unlikely to
1064 * run when using high latency connections such as USB. */
1065 alive_sleep(2);
1066
1067 /* To stop an infinite loop on some targets, check and increment a timeout.
1068 * This issue was observed on a Stellaris using the new ICDI interface. */
1069 if (timeout++ >= 2500) {
1070 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1071 return ERROR_FLASH_OPERATION_FAILED;
1072 }
1073 continue;
1074 }
1075
1076 /* reset our timeout */
1077 timeout = 0;
1078
1079 /* Limit to the amount of data we actually want to write */
1080 if (thisrun_bytes > count * block_size)
1081 thisrun_bytes = count * block_size;
1082
1083 /* Force end of large blocks to be word aligned */
1084 if (thisrun_bytes >= 16)
1085 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1086
1087 /* Write data to fifo */
1088 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1089 if (retval != ERROR_OK)
1090 break;
1091
1092 /* Update counters and wrap write pointer */
1093 buffer += thisrun_bytes;
1094 count -= thisrun_bytes / block_size;
1095 wp += thisrun_bytes;
1096 if (wp >= fifo_end_addr)
1097 wp = fifo_start_addr;
1098
1099 /* Store updated write pointer to target */
1100 retval = target_write_u32(target, wp_addr, wp);
1101 if (retval != ERROR_OK)
1102 break;
1103
1104 /* Avoid GDB timeouts */
1105 keep_alive();
1106 }
1107
1108 if (retval != ERROR_OK) {
1109 /* abort flash write algorithm on target */
1110 target_write_u32(target, wp_addr, 0);
1111 }
1112
1113 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1114 num_reg_params, reg_params,
1115 exit_point,
1116 10000,
1117 arch_info);
1118
1119 if (retval2 != ERROR_OK) {
1120 LOG_ERROR("error waiting for target flash write algorithm");
1121 retval = retval2;
1122 }
1123
1124 if (retval == ERROR_OK) {
1125 /* check if algorithm set rp = 0 after fifo writer loop finished */
1126 retval = target_read_u32(target, rp_addr, &rp);
1127 if (retval == ERROR_OK && rp == 0) {
1128 LOG_ERROR("flash write algorithm aborted by target");
1129 retval = ERROR_FLASH_OPERATION_FAILED;
1130 }
1131 }
1132
1133 return retval;
1134 }
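/*
 * Illustrative sketch only (not part of the original file): how a flash driver
 * might feed target_run_flash_async_algorithm(). The loader image address, the
 * FIFO location/size and the use of r0 as a parameter register are assumptions
 * for the example; real drivers also pass a target-specific arch_info (e.g.
 * struct armv7m_algorithm on Cortex-M) instead of NULL.
 */
#if 0
static int example_async_flash_write(struct target *target,
		const uint8_t *data, uint32_t byte_count)
{
	struct reg_param reg_params[1];

	/* Tell the (hypothetical) loader how many bytes to expect, in r0. */
	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, byte_count);

	int retval = target_run_flash_async_algorithm(target,
			data, byte_count / 4, 4,		/* count of 4-byte blocks */
			0, NULL,				/* no memory parameters */
			1, reg_params,
			EXAMPLE_FIFO_ADDR, EXAMPLE_FIFO_SIZE,	/* wp, rp and data area on target */
			EXAMPLE_LOADER_ADDR,			/* entry point of the loader */
			0,					/* loader raises its own breakpoint on exit */
			NULL);					/* arch_info: target specific in practice */

	destroy_reg_param(&reg_params[0]);
	return retval;
}
#endif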
1135
1136 int target_run_read_async_algorithm(struct target *target,
1137 uint8_t *buffer, uint32_t count, int block_size,
1138 int num_mem_params, struct mem_param *mem_params,
1139 int num_reg_params, struct reg_param *reg_params,
1140 uint32_t buffer_start, uint32_t buffer_size,
1141 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1142 {
1143 int retval;
1144 int timeout = 0;
1145
1146 const uint8_t *buffer_orig = buffer;
1147
1148 /* Set up working area. First word is write pointer, second word is read pointer,
1149 * rest is fifo data area. */
1150 uint32_t wp_addr = buffer_start;
1151 uint32_t rp_addr = buffer_start + 4;
1152 uint32_t fifo_start_addr = buffer_start + 8;
1153 uint32_t fifo_end_addr = buffer_start + buffer_size;
1154
1155 uint32_t wp = fifo_start_addr;
1156 uint32_t rp = fifo_start_addr;
1157
1158 /* validate block_size is 2^n */
1159 assert(!block_size || !(block_size & (block_size - 1)));
1160
1161 retval = target_write_u32(target, wp_addr, wp);
1162 if (retval != ERROR_OK)
1163 return retval;
1164 retval = target_write_u32(target, rp_addr, rp);
1165 if (retval != ERROR_OK)
1166 return retval;
1167
1168 /* Start up algorithm on target */
1169 retval = target_start_algorithm(target, num_mem_params, mem_params,
1170 num_reg_params, reg_params,
1171 entry_point,
1172 exit_point,
1173 arch_info);
1174
1175 if (retval != ERROR_OK) {
1176 LOG_ERROR("error starting target flash read algorithm");
1177 return retval;
1178 }
1179
1180 while (count > 0) {
1181 retval = target_read_u32(target, wp_addr, &wp);
1182 if (retval != ERROR_OK) {
1183 LOG_ERROR("failed to get write pointer");
1184 break;
1185 }
1186
1187 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1188 (size_t)(buffer - buffer_orig), count, wp, rp);
1189
1190 if (wp == 0) {
1191 LOG_ERROR("flash read algorithm aborted by target");
1192 retval = ERROR_FLASH_OPERATION_FAILED;
1193 break;
1194 }
1195
1196 if (((wp - fifo_start_addr) & (block_size - 1)) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1197 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1198 break;
1199 }
1200
1201 /* Count the number of bytes available in the fifo without
1202 * crossing the wrap around. */
1203 uint32_t thisrun_bytes;
1204 if (wp >= rp)
1205 thisrun_bytes = wp - rp;
1206 else
1207 thisrun_bytes = fifo_end_addr - rp;
1208
1209 if (thisrun_bytes == 0) {
1210 /* Throttle polling a bit if transfer is (much) faster than flash
1211 * reading. The exact delay shouldn't matter as long as it's
1212 * less than buffer size / flash speed. This is very unlikely to
1213 * run when using high latency connections such as USB. */
1214 alive_sleep(2);
1215
1216 /* To stop an infinite loop on some targets, check and increment a timeout.
1217 * This issue was observed on a Stellaris using the new ICDI interface. */
1218 if (timeout++ >= 2500) {
1219 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1220 return ERROR_FLASH_OPERATION_FAILED;
1221 }
1222 continue;
1223 }
1224
1225 /* Reset our timeout */
1226 timeout = 0;
1227
1228 /* Limit to the amount of data we actually want to read */
1229 if (thisrun_bytes > count * block_size)
1230 thisrun_bytes = count * block_size;
1231
1232 /* Force end of large blocks to be word aligned */
1233 if (thisrun_bytes >= 16)
1234 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1235
1236 /* Read data from fifo */
1237 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1238 if (retval != ERROR_OK)
1239 break;
1240
1241 /* Update counters and wrap read pointer */
1242 buffer += thisrun_bytes;
1243 count -= thisrun_bytes / block_size;
1244 rp += thisrun_bytes;
1245 if (rp >= fifo_end_addr)
1246 rp = fifo_start_addr;
1247
1248 /* Store updated read pointer to target */
1249 retval = target_write_u32(target, rp_addr, rp);
1250 if (retval != ERROR_OK)
1251 break;
1252
1253 /* Avoid GDB timeouts */
1254 keep_alive();
1255
1256 }
1257
1258 if (retval != ERROR_OK) {
1259 /* abort flash read algorithm on target */
1260 target_write_u32(target, rp_addr, 0);
1261 }
1262
1263 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1264 num_reg_params, reg_params,
1265 exit_point,
1266 10000,
1267 arch_info);
1268
1269 if (retval2 != ERROR_OK) {
1270 LOG_ERROR("error waiting for target flash read algorithm");
1271 retval = retval2;
1272 }
1273
1274 if (retval == ERROR_OK) {
1275 /* check if algorithm set wp = 0 after fifo writer loop finished */
1276 retval = target_read_u32(target, wp_addr, &wp);
1277 if (retval == ERROR_OK && wp == 0) {
1278 LOG_ERROR("flash read algorithm aborted by target");
1279 retval = ERROR_FLASH_OPERATION_FAILED;
1280 }
1281 }
1282
1283 return retval;
1284 }
1285
1286 int target_read_memory(struct target *target,
1287 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1288 {
1289 if (!target_was_examined(target)) {
1290 LOG_ERROR("Target not examined yet");
1291 return ERROR_FAIL;
1292 }
1293 if (!target->type->read_memory) {
1294 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1295 return ERROR_FAIL;
1296 }
1297 return target->type->read_memory(target, address, size, count, buffer);
1298 }
1299
1300 int target_read_phys_memory(struct target *target,
1301 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1302 {
1303 if (!target_was_examined(target)) {
1304 LOG_ERROR("Target not examined yet");
1305 return ERROR_FAIL;
1306 }
1307 if (!target->type->read_phys_memory) {
1308 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1309 return ERROR_FAIL;
1310 }
1311 return target->type->read_phys_memory(target, address, size, count, buffer);
1312 }
1313
1314 int target_write_memory(struct target *target,
1315 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1316 {
1317 if (!target_was_examined(target)) {
1318 LOG_ERROR("Target not examined yet");
1319 return ERROR_FAIL;
1320 }
1321 if (!target->type->write_memory) {
1322 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1323 return ERROR_FAIL;
1324 }
1325 return target->type->write_memory(target, address, size, count, buffer);
1326 }
1327
1328 int target_write_phys_memory(struct target *target,
1329 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1330 {
1331 if (!target_was_examined(target)) {
1332 LOG_ERROR("Target not examined yet");
1333 return ERROR_FAIL;
1334 }
1335 if (!target->type->write_phys_memory) {
1336 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1337 return ERROR_FAIL;
1338 }
1339 return target->type->write_phys_memory(target, address, size, count, buffer);
1340 }
1341
1342 int target_add_breakpoint(struct target *target,
1343 struct breakpoint *breakpoint)
1344 {
1345 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1346 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1347 return ERROR_TARGET_NOT_HALTED;
1348 }
1349 return target->type->add_breakpoint(target, breakpoint);
1350 }
1351
1352 int target_add_context_breakpoint(struct target *target,
1353 struct breakpoint *breakpoint)
1354 {
1355 if (target->state != TARGET_HALTED) {
1356 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1357 return ERROR_TARGET_NOT_HALTED;
1358 }
1359 return target->type->add_context_breakpoint(target, breakpoint);
1360 }
1361
1362 int target_add_hybrid_breakpoint(struct target *target,
1363 struct breakpoint *breakpoint)
1364 {
1365 if (target->state != TARGET_HALTED) {
1366 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1367 return ERROR_TARGET_NOT_HALTED;
1368 }
1369 return target->type->add_hybrid_breakpoint(target, breakpoint);
1370 }
1371
1372 int target_remove_breakpoint(struct target *target,
1373 struct breakpoint *breakpoint)
1374 {
1375 return target->type->remove_breakpoint(target, breakpoint);
1376 }
1377
1378 int target_add_watchpoint(struct target *target,
1379 struct watchpoint *watchpoint)
1380 {
1381 if (target->state != TARGET_HALTED) {
1382 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1383 return ERROR_TARGET_NOT_HALTED;
1384 }
1385 return target->type->add_watchpoint(target, watchpoint);
1386 }
1387 int target_remove_watchpoint(struct target *target,
1388 struct watchpoint *watchpoint)
1389 {
1390 return target->type->remove_watchpoint(target, watchpoint);
1391 }
1392 int target_hit_watchpoint(struct target *target,
1393 struct watchpoint **hit_watchpoint)
1394 {
1395 if (target->state != TARGET_HALTED) {
1396 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1397 return ERROR_TARGET_NOT_HALTED;
1398 }
1399
1400 if (!target->type->hit_watchpoint) {
1401 /* For backward compatibility, if hit_watchpoint is not implemented,
1402 * return ERROR_FAIL so that gdb_server will not report bogus
1403 * information. */
1404 return ERROR_FAIL;
1405 }
1406
1407 return target->type->hit_watchpoint(target, hit_watchpoint);
1408 }
1409
1410 const char *target_get_gdb_arch(struct target *target)
1411 {
1412 if (!target->type->get_gdb_arch)
1413 return NULL;
1414 return target->type->get_gdb_arch(target);
1415 }
1416
1417 int target_get_gdb_reg_list(struct target *target,
1418 struct reg **reg_list[], int *reg_list_size,
1419 enum target_register_class reg_class)
1420 {
1421 int result = ERROR_FAIL;
1422
1423 if (!target_was_examined(target)) {
1424 LOG_ERROR("Target not examined yet");
1425 goto done;
1426 }
1427
1428 result = target->type->get_gdb_reg_list(target, reg_list,
1429 reg_list_size, reg_class);
1430
1431 done:
1432 if (result != ERROR_OK) {
1433 *reg_list = NULL;
1434 *reg_list_size = 0;
1435 }
1436 return result;
1437 }
1438
1439 int target_get_gdb_reg_list_noread(struct target *target,
1440 struct reg **reg_list[], int *reg_list_size,
1441 enum target_register_class reg_class)
1442 {
1443 if (target->type->get_gdb_reg_list_noread &&
1444 target->type->get_gdb_reg_list_noread(target, reg_list,
1445 reg_list_size, reg_class) == ERROR_OK)
1446 return ERROR_OK;
1447 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1448 }
1449
1450 bool target_supports_gdb_connection(struct target *target)
1451 {
1452 /*
1453 * exclude all the targets that don't provide get_gdb_reg_list
1454 * or that have explicit gdb_max_connection == 0
1455 */
1456 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1457 }
1458
1459 int target_step(struct target *target,
1460 int current, target_addr_t address, int handle_breakpoints)
1461 {
1462 int retval;
1463
1464 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1465
1466 retval = target->type->step(target, current, address, handle_breakpoints);
1467 if (retval != ERROR_OK)
1468 return retval;
1469
1470 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1471
1472 return retval;
1473 }
1474
1475 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1476 {
1477 if (target->state != TARGET_HALTED) {
1478 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1479 return ERROR_TARGET_NOT_HALTED;
1480 }
1481 return target->type->get_gdb_fileio_info(target, fileio_info);
1482 }
1483
1484 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1485 {
1486 if (target->state != TARGET_HALTED) {
1487 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1488 return ERROR_TARGET_NOT_HALTED;
1489 }
1490 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1491 }
1492
1493 target_addr_t target_address_max(struct target *target)
1494 {
1495 unsigned bits = target_address_bits(target);
1496 if (sizeof(target_addr_t) * 8 == bits)
1497 return (target_addr_t) -1;
1498 else
1499 return (((target_addr_t) 1) << bits) - 1;
1500 }
1501
1502 unsigned target_address_bits(struct target *target)
1503 {
1504 if (target->type->address_bits)
1505 return target->type->address_bits(target);
1506 return 32;
1507 }
1508
1509 unsigned int target_data_bits(struct target *target)
1510 {
1511 if (target->type->data_bits)
1512 return target->type->data_bits(target);
1513 return 32;
1514 }
1515
1516 static int target_profiling(struct target *target, uint32_t *samples,
1517 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1518 {
1519 return target->type->profiling(target, samples, max_num_samples,
1520 num_samples, seconds);
1521 }
1522
1523 /**
1524 * Reset the @c examined flag for the given target.
1525 * Pure paranoia -- targets are zeroed on allocation.
1526 */
1527 static void target_reset_examined(struct target *target)
1528 {
1529 target->examined = false;
1530 }
1531
1532 static int handle_target(void *priv);
1533
1534 static int target_init_one(struct command_context *cmd_ctx,
1535 struct target *target)
1536 {
1537 target_reset_examined(target);
1538
1539 struct target_type *type = target->type;
1540 if (!type->examine)
1541 type->examine = default_examine;
1542
1543 if (!type->check_reset)
1544 type->check_reset = default_check_reset;
1545
1546 assert(type->init_target);
1547
1548 int retval = type->init_target(cmd_ctx, target);
1549 if (retval != ERROR_OK) {
1550 LOG_ERROR("target '%s' init failed", target_name(target));
1551 return retval;
1552 }
1553
1554 /* Sanity-check MMU support ... stub in what we must, to help
1555 * implement it in stages, but warn if we need to do so.
1556 */
1557 if (type->mmu) {
1558 if (!type->virt2phys) {
1559 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1560 type->virt2phys = identity_virt2phys;
1561 }
1562 } else {
1563 /* Make sure no-MMU targets all behave the same: make no
1564 * distinction between physical and virtual addresses, and
1565 * ensure that virt2phys() is always an identity mapping.
1566 */
1567 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1568 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1569
1570 type->mmu = no_mmu;
1571 type->write_phys_memory = type->write_memory;
1572 type->read_phys_memory = type->read_memory;
1573 type->virt2phys = identity_virt2phys;
1574 }
1575
1576 if (!target->type->read_buffer)
1577 target->type->read_buffer = target_read_buffer_default;
1578
1579 if (!target->type->write_buffer)
1580 target->type->write_buffer = target_write_buffer_default;
1581
1582 if (!target->type->get_gdb_fileio_info)
1583 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1584
1585 if (!target->type->gdb_fileio_end)
1586 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1587
1588 if (!target->type->profiling)
1589 target->type->profiling = target_profiling_default;
1590
1591 return ERROR_OK;
1592 }
1593
1594 static int target_init(struct command_context *cmd_ctx)
1595 {
1596 struct target *target;
1597 int retval;
1598
1599 for (target = all_targets; target; target = target->next) {
1600 retval = target_init_one(cmd_ctx, target);
1601 if (retval != ERROR_OK)
1602 return retval;
1603 }
1604
1605 if (!all_targets)
1606 return ERROR_OK;
1607
1608 retval = target_register_user_commands(cmd_ctx);
1609 if (retval != ERROR_OK)
1610 return retval;
1611
1612 retval = target_register_timer_callback(&handle_target,
1613 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1614 if (retval != ERROR_OK)
1615 return retval;
1616
1617 return ERROR_OK;
1618 }
1619
1620 COMMAND_HANDLER(handle_target_init_command)
1621 {
1622 int retval;
1623
1624 if (CMD_ARGC != 0)
1625 return ERROR_COMMAND_SYNTAX_ERROR;
1626
1627 static bool target_initialized;
1628 if (target_initialized) {
1629 LOG_INFO("'target init' has already been called");
1630 return ERROR_OK;
1631 }
1632 target_initialized = true;
1633
1634 retval = command_run_line(CMD_CTX, "init_targets");
1635 if (retval != ERROR_OK)
1636 return retval;
1637
1638 retval = command_run_line(CMD_CTX, "init_target_events");
1639 if (retval != ERROR_OK)
1640 return retval;
1641
1642 retval = command_run_line(CMD_CTX, "init_board");
1643 if (retval != ERROR_OK)
1644 return retval;
1645
1646 LOG_DEBUG("Initializing targets...");
1647 return target_init(CMD_CTX);
1648 }
1649
1650 int target_register_event_callback(int (*callback)(struct target *target,
1651 enum target_event event, void *priv), void *priv)
1652 {
1653 struct target_event_callback **callbacks_p = &target_event_callbacks;
1654
1655 if (!callback)
1656 return ERROR_COMMAND_SYNTAX_ERROR;
1657
1658 if (*callbacks_p) {
1659 while ((*callbacks_p)->next)
1660 callbacks_p = &((*callbacks_p)->next);
1661 callbacks_p = &((*callbacks_p)->next);
1662 }
1663
1664 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1665 (*callbacks_p)->callback = callback;
1666 (*callbacks_p)->priv = priv;
1667 (*callbacks_p)->next = NULL;
1668
1669 return ERROR_OK;
1670 }
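/*
 * Illustrative sketch only (not part of the original file): registering an
 * event callback with the function above. The callback name and the logging
 * it performs are assumptions for the example.
 */
#if 0
static int example_halt_notifier(struct target *target,
		enum target_event event, void *priv)
{
	if (event == TARGET_EVENT_HALTED)
		LOG_INFO("target %s halted", target_name(target));
	return ERROR_OK;
}

/* ... during service initialisation ... */
	target_register_event_callback(example_halt_notifier, NULL);
#endif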
1671
1672 int target_register_reset_callback(int (*callback)(struct target *target,
1673 enum target_reset_mode reset_mode, void *priv), void *priv)
1674 {
1675 struct target_reset_callback *entry;
1676
1677 if (!callback)
1678 return ERROR_COMMAND_SYNTAX_ERROR;
1679
1680 entry = malloc(sizeof(struct target_reset_callback));
1681 if (!entry) {
1682 LOG_ERROR("error allocating buffer for reset callback entry");
1683 return ERROR_COMMAND_SYNTAX_ERROR;
1684 }
1685
1686 entry->callback = callback;
1687 entry->priv = priv;
1688 list_add(&entry->list, &target_reset_callback_list);
1689
1690
1691 return ERROR_OK;
1692 }
1693
1694 int target_register_trace_callback(int (*callback)(struct target *target,
1695 size_t len, uint8_t *data, void *priv), void *priv)
1696 {
1697 struct target_trace_callback *entry;
1698
1699 if (!callback)
1700 return ERROR_COMMAND_SYNTAX_ERROR;
1701
1702 entry = malloc(sizeof(struct target_trace_callback));
1703 if (!entry) {
1704 LOG_ERROR("error allocating buffer for trace callback entry");
1705 return ERROR_COMMAND_SYNTAX_ERROR;
1706 }
1707
1708 entry->callback = callback;
1709 entry->priv = priv;
1710 list_add(&entry->list, &target_trace_callback_list);
1711
1712
1713 return ERROR_OK;
1714 }
1715
1716 int target_register_timer_callback(int (*callback)(void *priv),
1717 unsigned int time_ms, enum target_timer_type type, void *priv)
1718 {
1719 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1720
1721 if (!callback)
1722 return ERROR_COMMAND_SYNTAX_ERROR;
1723
1724 if (*callbacks_p) {
1725 while ((*callbacks_p)->next)
1726 callbacks_p = &((*callbacks_p)->next);
1727 callbacks_p = &((*callbacks_p)->next);
1728 }
1729
1730 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1731 (*callbacks_p)->callback = callback;
1732 (*callbacks_p)->type = type;
1733 (*callbacks_p)->time_ms = time_ms;
1734 (*callbacks_p)->removed = false;
1735
1736 gettimeofday(&(*callbacks_p)->when, NULL);
1737 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1738
1739 (*callbacks_p)->priv = priv;
1740 (*callbacks_p)->next = NULL;
1741
1742 return ERROR_OK;
1743 }
1744
1745 int target_unregister_event_callback(int (*callback)(struct target *target,
1746 enum target_event event, void *priv), void *priv)
1747 {
1748 struct target_event_callback **p = &target_event_callbacks;
1749 struct target_event_callback *c = target_event_callbacks;
1750
1751 if (!callback)
1752 return ERROR_COMMAND_SYNTAX_ERROR;
1753
1754 while (c) {
1755 struct target_event_callback *next = c->next;
1756 if ((c->callback == callback) && (c->priv == priv)) {
1757 *p = next;
1758 free(c);
1759 return ERROR_OK;
1760 } else
1761 p = &(c->next);
1762 c = next;
1763 }
1764
1765 return ERROR_OK;
1766 }
1767
1768 int target_unregister_reset_callback(int (*callback)(struct target *target,
1769 enum target_reset_mode reset_mode, void *priv), void *priv)
1770 {
1771 struct target_reset_callback *entry;
1772
1773 if (!callback)
1774 return ERROR_COMMAND_SYNTAX_ERROR;
1775
1776 list_for_each_entry(entry, &target_reset_callback_list, list) {
1777 if (entry->callback == callback && entry->priv == priv) {
1778 list_del(&entry->list);
1779 free(entry);
1780 break;
1781 }
1782 }
1783
1784 return ERROR_OK;
1785 }
1786
1787 int target_unregister_trace_callback(int (*callback)(struct target *target,
1788 size_t len, uint8_t *data, void *priv), void *priv)
1789 {
1790 struct target_trace_callback *entry;
1791
1792 if (!callback)
1793 return ERROR_COMMAND_SYNTAX_ERROR;
1794
1795 list_for_each_entry(entry, &target_trace_callback_list, list) {
1796 if (entry->callback == callback && entry->priv == priv) {
1797 list_del(&entry->list);
1798 free(entry);
1799 break;
1800 }
1801 }
1802
1803 return ERROR_OK;
1804 }
1805
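/* Timer callbacks are not freed here; the entry is only flagged as removed
 * and reclaimed later by target_call_timer_callbacks_check_time(). */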
1806 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1807 {
1808 if (!callback)
1809 return ERROR_COMMAND_SYNTAX_ERROR;
1810
1811 for (struct target_timer_callback *c = target_timer_callbacks;
1812 c; c = c->next) {
1813 if ((c->callback == callback) && (c->priv == priv)) {
1814 c->removed = true;
1815 return ERROR_OK;
1816 }
1817 }
1818
1819 return ERROR_FAIL;
1820 }
1821
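/* Dispatch an event to the per-target Tcl event handler and to every
 * registered C event callback; TARGET_EVENT_HALTED additionally emits
 * TARGET_EVENT_GDB_HALT first. */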
1822 int target_call_event_callbacks(struct target *target, enum target_event event)
1823 {
1824 struct target_event_callback *callback = target_event_callbacks;
1825 struct target_event_callback *next_callback;
1826
1827 if (event == TARGET_EVENT_HALTED) {
1828 /* deliver the GDB halt event before the generic halted callbacks */
1829 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1830 }
1831
1832 LOG_DEBUG("target event %i (%s) for core %s", event,
1833 jim_nvp_value2name_simple(nvp_target_event, event)->name,
1834 target_name(target));
1835
1836 target_handle_event(target, event);
1837
1838 while (callback) {
1839 next_callback = callback->next;
1840 callback->callback(target, event, callback->priv);
1841 callback = next_callback;
1842 }
1843
1844 return ERROR_OK;
1845 }
1846
1847 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1848 {
1849 struct target_reset_callback *callback;
1850
1851 LOG_DEBUG("target reset %i (%s)", reset_mode,
1852 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1853
1854 list_for_each_entry(callback, &target_reset_callback_list, list)
1855 callback->callback(target, reset_mode, callback->priv);
1856
1857 return ERROR_OK;
1858 }
1859
1860 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1861 {
1862 struct target_trace_callback *callback;
1863
1864 list_for_each_entry(callback, &target_trace_callback_list, list)
1865 callback->callback(target, len, data, callback->priv);
1866
1867 return ERROR_OK;
1868 }
1869
1870 static int target_timer_callback_periodic_restart(
1871 struct target_timer_callback *cb, struct timeval *now)
1872 {
1873 cb->when = *now;
1874 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1875 return ERROR_OK;
1876 }
1877
1878 static int target_call_timer_callback(struct target_timer_callback *cb,
1879 struct timeval *now)
1880 {
1881 cb->callback(cb->priv);
1882
1883 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1884 return target_timer_callback_periodic_restart(cb, now);
1885
1886 return target_unregister_timer_callback(cb->callback, cb->priv);
1887 }
1888
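/* Walk the timer callback list: free entries flagged as removed, then invoke
 * every callback that is due (or every periodic one when checktime is 0). */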
1889 static int target_call_timer_callbacks_check_time(int checktime)
1890 {
1891 static bool callback_processing;
1892
1893 /* Do not allow nesting */
1894 if (callback_processing)
1895 return ERROR_OK;
1896
1897 callback_processing = true;
1898
1899 keep_alive();
1900
1901 struct timeval now;
1902 gettimeofday(&now, NULL);
1903
1904 /* Store an address of the place containing a pointer to the
1905 * next item; initially, that's a standalone "root of the
1906 * list" variable. */
1907 struct target_timer_callback **callback = &target_timer_callbacks;
1908 while (callback && *callback) {
1909 if ((*callback)->removed) {
1910 struct target_timer_callback *p = *callback;
1911 *callback = (*callback)->next;
1912 free(p);
1913 continue;
1914 }
1915
1916 bool call_it = (*callback)->callback &&
1917 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1918 timeval_compare(&now, &(*callback)->when) >= 0);
1919
1920 if (call_it)
1921 target_call_timer_callback(*callback, &now);
1922
1923 callback = &(*callback)->next;
1924 }
1925
1926 callback_processing = false;
1927 return ERROR_OK;
1928 }
1929
1930 int target_call_timer_callbacks(void)
1931 {
1932 return target_call_timer_callbacks_check_time(1);
1933 }
1934
1935 /* invoke periodic callbacks immediately */
1936 int target_call_timer_callbacks_now(void)
1937 {
1938 return target_call_timer_callbacks_check_time(0);
1939 }
1940
1941 /* Prints the working area layout for debug purposes */
1942 static void print_wa_layout(struct target *target)
1943 {
1944 struct working_area *c = target->working_areas;
1945
1946 while (c) {
1947 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1948 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1949 c->address, c->address + c->size - 1, c->size);
1950 c = c->next;
1951 }
1952 }
1953
1954 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1955 static void target_split_working_area(struct working_area *area, uint32_t size)
1956 {
1957 assert(area->free); /* Shouldn't split an allocated area */
1958 assert(size <= area->size); /* Caller should guarantee this */
1959
1960 /* Split only if not already the right size */
1961 if (size < area->size) {
1962 struct working_area *new_wa = malloc(sizeof(*new_wa));
1963
1964 if (!new_wa)
1965 return;
1966
1967 new_wa->next = area->next;
1968 new_wa->size = area->size - size;
1969 new_wa->address = area->address + size;
1970 new_wa->backup = NULL;
1971 new_wa->user = NULL;
1972 new_wa->free = true;
1973
1974 area->next = new_wa;
1975 area->size = size;
1976
1977 /* If backup memory was allocated to this area, it has the wrong size
1978 * now so free it and it will be reallocated if/when needed */
1979 free(area->backup);
1980 area->backup = NULL;
1981 }
1982 }
1983
1984 /* Merge all adjacent free areas into one */
1985 static void target_merge_working_areas(struct target *target)
1986 {
1987 struct working_area *c = target->working_areas;
1988
1989 while (c && c->next) {
1990 assert(c->next->address == c->address + c->size); /* This is an invariant */
1991
1992 /* Find two adjacent free areas */
1993 if (c->free && c->next->free) {
1994 /* Merge the last into the first */
1995 c->size += c->next->size;
1996
1997 /* Remove the last */
1998 struct working_area *to_be_freed = c->next;
1999 c->next = c->next->next;
2000 free(to_be_freed->backup);
2001 free(to_be_freed);
2002
2003 /* If backup memory was allocated to the remaining area, it has
2004 * the wrong size now */
2005 free(c->backup);
2006 c->backup = NULL;
2007 } else {
2008 c = c->next;
2009 }
2010 }
2011 }
2012
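/* Allocate part of the configured working area without warning on failure.
 * On the first call the area list is built from the physical or virtual
 * work-area address, depending on the MMU state; requests are rounded up to
 * a multiple of 4 bytes and satisfied first-fit. */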
2013 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2014 {
2015 /* Reevaluate working area address based on MMU state */
2016 if (!target->working_areas) {
2017 int retval;
2018 int enabled;
2019
2020 retval = target->type->mmu(target, &enabled);
2021 if (retval != ERROR_OK)
2022 return retval;
2023
2024 if (!enabled) {
2025 if (target->working_area_phys_spec) {
2026 LOG_DEBUG("MMU disabled, using physical "
2027 "address for working memory " TARGET_ADDR_FMT,
2028 target->working_area_phys);
2029 target->working_area = target->working_area_phys;
2030 } else {
2031 LOG_ERROR("No working memory available. "
2032 "Specify -work-area-phys to target.");
2033 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2034 }
2035 } else {
2036 if (target->working_area_virt_spec) {
2037 LOG_DEBUG("MMU enabled, using virtual "
2038 "address for working memory " TARGET_ADDR_FMT,
2039 target->working_area_virt);
2040 target->working_area = target->working_area_virt;
2041 } else {
2042 LOG_ERROR("No working memory available. "
2043 "Specify -work-area-virt to target.");
2044 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2045 }
2046 }
2047
2048 /* Set up initial working area on first call */
2049 struct working_area *new_wa = malloc(sizeof(*new_wa));
2050 if (new_wa) {
2051 new_wa->next = NULL;
2052 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2053 new_wa->address = target->working_area;
2054 new_wa->backup = NULL;
2055 new_wa->user = NULL;
2056 new_wa->free = true;
2057 }
2058
2059 target->working_areas = new_wa;
2060 }
2061
2062 /* only allocate multiples of 4 bytes */
2063 if (size % 4)
2064 size = (size + 3) & (~3UL);
2065
2066 struct working_area *c = target->working_areas;
2067
2068 /* Find the first large enough working area */
2069 while (c) {
2070 if (c->free && c->size >= size)
2071 break;
2072 c = c->next;
2073 }
2074
2075 if (!c)
2076 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2077
2078 /* Split the working area into the requested size */
2079 target_split_working_area(c, size);
2080
2081 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2082 size, c->address);
2083
2084 if (target->backup_working_area) {
2085 if (!c->backup) {
2086 c->backup = malloc(c->size);
2087 if (!c->backup)
2088 return ERROR_FAIL;
2089 }
2090
2091 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2092 if (retval != ERROR_OK)
2093 return retval;
2094 }
2095
2096 /* mark as used, and return the new (reused) area */
2097 c->free = false;
2098 *area = c;
2099
2100 /* user pointer */
2101 c->user = area;
2102
2103 print_wa_layout(target);
2104
2105 return ERROR_OK;
2106 }
2107
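/* Same as target_alloc_working_area_try(), but logs a warning when no
 * suitable area is available. Usage sketch (names are illustrative):
 *   struct working_area *wa = NULL;
 *   if (target_alloc_working_area(target, size, &wa) != ERROR_OK)
 *       ... fall back to a path that does not need a working area ...
 */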
2108 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2109 {
2110 int retval;
2111
2112 retval = target_alloc_working_area_try(target, size, area);
2113 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2114 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2115 return retval;
2117 }
2118
2119 static int target_restore_working_area(struct target *target, struct working_area *area)
2120 {
2121 int retval = ERROR_OK;
2122
2123 if (target->backup_working_area && area->backup) {
2124 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2125 if (retval != ERROR_OK)
2126 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2127 area->size, area->address);
2128 }
2129
2130 return retval;
2131 }
2132
2133 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2134 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2135 {
2136 int retval = ERROR_OK;
2137
2138 if (area->free)
2139 return retval;
2140
2141 if (restore) {
2142 retval = target_restore_working_area(target, area);
2143 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2144 if (retval != ERROR_OK)
2145 return retval;
2146 }
2147
2148 area->free = true;
2149
2150 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2151 area->size, area->address);
2152
2153 /* mark user pointer invalid */
2154 /* TODO: Is this really safe? It points to some previous caller's memory.
2155 * How could we know that the area pointer is still in that place and not
2156 * some other vital data? What's the purpose of this, anyway? */
2157 *area->user = NULL;
2158 area->user = NULL;
2159
2160 target_merge_working_areas(target);
2161
2162 print_wa_layout(target);
2163
2164 return retval;
2165 }
2166
2167 int target_free_working_area(struct target *target, struct working_area *area)
2168 {
2169 return target_free_working_area_restore(target, area, 1);
2170 }
2171
2172 /* free resources and restore memory; if restoring memory fails,
2173 * free up the resources anyway
2174 */
2175 static void target_free_all_working_areas_restore(struct target *target, int restore)
2176 {
2177 struct working_area *c = target->working_areas;
2178
2179 LOG_DEBUG("freeing all working areas");
2180
2181 /* Loop through all areas, restoring the allocated ones and marking them as free */
2182 while (c) {
2183 if (!c->free) {
2184 if (restore)
2185 target_restore_working_area(target, c);
2186 c->free = true;
2187 *c->user = NULL; /* Same as above */
2188 c->user = NULL;
2189 }
2190 c = c->next;
2191 }
2192
2193 /* Run a merge pass to combine all areas into one */
2194 target_merge_working_areas(target);
2195
2196 print_wa_layout(target);
2197 }
2198
2199 void target_free_all_working_areas(struct target *target)
2200 {
2201 target_free_all_working_areas_restore(target, 1);
2202
2203 /* Now we have none or only one working area marked as free */
2204 if (target->working_areas) {
2205 /* Free the last one to allow on-the-fly moving and resizing */
2206 free(target->working_areas->backup);
2207 free(target->working_areas);
2208 target->working_areas = NULL;
2209 }
2210 }
2211
2212 /* Find the largest number of bytes that can be allocated */
2213 uint32_t target_get_working_area_avail(struct target *target)
2214 {
2215 struct working_area *c = target->working_areas;
2216 uint32_t max_size = 0;
2217
2218 if (!c)
2219 return target->working_area_size;
2220
2221 while (c) {
2222 if (c->free && max_size < c->size)
2223 max_size = c->size;
2224
2225 c = c->next;
2226 }
2227
2228 return max_size;
2229 }
2230
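/* Free everything owned by a single target: type-specific state, event
 * actions, working areas, the SMP list, RTOS data and its name strings. */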
2231 static void target_destroy(struct target *target)
2232 {
2233 if (target->type->deinit_target)
2234 target->type->deinit_target(target);
2235
2236 free(target->semihosting);
2237
2238 jtag_unregister_event_callback(jtag_enable_callback, target);
2239
2240 struct target_event_action *teap = target->event_action;
2241 while (teap) {
2242 struct target_event_action *next = teap->next;
2243 Jim_DecrRefCount(teap->interp, teap->body);
2244 free(teap);
2245 teap = next;
2246 }
2247
2248 target_free_all_working_areas(target);
2249
2250 /* release the target's SMP list */
2251 if (target->smp) {
2252 struct target_list *head = target->head;
2253 while (head) {
2254 struct target_list *pos = head->next;
2255 head->target->smp = 0;
2256 free(head);
2257 head = pos;
2258 }
2259 target->smp = 0;
2260 }
2261
2262 rtos_destroy(target);
2263
2264 free(target->gdb_port_override);
2265 free(target->type);
2266 free(target->trace_info);
2267 free(target->fileio_info);
2268 free(target->cmd_name);
2269 free(target);
2270 }
2271
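/* Free the global event and timer callback lists and destroy every target;
 * intended to run once at shutdown. */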
2272 void target_quit(void)
2273 {
2274 struct target_event_callback *pe = target_event_callbacks;
2275 while (pe) {
2276 struct target_event_callback *t = pe->next;
2277 free(pe);
2278 pe = t;
2279 }
2280 target_event_callbacks = NULL;
2281
2282 struct target_timer_callback *pt = target_timer_callbacks;
2283 while (pt) {
2284 struct target_timer_callback *t = pt->next;
2285 free(pt);
2286 pt = t;
2287 }
2288 target_timer_callbacks = NULL;
2289
2290 for (struct target *target = all_targets; target;) {
2291 struct target *tmp;
2292
2293 tmp = target->next;
2294 target_destroy(target);
2295 target = tmp;
2296 }
2297
2298 all_targets = NULL;
2299 }
2300
2301 int target_arch_state(struct target *target)
2302 {
2303 int retval;
2304 if (!target) {
2305 LOG_WARNING("No target has been configured");
2306 return ERROR_OK;
2307 }
2308
2309 if (target->state != TARGET_HALTED)
2310 return ERROR_OK;
2311
2312 retval = target->type->arch_state(target);
2313 return retval;
2314 }
2315
2316 static int target_get_gdb_fileio_info_default(struct target *target,
2317 struct gdb_fileio_info *fileio_info)
2318 {
2319 /* If the target does not support semihosting functions, it has
2320 no need to provide a .get_gdb_fileio_info callback.
2321 This default just returns ERROR_FAIL, so gdb_server will report "Txx"
2322 (target halted) every time. */
2323 return ERROR_FAIL;
2324 }
2325
2326 static int target_gdb_fileio_end_default(struct target *target,
2327 int retcode, int fileio_errno, bool ctrl_c)
2328 {
2329 return ERROR_OK;
2330 }
2331
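/* Default profiling implementation: repeatedly halt the target, sample the
 * cached PC register and resume, until max_num_samples samples have been
 * collected or 'seconds' have elapsed. */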
2332 int target_profiling_default(struct target *target, uint32_t *samples,
2333 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2334 {
2335 struct timeval timeout, now;
2336
2337 gettimeofday(&timeout, NULL);
2338 timeval_add_time(&timeout, seconds, 0);
2339
2340 LOG_INFO("Starting profiling. Halting and resuming the"
2341 " target as often as we can...");
2342
2343 uint32_t sample_count = 0;
2344 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2345 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2346
2347 int retval = ERROR_OK;
2348 for (;;) {
2349 target_poll(target);
2350 if (target->state == TARGET_HALTED) {
2351 uint32_t t = buf_get_u32(reg->value, 0, 32);
2352 samples[sample_count++] = t;
2353 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2354 retval = target_resume(target, 1, 0, 0, 0);
2355 target_poll(target);
2356 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2357 } else if (target->state == TARGET_RUNNING) {
2358 /* We want to quickly sample the PC. */
2359 retval = target_halt(target);
2360 } else {
2361 LOG_INFO("Target not halted or running");
2362 retval = ERROR_OK;
2363 break;
2364 }
2365
2366 if (retval != ERROR_OK)
2367 break;
2368
2369 gettimeofday(&now, NULL);
2370 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2371 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2372 break;
2373 }
2374 }
2375
2376 *num_samples = sample_count;
2377 return retval;
2378 }
2379
2380 /* Single aligned halfwords and words are guaranteed to use 16 and 32 bit
2381 * accesses respectively; anything else is transferred as quickly as
2382 * possible
2383 */
2384 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2385 {
2386 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2387 size, address);
2388
2389 if (!target_was_examined(target)) {
2390 LOG_ERROR("Target not examined yet");
2391 return ERROR_FAIL;
2392 }
2393
2394 if (size == 0)
2395 return ERROR_OK;
2396
2397 if ((address + size - 1) < address) {
2398 /* GDB can request this when e.g. PC is 0xfffffffc */
2399 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2400 address,
2401 size);
2402 return ERROR_FAIL;
2403 }
2404
2405 return target->type->write_buffer(target, address, size, buffer);
2406 }
2407
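/* Default write_buffer implementation: write leading unaligned bytes with
 * progressively larger accesses, then transfer the remainder with the widest
 * access size the data bus supports. */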
2408 static int target_write_buffer_default(struct target *target,
2409 target_addr_t address, uint32_t count, const uint8_t *buffer)
2410 {
2411 uint32_t size;
2412 unsigned int data_bytes = target_data_bits(target) / 8;
2413
2414 /* Align up to maximum bytes. The loop condition makes sure the next pass
2415 * will have something to do with the size we leave to it. */
2416 for (size = 1;
2417 size < data_bytes && count >= size * 2 + (address & size);
2418 size *= 2) {
2419 if (address & size) {
2420 int retval = target_write_memory(target, address, size, 1, buffer);
2421 if (retval != ERROR_OK)
2422 return retval;
2423 address += size;
2424 count -= size;
2425 buffer += size;
2426 }
2427 }
2428
2429 /* Write the data with as large access size as possible. */
2430 for (; size > 0; size /= 2) {
2431 uint32_t aligned = count - count % size;
2432 if (aligned > 0) {
2433 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2434 if (retval != ERROR_OK)
2435 return retval;
2436 address += aligned;
2437 count -= aligned;
2438 buffer += aligned;
2439 }
2440 }
2441
2442 return ERROR_OK;
2443 }
2444
2445 /* Single aligned halfwords and words are guaranteed to use 16 and 32 bit
2446 * accesses respectively; anything else is transferred as quickly as
2447 * possible
2448 */
2449 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2450 {
2451 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2452 size, address);
2453
2454 if (!target_was_examined(target)) {
2455 LOG_ERROR("Target not examined yet");
2456 return ERROR_FAIL;
2457 }
2458
2459 if (size == 0)
2460 return ERROR_OK;
2461
2462 if ((address + size - 1) < address) {
2463 /* GDB can request this when e.g. PC is 0xfffffffc */
2464 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2465 address,
2466 size);
2467 return ERROR_FAIL;
2468 }
2469
2470 return target->type->read_buffer(target, address, size, buffer);
2471 }
2472
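/* Default read_buffer implementation; mirrors target_write_buffer_default()
 * but reads instead of writes. */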
2473 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2474 {
2475 uint32_t size;
2476 unsigned int data_bytes = target_data_bits(target) / 8;
2477
2478 /* Align up to maximum bytes. The loop condition makes sure the next pass
2479 * will have something to do with the size we leave to it. */
2480 for (size = 1;
2481 size < data_bytes && count >= size * 2 + (address & size);
2482 size *= 2) {
2483 if (address & size) {
2484 int retval = target_read_memory(target, address, size, 1, buffer);
2485 if (retval != ERROR_OK)
2486 return retval;
2487 address += size;
2488 count -= size;
2489 buffer += size;
2490 }
2491 }
2492
2493 /* Read the data with as large access size as possible. */
2494 for (; size > 0; size /= 2) {
2495 uint32_t aligned = count - count % size;
2496 if (aligned > 0) {
2497 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2498 if (retval != ERROR_OK)
2499 return retval;
2500 address += aligned;
2501 count -= aligned;
2502 buffer += aligned;
2503 }
2504 }
2505
2506 return ERROR_OK;
2507 }
2508
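/* Checksum a region of target memory. Prefer the target's own
 * checksum_memory handler; if it fails, fall back to reading the region and
 * computing the CRC on the host. */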
2509 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2510 {
2511 uint8_t *buffer;
2512 int retval;
2513 uint32_t i;
2514 uint32_t checksum = 0;
2515 if (!target_was_examined(target)) {
2516 LOG_ERROR("Target not examined yet");
2517 return ERROR_FAIL;
2518 }
2519
2520 retval = target->type->checksum_memory(target, address, size, &checksum);
2521 if (retval != ERROR_OK) {
2522 buffer = malloc(size);
2523 if (!buffer) {
2524 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2525 return ERROR_COMMAND_SYNTAX_ERROR;
2526 }
2527 retval = target_read_buffer(target, address, size, buffer);
2528 if (retval != ERROR_OK) {
2529 free(buffer);
2530 return retval;
2531 }
2532
2533 /* convert to target endianness */
2534 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2535 uint32_t target_data;
2536 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2537 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2538 }
2539
2540 retval = image_calculate_checksum(buffer, size, &checksum);
2541 free(buffer);
2542 }
2543
2544 *crc = checksum;
2545
2546 return retval;
2547 }
2548
2549 int target_blank_check_memory(struct target *target,
2550 struct target_memory_check_block *blocks, int num_blocks,
2551 uint8_t erased_value)
2552 {
2553 if (!target_was_examined(target)) {
2554 LOG_ERROR("Target not examined yet");
2555 return ERROR_FAIL;
2556 }
2557
2558 if (!target->type->blank_check_memory)
2559 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2560
2561 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2562 }
2563
2564 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2565 {
2566 uint8_t value_buf[8];
2567 if (!target_was_examined(target)) {
2568 LOG_ERROR("Target not examined yet");
2569 return ERROR_FAIL;
2570 }
2571
2572 int retval = target_read_memory(target, address, 8, 1, value_buf);
2573
2574 if (retval == ERROR_OK) {
2575 *value = target_buffer_get_u64(target, value_buf);
2576 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2577 address,
2578 *value);
2579 } else {
2580 *value = 0x0;
2581 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2582 address);
2583 }
2584
2585 return retval;
2586 }
2587
2588 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2589 {
2590 uint8_t value_buf[4];
2591 if (!target_was_examined(target)) {
2592 LOG_ERROR("Target not examined yet");
2593 return ERROR_FAIL;
2594 }
2595
2596 int retval = target_read_memory(target, address, 4, 1, value_buf);
2597
2598 if (retval == ERROR_OK) {
2599 *value = target_buffer_get_u32(target, value_buf);
2600 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2601 address,
2602 *value);
2603 } else {
2604 *value = 0x0;
2605 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2606 address);
2607 }
2608
2609 return retval;
2610 }
2611
2612 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2613 {
2614 uint8_t value_buf[2];
2615 if (!target_was_examined(target)) {
2616 LOG_ERROR("Target not examined yet");
2617 return ERROR_FAIL;
2618 }
2619
2620 int retval = target_read_memory(target, address, 2, 1, value_buf);
2621
2622 if (retval == ERROR_OK) {
2623 *value = target_buffer_get_u16(target, value_buf);
2624 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2625 address,
2626 *value);
2627 } else {
2628 *value = 0x0;
2629 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2630 address);
2631 }
2632
2633 return retval;
2634 }
2635
2636 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2637 {
2638 if (!target_was_examined(target)) {
2639 LOG_ERROR("Target not examined yet");
2640 return ERROR_FAIL;
2641 }
2642
2643 int retval = target_read_memory(target, address, 1, 1, value);
2644
2645 if (retval == ERROR_OK) {
2646 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2647 address,
2648 *value);
2649 } else {
2650 *value = 0x0;
2651 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2652 address);
2653 }
2654
2655 return retval;
2656 }
2657
2658 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2659 {
2660 int retval;
2661 uint8_t value_buf[8];
2662 if (!target_was_examined(target)) {
2663 LOG_ERROR("Target not examined yet");
2664 return ERROR_FAIL;
2665 }
2666
2667 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2668 address,
2669 value);
2670
2671 target_buffer_set_u64(target, value_buf, value);
2672 retval = target_write_memory(target, address, 8, 1, value_buf);
2673 if (retval != ERROR_OK)
2674 LOG_DEBUG("failed: %i", retval);
2675
2676 return retval;
2677 }
2678
2679 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2680 {
2681 int retval;
2682 uint8_t value_buf[4];
2683 if (!target_was_examined(target)) {
2684 LOG_ERROR("Target not examined yet");
2685 return ERROR_FAIL;
2686 }
2687
2688 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2689 address,
2690 value);
2691
2692 target_buffer_set_u32(target, value_buf, value);
2693 retval = target_write_memory(target, address, 4, 1, value_buf);
2694 if (retval != ERROR_OK)
2695 LOG_DEBUG("failed: %i", retval);
2696
2697 return retval;
2698 }
2699
2700 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2701 {
2702 int retval;
2703 uint8_t value_buf[2];
2704 if (!target_was_examined(target)) {
2705 LOG_ERROR("Target not examined yet");
2706 return ERROR_FAIL;
2707 }
2708
2709 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2710 address,
2711 value);
2712
2713 target_buffer_set_u16(target, value_buf, value);
2714 retval = target_write_memory(target, address, 2, 1, value_buf);
2715 if (retval != ERROR_OK)
2716 LOG_DEBUG("failed: %i", retval);
2717
2718 return retval;
2719 }
2720
2721 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2722 {
2723 int retval;
2724 if (!target_was_examined(target)) {
2725 LOG_ERROR("Target not examined yet");
2726 return ERROR_FAIL;
2727 }
2728
2729 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2730 address, value);
2731
2732 retval = target_write_memory(target, address, 1, 1, &value);
2733 if (retval != ERROR_OK)
2734 LOG_DEBUG("failed: %i", retval);
2735
2736 return retval;
2737 }
2738
2739 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2740 {
2741 int retval;
2742 uint8_t value_buf[8];
2743 if (!target_was_examined(target)) {
2744 LOG_ERROR("Target not examined yet");
2745 return ERROR_FAIL;
2746 }
2747
2748 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2749 address,
2750 value);
2751
2752 target_buffer_set_u64(target, value_buf, value);
2753 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2754 if (retval != ERROR_OK)
2755 LOG_DEBUG("failed: %i", retval);
2756
2757 return retval;
2758 }
2759
2760 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2761 {
2762 int retval;
2763 uint8_t value_buf[4];
2764 if (!target_was_examined(target)) {
2765 LOG_ERROR("Target not examined yet");
2766 return ERROR_FAIL;
2767 }
2768
2769 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2770 address,
2771 value);
2772
2773 target_buffer_set_u32(target, value_buf, value);
2774 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2775 if (retval != ERROR_OK)
2776 LOG_DEBUG("failed: %i", retval);
2777
2778 return retval;
2779 }
2780
2781 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2782 {
2783 int retval;
2784 uint8_t value_buf[2];
2785 if (!target_was_examined(target)) {
2786 LOG_ERROR("Target not examined yet");
2787 return ERROR_FAIL;
2788 }
2789
2790 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2791 address,
2792 value);
2793
2794 target_buffer_set_u16(target, value_buf, value);
2795 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2796 if (retval != ERROR_OK)
2797 LOG_DEBUG("failed: %i", retval);
2798
2799 return retval;
2800 }
2801
2802 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2803 {
2804 int retval;
2805 if (!target_was_examined(target)) {
2806 LOG_ERROR("Target not examined yet");
2807 return ERROR_FAIL;
2808 }
2809
2810 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2811 address, value);
2812
2813 retval = target_write_phys_memory(target, address, 1, 1, &value);
2814 if (retval != ERROR_OK)
2815 LOG_DEBUG("failed: %i", retval);
2816
2817 return retval;
2818 }
2819
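/* Make the named target the current one, refusing targets whose TAP is
 * disabled. */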
2820 static int find_target(struct command_invocation *cmd, const char *name)
2821 {
2822 struct target *target = get_target(name);
2823 if (!target) {
2824 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2825 return ERROR_FAIL;
2826 }
2827 if (!target->tap->enabled) {
2828 command_print(cmd, "Target: TAP %s is disabled, "
2829 "can't be the current target\n",
2830 target->tap->dotted_name);
2831 return ERROR_FAIL;
2832 }
2833
2834 cmd->ctx->current_target = target;
2835 if (cmd->ctx->current_target_override)
2836 cmd->ctx->current_target_override = target;
2837
2838 return ERROR_OK;
2839 }
2840
2842 COMMAND_HANDLER(handle_targets_command)
2843 {
2844 int retval = ERROR_OK;
2845 if (CMD_ARGC == 1) {
2846 retval = find_target(CMD, CMD_ARGV[0]);
2847 if (retval == ERROR_OK) {
2848 /* we're done! */
2849 return retval;
2850 }
2851 }
2852
2853 struct target *target = all_targets;
2854 command_print(CMD, " TargetName Type Endian TapName State ");
2855 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2856 while (target) {
2857 const char *state;
2858 char marker = ' ';
2859
2860 if (target->tap->enabled)
2861 state = target_state_name(target);
2862 else
2863 state = "tap-disabled";
2864
2865 if (CMD_CTX->current_target == target)
2866 marker = '*';
2867
2868 /* keep columns lined up to match the headers above */
2869 command_print(CMD,
2870 "%2d%c %-18s %-10s %-6s %-18s %s",
2871 target->target_number,
2872 marker,
2873 target_name(target),
2874 target_type_name(target),
2875 jim_nvp_value2name_simple(nvp_target_endian,
2876 target->endianness)->name,
2877 target->tap->dotted_name,
2878 state);
2879 target = target->next;
2880 }
2881
2882 return retval;
2883 }
2884
2885 /* every 300ms we check for reset & power dropout and issue a "reset halt" if so. */
2886
2887 static int power_dropout;
2888 static int srst_asserted;
2889
2890 static int run_power_restore;
2891 static int run_power_dropout;
2892 static int run_srst_asserted;
2893 static int run_srst_deasserted;
2894
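/* Sample the power-dropout and srst lines and latch the corresponding run_*
 * flags, rate-limited to one trigger per 2 seconds. */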
2895 static int sense_handler(void)
2896 {
2897 static int prev_srst_asserted;
2898 static int prev_power_dropout;
2899
2900 int retval = jtag_power_dropout(&power_dropout);
2901 if (retval != ERROR_OK)
2902 return retval;
2903
2904 int power_restored;
2905 power_restored = prev_power_dropout && !power_dropout;
2906 if (power_restored)
2907 run_power_restore = 1;
2908
2909 int64_t current = timeval_ms();
2910 static int64_t last_power;
2911 bool wait_more = last_power + 2000 > current;
2912 if (power_dropout && !wait_more) {
2913 run_power_dropout = 1;
2914 last_power = current;
2915 }
2916
2917 retval = jtag_srst_asserted(&srst_asserted);
2918 if (retval != ERROR_OK)
2919 return retval;
2920
2921 int srst_deasserted;
2922 srst_deasserted = prev_srst_asserted && !srst_asserted;
2923
2924 static int64_t last_srst;
2925 wait_more = last_srst + 2000 > current;
2926 if (srst_deasserted && !wait_more) {
2927 run_srst_deasserted = 1;
2928 last_srst = current;
2929 }
2930
2931 if (!prev_srst_asserted && srst_asserted)
2932 run_srst_asserted = 1;
2933
2934 prev_srst_asserted = srst_asserted;
2935 prev_power_dropout = power_dropout;
2936
2937 if (srst_deasserted || power_restored) {
2938 /* Other than logging the event we can't do anything here.
2939 * Issuing a reset is a particularly bad idea as we might
2940 * be inside a reset already.
2941 */
2942 }
2943
2944 return ERROR_OK;
2945 }
2946
2947 /* process target state changes */
2948 static int handle_target(void *priv)
2949 {
2950 Jim_Interp *interp = (Jim_Interp *)priv;
2951 int retval = ERROR_OK;
2952
2953 if (!is_jtag_poll_safe()) {
2954 /* polling is disabled currently */
2955 return ERROR_OK;
2956 }
2957
2958 /* we do not want to recurse here... */
2959 static int recursive;
2960 if (!recursive) {
2961 recursive = 1;
2962 sense_handler();
2963 /* danger! running these procedures can trigger srst assertions and power dropouts.
2964 * We need to avoid an infinite loop/recursion here and we do that by
2965 * clearing the flags after running these events.
2966 */
2967 int did_something = 0;
2968 if (run_srst_asserted) {
2969 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2970 Jim_Eval(interp, "srst_asserted");
2971 did_something = 1;
2972 }
2973 if (run_srst_deasserted) {
2974 Jim_Eval(interp, "srst_deasserted");
2975 did_something = 1;
2976 }
2977 if (run_power_dropout) {
2978 LOG_INFO("Power dropout detected, running power_dropout proc.");
2979 Jim_Eval(interp, "power_dropout");
2980 did_something = 1;
2981 }
2982 if (run_power_restore) {
2983 Jim_Eval(interp, "power_restore");
2984 did_something = 1;
2985 }
2986
2987 if (did_something) {
2988 /* clear detect flags */
2989 sense_handler();
2990 }
2991
2992 /* clear action flags */
2993
2994 run_srst_asserted = 0;
2995 run_srst_deasserted = 0;
2996 run_power_restore = 0;
2997 run_power_dropout = 0;
2998
2999 recursive = 0;
3000 }
3001
3002 /* Poll targets for state changes unless that's globally disabled.
3003 * Skip targets that are currently disabled.
3004 */
3005 for (struct target *target = all_targets;
3006 is_jtag_poll_safe() && target;
3007 target = target->next) {
3008
3009 if (!target_was_examined(target))
3010 continue;
3011
3012 if (!target->tap->enabled)
3013 continue;
3014
3015 if (target->backoff.times > target->backoff.count) {
3016 /* do not poll this time as we failed previously */
3017 target->backoff.count++;
3018 continue;
3019 }
3020 target->backoff.count = 0;
3021
3022 /* only poll target if we've got power and srst isn't asserted */
3023 if (!power_dropout && !srst_asserted) {
3024 /* polling may fail silently until the target has been examined */
3025 retval = target_poll(target);
3026 if (retval != ERROR_OK) {
3027 /* 100ms polling interval. Increase the interval between polls up to 5000ms */
3028 if (target->backoff.times * polling_interval < 5000) {
3029 target->backoff.times *= 2;
3030 target->backoff.times++;
3031 }
3032
3033 /* Tell GDB to halt the debugger. This allows the user to
3034 * run monitor commands to handle the situation.
3035 */
3036 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3037 }
3038 if (target->backoff.times > 0) {
3039 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3040 target_reset_examined(target);
3041 retval = target_examine_one(target);
3042 /* Target examination could have failed due to an unstable connection,
3043 * but we set the examined flag anyway so it is polled again later */
3044 if (retval != ERROR_OK) {
3045 target->examined = true;
3046 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3047 target->backoff.times * polling_interval);
3048 return retval;
3049 }
3050 }
3051
3052 /* Since we succeeded, we reset backoff count */
3053 target->backoff.times = 0;
3054 }
3055 }
3056
3057 return retval;
3058 }
3059
3060 COMMAND_HANDLER(handle_reg_command)
3061 {
3062 LOG_DEBUG("-");
3063
3064 struct target *target = get_current_target(CMD_CTX);
3065 struct reg *reg = NULL;
3066
3067 /* list all available registers for the current target */
3068 if (CMD_ARGC == 0) {
3069 struct reg_cache *cache = target->reg_cache;
3070
3071 unsigned int count = 0;
3072 while (cache) {
3073 unsigned i;
3074
3075 command_print(CMD, "===== %s", cache->name);
3076
3077 for (i = 0, reg = cache->reg_list;
3078 i < cache->num_regs;
3079 i++, reg++, count++) {
3080 if (!reg->exist || reg->hidden)
3081 continue;
3082 /* only print cached values if they are valid */
3083 if (reg->valid) {
3084 char *value = buf_to_hex_str(reg->value,
3085 reg->size);
3086 command_print(CMD,
3087 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3088 count, reg->name,
3089 reg->size, value,
3090 reg->dirty
3091 ? " (dirty)"
3092 : "");
3093 free(value);
3094 } else {
3095 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3096 count, reg->name,
3097 reg->size);
3098 }
3099 }
3100 cache = cache->next;
3101 }
3102
3103 return ERROR_OK;
3104 }
3105
3106 /* access a single register by its ordinal number */
3107 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3108 unsigned num;
3109 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3110
3111 struct reg_cache *cache = target->reg_cache;
3112 unsigned int count = 0;
3113 while (cache) {
3114 unsigned i;
3115 for (i = 0; i < cache->num_regs; i++) {
3116 if (count++ == num) {
3117 reg = &cache->reg_list[i];
3118 break;
3119 }
3120 }
3121 if (reg)
3122 break;
3123 cache = cache->next;
3124 }
3125
3126 if (!reg) {
3127 command_print(CMD, "%i is out of bounds, the current target "
3128 "has only %i registers (0 - %i)", num, count, count - 1);
3129 return ERROR_OK;
3130 }
3131 } else {
3132 /* access a single register by its name */
3133 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3134
3135 if (!reg)
3136 goto not_found;
3137 }
3138
3139 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3140
3141 if (!reg->exist)
3142 goto not_found;
3143
3144 /* display a register */
3145 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3146 && (CMD_ARGV[1][0] <= '9')))) {
3147 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3148 reg->valid = 0;
3149
3150 if (reg->valid == 0) {
3151 int retval = reg->type->get(reg);
3152 if (retval != ERROR_OK) {
3153 LOG_ERROR("Could not read register '%s'", reg->name);
3154 return retval;
3155 }
3156 }
3157 char *value = buf_to_hex_str(reg->value, reg->size);
3158 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3159 free(value);
3160 return ERROR_OK;
3161 }
3162
3163 /* set register value */
3164 if (CMD_ARGC == 2) {
3165 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3166 if (!buf)
3167 return ERROR_FAIL;
3168 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3169
3170 int retval = reg->type->set(reg, buf);
3171 if (retval != ERROR_OK) {
3172 LOG_ERROR("Could not write to register '%s'", reg->name);
3173 } else {
3174 char *value = buf_to_hex_str(reg->value, reg->size);
3175 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3176 free(value);
3177 }
3178
3179 free(buf);
3180
3181 return retval;
3182 }
3183
3184 return ERROR_COMMAND_SYNTAX_ERROR;
3185
3186 not_found:
3187 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3188 return ERROR_OK;
3189 }
3190
3191 COMMAND_HANDLER(handle_poll_command)
3192 {
3193 int retval = ERROR_OK;
3194 struct target *target = get_current_target(CMD_CTX);
3195
3196 if (CMD_ARGC == 0) {
3197 command_print(CMD, "background polling: %s",
3198 jtag_poll_get_enabled() ? "on" : "off");
3199 command_print(CMD, "TAP: %s (%s)",
3200 target->tap->dotted_name,
3201 target->tap->enabled ? "enabled" : "disabled");
3202 if (!target->tap->enabled)
3203 return ERROR_OK;
3204 retval = target_poll(target);
3205 if (retval != ERROR_OK)
3206 return retval;
3207 retval = target_arch_state(target);
3208 if (retval != ERROR_OK)
3209 return retval;
3210 } else if (CMD_ARGC == 1) {
3211 bool enable;
3212 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3213 jtag_poll_set_enabled(enable);
3214 } else
3215 return ERROR_COMMAND_SYNTAX_ERROR;
3216
3217 return retval;
3218 }
3219
3220 COMMAND_HANDLER(handle_wait_halt_command)
3221 {
3222 if (CMD_ARGC > 1)
3223 return ERROR_COMMAND_SYNTAX_ERROR;
3224
3225 unsigned ms = DEFAULT_HALT_TIMEOUT;
3226 if (CMD_ARGC == 1) {
3227 int retval = parse_uint(CMD_ARGV[0], &ms);
3228 if (retval != ERROR_OK)
3229 return ERROR_COMMAND_SYNTAX_ERROR;
3230 }
3231
3232 struct target *target = get_current_target(CMD_CTX);
3233 return target_wait_state(target, TARGET_HALTED, ms);
3234 }
3235
3236 /* wait for target state to change. The trick here is to have a low
3237 * latency for short waits and not to suck up all the CPU time
3238 * on longer waits.
3239 *
3240 * After 500ms, keep_alive() is invoked
3241 */
3242 int target_wait_state(struct target *target, enum target_state state, int ms)
3243 {
3244 int retval;
3245 int64_t then = 0, cur;
3246 bool once = true;
3247
3248 for (;;) {
3249 retval = target_poll(target);
3250 if (retval != ERROR_OK)
3251 return retval;
3252 if (target->state == state)
3253 break;
3254 cur = timeval_ms();
3255 if (once) {
3256 once = false;
3257 then = timeval_ms();
3258 LOG_DEBUG("waiting for target %s...",
3259 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3260 }
3261
3262 if (cur - then > 500)
3263 keep_alive();
3264
3265 if ((cur - then) > ms) {
3266 LOG_ERROR("timed out while waiting for target %s",
3267 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3268 return ERROR_FAIL;
3269 }
3270 }
3271
3272 return ERROR_OK;
3273 }
3274
3275 COMMAND_HANDLER(handle_halt_command)
3276 {
3277 LOG_DEBUG("-");
3278
3279 struct target *target = get_current_target(CMD_CTX);
3280
3281 target->verbose_halt_msg = true;
3282
3283 int retval = target_halt(target);
3284 if (retval != ERROR_OK)
3285 return retval;
3286
3287 if (CMD_ARGC == 1) {
3288 unsigned wait_local;
3289 retval = parse_uint(CMD_ARGV[0], &wait_local);
3290 if (retval != ERROR_OK)
3291 return ERROR_COMMAND_SYNTAX_ERROR;
3292 if (!wait_local)
3293 return ERROR_OK;
3294 }
3295
3296 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3297 }
3298
3299 COMMAND_HANDLER(handle_soft_reset_halt_command)
3300 {
3301 struct target *target = get_current_target(CMD_CTX);
3302
3303 LOG_USER("requesting target halt and executing a soft reset");
3304
3305 target_soft_reset_halt(target);
3306
3307 return ERROR_OK;
3308 }
3309
3310 COMMAND_HANDLER(handle_reset_command)
3311 {
3312 if (CMD_ARGC > 1)
3313 return ERROR_COMMAND_SYNTAX_ERROR;
3314
3315 enum target_reset_mode reset_mode = RESET_RUN;
3316 if (CMD_ARGC == 1) {
3317 const struct jim_nvp *n;
3318 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3319 if ((!n->name) || (n->value == RESET_UNKNOWN))
3320 return ERROR_COMMAND_SYNTAX_ERROR;
3321 reset_mode = n->value;
3322 }
3323
3324 /* reset *all* targets */
3325 return target_process_reset(CMD, reset_mode);
3326 }
3327
3329 COMMAND_HANDLER(handle_resume_command)
3330 {
3331 int current = 1;
3332 if (CMD_ARGC > 1)
3333 return ERROR_COMMAND_SYNTAX_ERROR;
3334
3335 struct target *target = get_current_target(CMD_CTX);
3336
3337 /* with no CMD_ARGV, resume from current pc, addr = 0,
3338 * with one argument, addr = CMD_ARGV[0],
3339 * handle breakpoints, not debugging */
3340 target_addr_t addr = 0;
3341 if (CMD_ARGC == 1) {
3342 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3343 current = 0;
3344 }
3345
3346 return target_resume(target, current, addr, 1, 0);
3347 }
3348
3349 COMMAND_HANDLER(handle_step_command)
3350 {
3351 if (CMD_ARGC > 1)
3352 return ERROR_COMMAND_SYNTAX_ERROR;
3353
3354 LOG_DEBUG("-");
3355
3356 /* with no CMD_ARGV, step from current pc, addr = 0,
3357 * with one argument addr = CMD_ARGV[0],
3358 * handle breakpoints, debugging */
3359 target_addr_t addr = 0;
3360 int current_pc = 1;
3361 if (CMD_ARGC == 1) {
3362 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3363 current_pc = 0;
3364 }
3365
3366 struct target *target = get_current_target(CMD_CTX);
3367
3368 return target_step(target, current_pc, addr, 1);
3369 }
3370
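/* Pretty-print a memory buffer as 8/16/32/64 bit values, 32 bytes per line,
 * each line prefixed with its target address. */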
3371 void target_handle_md_output(struct command_invocation *cmd,
3372 struct target *target, target_addr_t address, unsigned size,
3373 unsigned count, const uint8_t *buffer)
3374 {
3375 const unsigned line_bytecnt = 32;
3376 unsigned line_modulo = line_bytecnt / size;
3377
3378 char output[line_bytecnt * 4 + 1];
3379 unsigned output_len = 0;
3380
3381 const char *value_fmt;
3382 switch (size) {
3383 case 8:
3384 value_fmt = "%16.16"PRIx64" ";
3385 break;
3386 case 4:
3387 value_fmt = "%8.8"PRIx64" ";
3388 break;
3389 case 2:
3390 value_fmt = "%4.4"PRIx64" ";
3391 break;
3392 case 1:
3393 value_fmt = "%2.2"PRIx64" ";
3394 break;
3395 default:
3396 /* "can't happen", caller checked */
3397 LOG_ERROR("invalid memory read size: %u", size);
3398 return;
3399 }
3400
3401 for (unsigned i = 0; i < count; i++) {
3402 if (i % line_modulo == 0) {
3403 output_len += snprintf(output + output_len,
3404 sizeof(output) - output_len,
3405 TARGET_ADDR_FMT ": ",
3406 (address + (i * size)));
3407 }
3408
3409 uint64_t value = 0;
3410 const uint8_t *value_ptr = buffer + i * size;
3411 switch (size) {
3412 case 8:
3413 value = target_buffer_get_u64(target, value_ptr);
3414 break;
3415 case 4:
3416 value = target_buffer_get_u32(target, value_ptr);
3417 break;
3418 case 2:
3419 value = target_buffer_get_u16(target, value_ptr);
3420 break;
3421 case 1:
3422 value = *value_ptr;
3423 }
3424 output_len += snprintf(output + output_len,
3425 sizeof(output) - output_len,
3426 value_fmt, value);
3427
3428 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3429 command_print(cmd, "%s", output);
3430 output_len = 0;
3431 }
3432 }
3433 }
3434
3435 COMMAND_HANDLER(handle_md_command)
3436 {
3437 if (CMD_ARGC < 1)
3438 return ERROR_COMMAND_SYNTAX_ERROR;
3439
3440 unsigned size = 0;
3441 switch (CMD_NAME[2]) {
3442 case 'd':
3443 size = 8;
3444 break;
3445 case 'w':
3446 size = 4;
3447 break;
3448 case 'h':
3449 size = 2;
3450 break;
3451 case 'b':
3452 size = 1;
3453 break;
3454 default:
3455 return ERROR_COMMAND_SYNTAX_ERROR;
3456 }
3457
3458 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3459 int (*fn)(struct target *target,
3460 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3461 if (physical) {
3462 CMD_ARGC--;
3463 CMD_ARGV++;
3464 fn = target_read_phys_memory;
3465 } else
3466 fn = target_read_memory;
3467 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3468 return ERROR_COMMAND_SYNTAX_ERROR;
3469
3470 target_addr_t address;
3471 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3472
3473 unsigned count = 1;
3474 if (CMD_ARGC == 2)
3475 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3476
3477 uint8_t *buffer = calloc(count, size);
3478 if (!buffer) {
3479 LOG_ERROR("Failed to allocate md read buffer");
3480 return ERROR_FAIL;
3481 }
3482
3483 struct target *target = get_current_target(CMD_CTX);
3484 int retval = fn(target, address, size, count, buffer);
3485 if (retval == ERROR_OK)
3486 target_handle_md_output(CMD, target, address, size, count, buffer);
3487
3488 free(buffer);
3489
3490 return retval;
3491 }
3492
3493 typedef int (*target_write_fn)(struct target *target,
3494 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3495
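/* Fill 'c' units of 'data_size' bytes at 'address' with the value 'b',
 * writing through 'fn' in chunks so large fills stay reasonably fast. */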
3496 static int target_fill_mem(struct target *target,
3497 target_addr_t address,
3498 target_write_fn fn,
3499 unsigned data_size,
3500 /* value */
3501 uint64_t b,
3502 /* count */
3503 unsigned c)
3504 {
3505 /* We have to write in reasonably large chunks to be able
3506 * to fill large memory areas with any sane speed */
3507 const unsigned chunk_size = 16384;
3508 uint8_t *target_buf = malloc(chunk_size * data_size);
3509 if (!target_buf) {
3510 LOG_ERROR("Out of memory");
3511 return ERROR_FAIL;
3512 }
3513
3514 for (unsigned i = 0; i < chunk_size; i++) {
3515 switch (data_size) {
3516 case 8:
3517 target_buffer_set_u64(target, target_buf + i * data_size, b);
3518 break;
3519 case 4:
3520 target_buffer_set_u32(target, target_buf + i * data_size, b);
3521 break;
3522 case 2:
3523 target_buffer_set_u16(target, target_buf + i * data_size, b);
3524 break;
3525 case 1:
3526 target_buffer_set_u8(target, target_buf + i * data_size, b);
3527 break;
3528 default:
3529 exit(-1);
3530 }
3531 }
3532
3533 int retval = ERROR_OK;
3534
3535 for (unsigned x = 0; x < c; x += chunk_size) {
3536 unsigned current;
3537 current = c - x;
3538 if (current > chunk_size)
3539 current = chunk_size;
3540 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3541 if (retval != ERROR_OK)
3542 break;
3543 /* avoid GDB timeouts */
3544 keep_alive();
3545 }
3546 free(target_buf);
3547
3548 return retval;
3549 }
3550
3552 COMMAND_HANDLER(handle_mw_command)
3553 {
3554 if (CMD_ARGC < 2)
3555 return ERROR_COMMAND_SYNTAX_ERROR;
3556 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3557 target_write_fn fn;
3558 if (physical) {
3559 CMD_ARGC--;
3560 CMD_ARGV++;
3561 fn = target_write_phys_memory;
3562 } else
3563 fn = target_write_memory;
3564 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3565 return ERROR_COMMAND_SYNTAX_ERROR;
3566
3567 target_addr_t address;
3568 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3569
3570 uint64_t value;
3571 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3572
3573 unsigned count = 1;
3574 if (CMD_ARGC == 3)
3575 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3576
3577 struct target *target = get_current_target(CMD_CTX);
3578 unsigned wordsize;
3579 switch (CMD_NAME[2]) {
3580 case 'd':
3581 wordsize = 8;
3582 break;
3583 case 'w':
3584 wordsize = 4;
3585 break;
3586 case 'h':
3587 wordsize = 2;
3588 break;
3589 case 'b':
3590 wordsize = 1;
3591 break;
3592 default:
3593 return ERROR_COMMAND_SYNTAX_ERROR;
3594 }
3595
3596 return target_fill_mem(target, address, fn, wordsize, value, count);
3597 }
3598
3599 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3600 target_addr_t *min_address, target_addr_t *max_address)
3601 {
3602 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3603 return ERROR_COMMAND_SYNTAX_ERROR;
3604
3605 /* a base address isn't always necessary,
3606 * default to 0x0 (i.e. don't relocate) */
3607 if (CMD_ARGC >= 2) {
3608 target_addr_t addr;
3609 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3610 image->base_address = addr;
3611 image->base_address_set = true;
3612 } else
3613 image->base_address_set = false;
3614
3615 image->start_address_set = false;
3616
3617 if (CMD_ARGC >= 4)
3618 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3619 if (CMD_ARGC == 5) {
3620 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3621 /* use size (given) to find max (required) */
3622 *max_address += *min_address;
3623 }
3624
3625 if (*min_address > *max_address)
3626 return ERROR_COMMAND_SYNTAX_ERROR;
3627
3628 return ERROR_OK;
3629 }
3630
3631 COMMAND_HANDLER(handle_load_image_command)
3632 {
3633 uint8_t *buffer;
3634 size_t buf_cnt;
3635 uint32_t image_size;
3636 target_addr_t min_address = 0;
3637 target_addr_t max_address = -1;
3638 struct image image;
3639
3640 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3641 &image, &min_address, &max_address);
3642 if (retval != ERROR_OK)
3643 return retval;
3644
3645 struct target *target = get_current_target(CMD_CTX);
3646
3647 struct duration bench;
3648 duration_start(&bench);
3649
3650 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3651 return ERROR_FAIL;
3652
3653 image_size = 0x0;
3654 retval = ERROR_OK;
3655 for (unsigned int i = 0; i < image.num_sections; i++) {
3656 buffer = malloc(image.sections[i].size);
3657 if (!buffer) {
3658 command_print(CMD,
3659 "error allocating buffer for section (%d bytes)",
3660 (int)(image.sections[i].size));
3661 retval = ERROR_FAIL;
3662 break;
3663 }
3664
3665 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3666 if (retval != ERROR_OK) {
3667 free(buffer);
3668 break;
3669 }
3670
3671 uint32_t offset = 0;
3672 uint32_t length = buf_cnt;
3673
3674 /* DANGER!!! beware of unsigned comparison here!!! */
3675
3676 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3677 (image.sections[i].base_address < max_address)) {
3678
3679 if (image.sections[i].base_address < min_address) {
3680 /* clip addresses below */
3681 offset += min_address - image.sections[i].base_address;
3682 length -= offset;
3683 }
3684
3685 if (image.sections[i].base_address + buf_cnt > max_address)
3686 length -= (image.sections[i].base_address + buf_cnt) - max_address;
3687
3688 retval = target_write_buffer(target,
3689 image.sections[i].base_address + offset, length, buffer + offset);
3690 if (retval != ERROR_OK) {
3691 free(buffer);
3692 break;
3693 }
3694 image_size += length;
3695 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3696 (unsigned int)length,
3697 image.sections[i].base_address + offset);
3698 }
3699
3700 free(buffer);
3701 }
3702
3703 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3704 command_print(CMD, "downloaded %" PRIu32 " bytes "
3705 "in %fs (%0.3f KiB/s)", image_size,
3706 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3707 }
3708
3709 image_close(&image);
3710
3711 return retval;
3713 }
3714
3715 COMMAND_HANDLER(handle_dump_image_command)
3716 {
3717 struct fileio *fileio;
3718 uint8_t *buffer;
3719 int retval, retvaltemp;
3720 target_addr_t address, size;
3721 struct duration bench;
3722 struct target *target = get_current_target(CMD_CTX);
3723
3724 if (CMD_ARGC != 3)
3725 return ERROR_COMMAND_SYNTAX_ERROR;
3726
3727 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3728 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3729
3730 uint32_t buf_size = (size > 4096) ? 4096 : size;
3731 buffer = malloc(buf_size);
3732 if (!buffer)
3733 return ERROR_FAIL;
3734
3735 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3736 if (retval != ERROR_OK) {
3737 free(buffer);
3738 return retval;
3739 }
3740
3741 duration_start(&bench);
3742
3743 while (size > 0) {
3744 size_t size_written;
3745 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3746 retval = target_read_buffer(target, address, this_run_size, buffer);
3747 if (retval != ERROR_OK)
3748 break;
3749
3750 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3751 if (retval != ERROR_OK)
3752 break;
3753
3754 size -= this_run_size;
3755 address += this_run_size;
3756 }
3757
3758 free(buffer);
3759
3760 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3761 size_t filesize;
3762 retval = fileio_size(fileio, &filesize);
3763 if (retval != ERROR_OK)
3764 return retval;
3765 command_print(CMD,
3766 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3767 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3768 }
3769
3770 retvaltemp = fileio_close(fileio);
3771 if (retvaltemp != ERROR_OK)
3772 return retvaltemp;
3773
3774 return retval;
3775 }
3776
3777 enum verify_mode {
3778 IMAGE_TEST = 0,
3779 IMAGE_VERIFY = 1,
3780 IMAGE_CHECKSUM_ONLY = 2
3781 };
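/* How the verify handler below interprets these modes (a summary derived
 * from the code, not from external documentation):
 *   IMAGE_TEST          - only print each section's load address and length;
 *                         target memory is not read.
 *   IMAGE_VERIFY        - CRC-check every section against target memory and,
 *                         on a mismatch, fall back to a byte-by-byte compare
 *                         that reports the individual differences.
 *   IMAGE_CHECKSUM_ONLY - CRC-check only; the first mismatch fails the
 *                         command without a byte-by-byte compare.
 */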
3782
3783 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3784 {
3785 uint8_t *buffer;
3786 size_t buf_cnt;
3787 uint32_t image_size;
3788 int retval;
3789 uint32_t checksum = 0;
3790 uint32_t mem_checksum = 0;
3791
3792 struct image image;
3793
3794 struct target *target = get_current_target(CMD_CTX);
3795
3796 if (CMD_ARGC < 1)
3797 return ERROR_COMMAND_SYNTAX_ERROR;
3798
3799 if (!target) {
3800 LOG_ERROR("no target selected");
3801 return ERROR_FAIL;
3802 }
3803
3804 struct duration bench;
3805 duration_start(&bench);
3806
3807 if (CMD_ARGC >= 2) {
3808 target_addr_t addr;
3809 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3810 image.base_address = addr;
3811 image.base_address_set = true;
3812 } else {
3813 image.base_address_set = false;
3814 image.base_address = 0x0;
3815 }
3816
3817 image.start_address_set = false;
3818
3819 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3820 if (retval != ERROR_OK)
3821 return retval;
3822
3823 image_size = 0x0;
3824 int diffs = 0;
3825 retval = ERROR_OK;
3826 for (unsigned int i = 0; i < image.num_sections; i++) {
3827 buffer = malloc(image.sections[i].size);
3828 if (!buffer) {
3829 command_print(CMD,
3830 "error allocating buffer for section (%" PRIu32 " bytes)",
3831 image.sections[i].size);
3832 retval = ERROR_FAIL;
break;
3833 }
3834 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3835 if (retval != ERROR_OK) {
3836 free(buffer);
3837 break;
3838 }
3839
3840 if (verify >= IMAGE_VERIFY) {
3841 /* calculate checksum of image */
3842 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3843 if (retval != ERROR_OK) {
3844 free(buffer);
3845 break;
3846 }
3847
3848 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3849 if (retval != ERROR_OK) {
3850 free(buffer);
3851 break;
3852 }
3853 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3854 LOG_ERROR("checksum mismatch");
3855 free(buffer);
3856 retval = ERROR_FAIL;
3857 goto done;
3858 }
3859 if (checksum != mem_checksum) {
3860 /* failed crc checksum, fall back to a binary compare */
3861 uint8_t *data;
3862
3863 if (diffs == 0)
3864 LOG_ERROR("checksum mismatch - attempting binary compare");
3865
3866 data = malloc(buf_cnt);
if (!data) {
	free(buffer);
	retval = ERROR_FAIL;
	break;
}
3867
3868 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3869 if (retval == ERROR_OK) {
3870 uint32_t t;
3871 for (t = 0; t < buf_cnt; t++) {
3872 if (data[t] != buffer[t]) {
3873 command_print(CMD,
3874 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3875 diffs,
3876 (unsigned)(t + image.sections[i].base_address),
3877 data[t],
3878 buffer[t]);
3879 if (diffs++ >= 127) {
3880 command_print(CMD, "More than 128 errors, the rest are not printed.");
3881 free(data);
3882 free(buffer);
3883 goto done;
3884 }
3885 }
3886 keep_alive();
3887 }
3888 }
3889 free(data);
3890 }
3891 } else {
3892 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3893 image.sections[i].base_address,
3894 buf_cnt);
3895 }
3896
3897 free(buffer);
3898 image_size += buf_cnt;
3899 }
3900 if (diffs > 0)
3901 command_print(CMD, "No more differences found.");
3902 done:
3903 if (diffs > 0)
3904 retval = ERROR_FAIL;
3905 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3906 command_print(CMD, "verified %" PRIu32 " bytes "
3907 "in %fs (%0.3f KiB/s)", image_size,
3908 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3909 }
3910
3911 image_close(&image);
3912
3913 return retval;
3914 }
3915
3916 COMMAND_HANDLER(handle_verify_image_checksum_command)
3917 {
3918 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3919 }
3920
3921 COMMAND_HANDLER(handle_verify_image_command)
3922 {
3923 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3924 }
3925
3926 COMMAND_HANDLER(handle_test_image_command)
3927 {
3928 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3929 }
3930
3931 static int handle_bp_command_list(struct command_invocation *cmd)
3932 {
3933 struct target *target = get_current_target(cmd->ctx);
3934 struct breakpoint *breakpoint = target->breakpoints;
3935 while (breakpoint) {
3936 if (breakpoint->type == BKPT_SOFT) {
3937 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3938 breakpoint->length);
3939 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3940 breakpoint->address,
3941 breakpoint->length,
3942 breakpoint->set, buf);
3943 free(buf);
3944 } else {
3945 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3946 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3947 breakpoint->asid,
3948 breakpoint->length, breakpoint->set);
3949 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3950 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3951 breakpoint->address,
3952 breakpoint->length, breakpoint->set);
3953 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3954 breakpoint->asid);
3955 } else
3956 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3957 breakpoint->address,
3958 breakpoint->length, breakpoint->set);
3959 }
3960
3961 breakpoint = breakpoint->next;
3962 }
3963 return ERROR_OK;
3964 }
3965
3966 static int handle_bp_command_set(struct command_invocation *cmd,
3967 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3968 {
3969 struct target *target = get_current_target(cmd->ctx);
3970 int retval;
3971
3972 if (asid == 0) {
3973 retval = breakpoint_add(target, addr, length, hw);
3974 /* error is always logged in breakpoint_add(), do not print it again */
3975 if (retval == ERROR_OK)
3976 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3977
3978 } else if (addr == 0) {
3979 if (!target->type->add_context_breakpoint) {
3980 LOG_ERROR("Context breakpoint not available");
3981 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3982 }
3983 retval = context_breakpoint_add(target, asid, length, hw);
3984 /* error is always logged in context_breakpoint_add(), do not print it again */
3985 if (retval == ERROR_OK)
3986 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3987
3988 } else {
3989 if (!target->type->add_hybrid_breakpoint) {
3990 LOG_ERROR("Hybrid breakpoint not available");
3991 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3992 }
3993 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3994 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3995 if (retval == ERROR_OK)
3996 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3997 }
3998 return retval;
3999 }
4000
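/* Argument forms accepted by the switch below (a sketch derived from the
 * parsing code; the user documentation remains authoritative):
 *   bp                               - list all breakpoints
 *   bp <address> <length>            - software breakpoint
 *   bp <address> <length> hw         - hardware breakpoint
 *   bp <asid> <length> hw_ctx        - context (ASID) breakpoint
 *   bp <address> <asid> <length>     - hybrid breakpoint (hardware); a fourth
 *                                      argument, if present, is not inspected.
 */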
4001 COMMAND_HANDLER(handle_bp_command)
4002 {
4003 target_addr_t addr;
4004 uint32_t asid;
4005 uint32_t length;
4006 int hw = BKPT_SOFT;
4007
4008 switch (CMD_ARGC) {
4009 case 0:
4010 return handle_bp_command_list(CMD);
4011
4012 case 2:
4013 asid = 0;
4014 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4015 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4016 return handle_bp_command_set(CMD, addr, asid, length, hw);
4017
4018 case 3:
4019 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4020 hw = BKPT_HARD;
4021 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4022 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4023 asid = 0;
4024 return handle_bp_command_set(CMD, addr, asid, length, hw);
4025 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4026 hw = BKPT_HARD;
4027 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4028 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4029 addr = 0;
4030 return handle_bp_command_set(CMD, addr, asid, length, hw);
4031 }
4032 /* fallthrough */
4033 case 4:
4034 hw = BKPT_HARD;
4035 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4036 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4037 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4038 return handle_bp_command_set(CMD, addr, asid, length, hw);
4039
4040 default:
4041 return ERROR_COMMAND_SYNTAX_ERROR;
4042 }
4043 }
4044
4045 COMMAND_HANDLER(handle_rbp_command)
4046 {
4047 if (CMD_ARGC != 1)
4048 return ERROR_COMMAND_SYNTAX_ERROR;
4049
4050 struct target *target = get_current_target(CMD_CTX);
4051
4052 if (!strcmp(CMD_ARGV[0], "all")) {
4053 breakpoint_remove_all(target);
4054 } else {
4055 target_addr_t addr;
4056 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4057
4058 breakpoint_remove(target, addr);
4059 }
4060
4061 return ERROR_OK;
4062 }
4063
4064 COMMAND_HANDLER(handle_wp_command)
4065 {
4066 struct target *target = get_current_target(CMD_CTX);
4067
4068 if (CMD_ARGC == 0) {
4069 struct watchpoint *watchpoint = target->watchpoints;
4070
4071 while (watchpoint) {
4072 command_print(CMD, "address: " TARGET_ADDR_FMT
4073 ", len: 0x%8.8" PRIx32
4074 ", r/w/a: %i, value: 0x%8.8" PRIx32
4075 ", mask: 0x%8.8" PRIx32,
4076 watchpoint->address,
4077 watchpoint->length,
4078 (int)watchpoint->rw,
4079 watchpoint->value,
4080 watchpoint->mask);
4081 watchpoint = watchpoint->next;
4082 }
4083 return ERROR_OK;
4084 }
4085
4086 enum watchpoint_rw type = WPT_ACCESS;
4087 target_addr_t addr = 0;
4088 uint32_t length = 0;
4089 uint32_t data_value = 0x0;
4090 uint32_t data_mask = 0xffffffff;
4091
4092 switch (CMD_ARGC) {
4093 case 5:
4094 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4095 /* fall through */
4096 case 4:
4097 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4098 /* fall through */
4099 case 3:
4100 switch (CMD_ARGV[2][0]) {
4101 case 'r':
4102 type = WPT_READ;
4103 break;
4104 case 'w':
4105 type = WPT_WRITE;
4106 break;
4107 case 'a':
4108 type = WPT_ACCESS;
4109 break;
4110 default:
4111 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4112 return ERROR_COMMAND_SYNTAX_ERROR;
4113 }
4114 /* fall through */
4115 case 2:
4116 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4117 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4118 break;
4119
4120 default:
4121 return ERROR_COMMAND_SYNTAX_ERROR;
4122 }
4123
4124 int retval = watchpoint_add(target, addr, length, type,
4125 data_value, data_mask);
4126 if (retval != ERROR_OK)
4127 LOG_ERROR("Failure setting watchpoint");
4128
4129 return retval;
4130 }
4131
4132 COMMAND_HANDLER(handle_rwp_command)
4133 {
4134 if (CMD_ARGC != 1)
4135 return ERROR_COMMAND_SYNTAX_ERROR;
4136
4137 target_addr_t addr;
4138 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4139
4140 struct target *target = get_current_target(CMD_CTX);
4141 watchpoint_remove(target, addr);
4142
4143 return ERROR_OK;
4144 }
4145
4146 /**
4147 * Translate a virtual address to a physical address.
4148 *
4149 * The low-level target implementation must have logged a detailed error
4150 * which is forwarded to telnet/GDB session.
4151 */
4152 COMMAND_HANDLER(handle_virt2phys_command)
4153 {
4154 if (CMD_ARGC != 1)
4155 return ERROR_COMMAND_SYNTAX_ERROR;
4156
4157 target_addr_t va;
4158 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4159 target_addr_t pa;
4160
4161 struct target *target = get_current_target(CMD_CTX);
4162 int retval = target->type->virt2phys(target, va, &pa);
4163 if (retval == ERROR_OK)
4164 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4165
4166 return retval;
4167 }
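/* Example of typical use of the handler above (addresses are illustrative
 * only, and assume the current target implements virt2phys):
 *   > virt2phys 0xc0008000
 *   Physical address 0x80008000
 */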
4168
4169 static void write_data(FILE *f, const void *data, size_t len)
4170 {
4171 size_t written = fwrite(data, 1, len, f);
4172 if (written != len)
4173 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4174 }
4175
4176 static void write_long(FILE *f, int l, struct target *target)
4177 {
4178 uint8_t val[4];
4179
4180 target_buffer_set_u32(target, val, l);
4181 write_data(f, val, 4);
4182 }
4183
4184 static void write_string(FILE *f, char *s)
4185 {
4186 write_data(f, s, strlen(s));
4187 }
4188
4189 typedef unsigned char UNIT[2]; /* unit of profiling */
4190
4191 /* Dump a gmon.out histogram file. */
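/* Rough sketch of the gmon.out layout emitted below, derived from this
 * function rather than from the binutils sources (multi-byte header fields
 * are 4-byte target-endian words produced by write_long()):
 *   "gmon"              magic
 *   0x00000001          version
 *   3 x 0x00000000      padding
 *   0x00                GMON_TAG_TIME_HIST
 *   low_pc, high_pc     sampled address range (min/max computed below)
 *   hist_size           number of buckets
 *   prof_rate           samples per second
 *   "seconds" + NULs    15-byte dimension string
 *   "s"                 dimension abbreviation
 *   hist_size x u16     per-bucket sample counts, low byte first,
 *                       clamped to 65535
 */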
4192 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4193 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4194 {
4195 uint32_t i;
4196 FILE *f = fopen(filename, "w");
4197 if (!f)
4198 return;
4199 write_string(f, "gmon");
4200 write_long(f, 0x00000001, target); /* Version */
4201 write_long(f, 0, target); /* padding */
4202 write_long(f, 0, target); /* padding */
4203 write_long(f, 0, target); /* padding */
4204
4205 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4206 write_data(f, &zero, 1);
4207
4208 /* figure out bucket size */
4209 uint32_t min;
4210 uint32_t max;
4211 if (with_range) {
4212 min = start_address;
4213 max = end_address;
4214 } else {
4215 min = samples[0];
4216 max = samples[0];
4217 for (i = 0; i < sample_num; i++) {
4218 if (min > samples[i])
4219 min = samples[i];
4220 if (max < samples[i])
4221 max = samples[i];
4222 }
4223
4224 /* max should be (largest sample + 1)
4225 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4226 max++;
4227 }
4228
4229 int address_space = max - min;
4230 assert(address_space >= 2);
4231
4232 /* FIXME: What is the reasonable number of buckets?
4233 * The profiling result will be more accurate if there are enough buckets. */
4234 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4235 uint32_t num_buckets = address_space / sizeof(UNIT);
4236 if (num_buckets > max_buckets)
4237 num_buckets = max_buckets;
4238 int *buckets = malloc(sizeof(int) * num_buckets);
4239 if (!buckets) {
4240 fclose(f);
4241 return;
4242 }
4243 memset(buckets, 0, sizeof(int) * num_buckets);
4244 for (i = 0; i < sample_num; i++) {
4245 uint32_t address = samples[i];
4246
4247 if ((address < min) || (max <= address))
4248 continue;
4249
4250 long long a = address - min;
4251 long long b = num_buckets;
4252 long long c = address_space;
4253 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4254 buckets[index_t]++;
4255 }
4256
4257 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4258 write_long(f, min, target); /* low_pc */
4259 write_long(f, max, target); /* high_pc */
4260 write_long(f, num_buckets, target); /* # of buckets */
4261 float sample_rate = sample_num / (duration_ms / 1000.0);
4262 write_long(f, sample_rate, target);
4263 write_string(f, "seconds");
4264 for (i = 0; i < (15-strlen("seconds")); i++)
4265 write_data(f, &zero, 1);
4266 write_string(f, "s");
4267
4268 /* append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4269
4270 char *data = malloc(2 * num_buckets);
4271 if (data) {
4272 for (i = 0; i < num_buckets; i++) {
4273 int val;
4274 val = buckets[i];
4275 if (val > 65535)
4276 val = 65535;
4277 data[i * 2] = val&0xff;
4278 data[i * 2 + 1] = (val >> 8) & 0xff;
4279 }
4280 free(buckets);
4281 write_data(f, data, num_buckets * 2);
4282 free(data);
4283 } else
4284 free(buckets);
4285
4286 fclose(f);
4287 }
4288
4289 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4290 * which will be used as a random sampling of PC */
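/* Usage, as parsed below (a sketch; the precise meaning of the first
 * argument is left to the per-target target_profiling() implementation):
 *   profile <n> <gmon.out file> [<start_address> <end_address>]
 * The optional address pair restricts the histogram written by write_gmon()
 * to that range; otherwise the range is derived from the samples themselves. */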
4291 COMMAND_HANDLER(handle_profile_command)
4292 {
4293 struct target *target = get_current_target(CMD_CTX);
4294
4295 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4296 return ERROR_COMMAND_SYNTAX_ERROR;
4297
4298 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4299 uint32_t offset;
4300 uint32_t num_of_samples;
4301 int retval = ERROR_OK;
4302 bool halted_before_profiling = target->state == TARGET_HALTED;
4303
4304 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4305
4306 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4307 if (!samples) {
4308 LOG_ERROR("No memory to store samples.");
4309 return ERROR_FAIL;
4310 }
4311
4312 uint64_t timestart_ms = timeval_ms();
4313 /**
4314 * Some cores let us sample the PC without the
4315 * annoying halt/resume step; for example, ARMv7 PCSR.
4316 * Provide a way to use that more efficient mechanism.
4317 */
4318 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4319 &num_of_samples, offset);
4320 if (retval != ERROR_OK) {
4321 free(samples);
4322 return retval;
4323 }
4324 uint32_t duration_ms = timeval_ms() - timestart_ms;
4325
4326 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4327
4328 retval = target_poll(target);
4329 if (retval != ERROR_OK) {
4330 free(samples);
4331 return retval;
4332 }
4333
4334 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4335 /* The target was halted before we started and is running now. Halt it,
4336 * for consistency. */
4337 retval = target_halt(target);
4338 if (retval != ERROR_OK) {
4339 free(samples);
4340 return retval;
4341 }
4342 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4343 /* The target was running before we started and is halted now. Resume
4344 * it, for consistency. */
4345 retval = target_resume(target, 1, 0, 0, 0);
4346 if (retval != ERROR_OK) {
4347 free(samples);
4348 return retval;
4349 }
4350 }
4351
4352 retval = target_poll(target);
4353 if (retval != ERROR_OK) {
4354 free(samples);
4355 return retval;
4356 }
4357
4358 uint32_t start_address = 0;
4359 uint32_t end_address = 0;
4360 bool with_range = false;
4361 if (CMD_ARGC == 4) {
4362 with_range = true;
4363 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4364 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4365 }
4366
4367 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4368 with_range, start_address, end_address, target, duration_ms);
4369 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4370
4371 free(samples);
4372 return retval;
4373 }
4374
4375 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4376 {
4377 char *namebuf;
4378 Jim_Obj *obj_name, *obj_val;
4379 int result;
4380
4381 namebuf = alloc_printf("%s(%d)", varname, idx);
4382 if (!namebuf)
4383 return JIM_ERR;
4384
4385 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4386 jim_wide wide_val = val;
4387 obj_val = Jim_NewWideObj(interp, wide_val);
4388 if (!obj_name || !obj_val) {
4389 free(namebuf);
4390 return JIM_ERR;
4391 }
4392
4393 Jim_IncrRefCount(obj_name);
4394 Jim_IncrRefCount(obj_val);
4395 result = Jim_SetVariable(interp, obj_name, obj_val);
4396 Jim_DecrRefCount(interp, obj_name);
4397 Jim_DecrRefCount(interp, obj_val);
4398 free(namebuf);
4399 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4400 return result;
4401 }
4402
4403 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4404 {
4405 struct command_context *context;
4406 struct target *target;
4407
4408 context = current_command_context(interp);
4409 assert(context);
4410
4411 target = get_current_target(context);
4412 if (!target) {
4413 LOG_ERROR("mem2array: no current target");
4414 return JIM_ERR;
4415 }
4416
4417 return target_mem2array(interp, target, argc - 1, argv + 1);
4418 }
4419
4420 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4421 {
4422 int e;
4423
4424 /* argv[0] = name of array to receive the data
4425 * argv[1] = desired element width in bits
4426 * argv[2] = memory address
4427 * argv[3] = count of times to read
4428 * argv[4] = optional "phys"
4429 */
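/* Example (Tcl), matching the argument list above; array name and address
 * are illustrative only:
 *   mem2array readbuf 32 0x20000000 16
 * reads sixteen 32-bit words starting at 0x20000000 into
 * readbuf(0)..readbuf(15); appending "phys" requests the read to bypass
 * address translation via target_read_phys_memory(). */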
4430 if (argc < 4 || argc > 5) {
4431 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4432 return JIM_ERR;
4433 }
4434
4435 /* Arg 0: Name of the array variable */
4436 const char *varname = Jim_GetString(argv[0], NULL);
4437
4438 /* Arg 1: Bit width of one element */
4439 long l;
4440 e = Jim_GetLong(interp, argv[1], &l);
4441 if (e != JIM_OK)
4442 return e;
4443 const unsigned int width_bits = l;
4444
4445 if (width_bits != 8 &&
4446 width_bits != 16 &&
4447 width_bits != 32 &&
4448 width_bits != 64) {
4449 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4450 Jim_AppendStrings(interp, Jim_GetResult(interp),
4451 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4452 return JIM_ERR;
4453 }
4454 const unsigned int width = width_bits / 8;
4455
4456 /* Arg 2: Memory address */
4457 jim_wide wide_addr;
4458 e = Jim_GetWide(interp, argv[2], &wide_addr);
4459 if (e != JIM_OK)
4460 return e;
4461 target_addr_t addr = (target_addr_t)wide_addr;
4462
4463 /* Arg 3: Number of elements to read */
4464 e = Jim_GetLong(interp, argv[3], &l);
4465 if (e != JIM_OK)
4466 return e;
4467 size_t len = l;
4468
4469 /* Arg 4: phys */
4470 bool is_phys = false;
4471 if (argc > 4) {
4472 int str_len = 0;
4473 const char *phys = Jim_GetString(argv[4], &str_len);
4474 if (!strncmp(phys, "phys", str_len))
4475 is_phys = true;
4476 else
4477 return JIM_ERR;
4478 }
4479
4480 /* Argument checks */
4481 if (len == 0) {
4482 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4483 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero length read?", NULL);
4484 return JIM_ERR;
4485 }
4486 if ((addr + (len * width)) < addr) {
4487 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4488 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4489 return JIM_ERR;
4490 }
4491 if (len > 65536) {
4492 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4493 Jim_AppendStrings(interp, Jim_GetResult(interp),
4494 "mem2array: too large read request, exceeds 64K items", NULL);
4495 return JIM_ERR;
4496 }
4497
4498 if ((width == 1) ||
4499 ((width == 2) && ((addr & 1) == 0)) ||
4500 ((width == 4) && ((addr & 3) == 0)) ||
4501 ((width == 8) && ((addr & 7) == 0))) {
4502 /* alignment correct */
4503 } else {
4504 char buf[100];
4505 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4506 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4507 addr,
4508 width);
4509 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4510 return JIM_ERR;
4511 }
4512
4513 /* Transfer loop */
4514
4515 /* index counter */
4516 size_t idx = 0;
4517
4518 const size_t buffersize = 4096;
4519 uint8_t *buffer = malloc(buffersize);
4520 if (!buffer)
4521 return JIM_ERR;
4522
4523 /* assume ok */
4524 e = JIM_OK;
4525 while (len) {
4526 /* Slurp... in buffer size chunks */
4527 const unsigned int max_chunk_len = buffersize / width;
4528 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4529
4530 int retval;
4531 if (is_phys)
4532 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4533 else
4534 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4535 if (retval != ERROR_OK) {
4536 /* BOO !*/
4537 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4538 addr,
4539 width,
4540 chunk_len);
4541 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4542 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4543 e = JIM_ERR;
4544 break;
4545 } else {
4546 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4547 uint64_t v = 0;
4548 switch (width) {
4549 case 8:
4550 v = target_buffer_get_u64(target, &buffer[i*width]);
4551 break;
4552 case 4:
4553 v = target_buffer_get_u32(target, &buffer[i*width]);
4554 break;
4555 case 2:
4556 v = target_buffer_get_u16(target, &buffer[i*width]);
4557 break;
4558 case 1:
4559 v = buffer[i] & 0x0ff;
4560 break;
4561 }
4562 new_u64_array_element(interp, varname, idx, v);
4563 }
4564 len -= chunk_len;
4565 addr += chunk_len * width;
4566 }
4567 }
4568
4569 free(buffer);
4570
4571 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4572
4573 return e;
4574 }
4575
4576 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4577 {
4578 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4579 if (!namebuf)
4580 return JIM_ERR;
4581
4582 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4583 if (!obj_name) {
4584 free(namebuf);
4585 return JIM_ERR;
4586 }
4587
4588 Jim_IncrRefCount(obj_name);
4589 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4590 Jim_DecrRefCount(interp, obj_name);
4591 free(namebuf);
4592 if (!obj_val)
4593 return JIM_ERR;
4594
4595 jim_wide wide_val;
4596 int result = Jim_GetWide(interp, obj_val, &wide_val);
4597 *val = wide_val;
4598 return result;
4599 }
4600
4601 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4602 {
4603 struct command_context *context;
4604 struct target *target;
4605
4606 context = current_command_context(interp);
4607 assert(context);
4608
4609 target = get_current_target(context);
4610 if (!target) {
4611 LOG_ERROR("array2mem: no current target");
4612 return JIM_ERR;
4613 }
4614
4615 return target_array2mem(interp, target, argc-1, argv + 1);
4616 }
4617
4618 static int target_array2mem(Jim_Interp *interp, struct target *target,
4619 int argc, Jim_Obj *const *argv)
4620 {
4621 int e;
4622
4623 /* argv[0] = name of array from which to read the data
4624 * argv[1] = desired element width in bits
4625 * argv[2] = memory address
4626 * argv[3] = number of elements to write
4627 * argv[4] = optional "phys"
4628 */
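/* Example (Tcl), matching the argument list above; array name and address
 * are illustrative only:
 *   array2mem writebuf 32 0x20000000 16
 * writes writebuf(0)..writebuf(15) as sixteen 32-bit words starting at
 * 0x20000000; appending "phys" requests the write to bypass address
 * translation via target_write_phys_memory(). */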
4629 if (argc < 4 || argc > 5) {
4630 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4631 return JIM_ERR;
4632 }
4633
4634 /* Arg 0: Name of the array variable */
4635 const char *varname = Jim_GetString(argv[0], NULL);
4636
4637 /* Arg 1: Bit width of one element */
4638 long l;
4639 e = Jim_GetLong(interp, argv[1], &l);
4640 if (e != JIM_OK)
4641 return e;
4642 const unsigned int width_bits = l;
4643
4644 if (width_bits != 8 &&
4645 width_bits != 16 &&
4646 width_bits != 32 &&
4647 width_bits != 64) {
4648 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4649 Jim_AppendStrings(interp, Jim_GetResult(interp),
4650 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4651 return JIM_ERR;
4652 }
4653 const unsigned int width = width_bits / 8;
4654
4655 /* Arg 2: Memory address */
4656 jim_wide wide_addr;
4657 e = Jim_GetWide(interp, argv[2], &wide_addr);
4658 if (e != JIM_OK)
4659 return e;
4660 target_addr_t addr = (target_addr_t)wide_addr;
4661
4662 /* Arg 3: Number of elements to write */
4663 e = Jim_GetLong(interp, argv[3], &l);
4664 if (e != JIM_OK)
4665 return e;
4666 size_t len = l;
4667
4668 /* Arg 4: Phys */
4669 bool is_phys = false;
4670 if (argc > 4) {
4671 int str_len = 0;
4672 const char *phys = Jim_GetString(argv[4], &str_len);
4673 if (!strncmp(phys, "phys", str_len))
4674 is_phys = true;
4675 else
4676 return JIM_ERR;
4677 }
4678
4679 /* Argument checks */
4680 if (len == 0) {
4681 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4682 Jim_AppendStrings(interp, Jim_GetResult(interp),
4683 "array2mem: zero width read?", NULL);
4684 return JIM_ERR;
4685 }
4686
4687 if ((addr + (len * width)) < addr) {
4688 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4689 Jim_AppendStrings(interp, Jim_GetResult(interp),
4690 "array2mem: addr + len - wraps to zero?", NULL);
4691 return JIM_ERR;
4692 }
4693
4694 if (len > 65536) {
4695 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4696 Jim_AppendStrings(interp, Jim_GetResult(interp),
4697 "array2mem: too large memory write request, exceeds 64K items", NULL);
4698 return JIM_ERR;
4699 }
4700
4701 if ((width == 1) ||
4702 ((width == 2) && ((addr & 1) == 0)) ||
4703 ((width == 4) && ((addr & 3) == 0)) ||
4704 ((width == 8) && ((addr & 7) == 0))) {
4705 /* alignment correct */
4706 } else {
4707 char buf[100];
4708 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4709 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte writes",
4710 addr,
4711 width);
4712 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4713 return JIM_ERR;
4714 }
4715
4716 /* Transfer loop */
4717
4718 /* assume ok */
4719 e = JIM_OK;
4720
4721 const size_t buffersize = 4096;
4722 uint8_t *buffer = malloc(buffersize);
4723 if (!buffer)
4724 return JIM_ERR;
4725
4726 /* index counter */
4727 size_t idx = 0;
4728
4729 while (len) {
4730 /* Slurp... in buffer size chunks */
4731 const unsigned int max_chunk_len = buffersize / width;
4732
4733 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4734
4735 /* Fill the buffer */
4736 for (size_t i = 0; i < chunk_len; i++, idx++) {
4737 uint64_t v = 0;
4738 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4739 free(buffer);
4740 return JIM_ERR;
4741 }
4742 switch (width) {
4743 case 8:
4744 target_buffer_set_u64(target, &buffer[i * width], v);
4745 break;
4746 case 4:
4747 target_buffer_set_u32(target, &buffer[i * width], v);
4748 break;
4749 case 2:
4750 target_buffer_set_u16(target, &buffer[i * width], v);
4751 break;
4752 case 1:
4753 buffer[i] = v & 0x0ff;
4754 break;
4755 }
4756 }
4757 len -= chunk_len;
4758
4759 /* Write the buffer to memory */
4760 int retval;
4761 if (is_phys)
4762 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4763 else
4764 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4765 if (retval != ERROR_OK) {
4766 /* BOO !*/
4767 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4768 addr,
4769 width,
4770 chunk_len);
4771 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4772 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4773 e = JIM_ERR;
4774 break;
4775 }
4776 addr += chunk_len * width;
4777 }
4778
4779 free(buffer);
4780
4781 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4782
4783 return e;
4784 }
4785
4786 /* FIX? should we propagate errors here rather than printing them
4787 * and continuing?
4788 */
4789 void target_handle_event(struct target *target, enum target_event e)
4790 {
4791 struct target_event_action *teap;
4792 int retval;
4793
4794 for (teap = target->event_action; teap; teap = teap->next) {
4795 if (teap->event == e) {
4796 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4797 target->target_number,
4798 target_name(target),
4799 target_type_name(target),
4800 e,
4801 jim_nvp_value2name_simple(nvp_target_event, e)->name,
4802 Jim_GetString(teap->body, NULL));
4803
4804 /* Override the current target with the target the event
4805 * is issued from (a lot of scripts need this).
4806 * Restore the previous override as soon as
4807 * the handler processing is done. */
4808 struct command_context *cmd_ctx = current_command_context(teap->interp);
4809 struct target *saved_target_override = cmd_ctx->current_target_override;
4810 cmd_ctx->current_target_override = target;
4811
4812 retval = Jim_EvalObj(teap->interp, teap->body);
4813
4814 cmd_ctx->current_target_override = saved_target_override;
4815
4816 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
4817 return;
4818
4819 if (retval == JIM_RETURN)
4820 retval = teap->interp->returnCode;
4821
4822 if (retval != JIM_OK) {
4823 Jim_MakeErrorMessage(teap->interp);
4824 LOG_USER("Error executing event %s on target %s:\n%s",
4825 jim_nvp_value2name_simple(nvp_target_event, e)->name,
4826 target_name(target),
4827 Jim_GetString(Jim_GetResult(teap->interp), NULL));
4828 /* clean both error code and stacktrace before return */
4829 Jim_Eval(teap->interp, "error \"\" \"\"");
4830 }
4831 }
4832 }
4833 }
4834
4835 /**
4836 * Returns true only if the target has a handler for the specified event.
4837 */
4838 bool target_has_event_action(struct target *target, enum target_event event)
4839 {
4840 struct target_event_action *teap;
4841
4842 for (teap = target->event_action; teap; teap = teap->next) {
4843 if (teap->event == event)
4844 return true;
4845 }
4846 return false;
4847 }
4848
4849 enum target_cfg_param {
4850 TCFG_TYPE,
4851 TCFG_EVENT,
4852 TCFG_WORK_AREA_VIRT,
4853 TCFG_WORK_AREA_PHYS,
4854 TCFG_WORK_AREA_SIZE,
4855 TCFG_WORK_AREA_BACKUP,
4856 TCFG_ENDIAN,
4857 TCFG_COREID,
4858 TCFG_CHAIN_POSITION,
4859 TCFG_DBGBASE,
4860 TCFG_RTOS,
4861 TCFG_DEFER_EXAMINE,
4862 TCFG_GDB_PORT,
4863 TCFG_GDB_MAX_CONNECTIONS,
4864 };
4865
4866 static struct jim_nvp nvp_config_opts[] = {
4867 { .name = "-type", .value = TCFG_TYPE },
4868 { .name = "-event", .value = TCFG_EVENT },
4869 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4870 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4871 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4872 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4873 { .name = "-endian", .value = TCFG_ENDIAN },
4874 { .name = "-coreid", .value = TCFG_COREID },
4875 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4876 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4877 { .name = "-rtos", .value = TCFG_RTOS },
4878 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4879 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
4880 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
4881 { .name = NULL, .value = -1 }
4882 };
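/* Example (Tcl) of the 'configure' / 'cget' sub-commands that end up in
 * target_configure() below; $_TARGETNAME is assumed to hold the target
 * command name and the values are illustrative only:
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { echo "reset-init" }
 *   puts [$_TARGETNAME cget -endian]
 * 'configure' sets the options listed in the table above; 'cget' returns the
 * current value of a single option. */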
4883
4884 static int target_configure(struct jim_getopt_info *goi, struct target *target)
4885 {
4886 struct jim_nvp *n;
4887 Jim_Obj *o;
4888 jim_wide w;
4889 int e;
4890
4891 /* parse config or cget options ... */
4892 while (goi->argc > 0) {
4893 Jim_SetEmptyResult(goi->interp);
4894 /* jim_getopt_debug(goi); */
4895
4896 if (target->type->target_jim_configure) {
4897 /* target defines a configure function */
4898 /* target gets first dibs on parameters */
4899 e = (*(target->type->target_jim_configure))(target, goi);
4900 if (e == JIM_OK) {
4901 /* more? */
4902 continue;
4903 }
4904 if (e == JIM_ERR) {
4905 /* An error */
4906 return e;
4907 }
4908 /* otherwise we 'continue' below */
4909 }
4910 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
4911 if (e != JIM_OK) {
4912 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
4913 return e;
4914 }
4915 switch (n->value) {
4916 case TCFG_TYPE:
4917 /* not settable */
4918 if (goi->isconfigure) {
4919 Jim_SetResultFormatted(goi->interp,
4920 "not settable: %s", n->name);
4921 return JIM_ERR;
4922 } else {
4923 no_params:
4924 if (goi->argc != 0) {
4925 Jim_WrongNumArgs(goi->interp,
4926 goi->argc, goi->argv,
4927 "NO PARAMS");
4928 return JIM_ERR;
4929 }
4930 }
4931 Jim_SetResultString(goi->interp,
4932 target_type_name(target), -1);
4933 /* loop for more */
4934 break;
4935 case TCFG_EVENT:
4936 if (goi->argc == 0) {
4937 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4938 return JIM_ERR;
4939 }
4940
4941 e = jim_getopt_nvp(goi, nvp_target_event, &n);
4942 if (e != JIM_OK) {
4943 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
4944 return e;
4945 }
4946
4947 if (goi->isconfigure) {
4948 if (goi->argc != 1) {
4949 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4950 return JIM_ERR;
4951 }
4952 } else {
4953 if (goi->argc != 0) {
4954 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4955 return JIM_ERR;
4956 }
4957 }
4958
4959 {
4960 struct target_event_action *teap;
4961
4962 teap = target->event_action;
4963 /* replace existing? */
4964 while (teap) {
4965 if (teap->event == (enum target_event)n->value)
4966 break;
4967 teap = teap->next;
4968 }
4969
4970 if (goi->isconfigure) {
4971 /* START_DEPRECATED_TPIU */
4972 if (n->value == TARGET_EVENT_TRACE_CONFIG)
4973 LOG_INFO("DEPRECATED target event %s", n->name);
4974 /* END_DEPRECATED_TPIU */
4975
4976 bool replace = true;
4977 if (!teap) {
4978 /* create new */
4979 teap = calloc(1, sizeof(*teap));
4980 replace = false;
4981 }
4982 teap->event = n->value;
4983 teap->interp = goi->interp;
4984 jim_getopt_obj(goi, &o);
4985 if (teap->body)
4986 Jim_DecrRefCount(teap->interp, teap->body);
4987 teap->body = Jim_DuplicateObj(goi->interp, o);
4988 /*
4989 * FIXME:
4990 * Tcl/TK - "tk events" have a nice feature.
4991 * See the "BIND" command.
4992 * We should support that here.
4993 * You can specify %X and %Y in the event code.
4994 * The idea is: %T - target name.
4995 * The idea is: %N - target number
4996 * The idea is: %E - event name.
4997 */
4998 Jim_IncrRefCount(teap->body);
4999
5000 if (!replace) {
5001 /* add to head of event list */
5002 teap->next = target->event_action;
5003 target->event_action = teap;
5004 }
5005 Jim_SetEmptyResult(goi->interp);
5006 } else {
5007 /* get */
5008 if (!teap)
5009 Jim_SetEmptyResult(goi->interp);
5010 else
5011 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5012 }
5013 }
5014 /* loop for more */
5015 break;
5016
5017 case TCFG_WORK_AREA_VIRT:
5018 if (goi->isconfigure) {
5019 target_free_all_working_areas(target);
5020 e = jim_getopt_wide(goi, &w);
5021 if (e != JIM_OK)
5022 return e;
5023 target->working_area_virt = w;
5024 target->working_area_virt_spec = true;
5025 } else {
5026 if (goi->argc != 0)
5027 goto no_params;
5028 }
5029 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5030 /* loop for more */
5031 break;
5032
5033 case TCFG_WORK_AREA_PHYS:
5034 if (goi->isconfigure) {
5035 target_free_all_working_areas(target);
5036 e = jim_getopt_wide(goi, &w);
5037 if (e != JIM_OK)
5038 return e;
5039 target->working_area_phys = w;
5040 target->working_area_phys_spec = true;
5041 } else {
5042 if (goi->argc != 0)
5043 goto no_params;
5044 }
5045 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5046 /* loop for more */
5047 break;
5048
5049 case TCFG_WORK_AREA_SIZE:
5050 if (goi->isconfigure) {
5051 target_free_all_working_areas(target);
5052 e = jim_getopt_wide(goi, &w);
5053 if (e != JIM_OK)
5054 return e;
5055 target->working_area_size = w;
5056 } else {
5057 if (goi->argc != 0)
5058 goto no_params;
5059 }
5060 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5061 /* loop for more */
5062 break;
5063
5064 case TCFG_WORK_AREA_BACKUP:
5065 if (goi->isconfigure) {
5066 target_free_all_working_areas(target);
5067 e = jim_getopt_wide(goi, &w);
5068 if (e != JIM_OK)
5069 return e;
5070 /* make this exactly 1 or 0 */
5071 target->backup_working_area = (!!w);
5072 } else {
5073 if (goi->argc != 0)
5074 goto no_params;
5075 }
5076 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5077 /* loop for more */
5078 break;
5079
5080
5081 case TCFG_ENDIAN:
5082 if (goi->isconfigure) {
5083 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5084 if (e != JIM_OK) {
5085 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5086 return e;
5087 }
5088 target->endianness = n->value;
5089 } else {
5090 if (goi->argc != 0)
5091 goto no_params;
5092 }
5093 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5094 if (!n->name) {
5095 target->endianness = TARGET_LITTLE_ENDIAN;
5096 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5097 }
5098 Jim_SetResultString(goi->interp, n->name, -1);
5099 /* loop for more */
5100 break;
5101
5102 case TCFG_COREID:
5103 if (goi->isconfigure) {
5104 e = jim_getopt_wide(goi, &w);
5105 if (e != JIM_OK)
5106 return e;
5107 target->coreid = (int32_t)w;
5108 } else {
5109 if (goi->argc != 0)
5110 goto no_params;
5111 }
5112 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5113 /* loop for more */
5114 break;
5115
5116 case TCFG_CHAIN_POSITION:
5117 if (goi->isconfigure) {
5118 Jim_Obj *o_t;
5119 struct jtag_tap *tap;
5120
5121 if (target->has_dap) {
5122 Jim_SetResultString(goi->interp,
5123 "target requires -dap parameter instead of -chain-position!", -1);
5124 return JIM_ERR;
5125 }
5126
5127 target_free_all_working_areas(target);
5128 e = jim_getopt_obj(goi, &o_t);
5129 if (e != JIM_OK)
5130 return e;
5131 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5132 if (!tap)
5133 return JIM_ERR;
5134 target->tap = tap;
5135 target->tap_configured = true;
5136 } else {
5137 if (goi->argc != 0)
5138 goto no_params;
5139 }
5140 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5141 /* loop for more */
5142 break;
5143 case TCFG_DBGBASE:
5144 if (goi->isconfigure) {
5145 e = jim_getopt_wide(goi, &w);
5146 if (e != JIM_OK)
5147 return e;
5148 target->dbgbase = (uint32_t)w;
5149 target->dbgbase_set = true;
5150 } else {
5151 if (goi->argc != 0)
5152 goto no_params;
5153 }
5154 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5155 /* loop for more */
5156 break;
5157 case TCFG_RTOS:
5158 /* RTOS */
5159 {
5160 int result = rtos_create(goi, target);
5161 if (result != JIM_OK)
5162 return result;
5163 }
5164 /* loop for more */
5165 break;
5166
5167 case TCFG_DEFER_EXAMINE:
5168 /* DEFER_EXAMINE */
5169 target->defer_examine = true;
5170 /* loop for more */
5171 break;
5172
5173 case TCFG_GDB_PORT:
5174 if (goi->isconfigure) {
5175 struct command_context *cmd_ctx = current_command_context(goi->interp);
5176 if (cmd_ctx->mode != COMMAND_CONFIG) {
5177 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5178 return JIM_ERR;
5179 }
5180
5181 const char *s;
5182 e = jim_getopt_string(goi, &s, NULL);
5183 if (e != JIM_OK)
5184 return e;
5185 free(target->gdb_port_override);
5186 target->gdb_port_override = strdup(s);
5187 } else {
5188 if (goi->argc != 0)
5189 goto no_params;
5190 }
5191 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5192 /* loop for more */
5193 break;
5194
5195 case TCFG_GDB_MAX_CONNECTIONS:
5196 if (goi->isconfigure) {
5197 struct command_context *cmd_ctx = current_command_context(goi->interp);
5198 if (cmd_ctx->mode != COMMAND_CONFIG) {
5199 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5200 return JIM_ERR;
5201 }
5202
5203 e = jim_getopt_wide(goi, &w);
5204 if (e != JIM_OK)
5205 return e;
5206 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5207 } else {
5208 if (goi->argc != 0)
5209 goto no_params;
5210 }
5211 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5212 break;
5213 }
5214 } /* while (goi->argc) */
5215
5216
5217 /* done - we return */
5218 return JIM_OK;
5219 }
5220
5221 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5222 {
5223 struct command *c = jim_to_command(interp);
5224 struct jim_getopt_info goi;
5225
5226 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5227 goi.isconfigure = !strcmp(c->name, "configure");
5228 if (goi.argc < 1) {
5229 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5230 "missing: -option ...");
5231 return JIM_ERR;
5232 }
5233 struct command_context *cmd_ctx = current_command_context(interp);
5234 assert(cmd_ctx);
5235 struct target *target = get_current_target(cmd_ctx);
5236 return target_configure(&goi, target);
5237 }
5238
5239 static int jim_target_mem2array(Jim_Interp *interp,
5240 int argc, Jim_Obj *const *argv)
5241 {
5242 struct command_context *cmd_ctx = current_command_context(interp);
5243 assert(cmd_ctx);
5244 struct target *target = get_current_target(cmd_ctx);
5245 return target_mem2array(interp, target, argc - 1, argv + 1);
5246 }
5247
5248 static int jim_target_array2mem(Jim_Interp *interp,
5249 int argc, Jim_Obj *const *argv)
5250 {
5251 struct command_context *cmd_ctx = current_command_context(interp);
5252 assert(cmd_ctx);
5253 struct target *target = get_current_target(cmd_ctx);
5254 return target_array2mem(interp, target, argc - 1, argv + 1);
5255 }
5256
5257 static int jim_target_tap_disabled(Jim_Interp *interp)
5258 {
5259 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5260 return JIM_ERR;
5261 }
5262
5263 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5264 {
5265 bool allow_defer = false;
5266
5267 struct jim_getopt_info goi;
5268 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5269 if (goi.argc > 1) {
5270 const char *cmd_name = Jim_GetString(argv[0], NULL);
5271 Jim_SetResultFormatted(goi.interp,
5272 "usage: %s ['allow-defer']", cmd_name);
5273 return JIM_ERR;
5274 }
5275 if (goi.argc > 0 &&
5276 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5277 /* consume it */
5278 Jim_Obj *obj;
5279 int e = jim_getopt_obj(&goi, &obj);
5280 if (e != JIM_OK)
5281 return e;
5282 allow_defer = true;
5283 }
5284
5285 struct command_context *cmd_ctx = current_command_context(interp);
5286 assert(cmd_ctx);
5287 struct target *target = get_current_target(cmd_ctx);
5288 if (!target->tap->enabled)
5289 return jim_target_tap_disabled(interp);
5290
5291 if (allow_defer && target->defer_examine) {
5292 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5293 LOG_INFO("Use arp_examine command to examine it manually!");
5294 return JIM_OK;
5295 }
5296
5297 int e = target->type->examine(target);
5298 if (e != ERROR_OK)
5299 return JIM_ERR;
5300 return JIM_OK;
5301 }
5302
5303 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5304 {
5305 struct command_context *cmd_ctx = current_command_context(interp);
5306 assert(cmd_ctx);
5307 struct target *target = get_current_target(cmd_ctx);
5308
5309 Jim_SetResultBool(interp, target_was_examined(target));
5310 return JIM_OK;
5311 }
5312
5313 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5314 {
5315 struct command_context *cmd_ctx = current_command_context(interp);
5316 assert(cmd_ctx);
5317 struct target *target = get_current_target(cmd_ctx);
5318
5319 Jim_SetResultBool(interp, target->defer_examine);
5320 return JIM_OK;
5321 }
5322
5323 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5324 {
5325 if (argc != 1) {
5326 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5327 return JIM_ERR;
5328 }
5329 struct command_context *cmd_ctx = current_command_context(interp);
5330 assert(cmd_ctx);
5331 struct target *target = get_current_target(cmd_ctx);
5332
5333 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5334 return JIM_ERR;
5335
5336 return JIM_OK;
5337 }
5338
5339 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5340 {
5341 if (argc != 1) {
5342 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5343 return JIM_ERR;
5344 }
5345 struct command_context *cmd_ctx = current_command_context(interp);
5346 assert(cmd_ctx);
5347 struct target *target = get_current_target(cmd_ctx);
5348 if (!target->tap->enabled)
5349 return jim_target_tap_disabled(interp);
5350
5351 int e;
5352 if (!(target_was_examined(target)))
5353 e = ERROR_TARGET_NOT_EXAMINED;
5354 else
5355 e = target->type->poll(target);
5356 if (e != ERROR_OK)
5357 return JIM_ERR;
5358 return JIM_OK;
5359 }
5360
5361 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5362 {
5363 struct jim_getopt_info goi;
5364 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5365
5366 if (goi.argc != 2) {
5367 Jim_WrongNumArgs(interp, 0, argv,
5368 "([tT]|[fF]|assert|deassert) BOOL");
5369 return JIM_ERR;
5370 }
5371
5372 struct jim_nvp *n;
5373 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5374 if (e != JIM_OK) {
5375 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5376 return e;
5377 }
5378 /* the halt or not param */
5379 jim_wide a;
5380 e = jim_getopt_wide(&goi, &a);
5381 if (e != JIM_OK)
5382 return e;
5383
5384 struct command_context *cmd_ctx = current_command_context(interp);
5385 assert(cmd_ctx);
5386 struct target *target = get_current_target(cmd_ctx);
5387 if (!target->tap->enabled)
5388 return jim_target_tap_disabled(interp);
5389
5390 if (!target->type->assert_reset || !target->type->deassert_reset) {
5391 Jim_SetResultFormatted(interp,
5392 "No target-specific reset for %s",
5393 target_name(target));
5394 return JIM_ERR;
5395 }
5396
5397 if (target->defer_examine)
5398 target_reset_examined(target);
5399
5400 /* determine if we should halt or not. */
5401 target->reset_halt = (a != 0);
5402 /* When this happens - all workareas are invalid. */
5403 target_free_all_working_areas_restore(target, 0);
5404
5405 /* do the assert */
5406 if (n->value == NVP_ASSERT)
5407 e = target->type->assert_reset(target);
5408 else
5409 e = target->type->deassert_reset(target);
5410 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5411 }
5412
5413 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5414 {
5415 if (argc != 1) {
5416 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5417 return JIM_ERR;
5418 }
5419 struct command_context *cmd_ctx = current_command_context(interp);
5420 assert(cmd_ctx);
5421 struct target *target = get_current_target(cmd_ctx);
5422 if (!target->tap->enabled)
5423 return jim_target_tap_disabled(interp);
5424 int e = target->type->halt(target);
5425 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5426 }
5427
5428 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5429 {
5430 struct jim_getopt_info goi;
5431 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5432
5433 /* params: <name> statename timeoutmsecs */
5434 if (goi.argc != 2) {
5435 const char *cmd_name = Jim_GetString(argv[0], NULL);
5436 Jim_SetResultFormatted(goi.interp,
5437 "%s <state_name> <timeout_in_msec>", cmd_name);
5438 return JIM_ERR;
5439 }
5440
5441 struct jim_nvp *n;
5442 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5443 if (e != JIM_OK) {
5444 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5445 return e;
5446 }
5447 jim_wide a;
5448 e = jim_getopt_wide(&goi, &a);
5449 if (e != JIM_OK)
5450 return e;
5451 struct command_context *cmd_ctx = current_command_context(interp);
5452 assert(cmd_ctx);
5453 struct target *target = get_current_target(cmd_ctx);
5454 if (!target->tap->enabled)
5455 return jim_target_tap_disabled(interp);
5456
5457 e = target_wait_state(target, n->value, a);
5458 if (e != ERROR_OK) {
5459 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5460 Jim_SetResultFormatted(goi.interp,
5461 "target: %s wait %s fails (%#s) %s",
5462 target_name(target), n->name,
5463 obj, target_strerror_safe(e));
5464 return JIM_ERR;
5465 }
5466 return JIM_OK;
5467 }
5468 /* List, for humans, the events defined for this target.
5469 * Scripts/programs should use 'name cget -event NAME'.
5470 */
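/* Example (Tcl), assuming 'mytarget' is the name of a target command:
 *   mytarget eventlist               ;# human-readable table, printed below
 *   mytarget cget -event reset-init  ;# script-friendly: returns the body only
 */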
5471 COMMAND_HANDLER(handle_target_event_list)
5472 {
5473 struct target *target = get_current_target(CMD_CTX);
5474 struct target_event_action *teap = target->event_action;
5475
5476 command_print(CMD, "Event actions for target (%d) %s\n",
5477 target->target_number,
5478 target_name(target));
5479 command_print(CMD, "%-25s | Body", "Event");
5480 command_print(CMD, "------------------------- | "
5481 "----------------------------------------");
5482 while (teap) {
5483 struct jim_nvp *opt = jim_nvp_value2name_simple(nvp_target_event, teap->event);
5484 command_print(CMD, "%-25s | %s",
5485 opt->name, Jim_GetString(teap->body, NULL));
5486 teap = teap->next;
5487 }
5488 command_print(CMD, "***END***");
5489 return ERROR_OK;
5490 }
5491 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5492 {
5493 if (argc != 1) {
5494 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5495 return JIM_ERR;
5496 }
5497 struct command_context *cmd_ctx = current_command_context(interp);
5498 assert(cmd_ctx);
5499 struct target *target = get_current_target(cmd_ctx);
5500 Jim_SetResultString(interp, target_state_name(target), -1);
5501 return JIM_OK;
5502 }
5503 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5504 {
5505 struct jim_getopt_info goi;
5506 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5507 if (goi.argc != 1) {
5508 const char *cmd_name = Jim_GetString(argv[0], NULL);
5509 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5510 return JIM_ERR;
5511 }
5512 struct jim_nvp *n;
5513 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5514 if (e != JIM_OK) {
5515 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5516 return e;
5517 }
5518 struct command_context *cmd_ctx = current_command_context(interp);
5519 assert(cmd_ctx);
5520 struct target *target = get_current_target(cmd_ctx);
5521 target_handle_event(target, n->value);
5522 return JIM_OK;
5523 }
5524
5525 static const struct command_registration target_instance_command_handlers[] = {
5526 {
5527 .name = "configure",
5528 .mode = COMMAND_ANY,
5529 .jim_handler = jim_target_configure,
5530 .help = "configure a new target for use",
5531 .usage = "[target_attribute ...]",
5532 },
5533 {
5534 .name = "cget",
5535 .mode = COMMAND_ANY,
5536 .jim_handler = jim_target_configure,
5537 .help = "returns the specified target attribute",
5538 .usage = "target_attribute",
5539 },
5540 {
5541 .name = "mwd",
5542 .handler = handle_mw_command,
5543 .mode = COMMAND_EXEC,
5544 .help = "Write 64-bit word(s) to target memory",
5545 .usage = "address data [count]",
5546 },
5547 {
5548 .name = "mww",
5549 .handler = handle_mw_command,
5550 .mode = COMMAND_EXEC,
5551 .help = "Write 32-bit word(s) to target memory",
5552 .usage = "address data [count]",
5553 },
5554 {
5555 .name = "mwh",
5556 .handler = handle_mw_command,
5557 .mode = COMMAND_EXEC,
5558 .help = "Write 16-bit half-word(s) to target memory",
5559 .usage = "address data [count]",
5560 },
5561 {
5562 .name = "mwb",
5563 .handler = handle_mw_command,
5564 .mode = COMMAND_EXEC,
5565 .help = "Write byte(s) to target memory",
5566 .usage = "address data [count]",
5567 },
5568 {
5569 .name = "mdd",
5570 .handler = handle_md_command,
5571 .mode = COMMAND_EXEC,
5572 .help = "Display target memory as 64-bit words",
5573 .usage = "address [count]",
5574 },
5575 {
5576 .name = "mdw",
5577 .handler = handle_md_command,
5578 .mode = COMMAND_EXEC,
5579 .help = "Display target memory as 32-bit words",
5580 .usage = "address [count]",
5581 },
5582 {
5583 .name = "mdh",
5584 .handler = handle_md_command,
5585 .mode = COMMAND_EXEC,
5586 .help = "Display target memory as 16-bit half-words",
5587 .usage = "address [count]",
5588 },
5589 {
5590 .name = "mdb",
5591 .handler = handle_md_command,
5592 .mode = COMMAND_EXEC,
5593 .help = "Display target memory as 8-bit bytes",
5594 .usage = "address [count]",
5595 },
5596 {
5597 .name = "array2mem",
5598 .mode = COMMAND_EXEC,
5599 .jim_handler = jim_target_array2mem,
5600 .help = "Writes Tcl array of 8/16/32/64 bit numbers "
5601 "to target memory",
5602 .usage = "arrayname bitwidth address count ['phys']",
5603 },
5604 {
5605 .name = "mem2array",
5606 .mode = COMMAND_EXEC,
5607 .jim_handler = jim_target_mem2array,
5608 .help = "Loads Tcl array of 8/16/32/64 bit numbers "
5609 "from target memory",
5610 .usage = "arrayname bitwidth address count ['phys']",
5611 },
5612 {
5613 .name = "eventlist",
5614 .handler = handle_target_event_list,
5615 .mode = COMMAND_EXEC,
5616 .help = "displays a table of events defined for this target",
5617 .usage = "",
5618 },
5619 {
5620 .name = "curstate",
5621 .mode = COMMAND_EXEC,
5622 .jim_handler = jim_target_current_state,
5623 .help = "displays the current state of this target",
5624 },
5625 {
5626 .name = "arp_examine",
5627 .mode = COMMAND_EXEC,
5628 .jim_handler = jim_target_examine,
5629 .help = "used internally for reset processing",
5630 .usage = "['allow-defer']",
5631 },
5632 {
5633 .name = "was_examined",
5634 .mode = COMMAND_EXEC,
5635 .jim_handler = jim_target_was_examined,
5636 .help = "used internally for reset processing",
5637 },
5638 {
5639 .name = "examine_deferred",
5640 .mode = COMMAND_EXEC,
5641 .jim_handler = jim_target_examine_deferred,
5642 .help = "used internally for reset processing",
5643 },
5644 {
5645 .name = "arp_halt_gdb",
5646 .mode = COMMAND_EXEC,
5647 .jim_handler = jim_target_halt_gdb,
5648 .help = "used internally for reset processing to halt GDB",
5649 },
5650 {
5651 .name = "arp_poll",
5652 .mode = COMMAND_EXEC,
5653 .jim_handler = jim_target_poll,
5654 .help = "used internally for reset processing",
5655 },
5656 {
5657 .name = "arp_reset",
5658 .mode = COMMAND_EXEC,
5659 .jim_handler = jim_target_reset,
5660 .help = "used internally for reset processing",
5661 },
5662 {
5663 .name = "arp_halt",
5664 .mode = COMMAND_EXEC,
5665 .jim_handler = jim_target_halt,
5666 .help = "used internally for reset processing",
5667 },
5668 {
5669 .name = "arp_waitstate",
5670 .mode = COMMAND_EXEC,
5671 .jim_handler = jim_target_wait_state,
5672 .help = "used internally for reset processing",
5673 },
5674 {
5675 .name = "invoke-event",
5676 .mode = COMMAND_EXEC,
5677 .jim_handler = jim_target_invoke_event,
5678 .help = "invoke handler for specified event",
5679 .usage = "event_name",
5680 },
5681 COMMAND_REGISTRATION_DONE
5682 };
5683
5684 static int target_create(struct jim_getopt_info *goi)
5685 {
5686 Jim_Obj *new_cmd;
5687 Jim_Cmd *cmd;
5688 const char *cp;
5689 int e;
5690 int x;
5691 struct target *target;
5692 struct command_context *cmd_ctx;
5693
5694 cmd_ctx = current_command_context(goi->interp);
5695 assert(cmd_ctx);
5696
5697 if (goi->argc < 3) {
5698 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5699 return JIM_ERR;
5700 }
5701
5702 /* COMMAND */
5703 jim_getopt_obj(goi, &new_cmd);
5704 /* does this command exist? */
5705 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5706 if (cmd) {
5707 cp = Jim_GetString(new_cmd, NULL);
5708 Jim_SetResultFormatted(goi->interp, "Command/target: %s exists", cp);
5709 return JIM_ERR;
5710 }
5711
5712 /* TYPE */
5713 e = jim_getopt_string(goi, &cp, NULL);
5714 if (e != JIM_OK)
5715 return e;
5716 struct transport *tr = get_current_transport();
5717 if (tr->override_target) {
5718 e = tr->override_target(&cp);
5719 if (e != ERROR_OK) {
5720 LOG_ERROR("The selected transport doesn't support this target");
5721 return JIM_ERR;
5722 }
5723 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5724 }
5725 /* now does target type exist */
5726 for (x = 0 ; target_types[x] ; x++) {
5727 if (strcmp(cp, target_types[x]->name) == 0) {
5728 /* found */
5729 break;
5730 }
5731 }
5732 if (!target_types[x]) {
5733 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5734 for (x = 0 ; target_types[x] ; x++) {
5735 if (target_types[x + 1]) {
5736 Jim_AppendStrings(goi->interp,
5737 Jim_GetResult(goi->interp),
5738 target_types[x]->name,
5739 ", ", NULL);
5740 } else {
5741 Jim_AppendStrings(goi->interp,
5742 Jim_GetResult(goi->interp),
5743 " or ",
5744 target_types[x]->name, NULL);
5745 }
5746 }
5747 return JIM_ERR;
5748 }
5749
5750 /* Create it */
5751 target = calloc(1, sizeof(struct target));
5752 if (!target) {
5753 LOG_ERROR("Out of memory");
5754 return JIM_ERR;
5755 }
5756
5757 /* set target number */
5758 target->target_number = new_target_number();
5759
5760 /* allocate memory for each unique target type */
5761 target->type = malloc(sizeof(struct target_type));
5762 if (!target->type) {
5763 LOG_ERROR("Out of memory");
5764 free(target);
5765 return JIM_ERR;
5766 }
5767
5768 memcpy(target->type, target_types[x], sizeof(struct target_type));
5769
5770 /* default to first core, override with -coreid */
5771 target->coreid = 0;
5772
5773 target->working_area = 0x0;
5774 target->working_area_size = 0x0;
5775 target->working_areas = NULL;
5776 target->backup_working_area = 0;
5777
5778 target->state = TARGET_UNKNOWN;
5779 target->debug_reason = DBG_REASON_UNDEFINED;
5780 target->reg_cache = NULL;
5781 target->breakpoints = NULL;
5782 target->watchpoints = NULL;
5783 target->next = NULL;
5784 target->arch_info = NULL;
5785
5786 target->verbose_halt_msg = true;
5787
5788 target->halt_issued = false;
5789
5790 /* initialize trace information */
5791 target->trace_info = calloc(1, sizeof(struct trace));
5792 if (!target->trace_info) {
5793 LOG_ERROR("Out of memory");
5794 free(target->type);
5795 free(target);
5796 return JIM_ERR;
5797 }
5798
5799 target->dbgmsg = NULL;
5800 target->dbg_msg_enabled = 0;
5801
5802 target->endianness = TARGET_ENDIAN_UNKNOWN;
5803
5804 target->rtos = NULL;
5805 target->rtos_auto_detect = false;
5806
5807 target->gdb_port_override = NULL;
5808 target->gdb_max_connections = 1;
5809
5810 /* Do the rest as "configure" options */
5811 goi->isconfigure = 1;
5812 e = target_configure(goi, target);
5813
5814 if (e == JIM_OK) {
5815 if (target->has_dap) {
5816 if (!target->dap_configured) {
5817 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5818 e = JIM_ERR;
5819 }
5820 } else {
5821 if (!target->tap_configured) {
5822 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5823 e = JIM_ERR;
5824 }
5825 }
5826 /* tap must be set after target was configured */
5827 if (!target->tap)
5828 e = JIM_ERR;
5829 }
5830
5831 if (e != JIM_OK) {
5832 rtos_destroy(target);
5833 free(target->gdb_port_override);
5834 free(target->trace_info);
5835 free(target->type);
5836 free(target);
5837 return e;
5838 }
5839
5840 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5841 /* default endian to little if not specified */
5842 target->endianness = TARGET_LITTLE_ENDIAN;
5843 }
5844
5845 cp = Jim_GetString(new_cmd, NULL);
5846 target->cmd_name = strdup(cp);
5847 if (!target->cmd_name) {
5848 LOG_ERROR("Out of memory");
5849 rtos_destroy(target);
5850 free(target->gdb_port_override);
5851 free(target->trace_info);
5852 free(target->type);
5853 free(target);
5854 return JIM_ERR;
5855 }
5856
5857 if (target->type->target_create) {
5858 e = (*(target->type->target_create))(target, goi->interp);
5859 if (e != ERROR_OK) {
5860 LOG_DEBUG("target_create failed");
5861 free(target->cmd_name);
5862 rtos_destroy(target);
5863 free(target->gdb_port_override);
5864 free(target->trace_info);
5865 free(target->type);
5866 free(target);
5867 return JIM_ERR;
5868 }
5869 }
5870
5871 /* create the target specific commands */
5872 if (target->type->commands) {
5873 e = register_commands(cmd_ctx, NULL, target->type->commands);
5874 if (e != ERROR_OK)
5875 LOG_ERROR("unable to register '%s' commands", cp);
5876 }
5877
5878 /* now - create the new target name command */
5879 const struct command_registration target_subcommands[] = {
5880 {
5881 .chain = target_instance_command_handlers,
5882 },
5883 {
5884 .chain = target->type->commands,
5885 },
5886 COMMAND_REGISTRATION_DONE
5887 };
5888 const struct command_registration target_commands[] = {
5889 {
5890 .name = cp,
5891 .mode = COMMAND_ANY,
5892 .help = "target command group",
5893 .usage = "",
5894 .chain = target_subcommands,
5895 },
5896 COMMAND_REGISTRATION_DONE
5897 };
5898 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
5899 if (e != ERROR_OK) {
5900 if (target->type->deinit_target)
5901 target->type->deinit_target(target);
5902 free(target->cmd_name);
5903 rtos_destroy(target);
5904 free(target->gdb_port_override);
5905 free(target->trace_info);
5906 free(target->type);
5907 free(target);
5908 return JIM_ERR;
5909 }
5910
5911 /* append to end of list */
5912 append_to_list_all_targets(target);
5913
5914 cmd_ctx->current_target = target;
5915 return JIM_OK;
5916 }
5917
5918 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5919 {
5920 if (argc != 1) {
5921 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5922 return JIM_ERR;
5923 }
5924 struct command_context *cmd_ctx = current_command_context(interp);
5925 assert(cmd_ctx);
5926
5927 struct target *target = get_current_target_or_null(cmd_ctx);
5928 if (target)
5929 Jim_SetResultString(interp, target_name(target), -1);
5930 return JIM_OK;
5931 }
5932
5933 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5934 {
5935 if (argc != 1) {
5936 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5937 return JIM_ERR;
5938 }
5939 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5940 for (unsigned x = 0; target_types[x]; x++) {
5941 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5942 Jim_NewStringObj(interp, target_types[x]->name, -1));
5943 }
5944 return JIM_OK;
5945 }
5946
5947 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5948 {
5949 if (argc != 1) {
5950 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5951 return JIM_ERR;
5952 }
5953 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5954 struct target *target = all_targets;
5955 while (target) {
5956 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5957 Jim_NewStringObj(interp, target_name(target), -1));
5958 target = target->next;
5959 }
5960 return JIM_OK;
5961 }
5962
5963 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5964 {
5965 int i;
5966 const char *targetname;
5967 int retval, len;
5968 struct target *target = NULL;
5969 struct target_list *head, *curr, *new;
5970 curr = NULL;
5971 head = NULL;
5972
5973 retval = 0;
5974 LOG_DEBUG("%d", argc);
5975 /* argv[1] = target to associate in smp
5976 * argv[2] = target to associate in smp
5977 * argv[3] ...
5978 */
5979
5980 for (i = 1; i < argc; i++) {
5981
5982 targetname = Jim_GetString(argv[i], &len);
5983 target = get_target(targetname);
5984 LOG_DEBUG("%s ", targetname);
5985 if (target) {
5986 new = malloc(sizeof(struct target_list));
5987 new->target = target;
5988 new->next = NULL;
5989 if (!head) {
5990 head = new;
5991 curr = head;
5992 } else {
5993 curr->next = new;
5994 curr = new;
5995 }
5996 }
5997 }
5998 /* now parse the list of CPUs and put the targets in SMP mode */
5999 curr = head;
6000
6001 while (curr) {
6002 target = curr->target;
6003 target->smp = 1;
6004 target->head = head;
6005 curr = curr->next;
6006 }
6007
6008 if (target && target->rtos)
6009 retval = rtos_smp_init(head->target);
6010
6011 return retval;
6012 }
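/* Illustrative use of the "target smp" command from a configuration
 * script (the target names below are examples only, not taken from this
 * file):
 *
 *   target smp mychip.cpu0 mychip.cpu1
 *
 * Each named target gets target->smp set and shares the same SMP list
 * head, as built by jim_target_smp() above.
 */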
6013
6014
6015 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6016 {
6017 struct jim_getopt_info goi;
6018 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6019 if (goi.argc < 3) {
6020 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6021 "<name> <target_type> [<target_options> ...]");
6022 return JIM_ERR;
6023 }
6024 return target_create(&goi);
6025 }
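/* Illustrative "target create" invocation from a configuration script
 * (TAP name, target name and IDCODE are examples only, not taken from
 * this file):
 *
 *   jtag newtap mychip cpu -irlen 4 -expected-id 0x12345678
 *   target create mychip.cpu arm7tdmi -chain-position mychip.cpu
 *
 * For DAP-based targets, -dap replaces -chain-position, matching the
 * has_dap/tap_configured checks in target_create() above.
 */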
6026
6027 static const struct command_registration target_subcommand_handlers[] = {
6028 {
6029 .name = "init",
6030 .mode = COMMAND_CONFIG,
6031 .handler = handle_target_init_command,
6032 .help = "initialize targets",
6033 .usage = "",
6034 },
6035 {
6036 .name = "create",
6037 .mode = COMMAND_CONFIG,
6038 .jim_handler = jim_target_create,
6039 .usage = "name type '-chain-position' name [options ...]",
6040 .help = "Creates and selects a new target",
6041 },
6042 {
6043 .name = "current",
6044 .mode = COMMAND_ANY,
6045 .jim_handler = jim_target_current,
6046 .help = "Returns the currently selected target",
6047 },
6048 {
6049 .name = "types",
6050 .mode = COMMAND_ANY,
6051 .jim_handler = jim_target_types,
6052 .help = "Returns the available target types as "
6053 "a list of strings",
6054 },
6055 {
6056 .name = "names",
6057 .mode = COMMAND_ANY,
6058 .jim_handler = jim_target_names,
6059 .help = "Returns the names of all targets as a list of strings",
6060 },
6061 {
6062 .name = "smp",
6063 .mode = COMMAND_ANY,
6064 .jim_handler = jim_target_smp,
6065 .usage = "targetname1 targetname2 ...",
6066 .help = "gather several target in a smp list"
6067 },
6068
6069 COMMAND_REGISTRATION_DONE
6070 };
6071
6072 struct fast_load {
6073 target_addr_t address;
6074 uint8_t *data;
6075 int length;
6076
6077 };
6078
6079 static int fastload_num;
6080 static struct fast_load *fastload;
6081
6082 static void free_fastload(void)
6083 {
6084 if (fastload) {
6085 for (int i = 0; i < fastload_num; i++)
6086 free(fastload[i].data);
6087 free(fastload);
6088 fastload = NULL;
6089 }
6090 }
6091
6092 COMMAND_HANDLER(handle_fast_load_image_command)
6093 {
6094 uint8_t *buffer;
6095 size_t buf_cnt;
6096 uint32_t image_size;
6097 target_addr_t min_address = 0;
6098 target_addr_t max_address = -1;
6099
6100 struct image image;
6101
6102 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6103 &image, &min_address, &max_address);
6104 if (retval != ERROR_OK)
6105 return retval;
6106
6107 struct duration bench;
6108 duration_start(&bench);
6109
6110 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6111 if (retval != ERROR_OK)
6112 return retval;
6113
6114 image_size = 0x0;
6115 retval = ERROR_OK;
6116 fastload_num = image.num_sections;
6117 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6118 if (!fastload) {
6119 command_print(CMD, "out of memory");
6120 image_close(&image);
6121 return ERROR_FAIL;
6122 }
6123 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6124 for (unsigned int i = 0; i < image.num_sections; i++) {
6125 buffer = malloc(image.sections[i].size);
6126 if (!buffer) {
6127 command_print(CMD, "error allocating buffer for section (%d bytes)",
6128 (int)(image.sections[i].size));
6129 retval = ERROR_FAIL;
6130 break;
6131 }
6132
6133 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6134 if (retval != ERROR_OK) {
6135 free(buffer);
6136 break;
6137 }
6138
6139 uint32_t offset = 0;
6140 uint32_t length = buf_cnt;
6141
6142 /* DANGER!!! beware of unsigned comparison here!!! */
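/* With target_addr_t being unsigned, a test such as
 *     (image.sections[i].base_address - min_address >= 0)
 * would always hold, because the subtraction wraps around instead of
 * going negative; the checks below therefore compare sums against the
 * bounds, e.g. (base_address + buf_cnt >= min_address), and only
 * subtract once the ordering has been established.
 */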
6143
6144 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6145 (image.sections[i].base_address < max_address)) {
6146 if (image.sections[i].base_address < min_address) {
6147 /* clip addresses below */
6148 offset += min_address-image.sections[i].base_address;
6149 length -= offset;
6150 }
6151
6152 if (image.sections[i].base_address + buf_cnt > max_address)
6153 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6154
6155 fastload[i].address = image.sections[i].base_address + offset;
6156 fastload[i].data = malloc(length);
6157 if (!fastload[i].data) {
6158 free(buffer);
6159 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6160 length);
6161 retval = ERROR_FAIL;
6162 break;
6163 }
6164 memcpy(fastload[i].data, buffer + offset, length);
6165 fastload[i].length = length;
6166
6167 image_size += length;
6168 command_print(CMD, "%u bytes written at address 0x%8.8x",
6169 (unsigned int)length,
6170 ((unsigned int)(image.sections[i].base_address + offset)));
6171 }
6172
6173 free(buffer);
6174 }
6175
6176 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6177 command_print(CMD, "Loaded %" PRIu32 " bytes "
6178 "in %fs (%0.3f KiB/s)", image_size,
6179 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6180
6181 command_print(CMD,
6182 "WARNING: image has not been loaded to target!"
6183 "You can issue a 'fast_load' to finish loading.");
6184 }
6185
6186 image_close(&image);
6187
6188 if (retval != ERROR_OK)
6189 free_fastload();
6190
6191 return retval;
6192 }
6193
6194 COMMAND_HANDLER(handle_fast_load_command)
6195 {
6196 if (CMD_ARGC > 0)
6197 return ERROR_COMMAND_SYNTAX_ERROR;
6198 if (!fastload) {
6199 LOG_ERROR("No image in memory");
6200 return ERROR_FAIL;
6201 }
6202 int i;
6203 int64_t ms = timeval_ms();
6204 int size = 0;
6205 int retval = ERROR_OK;
6206 for (i = 0; i < fastload_num; i++) {
6207 struct target *target = get_current_target(CMD_CTX);
6208 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6209 (unsigned int)(fastload[i].address),
6210 (unsigned int)(fastload[i].length));
6211 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6212 if (retval != ERROR_OK)
6213 break;
6214 size += fastload[i].length;
6215 }
6216 if (retval == ERROR_OK) {
6217 int64_t after = timeval_ms();
6218 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6219 }
6220 return retval;
6221 }
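/* Illustrative session using the two commands above (file name and
 * address are examples only):
 *
 *   fast_load_image firmware.bin 0x08000000 bin
 *   reset halt
 *   fast_load
 *
 * fast_load_image stages the image sections in host memory; fast_load
 * then writes the staged data to the current target with
 * target_write_buffer().
 */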
6222
6223 static const struct command_registration target_command_handlers[] = {
6224 {
6225 .name = "targets",
6226 .handler = handle_targets_command,
6227 .mode = COMMAND_ANY,
6228 .help = "change current default target (one parameter) "
6229 "or prints table of all targets (no parameters)",
6230 .usage = "[target]",
6231 },
6232 {
6233 .name = "target",
6234 .mode = COMMAND_CONFIG,
6235 .help = "configure target",
6236 .chain = target_subcommand_handlers,
6237 .usage = "",
6238 },
6239 COMMAND_REGISTRATION_DONE
6240 };
6241
6242 int target_register_commands(struct command_context *cmd_ctx)
6243 {
6244 return register_commands(cmd_ctx, NULL, target_command_handlers);
6245 }
6246
6247 static bool target_reset_nag = true;
6248
6249 bool get_target_reset_nag(void)
6250 {
6251 return target_reset_nag;
6252 }
6253
6254 COMMAND_HANDLER(handle_target_reset_nag)
6255 {
6256 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6257 &target_reset_nag, "Nag after each reset about options to improve "
6258 "performance");
6259 }
6260
6261 COMMAND_HANDLER(handle_ps_command)
6262 {
6263 struct target *target = get_current_target(CMD_CTX);
6264 char *display;
6265 if (target->state != TARGET_HALTED) {
6266 LOG_INFO("target not halted !!");
6267 return ERROR_OK;
6268 }
6269
6270 if ((target->rtos) && (target->rtos->type)
6271 && (target->rtos->type->ps_command)) {
6272 display = target->rtos->type->ps_command(target);
6273 command_print(CMD, "%s", display);
6274 free(display);
6275 return ERROR_OK;
6276 } else {
6277 LOG_INFO("failed");
6278 return ERROR_TARGET_FAILURE;
6279 }
6280 }
6281
6282 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6283 {
6284 if (text)
6285 command_print_sameline(cmd, "%s", text);
6286 for (int i = 0; i < size; i++)
6287 command_print_sameline(cmd, " %02x", buf[i]);
6288 command_print(cmd, " ");
6289 }
6290
6291 COMMAND_HANDLER(handle_test_mem_access_command)
6292 {
6293 struct target *target = get_current_target(CMD_CTX);
6294 uint32_t test_size;
6295 int retval = ERROR_OK;
6296
6297 if (target->state != TARGET_HALTED) {
6298 LOG_INFO("target not halted !!");
6299 return ERROR_FAIL;
6300 }
6301
6302 if (CMD_ARGC != 1)
6303 return ERROR_COMMAND_SYNTAX_ERROR;
6304
6305 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6306
6307 /* Test reads */
6308 size_t num_bytes = test_size + 4;
6309
6310 struct working_area *wa = NULL;
6311 retval = target_alloc_working_area(target, num_bytes, &wa);
6312 if (retval != ERROR_OK) {
6313 LOG_ERROR("Not enough working area");
6314 return ERROR_FAIL;
6315 }
6316
6317 uint8_t *test_pattern = malloc(num_bytes);
if (!test_pattern) {
LOG_ERROR("Out of memory");
target_free_working_area(target, wa);
return ERROR_FAIL;
}
6318
6319 for (size_t i = 0; i < num_bytes; i++)
6320 test_pattern[i] = rand();
6321
6322 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6323 if (retval != ERROR_OK) {
6324 LOG_ERROR("Test pattern write failed");
6325 goto out;
6326 }
6327
6328 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6329 for (int size = 1; size <= 4; size *= 2) {
6330 for (int offset = 0; offset < 4; offset++) {
6331 uint32_t count = test_size / size;
6332 size_t host_bufsiz = (count + 2) * size + host_offset;
6333 uint8_t *read_ref = malloc(host_bufsiz);
6334 uint8_t *read_buf = malloc(host_bufsiz);
6335
6336 for (size_t i = 0; i < host_bufsiz; i++) {
6337 read_ref[i] = rand();
6338 read_buf[i] = read_ref[i];
6339 }
6340 command_print_sameline(CMD,
6341 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6342 size, offset, host_offset ? "un" : "");
6343
6344 struct duration bench;
6345 duration_start(&bench);
6346
6347 retval = target_read_memory(target, wa->address + offset, size, count,
6348 read_buf + size + host_offset);
6349
6350 duration_measure(&bench);
6351
6352 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6353 command_print(CMD, "Unsupported alignment");
6354 goto next;
6355 } else if (retval != ERROR_OK) {
6356 command_print(CMD, "Memory read failed");
6357 goto next;
6358 }
6359
6360 /* replay on host */
6361 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6362
6363 /* check result */
6364 int result = memcmp(read_ref, read_buf, host_bufsiz);
6365 if (result == 0) {
6366 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6367 duration_elapsed(&bench),
6368 duration_kbps(&bench, count * size));
6369 } else {
6370 command_print(CMD, "Compare failed");
6371 binprint(CMD, "ref:", read_ref, host_bufsiz);
6372 binprint(CMD, "buf:", read_buf, host_bufsiz);
6373 }
6374 next:
6375 free(read_ref);
6376 free(read_buf);
6377 }
6378 }
6379 }
6380
6381 out:
6382 free(test_pattern);
6383
6384 if (wa)
6385 target_free_working_area(target, wa);
6386
6387 /* Test writes */
6388 num_bytes = test_size + 4 + 4 + 4;
6389
6390 retval = target_alloc_working_area(target, num_bytes, &wa);
6391 if (retval != ERROR_OK) {
6392 LOG_ERROR("Not enough working area");
6393 return ERROR_FAIL;
6394 }
6395
6396 test_pattern = malloc(num_bytes);
if (!test_pattern) {
LOG_ERROR("Out of memory");
target_free_working_area(target, wa);
return ERROR_FAIL;
}
6397
6398 for (size_t i = 0; i < num_bytes; i++)
6399 test_pattern[i] = rand();
6400
6401 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6402 for (int size = 1; size <= 4; size *= 2) {
6403 for (int offset = 0; offset < 4; offset++) {
6404 uint32_t count = test_size / size;
6405 size_t host_bufsiz = count * size + host_offset;
6406 uint8_t *read_ref = malloc(num_bytes);
6407 uint8_t *read_buf = malloc(num_bytes);
6408 uint8_t *write_buf = malloc(host_bufsiz);
6409
6410 for (size_t i = 0; i < host_bufsiz; i++)
6411 write_buf[i] = rand();
6412 command_print_sameline(CMD,
6413 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6414 size, offset, host_offset ? "un" : "");
6415
6416 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6417 if (retval != ERROR_OK) {
6418 command_print(CMD, "Test pattern write failed");
6419 goto nextw;
6420 }
6421
6422 /* replay on host */
6423 memcpy(read_ref, test_pattern, num_bytes);
6424 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6425
6426 struct duration bench;
6427 duration_start(&bench);
6428
6429 retval = target_write_memory(target, wa->address + size + offset, size, count,
6430 write_buf + host_offset);
6431
6432 duration_measure(&bench);
6433
6434 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6435 command_print(CMD, "Unsupported alignment");
6436 goto nextw;
6437 } else if (retval != ERROR_OK) {
6438 command_print(CMD, "Memory write failed");
6439 goto nextw;
6440 }
6441
6442 /* read back */
6443 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6444 if (retval != ERROR_OK) {
6445 command_print(CMD, "Test pattern write failed");
6446 goto nextw;
6447 }
6448
6449 /* check result */
6450 int result = memcmp(read_ref, read_buf, num_bytes);
6451 if (result == 0) {
6452 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6453 duration_elapsed(&bench),
6454 duration_kbps(&bench, count * size));
6455 } else {
6456 command_print(CMD, "Compare failed");
6457 binprint(CMD, "ref:", read_ref, num_bytes);
6458 binprint(CMD, "buf:", read_buf, num_bytes);
6459 }
6460 nextw:
6461 free(read_ref);
6462 free(read_buf);
6463 }
6464 }
6465 }
6466
6467 free(test_pattern);
6468
6469 if (wa)
6470 target_free_working_area(target, wa);
6471 return retval;
6472 }
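/* Illustrative use of the command above (the size argument is an
 * example only):
 *
 *   test_mem_access 1024
 *
 * This runs the read and write passes for 1/2/4-byte accesses at four
 * address offsets, against both aligned and unaligned host buffers, and
 * prints throughput on success or a hex dump of the mismatch otherwise.
 */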
6473
6474 static const struct command_registration target_exec_command_handlers[] = {
6475 {
6476 .name = "fast_load_image",
6477 .handler = handle_fast_load_image_command,
6478 .mode = COMMAND_ANY,
6479 .help = "Load image into server memory for later use by "
6480 "fast_load; primarily for profiling",
6481 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6482 "[min_address [max_length]]",
6483 },
6484 {
6485 .name = "fast_load",
6486 .handler = handle_fast_load_command,
6487 .mode = COMMAND_EXEC,
6488 .help = "loads active fast load image to current target "
6489 "- mainly for profiling purposes",
6490 .usage = "",
6491 },
6492 {
6493 .name = "profile",
6494 .handler = handle_profile_command,
6495 .mode = COMMAND_EXEC,
6496 .usage = "seconds filename [start end]",
6497 .help = "profiling samples the CPU PC",
6498 },
6499 /** @todo don't register virt2phys() unless target supports it */
6500 {
6501 .name = "virt2phys",
6502 .handler = handle_virt2phys_command,
6503 .mode = COMMAND_ANY,
6504 .help = "translate a virtual address into a physical address",
6505 .usage = "virtual_address",
6506 },
6507 {
6508 .name = "reg",
6509 .handler = handle_reg_command,
6510 .mode = COMMAND_EXEC,
6511 .help = "display (reread from target with \"force\") or set a register; "
6512 "with no arguments, displays all registers and their values",
6513 .usage = "[(register_number|register_name) [(value|'force')]]",
6514 },
6515 {
6516 .name = "poll",
6517 .handler = handle_poll_command,
6518 .mode = COMMAND_EXEC,
6519 .help = "poll target state; or reconfigure background polling",
6520 .usage = "['on'|'off']",
6521 },
6522 {
6523 .name = "wait_halt",
6524 .handler = handle_wait_halt_command,
6525 .mode = COMMAND_EXEC,
6526 .help = "wait up to the specified number of milliseconds "
6527 "(default 5000) for a previously requested halt",
6528 .usage = "[milliseconds]",
6529 },
6530 {
6531 .name = "halt",
6532 .handler = handle_halt_command,
6533 .mode = COMMAND_EXEC,
6534 .help = "request target to halt, then wait up to the specified "
6535 "number of milliseconds (default 5000) for it to complete",
6536 .usage = "[milliseconds]",
6537 },
6538 {
6539 .name = "resume",
6540 .handler = handle_resume_command,
6541 .mode = COMMAND_EXEC,
6542 .help = "resume target execution from current PC or address",
6543 .usage = "[address]",
6544 },
6545 {
6546 .name = "reset",
6547 .handler = handle_reset_command,
6548 .mode = COMMAND_EXEC,
6549 .usage = "[run|halt|init]",
6550 .help = "Reset all targets into the specified mode. "
6551 "Default reset mode is run, if not given.",
6552 },
6553 {
6554 .name = "soft_reset_halt",
6555 .handler = handle_soft_reset_halt_command,
6556 .mode = COMMAND_EXEC,
6557 .usage = "",
6558 .help = "halt the target and do a soft reset",
6559 },
6560 {
6561 .name = "step",
6562 .handler = handle_step_command,
6563 .mode = COMMAND_EXEC,
6564 .help = "step one instruction from current PC or address",
6565 .usage = "[address]",
6566 },
6567 {
6568 .name = "mdd",
6569 .handler = handle_md_command,
6570 .mode = COMMAND_EXEC,
6571 .help = "display memory double-words",
6572 .usage = "['phys'] address [count]",
6573 },
6574 {
6575 .name = "mdw",
6576 .handler = handle_md_command,
6577 .mode = COMMAND_EXEC,
6578 .help = "display memory words",
6579 .usage = "['phys'] address [count]",
6580 },
6581 {
6582 .name = "mdh",
6583 .handler = handle_md_command,
6584 .mode = COMMAND_EXEC,
6585 .help = "display memory half-words",
6586 .usage = "['phys'] address [count]",
6587 },
6588 {
6589 .name = "mdb",
6590 .handler = handle_md_command,
6591 .mode = COMMAND_EXEC,
6592 .help = "display memory bytes",
6593 .usage = "['phys'] address [count]",
6594 },
6595 {
6596 .name = "mwd",
6597 .handler = handle_mw_command,
6598 .mode = COMMAND_EXEC,
6599 .help = "write memory double-word",
6600 .usage = "['phys'] address value [count]",
6601 },
6602 {
6603 .name = "mww",
6604 .handler = handle_mw_command,
6605 .mode = COMMAND_EXEC,
6606 .help = "write memory word",
6607 .usage = "['phys'] address value [count]",
6608 },
6609 {
6610 .name = "mwh",
6611 .handler = handle_mw_command,
6612 .mode = COMMAND_EXEC,
6613 .help = "write memory half-word",
6614 .usage = "['phys'] address value [count]",
6615 },
6616 {
6617 .name = "mwb",
6618 .handler = handle_mw_command,
6619 .mode = COMMAND_EXEC,
6620 .help = "write memory byte",
6621 .usage = "['phys'] address value [count]",
6622 },
6623 {
6624 .name = "bp",
6625 .handler = handle_bp_command,
6626 .mode = COMMAND_EXEC,
6627 .help = "list or set hardware or software breakpoint",
6628 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6629 },
6630 {
6631 .name = "rbp",
6632 .handler = handle_rbp_command,
6633 .mode = COMMAND_EXEC,
6634 .help = "remove breakpoint",
6635 .usage = "'all' | address",
6636 },
6637 {
6638 .name = "wp",
6639 .handler = handle_wp_command,
6640 .mode = COMMAND_EXEC,
6641 .help = "list (no params) or create watchpoints",
6642 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6643 },
6644 {
6645 .name = "rwp",
6646 .handler = handle_rwp_command,
6647 .mode = COMMAND_EXEC,
6648 .help = "remove watchpoint",
6649 .usage = "address",
6650 },
6651 {
6652 .name = "load_image",
6653 .handler = handle_load_image_command,
6654 .mode = COMMAND_EXEC,
6655 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6656 "[min_address] [max_length]",
6657 },
6658 {
6659 .name = "dump_image",
6660 .handler = handle_dump_image_command,
6661 .mode = COMMAND_EXEC,
6662 .usage = "filename address size",
6663 },
6664 {
6665 .name = "verify_image_checksum",
6666 .handler = handle_verify_image_checksum_command,
6667 .mode = COMMAND_EXEC,
6668 .usage = "filename [offset [type]]",
6669 },
6670 {
6671 .name = "verify_image",
6672 .handler = handle_verify_image_command,
6673 .mode = COMMAND_EXEC,
6674 .usage = "filename [offset [type]]",
6675 },
6676 {
6677 .name = "test_image",
6678 .handler = handle_test_image_command,
6679 .mode = COMMAND_EXEC,
6680 .usage = "filename [offset [type]]",
6681 },
6682 {
6683 .name = "mem2array",
6684 .mode = COMMAND_EXEC,
6685 .jim_handler = jim_mem2array,
6686 .help = "read 8/16/32 bit memory and return as a TCL array "
6687 "for script processing",
6688 .usage = "arrayname bitwidth address count",
6689 },
6690 {
6691 .name = "array2mem",
6692 .mode = COMMAND_EXEC,
6693 .jim_handler = jim_array2mem,
6694 .help = "convert a TCL array to memory locations "
6695 "and write the 8/16/32 bit values",
6696 .usage = "arrayname bitwidth address count",
6697 },
6698 {
6699 .name = "reset_nag",
6700 .handler = handle_target_reset_nag,
6701 .mode = COMMAND_ANY,
6702 .help = "Nag after each reset about options that could have been "
6703 "enabled to improve performance.",
6704 .usage = "['enable'|'disable']",
6705 },
6706 {
6707 .name = "ps",
6708 .handler = handle_ps_command,
6709 .mode = COMMAND_EXEC,
6710 .help = "list all tasks",
6711 .usage = "",
6712 },
6713 {
6714 .name = "test_mem_access",
6715 .handler = handle_test_mem_access_command,
6716 .mode = COMMAND_EXEC,
6717 .help = "Test the target's memory access functions",
6718 .usage = "size",
6719 },
6720
6721 COMMAND_REGISTRATION_DONE
6722 };
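/* Illustrative Tcl use of the mem2array / array2mem commands registered
 * above (array name, bit width, addresses and count are examples only):
 *
 *   mem2array words 32 0x20000000 4   ;# read 4 x 32-bit words into "words"
 *   array2mem words 32 0x20000400 4   ;# write them back at another address
 */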
6723 static int target_register_user_commands(struct command_context *cmd_ctx)
6724 {
6725 int retval = ERROR_OK;
6726 retval = target_request_register_commands(cmd_ctx);
6727 if (retval != ERROR_OK)
6728 return retval;
6729
6730 retval = trace_register_commands(cmd_ctx);
6731 if (retval != ERROR_OK)
6732 return retval;
6733
6734
6735 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6736 }
