473538ab7747cc9484c91c11a2c72ec5caaed5a7
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59 #include "smp.h"
60
61 /* default halt wait timeout (ms) */
62 #define DEFAULT_HALT_TIMEOUT 5000
63
64 static int target_read_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, uint8_t *buffer);
66 static int target_write_buffer_default(struct target *target, target_addr_t address,
67 uint32_t count, const uint8_t *buffer);
68 static int target_array2mem(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_mem2array(Jim_Interp *interp, struct target *target,
71 int argc, Jim_Obj * const *argv);
72 static int target_register_user_commands(struct command_context *cmd_ctx);
73 static int target_get_gdb_fileio_info_default(struct target *target,
74 struct gdb_fileio_info *fileio_info);
75 static int target_gdb_fileio_end_default(struct target *target, int retcode,
76 int fileio_errno, bool ctrl_c);
77
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type mips_mips64_target;
98 extern struct target_type avr_target;
99 extern struct target_type dsp563xx_target;
100 extern struct target_type dsp5680xx_target;
101 extern struct target_type testee_target;
102 extern struct target_type avr32_ap7k_target;
103 extern struct target_type hla_target;
104 extern struct target_type nds32_v2_target;
105 extern struct target_type nds32_v3_target;
106 extern struct target_type nds32_v3m_target;
107 extern struct target_type or1k_target;
108 extern struct target_type quark_x10xx_target;
109 extern struct target_type quark_d20xx_target;
110 extern struct target_type stm8_target;
111 extern struct target_type riscv_target;
112 extern struct target_type mem_ap_target;
113 extern struct target_type esirisc_target;
114 extern struct target_type arcv2_target;
115
/* NULL-terminated registry of all supported target back-ends; entries
 * point at the externs declared above.  Order here is arbitrary and
 * need not match the declaration order. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};
155
/* head of the singly-linked list of all configured targets */
struct target *all_targets;
/* registered callback chains for target events and periodic timers */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* scheduling bookkeeping for the timer-callback machinery */
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
/* NOTE(review): presumably the default SMP list for targets not assigned
 * to any SMP group -- confirm against the users of this list */
static LIST_HEAD(empty_smp_targets);
164
/* accepted spellings for assert/deassert command arguments,
 * including the single-letter T/F and t/f aliases */
static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
174
/* maps ERROR_TARGET_* codes to short human/script-readable names;
 * consumed by target_strerror_safe() below */
static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
189
190 static const char *target_strerror_safe(int err)
191 {
192 const struct jim_nvp *n;
193
194 n = jim_nvp_value2name_simple(nvp_error_target, err);
195 if (!n->name)
196 return "unknown";
197 else
198 return n->name;
199 }
200
/* maps TARGET_EVENT_* values to the event names usable from Tcl
 * (e.g. "targetname configure -event <name> { ... }") */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	/* reset sequence events, in the order they occur */
	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	/* user-defined semihosting commands 0x100..0x107 */
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};
252
/* maps TARGET_* run states to printable names (see target_state_name()) */
static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
261
/* maps DBG_REASON_* codes to printable names (see debug_reason_name()) */
static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
274
/* accepted spellings for the target endianness option ("be"/"le" are
 * aliases for "big"/"little") */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};
282
/* maps RESET_* modes to the names used by the reset command and by
 * target_process_reset() when invoking ocd_process_reset */
static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
290
291 const char *debug_reason_name(struct target *t)
292 {
293 const char *cp;
294
295 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
296 t->debug_reason)->name;
297 if (!cp) {
298 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
299 cp = "(*BUG*unknown*BUG*)";
300 }
301 return cp;
302 }
303
304 const char *target_state_name(struct target *t)
305 {
306 const char *cp;
307 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
308 if (!cp) {
309 LOG_ERROR("Invalid target state: %d", (int)(t->state));
310 cp = "(*BUG*unknown*BUG*)";
311 }
312
313 if (!target_was_examined(t) && t->defer_examine)
314 cp = "examine deferred";
315
316 return cp;
317 }
318
319 const char *target_event_name(enum target_event event)
320 {
321 const char *cp;
322 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
323 if (!cp) {
324 LOG_ERROR("Invalid target event: %d", (int)(event));
325 cp = "(*BUG*unknown*BUG*)";
326 }
327 return cp;
328 }
329
330 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
331 {
332 const char *cp;
333 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
334 if (!cp) {
335 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
336 cp = "(*BUG*unknown*BUG*)";
337 }
338 return cp;
339 }
340
341 /* determine the number of the new target */
342 static int new_target_number(void)
343 {
344 struct target *t;
345 int x;
346
347 /* number is 0 based */
348 x = -1;
349 t = all_targets;
350 while (t) {
351 if (x < t->target_number)
352 x = t->target_number;
353 t = t->next;
354 }
355 return x + 1;
356 }
357
358 static void append_to_list_all_targets(struct target *target)
359 {
360 struct target **t = &all_targets;
361
362 while (*t)
363 t = &((*t)->next);
364 *t = target;
365 }
366
367 /* read a uint64_t from a buffer in target memory endianness */
368 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
369 {
370 if (target->endianness == TARGET_LITTLE_ENDIAN)
371 return le_to_h_u64(buffer);
372 else
373 return be_to_h_u64(buffer);
374 }
375
376 /* read a uint32_t from a buffer in target memory endianness */
377 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
378 {
379 if (target->endianness == TARGET_LITTLE_ENDIAN)
380 return le_to_h_u32(buffer);
381 else
382 return be_to_h_u32(buffer);
383 }
384
385 /* read a uint24_t from a buffer in target memory endianness */
386 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
387 {
388 if (target->endianness == TARGET_LITTLE_ENDIAN)
389 return le_to_h_u24(buffer);
390 else
391 return be_to_h_u24(buffer);
392 }
393
394 /* read a uint16_t from a buffer in target memory endianness */
395 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
396 {
397 if (target->endianness == TARGET_LITTLE_ENDIAN)
398 return le_to_h_u16(buffer);
399 else
400 return be_to_h_u16(buffer);
401 }
402
403 /* write a uint64_t to a buffer in target memory endianness */
404 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
405 {
406 if (target->endianness == TARGET_LITTLE_ENDIAN)
407 h_u64_to_le(buffer, value);
408 else
409 h_u64_to_be(buffer, value);
410 }
411
412 /* write a uint32_t to a buffer in target memory endianness */
413 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
414 {
415 if (target->endianness == TARGET_LITTLE_ENDIAN)
416 h_u32_to_le(buffer, value);
417 else
418 h_u32_to_be(buffer, value);
419 }
420
421 /* write a uint24_t to a buffer in target memory endianness */
422 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
423 {
424 if (target->endianness == TARGET_LITTLE_ENDIAN)
425 h_u24_to_le(buffer, value);
426 else
427 h_u24_to_be(buffer, value);
428 }
429
430 /* write a uint16_t to a buffer in target memory endianness */
431 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
432 {
433 if (target->endianness == TARGET_LITTLE_ENDIAN)
434 h_u16_to_le(buffer, value);
435 else
436 h_u16_to_be(buffer, value);
437 }
438
/* write a uint8_t to a buffer; a single byte has no endianness, but the
 * target parameter keeps the signature parallel to the wider variants */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	buffer[0] = value;
}
444
/* unpack `count` target-endian uint64_t values from `buffer` into `dstbuf` */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u64(target, buffer + i * 8);
}
452
/* unpack `count` target-endian uint32_t values from `buffer` into `dstbuf` */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, buffer + i * 4);
}
460
/* unpack `count` target-endian uint16_t values from `buffer` into `dstbuf` */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	for (uint32_t i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, buffer + i * 2);
}
468
/* pack `count` uint64_t values from `srcbuf` into `buffer` in target endianness */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u64(target, buffer + i * 8, srcbuf[i]);
}
476
/* pack `count` uint32_t values from `srcbuf` into `buffer` in target endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u32(target, buffer + i * 4, srcbuf[i]);
}
484
/* pack `count` uint16_t values from `srcbuf` into `buffer` in target endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	for (uint32_t i = 0; i < count; i++)
		target_buffer_set_u16(target, buffer + i * 2, srcbuf[i]);
}
492
493 /* return a pointer to a configured target; id is name or number */
494 struct target *get_target(const char *id)
495 {
496 struct target *target;
497
498 /* try as tcltarget name */
499 for (target = all_targets; target; target = target->next) {
500 if (!target_name(target))
501 continue;
502 if (strcmp(id, target_name(target)) == 0)
503 return target;
504 }
505
506 /* It's OK to remove this fallback sometime after August 2010 or so */
507
508 /* no match, try as number */
509 unsigned num;
510 if (parse_uint(id, &num) != ERROR_OK)
511 return NULL;
512
513 for (target = all_targets; target; target = target->next) {
514 if (target->target_number == (int)num) {
515 LOG_WARNING("use '%s' as target identifier, not '%u'",
516 target_name(target), num);
517 return target;
518 }
519 }
520
521 return NULL;
522 }
523
524 /* returns a pointer to the n-th configured target */
525 struct target *get_target_by_num(int num)
526 {
527 struct target *target = all_targets;
528
529 while (target) {
530 if (target->target_number == num)
531 return target;
532 target = target->next;
533 }
534
535 return NULL;
536 }
537
/* Return the command context's current target; a NULL current target
 * indicates corrupted internal state, so abort the process outright. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);

	if (!target) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}
549
550 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
551 {
552 return cmd_ctx->current_target_override
553 ? cmd_ctx->current_target_override
554 : cmd_ctx->current_target;
555 }
556
/* Poll the target's state via its back-end; additionally supervises a
 * pending halt request (see target_halt()) and notifies GDB when the
 * halt does not complete within DEFAULT_HALT_TIMEOUT ms. */
int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			/* halt still pending: check elapsed time since the request */
			int64_t t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}
586
587 int target_halt(struct target *target)
588 {
589 int retval;
590 /* We can't poll until after examine */
591 if (!target_was_examined(target)) {
592 LOG_ERROR("Target not examined yet");
593 return ERROR_FAIL;
594 }
595
596 retval = target->type->halt(target);
597 if (retval != ERROR_OK)
598 return retval;
599
600 target->halt_issued = true;
601 target->halt_issued_time = timeval_ms();
602
603 return ERROR_OK;
604 }
605
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 * of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 * should be skipped. (For example, maybe execution was stopped by
 * such a breakpoint, in which case it would be counterproductive to
 * let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 * should be released and/or restored to their original contents.
 * (This would for example be true to run some downloaded "helper"
 * algorithm code, which resides in one such working buffer and uses
 * another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies. For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled. (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction. On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll = jtag_poll_get_enabled();
	jtag_poll_set_enabled(false);
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_set_enabled(save_poll);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
672
673 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
674 {
675 char buf[100];
676 int retval;
677 struct jim_nvp *n;
678 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
679 if (!n->name) {
680 LOG_ERROR("invalid reset mode");
681 return ERROR_FAIL;
682 }
683
684 struct target *target;
685 for (target = all_targets; target; target = target->next)
686 target_call_reset_callbacks(target, reset_mode);
687
688 /* disable polling during reset to make reset event scripts
689 * more predictable, i.e. dr/irscan & pathmove in events will
690 * not have JTAG operations injected into the middle of a sequence.
691 */
692 bool save_poll = jtag_poll_get_enabled();
693
694 jtag_poll_set_enabled(false);
695
696 sprintf(buf, "ocd_process_reset %s", n->name);
697 retval = Jim_Eval(cmd->ctx->interp, buf);
698
699 jtag_poll_set_enabled(save_poll);
700
701 if (retval != JIM_OK) {
702 Jim_MakeErrorMessage(cmd->ctx->interp);
703 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
704 return ERROR_FAIL;
705 }
706
707 /* We want any events to be processed before the prompt */
708 retval = target_call_timer_callbacks_now();
709
710 for (target = all_targets; target; target = target->next) {
711 target->type->check_reset(target);
712 target->running_alg = false;
713 }
714
715 return retval;
716 }
717
/* Default virt2phys handler: identity mapping (no MMU translation). */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
724
/* Default mmu handler: report that no MMU is present/enabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
730
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}
739
/* Default examine handler: mark the target examined without touching it. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
745
/* Default check_reset handler: perform no check, always succeed. */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
751
752 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
753 * Keep in sync */
754 int target_examine_one(struct target *target)
755 {
756 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
757
758 int retval = target->type->examine(target);
759 if (retval != ERROR_OK) {
760 target_reset_examined(target);
761 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
762 return retval;
763 }
764
765 target_set_examined(target);
766 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
767
768 return ERROR_OK;
769 }
770
771 static int jtag_enable_callback(enum jtag_event event, void *priv)
772 {
773 struct target *target = priv;
774
775 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
776 return ERROR_OK;
777
778 jtag_unregister_event_callback(jtag_enable_callback, target);
779
780 return target_examine_one(target);
781 }
782
783 /* Targets that correctly implement init + examine, i.e.
784 * no communication with target during init:
785 *
786 * XScale
787 */
788 int target_examine(void)
789 {
790 int retval = ERROR_OK;
791 struct target *target;
792
793 for (target = all_targets; target; target = target->next) {
794 /* defer examination, but don't skip it */
795 if (!target->tap->enabled) {
796 jtag_register_event_callback(jtag_enable_callback,
797 target);
798 continue;
799 }
800
801 if (target->defer_examine)
802 continue;
803
804 int retval2 = target_examine_one(target);
805 if (retval2 != ERROR_OK) {
806 LOG_WARNING("target %s examination failed", target_name(target));
807 retval = retval2;
808 }
809 }
810 return retval;
811 }
812
/* Accessor for the name string of the target's type (back-end driver). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
817
/* Dispatch soft_reset_halt to the back-end, if examined and supported. */
static int target_soft_reset_halt(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	/* optional back-end operation; not all target types provide it */
	if (!target->type->soft_reset_halt) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt(target);
}
831
832 /**
833 * Downloads a target-specific native code algorithm to the target,
834 * and executes it. * Note that some targets may need to set up, enable,
835 * and tear down a breakpoint (hard or * soft) to detect algorithm
836 * termination, while others may support lower overhead schemes where
837 * soft breakpoints embedded in the algorithm automatically terminate the
838 * algorithm.
839 *
840 * @param target used to run the algorithm
841 * @param num_mem_params
842 * @param mem_params
843 * @param num_reg_params
844 * @param reg_param
845 * @param entry_point
846 * @param exit_point
847 * @param timeout_ms
848 * @param arch_info target-specific description of the algorithm.
849 */
850 int target_run_algorithm(struct target *target,
851 int num_mem_params, struct mem_param *mem_params,
852 int num_reg_params, struct reg_param *reg_param,
853 target_addr_t entry_point, target_addr_t exit_point,
854 int timeout_ms, void *arch_info)
855 {
856 int retval = ERROR_FAIL;
857
858 if (!target_was_examined(target)) {
859 LOG_ERROR("Target not examined yet");
860 goto done;
861 }
862 if (!target->type->run_algorithm) {
863 LOG_ERROR("Target type '%s' does not support %s",
864 target_type_name(target), __func__);
865 goto done;
866 }
867
868 target->running_alg = true;
869 retval = target->type->run_algorithm(target,
870 num_mem_params, mem_params,
871 num_reg_params, reg_param,
872 entry_point, exit_point, timeout_ms, arch_info);
873 target->running_alg = false;
874
875 done:
876 return retval;
877 }
878
879 /**
880 * Executes a target-specific native code algorithm and leaves it running.
881 *
882 * @param target used to run the algorithm
883 * @param num_mem_params
884 * @param mem_params
885 * @param num_reg_params
886 * @param reg_params
887 * @param entry_point
888 * @param exit_point
889 * @param arch_info target-specific description of the algorithm.
890 */
891 int target_start_algorithm(struct target *target,
892 int num_mem_params, struct mem_param *mem_params,
893 int num_reg_params, struct reg_param *reg_params,
894 target_addr_t entry_point, target_addr_t exit_point,
895 void *arch_info)
896 {
897 int retval = ERROR_FAIL;
898
899 if (!target_was_examined(target)) {
900 LOG_ERROR("Target not examined yet");
901 goto done;
902 }
903 if (!target->type->start_algorithm) {
904 LOG_ERROR("Target type '%s' does not support %s",
905 target_type_name(target), __func__);
906 goto done;
907 }
908 if (target->running_alg) {
909 LOG_ERROR("Target is already running an algorithm");
910 goto done;
911 }
912
913 target->running_alg = true;
914 retval = target->type->start_algorithm(target,
915 num_mem_params, mem_params,
916 num_reg_params, reg_params,
917 entry_point, exit_point, arch_info);
918
919 done:
920 return retval;
921 }
922
923 /**
924 * Waits for an algorithm started with target_start_algorithm() to complete.
925 *
926 * @param target used to run the algorithm
927 * @param num_mem_params
928 * @param mem_params
929 * @param num_reg_params
930 * @param reg_params
931 * @param exit_point
932 * @param timeout_ms
933 * @param arch_info target-specific description of the algorithm.
934 */
935 int target_wait_algorithm(struct target *target,
936 int num_mem_params, struct mem_param *mem_params,
937 int num_reg_params, struct reg_param *reg_params,
938 target_addr_t exit_point, int timeout_ms,
939 void *arch_info)
940 {
941 int retval = ERROR_FAIL;
942
943 if (!target->type->wait_algorithm) {
944 LOG_ERROR("Target type '%s' does not support %s",
945 target_type_name(target), __func__);
946 goto done;
947 }
948 if (!target->running_alg) {
949 LOG_ERROR("Target is not running an algorithm");
950 goto done;
951 }
952
953 retval = target->type->wait_algorithm(target,
954 num_mem_params, mem_params,
955 num_reg_params, reg_params,
956 exit_point, timeout_ms, arch_info);
957 if (retval != ERROR_TARGET_TIMEOUT)
958 target->running_alg = false;
959
960 done:
961 return retval;
962 }
963
964 /**
965 * Streams data to a circular buffer on target intended for consumption by code
966 * running asynchronously on target.
967 *
968 * This is intended for applications where target-specific native code runs
969 * on the target, receives data from the circular buffer, does something with
970 * it (most likely writing it to a flash memory), and advances the circular
971 * buffer pointer.
972 *
973 * This assumes that the helper algorithm has already been loaded to the target,
974 * but has not been started yet. Given memory and register parameters are passed
975 * to the algorithm.
976 *
977 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
978 * following format:
979 *
980 * [buffer_start + 0, buffer_start + 4):
981 * Write Pointer address (aka head). Written and updated by this
982 * routine when new data is written to the circular buffer.
983 * [buffer_start + 4, buffer_start + 8):
984 * Read Pointer address (aka tail). Updated by code running on the
985 * target after it consumes data.
986 * [buffer_start + 8, buffer_start + buffer_size):
987 * Circular buffer contents.
988 *
989 * See contrib/loaders/flash/stm32f1x.S for an example.
990 *
991 * @param target used to run the algorithm
992 * @param buffer address on the host where data to be sent is located
993 * @param count number of blocks to send
994 * @param block_size size in bytes of each block
995 * @param num_mem_params count of memory-based params to pass to algorithm
996 * @param mem_params memory-based params to pass to algorithm
997 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params register-based params to pass to algorithm
999 * @param buffer_start address on the target of the circular buffer structure
1000 * @param buffer_size size of the circular buffer structure
1001 * @param entry_point address on the target to execute to start the algorithm
1002 * @param exit_point address at which to set a breakpoint to catch the
1003 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1004 * @param arch_info
1005 */
1006
1007 int target_run_flash_async_algorithm(struct target *target,
1008 const uint8_t *buffer, uint32_t count, int block_size,
1009 int num_mem_params, struct mem_param *mem_params,
1010 int num_reg_params, struct reg_param *reg_params,
1011 uint32_t buffer_start, uint32_t buffer_size,
1012 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1013 {
1014 int retval;
1015 int timeout = 0;
1016
1017 const uint8_t *buffer_orig = buffer;
1018
1019 /* Set up working area. First word is write pointer, second word is read pointer,
1020 * rest is fifo data area. */
1021 uint32_t wp_addr = buffer_start;
1022 uint32_t rp_addr = buffer_start + 4;
1023 uint32_t fifo_start_addr = buffer_start + 8;
1024 uint32_t fifo_end_addr = buffer_start + buffer_size;
1025
1026 uint32_t wp = fifo_start_addr;
1027 uint32_t rp = fifo_start_addr;
1028
1029 /* validate block_size is 2^n */
1030 assert(IS_PWR_OF_2(block_size));
1031
1032 retval = target_write_u32(target, wp_addr, wp);
1033 if (retval != ERROR_OK)
1034 return retval;
1035 retval = target_write_u32(target, rp_addr, rp);
1036 if (retval != ERROR_OK)
1037 return retval;
1038
1039 /* Start up algorithm on target and let it idle while writing the first chunk */
1040 retval = target_start_algorithm(target, num_mem_params, mem_params,
1041 num_reg_params, reg_params,
1042 entry_point,
1043 exit_point,
1044 arch_info);
1045
1046 if (retval != ERROR_OK) {
1047 LOG_ERROR("error starting target flash write algorithm");
1048 return retval;
1049 }
1050
1051 while (count > 0) {
1052
1053 retval = target_read_u32(target, rp_addr, &rp);
1054 if (retval != ERROR_OK) {
1055 LOG_ERROR("failed to get read pointer");
1056 break;
1057 }
1058
1059 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1060 (size_t) (buffer - buffer_orig), count, wp, rp);
1061
1062 if (rp == 0) {
1063 LOG_ERROR("flash write algorithm aborted by target");
1064 retval = ERROR_FLASH_OPERATION_FAILED;
1065 break;
1066 }
1067
1068 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1069 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1070 break;
1071 }
1072
1073 /* Count the number of bytes available in the fifo without
1074 * crossing the wrap around. Make sure to not fill it completely,
1075 * because that would make wp == rp and that's the empty condition. */
1076 uint32_t thisrun_bytes;
1077 if (rp > wp)
1078 thisrun_bytes = rp - wp - block_size;
1079 else if (rp > fifo_start_addr)
1080 thisrun_bytes = fifo_end_addr - wp;
1081 else
1082 thisrun_bytes = fifo_end_addr - wp - block_size;
1083
1084 if (thisrun_bytes == 0) {
1085 /* Throttle polling a bit if transfer is (much) faster than flash
1086 * programming. The exact delay shouldn't matter as long as it's
1087 * less than buffer size / flash speed. This is very unlikely to
1088 * run when using high latency connections such as USB. */
1089 alive_sleep(2);
1090
1091 /* to stop an infinite loop on some targets check and increment a timeout
1092 * this issue was observed on a stellaris using the new ICDI interface */
1093 if (timeout++ >= 2500) {
1094 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1095 return ERROR_FLASH_OPERATION_FAILED;
1096 }
1097 continue;
1098 }
1099
1100 /* reset our timeout */
1101 timeout = 0;
1102
1103 /* Limit to the amount of data we actually want to write */
1104 if (thisrun_bytes > count * block_size)
1105 thisrun_bytes = count * block_size;
1106
1107 /* Force end of large blocks to be word aligned */
1108 if (thisrun_bytes >= 16)
1109 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1110
1111 /* Write data to fifo */
1112 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1113 if (retval != ERROR_OK)
1114 break;
1115
1116 /* Update counters and wrap write pointer */
1117 buffer += thisrun_bytes;
1118 count -= thisrun_bytes / block_size;
1119 wp += thisrun_bytes;
1120 if (wp >= fifo_end_addr)
1121 wp = fifo_start_addr;
1122
1123 /* Store updated write pointer to target */
1124 retval = target_write_u32(target, wp_addr, wp);
1125 if (retval != ERROR_OK)
1126 break;
1127
1128 /* Avoid GDB timeouts */
1129 keep_alive();
1130 }
1131
1132 if (retval != ERROR_OK) {
1133 /* abort flash write algorithm on target */
1134 target_write_u32(target, wp_addr, 0);
1135 }
1136
1137 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1138 num_reg_params, reg_params,
1139 exit_point,
1140 10000,
1141 arch_info);
1142
1143 if (retval2 != ERROR_OK) {
1144 LOG_ERROR("error waiting for target flash write algorithm");
1145 retval = retval2;
1146 }
1147
1148 if (retval == ERROR_OK) {
1149 /* check if algorithm set rp = 0 after fifo writer loop finished */
1150 retval = target_read_u32(target, rp_addr, &rp);
1151 if (retval == ERROR_OK && rp == 0) {
1152 LOG_ERROR("flash write algorithm aborted by target");
1153 retval = ERROR_FLASH_OPERATION_FAILED;
1154 }
1155 }
1156
1157 return retval;
1158 }
1159
1160 int target_run_read_async_algorithm(struct target *target,
1161 uint8_t *buffer, uint32_t count, int block_size,
1162 int num_mem_params, struct mem_param *mem_params,
1163 int num_reg_params, struct reg_param *reg_params,
1164 uint32_t buffer_start, uint32_t buffer_size,
1165 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1166 {
1167 int retval;
1168 int timeout = 0;
1169
1170 const uint8_t *buffer_orig = buffer;
1171
1172 /* Set up working area. First word is write pointer, second word is read pointer,
1173 * rest is fifo data area. */
1174 uint32_t wp_addr = buffer_start;
1175 uint32_t rp_addr = buffer_start + 4;
1176 uint32_t fifo_start_addr = buffer_start + 8;
1177 uint32_t fifo_end_addr = buffer_start + buffer_size;
1178
1179 uint32_t wp = fifo_start_addr;
1180 uint32_t rp = fifo_start_addr;
1181
1182 /* validate block_size is 2^n */
1183 assert(IS_PWR_OF_2(block_size));
1184
1185 retval = target_write_u32(target, wp_addr, wp);
1186 if (retval != ERROR_OK)
1187 return retval;
1188 retval = target_write_u32(target, rp_addr, rp);
1189 if (retval != ERROR_OK)
1190 return retval;
1191
1192 /* Start up algorithm on target */
1193 retval = target_start_algorithm(target, num_mem_params, mem_params,
1194 num_reg_params, reg_params,
1195 entry_point,
1196 exit_point,
1197 arch_info);
1198
1199 if (retval != ERROR_OK) {
1200 LOG_ERROR("error starting target flash read algorithm");
1201 return retval;
1202 }
1203
1204 while (count > 0) {
1205 retval = target_read_u32(target, wp_addr, &wp);
1206 if (retval != ERROR_OK) {
1207 LOG_ERROR("failed to get write pointer");
1208 break;
1209 }
1210
1211 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1212 (size_t)(buffer - buffer_orig), count, wp, rp);
1213
1214 if (wp == 0) {
1215 LOG_ERROR("flash read algorithm aborted by target");
1216 retval = ERROR_FLASH_OPERATION_FAILED;
1217 break;
1218 }
1219
1220 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1221 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1222 break;
1223 }
1224
1225 /* Count the number of bytes available in the fifo without
1226 * crossing the wrap around. */
1227 uint32_t thisrun_bytes;
1228 if (wp >= rp)
1229 thisrun_bytes = wp - rp;
1230 else
1231 thisrun_bytes = fifo_end_addr - rp;
1232
1233 if (thisrun_bytes == 0) {
1234 /* Throttle polling a bit if transfer is (much) faster than flash
1235 * reading. The exact delay shouldn't matter as long as it's
1236 * less than buffer size / flash speed. This is very unlikely to
1237 * run when using high latency connections such as USB. */
1238 alive_sleep(2);
1239
1240 /* to stop an infinite loop on some targets check and increment a timeout
1241 * this issue was observed on a stellaris using the new ICDI interface */
1242 if (timeout++ >= 2500) {
1243 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1244 return ERROR_FLASH_OPERATION_FAILED;
1245 }
1246 continue;
1247 }
1248
1249 /* Reset our timeout */
1250 timeout = 0;
1251
1252 /* Limit to the amount of data we actually want to read */
1253 if (thisrun_bytes > count * block_size)
1254 thisrun_bytes = count * block_size;
1255
1256 /* Force end of large blocks to be word aligned */
1257 if (thisrun_bytes >= 16)
1258 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1259
1260 /* Read data from fifo */
1261 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1262 if (retval != ERROR_OK)
1263 break;
1264
1265 /* Update counters and wrap write pointer */
1266 buffer += thisrun_bytes;
1267 count -= thisrun_bytes / block_size;
1268 rp += thisrun_bytes;
1269 if (rp >= fifo_end_addr)
1270 rp = fifo_start_addr;
1271
1272 /* Store updated write pointer to target */
1273 retval = target_write_u32(target, rp_addr, rp);
1274 if (retval != ERROR_OK)
1275 break;
1276
1277 /* Avoid GDB timeouts */
1278 keep_alive();
1279
1280 }
1281
1282 if (retval != ERROR_OK) {
1283 /* abort flash write algorithm on target */
1284 target_write_u32(target, rp_addr, 0);
1285 }
1286
1287 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1288 num_reg_params, reg_params,
1289 exit_point,
1290 10000,
1291 arch_info);
1292
1293 if (retval2 != ERROR_OK) {
1294 LOG_ERROR("error waiting for target flash write algorithm");
1295 retval = retval2;
1296 }
1297
1298 if (retval == ERROR_OK) {
1299 /* check if algorithm set wp = 0 after fifo writer loop finished */
1300 retval = target_read_u32(target, wp_addr, &wp);
1301 if (retval == ERROR_OK && wp == 0) {
1302 LOG_ERROR("flash read algorithm aborted by target");
1303 retval = ERROR_FLASH_OPERATION_FAILED;
1304 }
1305 }
1306
1307 return retval;
1308 }
1309
1310 int target_read_memory(struct target *target,
1311 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1312 {
1313 if (!target_was_examined(target)) {
1314 LOG_ERROR("Target not examined yet");
1315 return ERROR_FAIL;
1316 }
1317 if (!target->type->read_memory) {
1318 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1319 return ERROR_FAIL;
1320 }
1321 return target->type->read_memory(target, address, size, count, buffer);
1322 }
1323
1324 int target_read_phys_memory(struct target *target,
1325 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1326 {
1327 if (!target_was_examined(target)) {
1328 LOG_ERROR("Target not examined yet");
1329 return ERROR_FAIL;
1330 }
1331 if (!target->type->read_phys_memory) {
1332 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1333 return ERROR_FAIL;
1334 }
1335 return target->type->read_phys_memory(target, address, size, count, buffer);
1336 }
1337
1338 int target_write_memory(struct target *target,
1339 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1340 {
1341 if (!target_was_examined(target)) {
1342 LOG_ERROR("Target not examined yet");
1343 return ERROR_FAIL;
1344 }
1345 if (!target->type->write_memory) {
1346 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1347 return ERROR_FAIL;
1348 }
1349 return target->type->write_memory(target, address, size, count, buffer);
1350 }
1351
1352 int target_write_phys_memory(struct target *target,
1353 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1354 {
1355 if (!target_was_examined(target)) {
1356 LOG_ERROR("Target not examined yet");
1357 return ERROR_FAIL;
1358 }
1359 if (!target->type->write_phys_memory) {
1360 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1361 return ERROR_FAIL;
1362 }
1363 return target->type->write_phys_memory(target, address, size, count, buffer);
1364 }
1365
1366 int target_add_breakpoint(struct target *target,
1367 struct breakpoint *breakpoint)
1368 {
1369 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1370 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1371 return ERROR_TARGET_NOT_HALTED;
1372 }
1373 return target->type->add_breakpoint(target, breakpoint);
1374 }
1375
1376 int target_add_context_breakpoint(struct target *target,
1377 struct breakpoint *breakpoint)
1378 {
1379 if (target->state != TARGET_HALTED) {
1380 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1381 return ERROR_TARGET_NOT_HALTED;
1382 }
1383 return target->type->add_context_breakpoint(target, breakpoint);
1384 }
1385
1386 int target_add_hybrid_breakpoint(struct target *target,
1387 struct breakpoint *breakpoint)
1388 {
1389 if (target->state != TARGET_HALTED) {
1390 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1391 return ERROR_TARGET_NOT_HALTED;
1392 }
1393 return target->type->add_hybrid_breakpoint(target, breakpoint);
1394 }
1395
/* Dispatch breakpoint removal straight to the target driver; unlike the
 * add path, no halted-state check is made here. */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
1401
1402 int target_add_watchpoint(struct target *target,
1403 struct watchpoint *watchpoint)
1404 {
1405 if (target->state != TARGET_HALTED) {
1406 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1407 return ERROR_TARGET_NOT_HALTED;
1408 }
1409 return target->type->add_watchpoint(target, watchpoint);
1410 }
/* Dispatch watchpoint removal straight to the target driver; no
 * halted-state check is made here. */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
1416 int target_hit_watchpoint(struct target *target,
1417 struct watchpoint **hit_watchpoint)
1418 {
1419 if (target->state != TARGET_HALTED) {
1420 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1421 return ERROR_TARGET_NOT_HALTED;
1422 }
1423
1424 if (!target->type->hit_watchpoint) {
1425 /* For backward compatible, if hit_watchpoint is not implemented,
1426 * return ERROR_FAIL such that gdb_server will not take the nonsense
1427 * information. */
1428 return ERROR_FAIL;
1429 }
1430
1431 return target->type->hit_watchpoint(target, hit_watchpoint);
1432 }
1433
1434 const char *target_get_gdb_arch(struct target *target)
1435 {
1436 if (!target->type->get_gdb_arch)
1437 return NULL;
1438 return target->type->get_gdb_arch(target);
1439 }
1440
1441 int target_get_gdb_reg_list(struct target *target,
1442 struct reg **reg_list[], int *reg_list_size,
1443 enum target_register_class reg_class)
1444 {
1445 int result = ERROR_FAIL;
1446
1447 if (!target_was_examined(target)) {
1448 LOG_ERROR("Target not examined yet");
1449 goto done;
1450 }
1451
1452 result = target->type->get_gdb_reg_list(target, reg_list,
1453 reg_list_size, reg_class);
1454
1455 done:
1456 if (result != ERROR_OK) {
1457 *reg_list = NULL;
1458 *reg_list_size = 0;
1459 }
1460 return result;
1461 }
1462
1463 int target_get_gdb_reg_list_noread(struct target *target,
1464 struct reg **reg_list[], int *reg_list_size,
1465 enum target_register_class reg_class)
1466 {
1467 if (target->type->get_gdb_reg_list_noread &&
1468 target->type->get_gdb_reg_list_noread(target, reg_list,
1469 reg_list_size, reg_class) == ERROR_OK)
1470 return ERROR_OK;
1471 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1472 }
1473
1474 bool target_supports_gdb_connection(struct target *target)
1475 {
1476 /*
1477 * exclude all the targets that don't provide get_gdb_reg_list
1478 * or that have explicit gdb_max_connection == 0
1479 */
1480 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1481 }
1482
1483 int target_step(struct target *target,
1484 int current, target_addr_t address, int handle_breakpoints)
1485 {
1486 int retval;
1487
1488 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1489
1490 retval = target->type->step(target, current, address, handle_breakpoints);
1491 if (retval != ERROR_OK)
1492 return retval;
1493
1494 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1495
1496 return retval;
1497 }
1498
1499 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1500 {
1501 if (target->state != TARGET_HALTED) {
1502 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1503 return ERROR_TARGET_NOT_HALTED;
1504 }
1505 return target->type->get_gdb_fileio_info(target, fileio_info);
1506 }
1507
1508 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1509 {
1510 if (target->state != TARGET_HALTED) {
1511 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1512 return ERROR_TARGET_NOT_HALTED;
1513 }
1514 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1515 }
1516
1517 target_addr_t target_address_max(struct target *target)
1518 {
1519 unsigned bits = target_address_bits(target);
1520 if (sizeof(target_addr_t) * 8 == bits)
1521 return (target_addr_t) -1;
1522 else
1523 return (((target_addr_t) 1) << bits) - 1;
1524 }
1525
1526 unsigned target_address_bits(struct target *target)
1527 {
1528 if (target->type->address_bits)
1529 return target->type->address_bits(target);
1530 return 32;
1531 }
1532
1533 unsigned int target_data_bits(struct target *target)
1534 {
1535 if (target->type->data_bits)
1536 return target->type->data_bits(target);
1537 return 32;
1538 }
1539
/* Dispatch to the driver's profiling hook (target_init_one() installs
 * target_profiling_default when the driver provides none). */
static int target_profiling(struct target *target, uint32_t *samples,
	uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
		num_samples, seconds);
}
1546
1547 static int handle_target(void *priv);
1548
1549 static int target_init_one(struct command_context *cmd_ctx,
1550 struct target *target)
1551 {
1552 target_reset_examined(target);
1553
1554 struct target_type *type = target->type;
1555 if (!type->examine)
1556 type->examine = default_examine;
1557
1558 if (!type->check_reset)
1559 type->check_reset = default_check_reset;
1560
1561 assert(type->init_target);
1562
1563 int retval = type->init_target(cmd_ctx, target);
1564 if (retval != ERROR_OK) {
1565 LOG_ERROR("target '%s' init failed", target_name(target));
1566 return retval;
1567 }
1568
1569 /* Sanity-check MMU support ... stub in what we must, to help
1570 * implement it in stages, but warn if we need to do so.
1571 */
1572 if (type->mmu) {
1573 if (!type->virt2phys) {
1574 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1575 type->virt2phys = identity_virt2phys;
1576 }
1577 } else {
1578 /* Make sure no-MMU targets all behave the same: make no
1579 * distinction between physical and virtual addresses, and
1580 * ensure that virt2phys() is always an identity mapping.
1581 */
1582 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1583 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1584
1585 type->mmu = no_mmu;
1586 type->write_phys_memory = type->write_memory;
1587 type->read_phys_memory = type->read_memory;
1588 type->virt2phys = identity_virt2phys;
1589 }
1590
1591 if (!target->type->read_buffer)
1592 target->type->read_buffer = target_read_buffer_default;
1593
1594 if (!target->type->write_buffer)
1595 target->type->write_buffer = target_write_buffer_default;
1596
1597 if (!target->type->get_gdb_fileio_info)
1598 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1599
1600 if (!target->type->gdb_fileio_end)
1601 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1602
1603 if (!target->type->profiling)
1604 target->type->profiling = target_profiling_default;
1605
1606 return ERROR_OK;
1607 }
1608
1609 static int target_init(struct command_context *cmd_ctx)
1610 {
1611 struct target *target;
1612 int retval;
1613
1614 for (target = all_targets; target; target = target->next) {
1615 retval = target_init_one(cmd_ctx, target);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 }
1619
1620 if (!all_targets)
1621 return ERROR_OK;
1622
1623 retval = target_register_user_commands(cmd_ctx);
1624 if (retval != ERROR_OK)
1625 return retval;
1626
1627 retval = target_register_timer_callback(&handle_target,
1628 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1629 if (retval != ERROR_OK)
1630 return retval;
1631
1632 return ERROR_OK;
1633 }
1634
1635 COMMAND_HANDLER(handle_target_init_command)
1636 {
1637 int retval;
1638
1639 if (CMD_ARGC != 0)
1640 return ERROR_COMMAND_SYNTAX_ERROR;
1641
1642 static bool target_initialized;
1643 if (target_initialized) {
1644 LOG_INFO("'target init' has already been called");
1645 return ERROR_OK;
1646 }
1647 target_initialized = true;
1648
1649 retval = command_run_line(CMD_CTX, "init_targets");
1650 if (retval != ERROR_OK)
1651 return retval;
1652
1653 retval = command_run_line(CMD_CTX, "init_target_events");
1654 if (retval != ERROR_OK)
1655 return retval;
1656
1657 retval = command_run_line(CMD_CTX, "init_board");
1658 if (retval != ERROR_OK)
1659 return retval;
1660
1661 LOG_DEBUG("Initializing targets...");
1662 return target_init(CMD_CTX);
1663 }
1664
1665 int target_register_event_callback(int (*callback)(struct target *target,
1666 enum target_event event, void *priv), void *priv)
1667 {
1668 struct target_event_callback **callbacks_p = &target_event_callbacks;
1669
1670 if (!callback)
1671 return ERROR_COMMAND_SYNTAX_ERROR;
1672
1673 if (*callbacks_p) {
1674 while ((*callbacks_p)->next)
1675 callbacks_p = &((*callbacks_p)->next);
1676 callbacks_p = &((*callbacks_p)->next);
1677 }
1678
1679 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1680 (*callbacks_p)->callback = callback;
1681 (*callbacks_p)->priv = priv;
1682 (*callbacks_p)->next = NULL;
1683
1684 return ERROR_OK;
1685 }
1686
1687 int target_register_reset_callback(int (*callback)(struct target *target,
1688 enum target_reset_mode reset_mode, void *priv), void *priv)
1689 {
1690 struct target_reset_callback *entry;
1691
1692 if (!callback)
1693 return ERROR_COMMAND_SYNTAX_ERROR;
1694
1695 entry = malloc(sizeof(struct target_reset_callback));
1696 if (!entry) {
1697 LOG_ERROR("error allocating buffer for reset callback entry");
1698 return ERROR_COMMAND_SYNTAX_ERROR;
1699 }
1700
1701 entry->callback = callback;
1702 entry->priv = priv;
1703 list_add(&entry->list, &target_reset_callback_list);
1704
1705
1706 return ERROR_OK;
1707 }
1708
1709 int target_register_trace_callback(int (*callback)(struct target *target,
1710 size_t len, uint8_t *data, void *priv), void *priv)
1711 {
1712 struct target_trace_callback *entry;
1713
1714 if (!callback)
1715 return ERROR_COMMAND_SYNTAX_ERROR;
1716
1717 entry = malloc(sizeof(struct target_trace_callback));
1718 if (!entry) {
1719 LOG_ERROR("error allocating buffer for trace callback entry");
1720 return ERROR_COMMAND_SYNTAX_ERROR;
1721 }
1722
1723 entry->callback = callback;
1724 entry->priv = priv;
1725 list_add(&entry->list, &target_trace_callback_list);
1726
1727
1728 return ERROR_OK;
1729 }
1730
1731 int target_register_timer_callback(int (*callback)(void *priv),
1732 unsigned int time_ms, enum target_timer_type type, void *priv)
1733 {
1734 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1735
1736 if (!callback)
1737 return ERROR_COMMAND_SYNTAX_ERROR;
1738
1739 if (*callbacks_p) {
1740 while ((*callbacks_p)->next)
1741 callbacks_p = &((*callbacks_p)->next);
1742 callbacks_p = &((*callbacks_p)->next);
1743 }
1744
1745 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1746 (*callbacks_p)->callback = callback;
1747 (*callbacks_p)->type = type;
1748 (*callbacks_p)->time_ms = time_ms;
1749 (*callbacks_p)->removed = false;
1750
1751 (*callbacks_p)->when = timeval_ms() + time_ms;
1752 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1753
1754 (*callbacks_p)->priv = priv;
1755 (*callbacks_p)->next = NULL;
1756
1757 return ERROR_OK;
1758 }
1759
1760 int target_unregister_event_callback(int (*callback)(struct target *target,
1761 enum target_event event, void *priv), void *priv)
1762 {
1763 struct target_event_callback **p = &target_event_callbacks;
1764 struct target_event_callback *c = target_event_callbacks;
1765
1766 if (!callback)
1767 return ERROR_COMMAND_SYNTAX_ERROR;
1768
1769 while (c) {
1770 struct target_event_callback *next = c->next;
1771 if ((c->callback == callback) && (c->priv == priv)) {
1772 *p = next;
1773 free(c);
1774 return ERROR_OK;
1775 } else
1776 p = &(c->next);
1777 c = next;
1778 }
1779
1780 return ERROR_OK;
1781 }
1782
1783 int target_unregister_reset_callback(int (*callback)(struct target *target,
1784 enum target_reset_mode reset_mode, void *priv), void *priv)
1785 {
1786 struct target_reset_callback *entry;
1787
1788 if (!callback)
1789 return ERROR_COMMAND_SYNTAX_ERROR;
1790
1791 list_for_each_entry(entry, &target_reset_callback_list, list) {
1792 if (entry->callback == callback && entry->priv == priv) {
1793 list_del(&entry->list);
1794 free(entry);
1795 break;
1796 }
1797 }
1798
1799 return ERROR_OK;
1800 }
1801
1802 int target_unregister_trace_callback(int (*callback)(struct target *target,
1803 size_t len, uint8_t *data, void *priv), void *priv)
1804 {
1805 struct target_trace_callback *entry;
1806
1807 if (!callback)
1808 return ERROR_COMMAND_SYNTAX_ERROR;
1809
1810 list_for_each_entry(entry, &target_trace_callback_list, list) {
1811 if (entry->callback == callback && entry->priv == priv) {
1812 list_del(&entry->list);
1813 free(entry);
1814 break;
1815 }
1816 }
1817
1818 return ERROR_OK;
1819 }
1820
1821 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1822 {
1823 if (!callback)
1824 return ERROR_COMMAND_SYNTAX_ERROR;
1825
1826 for (struct target_timer_callback *c = target_timer_callbacks;
1827 c; c = c->next) {
1828 if ((c->callback == callback) && (c->priv == priv)) {
1829 c->removed = true;
1830 return ERROR_OK;
1831 }
1832 }
1833
1834 return ERROR_FAIL;
1835 }
1836
1837 int target_call_event_callbacks(struct target *target, enum target_event event)
1838 {
1839 struct target_event_callback *callback = target_event_callbacks;
1840 struct target_event_callback *next_callback;
1841
1842 if (event == TARGET_EVENT_HALTED) {
1843 /* execute early halted first */
1844 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1845 }
1846
1847 LOG_DEBUG("target event %i (%s) for core %s", event,
1848 target_event_name(event),
1849 target_name(target));
1850
1851 target_handle_event(target, event);
1852
1853 while (callback) {
1854 next_callback = callback->next;
1855 callback->callback(target, event, callback->priv);
1856 callback = next_callback;
1857 }
1858
1859 return ERROR_OK;
1860 }
1861
1862 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1863 {
1864 struct target_reset_callback *callback;
1865
1866 LOG_DEBUG("target reset %i (%s)", reset_mode,
1867 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1868
1869 list_for_each_entry(callback, &target_reset_callback_list, list)
1870 callback->callback(target, reset_mode, callback->priv);
1871
1872 return ERROR_OK;
1873 }
1874
1875 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1876 {
1877 struct target_trace_callback *callback;
1878
1879 list_for_each_entry(callback, &target_trace_callback_list, list)
1880 callback->callback(target, len, data, callback->priv);
1881
1882 return ERROR_OK;
1883 }
1884
1885 static int target_timer_callback_periodic_restart(
1886 struct target_timer_callback *cb, int64_t *now)
1887 {
1888 cb->when = *now + cb->time_ms;
1889 return ERROR_OK;
1890 }
1891
1892 static int target_call_timer_callback(struct target_timer_callback *cb,
1893 int64_t *now)
1894 {
1895 cb->callback(cb->priv);
1896
1897 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1898 return target_timer_callback_periodic_restart(cb, now);
1899
1900 return target_unregister_timer_callback(cb->callback, cb->priv);
1901 }
1902
/* Walk the timer-callback list, firing due entries and freeing entries
 * flagged for removal. With checktime == 0 every periodic callback fires
 * immediately regardless of its scheduled time. Also recomputes
 * target_timer_next_event_value to the earliest pending deadline. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		/* Entries marked by target_unregister_timer_callback() are
		 * unlinked and freed here, where it is safe to do so. */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the earliest still-pending deadline; skip entries the
		 * callback above may have flagged for removal. */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1950
/* Run timer callbacks that are due, honoring each entry's schedule. */
int target_call_timer_callbacks()
{
	return target_call_timer_callbacks_check_time(1);
}
1955
1956 /* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now()
{
	/* checktime == 0: fire periodic callbacks without waiting for their deadline */
	return target_call_timer_callbacks_check_time(0);
}
1961
1962 int64_t target_timer_next_event(void)
1963 {
1964 return target_timer_next_event_value;
1965 }
1966
1967 /* Prints the working area layout for debug purposes */
1968 static void print_wa_layout(struct target *target)
1969 {
1970 struct working_area *c = target->working_areas;
1971
1972 while (c) {
1973 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1974 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1975 c->address, c->address + c->size - 1, c->size);
1976 c = c->next;
1977 }
1978 }
1979
1980 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1981 static void target_split_working_area(struct working_area *area, uint32_t size)
1982 {
1983 assert(area->free); /* Shouldn't split an allocated area */
1984 assert(size <= area->size); /* Caller should guarantee this */
1985
1986 /* Split only if not already the right size */
1987 if (size < area->size) {
1988 struct working_area *new_wa = malloc(sizeof(*new_wa));
1989
1990 if (!new_wa)
1991 return;
1992
1993 new_wa->next = area->next;
1994 new_wa->size = area->size - size;
1995 new_wa->address = area->address + size;
1996 new_wa->backup = NULL;
1997 new_wa->user = NULL;
1998 new_wa->free = true;
1999
2000 area->next = new_wa;
2001 area->size = size;
2002
2003 /* If backup memory was allocated to this area, it has the wrong size
2004 * now so free it and it will be reallocated if/when needed */
2005 free(area->backup);
2006 area->backup = NULL;
2007 }
2008 }
2009
2010 /* Merge all adjacent free areas into one */
2011 static void target_merge_working_areas(struct target *target)
2012 {
2013 struct working_area *c = target->working_areas;
2014
2015 while (c && c->next) {
2016 assert(c->next->address == c->address + c->size); /* This is an invariant */
2017
2018 /* Find two adjacent free areas */
2019 if (c->free && c->next->free) {
2020 /* Merge the last into the first */
2021 c->size += c->next->size;
2022
2023 /* Remove the last */
2024 struct working_area *to_be_freed = c->next;
2025 c->next = c->next->next;
2026 free(to_be_freed->backup);
2027 free(to_be_freed);
2028
2029 /* If backup memory was allocated to the remaining area, it's has
2030 * the wrong size now */
2031 free(c->backup);
2032 c->backup = NULL;
2033 } else {
2034 c = c->next;
2035 }
2036 }
2037 }
2038
/* Allocate @size bytes (rounded up to a multiple of 4) from the target's
 * working area, without logging when the pool is exhausted.
 *
 * On the first call the area list is created, picking the physical or
 * virtual base address according to the current MMU state. If the target
 * is configured with backup_working_area, the existing memory contents are
 * read out so freeing the area can restore them.
 *
 * On success *@area receives the allocated area and ERROR_OK is returned.
 * Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no free area is large
 * enough (or when no working memory is configured at all). */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* If malloc failed, the list stays NULL and the search below
		 * reports ERROR_TARGET_RESOURCE_NOT_AVAILABLE. */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	if (target->backup_working_area) {
		/* Lazily allocate the backup buffer; it was dropped if the
		 * area was ever split or merged. */
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		/* Save the current target memory contents for later restore */
		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2133
2134 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2135 {
2136 int retval;
2137
2138 retval = target_alloc_working_area_try(target, size, area);
2139 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2140 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2141 return retval;
2142
2143 }
2144
2145 static int target_restore_working_area(struct target *target, struct working_area *area)
2146 {
2147 int retval = ERROR_OK;
2148
2149 if (target->backup_working_area && area->backup) {
2150 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2151 if (retval != ERROR_OK)
2152 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2153 area->size, area->address);
2154 }
2155
2156 return retval;
2157 }
2158
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	/* Freeing NULL or an already-free area is a harmless no-op */
	if (!area || area->free)
		return ERROR_OK;

	int retval = ERROR_OK;
	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
		area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* Coalesce newly-freed space with any free neighbours */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
2191
/* Return @area to the pool, restoring its backed-up contents first. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	const int restore_contents = 1;
	return target_free_working_area_restore(target, area, restore_contents);
}
2196
2197 /* free resources and restore memory, if restoring memory fails,
2198 * free up resources anyway
2199 */
2200 static void target_free_all_working_areas_restore(struct target *target, int restore)
2201 {
2202 struct working_area *c = target->working_areas;
2203
2204 LOG_DEBUG("freeing all working areas");
2205
2206 /* Loop through all areas, restoring the allocated ones and marking them as free */
2207 while (c) {
2208 if (!c->free) {
2209 if (restore)
2210 target_restore_working_area(target, c);
2211 c->free = true;
2212 *c->user = NULL; /* Same as above */
2213 c->user = NULL;
2214 }
2215 c = c->next;
2216 }
2217
2218 /* Run a merge pass to combine all areas into one */
2219 target_merge_working_areas(target);
2220
2221 print_wa_layout(target);
2222 }
2223
2224 void target_free_all_working_areas(struct target *target)
2225 {
2226 target_free_all_working_areas_restore(target, 1);
2227
2228 /* Now we have none or only one working area marked as free */
2229 if (target->working_areas) {
2230 /* Free the last one to allow on-the-fly moving and resizing */
2231 free(target->working_areas->backup);
2232 free(target->working_areas);
2233 target->working_areas = NULL;
2234 }
2235 }
2236
2237 /* Find the largest number of bytes that can be allocated */
2238 uint32_t target_get_working_area_avail(struct target *target)
2239 {
2240 struct working_area *c = target->working_areas;
2241 uint32_t max_size = 0;
2242
2243 if (!c)
2244 return target->working_area_size;
2245
2246 while (c) {
2247 if (c->free && max_size < c->size)
2248 max_size = c->size;
2249
2250 c = c->next;
2251 }
2252
2253 return max_size;
2254 }
2255
/* Release every resource owned by @target and free the target itself.
 * Order matters: the target type's own deinit runs first, then per-target
 * state (event handlers, working areas, SMP list, RTOS support) is torn
 * down before the struct memory is released. */
static void target_destroy(struct target *target)
{
	/* Let the target type clean up its private state first */
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Drop the Tcl event-handler chain, releasing each script body */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		/* the shared empty list has static storage; never free it */
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2298
2299 void target_quit(void)
2300 {
2301 struct target_event_callback *pe = target_event_callbacks;
2302 while (pe) {
2303 struct target_event_callback *t = pe->next;
2304 free(pe);
2305 pe = t;
2306 }
2307 target_event_callbacks = NULL;
2308
2309 struct target_timer_callback *pt = target_timer_callbacks;
2310 while (pt) {
2311 struct target_timer_callback *t = pt->next;
2312 free(pt);
2313 pt = t;
2314 }
2315 target_timer_callbacks = NULL;
2316
2317 for (struct target *target = all_targets; target;) {
2318 struct target *tmp;
2319
2320 tmp = target->next;
2321 target_destroy(target);
2322 target = tmp;
2323 }
2324
2325 all_targets = NULL;
2326 }
2327
2328 int target_arch_state(struct target *target)
2329 {
2330 int retval;
2331 if (!target) {
2332 LOG_WARNING("No target has been configured");
2333 return ERROR_OK;
2334 }
2335
2336 if (target->state != TARGET_HALTED)
2337 return ERROR_OK;
2338
2339 retval = target->type->arch_state(target);
2340 return retval;
2341 }
2342
2343 static int target_get_gdb_fileio_info_default(struct target *target,
2344 struct gdb_fileio_info *fileio_info)
2345 {
2346 /* If target does not support semi-hosting function, target
2347 has no need to provide .get_gdb_fileio_info callback.
2348 It just return ERROR_FAIL and gdb_server will return "Txx"
2349 as target halted every time. */
2350 return ERROR_FAIL;
2351 }
2352
2353 static int target_gdb_fileio_end_default(struct target *target,
2354 int retcode, int fileio_errno, bool ctrl_c)
2355 {
2356 return ERROR_OK;
2357 }
2358
2359 int target_profiling_default(struct target *target, uint32_t *samples,
2360 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2361 {
2362 struct timeval timeout, now;
2363
2364 gettimeofday(&timeout, NULL);
2365 timeval_add_time(&timeout, seconds, 0);
2366
2367 LOG_INFO("Starting profiling. Halting and resuming the"
2368 " target as often as we can...");
2369
2370 uint32_t sample_count = 0;
2371 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2372 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2373
2374 int retval = ERROR_OK;
2375 for (;;) {
2376 target_poll(target);
2377 if (target->state == TARGET_HALTED) {
2378 uint32_t t = buf_get_u32(reg->value, 0, 32);
2379 samples[sample_count++] = t;
2380 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2381 retval = target_resume(target, 1, 0, 0, 0);
2382 target_poll(target);
2383 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2384 } else if (target->state == TARGET_RUNNING) {
2385 /* We want to quickly sample the PC. */
2386 retval = target_halt(target);
2387 } else {
2388 LOG_INFO("Target not halted or running");
2389 retval = ERROR_OK;
2390 break;
2391 }
2392
2393 if (retval != ERROR_OK)
2394 break;
2395
2396 gettimeofday(&now, NULL);
2397 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2398 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2399 break;
2400 }
2401 }
2402
2403 *num_samples = sample_count;
2404 return retval;
2405 }
2406
2407 /* Single aligned words are guaranteed to use 16 or 32 bit access
2408 * mode respectively, otherwise data is handled as quickly as
2409 * possible
2410 */
2411 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2412 {
2413 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2414 size, address);
2415
2416 if (!target_was_examined(target)) {
2417 LOG_ERROR("Target not examined yet");
2418 return ERROR_FAIL;
2419 }
2420
2421 if (size == 0)
2422 return ERROR_OK;
2423
2424 if ((address + size - 1) < address) {
2425 /* GDB can request this when e.g. PC is 0xfffffffc */
2426 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2427 address,
2428 size);
2429 return ERROR_FAIL;
2430 }
2431
2432 return target->type->write_buffer(target, address, size, buffer);
2433 }
2434
/* Default .write_buffer implementation: write @count bytes to @address
 * using the widest access size the target supports, handling a misaligned
 * start with progressively larger accesses. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	/* Widest single access this target supports, in bytes */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means address is misaligned for 2*size */
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Number of bytes expressible as whole accesses of this size */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2471
2472 /* Single aligned words are guaranteed to use 16 or 32 bit access
2473 * mode respectively, otherwise data is handled as quickly as
2474 * possible
2475 */
2476 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2477 {
2478 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2479 size, address);
2480
2481 if (!target_was_examined(target)) {
2482 LOG_ERROR("Target not examined yet");
2483 return ERROR_FAIL;
2484 }
2485
2486 if (size == 0)
2487 return ERROR_OK;
2488
2489 if ((address + size - 1) < address) {
2490 /* GDB can request this when e.g. PC is 0xfffffffc */
2491 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2492 address,
2493 size);
2494 return ERROR_FAIL;
2495 }
2496
2497 return target->type->read_buffer(target, address, size, buffer);
2498 }
2499
/* Default .read_buffer implementation: read @count bytes from @address
 * using the widest access size the target supports, handling a misaligned
 * start with progressively larger accesses. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	/* Widest single access this target supports, in bytes */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means address is misaligned for 2*size */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Number of bytes expressible as whole accesses of this size */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2535
/* Compute a checksum over @size bytes at @address into *@crc.
 * Prefers the target type's checksum_memory implementation (typically run
 * on the target itself); on failure, falls back to reading the region back
 * and checksumming host-side with image_calculate_checksum(). */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Fall back to a host-side checksum */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): target_buffer_get_u32() immediately followed by
		 * target_buffer_set_u32() on the same target looks like a
		 * round-trip no-op -- confirm the byte order that
		 * image_calculate_checksum() actually expects. */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2579
2580 int target_blank_check_memory(struct target *target,
2581 struct target_memory_check_block *blocks, int num_blocks,
2582 uint8_t erased_value)
2583 {
2584 if (!target_was_examined(target)) {
2585 LOG_ERROR("Target not examined yet");
2586 return ERROR_FAIL;
2587 }
2588
2589 if (!target->type->blank_check_memory)
2590 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2591
2592 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2593 }
2594
2595 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2596 {
2597 uint8_t value_buf[8];
2598 if (!target_was_examined(target)) {
2599 LOG_ERROR("Target not examined yet");
2600 return ERROR_FAIL;
2601 }
2602
2603 int retval = target_read_memory(target, address, 8, 1, value_buf);
2604
2605 if (retval == ERROR_OK) {
2606 *value = target_buffer_get_u64(target, value_buf);
2607 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2608 address,
2609 *value);
2610 } else {
2611 *value = 0x0;
2612 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2613 address);
2614 }
2615
2616 return retval;
2617 }
2618
2619 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2620 {
2621 uint8_t value_buf[4];
2622 if (!target_was_examined(target)) {
2623 LOG_ERROR("Target not examined yet");
2624 return ERROR_FAIL;
2625 }
2626
2627 int retval = target_read_memory(target, address, 4, 1, value_buf);
2628
2629 if (retval == ERROR_OK) {
2630 *value = target_buffer_get_u32(target, value_buf);
2631 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2632 address,
2633 *value);
2634 } else {
2635 *value = 0x0;
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2637 address);
2638 }
2639
2640 return retval;
2641 }
2642
2643 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2644 {
2645 uint8_t value_buf[2];
2646 if (!target_was_examined(target)) {
2647 LOG_ERROR("Target not examined yet");
2648 return ERROR_FAIL;
2649 }
2650
2651 int retval = target_read_memory(target, address, 2, 1, value_buf);
2652
2653 if (retval == ERROR_OK) {
2654 *value = target_buffer_get_u16(target, value_buf);
2655 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2656 address,
2657 *value);
2658 } else {
2659 *value = 0x0;
2660 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2661 address);
2662 }
2663
2664 return retval;
2665 }
2666
2667 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2668 {
2669 if (!target_was_examined(target)) {
2670 LOG_ERROR("Target not examined yet");
2671 return ERROR_FAIL;
2672 }
2673
2674 int retval = target_read_memory(target, address, 1, 1, value);
2675
2676 if (retval == ERROR_OK) {
2677 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2678 address,
2679 *value);
2680 } else {
2681 *value = 0x0;
2682 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2683 address);
2684 }
2685
2686 return retval;
2687 }
2688
2689 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2690 {
2691 int retval;
2692 uint8_t value_buf[8];
2693 if (!target_was_examined(target)) {
2694 LOG_ERROR("Target not examined yet");
2695 return ERROR_FAIL;
2696 }
2697
2698 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2699 address,
2700 value);
2701
2702 target_buffer_set_u64(target, value_buf, value);
2703 retval = target_write_memory(target, address, 8, 1, value_buf);
2704 if (retval != ERROR_OK)
2705 LOG_DEBUG("failed: %i", retval);
2706
2707 return retval;
2708 }
2709
2710 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2711 {
2712 int retval;
2713 uint8_t value_buf[4];
2714 if (!target_was_examined(target)) {
2715 LOG_ERROR("Target not examined yet");
2716 return ERROR_FAIL;
2717 }
2718
2719 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2720 address,
2721 value);
2722
2723 target_buffer_set_u32(target, value_buf, value);
2724 retval = target_write_memory(target, address, 4, 1, value_buf);
2725 if (retval != ERROR_OK)
2726 LOG_DEBUG("failed: %i", retval);
2727
2728 return retval;
2729 }
2730
2731 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2732 {
2733 int retval;
2734 uint8_t value_buf[2];
2735 if (!target_was_examined(target)) {
2736 LOG_ERROR("Target not examined yet");
2737 return ERROR_FAIL;
2738 }
2739
2740 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2741 address,
2742 value);
2743
2744 target_buffer_set_u16(target, value_buf, value);
2745 retval = target_write_memory(target, address, 2, 1, value_buf);
2746 if (retval != ERROR_OK)
2747 LOG_DEBUG("failed: %i", retval);
2748
2749 return retval;
2750 }
2751
2752 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2753 {
2754 int retval;
2755 if (!target_was_examined(target)) {
2756 LOG_ERROR("Target not examined yet");
2757 return ERROR_FAIL;
2758 }
2759
2760 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2761 address, value);
2762
2763 retval = target_write_memory(target, address, 1, 1, &value);
2764 if (retval != ERROR_OK)
2765 LOG_DEBUG("failed: %i", retval);
2766
2767 return retval;
2768 }
2769
2770 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2771 {
2772 int retval;
2773 uint8_t value_buf[8];
2774 if (!target_was_examined(target)) {
2775 LOG_ERROR("Target not examined yet");
2776 return ERROR_FAIL;
2777 }
2778
2779 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2780 address,
2781 value);
2782
2783 target_buffer_set_u64(target, value_buf, value);
2784 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2785 if (retval != ERROR_OK)
2786 LOG_DEBUG("failed: %i", retval);
2787
2788 return retval;
2789 }
2790
2791 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2792 {
2793 int retval;
2794 uint8_t value_buf[4];
2795 if (!target_was_examined(target)) {
2796 LOG_ERROR("Target not examined yet");
2797 return ERROR_FAIL;
2798 }
2799
2800 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2801 address,
2802 value);
2803
2804 target_buffer_set_u32(target, value_buf, value);
2805 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2806 if (retval != ERROR_OK)
2807 LOG_DEBUG("failed: %i", retval);
2808
2809 return retval;
2810 }
2811
2812 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2813 {
2814 int retval;
2815 uint8_t value_buf[2];
2816 if (!target_was_examined(target)) {
2817 LOG_ERROR("Target not examined yet");
2818 return ERROR_FAIL;
2819 }
2820
2821 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2822 address,
2823 value);
2824
2825 target_buffer_set_u16(target, value_buf, value);
2826 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2827 if (retval != ERROR_OK)
2828 LOG_DEBUG("failed: %i", retval);
2829
2830 return retval;
2831 }
2832
2833 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2834 {
2835 int retval;
2836 if (!target_was_examined(target)) {
2837 LOG_ERROR("Target not examined yet");
2838 return ERROR_FAIL;
2839 }
2840
2841 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2842 address, value);
2843
2844 retval = target_write_phys_memory(target, address, 1, 1, &value);
2845 if (retval != ERROR_OK)
2846 LOG_DEBUG("failed: %i", retval);
2847
2848 return retval;
2849 }
2850
2851 static int find_target(struct command_invocation *cmd, const char *name)
2852 {
2853 struct target *target = get_target(name);
2854 if (!target) {
2855 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2856 return ERROR_FAIL;
2857 }
2858 if (!target->tap->enabled) {
2859 command_print(cmd, "Target: TAP %s is disabled, "
2860 "can't be the current target\n",
2861 target->tap->dotted_name);
2862 return ERROR_FAIL;
2863 }
2864
2865 cmd->ctx->current_target = target;
2866 if (cmd->ctx->current_target_override)
2867 cmd->ctx->current_target_override = target;
2868
2869 return ERROR_OK;
2870 }
2871
2872
/* "targets" command: with one argument, select that target as current;
 * with no arguments (or on lookup failure) print a table of all targets,
 * marking the current one with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	/* Fall through to listing: either no argument was given, or the
	 * lookup failed and we show the valid choices. */
	struct target *target = all_targets;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the command context's current target */
		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
			"%2d%c %-18s %-10s %-6s %-18s %s",
			target->target_number,
			marker,
			target_name(target),
			target_type_name(target),
			jim_nvp_value2name_simple(nvp_target_endian,
				target->endianness)->name,
			target->tap->dotted_name,
			state);
		target = target->next;
	}

	return retval;
}
2915
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Most recently sampled conditions, refreshed by sense_handler() */
static int power_dropout;
static int srst_asserted;

/* Edge-detect action flags: set by sense_handler(), consumed and cleared
 * by handle_target() */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2925
/* Sample power-dropout and SRST state from the JTAG layer, edge-detect
 * changes against the previous sample, and set the run_* action flags for
 * handle_target(). Dropout and SRST-deassert events are rate-limited to
 * one per 2000 ms. */
static int sense_handler(void)
{
	/* Previous samples, for edge detection */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	/* Rate limit: at most one dropout event per 2 seconds */
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	/* Rate limit: at most one deassert event per 2 seconds */
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2977
/* process target state changes */
/* Periodic housekeeping: run the power/SRST event procs detected by
 * sense_handler(), then poll every enabled, examined target, backing off
 * exponentially (up to 5000 ms) on poll failures and attempting
 * reexamination before resuming normal polling.
 *
 * @param priv The Jim interpreter used to evaluate the Tcl event procs. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		/* Backoff: skip this target until 'times' skipped polls have
		 * accumulated since the last failure */
		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			/* A non-zero backoff means the last poll failed: try a
			 * full reexamination before polling again */
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3090
/*
 * 'reg' command:
 *   reg                    - list all registers of all register caches
 *   reg <num|name>         - print one register (reading it if not cached)
 *   reg <num|name> force   - discard the cached value and re-read it
 *   reg <num|name> <value> - write <value> to the register
 */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		/* running ordinal across all caches; these are the same
		 * numbers accepted by "reg <num>" below */
		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* skip registers absent on this target variant or hidden */
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk all caches, counting registers until 'num' is reached */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" invalidates the cache so the value is re-read below */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			/* echo the value now held in the register cache */
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
3221
3222 COMMAND_HANDLER(handle_poll_command)
3223 {
3224 int retval = ERROR_OK;
3225 struct target *target = get_current_target(CMD_CTX);
3226
3227 if (CMD_ARGC == 0) {
3228 command_print(CMD, "background polling: %s",
3229 jtag_poll_get_enabled() ? "on" : "off");
3230 command_print(CMD, "TAP: %s (%s)",
3231 target->tap->dotted_name,
3232 target->tap->enabled ? "enabled" : "disabled");
3233 if (!target->tap->enabled)
3234 return ERROR_OK;
3235 retval = target_poll(target);
3236 if (retval != ERROR_OK)
3237 return retval;
3238 retval = target_arch_state(target);
3239 if (retval != ERROR_OK)
3240 return retval;
3241 } else if (CMD_ARGC == 1) {
3242 bool enable;
3243 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3244 jtag_poll_set_enabled(enable);
3245 } else
3246 return ERROR_COMMAND_SYNTAX_ERROR;
3247
3248 return retval;
3249 }
3250
3251 COMMAND_HANDLER(handle_wait_halt_command)
3252 {
3253 if (CMD_ARGC > 1)
3254 return ERROR_COMMAND_SYNTAX_ERROR;
3255
3256 unsigned ms = DEFAULT_HALT_TIMEOUT;
3257 if (1 == CMD_ARGC) {
3258 int retval = parse_uint(CMD_ARGV[0], &ms);
3259 if (retval != ERROR_OK)
3260 return ERROR_COMMAND_SYNTAX_ERROR;
3261 }
3262
3263 struct target *target = get_current_target(CMD_CTX);
3264 return target_wait_state(target, TARGET_HALTED, ms);
3265 }
3266
3267 /* wait for target state to change. The trick here is to have a low
3268 * latency for short waits and not to suck up all the CPU time
3269 * on longer waits.
3270 *
3271 * After 500ms, keep_alive() is invoked
3272 */
3273 int target_wait_state(struct target *target, enum target_state state, int ms)
3274 {
3275 int retval;
3276 int64_t then = 0, cur;
3277 bool once = true;
3278
3279 for (;;) {
3280 retval = target_poll(target);
3281 if (retval != ERROR_OK)
3282 return retval;
3283 if (target->state == state)
3284 break;
3285 cur = timeval_ms();
3286 if (once) {
3287 once = false;
3288 then = timeval_ms();
3289 LOG_DEBUG("waiting for target %s...",
3290 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3291 }
3292
3293 if (cur-then > 500)
3294 keep_alive();
3295
3296 if ((cur-then) > ms) {
3297 LOG_ERROR("timed out while waiting for target %s",
3298 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3299 return ERROR_FAIL;
3300 }
3301 }
3302
3303 return ERROR_OK;
3304 }
3305
3306 COMMAND_HANDLER(handle_halt_command)
3307 {
3308 LOG_DEBUG("-");
3309
3310 struct target *target = get_current_target(CMD_CTX);
3311
3312 target->verbose_halt_msg = true;
3313
3314 int retval = target_halt(target);
3315 if (retval != ERROR_OK)
3316 return retval;
3317
3318 if (CMD_ARGC == 1) {
3319 unsigned wait_local;
3320 retval = parse_uint(CMD_ARGV[0], &wait_local);
3321 if (retval != ERROR_OK)
3322 return ERROR_COMMAND_SYNTAX_ERROR;
3323 if (!wait_local)
3324 return ERROR_OK;
3325 }
3326
3327 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3328 }
3329
3330 COMMAND_HANDLER(handle_soft_reset_halt_command)
3331 {
3332 struct target *target = get_current_target(CMD_CTX);
3333
3334 LOG_USER("requesting target halt and executing a soft reset");
3335
3336 target_soft_reset_halt(target);
3337
3338 return ERROR_OK;
3339 }
3340
3341 COMMAND_HANDLER(handle_reset_command)
3342 {
3343 if (CMD_ARGC > 1)
3344 return ERROR_COMMAND_SYNTAX_ERROR;
3345
3346 enum target_reset_mode reset_mode = RESET_RUN;
3347 if (CMD_ARGC == 1) {
3348 const struct jim_nvp *n;
3349 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3350 if ((!n->name) || (n->value == RESET_UNKNOWN))
3351 return ERROR_COMMAND_SYNTAX_ERROR;
3352 reset_mode = n->value;
3353 }
3354
3355 /* reset *all* targets */
3356 return target_process_reset(CMD, reset_mode);
3357 }
3358
3359
3360 COMMAND_HANDLER(handle_resume_command)
3361 {
3362 int current = 1;
3363 if (CMD_ARGC > 1)
3364 return ERROR_COMMAND_SYNTAX_ERROR;
3365
3366 struct target *target = get_current_target(CMD_CTX);
3367
3368 /* with no CMD_ARGV, resume from current pc, addr = 0,
3369 * with one arguments, addr = CMD_ARGV[0],
3370 * handle breakpoints, not debugging */
3371 target_addr_t addr = 0;
3372 if (CMD_ARGC == 1) {
3373 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3374 current = 0;
3375 }
3376
3377 return target_resume(target, current, addr, 1, 0);
3378 }
3379
3380 COMMAND_HANDLER(handle_step_command)
3381 {
3382 if (CMD_ARGC > 1)
3383 return ERROR_COMMAND_SYNTAX_ERROR;
3384
3385 LOG_DEBUG("-");
3386
3387 /* with no CMD_ARGV, step from current pc, addr = 0,
3388 * with one argument addr = CMD_ARGV[0],
3389 * handle breakpoints, debugging */
3390 target_addr_t addr = 0;
3391 int current_pc = 1;
3392 if (CMD_ARGC == 1) {
3393 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3394 current_pc = 0;
3395 }
3396
3397 struct target *target = get_current_target(CMD_CTX);
3398
3399 return target_step(target, current_pc, addr, 1);
3400 }
3401
/*
 * Pretty-print a memory buffer for the md[dwhb] commands: hex values,
 * 32 bytes per output line, each line prefixed by its start address.
 * 'size' is the element width in bytes (1, 2, 4 or 8); 'count' is the
 * number of elements in 'buffer'.
 */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	/* bytes shown per output line; elements per line follows from size */
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	/* sized for the worst case: address prefix plus all values of a line */
	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	/* per-value format string, chosen by the access width */
	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* start of a new line: emit the address prefix */
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		/* decode one element honoring the target's endianness */
		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at end of line or end of buffer */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3465
3466 COMMAND_HANDLER(handle_md_command)
3467 {
3468 if (CMD_ARGC < 1)
3469 return ERROR_COMMAND_SYNTAX_ERROR;
3470
3471 unsigned size = 0;
3472 switch (CMD_NAME[2]) {
3473 case 'd':
3474 size = 8;
3475 break;
3476 case 'w':
3477 size = 4;
3478 break;
3479 case 'h':
3480 size = 2;
3481 break;
3482 case 'b':
3483 size = 1;
3484 break;
3485 default:
3486 return ERROR_COMMAND_SYNTAX_ERROR;
3487 }
3488
3489 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3490 int (*fn)(struct target *target,
3491 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3492 if (physical) {
3493 CMD_ARGC--;
3494 CMD_ARGV++;
3495 fn = target_read_phys_memory;
3496 } else
3497 fn = target_read_memory;
3498 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3499 return ERROR_COMMAND_SYNTAX_ERROR;
3500
3501 target_addr_t address;
3502 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3503
3504 unsigned count = 1;
3505 if (CMD_ARGC == 2)
3506 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3507
3508 uint8_t *buffer = calloc(count, size);
3509 if (!buffer) {
3510 LOG_ERROR("Failed to allocate md read buffer");
3511 return ERROR_FAIL;
3512 }
3513
3514 struct target *target = get_current_target(CMD_CTX);
3515 int retval = fn(target, address, size, count, buffer);
3516 if (retval == ERROR_OK)
3517 target_handle_md_output(CMD, target, address, size, count, buffer);
3518
3519 free(buffer);
3520
3521 return retval;
3522 }
3523
/* Signature shared by target_write_memory() and target_write_phys_memory(),
 * letting fill/write helpers be parameterized over the address space. */
typedef int (*target_write_fn)(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3526
3527 static int target_fill_mem(struct target *target,
3528 target_addr_t address,
3529 target_write_fn fn,
3530 unsigned data_size,
3531 /* value */
3532 uint64_t b,
3533 /* count */
3534 unsigned c)
3535 {
3536 /* We have to write in reasonably large chunks to be able
3537 * to fill large memory areas with any sane speed */
3538 const unsigned chunk_size = 16384;
3539 uint8_t *target_buf = malloc(chunk_size * data_size);
3540 if (!target_buf) {
3541 LOG_ERROR("Out of memory");
3542 return ERROR_FAIL;
3543 }
3544
3545 for (unsigned i = 0; i < chunk_size; i++) {
3546 switch (data_size) {
3547 case 8:
3548 target_buffer_set_u64(target, target_buf + i * data_size, b);
3549 break;
3550 case 4:
3551 target_buffer_set_u32(target, target_buf + i * data_size, b);
3552 break;
3553 case 2:
3554 target_buffer_set_u16(target, target_buf + i * data_size, b);
3555 break;
3556 case 1:
3557 target_buffer_set_u8(target, target_buf + i * data_size, b);
3558 break;
3559 default:
3560 exit(-1);
3561 }
3562 }
3563
3564 int retval = ERROR_OK;
3565
3566 for (unsigned x = 0; x < c; x += chunk_size) {
3567 unsigned current;
3568 current = c - x;
3569 if (current > chunk_size)
3570 current = chunk_size;
3571 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3572 if (retval != ERROR_OK)
3573 break;
3574 /* avoid GDB timeouts */
3575 keep_alive();
3576 }
3577 free(target_buf);
3578
3579 return retval;
3580 }
3581
3582
3583 COMMAND_HANDLER(handle_mw_command)
3584 {
3585 if (CMD_ARGC < 2)
3586 return ERROR_COMMAND_SYNTAX_ERROR;
3587 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3588 target_write_fn fn;
3589 if (physical) {
3590 CMD_ARGC--;
3591 CMD_ARGV++;
3592 fn = target_write_phys_memory;
3593 } else
3594 fn = target_write_memory;
3595 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3596 return ERROR_COMMAND_SYNTAX_ERROR;
3597
3598 target_addr_t address;
3599 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3600
3601 uint64_t value;
3602 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3603
3604 unsigned count = 1;
3605 if (CMD_ARGC == 3)
3606 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3607
3608 struct target *target = get_current_target(CMD_CTX);
3609 unsigned wordsize;
3610 switch (CMD_NAME[2]) {
3611 case 'd':
3612 wordsize = 8;
3613 break;
3614 case 'w':
3615 wordsize = 4;
3616 break;
3617 case 'h':
3618 wordsize = 2;
3619 break;
3620 case 'b':
3621 wordsize = 1;
3622 break;
3623 default:
3624 return ERROR_COMMAND_SYNTAX_ERROR;
3625 }
3626
3627 return target_fill_mem(target, address, fn, wordsize, value, count);
3628 }
3629
3630 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3631 target_addr_t *min_address, target_addr_t *max_address)
3632 {
3633 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3634 return ERROR_COMMAND_SYNTAX_ERROR;
3635
3636 /* a base address isn't always necessary,
3637 * default to 0x0 (i.e. don't relocate) */
3638 if (CMD_ARGC >= 2) {
3639 target_addr_t addr;
3640 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3641 image->base_address = addr;
3642 image->base_address_set = true;
3643 } else
3644 image->base_address_set = false;
3645
3646 image->start_address_set = false;
3647
3648 if (CMD_ARGC >= 4)
3649 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3650 if (CMD_ARGC == 5) {
3651 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3652 /* use size (given) to find max (required) */
3653 *max_address += *min_address;
3654 }
3655
3656 if (*min_address > *max_address)
3657 return ERROR_COMMAND_SYNTAX_ERROR;
3658
3659 return ERROR_OK;
3660 }
3661
/*
 * 'load_image' command: read an image file section by section and write
 * each section (clipped to the optional [min_address, max_address) window)
 * into target memory, reporting the transfer rate.
 */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* only write the part of the section overlapping the
		 * [min_address, max_address) window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3745
3746 COMMAND_HANDLER(handle_dump_image_command)
3747 {
3748 struct fileio *fileio;
3749 uint8_t *buffer;
3750 int retval, retvaltemp;
3751 target_addr_t address, size;
3752 struct duration bench;
3753 struct target *target = get_current_target(CMD_CTX);
3754
3755 if (CMD_ARGC != 3)
3756 return ERROR_COMMAND_SYNTAX_ERROR;
3757
3758 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3759 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3760
3761 uint32_t buf_size = (size > 4096) ? 4096 : size;
3762 buffer = malloc(buf_size);
3763 if (!buffer)
3764 return ERROR_FAIL;
3765
3766 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3767 if (retval != ERROR_OK) {
3768 free(buffer);
3769 return retval;
3770 }
3771
3772 duration_start(&bench);
3773
3774 while (size > 0) {
3775 size_t size_written;
3776 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3777 retval = target_read_buffer(target, address, this_run_size, buffer);
3778 if (retval != ERROR_OK)
3779 break;
3780
3781 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3782 if (retval != ERROR_OK)
3783 break;
3784
3785 size -= this_run_size;
3786 address += this_run_size;
3787 }
3788
3789 free(buffer);
3790
3791 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3792 size_t filesize;
3793 retval = fileio_size(fileio, &filesize);
3794 if (retval != ERROR_OK)
3795 return retval;
3796 command_print(CMD,
3797 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3798 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3799 }
3800
3801 retvaltemp = fileio_close(fileio);
3802 if (retvaltemp != ERROR_OK)
3803 return retvaltemp;
3804
3805 return retval;
3806 }
3807
/* How much checking the verify_image-family commands perform
 * (see handle_verify_image_command_internal). */
enum verify_mode {
	IMAGE_TEST = 0,		/* print section addresses/lengths only, no comparison */
	IMAGE_VERIFY = 1,	/* checksum compare; binary compare on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* checksum compare only; a mismatch is an error */
};
3813
3814 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3815 {
3816 uint8_t *buffer;
3817 size_t buf_cnt;
3818 uint32_t image_size;
3819 int retval;
3820 uint32_t checksum = 0;
3821 uint32_t mem_checksum = 0;
3822
3823 struct image image;
3824
3825 struct target *target = get_current_target(CMD_CTX);
3826
3827 if (CMD_ARGC < 1)
3828 return ERROR_COMMAND_SYNTAX_ERROR;
3829
3830 if (!target) {
3831 LOG_ERROR("no target selected");
3832 return ERROR_FAIL;
3833 }
3834
3835 struct duration bench;
3836 duration_start(&bench);
3837
3838 if (CMD_ARGC >= 2) {
3839 target_addr_t addr;
3840 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3841 image.base_address = addr;
3842 image.base_address_set = true;
3843 } else {
3844 image.base_address_set = false;
3845 image.base_address = 0x0;
3846 }
3847
3848 image.start_address_set = false;
3849
3850 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3851 if (retval != ERROR_OK)
3852 return retval;
3853
3854 image_size = 0x0;
3855 int diffs = 0;
3856 retval = ERROR_OK;
3857 for (unsigned int i = 0; i < image.num_sections; i++) {
3858 buffer = malloc(image.sections[i].size);
3859 if (!buffer) {
3860 command_print(CMD,
3861 "error allocating buffer for section (%" PRIu32 " bytes)",
3862 image.sections[i].size);
3863 break;
3864 }
3865 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3866 if (retval != ERROR_OK) {
3867 free(buffer);
3868 break;
3869 }
3870
3871 if (verify >= IMAGE_VERIFY) {
3872 /* calculate checksum of image */
3873 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3874 if (retval != ERROR_OK) {
3875 free(buffer);
3876 break;
3877 }
3878
3879 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3880 if (retval != ERROR_OK) {
3881 free(buffer);
3882 break;
3883 }
3884 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3885 LOG_ERROR("checksum mismatch");
3886 free(buffer);
3887 retval = ERROR_FAIL;
3888 goto done;
3889 }
3890 if (checksum != mem_checksum) {
3891 /* failed crc checksum, fall back to a binary compare */
3892 uint8_t *data;
3893
3894 if (diffs == 0)
3895 LOG_ERROR("checksum mismatch - attempting binary compare");
3896
3897 data = malloc(buf_cnt);
3898
3899 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3900 if (retval == ERROR_OK) {
3901 uint32_t t;
3902 for (t = 0; t < buf_cnt; t++) {
3903 if (data[t] != buffer[t]) {
3904 command_print(CMD,
3905 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3906 diffs,
3907 (unsigned)(t + image.sections[i].base_address),
3908 data[t],
3909 buffer[t]);
3910 if (diffs++ >= 127) {
3911 command_print(CMD, "More than 128 errors, the rest are not printed.");
3912 free(data);
3913 free(buffer);
3914 goto done;
3915 }
3916 }
3917 keep_alive();
3918 }
3919 }
3920 free(data);
3921 }
3922 } else {
3923 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3924 image.sections[i].base_address,
3925 buf_cnt);
3926 }
3927
3928 free(buffer);
3929 image_size += buf_cnt;
3930 }
3931 if (diffs > 0)
3932 command_print(CMD, "No more differences found.");
3933 done:
3934 if (diffs > 0)
3935 retval = ERROR_FAIL;
3936 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3937 command_print(CMD, "verified %" PRIu32 " bytes "
3938 "in %fs (%0.3f KiB/s)", image_size,
3939 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3940 }
3941
3942 image_close(&image);
3943
3944 return retval;
3945 }
3946
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	/* checksum-only verification: any mismatch is reported as an error */
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3951
COMMAND_HANDLER(handle_verify_image_command)
{
	/* full verification: checksum, with binary compare on mismatch */
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3956
COMMAND_HANDLER(handle_test_image_command)
{
	/* no comparison: only parse the image and print section info */
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3961
3962 static int handle_bp_command_list(struct command_invocation *cmd)
3963 {
3964 struct target *target = get_current_target(cmd->ctx);
3965 struct breakpoint *breakpoint = target->breakpoints;
3966 while (breakpoint) {
3967 if (breakpoint->type == BKPT_SOFT) {
3968 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3969 breakpoint->length);
3970 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3971 breakpoint->address,
3972 breakpoint->length,
3973 breakpoint->set, buf);
3974 free(buf);
3975 } else {
3976 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3977 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3978 breakpoint->asid,
3979 breakpoint->length, breakpoint->set);
3980 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3981 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3982 breakpoint->address,
3983 breakpoint->length, breakpoint->set);
3984 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3985 breakpoint->asid);
3986 } else
3987 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3988 breakpoint->address,
3989 breakpoint->length, breakpoint->set);
3990 }
3991
3992 breakpoint = breakpoint->next;
3993 }
3994 return ERROR_OK;
3995 }
3996
3997 static int handle_bp_command_set(struct command_invocation *cmd,
3998 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3999 {
4000 struct target *target = get_current_target(cmd->ctx);
4001 int retval;
4002
4003 if (asid == 0) {
4004 retval = breakpoint_add(target, addr, length, hw);
4005 /* error is always logged in breakpoint_add(), do not print it again */
4006 if (retval == ERROR_OK)
4007 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4008
4009 } else if (addr == 0) {
4010 if (!target->type->add_context_breakpoint) {
4011 LOG_ERROR("Context breakpoint not available");
4012 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4013 }
4014 retval = context_breakpoint_add(target, asid, length, hw);
4015 /* error is always logged in context_breakpoint_add(), do not print it again */
4016 if (retval == ERROR_OK)
4017 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4018
4019 } else {
4020 if (!target->type->add_hybrid_breakpoint) {
4021 LOG_ERROR("Hybrid breakpoint not available");
4022 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4023 }
4024 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4025 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4026 if (retval == ERROR_OK)
4027 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4028 }
4029 return retval;
4030 }
4031
/*
 * 'bp' command: list breakpoints (no arguments) or set one.
 *   bp <addr> <len> ['hw']      - address breakpoint (soft, or hard with 'hw')
 *   bp <asid> <len> 'hw_ctx'    - context breakpoint (hardware)
 *   bp <addr> <asid> <len>      - hybrid breakpoint (hardware)
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		/* plain software breakpoint: address and length */
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			/* hardware address breakpoint */
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			/* hardware context breakpoint: first arg is an ASID */
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* fallthrough */
	case 4:
		/* hybrid breakpoint: address plus ASID (always hardware) */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4075
4076 COMMAND_HANDLER(handle_rbp_command)
4077 {
4078 if (CMD_ARGC != 1)
4079 return ERROR_COMMAND_SYNTAX_ERROR;
4080
4081 struct target *target = get_current_target(CMD_CTX);
4082
4083 if (!strcmp(CMD_ARGV[0], "all")) {
4084 breakpoint_remove_all(target);
4085 } else {
4086 target_addr_t addr;
4087 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4088
4089 breakpoint_remove(target, addr);
4090 }
4091
4092 return ERROR_OK;
4093 }
4094
/*
 * 'wp' command: list watchpoints (no arguments) or set one.
 *   wp <addr> <len> [r|w|a [value [mask]]]
 * Default mode is access ('a'); default mask is 0xffffffff (match any value).
 */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		/* list all watchpoints configured on the current target */
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	/* each case parses one optional argument, then falls through to
	 * parse the remaining mandatory ones */
	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4162
4163 COMMAND_HANDLER(handle_rwp_command)
4164 {
4165 if (CMD_ARGC != 1)
4166 return ERROR_COMMAND_SYNTAX_ERROR;
4167
4168 target_addr_t addr;
4169 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4170
4171 struct target *target = get_current_target(CMD_CTX);
4172 watchpoint_remove(target, addr);
4173
4174 return ERROR_OK;
4175 }
4176
4177 /**
4178 * Translate a virtual address to a physical address.
4179 *
4180 * The low-level target implementation must have logged a detailed error
4181 * which is forwarded to telnet/GDB session.
4182 */
4183 COMMAND_HANDLER(handle_virt2phys_command)
4184 {
4185 if (CMD_ARGC != 1)
4186 return ERROR_COMMAND_SYNTAX_ERROR;
4187
4188 target_addr_t va;
4189 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4190 target_addr_t pa;
4191
4192 struct target *target = get_current_target(CMD_CTX);
4193 int retval = target->type->virt2phys(target, va, &pa);
4194 if (retval == ERROR_OK)
4195 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4196
4197 return retval;
4198 }
4199
/* Best-effort write to the gmon output file; short writes are logged
 * but not propagated. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4206
/* Emit a 32-bit value to the gmon file using the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4214
/* Emit the characters of s (without the trailing NUL) to the gmon file. */
static void write_string(FILE *f, char *s)
{
	size_t len = strlen(s);
	write_data(f, s, len);
}
4219
/* two-byte histogram bucket, used to size buckets in write_gmon() */
typedef unsigned char UNIT[2]; /* unit of profiling */
4221
4222 /* Dump a gmon.out histogram file. */
4223 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4224 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4225 {
4226 uint32_t i;
4227 FILE *f = fopen(filename, "w");
4228 if (!f)
4229 return;
4230 write_string(f, "gmon");
4231 write_long(f, 0x00000001, target); /* Version */
4232 write_long(f, 0, target); /* padding */
4233 write_long(f, 0, target); /* padding */
4234 write_long(f, 0, target); /* padding */
4235
4236 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4237 write_data(f, &zero, 1);
4238
4239 /* figure out bucket size */
4240 uint32_t min;
4241 uint32_t max;
4242 if (with_range) {
4243 min = start_address;
4244 max = end_address;
4245 } else {
4246 min = samples[0];
4247 max = samples[0];
4248 for (i = 0; i < sample_num; i++) {
4249 if (min > samples[i])
4250 min = samples[i];
4251 if (max < samples[i])
4252 max = samples[i];
4253 }
4254
4255 /* max should be (largest sample + 1)
4256 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4257 max++;
4258 }
4259
4260 int address_space = max - min;
4261 assert(address_space >= 2);
4262
4263 /* FIXME: What is the reasonable number of buckets?
4264 * The profiling result will be more accurate if there are enough buckets. */
4265 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4266 uint32_t num_buckets = address_space / sizeof(UNIT);
4267 if (num_buckets > max_buckets)
4268 num_buckets = max_buckets;
4269 int *buckets = malloc(sizeof(int) * num_buckets);
4270 if (!buckets) {
4271 fclose(f);
4272 return;
4273 }
4274 memset(buckets, 0, sizeof(int) * num_buckets);
4275 for (i = 0; i < sample_num; i++) {
4276 uint32_t address = samples[i];
4277
4278 if ((address < min) || (max <= address))
4279 continue;
4280
4281 long long a = address - min;
4282 long long b = num_buckets;
4283 long long c = address_space;
4284 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4285 buckets[index_t]++;
4286 }
4287
4288 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4289 write_long(f, min, target); /* low_pc */
4290 write_long(f, max, target); /* high_pc */
4291 write_long(f, num_buckets, target); /* # of buckets */
4292 float sample_rate = sample_num / (duration_ms / 1000.0);
4293 write_long(f, sample_rate, target);
4294 write_string(f, "seconds");
4295 for (i = 0; i < (15-strlen("seconds")); i++)
4296 write_data(f, &zero, 1);
4297 write_string(f, "s");
4298
4299 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4300
4301 char *data = malloc(2 * num_buckets);
4302 if (data) {
4303 for (i = 0; i < num_buckets; i++) {
4304 int val;
4305 val = buckets[i];
4306 if (val > 65535)
4307 val = 65535;
4308 data[i * 2] = val&0xff;
4309 data[i * 2 + 1] = (val >> 8) & 0xff;
4310 }
4311 free(buckets);
4312 write_data(f, data, num_buckets * 2);
4313 free(data);
4314 } else
4315 free(buckets);
4316
4317 fclose(f);
4318 }
4319
4320 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4321 * which will be used as a random sampling of PC */
4322 COMMAND_HANDLER(handle_profile_command)
4323 {
4324 struct target *target = get_current_target(CMD_CTX);
4325
4326 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4327 return ERROR_COMMAND_SYNTAX_ERROR;
4328
4329 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4330 uint32_t offset;
4331 uint32_t num_of_samples;
4332 int retval = ERROR_OK;
4333 bool halted_before_profiling = target->state == TARGET_HALTED;
4334
4335 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4336
4337 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4338 if (!samples) {
4339 LOG_ERROR("No memory to store samples.");
4340 return ERROR_FAIL;
4341 }
4342
4343 uint64_t timestart_ms = timeval_ms();
4344 /**
4345 * Some cores let us sample the PC without the
4346 * annoying halt/resume step; for example, ARMv7 PCSR.
4347 * Provide a way to use that more efficient mechanism.
4348 */
4349 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4350 &num_of_samples, offset);
4351 if (retval != ERROR_OK) {
4352 free(samples);
4353 return retval;
4354 }
4355 uint32_t duration_ms = timeval_ms() - timestart_ms;
4356
4357 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4358
4359 retval = target_poll(target);
4360 if (retval != ERROR_OK) {
4361 free(samples);
4362 return retval;
4363 }
4364
4365 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4366 /* The target was halted before we started and is running now. Halt it,
4367 * for consistency. */
4368 retval = target_halt(target);
4369 if (retval != ERROR_OK) {
4370 free(samples);
4371 return retval;
4372 }
4373 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4374 /* The target was running before we started and is halted now. Resume
4375 * it, for consistency. */
4376 retval = target_resume(target, 1, 0, 0, 0);
4377 if (retval != ERROR_OK) {
4378 free(samples);
4379 return retval;
4380 }
4381 }
4382
4383 retval = target_poll(target);
4384 if (retval != ERROR_OK) {
4385 free(samples);
4386 return retval;
4387 }
4388
4389 uint32_t start_address = 0;
4390 uint32_t end_address = 0;
4391 bool with_range = false;
4392 if (CMD_ARGC == 4) {
4393 with_range = true;
4394 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4395 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4396 }
4397
4398 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4399 with_range, start_address, end_address, target, duration_ms);
4400 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4401
4402 free(samples);
4403 return retval;
4404 }
4405
4406 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4407 {
4408 char *namebuf;
4409 Jim_Obj *obj_name, *obj_val;
4410 int result;
4411
4412 namebuf = alloc_printf("%s(%d)", varname, idx);
4413 if (!namebuf)
4414 return JIM_ERR;
4415
4416 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4417 jim_wide wide_val = val;
4418 obj_val = Jim_NewWideObj(interp, wide_val);
4419 if (!obj_name || !obj_val) {
4420 free(namebuf);
4421 return JIM_ERR;
4422 }
4423
4424 Jim_IncrRefCount(obj_name);
4425 Jim_IncrRefCount(obj_val);
4426 result = Jim_SetVariable(interp, obj_name, obj_val);
4427 Jim_DecrRefCount(interp, obj_name);
4428 Jim_DecrRefCount(interp, obj_val);
4429 free(namebuf);
4430 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4431 return result;
4432 }
4433
4434 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4435 {
4436 struct command_context *context;
4437 struct target *target;
4438
4439 context = current_command_context(interp);
4440 assert(context);
4441
4442 target = get_current_target(context);
4443 if (!target) {
4444 LOG_ERROR("mem2array: no current target");
4445 return JIM_ERR;
4446 }
4447
4448 return target_mem2array(interp, target, argc - 1, argv + 1);
4449 }
4450
4451 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4452 {
4453 int e;
4454
4455 /* argv[0] = name of array to receive the data
4456 * argv[1] = desired element width in bits
4457 * argv[2] = memory address
4458 * argv[3] = count of times to read
4459 * argv[4] = optional "phys"
4460 */
4461 if (argc < 4 || argc > 5) {
4462 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4463 return JIM_ERR;
4464 }
4465
4466 /* Arg 0: Name of the array variable */
4467 const char *varname = Jim_GetString(argv[0], NULL);
4468
4469 /* Arg 1: Bit width of one element */
4470 long l;
4471 e = Jim_GetLong(interp, argv[1], &l);
4472 if (e != JIM_OK)
4473 return e;
4474 const unsigned int width_bits = l;
4475
4476 if (width_bits != 8 &&
4477 width_bits != 16 &&
4478 width_bits != 32 &&
4479 width_bits != 64) {
4480 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4481 Jim_AppendStrings(interp, Jim_GetResult(interp),
4482 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4483 return JIM_ERR;
4484 }
4485 const unsigned int width = width_bits / 8;
4486
4487 /* Arg 2: Memory address */
4488 jim_wide wide_addr;
4489 e = Jim_GetWide(interp, argv[2], &wide_addr);
4490 if (e != JIM_OK)
4491 return e;
4492 target_addr_t addr = (target_addr_t)wide_addr;
4493
4494 /* Arg 3: Number of elements to read */
4495 e = Jim_GetLong(interp, argv[3], &l);
4496 if (e != JIM_OK)
4497 return e;
4498 size_t len = l;
4499
4500 /* Arg 4: phys */
4501 bool is_phys = false;
4502 if (argc > 4) {
4503 int str_len = 0;
4504 const char *phys = Jim_GetString(argv[4], &str_len);
4505 if (!strncmp(phys, "phys", str_len))
4506 is_phys = true;
4507 else
4508 return JIM_ERR;
4509 }
4510
4511 /* Argument checks */
4512 if (len == 0) {
4513 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4514 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4515 return JIM_ERR;
4516 }
4517 if ((addr + (len * width)) < addr) {
4518 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4519 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4520 return JIM_ERR;
4521 }
4522 if (len > 65536) {
4523 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4524 Jim_AppendStrings(interp, Jim_GetResult(interp),
4525 "mem2array: too large read request, exceeds 64K items", NULL);
4526 return JIM_ERR;
4527 }
4528
4529 if ((width == 1) ||
4530 ((width == 2) && ((addr & 1) == 0)) ||
4531 ((width == 4) && ((addr & 3) == 0)) ||
4532 ((width == 8) && ((addr & 7) == 0))) {
4533 /* alignment correct */
4534 } else {
4535 char buf[100];
4536 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4537 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4538 addr,
4539 width);
4540 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4541 return JIM_ERR;
4542 }
4543
4544 /* Transfer loop */
4545
4546 /* index counter */
4547 size_t idx = 0;
4548
4549 const size_t buffersize = 4096;
4550 uint8_t *buffer = malloc(buffersize);
4551 if (!buffer)
4552 return JIM_ERR;
4553
4554 /* assume ok */
4555 e = JIM_OK;
4556 while (len) {
4557 /* Slurp... in buffer size chunks */
4558 const unsigned int max_chunk_len = buffersize / width;
4559 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4560
4561 int retval;
4562 if (is_phys)
4563 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4564 else
4565 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4566 if (retval != ERROR_OK) {
4567 /* BOO !*/
4568 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4569 addr,
4570 width,
4571 chunk_len);
4572 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4573 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4574 e = JIM_ERR;
4575 break;
4576 } else {
4577 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4578 uint64_t v = 0;
4579 switch (width) {
4580 case 8:
4581 v = target_buffer_get_u64(target, &buffer[i*width]);
4582 break;
4583 case 4:
4584 v = target_buffer_get_u32(target, &buffer[i*width]);
4585 break;
4586 case 2:
4587 v = target_buffer_get_u16(target, &buffer[i*width]);
4588 break;
4589 case 1:
4590 v = buffer[i] & 0x0ff;
4591 break;
4592 }
4593 new_u64_array_element(interp, varname, idx, v);
4594 }
4595 len -= chunk_len;
4596 addr += chunk_len * width;
4597 }
4598 }
4599
4600 free(buffer);
4601
4602 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4603
4604 return e;
4605 }
4606
4607 static int target_jim_read_memory(Jim_Interp *interp, int argc,
4608 Jim_Obj * const *argv)
4609 {
4610 /*
4611 * argv[1] = memory address
4612 * argv[2] = desired element width in bits
4613 * argv[3] = number of elements to read
4614 * argv[4] = optional "phys"
4615 */
4616
4617 if (argc < 4 || argc > 5) {
4618 Jim_WrongNumArgs(interp, 1, argv, "address width count ['phys']");
4619 return JIM_ERR;
4620 }
4621
4622 /* Arg 1: Memory address. */
4623 jim_wide wide_addr;
4624 int e;
4625 e = Jim_GetWide(interp, argv[1], &wide_addr);
4626
4627 if (e != JIM_OK)
4628 return e;
4629
4630 target_addr_t addr = (target_addr_t)wide_addr;
4631
4632 /* Arg 2: Bit width of one element. */
4633 long l;
4634 e = Jim_GetLong(interp, argv[2], &l);
4635
4636 if (e != JIM_OK)
4637 return e;
4638
4639 const unsigned int width_bits = l;
4640
4641 /* Arg 3: Number of elements to read. */
4642 e = Jim_GetLong(interp, argv[3], &l);
4643
4644 if (e != JIM_OK)
4645 return e;
4646
4647 size_t count = l;
4648
4649 /* Arg 4: Optional 'phys'. */
4650 bool is_phys = false;
4651
4652 if (argc > 4) {
4653 const char *phys = Jim_GetString(argv[4], NULL);
4654
4655 if (strcmp(phys, "phys")) {
4656 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4657 return JIM_ERR;
4658 }
4659
4660 is_phys = true;
4661 }
4662
4663 switch (width_bits) {
4664 case 8:
4665 case 16:
4666 case 32:
4667 case 64:
4668 break;
4669 default:
4670 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4671 return JIM_ERR;
4672 }
4673
4674 const unsigned int width = width_bits / 8;
4675
4676 if ((addr + (count * width)) < addr) {
4677 Jim_SetResultString(interp, "read_memory: addr + count wraps to zero", -1);
4678 return JIM_ERR;
4679 }
4680
4681 if (count > 65536) {
4682 Jim_SetResultString(interp, "read_memory: too large read request, exeeds 64K elements", -1);
4683 return JIM_ERR;
4684 }
4685
4686 struct command_context *cmd_ctx = current_command_context(interp);
4687 assert(cmd_ctx != NULL);
4688 struct target *target = get_current_target(cmd_ctx);
4689
4690 const size_t buffersize = 4096;
4691 uint8_t *buffer = malloc(buffersize);
4692
4693 if (!buffer) {
4694 LOG_ERROR("Failed to allocate memory");
4695 return JIM_ERR;
4696 }
4697
4698 Jim_Obj *result_list = Jim_NewListObj(interp, NULL, 0);
4699 Jim_IncrRefCount(result_list);
4700
4701 while (count > 0) {
4702 const unsigned int max_chunk_len = buffersize / width;
4703 const size_t chunk_len = MIN(count, max_chunk_len);
4704
4705 int retval;
4706
4707 if (is_phys)
4708 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4709 else
4710 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4711
4712 if (retval != ERROR_OK) {
4713 LOG_ERROR("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4714 addr, width_bits, chunk_len);
4715 Jim_SetResultString(interp, "read_memory: failed to read memory", -1);
4716 e = JIM_ERR;
4717 break;
4718 }
4719
4720 for (size_t i = 0; i < chunk_len ; i++) {
4721 uint64_t v = 0;
4722
4723 switch (width) {
4724 case 8:
4725 v = target_buffer_get_u64(target, &buffer[i * width]);
4726 break;
4727 case 4:
4728 v = target_buffer_get_u32(target, &buffer[i * width]);
4729 break;
4730 case 2:
4731 v = target_buffer_get_u16(target, &buffer[i * width]);
4732 break;
4733 case 1:
4734 v = buffer[i];
4735 break;
4736 }
4737
4738 char value_buf[11];
4739 snprintf(value_buf, sizeof(value_buf), "0x%" PRIx64, v);
4740
4741 Jim_ListAppendElement(interp, result_list,
4742 Jim_NewStringObj(interp, value_buf, -1));
4743 }
4744
4745 count -= chunk_len;
4746 addr += chunk_len * width;
4747 }
4748
4749 free(buffer);
4750
4751 if (e != JIM_OK) {
4752 Jim_DecrRefCount(interp, result_list);
4753 return e;
4754 }
4755
4756 Jim_SetResult(interp, result_list);
4757 Jim_DecrRefCount(interp, result_list);
4758
4759 return JIM_OK;
4760 }
4761
4762 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4763 {
4764 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4765 if (!namebuf)
4766 return JIM_ERR;
4767
4768 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4769 if (!obj_name) {
4770 free(namebuf);
4771 return JIM_ERR;
4772 }
4773
4774 Jim_IncrRefCount(obj_name);
4775 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4776 Jim_DecrRefCount(interp, obj_name);
4777 free(namebuf);
4778 if (!obj_val)
4779 return JIM_ERR;
4780
4781 jim_wide wide_val;
4782 int result = Jim_GetWide(interp, obj_val, &wide_val);
4783 *val = wide_val;
4784 return result;
4785 }
4786
4787 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4788 {
4789 struct command_context *context;
4790 struct target *target;
4791
4792 context = current_command_context(interp);
4793 assert(context);
4794
4795 target = get_current_target(context);
4796 if (!target) {
4797 LOG_ERROR("array2mem: no current target");
4798 return JIM_ERR;
4799 }
4800
4801 return target_array2mem(interp, target, argc-1, argv + 1);
4802 }
4803
4804 static int target_array2mem(Jim_Interp *interp, struct target *target,
4805 int argc, Jim_Obj *const *argv)
4806 {
4807 int e;
4808
4809 /* argv[0] = name of array from which to read the data
4810 * argv[1] = desired element width in bits
4811 * argv[2] = memory address
4812 * argv[3] = number of elements to write
4813 * argv[4] = optional "phys"
4814 */
4815 if (argc < 4 || argc > 5) {
4816 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4817 return JIM_ERR;
4818 }
4819
4820 /* Arg 0: Name of the array variable */
4821 const char *varname = Jim_GetString(argv[0], NULL);
4822
4823 /* Arg 1: Bit width of one element */
4824 long l;
4825 e = Jim_GetLong(interp, argv[1], &l);
4826 if (e != JIM_OK)
4827 return e;
4828 const unsigned int width_bits = l;
4829
4830 if (width_bits != 8 &&
4831 width_bits != 16 &&
4832 width_bits != 32 &&
4833 width_bits != 64) {
4834 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4835 Jim_AppendStrings(interp, Jim_GetResult(interp),
4836 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4837 return JIM_ERR;
4838 }
4839 const unsigned int width = width_bits / 8;
4840
4841 /* Arg 2: Memory address */
4842 jim_wide wide_addr;
4843 e = Jim_GetWide(interp, argv[2], &wide_addr);
4844 if (e != JIM_OK)
4845 return e;
4846 target_addr_t addr = (target_addr_t)wide_addr;
4847
4848 /* Arg 3: Number of elements to write */
4849 e = Jim_GetLong(interp, argv[3], &l);
4850 if (e != JIM_OK)
4851 return e;
4852 size_t len = l;
4853
4854 /* Arg 4: Phys */
4855 bool is_phys = false;
4856 if (argc > 4) {
4857 int str_len = 0;
4858 const char *phys = Jim_GetString(argv[4], &str_len);
4859 if (!strncmp(phys, "phys", str_len))
4860 is_phys = true;
4861 else
4862 return JIM_ERR;
4863 }
4864
4865 /* Argument checks */
4866 if (len == 0) {
4867 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4868 Jim_AppendStrings(interp, Jim_GetResult(interp),
4869 "array2mem: zero width read?", NULL);
4870 return JIM_ERR;
4871 }
4872
4873 if ((addr + (len * width)) < addr) {
4874 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4875 Jim_AppendStrings(interp, Jim_GetResult(interp),
4876 "array2mem: addr + len - wraps to zero?", NULL);
4877 return JIM_ERR;
4878 }
4879
4880 if (len > 65536) {
4881 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4882 Jim_AppendStrings(interp, Jim_GetResult(interp),
4883 "array2mem: too large memory write request, exceeds 64K items", NULL);
4884 return JIM_ERR;
4885 }
4886
4887 if ((width == 1) ||
4888 ((width == 2) && ((addr & 1) == 0)) ||
4889 ((width == 4) && ((addr & 3) == 0)) ||
4890 ((width == 8) && ((addr & 7) == 0))) {
4891 /* alignment correct */
4892 } else {
4893 char buf[100];
4894 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4895 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4896 addr,
4897 width);
4898 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4899 return JIM_ERR;
4900 }
4901
4902 /* Transfer loop */
4903
4904 /* assume ok */
4905 e = JIM_OK;
4906
4907 const size_t buffersize = 4096;
4908 uint8_t *buffer = malloc(buffersize);
4909 if (!buffer)
4910 return JIM_ERR;
4911
4912 /* index counter */
4913 size_t idx = 0;
4914
4915 while (len) {
4916 /* Slurp... in buffer size chunks */
4917 const unsigned int max_chunk_len = buffersize / width;
4918
4919 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4920
4921 /* Fill the buffer */
4922 for (size_t i = 0; i < chunk_len; i++, idx++) {
4923 uint64_t v = 0;
4924 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4925 free(buffer);
4926 return JIM_ERR;
4927 }
4928 switch (width) {
4929 case 8:
4930 target_buffer_set_u64(target, &buffer[i * width], v);
4931 break;
4932 case 4:
4933 target_buffer_set_u32(target, &buffer[i * width], v);
4934 break;
4935 case 2:
4936 target_buffer_set_u16(target, &buffer[i * width], v);
4937 break;
4938 case 1:
4939 buffer[i] = v & 0x0ff;
4940 break;
4941 }
4942 }
4943 len -= chunk_len;
4944
4945 /* Write the buffer to memory */
4946 int retval;
4947 if (is_phys)
4948 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4949 else
4950 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4951 if (retval != ERROR_OK) {
4952 /* BOO !*/
4953 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4954 addr,
4955 width,
4956 chunk_len);
4957 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4958 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4959 e = JIM_ERR;
4960 break;
4961 }
4962 addr += chunk_len * width;
4963 }
4964
4965 free(buffer);
4966
4967 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4968
4969 return e;
4970 }
4971
4972 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4973 Jim_Obj * const *argv)
4974 {
4975 /*
4976 * argv[1] = memory address
4977 * argv[2] = desired element width in bits
4978 * argv[3] = list of data to write
4979 * argv[4] = optional "phys"
4980 */
4981
4982 if (argc < 4 || argc > 5) {
4983 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4984 return JIM_ERR;
4985 }
4986
4987 /* Arg 1: Memory address. */
4988 int e;
4989 jim_wide wide_addr;
4990 e = Jim_GetWide(interp, argv[1], &wide_addr);
4991
4992 if (e != JIM_OK)
4993 return e;
4994
4995 target_addr_t addr = (target_addr_t)wide_addr;
4996
4997 /* Arg 2: Bit width of one element. */
4998 long l;
4999 e = Jim_GetLong(interp, argv[2], &l);
5000
5001 if (e != JIM_OK)
5002 return e;
5003
5004 const unsigned int width_bits = l;
5005 size_t count = Jim_ListLength(interp, argv[3]);
5006
5007 /* Arg 4: Optional 'phys'. */
5008 bool is_phys = false;
5009
5010 if (argc > 4) {
5011 const char *phys = Jim_GetString(argv[4], NULL);
5012
5013 if (strcmp(phys, "phys")) {
5014 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
5015 return JIM_ERR;
5016 }
5017
5018 is_phys = true;
5019 }
5020
5021 switch (width_bits) {
5022 case 8:
5023 case 16:
5024 case 32:
5025 case 64:
5026 break;
5027 default:
5028 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
5029 return JIM_ERR;
5030 }
5031
5032 const unsigned int width = width_bits / 8;
5033
5034 if ((addr + (count * width)) < addr) {
5035 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
5036 return JIM_ERR;
5037 }
5038
5039 if (count > 65536) {
5040 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
5041 return JIM_ERR;
5042 }
5043
5044 struct command_context *cmd_ctx = current_command_context(interp);
5045 assert(cmd_ctx != NULL);
5046 struct target *target = get_current_target(cmd_ctx);
5047
5048 const size_t buffersize = 4096;
5049 uint8_t *buffer = malloc(buffersize);
5050
5051 if (!buffer) {
5052 LOG_ERROR("Failed to allocate memory");
5053 return JIM_ERR;
5054 }
5055
5056 size_t j = 0;
5057
5058 while (count > 0) {
5059 const unsigned int max_chunk_len = buffersize / width;
5060 const size_t chunk_len = MIN(count, max_chunk_len);
5061
5062 for (size_t i = 0; i < chunk_len; i++, j++) {
5063 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5064 jim_wide element_wide;
5065 Jim_GetWide(interp, tmp, &element_wide);
5066
5067 const uint64_t v = element_wide;
5068
5069 switch (width) {
5070 case 8:
5071 target_buffer_set_u64(target, &buffer[i * width], v);
5072 break;
5073 case 4:
5074 target_buffer_set_u32(target, &buffer[i * width], v);
5075 break;
5076 case 2:
5077 target_buffer_set_u16(target, &buffer[i * width], v);
5078 break;
5079 case 1:
5080 buffer[i] = v & 0x0ff;
5081 break;
5082 }
5083 }
5084
5085 count -= chunk_len;
5086
5087 int retval;
5088
5089 if (is_phys)
5090 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5091 else
5092 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5093
5094 if (retval != ERROR_OK) {
5095 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5096 addr, width_bits, chunk_len);
5097 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5098 e = JIM_ERR;
5099 break;
5100 }
5101
5102 addr += chunk_len * width;
5103 }
5104
5105 free(buffer);
5106
5107 return e;
5108 }
5109
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/**
 * Run every Tcl handler body the target has registered for event @p e.
 *
 * While a handler runs, the command context's current target is
 * temporarily overridden to @p target (many event scripts rely on
 * "the current target" being the one that raised the event) and is
 * restored afterwards.  Script errors are reported to the user and the
 * interpreter error state is cleared; they are not propagated.
 */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
				target->target_number,
				target_name(target),
				target_type_name(target),
				e,
				target_event_name(e),
				Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			/* A handler may ask to drop the connection; stop running
			 * any remaining handlers in that case. */
			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* A script-level "return" is not an error; unwrap it. */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
					target_event_name(e),
					target_name(target),
					Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5158
5159 static int target_jim_get_reg(Jim_Interp *interp, int argc,
5160 Jim_Obj * const *argv)
5161 {
5162 bool force = false;
5163
5164 if (argc == 3) {
5165 const char *option = Jim_GetString(argv[1], NULL);
5166
5167 if (!strcmp(option, "-force")) {
5168 argc--;
5169 argv++;
5170 force = true;
5171 } else {
5172 Jim_SetResultFormatted(interp, "invalid option '%s'", option);
5173 return JIM_ERR;
5174 }
5175 }
5176
5177 if (argc != 2) {
5178 Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
5179 return JIM_ERR;
5180 }
5181
5182 const int length = Jim_ListLength(interp, argv[1]);
5183
5184 Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);
5185
5186 if (!result_dict)
5187 return JIM_ERR;
5188
5189 struct command_context *cmd_ctx = current_command_context(interp);
5190 assert(cmd_ctx != NULL);
5191 const struct target *target = get_current_target(cmd_ctx);
5192
5193 for (int i = 0; i < length; i++) {
5194 Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);
5195
5196 if (!elem)
5197 return JIM_ERR;
5198
5199 const char *reg_name = Jim_String(elem);
5200
5201 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5202 false);
5203
5204 if (!reg || !reg->exist) {
5205 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5206 return JIM_ERR;
5207 }
5208
5209 if (force) {
5210 int retval = reg->type->get(reg);
5211
5212 if (retval != ERROR_OK) {
5213 Jim_SetResultFormatted(interp, "failed to read register '%s'",
5214 reg_name);
5215 return JIM_ERR;
5216 }
5217 }
5218
5219 char *reg_value = buf_to_hex_str(reg->value, reg->size);
5220
5221 if (!reg_value) {
5222 LOG_ERROR("Failed to allocate memory");
5223 return JIM_ERR;
5224 }
5225
5226 char *tmp = alloc_printf("0x%s", reg_value);
5227
5228 free(reg_value);
5229
5230 if (!tmp) {
5231 LOG_ERROR("Failed to allocate memory");
5232 return JIM_ERR;
5233 }
5234
5235 Jim_DictAddElement(interp, result_dict, elem,
5236 Jim_NewStringObj(interp, tmp, -1));
5237
5238 free(tmp);
5239 }
5240
5241 Jim_SetResult(interp, result_dict);
5242
5243 return JIM_OK;
5244 }
5245
5246 static int target_jim_set_reg(Jim_Interp *interp, int argc,
5247 Jim_Obj * const *argv)
5248 {
5249 if (argc != 2) {
5250 Jim_WrongNumArgs(interp, 1, argv, "dict");
5251 return JIM_ERR;
5252 }
5253
5254 int tmp;
5255 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
5256
5257 if (!dict)
5258 return JIM_ERR;
5259
5260 const unsigned int length = tmp;
5261 struct command_context *cmd_ctx = current_command_context(interp);
5262 assert(cmd_ctx);
5263 const struct target *target = get_current_target(cmd_ctx);
5264
5265 for (unsigned int i = 0; i < length; i += 2) {
5266 const char *reg_name = Jim_String(dict[i]);
5267 const char *reg_value = Jim_String(dict[i + 1]);
5268 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5269 false);
5270
5271 if (!reg || !reg->exist) {
5272 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5273 return JIM_ERR;
5274 }
5275
5276 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
5277
5278 if (!buf) {
5279 LOG_ERROR("Failed to allocate memory");
5280 return JIM_ERR;
5281 }
5282
5283 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
5284 int retval = reg->type->set(reg, buf);
5285 free(buf);
5286
5287 if (retval != ERROR_OK) {
5288 Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
5289 reg_value, reg_name);
5290 return JIM_ERR;
5291 }
5292 }
5293
5294 return JIM_OK;
5295 }
5296
5297 /**
5298 * Returns true only if the target has a handler for the specified event.
5299 */
5300 bool target_has_event_action(struct target *target, enum target_event event)
5301 {
5302 struct target_event_action *teap;
5303
5304 for (teap = target->event_action; teap; teap = teap->next) {
5305 if (teap->event == event)
5306 return true;
5307 }
5308 return false;
5309 }
5310
/* Option keys accepted by "$target_name configure"/"cget"; each entry
 * corresponds to a "-..." name in nvp_config_opts below. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
5327
/* Name/value table mapping configure/cget option strings to
 * enum target_cfg_param; terminated by a NULL-name sentinel. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",             .value = TCFG_TYPE },
	{ .name = "-event",            .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",           .value = TCFG_ENDIAN },
	{ .name = "-coreid",           .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",          .value = TCFG_DBGBASE },
	{ .name = "-rtos",             .value = TCFG_RTOS },
	{ .name = "-defer-examine",    .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",         .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections",   .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
5345
/* Implements both 'target configure' (goi->isconfigure true: options set
 * values) and 'cget' (false: options only read values).  Consumes option
 * words from @goi until none remain.  Each case both applies the new value
 * (configure) and leaves the current value in the Jim result, so 'cget'
 * and 'configure' report the same way.  Returns JIM_OK or JIM_ERR with a
 * message in the interpreter result. */
static int target_configure(struct jim_getopt_info *goi, struct target *target)
{
	struct jim_nvp *n;
	Jim_Obj *o;
	jim_wide w;
	int e;

	/* parse config or cget options ... */
	while (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);
		/* jim_getopt_debug(goi); */

		if (target->type->target_jim_configure) {
			/* target defines a configure function */
			/* target gets first dibs on parameters */
			e = (*(target->type->target_jim_configure))(target, goi);
			if (e == JIM_OK) {
				/* more? */
				continue;
			}
			if (e == JIM_ERR) {
				/* An error */
				return e;
			}
			/* otherwise we 'continue' below */
		}
		e = jim_getopt_nvp(goi, nvp_config_opts, &n);
		if (e != JIM_OK) {
			jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
			return e;
		}
		switch (n->value) {
		case TCFG_TYPE:
			/* not settable */
			if (goi->isconfigure) {
				Jim_SetResultFormatted(goi->interp,
						"not settable: %s", n->name);
				return JIM_ERR;
			} else {
				/* Shared "cget takes no further words" check; other
				 * cases jump here via 'goto no_params'. */
no_params:
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}
			}
			Jim_SetResultString(goi->interp,
					target_type_name(target), -1);
			/* loop for more */
			break;
		case TCFG_EVENT:
			if (goi->argc == 0) {
				Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
				return JIM_ERR;
			}

			e = jim_getopt_nvp(goi, nvp_target_event, &n);
			if (e != JIM_OK) {
				jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
				return e;
			}

			/* configure needs exactly one more word (the body);
			 * cget needs none. */
			if (goi->isconfigure) {
				if (goi->argc != 1) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
					return JIM_ERR;
				}
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
					return JIM_ERR;
				}
			}

			{
				struct target_event_action *teap;

				teap = target->event_action;
				/* replace existing? */
				while (teap) {
					if (teap->event == (enum target_event)n->value)
						break;
					teap = teap->next;
				}

				if (goi->isconfigure) {
					/* START_DEPRECATED_TPIU */
					if (n->value == TARGET_EVENT_TRACE_CONFIG)
						LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
					/* END_DEPRECATED_TPIU */

					bool replace = true;
					if (!teap) {
						/* create new */
						/* NOTE(review): calloc result is not checked
						 * before the teap->event store below. */
						teap = calloc(1, sizeof(*teap));
						replace = false;
					}
					teap->event = n->value;
					teap->interp = goi->interp;
					jim_getopt_obj(goi, &o);
					/* drop any previous body before installing the new one */
					if (teap->body)
						Jim_DecrRefCount(teap->interp, teap->body);
					teap->body = Jim_DuplicateObj(goi->interp, o);
					/*
					 * FIXME:
					 *     Tcl/TK - "tk events" have a nice feature.
					 *     See the "BIND" command.
					 *    We should support that here.
					 *     You can specify %X and %Y in the event code.
					 *    The idea is: %T - target name.
					 *    The idea is: %N - target number
					 *    The idea is: %E - event name.
					 */
					Jim_IncrRefCount(teap->body);

					if (!replace) {
						/* add to head of event list */
						teap->next = target->event_action;
						target->event_action = teap;
					}
					Jim_SetEmptyResult(goi->interp);
				} else {
					/* get */
					if (!teap)
						Jim_SetEmptyResult(goi->interp);
					else
						Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
				}
			}
			/* loop for more */
			break;

		case TCFG_WORK_AREA_VIRT:
			if (goi->isconfigure) {
				/* changing the work area invalidates any allocations in it */
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_virt = w;
				target->working_area_virt_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_PHYS:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_phys = w;
				target->working_area_phys_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_SIZE:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_size = w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_BACKUP:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* make this exactly 1 or 0 */
				target->backup_working_area = (!!w);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
			/* loop for more e*/
			break;


		case TCFG_ENDIAN:
			if (goi->isconfigure) {
				e = jim_getopt_nvp(goi, nvp_target_endian, &n);
				if (e != JIM_OK) {
					jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
					return e;
				}
				target->endianness = n->value;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			/* report the (possibly defaulted) endianness by name */
			n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			if (!n->name) {
				target->endianness = TARGET_LITTLE_ENDIAN;
				n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			}
			Jim_SetResultString(goi->interp, n->name, -1);
			/* loop for more */
			break;

		case TCFG_COREID:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->coreid = (int32_t)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
			/* loop for more */
			break;

		case TCFG_CHAIN_POSITION:
			if (goi->isconfigure) {
				Jim_Obj *o_t;
				struct jtag_tap *tap;

				/* -chain-position and -dap are mutually exclusive */
				if (target->has_dap) {
					Jim_SetResultString(goi->interp,
						"target requires -dap parameter instead of -chain-position!", -1);
					return JIM_ERR;
				}

				target_free_all_working_areas(target);
				e = jim_getopt_obj(goi, &o_t);
				if (e != JIM_OK)
					return e;
				tap = jtag_tap_by_jim_obj(goi->interp, o_t);
				if (!tap)
					return JIM_ERR;
				target->tap = tap;
				target->tap_configured = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
			/* loop for more e*/
			break;
		case TCFG_DBGBASE:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->dbgbase = (uint32_t)w;
				target->dbgbase_set = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
			/* loop for more */
			break;
		case TCFG_RTOS:
			/* RTOS */
			{
				int result = rtos_create(goi, target);
				if (result != JIM_OK)
					return result;
			}
			/* loop for more */
			break;

		case TCFG_DEFER_EXAMINE:
			/* DEFER_EXAMINE */
			target->defer_examine = true;
			/* loop for more */
			break;

		case TCFG_GDB_PORT:
			if (goi->isconfigure) {
				/* only allowed before 'init', when the gdb server
				 * has not been started yet */
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
					return JIM_ERR;
				}

				const char *s;
				e = jim_getopt_string(goi, &s, NULL);
				if (e != JIM_OK)
					return e;
				free(target->gdb_port_override);
				target->gdb_port_override = strdup(s);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
			/* loop for more */
			break;

		case TCFG_GDB_MAX_CONNECTIONS:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
					return JIM_ERR;
				}

				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* any negative count means "unlimited" */
				target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
			break;
		}
	} /* while (goi->argc) */


		/* done - we return */
	return JIM_OK;
}
5682
5683 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5684 {
5685 struct command *c = jim_to_command(interp);
5686 struct jim_getopt_info goi;
5687
5688 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5689 goi.isconfigure = !strcmp(c->name, "configure");
5690 if (goi.argc < 1) {
5691 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5692 "missing: -option ...");
5693 return JIM_ERR;
5694 }
5695 struct command_context *cmd_ctx = current_command_context(interp);
5696 assert(cmd_ctx);
5697 struct target *target = get_current_target(cmd_ctx);
5698 return target_configure(&goi, target);
5699 }
5700
5701 static int jim_target_mem2array(Jim_Interp *interp,
5702 int argc, Jim_Obj *const *argv)
5703 {
5704 struct command_context *cmd_ctx = current_command_context(interp);
5705 assert(cmd_ctx);
5706 struct target *target = get_current_target(cmd_ctx);
5707 return target_mem2array(interp, target, argc - 1, argv + 1);
5708 }
5709
5710 static int jim_target_array2mem(Jim_Interp *interp,
5711 int argc, Jim_Obj *const *argv)
5712 {
5713 struct command_context *cmd_ctx = current_command_context(interp);
5714 assert(cmd_ctx);
5715 struct target *target = get_current_target(cmd_ctx);
5716 return target_array2mem(interp, target, argc - 1, argv + 1);
5717 }
5718
/* Shared error helper: set a "[TAP is disabled]" message in the Jim result
 * and return JIM_ERR, for handlers that require an enabled TAP. */
static int jim_target_tap_disabled(Jim_Interp *interp)
{
	Jim_SetResultFormatted(interp, "[TAP is disabled]");
	return JIM_ERR;
}
5724
5725 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5726 {
5727 bool allow_defer = false;
5728
5729 struct jim_getopt_info goi;
5730 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5731 if (goi.argc > 1) {
5732 const char *cmd_name = Jim_GetString(argv[0], NULL);
5733 Jim_SetResultFormatted(goi.interp,
5734 "usage: %s ['allow-defer']", cmd_name);
5735 return JIM_ERR;
5736 }
5737 if (goi.argc > 0 &&
5738 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5739 /* consume it */
5740 Jim_Obj *obj;
5741 int e = jim_getopt_obj(&goi, &obj);
5742 if (e != JIM_OK)
5743 return e;
5744 allow_defer = true;
5745 }
5746
5747 struct command_context *cmd_ctx = current_command_context(interp);
5748 assert(cmd_ctx);
5749 struct target *target = get_current_target(cmd_ctx);
5750 if (!target->tap->enabled)
5751 return jim_target_tap_disabled(interp);
5752
5753 if (allow_defer && target->defer_examine) {
5754 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5755 LOG_INFO("Use arp_examine command to examine it manually!");
5756 return JIM_OK;
5757 }
5758
5759 int e = target->type->examine(target);
5760 if (e != ERROR_OK) {
5761 target_reset_examined(target);
5762 return JIM_ERR;
5763 }
5764
5765 target_set_examined(target);
5766
5767 return JIM_OK;
5768 }
5769
5770 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5771 {
5772 struct command_context *cmd_ctx = current_command_context(interp);
5773 assert(cmd_ctx);
5774 struct target *target = get_current_target(cmd_ctx);
5775
5776 Jim_SetResultBool(interp, target_was_examined(target));
5777 return JIM_OK;
5778 }
5779
5780 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5781 {
5782 struct command_context *cmd_ctx = current_command_context(interp);
5783 assert(cmd_ctx);
5784 struct target *target = get_current_target(cmd_ctx);
5785
5786 Jim_SetResultBool(interp, target->defer_examine);
5787 return JIM_OK;
5788 }
5789
5790 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5791 {
5792 if (argc != 1) {
5793 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5794 return JIM_ERR;
5795 }
5796 struct command_context *cmd_ctx = current_command_context(interp);
5797 assert(cmd_ctx);
5798 struct target *target = get_current_target(cmd_ctx);
5799
5800 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5801 return JIM_ERR;
5802
5803 return JIM_OK;
5804 }
5805
5806 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5807 {
5808 if (argc != 1) {
5809 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5810 return JIM_ERR;
5811 }
5812 struct command_context *cmd_ctx = current_command_context(interp);
5813 assert(cmd_ctx);
5814 struct target *target = get_current_target(cmd_ctx);
5815 if (!target->tap->enabled)
5816 return jim_target_tap_disabled(interp);
5817
5818 int e;
5819 if (!(target_was_examined(target)))
5820 e = ERROR_TARGET_NOT_EXAMINED;
5821 else
5822 e = target->type->poll(target);
5823 if (e != ERROR_OK)
5824 return JIM_ERR;
5825 return JIM_OK;
5826 }
5827
/* 'arp_reset' handler: assert or deassert the current target's reset line.
 * Syntax: arp_reset (assert|deassert) BOOL — BOOL selects halt-on-reset.
 * Side effects: drops examined state for -defer-examine targets and
 * invalidates all working areas before touching reset. */
static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	if (goi.argc != 2) {
		Jim_WrongNumArgs(interp, 0, argv,
				"([tT]|[fF]|assert|deassert) BOOL");
		return JIM_ERR;
	}

	/* assert/deassert keyword */
	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_assert, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
		return e;
	}
	/* the halt or not param */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	/* both hooks are needed, since this command drives each phase */
	if (!target->type->assert_reset || !target->type->deassert_reset) {
		Jim_SetResultFormatted(interp,
				"No target-specific reset for %s",
				target_name(target));
		return JIM_ERR;
	}

	/* deferred-examine targets must be re-examined after reset */
	if (target->defer_examine)
		target_reset_examined(target);

	/* determine if we should halt or not. */
	target->reset_halt = (a != 0);
	/* When this happens - all workareas are invalid. */
	target_free_all_working_areas_restore(target, 0);

	/* do the assert */
	if (n->value == NVP_ASSERT)
		e = target->type->assert_reset(target);
	else
		e = target->type->deassert_reset(target);
	return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
}
5879
5880 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5881 {
5882 if (argc != 1) {
5883 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5884 return JIM_ERR;
5885 }
5886 struct command_context *cmd_ctx = current_command_context(interp);
5887 assert(cmd_ctx);
5888 struct target *target = get_current_target(cmd_ctx);
5889 if (!target->tap->enabled)
5890 return jim_target_tap_disabled(interp);
5891 int e = target->type->halt(target);
5892 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5893 }
5894
/* 'arp_waitstate' handler: block until the current target reaches the
 * named state or the timeout (in ms) expires.
 * Syntax: arp_waitstate <state_name> <timeout_in_msec> */
static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	/* params:  <name>  statename timeoutmsecs */
	if (goi.argc != 2) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
				"%s <state_name> <timeout_in_msec>", cmd_name);
		return JIM_ERR;
	}

	/* decode the state name */
	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
		return e;
	}
	/* timeout, in milliseconds */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	e = target_wait_state(target, n->value, a);
	if (e != ERROR_OK) {
		/* report both the numeric error and its string form */
		Jim_Obj *obj = Jim_NewIntObj(interp, e);
		Jim_SetResultFormatted(goi.interp,
				"target: %s wait %s fails (%#s) %s",
				target_name(target), n->name,
				obj, target_strerror_safe(e));
		return JIM_ERR;
	}
	return JIM_OK;
}
5935 /* List for human, Events defined for this target.
5936 * scripts/programs should use 'name cget -event NAME'
5937 */
5938 COMMAND_HANDLER(handle_target_event_list)
5939 {
5940 struct target *target = get_current_target(CMD_CTX);
5941 struct target_event_action *teap = target->event_action;
5942
5943 command_print(CMD, "Event actions for target (%d) %s\n",
5944 target->target_number,
5945 target_name(target));
5946 command_print(CMD, "%-25s | Body", "Event");
5947 command_print(CMD, "------------------------- | "
5948 "----------------------------------------");
5949 while (teap) {
5950 command_print(CMD, "%-25s | %s",
5951 target_event_name(teap->event),
5952 Jim_GetString(teap->body, NULL));
5953 teap = teap->next;
5954 }
5955 command_print(CMD, "***END***");
5956 return ERROR_OK;
5957 }
5958 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5959 {
5960 if (argc != 1) {
5961 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5962 return JIM_ERR;
5963 }
5964 struct command_context *cmd_ctx = current_command_context(interp);
5965 assert(cmd_ctx);
5966 struct target *target = get_current_target(cmd_ctx);
5967 Jim_SetResultString(interp, target_state_name(target), -1);
5968 return JIM_OK;
5969 }
5970 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5971 {
5972 struct jim_getopt_info goi;
5973 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5974 if (goi.argc != 1) {
5975 const char *cmd_name = Jim_GetString(argv[0], NULL);
5976 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5977 return JIM_ERR;
5978 }
5979 struct jim_nvp *n;
5980 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5981 if (e != JIM_OK) {
5982 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5983 return e;
5984 }
5985 struct command_context *cmd_ctx = current_command_context(interp);
5986 assert(cmd_ctx);
5987 struct target *target = get_current_target(cmd_ctx);
5988 target_handle_event(target, n->value);
5989 return JIM_OK;
5990 }
5991
5992 static const struct command_registration target_instance_command_handlers[] = {
5993 {
5994 .name = "configure",
5995 .mode = COMMAND_ANY,
5996 .jim_handler = jim_target_configure,
5997 .help = "configure a new target for use",
5998 .usage = "[target_attribute ...]",
5999 },
6000 {
6001 .name = "cget",
6002 .mode = COMMAND_ANY,
6003 .jim_handler = jim_target_configure,
6004 .help = "returns the specified target attribute",
6005 .usage = "target_attribute",
6006 },
6007 {
6008 .name = "mwd",
6009 .handler = handle_mw_command,
6010 .mode = COMMAND_EXEC,
6011 .help = "Write 64-bit word(s) to target memory",
6012 .usage = "address data [count]",
6013 },
6014 {
6015 .name = "mww",
6016 .handler = handle_mw_command,
6017 .mode = COMMAND_EXEC,
6018 .help = "Write 32-bit word(s) to target memory",
6019 .usage = "address data [count]",
6020 },
6021 {
6022 .name = "mwh",
6023 .handler = handle_mw_command,
6024 .mode = COMMAND_EXEC,
6025 .help = "Write 16-bit half-word(s) to target memory",
6026 .usage = "address data [count]",
6027 },
6028 {
6029 .name = "mwb",
6030 .handler = handle_mw_command,
6031 .mode = COMMAND_EXEC,
6032 .help = "Write byte(s) to target memory",
6033 .usage = "address data [count]",
6034 },
6035 {
6036 .name = "mdd",
6037 .handler = handle_md_command,
6038 .mode = COMMAND_EXEC,
6039 .help = "Display target memory as 64-bit words",
6040 .usage = "address [count]",
6041 },
6042 {
6043 .name = "mdw",
6044 .handler = handle_md_command,
6045 .mode = COMMAND_EXEC,
6046 .help = "Display target memory as 32-bit words",
6047 .usage = "address [count]",
6048 },
6049 {
6050 .name = "mdh",
6051 .handler = handle_md_command,
6052 .mode = COMMAND_EXEC,
6053 .help = "Display target memory as 16-bit half-words",
6054 .usage = "address [count]",
6055 },
6056 {
6057 .name = "mdb",
6058 .handler = handle_md_command,
6059 .mode = COMMAND_EXEC,
6060 .help = "Display target memory as 8-bit bytes",
6061 .usage = "address [count]",
6062 },
6063 {
6064 .name = "array2mem",
6065 .mode = COMMAND_EXEC,
6066 .jim_handler = jim_target_array2mem,
6067 .help = "Writes Tcl array of 8/16/32 bit numbers "
6068 "to target memory",
6069 .usage = "arrayname bitwidth address count",
6070 },
6071 {
6072 .name = "mem2array",
6073 .mode = COMMAND_EXEC,
6074 .jim_handler = jim_target_mem2array,
6075 .help = "Loads Tcl array of 8/16/32 bit numbers "
6076 "from target memory",
6077 .usage = "arrayname bitwidth address count",
6078 },
6079 {
6080 .name = "get_reg",
6081 .mode = COMMAND_EXEC,
6082 .jim_handler = target_jim_get_reg,
6083 .help = "Get register values from the target",
6084 .usage = "list",
6085 },
6086 {
6087 .name = "set_reg",
6088 .mode = COMMAND_EXEC,
6089 .jim_handler = target_jim_set_reg,
6090 .help = "Set target register values",
6091 .usage = "dict",
6092 },
6093 {
6094 .name = "read_memory",
6095 .mode = COMMAND_EXEC,
6096 .jim_handler = target_jim_read_memory,
6097 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
6098 .usage = "address width count ['phys']",
6099 },
6100 {
6101 .name = "write_memory",
6102 .mode = COMMAND_EXEC,
6103 .jim_handler = target_jim_write_memory,
6104 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6105 .usage = "address width data ['phys']",
6106 },
6107 {
6108 .name = "eventlist",
6109 .handler = handle_target_event_list,
6110 .mode = COMMAND_EXEC,
6111 .help = "displays a table of events defined for this target",
6112 .usage = "",
6113 },
6114 {
6115 .name = "curstate",
6116 .mode = COMMAND_EXEC,
6117 .jim_handler = jim_target_current_state,
6118 .help = "displays the current state of this target",
6119 },
6120 {
6121 .name = "arp_examine",
6122 .mode = COMMAND_EXEC,
6123 .jim_handler = jim_target_examine,
6124 .help = "used internally for reset processing",
6125 .usage = "['allow-defer']",
6126 },
6127 {
6128 .name = "was_examined",
6129 .mode = COMMAND_EXEC,
6130 .jim_handler = jim_target_was_examined,
6131 .help = "used internally for reset processing",
6132 },
6133 {
6134 .name = "examine_deferred",
6135 .mode = COMMAND_EXEC,
6136 .jim_handler = jim_target_examine_deferred,
6137 .help = "used internally for reset processing",
6138 },
6139 {
6140 .name = "arp_halt_gdb",
6141 .mode = COMMAND_EXEC,
6142 .jim_handler = jim_target_halt_gdb,
6143 .help = "used internally for reset processing to halt GDB",
6144 },
6145 {
6146 .name = "arp_poll",
6147 .mode = COMMAND_EXEC,
6148 .jim_handler = jim_target_poll,
6149 .help = "used internally for reset processing",
6150 },
6151 {
6152 .name = "arp_reset",
6153 .mode = COMMAND_EXEC,
6154 .jim_handler = jim_target_reset,
6155 .help = "used internally for reset processing",
6156 },
6157 {
6158 .name = "arp_halt",
6159 .mode = COMMAND_EXEC,
6160 .jim_handler = jim_target_halt,
6161 .help = "used internally for reset processing",
6162 },
6163 {
6164 .name = "arp_waitstate",
6165 .mode = COMMAND_EXEC,
6166 .jim_handler = jim_target_wait_state,
6167 .help = "used internally for reset processing",
6168 },
6169 {
6170 .name = "invoke-event",
6171 .mode = COMMAND_EXEC,
6172 .jim_handler = jim_target_invoke_event,
6173 .help = "invoke handler for specified event",
6174 .usage = "event_name",
6175 },
6176 COMMAND_REGISTRATION_DONE
6177 };
6178
/* Implements 'target create <name> <type> [options...]': allocate and
 * initialize a new struct target, run the remaining words through
 * target_configure(), invoke the type's target_create() hook, register
 * the per-instance command group named after the target, and make it the
 * current target.  On any failure, everything allocated so far is freed
 * in reverse order before returning JIM_ERR. */
static int target_create(struct jim_getopt_info *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;

	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx);

	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}

	/* COMMAND */
	jim_getopt_obj(goi, &new_cmd);
	/* does this command exist? */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}

	/* TYPE */
	e = jim_getopt_string(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	/* the transport may substitute its own target type (e.g. for HLA) */
	struct transport *tr = get_current_transport();
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}
	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (strcmp(cp, target_types[x]->name) == 0) {
			/* found */
			break;
		}
	}
	if (!target_types[x]) {
		/* unknown type: build a comma-separated list of valid names */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
								   Jim_GetResult(goi->interp),
								   target_types[x]->name,
								   ", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
								   Jim_GetResult(goi->interp),
								   " or ",
								   target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}

	/* Create it */
	target = calloc(1, sizeof(struct target));
	if (!target) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}

	/* set empty smp cluster */
	target->smp_targets = &empty_smp_targets;

	/* set target number */
	target->target_number = new_target_number();

	/* allocate memory for each unique target type */
	target->type = malloc(sizeof(struct target_type));
	if (!target->type) {
		LOG_ERROR("Out of memory");
		free(target);
		return JIM_ERR;
	}

	/* private copy, so the type's hooks can be patched per instance */
	memcpy(target->type, target_types[x], sizeof(struct target_type));

	/* default to first core, override with -coreid */
	target->coreid = 0;

	target->working_area        = 0x0;
	target->working_area_size   = 0x0;
	target->working_areas       = NULL;
	target->backup_working_area = 0;

	target->state               = TARGET_UNKNOWN;
	target->debug_reason        = DBG_REASON_UNDEFINED;
	target->reg_cache           = NULL;
	target->breakpoints         = NULL;
	target->watchpoints         = NULL;
	target->next                = NULL;
	target->arch_info           = NULL;

	target->verbose_halt_msg	= true;

	target->halt_issued			= false;

	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));
	if (!target->trace_info) {
		LOG_ERROR("Out of memory");
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	target->dbgmsg          = NULL;
	target->dbg_msg_enabled = 0;

	target->endianness = TARGET_ENDIAN_UNKNOWN;

	target->rtos = NULL;
	target->rtos_auto_detect = false;

	target->gdb_port_override = NULL;
	target->gdb_max_connections = 1;

	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);

	if (e == JIM_OK) {
		/* exactly one of -dap / -chain-position must have been given */
		if (target->has_dap) {
			if (!target->dap_configured) {
				Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		} else {
			if (!target->tap_configured) {
				Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		}
		/* tap must be set after target was configured */
		if (!target->tap)
			e = JIM_ERR;
	}

	if (e != JIM_OK) {
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return e;
	}

	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}

	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);
	if (!target->cmd_name) {
		LOG_ERROR("Out of memory");
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* type-specific construction hook, if any */
	if (target->type->target_create) {
		e = (*(target->type->target_create))(target, goi->interp);
		if (e != ERROR_OK) {
			LOG_DEBUG("target_create failed");
			free(target->cmd_name);
			rtos_destroy(target);
			free(target->gdb_port_override);
			free(target->trace_info);
			free(target->type);
			free(target);
			return JIM_ERR;
		}
	}

	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (e != ERROR_OK)
			LOG_ERROR("unable to register '%s' commands", cp);
	}

	/* now - create the new target name command */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
	if (e != ERROR_OK) {
		if (target->type->deinit_target)
			target->type->deinit_target(target);
		free(target->cmd_name);
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* append to end of list */
	append_to_list_all_targets(target);

	/* new target becomes the current one */
	cmd_ctx->current_target = target;
	return JIM_OK;
}
6415
6416 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6417 {
6418 if (argc != 1) {
6419 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6420 return JIM_ERR;
6421 }
6422 struct command_context *cmd_ctx = current_command_context(interp);
6423 assert(cmd_ctx);
6424
6425 struct target *target = get_current_target_or_null(cmd_ctx);
6426 if (target)
6427 Jim_SetResultString(interp, target_name(target), -1);
6428 return JIM_OK;
6429 }
6430
6431 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6432 {
6433 if (argc != 1) {
6434 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6435 return JIM_ERR;
6436 }
6437 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6438 for (unsigned x = 0; target_types[x]; x++) {
6439 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6440 Jim_NewStringObj(interp, target_types[x]->name, -1));
6441 }
6442 return JIM_OK;
6443 }
6444
6445 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6446 {
6447 if (argc != 1) {
6448 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6449 return JIM_ERR;
6450 }
6451 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6452 struct target *target = all_targets;
6453 while (target) {
6454 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6455 Jim_NewStringObj(interp, target_name(target), -1));
6456 target = target->next;
6457 }
6458 return JIM_OK;
6459 }
6460
6461 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6462 {
6463 int i;
6464 const char *targetname;
6465 int retval, len;
6466 struct target *target = NULL;
6467 struct target_list *head, *new;
6468
6469 retval = 0;
6470 LOG_DEBUG("%d", argc);
6471 /* argv[1] = target to associate in smp
6472 * argv[2] = target to associate in smp
6473 * argv[3] ...
6474 */
6475
6476 struct list_head *lh = malloc(sizeof(*lh));
6477 if (!lh) {
6478 LOG_ERROR("Out of memory");
6479 return JIM_ERR;
6480 }
6481 INIT_LIST_HEAD(lh);
6482
6483 for (i = 1; i < argc; i++) {
6484
6485 targetname = Jim_GetString(argv[i], &len);
6486 target = get_target(targetname);
6487 LOG_DEBUG("%s ", targetname);
6488 if (target) {
6489 new = malloc(sizeof(struct target_list));
6490 new->target = target;
6491 list_add_tail(&new->lh, lh);
6492 }
6493 }
6494 /* now parse the list of cpu and put the target in smp mode*/
6495 foreach_smp_target(head, lh) {
6496 target = head->target;
6497 target->smp = 1;
6498 target->smp_targets = lh;
6499 }
6500
6501 if (target && target->rtos)
6502 retval = rtos_smp_init(head->target);
6503
6504 return retval;
6505 }
6506
6507
6508 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6509 {
6510 struct jim_getopt_info goi;
6511 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6512 if (goi.argc < 3) {
6513 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6514 "<name> <target_type> [<target_options> ...]");
6515 return JIM_ERR;
6516 }
6517 return target_create(&goi);
6518 }
6519
/* Subcommands of the 'target' command (init/create/current/types/names/smp).
 * Chained into target_command_handlers further down in this file. */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_current,
		.help = "Returns the currently selected target",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_names,
		.help = "Returns the names of all targets as a list of strings",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},

	COMMAND_REGISTRATION_DONE
};
6564
/* One image section buffered in host memory by 'fast_load_image',
 * waiting to be written to the target by 'fast_load'. */
struct fast_load {
	target_addr_t address;	/* target address the data is destined for */
	uint8_t *data;		/* heap-allocated copy of the section contents */
	int length;		/* number of bytes in data */

};

/* Shared state between the two fast-load commands: the buffered image.
 * fastload is NULL when no image is currently buffered. */
static int fastload_num;
static struct fast_load *fastload;
6574
6575 static void free_fastload(void)
6576 {
6577 if (fastload) {
6578 for (int i = 0; i < fastload_num; i++)
6579 free(fastload[i].data);
6580 free(fastload);
6581 fastload = NULL;
6582 }
6583 }
6584
6585 COMMAND_HANDLER(handle_fast_load_image_command)
6586 {
6587 uint8_t *buffer;
6588 size_t buf_cnt;
6589 uint32_t image_size;
6590 target_addr_t min_address = 0;
6591 target_addr_t max_address = -1;
6592
6593 struct image image;
6594
6595 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6596 &image, &min_address, &max_address);
6597 if (retval != ERROR_OK)
6598 return retval;
6599
6600 struct duration bench;
6601 duration_start(&bench);
6602
6603 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6604 if (retval != ERROR_OK)
6605 return retval;
6606
6607 image_size = 0x0;
6608 retval = ERROR_OK;
6609 fastload_num = image.num_sections;
6610 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6611 if (!fastload) {
6612 command_print(CMD, "out of memory");
6613 image_close(&image);
6614 return ERROR_FAIL;
6615 }
6616 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6617 for (unsigned int i = 0; i < image.num_sections; i++) {
6618 buffer = malloc(image.sections[i].size);
6619 if (!buffer) {
6620 command_print(CMD, "error allocating buffer for section (%d bytes)",
6621 (int)(image.sections[i].size));
6622 retval = ERROR_FAIL;
6623 break;
6624 }
6625
6626 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6627 if (retval != ERROR_OK) {
6628 free(buffer);
6629 break;
6630 }
6631
6632 uint32_t offset = 0;
6633 uint32_t length = buf_cnt;
6634
6635 /* DANGER!!! beware of unsigned comparison here!!! */
6636
6637 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6638 (image.sections[i].base_address < max_address)) {
6639 if (image.sections[i].base_address < min_address) {
6640 /* clip addresses below */
6641 offset += min_address-image.sections[i].base_address;
6642 length -= offset;
6643 }
6644
6645 if (image.sections[i].base_address + buf_cnt > max_address)
6646 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6647
6648 fastload[i].address = image.sections[i].base_address + offset;
6649 fastload[i].data = malloc(length);
6650 if (!fastload[i].data) {
6651 free(buffer);
6652 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6653 length);
6654 retval = ERROR_FAIL;
6655 break;
6656 }
6657 memcpy(fastload[i].data, buffer + offset, length);
6658 fastload[i].length = length;
6659
6660 image_size += length;
6661 command_print(CMD, "%u bytes written at address 0x%8.8x",
6662 (unsigned int)length,
6663 ((unsigned int)(image.sections[i].base_address + offset)));
6664 }
6665
6666 free(buffer);
6667 }
6668
6669 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6670 command_print(CMD, "Loaded %" PRIu32 " bytes "
6671 "in %fs (%0.3f KiB/s)", image_size,
6672 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6673
6674 command_print(CMD,
6675 "WARNING: image has not been loaded to target!"
6676 "You can issue a 'fast_load' to finish loading.");
6677 }
6678
6679 image_close(&image);
6680
6681 if (retval != ERROR_OK)
6682 free_fastload();
6683
6684 return retval;
6685 }
6686
6687 COMMAND_HANDLER(handle_fast_load_command)
6688 {
6689 if (CMD_ARGC > 0)
6690 return ERROR_COMMAND_SYNTAX_ERROR;
6691 if (!fastload) {
6692 LOG_ERROR("No image in memory");
6693 return ERROR_FAIL;
6694 }
6695 int i;
6696 int64_t ms = timeval_ms();
6697 int size = 0;
6698 int retval = ERROR_OK;
6699 for (i = 0; i < fastload_num; i++) {
6700 struct target *target = get_current_target(CMD_CTX);
6701 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6702 (unsigned int)(fastload[i].address),
6703 (unsigned int)(fastload[i].length));
6704 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6705 if (retval != ERROR_OK)
6706 break;
6707 size += fastload[i].length;
6708 }
6709 if (retval == ERROR_OK) {
6710 int64_t after = timeval_ms();
6711 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6712 }
6713 return retval;
6714 }
6715
/* Top-level commands: 'targets' (select the current target or list all
 * of them) and the config-time 'target' command with its subcommands. */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6734
/* Register the top-level 'targets' and 'target' commands with the
 * command context.  Called once during startup. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6739
/* When true (default), nag the user after each reset about options that
 * could improve performance; toggled by the 'reset_nag' command below. */
static bool target_reset_nag = true;

/* Accessor used by reset code elsewhere to decide whether to nag. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6746
/* 'reset_nag ['enable'|'disable']' - show or set the reset-nag flag. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
6753
6754 COMMAND_HANDLER(handle_ps_command)
6755 {
6756 struct target *target = get_current_target(CMD_CTX);
6757 char *display;
6758 if (target->state != TARGET_HALTED) {
6759 LOG_INFO("target not halted !!");
6760 return ERROR_OK;
6761 }
6762
6763 if ((target->rtos) && (target->rtos->type)
6764 && (target->rtos->type->ps_command)) {
6765 display = target->rtos->type->ps_command(target);
6766 command_print(CMD, "%s", display);
6767 free(display);
6768 return ERROR_OK;
6769 } else {
6770 LOG_INFO("failed");
6771 return ERROR_TARGET_FAILURE;
6772 }
6773 }
6774
/* Print the optional label 'text' followed by 'size' bytes of 'buf' as
 * space-separated hex pairs, then terminate the output line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);
	for (const uint8_t *p = buf; p < buf + size; p++)
		command_print_sameline(cmd, " %02x", *p);
	command_print(cmd, " ");
}
6783
/*
 * 'test_mem_access <size>' - self-test of target_read_memory() and
 * target_write_memory().  Exercises every combination of access size
 * (1/2/4 bytes), target address offset (0..3) and host buffer alignment
 * (aligned/unaligned), then compares the target result against a host-side
 * replay of the same access.  Requires a halted target and enough working
 * area for the test pattern.
 */
COMMAND_HANDLER(handle_test_mem_access_command)
{
	struct target *target = get_current_target(CMD_CTX);
	uint32_t test_size;
	int retval = ERROR_OK;

	if (target->state != TARGET_HALTED) {
		LOG_INFO("target not halted !!");
		return ERROR_FAIL;
	}

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);

	/* Test reads */
	size_t num_bytes = test_size + 4;	/* slack for the offset sweep */

	struct working_area *wa = NULL;
	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	/* NOTE(review): malloc results in this function are not NULL-checked */
	uint8_t *test_pattern = malloc(num_bytes);

	/* random pattern written to the target, later read back piecewise */
	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
	if (retval != ERROR_OK) {
		LOG_ERROR("Test pattern write failed");
		goto out;
	}

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = (count + 2) * size + host_offset;
				uint8_t *read_ref = malloc(host_bufsiz);
				uint8_t *read_buf = malloc(host_bufsiz);

				/* fill both buffers identically so that bytes outside the
				 * read window must remain untouched for the compare to pass */
				for (size_t i = 0; i < host_bufsiz; i++) {
					read_ref[i] = rand();
					read_buf[i] = read_ref[i];
				}
				command_print_sameline(CMD,
						"Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				struct duration bench;
				duration_start(&bench);

				retval = target_read_memory(target, wa->address + offset, size, count,
						read_buf + size + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto next;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory read failed");
					goto next;
				}

				/* replay on host */
				memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);

				/* check result */
				int result = memcmp(read_ref, read_buf, host_bufsiz);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, host_bufsiz);
					binprint(CMD, "buf:", read_buf, host_bufsiz);
				}
			next:
				free(read_ref);
				free(read_buf);
			}
		}
	}

	/* NOTE(review): execution deliberately falls through from here into the
	 * write test below, even when the read test recorded a failure */
out:
	free(test_pattern);

	target_free_working_area(target, wa);

	/* Test writes */
	num_bytes = test_size + 4 + 4 + 4;	/* slack for size + offset sweep */

	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	test_pattern = malloc(num_bytes);

	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = count * size + host_offset;
				uint8_t *read_ref = malloc(num_bytes);
				uint8_t *read_buf = malloc(num_bytes);
				uint8_t *write_buf = malloc(host_bufsiz);

				for (size_t i = 0; i < host_bufsiz; i++)
					write_buf[i] = rand();
				command_print_sameline(CMD,
						"Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				/* re-seed the target region before each write variant */
				retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
				if (retval != ERROR_OK) {
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* replay on host */
				memcpy(read_ref, test_pattern, num_bytes);
				memcpy(read_ref + size + offset, write_buf + host_offset, count * size);

				struct duration bench;
				duration_start(&bench);

				retval = target_write_memory(target, wa->address + size + offset, size, count,
						write_buf + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD, "Unsupported alignment");
					goto nextw;
				} else if (retval != ERROR_OK) {
					command_print(CMD, "Memory write failed");
					goto nextw;
				}

				/* read back */
				retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
				if (retval != ERROR_OK) {
					command_print(CMD, "Test pattern write failed");
					goto nextw;
				}

				/* check result */
				int result = memcmp(read_ref, read_buf, num_bytes);
				if (result == 0) {
					command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD, "Compare failed");
					binprint(CMD, "ref:", read_ref, num_bytes);
					binprint(CMD, "buf:", read_buf, num_bytes);
				}
			nextw:
				free(read_ref);
				free(read_buf);
			}
		}
	}

	free(test_pattern);

	target_free_working_area(target, wa);
	return retval;
}
6964
/* Run-time target commands, registered by target_register_user_commands()
 * once 'target init' has run: image load/verify, register access, run
 * control, memory display/write, breakpoints/watchpoints and self-tests. */
static const struct command_registration target_exec_command_handlers[] = {
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* memory display commands; all share handle_md_command, which derives
	 * the access width from the command name */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	/* memory write commands; all share handle_mw_command, which derives
	 * the access width from the command name */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mem2array,
		.help = "read 8/16/32 bit memory and return as a TCL array "
			"for script processing",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_array2mem,
		.help = "convert a TCL array to memory locations "
			"and write the 8/16/32 bit values",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
7242 static int target_register_user_commands(struct command_context *cmd_ctx)
7243 {
7244 int retval = ERROR_OK;
7245 retval = target_request_register_commands(cmd_ctx);
7246 if (retval != ERROR_OK)
7247 return retval;
7248
7249 retval = trace_register_commands(cmd_ctx);
7250 if (retval != ERROR_OK)
7251 return retval;
7252
7253
7254 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7255 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)