1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59 #include "smp.h"
60 #include "semihosting_common.h"
61
62 /* default halt wait timeout (ms) */
63 #define DEFAULT_HALT_TIMEOUT 5000
64
65 static int target_read_buffer_default(struct target *target, target_addr_t address,
66 uint32_t count, uint8_t *buffer);
67 static int target_write_buffer_default(struct target *target, target_addr_t address,
68 uint32_t count, const uint8_t *buffer);
69 static int target_array2mem(Jim_Interp *interp, struct target *target,
70 int argc, Jim_Obj * const *argv);
71 static int target_mem2array(Jim_Interp *interp, struct target *target,
72 int argc, Jim_Obj * const *argv);
73 static int target_register_user_commands(struct command_context *cmd_ctx);
74 static int target_get_gdb_fileio_info_default(struct target *target,
75 struct gdb_fileio_info *fileio_info);
76 static int target_gdb_fileio_end_default(struct target *target, int retcode,
77 int fileio_errno, bool ctrl_c);
78
79 /* targets */
80 extern struct target_type arm7tdmi_target;
81 extern struct target_type arm720t_target;
82 extern struct target_type arm9tdmi_target;
83 extern struct target_type arm920t_target;
84 extern struct target_type arm966e_target;
85 extern struct target_type arm946e_target;
86 extern struct target_type arm926ejs_target;
87 extern struct target_type fa526_target;
88 extern struct target_type feroceon_target;
89 extern struct target_type dragonite_target;
90 extern struct target_type xscale_target;
91 extern struct target_type cortexm_target;
92 extern struct target_type cortexa_target;
93 extern struct target_type aarch64_target;
94 extern struct target_type cortexr4_target;
95 extern struct target_type arm11_target;
96 extern struct target_type ls1_sap_target;
97 extern struct target_type mips_m4k_target;
98 extern struct target_type mips_mips64_target;
99 extern struct target_type avr_target;
100 extern struct target_type dsp563xx_target;
101 extern struct target_type dsp5680xx_target;
102 extern struct target_type testee_target;
103 extern struct target_type avr32_ap7k_target;
104 extern struct target_type hla_target;
105 extern struct target_type nds32_v2_target;
106 extern struct target_type nds32_v3_target;
107 extern struct target_type nds32_v3m_target;
108 extern struct target_type esp32s2_target;
109 extern struct target_type or1k_target;
110 extern struct target_type quark_x10xx_target;
111 extern struct target_type quark_d20xx_target;
112 extern struct target_type stm8_target;
113 extern struct target_type riscv_target;
114 extern struct target_type mem_ap_target;
115 extern struct target_type esirisc_target;
116 extern struct target_type arcv2_target;
117
118 static struct target_type *target_types[] = {
119 &arm7tdmi_target,
120 &arm9tdmi_target,
121 &arm920t_target,
122 &arm720t_target,
123 &arm966e_target,
124 &arm946e_target,
125 &arm926ejs_target,
126 &fa526_target,
127 &feroceon_target,
128 &dragonite_target,
129 &xscale_target,
130 &cortexm_target,
131 &cortexa_target,
132 &cortexr4_target,
133 &arm11_target,
134 &ls1_sap_target,
135 &mips_m4k_target,
136 &avr_target,
137 &dsp563xx_target,
138 &dsp5680xx_target,
139 &testee_target,
140 &avr32_ap7k_target,
141 &hla_target,
142 &nds32_v2_target,
143 &nds32_v3_target,
144 &nds32_v3m_target,
145 &esp32s2_target,
146 &or1k_target,
147 &quark_x10xx_target,
148 &quark_d20xx_target,
149 &stm8_target,
150 &riscv_target,
151 &mem_ap_target,
152 &esirisc_target,
153 &arcv2_target,
154 &aarch64_target,
155 &mips_mips64_target,
156 NULL,
157 };
158
159 struct target *all_targets;
160 static struct target_event_callback *target_event_callbacks;
161 static struct target_timer_callback *target_timer_callbacks;
162 static int64_t target_timer_next_event_value;
163 static LIST_HEAD(target_reset_callback_list);
164 static LIST_HEAD(target_trace_callback_list);
165 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
166 static LIST_HEAD(empty_smp_targets);
167
168 static const struct jim_nvp nvp_assert[] = {
169 { .name = "assert", NVP_ASSERT },
170 { .name = "deassert", NVP_DEASSERT },
171 { .name = "T", NVP_ASSERT },
172 { .name = "F", NVP_DEASSERT },
173 { .name = "t", NVP_ASSERT },
174 { .name = "f", NVP_DEASSERT },
175 { .name = NULL, .value = -1 }
176 };
177
178 static const struct jim_nvp nvp_error_target[] = {
179 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
180 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
181 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
182 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
183 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
184 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
185 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
186 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
187 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
188 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
189 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
190 { .value = -1, .name = NULL }
191 };
192
193 static const char *target_strerror_safe(int err)
194 {
195 const struct jim_nvp *n;
196
197 n = jim_nvp_value2name_simple(nvp_error_target, err);
198 if (!n->name)
199 return "unknown";
200 else
201 return n->name;
202 }
203
204 static const struct jim_nvp nvp_target_event[] = {
205
206 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
207 { .value = TARGET_EVENT_HALTED, .name = "halted" },
208 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
209 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
210 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
211 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
212 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
213
214 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
215 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
216
217 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
218 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
219 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
220 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
221 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
222 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
223 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
224 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
225
226 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
227 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
228 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
229
230 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
231 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
232
233 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
234 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
235
236 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
237 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
238
239 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
240 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
241
242 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
243
244 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
245 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
246 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
247 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
248 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
249 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
250 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
251 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },
252
253 { .name = NULL, .value = -1 }
254 };
255
256 static const struct jim_nvp nvp_target_state[] = {
257 { .name = "unknown", .value = TARGET_UNKNOWN },
258 { .name = "running", .value = TARGET_RUNNING },
259 { .name = "halted", .value = TARGET_HALTED },
260 { .name = "reset", .value = TARGET_RESET },
261 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
262 { .name = NULL, .value = -1 },
263 };
264
265 static const struct jim_nvp nvp_target_debug_reason[] = {
266 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
267 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
268 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
269 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
270 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
271 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
272 { .name = "program-exit", .value = DBG_REASON_EXIT },
273 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
274 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
275 { .name = NULL, .value = -1 },
276 };
277
278 static const struct jim_nvp nvp_target_endian[] = {
279 { .name = "big", .value = TARGET_BIG_ENDIAN },
280 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
281 { .name = "be", .value = TARGET_BIG_ENDIAN },
282 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
283 { .name = NULL, .value = -1 },
284 };
285
286 static const struct jim_nvp nvp_reset_modes[] = {
287 { .name = "unknown", .value = RESET_UNKNOWN },
288 { .name = "run", .value = RESET_RUN },
289 { .name = "halt", .value = RESET_HALT },
290 { .name = "init", .value = RESET_INIT },
291 { .name = NULL, .value = -1 },
292 };
293
294 const char *debug_reason_name(struct target *t)
295 {
296 const char *cp;
297
298 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
299 t->debug_reason)->name;
300 if (!cp) {
301 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
302 cp = "(*BUG*unknown*BUG*)";
303 }
304 return cp;
305 }
306
307 const char *target_state_name(struct target *t)
308 {
309 const char *cp;
310 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
311 if (!cp) {
312 LOG_ERROR("Invalid target state: %d", (int)(t->state));
313 cp = "(*BUG*unknown*BUG*)";
314 }
315
316 if (!target_was_examined(t) && t->defer_examine)
317 cp = "examine deferred";
318
319 return cp;
320 }
321
322 const char *target_event_name(enum target_event event)
323 {
324 const char *cp;
325 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
326 if (!cp) {
327 LOG_ERROR("Invalid target event: %d", (int)(event));
328 cp = "(*BUG*unknown*BUG*)";
329 }
330 return cp;
331 }
332
333 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
334 {
335 const char *cp;
336 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
337 if (!cp) {
338 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
339 cp = "(*BUG*unknown*BUG*)";
340 }
341 return cp;
342 }
343
344 /* determine the number of the new target */
345 static int new_target_number(void)
346 {
347 struct target *t;
348 int x;
349
350 /* number is 0 based */
351 x = -1;
352 t = all_targets;
353 while (t) {
354 if (x < t->target_number)
355 x = t->target_number;
356 t = t->next;
357 }
358 return x + 1;
359 }
360
361 static void append_to_list_all_targets(struct target *target)
362 {
363 struct target **t = &all_targets;
364
365 while (*t)
366 t = &((*t)->next);
367 *t = target;
368 }
369
370 /* read a uint64_t from a buffer in target memory endianness */
371 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
372 {
373 if (target->endianness == TARGET_LITTLE_ENDIAN)
374 return le_to_h_u64(buffer);
375 else
376 return be_to_h_u64(buffer);
377 }
378
379 /* read a uint32_t from a buffer in target memory endianness */
380 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
381 {
382 if (target->endianness == TARGET_LITTLE_ENDIAN)
383 return le_to_h_u32(buffer);
384 else
385 return be_to_h_u32(buffer);
386 }
387
388 /* read a uint24_t from a buffer in target memory endianness */
389 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
390 {
391 if (target->endianness == TARGET_LITTLE_ENDIAN)
392 return le_to_h_u24(buffer);
393 else
394 return be_to_h_u24(buffer);
395 }
396
397 /* read a uint16_t from a buffer in target memory endianness */
398 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
399 {
400 if (target->endianness == TARGET_LITTLE_ENDIAN)
401 return le_to_h_u16(buffer);
402 else
403 return be_to_h_u16(buffer);
404 }
405
406 /* write a uint64_t to a buffer in target memory endianness */
407 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
408 {
409 if (target->endianness == TARGET_LITTLE_ENDIAN)
410 h_u64_to_le(buffer, value);
411 else
412 h_u64_to_be(buffer, value);
413 }
414
415 /* write a uint32_t to a buffer in target memory endianness */
416 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
417 {
418 if (target->endianness == TARGET_LITTLE_ENDIAN)
419 h_u32_to_le(buffer, value);
420 else
421 h_u32_to_be(buffer, value);
422 }
423
424 /* write a uint24_t to a buffer in target memory endianness */
425 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
426 {
427 if (target->endianness == TARGET_LITTLE_ENDIAN)
428 h_u24_to_le(buffer, value);
429 else
430 h_u24_to_be(buffer, value);
431 }
432
433 /* write a uint16_t to a buffer in target memory endianness */
434 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
435 {
436 if (target->endianness == TARGET_LITTLE_ENDIAN)
437 h_u16_to_le(buffer, value);
438 else
439 h_u16_to_be(buffer, value);
440 }
441
442 /* write a uint8_t to a buffer in target memory endianness */
443 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
444 {
445 *buffer = value;
446 }
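
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * helpers above move values between host variables and raw target-memory
 * buffers without the caller having to know the target's endianness.
 * 'target' and 'addr' are assumed to refer to an examined target and a
 * readable address.
 *
 *     uint8_t raw[4];
 *     int retval = target_read_memory(target, addr, 4, 1, raw);
 *     if (retval == ERROR_OK) {
 *         uint32_t value = target_buffer_get_u32(target, raw);
 *         target_buffer_set_u32(target, raw, value + 1);
 *         retval = target_write_memory(target, addr, 4, 1, raw);
 *     }
 */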
447
448 /* read a uint64_t array from a buffer in target memory endianness */
449 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
450 {
451 uint32_t i;
452 for (i = 0; i < count; i++)
453 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
454 }
455
456 /* read a uint32_t array from a buffer in target memory endianness */
457 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
458 {
459 uint32_t i;
460 for (i = 0; i < count; i++)
461 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
462 }
463
464 /* read a uint16_t array from a buffer in target memory endianness */
465 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
466 {
467 uint32_t i;
468 for (i = 0; i < count; i++)
469 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
470 }
471
472 /* write a uint64_t array to a buffer in target memory endianness */
473 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
474 {
475 uint32_t i;
476 for (i = 0; i < count; i++)
477 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
478 }
479
480 /* write a uint32_t array to a buffer in target memory endianness */
481 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
482 {
483 uint32_t i;
484 for (i = 0; i < count; i++)
485 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
486 }
487
488 /* write a uint16_t array to a buffer in target memory endianness */
489 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
490 {
491 uint32_t i;
492 for (i = 0; i < count; i++)
493 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
494 }
495
496 /* return a pointer to a configured target; id is name or number */
497 struct target *get_target(const char *id)
498 {
499 struct target *target;
500
501 /* try as Tcl target name */
502 for (target = all_targets; target; target = target->next) {
503 if (!target_name(target))
504 continue;
505 if (strcmp(id, target_name(target)) == 0)
506 return target;
507 }
508
509 /* It's OK to remove this fallback sometime after August 2010 or so */
510
511 /* no match, try as number */
512 unsigned num;
513 if (parse_uint(id, &num) != ERROR_OK)
514 return NULL;
515
516 for (target = all_targets; target; target = target->next) {
517 if (target->target_number == (int)num) {
518 LOG_WARNING("use '%s' as target identifier, not '%u'",
519 target_name(target), num);
520 return target;
521 }
522 }
523
524 return NULL;
525 }
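
/*
 * Usage sketch (illustrative only): command handlers typically resolve a
 * user-supplied identifier with get_target() and report an error if it does
 * not name a configured target. 'CMD' and the argument string are assumed
 * to come from the surrounding command handler.
 *
 *     struct target *t = get_target(CMD_ARGV[0]);
 *     if (!t) {
 *         command_print(CMD, "Target: %s does not exist", CMD_ARGV[0]);
 *         return ERROR_COMMAND_ARGUMENT_INVALID;
 *     }
 */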
526
527 /* returns a pointer to the n-th configured target */
528 struct target *get_target_by_num(int num)
529 {
530 struct target *target = all_targets;
531
532 while (target) {
533 if (target->target_number == num)
534 return target;
535 target = target->next;
536 }
537
538 return NULL;
539 }
540
541 struct target *get_current_target(struct command_context *cmd_ctx)
542 {
543 struct target *target = get_current_target_or_null(cmd_ctx);
544
545 if (!target) {
546 LOG_ERROR("BUG: current_target out of bounds");
547 exit(-1);
548 }
549
550 return target;
551 }
552
553 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
554 {
555 return cmd_ctx->current_target_override
556 ? cmd_ctx->current_target_override
557 : cmd_ctx->current_target;
558 }
559
560 int target_poll(struct target *target)
561 {
562 int retval;
563
564 /* We can't poll until after examine */
565 if (!target_was_examined(target)) {
566 /* Fail silently lest we pollute the log */
567 return ERROR_FAIL;
568 }
569
570 retval = target->type->poll(target);
571 if (retval != ERROR_OK)
572 return retval;
573
574 if (target->halt_issued) {
575 if (target->state == TARGET_HALTED)
576 target->halt_issued = false;
577 else {
578 int64_t t = timeval_ms() - target->halt_issued_time;
579 if (t > DEFAULT_HALT_TIMEOUT) {
580 target->halt_issued = false;
581 LOG_INFO("Halt timed out, wake up GDB.");
582 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
583 }
584 }
585 }
586
587 return ERROR_OK;
588 }
589
590 int target_halt(struct target *target)
591 {
592 int retval;
593 /* We can't halt until after examine */
594 if (!target_was_examined(target)) {
595 LOG_ERROR("Target not examined yet");
596 return ERROR_FAIL;
597 }
598
599 retval = target->type->halt(target);
600 if (retval != ERROR_OK)
601 return retval;
602
603 target->halt_issued = true;
604 target->halt_issued_time = timeval_ms();
605
606 return ERROR_OK;
607 }
608
609 /**
610 * Make the target (re)start executing using its saved execution
611 * context (possibly with some modifications).
612 *
613 * @param target Which target should start executing.
614 * @param current True to use the target's saved program counter instead
615 * of the address parameter
616 * @param address Optionally used as the program counter.
617 * @param handle_breakpoints True iff breakpoints at the resumption PC
618 * should be skipped. (For example, maybe execution was stopped by
619 * such a breakpoint, in which case it would be counterproductive to
620 * let it re-trigger.)
621 * @param debug_execution False if all working areas allocated by OpenOCD
622 * should be released and/or restored to their original contents.
623 * (This would for example be true to run some downloaded "helper"
624 * algorithm code, which resides in one such working buffer and uses
625 * another for data storage.)
626 *
627 * @todo Resolve the ambiguity about what the "debug_execution" flag
628 * signifies. For example, Target implementations don't agree on how
629 * it relates to invalidation of the register cache, or to whether
630 * breakpoints and watchpoints should be enabled. (It would seem wrong
631 * to enable breakpoints when running downloaded "helper" algorithms
632 * (debug_execution true), since the breakpoints would be set to match
633 * target firmware being debugged, not the helper algorithm.... and
634 * enabling them could cause such helpers to malfunction (for example,
635 * by overwriting data with a breakpoint instruction). On the other
636 * hand the infrastructure for running such helpers might use this
637 * procedure but rely on hardware breakpoint to detect termination.)
638 */
639 int target_resume(struct target *target, int current, target_addr_t address,
640 int handle_breakpoints, int debug_execution)
641 {
642 int retval;
643
644 /* We can't resume until after examine */
645 if (!target_was_examined(target)) {
646 LOG_ERROR("Target not examined yet");
647 return ERROR_FAIL;
648 }
649
650 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
651
652 /* note that resume *must* be asynchronous. The CPU can halt before
653 * we poll. The CPU can even halt at the current PC as a result of
654 * a software breakpoint being inserted by (a bug?) the application.
655 */
656 /*
657 * resume() triggers the event 'resumed'. The execution of TCL commands
658 * in the event handler causes the polling of targets. If the target has
659 * already halted for a breakpoint, polling will run the 'halted' event
660 * handler before the pending 'resumed' handler.
661 * Disable polling during resume() to guarantee the execution of handlers
662 * in the correct order.
663 */
664 bool save_poll = jtag_poll_get_enabled();
665 jtag_poll_set_enabled(false);
666 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
667 jtag_poll_set_enabled(save_poll);
668 if (retval != ERROR_OK)
669 return retval;
670
671 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
672
673 return retval;
674 }
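
/*
 * Usage sketch (illustrative only): resuming at the current PC while letting
 * the core skip a breakpoint planted at that address. The 'target' pointer is
 * assumed to refer to a halted, examined target.
 *
 *     int retval = target_resume(target, 1, 0, 1, 0);
 *     if (retval != ERROR_OK)
 *         LOG_ERROR("failed to resume target %s", target_name(target));
 */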
675
676 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
677 {
678 char buf[100];
679 int retval;
680 struct jim_nvp *n;
681 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
682 if (!n->name) {
683 LOG_ERROR("invalid reset mode");
684 return ERROR_FAIL;
685 }
686
687 struct target *target;
688 for (target = all_targets; target; target = target->next)
689 target_call_reset_callbacks(target, reset_mode);
690
691 /* disable polling during reset to make reset event scripts
692 * more predictable, i.e. dr/irscan & pathmove in events will
693 * not have JTAG operations injected into the middle of a sequence.
694 */
695 bool save_poll = jtag_poll_get_enabled();
696
697 jtag_poll_set_enabled(false);
698
699 sprintf(buf, "ocd_process_reset %s", n->name);
700 retval = Jim_Eval(cmd->ctx->interp, buf);
701
702 jtag_poll_set_enabled(save_poll);
703
704 if (retval != JIM_OK) {
705 Jim_MakeErrorMessage(cmd->ctx->interp);
706 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
707 return ERROR_FAIL;
708 }
709
710 /* We want any events to be processed before the prompt */
711 retval = target_call_timer_callbacks_now();
712
713 for (target = all_targets; target; target = target->next) {
714 target->type->check_reset(target);
715 target->running_alg = false;
716 }
717
718 return retval;
719 }
720
721 static int identity_virt2phys(struct target *target,
722 target_addr_t virtual, target_addr_t *physical)
723 {
724 *physical = virtual;
725 return ERROR_OK;
726 }
727
728 static int no_mmu(struct target *target, int *enabled)
729 {
730 *enabled = 0;
731 return ERROR_OK;
732 }
733
734 /**
735 * Reset the @c examined flag for the given target.
736 * Pure paranoia -- targets are zeroed on allocation.
737 */
738 static inline void target_reset_examined(struct target *target)
739 {
740 target->examined = false;
741 }
742
743 static int default_examine(struct target *target)
744 {
745 target_set_examined(target);
746 return ERROR_OK;
747 }
748
749 /* no check by default */
750 static int default_check_reset(struct target *target)
751 {
752 return ERROR_OK;
753 }
754
755 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
756 * Keep in sync */
757 int target_examine_one(struct target *target)
758 {
759 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
760
761 int retval = target->type->examine(target);
762 if (retval != ERROR_OK) {
763 target_reset_examined(target);
764 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
765 return retval;
766 }
767
768 target_set_examined(target);
769 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
770
771 return ERROR_OK;
772 }
773
774 static int jtag_enable_callback(enum jtag_event event, void *priv)
775 {
776 struct target *target = priv;
777
778 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
779 return ERROR_OK;
780
781 jtag_unregister_event_callback(jtag_enable_callback, target);
782
783 return target_examine_one(target);
784 }
785
786 /* Targets that correctly implement init + examine, i.e.
787 * no communication with target during init:
788 *
789 * XScale
790 */
791 int target_examine(void)
792 {
793 int retval = ERROR_OK;
794 struct target *target;
795
796 for (target = all_targets; target; target = target->next) {
797 /* defer examination, but don't skip it */
798 if (!target->tap->enabled) {
799 jtag_register_event_callback(jtag_enable_callback,
800 target);
801 continue;
802 }
803
804 if (target->defer_examine)
805 continue;
806
807 int retval2 = target_examine_one(target);
808 if (retval2 != ERROR_OK) {
809 LOG_WARNING("target %s examination failed", target_name(target));
810 retval = retval2;
811 }
812 }
813 return retval;
814 }
815
816 const char *target_type_name(struct target *target)
817 {
818 return target->type->name;
819 }
820
821 static int target_soft_reset_halt(struct target *target)
822 {
823 if (!target_was_examined(target)) {
824 LOG_ERROR("Target not examined yet");
825 return ERROR_FAIL;
826 }
827 if (!target->type->soft_reset_halt) {
828 LOG_ERROR("Target %s does not support soft_reset_halt",
829 target_name(target));
830 return ERROR_FAIL;
831 }
832 return target->type->soft_reset_halt(target);
833 }
834
835 /**
836 * Downloads a target-specific native code algorithm to the target,
837 * and executes it. Note that some targets may need to set up, enable,
838 * and tear down a breakpoint (hard or soft) to detect algorithm
839 * termination, while others may support lower overhead schemes where
840 * soft breakpoints embedded in the algorithm automatically terminate the
841 * algorithm.
842 *
843 * @param target used to run the algorithm
844 * @param num_mem_params
845 * @param mem_params
846 * @param num_reg_params
847 * @param reg_param
848 * @param entry_point
849 * @param exit_point
850 * @param timeout_ms
851 * @param arch_info target-specific description of the algorithm.
852 */
853 int target_run_algorithm(struct target *target,
854 int num_mem_params, struct mem_param *mem_params,
855 int num_reg_params, struct reg_param *reg_param,
856 target_addr_t entry_point, target_addr_t exit_point,
857 int timeout_ms, void *arch_info)
858 {
859 int retval = ERROR_FAIL;
860
861 if (!target_was_examined(target)) {
862 LOG_ERROR("Target not examined yet");
863 goto done;
864 }
865 if (!target->type->run_algorithm) {
866 LOG_ERROR("Target type '%s' does not support %s",
867 target_type_name(target), __func__);
868 goto done;
869 }
870
871 target->running_alg = true;
872 retval = target->type->run_algorithm(target,
873 num_mem_params, mem_params,
874 num_reg_params, reg_param,
875 entry_point, exit_point, timeout_ms, arch_info);
876 target->running_alg = false;
877
878 done:
879 return retval;
880 }
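
/*
 * Usage sketch (illustrative only): running a small downloaded routine with
 * one register parameter. The register name "r0", the entry/exit addresses
 * and the armv7m_info arch_info are assumptions made for the example, not
 * something this file mandates.
 *
 *     struct reg_param reg_params[1];
 *     init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
 *     buf_set_u32(reg_params[0].value, 0, 32, argument);
 *
 *     int retval = target_run_algorithm(target, 0, NULL,
 *             1, reg_params, entry_point, exit_point,
 *             1000, &armv7m_info);
 *
 *     if (retval == ERROR_OK)
 *         result = buf_get_u32(reg_params[0].value, 0, 32);
 *     destroy_reg_param(&reg_params[0]);
 */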
881
882 /**
883 * Executes a target-specific native code algorithm and leaves it running.
884 *
885 * @param target used to run the algorithm
886 * @param num_mem_params
887 * @param mem_params
888 * @param num_reg_params
889 * @param reg_params
890 * @param entry_point
891 * @param exit_point
892 * @param arch_info target-specific description of the algorithm.
893 */
894 int target_start_algorithm(struct target *target,
895 int num_mem_params, struct mem_param *mem_params,
896 int num_reg_params, struct reg_param *reg_params,
897 target_addr_t entry_point, target_addr_t exit_point,
898 void *arch_info)
899 {
900 int retval = ERROR_FAIL;
901
902 if (!target_was_examined(target)) {
903 LOG_ERROR("Target not examined yet");
904 goto done;
905 }
906 if (!target->type->start_algorithm) {
907 LOG_ERROR("Target type '%s' does not support %s",
908 target_type_name(target), __func__);
909 goto done;
910 }
911 if (target->running_alg) {
912 LOG_ERROR("Target is already running an algorithm");
913 goto done;
914 }
915
916 target->running_alg = true;
917 retval = target->type->start_algorithm(target,
918 num_mem_params, mem_params,
919 num_reg_params, reg_params,
920 entry_point, exit_point, arch_info);
921
922 done:
923 return retval;
924 }
925
926 /**
927 * Waits for an algorithm started with target_start_algorithm() to complete.
928 *
929 * @param target used to run the algorithm
930 * @param num_mem_params
931 * @param mem_params
932 * @param num_reg_params
933 * @param reg_params
934 * @param exit_point
935 * @param timeout_ms
936 * @param arch_info target-specific description of the algorithm.
937 */
938 int target_wait_algorithm(struct target *target,
939 int num_mem_params, struct mem_param *mem_params,
940 int num_reg_params, struct reg_param *reg_params,
941 target_addr_t exit_point, int timeout_ms,
942 void *arch_info)
943 {
944 int retval = ERROR_FAIL;
945
946 if (!target->type->wait_algorithm) {
947 LOG_ERROR("Target type '%s' does not support %s",
948 target_type_name(target), __func__);
949 goto done;
950 }
951 if (!target->running_alg) {
952 LOG_ERROR("Target is not running an algorithm");
953 goto done;
954 }
955
956 retval = target->type->wait_algorithm(target,
957 num_mem_params, mem_params,
958 num_reg_params, reg_params,
959 exit_point, timeout_ms, arch_info);
960 if (retval != ERROR_TARGET_TIMEOUT)
961 target->running_alg = false;
962
963 done:
964 return retval;
965 }
966
967 /**
968 * Streams data to a circular buffer on target intended for consumption by code
969 * running asynchronously on target.
970 *
971 * This is intended for applications where target-specific native code runs
972 * on the target, receives data from the circular buffer, does something with
973 * it (most likely writing it to a flash memory), and advances the circular
974 * buffer pointer.
975 *
976 * This assumes that the helper algorithm has already been loaded to the target,
977 * but has not been started yet. Given memory and register parameters are passed
978 * to the algorithm.
979 *
980 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
981 * following format:
982 *
983 * [buffer_start + 0, buffer_start + 4):
984 * Write Pointer address (aka head). Written and updated by this
985 * routine when new data is written to the circular buffer.
986 * [buffer_start + 4, buffer_start + 8):
987 * Read Pointer address (aka tail). Updated by code running on the
988 * target after it consumes data.
989 * [buffer_start + 8, buffer_start + buffer_size):
990 * Circular buffer contents.
991 *
992 * See contrib/loaders/flash/stm32f1x.S for an example.
993 *
994 * @param target used to run the algorithm
995 * @param buffer address on the host where data to be sent is located
996 * @param count number of blocks to send
997 * @param block_size size in bytes of each block
998 * @param num_mem_params count of memory-based params to pass to algorithm
999 * @param mem_params memory-based params to pass to algorithm
1000 * @param num_reg_params count of register-based params to pass to algorithm
1001 * @param reg_params register-based params to pass to algorithm
1002 * @param buffer_start address on the target of the circular buffer structure
1003 * @param buffer_size size of the circular buffer structure
1004 * @param entry_point address on the target to execute to start the algorithm
1005 * @param exit_point address at which to set a breakpoint to catch the
1006 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1007 * @param arch_info
1008 */
1009
1010 int target_run_flash_async_algorithm(struct target *target,
1011 const uint8_t *buffer, uint32_t count, int block_size,
1012 int num_mem_params, struct mem_param *mem_params,
1013 int num_reg_params, struct reg_param *reg_params,
1014 uint32_t buffer_start, uint32_t buffer_size,
1015 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1016 {
1017 int retval;
1018 int timeout = 0;
1019
1020 const uint8_t *buffer_orig = buffer;
1021
1022 /* Set up working area. First word is write pointer, second word is read pointer,
1023 * rest is fifo data area. */
1024 uint32_t wp_addr = buffer_start;
1025 uint32_t rp_addr = buffer_start + 4;
1026 uint32_t fifo_start_addr = buffer_start + 8;
1027 uint32_t fifo_end_addr = buffer_start + buffer_size;
1028
1029 uint32_t wp = fifo_start_addr;
1030 uint32_t rp = fifo_start_addr;
1031
1032 /* validate block_size is 2^n */
1033 assert(IS_PWR_OF_2(block_size));
1034
1035 retval = target_write_u32(target, wp_addr, wp);
1036 if (retval != ERROR_OK)
1037 return retval;
1038 retval = target_write_u32(target, rp_addr, rp);
1039 if (retval != ERROR_OK)
1040 return retval;
1041
1042 /* Start up algorithm on target and let it idle while writing the first chunk */
1043 retval = target_start_algorithm(target, num_mem_params, mem_params,
1044 num_reg_params, reg_params,
1045 entry_point,
1046 exit_point,
1047 arch_info);
1048
1049 if (retval != ERROR_OK) {
1050 LOG_ERROR("error starting target flash write algorithm");
1051 return retval;
1052 }
1053
1054 while (count > 0) {
1055
1056 retval = target_read_u32(target, rp_addr, &rp);
1057 if (retval != ERROR_OK) {
1058 LOG_ERROR("failed to get read pointer");
1059 break;
1060 }
1061
1062 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1063 (size_t) (buffer - buffer_orig), count, wp, rp);
1064
1065 if (rp == 0) {
1066 LOG_ERROR("flash write algorithm aborted by target");
1067 retval = ERROR_FLASH_OPERATION_FAILED;
1068 break;
1069 }
1070
1071 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1072 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1073 break;
1074 }
1075
1076 /* Count the number of bytes available in the fifo without
1077 * crossing the wrap around. Make sure to not fill it completely,
1078 * because that would make wp == rp and that's the empty condition. */
1079 uint32_t thisrun_bytes;
1080 if (rp > wp)
1081 thisrun_bytes = rp - wp - block_size;
1082 else if (rp > fifo_start_addr)
1083 thisrun_bytes = fifo_end_addr - wp;
1084 else
1085 thisrun_bytes = fifo_end_addr - wp - block_size;
1086
1087 if (thisrun_bytes == 0) {
1088 /* Throttle polling a bit if transfer is (much) faster than flash
1089 * programming. The exact delay shouldn't matter as long as it's
1090 * less than buffer size / flash speed. This is very unlikely to
1091 * run when using high latency connections such as USB. */
1092 alive_sleep(2);
1093
1094 /* to stop an infinite loop on some targets check and increment a timeout
1095 * this issue was observed on a stellaris using the new ICDI interface */
1096 if (timeout++ >= 2500) {
1097 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1098 return ERROR_FLASH_OPERATION_FAILED;
1099 }
1100 continue;
1101 }
1102
1103 /* reset our timeout */
1104 timeout = 0;
1105
1106 /* Limit to the amount of data we actually want to write */
1107 if (thisrun_bytes > count * block_size)
1108 thisrun_bytes = count * block_size;
1109
1110 /* Force end of large blocks to be word aligned */
1111 if (thisrun_bytes >= 16)
1112 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1113
1114 /* Write data to fifo */
1115 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1116 if (retval != ERROR_OK)
1117 break;
1118
1119 /* Update counters and wrap write pointer */
1120 buffer += thisrun_bytes;
1121 count -= thisrun_bytes / block_size;
1122 wp += thisrun_bytes;
1123 if (wp >= fifo_end_addr)
1124 wp = fifo_start_addr;
1125
1126 /* Store updated write pointer to target */
1127 retval = target_write_u32(target, wp_addr, wp);
1128 if (retval != ERROR_OK)
1129 break;
1130
1131 /* Avoid GDB timeouts */
1132 keep_alive();
1133 }
1134
1135 if (retval != ERROR_OK) {
1136 /* abort flash write algorithm on target */
1137 target_write_u32(target, wp_addr, 0);
1138 }
1139
1140 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1141 num_reg_params, reg_params,
1142 exit_point,
1143 10000,
1144 arch_info);
1145
1146 if (retval2 != ERROR_OK) {
1147 LOG_ERROR("error waiting for target flash write algorithm");
1148 retval = retval2;
1149 }
1150
1151 if (retval == ERROR_OK) {
1152 /* check if algorithm set rp = 0 after fifo writer loop finished */
1153 retval = target_read_u32(target, rp_addr, &rp);
1154 if (retval == ERROR_OK && rp == 0) {
1155 LOG_ERROR("flash write algorithm aborted by target");
1156 retval = ERROR_FLASH_OPERATION_FAILED;
1157 }
1158 }
1159
1160 return retval;
1161 }
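
/*
 * The control block used by target_run_flash_async_algorithm() above (and by
 * target_run_read_async_algorithm() below) can be pictured as the following
 * layout at 'buffer_start'. This is a sketch for illustration only; the code
 * works on raw 32-bit words rather than a struct:
 *
 *     struct async_fifo_header {
 *         uint32_t wp;   // write pointer, advanced by the producer side
 *         uint32_t rp;   // read pointer, advanced by the consumer side
 *     };                 // followed by buffer_size - 8 bytes of FIFO data
 *
 * The target-side loader signals an error by zeroing the pointer it owns
 * (rp on the write path, wp on the read path), which the loops check for.
 */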
1162
1163 int target_run_read_async_algorithm(struct target *target,
1164 uint8_t *buffer, uint32_t count, int block_size,
1165 int num_mem_params, struct mem_param *mem_params,
1166 int num_reg_params, struct reg_param *reg_params,
1167 uint32_t buffer_start, uint32_t buffer_size,
1168 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1169 {
1170 int retval;
1171 int timeout = 0;
1172
1173 const uint8_t *buffer_orig = buffer;
1174
1175 /* Set up working area. First word is write pointer, second word is read pointer,
1176 * rest is fifo data area. */
1177 uint32_t wp_addr = buffer_start;
1178 uint32_t rp_addr = buffer_start + 4;
1179 uint32_t fifo_start_addr = buffer_start + 8;
1180 uint32_t fifo_end_addr = buffer_start + buffer_size;
1181
1182 uint32_t wp = fifo_start_addr;
1183 uint32_t rp = fifo_start_addr;
1184
1185 /* validate block_size is 2^n */
1186 assert(IS_PWR_OF_2(block_size));
1187
1188 retval = target_write_u32(target, wp_addr, wp);
1189 if (retval != ERROR_OK)
1190 return retval;
1191 retval = target_write_u32(target, rp_addr, rp);
1192 if (retval != ERROR_OK)
1193 return retval;
1194
1195 /* Start up algorithm on target */
1196 retval = target_start_algorithm(target, num_mem_params, mem_params,
1197 num_reg_params, reg_params,
1198 entry_point,
1199 exit_point,
1200 arch_info);
1201
1202 if (retval != ERROR_OK) {
1203 LOG_ERROR("error starting target flash read algorithm");
1204 return retval;
1205 }
1206
1207 while (count > 0) {
1208 retval = target_read_u32(target, wp_addr, &wp);
1209 if (retval != ERROR_OK) {
1210 LOG_ERROR("failed to get write pointer");
1211 break;
1212 }
1213
1214 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1215 (size_t)(buffer - buffer_orig), count, wp, rp);
1216
1217 if (wp == 0) {
1218 LOG_ERROR("flash read algorithm aborted by target");
1219 retval = ERROR_FLASH_OPERATION_FAILED;
1220 break;
1221 }
1222
1223 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1224 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1225 break;
1226 }
1227
1228 /* Count the number of bytes available in the fifo without
1229 * crossing the wrap around. */
1230 uint32_t thisrun_bytes;
1231 if (wp >= rp)
1232 thisrun_bytes = wp - rp;
1233 else
1234 thisrun_bytes = fifo_end_addr - rp;
1235
1236 if (thisrun_bytes == 0) {
1237 /* Throttle polling a bit if transfer is (much) faster than flash
1238 * reading. The exact delay shouldn't matter as long as it's
1239 * less than buffer size / flash speed. This is very unlikely to
1240 * run when using high latency connections such as USB. */
1241 alive_sleep(2);
1242
1243 /* to stop an infinite loop on some targets check and increment a timeout
1244 * this issue was observed on a stellaris using the new ICDI interface */
1245 if (timeout++ >= 2500) {
1246 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1247 return ERROR_FLASH_OPERATION_FAILED;
1248 }
1249 continue;
1250 }
1251
1252 /* Reset our timeout */
1253 timeout = 0;
1254
1255 /* Limit to the amount of data we actually want to read */
1256 if (thisrun_bytes > count * block_size)
1257 thisrun_bytes = count * block_size;
1258
1259 /* Force end of large blocks to be word aligned */
1260 if (thisrun_bytes >= 16)
1261 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1262
1263 /* Read data from fifo */
1264 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1265 if (retval != ERROR_OK)
1266 break;
1267
1268 /* Update counters and wrap read pointer */
1269 buffer += thisrun_bytes;
1270 count -= thisrun_bytes / block_size;
1271 rp += thisrun_bytes;
1272 if (rp >= fifo_end_addr)
1273 rp = fifo_start_addr;
1274
1275 /* Store updated read pointer to target */
1276 retval = target_write_u32(target, rp_addr, rp);
1277 if (retval != ERROR_OK)
1278 break;
1279
1280 /* Avoid GDB timeouts */
1281 keep_alive();
1282
1283 }
1284
1285 if (retval != ERROR_OK) {
1286 /* abort flash read algorithm on target */
1287 target_write_u32(target, rp_addr, 0);
1288 }
1289
1290 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1291 num_reg_params, reg_params,
1292 exit_point,
1293 10000,
1294 arch_info);
1295
1296 if (retval2 != ERROR_OK) {
1297 LOG_ERROR("error waiting for target flash read algorithm");
1298 retval = retval2;
1299 }
1300
1301 if (retval == ERROR_OK) {
1302 /* check if algorithm set wp = 0 after fifo writer loop finished */
1303 retval = target_read_u32(target, wp_addr, &wp);
1304 if (retval == ERROR_OK && wp == 0) {
1305 LOG_ERROR("flash read algorithm aborted by target");
1306 retval = ERROR_FLASH_OPERATION_FAILED;
1307 }
1308 }
1309
1310 return retval;
1311 }
1312
1313 int target_read_memory(struct target *target,
1314 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1315 {
1316 if (!target_was_examined(target)) {
1317 LOG_ERROR("Target not examined yet");
1318 return ERROR_FAIL;
1319 }
1320 if (!target->type->read_memory) {
1321 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1322 return ERROR_FAIL;
1323 }
1324 return target->type->read_memory(target, address, size, count, buffer);
1325 }
1326
1327 int target_read_phys_memory(struct target *target,
1328 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1329 {
1330 if (!target_was_examined(target)) {
1331 LOG_ERROR("Target not examined yet");
1332 return ERROR_FAIL;
1333 }
1334 if (!target->type->read_phys_memory) {
1335 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1336 return ERROR_FAIL;
1337 }
1338 return target->type->read_phys_memory(target, address, size, count, buffer);
1339 }
1340
1341 int target_write_memory(struct target *target,
1342 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1343 {
1344 if (!target_was_examined(target)) {
1345 LOG_ERROR("Target not examined yet");
1346 return ERROR_FAIL;
1347 }
1348 if (!target->type->write_memory) {
1349 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1350 return ERROR_FAIL;
1351 }
1352 return target->type->write_memory(target, address, size, count, buffer);
1353 }
1354
1355 int target_write_phys_memory(struct target *target,
1356 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1357 {
1358 if (!target_was_examined(target)) {
1359 LOG_ERROR("Target not examined yet");
1360 return ERROR_FAIL;
1361 }
1362 if (!target->type->write_phys_memory) {
1363 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1364 return ERROR_FAIL;
1365 }
1366 return target->type->write_phys_memory(target, address, size, count, buffer);
1367 }
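
/*
 * Usage sketch (illustrative only): the wrappers above are the generic entry
 * points for memory access; 'size' is the access width in bytes and 'count'
 * the number of accesses. Reading four 32-bit words from 'addr' into a host
 * buffer, assuming an examined target:
 *
 *     uint8_t data[16];
 *     int retval = target_read_memory(target, addr, 4, 4, data);
 *     if (retval != ERROR_OK)
 *         LOG_ERROR("read at " TARGET_ADDR_FMT " failed", addr);
 */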
1368
1369 int target_add_breakpoint(struct target *target,
1370 struct breakpoint *breakpoint)
1371 {
1372 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1373 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1374 return ERROR_TARGET_NOT_HALTED;
1375 }
1376 return target->type->add_breakpoint(target, breakpoint);
1377 }
1378
1379 int target_add_context_breakpoint(struct target *target,
1380 struct breakpoint *breakpoint)
1381 {
1382 if (target->state != TARGET_HALTED) {
1383 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1384 return ERROR_TARGET_NOT_HALTED;
1385 }
1386 return target->type->add_context_breakpoint(target, breakpoint);
1387 }
1388
1389 int target_add_hybrid_breakpoint(struct target *target,
1390 struct breakpoint *breakpoint)
1391 {
1392 if (target->state != TARGET_HALTED) {
1393 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1394 return ERROR_TARGET_NOT_HALTED;
1395 }
1396 return target->type->add_hybrid_breakpoint(target, breakpoint);
1397 }
1398
1399 int target_remove_breakpoint(struct target *target,
1400 struct breakpoint *breakpoint)
1401 {
1402 return target->type->remove_breakpoint(target, breakpoint);
1403 }
1404
1405 int target_add_watchpoint(struct target *target,
1406 struct watchpoint *watchpoint)
1407 {
1408 if (target->state != TARGET_HALTED) {
1409 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1410 return ERROR_TARGET_NOT_HALTED;
1411 }
1412 return target->type->add_watchpoint(target, watchpoint);
1413 }
1414 int target_remove_watchpoint(struct target *target,
1415 struct watchpoint *watchpoint)
1416 {
1417 return target->type->remove_watchpoint(target, watchpoint);
1418 }
1419 int target_hit_watchpoint(struct target *target,
1420 struct watchpoint **hit_watchpoint)
1421 {
1422 if (target->state != TARGET_HALTED) {
1423 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1424 return ERROR_TARGET_NOT_HALTED;
1425 }
1426
1427 if (!target->type->hit_watchpoint) {
1428 /* For backward compatibility, if hit_watchpoint is not implemented,
1429 * return ERROR_FAIL so that gdb_server does not use the bogus
1430 * information. */
1431 return ERROR_FAIL;
1432 }
1433
1434 return target->type->hit_watchpoint(target, hit_watchpoint);
1435 }
1436
1437 const char *target_get_gdb_arch(struct target *target)
1438 {
1439 if (!target->type->get_gdb_arch)
1440 return NULL;
1441 return target->type->get_gdb_arch(target);
1442 }
1443
1444 int target_get_gdb_reg_list(struct target *target,
1445 struct reg **reg_list[], int *reg_list_size,
1446 enum target_register_class reg_class)
1447 {
1448 int result = ERROR_FAIL;
1449
1450 if (!target_was_examined(target)) {
1451 LOG_ERROR("Target not examined yet");
1452 goto done;
1453 }
1454
1455 result = target->type->get_gdb_reg_list(target, reg_list,
1456 reg_list_size, reg_class);
1457
1458 done:
1459 if (result != ERROR_OK) {
1460 *reg_list = NULL;
1461 *reg_list_size = 0;
1462 }
1463 return result;
1464 }
1465
1466 int target_get_gdb_reg_list_noread(struct target *target,
1467 struct reg **reg_list[], int *reg_list_size,
1468 enum target_register_class reg_class)
1469 {
1470 if (target->type->get_gdb_reg_list_noread &&
1471 target->type->get_gdb_reg_list_noread(target, reg_list,
1472 reg_list_size, reg_class) == ERROR_OK)
1473 return ERROR_OK;
1474 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1475 }
1476
1477 bool target_supports_gdb_connection(struct target *target)
1478 {
1479 /*
1480 * exclude all the targets that don't provide get_gdb_reg_list
1481 * or that have explicit gdb_max_connection == 0
1482 */
1483 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1484 }
1485
1486 int target_step(struct target *target,
1487 int current, target_addr_t address, int handle_breakpoints)
1488 {
1489 int retval;
1490
1491 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1492
1493 retval = target->type->step(target, current, address, handle_breakpoints);
1494 if (retval != ERROR_OK)
1495 return retval;
1496
1497 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1498
1499 return retval;
1500 }
1501
1502 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1503 {
1504 if (target->state != TARGET_HALTED) {
1505 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1506 return ERROR_TARGET_NOT_HALTED;
1507 }
1508 return target->type->get_gdb_fileio_info(target, fileio_info);
1509 }
1510
1511 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1512 {
1513 if (target->state != TARGET_HALTED) {
1514 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1515 return ERROR_TARGET_NOT_HALTED;
1516 }
1517 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1518 }
1519
1520 target_addr_t target_address_max(struct target *target)
1521 {
1522 unsigned bits = target_address_bits(target);
1523 if (sizeof(target_addr_t) * 8 == bits)
1524 return (target_addr_t) -1;
1525 else
1526 return (((target_addr_t) 1) << bits) - 1;
1527 }
1528
1529 unsigned target_address_bits(struct target *target)
1530 {
1531 if (target->type->address_bits)
1532 return target->type->address_bits(target);
1533 return 32;
1534 }
1535
1536 unsigned int target_data_bits(struct target *target)
1537 {
1538 if (target->type->data_bits)
1539 return target->type->data_bits(target);
1540 return 32;
1541 }
1542
1543 static int target_profiling(struct target *target, uint32_t *samples,
1544 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1545 {
1546 return target->type->profiling(target, samples, max_num_samples,
1547 num_samples, seconds);
1548 }
1549
1550 static int handle_target(void *priv);
1551
1552 static int target_init_one(struct command_context *cmd_ctx,
1553 struct target *target)
1554 {
1555 target_reset_examined(target);
1556
1557 struct target_type *type = target->type;
1558 if (!type->examine)
1559 type->examine = default_examine;
1560
1561 if (!type->check_reset)
1562 type->check_reset = default_check_reset;
1563
1564 assert(type->init_target);
1565
1566 int retval = type->init_target(cmd_ctx, target);
1567 if (retval != ERROR_OK) {
1568 LOG_ERROR("target '%s' init failed", target_name(target));
1569 return retval;
1570 }
1571
1572 /* Sanity-check MMU support ... stub in what we must, to help
1573 * implement it in stages, but warn if we need to do so.
1574 */
1575 if (type->mmu) {
1576 if (!type->virt2phys) {
1577 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1578 type->virt2phys = identity_virt2phys;
1579 }
1580 } else {
1581 /* Make sure no-MMU targets all behave the same: make no
1582 * distinction between physical and virtual addresses, and
1583 * ensure that virt2phys() is always an identity mapping.
1584 */
1585 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1586 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1587
1588 type->mmu = no_mmu;
1589 type->write_phys_memory = type->write_memory;
1590 type->read_phys_memory = type->read_memory;
1591 type->virt2phys = identity_virt2phys;
1592 }
1593
1594 if (!target->type->read_buffer)
1595 target->type->read_buffer = target_read_buffer_default;
1596
1597 if (!target->type->write_buffer)
1598 target->type->write_buffer = target_write_buffer_default;
1599
1600 if (!target->type->get_gdb_fileio_info)
1601 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1602
1603 if (!target->type->gdb_fileio_end)
1604 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1605
1606 if (!target->type->profiling)
1607 target->type->profiling = target_profiling_default;
1608
1609 return ERROR_OK;
1610 }
1611
1612 static int target_init(struct command_context *cmd_ctx)
1613 {
1614 struct target *target;
1615 int retval;
1616
1617 for (target = all_targets; target; target = target->next) {
1618 retval = target_init_one(cmd_ctx, target);
1619 if (retval != ERROR_OK)
1620 return retval;
1621 }
1622
1623 if (!all_targets)
1624 return ERROR_OK;
1625
1626 retval = target_register_user_commands(cmd_ctx);
1627 if (retval != ERROR_OK)
1628 return retval;
1629
1630 retval = target_register_timer_callback(&handle_target,
1631 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1632 if (retval != ERROR_OK)
1633 return retval;
1634
1635 return ERROR_OK;
1636 }
1637
1638 COMMAND_HANDLER(handle_target_init_command)
1639 {
1640 int retval;
1641
1642 if (CMD_ARGC != 0)
1643 return ERROR_COMMAND_SYNTAX_ERROR;
1644
1645 static bool target_initialized;
1646 if (target_initialized) {
1647 LOG_INFO("'target init' has already been called");
1648 return ERROR_OK;
1649 }
1650 target_initialized = true;
1651
1652 retval = command_run_line(CMD_CTX, "init_targets");
1653 if (retval != ERROR_OK)
1654 return retval;
1655
1656 retval = command_run_line(CMD_CTX, "init_target_events");
1657 if (retval != ERROR_OK)
1658 return retval;
1659
1660 retval = command_run_line(CMD_CTX, "init_board");
1661 if (retval != ERROR_OK)
1662 return retval;
1663
1664 LOG_DEBUG("Initializing targets...");
1665 return target_init(CMD_CTX);
1666 }
1667
1668 int target_register_event_callback(int (*callback)(struct target *target,
1669 enum target_event event, void *priv), void *priv)
1670 {
1671 struct target_event_callback **callbacks_p = &target_event_callbacks;
1672
1673 if (!callback)
1674 return ERROR_COMMAND_SYNTAX_ERROR;
1675
1676 if (*callbacks_p) {
1677 while ((*callbacks_p)->next)
1678 callbacks_p = &((*callbacks_p)->next);
1679 callbacks_p = &((*callbacks_p)->next);
1680 }
1681
1682 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1683 (*callbacks_p)->callback = callback;
1684 (*callbacks_p)->priv = priv;
1685 (*callbacks_p)->next = NULL;
1686
1687 return ERROR_OK;
1688 }
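
/*
 * Usage sketch (illustrative only): a minimal event callback that logs halts.
 * The callback signature matches the registration function above; the
 * 'my_halt_logger' name is made up for the example.
 *
 *     static int my_halt_logger(struct target *target,
 *             enum target_event event, void *priv)
 *     {
 *         if (event == TARGET_EVENT_HALTED)
 *             LOG_INFO("target %s halted", target_name(target));
 *         return ERROR_OK;
 *     }
 *
 *     ...
 *     target_register_event_callback(my_halt_logger, NULL);
 */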
1689
1690 int target_register_reset_callback(int (*callback)(struct target *target,
1691 enum target_reset_mode reset_mode, void *priv), void *priv)
1692 {
1693 struct target_reset_callback *entry;
1694
1695 if (!callback)
1696 return ERROR_COMMAND_SYNTAX_ERROR;
1697
1698 entry = malloc(sizeof(struct target_reset_callback));
1699 if (!entry) {
1700 LOG_ERROR("error allocating buffer for reset callback entry");
1701 return ERROR_COMMAND_SYNTAX_ERROR;
1702 }
1703
1704 entry->callback = callback;
1705 entry->priv = priv;
1706 list_add(&entry->list, &target_reset_callback_list);
1707
1708
1709 return ERROR_OK;
1710 }
1711
1712 int target_register_trace_callback(int (*callback)(struct target *target,
1713 size_t len, uint8_t *data, void *priv), void *priv)
1714 {
1715 struct target_trace_callback *entry;
1716
1717 if (!callback)
1718 return ERROR_COMMAND_SYNTAX_ERROR;
1719
1720 entry = malloc(sizeof(struct target_trace_callback));
1721 if (!entry) {
1722 LOG_ERROR("error allocating buffer for trace callback entry");
1723 return ERROR_COMMAND_SYNTAX_ERROR;
1724 }
1725
1726 entry->callback = callback;
1727 entry->priv = priv;
1728 list_add(&entry->list, &target_trace_callback_list);
1729
1731 return ERROR_OK;
1732 }
1733
1734 int target_register_timer_callback(int (*callback)(void *priv),
1735 unsigned int time_ms, enum target_timer_type type, void *priv)
1736 {
1737 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1738
1739 if (!callback)
1740 return ERROR_COMMAND_SYNTAX_ERROR;
1741
1742 if (*callbacks_p) {
1743 while ((*callbacks_p)->next)
1744 callbacks_p = &((*callbacks_p)->next);
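/* advance to the last entry's 'next' field so the new timer callback is appended at the tail */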
1745 callbacks_p = &((*callbacks_p)->next);
1746 }
1747
1748 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1749 (*callbacks_p)->callback = callback;
1750 (*callbacks_p)->type = type;
1751 (*callbacks_p)->time_ms = time_ms;
1752 (*callbacks_p)->removed = false;
1753
1754 (*callbacks_p)->when = timeval_ms() + time_ms;
1755 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1756
1757 (*callbacks_p)->priv = priv;
1758 (*callbacks_p)->next = NULL;
1759
1760 return ERROR_OK;
1761 }
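/* Illustrative registration of a 100ms periodic timer callback (a sketch;
 * the handler name is hypothetical). target_init() above uses the same call
 * to register handle_target():
 *
 *   static int my_tick(void *priv)
 *   {
 *       return ERROR_OK;
 *   }
 *
 *   target_register_timer_callback(my_tick, 100,
 *       TARGET_TIMER_TYPE_PERIODIC, NULL);
 */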
1762
1763 int target_unregister_event_callback(int (*callback)(struct target *target,
1764 enum target_event event, void *priv), void *priv)
1765 {
1766 struct target_event_callback **p = &target_event_callbacks;
1767 struct target_event_callback *c = target_event_callbacks;
1768
1769 if (!callback)
1770 return ERROR_COMMAND_SYNTAX_ERROR;
1771
1772 while (c) {
1773 struct target_event_callback *next = c->next;
1774 if ((c->callback == callback) && (c->priv == priv)) {
1775 *p = next;
1776 free(c);
1777 return ERROR_OK;
1778 } else
1779 p = &(c->next);
1780 c = next;
1781 }
1782
1783 return ERROR_OK;
1784 }
1785
1786 int target_unregister_reset_callback(int (*callback)(struct target *target,
1787 enum target_reset_mode reset_mode, void *priv), void *priv)
1788 {
1789 struct target_reset_callback *entry;
1790
1791 if (!callback)
1792 return ERROR_COMMAND_SYNTAX_ERROR;
1793
1794 list_for_each_entry(entry, &target_reset_callback_list, list) {
1795 if (entry->callback == callback && entry->priv == priv) {
1796 list_del(&entry->list);
1797 free(entry);
1798 break;
1799 }
1800 }
1801
1802 return ERROR_OK;
1803 }
1804
1805 int target_unregister_trace_callback(int (*callback)(struct target *target,
1806 size_t len, uint8_t *data, void *priv), void *priv)
1807 {
1808 struct target_trace_callback *entry;
1809
1810 if (!callback)
1811 return ERROR_COMMAND_SYNTAX_ERROR;
1812
1813 list_for_each_entry(entry, &target_trace_callback_list, list) {
1814 if (entry->callback == callback && entry->priv == priv) {
1815 list_del(&entry->list);
1816 free(entry);
1817 break;
1818 }
1819 }
1820
1821 return ERROR_OK;
1822 }
1823
1824 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1825 {
1826 if (!callback)
1827 return ERROR_COMMAND_SYNTAX_ERROR;
1828
1829 for (struct target_timer_callback *c = target_timer_callbacks;
1830 c; c = c->next) {
1831 if ((c->callback == callback) && (c->priv == priv)) {
1832 c->removed = true;
1833 return ERROR_OK;
1834 }
1835 }
1836
1837 return ERROR_FAIL;
1838 }
1839
1840 int target_call_event_callbacks(struct target *target, enum target_event event)
1841 {
1842 struct target_event_callback *callback = target_event_callbacks;
1843 struct target_event_callback *next_callback;
1844
1845 if (event == TARGET_EVENT_HALTED) {
1846 /* deliver the TARGET_EVENT_GDB_HALT event first, before the generic halted event */
1847 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1848 }
1849
1850 LOG_DEBUG("target event %i (%s) for core %s", event,
1851 target_event_name(event),
1852 target_name(target));
1853
1854 target_handle_event(target, event);
1855
1856 while (callback) {
1857 next_callback = callback->next;
1858 callback->callback(target, event, callback->priv);
1859 callback = next_callback;
1860 }
1861
1862 return ERROR_OK;
1863 }
1864
1865 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1866 {
1867 struct target_reset_callback *callback;
1868
1869 LOG_DEBUG("target reset %i (%s)", reset_mode,
1870 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1871
1872 list_for_each_entry(callback, &target_reset_callback_list, list)
1873 callback->callback(target, reset_mode, callback->priv);
1874
1875 return ERROR_OK;
1876 }
1877
1878 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1879 {
1880 struct target_trace_callback *callback;
1881
1882 list_for_each_entry(callback, &target_trace_callback_list, list)
1883 callback->callback(target, len, data, callback->priv);
1884
1885 return ERROR_OK;
1886 }
1887
1888 static int target_timer_callback_periodic_restart(
1889 struct target_timer_callback *cb, int64_t *now)
1890 {
1891 cb->when = *now + cb->time_ms;
1892 return ERROR_OK;
1893 }
1894
1895 static int target_call_timer_callback(struct target_timer_callback *cb,
1896 int64_t *now)
1897 {
1898 cb->callback(cb->priv);
1899
1900 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1901 return target_timer_callback_periodic_restart(cb, now);
1902
1903 return target_unregister_timer_callback(cb->callback, cb->priv);
1904 }
1905
1906 static int target_call_timer_callbacks_check_time(int checktime)
1907 {
1908 static bool callback_processing;
1909
1910 /* Do not allow nesting */
1911 if (callback_processing)
1912 return ERROR_OK;
1913
1914 callback_processing = true;
1915
1916 keep_alive();
1917
1918 int64_t now = timeval_ms();
1919
1920 /* Initialize to a default value that's a ways into the future.
1921 * The loop below will make it closer to now if there are
1922 * callbacks that want to be called sooner. */
1923 target_timer_next_event_value = now + 1000;
1924
1925 /* Store an address of the place containing a pointer to the
1926 * next item; initially, that's a standalone "root of the
1927 * list" variable. */
1928 struct target_timer_callback **callback = &target_timer_callbacks;
1929 while (callback && *callback) {
1930 if ((*callback)->removed) {
1931 struct target_timer_callback *p = *callback;
1932 *callback = (*callback)->next;
1933 free(p);
1934 continue;
1935 }
1936
1937 bool call_it = (*callback)->callback &&
1938 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1939 now >= (*callback)->when);
1940
1941 if (call_it)
1942 target_call_timer_callback(*callback, &now);
1943
1944 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1945 target_timer_next_event_value = (*callback)->when;
1946
1947 callback = &(*callback)->next;
1948 }
1949
1950 callback_processing = false;
1951 return ERROR_OK;
1952 }
1953
1954 int target_call_timer_callbacks()
1955 {
1956 return target_call_timer_callbacks_check_time(1);
1957 }
1958
1959 /* invoke periodic callbacks immediately */
1960 int target_call_timer_callbacks_now()
1961 {
1962 return target_call_timer_callbacks_check_time(0);
1963 }
1964
1965 int64_t target_timer_next_event(void)
1966 {
1967 return target_timer_next_event_value;
1968 }
1969
1970 /* Prints the working area layout for debug purposes */
1971 static void print_wa_layout(struct target *target)
1972 {
1973 struct working_area *c = target->working_areas;
1974
1975 while (c) {
1976 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1977 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1978 c->address, c->address + c->size - 1, c->size);
1979 c = c->next;
1980 }
1981 }
1982
1983 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1984 static void target_split_working_area(struct working_area *area, uint32_t size)
1985 {
1986 assert(area->free); /* Shouldn't split an allocated area */
1987 assert(size <= area->size); /* Caller should guarantee this */
1988
1989 /* Split only if not already the right size */
1990 if (size < area->size) {
1991 struct working_area *new_wa = malloc(sizeof(*new_wa));
1992
1993 if (!new_wa)
1994 return;
1995
1996 new_wa->next = area->next;
1997 new_wa->size = area->size - size;
1998 new_wa->address = area->address + size;
1999 new_wa->backup = NULL;
2000 new_wa->user = NULL;
2001 new_wa->free = true;
2002
2003 area->next = new_wa;
2004 area->size = size;
2005
2006 /* If backup memory was allocated to this area, it has the wrong size
2007 * now so free it and it will be reallocated if/when needed */
2008 free(area->backup);
2009 area->backup = NULL;
2010 }
2011 }
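/* Worked example (illustrative): splitting a free 0x1000-byte area at
 * 0x20000000 with size = 0x100 trims the original entry to
 * 0x20000000-0x200000ff and inserts a new free entry covering
 * 0x20000100-0x20000fff right after it. */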
2012
2013 /* Merge all adjacent free areas into one */
2014 static void target_merge_working_areas(struct target *target)
2015 {
2016 struct working_area *c = target->working_areas;
2017
2018 while (c && c->next) {
2019 assert(c->next->address == c->address + c->size); /* This is an invariant */
2020
2021 /* Find two adjacent free areas */
2022 if (c->free && c->next->free) {
2023 /* Merge the last into the first */
2024 c->size += c->next->size;
2025
2026 /* Remove the last */
2027 struct working_area *to_be_freed = c->next;
2028 c->next = c->next->next;
2029 free(to_be_freed->backup);
2030 free(to_be_freed);
2031
2032 /* If backup memory was allocated to the remaining area, it has
2033 * the wrong size now */
2034 free(c->backup);
2035 c->backup = NULL;
2036 } else {
2037 c = c->next;
2038 }
2039 }
2040 }
2041
2042 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2043 {
2044 /* Reevaluate working area address based on MMU state */
2045 if (!target->working_areas) {
2046 int retval;
2047 int enabled;
2048
2049 retval = target->type->mmu(target, &enabled);
2050 if (retval != ERROR_OK)
2051 return retval;
2052
2053 if (!enabled) {
2054 if (target->working_area_phys_spec) {
2055 LOG_DEBUG("MMU disabled, using physical "
2056 "address for working memory " TARGET_ADDR_FMT,
2057 target->working_area_phys);
2058 target->working_area = target->working_area_phys;
2059 } else {
2060 LOG_ERROR("No working memory available. "
2061 "Specify -work-area-phys to target.");
2062 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2063 }
2064 } else {
2065 if (target->working_area_virt_spec) {
2066 LOG_DEBUG("MMU enabled, using virtual "
2067 "address for working memory " TARGET_ADDR_FMT,
2068 target->working_area_virt);
2069 target->working_area = target->working_area_virt;
2070 } else {
2071 LOG_ERROR("No working memory available. "
2072 "Specify -work-area-virt to target.");
2073 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2074 }
2075 }
2076
2077 /* Set up initial working area on first call */
2078 struct working_area *new_wa = malloc(sizeof(*new_wa));
2079 if (new_wa) {
2080 new_wa->next = NULL;
2081 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2082 new_wa->address = target->working_area;
2083 new_wa->backup = NULL;
2084 new_wa->user = NULL;
2085 new_wa->free = true;
2086 }
2087
2088 target->working_areas = new_wa;
2089 }
2090
2091 /* only allocate in multiples of 4 bytes */
2092 if (size % 4)
2093 size = (size + 3) & (~3UL);
2094
2095 struct working_area *c = target->working_areas;
2096
2097 /* Find the first large enough working area */
2098 while (c) {
2099 if (c->free && c->size >= size)
2100 break;
2101 c = c->next;
2102 }
2103
2104 if (!c)
2105 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2106
2107 /* Split the working area into the requested size */
2108 target_split_working_area(c, size);
2109
2110 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2111 size, c->address);
2112
2113 if (target->backup_working_area) {
2114 if (!c->backup) {
2115 c->backup = malloc(c->size);
2116 if (!c->backup)
2117 return ERROR_FAIL;
2118 }
2119
2120 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2121 if (retval != ERROR_OK)
2122 return retval;
2123 }
2124
2125 /* mark as used, and return the new (reused) area */
2126 c->free = false;
2127 *area = c;
2128
2129 /* user pointer */
2130 c->user = area;
2131
2132 print_wa_layout(target);
2133
2134 return ERROR_OK;
2135 }
2136
2137 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2138 {
2139 int retval;
2140
2141 retval = target_alloc_working_area_try(target, size, area);
2142 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2143 LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
2144 return retval;
2146 }
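/* Typical call pattern (a sketch of how algorithm/flash code uses the
 * working area API; buffer names are illustrative):
 *
 *   struct working_area *wa = NULL;
 *   if (target_alloc_working_area(target, buf_size, &wa) != ERROR_OK)
 *       return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *   retval = target_write_buffer(target, wa->address, buf_size, code);
 *   ...
 *   target_free_working_area(target, wa);
 */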
2147
2148 static int target_restore_working_area(struct target *target, struct working_area *area)
2149 {
2150 int retval = ERROR_OK;
2151
2152 if (target->backup_working_area && area->backup) {
2153 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2154 if (retval != ERROR_OK)
2155 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2156 area->size, area->address);
2157 }
2158
2159 return retval;
2160 }
2161
2162 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2163 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2164 {
2165 if (!area || area->free)
2166 return ERROR_OK;
2167
2168 int retval = ERROR_OK;
2169 if (restore) {
2170 retval = target_restore_working_area(target, area);
2171 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2172 if (retval != ERROR_OK)
2173 return retval;
2174 }
2175
2176 area->free = true;
2177
2178 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2179 area->size, area->address);
2180
2181 /* mark user pointer invalid */
2182 /* TODO: Is this really safe? It points to some previous caller's memory.
2183 * How could we know that the area pointer is still in that place and not
2184 * some other vital data? What's the purpose of this, anyway? */
2185 *area->user = NULL;
2186 area->user = NULL;
2187
2188 target_merge_working_areas(target);
2189
2190 print_wa_layout(target);
2191
2192 return retval;
2193 }
2194
2195 int target_free_working_area(struct target *target, struct working_area *area)
2196 {
2197 return target_free_working_area_restore(target, area, 1);
2198 }
2199
2200 /* Free resources and restore memory. If restoring memory fails,
2201 * the resources are freed anyway.
2202 */
2203 static void target_free_all_working_areas_restore(struct target *target, int restore)
2204 {
2205 struct working_area *c = target->working_areas;
2206
2207 LOG_DEBUG("freeing all working areas");
2208
2209 /* Loop through all areas, restoring the allocated ones and marking them as free */
2210 while (c) {
2211 if (!c->free) {
2212 if (restore)
2213 target_restore_working_area(target, c);
2214 c->free = true;
2215 *c->user = NULL; /* Same as above */
2216 c->user = NULL;
2217 }
2218 c = c->next;
2219 }
2220
2221 /* Run a merge pass to combine all areas into one */
2222 target_merge_working_areas(target);
2223
2224 print_wa_layout(target);
2225 }
2226
2227 void target_free_all_working_areas(struct target *target)
2228 {
2229 target_free_all_working_areas_restore(target, 1);
2230
2231 /* Now we have none or only one working area marked as free */
2232 if (target->working_areas) {
2233 /* Free the last one to allow on-the-fly moving and resizing */
2234 free(target->working_areas->backup);
2235 free(target->working_areas);
2236 target->working_areas = NULL;
2237 }
2238 }
2239
2240 /* Find the largest number of bytes that can be allocated */
2241 uint32_t target_get_working_area_avail(struct target *target)
2242 {
2243 struct working_area *c = target->working_areas;
2244 uint32_t max_size = 0;
2245
2246 if (!c)
2247 return target->working_area_size;
2248
2249 while (c) {
2250 if (c->free && max_size < c->size)
2251 max_size = c->size;
2252
2253 c = c->next;
2254 }
2255
2256 return max_size;
2257 }
2258
2259 static void target_destroy(struct target *target)
2260 {
2261 if (target->type->deinit_target)
2262 target->type->deinit_target(target);
2263
2264 if (target->semihosting)
2265 free(target->semihosting->basedir);
2266 free(target->semihosting);
2267
2268 jtag_unregister_event_callback(jtag_enable_callback, target);
2269
2270 struct target_event_action *teap = target->event_action;
2271 while (teap) {
2272 struct target_event_action *next = teap->next;
2273 Jim_DecrRefCount(teap->interp, teap->body);
2274 free(teap);
2275 teap = next;
2276 }
2277
2278 target_free_all_working_areas(target);
2279
2280 /* release the target's SMP list */
2281 if (target->smp) {
2282 struct target_list *head, *tmp;
2283
2284 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2285 list_del(&head->lh);
2286 head->target->smp = 0;
2287 free(head);
2288 }
2289 if (target->smp_targets != &empty_smp_targets)
2290 free(target->smp_targets);
2291 target->smp = 0;
2292 }
2293
2294 rtos_destroy(target);
2295
2296 free(target->gdb_port_override);
2297 free(target->type);
2298 free(target->trace_info);
2299 free(target->fileio_info);
2300 free(target->cmd_name);
2301 free(target);
2302 }
2303
2304 void target_quit(void)
2305 {
2306 struct target_event_callback *pe = target_event_callbacks;
2307 while (pe) {
2308 struct target_event_callback *t = pe->next;
2309 free(pe);
2310 pe = t;
2311 }
2312 target_event_callbacks = NULL;
2313
2314 struct target_timer_callback *pt = target_timer_callbacks;
2315 while (pt) {
2316 struct target_timer_callback *t = pt->next;
2317 free(pt);
2318 pt = t;
2319 }
2320 target_timer_callbacks = NULL;
2321
2322 for (struct target *target = all_targets; target;) {
2323 struct target *tmp;
2324
2325 tmp = target->next;
2326 target_destroy(target);
2327 target = tmp;
2328 }
2329
2330 all_targets = NULL;
2331 }
2332
2333 int target_arch_state(struct target *target)
2334 {
2335 int retval;
2336 if (!target) {
2337 LOG_WARNING("No target has been configured");
2338 return ERROR_OK;
2339 }
2340
2341 if (target->state != TARGET_HALTED)
2342 return ERROR_OK;
2343
2344 retval = target->type->arch_state(target);
2345 return retval;
2346 }
2347
2348 static int target_get_gdb_fileio_info_default(struct target *target,
2349 struct gdb_fileio_info *fileio_info)
2350 {
2351 /* If the target does not support semihosting functions, it has
2352 no need to provide a .get_gdb_fileio_info callback.
2353 Just returning ERROR_FAIL here makes gdb_server report "Txx"
2354 (target halted) every time. */
2355 return ERROR_FAIL;
2356 }
2357
2358 static int target_gdb_fileio_end_default(struct target *target,
2359 int retcode, int fileio_errno, bool ctrl_c)
2360 {
2361 return ERROR_OK;
2362 }
2363
2364 int target_profiling_default(struct target *target, uint32_t *samples,
2365 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2366 {
2367 struct timeval timeout, now;
2368
2369 gettimeofday(&timeout, NULL);
2370 timeval_add_time(&timeout, seconds, 0);
2371
2372 LOG_INFO("Starting profiling. Halting and resuming the"
2373 " target as often as we can...");
2374
2375 uint32_t sample_count = 0;
2376 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2377 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2378
2379 int retval = ERROR_OK;
2380 for (;;) {
2381 target_poll(target);
2382 if (target->state == TARGET_HALTED) {
2383 uint32_t t = buf_get_u32(reg->value, 0, 32);
2384 samples[sample_count++] = t;
2385 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2386 retval = target_resume(target, 1, 0, 0, 0);
2387 target_poll(target);
2388 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2389 } else if (target->state == TARGET_RUNNING) {
2390 /* We want to quickly sample the PC. */
2391 retval = target_halt(target);
2392 } else {
2393 LOG_INFO("Target not halted or running");
2394 retval = ERROR_OK;
2395 break;
2396 }
2397
2398 if (retval != ERROR_OK)
2399 break;
2400
2401 gettimeofday(&now, NULL);
2402 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2403 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2404 break;
2405 }
2406 }
2407
2408 *num_samples = sample_count;
2409 return retval;
2410 }
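/* The PC samples gathered here (or by a target-specific .profiling handler)
 * are consumed by the 'profile' command, which post-processes them into a
 * gprof-compatible gmon output file. */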
2411
2412 /* A single aligned halfword or word is guaranteed to be accessed with a
2413 * 16 or 32 bit transfer respectively; any other data is transferred
2414 * using whatever access sizes are fastest.
2415 */
2416 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2417 {
2418 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2419 size, address);
2420
2421 if (!target_was_examined(target)) {
2422 LOG_ERROR("Target not examined yet");
2423 return ERROR_FAIL;
2424 }
2425
2426 if (size == 0)
2427 return ERROR_OK;
2428
2429 if ((address + size - 1) < address) {
2430 /* GDB can request this when e.g. PC is 0xfffffffc */
2431 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2432 address,
2433 size);
2434 return ERROR_FAIL;
2435 }
2436
2437 return target->type->write_buffer(target, address, size, buffer);
2438 }
2439
2440 static int target_write_buffer_default(struct target *target,
2441 target_addr_t address, uint32_t count, const uint8_t *buffer)
2442 {
2443 uint32_t size;
2444 unsigned int data_bytes = target_data_bits(target) / 8;
2445
2446 /* Transfer leading unaligned bytes with growing access sizes until the address
2447 * is aligned; the loop condition ensures the tail loop below still has work left. */
2448 for (size = 1;
2449 size < data_bytes && count >= size * 2 + (address & size);
2450 size *= 2) {
2451 if (address & size) {
2452 int retval = target_write_memory(target, address, size, 1, buffer);
2453 if (retval != ERROR_OK)
2454 return retval;
2455 address += size;
2456 count -= size;
2457 buffer += size;
2458 }
2459 }
2460
2461 /* Write the data with as large access size as possible. */
2462 for (; size > 0; size /= 2) {
2463 uint32_t aligned = count - count % size;
2464 if (aligned > 0) {
2465 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2466 if (retval != ERROR_OK)
2467 return retval;
2468 address += aligned;
2469 count -= aligned;
2470 buffer += aligned;
2471 }
2472 }
2473
2474 return ERROR_OK;
2475 }
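/* Worked example of the splitting above (illustrative, assuming a 32-bit
 * target so data_bytes == 4): a 7-byte write to address 0x1001 is issued as
 * one byte at 0x1001, one halfword at 0x1002 and one word at 0x1004. The
 * same head/tail splitting is used by target_read_buffer_default() below. */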
2476
2477 /* A single aligned halfword or word is guaranteed to be accessed with a
2478 * 16 or 32 bit transfer respectively; any other data is transferred
2479 * using whatever access sizes are fastest.
2480 */
2481 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2482 {
2483 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2484 size, address);
2485
2486 if (!target_was_examined(target)) {
2487 LOG_ERROR("Target not examined yet");
2488 return ERROR_FAIL;
2489 }
2490
2491 if (size == 0)
2492 return ERROR_OK;
2493
2494 if ((address + size - 1) < address) {
2495 /* GDB can request this when e.g. PC is 0xfffffffc */
2496 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2497 address,
2498 size);
2499 return ERROR_FAIL;
2500 }
2501
2502 return target->type->read_buffer(target, address, size, buffer);
2503 }
2504
2505 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2506 {
2507 uint32_t size;
2508 unsigned int data_bytes = target_data_bits(target) / 8;
2509
2510 /* Transfer leading unaligned bytes with growing access sizes until the address
2511 * is aligned; the loop condition ensures the tail loop below still has work left. */
2512 for (size = 1;
2513 size < data_bytes && count >= size * 2 + (address & size);
2514 size *= 2) {
2515 if (address & size) {
2516 int retval = target_read_memory(target, address, size, 1, buffer);
2517 if (retval != ERROR_OK)
2518 return retval;
2519 address += size;
2520 count -= size;
2521 buffer += size;
2522 }
2523 }
2524
2525 /* Read the data with as large access size as possible. */
2526 for (; size > 0; size /= 2) {
2527 uint32_t aligned = count - count % size;
2528 if (aligned > 0) {
2529 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2530 if (retval != ERROR_OK)
2531 return retval;
2532 address += aligned;
2533 count -= aligned;
2534 buffer += aligned;
2535 }
2536 }
2537
2538 return ERROR_OK;
2539 }
2540
2541 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2542 {
2543 uint8_t *buffer;
2544 int retval;
2545 uint32_t i;
2546 uint32_t checksum = 0;
2547 if (!target_was_examined(target)) {
2548 LOG_ERROR("Target not examined yet");
2549 return ERROR_FAIL;
2550 }
2551 if (!target->type->checksum_memory) {
2552 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2553 return ERROR_FAIL;
2554 }
2555
2556 retval = target->type->checksum_memory(target, address, size, &checksum);
2557 if (retval != ERROR_OK) {
2558 buffer = malloc(size);
2559 if (!buffer) {
2560 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2561 return ERROR_COMMAND_SYNTAX_ERROR;
2562 }
2563 retval = target_read_buffer(target, address, size, buffer);
2564 if (retval != ERROR_OK) {
2565 free(buffer);
2566 return retval;
2567 }
2568
2569 /* convert to target endianness */
2570 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2571 uint32_t target_data;
2572 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2573 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2574 }
2575
2576 retval = image_calculate_checksum(buffer, size, &checksum);
2577 free(buffer);
2578 }
2579
2580 *crc = checksum;
2581
2582 return retval;
2583 }
2584
2585 int target_blank_check_memory(struct target *target,
2586 struct target_memory_check_block *blocks, int num_blocks,
2587 uint8_t erased_value)
2588 {
2589 if (!target_was_examined(target)) {
2590 LOG_ERROR("Target not examined yet");
2591 return ERROR_FAIL;
2592 }
2593
2594 if (!target->type->blank_check_memory)
2595 return ERROR_NOT_IMPLEMENTED;
2596
2597 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2598 }
2599
2600 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2601 {
2602 uint8_t value_buf[8];
2603 if (!target_was_examined(target)) {
2604 LOG_ERROR("Target not examined yet");
2605 return ERROR_FAIL;
2606 }
2607
2608 int retval = target_read_memory(target, address, 8, 1, value_buf);
2609
2610 if (retval == ERROR_OK) {
2611 *value = target_buffer_get_u64(target, value_buf);
2612 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2613 address,
2614 *value);
2615 } else {
2616 *value = 0x0;
2617 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2618 address);
2619 }
2620
2621 return retval;
2622 }
2623
2624 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2625 {
2626 uint8_t value_buf[4];
2627 if (!target_was_examined(target)) {
2628 LOG_ERROR("Target not examined yet");
2629 return ERROR_FAIL;
2630 }
2631
2632 int retval = target_read_memory(target, address, 4, 1, value_buf);
2633
2634 if (retval == ERROR_OK) {
2635 *value = target_buffer_get_u32(target, value_buf);
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2637 address,
2638 *value);
2639 } else {
2640 *value = 0x0;
2641 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2642 address);
2643 }
2644
2645 return retval;
2646 }
2647
2648 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2649 {
2650 uint8_t value_buf[2];
2651 if (!target_was_examined(target)) {
2652 LOG_ERROR("Target not examined yet");
2653 return ERROR_FAIL;
2654 }
2655
2656 int retval = target_read_memory(target, address, 2, 1, value_buf);
2657
2658 if (retval == ERROR_OK) {
2659 *value = target_buffer_get_u16(target, value_buf);
2660 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2661 address,
2662 *value);
2663 } else {
2664 *value = 0x0;
2665 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2666 address);
2667 }
2668
2669 return retval;
2670 }
2671
2672 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2673 {
2674 if (!target_was_examined(target)) {
2675 LOG_ERROR("Target not examined yet");
2676 return ERROR_FAIL;
2677 }
2678
2679 int retval = target_read_memory(target, address, 1, 1, value);
2680
2681 if (retval == ERROR_OK) {
2682 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2683 address,
2684 *value);
2685 } else {
2686 *value = 0x0;
2687 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2688 address);
2689 }
2690
2691 return retval;
2692 }
2693
2694 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2695 {
2696 int retval;
2697 uint8_t value_buf[8];
2698 if (!target_was_examined(target)) {
2699 LOG_ERROR("Target not examined yet");
2700 return ERROR_FAIL;
2701 }
2702
2703 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2704 address,
2705 value);
2706
2707 target_buffer_set_u64(target, value_buf, value);
2708 retval = target_write_memory(target, address, 8, 1, value_buf);
2709 if (retval != ERROR_OK)
2710 LOG_DEBUG("failed: %i", retval);
2711
2712 return retval;
2713 }
2714
2715 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2716 {
2717 int retval;
2718 uint8_t value_buf[4];
2719 if (!target_was_examined(target)) {
2720 LOG_ERROR("Target not examined yet");
2721 return ERROR_FAIL;
2722 }
2723
2724 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2725 address,
2726 value);
2727
2728 target_buffer_set_u32(target, value_buf, value);
2729 retval = target_write_memory(target, address, 4, 1, value_buf);
2730 if (retval != ERROR_OK)
2731 LOG_DEBUG("failed: %i", retval);
2732
2733 return retval;
2734 }
2735
2736 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2737 {
2738 int retval;
2739 uint8_t value_buf[2];
2740 if (!target_was_examined(target)) {
2741 LOG_ERROR("Target not examined yet");
2742 return ERROR_FAIL;
2743 }
2744
2745 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2746 address,
2747 value);
2748
2749 target_buffer_set_u16(target, value_buf, value);
2750 retval = target_write_memory(target, address, 2, 1, value_buf);
2751 if (retval != ERROR_OK)
2752 LOG_DEBUG("failed: %i", retval);
2753
2754 return retval;
2755 }
2756
2757 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2758 {
2759 int retval;
2760 if (!target_was_examined(target)) {
2761 LOG_ERROR("Target not examined yet");
2762 return ERROR_FAIL;
2763 }
2764
2765 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2766 address, value);
2767
2768 retval = target_write_memory(target, address, 1, 1, &value);
2769 if (retval != ERROR_OK)
2770 LOG_DEBUG("failed: %i", retval);
2771
2772 return retval;
2773 }
2774
2775 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2776 {
2777 int retval;
2778 uint8_t value_buf[8];
2779 if (!target_was_examined(target)) {
2780 LOG_ERROR("Target not examined yet");
2781 return ERROR_FAIL;
2782 }
2783
2784 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2785 address,
2786 value);
2787
2788 target_buffer_set_u64(target, value_buf, value);
2789 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2790 if (retval != ERROR_OK)
2791 LOG_DEBUG("failed: %i", retval);
2792
2793 return retval;
2794 }
2795
2796 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2797 {
2798 int retval;
2799 uint8_t value_buf[4];
2800 if (!target_was_examined(target)) {
2801 LOG_ERROR("Target not examined yet");
2802 return ERROR_FAIL;
2803 }
2804
2805 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2806 address,
2807 value);
2808
2809 target_buffer_set_u32(target, value_buf, value);
2810 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2811 if (retval != ERROR_OK)
2812 LOG_DEBUG("failed: %i", retval);
2813
2814 return retval;
2815 }
2816
2817 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2818 {
2819 int retval;
2820 uint8_t value_buf[2];
2821 if (!target_was_examined(target)) {
2822 LOG_ERROR("Target not examined yet");
2823 return ERROR_FAIL;
2824 }
2825
2826 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2827 address,
2828 value);
2829
2830 target_buffer_set_u16(target, value_buf, value);
2831 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2832 if (retval != ERROR_OK)
2833 LOG_DEBUG("failed: %i", retval);
2834
2835 return retval;
2836 }
2837
2838 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2839 {
2840 int retval;
2841 if (!target_was_examined(target)) {
2842 LOG_ERROR("Target not examined yet");
2843 return ERROR_FAIL;
2844 }
2845
2846 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2847 address, value);
2848
2849 retval = target_write_phys_memory(target, address, 1, 1, &value);
2850 if (retval != ERROR_OK)
2851 LOG_DEBUG("failed: %i", retval);
2852
2853 return retval;
2854 }
2855
2856 static int find_target(struct command_invocation *cmd, const char *name)
2857 {
2858 struct target *target = get_target(name);
2859 if (!target) {
2860 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2861 return ERROR_FAIL;
2862 }
2863 if (!target->tap->enabled) {
2864 command_print(cmd, "Target: TAP %s is disabled, "
2865 "can't be the current target\n",
2866 target->tap->dotted_name);
2867 return ERROR_FAIL;
2868 }
2869
2870 cmd->ctx->current_target = target;
2871 if (cmd->ctx->current_target_override)
2872 cmd->ctx->current_target_override = target;
2873
2874 return ERROR_OK;
2875 }
2876
2877
2878 COMMAND_HANDLER(handle_targets_command)
2879 {
2880 int retval = ERROR_OK;
2881 if (CMD_ARGC == 1) {
2882 retval = find_target(CMD, CMD_ARGV[0]);
2883 if (retval == ERROR_OK) {
2884 /* we're done! */
2885 return retval;
2886 }
2887 }
2888
2889 struct target *target = all_targets;
2890 command_print(CMD, "    TargetName         Type       Endian TapName            State       ");
2891 command_print(CMD, "--  ------------------ ---------- ------ ------------------ ------------");
2892 while (target) {
2893 const char *state;
2894 char marker = ' ';
2895
2896 if (target->tap->enabled)
2897 state = target_state_name(target);
2898 else
2899 state = "tap-disabled";
2900
2901 if (CMD_CTX->current_target == target)
2902 marker = '*';
2903
2904 /* keep columns lined up to match the headers above */
2905 command_print(CMD,
2906 "%2d%c %-18s %-10s %-6s %-18s %s",
2907 target->target_number,
2908 marker,
2909 target_name(target),
2910 target_type_name(target),
2911 jim_nvp_value2name_simple(nvp_target_endian,
2912 target->endianness)->name,
2913 target->tap->dotted_name,
2914 state);
2915 target = target->next;
2916 }
2917
2918 return retval;
2919 }
2920
2921 /* every 300ms we check for reset & power dropout and issue a "reset halt" if so. */
2922
2923 static int power_dropout;
2924 static int srst_asserted;
2925
2926 static int run_power_restore;
2927 static int run_power_dropout;
2928 static int run_srst_asserted;
2929 static int run_srst_deasserted;
2930
2931 static int sense_handler(void)
2932 {
2933 static int prev_srst_asserted;
2934 static int prev_power_dropout;
2935
2936 int retval = jtag_power_dropout(&power_dropout);
2937 if (retval != ERROR_OK)
2938 return retval;
2939
2940 int power_restored;
2941 power_restored = prev_power_dropout && !power_dropout;
2942 if (power_restored)
2943 run_power_restore = 1;
2944
2945 int64_t current = timeval_ms();
2946 static int64_t last_power;
2947 bool wait_more = last_power + 2000 > current;
2948 if (power_dropout && !wait_more) {
2949 run_power_dropout = 1;
2950 last_power = current;
2951 }
2952
2953 retval = jtag_srst_asserted(&srst_asserted);
2954 if (retval != ERROR_OK)
2955 return retval;
2956
2957 int srst_deasserted;
2958 srst_deasserted = prev_srst_asserted && !srst_asserted;
2959
2960 static int64_t last_srst;
2961 wait_more = last_srst + 2000 > current;
2962 if (srst_deasserted && !wait_more) {
2963 run_srst_deasserted = 1;
2964 last_srst = current;
2965 }
2966
2967 if (!prev_srst_asserted && srst_asserted)
2968 run_srst_asserted = 1;
2969
2970 prev_srst_asserted = srst_asserted;
2971 prev_power_dropout = power_dropout;
2972
2973 if (srst_deasserted || power_restored) {
2974 /* Other than logging the event we can't do anything here.
2975 * Issuing a reset is a particularly bad idea as we might
2976 * be inside a reset already.
2977 */
2978 }
2979
2980 return ERROR_OK;
2981 }
2982
2983 /* process target state changes */
2984 static int handle_target(void *priv)
2985 {
2986 Jim_Interp *interp = (Jim_Interp *)priv;
2987 int retval = ERROR_OK;
2988
2989 if (!is_jtag_poll_safe()) {
2990 /* polling is disabled currently */
2991 return ERROR_OK;
2992 }
2993
2994 /* we do not want to recurse here... */
2995 static int recursive;
2996 if (!recursive) {
2997 recursive = 1;
2998 sense_handler();
2999 /* danger! running these procedures can trigger srst assertions and power dropouts.
3000 * We need to avoid an infinite loop/recursion here and we do that by
3001 * clearing the flags after running these events.
3002 */
3003 int did_something = 0;
3004 if (run_srst_asserted) {
3005 LOG_INFO("srst asserted detected, running srst_asserted proc.");
3006 Jim_Eval(interp, "srst_asserted");
3007 did_something = 1;
3008 }
3009 if (run_srst_deasserted) {
3010 Jim_Eval(interp, "srst_deasserted");
3011 did_something = 1;
3012 }
3013 if (run_power_dropout) {
3014 LOG_INFO("Power dropout detected, running power_dropout proc.");
3015 Jim_Eval(interp, "power_dropout");
3016 did_something = 1;
3017 }
3018 if (run_power_restore) {
3019 Jim_Eval(interp, "power_restore");
3020 did_something = 1;
3021 }
3022
3023 if (did_something) {
3024 /* clear detect flags */
3025 sense_handler();
3026 }
3027
3028 /* clear action flags */
3029
3030 run_srst_asserted = 0;
3031 run_srst_deasserted = 0;
3032 run_power_restore = 0;
3033 run_power_dropout = 0;
3034
3035 recursive = 0;
3036 }
3037
3038 /* Poll targets for state changes unless that's globally disabled.
3039 * Skip targets that are currently disabled.
3040 */
3041 for (struct target *target = all_targets;
3042 is_jtag_poll_safe() && target;
3043 target = target->next) {
3044
3045 if (!target_was_examined(target))
3046 continue;
3047
3048 if (!target->tap->enabled)
3049 continue;
3050
3051 if (target->backoff.times > target->backoff.count) {
3052 /* do not poll this time as we failed previously */
3053 target->backoff.count++;
3054 continue;
3055 }
3056 target->backoff.count = 0;
3057
3058 /* only poll target if we've got power and srst isn't asserted */
3059 if (!power_dropout && !srst_asserted) {
3060 /* polling may fail silently until the target has been examined */
3061 retval = target_poll(target);
3062 if (retval != ERROR_OK) {
3063 /* 100ms polling interval. Back off exponentially, increasing the interval between polls up to 5000ms */
3064 if (target->backoff.times * polling_interval < 5000) {
3065 target->backoff.times *= 2;
3066 target->backoff.times++;
3067 }
3068
3069 /* Tell GDB to halt the debugger. This allows the user to
3070 * run monitor commands to handle the situation.
3071 */
3072 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3073 }
3074 if (target->backoff.times > 0) {
3075 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3076 target_reset_examined(target);
3077 retval = target_examine_one(target);
3078 /* Target examination could have failed due to unstable connection,
3079 * but we set the examined flag anyway to repoll it later */
3080 if (retval != ERROR_OK) {
3081 target_set_examined(target);
3082 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3083 target->backoff.times * polling_interval);
3084 return retval;
3085 }
3086 }
3087
3088 /* Since we succeeded, we reset backoff count */
3089 target->backoff.times = 0;
3090 }
3091 }
3092
3093 return retval;
3094 }
3095
3096 COMMAND_HANDLER(handle_reg_command)
3097 {
3098 LOG_DEBUG("-");
3099
3100 struct target *target = get_current_target(CMD_CTX);
3101 struct reg *reg = NULL;
3102
3103 /* list all available registers for the current target */
3104 if (CMD_ARGC == 0) {
3105 struct reg_cache *cache = target->reg_cache;
3106
3107 unsigned int count = 0;
3108 while (cache) {
3109 unsigned i;
3110
3111 command_print(CMD, "===== %s", cache->name);
3112
3113 for (i = 0, reg = cache->reg_list;
3114 i < cache->num_regs;
3115 i++, reg++, count++) {
3116 if (reg->exist == false || reg->hidden)
3117 continue;
3118 /* only print cached values if they are valid */
3119 if (reg->valid) {
3120 char *value = buf_to_hex_str(reg->value,
3121 reg->size);
3122 command_print(CMD,
3123 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3124 count, reg->name,
3125 reg->size, value,
3126 reg->dirty
3127 ? " (dirty)"
3128 : "");
3129 free(value);
3130 } else {
3131 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3132 count, reg->name,
3133 reg->size);
3134 }
3135 }
3136 cache = cache->next;
3137 }
3138
3139 return ERROR_OK;
3140 }
3141
3142 /* access a single register by its ordinal number */
3143 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3144 unsigned num;
3145 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3146
3147 struct reg_cache *cache = target->reg_cache;
3148 unsigned int count = 0;
3149 while (cache) {
3150 unsigned i;
3151 for (i = 0; i < cache->num_regs; i++) {
3152 if (count++ == num) {
3153 reg = &cache->reg_list[i];
3154 break;
3155 }
3156 }
3157 if (reg)
3158 break;
3159 cache = cache->next;
3160 }
3161
3162 if (!reg) {
3163 command_print(CMD, "%i is out of bounds, the current target "
3164 "has only %i registers (0 - %i)", num, count, count - 1);
3165 return ERROR_OK;
3166 }
3167 } else {
3168 /* access a single register by its name */
3169 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3170
3171 if (!reg)
3172 goto not_found;
3173 }
3174
3175 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3176
3177 if (!reg->exist)
3178 goto not_found;
3179
3180 /* display a register */
3181 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3182 && (CMD_ARGV[1][0] <= '9')))) {
3183 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3184 reg->valid = 0;
3185
3186 if (reg->valid == 0) {
3187 int retval = reg->type->get(reg);
3188 if (retval != ERROR_OK) {
3189 LOG_ERROR("Could not read register '%s'", reg->name);
3190 return retval;
3191 }
3192 }
3193 char *value = buf_to_hex_str(reg->value, reg->size);
3194 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3195 free(value);
3196 return ERROR_OK;
3197 }
3198
3199 /* set register value */
3200 if (CMD_ARGC == 2) {
3201 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3202 if (!buf)
3203 return ERROR_FAIL;
3204 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3205
3206 int retval = reg->type->set(reg, buf);
3207 if (retval != ERROR_OK) {
3208 LOG_ERROR("Could not write to register '%s'", reg->name);
3209 } else {
3210 char *value = buf_to_hex_str(reg->value, reg->size);
3211 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3212 free(value);
3213 }
3214
3215 free(buf);
3216
3217 return retval;
3218 }
3219
3220 return ERROR_COMMAND_SYNTAX_ERROR;
3221
3222 not_found:
3223 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3224 return ERROR_OK;
3225 }
3226
3227 COMMAND_HANDLER(handle_poll_command)
3228 {
3229 int retval = ERROR_OK;
3230 struct target *target = get_current_target(CMD_CTX);
3231
3232 if (CMD_ARGC == 0) {
3233 command_print(CMD, "background polling: %s",
3234 jtag_poll_get_enabled() ? "on" : "off");
3235 command_print(CMD, "TAP: %s (%s)",
3236 target->tap->dotted_name,
3237 target->tap->enabled ? "enabled" : "disabled");
3238 if (!target->tap->enabled)
3239 return ERROR_OK;
3240 retval = target_poll(target);
3241 if (retval != ERROR_OK)
3242 return retval;
3243 retval = target_arch_state(target);
3244 if (retval != ERROR_OK)
3245 return retval;
3246 } else if (CMD_ARGC == 1) {
3247 bool enable;
3248 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3249 jtag_poll_set_enabled(enable);
3250 } else
3251 return ERROR_COMMAND_SYNTAX_ERROR;
3252
3253 return retval;
3254 }
3255
3256 COMMAND_HANDLER(handle_wait_halt_command)
3257 {
3258 if (CMD_ARGC > 1)
3259 return ERROR_COMMAND_SYNTAX_ERROR;
3260
3261 unsigned ms = DEFAULT_HALT_TIMEOUT;
3262 if (1 == CMD_ARGC) {
3263 int retval = parse_uint(CMD_ARGV[0], &ms);
3264 if (retval != ERROR_OK)
3265 return ERROR_COMMAND_SYNTAX_ERROR;
3266 }
3267
3268 struct target *target = get_current_target(CMD_CTX);
3269 return target_wait_state(target, TARGET_HALTED, ms);
3270 }
3271
3272 /* wait for target state to change. The trick here is to have a low
3273 * latency for short waits and not to suck up all the CPU time
3274 * on longer waits.
3275 *
3276 * After 500ms, keep_alive() is invoked
3277 */
3278 int target_wait_state(struct target *target, enum target_state state, int ms)
3279 {
3280 int retval;
3281 int64_t then = 0, cur;
3282 bool once = true;
3283
3284 for (;;) {
3285 retval = target_poll(target);
3286 if (retval != ERROR_OK)
3287 return retval;
3288 if (target->state == state)
3289 break;
3290 cur = timeval_ms();
3291 if (once) {
3292 once = false;
3293 then = timeval_ms();
3294 LOG_DEBUG("waiting for target %s...",
3295 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3296 }
3297
3298 if (cur-then > 500)
3299 keep_alive();
3300
3301 if ((cur-then) > ms) {
3302 LOG_ERROR("timed out while waiting for target %s",
3303 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3304 return ERROR_FAIL;
3305 }
3306 }
3307
3308 return ERROR_OK;
3309 }
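/* Typical use (illustrative): after requesting a halt, callers block with
 *   target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 * which is effectively what the 'wait_halt' command above does. */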
3310
3311 COMMAND_HANDLER(handle_halt_command)
3312 {
3313 LOG_DEBUG("-");
3314
3315 struct target *target = get_current_target(CMD_CTX);
3316
3317 target->verbose_halt_msg = true;
3318
3319 int retval = target_halt(target);
3320 if (retval != ERROR_OK)
3321 return retval;
3322
3323 if (CMD_ARGC == 1) {
3324 unsigned wait_local;
3325 retval = parse_uint(CMD_ARGV[0], &wait_local);
3326 if (retval != ERROR_OK)
3327 return ERROR_COMMAND_SYNTAX_ERROR;
3328 if (!wait_local)
3329 return ERROR_OK;
3330 }
3331
3332 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3333 }
3334
3335 COMMAND_HANDLER(handle_soft_reset_halt_command)
3336 {
3337 struct target *target = get_current_target(CMD_CTX);
3338
3339 LOG_USER("requesting target halt and executing a soft reset");
3340
3341 target_soft_reset_halt(target);
3342
3343 return ERROR_OK;
3344 }
3345
3346 COMMAND_HANDLER(handle_reset_command)
3347 {
3348 if (CMD_ARGC > 1)
3349 return ERROR_COMMAND_SYNTAX_ERROR;
3350
3351 enum target_reset_mode reset_mode = RESET_RUN;
3352 if (CMD_ARGC == 1) {
3353 const struct jim_nvp *n;
3354 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3355 if ((!n->name) || (n->value == RESET_UNKNOWN))
3356 return ERROR_COMMAND_SYNTAX_ERROR;
3357 reset_mode = n->value;
3358 }
3359
3360 /* reset *all* targets */
3361 return target_process_reset(CMD, reset_mode);
3362 }
3363
3364
3365 COMMAND_HANDLER(handle_resume_command)
3366 {
3367 int current = 1;
3368 if (CMD_ARGC > 1)
3369 return ERROR_COMMAND_SYNTAX_ERROR;
3370
3371 struct target *target = get_current_target(CMD_CTX);
3372
3373 /* with no CMD_ARGV, resume from current pc, addr = 0,
3374 * with one argument, addr = CMD_ARGV[0],
3375 * handle breakpoints, not debugging */
3376 target_addr_t addr = 0;
3377 if (CMD_ARGC == 1) {
3378 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3379 current = 0;
3380 }
3381
3382 return target_resume(target, current, addr, 1, 0);
3383 }
3384
3385 COMMAND_HANDLER(handle_step_command)
3386 {
3387 if (CMD_ARGC > 1)
3388 return ERROR_COMMAND_SYNTAX_ERROR;
3389
3390 LOG_DEBUG("-");
3391
3392 /* with no CMD_ARGV, step from current pc, addr = 0,
3393 * with one argument, addr = CMD_ARGV[0],
3394 * handle breakpoints, debugging */
3395 target_addr_t addr = 0;
3396 int current_pc = 1;
3397 if (CMD_ARGC == 1) {
3398 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3399 current_pc = 0;
3400 }
3401
3402 struct target *target = get_current_target(CMD_CTX);
3403
3404 return target_step(target, current_pc, addr, 1);
3405 }
3406
3407 void target_handle_md_output(struct command_invocation *cmd,
3408 struct target *target, target_addr_t address, unsigned size,
3409 unsigned count, const uint8_t *buffer)
3410 {
3411 const unsigned line_bytecnt = 32;
3412 unsigned line_modulo = line_bytecnt / size;
3413
3414 char output[line_bytecnt * 4 + 1];
3415 unsigned output_len = 0;
3416
3417 const char *value_fmt;
3418 switch (size) {
3419 case 8:
3420 value_fmt = "%16.16"PRIx64" ";
3421 break;
3422 case 4:
3423 value_fmt = "%8.8"PRIx64" ";
3424 break;
3425 case 2:
3426 value_fmt = "%4.4"PRIx64" ";
3427 break;
3428 case 1:
3429 value_fmt = "%2.2"PRIx64" ";
3430 break;
3431 default:
3432 /* "can't happen", caller checked */
3433 LOG_ERROR("invalid memory read size: %u", size);
3434 return;
3435 }
3436
3437 for (unsigned i = 0; i < count; i++) {
3438 if (i % line_modulo == 0) {
3439 output_len += snprintf(output + output_len,
3440 sizeof(output) - output_len,
3441 TARGET_ADDR_FMT ": ",
3442 (address + (i * size)));
3443 }
3444
3445 uint64_t value = 0;
3446 const uint8_t *value_ptr = buffer + i * size;
3447 switch (size) {
3448 case 8:
3449 value = target_buffer_get_u64(target, value_ptr);
3450 break;
3451 case 4:
3452 value = target_buffer_get_u32(target, value_ptr);
3453 break;
3454 case 2:
3455 value = target_buffer_get_u16(target, value_ptr);
3456 break;
3457 case 1:
3458 value = *value_ptr;
3459 }
3460 output_len += snprintf(output + output_len,
3461 sizeof(output) - output_len,
3462 value_fmt, value);
3463
3464 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3465 command_print(cmd, "%s", output);
3466 output_len = 0;
3467 }
3468 }
3469 }
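/* Example of the resulting output for a word-sized read of four words
 * (illustrative values):
 *
 *   0x20000000: deadbeef 00000001 20000400 fffffffe
 */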
3470
3471 COMMAND_HANDLER(handle_md_command)
3472 {
3473 if (CMD_ARGC < 1)
3474 return ERROR_COMMAND_SYNTAX_ERROR;
3475
3476 unsigned size = 0;
3477 switch (CMD_NAME[2]) {
3478 case 'd':
3479 size = 8;
3480 break;
3481 case 'w':
3482 size = 4;
3483 break;
3484 case 'h':
3485 size = 2;
3486 break;
3487 case 'b':
3488 size = 1;
3489 break;
3490 default:
3491 return ERROR_COMMAND_SYNTAX_ERROR;
3492 }
3493
3494 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3495 int (*fn)(struct target *target,
3496 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3497 if (physical) {
3498 CMD_ARGC--;
3499 CMD_ARGV++;
3500 fn = target_read_phys_memory;
3501 } else
3502 fn = target_read_memory;
3503 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3504 return ERROR_COMMAND_SYNTAX_ERROR;
3505
3506 target_addr_t address;
3507 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3508
3509 unsigned count = 1;
3510 if (CMD_ARGC == 2)
3511 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3512
3513 uint8_t *buffer = calloc(count, size);
3514 if (!buffer) {
3515 LOG_ERROR("Failed to allocate md read buffer");
3516 return ERROR_FAIL;
3517 }
3518
3519 struct target *target = get_current_target(CMD_CTX);
3520 int retval = fn(target, address, size, count, buffer);
3521 if (retval == ERROR_OK)
3522 target_handle_md_output(CMD, target, address, size, count, buffer);
3523
3524 free(buffer);
3525
3526 return retval;
3527 }
3528
3529 typedef int (*target_write_fn)(struct target *target,
3530 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3531
3532 static int target_fill_mem(struct target *target,
3533 target_addr_t address,
3534 target_write_fn fn,
3535 unsigned data_size,
3536 /* value */
3537 uint64_t b,
3538 /* count */
3539 unsigned c)
3540 {
3541 /* We have to write in reasonably large chunks to be able
3542 * to fill large memory areas with any sane speed */
3543 const unsigned chunk_size = 16384;
3544 uint8_t *target_buf = malloc(chunk_size * data_size);
3545 if (!target_buf) {
3546 LOG_ERROR("Out of memory");
3547 return ERROR_FAIL;
3548 }
3549
3550 for (unsigned i = 0; i < chunk_size; i++) {
3551 switch (data_size) {
3552 case 8:
3553 target_buffer_set_u64(target, target_buf + i * data_size, b);
3554 break;
3555 case 4:
3556 target_buffer_set_u32(target, target_buf + i * data_size, b);
3557 break;
3558 case 2:
3559 target_buffer_set_u16(target, target_buf + i * data_size, b);
3560 break;
3561 case 1:
3562 target_buffer_set_u8(target, target_buf + i * data_size, b);
3563 break;
3564 default:
3565 exit(-1);
3566 }
3567 }
3568
3569 int retval = ERROR_OK;
3570
3571 for (unsigned x = 0; x < c; x += chunk_size) {
3572 unsigned current;
3573 current = c - x;
3574 if (current > chunk_size)
3575 current = chunk_size;
3576 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3577 if (retval != ERROR_OK)
3578 break;
3579 /* avoid GDB timeouts */
3580 keep_alive();
3581 }
3582 free(target_buf);
3583
3584 return retval;
3585 }
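/* target_fill_mem() backs the mwd/mww/mwh/mwb commands below; for example
 * (illustrative values)
 *
 *   mww 0x20000000 0xdeadbeef 16
 *
 * writes the 32-bit pattern 0xdeadbeef to 16 consecutive words starting at
 * 0x20000000, transferring in chunks of up to 16384 elements with
 * keep_alive() between chunks to avoid GDB timeouts. */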
3586
3587
3588 COMMAND_HANDLER(handle_mw_command)
3589 {
3590 if (CMD_ARGC < 2)
3591 return ERROR_COMMAND_SYNTAX_ERROR;
3592 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3593 target_write_fn fn;
3594 if (physical) {
3595 CMD_ARGC--;
3596 CMD_ARGV++;
3597 fn = target_write_phys_memory;
3598 } else
3599 fn = target_write_memory;
3600 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3601 return ERROR_COMMAND_SYNTAX_ERROR;
3602
3603 target_addr_t address;
3604 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3605
3606 uint64_t value;
3607 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3608
3609 unsigned count = 1;
3610 if (CMD_ARGC == 3)
3611 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3612
3613 struct target *target = get_current_target(CMD_CTX);
3614 unsigned wordsize;
3615 switch (CMD_NAME[2]) {
3616 case 'd':
3617 wordsize = 8;
3618 break;
3619 case 'w':
3620 wordsize = 4;
3621 break;
3622 case 'h':
3623 wordsize = 2;
3624 break;
3625 case 'b':
3626 wordsize = 1;
3627 break;
3628 default:
3629 return ERROR_COMMAND_SYNTAX_ERROR;
3630 }
3631
3632 return target_fill_mem(target, address, fn, wordsize, value, count);
3633 }
3634
3635 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3636 target_addr_t *min_address, target_addr_t *max_address)
3637 {
3638 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3639 return ERROR_COMMAND_SYNTAX_ERROR;
3640
3641 /* a base address isn't always necessary,
3642 * default to 0x0 (i.e. don't relocate) */
3643 if (CMD_ARGC >= 2) {
3644 target_addr_t addr;
3645 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3646 image->base_address = addr;
3647 image->base_address_set = true;
3648 } else
3649 image->base_address_set = false;
3650
3651 image->start_address_set = false;
3652
3653 if (CMD_ARGC >= 4)
3654 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3655 if (CMD_ARGC == 5) {
3656 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3657 /* use size (given) to find max (required) */
3658 *max_address += *min_address;
3659 }
3660
3661 if (*min_address > *max_address)
3662 return ERROR_COMMAND_SYNTAX_ERROR;
3663
3664 return ERROR_OK;
3665 }
3666
3667 COMMAND_HANDLER(handle_load_image_command)
3668 {
3669 uint8_t *buffer;
3670 size_t buf_cnt;
3671 uint32_t image_size;
3672 target_addr_t min_address = 0;
3673 target_addr_t max_address = -1;
3674 struct image image;
3675
3676 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3677 &image, &min_address, &max_address);
3678 if (retval != ERROR_OK)
3679 return retval;
3680
3681 struct target *target = get_current_target(CMD_CTX);
3682
3683 struct duration bench;
3684 duration_start(&bench);
3685
3686 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3687 return ERROR_FAIL;
3688
3689 image_size = 0x0;
3690 retval = ERROR_OK;
3691 for (unsigned int i = 0; i < image.num_sections; i++) {
3692 buffer = malloc(image.sections[i].size);
3693 if (!buffer) {
3694 command_print(CMD,
3695 "error allocating buffer for section (%d bytes)",
3696 (int)(image.sections[i].size));
3697 retval = ERROR_FAIL;
3698 break;
3699 }
3700
3701 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3702 if (retval != ERROR_OK) {
3703 free(buffer);
3704 break;
3705 }
3706
3707 uint32_t offset = 0;
3708 uint32_t length = buf_cnt;
3709
3710 /* DANGER!!! beware of unsigned comparison here!!! */
3711
3712 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3713 (image.sections[i].base_address < max_address)) {
3714
3715 if (image.sections[i].base_address < min_address) {
3716 /* clip addresses below */
3717 offset += min_address-image.sections[i].base_address;
3718 length -= offset;
3719 }
3720
3721 if (image.sections[i].base_address + buf_cnt > max_address)
3722 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3723
3724 retval = target_write_buffer(target,
3725 image.sections[i].base_address + offset, length, buffer + offset);
3726 if (retval != ERROR_OK) {
3727 free(buffer);
3728 break;
3729 }
3730 image_size += length;
3731 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3732 (unsigned int)length,
3733 image.sections[i].base_address + offset);
3734 }
3735
3736 free(buffer);
3737 }
3738
3739 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3740 command_print(CMD, "downloaded %" PRIu32 " bytes "
3741 "in %fs (%0.3f KiB/s)", image_size,
3742 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3743 }
3744
3745 image_close(&image);
3746
3747 return retval;
3748
3749 }
3750
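/* 'dump_image' takes exactly three arguments: dump_image <file> <address> <size>,
 * e.g. dump_image sram.bin 0x20000000 0x10000. Memory is read in 4 KiB chunks
 * and streamed to the file. */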
3751 COMMAND_HANDLER(handle_dump_image_command)
3752 {
3753 struct fileio *fileio;
3754 uint8_t *buffer;
3755 int retval, retvaltemp;
3756 target_addr_t address, size;
3757 struct duration bench;
3758 struct target *target = get_current_target(CMD_CTX);
3759
3760 if (CMD_ARGC != 3)
3761 return ERROR_COMMAND_SYNTAX_ERROR;
3762
3763 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3764 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3765
3766 uint32_t buf_size = (size > 4096) ? 4096 : size;
3767 buffer = malloc(buf_size);
3768 if (!buffer)
3769 return ERROR_FAIL;
3770
3771 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3772 if (retval != ERROR_OK) {
3773 free(buffer);
3774 return retval;
3775 }
3776
3777 duration_start(&bench);
3778
3779 while (size > 0) {
3780 size_t size_written;
3781 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3782 retval = target_read_buffer(target, address, this_run_size, buffer);
3783 if (retval != ERROR_OK)
3784 break;
3785
3786 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3787 if (retval != ERROR_OK)
3788 break;
3789
3790 size -= this_run_size;
3791 address += this_run_size;
3792 }
3793
3794 free(buffer);
3795
3796 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3797 size_t filesize;
3798 retval = fileio_size(fileio, &filesize);
3799 if (retval != ERROR_OK)
3800 return retval;
3801 command_print(CMD,
3802 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3803 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3804 }
3805
3806 retvaltemp = fileio_close(fileio);
3807 if (retvaltemp != ERROR_OK)
3808 return retvaltemp;
3809
3810 return retval;
3811 }
3812
3813 enum verify_mode {
3814 IMAGE_TEST = 0,
3815 IMAGE_VERIFY = 1,
3816 IMAGE_CHECKSUM_ONLY = 2
3817 };
3818
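/* One helper backs 'test_image', 'verify_image' and 'verify_image_checksum':
 *  - IMAGE_TEST          only prints each section's load address and length;
 *  - IMAGE_VERIFY        checks a CRC per section and, on mismatch, falls back
 *                        to a byte-by-byte compare (up to 128 diffs printed);
 *  - IMAGE_CHECKSUM_ONLY fails on the first CRC mismatch without comparing.
 */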
3819 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3820 {
3821 uint8_t *buffer;
3822 size_t buf_cnt;
3823 uint32_t image_size;
3824 int retval;
3825 uint32_t checksum = 0;
3826 uint32_t mem_checksum = 0;
3827
3828 struct image image;
3829
3830 struct target *target = get_current_target(CMD_CTX);
3831
3832 if (CMD_ARGC < 1)
3833 return ERROR_COMMAND_SYNTAX_ERROR;
3834
3835 if (!target) {
3836 LOG_ERROR("no target selected");
3837 return ERROR_FAIL;
3838 }
3839
3840 struct duration bench;
3841 duration_start(&bench);
3842
3843 if (CMD_ARGC >= 2) {
3844 target_addr_t addr;
3845 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3846 image.base_address = addr;
3847 image.base_address_set = true;
3848 } else {
3849 image.base_address_set = false;
3850 image.base_address = 0x0;
3851 }
3852
3853 image.start_address_set = false;
3854
3855 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3856 if (retval != ERROR_OK)
3857 return retval;
3858
3859 image_size = 0x0;
3860 int diffs = 0;
3861 retval = ERROR_OK;
3862 for (unsigned int i = 0; i < image.num_sections; i++) {
3863 buffer = malloc(image.sections[i].size);
3864 if (!buffer) {
3865 command_print(CMD,
3866 "error allocating buffer for section (%" PRIu32 " bytes)",
3867 image.sections[i].size);
retval = ERROR_FAIL;
3868 break;
3869 }
3870 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3871 if (retval != ERROR_OK) {
3872 free(buffer);
3873 break;
3874 }
3875
3876 if (verify >= IMAGE_VERIFY) {
3877 /* calculate checksum of image */
3878 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3879 if (retval != ERROR_OK) {
3880 free(buffer);
3881 break;
3882 }
3883
3884 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3885 if (retval != ERROR_OK) {
3886 free(buffer);
3887 break;
3888 }
3889 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3890 LOG_ERROR("checksum mismatch");
3891 free(buffer);
3892 retval = ERROR_FAIL;
3893 goto done;
3894 }
3895 if (checksum != mem_checksum) {
3896 /* failed crc checksum, fall back to a binary compare */
3897 uint8_t *data;
3898
3899 if (diffs == 0)
3900 LOG_ERROR("checksum mismatch - attempting binary compare");
3901
3902 data = malloc(buf_cnt);
if (!data) {
free(buffer);
retval = ERROR_FAIL;
break;
}
3903
3904 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3905 if (retval == ERROR_OK) {
3906 uint32_t t;
3907 for (t = 0; t < buf_cnt; t++) {
3908 if (data[t] != buffer[t]) {
3909 command_print(CMD,
3910 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3911 diffs,
3912 (unsigned)(t + image.sections[i].base_address),
3913 data[t],
3914 buffer[t]);
3915 if (diffs++ >= 127) {
3916 command_print(CMD, "More than 128 errors, the rest are not printed.");
3917 free(data);
3918 free(buffer);
3919 goto done;
3920 }
3921 }
3922 keep_alive();
3923 }
3924 }
3925 free(data);
3926 }
3927 } else {
3928 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3929 image.sections[i].base_address,
3930 buf_cnt);
3931 }
3932
3933 free(buffer);
3934 image_size += buf_cnt;
3935 }
3936 if (diffs > 0)
3937 command_print(CMD, "No more differences found.");
3938 done:
3939 if (diffs > 0)
3940 retval = ERROR_FAIL;
3941 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3942 command_print(CMD, "verified %" PRIu32 " bytes "
3943 "in %fs (%0.3f KiB/s)", image_size,
3944 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3945 }
3946
3947 image_close(&image);
3948
3949 return retval;
3950 }
3951
3952 COMMAND_HANDLER(handle_verify_image_checksum_command)
3953 {
3954 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3955 }
3956
3957 COMMAND_HANDLER(handle_verify_image_command)
3958 {
3959 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3960 }
3961
3962 COMMAND_HANDLER(handle_test_image_command)
3963 {
3964 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3965 }
3966
3967 static int handle_bp_command_list(struct command_invocation *cmd)
3968 {
3969 struct target *target = get_current_target(cmd->ctx);
3970 struct breakpoint *breakpoint = target->breakpoints;
3971 while (breakpoint) {
3972 if (breakpoint->type == BKPT_SOFT) {
3973 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3974 breakpoint->length);
3975 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
3976 breakpoint->address,
3977 breakpoint->length,
3978 buf);
3979 free(buf);
3980 } else {
3981 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3982 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
3983 breakpoint->asid,
3984 breakpoint->length, breakpoint->number);
3985 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3986 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3987 breakpoint->address,
3988 breakpoint->length, breakpoint->number);
3989 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3990 breakpoint->asid);
3991 } else
3992 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3993 breakpoint->address,
3994 breakpoint->length, breakpoint->number);
3995 }
3996
3997 breakpoint = breakpoint->next;
3998 }
3999 return ERROR_OK;
4000 }
4001
4002 static int handle_bp_command_set(struct command_invocation *cmd,
4003 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
4004 {
4005 struct target *target = get_current_target(cmd->ctx);
4006 int retval;
4007
4008 if (asid == 0) {
4009 retval = breakpoint_add(target, addr, length, hw);
4010 /* error is always logged in breakpoint_add(), do not print it again */
4011 if (retval == ERROR_OK)
4012 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4013
4014 } else if (addr == 0) {
4015 if (!target->type->add_context_breakpoint) {
4016 LOG_ERROR("Context breakpoint not available");
4017 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4018 }
4019 retval = context_breakpoint_add(target, asid, length, hw);
4020 /* error is always logged in context_breakpoint_add(), do not print it again */
4021 if (retval == ERROR_OK)
4022 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4023
4024 } else {
4025 if (!target->type->add_hybrid_breakpoint) {
4026 LOG_ERROR("Hybrid breakpoint not available");
4027 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4028 }
4029 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4030 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4031 if (retval == ERROR_OK)
4032 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4033 }
4034 return retval;
4035 }
4036
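/* Accepted forms of the 'bp' command, matching the switch below:
 *
 *   bp                                  - list all breakpoints
 *   bp <address> <length>               - software breakpoint
 *   bp <address> <length> hw            - hardware breakpoint
 *   bp <asid> <length> hw_ctx           - context (ASID) breakpoint
 *   bp <address> <asid> <length> [hw]   - hybrid breakpoint (always hardware)
 */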
4037 COMMAND_HANDLER(handle_bp_command)
4038 {
4039 target_addr_t addr;
4040 uint32_t asid;
4041 uint32_t length;
4042 int hw = BKPT_SOFT;
4043
4044 switch (CMD_ARGC) {
4045 case 0:
4046 return handle_bp_command_list(CMD);
4047
4048 case 2:
4049 asid = 0;
4050 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4051 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4052 return handle_bp_command_set(CMD, addr, asid, length, hw);
4053
4054 case 3:
4055 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4056 hw = BKPT_HARD;
4057 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4058 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4059 asid = 0;
4060 return handle_bp_command_set(CMD, addr, asid, length, hw);
4061 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4062 hw = BKPT_HARD;
4063 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4064 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4065 addr = 0;
4066 return handle_bp_command_set(CMD, addr, asid, length, hw);
4067 }
4068 /* fallthrough */
4069 case 4:
4070 hw = BKPT_HARD;
4071 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4072 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4073 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4074 return handle_bp_command_set(CMD, addr, asid, length, hw);
4075
4076 default:
4077 return ERROR_COMMAND_SYNTAX_ERROR;
4078 }
4079 }
4080
4081 COMMAND_HANDLER(handle_rbp_command)
4082 {
4083 if (CMD_ARGC != 1)
4084 return ERROR_COMMAND_SYNTAX_ERROR;
4085
4086 struct target *target = get_current_target(CMD_CTX);
4087
4088 if (!strcmp(CMD_ARGV[0], "all")) {
4089 breakpoint_remove_all(target);
4090 } else {
4091 target_addr_t addr;
4092 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4093
4094 breakpoint_remove(target, addr);
4095 }
4096
4097 return ERROR_OK;
4098 }
4099
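/* Accepted forms of the 'wp' command, matching the switch below:
 *
 *   wp                                            - list all watchpoints
 *   wp <address> <length> [r|w|a [value [mask]]]  - add a watchpoint
 *
 * The access type defaults to 'a' (any access), the compare value to 0 and
 * the mask to 0xffffffff.
 */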
4100 COMMAND_HANDLER(handle_wp_command)
4101 {
4102 struct target *target = get_current_target(CMD_CTX);
4103
4104 if (CMD_ARGC == 0) {
4105 struct watchpoint *watchpoint = target->watchpoints;
4106
4107 while (watchpoint) {
4108 command_print(CMD, "address: " TARGET_ADDR_FMT
4109 ", len: 0x%8.8" PRIx32
4110 ", r/w/a: %i, value: 0x%8.8" PRIx32
4111 ", mask: 0x%8.8" PRIx32,
4112 watchpoint->address,
4113 watchpoint->length,
4114 (int)watchpoint->rw,
4115 watchpoint->value,
4116 watchpoint->mask);
4117 watchpoint = watchpoint->next;
4118 }
4119 return ERROR_OK;
4120 }
4121
4122 enum watchpoint_rw type = WPT_ACCESS;
4123 target_addr_t addr = 0;
4124 uint32_t length = 0;
4125 uint32_t data_value = 0x0;
4126 uint32_t data_mask = 0xffffffff;
4127
4128 switch (CMD_ARGC) {
4129 case 5:
4130 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4131 /* fall through */
4132 case 4:
4133 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4134 /* fall through */
4135 case 3:
4136 switch (CMD_ARGV[2][0]) {
4137 case 'r':
4138 type = WPT_READ;
4139 break;
4140 case 'w':
4141 type = WPT_WRITE;
4142 break;
4143 case 'a':
4144 type = WPT_ACCESS;
4145 break;
4146 default:
4147 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4148 return ERROR_COMMAND_SYNTAX_ERROR;
4149 }
4150 /* fall through */
4151 case 2:
4152 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4153 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4154 break;
4155
4156 default:
4157 return ERROR_COMMAND_SYNTAX_ERROR;
4158 }
4159
4160 int retval = watchpoint_add(target, addr, length, type,
4161 data_value, data_mask);
4162 if (retval != ERROR_OK)
4163 LOG_ERROR("Failure setting watchpoints");
4164
4165 return retval;
4166 }
4167
4168 COMMAND_HANDLER(handle_rwp_command)
4169 {
4170 if (CMD_ARGC != 1)
4171 return ERROR_COMMAND_SYNTAX_ERROR;
4172
4173 target_addr_t addr;
4174 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4175
4176 struct target *target = get_current_target(CMD_CTX);
4177 watchpoint_remove(target, addr);
4178
4179 return ERROR_OK;
4180 }
4181
4182 /**
4183 * Translate a virtual address to a physical address.
4184 *
4185 * The low-level target implementation must have logged a detailed error
4186 * which is forwarded to telnet/GDB session.
4187 */
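/* Example: 'virt2phys 0xc0001000' prints the physical address the target's
 * MMU maps that virtual address to, if translation succeeds. */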
4188 COMMAND_HANDLER(handle_virt2phys_command)
4189 {
4190 if (CMD_ARGC != 1)
4191 return ERROR_COMMAND_SYNTAX_ERROR;
4192
4193 target_addr_t va;
4194 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4195 target_addr_t pa;
4196
4197 struct target *target = get_current_target(CMD_CTX);
4198 int retval = target->type->virt2phys(target, va, &pa);
4199 if (retval == ERROR_OK)
4200 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4201
4202 return retval;
4203 }
4204
4205 static void write_data(FILE *f, const void *data, size_t len)
4206 {
4207 size_t written = fwrite(data, 1, len, f);
4208 if (written != len)
4209 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4210 }
4211
4212 static void write_long(FILE *f, int l, struct target *target)
4213 {
4214 uint8_t val[4];
4215
4216 target_buffer_set_u32(target, val, l);
4217 write_data(f, val, 4);
4218 }
4219
4220 static void write_string(FILE *f, char *s)
4221 {
4222 write_data(f, s, strlen(s));
4223 }
4224
4225 typedef unsigned char UNIT[2]; /* unit of profiling */
4226
4227 /* Dump a gmon.out histogram file. */
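/* Layout produced below: the "gmon" magic, a version word (1), three padding
 * words, a single GMON_TAG_TIME_HIST byte (0), then a histogram header
 * (low_pc, high_pc, bucket count, sampling rate in Hz, the dimension string
 * "seconds" padded to 15 bytes plus the abbreviation 's'), followed by one
 * 16-bit little-endian counter per bucket, saturated at 65535. The header
 * words are emitted in the target's byte order via target_buffer_set_u32(). */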
4228 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4229 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4230 {
4231 uint32_t i;
4232 FILE *f = fopen(filename, "w");
4233 if (!f)
4234 return;
4235 write_string(f, "gmon");
4236 write_long(f, 0x00000001, target); /* Version */
4237 write_long(f, 0, target); /* padding */
4238 write_long(f, 0, target); /* padding */
4239 write_long(f, 0, target); /* padding */
4240
4241 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4242 write_data(f, &zero, 1);
4243
4244 /* figure out bucket size */
4245 uint32_t min;
4246 uint32_t max;
4247 if (with_range) {
4248 min = start_address;
4249 max = end_address;
4250 } else {
4251 min = samples[0];
4252 max = samples[0];
4253 for (i = 0; i < sample_num; i++) {
4254 if (min > samples[i])
4255 min = samples[i];
4256 if (max < samples[i])
4257 max = samples[i];
4258 }
4259
4260 /* max should be (largest sample + 1)
4261 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4262 max++;
4263 }
4264
4265 int address_space = max - min;
4266 assert(address_space >= 2);
4267
4268 /* FIXME: What is the reasonable number of buckets?
4269 * The profiling result will be more accurate if there are enough buckets. */
4270 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4271 uint32_t num_buckets = address_space / sizeof(UNIT);
4272 if (num_buckets > max_buckets)
4273 num_buckets = max_buckets;
4274 int *buckets = malloc(sizeof(int) * num_buckets);
4275 if (!buckets) {
4276 fclose(f);
4277 return;
4278 }
4279 memset(buckets, 0, sizeof(int) * num_buckets);
4280 for (i = 0; i < sample_num; i++) {
4281 uint32_t address = samples[i];
4282
4283 if ((address < min) || (max <= address))
4284 continue;
4285
4286 long long a = address - min;
4287 long long b = num_buckets;
4288 long long c = address_space;
4289 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4290 buckets[index_t]++;
4291 }
4292
4293 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4294 write_long(f, min, target); /* low_pc */
4295 write_long(f, max, target); /* high_pc */
4296 write_long(f, num_buckets, target); /* # of buckets */
4297 float sample_rate = sample_num / (duration_ms / 1000.0);
4298 write_long(f, sample_rate, target);
4299 write_string(f, "seconds");
4300 for (i = 0; i < (15-strlen("seconds")); i++)
4301 write_data(f, &zero, 1);
4302 write_string(f, "s");
4303
4304 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4305
4306 char *data = malloc(2 * num_buckets);
4307 if (data) {
4308 for (i = 0; i < num_buckets; i++) {
4309 int val;
4310 val = buckets[i];
4311 if (val > 65535)
4312 val = 65535;
4313 data[i * 2] = val&0xff;
4314 data[i * 2 + 1] = (val >> 8) & 0xff;
4315 }
4316 free(buckets);
4317 write_data(f, data, num_buckets * 2);
4318 free(data);
4319 } else
4320 free(buckets);
4321
4322 fclose(f);
4323 }
4324
4325 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4326 * which will be used as a random sampling of PC */
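/* Illustrative usage (two or four arguments, as checked below):
 *
 *   profile 10 gmon.out
 *   profile 10 gmon.out 0x08000000 0x08020000
 *
 * The first argument is parsed as a number and handed to target_profiling()
 * unchanged; the optional address pair limits the range covered by the
 * histogram written to the file named by the second argument.
 */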
4327 COMMAND_HANDLER(handle_profile_command)
4328 {
4329 struct target *target = get_current_target(CMD_CTX);
4330
4331 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4332 return ERROR_COMMAND_SYNTAX_ERROR;
4333
4334 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4335 uint32_t offset;
4336 uint32_t num_of_samples;
4337 int retval = ERROR_OK;
4338 bool halted_before_profiling = target->state == TARGET_HALTED;
4339
4340 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4341
4342 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4343 if (!samples) {
4344 LOG_ERROR("No memory to store samples.");
4345 return ERROR_FAIL;
4346 }
4347
4348 uint64_t timestart_ms = timeval_ms();
4349 /**
4350 * Some cores let us sample the PC without the
4351 * annoying halt/resume step; for example, ARMv7 PCSR.
4352 * Provide a way to use that more efficient mechanism.
4353 */
4354 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4355 &num_of_samples, offset);
4356 if (retval != ERROR_OK) {
4357 free(samples);
4358 return retval;
4359 }
4360 uint32_t duration_ms = timeval_ms() - timestart_ms;
4361
4362 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4363
4364 retval = target_poll(target);
4365 if (retval != ERROR_OK) {
4366 free(samples);
4367 return retval;
4368 }
4369
4370 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4371 /* The target was halted before we started and is running now. Halt it,
4372 * for consistency. */
4373 retval = target_halt(target);
4374 if (retval != ERROR_OK) {
4375 free(samples);
4376 return retval;
4377 }
4378 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4379 /* The target was running before we started and is halted now. Resume
4380 * it, for consistency. */
4381 retval = target_resume(target, 1, 0, 0, 0);
4382 if (retval != ERROR_OK) {
4383 free(samples);
4384 return retval;
4385 }
4386 }
4387
4388 retval = target_poll(target);
4389 if (retval != ERROR_OK) {
4390 free(samples);
4391 return retval;
4392 }
4393
4394 uint32_t start_address = 0;
4395 uint32_t end_address = 0;
4396 bool with_range = false;
4397 if (CMD_ARGC == 4) {
4398 with_range = true;
4399 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4400 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4401 }
4402
4403 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4404 with_range, start_address, end_address, target, duration_ms);
4405 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4406
4407 free(samples);
4408 return retval;
4409 }
4410
4411 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4412 {
4413 char *namebuf;
4414 Jim_Obj *obj_name, *obj_val;
4415 int result;
4416
4417 namebuf = alloc_printf("%s(%d)", varname, idx);
4418 if (!namebuf)
4419 return JIM_ERR;
4420
4421 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4422 jim_wide wide_val = val;
4423 obj_val = Jim_NewWideObj(interp, wide_val);
4424 if (!obj_name || !obj_val) {
4425 free(namebuf);
4426 return JIM_ERR;
4427 }
4428
4429 Jim_IncrRefCount(obj_name);
4430 Jim_IncrRefCount(obj_val);
4431 result = Jim_SetVariable(interp, obj_name, obj_val);
4432 Jim_DecrRefCount(interp, obj_name);
4433 Jim_DecrRefCount(interp, obj_val);
4434 free(namebuf);
4435 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4436 return result;
4437 }
4438
4439 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4440 {
4441 int e;
4442
4443 LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");
4444
4445 /* argv[0] = name of array to receive the data
4446 * argv[1] = desired element width in bits
4447 * argv[2] = memory address
4448 * argv[3] = count of times to read
4449 * argv[4] = optional "phys"
4450 */
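/* Illustrative Tcl usage of this deprecated command:
 *
 *   mem2array words 32 0x20000000 16
 *
 * fills the Tcl variables words(0) .. words(15), one per element read.
 */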
4451 if (argc < 4 || argc > 5) {
4452 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4453 return JIM_ERR;
4454 }
4455
4456 /* Arg 0: Name of the array variable */
4457 const char *varname = Jim_GetString(argv[0], NULL);
4458
4459 /* Arg 1: Bit width of one element */
4460 long l;
4461 e = Jim_GetLong(interp, argv[1], &l);
4462 if (e != JIM_OK)
4463 return e;
4464 const unsigned int width_bits = l;
4465
4466 if (width_bits != 8 &&
4467 width_bits != 16 &&
4468 width_bits != 32 &&
4469 width_bits != 64) {
4470 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4471 Jim_AppendStrings(interp, Jim_GetResult(interp),
4472 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4473 return JIM_ERR;
4474 }
4475 const unsigned int width = width_bits / 8;
4476
4477 /* Arg 2: Memory address */
4478 jim_wide wide_addr;
4479 e = Jim_GetWide(interp, argv[2], &wide_addr);
4480 if (e != JIM_OK)
4481 return e;
4482 target_addr_t addr = (target_addr_t)wide_addr;
4483
4484 /* Arg 3: Number of elements to read */
4485 e = Jim_GetLong(interp, argv[3], &l);
4486 if (e != JIM_OK)
4487 return e;
4488 size_t len = l;
4489
4490 /* Arg 4: phys */
4491 bool is_phys = false;
4492 if (argc > 4) {
4493 int str_len = 0;
4494 const char *phys = Jim_GetString(argv[4], &str_len);
4495 if (!strncmp(phys, "phys", str_len))
4496 is_phys = true;
4497 else
4498 return JIM_ERR;
4499 }
4500
4501 /* Argument checks */
4502 if (len == 0) {
4503 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4504 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4505 return JIM_ERR;
4506 }
4507 if ((addr + (len * width)) < addr) {
4508 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4509 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len wraps to zero?", NULL);
4510 return JIM_ERR;
4511 }
4512 if (len > 65536) {
4513 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4514 Jim_AppendStrings(interp, Jim_GetResult(interp),
4515 "mem2array: too large read request, exceeds 64K items", NULL);
4516 return JIM_ERR;
4517 }
4518
4519 if ((width == 1) ||
4520 ((width == 2) && ((addr & 1) == 0)) ||
4521 ((width == 4) && ((addr & 3) == 0)) ||
4522 ((width == 8) && ((addr & 7) == 0))) {
4523 /* alignment correct */
4524 } else {
4525 char buf[100];
4526 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4527 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4528 addr,
4529 width);
4530 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4531 return JIM_ERR;
4532 }
4533
4534 /* Transfer loop */
4535
4536 /* index counter */
4537 size_t idx = 0;
4538
4539 const size_t buffersize = 4096;
4540 uint8_t *buffer = malloc(buffersize);
4541 if (!buffer)
4542 return JIM_ERR;
4543
4544 /* assume ok */
4545 e = JIM_OK;
4546 while (len) {
4547 /* Slurp... in buffer size chunks */
4548 const unsigned int max_chunk_len = buffersize / width;
4549 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4550
4551 int retval;
4552 if (is_phys)
4553 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4554 else
4555 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4556 if (retval != ERROR_OK) {
4557 /* BOO !*/
4558 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4559 addr,
4560 width,
4561 chunk_len);
4562 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4563 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4564 e = JIM_ERR;
4565 break;
4566 } else {
4567 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4568 uint64_t v = 0;
4569 switch (width) {
4570 case 8:
4571 v = target_buffer_get_u64(target, &buffer[i*width]);
4572 break;
4573 case 4:
4574 v = target_buffer_get_u32(target, &buffer[i*width]);
4575 break;
4576 case 2:
4577 v = target_buffer_get_u16(target, &buffer[i*width]);
4578 break;
4579 case 1:
4580 v = buffer[i] & 0x0ff;
4581 break;
4582 }
4583 new_u64_array_element(interp, varname, idx, v);
4584 }
4585 len -= chunk_len;
4586 addr += chunk_len * width;
4587 }
4588 }
4589
4590 free(buffer);
4591
4592 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4593
4594 return e;
4595 }
4596
4597 static int target_jim_read_memory(Jim_Interp *interp, int argc,
4598 Jim_Obj * const *argv)
4599 {
4600 /*
4601 * argv[1] = memory address
4602 * argv[2] = desired element width in bits
4603 * argv[3] = number of elements to read
4604 * argv[4] = optional "phys"
4605 */
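/* Illustrative Tcl usage (this handler backs the 'read_memory' command):
 *
 *   read_memory 0x20000000 32 4
 *   read_memory 0x20000000 8 16 phys
 *
 * The result is a Tcl list of hexadecimal strings, one per element read.
 */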
4606
4607 if (argc < 4 || argc > 5) {
4608 Jim_WrongNumArgs(interp, 1, argv, "address width count ['phys']");
4609 return JIM_ERR;
4610 }
4611
4612 /* Arg 1: Memory address. */
4613 jim_wide wide_addr;
4614 int e;
4615 e = Jim_GetWide(interp, argv[1], &wide_addr);
4616
4617 if (e != JIM_OK)
4618 return e;
4619
4620 target_addr_t addr = (target_addr_t)wide_addr;
4621
4622 /* Arg 2: Bit width of one element. */
4623 long l;
4624 e = Jim_GetLong(interp, argv[2], &l);
4625
4626 if (e != JIM_OK)
4627 return e;
4628
4629 const unsigned int width_bits = l;
4630
4631 /* Arg 3: Number of elements to read. */
4632 e = Jim_GetLong(interp, argv[3], &l);
4633
4634 if (e != JIM_OK)
4635 return e;
4636
4637 size_t count = l;
4638
4639 /* Arg 4: Optional 'phys'. */
4640 bool is_phys = false;
4641
4642 if (argc > 4) {
4643 const char *phys = Jim_GetString(argv[4], NULL);
4644
4645 if (strcmp(phys, "phys")) {
4646 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4647 return JIM_ERR;
4648 }
4649
4650 is_phys = true;
4651 }
4652
4653 switch (width_bits) {
4654 case 8:
4655 case 16:
4656 case 32:
4657 case 64:
4658 break;
4659 default:
4660 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4661 return JIM_ERR;
4662 }
4663
4664 const unsigned int width = width_bits / 8;
4665
4666 if ((addr + (count * width)) < addr) {
4667 Jim_SetResultString(interp, "read_memory: addr + count wraps to zero", -1);
4668 return JIM_ERR;
4669 }
4670
4671 if (count > 65536) {
4672 Jim_SetResultString(interp, "read_memory: too large read request, exceeds 64K elements", -1);
4673 return JIM_ERR;
4674 }
4675
4676 struct command_context *cmd_ctx = current_command_context(interp);
4677 assert(cmd_ctx != NULL);
4678 struct target *target = get_current_target(cmd_ctx);
4679
4680 const size_t buffersize = 4096;
4681 uint8_t *buffer = malloc(buffersize);
4682
4683 if (!buffer) {
4684 LOG_ERROR("Failed to allocate memory");
4685 return JIM_ERR;
4686 }
4687
4688 Jim_Obj *result_list = Jim_NewListObj(interp, NULL, 0);
4689 Jim_IncrRefCount(result_list);
4690
4691 while (count > 0) {
4692 const unsigned int max_chunk_len = buffersize / width;
4693 const size_t chunk_len = MIN(count, max_chunk_len);
4694
4695 int retval;
4696
4697 if (is_phys)
4698 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4699 else
4700 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4701
4702 if (retval != ERROR_OK) {
4703 LOG_ERROR("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4704 addr, width_bits, chunk_len);
4705 Jim_SetResultString(interp, "read_memory: failed to read memory", -1);
4706 e = JIM_ERR;
4707 break;
4708 }
4709
4710 for (size_t i = 0; i < chunk_len ; i++) {
4711 uint64_t v = 0;
4712
4713 switch (width) {
4714 case 8:
4715 v = target_buffer_get_u64(target, &buffer[i * width]);
4716 break;
4717 case 4:
4718 v = target_buffer_get_u32(target, &buffer[i * width]);
4719 break;
4720 case 2:
4721 v = target_buffer_get_u16(target, &buffer[i * width]);
4722 break;
4723 case 1:
4724 v = buffer[i];
4725 break;
4726 }
4727
4728 char value_buf[19];
4729 snprintf(value_buf, sizeof(value_buf), "0x%" PRIx64, v);
4730
4731 Jim_ListAppendElement(interp, result_list,
4732 Jim_NewStringObj(interp, value_buf, -1));
4733 }
4734
4735 count -= chunk_len;
4736 addr += chunk_len * width;
4737 }
4738
4739 free(buffer);
4740
4741 if (e != JIM_OK) {
4742 Jim_DecrRefCount(interp, result_list);
4743 return e;
4744 }
4745
4746 Jim_SetResult(interp, result_list);
4747 Jim_DecrRefCount(interp, result_list);
4748
4749 return JIM_OK;
4750 }
4751
4752 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4753 {
4754 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4755 if (!namebuf)
4756 return JIM_ERR;
4757
4758 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4759 if (!obj_name) {
4760 free(namebuf);
4761 return JIM_ERR;
4762 }
4763
4764 Jim_IncrRefCount(obj_name);
4765 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4766 Jim_DecrRefCount(interp, obj_name);
4767 free(namebuf);
4768 if (!obj_val)
4769 return JIM_ERR;
4770
4771 jim_wide wide_val;
4772 int result = Jim_GetWide(interp, obj_val, &wide_val);
4773 *val = wide_val;
4774 return result;
4775 }
4776
4777 static int target_array2mem(Jim_Interp *interp, struct target *target,
4778 int argc, Jim_Obj *const *argv)
4779 {
4780 int e;
4781
4782 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4783
4784 /* argv[0] = name of array from which to read the data
4785 * argv[1] = desired element width in bits
4786 * argv[2] = memory address
4787 * argv[3] = number of elements to write
4788 * argv[4] = optional "phys"
4789 */
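/* Illustrative Tcl usage of this deprecated command:
 *
 *   set words(0) 0x11223344
 *   set words(1) 0x55667788
 *   array2mem words 32 0x20000000 2
 *
 * Elements are fetched from the Tcl variables "<varname>(<idx>)" and written
 * to target memory in buffer-sized chunks.
 */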
4790 if (argc < 4 || argc > 5) {
4791 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4792 return JIM_ERR;
4793 }
4794
4795 /* Arg 0: Name of the array variable */
4796 const char *varname = Jim_GetString(argv[0], NULL);
4797
4798 /* Arg 1: Bit width of one element */
4799 long l;
4800 e = Jim_GetLong(interp, argv[1], &l);
4801 if (e != JIM_OK)
4802 return e;
4803 const unsigned int width_bits = l;
4804
4805 if (width_bits != 8 &&
4806 width_bits != 16 &&
4807 width_bits != 32 &&
4808 width_bits != 64) {
4809 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4810 Jim_AppendStrings(interp, Jim_GetResult(interp),
4811 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4812 return JIM_ERR;
4813 }
4814 const unsigned int width = width_bits / 8;
4815
4816 /* Arg 2: Memory address */
4817 jim_wide wide_addr;
4818 e = Jim_GetWide(interp, argv[2], &wide_addr);
4819 if (e != JIM_OK)
4820 return e;
4821 target_addr_t addr = (target_addr_t)wide_addr;
4822
4823 /* Arg 3: Number of elements to write */
4824 e = Jim_GetLong(interp, argv[3], &l);
4825 if (e != JIM_OK)
4826 return e;
4827 size_t len = l;
4828
4829 /* Arg 4: Phys */
4830 bool is_phys = false;
4831 if (argc > 4) {
4832 int str_len = 0;
4833 const char *phys = Jim_GetString(argv[4], &str_len);
4834 if (!strncmp(phys, "phys", str_len))
4835 is_phys = true;
4836 else
4837 return JIM_ERR;
4838 }
4839
4840 /* Argument checks */
4841 if (len == 0) {
4842 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4843 Jim_AppendStrings(interp, Jim_GetResult(interp),
4844 "array2mem: zero width read?", NULL);
4845 return JIM_ERR;
4846 }
4847
4848 if ((addr + (len * width)) < addr) {
4849 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4850 Jim_AppendStrings(interp, Jim_GetResult(interp),
4851 "array2mem: addr + len - wraps to zero?", NULL);
4852 return JIM_ERR;
4853 }
4854
4855 if (len > 65536) {
4856 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4857 Jim_AppendStrings(interp, Jim_GetResult(interp),
4858 "array2mem: too large memory write request, exceeds 64K items", NULL);
4859 return JIM_ERR;
4860 }
4861
4862 if ((width == 1) ||
4863 ((width == 2) && ((addr & 1) == 0)) ||
4864 ((width == 4) && ((addr & 3) == 0)) ||
4865 ((width == 8) && ((addr & 7) == 0))) {
4866 /* alignment correct */
4867 } else {
4868 char buf[100];
4869 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4870 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte writes",
4871 addr,
4872 width);
4873 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4874 return JIM_ERR;
4875 }
4876
4877 /* Transfer loop */
4878
4879 /* assume ok */
4880 e = JIM_OK;
4881
4882 const size_t buffersize = 4096;
4883 uint8_t *buffer = malloc(buffersize);
4884 if (!buffer)
4885 return JIM_ERR;
4886
4887 /* index counter */
4888 size_t idx = 0;
4889
4890 while (len) {
4891 /* Slurp... in buffer size chunks */
4892 const unsigned int max_chunk_len = buffersize / width;
4893
4894 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4895
4896 /* Fill the buffer */
4897 for (size_t i = 0; i < chunk_len; i++, idx++) {
4898 uint64_t v = 0;
4899 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4900 free(buffer);
4901 return JIM_ERR;
4902 }
4903 switch (width) {
4904 case 8:
4905 target_buffer_set_u64(target, &buffer[i * width], v);
4906 break;
4907 case 4:
4908 target_buffer_set_u32(target, &buffer[i * width], v);
4909 break;
4910 case 2:
4911 target_buffer_set_u16(target, &buffer[i * width], v);
4912 break;
4913 case 1:
4914 buffer[i] = v & 0x0ff;
4915 break;
4916 }
4917 }
4918 len -= chunk_len;
4919
4920 /* Write the buffer to memory */
4921 int retval;
4922 if (is_phys)
4923 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4924 else
4925 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4926 if (retval != ERROR_OK) {
4927 /* BOO !*/
4928 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4929 addr,
4930 width,
4931 chunk_len);
4932 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4933 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4934 e = JIM_ERR;
4935 break;
4936 }
4937 addr += chunk_len * width;
4938 }
4939
4940 free(buffer);
4941
4942 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4943
4944 return e;
4945 }
4946
4947 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4948 Jim_Obj * const *argv)
4949 {
4950 /*
4951 * argv[1] = memory address
4952 * argv[2] = desired element width in bits
4953 * argv[3] = list of data to write
4954 * argv[4] = optional "phys"
4955 */
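/* Illustrative Tcl usage (this handler backs the 'write_memory' command):
 *
 *   write_memory 0x20000000 32 {0xdeadbeef 0xcafebabe}
 *   write_memory 0x20000000 8 {0x01 0x02 0x03} phys
 *
 * The element count is taken from the length of the data list.
 */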
4956
4957 if (argc < 4 || argc > 5) {
4958 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4959 return JIM_ERR;
4960 }
4961
4962 /* Arg 1: Memory address. */
4963 int e;
4964 jim_wide wide_addr;
4965 e = Jim_GetWide(interp, argv[1], &wide_addr);
4966
4967 if (e != JIM_OK)
4968 return e;
4969
4970 target_addr_t addr = (target_addr_t)wide_addr;
4971
4972 /* Arg 2: Bit width of one element. */
4973 long l;
4974 e = Jim_GetLong(interp, argv[2], &l);
4975
4976 if (e != JIM_OK)
4977 return e;
4978
4979 const unsigned int width_bits = l;
4980 size_t count = Jim_ListLength(interp, argv[3]);
4981
4982 /* Arg 4: Optional 'phys'. */
4983 bool is_phys = false;
4984
4985 if (argc > 4) {
4986 const char *phys = Jim_GetString(argv[4], NULL);
4987
4988 if (strcmp(phys, "phys")) {
4989 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4990 return JIM_ERR;
4991 }
4992
4993 is_phys = true;
4994 }
4995
4996 switch (width_bits) {
4997 case 8:
4998 case 16:
4999 case 32:
5000 case 64:
5001 break;
5002 default:
5003 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
5004 return JIM_ERR;
5005 }
5006
5007 const unsigned int width = width_bits / 8;
5008
5009 if ((addr + (count * width)) < addr) {
5010 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
5011 return JIM_ERR;
5012 }
5013
5014 if (count > 65536) {
5015 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
5016 return JIM_ERR;
5017 }
5018
5019 struct command_context *cmd_ctx = current_command_context(interp);
5020 assert(cmd_ctx != NULL);
5021 struct target *target = get_current_target(cmd_ctx);
5022
5023 const size_t buffersize = 4096;
5024 uint8_t *buffer = malloc(buffersize);
5025
5026 if (!buffer) {
5027 LOG_ERROR("Failed to allocate memory");
5028 return JIM_ERR;
5029 }
5030
5031 size_t j = 0;
5032
5033 while (count > 0) {
5034 const unsigned int max_chunk_len = buffersize / width;
5035 const size_t chunk_len = MIN(count, max_chunk_len);
5036
5037 for (size_t i = 0; i < chunk_len; i++, j++) {
5038 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5039 jim_wide element_wide;
5040 Jim_GetWide(interp, tmp, &element_wide);
5041
5042 const uint64_t v = element_wide;
5043
5044 switch (width) {
5045 case 8:
5046 target_buffer_set_u64(target, &buffer[i * width], v);
5047 break;
5048 case 4:
5049 target_buffer_set_u32(target, &buffer[i * width], v);
5050 break;
5051 case 2:
5052 target_buffer_set_u16(target, &buffer[i * width], v);
5053 break;
5054 case 1:
5055 buffer[i] = v & 0x0ff;
5056 break;
5057 }
5058 }
5059
5060 count -= chunk_len;
5061
5062 int retval;
5063
5064 if (is_phys)
5065 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5066 else
5067 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5068
5069 if (retval != ERROR_OK) {
5070 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5071 addr, width_bits, chunk_len);
5072 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5073 e = JIM_ERR;
5074 break;
5075 }
5076
5077 addr += chunk_len * width;
5078 }
5079
5080 free(buffer);
5081
5082 return e;
5083 }
5084
5085 /* FIX? should we propagate errors here rather than printing them
5086 * and continuing?
5087 */
5088 void target_handle_event(struct target *target, enum target_event e)
5089 {
5090 struct target_event_action *teap;
5091 int retval;
5092
5093 for (teap = target->event_action; teap; teap = teap->next) {
5094 if (teap->event == e) {
5095 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
5096 target->target_number,
5097 target_name(target),
5098 target_type_name(target),
5099 e,
5100 target_event_name(e),
5101 Jim_GetString(teap->body, NULL));
5102
5103 /* Override current target by the target an event
5104 * is issued from (lots of scripts need it).
5105 * Return back to previous override as soon
5106 * as the handler processing is done */
5107 struct command_context *cmd_ctx = current_command_context(teap->interp);
5108 struct target *saved_target_override = cmd_ctx->current_target_override;
5109 cmd_ctx->current_target_override = target;
5110
5111 retval = Jim_EvalObj(teap->interp, teap->body);
5112
5113 cmd_ctx->current_target_override = saved_target_override;
5114
5115 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
5116 return;
5117
5118 if (retval == JIM_RETURN)
5119 retval = teap->interp->returnCode;
5120
5121 if (retval != JIM_OK) {
5122 Jim_MakeErrorMessage(teap->interp);
5123 LOG_USER("Error executing event %s on target %s:\n%s",
5124 target_event_name(e),
5125 target_name(target),
5126 Jim_GetString(Jim_GetResult(teap->interp), NULL));
5127 /* clean both error code and stacktrace before return */
5128 Jim_Eval(teap->interp, "error \"\" \"\"");
5129 }
5130 }
5131 }
5132 }
5133
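/* Backs the 'get_reg' command (registered elsewhere in this file).
 * Illustrative usage:
 *
 *   get_reg {pc sp}       - returns a dict such as: pc 0x... sp 0x...
 *   get_reg -force {pc}   - re-reads the register from the target first
 */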
5134 static int target_jim_get_reg(Jim_Interp *interp, int argc,
5135 Jim_Obj * const *argv)
5136 {
5137 bool force = false;
5138
5139 if (argc == 3) {
5140 const char *option = Jim_GetString(argv[1], NULL);
5141
5142 if (!strcmp(option, "-force")) {
5143 argc--;
5144 argv++;
5145 force = true;
5146 } else {
5147 Jim_SetResultFormatted(interp, "invalid option '%s'", option);
5148 return JIM_ERR;
5149 }
5150 }
5151
5152 if (argc != 2) {
5153 Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
5154 return JIM_ERR;
5155 }
5156
5157 const int length = Jim_ListLength(interp, argv[1]);
5158
5159 Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);
5160
5161 if (!result_dict)
5162 return JIM_ERR;
5163
5164 struct command_context *cmd_ctx = current_command_context(interp);
5165 assert(cmd_ctx != NULL);
5166 const struct target *target = get_current_target(cmd_ctx);
5167
5168 for (int i = 0; i < length; i++) {
5169 Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);
5170
5171 if (!elem)
5172 return JIM_ERR;
5173
5174 const char *reg_name = Jim_String(elem);
5175
5176 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5177 false);
5178
5179 if (!reg || !reg->exist) {
5180 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5181 return JIM_ERR;
5182 }
5183
5184 if (force) {
5185 int retval = reg->type->get(reg);
5186
5187 if (retval != ERROR_OK) {
5188 Jim_SetResultFormatted(interp, "failed to read register '%s'",
5189 reg_name);
5190 return JIM_ERR;
5191 }
5192 }
5193
5194 char *reg_value = buf_to_hex_str(reg->value, reg->size);
5195
5196 if (!reg_value) {
5197 LOG_ERROR("Failed to allocate memory");
5198 return JIM_ERR;
5199 }
5200
5201 char *tmp = alloc_printf("0x%s", reg_value);
5202
5203 free(reg_value);
5204
5205 if (!tmp) {
5206 LOG_ERROR("Failed to allocate memory");
5207 return JIM_ERR;
5208 }
5209
5210 Jim_DictAddElement(interp, result_dict, elem,
5211 Jim_NewStringObj(interp, tmp, -1));
5212
5213 free(tmp);
5214 }
5215
5216 Jim_SetResult(interp, result_dict);
5217
5218 return JIM_OK;
5219 }
5220
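/* Backs the 'set_reg' command (registered elsewhere in this file).
 * Illustrative usage:
 *
 *   set_reg {pc 0x20000000 sp 0x20010000}
 *
 * The argument is a dict of register-name/value pairs; each value is parsed
 * with str_to_buf() and written through the register's set() handler.
 */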
5221 static int target_jim_set_reg(Jim_Interp *interp, int argc,
5222 Jim_Obj * const *argv)
5223 {
5224 if (argc != 2) {
5225 Jim_WrongNumArgs(interp, 1, argv, "dict");
5226 return JIM_ERR;
5227 }
5228
5229 int tmp;
5230 #if JIM_VERSION >= 80
5231 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
5232
5233 if (!dict)
5234 return JIM_ERR;
5235 #else
5236 Jim_Obj **dict;
5237 int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);
5238
5239 if (ret != JIM_OK)
5240 return ret;
5241 #endif
5242
5243 const unsigned int length = tmp;
5244 struct command_context *cmd_ctx = current_command_context(interp);
5245 assert(cmd_ctx);
5246 const struct target *target = get_current_target(cmd_ctx);
5247
5248 for (unsigned int i = 0; i < length; i += 2) {
5249 const char *reg_name = Jim_String(dict[i]);
5250 const char *reg_value = Jim_String(dict[i + 1]);
5251 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5252 false);
5253
5254 if (!reg || !reg->exist) {
5255 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5256 return JIM_ERR;
5257 }
5258
5259 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
5260
5261 if (!buf) {
5262 LOG_ERROR("Failed to allocate memory");
5263 return JIM_ERR;
5264 }
5265
5266 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
5267 int retval = reg->type->set(reg, buf);
5268 free(buf);
5269
5270 if (retval != ERROR_OK) {
5271 Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
5272 reg_value, reg_name);
5273 return JIM_ERR;
5274 }
5275 }
5276
5277 return JIM_OK;
5278 }
5279
5280 /**
5281 * Returns true only if the target has a handler for the specified event.
5282 */
5283 bool target_has_event_action(struct target *target, enum target_event event)
5284 {
5285 struct target_event_action *teap;
5286
5287 for (teap = target->event_action; teap; teap = teap->next) {
5288 if (teap->event == event)
5289 return true;
5290 }
5291 return false;
5292 }
5293
5294 enum target_cfg_param {
5295 TCFG_TYPE,
5296 TCFG_EVENT,
5297 TCFG_WORK_AREA_VIRT,
5298 TCFG_WORK_AREA_PHYS,
5299 TCFG_WORK_AREA_SIZE,
5300 TCFG_WORK_AREA_BACKUP,
5301 TCFG_ENDIAN,
5302 TCFG_COREID,
5303 TCFG_CHAIN_POSITION,
5304 TCFG_DBGBASE,
5305 TCFG_RTOS,
5306 TCFG_DEFER_EXAMINE,
5307 TCFG_GDB_PORT,
5308 TCFG_GDB_MAX_CONNECTIONS,
5309 };
5310
5311 static struct jim_nvp nvp_config_opts[] = {
5312 { .name = "-type", .value = TCFG_TYPE },
5313 { .name = "-event", .value = TCFG_EVENT },
5314 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
5315 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
5316 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
5317 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
5318 { .name = "-endian", .value = TCFG_ENDIAN },
5319 { .name = "-coreid", .value = TCFG_COREID },
5320 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
5321 { .name = "-dbgbase", .value = TCFG_DBGBASE },
5322 { .name = "-rtos", .value = TCFG_RTOS },
5323 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
5324 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
5325 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
5326 { .name = NULL, .value = -1 }
5327 };
5328
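/* Shared implementation of the per-target 'configure' and 'cget' subcommands.
 * Illustrative usage:
 *
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { adapter speed 4000 }
 *   $_TARGETNAME cget -coreid
 *
 * With goi->isconfigure set the options are written; otherwise the current
 * value of the requested option is returned.
 */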
5329 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5330 {
5331 struct jim_nvp *n;
5332 Jim_Obj *o;
5333 jim_wide w;
5334 int e;
5335
5336 /* parse config or cget options ... */
5337 while (goi->argc > 0) {
5338 Jim_SetEmptyResult(goi->interp);
5339 /* jim_getopt_debug(goi); */
5340
5341 if (target->type->target_jim_configure) {
5342 /* target defines a configure function */
5343 /* target gets first dibs on parameters */
5344 e = (*(target->type->target_jim_configure))(target, goi);
5345 if (e == JIM_OK) {
5346 /* more? */
5347 continue;
5348 }
5349 if (e == JIM_ERR) {
5350 /* An error */
5351 return e;
5352 }
5353 /* otherwise we 'continue' below */
5354 }
5355 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5356 if (e != JIM_OK) {
5357 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5358 return e;
5359 }
5360 switch (n->value) {
5361 case TCFG_TYPE:
5362 /* not settable */
5363 if (goi->isconfigure) {
5364 Jim_SetResultFormatted(goi->interp,
5365 "not settable: %s", n->name);
5366 return JIM_ERR;
5367 } else {
5368 no_params:
5369 if (goi->argc != 0) {
5370 Jim_WrongNumArgs(goi->interp,
5371 goi->argc, goi->argv,
5372 "NO PARAMS");
5373 return JIM_ERR;
5374 }
5375 }
5376 Jim_SetResultString(goi->interp,
5377 target_type_name(target), -1);
5378 /* loop for more */
5379 break;
5380 case TCFG_EVENT:
5381 if (goi->argc == 0) {
5382 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5383 return JIM_ERR;
5384 }
5385
5386 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5387 if (e != JIM_OK) {
5388 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5389 return e;
5390 }
5391
5392 if (goi->isconfigure) {
5393 if (goi->argc != 1) {
5394 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5395 return JIM_ERR;
5396 }
5397 } else {
5398 if (goi->argc != 0) {
5399 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5400 return JIM_ERR;
5401 }
5402 }
5403
5404 {
5405 struct target_event_action *teap;
5406
5407 teap = target->event_action;
5408 /* replace existing? */
5409 while (teap) {
5410 if (teap->event == (enum target_event)n->value)
5411 break;
5412 teap = teap->next;
5413 }
5414
5415 if (goi->isconfigure) {
5416 /* START_DEPRECATED_TPIU */
5417 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5418 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5419 /* END_DEPRECATED_TPIU */
5420
5421 bool replace = true;
5422 if (!teap) {
5423 /* create new */
5424 teap = calloc(1, sizeof(*teap));
5425 replace = false;
5426 }
5427 teap->event = n->value;
5428 teap->interp = goi->interp;
5429 jim_getopt_obj(goi, &o);
5430 if (teap->body)
5431 Jim_DecrRefCount(teap->interp, teap->body);
5432 teap->body = Jim_DuplicateObj(goi->interp, o);
5433 /*
5434 * FIXME:
5435 * Tcl/TK - "tk events" have a nice feature.
5436 * See the "BIND" command.
5437 * We should support that here.
5438 * You can specify %X and %Y in the event code.
5439 * The idea is: %T - target name.
5440 * The idea is: %N - target number
5441 * The idea is: %E - event name.
5442 */
5443 Jim_IncrRefCount(teap->body);
5444
5445 if (!replace) {
5446 /* add to head of event list */
5447 teap->next = target->event_action;
5448 target->event_action = teap;
5449 }
5450 Jim_SetEmptyResult(goi->interp);
5451 } else {
5452 /* get */
5453 if (!teap)
5454 Jim_SetEmptyResult(goi->interp);
5455 else
5456 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5457 }
5458 }
5459 /* loop for more */
5460 break;
5461
5462 case TCFG_WORK_AREA_VIRT:
5463 if (goi->isconfigure) {
5464 target_free_all_working_areas(target);
5465 e = jim_getopt_wide(goi, &w);
5466 if (e != JIM_OK)
5467 return e;
5468 target->working_area_virt = w;
5469 target->working_area_virt_spec = true;
5470 } else {
5471 if (goi->argc != 0)
5472 goto no_params;
5473 }
5474 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5475 /* loop for more */
5476 break;
5477
5478 case TCFG_WORK_AREA_PHYS:
5479 if (goi->isconfigure) {
5480 target_free_all_working_areas(target);
5481 e = jim_getopt_wide(goi, &w);
5482 if (e != JIM_OK)
5483 return e;
5484 target->working_area_phys = w;
5485 target->working_area_phys_spec = true;
5486 } else {
5487 if (goi->argc != 0)
5488 goto no_params;
5489 }
5490 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5491 /* loop for more */
5492 break;
5493
5494 case TCFG_WORK_AREA_SIZE:
5495 if (goi->isconfigure) {
5496 target_free_all_working_areas(target);
5497 e = jim_getopt_wide(goi, &w);
5498 if (e != JIM_OK)
5499 return e;
5500 target->working_area_size = w;
5501 } else {
5502 if (goi->argc != 0)
5503 goto no_params;
5504 }
5505 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5506 /* loop for more */
5507 break;
5508
5509 case TCFG_WORK_AREA_BACKUP:
5510 if (goi->isconfigure) {
5511 target_free_all_working_areas(target);
5512 e = jim_getopt_wide(goi, &w);
5513 if (e != JIM_OK)
5514 return e;
5515 /* make this exactly 1 or 0 */
5516 target->backup_working_area = (!!w);
5517 } else {
5518 if (goi->argc != 0)
5519 goto no_params;
5520 }
5521 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5522 /* loop for more */
5523 break;
5524
5525
5526 case TCFG_ENDIAN:
5527 if (goi->isconfigure) {
5528 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5529 if (e != JIM_OK) {
5530 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5531 return e;
5532 }
5533 target->endianness = n->value;
5534 } else {
5535 if (goi->argc != 0)
5536 goto no_params;
5537 }
5538 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5539 if (!n->name) {
5540 target->endianness = TARGET_LITTLE_ENDIAN;
5541 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5542 }
5543 Jim_SetResultString(goi->interp, n->name, -1);
5544 /* loop for more */
5545 break;
5546
5547 case TCFG_COREID:
5548 if (goi->isconfigure) {
5549 e = jim_getopt_wide(goi, &w);
5550 if (e != JIM_OK)
5551 return e;
5552 target->coreid = (int32_t)w;
5553 } else {
5554 if (goi->argc != 0)
5555 goto no_params;
5556 }
5557 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5558 /* loop for more */
5559 break;
5560
5561 case TCFG_CHAIN_POSITION:
5562 if (goi->isconfigure) {
5563 Jim_Obj *o_t;
5564 struct jtag_tap *tap;
5565
5566 if (target->has_dap) {
5567 Jim_SetResultString(goi->interp,
5568 "target requires -dap parameter instead of -chain-position!", -1);
5569 return JIM_ERR;
5570 }
5571
5572 target_free_all_working_areas(target);
5573 e = jim_getopt_obj(goi, &o_t);
5574 if (e != JIM_OK)
5575 return e;
5576 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5577 if (!tap)
5578 return JIM_ERR;
5579 target->tap = tap;
5580 target->tap_configured = true;
5581 } else {
5582 if (goi->argc != 0)
5583 goto no_params;
5584 }
5585 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5586 /* loop for more */
5587 break;
5588 case TCFG_DBGBASE:
5589 if (goi->isconfigure) {
5590 e = jim_getopt_wide(goi, &w);
5591 if (e != JIM_OK)
5592 return e;
5593 target->dbgbase = (uint32_t)w;
5594 target->dbgbase_set = true;
5595 } else {
5596 if (goi->argc != 0)
5597 goto no_params;
5598 }
5599 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5600 /* loop for more */
5601 break;
5602 case TCFG_RTOS:
5603 /* RTOS */
5604 {
5605 int result = rtos_create(goi, target);
5606 if (result != JIM_OK)
5607 return result;
5608 }
5609 /* loop for more */
5610 break;
5611
5612 case TCFG_DEFER_EXAMINE:
5613 /* DEFER_EXAMINE */
5614 target->defer_examine = true;
5615 /* loop for more */
5616 break;
5617
5618 case TCFG_GDB_PORT:
5619 if (goi->isconfigure) {
5620 struct command_context *cmd_ctx = current_command_context(goi->interp);
5621 if (cmd_ctx->mode != COMMAND_CONFIG) {
5622 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5623 return JIM_ERR;
5624 }
5625
5626 const char *s;
5627 e = jim_getopt_string(goi, &s, NULL);
5628 if (e != JIM_OK)
5629 return e;
5630 free(target->gdb_port_override);
5631 target->gdb_port_override = strdup(s);
5632 } else {
5633 if (goi->argc != 0)
5634 goto no_params;
5635 }
5636 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5637 /* loop for more */
5638 break;
5639
5640 case TCFG_GDB_MAX_CONNECTIONS:
5641 if (goi->isconfigure) {
5642 struct command_context *cmd_ctx = current_command_context(goi->interp);
5643 if (cmd_ctx->mode != COMMAND_CONFIG) {
5644 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5645 return JIM_ERR;
5646 }
5647
5648 e = jim_getopt_wide(goi, &w);
5649 if (e != JIM_OK)
5650 return e;
5651 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5652 } else {
5653 if (goi->argc != 0)
5654 goto no_params;
5655 }
5656 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5657 break;
5658 }
5659 } /* while (goi->argc) */
5660
5661
5662 /* done - we return */
5663 return JIM_OK;
5664 }
5665
5666 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5667 {
5668 struct command *c = jim_to_command(interp);
5669 struct jim_getopt_info goi;
5670
5671 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5672 goi.isconfigure = !strcmp(c->name, "configure");
5673 if (goi.argc < 1) {
5674 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5675 "missing: -option ...");
5676 return JIM_ERR;
5677 }
5678 struct command_context *cmd_ctx = current_command_context(interp);
5679 assert(cmd_ctx);
5680 struct target *target = get_current_target(cmd_ctx);
5681 return target_configure(&goi, target);
5682 }
5683
5684 static int jim_target_mem2array(Jim_Interp *interp,
5685 int argc, Jim_Obj *const *argv)
5686 {
5687 struct command_context *cmd_ctx = current_command_context(interp);
5688 assert(cmd_ctx);
5689 struct target *target = get_current_target(cmd_ctx);
5690 return target_mem2array(interp, target, argc - 1, argv + 1);
5691 }
5692
5693 static int jim_target_array2mem(Jim_Interp *interp,
5694 int argc, Jim_Obj *const *argv)
5695 {
5696 struct command_context *cmd_ctx = current_command_context(interp);
5697 assert(cmd_ctx);
5698 struct target *target = get_current_target(cmd_ctx);
5699 return target_array2mem(interp, target, argc - 1, argv + 1);
5700 }
5701
5702 static int jim_target_tap_disabled(Jim_Interp *interp)
5703 {
5704 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5705 return JIM_ERR;
5706 }
5707
5708 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5709 {
5710 bool allow_defer = false;
5711
5712 struct jim_getopt_info goi;
5713 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5714 if (goi.argc > 1) {
5715 const char *cmd_name = Jim_GetString(argv[0], NULL);
5716 Jim_SetResultFormatted(goi.interp,
5717 "usage: %s ['allow-defer']", cmd_name);
5718 return JIM_ERR;
5719 }
5720 if (goi.argc > 0 &&
5721 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5722 /* consume it */
5723 Jim_Obj *obj;
5724 int e = jim_getopt_obj(&goi, &obj);
5725 if (e != JIM_OK)
5726 return e;
5727 allow_defer = true;
5728 }
5729
5730 struct command_context *cmd_ctx = current_command_context(interp);
5731 assert(cmd_ctx);
5732 struct target *target = get_current_target(cmd_ctx);
5733 if (!target->tap->enabled)
5734 return jim_target_tap_disabled(interp);
5735
5736 if (allow_defer && target->defer_examine) {
5737 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5738 LOG_INFO("Use arp_examine command to examine it manually!");
5739 return JIM_OK;
5740 }
5741
5742 int e = target->type->examine(target);
5743 if (e != ERROR_OK) {
5744 target_reset_examined(target);
5745 return JIM_ERR;
5746 }
5747
5748 target_set_examined(target);
5749
5750 return JIM_OK;
5751 }
5752
5753 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5754 {
5755 struct command_context *cmd_ctx = current_command_context(interp);
5756 assert(cmd_ctx);
5757 struct target *target = get_current_target(cmd_ctx);
5758
5759 Jim_SetResultBool(interp, target_was_examined(target));
5760 return JIM_OK;
5761 }
5762
5763 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5764 {
5765 struct command_context *cmd_ctx = current_command_context(interp);
5766 assert(cmd_ctx);
5767 struct target *target = get_current_target(cmd_ctx);
5768
5769 Jim_SetResultBool(interp, target->defer_examine);
5770 return JIM_OK;
5771 }
5772
5773 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5774 {
5775 if (argc != 1) {
5776 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5777 return JIM_ERR;
5778 }
5779 struct command_context *cmd_ctx = current_command_context(interp);
5780 assert(cmd_ctx);
5781 struct target *target = get_current_target(cmd_ctx);
5782
5783 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5784 return JIM_ERR;
5785
5786 return JIM_OK;
5787 }
5788
5789 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5790 {
5791 if (argc != 1) {
5792 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5793 return JIM_ERR;
5794 }
5795 struct command_context *cmd_ctx = current_command_context(interp);
5796 assert(cmd_ctx);
5797 struct target *target = get_current_target(cmd_ctx);
5798 if (!target->tap->enabled)
5799 return jim_target_tap_disabled(interp);
5800
5801 int e;
5802 if (!(target_was_examined(target)))
5803 e = ERROR_TARGET_NOT_EXAMINED;
5804 else
5805 e = target->type->poll(target);
5806 if (e != ERROR_OK)
5807 return JIM_ERR;
5808 return JIM_OK;
5809 }
5810
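/* Implements the per-target 'arp_reset' subcommand used by the reset
 * framework: the first argument selects assert|deassert, the second is a
 * boolean telling whether the target should halt after reset.
 * Example invocation (hypothetical target name):
 *   mychip.cpu arp_reset assert 1
 */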
5811 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5812 {
5813 struct jim_getopt_info goi;
5814 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5815
5816 if (goi.argc != 2) {
5817 Jim_WrongNumArgs(interp, 0, argv,
5818 "([tT]|[fF]|assert|deassert) BOOL");
5819 return JIM_ERR;
5820 }
5821
5822 struct jim_nvp *n;
5823 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5824 if (e != JIM_OK) {
5825 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5826 return e;
5827 }
5828 /* the halt or not param */
5829 jim_wide a;
5830 e = jim_getopt_wide(&goi, &a);
5831 if (e != JIM_OK)
5832 return e;
5833
5834 struct command_context *cmd_ctx = current_command_context(interp);
5835 assert(cmd_ctx);
5836 struct target *target = get_current_target(cmd_ctx);
5837 if (!target->tap->enabled)
5838 return jim_target_tap_disabled(interp);
5839
5840 if (!target->type->assert_reset || !target->type->deassert_reset) {
5841 Jim_SetResultFormatted(interp,
5842 "No target-specific reset for %s",
5843 target_name(target));
5844 return JIM_ERR;
5845 }
5846
5847 if (target->defer_examine)
5848 target_reset_examined(target);
5849
5850 /* determine if we should halt or not. */
5851 target->reset_halt = (a != 0);
5852 /* When this happens, all working areas become invalid. */
5853 target_free_all_working_areas_restore(target, 0);
5854
5855 /* do the assert */
5856 if (n->value == NVP_ASSERT)
5857 e = target->type->assert_reset(target);
5858 else
5859 e = target->type->deassert_reset(target);
5860 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5861 }
5862
5863 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5864 {
5865 if (argc != 1) {
5866 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5867 return JIM_ERR;
5868 }
5869 struct command_context *cmd_ctx = current_command_context(interp);
5870 assert(cmd_ctx);
5871 struct target *target = get_current_target(cmd_ctx);
5872 if (!target->tap->enabled)
5873 return jim_target_tap_disabled(interp);
5874 int e = target->type->halt(target);
5875 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5876 }
5877
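/* Implements the per-target 'arp_waitstate' subcommand: waits until the
 * target reaches the named state or the timeout (in milliseconds) expires.
 * Example invocation (hypothetical target name):
 *   mychip.cpu arp_waitstate halted 5000
 */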
5878 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5879 {
5880 struct jim_getopt_info goi;
5881 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5882
5883 /* params: <name> statename timeoutmsecs */
5884 if (goi.argc != 2) {
5885 const char *cmd_name = Jim_GetString(argv[0], NULL);
5886 Jim_SetResultFormatted(goi.interp,
5887 "%s <state_name> <timeout_in_msec>", cmd_name);
5888 return JIM_ERR;
5889 }
5890
5891 struct jim_nvp *n;
5892 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5893 if (e != JIM_OK) {
5894 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5895 return e;
5896 }
5897 jim_wide a;
5898 e = jim_getopt_wide(&goi, &a);
5899 if (e != JIM_OK)
5900 return e;
5901 struct command_context *cmd_ctx = current_command_context(interp);
5902 assert(cmd_ctx);
5903 struct target *target = get_current_target(cmd_ctx);
5904 if (!target->tap->enabled)
5905 return jim_target_tap_disabled(interp);
5906
5907 e = target_wait_state(target, n->value, a);
5908 if (e != ERROR_OK) {
5909 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5910 Jim_SetResultFormatted(goi.interp,
5911 "target: %s wait %s fails (%#s) %s",
5912 target_name(target), n->name,
5913 obj, target_strerror_safe(e));
5914 return JIM_ERR;
5915 }
5916 return JIM_OK;
5917 }
5918 /* List, in human-readable form, the events defined for this target.
5919  * Scripts/programs should use 'name cget -event NAME' instead.
5920 */
5921 COMMAND_HANDLER(handle_target_event_list)
5922 {
5923 struct target *target = get_current_target(CMD_CTX);
5924 struct target_event_action *teap = target->event_action;
5925
5926 command_print(CMD, "Event actions for target (%d) %s\n",
5927 target->target_number,
5928 target_name(target));
5929 command_print(CMD, "%-25s | Body", "Event");
5930 command_print(CMD, "------------------------- | "
5931 "----------------------------------------");
5932 while (teap) {
5933 command_print(CMD, "%-25s | %s",
5934 target_event_name(teap->event),
5935 Jim_GetString(teap->body, NULL));
5936 teap = teap->next;
5937 }
5938 command_print(CMD, "***END***");
5939 return ERROR_OK;
5940 }
5941 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5942 {
5943 if (argc != 1) {
5944 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5945 return JIM_ERR;
5946 }
5947 struct command_context *cmd_ctx = current_command_context(interp);
5948 assert(cmd_ctx);
5949 struct target *target = get_current_target(cmd_ctx);
5950 Jim_SetResultString(interp, target_state_name(target), -1);
5951 return JIM_OK;
5952 }
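/* Implements the per-target 'invoke-event' subcommand: runs the Tcl body
 * configured for the named event on the current target, if any. */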
5953 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5954 {
5955 struct jim_getopt_info goi;
5956 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5957 if (goi.argc != 1) {
5958 const char *cmd_name = Jim_GetString(argv[0], NULL);
5959 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5960 return JIM_ERR;
5961 }
5962 struct jim_nvp *n;
5963 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5964 if (e != JIM_OK) {
5965 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5966 return e;
5967 }
5968 struct command_context *cmd_ctx = current_command_context(interp);
5969 assert(cmd_ctx);
5970 struct target *target = get_current_target(cmd_ctx);
5971 target_handle_event(target, n->value);
5972 return JIM_OK;
5973 }
5974
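/* Subcommands registered under each target's own command, i.e. invoked as
 * '<targetname> <subcommand> ...' (for example '<targetname> curstate'). */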
5975 static const struct command_registration target_instance_command_handlers[] = {
5976 {
5977 .name = "configure",
5978 .mode = COMMAND_ANY,
5979 .jim_handler = jim_target_configure,
5980 .help = "configure a new target for use",
5981 .usage = "[target_attribute ...]",
5982 },
5983 {
5984 .name = "cget",
5985 .mode = COMMAND_ANY,
5986 .jim_handler = jim_target_configure,
5987 .help = "returns the specified target attribute",
5988 .usage = "target_attribute",
5989 },
5990 {
5991 .name = "mwd",
5992 .handler = handle_mw_command,
5993 .mode = COMMAND_EXEC,
5994 .help = "Write 64-bit word(s) to target memory",
5995 .usage = "address data [count]",
5996 },
5997 {
5998 .name = "mww",
5999 .handler = handle_mw_command,
6000 .mode = COMMAND_EXEC,
6001 .help = "Write 32-bit word(s) to target memory",
6002 .usage = "address data [count]",
6003 },
6004 {
6005 .name = "mwh",
6006 .handler = handle_mw_command,
6007 .mode = COMMAND_EXEC,
6008 .help = "Write 16-bit half-word(s) to target memory",
6009 .usage = "address data [count]",
6010 },
6011 {
6012 .name = "mwb",
6013 .handler = handle_mw_command,
6014 .mode = COMMAND_EXEC,
6015 .help = "Write byte(s) to target memory",
6016 .usage = "address data [count]",
6017 },
6018 {
6019 .name = "mdd",
6020 .handler = handle_md_command,
6021 .mode = COMMAND_EXEC,
6022 .help = "Display target memory as 64-bit words",
6023 .usage = "address [count]",
6024 },
6025 {
6026 .name = "mdw",
6027 .handler = handle_md_command,
6028 .mode = COMMAND_EXEC,
6029 .help = "Display target memory as 32-bit words",
6030 .usage = "address [count]",
6031 },
6032 {
6033 .name = "mdh",
6034 .handler = handle_md_command,
6035 .mode = COMMAND_EXEC,
6036 .help = "Display target memory as 16-bit half-words",
6037 .usage = "address [count]",
6038 },
6039 {
6040 .name = "mdb",
6041 .handler = handle_md_command,
6042 .mode = COMMAND_EXEC,
6043 .help = "Display target memory as 8-bit bytes",
6044 .usage = "address [count]",
6045 },
6046 {
6047 .name = "array2mem",
6048 .mode = COMMAND_EXEC,
6049 .jim_handler = jim_target_array2mem,
6050 .help = "Writes Tcl array of 8/16/32 bit numbers "
6051 "to target memory",
6052 .usage = "arrayname bitwidth address count",
6053 },
6054 {
6055 .name = "mem2array",
6056 .mode = COMMAND_EXEC,
6057 .jim_handler = jim_target_mem2array,
6058 .help = "Loads Tcl array of 8/16/32 bit numbers "
6059 "from target memory",
6060 .usage = "arrayname bitwidth address count",
6061 },
6062 {
6063 .name = "get_reg",
6064 .mode = COMMAND_EXEC,
6065 .jim_handler = target_jim_get_reg,
6066 .help = "Get register values from the target",
6067 .usage = "list",
6068 },
6069 {
6070 .name = "set_reg",
6071 .mode = COMMAND_EXEC,
6072 .jim_handler = target_jim_set_reg,
6073 .help = "Set target register values",
6074 .usage = "dict",
6075 },
6076 {
6077 .name = "read_memory",
6078 .mode = COMMAND_EXEC,
6079 .jim_handler = target_jim_read_memory,
6080 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
6081 .usage = "address width count ['phys']",
6082 },
6083 {
6084 .name = "write_memory",
6085 .mode = COMMAND_EXEC,
6086 .jim_handler = target_jim_write_memory,
6087 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6088 .usage = "address width data ['phys']",
6089 },
6090 {
6091 .name = "eventlist",
6092 .handler = handle_target_event_list,
6093 .mode = COMMAND_EXEC,
6094 .help = "displays a table of events defined for this target",
6095 .usage = "",
6096 },
6097 {
6098 .name = "curstate",
6099 .mode = COMMAND_EXEC,
6100 .jim_handler = jim_target_current_state,
6101 .help = "displays the current state of this target",
6102 },
6103 {
6104 .name = "arp_examine",
6105 .mode = COMMAND_EXEC,
6106 .jim_handler = jim_target_examine,
6107 .help = "used internally for reset processing",
6108 .usage = "['allow-defer']",
6109 },
6110 {
6111 .name = "was_examined",
6112 .mode = COMMAND_EXEC,
6113 .jim_handler = jim_target_was_examined,
6114 .help = "used internally for reset processing",
6115 },
6116 {
6117 .name = "examine_deferred",
6118 .mode = COMMAND_EXEC,
6119 .jim_handler = jim_target_examine_deferred,
6120 .help = "used internally for reset processing",
6121 },
6122 {
6123 .name = "arp_halt_gdb",
6124 .mode = COMMAND_EXEC,
6125 .jim_handler = jim_target_halt_gdb,
6126 .help = "used internally for reset processing to halt GDB",
6127 },
6128 {
6129 .name = "arp_poll",
6130 .mode = COMMAND_EXEC,
6131 .jim_handler = jim_target_poll,
6132 .help = "used internally for reset processing",
6133 },
6134 {
6135 .name = "arp_reset",
6136 .mode = COMMAND_EXEC,
6137 .jim_handler = jim_target_reset,
6138 .help = "used internally for reset processing",
6139 },
6140 {
6141 .name = "arp_halt",
6142 .mode = COMMAND_EXEC,
6143 .jim_handler = jim_target_halt,
6144 .help = "used internally for reset processing",
6145 },
6146 {
6147 .name = "arp_waitstate",
6148 .mode = COMMAND_EXEC,
6149 .jim_handler = jim_target_wait_state,
6150 .help = "used internally for reset processing",
6151 },
6152 {
6153 .name = "invoke-event",
6154 .mode = COMMAND_EXEC,
6155 .jim_handler = jim_target_invoke_event,
6156 .help = "invoke handler for specified event",
6157 .usage = "event_name",
6158 },
6159 COMMAND_REGISTRATION_DONE
6160 };
6161
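/* Backend for 'target create': validates the name and type, allocates and
 * initializes the new struct target, applies the remaining arguments as
 * 'configure' options, and finally registers the '<targetname>' command
 * with the instance subcommands above. */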
6162 static int target_create(struct jim_getopt_info *goi)
6163 {
6164 Jim_Obj *new_cmd;
6165 Jim_Cmd *cmd;
6166 const char *cp;
6167 int e;
6168 int x;
6169 struct target *target;
6170 struct command_context *cmd_ctx;
6171
6172 cmd_ctx = current_command_context(goi->interp);
6173 assert(cmd_ctx);
6174
6175 if (goi->argc < 3) {
6176 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
6177 return JIM_ERR;
6178 }
6179
6180 /* COMMAND */
6181 jim_getopt_obj(goi, &new_cmd);
6182 /* does this command exist? */
6183 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
6184 if (cmd) {
6185 cp = Jim_GetString(new_cmd, NULL);
6186 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
6187 return JIM_ERR;
6188 }
6189
6190 /* TYPE */
6191 e = jim_getopt_string(goi, &cp, NULL);
6192 if (e != JIM_OK)
6193 return e;
6194 struct transport *tr = get_current_transport();
6195 if (tr->override_target) {
6196 e = tr->override_target(&cp);
6197 if (e != ERROR_OK) {
6198 LOG_ERROR("The selected transport doesn't support this target");
6199 return JIM_ERR;
6200 }
6201 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
6202 }
6203 /* now does target type exist */
6204 for (x = 0 ; target_types[x] ; x++) {
6205 if (strcmp(cp, target_types[x]->name) == 0) {
6206 /* found */
6207 break;
6208 }
6209 }
6210 if (!target_types[x]) {
6211 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
6212 for (x = 0 ; target_types[x] ; x++) {
6213 if (target_types[x + 1]) {
6214 Jim_AppendStrings(goi->interp,
6215 Jim_GetResult(goi->interp),
6216 target_types[x]->name,
6217 ", ", NULL);
6218 } else {
6219 Jim_AppendStrings(goi->interp,
6220 Jim_GetResult(goi->interp),
6221 " or ",
6222 target_types[x]->name, NULL);
6223 }
6224 }
6225 return JIM_ERR;
6226 }
6227
6228 /* Create it */
6229 target = calloc(1, sizeof(struct target));
6230 if (!target) {
6231 LOG_ERROR("Out of memory");
6232 return JIM_ERR;
6233 }
6234
6235 /* set empty smp cluster */
6236 target->smp_targets = &empty_smp_targets;
6237
6238 /* set target number */
6239 target->target_number = new_target_number();
6240
6241 /* give this target its own writable copy of the target_type */
6242 target->type = malloc(sizeof(struct target_type));
6243 if (!target->type) {
6244 LOG_ERROR("Out of memory");
6245 free(target);
6246 return JIM_ERR;
6247 }
6248
6249 memcpy(target->type, target_types[x], sizeof(struct target_type));
6250
6251 /* default to first core, override with -coreid */
6252 target->coreid = 0;
6253
6254 target->working_area = 0x0;
6255 target->working_area_size = 0x0;
6256 target->working_areas = NULL;
6257 target->backup_working_area = 0;
6258
6259 target->state = TARGET_UNKNOWN;
6260 target->debug_reason = DBG_REASON_UNDEFINED;
6261 target->reg_cache = NULL;
6262 target->breakpoints = NULL;
6263 target->watchpoints = NULL;
6264 target->next = NULL;
6265 target->arch_info = NULL;
6266
6267 target->verbose_halt_msg = true;
6268
6269 target->halt_issued = false;
6270
6271 /* initialize trace information */
6272 target->trace_info = calloc(1, sizeof(struct trace));
6273 if (!target->trace_info) {
6274 LOG_ERROR("Out of memory");
6275 free(target->type);
6276 free(target);
6277 return JIM_ERR;
6278 }
6279
6280 target->dbgmsg = NULL;
6281 target->dbg_msg_enabled = 0;
6282
6283 target->endianness = TARGET_ENDIAN_UNKNOWN;
6284
6285 target->rtos = NULL;
6286 target->rtos_auto_detect = false;
6287
6288 target->gdb_port_override = NULL;
6289 target->gdb_max_connections = 1;
6290
6291 /* Do the rest as "configure" options */
6292 goi->isconfigure = 1;
6293 e = target_configure(goi, target);
6294
6295 if (e == JIM_OK) {
6296 if (target->has_dap) {
6297 if (!target->dap_configured) {
6298 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6299 e = JIM_ERR;
6300 }
6301 } else {
6302 if (!target->tap_configured) {
6303 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6304 e = JIM_ERR;
6305 }
6306 }
6307 /* tap must be set after target was configured */
6308 if (!target->tap)
6309 e = JIM_ERR;
6310 }
6311
6312 if (e != JIM_OK) {
6313 rtos_destroy(target);
6314 free(target->gdb_port_override);
6315 free(target->trace_info);
6316 free(target->type);
6317 free(target);
6318 return e;
6319 }
6320
6321 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6322 /* default endian to little if not specified */
6323 target->endianness = TARGET_LITTLE_ENDIAN;
6324 }
6325
6326 cp = Jim_GetString(new_cmd, NULL);
6327 target->cmd_name = strdup(cp);
6328 if (!target->cmd_name) {
6329 LOG_ERROR("Out of memory");
6330 rtos_destroy(target);
6331 free(target->gdb_port_override);
6332 free(target->trace_info);
6333 free(target->type);
6334 free(target);
6335 return JIM_ERR;
6336 }
6337
6338 if (target->type->target_create) {
6339 e = (*(target->type->target_create))(target, goi->interp);
6340 if (e != ERROR_OK) {
6341 LOG_DEBUG("target_create failed");
6342 free(target->cmd_name);
6343 rtos_destroy(target);
6344 free(target->gdb_port_override);
6345 free(target->trace_info);
6346 free(target->type);
6347 free(target);
6348 return JIM_ERR;
6349 }
6350 }
6351
6352 /* create the target specific commands */
6353 if (target->type->commands) {
6354 e = register_commands(cmd_ctx, NULL, target->type->commands);
6355 if (e != ERROR_OK)
6356 LOG_ERROR("unable to register '%s' commands", cp);
6357 }
6358
6359 /* now - create the new target name command */
6360 const struct command_registration target_subcommands[] = {
6361 {
6362 .chain = target_instance_command_handlers,
6363 },
6364 {
6365 .chain = target->type->commands,
6366 },
6367 COMMAND_REGISTRATION_DONE
6368 };
6369 const struct command_registration target_commands[] = {
6370 {
6371 .name = cp,
6372 .mode = COMMAND_ANY,
6373 .help = "target command group",
6374 .usage = "",
6375 .chain = target_subcommands,
6376 },
6377 COMMAND_REGISTRATION_DONE
6378 };
6379 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6380 if (e != ERROR_OK) {
6381 if (target->type->deinit_target)
6382 target->type->deinit_target(target);
6383 free(target->cmd_name);
6384 rtos_destroy(target);
6385 free(target->gdb_port_override);
6386 free(target->trace_info);
6387 free(target->type);
6388 free(target);
6389 return JIM_ERR;
6390 }
6391
6392 /* append to end of list */
6393 append_to_list_all_targets(target);
6394
6395 cmd_ctx->current_target = target;
6396 return JIM_OK;
6397 }
6398
6399 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6400 {
6401 if (argc != 1) {
6402 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6403 return JIM_ERR;
6404 }
6405 struct command_context *cmd_ctx = current_command_context(interp);
6406 assert(cmd_ctx);
6407
6408 struct target *target = get_current_target_or_null(cmd_ctx);
6409 if (target)
6410 Jim_SetResultString(interp, target_name(target), -1);
6411 return JIM_OK;
6412 }
6413
6414 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6415 {
6416 if (argc != 1) {
6417 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6418 return JIM_ERR;
6419 }
6420 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6421 for (unsigned x = 0; target_types[x]; x++) {
6422 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6423 Jim_NewStringObj(interp, target_types[x]->name, -1));
6424 }
6425 return JIM_OK;
6426 }
6427
6428 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6429 {
6430 if (argc != 1) {
6431 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6432 return JIM_ERR;
6433 }
6434 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6435 struct target *target = all_targets;
6436 while (target) {
6437 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6438 Jim_NewStringObj(interp, target_name(target), -1));
6439 target = target->next;
6440 }
6441 return JIM_OK;
6442 }
6443
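/* Implements 'target smp': groups the named targets into one SMP cluster
 * sharing a common target_list, e.g. 'target smp core0 core1' (the core
 * names here are placeholders). */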
6444 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6445 {
6446 int i;
6447 const char *targetname;
6448 int retval, len;
6449 static int smp_group = 1;
6450 struct target *target = NULL;
6451 struct target_list *head, *new;
6452
6453 retval = 0;
6454 LOG_DEBUG("%d", argc);
6455 /* argv[1] = target to associate in smp
6456 * argv[2] = target to associate in smp
6457 * argv[3] ...
6458 */
6459
6460 struct list_head *lh = malloc(sizeof(*lh));
6461 if (!lh) {
6462 LOG_ERROR("Out of memory");
6463 return JIM_ERR;
6464 }
6465 INIT_LIST_HEAD(lh);
6466
6467 for (i = 1; i < argc; i++) {
6468
6469 targetname = Jim_GetString(argv[i], &len);
6470 target = get_target(targetname);
6471 LOG_DEBUG("%s ", targetname);
6472 if (target) {
6473 new = malloc(sizeof(struct target_list));
6474 new->target = target;
6475 list_add_tail(&new->lh, lh);
6476 }
6477 }
6478 /* now walk the list of CPUs and put each target into SMP mode */
6479 foreach_smp_target(head, lh) {
6480 target = head->target;
6481 target->smp = smp_group;
6482 target->smp_targets = lh;
6483 }
6484 smp_group++;
6485
6486 if (target && target->rtos)
6487 retval = rtos_smp_init(target);
6488
6489 return retval;
6490 }
6491
6492
6493 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6494 {
6495 struct jim_getopt_info goi;
6496 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6497 if (goi.argc < 3) {
6498 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6499 "<name> <target_type> [<target_options> ...]");
6500 return JIM_ERR;
6501 }
6502 return target_create(&goi);
6503 }
6504
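/* Subcommands of the top-level 'target' command (configuration stage).
 * Example (the target name and type below are placeholders, not taken
 * from this file):
 *   target create mychip.cpu cortex_m -chain-position mychip.cpu
 */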
6505 static const struct command_registration target_subcommand_handlers[] = {
6506 {
6507 .name = "init",
6508 .mode = COMMAND_CONFIG,
6509 .handler = handle_target_init_command,
6510 .help = "initialize targets",
6511 .usage = "",
6512 },
6513 {
6514 .name = "create",
6515 .mode = COMMAND_CONFIG,
6516 .jim_handler = jim_target_create,
6517 .usage = "name type '-chain-position' name [options ...]",
6518 .help = "Creates and selects a new target",
6519 },
6520 {
6521 .name = "current",
6522 .mode = COMMAND_ANY,
6523 .jim_handler = jim_target_current,
6524 .help = "Returns the currently selected target",
6525 },
6526 {
6527 .name = "types",
6528 .mode = COMMAND_ANY,
6529 .jim_handler = jim_target_types,
6530 .help = "Returns the available target types as "
6531 "a list of strings",
6532 },
6533 {
6534 .name = "names",
6535 .mode = COMMAND_ANY,
6536 .jim_handler = jim_target_names,
6537 .help = "Returns the names of all targets as a list of strings",
6538 },
6539 {
6540 .name = "smp",
6541 .mode = COMMAND_ANY,
6542 .jim_handler = jim_target_smp,
6543 .usage = "targetname1 targetname2 ...",
6544 .help = "gather several targets into one SMP group"
6545 },
6546
6547 COMMAND_REGISTRATION_DONE
6548 };
6549
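/* 'fast_load_image' caches an image in the server's own memory as an array
 * of fast_load sections; a later 'fast_load' replays those sections to the
 * current target. Mainly useful for profiling the download path. */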
6550 struct fast_load {
6551 target_addr_t address;
6552 uint8_t *data;
6553 int length;
6554
6555 };
6556
6557 static int fastload_num;
6558 static struct fast_load *fastload;
6559
6560 static void free_fastload(void)
6561 {
6562 if (fastload) {
6563 for (int i = 0; i < fastload_num; i++)
6564 free(fastload[i].data);
6565 free(fastload);
6566 fastload = NULL;
6567 }
6568 }
6569
6570 COMMAND_HANDLER(handle_fast_load_image_command)
6571 {
6572 uint8_t *buffer;
6573 size_t buf_cnt;
6574 uint32_t image_size;
6575 target_addr_t min_address = 0;
6576 target_addr_t max_address = -1;
6577
6578 struct image image;
6579
6580 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6581 &image, &min_address, &max_address);
6582 if (retval != ERROR_OK)
6583 return retval;
6584
6585 struct duration bench;
6586 duration_start(&bench);
6587
6588 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6589 if (retval != ERROR_OK)
6590 return retval;
6591
6592 image_size = 0x0;
6593 retval = ERROR_OK;
6594 fastload_num = image.num_sections;
6595 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6596 if (!fastload) {
6597 command_print(CMD, "out of memory");
6598 image_close(&image);
6599 return ERROR_FAIL;
6600 }
6601 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6602 for (unsigned int i = 0; i < image.num_sections; i++) {
6603 buffer = malloc(image.sections[i].size);
6604 if (!buffer) {
6605 command_print(CMD, "error allocating buffer for section (%d bytes)",
6606 (int)(image.sections[i].size));
6607 retval = ERROR_FAIL;
6608 break;
6609 }
6610
6611 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6612 if (retval != ERROR_OK) {
6613 free(buffer);
6614 break;
6615 }
6616
6617 uint32_t offset = 0;
6618 uint32_t length = buf_cnt;
6619
6620 /* DANGER!!! beware of unsigned comparison here!!! */
6621
6622 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6623 (image.sections[i].base_address < max_address)) {
6624 if (image.sections[i].base_address < min_address) {
6625 /* clip addresses below */
6626 offset += min_address-image.sections[i].base_address;
6627 length -= offset;
6628 }
6629
6630 if (image.sections[i].base_address + buf_cnt > max_address)
6631 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6632
6633 fastload[i].address = image.sections[i].base_address + offset;
6634 fastload[i].data = malloc(length);
6635 if (!fastload[i].data) {
6636 free(buffer);
6637 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6638 length);
6639 retval = ERROR_FAIL;
6640 break;
6641 }
6642 memcpy(fastload[i].data, buffer + offset, length);
6643 fastload[i].length = length;
6644
6645 image_size += length;
6646 command_print(CMD, "%u bytes written at address 0x%8.8x",
6647 (unsigned int)length,
6648 ((unsigned int)(image.sections[i].base_address + offset)));
6649 }
6650
6651 free(buffer);
6652 }
6653
6654 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6655 command_print(CMD, "Loaded %" PRIu32 " bytes "
6656 "in %fs (%0.3f KiB/s)", image_size,
6657 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6658
6659 command_print(CMD,
6660 "WARNING: image has not been loaded to target!"
6661 "You can issue a 'fast_load' to finish loading.");
6662 }
6663
6664 image_close(&image);
6665
6666 if (retval != ERROR_OK)
6667 free_fastload();
6668
6669 return retval;
6670 }
6671
6672 COMMAND_HANDLER(handle_fast_load_command)
6673 {
6674 if (CMD_ARGC > 0)
6675 return ERROR_COMMAND_SYNTAX_ERROR;
6676 if (!fastload) {
6677 LOG_ERROR("No image in memory");
6678 return ERROR_FAIL;
6679 }
6680 int i;
6681 int64_t ms = timeval_ms();
6682 int size = 0;
6683 int retval = ERROR_OK;
6684 for (i = 0; i < fastload_num; i++) {
6685 struct target *target = get_current_target(CMD_CTX);
6686 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6687 (unsigned int)(fastload[i].address),
6688 (unsigned int)(fastload[i].length));
6689 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6690 if (retval != ERROR_OK)
6691 break;
6692 size += fastload[i].length;
6693 }
6694 if (retval == ERROR_OK) {
6695 int64_t after = timeval_ms();
6696 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6697 }
6698 return retval;
6699 }
6700
6701 static const struct command_registration target_command_handlers[] = {
6702 {
6703 .name = "targets",
6704 .handler = handle_targets_command,
6705 .mode = COMMAND_ANY,
6706 .help = "change current default target (one parameter) "
6707 "or prints table of all targets (no parameters)",
6708 .usage = "[target]",
6709 },
6710 {
6711 .name = "target",
6712 .mode = COMMAND_CONFIG,
6713 .help = "configure target",
6714 .chain = target_subcommand_handlers,
6715 .usage = "",
6716 },
6717 COMMAND_REGISTRATION_DONE
6718 };
6719
6720 int target_register_commands(struct command_context *cmd_ctx)
6721 {
6722 return register_commands(cmd_ctx, NULL, target_command_handlers);
6723 }
6724
6725 static bool target_reset_nag = true;
6726
6727 bool get_target_reset_nag(void)
6728 {
6729 return target_reset_nag;
6730 }
6731
6732 COMMAND_HANDLER(handle_target_reset_nag)
6733 {
6734 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6735 &target_reset_nag, "Nag after each reset about options to improve "
6736 "performance");
6737 }
6738
6739 COMMAND_HANDLER(handle_ps_command)
6740 {
6741 struct target *target = get_current_target(CMD_CTX);
6742 char *display;
6743 if (target->state != TARGET_HALTED) {
6744 LOG_INFO("target not halted !!");
6745 return ERROR_OK;
6746 }
6747
6748 if ((target->rtos) && (target->rtos->type)
6749 && (target->rtos->type->ps_command)) {
6750 display = target->rtos->type->ps_command(target);
6751 command_print(CMD, "%s", display);
6752 free(display);
6753 return ERROR_OK;
6754 } else {
6755 LOG_INFO("Failed: target has no RTOS configured or its RTOS does not support 'ps'");
6756 return ERROR_TARGET_FAILURE;
6757 }
6758 }
6759
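/* Print an optional label followed by a hex dump of 'size' bytes from 'buf'. */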
6760 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6761 {
6762 if (text)
6763 command_print_sameline(cmd, "%s", text);
6764 for (int i = 0; i < size; i++)
6765 command_print_sameline(cmd, " %02x", buf[i]);
6766 command_print(cmd, " ");
6767 }
6768
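/* Implements 'test_mem_access <size>': exercises target_read_memory() and
 * target_write_memory() with access widths 1/2/4, target offsets 0..3 and
 * both aligned and unaligned host buffers, comparing each transfer against
 * a host-side replay of the same operation. */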
6769 COMMAND_HANDLER(handle_test_mem_access_command)
6770 {
6771 struct target *target = get_current_target(CMD_CTX);
6772 uint32_t test_size;
6773 int retval = ERROR_OK;
6774
6775 if (target->state != TARGET_HALTED) {
6776 LOG_INFO("target not halted !!");
6777 return ERROR_FAIL;
6778 }
6779
6780 if (CMD_ARGC != 1)
6781 return ERROR_COMMAND_SYNTAX_ERROR;
6782
6783 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6784
6785 /* Test reads */
6786 size_t num_bytes = test_size + 4;
6787
6788 struct working_area *wa = NULL;
6789 retval = target_alloc_working_area(target, num_bytes, &wa);
6790 if (retval != ERROR_OK) {
6791 LOG_ERROR("Not enough working area");
6792 return ERROR_FAIL;
6793 }
6794
6795 uint8_t *test_pattern = malloc(num_bytes);
6796
6797 for (size_t i = 0; i < num_bytes; i++)
6798 test_pattern[i] = rand();
6799
6800 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6801 if (retval != ERROR_OK) {
6802 LOG_ERROR("Test pattern write failed");
6803 goto out;
6804 }
6805
6806 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6807 for (int size = 1; size <= 4; size *= 2) {
6808 for (int offset = 0; offset < 4; offset++) {
6809 uint32_t count = test_size / size;
6810 size_t host_bufsiz = (count + 2) * size + host_offset;
6811 uint8_t *read_ref = malloc(host_bufsiz);
6812 uint8_t *read_buf = malloc(host_bufsiz);
6813
6814 for (size_t i = 0; i < host_bufsiz; i++) {
6815 read_ref[i] = rand();
6816 read_buf[i] = read_ref[i];
6817 }
6818 command_print_sameline(CMD,
6819 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6820 size, offset, host_offset ? "un" : "");
6821
6822 struct duration bench;
6823 duration_start(&bench);
6824
6825 retval = target_read_memory(target, wa->address + offset, size, count,
6826 read_buf + size + host_offset);
6827
6828 duration_measure(&bench);
6829
6830 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6831 command_print(CMD, "Unsupported alignment");
6832 goto next;
6833 } else if (retval != ERROR_OK) {
6834 command_print(CMD, "Memory read failed");
6835 goto next;
6836 }
6837
6838 /* replay on host */
6839 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6840
6841 /* check result */
6842 int result = memcmp(read_ref, read_buf, host_bufsiz);
6843 if (result == 0) {
6844 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6845 duration_elapsed(&bench),
6846 duration_kbps(&bench, count * size));
6847 } else {
6848 command_print(CMD, "Compare failed");
6849 binprint(CMD, "ref:", read_ref, host_bufsiz);
6850 binprint(CMD, "buf:", read_buf, host_bufsiz);
6851 }
6852 next:
6853 free(read_ref);
6854 free(read_buf);
6855 }
6856 }
6857 }
6858
6859 out:
6860 free(test_pattern);
6861
6862 target_free_working_area(target, wa);
6863
6864 /* Test writes */
6865 num_bytes = test_size + 4 + 4 + 4;
6866
6867 retval = target_alloc_working_area(target, num_bytes, &wa);
6868 if (retval != ERROR_OK) {
6869 LOG_ERROR("Not enough working area");
6870 return ERROR_FAIL;
6871 }
6872
6873 test_pattern = malloc(num_bytes);
6874
6875 for (size_t i = 0; i < num_bytes; i++)
6876 test_pattern[i] = rand();
6877
6878 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6879 for (int size = 1; size <= 4; size *= 2) {
6880 for (int offset = 0; offset < 4; offset++) {
6881 uint32_t count = test_size / size;
6882 size_t host_bufsiz = count * size + host_offset;
6883 uint8_t *read_ref = malloc(num_bytes);
6884 uint8_t *read_buf = malloc(num_bytes);
6885 uint8_t *write_buf = malloc(host_bufsiz);
6886
6887 for (size_t i = 0; i < host_bufsiz; i++)
6888 write_buf[i] = rand();
6889 command_print_sameline(CMD,
6890 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6891 size, offset, host_offset ? "un" : "");
6892
6893 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6894 if (retval != ERROR_OK) {
6895 command_print(CMD, "Test pattern write failed");
6896 goto nextw;
6897 }
6898
6899 /* replay on host */
6900 memcpy(read_ref, test_pattern, num_bytes);
6901 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6902
6903 struct duration bench;
6904 duration_start(&bench);
6905
6906 retval = target_write_memory(target, wa->address + size + offset, size, count,
6907 write_buf + host_offset);
6908
6909 duration_measure(&bench);
6910
6911 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6912 command_print(CMD, "Unsupported alignment");
6913 goto nextw;
6914 } else if (retval != ERROR_OK) {
6915 command_print(CMD, "Memory write failed");
6916 goto nextw;
6917 }
6918
6919 /* read back */
6920 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6921 if (retval != ERROR_OK) {
6922 command_print(CMD, "Read back of test pattern failed");
6923 goto nextw;
6924 }
6925
6926 /* check result */
6927 int result = memcmp(read_ref, read_buf, num_bytes);
6928 if (result == 0) {
6929 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6930 duration_elapsed(&bench),
6931 duration_kbps(&bench, count * size));
6932 } else {
6933 command_print(CMD, "Compare failed");
6934 binprint(CMD, "ref:", read_ref, num_bytes);
6935 binprint(CMD, "buf:", read_buf, num_bytes);
6936 }
6937 nextw:
6938 free(read_ref);
6939 free(read_buf);
6940 }
6941 }
6942 }
6943
6944 free(test_pattern);
6945
6946 target_free_working_area(target, wa);
6947 return retval;
6948 }
6949
6950 static const struct command_registration target_exec_command_handlers[] = {
6951 {
6952 .name = "fast_load_image",
6953 .handler = handle_fast_load_image_command,
6954 .mode = COMMAND_ANY,
6955 .help = "Load image into server memory for later use by "
6956 "fast_load; primarily for profiling",
6957 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6958 "[min_address [max_length]]",
6959 },
6960 {
6961 .name = "fast_load",
6962 .handler = handle_fast_load_command,
6963 .mode = COMMAND_EXEC,
6964 .help = "loads active fast load image to current target "
6965 "- mainly for profiling purposes",
6966 .usage = "",
6967 },
6968 {
6969 .name = "profile",
6970 .handler = handle_profile_command,
6971 .mode = COMMAND_EXEC,
6972 .usage = "seconds filename [start end]",
6973 .help = "profiling samples the CPU PC",
6974 },
6975 /** @todo don't register virt2phys() unless target supports it */
6976 {
6977 .name = "virt2phys",
6978 .handler = handle_virt2phys_command,
6979 .mode = COMMAND_ANY,
6980 .help = "translate a virtual address into a physical address",
6981 .usage = "virtual_address",
6982 },
6983 {
6984 .name = "reg",
6985 .handler = handle_reg_command,
6986 .mode = COMMAND_EXEC,
6987 .help = "display (reread from target with \"force\") or set a register; "
6988 "with no arguments, displays all registers and their values",
6989 .usage = "[(register_number|register_name) [(value|'force')]]",
6990 },
6991 {
6992 .name = "poll",
6993 .handler = handle_poll_command,
6994 .mode = COMMAND_EXEC,
6995 .help = "poll target state; or reconfigure background polling",
6996 .usage = "['on'|'off']",
6997 },
6998 {
6999 .name = "wait_halt",
7000 .handler = handle_wait_halt_command,
7001 .mode = COMMAND_EXEC,
7002 .help = "wait up to the specified number of milliseconds "
7003 "(default 5000) for a previously requested halt",
7004 .usage = "[milliseconds]",
7005 },
7006 {
7007 .name = "halt",
7008 .handler = handle_halt_command,
7009 .mode = COMMAND_EXEC,
7010 .help = "request target to halt, then wait up to the specified "
7011 "number of milliseconds (default 5000) for it to complete",
7012 .usage = "[milliseconds]",
7013 },
7014 {
7015 .name = "resume",
7016 .handler = handle_resume_command,
7017 .mode = COMMAND_EXEC,
7018 .help = "resume target execution from current PC or address",
7019 .usage = "[address]",
7020 },
7021 {
7022 .name = "reset",
7023 .handler = handle_reset_command,
7024 .mode = COMMAND_EXEC,
7025 .usage = "[run|halt|init]",
7026 .help = "Reset all targets into the specified mode. "
7027 "Default reset mode is run, if not given.",
7028 },
7029 {
7030 .name = "soft_reset_halt",
7031 .handler = handle_soft_reset_halt_command,
7032 .mode = COMMAND_EXEC,
7033 .usage = "",
7034 .help = "halt the target and do a soft reset",
7035 },
7036 {
7037 .name = "step",
7038 .handler = handle_step_command,
7039 .mode = COMMAND_EXEC,
7040 .help = "step one instruction from current PC or address",
7041 .usage = "[address]",
7042 },
7043 {
7044 .name = "mdd",
7045 .handler = handle_md_command,
7046 .mode = COMMAND_EXEC,
7047 .help = "display memory double-words",
7048 .usage = "['phys'] address [count]",
7049 },
7050 {
7051 .name = "mdw",
7052 .handler = handle_md_command,
7053 .mode = COMMAND_EXEC,
7054 .help = "display memory words",
7055 .usage = "['phys'] address [count]",
7056 },
7057 {
7058 .name = "mdh",
7059 .handler = handle_md_command,
7060 .mode = COMMAND_EXEC,
7061 .help = "display memory half-words",
7062 .usage = "['phys'] address [count]",
7063 },
7064 {
7065 .name = "mdb",
7066 .handler = handle_md_command,
7067 .mode = COMMAND_EXEC,
7068 .help = "display memory bytes",
7069 .usage = "['phys'] address [count]",
7070 },
7071 {
7072 .name = "mwd",
7073 .handler = handle_mw_command,
7074 .mode = COMMAND_EXEC,
7075 .help = "write memory double-word",
7076 .usage = "['phys'] address value [count]",
7077 },
7078 {
7079 .name = "mww",
7080 .handler = handle_mw_command,
7081 .mode = COMMAND_EXEC,
7082 .help = "write memory word",
7083 .usage = "['phys'] address value [count]",
7084 },
7085 {
7086 .name = "mwh",
7087 .handler = handle_mw_command,
7088 .mode = COMMAND_EXEC,
7089 .help = "write memory half-word",
7090 .usage = "['phys'] address value [count]",
7091 },
7092 {
7093 .name = "mwb",
7094 .handler = handle_mw_command,
7095 .mode = COMMAND_EXEC,
7096 .help = "write memory byte",
7097 .usage = "['phys'] address value [count]",
7098 },
7099 {
7100 .name = "bp",
7101 .handler = handle_bp_command,
7102 .mode = COMMAND_EXEC,
7103 .help = "list or set hardware or software breakpoint",
7104 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
7105 },
7106 {
7107 .name = "rbp",
7108 .handler = handle_rbp_command,
7109 .mode = COMMAND_EXEC,
7110 .help = "remove breakpoint",
7111 .usage = "'all' | address",
7112 },
7113 {
7114 .name = "wp",
7115 .handler = handle_wp_command,
7116 .mode = COMMAND_EXEC,
7117 .help = "list (no params) or create watchpoints",
7118 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
7119 },
7120 {
7121 .name = "rwp",
7122 .handler = handle_rwp_command,
7123 .mode = COMMAND_EXEC,
7124 .help = "remove watchpoint",
7125 .usage = "address",
7126 },
7127 {
7128 .name = "load_image",
7129 .handler = handle_load_image_command,
7130 .mode = COMMAND_EXEC,
7131 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
7132 "[min_address] [max_length]",
7133 },
7134 {
7135 .name = "dump_image",
7136 .handler = handle_dump_image_command,
7137 .mode = COMMAND_EXEC,
7138 .usage = "filename address size",
7139 },
7140 {
7141 .name = "verify_image_checksum",
7142 .handler = handle_verify_image_checksum_command,
7143 .mode = COMMAND_EXEC,
7144 .usage = "filename [offset [type]]",
7145 },
7146 {
7147 .name = "verify_image",
7148 .handler = handle_verify_image_command,
7149 .mode = COMMAND_EXEC,
7150 .usage = "filename [offset [type]]",
7151 },
7152 {
7153 .name = "test_image",
7154 .handler = handle_test_image_command,
7155 .mode = COMMAND_EXEC,
7156 .usage = "filename [offset [type]]",
7157 },
7158 {
7159 .name = "get_reg",
7160 .mode = COMMAND_EXEC,
7161 .jim_handler = target_jim_get_reg,
7162 .help = "Get register values from the target",
7163 .usage = "list",
7164 },
7165 {
7166 .name = "set_reg",
7167 .mode = COMMAND_EXEC,
7168 .jim_handler = target_jim_set_reg,
7169 .help = "Set target register values",
7170 .usage = "dict",
7171 },
7172 {
7173 .name = "read_memory",
7174 .mode = COMMAND_EXEC,
7175 .jim_handler = target_jim_read_memory,
7176 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
7177 .usage = "address width count ['phys']",
7178 },
7179 {
7180 .name = "write_memory",
7181 .mode = COMMAND_EXEC,
7182 .jim_handler = target_jim_write_memory,
7183 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
7184 .usage = "address width data ['phys']",
7185 },
7186 {
7187 .name = "reset_nag",
7188 .handler = handle_target_reset_nag,
7189 .mode = COMMAND_ANY,
7190 .help = "Nag after each reset about options that could have been "
7191 "enabled to improve performance.",
7192 .usage = "['enable'|'disable']",
7193 },
7194 {
7195 .name = "ps",
7196 .handler = handle_ps_command,
7197 .mode = COMMAND_EXEC,
7198 .help = "list all tasks",
7199 .usage = "",
7200 },
7201 {
7202 .name = "test_mem_access",
7203 .handler = handle_test_mem_access_command,
7204 .mode = COMMAND_EXEC,
7205 .help = "Test the target's memory access functions",
7206 .usage = "size",
7207 },
7208
7209 COMMAND_REGISTRATION_DONE
7210 };
7211 static int target_register_user_commands(struct command_context *cmd_ctx)
7212 {
7213 int retval = ERROR_OK;
7214 retval = target_request_register_commands(cmd_ctx);
7215 if (retval != ERROR_OK)
7216 return retval;
7217
7218 retval = trace_register_commands(cmd_ctx);
7219 if (retval != ERROR_OK)
7220 return retval;
7221
7222
7223 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7224 }
