target: add Espressif ESP32 basic support
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59 #include "smp.h"
60 #include "semihosting_common.h"
61
62 /* default halt wait timeout (ms) */
63 #define DEFAULT_HALT_TIMEOUT 5000
64
65 static int target_read_buffer_default(struct target *target, target_addr_t address,
66 uint32_t count, uint8_t *buffer);
67 static int target_write_buffer_default(struct target *target, target_addr_t address,
68 uint32_t count, const uint8_t *buffer);
69 static int target_array2mem(Jim_Interp *interp, struct target *target,
70 int argc, Jim_Obj * const *argv);
71 static int target_mem2array(Jim_Interp *interp, struct target *target,
72 int argc, Jim_Obj * const *argv);
73 static int target_register_user_commands(struct command_context *cmd_ctx);
74 static int target_get_gdb_fileio_info_default(struct target *target,
75 struct gdb_fileio_info *fileio_info);
76 static int target_gdb_fileio_end_default(struct target *target, int retcode,
77 int fileio_errno, bool ctrl_c);
78
79 /* targets */
80 extern struct target_type arm7tdmi_target;
81 extern struct target_type arm720t_target;
82 extern struct target_type arm9tdmi_target;
83 extern struct target_type arm920t_target;
84 extern struct target_type arm966e_target;
85 extern struct target_type arm946e_target;
86 extern struct target_type arm926ejs_target;
87 extern struct target_type fa526_target;
88 extern struct target_type feroceon_target;
89 extern struct target_type dragonite_target;
90 extern struct target_type xscale_target;
91 extern struct target_type cortexm_target;
92 extern struct target_type cortexa_target;
93 extern struct target_type aarch64_target;
94 extern struct target_type cortexr4_target;
95 extern struct target_type arm11_target;
96 extern struct target_type ls1_sap_target;
97 extern struct target_type mips_m4k_target;
98 extern struct target_type mips_mips64_target;
99 extern struct target_type avr_target;
100 extern struct target_type dsp563xx_target;
101 extern struct target_type dsp5680xx_target;
102 extern struct target_type testee_target;
103 extern struct target_type avr32_ap7k_target;
104 extern struct target_type hla_target;
105 extern struct target_type nds32_v2_target;
106 extern struct target_type nds32_v3_target;
107 extern struct target_type nds32_v3m_target;
108 extern struct target_type esp32_target;
109 extern struct target_type esp32s2_target;
110 extern struct target_type or1k_target;
111 extern struct target_type quark_x10xx_target;
112 extern struct target_type quark_d20xx_target;
113 extern struct target_type stm8_target;
114 extern struct target_type riscv_target;
115 extern struct target_type mem_ap_target;
116 extern struct target_type esirisc_target;
117 extern struct target_type arcv2_target;
118
119 static struct target_type *target_types[] = {
120 &arm7tdmi_target,
121 &arm9tdmi_target,
122 &arm920t_target,
123 &arm720t_target,
124 &arm966e_target,
125 &arm946e_target,
126 &arm926ejs_target,
127 &fa526_target,
128 &feroceon_target,
129 &dragonite_target,
130 &xscale_target,
131 &cortexm_target,
132 &cortexa_target,
133 &cortexr4_target,
134 &arm11_target,
135 &ls1_sap_target,
136 &mips_m4k_target,
137 &avr_target,
138 &dsp563xx_target,
139 &dsp5680xx_target,
140 &testee_target,
141 &avr32_ap7k_target,
142 &hla_target,
143 &nds32_v2_target,
144 &nds32_v3_target,
145 &nds32_v3m_target,
146 &esp32_target,
147 &esp32s2_target,
148 &or1k_target,
149 &quark_x10xx_target,
150 &quark_d20xx_target,
151 &stm8_target,
152 &riscv_target,
153 &mem_ap_target,
154 &esirisc_target,
155 &arcv2_target,
156 &aarch64_target,
157 &mips_mips64_target,
158 NULL,
159 };
160
161 struct target *all_targets;
162 static struct target_event_callback *target_event_callbacks;
163 static struct target_timer_callback *target_timer_callbacks;
164 static int64_t target_timer_next_event_value;
165 static LIST_HEAD(target_reset_callback_list);
166 static LIST_HEAD(target_trace_callback_list);
167 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
168 static LIST_HEAD(empty_smp_targets);
169
170 static const struct jim_nvp nvp_assert[] = {
171 { .name = "assert", NVP_ASSERT },
172 { .name = "deassert", NVP_DEASSERT },
173 { .name = "T", NVP_ASSERT },
174 { .name = "F", NVP_DEASSERT },
175 { .name = "t", NVP_ASSERT },
176 { .name = "f", NVP_DEASSERT },
177 { .name = NULL, .value = -1 }
178 };
179
180 static const struct jim_nvp nvp_error_target[] = {
181 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
182 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
183 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
184 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
185 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
186 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
187 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
188 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
189 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
190 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
191 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
192 { .value = -1, .name = NULL }
193 };
194
195 static const char *target_strerror_safe(int err)
196 {
197 const struct jim_nvp *n;
198
199 n = jim_nvp_value2name_simple(nvp_error_target, err);
200 if (!n->name)
201 return "unknown";
202 else
203 return n->name;
204 }
205
206 static const struct jim_nvp nvp_target_event[] = {
207
208 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
209 { .value = TARGET_EVENT_HALTED, .name = "halted" },
210 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
211 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
212 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
213 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
214 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
215
216 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
217 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
218
219 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
220 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
221 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
222 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
223 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
224 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
225 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
226 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
227
228 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
229 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
230 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
231
232 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
233 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
234
235 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
236 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
237
238 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
239 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
240
241 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
242 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
243
244 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
245
246 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
247 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
248 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
249 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
250 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
251 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
252 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
253 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },
254
255 { .name = NULL, .value = -1 }
256 };
257
258 static const struct jim_nvp nvp_target_state[] = {
259 { .name = "unknown", .value = TARGET_UNKNOWN },
260 { .name = "running", .value = TARGET_RUNNING },
261 { .name = "halted", .value = TARGET_HALTED },
262 { .name = "reset", .value = TARGET_RESET },
263 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
264 { .name = NULL, .value = -1 },
265 };
266
267 static const struct jim_nvp nvp_target_debug_reason[] = {
268 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
269 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
270 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
271 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
272 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
273 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
274 { .name = "program-exit", .value = DBG_REASON_EXIT },
275 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
276 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
277 { .name = NULL, .value = -1 },
278 };
279
280 static const struct jim_nvp nvp_target_endian[] = {
281 { .name = "big", .value = TARGET_BIG_ENDIAN },
282 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
283 { .name = "be", .value = TARGET_BIG_ENDIAN },
284 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
285 { .name = NULL, .value = -1 },
286 };
287
288 static const struct jim_nvp nvp_reset_modes[] = {
289 { .name = "unknown", .value = RESET_UNKNOWN },
290 { .name = "run", .value = RESET_RUN },
291 { .name = "halt", .value = RESET_HALT },
292 { .name = "init", .value = RESET_INIT },
293 { .name = NULL, .value = -1 },
294 };
295
296 const char *debug_reason_name(struct target *t)
297 {
298 const char *cp;
299
300 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
301 t->debug_reason)->name;
302 if (!cp) {
303 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
304 cp = "(*BUG*unknown*BUG*)";
305 }
306 return cp;
307 }
308
309 const char *target_state_name(struct target *t)
310 {
311 const char *cp;
312 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
313 if (!cp) {
314 LOG_ERROR("Invalid target state: %d", (int)(t->state));
315 cp = "(*BUG*unknown*BUG*)";
316 }
317
318 if (!target_was_examined(t) && t->defer_examine)
319 cp = "examine deferred";
320
321 return cp;
322 }
323
324 const char *target_event_name(enum target_event event)
325 {
326 const char *cp;
327 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
328 if (!cp) {
329 LOG_ERROR("Invalid target event: %d", (int)(event));
330 cp = "(*BUG*unknown*BUG*)";
331 }
332 return cp;
333 }
334
335 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
336 {
337 const char *cp;
338 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
339 if (!cp) {
340 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
341 cp = "(*BUG*unknown*BUG*)";
342 }
343 return cp;
344 }
345
346 /* determine the number of the new target */
347 static int new_target_number(void)
348 {
349 struct target *t;
350 int x;
351
352 /* number is 0 based */
353 x = -1;
354 t = all_targets;
355 while (t) {
356 if (x < t->target_number)
357 x = t->target_number;
358 t = t->next;
359 }
360 return x + 1;
361 }
362
363 static void append_to_list_all_targets(struct target *target)
364 {
365 struct target **t = &all_targets;
366
367 while (*t)
368 t = &((*t)->next);
369 *t = target;
370 }
371
372 /* read a uint64_t from a buffer in target memory endianness */
373 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 return le_to_h_u64(buffer);
377 else
378 return be_to_h_u64(buffer);
379 }
380
381 /* read a uint32_t from a buffer in target memory endianness */
382 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 return le_to_h_u32(buffer);
386 else
387 return be_to_h_u32(buffer);
388 }
389
390 /* read a uint24_t from a buffer in target memory endianness */
391 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 return le_to_h_u24(buffer);
395 else
396 return be_to_h_u24(buffer);
397 }
398
399 /* read a uint16_t from a buffer in target memory endianness */
400 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
401 {
402 if (target->endianness == TARGET_LITTLE_ENDIAN)
403 return le_to_h_u16(buffer);
404 else
405 return be_to_h_u16(buffer);
406 }
407
408 /* write a uint64_t to a buffer in target memory endianness */
409 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
410 {
411 if (target->endianness == TARGET_LITTLE_ENDIAN)
412 h_u64_to_le(buffer, value);
413 else
414 h_u64_to_be(buffer, value);
415 }
416
417 /* write a uint32_t to a buffer in target memory endianness */
418 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
419 {
420 if (target->endianness == TARGET_LITTLE_ENDIAN)
421 h_u32_to_le(buffer, value);
422 else
423 h_u32_to_be(buffer, value);
424 }
425
426 /* write a uint24_t to a buffer in target memory endianness */
427 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
428 {
429 if (target->endianness == TARGET_LITTLE_ENDIAN)
430 h_u24_to_le(buffer, value);
431 else
432 h_u24_to_be(buffer, value);
433 }
434
435 /* write a uint16_t to a buffer in target memory endianness */
436 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
437 {
438 if (target->endianness == TARGET_LITTLE_ENDIAN)
439 h_u16_to_le(buffer, value);
440 else
441 h_u16_to_be(buffer, value);
442 }
443
444 /* write a uint8_t to a buffer in target memory endianness */
445 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
446 {
447 *buffer = value;
448 }
449
450 /* read a uint64_t array from a buffer in target memory endianness */
451 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
452 {
453 uint32_t i;
454 for (i = 0; i < count; i++)
455 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
456 }
457
458 /* read a uint32_t array from a buffer in target memory endianness */
459 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
460 {
461 uint32_t i;
462 for (i = 0; i < count; i++)
463 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
464 }
465
466 /* read a uint16_t array from a buffer in target memory endianness */
467 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
468 {
469 uint32_t i;
470 for (i = 0; i < count; i++)
471 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
472 }
473
474 /* write a uint64_t array to a buffer in target memory endianness */
475 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
476 {
477 uint32_t i;
478 for (i = 0; i < count; i++)
479 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
480 }
481
482 /* write a uint32_t array to a buffer in target memory endianness */
483 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
484 {
485 uint32_t i;
486 for (i = 0; i < count; i++)
487 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
488 }
489
490 /* write a uint16_t array to a buffer in target memory endianness */
491 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
492 {
493 uint32_t i;
494 for (i = 0; i < count; i++)
495 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
496 }
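
/*
 * Illustrative sketch (not part of the build): how a caller is expected to
 * combine the target_buffer_* helpers above with the raw memory accessors so
 * that host code never hard-codes the target's endianness. The address used
 * here is a made-up example value.
 */
#if 0
static int example_write_word_endian_safe(struct target *target)
{
	uint8_t buf[4];
	const target_addr_t example_addr = 0x20000000;	/* hypothetical RAM address */

	/* Convert the host-order value into target memory order... */
	target_buffer_set_u32(target, buf, 0xdeadbeef);

	/* ...then transfer the raw bytes; no further byte swapping is needed. */
	int retval = target_write_memory(target, example_addr, 4, 1, buf);
	if (retval != ERROR_OK)
		return retval;

	/* Reading back follows the same pattern in reverse. */
	retval = target_read_memory(target, example_addr, 4, 1, buf);
	if (retval != ERROR_OK)
		return retval;

	uint32_t value = target_buffer_get_u32(target, buf);
	LOG_DEBUG("read back 0x%08" PRIx32, value);
	return ERROR_OK;
}
#endif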
497
498 /* return a pointer to a configured target; id is name or number */
499 struct target *get_target(const char *id)
500 {
501 struct target *target;
502
503 /* try as Tcl target name */
504 for (target = all_targets; target; target = target->next) {
505 if (!target_name(target))
506 continue;
507 if (strcmp(id, target_name(target)) == 0)
508 return target;
509 }
510
511 /* It's OK to remove this fallback sometime after August 2010 or so */
512
513 /* no match, try as number */
514 unsigned num;
515 if (parse_uint(id, &num) != ERROR_OK)
516 return NULL;
517
518 for (target = all_targets; target; target = target->next) {
519 if (target->target_number == (int)num) {
520 LOG_WARNING("use '%s' as target identifier, not '%u'",
521 target_name(target), num);
522 return target;
523 }
524 }
525
526 return NULL;
527 }
528
529 /* returns a pointer to the n-th configured target */
530 struct target *get_target_by_num(int num)
531 {
532 struct target *target = all_targets;
533
534 while (target) {
535 if (target->target_number == num)
536 return target;
537 target = target->next;
538 }
539
540 return NULL;
541 }
542
543 struct target *get_current_target(struct command_context *cmd_ctx)
544 {
545 struct target *target = get_current_target_or_null(cmd_ctx);
546
547 if (!target) {
548 LOG_ERROR("BUG: current_target out of bounds");
549 exit(-1);
550 }
551
552 return target;
553 }
554
555 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
556 {
557 return cmd_ctx->current_target_override
558 ? cmd_ctx->current_target_override
559 : cmd_ctx->current_target;
560 }
561
562 int target_poll(struct target *target)
563 {
564 int retval;
565
566 /* We can't poll until after examine */
567 if (!target_was_examined(target)) {
568 /* Fail silently lest we pollute the log */
569 return ERROR_FAIL;
570 }
571
572 retval = target->type->poll(target);
573 if (retval != ERROR_OK)
574 return retval;
575
576 if (target->halt_issued) {
577 if (target->state == TARGET_HALTED)
578 target->halt_issued = false;
579 else {
580 int64_t t = timeval_ms() - target->halt_issued_time;
581 if (t > DEFAULT_HALT_TIMEOUT) {
582 target->halt_issued = false;
583 LOG_INFO("Halt timed out, wake up GDB.");
584 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
585 }
586 }
587 }
588
589 return ERROR_OK;
590 }
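
/*
 * Illustrative sketch (not part of the build): a minimal busy-wait built on
 * target_poll(), showing how the halt_issued timeout above interacts with a
 * caller that simply polls until the target reports TARGET_HALTED. Production
 * code should normally use target_wait_state() instead; the 500 ms budget is
 * an arbitrary example value.
 */
#if 0
static int example_poll_until_halted(struct target *target)
{
	const int64_t deadline = timeval_ms() + 500;	/* arbitrary budget */

	while (timeval_ms() < deadline) {
		int retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (target->state == TARGET_HALTED)
			return ERROR_OK;
		/* Yield a little so we do not hammer the adapter. */
		alive_sleep(10);
	}
	return ERROR_TARGET_TIMEOUT;
}
#endif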
591
592 int target_halt(struct target *target)
593 {
594 int retval;
595 /* We can't halt until after examine */
596 if (!target_was_examined(target)) {
597 LOG_ERROR("Target not examined yet");
598 return ERROR_FAIL;
599 }
600
601 retval = target->type->halt(target);
602 if (retval != ERROR_OK)
603 return retval;
604
605 target->halt_issued = true;
606 target->halt_issued_time = timeval_ms();
607
608 return ERROR_OK;
609 }
610
611 /**
612 * Make the target (re)start executing using its saved execution
613 * context (possibly with some modifications).
614 *
615 * @param target Which target should start executing.
616 * @param current True to use the target's saved program counter instead
617 * of the address parameter
618 * @param address Optionally used as the program counter.
619 * @param handle_breakpoints True iff breakpoints at the resumption PC
620 * should be skipped. (For example, maybe execution was stopped by
621 * such a breakpoint, in which case it would be counterproductive to
622 * let it re-trigger.)
623 * @param debug_execution False if all working areas allocated by OpenOCD
624 * should be released and/or restored to their original contents.
625 * (This would for example be true to run some downloaded "helper"
626 * algorithm code, which resides in one such working buffer and uses
627 * another for data storage.)
628 *
629 * @todo Resolve the ambiguity about what the "debug_execution" flag
630 * signifies. For example, Target implementations don't agree on how
631 * it relates to invalidation of the register cache, or to whether
632 * breakpoints and watchpoints should be enabled. (It would seem wrong
633 * to enable breakpoints when running downloaded "helper" algorithms
634 * (debug_execution true), since the breakpoints would be set to match
635 * target firmware being debugged, not the helper algorithm.... and
636 * enabling them could cause such helpers to malfunction (for example,
637 * by overwriting data with a breakpoint instruction). On the other
638 * hand the infrastructure for running such helpers might use this
639 * procedure but rely on hardware breakpoint to detect termination.)
640 */
641 int target_resume(struct target *target, int current, target_addr_t address,
642 int handle_breakpoints, int debug_execution)
643 {
644 int retval;
645
646 /* We can't resume until after examine */
647 if (!target_was_examined(target)) {
648 LOG_ERROR("Target not examined yet");
649 return ERROR_FAIL;
650 }
651
652 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
653
654 /* note that resume *must* be asynchronous. The CPU can halt before
655 * we poll. The CPU can even halt at the current PC as a result of
656 * a software breakpoint inserted by the application (possibly a bug).
657 */
658 /*
659 * resume() triggers the event 'resumed'. The execution of TCL commands
660 * in the event handler causes the polling of targets. If the target has
661 * already halted for a breakpoint, polling will run the 'halted' event
662 * handler before the pending 'resumed' handler.
663 * Disable polling during resume() to guarantee the execution of handlers
664 * in the correct order.
665 */
666 bool save_poll = jtag_poll_get_enabled();
667 jtag_poll_set_enabled(false);
668 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
669 jtag_poll_set_enabled(save_poll);
670 if (retval != ERROR_OK)
671 return retval;
672
673 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
674
675 return retval;
676 }
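
/*
 * Illustrative sketch (not part of the build): typical uses of target_resume()
 * following the parameter conventions documented above. The entry address is a
 * hypothetical example value.
 */
#if 0
static int example_resume_at(struct target *target)
{
	const target_addr_t entry = 0x08000000;	/* hypothetical flash entry point */

	/* current = 0: start from 'entry' rather than the saved PC.
	 * handle_breakpoints = 1: skip a breakpoint sitting on 'entry'.
	 * debug_execution = 0: normal execution, working areas restored. */
	return target_resume(target, 0, entry, 1, 0);
}

static int example_resume_from_pc(struct target *target)
{
	/* current = 1: the address argument is ignored, resume at the saved PC. */
	return target_resume(target, 1, 0, 1, 0);
}
#endif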
677
678 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
679 {
680 char buf[100];
681 int retval;
682 struct jim_nvp *n;
683 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
684 if (!n->name) {
685 LOG_ERROR("invalid reset mode");
686 return ERROR_FAIL;
687 }
688
689 struct target *target;
690 for (target = all_targets; target; target = target->next)
691 target_call_reset_callbacks(target, reset_mode);
692
693 /* disable polling during reset to make reset event scripts
694 * more predictable, i.e. dr/irscan & pathmove in events will
695 * not have JTAG operations injected into the middle of a sequence.
696 */
697 bool save_poll = jtag_poll_get_enabled();
698
699 jtag_poll_set_enabled(false);
700
701 sprintf(buf, "ocd_process_reset %s", n->name);
702 retval = Jim_Eval(cmd->ctx->interp, buf);
703
704 jtag_poll_set_enabled(save_poll);
705
706 if (retval != JIM_OK) {
707 Jim_MakeErrorMessage(cmd->ctx->interp);
708 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
709 return ERROR_FAIL;
710 }
711
712 /* We want any events to be processed before the prompt */
713 retval = target_call_timer_callbacks_now();
714
715 for (target = all_targets; target; target = target->next) {
716 target->type->check_reset(target);
717 target->running_alg = false;
718 }
719
720 return retval;
721 }
722
723 static int identity_virt2phys(struct target *target,
724 target_addr_t virtual, target_addr_t *physical)
725 {
726 *physical = virtual;
727 return ERROR_OK;
728 }
729
730 static int no_mmu(struct target *target, int *enabled)
731 {
732 *enabled = 0;
733 return ERROR_OK;
734 }
735
736 /**
737 * Reset the @c examined flag for the given target.
738 * Pure paranoia -- targets are zeroed on allocation.
739 */
740 static inline void target_reset_examined(struct target *target)
741 {
742 target->examined = false;
743 }
744
745 static int default_examine(struct target *target)
746 {
747 target_set_examined(target);
748 return ERROR_OK;
749 }
750
751 /* no check by default */
752 static int default_check_reset(struct target *target)
753 {
754 return ERROR_OK;
755 }
756
757 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
758 * Keep in sync */
759 int target_examine_one(struct target *target)
760 {
761 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
762
763 int retval = target->type->examine(target);
764 if (retval != ERROR_OK) {
765 target_reset_examined(target);
766 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
767 return retval;
768 }
769
770 target_set_examined(target);
771 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
772
773 return ERROR_OK;
774 }
775
776 static int jtag_enable_callback(enum jtag_event event, void *priv)
777 {
778 struct target *target = priv;
779
780 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
781 return ERROR_OK;
782
783 jtag_unregister_event_callback(jtag_enable_callback, target);
784
785 return target_examine_one(target);
786 }
787
788 /* Targets that correctly implement init + examine, i.e.
789 * no communication with target during init:
790 *
791 * XScale
792 */
793 int target_examine(void)
794 {
795 int retval = ERROR_OK;
796 struct target *target;
797
798 for (target = all_targets; target; target = target->next) {
799 /* defer examination, but don't skip it */
800 if (!target->tap->enabled) {
801 jtag_register_event_callback(jtag_enable_callback,
802 target);
803 continue;
804 }
805
806 if (target->defer_examine)
807 continue;
808
809 int retval2 = target_examine_one(target);
810 if (retval2 != ERROR_OK) {
811 LOG_WARNING("target %s examination failed", target_name(target));
812 retval = retval2;
813 }
814 }
815 return retval;
816 }
817
818 const char *target_type_name(struct target *target)
819 {
820 return target->type->name;
821 }
822
823 static int target_soft_reset_halt(struct target *target)
824 {
825 if (!target_was_examined(target)) {
826 LOG_ERROR("Target not examined yet");
827 return ERROR_FAIL;
828 }
829 if (!target->type->soft_reset_halt) {
830 LOG_ERROR("Target %s does not support soft_reset_halt",
831 target_name(target));
832 return ERROR_FAIL;
833 }
834 return target->type->soft_reset_halt(target);
835 }
836
837 /**
838 * Downloads a target-specific native code algorithm to the target,
839 * and executes it. Note that some targets may need to set up, enable,
840 * and tear down a breakpoint (hard or soft) to detect algorithm
841 * termination, while others may support lower overhead schemes where
842 * soft breakpoints embedded in the algorithm automatically terminate the
843 * algorithm.
844 *
845 * @param target used to run the algorithm
846 * @param num_mem_params
847 * @param mem_params
848 * @param num_reg_params
849 * @param reg_param
850 * @param entry_point
851 * @param exit_point
852 * @param timeout_ms
853 * @param arch_info target-specific description of the algorithm.
854 */
855 int target_run_algorithm(struct target *target,
856 int num_mem_params, struct mem_param *mem_params,
857 int num_reg_params, struct reg_param *reg_param,
858 target_addr_t entry_point, target_addr_t exit_point,
859 int timeout_ms, void *arch_info)
860 {
861 int retval = ERROR_FAIL;
862
863 if (!target_was_examined(target)) {
864 LOG_ERROR("Target not examined yet");
865 goto done;
866 }
867 if (!target->type->run_algorithm) {
868 LOG_ERROR("Target type '%s' does not support %s",
869 target_type_name(target), __func__);
870 goto done;
871 }
872
873 target->running_alg = true;
874 retval = target->type->run_algorithm(target,
875 num_mem_params, mem_params,
876 num_reg_params, reg_param,
877 entry_point, exit_point, timeout_ms, arch_info);
878 target->running_alg = false;
879
880 done:
881 return retval;
882 }
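
/*
 * Illustrative sketch (not part of the build): running a small helper routine
 * with one register parameter, using init_reg_param()/destroy_reg_param() from
 * src/target/algorithm.h and buf_set_u32()/buf_get_u32() from
 * src/helper/binarybuffer.h. The addresses, register name, input value and
 * timeout are made-up examples; real callers also pass an arch_info structure
 * matching the target type (e.g. struct armv7m_algorithm).
 */
#if 0
static int example_run_helper(struct target *target, void *arch_info)
{
	struct reg_param reg_params[1];
	const target_addr_t entry_point = 0x20000000;	/* hypothetical working-area code */
	const target_addr_t exit_point = 0x20000020;	/* hypothetical end breakpoint */

	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, 42);	/* input argument */

	int retval = target_run_algorithm(target, 0, NULL,
			ARRAY_SIZE(reg_params), reg_params,
			entry_point, exit_point, 1000 /* ms */, arch_info);

	if (retval == ERROR_OK) {
		uint32_t result = buf_get_u32(reg_params[0].value, 0, 32);
		LOG_DEBUG("helper returned 0x%08" PRIx32, result);
	}

	destroy_reg_param(&reg_params[0]);
	return retval;
}
#endif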
883
884 /**
885 * Executes a target-specific native code algorithm and leaves it running.
886 *
887 * @param target used to run the algorithm
888 * @param num_mem_params
889 * @param mem_params
890 * @param num_reg_params
891 * @param reg_params
892 * @param entry_point
893 * @param exit_point
894 * @param arch_info target-specific description of the algorithm.
895 */
896 int target_start_algorithm(struct target *target,
897 int num_mem_params, struct mem_param *mem_params,
898 int num_reg_params, struct reg_param *reg_params,
899 target_addr_t entry_point, target_addr_t exit_point,
900 void *arch_info)
901 {
902 int retval = ERROR_FAIL;
903
904 if (!target_was_examined(target)) {
905 LOG_ERROR("Target not examined yet");
906 goto done;
907 }
908 if (!target->type->start_algorithm) {
909 LOG_ERROR("Target type '%s' does not support %s",
910 target_type_name(target), __func__);
911 goto done;
912 }
913 if (target->running_alg) {
914 LOG_ERROR("Target is already running an algorithm");
915 goto done;
916 }
917
918 target->running_alg = true;
919 retval = target->type->start_algorithm(target,
920 num_mem_params, mem_params,
921 num_reg_params, reg_params,
922 entry_point, exit_point, arch_info);
923
924 done:
925 return retval;
926 }
927
928 /**
929 * Waits for an algorithm started with target_start_algorithm() to complete.
930 *
931 * @param target used to run the algorithm
932 * @param num_mem_params
933 * @param mem_params
934 * @param num_reg_params
935 * @param reg_params
936 * @param exit_point
937 * @param timeout_ms
938 * @param arch_info target-specific description of the algorithm.
939 */
940 int target_wait_algorithm(struct target *target,
941 int num_mem_params, struct mem_param *mem_params,
942 int num_reg_params, struct reg_param *reg_params,
943 target_addr_t exit_point, int timeout_ms,
944 void *arch_info)
945 {
946 int retval = ERROR_FAIL;
947
948 if (!target->type->wait_algorithm) {
949 LOG_ERROR("Target type '%s' does not support %s",
950 target_type_name(target), __func__);
951 goto done;
952 }
953 if (!target->running_alg) {
954 LOG_ERROR("Target is not running an algorithm");
955 goto done;
956 }
957
958 retval = target->type->wait_algorithm(target,
959 num_mem_params, mem_params,
960 num_reg_params, reg_params,
961 exit_point, timeout_ms, arch_info);
962 if (retval != ERROR_TARGET_TIMEOUT)
963 target->running_alg = false;
964
965 done:
966 return retval;
967 }
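
/*
 * Illustrative sketch (not part of the build): the split
 * target_start_algorithm()/target_wait_algorithm() pair lets the host do other
 * work (for example, refill a FIFO) while code keeps running on the target.
 * Addresses and the timeout are made-up example values.
 */
#if 0
static int example_start_then_wait(struct target *target, void *arch_info)
{
	const target_addr_t entry_point = 0x20000000;	/* hypothetical */
	const target_addr_t exit_point = 0x20000040;	/* hypothetical */

	int retval = target_start_algorithm(target, 0, NULL, 0, NULL,
			entry_point, exit_point, arch_info);
	if (retval != ERROR_OK)
		return retval;

	/* ... feed data to the running algorithm here ... */

	/* Collect the result; running_alg is cleared unless this times out. */
	return target_wait_algorithm(target, 0, NULL, 0, NULL,
			exit_point, 2000 /* ms */, arch_info);
}
#endif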
968
969 /**
970 * Streams data to a circular buffer on target intended for consumption by code
971 * running asynchronously on target.
972 *
973 * This is intended for applications where target-specific native code runs
974 * on the target, receives data from the circular buffer, does something with
975 * it (most likely writing it to a flash memory), and advances the circular
976 * buffer pointer.
977 *
978 * This assumes that the helper algorithm has already been loaded to the target,
979 * but has not been started yet. Given memory and register parameters are passed
980 * to the algorithm.
981 *
982 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
983 * following format:
984 *
985 * [buffer_start + 0, buffer_start + 4):
986 * Write Pointer address (aka head). Written and updated by this
987 * routine when new data is written to the circular buffer.
988 * [buffer_start + 4, buffer_start + 8):
989 * Read Pointer address (aka tail). Updated by code running on the
990 * target after it consumes data.
991 * [buffer_start + 8, buffer_start + buffer_size):
992 * Circular buffer contents.
993 *
994 * See contrib/loaders/flash/stm32f1x.S for an example.
995 *
996 * @param target used to run the algorithm
997 * @param buffer address on the host where data to be sent is located
998 * @param count number of blocks to send
999 * @param block_size size in bytes of each block
1000 * @param num_mem_params count of memory-based params to pass to algorithm
1001 * @param mem_params memory-based params to pass to algorithm
1002 * @param num_reg_params count of register-based params to pass to algorithm
1003 * @param reg_params register-based params to pass to algorithm
1004 * @param buffer_start address on the target of the circular buffer structure
1005 * @param buffer_size size of the circular buffer structure
1006 * @param entry_point address on the target to execute to start the algorithm
1007 * @param exit_point address at which to set a breakpoint to catch the
1008 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1009 * @param arch_info
1010 */
1011
1012 int target_run_flash_async_algorithm(struct target *target,
1013 const uint8_t *buffer, uint32_t count, int block_size,
1014 int num_mem_params, struct mem_param *mem_params,
1015 int num_reg_params, struct reg_param *reg_params,
1016 uint32_t buffer_start, uint32_t buffer_size,
1017 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1018 {
1019 int retval;
1020 int timeout = 0;
1021
1022 const uint8_t *buffer_orig = buffer;
1023
1024 /* Set up working area. First word is write pointer, second word is read pointer,
1025 * rest is fifo data area. */
1026 uint32_t wp_addr = buffer_start;
1027 uint32_t rp_addr = buffer_start + 4;
1028 uint32_t fifo_start_addr = buffer_start + 8;
1029 uint32_t fifo_end_addr = buffer_start + buffer_size;
1030
1031 uint32_t wp = fifo_start_addr;
1032 uint32_t rp = fifo_start_addr;
1033
1034 /* validate block_size is 2^n */
1035 assert(IS_PWR_OF_2(block_size));
1036
1037 retval = target_write_u32(target, wp_addr, wp);
1038 if (retval != ERROR_OK)
1039 return retval;
1040 retval = target_write_u32(target, rp_addr, rp);
1041 if (retval != ERROR_OK)
1042 return retval;
1043
1044 /* Start up algorithm on target and let it idle while writing the first chunk */
1045 retval = target_start_algorithm(target, num_mem_params, mem_params,
1046 num_reg_params, reg_params,
1047 entry_point,
1048 exit_point,
1049 arch_info);
1050
1051 if (retval != ERROR_OK) {
1052 LOG_ERROR("error starting target flash write algorithm");
1053 return retval;
1054 }
1055
1056 while (count > 0) {
1057
1058 retval = target_read_u32(target, rp_addr, &rp);
1059 if (retval != ERROR_OK) {
1060 LOG_ERROR("failed to get read pointer");
1061 break;
1062 }
1063
1064 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1065 (size_t) (buffer - buffer_orig), count, wp, rp);
1066
1067 if (rp == 0) {
1068 LOG_ERROR("flash write algorithm aborted by target");
1069 retval = ERROR_FLASH_OPERATION_FAILED;
1070 break;
1071 }
1072
1073 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1074 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1075 break;
1076 }
1077
1078 /* Count the number of bytes available in the fifo without
1079 * crossing the wrap around. Make sure to not fill it completely,
1080 * because that would make wp == rp and that's the empty condition. */
1081 uint32_t thisrun_bytes;
1082 if (rp > wp)
1083 thisrun_bytes = rp - wp - block_size;
1084 else if (rp > fifo_start_addr)
1085 thisrun_bytes = fifo_end_addr - wp;
1086 else
1087 thisrun_bytes = fifo_end_addr - wp - block_size;
1088
1089 if (thisrun_bytes == 0) {
1090 /* Throttle polling a bit if transfer is (much) faster than flash
1091 * programming. The exact delay shouldn't matter as long as it's
1092 * less than buffer size / flash speed. This is very unlikely to
1093 * run when using high latency connections such as USB. */
1094 alive_sleep(2);
1095
1096 /* To stop an infinite loop on some targets, check and increment a timeout;
1097 * this issue was observed on a Stellaris using the new ICDI interface. */
1098 if (timeout++ >= 2500) {
1099 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1100 return ERROR_FLASH_OPERATION_FAILED;
1101 }
1102 continue;
1103 }
1104
1105 /* reset our timeout */
1106 timeout = 0;
1107
1108 /* Limit to the amount of data we actually want to write */
1109 if (thisrun_bytes > count * block_size)
1110 thisrun_bytes = count * block_size;
1111
1112 /* Force end of large blocks to be word aligned */
1113 if (thisrun_bytes >= 16)
1114 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1115
1116 /* Write data to fifo */
1117 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1118 if (retval != ERROR_OK)
1119 break;
1120
1121 /* Update counters and wrap write pointer */
1122 buffer += thisrun_bytes;
1123 count -= thisrun_bytes / block_size;
1124 wp += thisrun_bytes;
1125 if (wp >= fifo_end_addr)
1126 wp = fifo_start_addr;
1127
1128 /* Store updated write pointer to target */
1129 retval = target_write_u32(target, wp_addr, wp);
1130 if (retval != ERROR_OK)
1131 break;
1132
1133 /* Avoid GDB timeouts */
1134 keep_alive();
1135 }
1136
1137 if (retval != ERROR_OK) {
1138 /* abort flash write algorithm on target */
1139 target_write_u32(target, wp_addr, 0);
1140 }
1141
1142 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1143 num_reg_params, reg_params,
1144 exit_point,
1145 10000,
1146 arch_info);
1147
1148 if (retval2 != ERROR_OK) {
1149 LOG_ERROR("error waiting for target flash write algorithm");
1150 retval = retval2;
1151 }
1152
1153 if (retval == ERROR_OK) {
1154 /* check if algorithm set rp = 0 after fifo writer loop finished */
1155 retval = target_read_u32(target, rp_addr, &rp);
1156 if (retval == ERROR_OK && rp == 0) {
1157 LOG_ERROR("flash write algorithm aborted by target");
1158 retval = ERROR_FLASH_OPERATION_FAILED;
1159 }
1160 }
1161
1162 return retval;
1163 }
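
/*
 * Illustrative sketch (not part of the build): the contract a target-resident
 * loader is expected to honour for the FIFO handled by
 * target_run_flash_async_algorithm() above, written in C for readability (real
 * loaders such as contrib/loaders/flash/stm32f1x.S implement this in
 * assembly). The flash_program_block() hook, the struct layout access and the
 * blocks_to_program termination count are hypothetical; only the wp/rp
 * protocol itself follows the layout documented above.
 */
#if 0
extern int flash_program_block(const uint8_t *src, uint32_t len);	/* hypothetical */

struct example_fifo {
	volatile uint32_t wp;	/* head: written by OpenOCD */
	volatile uint32_t rp;	/* tail: written by the loader */
};

static void example_loader_loop(struct example_fifo *fifo,
	uint32_t fifo_start, uint32_t fifo_end,
	uint32_t block_size, uint32_t blocks_to_program)
{
	while (blocks_to_program) {
		uint32_t wp = fifo->wp;
		if (wp == 0)
			break;		/* host wrote wp = 0: transfer aborted */
		if (fifo->rp == wp)
			continue;	/* FIFO empty, keep waiting */

		if (flash_program_block((const uint8_t *)(uintptr_t)fifo->rp,
				block_size) != 0) {
			fifo->rp = 0;	/* tell OpenOCD the flash operation failed */
			break;
		}

		/* Consume one block, wrapping the read pointer as needed. */
		uint32_t rp = fifo->rp + block_size;
		if (rp >= fifo_end)
			rp = fifo_start;
		fifo->rp = rp;
		blocks_to_program--;
	}
}
#endif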
1164
1165 int target_run_read_async_algorithm(struct target *target,
1166 uint8_t *buffer, uint32_t count, int block_size,
1167 int num_mem_params, struct mem_param *mem_params,
1168 int num_reg_params, struct reg_param *reg_params,
1169 uint32_t buffer_start, uint32_t buffer_size,
1170 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1171 {
1172 int retval;
1173 int timeout = 0;
1174
1175 const uint8_t *buffer_orig = buffer;
1176
1177 /* Set up working area. First word is write pointer, second word is read pointer,
1178 * rest is fifo data area. */
1179 uint32_t wp_addr = buffer_start;
1180 uint32_t rp_addr = buffer_start + 4;
1181 uint32_t fifo_start_addr = buffer_start + 8;
1182 uint32_t fifo_end_addr = buffer_start + buffer_size;
1183
1184 uint32_t wp = fifo_start_addr;
1185 uint32_t rp = fifo_start_addr;
1186
1187 /* validate block_size is 2^n */
1188 assert(IS_PWR_OF_2(block_size));
1189
1190 retval = target_write_u32(target, wp_addr, wp);
1191 if (retval != ERROR_OK)
1192 return retval;
1193 retval = target_write_u32(target, rp_addr, rp);
1194 if (retval != ERROR_OK)
1195 return retval;
1196
1197 /* Start up algorithm on target */
1198 retval = target_start_algorithm(target, num_mem_params, mem_params,
1199 num_reg_params, reg_params,
1200 entry_point,
1201 exit_point,
1202 arch_info);
1203
1204 if (retval != ERROR_OK) {
1205 LOG_ERROR("error starting target flash read algorithm");
1206 return retval;
1207 }
1208
1209 while (count > 0) {
1210 retval = target_read_u32(target, wp_addr, &wp);
1211 if (retval != ERROR_OK) {
1212 LOG_ERROR("failed to get write pointer");
1213 break;
1214 }
1215
1216 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1217 (size_t)(buffer - buffer_orig), count, wp, rp);
1218
1219 if (wp == 0) {
1220 LOG_ERROR("flash read algorithm aborted by target");
1221 retval = ERROR_FLASH_OPERATION_FAILED;
1222 break;
1223 }
1224
1225 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1226 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1227 break;
1228 }
1229
1230 /* Count the number of bytes available in the fifo without
1231 * crossing the wrap around. */
1232 uint32_t thisrun_bytes;
1233 if (wp >= rp)
1234 thisrun_bytes = wp - rp;
1235 else
1236 thisrun_bytes = fifo_end_addr - rp;
1237
1238 if (thisrun_bytes == 0) {
1239 /* Throttle polling a bit if transfer is (much) faster than flash
1240 * reading. The exact delay shouldn't matter as long as it's
1241 * less than buffer size / flash speed. This is very unlikely to
1242 * run when using high latency connections such as USB. */
1243 alive_sleep(2);
1244
1245 /* To stop an infinite loop on some targets, check and increment a timeout;
1246 * this issue was observed on a Stellaris using the new ICDI interface. */
1247 if (timeout++ >= 2500) {
1248 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1249 return ERROR_FLASH_OPERATION_FAILED;
1250 }
1251 continue;
1252 }
1253
1254 /* Reset our timeout */
1255 timeout = 0;
1256
1257 /* Limit to the amount of data we actually want to read */
1258 if (thisrun_bytes > count * block_size)
1259 thisrun_bytes = count * block_size;
1260
1261 /* Force end of large blocks to be word aligned */
1262 if (thisrun_bytes >= 16)
1263 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1264
1265 /* Read data from fifo */
1266 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1267 if (retval != ERROR_OK)
1268 break;
1269
1270 /* Update counters and wrap read pointer */
1271 buffer += thisrun_bytes;
1272 count -= thisrun_bytes / block_size;
1273 rp += thisrun_bytes;
1274 if (rp >= fifo_end_addr)
1275 rp = fifo_start_addr;
1276
1277 /* Store updated read pointer to target */
1278 retval = target_write_u32(target, rp_addr, rp);
1279 if (retval != ERROR_OK)
1280 break;
1281
1282 /* Avoid GDB timeouts */
1283 keep_alive();
1284
1285 }
1286
1287 if (retval != ERROR_OK) {
1288 /* abort flash read algorithm on target */
1289 target_write_u32(target, rp_addr, 0);
1290 }
1291
1292 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1293 num_reg_params, reg_params,
1294 exit_point,
1295 10000,
1296 arch_info);
1297
1298 if (retval2 != ERROR_OK) {
1299 LOG_ERROR("error waiting for target flash write algorithm");
1300 retval = retval2;
1301 }
1302
1303 if (retval == ERROR_OK) {
1304 /* check if algorithm set wp = 0 after fifo writer loop finished */
1305 retval = target_read_u32(target, wp_addr, &wp);
1306 if (retval == ERROR_OK && wp == 0) {
1307 LOG_ERROR("flash read algorithm aborted by target");
1308 retval = ERROR_FLASH_OPERATION_FAILED;
1309 }
1310 }
1311
1312 return retval;
1313 }
1314
1315 int target_read_memory(struct target *target,
1316 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1317 {
1318 if (!target_was_examined(target)) {
1319 LOG_ERROR("Target not examined yet");
1320 return ERROR_FAIL;
1321 }
1322 if (!target->type->read_memory) {
1323 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1324 return ERROR_FAIL;
1325 }
1326 return target->type->read_memory(target, address, size, count, buffer);
1327 }
1328
1329 int target_read_phys_memory(struct target *target,
1330 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1331 {
1332 if (!target_was_examined(target)) {
1333 LOG_ERROR("Target not examined yet");
1334 return ERROR_FAIL;
1335 }
1336 if (!target->type->read_phys_memory) {
1337 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1338 return ERROR_FAIL;
1339 }
1340 return target->type->read_phys_memory(target, address, size, count, buffer);
1341 }
1342
1343 int target_write_memory(struct target *target,
1344 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1345 {
1346 if (!target_was_examined(target)) {
1347 LOG_ERROR("Target not examined yet");
1348 return ERROR_FAIL;
1349 }
1350 if (!target->type->write_memory) {
1351 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1352 return ERROR_FAIL;
1353 }
1354 return target->type->write_memory(target, address, size, count, buffer);
1355 }
1356
1357 int target_write_phys_memory(struct target *target,
1358 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1359 {
1360 if (!target_was_examined(target)) {
1361 LOG_ERROR("Target not examined yet");
1362 return ERROR_FAIL;
1363 }
1364 if (!target->type->write_phys_memory) {
1365 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1366 return ERROR_FAIL;
1367 }
1368 return target->type->write_phys_memory(target, address, size, count, buffer);
1369 }
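
/*
 * Illustrative sketch (not part of the build): the size/count convention used
 * by the four wrappers above. 'size' is the access width in bytes (1, 2, 4,
 * and on some targets 8) and 'count' is the number of such accesses, so the
 * same 16 bytes can be transferred as four word accesses or sixteen byte
 * accesses depending on what the bus or peripheral requires. The address is a
 * made-up example value.
 */
#if 0
static int example_read_four_words(struct target *target, uint32_t *out)
{
	uint8_t raw[16];
	const target_addr_t example_addr = 0x40000000;	/* hypothetical peripheral */

	/* Four 32-bit accesses... */
	int retval = target_read_memory(target, example_addr, 4, 4, raw);
	if (retval != ERROR_OK)
		return retval;

	/* ...converted to host order with the helpers defined earlier. */
	target_buffer_get_u32_array(target, raw, 4, out);
	return ERROR_OK;
}
#endif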
1370
1371 int target_add_breakpoint(struct target *target,
1372 struct breakpoint *breakpoint)
1373 {
1374 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1375 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1376 return ERROR_TARGET_NOT_HALTED;
1377 }
1378 return target->type->add_breakpoint(target, breakpoint);
1379 }
1380
1381 int target_add_context_breakpoint(struct target *target,
1382 struct breakpoint *breakpoint)
1383 {
1384 if (target->state != TARGET_HALTED) {
1385 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1386 return ERROR_TARGET_NOT_HALTED;
1387 }
1388 return target->type->add_context_breakpoint(target, breakpoint);
1389 }
1390
1391 int target_add_hybrid_breakpoint(struct target *target,
1392 struct breakpoint *breakpoint)
1393 {
1394 if (target->state != TARGET_HALTED) {
1395 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1396 return ERROR_TARGET_NOT_HALTED;
1397 }
1398 return target->type->add_hybrid_breakpoint(target, breakpoint);
1399 }
1400
1401 int target_remove_breakpoint(struct target *target,
1402 struct breakpoint *breakpoint)
1403 {
1404 return target->type->remove_breakpoint(target, breakpoint);
1405 }
1406
1407 int target_add_watchpoint(struct target *target,
1408 struct watchpoint *watchpoint)
1409 {
1410 if (target->state != TARGET_HALTED) {
1411 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1412 return ERROR_TARGET_NOT_HALTED;
1413 }
1414 return target->type->add_watchpoint(target, watchpoint);
1415 }
1416 int target_remove_watchpoint(struct target *target,
1417 struct watchpoint *watchpoint)
1418 {
1419 return target->type->remove_watchpoint(target, watchpoint);
1420 }
1421 int target_hit_watchpoint(struct target *target,
1422 struct watchpoint **hit_watchpoint)
1423 {
1424 if (target->state != TARGET_HALTED) {
1425 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1426 return ERROR_TARGET_NOT_HALTED;
1427 }
1428
1429 if (!target->type->hit_watchpoint) {
1430 /* For backward compatibility, if hit_watchpoint is not implemented,
1431 * return ERROR_FAIL so that gdb_server does not report bogus
1432 * information. */
1433 return ERROR_FAIL;
1434 }
1435
1436 return target->type->hit_watchpoint(target, hit_watchpoint);
1437 }
1438
1439 const char *target_get_gdb_arch(struct target *target)
1440 {
1441 if (!target->type->get_gdb_arch)
1442 return NULL;
1443 return target->type->get_gdb_arch(target);
1444 }
1445
1446 int target_get_gdb_reg_list(struct target *target,
1447 struct reg **reg_list[], int *reg_list_size,
1448 enum target_register_class reg_class)
1449 {
1450 int result = ERROR_FAIL;
1451
1452 if (!target_was_examined(target)) {
1453 LOG_ERROR("Target not examined yet");
1454 goto done;
1455 }
1456
1457 result = target->type->get_gdb_reg_list(target, reg_list,
1458 reg_list_size, reg_class);
1459
1460 done:
1461 if (result != ERROR_OK) {
1462 *reg_list = NULL;
1463 *reg_list_size = 0;
1464 }
1465 return result;
1466 }
1467
1468 int target_get_gdb_reg_list_noread(struct target *target,
1469 struct reg **reg_list[], int *reg_list_size,
1470 enum target_register_class reg_class)
1471 {
1472 if (target->type->get_gdb_reg_list_noread &&
1473 target->type->get_gdb_reg_list_noread(target, reg_list,
1474 reg_list_size, reg_class) == ERROR_OK)
1475 return ERROR_OK;
1476 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1477 }
1478
1479 bool target_supports_gdb_connection(struct target *target)
1480 {
1481 /*
1482 * exclude all the targets that don't provide get_gdb_reg_list
1483 * or that have explicit gdb_max_connection == 0
1484 */
1485 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1486 }
1487
1488 int target_step(struct target *target,
1489 int current, target_addr_t address, int handle_breakpoints)
1490 {
1491 int retval;
1492
1493 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1494
1495 retval = target->type->step(target, current, address, handle_breakpoints);
1496 if (retval != ERROR_OK)
1497 return retval;
1498
1499 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1500
1501 return retval;
1502 }
1503
1504 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1505 {
1506 if (target->state != TARGET_HALTED) {
1507 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1508 return ERROR_TARGET_NOT_HALTED;
1509 }
1510 return target->type->get_gdb_fileio_info(target, fileio_info);
1511 }
1512
1513 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1514 {
1515 if (target->state != TARGET_HALTED) {
1516 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1517 return ERROR_TARGET_NOT_HALTED;
1518 }
1519 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1520 }
1521
1522 target_addr_t target_address_max(struct target *target)
1523 {
1524 unsigned bits = target_address_bits(target);
1525 if (sizeof(target_addr_t) * 8 == bits)
1526 return (target_addr_t) -1;
1527 else
1528 return (((target_addr_t) 1) << bits) - 1;
1529 }
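
/*
 * Worked example for the formula above (illustrative): a target reporting
 * 32 address bits yields (((target_addr_t)1) << 32) - 1 = 0xFFFFFFFF, while a
 * target whose reported width equals the width of target_addr_t itself (e.g.
 * 64 bits) takes the first branch and gets (target_addr_t)-1, i.e. all bits
 * set, which avoids an undefined full-width shift.
 */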
1530
1531 unsigned target_address_bits(struct target *target)
1532 {
1533 if (target->type->address_bits)
1534 return target->type->address_bits(target);
1535 return 32;
1536 }
1537
1538 unsigned int target_data_bits(struct target *target)
1539 {
1540 if (target->type->data_bits)
1541 return target->type->data_bits(target);
1542 return 32;
1543 }
1544
1545 static int target_profiling(struct target *target, uint32_t *samples,
1546 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1547 {
1548 return target->type->profiling(target, samples, max_num_samples,
1549 num_samples, seconds);
1550 }
1551
1552 static int handle_target(void *priv);
1553
1554 static int target_init_one(struct command_context *cmd_ctx,
1555 struct target *target)
1556 {
1557 target_reset_examined(target);
1558
1559 struct target_type *type = target->type;
1560 if (!type->examine)
1561 type->examine = default_examine;
1562
1563 if (!type->check_reset)
1564 type->check_reset = default_check_reset;
1565
1566 assert(type->init_target);
1567
1568 int retval = type->init_target(cmd_ctx, target);
1569 if (retval != ERROR_OK) {
1570 LOG_ERROR("target '%s' init failed", target_name(target));
1571 return retval;
1572 }
1573
1574 /* Sanity-check MMU support ... stub in what we must, to help
1575 * implement it in stages, but warn if we need to do so.
1576 */
1577 if (type->mmu) {
1578 if (!type->virt2phys) {
1579 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1580 type->virt2phys = identity_virt2phys;
1581 }
1582 } else {
1583 /* Make sure no-MMU targets all behave the same: make no
1584 * distinction between physical and virtual addresses, and
1585 * ensure that virt2phys() is always an identity mapping.
1586 */
1587 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1588 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1589
1590 type->mmu = no_mmu;
1591 type->write_phys_memory = type->write_memory;
1592 type->read_phys_memory = type->read_memory;
1593 type->virt2phys = identity_virt2phys;
1594 }
1595
1596 if (!target->type->read_buffer)
1597 target->type->read_buffer = target_read_buffer_default;
1598
1599 if (!target->type->write_buffer)
1600 target->type->write_buffer = target_write_buffer_default;
1601
1602 if (!target->type->get_gdb_fileio_info)
1603 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1604
1605 if (!target->type->gdb_fileio_end)
1606 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1607
1608 if (!target->type->profiling)
1609 target->type->profiling = target_profiling_default;
1610
1611 return ERROR_OK;
1612 }
1613
1614 static int target_init(struct command_context *cmd_ctx)
1615 {
1616 struct target *target;
1617 int retval;
1618
1619 for (target = all_targets; target; target = target->next) {
1620 retval = target_init_one(cmd_ctx, target);
1621 if (retval != ERROR_OK)
1622 return retval;
1623 }
1624
1625 if (!all_targets)
1626 return ERROR_OK;
1627
1628 retval = target_register_user_commands(cmd_ctx);
1629 if (retval != ERROR_OK)
1630 return retval;
1631
1632 retval = target_register_timer_callback(&handle_target,
1633 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1634 if (retval != ERROR_OK)
1635 return retval;
1636
1637 return ERROR_OK;
1638 }
1639
1640 COMMAND_HANDLER(handle_target_init_command)
1641 {
1642 int retval;
1643
1644 if (CMD_ARGC != 0)
1645 return ERROR_COMMAND_SYNTAX_ERROR;
1646
1647 static bool target_initialized;
1648 if (target_initialized) {
1649 LOG_INFO("'target init' has already been called");
1650 return ERROR_OK;
1651 }
1652 target_initialized = true;
1653
1654 retval = command_run_line(CMD_CTX, "init_targets");
1655 if (retval != ERROR_OK)
1656 return retval;
1657
1658 retval = command_run_line(CMD_CTX, "init_target_events");
1659 if (retval != ERROR_OK)
1660 return retval;
1661
1662 retval = command_run_line(CMD_CTX, "init_board");
1663 if (retval != ERROR_OK)
1664 return retval;
1665
1666 LOG_DEBUG("Initializing targets...");
1667 return target_init(CMD_CTX);
1668 }
1669
1670 int target_register_event_callback(int (*callback)(struct target *target,
1671 enum target_event event, void *priv), void *priv)
1672 {
1673 struct target_event_callback **callbacks_p = &target_event_callbacks;
1674
1675 if (!callback)
1676 return ERROR_COMMAND_SYNTAX_ERROR;
1677
1678 if (*callbacks_p) {
1679 while ((*callbacks_p)->next)
1680 callbacks_p = &((*callbacks_p)->next);
1681 callbacks_p = &((*callbacks_p)->next);
1682 }
1683
1684 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1685 (*callbacks_p)->callback = callback;
1686 (*callbacks_p)->priv = priv;
1687 (*callbacks_p)->next = NULL;
1688
1689 return ERROR_OK;
1690 }
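
/*
 * Illustrative sketch (not part of the build): registering a global event
 * callback with target_register_event_callback(). The callback runs for every
 * target and every event, so it must filter on the event (and, if needed, on
 * the target) itself. The logging done here is just an example.
 */
#if 0
static int example_halt_logger(struct target *target,
	enum target_event event, void *priv)
{
	if (event == TARGET_EVENT_HALTED)
		LOG_INFO("target %s halted (%s)", target_name(target),
			debug_reason_name(target));
	return ERROR_OK;
}

static int example_install_halt_logger(void)
{
	return target_register_event_callback(example_halt_logger, NULL);
}
#endif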
1691
1692 int target_register_reset_callback(int (*callback)(struct target *target,
1693 enum target_reset_mode reset_mode, void *priv), void *priv)
1694 {
1695 struct target_reset_callback *entry;
1696
1697 if (!callback)
1698 return ERROR_COMMAND_SYNTAX_ERROR;
1699
1700 entry = malloc(sizeof(struct target_reset_callback));
1701 if (!entry) {
1702 LOG_ERROR("error allocating buffer for reset callback entry");
1703 return ERROR_COMMAND_SYNTAX_ERROR;
1704 }
1705
1706 entry->callback = callback;
1707 entry->priv = priv;
1708 list_add(&entry->list, &target_reset_callback_list);
1709
1710
1711 return ERROR_OK;
1712 }
1713
1714 int target_register_trace_callback(int (*callback)(struct target *target,
1715 size_t len, uint8_t *data, void *priv), void *priv)
1716 {
1717 struct target_trace_callback *entry;
1718
1719 if (!callback)
1720 return ERROR_COMMAND_SYNTAX_ERROR;
1721
1722 entry = malloc(sizeof(struct target_trace_callback));
1723 if (!entry) {
1724 LOG_ERROR("error allocating buffer for trace callback entry");
1725 return ERROR_COMMAND_SYNTAX_ERROR;
1726 }
1727
1728 entry->callback = callback;
1729 entry->priv = priv;
1730 list_add(&entry->list, &target_trace_callback_list);
1731
1732
1733 return ERROR_OK;
1734 }
1735
1736 int target_register_timer_callback(int (*callback)(void *priv),
1737 unsigned int time_ms, enum target_timer_type type, void *priv)
1738 {
1739 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1740
1741 if (!callback)
1742 return ERROR_COMMAND_SYNTAX_ERROR;
1743
1744 if (*callbacks_p) {
1745 while ((*callbacks_p)->next)
1746 callbacks_p = &((*callbacks_p)->next);
1747 callbacks_p = &((*callbacks_p)->next);
1748 }
1749
1750 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1751 (*callbacks_p)->callback = callback;
1752 (*callbacks_p)->type = type;
1753 (*callbacks_p)->time_ms = time_ms;
1754 (*callbacks_p)->removed = false;
1755
1756 (*callbacks_p)->when = timeval_ms() + time_ms;
1757 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1758
1759 (*callbacks_p)->priv = priv;
1760 (*callbacks_p)->next = NULL;
1761
1762 return ERROR_OK;
1763 }
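/* Illustrative sketch (not part of the original file): target_init() above
 * registers handle_target() exactly this way; a hypothetical driver could
 * register its own periodic work similarly. The 100 ms period is made up.
 *
 *	static int my_tick(void *priv)
 *	{
 *		return ERROR_OK;
 *	}
 *
 *	target_register_timer_callback(my_tick, 100,
 *		TARGET_TIMER_TYPE_PERIODIC, NULL);
 *	...
 *	target_unregister_timer_callback(my_tick, NULL);
 *
 * A non-periodic (one-shot) callback is instead unregistered automatically
 * after its first invocation, see target_call_timer_callback() below.
 */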
1764
1765 int target_unregister_event_callback(int (*callback)(struct target *target,
1766 enum target_event event, void *priv), void *priv)
1767 {
1768 struct target_event_callback **p = &target_event_callbacks;
1769 struct target_event_callback *c = target_event_callbacks;
1770
1771 if (!callback)
1772 return ERROR_COMMAND_SYNTAX_ERROR;
1773
1774 while (c) {
1775 struct target_event_callback *next = c->next;
1776 if ((c->callback == callback) && (c->priv == priv)) {
1777 *p = next;
1778 free(c);
1779 return ERROR_OK;
1780 } else
1781 p = &(c->next);
1782 c = next;
1783 }
1784
1785 return ERROR_OK;
1786 }
1787
1788 int target_unregister_reset_callback(int (*callback)(struct target *target,
1789 enum target_reset_mode reset_mode, void *priv), void *priv)
1790 {
1791 struct target_reset_callback *entry;
1792
1793 if (!callback)
1794 return ERROR_COMMAND_SYNTAX_ERROR;
1795
1796 list_for_each_entry(entry, &target_reset_callback_list, list) {
1797 if (entry->callback == callback && entry->priv == priv) {
1798 list_del(&entry->list);
1799 free(entry);
1800 break;
1801 }
1802 }
1803
1804 return ERROR_OK;
1805 }
1806
1807 int target_unregister_trace_callback(int (*callback)(struct target *target,
1808 size_t len, uint8_t *data, void *priv), void *priv)
1809 {
1810 struct target_trace_callback *entry;
1811
1812 if (!callback)
1813 return ERROR_COMMAND_SYNTAX_ERROR;
1814
1815 list_for_each_entry(entry, &target_trace_callback_list, list) {
1816 if (entry->callback == callback && entry->priv == priv) {
1817 list_del(&entry->list);
1818 free(entry);
1819 break;
1820 }
1821 }
1822
1823 return ERROR_OK;
1824 }
1825
1826 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1827 {
1828 if (!callback)
1829 return ERROR_COMMAND_SYNTAX_ERROR;
1830
1831 for (struct target_timer_callback *c = target_timer_callbacks;
1832 c; c = c->next) {
1833 if ((c->callback == callback) && (c->priv == priv)) {
1834 c->removed = true;
1835 return ERROR_OK;
1836 }
1837 }
1838
1839 return ERROR_FAIL;
1840 }
1841
1842 int target_call_event_callbacks(struct target *target, enum target_event event)
1843 {
1844 struct target_event_callback *callback = target_event_callbacks;
1845 struct target_event_callback *next_callback;
1846
1847 if (event == TARGET_EVENT_HALTED) {
1848 /* deliver TARGET_EVENT_GDB_HALT first so GDB is notified before other handlers */
1849 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1850 }
1851
1852 LOG_DEBUG("target event %i (%s) for core %s", event,
1853 target_event_name(event),
1854 target_name(target));
1855
1856 target_handle_event(target, event);
1857
1858 while (callback) {
1859 next_callback = callback->next;
1860 callback->callback(target, event, callback->priv);
1861 callback = next_callback;
1862 }
1863
1864 return ERROR_OK;
1865 }
1866
1867 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1868 {
1869 struct target_reset_callback *callback;
1870
1871 LOG_DEBUG("target reset %i (%s)", reset_mode,
1872 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1873
1874 list_for_each_entry(callback, &target_reset_callback_list, list)
1875 callback->callback(target, reset_mode, callback->priv);
1876
1877 return ERROR_OK;
1878 }
1879
1880 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1881 {
1882 struct target_trace_callback *callback;
1883
1884 list_for_each_entry(callback, &target_trace_callback_list, list)
1885 callback->callback(target, len, data, callback->priv);
1886
1887 return ERROR_OK;
1888 }
1889
1890 static int target_timer_callback_periodic_restart(
1891 struct target_timer_callback *cb, int64_t *now)
1892 {
1893 cb->when = *now + cb->time_ms;
1894 return ERROR_OK;
1895 }
1896
1897 static int target_call_timer_callback(struct target_timer_callback *cb,
1898 int64_t *now)
1899 {
1900 cb->callback(cb->priv);
1901
1902 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1903 return target_timer_callback_periodic_restart(cb, now);
1904
1905 return target_unregister_timer_callback(cb->callback, cb->priv);
1906 }
1907
1908 static int target_call_timer_callbacks_check_time(int checktime)
1909 {
1910 static bool callback_processing;
1911
1912 /* Do not allow nesting */
1913 if (callback_processing)
1914 return ERROR_OK;
1915
1916 callback_processing = true;
1917
1918 keep_alive();
1919
1920 int64_t now = timeval_ms();
1921
1922 /* Initialize to a default value that's a ways into the future.
1923 * The loop below will make it closer to now if there are
1924 * callbacks that want to be called sooner. */
1925 target_timer_next_event_value = now + 1000;
1926
1927 /* Store an address of the place containing a pointer to the
1928 * next item; initially, that's a standalone "root of the
1929 * list" variable. */
1930 struct target_timer_callback **callback = &target_timer_callbacks;
1931 while (callback && *callback) {
1932 if ((*callback)->removed) {
1933 struct target_timer_callback *p = *callback;
1934 *callback = (*callback)->next;
1935 free(p);
1936 continue;
1937 }
1938
1939 bool call_it = (*callback)->callback &&
1940 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1941 now >= (*callback)->when);
1942
1943 if (call_it)
1944 target_call_timer_callback(*callback, &now);
1945
1946 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1947 target_timer_next_event_value = (*callback)->when;
1948
1949 callback = &(*callback)->next;
1950 }
1951
1952 callback_processing = false;
1953 return ERROR_OK;
1954 }
1955
1956 int target_call_timer_callbacks()
1957 {
1958 return target_call_timer_callbacks_check_time(1);
1959 }
1960
1961 /* invoke periodic callbacks immediately */
1962 int target_call_timer_callbacks_now()
1963 {
1964 return target_call_timer_callbacks_check_time(0);
1965 }
1966
1967 int64_t target_timer_next_event(void)
1968 {
1969 return target_timer_next_event_value;
1970 }
1971
1972 /* Prints the working area layout for debug purposes */
1973 static void print_wa_layout(struct target *target)
1974 {
1975 struct working_area *c = target->working_areas;
1976
1977 while (c) {
1978 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1979 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1980 c->address, c->address + c->size - 1, c->size);
1981 c = c->next;
1982 }
1983 }
1984
1985 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1986 static void target_split_working_area(struct working_area *area, uint32_t size)
1987 {
1988 assert(area->free); /* Shouldn't split an allocated area */
1989 assert(size <= area->size); /* Caller should guarantee this */
1990
1991 /* Split only if not already the right size */
1992 if (size < area->size) {
1993 struct working_area *new_wa = malloc(sizeof(*new_wa));
1994
1995 if (!new_wa)
1996 return;
1997
1998 new_wa->next = area->next;
1999 new_wa->size = area->size - size;
2000 new_wa->address = area->address + size;
2001 new_wa->backup = NULL;
2002 new_wa->user = NULL;
2003 new_wa->free = true;
2004
2005 area->next = new_wa;
2006 area->size = size;
2007
2008 /* If backup memory was allocated to this area, it has the wrong size
2009 * now so free it and it will be reallocated if/when needed */
2010 free(area->backup);
2011 area->backup = NULL;
2012 }
2013 }
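/* Worked example (illustrative, addresses are made up): starting from a
 * single free area of 0x1000 bytes at 0x20000000, splitting off 0x100 bytes
 * shrinks that node and inserts a new free node of 0xf00 bytes behind it:
 *
 *	before:  [0x20000000 - 0x20000fff, free]
 *	after:   [0x20000000 - 0x200000ff, free] -> [0x20000100 - 0x20000fff, free]
 *
 * target_merge_working_areas() below performs the inverse step once
 * neighbouring areas become free again.
 */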
2014
2015 /* Merge all adjacent free areas into one */
2016 static void target_merge_working_areas(struct target *target)
2017 {
2018 struct working_area *c = target->working_areas;
2019
2020 while (c && c->next) {
2021 assert(c->next->address == c->address + c->size); /* This is an invariant */
2022
2023 /* Find two adjacent free areas */
2024 if (c->free && c->next->free) {
2025 /* Merge the last into the first */
2026 c->size += c->next->size;
2027
2028 /* Remove the last */
2029 struct working_area *to_be_freed = c->next;
2030 c->next = c->next->next;
2031 free(to_be_freed->backup);
2032 free(to_be_freed);
2033
2034 /* If backup memory was allocated to the remaining area, it has
2035 * the wrong size now */
2036 free(c->backup);
2037 c->backup = NULL;
2038 } else {
2039 c = c->next;
2040 }
2041 }
2042 }
2043
2044 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2045 {
2046 /* Reevaluate working area address based on MMU state */
2047 if (!target->working_areas) {
2048 int retval;
2049 int enabled;
2050
2051 retval = target->type->mmu(target, &enabled);
2052 if (retval != ERROR_OK)
2053 return retval;
2054
2055 if (!enabled) {
2056 if (target->working_area_phys_spec) {
2057 LOG_DEBUG("MMU disabled, using physical "
2058 "address for working memory " TARGET_ADDR_FMT,
2059 target->working_area_phys);
2060 target->working_area = target->working_area_phys;
2061 } else {
2062 LOG_ERROR("No working memory available. "
2063 "Specify -work-area-phys to target.");
2064 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2065 }
2066 } else {
2067 if (target->working_area_virt_spec) {
2068 LOG_DEBUG("MMU enabled, using virtual "
2069 "address for working memory " TARGET_ADDR_FMT,
2070 target->working_area_virt);
2071 target->working_area = target->working_area_virt;
2072 } else {
2073 LOG_ERROR("No working memory available. "
2074 "Specify -work-area-virt to target.");
2075 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2076 }
2077 }
2078
2079 /* Set up initial working area on first call */
2080 struct working_area *new_wa = malloc(sizeof(*new_wa));
2081 if (new_wa) {
2082 new_wa->next = NULL;
2083 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2084 new_wa->address = target->working_area;
2085 new_wa->backup = NULL;
2086 new_wa->user = NULL;
2087 new_wa->free = true;
2088 }
2089
2090 target->working_areas = new_wa;
2091 }
2092
2093 /* only allocate multiples of 4 bytes */
2094 if (size % 4)
2095 size = (size + 3) & (~3UL);
2096
2097 struct working_area *c = target->working_areas;
2098
2099 /* Find the first large enough working area */
2100 while (c) {
2101 if (c->free && c->size >= size)
2102 break;
2103 c = c->next;
2104 }
2105
2106 if (!c)
2107 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2108
2109 /* Split the working area into the requested size */
2110 target_split_working_area(c, size);
2111
2112 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2113 size, c->address);
2114
2115 if (target->backup_working_area) {
2116 if (!c->backup) {
2117 c->backup = malloc(c->size);
2118 if (!c->backup)
2119 return ERROR_FAIL;
2120 }
2121
2122 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2123 if (retval != ERROR_OK)
2124 return retval;
2125 }
2126
2127 /* mark as used, and return the new (reused) area */
2128 c->free = false;
2129 *area = c;
2130
2131 /* user pointer */
2132 c->user = area;
2133
2134 print_wa_layout(target);
2135
2136 return ERROR_OK;
2137 }
2138
2139 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2140 {
2141 int retval;
2142
2143 retval = target_alloc_working_area_try(target, size, area);
2144 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2145 LOG_WARNING("not enough working area available (requested %"PRIu32")", size);
2146 return retval;
2147
2148 }
2149
2150 static int target_restore_working_area(struct target *target, struct working_area *area)
2151 {
2152 int retval = ERROR_OK;
2153
2154 if (target->backup_working_area && area->backup) {
2155 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2156 if (retval != ERROR_OK)
2157 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2158 area->size, area->address);
2159 }
2160
2161 return retval;
2162 }
2163
2164 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2165 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2166 {
2167 if (!area || area->free)
2168 return ERROR_OK;
2169
2170 int retval = ERROR_OK;
2171 if (restore) {
2172 retval = target_restore_working_area(target, area);
2173 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2174 if (retval != ERROR_OK)
2175 return retval;
2176 }
2177
2178 area->free = true;
2179
2180 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2181 area->size, area->address);
2182
2183 /* mark user pointer invalid */
2184 /* TODO: Is this really safe? It points to some previous caller's memory.
2185 * How could we know that the area pointer is still in that place and not
2186 * some other vital data? What's the purpose of this, anyway? */
2187 *area->user = NULL;
2188 area->user = NULL;
2189
2190 target_merge_working_areas(target);
2191
2192 print_wa_layout(target);
2193
2194 return retval;
2195 }
2196
2197 int target_free_working_area(struct target *target, struct working_area *area)
2198 {
2199 return target_free_working_area_restore(target, area, 1);
2200 }
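/* Typical usage (a minimal sketch, not part of the original file): callers
 * such as flash drivers allocate scratch RAM for a helper algorithm and give
 * it back when done. The 512-byte size and the variable name are hypothetical.
 *
 *	struct working_area *algo_wa = NULL;
 *	if (target_alloc_working_area(target, 512, &algo_wa) != ERROR_OK)
 *		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *	... download and run code at algo_wa->address ...
 *	target_free_working_area(target, algo_wa);
 *
 * Note that target_alloc_working_area_try() stores a pointer to the caller's
 * variable (c->user above), which is cleared again when the area is freed.
 */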
2201
2202 /* Free resources and restore memory; if restoring memory fails,
2203 * free up the resources anyway
2204 */
2205 static void target_free_all_working_areas_restore(struct target *target, int restore)
2206 {
2207 struct working_area *c = target->working_areas;
2208
2209 LOG_DEBUG("freeing all working areas");
2210
2211 /* Loop through all areas, restoring the allocated ones and marking them as free */
2212 while (c) {
2213 if (!c->free) {
2214 if (restore)
2215 target_restore_working_area(target, c);
2216 c->free = true;
2217 *c->user = NULL; /* Same as above */
2218 c->user = NULL;
2219 }
2220 c = c->next;
2221 }
2222
2223 /* Run a merge pass to combine all areas into one */
2224 target_merge_working_areas(target);
2225
2226 print_wa_layout(target);
2227 }
2228
2229 void target_free_all_working_areas(struct target *target)
2230 {
2231 target_free_all_working_areas_restore(target, 1);
2232
2233 /* Now we have none or only one working area marked as free */
2234 if (target->working_areas) {
2235 /* Free the last one to allow on-the-fly moving and resizing */
2236 free(target->working_areas->backup);
2237 free(target->working_areas);
2238 target->working_areas = NULL;
2239 }
2240 }
2241
2242 /* Find the largest number of bytes that can be allocated */
2243 uint32_t target_get_working_area_avail(struct target *target)
2244 {
2245 struct working_area *c = target->working_areas;
2246 uint32_t max_size = 0;
2247
2248 if (!c)
2249 return target->working_area_size;
2250
2251 while (c) {
2252 if (c->free && max_size < c->size)
2253 max_size = c->size;
2254
2255 c = c->next;
2256 }
2257
2258 return max_size;
2259 }
2260
2261 static void target_destroy(struct target *target)
2262 {
2263 if (target->type->deinit_target)
2264 target->type->deinit_target(target);
2265
2266 if (target->semihosting)
2267 free(target->semihosting->basedir);
2268 free(target->semihosting);
2269
2270 jtag_unregister_event_callback(jtag_enable_callback, target);
2271
2272 struct target_event_action *teap = target->event_action;
2273 while (teap) {
2274 struct target_event_action *next = teap->next;
2275 Jim_DecrRefCount(teap->interp, teap->body);
2276 free(teap);
2277 teap = next;
2278 }
2279
2280 target_free_all_working_areas(target);
2281
2282 /* release the target's SMP list */
2283 if (target->smp) {
2284 struct target_list *head, *tmp;
2285
2286 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2287 list_del(&head->lh);
2288 head->target->smp = 0;
2289 free(head);
2290 }
2291 if (target->smp_targets != &empty_smp_targets)
2292 free(target->smp_targets);
2293 target->smp = 0;
2294 }
2295
2296 rtos_destroy(target);
2297
2298 free(target->gdb_port_override);
2299 free(target->type);
2300 free(target->trace_info);
2301 free(target->fileio_info);
2302 free(target->cmd_name);
2303 free(target);
2304 }
2305
2306 void target_quit(void)
2307 {
2308 struct target_event_callback *pe = target_event_callbacks;
2309 while (pe) {
2310 struct target_event_callback *t = pe->next;
2311 free(pe);
2312 pe = t;
2313 }
2314 target_event_callbacks = NULL;
2315
2316 struct target_timer_callback *pt = target_timer_callbacks;
2317 while (pt) {
2318 struct target_timer_callback *t = pt->next;
2319 free(pt);
2320 pt = t;
2321 }
2322 target_timer_callbacks = NULL;
2323
2324 for (struct target *target = all_targets; target;) {
2325 struct target *tmp;
2326
2327 tmp = target->next;
2328 target_destroy(target);
2329 target = tmp;
2330 }
2331
2332 all_targets = NULL;
2333 }
2334
2335 int target_arch_state(struct target *target)
2336 {
2337 int retval;
2338 if (!target) {
2339 LOG_WARNING("No target has been configured");
2340 return ERROR_OK;
2341 }
2342
2343 if (target->state != TARGET_HALTED)
2344 return ERROR_OK;
2345
2346 retval = target->type->arch_state(target);
2347 return retval;
2348 }
2349
2350 static int target_get_gdb_fileio_info_default(struct target *target,
2351 struct gdb_fileio_info *fileio_info)
2352 {
2353 /* If the target does not support semihosting functions, it has
2354 no need to provide a .get_gdb_fileio_info callback. Returning
2355 ERROR_FAIL here makes gdb_server report "Txx" (target halted)
2356 every time. */
2357 return ERROR_FAIL;
2358 }
2359
2360 static int target_gdb_fileio_end_default(struct target *target,
2361 int retcode, int fileio_errno, bool ctrl_c)
2362 {
2363 return ERROR_OK;
2364 }
2365
2366 int target_profiling_default(struct target *target, uint32_t *samples,
2367 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2368 {
2369 struct timeval timeout, now;
2370
2371 gettimeofday(&timeout, NULL);
2372 timeval_add_time(&timeout, seconds, 0);
2373
2374 LOG_INFO("Starting profiling. Halting and resuming the"
2375 " target as often as we can...");
2376
2377 uint32_t sample_count = 0;
2378 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2379 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2380
2381 int retval = ERROR_OK;
2382 for (;;) {
2383 target_poll(target);
2384 if (target->state == TARGET_HALTED) {
2385 uint32_t t = buf_get_u32(reg->value, 0, 32);
2386 samples[sample_count++] = t;
2387 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2388 retval = target_resume(target, 1, 0, 0, 0);
2389 target_poll(target);
2390 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2391 } else if (target->state == TARGET_RUNNING) {
2392 /* We want to quickly sample the PC. */
2393 retval = target_halt(target);
2394 } else {
2395 LOG_INFO("Target not halted or running");
2396 retval = ERROR_OK;
2397 break;
2398 }
2399
2400 if (retval != ERROR_OK)
2401 break;
2402
2403 gettimeofday(&now, NULL);
2404 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2405 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2406 break;
2407 }
2408 }
2409
2410 *num_samples = sample_count;
2411 return retval;
2412 }
2413
2414 /* A single aligned halfword or word is guaranteed to use a 16 or
2415 * 32 bit access respectively; anything else is transferred using
2416 * the largest access sizes possible
2417 */
2418 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2419 {
2420 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2421 size, address);
2422
2423 if (!target_was_examined(target)) {
2424 LOG_ERROR("Target not examined yet");
2425 return ERROR_FAIL;
2426 }
2427
2428 if (size == 0)
2429 return ERROR_OK;
2430
2431 if ((address + size - 1) < address) {
2432 /* GDB can request this when e.g. PC is 0xfffffffc */
2433 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2434 address,
2435 size);
2436 return ERROR_FAIL;
2437 }
2438
2439 return target->type->write_buffer(target, address, size, buffer);
2440 }
2441
2442 static int target_write_buffer_default(struct target *target,
2443 target_addr_t address, uint32_t count, const uint8_t *buffer)
2444 {
2445 uint32_t size;
2446 unsigned int data_bytes = target_data_bits(target) / 8;
2447
2448 /* Align the start address upwards one power-of-two step at a time. The loop
2449 * condition only steps up when at least one full access at the larger size will remain. */
2450 for (size = 1;
2451 size < data_bytes && count >= size * 2 + (address & size);
2452 size *= 2) {
2453 if (address & size) {
2454 int retval = target_write_memory(target, address, size, 1, buffer);
2455 if (retval != ERROR_OK)
2456 return retval;
2457 address += size;
2458 count -= size;
2459 buffer += size;
2460 }
2461 }
2462
2463 /* Write the data with as large access size as possible. */
2464 for (; size > 0; size /= 2) {
2465 uint32_t aligned = count - count % size;
2466 if (aligned > 0) {
2467 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2468 if (retval != ERROR_OK)
2469 return retval;
2470 address += aligned;
2471 count -= aligned;
2472 buffer += aligned;
2473 }
2474 }
2475
2476 return ERROR_OK;
2477 }
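/* Worked example (illustrative): with 32-bit data bits, writing 10 bytes to
 * the unaligned address 0x1001 is broken up by the two loops above into
 *
 *	head:  1 byte  at 0x1001, then 2 bytes at 0x1002   (align up)
 *	bulk:  1 word  at 0x1004                           (largest access)
 *	tail:  2 bytes at 0x1008, then 1 byte  at 0x100a   (remainder)
 *
 * target_read_buffer_default() below applies the same scheme to reads.
 */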
2478
2479 /* A single aligned halfword or word is guaranteed to use a 16 or
2480 * 32 bit access respectively; anything else is transferred using
2481 * the largest access sizes possible
2482 */
2483 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2484 {
2485 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2486 size, address);
2487
2488 if (!target_was_examined(target)) {
2489 LOG_ERROR("Target not examined yet");
2490 return ERROR_FAIL;
2491 }
2492
2493 if (size == 0)
2494 return ERROR_OK;
2495
2496 if ((address + size - 1) < address) {
2497 /* GDB can request this when e.g. PC is 0xfffffffc */
2498 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2499 address,
2500 size);
2501 return ERROR_FAIL;
2502 }
2503
2504 return target->type->read_buffer(target, address, size, buffer);
2505 }
2506
2507 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2508 {
2509 uint32_t size;
2510 unsigned int data_bytes = target_data_bits(target) / 8;
2511
2512 /* Align the start address upwards one power-of-two step at a time. The loop
2513 * condition only steps up when at least one full access at the larger size will remain. */
2514 for (size = 1;
2515 size < data_bytes && count >= size * 2 + (address & size);
2516 size *= 2) {
2517 if (address & size) {
2518 int retval = target_read_memory(target, address, size, 1, buffer);
2519 if (retval != ERROR_OK)
2520 return retval;
2521 address += size;
2522 count -= size;
2523 buffer += size;
2524 }
2525 }
2526
2527 /* Read the data with as large access size as possible. */
2528 for (; size > 0; size /= 2) {
2529 uint32_t aligned = count - count % size;
2530 if (aligned > 0) {
2531 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2532 if (retval != ERROR_OK)
2533 return retval;
2534 address += aligned;
2535 count -= aligned;
2536 buffer += aligned;
2537 }
2538 }
2539
2540 return ERROR_OK;
2541 }
2542
2543 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2544 {
2545 uint8_t *buffer;
2546 int retval;
2547 uint32_t i;
2548 uint32_t checksum = 0;
2549 if (!target_was_examined(target)) {
2550 LOG_ERROR("Target not examined yet");
2551 return ERROR_FAIL;
2552 }
2553 if (!target->type->checksum_memory) {
2554 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2555 return ERROR_FAIL;
2556 }
2557
2558 retval = target->type->checksum_memory(target, address, size, &checksum);
2559 if (retval != ERROR_OK) {
2560 buffer = malloc(size);
2561 if (!buffer) {
2562 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2563 return ERROR_COMMAND_SYNTAX_ERROR;
2564 }
2565 retval = target_read_buffer(target, address, size, buffer);
2566 if (retval != ERROR_OK) {
2567 free(buffer);
2568 return retval;
2569 }
2570
2571 /* convert to target endianness */
2572 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2573 uint32_t target_data;
2574 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2575 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2576 }
2577
2578 retval = image_calculate_checksum(buffer, size, &checksum);
2579 free(buffer);
2580 }
2581
2582 *crc = checksum;
2583
2584 return retval;
2585 }
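/* Minimal usage sketch (assuming a host-side buffer `data` of `len` bytes
 * already holds the expected contents); the verify_image handler further
 * down follows the same idea to avoid reading whole regions back:
 *
 *	uint32_t host_crc, target_crc;
 *	image_calculate_checksum(data, len, &host_crc);
 *	if (target_checksum_memory(target, addr, len, &target_crc) == ERROR_OK
 *			&& host_crc != target_crc)
 *		LOG_ERROR("checksum mismatch");
 */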
2586
2587 int target_blank_check_memory(struct target *target,
2588 struct target_memory_check_block *blocks, int num_blocks,
2589 uint8_t erased_value)
2590 {
2591 if (!target_was_examined(target)) {
2592 LOG_ERROR("Target not examined yet");
2593 return ERROR_FAIL;
2594 }
2595
2596 if (!target->type->blank_check_memory)
2597 return ERROR_NOT_IMPLEMENTED;
2598
2599 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2600 }
2601
2602 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2603 {
2604 uint8_t value_buf[8];
2605 if (!target_was_examined(target)) {
2606 LOG_ERROR("Target not examined yet");
2607 return ERROR_FAIL;
2608 }
2609
2610 int retval = target_read_memory(target, address, 8, 1, value_buf);
2611
2612 if (retval == ERROR_OK) {
2613 *value = target_buffer_get_u64(target, value_buf);
2614 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2615 address,
2616 *value);
2617 } else {
2618 *value = 0x0;
2619 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2620 address);
2621 }
2622
2623 return retval;
2624 }
2625
2626 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2627 {
2628 uint8_t value_buf[4];
2629 if (!target_was_examined(target)) {
2630 LOG_ERROR("Target not examined yet");
2631 return ERROR_FAIL;
2632 }
2633
2634 int retval = target_read_memory(target, address, 4, 1, value_buf);
2635
2636 if (retval == ERROR_OK) {
2637 *value = target_buffer_get_u32(target, value_buf);
2638 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2639 address,
2640 *value);
2641 } else {
2642 *value = 0x0;
2643 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2644 address);
2645 }
2646
2647 return retval;
2648 }
2649
2650 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2651 {
2652 uint8_t value_buf[2];
2653 if (!target_was_examined(target)) {
2654 LOG_ERROR("Target not examined yet");
2655 return ERROR_FAIL;
2656 }
2657
2658 int retval = target_read_memory(target, address, 2, 1, value_buf);
2659
2660 if (retval == ERROR_OK) {
2661 *value = target_buffer_get_u16(target, value_buf);
2662 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2663 address,
2664 *value);
2665 } else {
2666 *value = 0x0;
2667 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2668 address);
2669 }
2670
2671 return retval;
2672 }
2673
2674 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2675 {
2676 if (!target_was_examined(target)) {
2677 LOG_ERROR("Target not examined yet");
2678 return ERROR_FAIL;
2679 }
2680
2681 int retval = target_read_memory(target, address, 1, 1, value);
2682
2683 if (retval == ERROR_OK) {
2684 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2685 address,
2686 *value);
2687 } else {
2688 *value = 0x0;
2689 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2690 address);
2691 }
2692
2693 return retval;
2694 }
2695
2696 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2697 {
2698 int retval;
2699 uint8_t value_buf[8];
2700 if (!target_was_examined(target)) {
2701 LOG_ERROR("Target not examined yet");
2702 return ERROR_FAIL;
2703 }
2704
2705 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2706 address,
2707 value);
2708
2709 target_buffer_set_u64(target, value_buf, value);
2710 retval = target_write_memory(target, address, 8, 1, value_buf);
2711 if (retval != ERROR_OK)
2712 LOG_DEBUG("failed: %i", retval);
2713
2714 return retval;
2715 }
2716
2717 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2718 {
2719 int retval;
2720 uint8_t value_buf[4];
2721 if (!target_was_examined(target)) {
2722 LOG_ERROR("Target not examined yet");
2723 return ERROR_FAIL;
2724 }
2725
2726 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2727 address,
2728 value);
2729
2730 target_buffer_set_u32(target, value_buf, value);
2731 retval = target_write_memory(target, address, 4, 1, value_buf);
2732 if (retval != ERROR_OK)
2733 LOG_DEBUG("failed: %i", retval);
2734
2735 return retval;
2736 }
2737
2738 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2739 {
2740 int retval;
2741 uint8_t value_buf[2];
2742 if (!target_was_examined(target)) {
2743 LOG_ERROR("Target not examined yet");
2744 return ERROR_FAIL;
2745 }
2746
2747 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2748 address,
2749 value);
2750
2751 target_buffer_set_u16(target, value_buf, value);
2752 retval = target_write_memory(target, address, 2, 1, value_buf);
2753 if (retval != ERROR_OK)
2754 LOG_DEBUG("failed: %i", retval);
2755
2756 return retval;
2757 }
2758
2759 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2760 {
2761 int retval;
2762 if (!target_was_examined(target)) {
2763 LOG_ERROR("Target not examined yet");
2764 return ERROR_FAIL;
2765 }
2766
2767 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2768 address, value);
2769
2770 retval = target_write_memory(target, address, 1, 1, &value);
2771 if (retval != ERROR_OK)
2772 LOG_DEBUG("failed: %i", retval);
2773
2774 return retval;
2775 }
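/* Minimal read-modify-write sketch using the helpers above (the register
 * address and bit are hypothetical):
 *
 *	uint32_t val;
 *	if (target_read_u32(target, 0x40000000, &val) == ERROR_OK) {
 *		val |= 1u << 0;
 *		target_write_u32(target, 0x40000000, val);
 *	}
 *
 * The *_phys variants below behave the same way but go through
 * target_write_phys_memory(), bypassing address translation.
 */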
2776
2777 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2778 {
2779 int retval;
2780 uint8_t value_buf[8];
2781 if (!target_was_examined(target)) {
2782 LOG_ERROR("Target not examined yet");
2783 return ERROR_FAIL;
2784 }
2785
2786 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2787 address,
2788 value);
2789
2790 target_buffer_set_u64(target, value_buf, value);
2791 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2792 if (retval != ERROR_OK)
2793 LOG_DEBUG("failed: %i", retval);
2794
2795 return retval;
2796 }
2797
2798 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2799 {
2800 int retval;
2801 uint8_t value_buf[4];
2802 if (!target_was_examined(target)) {
2803 LOG_ERROR("Target not examined yet");
2804 return ERROR_FAIL;
2805 }
2806
2807 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2808 address,
2809 value);
2810
2811 target_buffer_set_u32(target, value_buf, value);
2812 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2813 if (retval != ERROR_OK)
2814 LOG_DEBUG("failed: %i", retval);
2815
2816 return retval;
2817 }
2818
2819 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2820 {
2821 int retval;
2822 uint8_t value_buf[2];
2823 if (!target_was_examined(target)) {
2824 LOG_ERROR("Target not examined yet");
2825 return ERROR_FAIL;
2826 }
2827
2828 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2829 address,
2830 value);
2831
2832 target_buffer_set_u16(target, value_buf, value);
2833 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2834 if (retval != ERROR_OK)
2835 LOG_DEBUG("failed: %i", retval);
2836
2837 return retval;
2838 }
2839
2840 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2841 {
2842 int retval;
2843 if (!target_was_examined(target)) {
2844 LOG_ERROR("Target not examined yet");
2845 return ERROR_FAIL;
2846 }
2847
2848 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2849 address, value);
2850
2851 retval = target_write_phys_memory(target, address, 1, 1, &value);
2852 if (retval != ERROR_OK)
2853 LOG_DEBUG("failed: %i", retval);
2854
2855 return retval;
2856 }
2857
2858 static int find_target(struct command_invocation *cmd, const char *name)
2859 {
2860 struct target *target = get_target(name);
2861 if (!target) {
2862 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2863 return ERROR_FAIL;
2864 }
2865 if (!target->tap->enabled) {
2866 command_print(cmd, "Target: TAP %s is disabled, "
2867 "can't be the current target\n",
2868 target->tap->dotted_name);
2869 return ERROR_FAIL;
2870 }
2871
2872 cmd->ctx->current_target = target;
2873 if (cmd->ctx->current_target_override)
2874 cmd->ctx->current_target_override = target;
2875
2876 return ERROR_OK;
2877 }
2878
2879
2880 COMMAND_HANDLER(handle_targets_command)
2881 {
2882 int retval = ERROR_OK;
2883 if (CMD_ARGC == 1) {
2884 retval = find_target(CMD, CMD_ARGV[0]);
2885 if (retval == ERROR_OK) {
2886 /* we're done! */
2887 return retval;
2888 }
2889 }
2890
2891 struct target *target = all_targets;
2892 command_print(CMD, " TargetName Type Endian TapName State ");
2893 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2894 while (target) {
2895 const char *state;
2896 char marker = ' ';
2897
2898 if (target->tap->enabled)
2899 state = target_state_name(target);
2900 else
2901 state = "tap-disabled";
2902
2903 if (CMD_CTX->current_target == target)
2904 marker = '*';
2905
2906 /* keep columns lined up to match the headers above */
2907 command_print(CMD,
2908 "%2d%c %-18s %-10s %-6s %-18s %s",
2909 target->target_number,
2910 marker,
2911 target_name(target),
2912 target_type_name(target),
2913 jim_nvp_value2name_simple(nvp_target_endian,
2914 target->endianness)->name,
2915 target->tap->dotted_name,
2916 state);
2917 target = target->next;
2918 }
2919
2920 return retval;
2921 }
2922
2923 /* Periodically (each polling period) we check for reset & power dropout and run the corresponding srst/power event procs if so. */
2924
2925 static int power_dropout;
2926 static int srst_asserted;
2927
2928 static int run_power_restore;
2929 static int run_power_dropout;
2930 static int run_srst_asserted;
2931 static int run_srst_deasserted;
2932
2933 static int sense_handler(void)
2934 {
2935 static int prev_srst_asserted;
2936 static int prev_power_dropout;
2937
2938 int retval = jtag_power_dropout(&power_dropout);
2939 if (retval != ERROR_OK)
2940 return retval;
2941
2942 int power_restored;
2943 power_restored = prev_power_dropout && !power_dropout;
2944 if (power_restored)
2945 run_power_restore = 1;
2946
2947 int64_t current = timeval_ms();
2948 static int64_t last_power;
2949 bool wait_more = last_power + 2000 > current;
2950 if (power_dropout && !wait_more) {
2951 run_power_dropout = 1;
2952 last_power = current;
2953 }
2954
2955 retval = jtag_srst_asserted(&srst_asserted);
2956 if (retval != ERROR_OK)
2957 return retval;
2958
2959 int srst_deasserted;
2960 srst_deasserted = prev_srst_asserted && !srst_asserted;
2961
2962 static int64_t last_srst;
2963 wait_more = last_srst + 2000 > current;
2964 if (srst_deasserted && !wait_more) {
2965 run_srst_deasserted = 1;
2966 last_srst = current;
2967 }
2968
2969 if (!prev_srst_asserted && srst_asserted)
2970 run_srst_asserted = 1;
2971
2972 prev_srst_asserted = srst_asserted;
2973 prev_power_dropout = power_dropout;
2974
2975 if (srst_deasserted || power_restored) {
2976 /* Other than logging the event we can't do anything here.
2977 * Issuing a reset is a particularly bad idea as we might
2978 * be inside a reset already.
2979 */
2980 }
2981
2982 return ERROR_OK;
2983 }
2984
2985 /* process target state changes */
2986 static int handle_target(void *priv)
2987 {
2988 Jim_Interp *interp = (Jim_Interp *)priv;
2989 int retval = ERROR_OK;
2990
2991 if (!is_jtag_poll_safe()) {
2992 /* polling is disabled currently */
2993 return ERROR_OK;
2994 }
2995
2996 /* we do not want to recurse here... */
2997 static int recursive;
2998 if (!recursive) {
2999 recursive = 1;
3000 sense_handler();
3001 /* danger! running these procedures can trigger srst assertions and power dropouts.
3002 * We need to avoid an infinite loop/recursion here and we do that by
3003 * clearing the flags after running these events.
3004 */
3005 int did_something = 0;
3006 if (run_srst_asserted) {
3007 LOG_INFO("srst asserted detected, running srst_asserted proc.");
3008 Jim_Eval(interp, "srst_asserted");
3009 did_something = 1;
3010 }
3011 if (run_srst_deasserted) {
3012 Jim_Eval(interp, "srst_deasserted");
3013 did_something = 1;
3014 }
3015 if (run_power_dropout) {
3016 LOG_INFO("Power dropout detected, running power_dropout proc.");
3017 Jim_Eval(interp, "power_dropout");
3018 did_something = 1;
3019 }
3020 if (run_power_restore) {
3021 Jim_Eval(interp, "power_restore");
3022 did_something = 1;
3023 }
3024
3025 if (did_something) {
3026 /* clear detect flags */
3027 sense_handler();
3028 }
3029
3030 /* clear action flags */
3031
3032 run_srst_asserted = 0;
3033 run_srst_deasserted = 0;
3034 run_power_restore = 0;
3035 run_power_dropout = 0;
3036
3037 recursive = 0;
3038 }
3039
3040 /* Poll targets for state changes unless that's globally disabled.
3041 * Skip targets that are currently disabled.
3042 */
3043 for (struct target *target = all_targets;
3044 is_jtag_poll_safe() && target;
3045 target = target->next) {
3046
3047 if (!target_was_examined(target))
3048 continue;
3049
3050 if (!target->tap->enabled)
3051 continue;
3052
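/* Exponential poll backoff (descriptive note): after a failed poll the target
 * is skipped for backoff.times polling periods; backoff.times roughly doubles
 * on each further failure (see below) until times * polling_interval reaches
 * the 5000 ms cap, and a successful poll resets it to zero. */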
3053 if (target->backoff.times > target->backoff.count) {
3054 /* do not poll this time as we failed previously */
3055 target->backoff.count++;
3056 continue;
3057 }
3058 target->backoff.count = 0;
3059
3060 /* only poll target if we've got power and srst isn't asserted */
3061 if (!power_dropout && !srst_asserted) {
3062 /* polling may fail silently until the target has been examined */
3063 retval = target_poll(target);
3064 if (retval != ERROR_OK) {
3065 /* 100ms polling interval. Increase the interval between polls up to 5000ms */
3066 if (target->backoff.times * polling_interval < 5000) {
3067 target->backoff.times *= 2;
3068 target->backoff.times++;
3069 }
3070
3071 /* Tell GDB to halt the debugger. This allows the user to
3072 * run monitor commands to handle the situation.
3073 */
3074 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3075 }
3076 if (target->backoff.times > 0) {
3077 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3078 target_reset_examined(target);
3079 retval = target_examine_one(target);
3080 /* Target examination could have failed due to an unstable connection,
3081 * but we set the examined flag anyway so it gets re-polled later */
3082 if (retval != ERROR_OK) {
3083 target_set_examined(target);
3084 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3085 target->backoff.times * polling_interval);
3086 return retval;
3087 }
3088 }
3089
3090 /* Since we succeeded, we reset backoff count */
3091 target->backoff.times = 0;
3092 }
3093 }
3094
3095 return retval;
3096 }
3097
3098 COMMAND_HANDLER(handle_reg_command)
3099 {
3100 LOG_DEBUG("-");
3101
3102 struct target *target = get_current_target(CMD_CTX);
3103 struct reg *reg = NULL;
3104
3105 /* list all available registers for the current target */
3106 if (CMD_ARGC == 0) {
3107 struct reg_cache *cache = target->reg_cache;
3108
3109 unsigned int count = 0;
3110 while (cache) {
3111 unsigned i;
3112
3113 command_print(CMD, "===== %s", cache->name);
3114
3115 for (i = 0, reg = cache->reg_list;
3116 i < cache->num_regs;
3117 i++, reg++, count++) {
3118 if (reg->exist == false || reg->hidden)
3119 continue;
3120 /* only print cached values if they are valid */
3121 if (reg->valid) {
3122 char *value = buf_to_hex_str(reg->value,
3123 reg->size);
3124 command_print(CMD,
3125 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3126 count, reg->name,
3127 reg->size, value,
3128 reg->dirty
3129 ? " (dirty)"
3130 : "");
3131 free(value);
3132 } else {
3133 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3134 count, reg->name,
3135 reg->size);
3136 }
3137 }
3138 cache = cache->next;
3139 }
3140
3141 return ERROR_OK;
3142 }
3143
3144 /* access a single register by its ordinal number */
3145 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3146 unsigned num;
3147 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3148
3149 struct reg_cache *cache = target->reg_cache;
3150 unsigned int count = 0;
3151 while (cache) {
3152 unsigned i;
3153 for (i = 0; i < cache->num_regs; i++) {
3154 if (count++ == num) {
3155 reg = &cache->reg_list[i];
3156 break;
3157 }
3158 }
3159 if (reg)
3160 break;
3161 cache = cache->next;
3162 }
3163
3164 if (!reg) {
3165 command_print(CMD, "%i is out of bounds, the current target "
3166 "has only %i registers (0 - %i)", num, count, count - 1);
3167 return ERROR_OK;
3168 }
3169 } else {
3170 /* access a single register by its name */
3171 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3172
3173 if (!reg)
3174 goto not_found;
3175 }
3176
3177 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3178
3179 if (!reg->exist)
3180 goto not_found;
3181
3182 /* display a register */
3183 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3184 && (CMD_ARGV[1][0] <= '9')))) {
3185 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3186 reg->valid = 0;
3187
3188 if (reg->valid == 0) {
3189 int retval = reg->type->get(reg);
3190 if (retval != ERROR_OK) {
3191 LOG_ERROR("Could not read register '%s'", reg->name);
3192 return retval;
3193 }
3194 }
3195 char *value = buf_to_hex_str(reg->value, reg->size);
3196 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3197 free(value);
3198 return ERROR_OK;
3199 }
3200
3201 /* set register value */
3202 if (CMD_ARGC == 2) {
3203 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3204 if (!buf)
3205 return ERROR_FAIL;
3206 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3207
3208 int retval = reg->type->set(reg, buf);
3209 if (retval != ERROR_OK) {
3210 LOG_ERROR("Could not write to register '%s'", reg->name);
3211 } else {
3212 char *value = buf_to_hex_str(reg->value, reg->size);
3213 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3214 free(value);
3215 }
3216
3217 free(buf);
3218
3219 return retval;
3220 }
3221
3222 return ERROR_COMMAND_SYNTAX_ERROR;
3223
3224 not_found:
3225 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3226 return ERROR_OK;
3227 }
3228
3229 COMMAND_HANDLER(handle_poll_command)
3230 {
3231 int retval = ERROR_OK;
3232 struct target *target = get_current_target(CMD_CTX);
3233
3234 if (CMD_ARGC == 0) {
3235 command_print(CMD, "background polling: %s",
3236 jtag_poll_get_enabled() ? "on" : "off");
3237 command_print(CMD, "TAP: %s (%s)",
3238 target->tap->dotted_name,
3239 target->tap->enabled ? "enabled" : "disabled");
3240 if (!target->tap->enabled)
3241 return ERROR_OK;
3242 retval = target_poll(target);
3243 if (retval != ERROR_OK)
3244 return retval;
3245 retval = target_arch_state(target);
3246 if (retval != ERROR_OK)
3247 return retval;
3248 } else if (CMD_ARGC == 1) {
3249 bool enable;
3250 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3251 jtag_poll_set_enabled(enable);
3252 } else
3253 return ERROR_COMMAND_SYNTAX_ERROR;
3254
3255 return retval;
3256 }
3257
3258 COMMAND_HANDLER(handle_wait_halt_command)
3259 {
3260 if (CMD_ARGC > 1)
3261 return ERROR_COMMAND_SYNTAX_ERROR;
3262
3263 unsigned ms = DEFAULT_HALT_TIMEOUT;
3264 if (1 == CMD_ARGC) {
3265 int retval = parse_uint(CMD_ARGV[0], &ms);
3266 if (retval != ERROR_OK)
3267 return ERROR_COMMAND_SYNTAX_ERROR;
3268 }
3269
3270 struct target *target = get_current_target(CMD_CTX);
3271 return target_wait_state(target, TARGET_HALTED, ms);
3272 }
3273
3274 /* wait for target state to change. The trick here is to have a low
3275 * latency for short waits and not to suck up all the CPU time
3276 * on longer waits.
3277 *
3278 * After 500ms, keep_alive() is invoked
3279 */
3280 int target_wait_state(struct target *target, enum target_state state, int ms)
3281 {
3282 int retval;
3283 int64_t then = 0, cur;
3284 bool once = true;
3285
3286 for (;;) {
3287 retval = target_poll(target);
3288 if (retval != ERROR_OK)
3289 return retval;
3290 if (target->state == state)
3291 break;
3292 cur = timeval_ms();
3293 if (once) {
3294 once = false;
3295 then = timeval_ms();
3296 LOG_DEBUG("waiting for target %s...",
3297 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3298 }
3299
3300 if (cur-then > 500)
3301 keep_alive();
3302
3303 if ((cur-then) > ms) {
3304 LOG_ERROR("timed out while waiting for target %s",
3305 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3306 return ERROR_FAIL;
3307 }
3308 }
3309
3310 return ERROR_OK;
3311 }
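/* Typical usage (sketch): request a halt and then block until the target
 * actually reaches the halted state, as the halt command handler below does:
 *
 *	target_halt(target);
 *	target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 */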
3312
3313 COMMAND_HANDLER(handle_halt_command)
3314 {
3315 LOG_DEBUG("-");
3316
3317 struct target *target = get_current_target(CMD_CTX);
3318
3319 target->verbose_halt_msg = true;
3320
3321 int retval = target_halt(target);
3322 if (retval != ERROR_OK)
3323 return retval;
3324
3325 if (CMD_ARGC == 1) {
3326 unsigned wait_local;
3327 retval = parse_uint(CMD_ARGV[0], &wait_local);
3328 if (retval != ERROR_OK)
3329 return ERROR_COMMAND_SYNTAX_ERROR;
3330 if (!wait_local)
3331 return ERROR_OK;
3332 }
3333
3334 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3335 }
3336
3337 COMMAND_HANDLER(handle_soft_reset_halt_command)
3338 {
3339 struct target *target = get_current_target(CMD_CTX);
3340
3341 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3342
3343 target_soft_reset_halt(target);
3344
3345 return ERROR_OK;
3346 }
3347
3348 COMMAND_HANDLER(handle_reset_command)
3349 {
3350 if (CMD_ARGC > 1)
3351 return ERROR_COMMAND_SYNTAX_ERROR;
3352
3353 enum target_reset_mode reset_mode = RESET_RUN;
3354 if (CMD_ARGC == 1) {
3355 const struct jim_nvp *n;
3356 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3357 if ((!n->name) || (n->value == RESET_UNKNOWN))
3358 return ERROR_COMMAND_SYNTAX_ERROR;
3359 reset_mode = n->value;
3360 }
3361
3362 /* reset *all* targets */
3363 return target_process_reset(CMD, reset_mode);
3364 }
3365
3366
3367 COMMAND_HANDLER(handle_resume_command)
3368 {
3369 int current = 1;
3370 if (CMD_ARGC > 1)
3371 return ERROR_COMMAND_SYNTAX_ERROR;
3372
3373 struct target *target = get_current_target(CMD_CTX);
3374
3375 /* with no CMD_ARGV, resume from current pc, addr = 0,
3376 * with one argument, addr = CMD_ARGV[0],
3377 * handle breakpoints, not debugging */
3378 target_addr_t addr = 0;
3379 if (CMD_ARGC == 1) {
3380 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3381 current = 0;
3382 }
3383
3384 return target_resume(target, current, addr, 1, 0);
3385 }
3386
3387 COMMAND_HANDLER(handle_step_command)
3388 {
3389 if (CMD_ARGC > 1)
3390 return ERROR_COMMAND_SYNTAX_ERROR;
3391
3392 LOG_DEBUG("-");
3393
3394 /* with no CMD_ARGV, step from current pc, addr = 0,
3395 * with one argument addr = CMD_ARGV[0],
3396 * handle breakpoints, debugging */
3397 target_addr_t addr = 0;
3398 int current_pc = 1;
3399 if (CMD_ARGC == 1) {
3400 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3401 current_pc = 0;
3402 }
3403
3404 struct target *target = get_current_target(CMD_CTX);
3405
3406 return target_step(target, current_pc, addr, 1);
3407 }
3408
3409 void target_handle_md_output(struct command_invocation *cmd,
3410 struct target *target, target_addr_t address, unsigned size,
3411 unsigned count, const uint8_t *buffer)
3412 {
3413 const unsigned line_bytecnt = 32;
3414 unsigned line_modulo = line_bytecnt / size;
3415
3416 char output[line_bytecnt * 4 + 1];
3417 unsigned output_len = 0;
3418
3419 const char *value_fmt;
3420 switch (size) {
3421 case 8:
3422 value_fmt = "%16.16"PRIx64" ";
3423 break;
3424 case 4:
3425 value_fmt = "%8.8"PRIx64" ";
3426 break;
3427 case 2:
3428 value_fmt = "%4.4"PRIx64" ";
3429 break;
3430 case 1:
3431 value_fmt = "%2.2"PRIx64" ";
3432 break;
3433 default:
3434 /* "can't happen", caller checked */
3435 LOG_ERROR("invalid memory read size: %u", size);
3436 return;
3437 }
3438
3439 for (unsigned i = 0; i < count; i++) {
3440 if (i % line_modulo == 0) {
3441 output_len += snprintf(output + output_len,
3442 sizeof(output) - output_len,
3443 TARGET_ADDR_FMT ": ",
3444 (address + (i * size)));
3445 }
3446
3447 uint64_t value = 0;
3448 const uint8_t *value_ptr = buffer + i * size;
3449 switch (size) {
3450 case 8:
3451 value = target_buffer_get_u64(target, value_ptr);
3452 break;
3453 case 4:
3454 value = target_buffer_get_u32(target, value_ptr);
3455 break;
3456 case 2:
3457 value = target_buffer_get_u16(target, value_ptr);
3458 break;
3459 case 1:
3460 value = *value_ptr;
3461 }
3462 output_len += snprintf(output + output_len,
3463 sizeof(output) - output_len,
3464 value_fmt, value);
3465
3466 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3467 command_print(cmd, "%s", output);
3468 output_len = 0;
3469 }
3470 }
3471 }
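/* Example output (illustrative values): dumping four words from 0x20000000
 * with size == 4 prints a single line of the form
 *
 *	0x20000000: deadbeef 00000001 00000002 00000003
 *
 * with at most line_bytecnt (32) bytes, i.e. eight words, per printed line.
 */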
3472
3473 COMMAND_HANDLER(handle_md_command)
3474 {
3475 if (CMD_ARGC < 1)
3476 return ERROR_COMMAND_SYNTAX_ERROR;
3477
3478 unsigned size = 0;
3479 switch (CMD_NAME[2]) {
3480 case 'd':
3481 size = 8;
3482 break;
3483 case 'w':
3484 size = 4;
3485 break;
3486 case 'h':
3487 size = 2;
3488 break;
3489 case 'b':
3490 size = 1;
3491 break;
3492 default:
3493 return ERROR_COMMAND_SYNTAX_ERROR;
3494 }
3495
3496 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3497 int (*fn)(struct target *target,
3498 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3499 if (physical) {
3500 CMD_ARGC--;
3501 CMD_ARGV++;
3502 fn = target_read_phys_memory;
3503 } else
3504 fn = target_read_memory;
3505 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3506 return ERROR_COMMAND_SYNTAX_ERROR;
3507
3508 target_addr_t address;
3509 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3510
3511 unsigned count = 1;
3512 if (CMD_ARGC == 2)
3513 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3514
3515 uint8_t *buffer = calloc(count, size);
3516 if (!buffer) {
3517 LOG_ERROR("Failed to allocate md read buffer");
3518 return ERROR_FAIL;
3519 }
3520
3521 struct target *target = get_current_target(CMD_CTX);
3522 int retval = fn(target, address, size, count, buffer);
3523 if (retval == ERROR_OK)
3524 target_handle_md_output(CMD, target, address, size, count, buffer);
3525
3526 free(buffer);
3527
3528 return retval;
3529 }
3530
3531 typedef int (*target_write_fn)(struct target *target,
3532 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3533
3534 static int target_fill_mem(struct target *target,
3535 target_addr_t address,
3536 target_write_fn fn,
3537 unsigned data_size,
3538 /* value */
3539 uint64_t b,
3540 /* count */
3541 unsigned c)
3542 {
3543 /* We have to write in reasonably large chunks to be able
3544 * to fill large memory areas with any sane speed */
3545 const unsigned chunk_size = 16384;
3546 uint8_t *target_buf = malloc(chunk_size * data_size);
3547 if (!target_buf) {
3548 LOG_ERROR("Out of memory");
3549 return ERROR_FAIL;
3550 }
3551
3552 for (unsigned i = 0; i < chunk_size; i++) {
3553 switch (data_size) {
3554 case 8:
3555 target_buffer_set_u64(target, target_buf + i * data_size, b);
3556 break;
3557 case 4:
3558 target_buffer_set_u32(target, target_buf + i * data_size, b);
3559 break;
3560 case 2:
3561 target_buffer_set_u16(target, target_buf + i * data_size, b);
3562 break;
3563 case 1:
3564 target_buffer_set_u8(target, target_buf + i * data_size, b);
3565 break;
3566 default:
3567 exit(-1);
3568 }
3569 }
3570
3571 int retval = ERROR_OK;
3572
3573 for (unsigned x = 0; x < c; x += chunk_size) {
3574 unsigned current;
3575 current = c - x;
3576 if (current > chunk_size)
3577 current = chunk_size;
3578 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3579 if (retval != ERROR_OK)
3580 break;
3581 /* avoid GDB timeouts */
3582 keep_alive();
3583 }
3584 free(target_buf);
3585
3586 return retval;
3587 }
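/* Sketch of how the mw command below drives this helper (pattern, count and
 * address are hypothetical): fill 1024 words via the virtual-address path.
 *
 *	target_fill_mem(target, 0x20000000, target_write_memory, 4,
 *		0xdeadbeef, 1024);
 *
 * The 16384-entry chunk buffer keeps the number of transport round trips low
 * while keep_alive() prevents GDB from timing out during long fills.
 */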
3588
3589
3590 COMMAND_HANDLER(handle_mw_command)
3591 {
3592 if (CMD_ARGC < 2)
3593 return ERROR_COMMAND_SYNTAX_ERROR;
3594 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3595 target_write_fn fn;
3596 if (physical) {
3597 CMD_ARGC--;
3598 CMD_ARGV++;
3599 fn = target_write_phys_memory;
3600 } else
3601 fn = target_write_memory;
3602 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3603 return ERROR_COMMAND_SYNTAX_ERROR;
3604
3605 target_addr_t address;
3606 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3607
3608 uint64_t value;
3609 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3610
3611 unsigned count = 1;
3612 if (CMD_ARGC == 3)
3613 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3614
3615 struct target *target = get_current_target(CMD_CTX);
3616 unsigned wordsize;
3617 switch (CMD_NAME[2]) {
3618 case 'd':
3619 wordsize = 8;
3620 break;
3621 case 'w':
3622 wordsize = 4;
3623 break;
3624 case 'h':
3625 wordsize = 2;
3626 break;
3627 case 'b':
3628 wordsize = 1;
3629 break;
3630 default:
3631 return ERROR_COMMAND_SYNTAX_ERROR;
3632 }
3633
3634 return target_fill_mem(target, address, fn, wordsize, value, count);
3635 }
3636
3637 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3638 target_addr_t *min_address, target_addr_t *max_address)
3639 {
3640 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3641 return ERROR_COMMAND_SYNTAX_ERROR;
3642
3643 /* a base address isn't always necessary,
3644 * default to 0x0 (i.e. don't relocate) */
3645 if (CMD_ARGC >= 2) {
3646 target_addr_t addr;
3647 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3648 image->base_address = addr;
3649 image->base_address_set = true;
3650 } else
3651 image->base_address_set = false;
3652
3653 image->start_address_set = false;
3654
3655 if (CMD_ARGC >= 4)
3656 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3657 if (CMD_ARGC == 5) {
3658 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3659 /* use size (given) to find max (required) */
3660 *max_address += *min_address;
3661 }
3662
3663 if (*min_address > *max_address)
3664 return ERROR_COMMAND_SYNTAX_ERROR;
3665
3666 return ERROR_OK;
3667 }
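/* Example invocation (file name and addresses are made up): the optional
 * arguments map to base address, file type, min_address and a size that is
 * added to min_address to form max_address:
 *
 *	load_image firmware.bin 0x20000000 bin 0x20000000 0x1000
 *
 * Sections falling outside [min_address, max_address) are clipped or skipped
 * by handle_load_image_command() below.
 */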
3668
3669 COMMAND_HANDLER(handle_load_image_command)
3670 {
3671 uint8_t *buffer;
3672 size_t buf_cnt;
3673 uint32_t image_size;
3674 target_addr_t min_address = 0;
3675 target_addr_t max_address = -1;
3676 struct image image;
3677
3678 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3679 &image, &min_address, &max_address);
3680 if (retval != ERROR_OK)
3681 return retval;
3682
3683 struct target *target = get_current_target(CMD_CTX);
3684
3685 struct duration bench;
3686 duration_start(&bench);
3687
3688 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3689 return ERROR_FAIL;
3690
3691 image_size = 0x0;
3692 retval = ERROR_OK;
3693 for (unsigned int i = 0; i < image.num_sections; i++) {
3694 buffer = malloc(image.sections[i].size);
3695 if (!buffer) {
3696 command_print(CMD,
3697 "error allocating buffer for section (%d bytes)",
3698 (int)(image.sections[i].size));
3699 retval = ERROR_FAIL;
3700 break;
3701 }
3702
3703 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3704 if (retval != ERROR_OK) {
3705 free(buffer);
3706 break;
3707 }
3708
3709 uint32_t offset = 0;
3710 uint32_t length = buf_cnt;
3711
3712 /* DANGER!!! beware of unsigned comparison here!!! */
3713
3714 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3715 (image.sections[i].base_address < max_address)) {
3716
3717 if (image.sections[i].base_address < min_address) {
3718 /* clip addresses below */
3719 offset += min_address-image.sections[i].base_address;
3720 length -= offset;
3721 }
3722
3723 if (image.sections[i].base_address + buf_cnt > max_address)
3724 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3725
3726 retval = target_write_buffer(target,
3727 image.sections[i].base_address + offset, length, buffer + offset);
3728 if (retval != ERROR_OK) {
3729 free(buffer);
3730 break;
3731 }
3732 image_size += length;
3733 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3734 (unsigned int)length,
3735 image.sections[i].base_address + offset);
3736 }
3737
3738 free(buffer);
3739 }
3740
3741 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3742 command_print(CMD, "downloaded %" PRIu32 " bytes "
3743 "in %fs (%0.3f KiB/s)", image_size,
3744 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3745 }
3746
3747 image_close(&image);
3748
3749 return retval;
3750
3751 }
3752
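/* Usage:
 *   dump_image filename address size
 * Reads 'size' bytes of target memory starting at 'address', in chunks of at
 * most 4 KiB, and writes them to 'filename'. */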
3753 COMMAND_HANDLER(handle_dump_image_command)
3754 {
3755 struct fileio *fileio;
3756 uint8_t *buffer;
3757 int retval, retvaltemp;
3758 target_addr_t address, size;
3759 struct duration bench;
3760 struct target *target = get_current_target(CMD_CTX);
3761
3762 if (CMD_ARGC != 3)
3763 return ERROR_COMMAND_SYNTAX_ERROR;
3764
3765 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3766 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3767
3768 uint32_t buf_size = (size > 4096) ? 4096 : size;
3769 buffer = malloc(buf_size);
3770 if (!buffer)
3771 return ERROR_FAIL;
3772
3773 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3774 if (retval != ERROR_OK) {
3775 free(buffer);
3776 return retval;
3777 }
3778
3779 duration_start(&bench);
3780
3781 while (size > 0) {
3782 size_t size_written;
3783 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3784 retval = target_read_buffer(target, address, this_run_size, buffer);
3785 if (retval != ERROR_OK)
3786 break;
3787
3788 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3789 if (retval != ERROR_OK)
3790 break;
3791
3792 size -= this_run_size;
3793 address += this_run_size;
3794 }
3795
3796 free(buffer);
3797
3798 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3799 size_t filesize;
3800 retval = fileio_size(fileio, &filesize);
3801 if (retval != ERROR_OK)
3802 return retval;
3803 command_print(CMD,
3804 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3805 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3806 }
3807
3808 retvaltemp = fileio_close(fileio);
3809 if (retvaltemp != ERROR_OK)
3810 return retvaltemp;
3811
3812 return retval;
3813 }
3814
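/* verify_image, verify_image_checksum and test_image share one helper; the
 * mode selects how much checking is done:
 *   IMAGE_TEST          - read the image file only, just report section layout
 *   IMAGE_VERIFY        - CRC compare, falling back to a byte-wise diff
 *   IMAGE_CHECKSUM_ONLY - CRC compare only, fail on the first mismatch */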
3815 enum verify_mode {
3816 IMAGE_TEST = 0,
3817 IMAGE_VERIFY = 1,
3818 IMAGE_CHECKSUM_ONLY = 2
3819 };
3820
3821 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3822 {
3823 uint8_t *buffer;
3824 size_t buf_cnt;
3825 uint32_t image_size;
3826 int retval;
3827 uint32_t checksum = 0;
3828 uint32_t mem_checksum = 0;
3829
3830 struct image image;
3831
3832 struct target *target = get_current_target(CMD_CTX);
3833
3834 if (CMD_ARGC < 1)
3835 return ERROR_COMMAND_SYNTAX_ERROR;
3836
3837 if (!target) {
3838 LOG_ERROR("no target selected");
3839 return ERROR_FAIL;
3840 }
3841
3842 struct duration bench;
3843 duration_start(&bench);
3844
3845 if (CMD_ARGC >= 2) {
3846 target_addr_t addr;
3847 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3848 image.base_address = addr;
3849 image.base_address_set = true;
3850 } else {
3851 image.base_address_set = false;
3852 image.base_address = 0x0;
3853 }
3854
3855 image.start_address_set = false;
3856
3857 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3858 if (retval != ERROR_OK)
3859 return retval;
3860
3861 image_size = 0x0;
3862 int diffs = 0;
3863 retval = ERROR_OK;
3864 for (unsigned int i = 0; i < image.num_sections; i++) {
3865 buffer = malloc(image.sections[i].size);
3866 if (!buffer) {
3867 command_print(CMD,
3868 "error allocating buffer for section (%" PRIu32 " bytes)",
3869 image.sections[i].size);
			retval = ERROR_FAIL;
3870 			break;
3871 }
3872 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3873 if (retval != ERROR_OK) {
3874 free(buffer);
3875 break;
3876 }
3877
3878 if (verify >= IMAGE_VERIFY) {
3879 /* calculate checksum of image */
3880 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3881 if (retval != ERROR_OK) {
3882 free(buffer);
3883 break;
3884 }
3885
3886 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3887 if (retval != ERROR_OK) {
3888 free(buffer);
3889 break;
3890 }
3891 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3892 LOG_ERROR("checksum mismatch");
3893 free(buffer);
3894 retval = ERROR_FAIL;
3895 goto done;
3896 }
3897 if (checksum != mem_checksum) {
3898 /* failed crc checksum, fall back to a binary compare */
3899 uint8_t *data;
3900
3901 if (diffs == 0)
3902 LOG_ERROR("checksum mismatch - attempting binary compare");
3903
3904 				data = malloc(buf_cnt);
				if (!data) {
					free(buffer);
					retval = ERROR_FAIL;
					goto done;
				}
3905 
3906 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3907 if (retval == ERROR_OK) {
3908 uint32_t t;
3909 for (t = 0; t < buf_cnt; t++) {
3910 if (data[t] != buffer[t]) {
3911 command_print(CMD,
3912 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3913 diffs,
3914 (unsigned)(t + image.sections[i].base_address),
3915 data[t],
3916 buffer[t]);
3917 if (diffs++ >= 127) {
3918 command_print(CMD, "More than 128 errors, the rest are not printed.");
3919 free(data);
3920 free(buffer);
3921 goto done;
3922 }
3923 }
3924 keep_alive();
3925 }
3926 }
3927 free(data);
3928 }
3929 } else {
3930 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3931 image.sections[i].base_address,
3932 buf_cnt);
3933 }
3934
3935 free(buffer);
3936 image_size += buf_cnt;
3937 }
3938 if (diffs > 0)
3939 command_print(CMD, "No more differences found.");
3940 done:
3941 if (diffs > 0)
3942 retval = ERROR_FAIL;
3943 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3944 command_print(CMD, "verified %" PRIu32 " bytes "
3945 "in %fs (%0.3f KiB/s)", image_size,
3946 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3947 }
3948
3949 image_close(&image);
3950
3951 return retval;
3952 }
3953
3954 COMMAND_HANDLER(handle_verify_image_checksum_command)
3955 {
3956 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3957 }
3958
3959 COMMAND_HANDLER(handle_verify_image_command)
3960 {
3961 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3962 }
3963
3964 COMMAND_HANDLER(handle_test_image_command)
3965 {
3966 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3967 }
3968
3969 static int handle_bp_command_list(struct command_invocation *cmd)
3970 {
3971 struct target *target = get_current_target(cmd->ctx);
3972 struct breakpoint *breakpoint = target->breakpoints;
3973 while (breakpoint) {
3974 if (breakpoint->type == BKPT_SOFT) {
3975 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3976 breakpoint->length);
3977 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
3978 breakpoint->address,
3979 breakpoint->length,
3980 buf);
3981 free(buf);
3982 } else {
3983 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3984 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
3985 breakpoint->asid,
3986 breakpoint->length, breakpoint->number);
3987 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3988 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3989 breakpoint->address,
3990 breakpoint->length, breakpoint->number);
3991 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3992 breakpoint->asid);
3993 } else
3994 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3995 breakpoint->address,
3996 breakpoint->length, breakpoint->number);
3997 }
3998
3999 breakpoint = breakpoint->next;
4000 }
4001 return ERROR_OK;
4002 }
4003
4004 static int handle_bp_command_set(struct command_invocation *cmd,
4005 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
4006 {
4007 struct target *target = get_current_target(cmd->ctx);
4008 int retval;
4009
4010 if (asid == 0) {
4011 retval = breakpoint_add(target, addr, length, hw);
4012 /* error is always logged in breakpoint_add(), do not print it again */
4013 if (retval == ERROR_OK)
4014 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4015
4016 } else if (addr == 0) {
4017 if (!target->type->add_context_breakpoint) {
4018 LOG_ERROR("Context breakpoint not available");
4019 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4020 }
4021 retval = context_breakpoint_add(target, asid, length, hw);
4022 /* error is always logged in context_breakpoint_add(), do not print it again */
4023 if (retval == ERROR_OK)
4024 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4025
4026 } else {
4027 if (!target->type->add_hybrid_breakpoint) {
4028 LOG_ERROR("Hybrid breakpoint not available");
4029 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4030 }
4031 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4032 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4033 if (retval == ERROR_OK)
4034 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4035 }
4036 return retval;
4037 }
4038
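/* Breakpoint command. With no arguments the current breakpoints are listed;
 * otherwise, matching the cases below:
 *   bp <address> <length>              software breakpoint
 *   bp <address> <length> hw           hardware breakpoint
 *   bp <asid> <length> hw_ctx          context (ASID) breakpoint
 *   bp <address> <asid> <length> hw    hybrid breakpoint */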
4039 COMMAND_HANDLER(handle_bp_command)
4040 {
4041 target_addr_t addr;
4042 uint32_t asid;
4043 uint32_t length;
4044 int hw = BKPT_SOFT;
4045
4046 switch (CMD_ARGC) {
4047 case 0:
4048 return handle_bp_command_list(CMD);
4049
4050 case 2:
4051 asid = 0;
4052 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4053 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4054 return handle_bp_command_set(CMD, addr, asid, length, hw);
4055
4056 case 3:
4057 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4058 hw = BKPT_HARD;
4059 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4060 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4061 asid = 0;
4062 return handle_bp_command_set(CMD, addr, asid, length, hw);
4063 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4064 hw = BKPT_HARD;
4065 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4066 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4067 addr = 0;
4068 return handle_bp_command_set(CMD, addr, asid, length, hw);
4069 }
4070 /* fallthrough */
4071 case 4:
4072 hw = BKPT_HARD;
4073 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4074 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4075 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4076 return handle_bp_command_set(CMD, addr, asid, length, hw);
4077
4078 default:
4079 return ERROR_COMMAND_SYNTAX_ERROR;
4080 }
4081 }
4082
4083 COMMAND_HANDLER(handle_rbp_command)
4084 {
4085 if (CMD_ARGC != 1)
4086 return ERROR_COMMAND_SYNTAX_ERROR;
4087
4088 struct target *target = get_current_target(CMD_CTX);
4089
4090 if (!strcmp(CMD_ARGV[0], "all")) {
4091 breakpoint_remove_all(target);
4092 } else {
4093 target_addr_t addr;
4094 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4095
4096 breakpoint_remove(target, addr);
4097 }
4098
4099 return ERROR_OK;
4100 }
4101
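/* Watchpoint command. With no arguments the current watchpoints are listed;
 * otherwise:
 *   wp <address> <length> [r|w|a [value [mask]]]
 * where 'r'/'w'/'a' selects read, write or access (the default) and the
 * optional value/mask pair requests data value matching. */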
4102 COMMAND_HANDLER(handle_wp_command)
4103 {
4104 struct target *target = get_current_target(CMD_CTX);
4105
4106 if (CMD_ARGC == 0) {
4107 struct watchpoint *watchpoint = target->watchpoints;
4108
4109 while (watchpoint) {
4110 command_print(CMD, "address: " TARGET_ADDR_FMT
4111 ", len: 0x%8.8" PRIx32
4112 ", r/w/a: %i, value: 0x%8.8" PRIx32
4113 ", mask: 0x%8.8" PRIx32,
4114 watchpoint->address,
4115 watchpoint->length,
4116 (int)watchpoint->rw,
4117 watchpoint->value,
4118 watchpoint->mask);
4119 watchpoint = watchpoint->next;
4120 }
4121 return ERROR_OK;
4122 }
4123
4124 enum watchpoint_rw type = WPT_ACCESS;
4125 target_addr_t addr = 0;
4126 uint32_t length = 0;
4127 uint32_t data_value = 0x0;
4128 uint32_t data_mask = 0xffffffff;
4129
4130 switch (CMD_ARGC) {
4131 case 5:
4132 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4133 /* fall through */
4134 case 4:
4135 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4136 /* fall through */
4137 case 3:
4138 switch (CMD_ARGV[2][0]) {
4139 case 'r':
4140 type = WPT_READ;
4141 break;
4142 case 'w':
4143 type = WPT_WRITE;
4144 break;
4145 case 'a':
4146 type = WPT_ACCESS;
4147 break;
4148 default:
4149 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4150 return ERROR_COMMAND_SYNTAX_ERROR;
4151 }
4152 /* fall through */
4153 case 2:
4154 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4155 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4156 break;
4157
4158 default:
4159 return ERROR_COMMAND_SYNTAX_ERROR;
4160 }
4161
4162 int retval = watchpoint_add(target, addr, length, type,
4163 data_value, data_mask);
4164 if (retval != ERROR_OK)
4165 LOG_ERROR("Failure setting watchpoints");
4166
4167 return retval;
4168 }
4169
4170 COMMAND_HANDLER(handle_rwp_command)
4171 {
4172 if (CMD_ARGC != 1)
4173 return ERROR_COMMAND_SYNTAX_ERROR;
4174
4175 target_addr_t addr;
4176 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4177
4178 struct target *target = get_current_target(CMD_CTX);
4179 watchpoint_remove(target, addr);
4180
4181 return ERROR_OK;
4182 }
4183
4184 /**
4185 * Translate a virtual address to a physical address.
4186 *
4187 * The low-level target implementation must have logged a detailed error
4188 * which is forwarded to telnet/GDB session.
4189 */
4190 COMMAND_HANDLER(handle_virt2phys_command)
4191 {
4192 if (CMD_ARGC != 1)
4193 return ERROR_COMMAND_SYNTAX_ERROR;
4194
4195 target_addr_t va;
4196 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4197 target_addr_t pa;
4198
4199 struct target *target = get_current_target(CMD_CTX);
4200 int retval = target->type->virt2phys(target, va, &pa);
4201 if (retval == ERROR_OK)
4202 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4203
4204 return retval;
4205 }
4206
4207 static void write_data(FILE *f, const void *data, size_t len)
4208 {
4209 size_t written = fwrite(data, 1, len, f);
4210 if (written != len)
4211 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4212 }
4213
4214 static void write_long(FILE *f, int l, struct target *target)
4215 {
4216 uint8_t val[4];
4217
4218 target_buffer_set_u32(target, val, l);
4219 write_data(f, val, 4);
4220 }
4221
4222 static void write_string(FILE *f, char *s)
4223 {
4224 write_data(f, s, strlen(s));
4225 }
4226
4227 typedef unsigned char UNIT[2]; /* unit of profiling */
4228
4229 /* Dump a gmon.out histogram file. */
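/* The layout written below follows the BSD gmon format: the "gmon" magic and
 * version header, one GMON_TAG_TIME_HIST record (low_pc, high_pc, bucket
 * count, sample rate, dimension "seconds"/"s"), then one 16-bit counter per
 * bucket, saturated at 65535. */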
4230 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4231 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4232 {
4233 uint32_t i;
4234 FILE *f = fopen(filename, "w");
4235 if (!f)
4236 return;
4237 write_string(f, "gmon");
4238 write_long(f, 0x00000001, target); /* Version */
4239 write_long(f, 0, target); /* padding */
4240 write_long(f, 0, target); /* padding */
4241 write_long(f, 0, target); /* padding */
4242
4243 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4244 write_data(f, &zero, 1);
4245
4246 /* figure out bucket size */
4247 uint32_t min;
4248 uint32_t max;
4249 if (with_range) {
4250 min = start_address;
4251 max = end_address;
4252 } else {
4253 min = samples[0];
4254 max = samples[0];
4255 for (i = 0; i < sample_num; i++) {
4256 if (min > samples[i])
4257 min = samples[i];
4258 if (max < samples[i])
4259 max = samples[i];
4260 }
4261
4262 /* max should be (largest sample + 1)
4263 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4264 max++;
4265 }
4266
4267 int address_space = max - min;
4268 assert(address_space >= 2);
4269
4270 /* FIXME: What is the reasonable number of buckets?
4271 * The profiling result will be more accurate if there are enough buckets. */
4272 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4273 uint32_t num_buckets = address_space / sizeof(UNIT);
4274 if (num_buckets > max_buckets)
4275 num_buckets = max_buckets;
4276 int *buckets = malloc(sizeof(int) * num_buckets);
4277 if (!buckets) {
4278 fclose(f);
4279 return;
4280 }
4281 memset(buckets, 0, sizeof(int) * num_buckets);
4282 for (i = 0; i < sample_num; i++) {
4283 uint32_t address = samples[i];
4284
4285 if ((address < min) || (max <= address))
4286 continue;
4287
4288 long long a = address - min;
4289 long long b = num_buckets;
4290 long long c = address_space;
4291 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4292 buckets[index_t]++;
4293 }
4294
4295 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4296 write_long(f, min, target); /* low_pc */
4297 write_long(f, max, target); /* high_pc */
4298 write_long(f, num_buckets, target); /* # of buckets */
4299 float sample_rate = sample_num / (duration_ms / 1000.0);
4300 write_long(f, sample_rate, target);
4301 write_string(f, "seconds");
4302 for (i = 0; i < (15-strlen("seconds")); i++)
4303 write_data(f, &zero, 1);
4304 write_string(f, "s");
4305
4306 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4307
4308 char *data = malloc(2 * num_buckets);
4309 if (data) {
4310 for (i = 0; i < num_buckets; i++) {
4311 int val;
4312 val = buckets[i];
4313 if (val > 65535)
4314 val = 65535;
4315 data[i * 2] = val&0xff;
4316 data[i * 2 + 1] = (val >> 8) & 0xff;
4317 }
4318 free(buckets);
4319 write_data(f, data, num_buckets * 2);
4320 free(data);
4321 } else
4322 free(buckets);
4323
4324 fclose(f);
4325 }
4326
4327 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4328 * which will be used as a random sampling of PC */
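/* Typical usage (the first argument is passed straight to target_profiling(),
 * the second names the gmon.out file, the optional pair limits the sampled
 * address range):
 *   profile 30 gmon.out
 *   profile 30 gmon.out 0x08000000 0x08010000 */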
4329 COMMAND_HANDLER(handle_profile_command)
4330 {
4331 struct target *target = get_current_target(CMD_CTX);
4332
4333 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4334 return ERROR_COMMAND_SYNTAX_ERROR;
4335
4336 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4337 uint32_t offset;
4338 uint32_t num_of_samples;
4339 int retval = ERROR_OK;
4340 bool halted_before_profiling = target->state == TARGET_HALTED;
4341
4342 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4343
4344 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4345 if (!samples) {
4346 LOG_ERROR("No memory to store samples.");
4347 return ERROR_FAIL;
4348 }
4349
4350 uint64_t timestart_ms = timeval_ms();
4351 /**
4352 * Some cores let us sample the PC without the
4353 * annoying halt/resume step; for example, ARMv7 PCSR.
4354 * Provide a way to use that more efficient mechanism.
4355 */
4356 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4357 &num_of_samples, offset);
4358 if (retval != ERROR_OK) {
4359 free(samples);
4360 return retval;
4361 }
4362 uint32_t duration_ms = timeval_ms() - timestart_ms;
4363
4364 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4365
4366 retval = target_poll(target);
4367 if (retval != ERROR_OK) {
4368 free(samples);
4369 return retval;
4370 }
4371
4372 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4373 /* The target was halted before we started and is running now. Halt it,
4374 * for consistency. */
4375 retval = target_halt(target);
4376 if (retval != ERROR_OK) {
4377 free(samples);
4378 return retval;
4379 }
4380 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4381 /* The target was running before we started and is halted now. Resume
4382 * it, for consistency. */
4383 retval = target_resume(target, 1, 0, 0, 0);
4384 if (retval != ERROR_OK) {
4385 free(samples);
4386 return retval;
4387 }
4388 }
4389
4390 retval = target_poll(target);
4391 if (retval != ERROR_OK) {
4392 free(samples);
4393 return retval;
4394 }
4395
4396 uint32_t start_address = 0;
4397 uint32_t end_address = 0;
4398 bool with_range = false;
4399 if (CMD_ARGC == 4) {
4400 with_range = true;
4401 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4402 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4403 }
4404
4405 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4406 with_range, start_address, end_address, target, duration_ms);
4407 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4408
4409 free(samples);
4410 return retval;
4411 }
4412
4413 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4414 {
4415 char *namebuf;
4416 Jim_Obj *obj_name, *obj_val;
4417 int result;
4418
4419 namebuf = alloc_printf("%s(%d)", varname, idx);
4420 if (!namebuf)
4421 return JIM_ERR;
4422
4423 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4424 jim_wide wide_val = val;
4425 obj_val = Jim_NewWideObj(interp, wide_val);
4426 if (!obj_name || !obj_val) {
4427 free(namebuf);
4428 return JIM_ERR;
4429 }
4430
4431 Jim_IncrRefCount(obj_name);
4432 Jim_IncrRefCount(obj_val);
4433 result = Jim_SetVariable(interp, obj_name, obj_val);
4434 Jim_DecrRefCount(interp, obj_name);
4435 Jim_DecrRefCount(interp, obj_val);
4436 free(namebuf);
4437 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4438 return result;
4439 }
4440
4441 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4442 {
4443 int e;
4444
4445 LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");
4446
4447 /* argv[0] = name of array to receive the data
4448 * argv[1] = desired element width in bits
4449 * argv[2] = memory address
4450 * argv[3] = count of times to read
4451 * argv[4] = optional "phys"
4452 */
4453 if (argc < 4 || argc > 5) {
4454 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4455 return JIM_ERR;
4456 }
4457
4458 /* Arg 0: Name of the array variable */
4459 const char *varname = Jim_GetString(argv[0], NULL);
4460
4461 /* Arg 1: Bit width of one element */
4462 long l;
4463 e = Jim_GetLong(interp, argv[1], &l);
4464 if (e != JIM_OK)
4465 return e;
4466 const unsigned int width_bits = l;
4467
4468 if (width_bits != 8 &&
4469 width_bits != 16 &&
4470 width_bits != 32 &&
4471 width_bits != 64) {
4472 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4473 Jim_AppendStrings(interp, Jim_GetResult(interp),
4474 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4475 return JIM_ERR;
4476 }
4477 const unsigned int width = width_bits / 8;
4478
4479 /* Arg 2: Memory address */
4480 jim_wide wide_addr;
4481 e = Jim_GetWide(interp, argv[2], &wide_addr);
4482 if (e != JIM_OK)
4483 return e;
4484 target_addr_t addr = (target_addr_t)wide_addr;
4485
4486 /* Arg 3: Number of elements to read */
4487 e = Jim_GetLong(interp, argv[3], &l);
4488 if (e != JIM_OK)
4489 return e;
4490 size_t len = l;
4491
4492 /* Arg 4: phys */
4493 bool is_phys = false;
4494 if (argc > 4) {
4495 int str_len = 0;
4496 const char *phys = Jim_GetString(argv[4], &str_len);
4497 if (!strncmp(phys, "phys", str_len))
4498 is_phys = true;
4499 else
4500 return JIM_ERR;
4501 }
4502
4503 /* Argument checks */
4504 if (len == 0) {
4505 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4506 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4507 return JIM_ERR;
4508 }
4509 if ((addr + (len * width)) < addr) {
4510 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4511 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4512 return JIM_ERR;
4513 }
4514 if (len > 65536) {
4515 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4516 Jim_AppendStrings(interp, Jim_GetResult(interp),
4517 "mem2array: too large read request, exceeds 64K items", NULL);
4518 return JIM_ERR;
4519 }
4520
4521 if ((width == 1) ||
4522 ((width == 2) && ((addr & 1) == 0)) ||
4523 ((width == 4) && ((addr & 3) == 0)) ||
4524 ((width == 8) && ((addr & 7) == 0))) {
4525 /* alignment correct */
4526 } else {
4527 char buf[100];
4528 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4529 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4530 addr,
4531 width);
4532 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4533 return JIM_ERR;
4534 }
4535
4536 /* Transfer loop */
4537
4538 /* index counter */
4539 size_t idx = 0;
4540
4541 const size_t buffersize = 4096;
4542 uint8_t *buffer = malloc(buffersize);
4543 if (!buffer)
4544 return JIM_ERR;
4545
4546 /* assume ok */
4547 e = JIM_OK;
4548 while (len) {
4549 /* Slurp... in buffer size chunks */
4550 const unsigned int max_chunk_len = buffersize / width;
4551 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4552
4553 int retval;
4554 if (is_phys)
4555 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4556 else
4557 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4558 if (retval != ERROR_OK) {
4559 /* BOO !*/
4560 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4561 addr,
4562 width,
4563 chunk_len);
4564 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4565 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4566 e = JIM_ERR;
4567 break;
4568 } else {
4569 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4570 uint64_t v = 0;
4571 switch (width) {
4572 case 8:
4573 v = target_buffer_get_u64(target, &buffer[i*width]);
4574 break;
4575 case 4:
4576 v = target_buffer_get_u32(target, &buffer[i*width]);
4577 break;
4578 case 2:
4579 v = target_buffer_get_u16(target, &buffer[i*width]);
4580 break;
4581 case 1:
4582 v = buffer[i] & 0x0ff;
4583 break;
4584 }
4585 new_u64_array_element(interp, varname, idx, v);
4586 }
4587 len -= chunk_len;
4588 addr += chunk_len * width;
4589 }
4590 }
4591
4592 free(buffer);
4593
4594 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4595
4596 return e;
4597 }
4598
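/* Jim handler for the 'read_memory' command, e.g. from a Tcl script:
 *   set words [read_memory 0x20000000 32 4]
 * returns a Tcl list of hex strings (one per element read); append 'phys' to
 * read physical memory, bypassing address translation. */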
4599 static int target_jim_read_memory(Jim_Interp *interp, int argc,
4600 Jim_Obj * const *argv)
4601 {
4602 /*
4603 * argv[1] = memory address
4604 * argv[2] = desired element width in bits
4605 * argv[3] = number of elements to read
4606 * argv[4] = optional "phys"
4607 */
4608
4609 if (argc < 4 || argc > 5) {
4610 Jim_WrongNumArgs(interp, 1, argv, "address width count ['phys']");
4611 return JIM_ERR;
4612 }
4613
4614 /* Arg 1: Memory address. */
4615 jim_wide wide_addr;
4616 int e;
4617 e = Jim_GetWide(interp, argv[1], &wide_addr);
4618
4619 if (e != JIM_OK)
4620 return e;
4621
4622 target_addr_t addr = (target_addr_t)wide_addr;
4623
4624 /* Arg 2: Bit width of one element. */
4625 long l;
4626 e = Jim_GetLong(interp, argv[2], &l);
4627
4628 if (e != JIM_OK)
4629 return e;
4630
4631 const unsigned int width_bits = l;
4632
4633 /* Arg 3: Number of elements to read. */
4634 e = Jim_GetLong(interp, argv[3], &l);
4635
4636 if (e != JIM_OK)
4637 return e;
4638
4639 size_t count = l;
4640
4641 /* Arg 4: Optional 'phys'. */
4642 bool is_phys = false;
4643
4644 if (argc > 4) {
4645 const char *phys = Jim_GetString(argv[4], NULL);
4646
4647 if (strcmp(phys, "phys")) {
4648 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4649 return JIM_ERR;
4650 }
4651
4652 is_phys = true;
4653 }
4654
4655 switch (width_bits) {
4656 case 8:
4657 case 16:
4658 case 32:
4659 case 64:
4660 break;
4661 default:
4662 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4663 return JIM_ERR;
4664 }
4665
4666 const unsigned int width = width_bits / 8;
4667
4668 if ((addr + (count * width)) < addr) {
4669 Jim_SetResultString(interp, "read_memory: addr + count wraps to zero", -1);
4670 return JIM_ERR;
4671 }
4672
4673 if (count > 65536) {
4674 		Jim_SetResultString(interp, "read_memory: too large read request, exceeds 64K elements", -1);
4675 return JIM_ERR;
4676 }
4677
4678 struct command_context *cmd_ctx = current_command_context(interp);
4679 assert(cmd_ctx != NULL);
4680 struct target *target = get_current_target(cmd_ctx);
4681
4682 const size_t buffersize = 4096;
4683 uint8_t *buffer = malloc(buffersize);
4684
4685 if (!buffer) {
4686 LOG_ERROR("Failed to allocate memory");
4687 return JIM_ERR;
4688 }
4689
4690 Jim_Obj *result_list = Jim_NewListObj(interp, NULL, 0);
4691 Jim_IncrRefCount(result_list);
4692
4693 while (count > 0) {
4694 const unsigned int max_chunk_len = buffersize / width;
4695 const size_t chunk_len = MIN(count, max_chunk_len);
4696
4697 int retval;
4698
4699 if (is_phys)
4700 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4701 else
4702 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4703
4704 if (retval != ERROR_OK) {
4705 LOG_ERROR("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4706 addr, width_bits, chunk_len);
4707 Jim_SetResultString(interp, "read_memory: failed to read memory", -1);
4708 e = JIM_ERR;
4709 break;
4710 }
4711
4712 for (size_t i = 0; i < chunk_len ; i++) {
4713 uint64_t v = 0;
4714
4715 switch (width) {
4716 case 8:
4717 v = target_buffer_get_u64(target, &buffer[i * width]);
4718 break;
4719 case 4:
4720 v = target_buffer_get_u32(target, &buffer[i * width]);
4721 break;
4722 case 2:
4723 v = target_buffer_get_u16(target, &buffer[i * width]);
4724 break;
4725 case 1:
4726 v = buffer[i];
4727 break;
4728 }
4729
4730 char value_buf[11];
4731 snprintf(value_buf, sizeof(value_buf), "0x%" PRIx64, v);
4732
4733 Jim_ListAppendElement(interp, result_list,
4734 Jim_NewStringObj(interp, value_buf, -1));
4735 }
4736
4737 count -= chunk_len;
4738 addr += chunk_len * width;
4739 }
4740
4741 free(buffer);
4742
4743 if (e != JIM_OK) {
4744 Jim_DecrRefCount(interp, result_list);
4745 return e;
4746 }
4747
4748 Jim_SetResult(interp, result_list);
4749 Jim_DecrRefCount(interp, result_list);
4750
4751 return JIM_OK;
4752 }
4753
4754 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4755 {
4756 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4757 if (!namebuf)
4758 return JIM_ERR;
4759
4760 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4761 if (!obj_name) {
4762 free(namebuf);
4763 return JIM_ERR;
4764 }
4765
4766 Jim_IncrRefCount(obj_name);
4767 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4768 Jim_DecrRefCount(interp, obj_name);
4769 free(namebuf);
4770 if (!obj_val)
4771 return JIM_ERR;
4772
4773 jim_wide wide_val;
4774 int result = Jim_GetWide(interp, obj_val, &wide_val);
4775 *val = wide_val;
4776 return result;
4777 }
4778
4779 static int target_array2mem(Jim_Interp *interp, struct target *target,
4780 int argc, Jim_Obj *const *argv)
4781 {
4782 int e;
4783
4784 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4785
4786 /* argv[0] = name of array from which to read the data
4787 * argv[1] = desired element width in bits
4788 * argv[2] = memory address
4789 * argv[3] = number of elements to write
4790 * argv[4] = optional "phys"
4791 */
4792 if (argc < 4 || argc > 5) {
4793 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4794 return JIM_ERR;
4795 }
4796
4797 /* Arg 0: Name of the array variable */
4798 const char *varname = Jim_GetString(argv[0], NULL);
4799
4800 /* Arg 1: Bit width of one element */
4801 long l;
4802 e = Jim_GetLong(interp, argv[1], &l);
4803 if (e != JIM_OK)
4804 return e;
4805 const unsigned int width_bits = l;
4806
4807 if (width_bits != 8 &&
4808 width_bits != 16 &&
4809 width_bits != 32 &&
4810 width_bits != 64) {
4811 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4812 Jim_AppendStrings(interp, Jim_GetResult(interp),
4813 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4814 return JIM_ERR;
4815 }
4816 const unsigned int width = width_bits / 8;
4817
4818 /* Arg 2: Memory address */
4819 jim_wide wide_addr;
4820 e = Jim_GetWide(interp, argv[2], &wide_addr);
4821 if (e != JIM_OK)
4822 return e;
4823 target_addr_t addr = (target_addr_t)wide_addr;
4824
4825 /* Arg 3: Number of elements to write */
4826 e = Jim_GetLong(interp, argv[3], &l);
4827 if (e != JIM_OK)
4828 return e;
4829 size_t len = l;
4830
4831 /* Arg 4: Phys */
4832 bool is_phys = false;
4833 if (argc > 4) {
4834 int str_len = 0;
4835 const char *phys = Jim_GetString(argv[4], &str_len);
4836 if (!strncmp(phys, "phys", str_len))
4837 is_phys = true;
4838 else
4839 return JIM_ERR;
4840 }
4841
4842 /* Argument checks */
4843 if (len == 0) {
4844 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4845 Jim_AppendStrings(interp, Jim_GetResult(interp),
4846 				"array2mem: zero width write?", NULL);
4847 return JIM_ERR;
4848 }
4849
4850 if ((addr + (len * width)) < addr) {
4851 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4852 Jim_AppendStrings(interp, Jim_GetResult(interp),
4853 "array2mem: addr + len - wraps to zero?", NULL);
4854 return JIM_ERR;
4855 }
4856
4857 if (len > 65536) {
4858 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4859 Jim_AppendStrings(interp, Jim_GetResult(interp),
4860 "array2mem: too large memory write request, exceeds 64K items", NULL);
4861 return JIM_ERR;
4862 }
4863
4864 if ((width == 1) ||
4865 ((width == 2) && ((addr & 1) == 0)) ||
4866 ((width == 4) && ((addr & 3) == 0)) ||
4867 ((width == 8) && ((addr & 7) == 0))) {
4868 /* alignment correct */
4869 } else {
4870 char buf[100];
4871 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4872 		sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte writes",
4873 addr,
4874 width);
4875 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4876 return JIM_ERR;
4877 }
4878
4879 /* Transfer loop */
4880
4881 /* assume ok */
4882 e = JIM_OK;
4883
4884 const size_t buffersize = 4096;
4885 uint8_t *buffer = malloc(buffersize);
4886 if (!buffer)
4887 return JIM_ERR;
4888
4889 /* index counter */
4890 size_t idx = 0;
4891
4892 while (len) {
4893 /* Slurp... in buffer size chunks */
4894 const unsigned int max_chunk_len = buffersize / width;
4895
4896 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4897
4898 /* Fill the buffer */
4899 for (size_t i = 0; i < chunk_len; i++, idx++) {
4900 uint64_t v = 0;
4901 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4902 free(buffer);
4903 return JIM_ERR;
4904 }
4905 switch (width) {
4906 case 8:
4907 target_buffer_set_u64(target, &buffer[i * width], v);
4908 break;
4909 case 4:
4910 target_buffer_set_u32(target, &buffer[i * width], v);
4911 break;
4912 case 2:
4913 target_buffer_set_u16(target, &buffer[i * width], v);
4914 break;
4915 case 1:
4916 buffer[i] = v & 0x0ff;
4917 break;
4918 }
4919 }
4920 len -= chunk_len;
4921
4922 /* Write the buffer to memory */
4923 int retval;
4924 if (is_phys)
4925 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4926 else
4927 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4928 if (retval != ERROR_OK) {
4929 /* BOO !*/
4930 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4931 addr,
4932 width,
4933 chunk_len);
4934 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4935 			Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4936 e = JIM_ERR;
4937 break;
4938 }
4939 addr += chunk_len * width;
4940 }
4941
4942 free(buffer);
4943
4944 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4945
4946 return e;
4947 }
4948
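/* Jim handler for the 'write_memory' command, e.g.:
 *   write_memory 0x20000000 32 {0xdeadbeef 0xcafebabe}
 * writes the listed values as 32-bit elements starting at the given address;
 * append 'phys' to write physical memory. */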
4949 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4950 Jim_Obj * const *argv)
4951 {
4952 /*
4953 * argv[1] = memory address
4954 * argv[2] = desired element width in bits
4955 * argv[3] = list of data to write
4956 * argv[4] = optional "phys"
4957 */
4958
4959 if (argc < 4 || argc > 5) {
4960 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4961 return JIM_ERR;
4962 }
4963
4964 /* Arg 1: Memory address. */
4965 int e;
4966 jim_wide wide_addr;
4967 e = Jim_GetWide(interp, argv[1], &wide_addr);
4968
4969 if (e != JIM_OK)
4970 return e;
4971
4972 target_addr_t addr = (target_addr_t)wide_addr;
4973
4974 /* Arg 2: Bit width of one element. */
4975 long l;
4976 e = Jim_GetLong(interp, argv[2], &l);
4977
4978 if (e != JIM_OK)
4979 return e;
4980
4981 const unsigned int width_bits = l;
4982 size_t count = Jim_ListLength(interp, argv[3]);
4983
4984 /* Arg 4: Optional 'phys'. */
4985 bool is_phys = false;
4986
4987 if (argc > 4) {
4988 const char *phys = Jim_GetString(argv[4], NULL);
4989
4990 if (strcmp(phys, "phys")) {
4991 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4992 return JIM_ERR;
4993 }
4994
4995 is_phys = true;
4996 }
4997
4998 switch (width_bits) {
4999 case 8:
5000 case 16:
5001 case 32:
5002 case 64:
5003 break;
5004 default:
5005 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
5006 return JIM_ERR;
5007 }
5008
5009 const unsigned int width = width_bits / 8;
5010
5011 if ((addr + (count * width)) < addr) {
5012 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
5013 return JIM_ERR;
5014 }
5015
5016 if (count > 65536) {
5017 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
5018 return JIM_ERR;
5019 }
5020
5021 struct command_context *cmd_ctx = current_command_context(interp);
5022 assert(cmd_ctx != NULL);
5023 struct target *target = get_current_target(cmd_ctx);
5024
5025 const size_t buffersize = 4096;
5026 uint8_t *buffer = malloc(buffersize);
5027
5028 if (!buffer) {
5029 LOG_ERROR("Failed to allocate memory");
5030 return JIM_ERR;
5031 }
5032
5033 size_t j = 0;
5034
5035 while (count > 0) {
5036 const unsigned int max_chunk_len = buffersize / width;
5037 const size_t chunk_len = MIN(count, max_chunk_len);
5038
5039 for (size_t i = 0; i < chunk_len; i++, j++) {
5040 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5041 jim_wide element_wide;
5042 Jim_GetWide(interp, tmp, &element_wide);
5043
5044 const uint64_t v = element_wide;
5045
5046 switch (width) {
5047 case 8:
5048 target_buffer_set_u64(target, &buffer[i * width], v);
5049 break;
5050 case 4:
5051 target_buffer_set_u32(target, &buffer[i * width], v);
5052 break;
5053 case 2:
5054 target_buffer_set_u16(target, &buffer[i * width], v);
5055 break;
5056 case 1:
5057 buffer[i] = v & 0x0ff;
5058 break;
5059 }
5060 }
5061
5062 count -= chunk_len;
5063
5064 int retval;
5065
5066 if (is_phys)
5067 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5068 else
5069 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5070
5071 if (retval != ERROR_OK) {
5072 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5073 addr, width_bits, chunk_len);
5074 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5075 e = JIM_ERR;
5076 break;
5077 }
5078
5079 addr += chunk_len * width;
5080 }
5081
5082 free(buffer);
5083
5084 return e;
5085 }
5086
5087 /* FIX? should we propagate errors here rather than printing them
5088 * and continuing?
5089 */
5090 void target_handle_event(struct target *target, enum target_event e)
5091 {
5092 struct target_event_action *teap;
5093 int retval;
5094
5095 for (teap = target->event_action; teap; teap = teap->next) {
5096 if (teap->event == e) {
5097 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
5098 target->target_number,
5099 target_name(target),
5100 target_type_name(target),
5101 e,
5102 target_event_name(e),
5103 Jim_GetString(teap->body, NULL));
5104
5105 /* Override current target by the target an event
5106 * is issued from (lot of scripts need it).
5107 * Return back to previous override as soon
5108 * as the handler processing is done */
5109 struct command_context *cmd_ctx = current_command_context(teap->interp);
5110 struct target *saved_target_override = cmd_ctx->current_target_override;
5111 cmd_ctx->current_target_override = target;
5112
5113 retval = Jim_EvalObj(teap->interp, teap->body);
5114
5115 cmd_ctx->current_target_override = saved_target_override;
5116
5117 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
5118 return;
5119
5120 if (retval == JIM_RETURN)
5121 retval = teap->interp->returnCode;
5122
5123 if (retval != JIM_OK) {
5124 Jim_MakeErrorMessage(teap->interp);
5125 LOG_USER("Error executing event %s on target %s:\n%s",
5126 target_event_name(e),
5127 target_name(target),
5128 Jim_GetString(Jim_GetResult(teap->interp), NULL));
5129 /* clean both error code and stacktrace before return */
5130 Jim_Eval(teap->interp, "error \"\" \"\"");
5131 }
5132 }
5133 }
5134 }
5135
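/* Jim handler for the 'get_reg' command, e.g.:
 *   get_reg {pc sp}
 * returns a dict mapping each requested register to its cached value as a hex
 * string; 'get_reg -force {pc}' re-reads the register from the target first. */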
5136 static int target_jim_get_reg(Jim_Interp *interp, int argc,
5137 Jim_Obj * const *argv)
5138 {
5139 bool force = false;
5140
5141 if (argc == 3) {
5142 const char *option = Jim_GetString(argv[1], NULL);
5143
5144 if (!strcmp(option, "-force")) {
5145 argc--;
5146 argv++;
5147 force = true;
5148 } else {
5149 Jim_SetResultFormatted(interp, "invalid option '%s'", option);
5150 return JIM_ERR;
5151 }
5152 }
5153
5154 if (argc != 2) {
5155 Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
5156 return JIM_ERR;
5157 }
5158
5159 const int length = Jim_ListLength(interp, argv[1]);
5160
5161 Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);
5162
5163 if (!result_dict)
5164 return JIM_ERR;
5165
5166 struct command_context *cmd_ctx = current_command_context(interp);
5167 assert(cmd_ctx != NULL);
5168 const struct target *target = get_current_target(cmd_ctx);
5169
5170 for (int i = 0; i < length; i++) {
5171 Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);
5172
5173 if (!elem)
5174 return JIM_ERR;
5175
5176 const char *reg_name = Jim_String(elem);
5177
5178 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5179 false);
5180
5181 if (!reg || !reg->exist) {
5182 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5183 return JIM_ERR;
5184 }
5185
5186 if (force) {
5187 int retval = reg->type->get(reg);
5188
5189 if (retval != ERROR_OK) {
5190 Jim_SetResultFormatted(interp, "failed to read register '%s'",
5191 reg_name);
5192 return JIM_ERR;
5193 }
5194 }
5195
5196 char *reg_value = buf_to_hex_str(reg->value, reg->size);
5197
5198 if (!reg_value) {
5199 LOG_ERROR("Failed to allocate memory");
5200 return JIM_ERR;
5201 }
5202
5203 char *tmp = alloc_printf("0x%s", reg_value);
5204
5205 free(reg_value);
5206
5207 if (!tmp) {
5208 LOG_ERROR("Failed to allocate memory");
5209 return JIM_ERR;
5210 }
5211
5212 Jim_DictAddElement(interp, result_dict, elem,
5213 Jim_NewStringObj(interp, tmp, -1));
5214
5215 free(tmp);
5216 }
5217
5218 Jim_SetResult(interp, result_dict);
5219
5220 return JIM_OK;
5221 }
5222
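/* Jim handler for the 'set_reg' command, e.g.:
 *   set_reg {pc 0x08000000 r0 0}
 * takes a dict of register-name/value pairs and writes each value through the
 * register's set() handler. */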
5223 static int target_jim_set_reg(Jim_Interp *interp, int argc,
5224 Jim_Obj * const *argv)
5225 {
5226 if (argc != 2) {
5227 Jim_WrongNumArgs(interp, 1, argv, "dict");
5228 return JIM_ERR;
5229 }
5230
5231 int tmp;
5232 #if JIM_VERSION >= 80
5233 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
5234
5235 if (!dict)
5236 return JIM_ERR;
5237 #else
5238 Jim_Obj **dict;
5239 int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);
5240
5241 if (ret != JIM_OK)
5242 return ret;
5243 #endif
5244
5245 const unsigned int length = tmp;
5246 struct command_context *cmd_ctx = current_command_context(interp);
5247 assert(cmd_ctx);
5248 const struct target *target = get_current_target(cmd_ctx);
5249
5250 for (unsigned int i = 0; i < length; i += 2) {
5251 const char *reg_name = Jim_String(dict[i]);
5252 const char *reg_value = Jim_String(dict[i + 1]);
5253 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5254 false);
5255
5256 if (!reg || !reg->exist) {
5257 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5258 return JIM_ERR;
5259 }
5260
5261 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
5262
5263 if (!buf) {
5264 LOG_ERROR("Failed to allocate memory");
5265 return JIM_ERR;
5266 }
5267
5268 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
5269 int retval = reg->type->set(reg, buf);
5270 free(buf);
5271
5272 if (retval != ERROR_OK) {
5273 Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
5274 reg_value, reg_name);
5275 return JIM_ERR;
5276 }
5277 }
5278
5279 return JIM_OK;
5280 }
5281
5282 /**
5283 * Returns true only if the target has a handler for the specified event.
5284 */
5285 bool target_has_event_action(struct target *target, enum target_event event)
5286 {
5287 struct target_event_action *teap;
5288
5289 for (teap = target->event_action; teap; teap = teap->next) {
5290 if (teap->event == event)
5291 return true;
5292 }
5293 return false;
5294 }
5295
5296 enum target_cfg_param {
5297 TCFG_TYPE,
5298 TCFG_EVENT,
5299 TCFG_WORK_AREA_VIRT,
5300 TCFG_WORK_AREA_PHYS,
5301 TCFG_WORK_AREA_SIZE,
5302 TCFG_WORK_AREA_BACKUP,
5303 TCFG_ENDIAN,
5304 TCFG_COREID,
5305 TCFG_CHAIN_POSITION,
5306 TCFG_DBGBASE,
5307 TCFG_RTOS,
5308 TCFG_DEFER_EXAMINE,
5309 TCFG_GDB_PORT,
5310 TCFG_GDB_MAX_CONNECTIONS,
5311 };
5312
5313 static struct jim_nvp nvp_config_opts[] = {
5314 { .name = "-type", .value = TCFG_TYPE },
5315 { .name = "-event", .value = TCFG_EVENT },
5316 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
5317 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
5318 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
5319 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
5320 { .name = "-endian", .value = TCFG_ENDIAN },
5321 { .name = "-coreid", .value = TCFG_COREID },
5322 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
5323 { .name = "-dbgbase", .value = TCFG_DBGBASE },
5324 { .name = "-rtos", .value = TCFG_RTOS },
5325 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
5326 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
5327 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
5328 { .name = NULL, .value = -1 }
5329 };
5330
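/* Shared implementation of '$target_name configure ...' and
 * '$target_name cget ...' (see jim_target_configure() below), e.g.:
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME cget -endian
 * goi->isconfigure distinguishes setting options from querying them. */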
5331 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5332 {
5333 struct jim_nvp *n;
5334 Jim_Obj *o;
5335 jim_wide w;
5336 int e;
5337
5338 /* parse config or cget options ... */
5339 while (goi->argc > 0) {
5340 Jim_SetEmptyResult(goi->interp);
5341 /* jim_getopt_debug(goi); */
5342
5343 if (target->type->target_jim_configure) {
5344 /* target defines a configure function */
5345 /* target gets first dibs on parameters */
5346 e = (*(target->type->target_jim_configure))(target, goi);
5347 if (e == JIM_OK) {
5348 /* more? */
5349 continue;
5350 }
5351 if (e == JIM_ERR) {
5352 /* An error */
5353 return e;
5354 }
5355 /* otherwise we 'continue' below */
5356 }
5357 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5358 if (e != JIM_OK) {
5359 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5360 return e;
5361 }
5362 switch (n->value) {
5363 case TCFG_TYPE:
5364 /* not settable */
5365 if (goi->isconfigure) {
5366 Jim_SetResultFormatted(goi->interp,
5367 "not settable: %s", n->name);
5368 return JIM_ERR;
5369 } else {
5370 no_params:
5371 if (goi->argc != 0) {
5372 Jim_WrongNumArgs(goi->interp,
5373 goi->argc, goi->argv,
5374 "NO PARAMS");
5375 return JIM_ERR;
5376 }
5377 }
5378 Jim_SetResultString(goi->interp,
5379 target_type_name(target), -1);
5380 /* loop for more */
5381 break;
5382 case TCFG_EVENT:
5383 if (goi->argc == 0) {
5384 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5385 return JIM_ERR;
5386 }
5387
5388 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5389 if (e != JIM_OK) {
5390 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5391 return e;
5392 }
5393
5394 if (goi->isconfigure) {
5395 if (goi->argc != 1) {
5396 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5397 return JIM_ERR;
5398 }
5399 } else {
5400 if (goi->argc != 0) {
5401 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5402 return JIM_ERR;
5403 }
5404 }
5405
5406 {
5407 struct target_event_action *teap;
5408
5409 teap = target->event_action;
5410 /* replace existing? */
5411 while (teap) {
5412 if (teap->event == (enum target_event)n->value)
5413 break;
5414 teap = teap->next;
5415 }
5416
5417 if (goi->isconfigure) {
5418 /* START_DEPRECATED_TPIU */
5419 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5420 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5421 /* END_DEPRECATED_TPIU */
5422
5423 bool replace = true;
5424 if (!teap) {
5425 /* create new */
5426 teap = calloc(1, sizeof(*teap));
5427 replace = false;
5428 }
5429 teap->event = n->value;
5430 teap->interp = goi->interp;
5431 jim_getopt_obj(goi, &o);
5432 if (teap->body)
5433 Jim_DecrRefCount(teap->interp, teap->body);
5434 teap->body = Jim_DuplicateObj(goi->interp, o);
5435 /*
5436 * FIXME:
5437 * Tcl/TK - "tk events" have a nice feature.
5438 * See the "BIND" command.
5439 * We should support that here.
5440 * You can specify %X and %Y in the event code.
5441 * The idea is: %T - target name.
5442 * The idea is: %N - target number
5443 * The idea is: %E - event name.
5444 */
5445 Jim_IncrRefCount(teap->body);
5446
5447 if (!replace) {
5448 /* add to head of event list */
5449 teap->next = target->event_action;
5450 target->event_action = teap;
5451 }
5452 Jim_SetEmptyResult(goi->interp);
5453 } else {
5454 /* get */
5455 if (!teap)
5456 Jim_SetEmptyResult(goi->interp);
5457 else
5458 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5459 }
5460 }
5461 /* loop for more */
5462 break;
5463
5464 case TCFG_WORK_AREA_VIRT:
5465 if (goi->isconfigure) {
5466 target_free_all_working_areas(target);
5467 e = jim_getopt_wide(goi, &w);
5468 if (e != JIM_OK)
5469 return e;
5470 target->working_area_virt = w;
5471 target->working_area_virt_spec = true;
5472 } else {
5473 if (goi->argc != 0)
5474 goto no_params;
5475 }
5476 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5477 /* loop for more */
5478 break;
5479
5480 case TCFG_WORK_AREA_PHYS:
5481 if (goi->isconfigure) {
5482 target_free_all_working_areas(target);
5483 e = jim_getopt_wide(goi, &w);
5484 if (e != JIM_OK)
5485 return e;
5486 target->working_area_phys = w;
5487 target->working_area_phys_spec = true;
5488 } else {
5489 if (goi->argc != 0)
5490 goto no_params;
5491 }
5492 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5493 /* loop for more */
5494 break;
5495
5496 case TCFG_WORK_AREA_SIZE:
5497 if (goi->isconfigure) {
5498 target_free_all_working_areas(target);
5499 e = jim_getopt_wide(goi, &w);
5500 if (e != JIM_OK)
5501 return e;
5502 target->working_area_size = w;
5503 } else {
5504 if (goi->argc != 0)
5505 goto no_params;
5506 }
5507 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5508 /* loop for more */
5509 break;
5510
5511 case TCFG_WORK_AREA_BACKUP:
5512 if (goi->isconfigure) {
5513 target_free_all_working_areas(target);
5514 e = jim_getopt_wide(goi, &w);
5515 if (e != JIM_OK)
5516 return e;
5517 /* make this exactly 1 or 0 */
5518 target->backup_working_area = (!!w);
5519 } else {
5520 if (goi->argc != 0)
5521 goto no_params;
5522 }
5523 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5524 			/* loop for more */
5525 break;
5526
5527
5528 case TCFG_ENDIAN:
5529 if (goi->isconfigure) {
5530 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5531 if (e != JIM_OK) {
5532 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5533 return e;
5534 }
5535 target->endianness = n->value;
5536 } else {
5537 if (goi->argc != 0)
5538 goto no_params;
5539 }
5540 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5541 if (!n->name) {
5542 target->endianness = TARGET_LITTLE_ENDIAN;
5543 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5544 }
5545 Jim_SetResultString(goi->interp, n->name, -1);
5546 /* loop for more */
5547 break;
5548
5549 case TCFG_COREID:
5550 if (goi->isconfigure) {
5551 e = jim_getopt_wide(goi, &w);
5552 if (e != JIM_OK)
5553 return e;
5554 target->coreid = (int32_t)w;
5555 } else {
5556 if (goi->argc != 0)
5557 goto no_params;
5558 }
5559 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5560 /* loop for more */
5561 break;
5562
5563 case TCFG_CHAIN_POSITION:
5564 if (goi->isconfigure) {
5565 Jim_Obj *o_t;
5566 struct jtag_tap *tap;
5567
5568 if (target->has_dap) {
5569 Jim_SetResultString(goi->interp,
5570 "target requires -dap parameter instead of -chain-position!", -1);
5571 return JIM_ERR;
5572 }
5573
5574 target_free_all_working_areas(target);
5575 e = jim_getopt_obj(goi, &o_t);
5576 if (e != JIM_OK)
5577 return e;
5578 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5579 if (!tap)
5580 return JIM_ERR;
5581 target->tap = tap;
5582 target->tap_configured = true;
5583 } else {
5584 if (goi->argc != 0)
5585 goto no_params;
5586 }
5587 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5588 			/* loop for more */
5589 break;
5590 case TCFG_DBGBASE:
5591 if (goi->isconfigure) {
5592 e = jim_getopt_wide(goi, &w);
5593 if (e != JIM_OK)
5594 return e;
5595 target->dbgbase = (uint32_t)w;
5596 target->dbgbase_set = true;
5597 } else {
5598 if (goi->argc != 0)
5599 goto no_params;
5600 }
5601 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5602 /* loop for more */
5603 break;
5604 case TCFG_RTOS:
5605 /* RTOS */
5606 {
5607 int result = rtos_create(goi, target);
5608 if (result != JIM_OK)
5609 return result;
5610 }
5611 /* loop for more */
5612 break;
5613
5614 case TCFG_DEFER_EXAMINE:
5615 /* DEFER_EXAMINE */
5616 target->defer_examine = true;
5617 /* loop for more */
5618 break;
5619
5620 case TCFG_GDB_PORT:
5621 if (goi->isconfigure) {
5622 struct command_context *cmd_ctx = current_command_context(goi->interp);
5623 if (cmd_ctx->mode != COMMAND_CONFIG) {
5624 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5625 return JIM_ERR;
5626 }
5627
5628 const char *s;
5629 e = jim_getopt_string(goi, &s, NULL);
5630 if (e != JIM_OK)
5631 return e;
5632 free(target->gdb_port_override);
5633 target->gdb_port_override = strdup(s);
5634 } else {
5635 if (goi->argc != 0)
5636 goto no_params;
5637 }
5638 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5639 /* loop for more */
5640 break;
5641
5642 case TCFG_GDB_MAX_CONNECTIONS:
5643 if (goi->isconfigure) {
5644 struct command_context *cmd_ctx = current_command_context(goi->interp);
5645 if (cmd_ctx->mode != COMMAND_CONFIG) {
5646 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5647 return JIM_ERR;
5648 }
5649
5650 e = jim_getopt_wide(goi, &w);
5651 if (e != JIM_OK)
5652 return e;
5653 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5654 } else {
5655 if (goi->argc != 0)
5656 goto no_params;
5657 }
5658 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5659 break;
5660 }
5661 } /* while (goi->argc) */
5662
5663
5664 /* done - we return */
5665 return JIM_OK;
5666 }
5667
5668 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5669 {
5670 struct command *c = jim_to_command(interp);
5671 struct jim_getopt_info goi;
5672
5673 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5674 goi.isconfigure = !strcmp(c->name, "configure");
5675 if (goi.argc < 1) {
5676 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5677 "missing: -option ...");
5678 return JIM_ERR;
5679 }
5680 struct command_context *cmd_ctx = current_command_context(interp);
5681 assert(cmd_ctx);
5682 struct target *target = get_current_target(cmd_ctx);
5683 return target_configure(&goi, target);
5684 }
5685
5686 static int jim_target_mem2array(Jim_Interp *interp,
5687 int argc, Jim_Obj *const *argv)
5688 {
5689 struct command_context *cmd_ctx = current_command_context(interp);
5690 assert(cmd_ctx);
5691 struct target *target = get_current_target(cmd_ctx);
5692 return target_mem2array(interp, target, argc - 1, argv + 1);
5693 }
5694
5695 static int jim_target_array2mem(Jim_Interp *interp,
5696 int argc, Jim_Obj *const *argv)
5697 {
5698 struct command_context *cmd_ctx = current_command_context(interp);
5699 assert(cmd_ctx);
5700 struct target *target = get_current_target(cmd_ctx);
5701 return target_array2mem(interp, target, argc - 1, argv + 1);
5702 }
5703
5704 static int jim_target_tap_disabled(Jim_Interp *interp)
5705 {
5706 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5707 return JIM_ERR;
5708 }
5709
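/* Jim handler behind 'arp_examine' ['allow-defer']: runs the target's
 * examine() method, unless examination was deferred with -defer-examine and
 * 'allow-defer' was given, in which case it is skipped. */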
5710 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5711 {
5712 bool allow_defer = false;
5713
5714 struct jim_getopt_info goi;
5715 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5716 if (goi.argc > 1) {
5717 const char *cmd_name = Jim_GetString(argv[0], NULL);
5718 Jim_SetResultFormatted(goi.interp,
5719 "usage: %s ['allow-defer']", cmd_name);
5720 return JIM_ERR;
5721 }
5722 if (goi.argc > 0 &&
5723 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5724 /* consume it */
5725 Jim_Obj *obj;
5726 int e = jim_getopt_obj(&goi, &obj);
5727 if (e != JIM_OK)
5728 return e;
5729 allow_defer = true;
5730 }
5731
5732 struct command_context *cmd_ctx = current_command_context(interp);
5733 assert(cmd_ctx);
5734 struct target *target = get_current_target(cmd_ctx);
5735 if (!target->tap->enabled)
5736 return jim_target_tap_disabled(interp);
5737
5738 if (allow_defer && target->defer_examine) {
5739 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5740 LOG_INFO("Use arp_examine command to examine it manually!");
5741 return JIM_OK;
5742 }
5743
5744 int e = target->type->examine(target);
5745 if (e != ERROR_OK) {
5746 target_reset_examined(target);
5747 return JIM_ERR;
5748 }
5749
5750 target_set_examined(target);
5751
5752 return JIM_OK;
5753 }
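/*
 * Illustrative use from a reset or event script (sketch only):
 *
 *   $_TARGETNAME arp_examine                ;# examine the target now
 *   $_TARGETNAME arp_examine allow-defer    ;# honour -defer-examine if set
 */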
5754
5755 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5756 {
5757 struct command_context *cmd_ctx = current_command_context(interp);
5758 assert(cmd_ctx);
5759 struct target *target = get_current_target(cmd_ctx);
5760
5761 Jim_SetResultBool(interp, target_was_examined(target));
5762 return JIM_OK;
5763 }
5764
5765 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5766 {
5767 struct command_context *cmd_ctx = current_command_context(interp);
5768 assert(cmd_ctx);
5769 struct target *target = get_current_target(cmd_ctx);
5770
5771 Jim_SetResultBool(interp, target->defer_examine);
5772 return JIM_OK;
5773 }
5774
5775 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5776 {
5777 if (argc != 1) {
5778 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5779 return JIM_ERR;
5780 }
5781 struct command_context *cmd_ctx = current_command_context(interp);
5782 assert(cmd_ctx);
5783 struct target *target = get_current_target(cmd_ctx);
5784
5785 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5786 return JIM_ERR;
5787
5788 return JIM_OK;
5789 }
5790
5791 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5792 {
5793 if (argc != 1) {
5794 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5795 return JIM_ERR;
5796 }
5797 struct command_context *cmd_ctx = current_command_context(interp);
5798 assert(cmd_ctx);
5799 struct target *target = get_current_target(cmd_ctx);
5800 if (!target->tap->enabled)
5801 return jim_target_tap_disabled(interp);
5802
5803 int e;
5804 if (!(target_was_examined(target)))
5805 e = ERROR_TARGET_NOT_EXAMINED;
5806 else
5807 e = target->type->poll(target);
5808 if (e != ERROR_OK)
5809 return JIM_ERR;
5810 return JIM_OK;
5811 }
5812
5813 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5814 {
5815 struct jim_getopt_info goi;
5816 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5817
5818 if (goi.argc != 2) {
5819 Jim_WrongNumArgs(interp, 0, argv,
5820 "([tT]|[fF]|assert|deassert) BOOL");
5821 return JIM_ERR;
5822 }
5823
5824 struct jim_nvp *n;
5825 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5826 if (e != JIM_OK) {
5827 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5828 return e;
5829 }
5830 /* the halt or not param */
5831 jim_wide a;
5832 e = jim_getopt_wide(&goi, &a);
5833 if (e != JIM_OK)
5834 return e;
5835
5836 struct command_context *cmd_ctx = current_command_context(interp);
5837 assert(cmd_ctx);
5838 struct target *target = get_current_target(cmd_ctx);
5839 if (!target->tap->enabled)
5840 return jim_target_tap_disabled(interp);
5841
5842 if (!target->type->assert_reset || !target->type->deassert_reset) {
5843 Jim_SetResultFormatted(interp,
5844 "No target-specific reset for %s",
5845 target_name(target));
5846 return JIM_ERR;
5847 }
5848
5849 if (target->defer_examine)
5850 target_reset_examined(target);
5851
5852 /* determine if we should halt or not. */
5853 target->reset_halt = (a != 0);
5854 /* When this happens - all workareas are invalid. */
5855 target_free_all_working_areas_restore(target, 0);
5856
5857 /* do the assert */
5858 if (n->value == NVP_ASSERT)
5859 e = target->type->assert_reset(target);
5860 else
5861 e = target->type->deassert_reset(target);
5862 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5863 }
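/*
 * Used internally by the reset framework. Illustrative form (sketch only;
 * the trailing flag selects reset-halt):
 *
 *   $_TARGETNAME arp_reset assert 1
 *   $_TARGETNAME arp_reset deassert 1
 */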
5864
5865 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5866 {
5867 if (argc != 1) {
5868 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5869 return JIM_ERR;
5870 }
5871 struct command_context *cmd_ctx = current_command_context(interp);
5872 assert(cmd_ctx);
5873 struct target *target = get_current_target(cmd_ctx);
5874 if (!target->tap->enabled)
5875 return jim_target_tap_disabled(interp);
5876 int e = target->type->halt(target);
5877 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5878 }
5879
5880 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5881 {
5882 struct jim_getopt_info goi;
5883 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5884
5885 /* params: <name> statename timeoutmsecs */
5886 if (goi.argc != 2) {
5887 const char *cmd_name = Jim_GetString(argv[0], NULL);
5888 Jim_SetResultFormatted(goi.interp,
5889 "%s <state_name> <timeout_in_msec>", cmd_name);
5890 return JIM_ERR;
5891 }
5892
5893 struct jim_nvp *n;
5894 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5895 if (e != JIM_OK) {
5896 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5897 return e;
5898 }
5899 jim_wide a;
5900 e = jim_getopt_wide(&goi, &a);
5901 if (e != JIM_OK)
5902 return e;
5903 struct command_context *cmd_ctx = current_command_context(interp);
5904 assert(cmd_ctx);
5905 struct target *target = get_current_target(cmd_ctx);
5906 if (!target->tap->enabled)
5907 return jim_target_tap_disabled(interp);
5908
5909 e = target_wait_state(target, n->value, a);
5910 if (e != ERROR_OK) {
5911 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5912 Jim_SetResultFormatted(goi.interp,
5913 "target: %s wait %s fails (%#s) %s",
5914 target_name(target), n->name,
5915 obj, target_strerror_safe(e));
5916 return JIM_ERR;
5917 }
5918 return JIM_OK;
5919 }
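/*
 * Illustrative use, e.g. from a reset event handler (sketch only):
 *
 *   $_TARGETNAME arp_waitstate halted 1000   ;# wait up to 1000 ms for 'halted'
 */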
5920 /* List, for humans, the events defined for this target.
5921 * Scripts/programs should use 'name cget -event NAME' instead.
5922 */
5923 COMMAND_HANDLER(handle_target_event_list)
5924 {
5925 struct target *target = get_current_target(CMD_CTX);
5926 struct target_event_action *teap = target->event_action;
5927
5928 command_print(CMD, "Event actions for target (%d) %s\n",
5929 target->target_number,
5930 target_name(target));
5931 command_print(CMD, "%-25s | Body", "Event");
5932 command_print(CMD, "------------------------- | "
5933 "----------------------------------------");
5934 while (teap) {
5935 command_print(CMD, "%-25s | %s",
5936 target_event_name(teap->event),
5937 Jim_GetString(teap->body, NULL));
5938 teap = teap->next;
5939 }
5940 command_print(CMD, "***END***");
5941 return ERROR_OK;
5942 }
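/*
 * Example session (layout illustrative; target name and event body are placeholders):
 *
 *   > my.cpu eventlist
 *   Event actions for target (0) my.cpu
 *   Event                     | Body
 *   ------------------------- | ----------------------------------------
 *   reset-init                | { adapter speed 4000 }
 *   ***END***
 */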
5943 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5944 {
5945 if (argc != 1) {
5946 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5947 return JIM_ERR;
5948 }
5949 struct command_context *cmd_ctx = current_command_context(interp);
5950 assert(cmd_ctx);
5951 struct target *target = get_current_target(cmd_ctx);
5952 Jim_SetResultString(interp, target_state_name(target), -1);
5953 return JIM_OK;
5954 }
5955 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5956 {
5957 struct jim_getopt_info goi;
5958 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5959 if (goi.argc != 1) {
5960 const char *cmd_name = Jim_GetString(argv[0], NULL);
5961 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5962 return JIM_ERR;
5963 }
5964 struct jim_nvp *n;
5965 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5966 if (e != JIM_OK) {
5967 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5968 return e;
5969 }
5970 struct command_context *cmd_ctx = current_command_context(interp);
5971 assert(cmd_ctx);
5972 struct target *target = get_current_target(cmd_ctx);
5973 target_handle_event(target, n->value);
5974 return JIM_OK;
5975 }
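/*
 * Illustrative Tcl usage (the event name must be one of nvp_target_event):
 *
 *   $_TARGETNAME invoke-event reset-init
 */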
5976
5977 static const struct command_registration target_instance_command_handlers[] = {
5978 {
5979 .name = "configure",
5980 .mode = COMMAND_ANY,
5981 .jim_handler = jim_target_configure,
5982 .help = "configure a new target for use",
5983 .usage = "[target_attribute ...]",
5984 },
5985 {
5986 .name = "cget",
5987 .mode = COMMAND_ANY,
5988 .jim_handler = jim_target_configure,
5989 .help = "returns the specified target attribute",
5990 .usage = "target_attribute",
5991 },
5992 {
5993 .name = "mwd",
5994 .handler = handle_mw_command,
5995 .mode = COMMAND_EXEC,
5996 .help = "Write 64-bit word(s) to target memory",
5997 .usage = "address data [count]",
5998 },
5999 {
6000 .name = "mww",
6001 .handler = handle_mw_command,
6002 .mode = COMMAND_EXEC,
6003 .help = "Write 32-bit word(s) to target memory",
6004 .usage = "address data [count]",
6005 },
6006 {
6007 .name = "mwh",
6008 .handler = handle_mw_command,
6009 .mode = COMMAND_EXEC,
6010 .help = "Write 16-bit half-word(s) to target memory",
6011 .usage = "address data [count]",
6012 },
6013 {
6014 .name = "mwb",
6015 .handler = handle_mw_command,
6016 .mode = COMMAND_EXEC,
6017 .help = "Write byte(s) to target memory",
6018 .usage = "address data [count]",
6019 },
6020 {
6021 .name = "mdd",
6022 .handler = handle_md_command,
6023 .mode = COMMAND_EXEC,
6024 .help = "Display target memory as 64-bit words",
6025 .usage = "address [count]",
6026 },
6027 {
6028 .name = "mdw",
6029 .handler = handle_md_command,
6030 .mode = COMMAND_EXEC,
6031 .help = "Display target memory as 32-bit words",
6032 .usage = "address [count]",
6033 },
6034 {
6035 .name = "mdh",
6036 .handler = handle_md_command,
6037 .mode = COMMAND_EXEC,
6038 .help = "Display target memory as 16-bit half-words",
6039 .usage = "address [count]",
6040 },
6041 {
6042 .name = "mdb",
6043 .handler = handle_md_command,
6044 .mode = COMMAND_EXEC,
6045 .help = "Display target memory as 8-bit bytes",
6046 .usage = "address [count]",
6047 },
6048 {
6049 .name = "array2mem",
6050 .mode = COMMAND_EXEC,
6051 .jim_handler = jim_target_array2mem,
6052 .help = "Writes Tcl array of 8/16/32 bit numbers "
6053 "to target memory",
6054 .usage = "arrayname bitwidth address count",
6055 },
6056 {
6057 .name = "mem2array",
6058 .mode = COMMAND_EXEC,
6059 .jim_handler = jim_target_mem2array,
6060 .help = "Loads Tcl array of 8/16/32 bit numbers "
6061 "from target memory",
6062 .usage = "arrayname bitwidth address count",
6063 },
6064 {
6065 .name = "get_reg",
6066 .mode = COMMAND_EXEC,
6067 .jim_handler = target_jim_get_reg,
6068 .help = "Get register values from the target",
6069 .usage = "list",
6070 },
6071 {
6072 .name = "set_reg",
6073 .mode = COMMAND_EXEC,
6074 .jim_handler = target_jim_set_reg,
6075 .help = "Set target register values",
6076 .usage = "dict",
6077 },
6078 {
6079 .name = "read_memory",
6080 .mode = COMMAND_EXEC,
6081 .jim_handler = target_jim_read_memory,
6082 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
6083 .usage = "address width count ['phys']",
6084 },
6085 {
6086 .name = "write_memory",
6087 .mode = COMMAND_EXEC,
6088 .jim_handler = target_jim_write_memory,
6089 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6090 .usage = "address width data ['phys']",
6091 },
6092 {
6093 .name = "eventlist",
6094 .handler = handle_target_event_list,
6095 .mode = COMMAND_EXEC,
6096 .help = "displays a table of events defined for this target",
6097 .usage = "",
6098 },
6099 {
6100 .name = "curstate",
6101 .mode = COMMAND_EXEC,
6102 .jim_handler = jim_target_current_state,
6103 .help = "displays the current state of this target",
6104 },
6105 {
6106 .name = "arp_examine",
6107 .mode = COMMAND_EXEC,
6108 .jim_handler = jim_target_examine,
6109 .help = "used internally for reset processing",
6110 .usage = "['allow-defer']",
6111 },
6112 {
6113 .name = "was_examined",
6114 .mode = COMMAND_EXEC,
6115 .jim_handler = jim_target_was_examined,
6116 .help = "used internally for reset processing",
6117 },
6118 {
6119 .name = "examine_deferred",
6120 .mode = COMMAND_EXEC,
6121 .jim_handler = jim_target_examine_deferred,
6122 .help = "used internally for reset processing",
6123 },
6124 {
6125 .name = "arp_halt_gdb",
6126 .mode = COMMAND_EXEC,
6127 .jim_handler = jim_target_halt_gdb,
6128 .help = "used internally for reset processing to halt GDB",
6129 },
6130 {
6131 .name = "arp_poll",
6132 .mode = COMMAND_EXEC,
6133 .jim_handler = jim_target_poll,
6134 .help = "used internally for reset processing",
6135 },
6136 {
6137 .name = "arp_reset",
6138 .mode = COMMAND_EXEC,
6139 .jim_handler = jim_target_reset,
6140 .help = "used internally for reset processing",
6141 },
6142 {
6143 .name = "arp_halt",
6144 .mode = COMMAND_EXEC,
6145 .jim_handler = jim_target_halt,
6146 .help = "used internally for reset processing",
6147 },
6148 {
6149 .name = "arp_waitstate",
6150 .mode = COMMAND_EXEC,
6151 .jim_handler = jim_target_wait_state,
6152 .help = "used internally for reset processing",
6153 },
6154 {
6155 .name = "invoke-event",
6156 .mode = COMMAND_EXEC,
6157 .jim_handler = jim_target_invoke_event,
6158 .help = "invoke handler for specified event",
6159 .usage = "event_name",
6160 },
6161 COMMAND_REGISTRATION_DONE
6162 };
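/*
 * Illustrative use of the register/memory accessors registered above
 * (addresses, values and register names are placeholders):
 *
 *   $_TARGETNAME read_memory 0x20000000 32 4           ;# returns a Tcl list
 *   $_TARGETNAME write_memory 0x20000000 32 {0x1 0x2}
 *   $_TARGETNAME get_reg {pc sp}                        ;# returns a dict
 *   $_TARGETNAME set_reg {pc 0x08000000}
 */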
6163
6164 static int target_create(struct jim_getopt_info *goi)
6165 {
6166 Jim_Obj *new_cmd;
6167 Jim_Cmd *cmd;
6168 const char *cp;
6169 int e;
6170 int x;
6171 struct target *target;
6172 struct command_context *cmd_ctx;
6173
6174 cmd_ctx = current_command_context(goi->interp);
6175 assert(cmd_ctx);
6176
6177 if (goi->argc < 3) {
6178 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
6179 return JIM_ERR;
6180 }
6181
6182 /* COMMAND */
6183 jim_getopt_obj(goi, &new_cmd);
6184 /* does this command exist? */
6185 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
6186 if (cmd) {
6187 cp = Jim_GetString(new_cmd, NULL);
6188 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
6189 return JIM_ERR;
6190 }
6191
6192 /* TYPE */
6193 e = jim_getopt_string(goi, &cp, NULL);
6194 if (e != JIM_OK)
6195 return e;
6196 struct transport *tr = get_current_transport();
6197 if (tr->override_target) {
6198 e = tr->override_target(&cp);
6199 if (e != ERROR_OK) {
6200 LOG_ERROR("The selected transport doesn't support this target");
6201 return JIM_ERR;
6202 }
6203 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
6204 }
6205 /* now does target type exist */
6206 for (x = 0 ; target_types[x] ; x++) {
6207 if (strcmp(cp, target_types[x]->name) == 0) {
6208 /* found */
6209 break;
6210 }
6211 }
6212 if (!target_types[x]) {
6213 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
6214 for (x = 0 ; target_types[x] ; x++) {
6215 if (target_types[x + 1]) {
6216 Jim_AppendStrings(goi->interp,
6217 Jim_GetResult(goi->interp),
6218 target_types[x]->name,
6219 ", ", NULL);
6220 } else {
6221 Jim_AppendStrings(goi->interp,
6222 Jim_GetResult(goi->interp),
6223 " or ",
6224 target_types[x]->name, NULL);
6225 }
6226 }
6227 return JIM_ERR;
6228 }
6229
6230 /* Create it */
6231 target = calloc(1, sizeof(struct target));
6232 if (!target) {
6233 LOG_ERROR("Out of memory");
6234 return JIM_ERR;
6235 }
6236
6237 /* set empty smp cluster */
6238 target->smp_targets = &empty_smp_targets;
6239
6240 /* set target number */
6241 target->target_number = new_target_number();
6242
6243 /* give each target instance its own copy of the target_type struct */
6244 target->type = malloc(sizeof(struct target_type));
6245 if (!target->type) {
6246 LOG_ERROR("Out of memory");
6247 free(target);
6248 return JIM_ERR;
6249 }
6250
6251 memcpy(target->type, target_types[x], sizeof(struct target_type));
6252
6253 /* default to first core, override with -coreid */
6254 target->coreid = 0;
6255
6256 target->working_area = 0x0;
6257 target->working_area_size = 0x0;
6258 target->working_areas = NULL;
6259 target->backup_working_area = 0;
6260
6261 target->state = TARGET_UNKNOWN;
6262 target->debug_reason = DBG_REASON_UNDEFINED;
6263 target->reg_cache = NULL;
6264 target->breakpoints = NULL;
6265 target->watchpoints = NULL;
6266 target->next = NULL;
6267 target->arch_info = NULL;
6268
6269 target->verbose_halt_msg = true;
6270
6271 target->halt_issued = false;
6272
6273 /* initialize trace information */
6274 target->trace_info = calloc(1, sizeof(struct trace));
6275 if (!target->trace_info) {
6276 LOG_ERROR("Out of memory");
6277 free(target->type);
6278 free(target);
6279 return JIM_ERR;
6280 }
6281
6282 target->dbgmsg = NULL;
6283 target->dbg_msg_enabled = 0;
6284
6285 target->endianness = TARGET_ENDIAN_UNKNOWN;
6286
6287 target->rtos = NULL;
6288 target->rtos_auto_detect = false;
6289
6290 target->gdb_port_override = NULL;
6291 target->gdb_max_connections = 1;
6292
6293 /* Do the rest as "configure" options */
6294 goi->isconfigure = 1;
6295 e = target_configure(goi, target);
6296
6297 if (e == JIM_OK) {
6298 if (target->has_dap) {
6299 if (!target->dap_configured) {
6300 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6301 e = JIM_ERR;
6302 }
6303 } else {
6304 if (!target->tap_configured) {
6305 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6306 e = JIM_ERR;
6307 }
6308 }
6309 /* tap must be set after target was configured */
6310 if (!target->tap)
6311 e = JIM_ERR;
6312 }
6313
6314 if (e != JIM_OK) {
6315 rtos_destroy(target);
6316 free(target->gdb_port_override);
6317 free(target->trace_info);
6318 free(target->type);
6319 free(target);
6320 return e;
6321 }
6322
6323 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6324 /* default endian to little if not specified */
6325 target->endianness = TARGET_LITTLE_ENDIAN;
6326 }
6327
6328 cp = Jim_GetString(new_cmd, NULL);
6329 target->cmd_name = strdup(cp);
6330 if (!target->cmd_name) {
6331 LOG_ERROR("Out of memory");
6332 rtos_destroy(target);
6333 free(target->gdb_port_override);
6334 free(target->trace_info);
6335 free(target->type);
6336 free(target);
6337 return JIM_ERR;
6338 }
6339
6340 if (target->type->target_create) {
6341 e = (*(target->type->target_create))(target, goi->interp);
6342 if (e != ERROR_OK) {
6343 LOG_DEBUG("target_create failed");
6344 free(target->cmd_name);
6345 rtos_destroy(target);
6346 free(target->gdb_port_override);
6347 free(target->trace_info);
6348 free(target->type);
6349 free(target);
6350 return JIM_ERR;
6351 }
6352 }
6353
6354 /* create the target specific commands */
6355 if (target->type->commands) {
6356 e = register_commands(cmd_ctx, NULL, target->type->commands);
6357 if (e != ERROR_OK)
6358 LOG_ERROR("unable to register '%s' commands", cp);
6359 }
6360
6361 /* now - create the new target name command */
6362 const struct command_registration target_subcommands[] = {
6363 {
6364 .chain = target_instance_command_handlers,
6365 },
6366 {
6367 .chain = target->type->commands,
6368 },
6369 COMMAND_REGISTRATION_DONE
6370 };
6371 const struct command_registration target_commands[] = {
6372 {
6373 .name = cp,
6374 .mode = COMMAND_ANY,
6375 .help = "target command group",
6376 .usage = "",
6377 .chain = target_subcommands,
6378 },
6379 COMMAND_REGISTRATION_DONE
6380 };
6381 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6382 if (e != ERROR_OK) {
6383 if (target->type->deinit_target)
6384 target->type->deinit_target(target);
6385 free(target->cmd_name);
6386 rtos_destroy(target);
6387 free(target->gdb_port_override);
6388 free(target->trace_info);
6389 free(target->type);
6390 free(target);
6391 return JIM_ERR;
6392 }
6393
6394 /* append to end of list */
6395 append_to_list_all_targets(target);
6396
6397 cmd_ctx->current_target = target;
6398 return JIM_OK;
6399 }
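/*
 * Illustrative config-stage usage of "target create" (names are placeholders;
 * non-DAP target types take -chain-position <tap> instead of -dap):
 *
 *   target create $_CHIPNAME.cpu cortex_m -dap $_CHIPNAME.dap
 */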
6400
6401 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6402 {
6403 if (argc != 1) {
6404 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6405 return JIM_ERR;
6406 }
6407 struct command_context *cmd_ctx = current_command_context(interp);
6408 assert(cmd_ctx);
6409
6410 struct target *target = get_current_target_or_null(cmd_ctx);
6411 if (target)
6412 Jim_SetResultString(interp, target_name(target), -1);
6413 return JIM_OK;
6414 }
6415
6416 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6417 {
6418 if (argc != 1) {
6419 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6420 return JIM_ERR;
6421 }
6422 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6423 for (unsigned x = 0; target_types[x]; x++) {
6424 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6425 Jim_NewStringObj(interp, target_types[x]->name, -1));
6426 }
6427 return JIM_OK;
6428 }
6429
6430 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6431 {
6432 if (argc != 1) {
6433 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6434 return JIM_ERR;
6435 }
6436 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6437 struct target *target = all_targets;
6438 while (target) {
6439 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6440 Jim_NewStringObj(interp, target_name(target), -1));
6441 target = target->next;
6442 }
6443 return JIM_OK;
6444 }
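/*
 * Illustrative queries (return values depend on the configuration):
 *
 *   target types     ;# list of supported target types
 *   target names     ;# list of configured target names
 *   target current   ;# name of the currently selected target
 */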
6445
6446 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6447 {
6448 int i;
6449 const char *targetname;
6450 int retval, len;
6451 static int smp_group = 1;
6452 struct target *target = NULL;
6453 struct target_list *head, *new;
6454
6455 retval = 0;
6456 LOG_DEBUG("%d", argc);
6457 /* argv[1] = target to associate in smp
6458 * argv[2] = target to associate in smp
6459 * argv[3] ...
6460 */
6461
6462 struct list_head *lh = malloc(sizeof(*lh));
6463 if (!lh) {
6464 LOG_ERROR("Out of memory");
6465 return JIM_ERR;
6466 }
6467 INIT_LIST_HEAD(lh);
6468
6469 for (i = 1; i < argc; i++) {
6470
6471 targetname = Jim_GetString(argv[i], &len);
6472 target = get_target(targetname);
6473 LOG_DEBUG("%s ", targetname);
6474 if (target) {
6475 new = malloc(sizeof(struct target_list));
if (!new) {
LOG_ERROR("Out of memory");
return JIM_ERR;
}
6476 new->target = target;
6477 list_add_tail(&new->lh, lh);
6478 }
6479 }
6480 /* now walk the list of CPUs and put each target into SMP mode */
6481 foreach_smp_target(head, lh) {
6482 target = head->target;
6483 target->smp = smp_group;
6484 target->smp_targets = lh;
6485 }
6486 smp_group++;
6487
6488 if (target && target->rtos)
6489 retval = rtos_smp_init(target);
6490
6491 return retval;
6492 }
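/*
 * Illustrative config usage (target names are placeholders):
 *
 *   target smp $_CHIPNAME.cpu0 $_CHIPNAME.cpu1
 */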
6493
6494
6495 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6496 {
6497 struct jim_getopt_info goi;
6498 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6499 if (goi.argc < 3) {
6500 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6501 "<name> <target_type> [<target_options> ...]");
6502 return JIM_ERR;
6503 }
6504 return target_create(&goi);
6505 }
6506
6507 static const struct command_registration target_subcommand_handlers[] = {
6508 {
6509 .name = "init",
6510 .mode = COMMAND_CONFIG,
6511 .handler = handle_target_init_command,
6512 .help = "initialize targets",
6513 .usage = "",
6514 },
6515 {
6516 .name = "create",
6517 .mode = COMMAND_CONFIG,
6518 .jim_handler = jim_target_create,
6519 .usage = "name type '-chain-position' name [options ...]",
6520 .help = "Creates and selects a new target",
6521 },
6522 {
6523 .name = "current",
6524 .mode = COMMAND_ANY,
6525 .jim_handler = jim_target_current,
6526 .help = "Returns the currently selected target",
6527 },
6528 {
6529 .name = "types",
6530 .mode = COMMAND_ANY,
6531 .jim_handler = jim_target_types,
6532 .help = "Returns the available target types as "
6533 "a list of strings",
6534 },
6535 {
6536 .name = "names",
6537 .mode = COMMAND_ANY,
6538 .jim_handler = jim_target_names,
6539 .help = "Returns the names of all targets as a list of strings",
6540 },
6541 {
6542 .name = "smp",
6543 .mode = COMMAND_ANY,
6544 .jim_handler = jim_target_smp,
6545 .usage = "targetname1 targetname2 ...",
6546 .help = "gather several target in a smp list"
6547 },
6548
6549 COMMAND_REGISTRATION_DONE
6550 };
6551
6552 struct fast_load {
6553 target_addr_t address;
6554 uint8_t *data;
6555 int length;
6556
6557 };
6558
6559 static int fastload_num;
6560 static struct fast_load *fastload;
6561
6562 static void free_fastload(void)
6563 {
6564 if (fastload) {
6565 for (int i = 0; i < fastload_num; i++)
6566 free(fastload[i].data);
6567 free(fastload);
6568 fastload = NULL;
6569 }
6570 }
6571
6572 COMMAND_HANDLER(handle_fast_load_image_command)
6573 {
6574 uint8_t *buffer;
6575 size_t buf_cnt;
6576 uint32_t image_size;
6577 target_addr_t min_address = 0;
6578 target_addr_t max_address = -1;
6579
6580 struct image image;
6581
6582 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6583 &image, &min_address, &max_address);
6584 if (retval != ERROR_OK)
6585 return retval;
6586
6587 struct duration bench;
6588 duration_start(&bench);
6589
6590 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6591 if (retval != ERROR_OK)
6592 return retval;
6593
6594 image_size = 0x0;
6595 retval = ERROR_OK;
6596 fastload_num = image.num_sections;
6597 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6598 if (!fastload) {
6599 command_print(CMD, "out of memory");
6600 image_close(&image);
6601 return ERROR_FAIL;
6602 }
6603 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6604 for (unsigned int i = 0; i < image.num_sections; i++) {
6605 buffer = malloc(image.sections[i].size);
6606 if (!buffer) {
6607 command_print(CMD, "error allocating buffer for section (%d bytes)",
6608 (int)(image.sections[i].size));
6609 retval = ERROR_FAIL;
6610 break;
6611 }
6612
6613 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6614 if (retval != ERROR_OK) {
6615 free(buffer);
6616 break;
6617 }
6618
6619 uint32_t offset = 0;
6620 uint32_t length = buf_cnt;
6621
6622 /* DANGER!!! beware of unsigned comparison here!!! */
6623
6624 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6625 (image.sections[i].base_address < max_address)) {
6626 if (image.sections[i].base_address < min_address) {
6627 /* clip addresses below */
6628 offset += min_address-image.sections[i].base_address;
6629 length -= offset;
6630 }
6631
6632 if (image.sections[i].base_address + buf_cnt > max_address)
6633 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6634
6635 fastload[i].address = image.sections[i].base_address + offset;
6636 fastload[i].data = malloc(length);
6637 if (!fastload[i].data) {
6638 free(buffer);
6639 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6640 length);
6641 retval = ERROR_FAIL;
6642 break;
6643 }
6644 memcpy(fastload[i].data, buffer + offset, length);
6645 fastload[i].length = length;
6646
6647 image_size += length;
6648 command_print(CMD, "%u bytes written at address 0x%8.8x",
6649 (unsigned int)length,
6650 ((unsigned int)(image.sections[i].base_address + offset)));
6651 }
6652
6653 free(buffer);
6654 }
6655
6656 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6657 command_print(CMD, "Loaded %" PRIu32 " bytes "
6658 "in %fs (%0.3f KiB/s)", image_size,
6659 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6660
6661 command_print(CMD,
6662 "WARNING: image has not been loaded to target!"
6663 "You can issue a 'fast_load' to finish loading.");
6664 }
6665
6666 image_close(&image);
6667
6668 if (retval != ERROR_OK)
6669 free_fastload();
6670
6671 return retval;
6672 }
6673
6674 COMMAND_HANDLER(handle_fast_load_command)
6675 {
6676 if (CMD_ARGC > 0)
6677 return ERROR_COMMAND_SYNTAX_ERROR;
6678 if (!fastload) {
6679 LOG_ERROR("No image in memory");
6680 return ERROR_FAIL;
6681 }
6682 int i;
6683 int64_t ms = timeval_ms();
6684 int size = 0;
6685 int retval = ERROR_OK;
6686 for (i = 0; i < fastload_num; i++) {
6687 struct target *target = get_current_target(CMD_CTX);
6688 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6689 (unsigned int)(fastload[i].address),
6690 (unsigned int)(fastload[i].length));
6691 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6692 if (retval != ERROR_OK)
6693 break;
6694 size += fastload[i].length;
6695 }
6696 if (retval == ERROR_OK) {
6697 int64_t after = timeval_ms();
6698 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6699 }
6700 return retval;
6701 }
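/*
 * Illustrative two-step workflow (file name and address are placeholders):
 *
 *   fast_load_image firmware.bin 0x08000000 bin   ;# stage the image in host RAM
 *   fast_load                                     ;# push it to the current target
 */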
6702
6703 static const struct command_registration target_command_handlers[] = {
6704 {
6705 .name = "targets",
6706 .handler = handle_targets_command,
6707 .mode = COMMAND_ANY,
6708 .help = "change current default target (one parameter) "
6709 "or prints table of all targets (no parameters)",
6710 .usage = "[target]",
6711 },
6712 {
6713 .name = "target",
6714 .mode = COMMAND_CONFIG,
6715 .help = "configure target",
6716 .chain = target_subcommand_handlers,
6717 .usage = "",
6718 },
6719 COMMAND_REGISTRATION_DONE
6720 };
6721
6722 int target_register_commands(struct command_context *cmd_ctx)
6723 {
6724 return register_commands(cmd_ctx, NULL, target_command_handlers);
6725 }
6726
6727 static bool target_reset_nag = true;
6728
6729 bool get_target_reset_nag(void)
6730 {
6731 return target_reset_nag;
6732 }
6733
6734 COMMAND_HANDLER(handle_target_reset_nag)
6735 {
6736 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6737 &target_reset_nag, "Nag after each reset about options to improve "
6738 "performance");
6739 }
6740
6741 COMMAND_HANDLER(handle_ps_command)
6742 {
6743 struct target *target = get_current_target(CMD_CTX);
6744 char *display;
6745 if (target->state != TARGET_HALTED) {
6746 LOG_INFO("target not halted !!");
6747 return ERROR_OK;
6748 }
6749
6750 if ((target->rtos) && (target->rtos->type)
6751 && (target->rtos->type->ps_command)) {
6752 display = target->rtos->type->ps_command(target);
6753 command_print(CMD, "%s", display);
6754 free(display);
6755 return ERROR_OK;
6756 } else {
6757 LOG_INFO("no RTOS configured, or the RTOS does not support 'ps'");
6758 return ERROR_TARGET_FAILURE;
6759 }
6760 }
6761
6762 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6763 {
6764 if (text)
6765 command_print_sameline(cmd, "%s", text);
6766 for (int i = 0; i < size; i++)
6767 command_print_sameline(cmd, " %02x", buf[i]);
6768 command_print(cmd, " ");
6769 }
6770
6771 COMMAND_HANDLER(handle_test_mem_access_command)
6772 {
6773 struct target *target = get_current_target(CMD_CTX);
6774 uint32_t test_size;
6775 int retval = ERROR_OK;
6776
6777 if (target->state != TARGET_HALTED) {
6778 LOG_INFO("target not halted !!");
6779 return ERROR_FAIL;
6780 }
6781
6782 if (CMD_ARGC != 1)
6783 return ERROR_COMMAND_SYNTAX_ERROR;
6784
6785 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6786
6787 /* Test reads */
6788 size_t num_bytes = test_size + 4;
6789
6790 struct working_area *wa = NULL;
6791 retval = target_alloc_working_area(target, num_bytes, &wa);
6792 if (retval != ERROR_OK) {
6793 LOG_ERROR("Not enough working area");
6794 return ERROR_FAIL;
6795 }
6796
6797 uint8_t *test_pattern = malloc(num_bytes);
if (!test_pattern) {
LOG_ERROR("Out of memory");
target_free_working_area(target, wa);
return ERROR_FAIL;
}
6798
6799 for (size_t i = 0; i < num_bytes; i++)
6800 test_pattern[i] = rand();
6801
6802 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6803 if (retval != ERROR_OK) {
6804 LOG_ERROR("Test pattern write failed");
6805 goto out;
6806 }
6807
6808 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6809 for (int size = 1; size <= 4; size *= 2) {
6810 for (int offset = 0; offset < 4; offset++) {
6811 uint32_t count = test_size / size;
6812 size_t host_bufsiz = (count + 2) * size + host_offset;
6813 uint8_t *read_ref = malloc(host_bufsiz);
6814 uint8_t *read_buf = malloc(host_bufsiz);
6815
6816 for (size_t i = 0; i < host_bufsiz; i++) {
6817 read_ref[i] = rand();
6818 read_buf[i] = read_ref[i];
6819 }
6820 command_print_sameline(CMD,
6821 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6822 size, offset, host_offset ? "un" : "");
6823
6824 struct duration bench;
6825 duration_start(&bench);
6826
6827 retval = target_read_memory(target, wa->address + offset, size, count,
6828 read_buf + size + host_offset);
6829
6830 duration_measure(&bench);
6831
6832 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6833 command_print(CMD, "Unsupported alignment");
6834 goto next;
6835 } else if (retval != ERROR_OK) {
6836 command_print(CMD, "Memory read failed");
6837 goto next;
6838 }
6839
6840 /* replay on host */
6841 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6842
6843 /* check result */
6844 int result = memcmp(read_ref, read_buf, host_bufsiz);
6845 if (result == 0) {
6846 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6847 duration_elapsed(&bench),
6848 duration_kbps(&bench, count * size));
6849 } else {
6850 command_print(CMD, "Compare failed");
6851 binprint(CMD, "ref:", read_ref, host_bufsiz);
6852 binprint(CMD, "buf:", read_buf, host_bufsiz);
6853 }
6854 next:
6855 free(read_ref);
6856 free(read_buf);
6857 }
6858 }
6859 }
6860
6861 out:
6862 free(test_pattern);
6863
6864 target_free_working_area(target, wa);
6865
6866 /* Test writes */
6867 num_bytes = test_size + 4 + 4 + 4;
6868
6869 retval = target_alloc_working_area(target, num_bytes, &wa);
6870 if (retval != ERROR_OK) {
6871 LOG_ERROR("Not enough working area");
6872 return ERROR_FAIL;
6873 }
6874
6875 test_pattern = malloc(num_bytes);
if (!test_pattern) {
LOG_ERROR("Out of memory");
target_free_working_area(target, wa);
return ERROR_FAIL;
}
6876
6877 for (size_t i = 0; i < num_bytes; i++)
6878 test_pattern[i] = rand();
6879
6880 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6881 for (int size = 1; size <= 4; size *= 2) {
6882 for (int offset = 0; offset < 4; offset++) {
6883 uint32_t count = test_size / size;
6884 size_t host_bufsiz = count * size + host_offset;
6885 uint8_t *read_ref = malloc(num_bytes);
6886 uint8_t *read_buf = malloc(num_bytes);
6887 uint8_t *write_buf = malloc(host_bufsiz);
6888
6889 for (size_t i = 0; i < host_bufsiz; i++)
6890 write_buf[i] = rand();
6891 command_print_sameline(CMD,
6892 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6893 size, offset, host_offset ? "un" : "");
6894
6895 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6896 if (retval != ERROR_OK) {
6897 command_print(CMD, "Test pattern write failed");
6898 goto nextw;
6899 }
6900
6901 /* replay on host */
6902 memcpy(read_ref, test_pattern, num_bytes);
6903 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6904
6905 struct duration bench;
6906 duration_start(&bench);
6907
6908 retval = target_write_memory(target, wa->address + size + offset, size, count,
6909 write_buf + host_offset);
6910
6911 duration_measure(&bench);
6912
6913 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6914 command_print(CMD, "Unsupported alignment");
6915 goto nextw;
6916 } else if (retval != ERROR_OK) {
6917 command_print(CMD, "Memory write failed");
6918 goto nextw;
6919 }
6920
6921 /* read back */
6922 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6923 if (retval != ERROR_OK) {
6924 command_print(CMD, "Test pattern write failed");
6925 goto nextw;
6926 }
6927
6928 /* check result */
6929 int result = memcmp(read_ref, read_buf, num_bytes);
6930 if (result == 0) {
6931 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6932 duration_elapsed(&bench),
6933 duration_kbps(&bench, count * size));
6934 } else {
6935 command_print(CMD, "Compare failed");
6936 binprint(CMD, "ref:", read_ref, num_bytes);
6937 binprint(CMD, "buf:", read_buf, num_bytes);
6938 }
6939 nextw:
6940 free(read_ref);
6941 free(read_buf);
6942 }
6943 }
6944 }
6945
6946 free(test_pattern);
6947
6948 target_free_working_area(target, wa);
6949 return retval;
6950 }
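/*
 * Illustrative usage (the size in bytes is a placeholder; the target must be
 * halted and must have enough working area):
 *
 *   test_mem_access 0x400
 */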
6951
6952 static const struct command_registration target_exec_command_handlers[] = {
6953 {
6954 .name = "fast_load_image",
6955 .handler = handle_fast_load_image_command,
6956 .mode = COMMAND_ANY,
6957 .help = "Load image into server memory for later use by "
6958 "fast_load; primarily for profiling",
6959 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6960 "[min_address [max_length]]",
6961 },
6962 {
6963 .name = "fast_load",
6964 .handler = handle_fast_load_command,
6965 .mode = COMMAND_EXEC,
6966 .help = "loads active fast load image to current target "
6967 "- mainly for profiling purposes",
6968 .usage = "",
6969 },
6970 {
6971 .name = "profile",
6972 .handler = handle_profile_command,
6973 .mode = COMMAND_EXEC,
6974 .usage = "seconds filename [start end]",
6975 .help = "profiling samples the CPU PC",
6976 },
6977 /** @todo don't register virt2phys() unless target supports it */
6978 {
6979 .name = "virt2phys",
6980 .handler = handle_virt2phys_command,
6981 .mode = COMMAND_ANY,
6982 .help = "translate a virtual address into a physical address",
6983 .usage = "virtual_address",
6984 },
6985 {
6986 .name = "reg",
6987 .handler = handle_reg_command,
6988 .mode = COMMAND_EXEC,
6989 .help = "display (reread from target with \"force\") or set a register; "
6990 "with no arguments, displays all registers and their values",
6991 .usage = "[(register_number|register_name) [(value|'force')]]",
6992 },
6993 {
6994 .name = "poll",
6995 .handler = handle_poll_command,
6996 .mode = COMMAND_EXEC,
6997 .help = "poll target state; or reconfigure background polling",
6998 .usage = "['on'|'off']",
6999 },
7000 {
7001 .name = "wait_halt",
7002 .handler = handle_wait_halt_command,
7003 .mode = COMMAND_EXEC,
7004 .help = "wait up to the specified number of milliseconds "
7005 "(default 5000) for a previously requested halt",
7006 .usage = "[milliseconds]",
7007 },
7008 {
7009 .name = "halt",
7010 .handler = handle_halt_command,
7011 .mode = COMMAND_EXEC,
7012 .help = "request target to halt, then wait up to the specified "
7013 "number of milliseconds (default 5000) for it to complete",
7014 .usage = "[milliseconds]",
7015 },
7016 {
7017 .name = "resume",
7018 .handler = handle_resume_command,
7019 .mode = COMMAND_EXEC,
7020 .help = "resume target execution from current PC or address",
7021 .usage = "[address]",
7022 },
7023 {
7024 .name = "reset",
7025 .handler = handle_reset_command,
7026 .mode = COMMAND_EXEC,
7027 .usage = "[run|halt|init]",
7028 .help = "Reset all targets into the specified mode. "
7029 "Default reset mode is run, if not given.",
7030 },
7031 {
7032 .name = "soft_reset_halt",
7033 .handler = handle_soft_reset_halt_command,
7034 .mode = COMMAND_EXEC,
7035 .usage = "",
7036 .help = "halt the target and do a soft reset",
7037 },
7038 {
7039 .name = "step",
7040 .handler = handle_step_command,
7041 .mode = COMMAND_EXEC,
7042 .help = "step one instruction from current PC or address",
7043 .usage = "[address]",
7044 },
7045 {
7046 .name = "mdd",
7047 .handler = handle_md_command,
7048 .mode = COMMAND_EXEC,
7049 .help = "display memory double-words",
7050 .usage = "['phys'] address [count]",
7051 },
7052 {
7053 .name = "mdw",
7054 .handler = handle_md_command,
7055 .mode = COMMAND_EXEC,
7056 .help = "display memory words",
7057 .usage = "['phys'] address [count]",
7058 },
7059 {
7060 .name = "mdh",
7061 .handler = handle_md_command,
7062 .mode = COMMAND_EXEC,
7063 .help = "display memory half-words",
7064 .usage = "['phys'] address [count]",
7065 },
7066 {
7067 .name = "mdb",
7068 .handler = handle_md_command,
7069 .mode = COMMAND_EXEC,
7070 .help = "display memory bytes",
7071 .usage = "['phys'] address [count]",
7072 },
7073 {
7074 .name = "mwd",
7075 .handler = handle_mw_command,
7076 .mode = COMMAND_EXEC,
7077 .help = "write memory double-word",
7078 .usage = "['phys'] address value [count]",
7079 },
7080 {
7081 .name = "mww",
7082 .handler = handle_mw_command,
7083 .mode = COMMAND_EXEC,
7084 .help = "write memory word",
7085 .usage = "['phys'] address value [count]",
7086 },
7087 {
7088 .name = "mwh",
7089 .handler = handle_mw_command,
7090 .mode = COMMAND_EXEC,
7091 .help = "write memory half-word",
7092 .usage = "['phys'] address value [count]",
7093 },
7094 {
7095 .name = "mwb",
7096 .handler = handle_mw_command,
7097 .mode = COMMAND_EXEC,
7098 .help = "write memory byte",
7099 .usage = "['phys'] address value [count]",
7100 },
7101 {
7102 .name = "bp",
7103 .handler = handle_bp_command,
7104 .mode = COMMAND_EXEC,
7105 .help = "list or set hardware or software breakpoint",
7106 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
7107 },
7108 {
7109 .name = "rbp",
7110 .handler = handle_rbp_command,
7111 .mode = COMMAND_EXEC,
7112 .help = "remove breakpoint",
7113 .usage = "'all' | address",
7114 },
7115 {
7116 .name = "wp",
7117 .handler = handle_wp_command,
7118 .mode = COMMAND_EXEC,
7119 .help = "list (no params) or create watchpoints",
7120 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
7121 },
7122 {
7123 .name = "rwp",
7124 .handler = handle_rwp_command,
7125 .mode = COMMAND_EXEC,
7126 .help = "remove watchpoint",
7127 .usage = "address",
7128 },
7129 {
7130 .name = "load_image",
7131 .handler = handle_load_image_command,
7132 .mode = COMMAND_EXEC,
7133 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
7134 "[min_address] [max_length]",
7135 },
7136 {
7137 .name = "dump_image",
7138 .handler = handle_dump_image_command,
7139 .mode = COMMAND_EXEC,
7140 .usage = "filename address size",
7141 },
7142 {
7143 .name = "verify_image_checksum",
7144 .handler = handle_verify_image_checksum_command,
7145 .mode = COMMAND_EXEC,
7146 .usage = "filename [offset [type]]",
7147 },
7148 {
7149 .name = "verify_image",
7150 .handler = handle_verify_image_command,
7151 .mode = COMMAND_EXEC,
7152 .usage = "filename [offset [type]]",
7153 },
7154 {
7155 .name = "test_image",
7156 .handler = handle_test_image_command,
7157 .mode = COMMAND_EXEC,
7158 .usage = "filename [offset [type]]",
7159 },
7160 {
7161 .name = "get_reg",
7162 .mode = COMMAND_EXEC,
7163 .jim_handler = target_jim_get_reg,
7164 .help = "Get register values from the target",
7165 .usage = "list",
7166 },
7167 {
7168 .name = "set_reg",
7169 .mode = COMMAND_EXEC,
7170 .jim_handler = target_jim_set_reg,
7171 .help = "Set target register values",
7172 .usage = "dict",
7173 },
7174 {
7175 .name = "read_memory",
7176 .mode = COMMAND_EXEC,
7177 .jim_handler = target_jim_read_memory,
7178 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
7179 .usage = "address width count ['phys']",
7180 },
7181 {
7182 .name = "write_memory",
7183 .mode = COMMAND_EXEC,
7184 .jim_handler = target_jim_write_memory,
7185 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
7186 .usage = "address width data ['phys']",
7187 },
7188 {
7189 .name = "reset_nag",
7190 .handler = handle_target_reset_nag,
7191 .mode = COMMAND_ANY,
7192 .help = "Nag after each reset about options that could have been "
7193 "enabled to improve performance.",
7194 .usage = "['enable'|'disable']",
7195 },
7196 {
7197 .name = "ps",
7198 .handler = handle_ps_command,
7199 .mode = COMMAND_EXEC,
7200 .help = "list all tasks",
7201 .usage = "",
7202 },
7203 {
7204 .name = "test_mem_access",
7205 .handler = handle_test_mem_access_command,
7206 .mode = COMMAND_EXEC,
7207 .help = "Test the target's memory access functions",
7208 .usage = "size",
7209 },
7210
7211 COMMAND_REGISTRATION_DONE
7212 };
7213 static int target_register_user_commands(struct command_context *cmd_ctx)
7214 {
7215 int retval = ERROR_OK;
7216 retval = target_request_register_commands(cmd_ctx);
7217 if (retval != ERROR_OK)
7218 return retval;
7219
7220 retval = trace_register_commands(cmd_ctx);
7221 if (retval != ERROR_OK)
7222 return retval;
7223
7224
7225 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7226 }
