Call poll at a fixed interval.
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75
76 /* targets */
77 extern struct target_type arm7tdmi_target;
78 extern struct target_type arm720t_target;
79 extern struct target_type arm9tdmi_target;
80 extern struct target_type arm920t_target;
81 extern struct target_type arm966e_target;
82 extern struct target_type arm946e_target;
83 extern struct target_type arm926ejs_target;
84 extern struct target_type fa526_target;
85 extern struct target_type feroceon_target;
86 extern struct target_type dragonite_target;
87 extern struct target_type xscale_target;
88 extern struct target_type cortexm_target;
89 extern struct target_type cortexa_target;
90 extern struct target_type aarch64_target;
91 extern struct target_type cortexr4_target;
92 extern struct target_type arm11_target;
93 extern struct target_type ls1_sap_target;
94 extern struct target_type mips_m4k_target;
95 extern struct target_type mips_mips64_target;
96 extern struct target_type avr_target;
97 extern struct target_type dsp563xx_target;
98 extern struct target_type dsp5680xx_target;
99 extern struct target_type testee_target;
100 extern struct target_type avr32_ap7k_target;
101 extern struct target_type hla_target;
102 extern struct target_type nds32_v2_target;
103 extern struct target_type nds32_v3_target;
104 extern struct target_type nds32_v3m_target;
105 extern struct target_type or1k_target;
106 extern struct target_type quark_x10xx_target;
107 extern struct target_type quark_d20xx_target;
108 extern struct target_type stm8_target;
109 extern struct target_type riscv_target;
110 extern struct target_type mem_ap_target;
111 extern struct target_type esirisc_target;
112 extern struct target_type arcv2_target;
113
114 static struct target_type *target_types[] = {
115 &arm7tdmi_target,
116 &arm9tdmi_target,
117 &arm920t_target,
118 &arm720t_target,
119 &arm966e_target,
120 &arm946e_target,
121 &arm926ejs_target,
122 &fa526_target,
123 &feroceon_target,
124 &dragonite_target,
125 &xscale_target,
126 &cortexm_target,
127 &cortexa_target,
128 &cortexr4_target,
129 &arm11_target,
130 &ls1_sap_target,
131 &mips_m4k_target,
132 &avr_target,
133 &dsp563xx_target,
134 &dsp5680xx_target,
135 &testee_target,
136 &avr32_ap7k_target,
137 &hla_target,
138 &nds32_v2_target,
139 &nds32_v3_target,
140 &nds32_v3m_target,
141 &or1k_target,
142 &quark_x10xx_target,
143 &quark_d20xx_target,
144 &stm8_target,
145 &riscv_target,
146 &mem_ap_target,
147 &esirisc_target,
148 &arcv2_target,
149 &aarch64_target,
150 &mips_mips64_target,
151 NULL,
152 };
153
154 struct target *all_targets;
155 static struct target_event_callback *target_event_callbacks;
156 static struct target_timer_callback *target_timer_callbacks;
157 static int64_t target_timer_next_event_value;
158 static LIST_HEAD(target_reset_callback_list);
159 static LIST_HEAD(target_trace_callback_list);
160 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
161
162 static const struct jim_nvp nvp_assert[] = {
163 { .name = "assert", NVP_ASSERT },
164 { .name = "deassert", NVP_DEASSERT },
165 { .name = "T", NVP_ASSERT },
166 { .name = "F", NVP_DEASSERT },
167 { .name = "t", NVP_ASSERT },
168 { .name = "f", NVP_DEASSERT },
169 { .name = NULL, .value = -1 }
170 };
171
172 static const struct jim_nvp nvp_error_target[] = {
173 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
174 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
175 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
176 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
177 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
178 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
179 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
180 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
181 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
182 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
183 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
184 { .value = -1, .name = NULL }
185 };
186
187 static const char *target_strerror_safe(int err)
188 {
189 const struct jim_nvp *n;
190
191 n = jim_nvp_value2name_simple(nvp_error_target, err);
192 if (!n->name)
193 return "unknown";
194 else
195 return n->name;
196 }
197
198 static const struct jim_nvp nvp_target_event[] = {
199
200 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
201 { .value = TARGET_EVENT_HALTED, .name = "halted" },
202 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
203 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
204 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
205 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
206 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
207
208 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
209 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
210
211 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
212 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
213 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
214 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
215 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
216 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
217 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
218 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
219
220 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
221 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
222 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
223
224 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
225 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
226
227 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
228 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
229
230 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
231 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
232
233 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
234 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
235
236 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
237
238 { .name = NULL, .value = -1 }
239 };
240
241 static const struct jim_nvp nvp_target_state[] = {
242 { .name = "unknown", .value = TARGET_UNKNOWN },
243 { .name = "running", .value = TARGET_RUNNING },
244 { .name = "halted", .value = TARGET_HALTED },
245 { .name = "reset", .value = TARGET_RESET },
246 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
247 { .name = NULL, .value = -1 },
248 };
249
250 static const struct jim_nvp nvp_target_debug_reason[] = {
251 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
252 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
253 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
254 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
255 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
256 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
257 { .name = "program-exit", .value = DBG_REASON_EXIT },
258 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
259 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
260 { .name = NULL, .value = -1 },
261 };
262
263 static const struct jim_nvp nvp_target_endian[] = {
264 { .name = "big", .value = TARGET_BIG_ENDIAN },
265 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
266 { .name = "be", .value = TARGET_BIG_ENDIAN },
267 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
268 { .name = NULL, .value = -1 },
269 };
270
271 static const struct jim_nvp nvp_reset_modes[] = {
272 { .name = "unknown", .value = RESET_UNKNOWN },
273 { .name = "run", .value = RESET_RUN },
274 { .name = "halt", .value = RESET_HALT },
275 { .name = "init", .value = RESET_INIT },
276 { .name = NULL, .value = -1 },
277 };
278
279 const char *debug_reason_name(struct target *t)
280 {
281 const char *cp;
282
283 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
284 t->debug_reason)->name;
285 if (!cp) {
286 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
287 cp = "(*BUG*unknown*BUG*)";
288 }
289 return cp;
290 }
291
292 const char *target_state_name(struct target *t)
293 {
294 const char *cp;
295 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
296 if (!cp) {
297 LOG_ERROR("Invalid target state: %d", (int)(t->state));
298 cp = "(*BUG*unknown*BUG*)";
299 }
300
301 if (!target_was_examined(t) && t->defer_examine)
302 cp = "examine deferred";
303
304 return cp;
305 }
306
307 const char *target_event_name(enum target_event event)
308 {
309 const char *cp;
310 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
311 if (!cp) {
312 LOG_ERROR("Invalid target event: %d", (int)(event));
313 cp = "(*BUG*unknown*BUG*)";
314 }
315 return cp;
316 }
317
318 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
319 {
320 const char *cp;
321 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
322 if (!cp) {
323 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
324 cp = "(*BUG*unknown*BUG*)";
325 }
326 return cp;
327 }
328
329 /* determine the number of the new target */
330 static int new_target_number(void)
331 {
332 struct target *t;
333 int x;
334
335 /* number is 0 based */
336 x = -1;
337 t = all_targets;
338 while (t) {
339 if (x < t->target_number)
340 x = t->target_number;
341 t = t->next;
342 }
343 return x + 1;
344 }
345
346 static void append_to_list_all_targets(struct target *target)
347 {
348 struct target **t = &all_targets;
349
350 while (*t)
351 t = &((*t)->next);
352 *t = target;
353 }
354
355 /* read a uint64_t from a buffer in target memory endianness */
356 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
357 {
358 if (target->endianness == TARGET_LITTLE_ENDIAN)
359 return le_to_h_u64(buffer);
360 else
361 return be_to_h_u64(buffer);
362 }
363
364 /* read a uint32_t from a buffer in target memory endianness */
365 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
366 {
367 if (target->endianness == TARGET_LITTLE_ENDIAN)
368 return le_to_h_u32(buffer);
369 else
370 return be_to_h_u32(buffer);
371 }
372
373 /* read a uint24_t from a buffer in target memory endianness */
374 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
375 {
376 if (target->endianness == TARGET_LITTLE_ENDIAN)
377 return le_to_h_u24(buffer);
378 else
379 return be_to_h_u24(buffer);
380 }
381
382 /* read a uint16_t from a buffer in target memory endianness */
383 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
384 {
385 if (target->endianness == TARGET_LITTLE_ENDIAN)
386 return le_to_h_u16(buffer);
387 else
388 return be_to_h_u16(buffer);
389 }
390
391 /* write a uint64_t to a buffer in target memory endianness */
392 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
393 {
394 if (target->endianness == TARGET_LITTLE_ENDIAN)
395 h_u64_to_le(buffer, value);
396 else
397 h_u64_to_be(buffer, value);
398 }
399
400 /* write a uint32_t to a buffer in target memory endianness */
401 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
402 {
403 if (target->endianness == TARGET_LITTLE_ENDIAN)
404 h_u32_to_le(buffer, value);
405 else
406 h_u32_to_be(buffer, value);
407 }
408
409 /* write a uint24_t to a buffer in target memory endianness */
410 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
411 {
412 if (target->endianness == TARGET_LITTLE_ENDIAN)
413 h_u24_to_le(buffer, value);
414 else
415 h_u24_to_be(buffer, value);
416 }
417
418 /* write a uint16_t to a buffer in target memory endianness */
419 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
420 {
421 if (target->endianness == TARGET_LITTLE_ENDIAN)
422 h_u16_to_le(buffer, value);
423 else
424 h_u16_to_be(buffer, value);
425 }
426
427 /* write a uint8_t to a buffer in target memory endianness */
428 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
429 {
430 *buffer = value;
431 }
432
433 /* write a uint64_t array to a buffer in target memory endianness */
434 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
435 {
436 uint32_t i;
437 for (i = 0; i < count; i++)
438 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
439 }
440
441 /* write a uint32_t array to a buffer in target memory endianness */
442 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
443 {
444 uint32_t i;
445 for (i = 0; i < count; i++)
446 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
447 }
448
449 /* write a uint16_t array to a buffer in target memory endianness */
450 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
451 {
452 uint32_t i;
453 for (i = 0; i < count; i++)
454 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
455 }
456
457 /* write a uint64_t array to a buffer in target memory endianness */
458 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
459 {
460 uint32_t i;
461 for (i = 0; i < count; i++)
462 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
463 }
464
465 /* write a uint32_t array to a buffer in target memory endianness */
466 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
467 {
468 uint32_t i;
469 for (i = 0; i < count; i++)
470 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
471 }
472
473 /* write a uint16_t array to a buffer in target memory endianness */
474 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
475 {
476 uint32_t i;
477 for (i = 0; i < count; i++)
478 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
479 }
480
/* return a pointer to a configured target; id is name or number */
struct target *get_target(const char *id)
{
	struct target *target;

	/* try as tcltarget name */
	for (target = all_targets; target; target = target->next) {
		if (!target_name(target))
			continue;
		if (strcmp(id, target_name(target)) == 0)
			return target;
	}

	/* It's OK to remove this fallback sometime after August 2010 or so */

	/* no match, try as number */
	unsigned num;
	if (parse_uint(id, &num) != ERROR_OK)
		return NULL;

	for (target = all_targets; target; target = target->next) {
		if (target->target_number == (int)num) {
			/* numeric lookup is deprecated; steer the user to the name */
			LOG_WARNING("use '%s' as target identifier, not '%u'",
					target_name(target), num);
			return target;
		}
	}

	return NULL;
}
511
512 /* returns a pointer to the n-th configured target */
513 struct target *get_target_by_num(int num)
514 {
515 struct target *target = all_targets;
516
517 while (target) {
518 if (target->target_number == num)
519 return target;
520 target = target->next;
521 }
522
523 return NULL;
524 }
525
/* Like get_current_target_or_null(), but a missing current target is treated
 * as a fatal internal error rather than returned to the caller. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);

	if (!target) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}
537
538 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
539 {
540 return cmd_ctx->current_target_override
541 ? cmd_ctx->current_target_override
542 : cmd_ctx->current_target;
543 }
544
/* Poll the target's state; also watches for completion (or timeout) of a
 * halt previously requested via target_halt(). */
int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	/* If a halt was requested, track its progress: clear the flag once the
	 * target actually halts, or give up after DEFAULT_HALT_TIMEOUT ms and
	 * notify GDB so it does not wait forever. */
	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			int64_t t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}
574
/* Request a halt.  The halt is asynchronous: completion is detected later by
 * target_poll(), which uses halt_issued/halt_issued_time set here. */
int target_halt(struct target *target)
{
	int retval;
	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->halt(target);
	if (retval != ERROR_OK)
		return retval;

	/* record when the halt was issued so target_poll() can time it out */
	target->halt_issued = true;
	target->halt_issued_time = timeval_ms();

	return ERROR_OK;
}
593
594 /**
595 * Make the target (re)start executing using its saved execution
596 * context (possibly with some modifications).
597 *
598 * @param target Which target should start executing.
599 * @param current True to use the target's saved program counter instead
600 * of the address parameter
601 * @param address Optionally used as the program counter.
602 * @param handle_breakpoints True iff breakpoints at the resumption PC
603 * should be skipped. (For example, maybe execution was stopped by
604 * such a breakpoint, in which case it would be counterproductive to
605 * let it re-trigger.
606 * @param debug_execution False if all working areas allocated by OpenOCD
607 * should be released and/or restored to their original contents.
608 * (This would for example be true to run some downloaded "helper"
609 * algorithm code, which resides in one such working buffer and uses
610 * another for data storage.)
611 *
612 * @todo Resolve the ambiguity about what the "debug_execution" flag
613 * signifies. For example, Target implementations don't agree on how
614 * it relates to invalidation of the register cache, or to whether
615 * breakpoints and watchpoints should be enabled. (It would seem wrong
616 * to enable breakpoints when running downloaded "helper" algorithms
617 * (debug_execution true), since the breakpoints would be set to match
618 * target firmware being debugged, not the helper algorithm.... and
619 * enabling them could cause such helpers to malfunction (for example,
620 * by overwriting data with a breakpoint instruction. On the other
621 * hand the infrastructure for running such helpers might use this
622 * procedure but rely on hardware breakpoint to detect termination.)
623 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll = jtag_poll_get_enabled();
	jtag_poll_set_enabled(false);
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	/* restore the caller's polling setting, whatever it was */
	jtag_poll_set_enabled(save_poll);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
660
661 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
662 {
663 char buf[100];
664 int retval;
665 struct jim_nvp *n;
666 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
667 if (!n->name) {
668 LOG_ERROR("invalid reset mode");
669 return ERROR_FAIL;
670 }
671
672 struct target *target;
673 for (target = all_targets; target; target = target->next)
674 target_call_reset_callbacks(target, reset_mode);
675
676 /* disable polling during reset to make reset event scripts
677 * more predictable, i.e. dr/irscan & pathmove in events will
678 * not have JTAG operations injected into the middle of a sequence.
679 */
680 bool save_poll = jtag_poll_get_enabled();
681
682 jtag_poll_set_enabled(false);
683
684 sprintf(buf, "ocd_process_reset %s", n->name);
685 retval = Jim_Eval(cmd->ctx->interp, buf);
686
687 jtag_poll_set_enabled(save_poll);
688
689 if (retval != JIM_OK) {
690 Jim_MakeErrorMessage(cmd->ctx->interp);
691 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
692 return ERROR_FAIL;
693 }
694
695 /* We want any events to be processed before the prompt */
696 retval = target_call_timer_callbacks_now();
697
698 for (target = all_targets; target; target = target->next) {
699 target->type->check_reset(target);
700 target->running_alg = false;
701 }
702
703 return retval;
704 }
705
/* Default virt2phys handler for targets without an MMU: the physical
 * address is simply the virtual address. */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
712
/* Default mmu handler: report that no MMU is present/enabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
718
/* Default examine handler: no hardware probing, just mark the target
 * as examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
724
/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
730
/* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
 * Keep in sync */
int target_examine_one(struct target *target)
{
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target->type->examine(target);
	if (retval != ERROR_OK) {
		/* announce the failure so event scripts can react */
		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
		return retval;
	}

	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return ERROR_OK;
}
747
/* JTAG event callback registered by target_examine() for targets whose TAP
 * was disabled: runs the deferred examination once the TAP is enabled, then
 * unregisters itself (one-shot). */
static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	jtag_unregister_event_callback(jtag_enable_callback, target);

	return target_examine_one(target);
}
759
/* Targets that correctly implement init + examine, i.e.
 * no communication with target during init:
 *
 * XScale
 */
/* Examine all configured targets.  Targets with a disabled TAP get their
 * examination deferred until the TAP is enabled; targets flagged
 * defer_examine are skipped entirely.  Returns the last failure, if any,
 * but keeps examining the remaining targets. */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}

		if (target->defer_examine)
			continue;

		int retval2 = target_examine_one(target);
		if (retval2 != ERROR_OK) {
			LOG_WARNING("target %s examination failed", target_name(target));
			retval = retval2;
		}
	}
	return retval;
}
789
/* Name of the target's type driver (e.g. "cortex_m"). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
794
795 static int target_soft_reset_halt(struct target *target)
796 {
797 if (!target_was_examined(target)) {
798 LOG_ERROR("Target not examined yet");
799 return ERROR_FAIL;
800 }
801 if (!target->type->soft_reset_halt) {
802 LOG_ERROR("Target %s does not support soft_reset_halt",
803 target_name(target));
804 return ERROR_FAIL;
805 }
806 return target->type->soft_reset_halt(target);
807 }
808
809 /**
810 * Downloads a target-specific native code algorithm to the target,
811 * and executes it. * Note that some targets may need to set up, enable,
812 * and tear down a breakpoint (hard or * soft) to detect algorithm
813 * termination, while others may support lower overhead schemes where
814 * soft breakpoints embedded in the algorithm automatically terminate the
815 * algorithm.
816 *
817 * @param target used to run the algorithm
818 * @param num_mem_params
819 * @param mem_params
820 * @param num_reg_params
821 * @param reg_param
822 * @param entry_point
823 * @param exit_point
824 * @param timeout_ms
825 * @param arch_info target-specific description of the algorithm.
826 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* flag the target as busy for the duration of the algorithm run */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
855
856 /**
857 * Executes a target-specific native code algorithm and leaves it running.
858 *
859 * @param target used to run the algorithm
860 * @param num_mem_params
861 * @param mem_params
862 * @param num_reg_params
863 * @param reg_params
864 * @param entry_point
865 * @param exit_point
866 * @param arch_info target-specific description of the algorithm.
867 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	/* only one algorithm may run at a time */
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	/* stays set until target_wait_algorithm() completes successfully */
	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}
899
900 /**
901 * Waits for an algorithm started with target_start_algorithm() to complete.
902 *
903 * @param target used to run the algorithm
904 * @param num_mem_params
905 * @param mem_params
906 * @param num_reg_params
907 * @param reg_params
908 * @param exit_point
909 * @param timeout_ms
910 * @param arch_info target-specific description of the algorithm.
911 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	/* on timeout the algorithm is still running, so keep the flag set;
	 * the caller may wait again */
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
940
941 /**
942 * Streams data to a circular buffer on target intended for consumption by code
943 * running asynchronously on target.
944 *
945 * This is intended for applications where target-specific native code runs
946 * on the target, receives data from the circular buffer, does something with
947 * it (most likely writing it to a flash memory), and advances the circular
948 * buffer pointer.
949 *
950 * This assumes that the helper algorithm has already been loaded to the target,
951 * but has not been started yet. Given memory and register parameters are passed
952 * to the algorithm.
953 *
954 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
955 * following format:
956 *
957 * [buffer_start + 0, buffer_start + 4):
958 * Write Pointer address (aka head). Written and updated by this
959 * routine when new data is written to the circular buffer.
960 * [buffer_start + 4, buffer_start + 8):
961 * Read Pointer address (aka tail). Updated by code running on the
962 * target after it consumes data.
963 * [buffer_start + 8, buffer_start + buffer_size):
964 * Circular buffer contents.
965 *
966 * See contrib/loaders/flash/stm32f1x.S for an example.
967 *
968 * @param target used to run the algorithm
969 * @param buffer address on the host where data to be sent is located
970 * @param count number of blocks to send
971 * @param block_size size in bytes of each block
972 * @param num_mem_params count of memory-based params to pass to algorithm
973 * @param mem_params memory-based params to pass to algorithm
974 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params register-based params to pass to algorithm
976 * @param buffer_start address on the target of the circular buffer structure
977 * @param buffer_size size of the circular buffer structure
978 * @param entry_point address on the target to execute to start the algorithm
979 * @param exit_point address at which to set a breakpoint to catch the
980 * end of the algorithm; can be 0 if target triggers a breakpoint itself
 * @param arch_info target-specific description of the algorithm.
982 */
983
984 int target_run_flash_async_algorithm(struct target *target,
985 const uint8_t *buffer, uint32_t count, int block_size,
986 int num_mem_params, struct mem_param *mem_params,
987 int num_reg_params, struct reg_param *reg_params,
988 uint32_t buffer_start, uint32_t buffer_size,
989 uint32_t entry_point, uint32_t exit_point, void *arch_info)
990 {
991 int retval;
992 int timeout = 0;
993
994 const uint8_t *buffer_orig = buffer;
995
996 /* Set up working area. First word is write pointer, second word is read pointer,
997 * rest is fifo data area. */
998 uint32_t wp_addr = buffer_start;
999 uint32_t rp_addr = buffer_start + 4;
1000 uint32_t fifo_start_addr = buffer_start + 8;
1001 uint32_t fifo_end_addr = buffer_start + buffer_size;
1002
1003 uint32_t wp = fifo_start_addr;
1004 uint32_t rp = fifo_start_addr;
1005
1006 /* validate block_size is 2^n */
1007 assert(!block_size || !(block_size & (block_size - 1)));
1008
1009 retval = target_write_u32(target, wp_addr, wp);
1010 if (retval != ERROR_OK)
1011 return retval;
1012 retval = target_write_u32(target, rp_addr, rp);
1013 if (retval != ERROR_OK)
1014 return retval;
1015
1016 /* Start up algorithm on target and let it idle while writing the first chunk */
1017 retval = target_start_algorithm(target, num_mem_params, mem_params,
1018 num_reg_params, reg_params,
1019 entry_point,
1020 exit_point,
1021 arch_info);
1022
1023 if (retval != ERROR_OK) {
1024 LOG_ERROR("error starting target flash write algorithm");
1025 return retval;
1026 }
1027
1028 while (count > 0) {
1029
1030 retval = target_read_u32(target, rp_addr, &rp);
1031 if (retval != ERROR_OK) {
1032 LOG_ERROR("failed to get read pointer");
1033 break;
1034 }
1035
1036 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1037 (size_t) (buffer - buffer_orig), count, wp, rp);
1038
1039 if (rp == 0) {
1040 LOG_ERROR("flash write algorithm aborted by target");
1041 retval = ERROR_FLASH_OPERATION_FAILED;
1042 break;
1043 }
1044
1045 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1046 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1047 break;
1048 }
1049
1050 /* Count the number of bytes available in the fifo without
1051 * crossing the wrap around. Make sure to not fill it completely,
1052 * because that would make wp == rp and that's the empty condition. */
1053 uint32_t thisrun_bytes;
1054 if (rp > wp)
1055 thisrun_bytes = rp - wp - block_size;
1056 else if (rp > fifo_start_addr)
1057 thisrun_bytes = fifo_end_addr - wp;
1058 else
1059 thisrun_bytes = fifo_end_addr - wp - block_size;
1060
1061 if (thisrun_bytes == 0) {
1062 /* Throttle polling a bit if transfer is (much) faster than flash
1063 * programming. The exact delay shouldn't matter as long as it's
1064 * less than buffer size / flash speed. This is very unlikely to
1065 * run when using high latency connections such as USB. */
1066 alive_sleep(2);
1067
1068 /* to stop an infinite loop on some targets check and increment a timeout
1069 * this issue was observed on a stellaris using the new ICDI interface */
1070 if (timeout++ >= 2500) {
1071 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1072 return ERROR_FLASH_OPERATION_FAILED;
1073 }
1074 continue;
1075 }
1076
1077 /* reset our timeout */
1078 timeout = 0;
1079
1080 /* Limit to the amount of data we actually want to write */
1081 if (thisrun_bytes > count * block_size)
1082 thisrun_bytes = count * block_size;
1083
1084 /* Force end of large blocks to be word aligned */
1085 if (thisrun_bytes >= 16)
1086 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1087
1088 /* Write data to fifo */
1089 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1090 if (retval != ERROR_OK)
1091 break;
1092
1093 /* Update counters and wrap write pointer */
1094 buffer += thisrun_bytes;
1095 count -= thisrun_bytes / block_size;
1096 wp += thisrun_bytes;
1097 if (wp >= fifo_end_addr)
1098 wp = fifo_start_addr;
1099
1100 /* Store updated write pointer to target */
1101 retval = target_write_u32(target, wp_addr, wp);
1102 if (retval != ERROR_OK)
1103 break;
1104
1105 /* Avoid GDB timeouts */
1106 keep_alive();
1107 }
1108
1109 if (retval != ERROR_OK) {
1110 /* abort flash write algorithm on target */
1111 target_write_u32(target, wp_addr, 0);
1112 }
1113
1114 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1115 num_reg_params, reg_params,
1116 exit_point,
1117 10000,
1118 arch_info);
1119
1120 if (retval2 != ERROR_OK) {
1121 LOG_ERROR("error waiting for target flash write algorithm");
1122 retval = retval2;
1123 }
1124
1125 if (retval == ERROR_OK) {
1126 /* check if algorithm set rp = 0 after fifo writer loop finished */
1127 retval = target_read_u32(target, rp_addr, &rp);
1128 if (retval == ERROR_OK && rp == 0) {
1129 LOG_ERROR("flash write algorithm aborted by target");
1130 retval = ERROR_FLASH_OPERATION_FAILED;
1131 }
1132 }
1133
1134 return retval;
1135 }
1136
1137 int target_run_read_async_algorithm(struct target *target,
1138 uint8_t *buffer, uint32_t count, int block_size,
1139 int num_mem_params, struct mem_param *mem_params,
1140 int num_reg_params, struct reg_param *reg_params,
1141 uint32_t buffer_start, uint32_t buffer_size,
1142 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1143 {
1144 int retval;
1145 int timeout = 0;
1146
1147 const uint8_t *buffer_orig = buffer;
1148
1149 /* Set up working area. First word is write pointer, second word is read pointer,
1150 * rest is fifo data area. */
1151 uint32_t wp_addr = buffer_start;
1152 uint32_t rp_addr = buffer_start + 4;
1153 uint32_t fifo_start_addr = buffer_start + 8;
1154 uint32_t fifo_end_addr = buffer_start + buffer_size;
1155
1156 uint32_t wp = fifo_start_addr;
1157 uint32_t rp = fifo_start_addr;
1158
1159 /* validate block_size is 2^n */
1160 assert(!block_size || !(block_size & (block_size - 1)));
1161
1162 retval = target_write_u32(target, wp_addr, wp);
1163 if (retval != ERROR_OK)
1164 return retval;
1165 retval = target_write_u32(target, rp_addr, rp);
1166 if (retval != ERROR_OK)
1167 return retval;
1168
1169 /* Start up algorithm on target */
1170 retval = target_start_algorithm(target, num_mem_params, mem_params,
1171 num_reg_params, reg_params,
1172 entry_point,
1173 exit_point,
1174 arch_info);
1175
1176 if (retval != ERROR_OK) {
1177 LOG_ERROR("error starting target flash read algorithm");
1178 return retval;
1179 }
1180
1181 while (count > 0) {
1182 retval = target_read_u32(target, wp_addr, &wp);
1183 if (retval != ERROR_OK) {
1184 LOG_ERROR("failed to get write pointer");
1185 break;
1186 }
1187
1188 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1189 (size_t)(buffer - buffer_orig), count, wp, rp);
1190
1191 if (wp == 0) {
1192 LOG_ERROR("flash read algorithm aborted by target");
1193 retval = ERROR_FLASH_OPERATION_FAILED;
1194 break;
1195 }
1196
1197 if (((wp - fifo_start_addr) & (block_size - 1)) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1198 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1199 break;
1200 }
1201
1202 /* Count the number of bytes available in the fifo without
1203 * crossing the wrap around. */
1204 uint32_t thisrun_bytes;
1205 if (wp >= rp)
1206 thisrun_bytes = wp - rp;
1207 else
1208 thisrun_bytes = fifo_end_addr - rp;
1209
1210 if (thisrun_bytes == 0) {
1211 /* Throttle polling a bit if transfer is (much) faster than flash
1212 * reading. The exact delay shouldn't matter as long as it's
1213 * less than buffer size / flash speed. This is very unlikely to
1214 * run when using high latency connections such as USB. */
1215 alive_sleep(2);
1216
1217 /* to stop an infinite loop on some targets check and increment a timeout
1218 * this issue was observed on a stellaris using the new ICDI interface */
1219 if (timeout++ >= 2500) {
1220 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1221 return ERROR_FLASH_OPERATION_FAILED;
1222 }
1223 continue;
1224 }
1225
1226 /* Reset our timeout */
1227 timeout = 0;
1228
1229 /* Limit to the amount of data we actually want to read */
1230 if (thisrun_bytes > count * block_size)
1231 thisrun_bytes = count * block_size;
1232
1233 /* Force end of large blocks to be word aligned */
1234 if (thisrun_bytes >= 16)
1235 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1236
1237 /* Read data from fifo */
1238 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1239 if (retval != ERROR_OK)
1240 break;
1241
1242 /* Update counters and wrap write pointer */
1243 buffer += thisrun_bytes;
1244 count -= thisrun_bytes / block_size;
1245 rp += thisrun_bytes;
1246 if (rp >= fifo_end_addr)
1247 rp = fifo_start_addr;
1248
1249 /* Store updated write pointer to target */
1250 retval = target_write_u32(target, rp_addr, rp);
1251 if (retval != ERROR_OK)
1252 break;
1253
1254 /* Avoid GDB timeouts */
1255 keep_alive();
1256
1257 }
1258
1259 if (retval != ERROR_OK) {
1260 /* abort flash write algorithm on target */
1261 target_write_u32(target, rp_addr, 0);
1262 }
1263
1264 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1265 num_reg_params, reg_params,
1266 exit_point,
1267 10000,
1268 arch_info);
1269
1270 if (retval2 != ERROR_OK) {
1271 LOG_ERROR("error waiting for target flash write algorithm");
1272 retval = retval2;
1273 }
1274
1275 if (retval == ERROR_OK) {
1276 /* check if algorithm set wp = 0 after fifo writer loop finished */
1277 retval = target_read_u32(target, wp_addr, &wp);
1278 if (retval == ERROR_OK && wp == 0) {
1279 LOG_ERROR("flash read algorithm aborted by target");
1280 retval = ERROR_FLASH_OPERATION_FAILED;
1281 }
1282 }
1283
1284 return retval;
1285 }
1286
1287 int target_read_memory(struct target *target,
1288 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1289 {
1290 if (!target_was_examined(target)) {
1291 LOG_ERROR("Target not examined yet");
1292 return ERROR_FAIL;
1293 }
1294 if (!target->type->read_memory) {
1295 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1296 return ERROR_FAIL;
1297 }
1298 return target->type->read_memory(target, address, size, count, buffer);
1299 }
1300
1301 int target_read_phys_memory(struct target *target,
1302 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1303 {
1304 if (!target_was_examined(target)) {
1305 LOG_ERROR("Target not examined yet");
1306 return ERROR_FAIL;
1307 }
1308 if (!target->type->read_phys_memory) {
1309 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1310 return ERROR_FAIL;
1311 }
1312 return target->type->read_phys_memory(target, address, size, count, buffer);
1313 }
1314
1315 int target_write_memory(struct target *target,
1316 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1317 {
1318 if (!target_was_examined(target)) {
1319 LOG_ERROR("Target not examined yet");
1320 return ERROR_FAIL;
1321 }
1322 if (!target->type->write_memory) {
1323 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1324 return ERROR_FAIL;
1325 }
1326 return target->type->write_memory(target, address, size, count, buffer);
1327 }
1328
1329 int target_write_phys_memory(struct target *target,
1330 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1331 {
1332 if (!target_was_examined(target)) {
1333 LOG_ERROR("Target not examined yet");
1334 return ERROR_FAIL;
1335 }
1336 if (!target->type->write_phys_memory) {
1337 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1338 return ERROR_FAIL;
1339 }
1340 return target->type->write_phys_memory(target, address, size, count, buffer);
1341 }
1342
1343 int target_add_breakpoint(struct target *target,
1344 struct breakpoint *breakpoint)
1345 {
1346 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1347 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1348 return ERROR_TARGET_NOT_HALTED;
1349 }
1350 return target->type->add_breakpoint(target, breakpoint);
1351 }
1352
1353 int target_add_context_breakpoint(struct target *target,
1354 struct breakpoint *breakpoint)
1355 {
1356 if (target->state != TARGET_HALTED) {
1357 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1358 return ERROR_TARGET_NOT_HALTED;
1359 }
1360 return target->type->add_context_breakpoint(target, breakpoint);
1361 }
1362
1363 int target_add_hybrid_breakpoint(struct target *target,
1364 struct breakpoint *breakpoint)
1365 {
1366 if (target->state != TARGET_HALTED) {
1367 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1368 return ERROR_TARGET_NOT_HALTED;
1369 }
1370 return target->type->add_hybrid_breakpoint(target, breakpoint);
1371 }
1372
/* Remove a previously installed breakpoint. No halt check is performed;
 * removal is delegated directly to the driver. */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
1378
1379 int target_add_watchpoint(struct target *target,
1380 struct watchpoint *watchpoint)
1381 {
1382 if (target->state != TARGET_HALTED) {
1383 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1384 return ERROR_TARGET_NOT_HALTED;
1385 }
1386 return target->type->add_watchpoint(target, watchpoint);
1387 }
/* Remove a previously installed watchpoint. No halt check is performed;
 * removal is delegated directly to the driver. */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
1393 int target_hit_watchpoint(struct target *target,
1394 struct watchpoint **hit_watchpoint)
1395 {
1396 if (target->state != TARGET_HALTED) {
1397 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1398 return ERROR_TARGET_NOT_HALTED;
1399 }
1400
1401 if (!target->type->hit_watchpoint) {
1402 /* For backward compatible, if hit_watchpoint is not implemented,
1403 * return ERROR_FAIL such that gdb_server will not take the nonsense
1404 * information. */
1405 return ERROR_FAIL;
1406 }
1407
1408 return target->type->hit_watchpoint(target, hit_watchpoint);
1409 }
1410
1411 const char *target_get_gdb_arch(struct target *target)
1412 {
1413 if (!target->type->get_gdb_arch)
1414 return NULL;
1415 return target->type->get_gdb_arch(target);
1416 }
1417
1418 int target_get_gdb_reg_list(struct target *target,
1419 struct reg **reg_list[], int *reg_list_size,
1420 enum target_register_class reg_class)
1421 {
1422 int result = ERROR_FAIL;
1423
1424 if (!target_was_examined(target)) {
1425 LOG_ERROR("Target not examined yet");
1426 goto done;
1427 }
1428
1429 result = target->type->get_gdb_reg_list(target, reg_list,
1430 reg_list_size, reg_class);
1431
1432 done:
1433 if (result != ERROR_OK) {
1434 *reg_list = NULL;
1435 *reg_list_size = 0;
1436 }
1437 return result;
1438 }
1439
1440 int target_get_gdb_reg_list_noread(struct target *target,
1441 struct reg **reg_list[], int *reg_list_size,
1442 enum target_register_class reg_class)
1443 {
1444 if (target->type->get_gdb_reg_list_noread &&
1445 target->type->get_gdb_reg_list_noread(target, reg_list,
1446 reg_list_size, reg_class) == ERROR_OK)
1447 return ERROR_OK;
1448 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1449 }
1450
1451 bool target_supports_gdb_connection(struct target *target)
1452 {
1453 /*
1454 * exclude all the targets that don't provide get_gdb_reg_list
1455 * or that have explicit gdb_max_connection == 0
1456 */
1457 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1458 }
1459
1460 int target_step(struct target *target,
1461 int current, target_addr_t address, int handle_breakpoints)
1462 {
1463 int retval;
1464
1465 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1466
1467 retval = target->type->step(target, current, address, handle_breakpoints);
1468 if (retval != ERROR_OK)
1469 return retval;
1470
1471 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1472
1473 return retval;
1474 }
1475
1476 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1477 {
1478 if (target->state != TARGET_HALTED) {
1479 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1480 return ERROR_TARGET_NOT_HALTED;
1481 }
1482 return target->type->get_gdb_fileio_info(target, fileio_info);
1483 }
1484
1485 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1486 {
1487 if (target->state != TARGET_HALTED) {
1488 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1489 return ERROR_TARGET_NOT_HALTED;
1490 }
1491 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1492 }
1493
1494 target_addr_t target_address_max(struct target *target)
1495 {
1496 unsigned bits = target_address_bits(target);
1497 if (sizeof(target_addr_t) * 8 == bits)
1498 return (target_addr_t) -1;
1499 else
1500 return (((target_addr_t) 1) << bits) - 1;
1501 }
1502
1503 unsigned target_address_bits(struct target *target)
1504 {
1505 if (target->type->address_bits)
1506 return target->type->address_bits(target);
1507 return 32;
1508 }
1509
1510 unsigned int target_data_bits(struct target *target)
1511 {
1512 if (target->type->data_bits)
1513 return target->type->data_bits(target);
1514 return 32;
1515 }
1516
/* Collect PC samples for profiling via the driver hook (a default is
 * installed in target_init_one() when the driver lacks one). */
static int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}
1523
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static void target_reset_examined(struct target *target)
{
	target->examined = false;
}
1532
/* Forward declaration: periodic poll handler registered in target_init(). */
static int handle_target(void *priv);
1534
1535 static int target_init_one(struct command_context *cmd_ctx,
1536 struct target *target)
1537 {
1538 target_reset_examined(target);
1539
1540 struct target_type *type = target->type;
1541 if (!type->examine)
1542 type->examine = default_examine;
1543
1544 if (!type->check_reset)
1545 type->check_reset = default_check_reset;
1546
1547 assert(type->init_target);
1548
1549 int retval = type->init_target(cmd_ctx, target);
1550 if (retval != ERROR_OK) {
1551 LOG_ERROR("target '%s' init failed", target_name(target));
1552 return retval;
1553 }
1554
1555 /* Sanity-check MMU support ... stub in what we must, to help
1556 * implement it in stages, but warn if we need to do so.
1557 */
1558 if (type->mmu) {
1559 if (!type->virt2phys) {
1560 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1561 type->virt2phys = identity_virt2phys;
1562 }
1563 } else {
1564 /* Make sure no-MMU targets all behave the same: make no
1565 * distinction between physical and virtual addresses, and
1566 * ensure that virt2phys() is always an identity mapping.
1567 */
1568 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1569 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1570
1571 type->mmu = no_mmu;
1572 type->write_phys_memory = type->write_memory;
1573 type->read_phys_memory = type->read_memory;
1574 type->virt2phys = identity_virt2phys;
1575 }
1576
1577 if (!target->type->read_buffer)
1578 target->type->read_buffer = target_read_buffer_default;
1579
1580 if (!target->type->write_buffer)
1581 target->type->write_buffer = target_write_buffer_default;
1582
1583 if (!target->type->get_gdb_fileio_info)
1584 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1585
1586 if (!target->type->gdb_fileio_end)
1587 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1588
1589 if (!target->type->profiling)
1590 target->type->profiling = target_profiling_default;
1591
1592 return ERROR_OK;
1593 }
1594
1595 static int target_init(struct command_context *cmd_ctx)
1596 {
1597 struct target *target;
1598 int retval;
1599
1600 for (target = all_targets; target; target = target->next) {
1601 retval = target_init_one(cmd_ctx, target);
1602 if (retval != ERROR_OK)
1603 return retval;
1604 }
1605
1606 if (!all_targets)
1607 return ERROR_OK;
1608
1609 retval = target_register_user_commands(cmd_ctx);
1610 if (retval != ERROR_OK)
1611 return retval;
1612
1613 retval = target_register_timer_callback(&handle_target,
1614 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1615 if (retval != ERROR_OK)
1616 return retval;
1617
1618 return ERROR_OK;
1619 }
1620
1621 COMMAND_HANDLER(handle_target_init_command)
1622 {
1623 int retval;
1624
1625 if (CMD_ARGC != 0)
1626 return ERROR_COMMAND_SYNTAX_ERROR;
1627
1628 static bool target_initialized;
1629 if (target_initialized) {
1630 LOG_INFO("'target init' has already been called");
1631 return ERROR_OK;
1632 }
1633 target_initialized = true;
1634
1635 retval = command_run_line(CMD_CTX, "init_targets");
1636 if (retval != ERROR_OK)
1637 return retval;
1638
1639 retval = command_run_line(CMD_CTX, "init_target_events");
1640 if (retval != ERROR_OK)
1641 return retval;
1642
1643 retval = command_run_line(CMD_CTX, "init_board");
1644 if (retval != ERROR_OK)
1645 return retval;
1646
1647 LOG_DEBUG("Initializing targets...");
1648 return target_init(CMD_CTX);
1649 }
1650
1651 int target_register_event_callback(int (*callback)(struct target *target,
1652 enum target_event event, void *priv), void *priv)
1653 {
1654 struct target_event_callback **callbacks_p = &target_event_callbacks;
1655
1656 if (!callback)
1657 return ERROR_COMMAND_SYNTAX_ERROR;
1658
1659 if (*callbacks_p) {
1660 while ((*callbacks_p)->next)
1661 callbacks_p = &((*callbacks_p)->next);
1662 callbacks_p = &((*callbacks_p)->next);
1663 }
1664
1665 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1666 (*callbacks_p)->callback = callback;
1667 (*callbacks_p)->priv = priv;
1668 (*callbacks_p)->next = NULL;
1669
1670 return ERROR_OK;
1671 }
1672
1673 int target_register_reset_callback(int (*callback)(struct target *target,
1674 enum target_reset_mode reset_mode, void *priv), void *priv)
1675 {
1676 struct target_reset_callback *entry;
1677
1678 if (!callback)
1679 return ERROR_COMMAND_SYNTAX_ERROR;
1680
1681 entry = malloc(sizeof(struct target_reset_callback));
1682 if (!entry) {
1683 LOG_ERROR("error allocating buffer for reset callback entry");
1684 return ERROR_COMMAND_SYNTAX_ERROR;
1685 }
1686
1687 entry->callback = callback;
1688 entry->priv = priv;
1689 list_add(&entry->list, &target_reset_callback_list);
1690
1691
1692 return ERROR_OK;
1693 }
1694
1695 int target_register_trace_callback(int (*callback)(struct target *target,
1696 size_t len, uint8_t *data, void *priv), void *priv)
1697 {
1698 struct target_trace_callback *entry;
1699
1700 if (!callback)
1701 return ERROR_COMMAND_SYNTAX_ERROR;
1702
1703 entry = malloc(sizeof(struct target_trace_callback));
1704 if (!entry) {
1705 LOG_ERROR("error allocating buffer for trace callback entry");
1706 return ERROR_COMMAND_SYNTAX_ERROR;
1707 }
1708
1709 entry->callback = callback;
1710 entry->priv = priv;
1711 list_add(&entry->list, &target_trace_callback_list);
1712
1713
1714 return ERROR_OK;
1715 }
1716
1717 int target_register_timer_callback(int (*callback)(void *priv),
1718 unsigned int time_ms, enum target_timer_type type, void *priv)
1719 {
1720 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1721
1722 if (!callback)
1723 return ERROR_COMMAND_SYNTAX_ERROR;
1724
1725 if (*callbacks_p) {
1726 while ((*callbacks_p)->next)
1727 callbacks_p = &((*callbacks_p)->next);
1728 callbacks_p = &((*callbacks_p)->next);
1729 }
1730
1731 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1732 (*callbacks_p)->callback = callback;
1733 (*callbacks_p)->type = type;
1734 (*callbacks_p)->time_ms = time_ms;
1735 (*callbacks_p)->removed = false;
1736
1737 (*callbacks_p)->when = timeval_ms() + time_ms;
1738 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1739
1740 (*callbacks_p)->priv = priv;
1741 (*callbacks_p)->next = NULL;
1742
1743 return ERROR_OK;
1744 }
1745
1746 int target_unregister_event_callback(int (*callback)(struct target *target,
1747 enum target_event event, void *priv), void *priv)
1748 {
1749 struct target_event_callback **p = &target_event_callbacks;
1750 struct target_event_callback *c = target_event_callbacks;
1751
1752 if (!callback)
1753 return ERROR_COMMAND_SYNTAX_ERROR;
1754
1755 while (c) {
1756 struct target_event_callback *next = c->next;
1757 if ((c->callback == callback) && (c->priv == priv)) {
1758 *p = next;
1759 free(c);
1760 return ERROR_OK;
1761 } else
1762 p = &(c->next);
1763 c = next;
1764 }
1765
1766 return ERROR_OK;
1767 }
1768
1769 int target_unregister_reset_callback(int (*callback)(struct target *target,
1770 enum target_reset_mode reset_mode, void *priv), void *priv)
1771 {
1772 struct target_reset_callback *entry;
1773
1774 if (!callback)
1775 return ERROR_COMMAND_SYNTAX_ERROR;
1776
1777 list_for_each_entry(entry, &target_reset_callback_list, list) {
1778 if (entry->callback == callback && entry->priv == priv) {
1779 list_del(&entry->list);
1780 free(entry);
1781 break;
1782 }
1783 }
1784
1785 return ERROR_OK;
1786 }
1787
1788 int target_unregister_trace_callback(int (*callback)(struct target *target,
1789 size_t len, uint8_t *data, void *priv), void *priv)
1790 {
1791 struct target_trace_callback *entry;
1792
1793 if (!callback)
1794 return ERROR_COMMAND_SYNTAX_ERROR;
1795
1796 list_for_each_entry(entry, &target_trace_callback_list, list) {
1797 if (entry->callback == callback && entry->priv == priv) {
1798 list_del(&entry->list);
1799 free(entry);
1800 break;
1801 }
1802 }
1803
1804 return ERROR_OK;
1805 }
1806
1807 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1808 {
1809 if (!callback)
1810 return ERROR_COMMAND_SYNTAX_ERROR;
1811
1812 for (struct target_timer_callback *c = target_timer_callbacks;
1813 c; c = c->next) {
1814 if ((c->callback == callback) && (c->priv == priv)) {
1815 c->removed = true;
1816 return ERROR_OK;
1817 }
1818 }
1819
1820 return ERROR_FAIL;
1821 }
1822
1823 int target_call_event_callbacks(struct target *target, enum target_event event)
1824 {
1825 struct target_event_callback *callback = target_event_callbacks;
1826 struct target_event_callback *next_callback;
1827
1828 if (event == TARGET_EVENT_HALTED) {
1829 /* execute early halted first */
1830 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1831 }
1832
1833 LOG_DEBUG("target event %i (%s) for core %s", event,
1834 jim_nvp_value2name_simple(nvp_target_event, event)->name,
1835 target_name(target));
1836
1837 target_handle_event(target, event);
1838
1839 while (callback) {
1840 next_callback = callback->next;
1841 callback->callback(target, event, callback->priv);
1842 callback = next_callback;
1843 }
1844
1845 return ERROR_OK;
1846 }
1847
1848 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1849 {
1850 struct target_reset_callback *callback;
1851
1852 LOG_DEBUG("target reset %i (%s)", reset_mode,
1853 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1854
1855 list_for_each_entry(callback, &target_reset_callback_list, list)
1856 callback->callback(target, reset_mode, callback->priv);
1857
1858 return ERROR_OK;
1859 }
1860
1861 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1862 {
1863 struct target_trace_callback *callback;
1864
1865 list_for_each_entry(callback, &target_trace_callback_list, list)
1866 callback->callback(target, len, data, callback->priv);
1867
1868 return ERROR_OK;
1869 }
1870
/* Re-arm a periodic timer callback relative to the current poll time
 * (not the previous deadline, so missed periods are not replayed). */
static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, int64_t *now)
{
	cb->when = *now + cb->time_ms;
	return ERROR_OK;
}
1877
/* Invoke one timer callback, then either re-arm it (periodic) or
 * unregister it (one-shot). */
static int target_call_timer_callback(struct target_timer_callback *cb,
		int64_t *now)
{
	cb->callback(cb->priv);

	if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
		return target_timer_callback_periodic_restart(cb, now);

	/* One-shot: mark it for lazy removal by the poll loop. */
	return target_unregister_timer_callback(cb->callback, cb->priv);
}
1888
/* Walk the timer-callback list: free entries flagged as removed, run every
 * callback that is due (or, when checktime == 0, every periodic callback
 * unconditionally), and recompute target_timer_next_event_value as the
 * earliest upcoming deadline. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	/* A timer callback may itself poll and re-enter this function. */
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		/* Lazily free entries flagged by target_unregister_timer_callback(). */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the earliest upcoming deadline for the event loop;
		 * the callback may have been marked removed while running. */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1936
/* Run timer callbacks whose deadlines have passed.
 * Declared with (void): an empty parameter list leaves the parameters
 * unspecified in pre-C23 C. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1941
/* invoke periodic callbacks immediately, regardless of their deadlines.
 * Declared with (void): an empty parameter list leaves the parameters
 * unspecified in pre-C23 C. */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1947
/* Earliest pending timer-callback deadline, in ms since the epoch used by
 * timeval_ms(); maintained by target_call_timer_callbacks_check_time(). */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1952
1953 /* Prints the working area layout for debug purposes */
1954 static void print_wa_layout(struct target *target)
1955 {
1956 struct working_area *c = target->working_areas;
1957
1958 while (c) {
1959 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1960 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1961 c->address, c->address + c->size - 1, c->size);
1962 c = c->next;
1963 }
1964 }
1965
1966 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1967 static void target_split_working_area(struct working_area *area, uint32_t size)
1968 {
1969 assert(area->free); /* Shouldn't split an allocated area */
1970 assert(size <= area->size); /* Caller should guarantee this */
1971
1972 /* Split only if not already the right size */
1973 if (size < area->size) {
1974 struct working_area *new_wa = malloc(sizeof(*new_wa));
1975
1976 if (!new_wa)
1977 return;
1978
1979 new_wa->next = area->next;
1980 new_wa->size = area->size - size;
1981 new_wa->address = area->address + size;
1982 new_wa->backup = NULL;
1983 new_wa->user = NULL;
1984 new_wa->free = true;
1985
1986 area->next = new_wa;
1987 area->size = size;
1988
1989 /* If backup memory was allocated to this area, it has the wrong size
1990 * now so free it and it will be reallocated if/when needed */
1991 free(area->backup);
1992 area->backup = NULL;
1993 }
1994 }
1995
1996 /* Merge all adjacent free areas into one */
1997 static void target_merge_working_areas(struct target *target)
1998 {
1999 struct working_area *c = target->working_areas;
2000
2001 while (c && c->next) {
2002 assert(c->next->address == c->address + c->size); /* This is an invariant */
2003
2004 /* Find two adjacent free areas */
2005 if (c->free && c->next->free) {
2006 /* Merge the last into the first */
2007 c->size += c->next->size;
2008
2009 /* Remove the last */
2010 struct working_area *to_be_freed = c->next;
2011 c->next = c->next->next;
2012 free(to_be_freed->backup);
2013 free(to_be_freed);
2014
2015 /* If backup memory was allocated to the remaining area, it's has
2016 * the wrong size now */
2017 free(c->backup);
2018 c->backup = NULL;
2019 } else {
2020 c = c->next;
2021 }
2022 }
2023 }
2024
/* Allocate 'size' bytes (rounded up to a multiple of 4) of target scratch
 * memory from the working area pool, returned through *area.
 *
 * On the first call (empty pool) the pool is built from the configured
 * physical or virtual base address, chosen by the current MMU state.
 * When target->backup_working_area is set, the area's original contents
 * are read out so freeing the area can restore them.
 *
 * Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no base address is
 * configured for the active MMU state or no free area is large enough,
 * ERROR_FAIL if the backup buffer cannot be allocated, or a target
 * access error from reading the backup copy.
 */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call: one big free area
		 * covering the whole configured region */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area (first-fit) */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	/* No candidate; this also covers a failed pool malloc above, which
	 * leaves the list NULL */
	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* Remember the caller's handle so freeing can invalidate it */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2119
2120 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2121 {
2122 int retval;
2123
2124 retval = target_alloc_working_area_try(target, size, area);
2125 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2126 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2127 return retval;
2128
2129 }
2130
2131 static int target_restore_working_area(struct target *target, struct working_area *area)
2132 {
2133 int retval = ERROR_OK;
2134
2135 if (target->backup_working_area && area->backup) {
2136 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2137 if (retval != ERROR_OK)
2138 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2139 area->size, area->address);
2140 }
2141
2142 return retval;
2143 }
2144
/* Restore the area's backup memory, if any, and return the area to the
 * allocation pool.  'restore' selects whether the backup copy (if taken)
 * is written back before the area is marked free.  A restore failure
 * aborts without freeing the area. */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	int retval = ERROR_OK;

	/* Freeing an already-free area is a no-op */
	if (area->free)
		return retval;

	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
		area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* Coalesce with any neighboring free areas */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
2178
/* Restore the area's original contents (when a backup was taken) and
 * return it to the allocation pool. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
2183
2184 /* free resources and restore memory, if restoring memory fails,
2185 * free up resources anyway
2186 */
2187 static void target_free_all_working_areas_restore(struct target *target, int restore)
2188 {
2189 struct working_area *c = target->working_areas;
2190
2191 LOG_DEBUG("freeing all working areas");
2192
2193 /* Loop through all areas, restoring the allocated ones and marking them as free */
2194 while (c) {
2195 if (!c->free) {
2196 if (restore)
2197 target_restore_working_area(target, c);
2198 c->free = true;
2199 *c->user = NULL; /* Same as above */
2200 c->user = NULL;
2201 }
2202 c = c->next;
2203 }
2204
2205 /* Run a merge pass to combine all areas into one */
2206 target_merge_working_areas(target);
2207
2208 print_wa_layout(target);
2209 }
2210
2211 void target_free_all_working_areas(struct target *target)
2212 {
2213 target_free_all_working_areas_restore(target, 1);
2214
2215 /* Now we have none or only one working area marked as free */
2216 if (target->working_areas) {
2217 /* Free the last one to allow on-the-fly moving and resizing */
2218 free(target->working_areas->backup);
2219 free(target->working_areas);
2220 target->working_areas = NULL;
2221 }
2222 }
2223
2224 /* Find the largest number of bytes that can be allocated */
2225 uint32_t target_get_working_area_avail(struct target *target)
2226 {
2227 struct working_area *c = target->working_areas;
2228 uint32_t max_size = 0;
2229
2230 if (!c)
2231 return target->working_area_size;
2232
2233 while (c) {
2234 if (c->free && max_size < c->size)
2235 max_size = c->size;
2236
2237 c = c->next;
2238 }
2239
2240 return max_size;
2241 }
2242
/* Release all resources owned by a single target.  The target's own
 * deinit hook runs first; the target struct itself is freed last. */
static void target_destroy(struct target *target)
{
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Free the event handlers registered for this target, dropping the
	 * Tcl reference each one holds on its script body */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head = target->head;
		while (head) {
			struct target_list *pos = head->next;
			/* clear each member's SMP flag, including our own */
			head->target->smp = 0;
			free(head);
			head = pos;
		}
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2283
2284 void target_quit(void)
2285 {
2286 struct target_event_callback *pe = target_event_callbacks;
2287 while (pe) {
2288 struct target_event_callback *t = pe->next;
2289 free(pe);
2290 pe = t;
2291 }
2292 target_event_callbacks = NULL;
2293
2294 struct target_timer_callback *pt = target_timer_callbacks;
2295 while (pt) {
2296 struct target_timer_callback *t = pt->next;
2297 free(pt);
2298 pt = t;
2299 }
2300 target_timer_callbacks = NULL;
2301
2302 for (struct target *target = all_targets; target;) {
2303 struct target *tmp;
2304
2305 tmp = target->next;
2306 target_destroy(target);
2307 target = tmp;
2308 }
2309
2310 all_targets = NULL;
2311 }
2312
2313 int target_arch_state(struct target *target)
2314 {
2315 int retval;
2316 if (!target) {
2317 LOG_WARNING("No target has been configured");
2318 return ERROR_OK;
2319 }
2320
2321 if (target->state != TARGET_HALTED)
2322 return ERROR_OK;
2323
2324 retval = target->type->arch_state(target);
2325 return retval;
2326 }
2327
/* Default .get_gdb_fileio_info implementation for targets without
 * semihosting support. */
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info)
{
	/* If target does not support semi-hosting function, target
	   has no need to provide .get_gdb_fileio_info callback.
	   It just return ERROR_FAIL and gdb_server will return "Txx"
	   as target halted every time. */
	return ERROR_FAIL;
}
2337
/* Default .gdb_fileio_end implementation: nothing to clean up, accept
 * the result unconditionally. */
static int target_gdb_fileio_end_default(struct target *target,
		int retcode, int fileio_errno, bool ctrl_c)
{
	return ERROR_OK;
}
2343
2344 int target_profiling_default(struct target *target, uint32_t *samples,
2345 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2346 {
2347 struct timeval timeout, now;
2348
2349 gettimeofday(&timeout, NULL);
2350 timeval_add_time(&timeout, seconds, 0);
2351
2352 LOG_INFO("Starting profiling. Halting and resuming the"
2353 " target as often as we can...");
2354
2355 uint32_t sample_count = 0;
2356 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2357 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2358
2359 int retval = ERROR_OK;
2360 for (;;) {
2361 target_poll(target);
2362 if (target->state == TARGET_HALTED) {
2363 uint32_t t = buf_get_u32(reg->value, 0, 32);
2364 samples[sample_count++] = t;
2365 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2366 retval = target_resume(target, 1, 0, 0, 0);
2367 target_poll(target);
2368 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2369 } else if (target->state == TARGET_RUNNING) {
2370 /* We want to quickly sample the PC. */
2371 retval = target_halt(target);
2372 } else {
2373 LOG_INFO("Target not halted or running");
2374 retval = ERROR_OK;
2375 break;
2376 }
2377
2378 if (retval != ERROR_OK)
2379 break;
2380
2381 gettimeofday(&now, NULL);
2382 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2383 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2384 break;
2385 }
2386 }
2387
2388 *num_samples = sample_count;
2389 return retval;
2390 }
2391
2392 /* Single aligned words are guaranteed to use 16 or 32 bit access
2393 * mode respectively, otherwise data is handled as quickly as
2394 * possible
2395 */
2396 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2397 {
2398 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2399 size, address);
2400
2401 if (!target_was_examined(target)) {
2402 LOG_ERROR("Target not examined yet");
2403 return ERROR_FAIL;
2404 }
2405
2406 if (size == 0)
2407 return ERROR_OK;
2408
2409 if ((address + size - 1) < address) {
2410 /* GDB can request this when e.g. PC is 0xfffffffc */
2411 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2412 address,
2413 size);
2414 return ERROR_FAIL;
2415 }
2416
2417 return target->type->write_buffer(target, address, size, buffer);
2418 }
2419
/* Fallback .write_buffer implementation: write an unaligned head with
 * progressively doubling access sizes until 'address' is aligned to the
 * target's natural data width, then write the bulk with the largest
 * possible access size, descending to smaller sizes for the tail. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means the address is misaligned at this
		 * power-of-two level, so one access of 'size' bytes fixes it */
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Largest multiple of 'size' still left to write */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2456
2457 /* Single aligned words are guaranteed to use 16 or 32 bit access
2458 * mode respectively, otherwise data is handled as quickly as
2459 * possible
2460 */
2461 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2462 {
2463 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2464 size, address);
2465
2466 if (!target_was_examined(target)) {
2467 LOG_ERROR("Target not examined yet");
2468 return ERROR_FAIL;
2469 }
2470
2471 if (size == 0)
2472 return ERROR_OK;
2473
2474 if ((address + size - 1) < address) {
2475 /* GDB can request this when e.g. PC is 0xfffffffc */
2476 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2477 address,
2478 size);
2479 return ERROR_FAIL;
2480 }
2481
2482 return target->type->read_buffer(target, address, size, buffer);
2483 }
2484
/* Fallback .read_buffer implementation: read an unaligned head with
 * progressively doubling access sizes until 'address' is aligned to the
 * target's natural data width, then read the bulk with the largest
 * possible access size, descending to smaller sizes for the tail. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means the address is misaligned at this
		 * power-of-two level, so one access of 'size' bytes fixes it */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Largest multiple of 'size' still left to read */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2520
/* Checksum a range of target memory, result returned via *crc.
 *
 * Tries the target's own (typically on-target, fast) checksum_memory
 * method first; if that fails, falls back to reading the whole range
 * into a host buffer and checksumming it with image_calculate_checksum().
 */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Host-side fallback path */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			/* NOTE(review): ERROR_FAIL would describe an allocation
			 * failure better than a syntax-error code — confirm no
			 * caller depends on this value before changing. */
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 followed by set_u32 against the same
		 * target round-trips through the same byte order and looks like
		 * a no-op — confirm intent before removing. */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2560
/* Check whether the given memory blocks contain only 'erased_value'
 * bytes, via the target's optional blank_check_memory method.  Returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when the target provides none. */
int target_blank_check_memory(struct target *target,
		struct target_memory_check_block *blocks, int num_blocks,
		uint8_t erased_value)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	/* blank check is optional; not every target type implements it */
	if (!target->type->blank_check_memory)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
}
2575
2576 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2577 {
2578 uint8_t value_buf[8];
2579 if (!target_was_examined(target)) {
2580 LOG_ERROR("Target not examined yet");
2581 return ERROR_FAIL;
2582 }
2583
2584 int retval = target_read_memory(target, address, 8, 1, value_buf);
2585
2586 if (retval == ERROR_OK) {
2587 *value = target_buffer_get_u64(target, value_buf);
2588 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2589 address,
2590 *value);
2591 } else {
2592 *value = 0x0;
2593 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2594 address);
2595 }
2596
2597 return retval;
2598 }
2599
2600 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2601 {
2602 uint8_t value_buf[4];
2603 if (!target_was_examined(target)) {
2604 LOG_ERROR("Target not examined yet");
2605 return ERROR_FAIL;
2606 }
2607
2608 int retval = target_read_memory(target, address, 4, 1, value_buf);
2609
2610 if (retval == ERROR_OK) {
2611 *value = target_buffer_get_u32(target, value_buf);
2612 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2613 address,
2614 *value);
2615 } else {
2616 *value = 0x0;
2617 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2618 address);
2619 }
2620
2621 return retval;
2622 }
2623
2624 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2625 {
2626 uint8_t value_buf[2];
2627 if (!target_was_examined(target)) {
2628 LOG_ERROR("Target not examined yet");
2629 return ERROR_FAIL;
2630 }
2631
2632 int retval = target_read_memory(target, address, 2, 1, value_buf);
2633
2634 if (retval == ERROR_OK) {
2635 *value = target_buffer_get_u16(target, value_buf);
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2637 address,
2638 *value);
2639 } else {
2640 *value = 0x0;
2641 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2642 address);
2643 }
2644
2645 return retval;
2646 }
2647
2648 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2649 {
2650 if (!target_was_examined(target)) {
2651 LOG_ERROR("Target not examined yet");
2652 return ERROR_FAIL;
2653 }
2654
2655 int retval = target_read_memory(target, address, 1, 1, value);
2656
2657 if (retval == ERROR_OK) {
2658 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2659 address,
2660 *value);
2661 } else {
2662 *value = 0x0;
2663 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2664 address);
2665 }
2666
2667 return retval;
2668 }
2669
2670 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2671 {
2672 int retval;
2673 uint8_t value_buf[8];
2674 if (!target_was_examined(target)) {
2675 LOG_ERROR("Target not examined yet");
2676 return ERROR_FAIL;
2677 }
2678
2679 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2680 address,
2681 value);
2682
2683 target_buffer_set_u64(target, value_buf, value);
2684 retval = target_write_memory(target, address, 8, 1, value_buf);
2685 if (retval != ERROR_OK)
2686 LOG_DEBUG("failed: %i", retval);
2687
2688 return retval;
2689 }
2690
2691 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2692 {
2693 int retval;
2694 uint8_t value_buf[4];
2695 if (!target_was_examined(target)) {
2696 LOG_ERROR("Target not examined yet");
2697 return ERROR_FAIL;
2698 }
2699
2700 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2701 address,
2702 value);
2703
2704 target_buffer_set_u32(target, value_buf, value);
2705 retval = target_write_memory(target, address, 4, 1, value_buf);
2706 if (retval != ERROR_OK)
2707 LOG_DEBUG("failed: %i", retval);
2708
2709 return retval;
2710 }
2711
2712 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2713 {
2714 int retval;
2715 uint8_t value_buf[2];
2716 if (!target_was_examined(target)) {
2717 LOG_ERROR("Target not examined yet");
2718 return ERROR_FAIL;
2719 }
2720
2721 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2722 address,
2723 value);
2724
2725 target_buffer_set_u16(target, value_buf, value);
2726 retval = target_write_memory(target, address, 2, 1, value_buf);
2727 if (retval != ERROR_OK)
2728 LOG_DEBUG("failed: %i", retval);
2729
2730 return retval;
2731 }
2732
2733 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2734 {
2735 int retval;
2736 if (!target_was_examined(target)) {
2737 LOG_ERROR("Target not examined yet");
2738 return ERROR_FAIL;
2739 }
2740
2741 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2742 address, value);
2743
2744 retval = target_write_memory(target, address, 1, 1, &value);
2745 if (retval != ERROR_OK)
2746 LOG_DEBUG("failed: %i", retval);
2747
2748 return retval;
2749 }
2750
2751 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2752 {
2753 int retval;
2754 uint8_t value_buf[8];
2755 if (!target_was_examined(target)) {
2756 LOG_ERROR("Target not examined yet");
2757 return ERROR_FAIL;
2758 }
2759
2760 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2761 address,
2762 value);
2763
2764 target_buffer_set_u64(target, value_buf, value);
2765 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2766 if (retval != ERROR_OK)
2767 LOG_DEBUG("failed: %i", retval);
2768
2769 return retval;
2770 }
2771
2772 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2773 {
2774 int retval;
2775 uint8_t value_buf[4];
2776 if (!target_was_examined(target)) {
2777 LOG_ERROR("Target not examined yet");
2778 return ERROR_FAIL;
2779 }
2780
2781 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2782 address,
2783 value);
2784
2785 target_buffer_set_u32(target, value_buf, value);
2786 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2787 if (retval != ERROR_OK)
2788 LOG_DEBUG("failed: %i", retval);
2789
2790 return retval;
2791 }
2792
2793 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2794 {
2795 int retval;
2796 uint8_t value_buf[2];
2797 if (!target_was_examined(target)) {
2798 LOG_ERROR("Target not examined yet");
2799 return ERROR_FAIL;
2800 }
2801
2802 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2803 address,
2804 value);
2805
2806 target_buffer_set_u16(target, value_buf, value);
2807 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2808 if (retval != ERROR_OK)
2809 LOG_DEBUG("failed: %i", retval);
2810
2811 return retval;
2812 }
2813
2814 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2815 {
2816 int retval;
2817 if (!target_was_examined(target)) {
2818 LOG_ERROR("Target not examined yet");
2819 return ERROR_FAIL;
2820 }
2821
2822 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2823 address, value);
2824
2825 retval = target_write_phys_memory(target, address, 1, 1, &value);
2826 if (retval != ERROR_OK)
2827 LOG_DEBUG("failed: %i", retval);
2828
2829 return retval;
2830 }
2831
2832 static int find_target(struct command_invocation *cmd, const char *name)
2833 {
2834 struct target *target = get_target(name);
2835 if (!target) {
2836 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2837 return ERROR_FAIL;
2838 }
2839 if (!target->tap->enabled) {
2840 command_print(cmd, "Target: TAP %s is disabled, "
2841 "can't be the current target\n",
2842 target->tap->dotted_name);
2843 return ERROR_FAIL;
2844 }
2845
2846 cmd->ctx->current_target = target;
2847 if (cmd->ctx->current_target_override)
2848 cmd->ctx->current_target_override = target;
2849
2850 return ERROR_OK;
2851 }
2852
2853
/* 'targets' command: with one argument, switch the current target; with
 * none (or on a failed switch), print a table of all configured targets,
 * marking the current one with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD, "    TargetName         Type       Endian TapName            State       ");
	command_print(CMD, "--  ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2896
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Most recently sensed line state, updated by sense_handler() and also
 * consulted by the polling loop in handle_target(). */
static int power_dropout;
static int srst_asserted;

/* Debounced "event pending" flags set by sense_handler(), consumed and
 * cleared by handle_target(). */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2906
/* Sample the power-dropout and SRST sense inputs, detect edges against
 * the previous sample, and latch the corresponding run_* event flags for
 * handle_target() to act on.  Dropout and SRST-deassert events are
 * rate-limited to at most one per 2000ms. */
static int sense_handler(void)
{
	/* previous samples, for edge detection */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	/* rising edge of "power present" */
	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	/* suppress repeat dropout events within 2s of the last one */
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	/* falling edge of SRST */
	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	/* same 2s rate limit for SRST-deassert events */
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2958
/* process target state changes */
/* Periodic handler registered with the timer-callback framework: run the
 * power/SRST sense logic (dispatching the corresponding Tcl procs), then
 * poll every enabled, examined target for state changes, backing off
 * exponentially from targets whose poll keeps failing. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		/* backoff: after a failure, skip this target for
		 * backoff.times polling rounds before trying again */
		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target->examined = true;
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
							target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3071
/* "reg" command:
 *   reg                       - list every register of every cache of the
 *                               current target (cached values only shown
 *                               when valid)
 *   reg <num|name>            - display one register, reading it from the
 *                               target if the cached value is invalid
 *   reg <num|name> force      - invalidate the cache first, forcing a read
 *   reg <num|name> <value>    - write <value> to the register
 * A numeric first argument selects by ordinal position across all caches;
 * anything else is treated as a register name. */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		/* count is the ordinal shown to the user; note it keeps
		 * incrementing across cache boundaries */
		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* skip registers this target variant doesn't have,
				 * and ones marked hidden from the user */
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk all caches until the running ordinal matches num */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			/* num exceeded the total register count */
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register: one arg, or two args where the second does not
	 * start with a digit (e.g. "force") */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		/* refresh from the target if the cache is (now) invalid */
		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		/* NOTE(review): str_to_buf result is not checked here; a
		 * malformed value silently writes whatever was parsed */
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			/* echo back the value as now held in the cache */
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
3202
3203 COMMAND_HANDLER(handle_poll_command)
3204 {
3205 int retval = ERROR_OK;
3206 struct target *target = get_current_target(CMD_CTX);
3207
3208 if (CMD_ARGC == 0) {
3209 command_print(CMD, "background polling: %s",
3210 jtag_poll_get_enabled() ? "on" : "off");
3211 command_print(CMD, "TAP: %s (%s)",
3212 target->tap->dotted_name,
3213 target->tap->enabled ? "enabled" : "disabled");
3214 if (!target->tap->enabled)
3215 return ERROR_OK;
3216 retval = target_poll(target);
3217 if (retval != ERROR_OK)
3218 return retval;
3219 retval = target_arch_state(target);
3220 if (retval != ERROR_OK)
3221 return retval;
3222 } else if (CMD_ARGC == 1) {
3223 bool enable;
3224 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3225 jtag_poll_set_enabled(enable);
3226 } else
3227 return ERROR_COMMAND_SYNTAX_ERROR;
3228
3229 return retval;
3230 }
3231
3232 COMMAND_HANDLER(handle_wait_halt_command)
3233 {
3234 if (CMD_ARGC > 1)
3235 return ERROR_COMMAND_SYNTAX_ERROR;
3236
3237 unsigned ms = DEFAULT_HALT_TIMEOUT;
3238 if (1 == CMD_ARGC) {
3239 int retval = parse_uint(CMD_ARGV[0], &ms);
3240 if (retval != ERROR_OK)
3241 return ERROR_COMMAND_SYNTAX_ERROR;
3242 }
3243
3244 struct target *target = get_current_target(CMD_CTX);
3245 return target_wait_state(target, TARGET_HALTED, ms);
3246 }
3247
3248 /* wait for target state to change. The trick here is to have a low
3249 * latency for short waits and not to suck up all the CPU time
3250 * on longer waits.
3251 *
3252 * After 500ms, keep_alive() is invoked
3253 */
3254 int target_wait_state(struct target *target, enum target_state state, int ms)
3255 {
3256 int retval;
3257 int64_t then = 0, cur;
3258 bool once = true;
3259
3260 for (;;) {
3261 retval = target_poll(target);
3262 if (retval != ERROR_OK)
3263 return retval;
3264 if (target->state == state)
3265 break;
3266 cur = timeval_ms();
3267 if (once) {
3268 once = false;
3269 then = timeval_ms();
3270 LOG_DEBUG("waiting for target %s...",
3271 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3272 }
3273
3274 if (cur-then > 500)
3275 keep_alive();
3276
3277 if ((cur-then) > ms) {
3278 LOG_ERROR("timed out while waiting for target %s",
3279 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3280 return ERROR_FAIL;
3281 }
3282 }
3283
3284 return ERROR_OK;
3285 }
3286
3287 COMMAND_HANDLER(handle_halt_command)
3288 {
3289 LOG_DEBUG("-");
3290
3291 struct target *target = get_current_target(CMD_CTX);
3292
3293 target->verbose_halt_msg = true;
3294
3295 int retval = target_halt(target);
3296 if (retval != ERROR_OK)
3297 return retval;
3298
3299 if (CMD_ARGC == 1) {
3300 unsigned wait_local;
3301 retval = parse_uint(CMD_ARGV[0], &wait_local);
3302 if (retval != ERROR_OK)
3303 return ERROR_COMMAND_SYNTAX_ERROR;
3304 if (!wait_local)
3305 return ERROR_OK;
3306 }
3307
3308 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3309 }
3310
3311 COMMAND_HANDLER(handle_soft_reset_halt_command)
3312 {
3313 struct target *target = get_current_target(CMD_CTX);
3314
3315 LOG_USER("requesting target halt and executing a soft reset");
3316
3317 target_soft_reset_halt(target);
3318
3319 return ERROR_OK;
3320 }
3321
3322 COMMAND_HANDLER(handle_reset_command)
3323 {
3324 if (CMD_ARGC > 1)
3325 return ERROR_COMMAND_SYNTAX_ERROR;
3326
3327 enum target_reset_mode reset_mode = RESET_RUN;
3328 if (CMD_ARGC == 1) {
3329 const struct jim_nvp *n;
3330 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3331 if ((!n->name) || (n->value == RESET_UNKNOWN))
3332 return ERROR_COMMAND_SYNTAX_ERROR;
3333 reset_mode = n->value;
3334 }
3335
3336 /* reset *all* targets */
3337 return target_process_reset(CMD, reset_mode);
3338 }
3339
3340
3341 COMMAND_HANDLER(handle_resume_command)
3342 {
3343 int current = 1;
3344 if (CMD_ARGC > 1)
3345 return ERROR_COMMAND_SYNTAX_ERROR;
3346
3347 struct target *target = get_current_target(CMD_CTX);
3348
3349 /* with no CMD_ARGV, resume from current pc, addr = 0,
3350 * with one arguments, addr = CMD_ARGV[0],
3351 * handle breakpoints, not debugging */
3352 target_addr_t addr = 0;
3353 if (CMD_ARGC == 1) {
3354 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3355 current = 0;
3356 }
3357
3358 return target_resume(target, current, addr, 1, 0);
3359 }
3360
3361 COMMAND_HANDLER(handle_step_command)
3362 {
3363 if (CMD_ARGC > 1)
3364 return ERROR_COMMAND_SYNTAX_ERROR;
3365
3366 LOG_DEBUG("-");
3367
3368 /* with no CMD_ARGV, step from current pc, addr = 0,
3369 * with one argument addr = CMD_ARGV[0],
3370 * handle breakpoints, debugging */
3371 target_addr_t addr = 0;
3372 int current_pc = 1;
3373 if (CMD_ARGC == 1) {
3374 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3375 current_pc = 0;
3376 }
3377
3378 struct target *target = get_current_target(CMD_CTX);
3379
3380 return target_step(target, current_pc, addr, 1);
3381 }
3382
/* Format a block of target memory as a hex dump and print it via
 * command_print(), 32 bytes per output line.
 *
 * address: start address shown in the left-hand column
 * size:    element width in bytes (1, 2, 4 or 8; anything else is
 *          rejected with a log message)
 * count:   number of elements in buffer
 * Elements are decoded with target_buffer_get_u* so they appear in the
 * target's byte order. */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;
	/* elements per printed line */
	unsigned line_modulo = line_bytecnt / size;

	/* accumulate one line of text here before printing it */
	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* start of a new line: emit the address column */
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at end of line or at the last element */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3446
3447 COMMAND_HANDLER(handle_md_command)
3448 {
3449 if (CMD_ARGC < 1)
3450 return ERROR_COMMAND_SYNTAX_ERROR;
3451
3452 unsigned size = 0;
3453 switch (CMD_NAME[2]) {
3454 case 'd':
3455 size = 8;
3456 break;
3457 case 'w':
3458 size = 4;
3459 break;
3460 case 'h':
3461 size = 2;
3462 break;
3463 case 'b':
3464 size = 1;
3465 break;
3466 default:
3467 return ERROR_COMMAND_SYNTAX_ERROR;
3468 }
3469
3470 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3471 int (*fn)(struct target *target,
3472 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3473 if (physical) {
3474 CMD_ARGC--;
3475 CMD_ARGV++;
3476 fn = target_read_phys_memory;
3477 } else
3478 fn = target_read_memory;
3479 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3480 return ERROR_COMMAND_SYNTAX_ERROR;
3481
3482 target_addr_t address;
3483 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3484
3485 unsigned count = 1;
3486 if (CMD_ARGC == 2)
3487 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3488
3489 uint8_t *buffer = calloc(count, size);
3490 if (!buffer) {
3491 LOG_ERROR("Failed to allocate md read buffer");
3492 return ERROR_FAIL;
3493 }
3494
3495 struct target *target = get_current_target(CMD_CTX);
3496 int retval = fn(target, address, size, count, buffer);
3497 if (retval == ERROR_OK)
3498 target_handle_md_output(CMD, target, address, size, count, buffer);
3499
3500 free(buffer);
3501
3502 return retval;
3503 }
3504
3505 typedef int (*target_write_fn)(struct target *target,
3506 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3507
3508 static int target_fill_mem(struct target *target,
3509 target_addr_t address,
3510 target_write_fn fn,
3511 unsigned data_size,
3512 /* value */
3513 uint64_t b,
3514 /* count */
3515 unsigned c)
3516 {
3517 /* We have to write in reasonably large chunks to be able
3518 * to fill large memory areas with any sane speed */
3519 const unsigned chunk_size = 16384;
3520 uint8_t *target_buf = malloc(chunk_size * data_size);
3521 if (!target_buf) {
3522 LOG_ERROR("Out of memory");
3523 return ERROR_FAIL;
3524 }
3525
3526 for (unsigned i = 0; i < chunk_size; i++) {
3527 switch (data_size) {
3528 case 8:
3529 target_buffer_set_u64(target, target_buf + i * data_size, b);
3530 break;
3531 case 4:
3532 target_buffer_set_u32(target, target_buf + i * data_size, b);
3533 break;
3534 case 2:
3535 target_buffer_set_u16(target, target_buf + i * data_size, b);
3536 break;
3537 case 1:
3538 target_buffer_set_u8(target, target_buf + i * data_size, b);
3539 break;
3540 default:
3541 exit(-1);
3542 }
3543 }
3544
3545 int retval = ERROR_OK;
3546
3547 for (unsigned x = 0; x < c; x += chunk_size) {
3548 unsigned current;
3549 current = c - x;
3550 if (current > chunk_size)
3551 current = chunk_size;
3552 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3553 if (retval != ERROR_OK)
3554 break;
3555 /* avoid GDB timeouts */
3556 keep_alive();
3557 }
3558 free(target_buf);
3559
3560 return retval;
3561 }
3562
3563
3564 COMMAND_HANDLER(handle_mw_command)
3565 {
3566 if (CMD_ARGC < 2)
3567 return ERROR_COMMAND_SYNTAX_ERROR;
3568 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3569 target_write_fn fn;
3570 if (physical) {
3571 CMD_ARGC--;
3572 CMD_ARGV++;
3573 fn = target_write_phys_memory;
3574 } else
3575 fn = target_write_memory;
3576 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3577 return ERROR_COMMAND_SYNTAX_ERROR;
3578
3579 target_addr_t address;
3580 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3581
3582 uint64_t value;
3583 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3584
3585 unsigned count = 1;
3586 if (CMD_ARGC == 3)
3587 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3588
3589 struct target *target = get_current_target(CMD_CTX);
3590 unsigned wordsize;
3591 switch (CMD_NAME[2]) {
3592 case 'd':
3593 wordsize = 8;
3594 break;
3595 case 'w':
3596 wordsize = 4;
3597 break;
3598 case 'h':
3599 wordsize = 2;
3600 break;
3601 case 'b':
3602 wordsize = 1;
3603 break;
3604 default:
3605 return ERROR_COMMAND_SYNTAX_ERROR;
3606 }
3607
3608 return target_fill_mem(target, address, fn, wordsize, value, count);
3609 }
3610
3611 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3612 target_addr_t *min_address, target_addr_t *max_address)
3613 {
3614 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3615 return ERROR_COMMAND_SYNTAX_ERROR;
3616
3617 /* a base address isn't always necessary,
3618 * default to 0x0 (i.e. don't relocate) */
3619 if (CMD_ARGC >= 2) {
3620 target_addr_t addr;
3621 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3622 image->base_address = addr;
3623 image->base_address_set = true;
3624 } else
3625 image->base_address_set = false;
3626
3627 image->start_address_set = false;
3628
3629 if (CMD_ARGC >= 4)
3630 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3631 if (CMD_ARGC == 5) {
3632 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3633 /* use size (given) to find max (required) */
3634 *max_address += *min_address;
3635 }
3636
3637 if (*min_address > *max_address)
3638 return ERROR_COMMAND_SYNTAX_ERROR;
3639
3640 return ERROR_OK;
3641 }
3642
/* "load_image" command: write an image file into target memory, section by
 * section, optionally clipped to a [min_address, max_address) window.
 * Syntax: <file> [base_address [type [min_address [size]]]] */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* i.e. no upper clip by default */
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	/* total bytes actually written, for the throughput report */
	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* write only the part of the section that overlaps the
		 * [min_address, max_address) window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3726
3727 COMMAND_HANDLER(handle_dump_image_command)
3728 {
3729 struct fileio *fileio;
3730 uint8_t *buffer;
3731 int retval, retvaltemp;
3732 target_addr_t address, size;
3733 struct duration bench;
3734 struct target *target = get_current_target(CMD_CTX);
3735
3736 if (CMD_ARGC != 3)
3737 return ERROR_COMMAND_SYNTAX_ERROR;
3738
3739 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3740 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3741
3742 uint32_t buf_size = (size > 4096) ? 4096 : size;
3743 buffer = malloc(buf_size);
3744 if (!buffer)
3745 return ERROR_FAIL;
3746
3747 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3748 if (retval != ERROR_OK) {
3749 free(buffer);
3750 return retval;
3751 }
3752
3753 duration_start(&bench);
3754
3755 while (size > 0) {
3756 size_t size_written;
3757 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3758 retval = target_read_buffer(target, address, this_run_size, buffer);
3759 if (retval != ERROR_OK)
3760 break;
3761
3762 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3763 if (retval != ERROR_OK)
3764 break;
3765
3766 size -= this_run_size;
3767 address += this_run_size;
3768 }
3769
3770 free(buffer);
3771
3772 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3773 size_t filesize;
3774 retval = fileio_size(fileio, &filesize);
3775 if (retval != ERROR_OK)
3776 return retval;
3777 command_print(CMD,
3778 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3779 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3780 }
3781
3782 retvaltemp = fileio_close(fileio);
3783 if (retvaltemp != ERROR_OK)
3784 return retvaltemp;
3785
3786 return retval;
3787 }
3788
/* How thoroughly the verify_image-family commands compare target memory
 * against the image file (see handle_verify_image_command_internal). */
enum verify_mode {
	IMAGE_TEST = 0,		/* only list section addresses/lengths */
	IMAGE_VERIFY = 1,	/* CRC compare; binary compare on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC compare only; mismatch is an error */
};
3794
3795 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3796 {
3797 uint8_t *buffer;
3798 size_t buf_cnt;
3799 uint32_t image_size;
3800 int retval;
3801 uint32_t checksum = 0;
3802 uint32_t mem_checksum = 0;
3803
3804 struct image image;
3805
3806 struct target *target = get_current_target(CMD_CTX);
3807
3808 if (CMD_ARGC < 1)
3809 return ERROR_COMMAND_SYNTAX_ERROR;
3810
3811 if (!target) {
3812 LOG_ERROR("no target selected");
3813 return ERROR_FAIL;
3814 }
3815
3816 struct duration bench;
3817 duration_start(&bench);
3818
3819 if (CMD_ARGC >= 2) {
3820 target_addr_t addr;
3821 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3822 image.base_address = addr;
3823 image.base_address_set = true;
3824 } else {
3825 image.base_address_set = false;
3826 image.base_address = 0x0;
3827 }
3828
3829 image.start_address_set = false;
3830
3831 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3832 if (retval != ERROR_OK)
3833 return retval;
3834
3835 image_size = 0x0;
3836 int diffs = 0;
3837 retval = ERROR_OK;
3838 for (unsigned int i = 0; i < image.num_sections; i++) {
3839 buffer = malloc(image.sections[i].size);
3840 if (!buffer) {
3841 command_print(CMD,
3842 "error allocating buffer for section (%" PRIu32 " bytes)",
3843 image.sections[i].size);
3844 break;
3845 }
3846 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3847 if (retval != ERROR_OK) {
3848 free(buffer);
3849 break;
3850 }
3851
3852 if (verify >= IMAGE_VERIFY) {
3853 /* calculate checksum of image */
3854 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3855 if (retval != ERROR_OK) {
3856 free(buffer);
3857 break;
3858 }
3859
3860 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3861 if (retval != ERROR_OK) {
3862 free(buffer);
3863 break;
3864 }
3865 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3866 LOG_ERROR("checksum mismatch");
3867 free(buffer);
3868 retval = ERROR_FAIL;
3869 goto done;
3870 }
3871 if (checksum != mem_checksum) {
3872 /* failed crc checksum, fall back to a binary compare */
3873 uint8_t *data;
3874
3875 if (diffs == 0)
3876 LOG_ERROR("checksum mismatch - attempting binary compare");
3877
3878 data = malloc(buf_cnt);
3879
3880 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3881 if (retval == ERROR_OK) {
3882 uint32_t t;
3883 for (t = 0; t < buf_cnt; t++) {
3884 if (data[t] != buffer[t]) {
3885 command_print(CMD,
3886 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3887 diffs,
3888 (unsigned)(t + image.sections[i].base_address),
3889 data[t],
3890 buffer[t]);
3891 if (diffs++ >= 127) {
3892 command_print(CMD, "More than 128 errors, the rest are not printed.");
3893 free(data);
3894 free(buffer);
3895 goto done;
3896 }
3897 }
3898 keep_alive();
3899 }
3900 }
3901 free(data);
3902 }
3903 } else {
3904 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3905 image.sections[i].base_address,
3906 buf_cnt);
3907 }
3908
3909 free(buffer);
3910 image_size += buf_cnt;
3911 }
3912 if (diffs > 0)
3913 command_print(CMD, "No more differences found.");
3914 done:
3915 if (diffs > 0)
3916 retval = ERROR_FAIL;
3917 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3918 command_print(CMD, "verified %" PRIu32 " bytes "
3919 "in %fs (%0.3f KiB/s)", image_size,
3920 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3921 }
3922
3923 image_close(&image);
3924
3925 return retval;
3926 }
3927
/* "verify_image_checksum": CRC-only comparison; any mismatch is an error. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3932
/* "verify_image": CRC compare with byte-by-byte diff on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3937
/* "test_image": only list the image's sections; no memory comparison. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3942
3943 static int handle_bp_command_list(struct command_invocation *cmd)
3944 {
3945 struct target *target = get_current_target(cmd->ctx);
3946 struct breakpoint *breakpoint = target->breakpoints;
3947 while (breakpoint) {
3948 if (breakpoint->type == BKPT_SOFT) {
3949 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3950 breakpoint->length);
3951 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3952 breakpoint->address,
3953 breakpoint->length,
3954 breakpoint->set, buf);
3955 free(buf);
3956 } else {
3957 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3958 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3959 breakpoint->asid,
3960 breakpoint->length, breakpoint->set);
3961 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3962 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3963 breakpoint->address,
3964 breakpoint->length, breakpoint->set);
3965 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3966 breakpoint->asid);
3967 } else
3968 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3969 breakpoint->address,
3970 breakpoint->length, breakpoint->set);
3971 }
3972
3973 breakpoint = breakpoint->next;
3974 }
3975 return ERROR_OK;
3976 }
3977
3978 static int handle_bp_command_set(struct command_invocation *cmd,
3979 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3980 {
3981 struct target *target = get_current_target(cmd->ctx);
3982 int retval;
3983
3984 if (asid == 0) {
3985 retval = breakpoint_add(target, addr, length, hw);
3986 /* error is always logged in breakpoint_add(), do not print it again */
3987 if (retval == ERROR_OK)
3988 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3989
3990 } else if (addr == 0) {
3991 if (!target->type->add_context_breakpoint) {
3992 LOG_ERROR("Context breakpoint not available");
3993 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3994 }
3995 retval = context_breakpoint_add(target, asid, length, hw);
3996 /* error is always logged in context_breakpoint_add(), do not print it again */
3997 if (retval == ERROR_OK)
3998 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3999
4000 } else {
4001 if (!target->type->add_hybrid_breakpoint) {
4002 LOG_ERROR("Hybrid breakpoint not available");
4003 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4004 }
4005 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4006 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4007 if (retval == ERROR_OK)
4008 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4009 }
4010 return retval;
4011 }
4012
/* "bp" command: list or set breakpoints.
 *   bp                               - list all breakpoints
 *   bp <addr> <len>                  - software breakpoint
 *   bp <addr> <len> hw               - hardware breakpoint
 *   bp <asid> <len> hw_ctx           - context (ASID) breakpoint
 *   bp <addr> <asid> <len> [hw]      - hybrid breakpoint (always hardware)
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		/* <addr> <len>: plain software breakpoint */
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* a third argument that is neither "hw" nor "hw_ctx" is
		 * treated like the 4-argument hybrid form below */
		/* fallthrough */
	case 4:
		/* <addr> <asid> <len>: hybrid breakpoint, always hardware */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4056
4057 COMMAND_HANDLER(handle_rbp_command)
4058 {
4059 if (CMD_ARGC != 1)
4060 return ERROR_COMMAND_SYNTAX_ERROR;
4061
4062 struct target *target = get_current_target(CMD_CTX);
4063
4064 if (!strcmp(CMD_ARGV[0], "all")) {
4065 breakpoint_remove_all(target);
4066 } else {
4067 target_addr_t addr;
4068 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4069
4070 breakpoint_remove(target, addr);
4071 }
4072
4073 return ERROR_OK;
4074 }
4075
/* "wp" command: list or set watchpoints.
 *   wp                                        - list all watchpoints
 *   wp <addr> <len> [r|w|a [value [mask]]]    - set a watchpoint
 * The argument cases below deliberately fall through so each longer form
 * also parses the shorter form's arguments. */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	/* defaults for the optional arguments */
	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		/* access mode: first character of the third argument */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4143
4144 COMMAND_HANDLER(handle_rwp_command)
4145 {
4146 if (CMD_ARGC != 1)
4147 return ERROR_COMMAND_SYNTAX_ERROR;
4148
4149 target_addr_t addr;
4150 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4151
4152 struct target *target = get_current_target(CMD_CTX);
4153 watchpoint_remove(target, addr);
4154
4155 return ERROR_OK;
4156 }
4157
4158 /**
4159 * Translate a virtual address to a physical address.
4160 *
4161 * The low-level target implementation must have logged a detailed error
4162 * which is forwarded to telnet/GDB session.
4163 */
4164 COMMAND_HANDLER(handle_virt2phys_command)
4165 {
4166 if (CMD_ARGC != 1)
4167 return ERROR_COMMAND_SYNTAX_ERROR;
4168
4169 target_addr_t va;
4170 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4171 target_addr_t pa;
4172
4173 struct target *target = get_current_target(CMD_CTX);
4174 int retval = target->type->virt2phys(target, va, &pa);
4175 if (retval == ERROR_OK)
4176 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4177
4178 return retval;
4179 }
4180
/* Write len bytes to f; short writes are only logged, not retried. */
static void write_data(FILE *f, const void *data, size_t len)
{
	const size_t written = fwrite(data, 1, len, f);
	if (written != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4187
/* Write a 32-bit value to f in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t val[4];

	target_buffer_set_u32(target, val, l);
	write_data(f, val, sizeof(val));
}
4195
/* Write a C string to f without its NUL terminator. */
static void write_string(FILE *f, char *s)
{
	write_data(f, s, strlen(s));
}
4200
4201 typedef unsigned char UNIT[2]; /* unit of profiling */
4202
/* Dump a gmon.out histogram file from the collected PC samples.
 *
 * The output is the gprof "gmon" format: a 4-byte magic, a version word,
 * padding, then a GMON_TAG_TIME_HIST record (header + one 16-bit counter
 * per bucket). Words are emitted in the target's byte order via
 * write_long().
 *
 * with_range selects an explicit [start_address, end_address) histogram
 * window; otherwise the window is derived from the samples themselves.
 * Errors (fopen/malloc failure) abort silently. */
static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
		uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
{
	uint32_t i;
	FILE *f = fopen(filename, "w");
	if (!f)
		return;
	write_string(f, "gmon");
	write_long(f, 0x00000001, target); /* Version */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */

	uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
	write_data(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		min = start_address;
		max = end_address;
	} else {
		/* derive the window from the observed samples */
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sample_num; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		max++;
	}

	int address_space = max - min;
	assert(address_space >= 2);

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
	uint32_t num_buckets = address_space / sizeof(UNIT);
	if (num_buckets > max_buckets)
		num_buckets = max_buckets;
	int *buckets = malloc(sizeof(int) * num_buckets);
	if (!buckets) {
		fclose(f);
		return;
	}
	memset(buckets, 0, sizeof(int) * num_buckets);
	for (i = 0; i < sample_num; i++) {
		uint32_t address = samples[i];

		/* ignore samples outside the histogram window */
		if ((address < min) || (max <= address))
			continue;

		/* scale the address offset into a bucket index; done in
		 * long long to avoid the intermediate product overflowing */
		long long a = address - min;
		long long b = num_buckets;
		long long c = address_space;
		int index_t = (a * b) / c; /* danger!!!! int32 overflows */
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	write_long(f, min, target); /* low_pc */
	write_long(f, max, target); /* high_pc */
	write_long(f, num_buckets, target); /* # of buckets */
	/* NOTE(review): the float rate is truncated to an integer word here,
	 * matching the gmon header layout */
	float sample_rate = sample_num / (duration_ms / 1000.0);
	write_long(f, sample_rate, target);
	write_string(f, "seconds");
	/* pad the dimension field to its fixed 15-byte width */
	for (i = 0; i < (15-strlen("seconds")); i++)
		write_data(f, &zero, 1);
	write_string(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	char *data = malloc(2 * num_buckets);
	if (data) {
		/* emit each bucket as a little-endian-in-memory 16-bit
		 * counter, saturated at 65535 */
		for (i = 0; i < num_buckets; i++) {
			int val;
			val = buckets[i];
			if (val > 65535)
				val = 65535;
			data[i * 2] = val&0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		free(buckets);
		write_data(f, data, num_buckets * 2);
		free(data);
	} else
		free(buckets);

	fclose(f);
}
4300
4301 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4302 * which will be used as a random sampling of PC */
4303 COMMAND_HANDLER(handle_profile_command)
4304 {
4305 struct target *target = get_current_target(CMD_CTX);
4306
4307 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4308 return ERROR_COMMAND_SYNTAX_ERROR;
4309
4310 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4311 uint32_t offset;
4312 uint32_t num_of_samples;
4313 int retval = ERROR_OK;
4314 bool halted_before_profiling = target->state == TARGET_HALTED;
4315
4316 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4317
4318 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4319 if (!samples) {
4320 LOG_ERROR("No memory to store samples.");
4321 return ERROR_FAIL;
4322 }
4323
4324 uint64_t timestart_ms = timeval_ms();
4325 /**
4326 * Some cores let us sample the PC without the
4327 * annoying halt/resume step; for example, ARMv7 PCSR.
4328 * Provide a way to use that more efficient mechanism.
4329 */
4330 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4331 &num_of_samples, offset);
4332 if (retval != ERROR_OK) {
4333 free(samples);
4334 return retval;
4335 }
4336 uint32_t duration_ms = timeval_ms() - timestart_ms;
4337
4338 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4339
4340 retval = target_poll(target);
4341 if (retval != ERROR_OK) {
4342 free(samples);
4343 return retval;
4344 }
4345
4346 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4347 /* The target was halted before we started and is running now. Halt it,
4348 * for consistency. */
4349 retval = target_halt(target);
4350 if (retval != ERROR_OK) {
4351 free(samples);
4352 return retval;
4353 }
4354 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4355 /* The target was running before we started and is halted now. Resume
4356 * it, for consistency. */
4357 retval = target_resume(target, 1, 0, 0, 0);
4358 if (retval != ERROR_OK) {
4359 free(samples);
4360 return retval;
4361 }
4362 }
4363
4364 retval = target_poll(target);
4365 if (retval != ERROR_OK) {
4366 free(samples);
4367 return retval;
4368 }
4369
4370 uint32_t start_address = 0;
4371 uint32_t end_address = 0;
4372 bool with_range = false;
4373 if (CMD_ARGC == 4) {
4374 with_range = true;
4375 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4376 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4377 }
4378
4379 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4380 with_range, start_address, end_address, target, duration_ms);
4381 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4382
4383 free(samples);
4384 return retval;
4385 }
4386
4387 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4388 {
4389 char *namebuf;
4390 Jim_Obj *obj_name, *obj_val;
4391 int result;
4392
4393 namebuf = alloc_printf("%s(%d)", varname, idx);
4394 if (!namebuf)
4395 return JIM_ERR;
4396
4397 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4398 jim_wide wide_val = val;
4399 obj_val = Jim_NewWideObj(interp, wide_val);
4400 if (!obj_name || !obj_val) {
4401 free(namebuf);
4402 return JIM_ERR;
4403 }
4404
4405 Jim_IncrRefCount(obj_name);
4406 Jim_IncrRefCount(obj_val);
4407 result = Jim_SetVariable(interp, obj_name, obj_val);
4408 Jim_DecrRefCount(interp, obj_name);
4409 Jim_DecrRefCount(interp, obj_val);
4410 free(namebuf);
4411 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4412 return result;
4413 }
4414
4415 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4416 {
4417 struct command_context *context;
4418 struct target *target;
4419
4420 context = current_command_context(interp);
4421 assert(context);
4422
4423 target = get_current_target(context);
4424 if (!target) {
4425 LOG_ERROR("mem2array: no current target");
4426 return JIM_ERR;
4427 }
4428
4429 return target_mem2array(interp, target, argc - 1, argv + 1);
4430 }
4431
/* Read target memory and store each element into the Tcl array variable
 * "varname(idx)" as a wide integer.  Reads are chunked through a 4 KiB
 * bounce buffer.  Returns JIM_OK, or JIM_ERR with a message in the
 * interpreter result. */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
			width_bits != 16 &&
			width_bits != 32 &&
			width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	/* element width in bytes */
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys (use physical-address access) */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* reject address-range wraparound */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* the address must be naturally aligned for the element width */
	if ((width == 1) ||
		((width == 2) && ((addr & 1) == 0)) ||
		((width == 4) && ((addr & 3) == 0)) ||
		((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					addr,
					width,
					chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* decode each element using the target's endianness and
			 * publish it into the Tcl array */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4587
4588 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4589 {
4590 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4591 if (!namebuf)
4592 return JIM_ERR;
4593
4594 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4595 if (!obj_name) {
4596 free(namebuf);
4597 return JIM_ERR;
4598 }
4599
4600 Jim_IncrRefCount(obj_name);
4601 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4602 Jim_DecrRefCount(interp, obj_name);
4603 free(namebuf);
4604 if (!obj_val)
4605 return JIM_ERR;
4606
4607 jim_wide wide_val;
4608 int result = Jim_GetWide(interp, obj_val, &wide_val);
4609 *val = wide_val;
4610 return result;
4611 }
4612
4613 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4614 {
4615 struct command_context *context;
4616 struct target *target;
4617
4618 context = current_command_context(interp);
4619 assert(context);
4620
4621 target = get_current_target(context);
4622 if (!target) {
4623 LOG_ERROR("array2mem: no current target");
4624 return JIM_ERR;
4625 }
4626
4627 return target_array2mem(interp, target, argc-1, argv + 1);
4628 }
4629
4630 static int target_array2mem(Jim_Interp *interp, struct target *target,
4631 int argc, Jim_Obj *const *argv)
4632 {
4633 int e;
4634
4635 /* argv[0] = name of array from which to read the data
4636 * argv[1] = desired element width in bits
4637 * argv[2] = memory address
4638 * argv[3] = number of elements to write
4639 * argv[4] = optional "phys"
4640 */
4641 if (argc < 4 || argc > 5) {
4642 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4643 return JIM_ERR;
4644 }
4645
4646 /* Arg 0: Name of the array variable */
4647 const char *varname = Jim_GetString(argv[0], NULL);
4648
4649 /* Arg 1: Bit width of one element */
4650 long l;
4651 e = Jim_GetLong(interp, argv[1], &l);
4652 if (e != JIM_OK)
4653 return e;
4654 const unsigned int width_bits = l;
4655
4656 if (width_bits != 8 &&
4657 width_bits != 16 &&
4658 width_bits != 32 &&
4659 width_bits != 64) {
4660 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4661 Jim_AppendStrings(interp, Jim_GetResult(interp),
4662 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4663 return JIM_ERR;
4664 }
4665 const unsigned int width = width_bits / 8;
4666
4667 /* Arg 2: Memory address */
4668 jim_wide wide_addr;
4669 e = Jim_GetWide(interp, argv[2], &wide_addr);
4670 if (e != JIM_OK)
4671 return e;
4672 target_addr_t addr = (target_addr_t)wide_addr;
4673
4674 /* Arg 3: Number of elements to write */
4675 e = Jim_GetLong(interp, argv[3], &l);
4676 if (e != JIM_OK)
4677 return e;
4678 size_t len = l;
4679
4680 /* Arg 4: Phys */
4681 bool is_phys = false;
4682 if (argc > 4) {
4683 int str_len = 0;
4684 const char *phys = Jim_GetString(argv[4], &str_len);
4685 if (!strncmp(phys, "phys", str_len))
4686 is_phys = true;
4687 else
4688 return JIM_ERR;
4689 }
4690
4691 /* Argument checks */
4692 if (len == 0) {
4693 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4694 Jim_AppendStrings(interp, Jim_GetResult(interp),
4695 "array2mem: zero width read?", NULL);
4696 return JIM_ERR;
4697 }
4698
4699 if ((addr + (len * width)) < addr) {
4700 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4701 Jim_AppendStrings(interp, Jim_GetResult(interp),
4702 "array2mem: addr + len - wraps to zero?", NULL);
4703 return JIM_ERR;
4704 }
4705
4706 if (len > 65536) {
4707 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4708 Jim_AppendStrings(interp, Jim_GetResult(interp),
4709 "array2mem: too large memory write request, exceeds 64K items", NULL);
4710 return JIM_ERR;
4711 }
4712
4713 if ((width == 1) ||
4714 ((width == 2) && ((addr & 1) == 0)) ||
4715 ((width == 4) && ((addr & 3) == 0)) ||
4716 ((width == 8) && ((addr & 7) == 0))) {
4717 /* alignment correct */
4718 } else {
4719 char buf[100];
4720 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4721 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4722 addr,
4723 width);
4724 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4725 return JIM_ERR;
4726 }
4727
4728 /* Transfer loop */
4729
4730 /* assume ok */
4731 e = JIM_OK;
4732
4733 const size_t buffersize = 4096;
4734 uint8_t *buffer = malloc(buffersize);
4735 if (!buffer)
4736 return JIM_ERR;
4737
4738 /* index counter */
4739 size_t idx = 0;
4740
4741 while (len) {
4742 /* Slurp... in buffer size chunks */
4743 const unsigned int max_chunk_len = buffersize / width;
4744
4745 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4746
4747 /* Fill the buffer */
4748 for (size_t i = 0; i < chunk_len; i++, idx++) {
4749 uint64_t v = 0;
4750 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4751 free(buffer);
4752 return JIM_ERR;
4753 }
4754 switch (width) {
4755 case 8:
4756 target_buffer_set_u64(target, &buffer[i * width], v);
4757 break;
4758 case 4:
4759 target_buffer_set_u32(target, &buffer[i * width], v);
4760 break;
4761 case 2:
4762 target_buffer_set_u16(target, &buffer[i * width], v);
4763 break;
4764 case 1:
4765 buffer[i] = v & 0x0ff;
4766 break;
4767 }
4768 }
4769 len -= chunk_len;
4770
4771 /* Write the buffer to memory */
4772 int retval;
4773 if (is_phys)
4774 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4775 else
4776 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4777 if (retval != ERROR_OK) {
4778 /* BOO !*/
4779 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4780 addr,
4781 width,
4782 chunk_len);
4783 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4784 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4785 e = JIM_ERR;
4786 break;
4787 }
4788 addr += chunk_len * width;
4789 }
4790
4791 free(buffer);
4792
4793 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4794
4795 return e;
4796 }
4797
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/* Run every registered Tcl handler for event @c e on @c target.
 * Handlers execute with the current target temporarily overridden to
 * @c target; script errors are reported but do not stop iteration. */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					   target->target_number,
					   target_name(target),
					   target_type_name(target),
					   e,
					   jim_nvp_value2name_simple(nvp_target_event, e)->name,
					   Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			/* the handler closed the connection; stop processing entirely */
			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* unwrap a Tcl "return" into the actual return code */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						  jim_nvp_value2name_simple(nvp_target_event, e)->name,
						  target_name(target),
						  Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
4846
4847 /**
4848 * Returns true only if the target has a handler for the specified event.
4849 */
4850 bool target_has_event_action(struct target *target, enum target_event event)
4851 {
4852 struct target_event_action *teap;
4853
4854 for (teap = target->event_action; teap; teap = teap->next) {
4855 if (teap->event == event)
4856 return true;
4857 }
4858 return false;
4859 }
4860
/* Keys of the "-option" arguments accepted by target configure/cget;
 * mapped from option names by nvp_config_opts and dispatched in
 * target_configure(). */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
4877
/* Option-name to enum mapping used by target_configure() to parse the
 * "-option" arguments of the configure/cget target commands.
 * Terminated by a NULL-name sentinel. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",             .value = TCFG_TYPE },
	{ .name = "-event",            .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",           .value = TCFG_ENDIAN },
	{ .name = "-coreid",           .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",          .value = TCFG_DBGBASE },
	{ .name = "-rtos",             .value = TCFG_RTOS },
	{ .name = "-defer-examine",    .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",         .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections",   .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
4895
/* Parse the option/value pairs of a target "configure" (set) or "cget"
 * (query) command.  goi->isconfigure selects between the two modes.
 * The target type gets first chance at each option via its
 * target_jim_configure hook.  Returns JIM_OK when all options were
 * consumed, JIM_ERR (with a message in the interpreter result)
 * otherwise. */
static int target_configure(struct jim_getopt_info *goi, struct target *target)
{
	struct jim_nvp *n;
	Jim_Obj *o;
	jim_wide w;
	int e;

	/* parse config or cget options ... */
	while (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);
		/* jim_getopt_debug(goi); */

		if (target->type->target_jim_configure) {
			/* target defines a configure function */
			/* target gets first dibs on parameters */
			e = (*(target->type->target_jim_configure))(target, goi);
			if (e == JIM_OK) {
				/* more? */
				continue;
			}
			if (e == JIM_ERR) {
				/* An error */
				return e;
			}
			/* otherwise we 'continue' below */
		}
		e = jim_getopt_nvp(goi, nvp_config_opts, &n);
		if (e != JIM_OK) {
			jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
			return e;
		}
		switch (n->value) {
		case TCFG_TYPE:
			/* not settable */
			if (goi->isconfigure) {
				Jim_SetResultFormatted(goi->interp,
						"not settable: %s", n->name);
				return JIM_ERR;
			} else {
no_params:
				/* shared "cget takes no further value" check,
				 * reached via goto from the query paths below */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}
			}
			Jim_SetResultString(goi->interp,
					target_type_name(target), -1);
			/* loop for more */
			break;
		case TCFG_EVENT:
			if (goi->argc == 0) {
				Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
				return JIM_ERR;
			}

			e = jim_getopt_nvp(goi, nvp_target_event, &n);
			if (e != JIM_OK) {
				jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
				return e;
			}

			if (goi->isconfigure) {
				if (goi->argc != 1) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
					return JIM_ERR;
				}
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
					return JIM_ERR;
				}
			}

			{
				struct target_event_action *teap;

				teap = target->event_action;
				/* replace existing? */
				while (teap) {
					if (teap->event == (enum target_event)n->value)
						break;
					teap = teap->next;
				}

				if (goi->isconfigure) {
					/* START_DEPRECATED_TPIU */
					if (n->value == TARGET_EVENT_TRACE_CONFIG)
						LOG_INFO("DEPRECATED target event %s", n->name);
					/* END_DEPRECATED_TPIU */

					bool replace = true;
					if (!teap) {
						/* create new */
						teap = calloc(1, sizeof(*teap));
						replace = false;
					}
					teap->event = n->value;
					teap->interp = goi->interp;
					jim_getopt_obj(goi, &o);
					/* drop the previous body before installing the new one */
					if (teap->body)
						Jim_DecrRefCount(teap->interp, teap->body);
					teap->body = Jim_DuplicateObj(goi->interp, o);
					/*
					 * FIXME:
					 * Tcl/TK - "tk events" have a nice feature.
					 * See the "BIND" command.
					 * We should support that here.
					 * You can specify %X and %Y in the event code.
					 * The idea is: %T - target name.
					 * The idea is: %N - target number
					 * The idea is: %E - event name.
					 */
					Jim_IncrRefCount(teap->body);

					if (!replace) {
						/* add to head of event list */
						teap->next = target->event_action;
						target->event_action = teap;
					}
					Jim_SetEmptyResult(goi->interp);
				} else {
					/* get */
					if (!teap)
						Jim_SetEmptyResult(goi->interp);
					else
						Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
				}
			}
			/* loop for more */
			break;

		case TCFG_WORK_AREA_VIRT:
			if (goi->isconfigure) {
				/* changing the work area invalidates any allocations in it */
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_virt = w;
				target->working_area_virt_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_PHYS:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_phys = w;
				target->working_area_phys_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_SIZE:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_size = w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_BACKUP:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* make this exactly 1 or 0 */
				target->backup_working_area = (!!w);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
			/* loop for more e*/
			break;


		case TCFG_ENDIAN:
			if (goi->isconfigure) {
				e = jim_getopt_nvp(goi, nvp_target_endian, &n);
				if (e != JIM_OK) {
					jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
					return e;
				}
				target->endianness = n->value;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			/* fall back to little-endian if the stored value has no name */
			if (!n->name) {
				target->endianness = TARGET_LITTLE_ENDIAN;
				n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			}
			Jim_SetResultString(goi->interp, n->name, -1);
			/* loop for more */
			break;

		case TCFG_COREID:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->coreid = (int32_t)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
			/* loop for more */
			break;

		case TCFG_CHAIN_POSITION:
			if (goi->isconfigure) {
				Jim_Obj *o_t;
				struct jtag_tap *tap;

				/* -chain-position and -dap are mutually exclusive */
				if (target->has_dap) {
					Jim_SetResultString(goi->interp,
						"target requires -dap parameter instead of -chain-position!", -1);
					return JIM_ERR;
				}

				target_free_all_working_areas(target);
				e = jim_getopt_obj(goi, &o_t);
				if (e != JIM_OK)
					return e;
				tap = jtag_tap_by_jim_obj(goi->interp, o_t);
				if (!tap)
					return JIM_ERR;
				target->tap = tap;
				target->tap_configured = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
			/* loop for more e*/
			break;
		case TCFG_DBGBASE:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->dbgbase = (uint32_t)w;
				target->dbgbase_set = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
			/* loop for more */
			break;
		case TCFG_RTOS:
			/* RTOS */
			{
				int result = rtos_create(goi, target);
				if (result != JIM_OK)
					return result;
			}
			/* loop for more */
			break;

		case TCFG_DEFER_EXAMINE:
			/* DEFER_EXAMINE */
			target->defer_examine = true;
			/* loop for more */
			break;

		case TCFG_GDB_PORT:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				/* only settable during the configuration stage */
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
					return JIM_ERR;
				}

				const char *s;
				e = jim_getopt_string(goi, &s, NULL);
				if (e != JIM_OK)
					return e;
				free(target->gdb_port_override);
				target->gdb_port_override = strdup(s);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
			/* loop for more */
			break;

		case TCFG_GDB_MAX_CONNECTIONS:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
					return JIM_ERR;
				}

				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* negative values mean "no limit" */
				target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
			break;
		}
	} /* while (goi->argc) */


	/* done - we return */
	return JIM_OK;
}
5232
5233 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5234 {
5235 struct command *c = jim_to_command(interp);
5236 struct jim_getopt_info goi;
5237
5238 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5239 goi.isconfigure = !strcmp(c->name, "configure");
5240 if (goi.argc < 1) {
5241 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5242 "missing: -option ...");
5243 return JIM_ERR;
5244 }
5245 struct command_context *cmd_ctx = current_command_context(interp);
5246 assert(cmd_ctx);
5247 struct target *target = get_current_target(cmd_ctx);
5248 return target_configure(&goi, target);
5249 }
5250
5251 static int jim_target_mem2array(Jim_Interp *interp,
5252 int argc, Jim_Obj *const *argv)
5253 {
5254 struct command_context *cmd_ctx = current_command_context(interp);
5255 assert(cmd_ctx);
5256 struct target *target = get_current_target(cmd_ctx);
5257 return target_mem2array(interp, target, argc - 1, argv + 1);
5258 }
5259
5260 static int jim_target_array2mem(Jim_Interp *interp,
5261 int argc, Jim_Obj *const *argv)
5262 {
5263 struct command_context *cmd_ctx = current_command_context(interp);
5264 assert(cmd_ctx);
5265 struct target *target = get_current_target(cmd_ctx);
5266 return target_array2mem(interp, target, argc - 1, argv + 1);
5267 }
5268
/* Common error path: report that the target's TAP is disabled and
 * signal failure to the interpreter. */
static int jim_target_tap_disabled(Jim_Interp *interp)
{
	Jim_SetResultFormatted(interp, "[TAP is disabled]");
	return JIM_ERR;
}
5274
/* "arp_examine"/"examine" subcommand: run the target's examine handler.
 * With the optional "allow-defer" argument, honor -defer-examine and
 * skip examination until requested explicitly. */
static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	bool allow_defer = false;

	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
	if (goi.argc > 1) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
				"usage: %s ['allow-defer']", cmd_name);
		return JIM_ERR;
	}
	/* NOTE: argv[1] is the same object goi would hand out next, since
	 * goi was set up one past argv[0] */
	if (goi.argc > 0 &&
	    strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
		/* consume it */
		Jim_Obj *obj;
		int e = jim_getopt_obj(&goi, &obj);
		if (e != JIM_OK)
			return e;
		allow_defer = true;
	}

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	if (allow_defer && target->defer_examine) {
		LOG_INFO("Deferring arp_examine of %s", target_name(target));
		LOG_INFO("Use arp_examine command to examine it manually!");
		return JIM_OK;
	}

	int e = target->type->examine(target);
	if (e != ERROR_OK)
		return JIM_ERR;
	return JIM_OK;
}
5314
5315 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5316 {
5317 struct command_context *cmd_ctx = current_command_context(interp);
5318 assert(cmd_ctx);
5319 struct target *target = get_current_target(cmd_ctx);
5320
5321 Jim_SetResultBool(interp, target_was_examined(target));
5322 return JIM_OK;
5323 }
5324
5325 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5326 {
5327 struct command_context *cmd_ctx = current_command_context(interp);
5328 assert(cmd_ctx);
5329 struct target *target = get_current_target(cmd_ctx);
5330
5331 Jim_SetResultBool(interp, target->defer_examine);
5332 return JIM_OK;
5333 }
5334
5335 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5336 {
5337 if (argc != 1) {
5338 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5339 return JIM_ERR;
5340 }
5341 struct command_context *cmd_ctx = current_command_context(interp);
5342 assert(cmd_ctx);
5343 struct target *target = get_current_target(cmd_ctx);
5344
5345 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5346 return JIM_ERR;
5347
5348 return JIM_OK;
5349 }
5350
5351 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5352 {
5353 if (argc != 1) {
5354 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5355 return JIM_ERR;
5356 }
5357 struct command_context *cmd_ctx = current_command_context(interp);
5358 assert(cmd_ctx);
5359 struct target *target = get_current_target(cmd_ctx);
5360 if (!target->tap->enabled)
5361 return jim_target_tap_disabled(interp);
5362
5363 int e;
5364 if (!(target_was_examined(target)))
5365 e = ERROR_TARGET_NOT_EXAMINED;
5366 else
5367 e = target->type->poll(target);
5368 if (e != ERROR_OK)
5369 return JIM_ERR;
5370 return JIM_OK;
5371 }
5372
/* 'arp_reset' handler: assert or deassert the current target's reset.
 * Usage: arp_reset ('assert'|'deassert') BOOL
 * where BOOL selects whether the target should halt out of reset.
 * Invalidates all working areas, since memory contents are lost. */
static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	if (goi.argc != 2) {
		Jim_WrongNumArgs(interp, 0, argv,
			"([tT]|[fF]|assert|deassert) BOOL");
		return JIM_ERR;
	}

	/* assert/deassert keyword (nvp_assert also accepts t/f spellings) */
	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_assert, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
		return e;
	}
	/* the halt or not param */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	if (!target->type->assert_reset || !target->type->deassert_reset) {
		Jim_SetResultFormatted(interp,
			"No target-specific reset for %s",
			target_name(target));
		return JIM_ERR;
	}

	/* a -defer-examine target must be re-examined after reset,
	 * so drop its 'examined' flag now */
	if (target->defer_examine)
		target_reset_examined(target);

	/* determine if we should halt or not. */
	target->reset_halt = (a != 0);
	/* When this happens - all workareas are invalid. */
	target_free_all_working_areas_restore(target, 0);

	/* do the assert */
	if (n->value == NVP_ASSERT)
		e = target->type->assert_reset(target);
	else
		e = target->type->deassert_reset(target);
	return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
}
5424
5425 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5426 {
5427 if (argc != 1) {
5428 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5429 return JIM_ERR;
5430 }
5431 struct command_context *cmd_ctx = current_command_context(interp);
5432 assert(cmd_ctx);
5433 struct target *target = get_current_target(cmd_ctx);
5434 if (!target->tap->enabled)
5435 return jim_target_tap_disabled(interp);
5436 int e = target->type->halt(target);
5437 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5438 }
5439
5440 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5441 {
5442 struct jim_getopt_info goi;
5443 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5444
5445 /* params: <name> statename timeoutmsecs */
5446 if (goi.argc != 2) {
5447 const char *cmd_name = Jim_GetString(argv[0], NULL);
5448 Jim_SetResultFormatted(goi.interp,
5449 "%s <state_name> <timeout_in_msec>", cmd_name);
5450 return JIM_ERR;
5451 }
5452
5453 struct jim_nvp *n;
5454 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5455 if (e != JIM_OK) {
5456 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5457 return e;
5458 }
5459 jim_wide a;
5460 e = jim_getopt_wide(&goi, &a);
5461 if (e != JIM_OK)
5462 return e;
5463 struct command_context *cmd_ctx = current_command_context(interp);
5464 assert(cmd_ctx);
5465 struct target *target = get_current_target(cmd_ctx);
5466 if (!target->tap->enabled)
5467 return jim_target_tap_disabled(interp);
5468
5469 e = target_wait_state(target, n->value, a);
5470 if (e != ERROR_OK) {
5471 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5472 Jim_SetResultFormatted(goi.interp,
5473 "target: %s wait %s fails (%#s) %s",
5474 target_name(target), n->name,
5475 obj, target_strerror_safe(e));
5476 return JIM_ERR;
5477 }
5478 return JIM_OK;
5479 }
5480 /* List for human, Events defined for this target.
5481 * scripts/programs should use 'name cget -event NAME'
5482 */
5483 COMMAND_HANDLER(handle_target_event_list)
5484 {
5485 struct target *target = get_current_target(CMD_CTX);
5486 struct target_event_action *teap = target->event_action;
5487
5488 command_print(CMD, "Event actions for target (%d) %s\n",
5489 target->target_number,
5490 target_name(target));
5491 command_print(CMD, "%-25s | Body", "Event");
5492 command_print(CMD, "------------------------- | "
5493 "----------------------------------------");
5494 while (teap) {
5495 struct jim_nvp *opt = jim_nvp_value2name_simple(nvp_target_event, teap->event);
5496 command_print(CMD, "%-25s | %s",
5497 opt->name, Jim_GetString(teap->body, NULL));
5498 teap = teap->next;
5499 }
5500 command_print(CMD, "***END***");
5501 return ERROR_OK;
5502 }
5503 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5504 {
5505 if (argc != 1) {
5506 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5507 return JIM_ERR;
5508 }
5509 struct command_context *cmd_ctx = current_command_context(interp);
5510 assert(cmd_ctx);
5511 struct target *target = get_current_target(cmd_ctx);
5512 Jim_SetResultString(interp, target_state_name(target), -1);
5513 return JIM_OK;
5514 }
5515 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5516 {
5517 struct jim_getopt_info goi;
5518 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5519 if (goi.argc != 1) {
5520 const char *cmd_name = Jim_GetString(argv[0], NULL);
5521 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5522 return JIM_ERR;
5523 }
5524 struct jim_nvp *n;
5525 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5526 if (e != JIM_OK) {
5527 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5528 return e;
5529 }
5530 struct command_context *cmd_ctx = current_command_context(interp);
5531 assert(cmd_ctx);
5532 struct target *target = get_current_target(cmd_ctx);
5533 target_handle_event(target, n->value);
5534 return JIM_OK;
5535 }
5536
5537 static const struct command_registration target_instance_command_handlers[] = {
5538 {
5539 .name = "configure",
5540 .mode = COMMAND_ANY,
5541 .jim_handler = jim_target_configure,
5542 .help = "configure a new target for use",
5543 .usage = "[target_attribute ...]",
5544 },
5545 {
5546 .name = "cget",
5547 .mode = COMMAND_ANY,
5548 .jim_handler = jim_target_configure,
5549 .help = "returns the specified target attribute",
5550 .usage = "target_attribute",
5551 },
5552 {
5553 .name = "mwd",
5554 .handler = handle_mw_command,
5555 .mode = COMMAND_EXEC,
5556 .help = "Write 64-bit word(s) to target memory",
5557 .usage = "address data [count]",
5558 },
5559 {
5560 .name = "mww",
5561 .handler = handle_mw_command,
5562 .mode = COMMAND_EXEC,
5563 .help = "Write 32-bit word(s) to target memory",
5564 .usage = "address data [count]",
5565 },
5566 {
5567 .name = "mwh",
5568 .handler = handle_mw_command,
5569 .mode = COMMAND_EXEC,
5570 .help = "Write 16-bit half-word(s) to target memory",
5571 .usage = "address data [count]",
5572 },
5573 {
5574 .name = "mwb",
5575 .handler = handle_mw_command,
5576 .mode = COMMAND_EXEC,
5577 .help = "Write byte(s) to target memory",
5578 .usage = "address data [count]",
5579 },
5580 {
5581 .name = "mdd",
5582 .handler = handle_md_command,
5583 .mode = COMMAND_EXEC,
5584 .help = "Display target memory as 64-bit words",
5585 .usage = "address [count]",
5586 },
5587 {
5588 .name = "mdw",
5589 .handler = handle_md_command,
5590 .mode = COMMAND_EXEC,
5591 .help = "Display target memory as 32-bit words",
5592 .usage = "address [count]",
5593 },
5594 {
5595 .name = "mdh",
5596 .handler = handle_md_command,
5597 .mode = COMMAND_EXEC,
5598 .help = "Display target memory as 16-bit half-words",
5599 .usage = "address [count]",
5600 },
5601 {
5602 .name = "mdb",
5603 .handler = handle_md_command,
5604 .mode = COMMAND_EXEC,
5605 .help = "Display target memory as 8-bit bytes",
5606 .usage = "address [count]",
5607 },
5608 {
5609 .name = "array2mem",
5610 .mode = COMMAND_EXEC,
5611 .jim_handler = jim_target_array2mem,
5612 .help = "Writes Tcl array of 8/16/32 bit numbers "
5613 "to target memory",
5614 .usage = "arrayname bitwidth address count",
5615 },
5616 {
5617 .name = "mem2array",
5618 .mode = COMMAND_EXEC,
5619 .jim_handler = jim_target_mem2array,
5620 .help = "Loads Tcl array of 8/16/32 bit numbers "
5621 "from target memory",
5622 .usage = "arrayname bitwidth address count",
5623 },
5624 {
5625 .name = "eventlist",
5626 .handler = handle_target_event_list,
5627 .mode = COMMAND_EXEC,
5628 .help = "displays a table of events defined for this target",
5629 .usage = "",
5630 },
5631 {
5632 .name = "curstate",
5633 .mode = COMMAND_EXEC,
5634 .jim_handler = jim_target_current_state,
5635 .help = "displays the current state of this target",
5636 },
5637 {
5638 .name = "arp_examine",
5639 .mode = COMMAND_EXEC,
5640 .jim_handler = jim_target_examine,
5641 .help = "used internally for reset processing",
5642 .usage = "['allow-defer']",
5643 },
5644 {
5645 .name = "was_examined",
5646 .mode = COMMAND_EXEC,
5647 .jim_handler = jim_target_was_examined,
5648 .help = "used internally for reset processing",
5649 },
5650 {
5651 .name = "examine_deferred",
5652 .mode = COMMAND_EXEC,
5653 .jim_handler = jim_target_examine_deferred,
5654 .help = "used internally for reset processing",
5655 },
5656 {
5657 .name = "arp_halt_gdb",
5658 .mode = COMMAND_EXEC,
5659 .jim_handler = jim_target_halt_gdb,
5660 .help = "used internally for reset processing to halt GDB",
5661 },
5662 {
5663 .name = "arp_poll",
5664 .mode = COMMAND_EXEC,
5665 .jim_handler = jim_target_poll,
5666 .help = "used internally for reset processing",
5667 },
5668 {
5669 .name = "arp_reset",
5670 .mode = COMMAND_EXEC,
5671 .jim_handler = jim_target_reset,
5672 .help = "used internally for reset processing",
5673 },
5674 {
5675 .name = "arp_halt",
5676 .mode = COMMAND_EXEC,
5677 .jim_handler = jim_target_halt,
5678 .help = "used internally for reset processing",
5679 },
5680 {
5681 .name = "arp_waitstate",
5682 .mode = COMMAND_EXEC,
5683 .jim_handler = jim_target_wait_state,
5684 .help = "used internally for reset processing",
5685 },
5686 {
5687 .name = "invoke-event",
5688 .mode = COMMAND_EXEC,
5689 .jim_handler = jim_target_invoke_event,
5690 .help = "invoke handler for specified event",
5691 .usage = "event_name",
5692 },
5693 COMMAND_REGISTRATION_DONE
5694 };
5695
5696 static int target_create(struct jim_getopt_info *goi)
5697 {
5698 Jim_Obj *new_cmd;
5699 Jim_Cmd *cmd;
5700 const char *cp;
5701 int e;
5702 int x;
5703 struct target *target;
5704 struct command_context *cmd_ctx;
5705
5706 cmd_ctx = current_command_context(goi->interp);
5707 assert(cmd_ctx);
5708
5709 if (goi->argc < 3) {
5710 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5711 return JIM_ERR;
5712 }
5713
5714 /* COMMAND */
5715 jim_getopt_obj(goi, &new_cmd);
5716 /* does this command exist? */
5717 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5718 if (cmd) {
5719 cp = Jim_GetString(new_cmd, NULL);
5720 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5721 return JIM_ERR;
5722 }
5723
5724 /* TYPE */
5725 e = jim_getopt_string(goi, &cp, NULL);
5726 if (e != JIM_OK)
5727 return e;
5728 struct transport *tr = get_current_transport();
5729 if (tr->override_target) {
5730 e = tr->override_target(&cp);
5731 if (e != ERROR_OK) {
5732 LOG_ERROR("The selected transport doesn't support this target");
5733 return JIM_ERR;
5734 }
5735 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5736 }
5737 /* now does target type exist */
5738 for (x = 0 ; target_types[x] ; x++) {
5739 if (strcmp(cp, target_types[x]->name) == 0) {
5740 /* found */
5741 break;
5742 }
5743 }
5744 if (!target_types[x]) {
5745 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5746 for (x = 0 ; target_types[x] ; x++) {
5747 if (target_types[x + 1]) {
5748 Jim_AppendStrings(goi->interp,
5749 Jim_GetResult(goi->interp),
5750 target_types[x]->name,
5751 ", ", NULL);
5752 } else {
5753 Jim_AppendStrings(goi->interp,
5754 Jim_GetResult(goi->interp),
5755 " or ",
5756 target_types[x]->name, NULL);
5757 }
5758 }
5759 return JIM_ERR;
5760 }
5761
5762 /* Create it */
5763 target = calloc(1, sizeof(struct target));
5764 if (!target) {
5765 LOG_ERROR("Out of memory");
5766 return JIM_ERR;
5767 }
5768
5769 /* set target number */
5770 target->target_number = new_target_number();
5771
5772 /* allocate memory for each unique target type */
5773 target->type = malloc(sizeof(struct target_type));
5774 if (!target->type) {
5775 LOG_ERROR("Out of memory");
5776 free(target);
5777 return JIM_ERR;
5778 }
5779
5780 memcpy(target->type, target_types[x], sizeof(struct target_type));
5781
5782 /* default to first core, override with -coreid */
5783 target->coreid = 0;
5784
5785 target->working_area = 0x0;
5786 target->working_area_size = 0x0;
5787 target->working_areas = NULL;
5788 target->backup_working_area = 0;
5789
5790 target->state = TARGET_UNKNOWN;
5791 target->debug_reason = DBG_REASON_UNDEFINED;
5792 target->reg_cache = NULL;
5793 target->breakpoints = NULL;
5794 target->watchpoints = NULL;
5795 target->next = NULL;
5796 target->arch_info = NULL;
5797
5798 target->verbose_halt_msg = true;
5799
5800 target->halt_issued = false;
5801
5802 /* initialize trace information */
5803 target->trace_info = calloc(1, sizeof(struct trace));
5804 if (!target->trace_info) {
5805 LOG_ERROR("Out of memory");
5806 free(target->type);
5807 free(target);
5808 return JIM_ERR;
5809 }
5810
5811 target->dbgmsg = NULL;
5812 target->dbg_msg_enabled = 0;
5813
5814 target->endianness = TARGET_ENDIAN_UNKNOWN;
5815
5816 target->rtos = NULL;
5817 target->rtos_auto_detect = false;
5818
5819 target->gdb_port_override = NULL;
5820 target->gdb_max_connections = 1;
5821
5822 /* Do the rest as "configure" options */
5823 goi->isconfigure = 1;
5824 e = target_configure(goi, target);
5825
5826 if (e == JIM_OK) {
5827 if (target->has_dap) {
5828 if (!target->dap_configured) {
5829 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5830 e = JIM_ERR;
5831 }
5832 } else {
5833 if (!target->tap_configured) {
5834 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5835 e = JIM_ERR;
5836 }
5837 }
5838 /* tap must be set after target was configured */
5839 if (!target->tap)
5840 e = JIM_ERR;
5841 }
5842
5843 if (e != JIM_OK) {
5844 rtos_destroy(target);
5845 free(target->gdb_port_override);
5846 free(target->trace_info);
5847 free(target->type);
5848 free(target);
5849 return e;
5850 }
5851
5852 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5853 /* default endian to little if not specified */
5854 target->endianness = TARGET_LITTLE_ENDIAN;
5855 }
5856
5857 cp = Jim_GetString(new_cmd, NULL);
5858 target->cmd_name = strdup(cp);
5859 if (!target->cmd_name) {
5860 LOG_ERROR("Out of memory");
5861 rtos_destroy(target);
5862 free(target->gdb_port_override);
5863 free(target->trace_info);
5864 free(target->type);
5865 free(target);
5866 return JIM_ERR;
5867 }
5868
5869 if (target->type->target_create) {
5870 e = (*(target->type->target_create))(target, goi->interp);
5871 if (e != ERROR_OK) {
5872 LOG_DEBUG("target_create failed");
5873 free(target->cmd_name);
5874 rtos_destroy(target);
5875 free(target->gdb_port_override);
5876 free(target->trace_info);
5877 free(target->type);
5878 free(target);
5879 return JIM_ERR;
5880 }
5881 }
5882
5883 /* create the target specific commands */
5884 if (target->type->commands) {
5885 e = register_commands(cmd_ctx, NULL, target->type->commands);
5886 if (e != ERROR_OK)
5887 LOG_ERROR("unable to register '%s' commands", cp);
5888 }
5889
5890 /* now - create the new target name command */
5891 const struct command_registration target_subcommands[] = {
5892 {
5893 .chain = target_instance_command_handlers,
5894 },
5895 {
5896 .chain = target->type->commands,
5897 },
5898 COMMAND_REGISTRATION_DONE
5899 };
5900 const struct command_registration target_commands[] = {
5901 {
5902 .name = cp,
5903 .mode = COMMAND_ANY,
5904 .help = "target command group",
5905 .usage = "",
5906 .chain = target_subcommands,
5907 },
5908 COMMAND_REGISTRATION_DONE
5909 };
5910 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
5911 if (e != ERROR_OK) {
5912 if (target->type->deinit_target)
5913 target->type->deinit_target(target);
5914 free(target->cmd_name);
5915 rtos_destroy(target);
5916 free(target->gdb_port_override);
5917 free(target->trace_info);
5918 free(target->type);
5919 free(target);
5920 return JIM_ERR;
5921 }
5922
5923 /* append to end of list */
5924 append_to_list_all_targets(target);
5925
5926 cmd_ctx->current_target = target;
5927 return JIM_OK;
5928 }
5929
5930 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5931 {
5932 if (argc != 1) {
5933 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5934 return JIM_ERR;
5935 }
5936 struct command_context *cmd_ctx = current_command_context(interp);
5937 assert(cmd_ctx);
5938
5939 struct target *target = get_current_target_or_null(cmd_ctx);
5940 if (target)
5941 Jim_SetResultString(interp, target_name(target), -1);
5942 return JIM_OK;
5943 }
5944
5945 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5946 {
5947 if (argc != 1) {
5948 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5949 return JIM_ERR;
5950 }
5951 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5952 for (unsigned x = 0; target_types[x]; x++) {
5953 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5954 Jim_NewStringObj(interp, target_types[x]->name, -1));
5955 }
5956 return JIM_OK;
5957 }
5958
5959 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5960 {
5961 if (argc != 1) {
5962 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5963 return JIM_ERR;
5964 }
5965 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5966 struct target *target = all_targets;
5967 while (target) {
5968 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5969 Jim_NewStringObj(interp, target_name(target), -1));
5970 target = target->next;
5971 }
5972 return JIM_OK;
5973 }
5974
5975 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5976 {
5977 int i;
5978 const char *targetname;
5979 int retval, len;
5980 struct target *target = (struct target *) NULL;
5981 struct target_list *head, *curr, *new;
5982 curr = (struct target_list *) NULL;
5983 head = (struct target_list *) NULL;
5984
5985 retval = 0;
5986 LOG_DEBUG("%d", argc);
5987 /* argv[1] = target to associate in smp
5988 * argv[2] = target to associate in smp
5989 * argv[3] ...
5990 */
5991
5992 for (i = 1; i < argc; i++) {
5993
5994 targetname = Jim_GetString(argv[i], &len);
5995 target = get_target(targetname);
5996 LOG_DEBUG("%s ", targetname);
5997 if (target) {
5998 new = malloc(sizeof(struct target_list));
5999 new->target = target;
6000 new->next = (struct target_list *)NULL;
6001 if (head == (struct target_list *)NULL) {
6002 head = new;
6003 curr = head;
6004 } else {
6005 curr->next = new;
6006 curr = new;
6007 }
6008 }
6009 }
6010 /* now parse the list of cpu and put the target in smp mode*/
6011 curr = head;
6012
6013 while (curr != (struct target_list *)NULL) {
6014 target = curr->target;
6015 target->smp = 1;
6016 target->head = head;
6017 curr = curr->next;
6018 }
6019
6020 if (target && target->rtos)
6021 retval = rtos_smp_init(head->target);
6022
6023 return retval;
6024 }
6025
6026
6027 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6028 {
6029 struct jim_getopt_info goi;
6030 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6031 if (goi.argc < 3) {
6032 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6033 "<name> <target_type> [<target_options> ...]");
6034 return JIM_ERR;
6035 }
6036 return target_create(&goi);
6037 }
6038
6039 static const struct command_registration target_subcommand_handlers[] = {
6040 {
6041 .name = "init",
6042 .mode = COMMAND_CONFIG,
6043 .handler = handle_target_init_command,
6044 .help = "initialize targets",
6045 .usage = "",
6046 },
6047 {
6048 .name = "create",
6049 .mode = COMMAND_CONFIG,
6050 .jim_handler = jim_target_create,
6051 .usage = "name type '-chain-position' name [options ...]",
6052 .help = "Creates and selects a new target",
6053 },
6054 {
6055 .name = "current",
6056 .mode = COMMAND_ANY,
6057 .jim_handler = jim_target_current,
6058 .help = "Returns the currently selected target",
6059 },
6060 {
6061 .name = "types",
6062 .mode = COMMAND_ANY,
6063 .jim_handler = jim_target_types,
6064 .help = "Returns the available target types as "
6065 "a list of strings",
6066 },
6067 {
6068 .name = "names",
6069 .mode = COMMAND_ANY,
6070 .jim_handler = jim_target_names,
6071 .help = "Returns the names of all targets as a list of strings",
6072 },
6073 {
6074 .name = "smp",
6075 .mode = COMMAND_ANY,
6076 .jim_handler = jim_target_smp,
6077 .usage = "targetname1 targetname2 ...",
6078 .help = "gather several target in a smp list"
6079 },
6080
6081 COMMAND_REGISTRATION_DONE
6082 };
6083
/* One image section staged in host memory by 'fast_load_image', to be
 * written to the target later by 'fast_load'. */
struct fast_load {
	target_addr_t address;	/* target address for this chunk */
	uint8_t *data;		/* heap copy of the (possibly clipped) section data */
	int length;		/* number of bytes in 'data' */
};

/* Staged image: number of entries and the array itself
 * (fastload is NULL when no image is staged). */
static int fastload_num;
static struct fast_load *fastload;
6093
6094 static void free_fastload(void)
6095 {
6096 if (fastload) {
6097 for (int i = 0; i < fastload_num; i++)
6098 free(fastload[i].data);
6099 free(fastload);
6100 fastload = NULL;
6101 }
6102 }
6103
/* 'fast_load_image' handler: read an image file into host memory so a
 * later 'fast_load' can push it to the target without touching disk.
 * Each section is clipped to the [min_address, max_address] window
 * given on the command line before being staged. */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;

	struct image image;

	/* option parsing is shared with the regular 'load_image' command */
	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
		&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct duration bench;
	duration_start(&bench);

	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct fast_load)*image.num_sections);
	if (!fastload) {
		command_print(CMD, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	/* zero all entries so sections outside the address window stay
	 * as harmless {0, NULL, 0} records */
	memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD, "error allocating buffer for section (%d bytes)",
				(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* stage only the part of the section overlapping the
		 * [min_address, max_address] window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (!fastload[i].data) {
				free(buffer);
				command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
					length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			command_print(CMD, "%u bytes written at address 0x%8.8x",
				(unsigned int)length,
				((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "Loaded %" PRIu32 " bytes "
			"in %fs (%0.3f KiB/s)", image_size,
			duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD,
			"WARNING: image has not been loaded to target!"
			"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* any failure drops the partially staged image */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
6205
6206 COMMAND_HANDLER(handle_fast_load_command)
6207 {
6208 if (CMD_ARGC > 0)
6209 return ERROR_COMMAND_SYNTAX_ERROR;
6210 if (!fastload) {
6211 LOG_ERROR("No image in memory");
6212 return ERROR_FAIL;
6213 }
6214 int i;
6215 int64_t ms = timeval_ms();
6216 int size = 0;
6217 int retval = ERROR_OK;
6218 for (i = 0; i < fastload_num; i++) {
6219 struct target *target = get_current_target(CMD_CTX);
6220 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6221 (unsigned int)(fastload[i].address),
6222 (unsigned int)(fastload[i].length));
6223 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6224 if (retval != ERROR_OK)
6225 break;
6226 size += fastload[i].length;
6227 }
6228 if (retval == ERROR_OK) {
6229 int64_t after = timeval_ms();
6230 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6231 }
6232 return retval;
6233 }
6234
/* Top-level commands exported by this module: the 'targets' selector/
 * listing command and the 'target' configuration command group. */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6253
/* Register the top-level 'target'/'targets' commands with the given
 * command context; called once during startup. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6258
/* When true (the default), nag the user after reset with hints about
 * options that would improve performance; set via 'target_reset_nag'. */
static bool target_reset_nag = true;

/* Accessor used by reset handling code elsewhere in the tree. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6265
/* 'target_reset_nag' handler: enable/disable (or query) the post-reset
 * performance-hint nag via the shared bool-parsing helper. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
		&target_reset_nag, "Nag after each reset about options to improve "
		"performance");
}
6272
6273 COMMAND_HANDLER(handle_ps_command)
6274 {
6275 struct target *target = get_current_target(CMD_CTX);
6276 char *display;
6277 if (target->state != TARGET_HALTED) {
6278 LOG_INFO("target not halted !!");
6279 return ERROR_OK;
6280 }
6281
6282 if ((target->rtos) && (target->rtos->type)
6283 && (target->rtos->type->ps_command)) {
6284 display = target->rtos->type->ps_command(target);
6285 command_print(CMD, "%s", display);
6286 free(display);
6287 return ERROR_OK;
6288 } else {
6289 LOG_INFO("failed");
6290 return ERROR_TARGET_FAILURE;
6291 }
6292 }
6293
/* Print an optional label followed by 'size' bytes of 'buf' as
 * space-separated hex, ending the line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	int i = 0;
	while (i < size) {
		command_print_sameline(cmd, " %02x", buf[i]);
		i++;
	}
	command_print(cmd, " ");
}
6302
6303 COMMAND_HANDLER(handle_test_mem_access_command)
6304 {
6305 struct target *target = get_current_target(CMD_CTX);
6306 uint32_t test_size;
6307 int retval = ERROR_OK;
6308
6309 if (target->state != TARGET_HALTED) {
6310 LOG_INFO("target not halted !!");
6311 return ERROR_FAIL;
6312 }
6313
6314 if (CMD_ARGC != 1)
6315 return ERROR_COMMAND_SYNTAX_ERROR;
6316
6317 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6318
6319 /* Test reads */
6320 size_t num_bytes = test_size + 4;
6321
6322 struct working_area *wa = NULL;
6323 retval = target_alloc_working_area(target, num_bytes, &wa);
6324 if (retval != ERROR_OK) {
6325 LOG_ERROR("Not enough working area");
6326 return ERROR_FAIL;
6327 }
6328
6329 uint8_t *test_pattern = malloc(num_bytes);
6330
6331 for (size_t i = 0; i < num_bytes; i++)
6332 test_pattern[i] = rand();
6333
6334 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6335 if (retval != ERROR_OK) {
6336 LOG_ERROR("Test pattern write failed");
6337 goto out;
6338 }
6339
6340 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6341 for (int size = 1; size <= 4; size *= 2) {
6342 for (int offset = 0; offset < 4; offset++) {
6343 uint32_t count = test_size / size;
6344 size_t host_bufsiz = (count + 2) * size + host_offset;
6345 uint8_t *read_ref = malloc(host_bufsiz);
6346 uint8_t *read_buf = malloc(host_bufsiz);
6347
6348 for (size_t i = 0; i < host_bufsiz; i++) {
6349 read_ref[i] = rand();
6350 read_buf[i] = read_ref[i];
6351 }
6352 command_print_sameline(CMD,
6353 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6354 size, offset, host_offset ? "un" : "");
6355
6356 struct duration bench;
6357 duration_start(&bench);
6358
6359 retval = target_read_memory(target, wa->address + offset, size, count,
6360 read_buf + size + host_offset);
6361
6362 duration_measure(&bench);
6363
6364 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6365 command_print(CMD, "Unsupported alignment");
6366 goto next;
6367 } else if (retval != ERROR_OK) {
6368 command_print(CMD, "Memory read failed");
6369 goto next;
6370 }
6371
6372 /* replay on host */
6373 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6374
6375 /* check result */
6376 int result = memcmp(read_ref, read_buf, host_bufsiz);
6377 if (result == 0) {
6378 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6379 duration_elapsed(&bench),
6380 duration_kbps(&bench, count * size));
6381 } else {
6382 command_print(CMD, "Compare failed");
6383 binprint(CMD, "ref:", read_ref, host_bufsiz);
6384 binprint(CMD, "buf:", read_buf, host_bufsiz);
6385 }
6386 next:
6387 free(read_ref);
6388 free(read_buf);
6389 }
6390 }
6391 }
6392
6393 out:
6394 free(test_pattern);
6395
6396 if (wa)
6397 target_free_working_area(target, wa);
6398
6399 /* Test writes */
6400 num_bytes = test_size + 4 + 4 + 4;
6401
6402 retval = target_alloc_working_area(target, num_bytes, &wa);
6403 if (retval != ERROR_OK) {
6404 LOG_ERROR("Not enough working area");
6405 return ERROR_FAIL;
6406 }
6407
6408 test_pattern = malloc(num_bytes);
6409
6410 for (size_t i = 0; i < num_bytes; i++)
6411 test_pattern[i] = rand();
6412
6413 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6414 for (int size = 1; size <= 4; size *= 2) {
6415 for (int offset = 0; offset < 4; offset++) {
6416 uint32_t count = test_size / size;
6417 size_t host_bufsiz = count * size + host_offset;
6418 uint8_t *read_ref = malloc(num_bytes);
6419 uint8_t *read_buf = malloc(num_bytes);
6420 uint8_t *write_buf = malloc(host_bufsiz);
6421
6422 for (size_t i = 0; i < host_bufsiz; i++)
6423 write_buf[i] = rand();
6424 command_print_sameline(CMD,
6425 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6426 size, offset, host_offset ? "un" : "");
6427
6428 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6429 if (retval != ERROR_OK) {
6430 command_print(CMD, "Test pattern write failed");
6431 goto nextw;
6432 }
6433
6434 /* replay on host */
6435 memcpy(read_ref, test_pattern, num_bytes);
6436 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6437
6438 struct duration bench;
6439 duration_start(&bench);
6440
6441 retval = target_write_memory(target, wa->address + size + offset, size, count,
6442 write_buf + host_offset);
6443
6444 duration_measure(&bench);
6445
6446 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6447 command_print(CMD, "Unsupported alignment");
6448 goto nextw;
6449 } else if (retval != ERROR_OK) {
6450 command_print(CMD, "Memory write failed");
6451 goto nextw;
6452 }
6453
6454 /* read back */
6455 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6456 if (retval != ERROR_OK) {
6457 command_print(CMD, "Test pattern write failed");
6458 goto nextw;
6459 }
6460
6461 /* check result */
6462 int result = memcmp(read_ref, read_buf, num_bytes);
6463 if (result == 0) {
6464 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6465 duration_elapsed(&bench),
6466 duration_kbps(&bench, count * size));
6467 } else {
6468 command_print(CMD, "Compare failed");
6469 binprint(CMD, "ref:", read_ref, num_bytes);
6470 binprint(CMD, "buf:", read_buf, num_bytes);
6471 }
6472 nextw:
6473 free(read_ref);
6474 free(read_buf);
6475 }
6476 }
6477 }
6478
6479 free(test_pattern);
6480
6481 if (wa)
6482 target_free_working_area(target, wa);
6483 return retval;
6484 }
6485
/* Per-target user commands, available once a target is configured.
 * Registered by target_register_user_commands() for every target. */
static const struct command_registration target_exec_command_handlers[] = {
	/* image pre-loading helpers, primarily for profiling */
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	/* run-control: halt/resume/step/reset */
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* memory display: mdd/mdw/mdh/mdb share handle_md_command,
	 * which derives the access size from the command name */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	/* memory write: mwd/mww/mwh/mwb share handle_mw_command */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	/* breakpoints and watchpoints */
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	/* image load/dump/verify */
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	/* TCL array <-> target memory bridges (jim handlers, not command handlers) */
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mem2array,
		.help = "read 8/16/32 bit memory and return as a TCL array "
			"for script processing",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_array2mem,
		.help = "convert a TCL array to memory locations "
			"and write the 8/16/32 bit values",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
6735 static int target_register_user_commands(struct command_context *cmd_ctx)
6736 {
6737 int retval = ERROR_OK;
6738 retval = target_request_register_commands(cmd_ctx);
6739 if (retval != ERROR_OK)
6740 return retval;
6741
6742 retval = trace_register_commands(cmd_ctx);
6743 if (retval != ERROR_OK)
6744 return retval;
6745
6746
6747 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6748 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will allow linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)