Add target_data_bits().
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75
76 /* targets */
77 extern struct target_type arm7tdmi_target;
78 extern struct target_type arm720t_target;
79 extern struct target_type arm9tdmi_target;
80 extern struct target_type arm920t_target;
81 extern struct target_type arm966e_target;
82 extern struct target_type arm946e_target;
83 extern struct target_type arm926ejs_target;
84 extern struct target_type fa526_target;
85 extern struct target_type feroceon_target;
86 extern struct target_type dragonite_target;
87 extern struct target_type xscale_target;
88 extern struct target_type cortexm_target;
89 extern struct target_type cortexa_target;
90 extern struct target_type aarch64_target;
91 extern struct target_type cortexr4_target;
92 extern struct target_type arm11_target;
93 extern struct target_type ls1_sap_target;
94 extern struct target_type mips_m4k_target;
95 extern struct target_type mips_mips64_target;
96 extern struct target_type avr_target;
97 extern struct target_type dsp563xx_target;
98 extern struct target_type dsp5680xx_target;
99 extern struct target_type testee_target;
100 extern struct target_type avr32_ap7k_target;
101 extern struct target_type hla_target;
102 extern struct target_type nds32_v2_target;
103 extern struct target_type nds32_v3_target;
104 extern struct target_type nds32_v3m_target;
105 extern struct target_type or1k_target;
106 extern struct target_type quark_x10xx_target;
107 extern struct target_type quark_d20xx_target;
108 extern struct target_type stm8_target;
109 extern struct target_type riscv_target;
110 extern struct target_type mem_ap_target;
111 extern struct target_type esirisc_target;
112 extern struct target_type arcv2_target;
113
114 static struct target_type *target_types[] = {
115 &arm7tdmi_target,
116 &arm9tdmi_target,
117 &arm920t_target,
118 &arm720t_target,
119 &arm966e_target,
120 &arm946e_target,
121 &arm926ejs_target,
122 &fa526_target,
123 &feroceon_target,
124 &dragonite_target,
125 &xscale_target,
126 &cortexm_target,
127 &cortexa_target,
128 &cortexr4_target,
129 &arm11_target,
130 &ls1_sap_target,
131 &mips_m4k_target,
132 &avr_target,
133 &dsp563xx_target,
134 &dsp5680xx_target,
135 &testee_target,
136 &avr32_ap7k_target,
137 &hla_target,
138 &nds32_v2_target,
139 &nds32_v3_target,
140 &nds32_v3m_target,
141 &or1k_target,
142 &quark_x10xx_target,
143 &quark_d20xx_target,
144 &stm8_target,
145 &riscv_target,
146 &mem_ap_target,
147 &esirisc_target,
148 &arcv2_target,
149 &aarch64_target,
150 &mips_mips64_target,
151 NULL,
152 };
153
154 struct target *all_targets;
155 static struct target_event_callback *target_event_callbacks;
156 static struct target_timer_callback *target_timer_callbacks;
157 static LIST_HEAD(target_reset_callback_list);
158 static LIST_HEAD(target_trace_callback_list);
159 static const int polling_interval = 100;
160
161 static const struct jim_nvp nvp_assert[] = {
162 { .name = "assert", NVP_ASSERT },
163 { .name = "deassert", NVP_DEASSERT },
164 { .name = "T", NVP_ASSERT },
165 { .name = "F", NVP_DEASSERT },
166 { .name = "t", NVP_ASSERT },
167 { .name = "f", NVP_DEASSERT },
168 { .name = NULL, .value = -1 }
169 };
170
171 static const struct jim_nvp nvp_error_target[] = {
172 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
173 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
174 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
175 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
176 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
177 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
178 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
179 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
180 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
181 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
182 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
183 { .value = -1, .name = NULL }
184 };
185
186 static const char *target_strerror_safe(int err)
187 {
188 const struct jim_nvp *n;
189
190 n = jim_nvp_value2name_simple(nvp_error_target, err);
191 if (n->name == NULL)
192 return "unknown";
193 else
194 return n->name;
195 }
196
197 static const struct jim_nvp nvp_target_event[] = {
198
199 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
200 { .value = TARGET_EVENT_HALTED, .name = "halted" },
201 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
202 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
203 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
204 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
205 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
206
207 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
208 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
209
210 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
211 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
212 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
213 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
214 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
215 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
216 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
217 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
218
219 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
220 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
221 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
222
223 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
224 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
225
226 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
227 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
228
229 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
230 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
231
232 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
233 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
234
235 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
236
237 { .name = NULL, .value = -1 }
238 };
239
240 static const struct jim_nvp nvp_target_state[] = {
241 { .name = "unknown", .value = TARGET_UNKNOWN },
242 { .name = "running", .value = TARGET_RUNNING },
243 { .name = "halted", .value = TARGET_HALTED },
244 { .name = "reset", .value = TARGET_RESET },
245 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
246 { .name = NULL, .value = -1 },
247 };
248
249 static const struct jim_nvp nvp_target_debug_reason[] = {
250 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
251 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
252 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
253 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
254 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
255 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
256 { .name = "program-exit", .value = DBG_REASON_EXIT },
257 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
258 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
259 { .name = NULL, .value = -1 },
260 };
261
262 static const struct jim_nvp nvp_target_endian[] = {
263 { .name = "big", .value = TARGET_BIG_ENDIAN },
264 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
265 { .name = "be", .value = TARGET_BIG_ENDIAN },
266 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
267 { .name = NULL, .value = -1 },
268 };
269
270 static const struct jim_nvp nvp_reset_modes[] = {
271 { .name = "unknown", .value = RESET_UNKNOWN },
272 { .name = "run", .value = RESET_RUN },
273 { .name = "halt", .value = RESET_HALT },
274 { .name = "init", .value = RESET_INIT },
275 { .name = NULL, .value = -1 },
276 };
277
278 const char *debug_reason_name(struct target *t)
279 {
280 const char *cp;
281
282 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
283 t->debug_reason)->name;
284 if (!cp) {
285 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
286 cp = "(*BUG*unknown*BUG*)";
287 }
288 return cp;
289 }
290
291 const char *target_state_name(struct target *t)
292 {
293 const char *cp;
294 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
295 if (!cp) {
296 LOG_ERROR("Invalid target state: %d", (int)(t->state));
297 cp = "(*BUG*unknown*BUG*)";
298 }
299
300 if (!target_was_examined(t) && t->defer_examine)
301 cp = "examine deferred";
302
303 return cp;
304 }
305
306 const char *target_event_name(enum target_event event)
307 {
308 const char *cp;
309 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
310 if (!cp) {
311 LOG_ERROR("Invalid target event: %d", (int)(event));
312 cp = "(*BUG*unknown*BUG*)";
313 }
314 return cp;
315 }
316
317 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
318 {
319 const char *cp;
320 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
321 if (!cp) {
322 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
323 cp = "(*BUG*unknown*BUG*)";
324 }
325 return cp;
326 }
327
328 /* determine the number of the new target */
329 static int new_target_number(void)
330 {
331 struct target *t;
332 int x;
333
334 /* number is 0 based */
335 x = -1;
336 t = all_targets;
337 while (t) {
338 if (x < t->target_number)
339 x = t->target_number;
340 t = t->next;
341 }
342 return x + 1;
343 }
344
345 static void append_to_list_all_targets(struct target *target)
346 {
347 struct target **t = &all_targets;
348
349 while (*t)
350 t = &((*t)->next);
351 *t = target;
352 }
353
354 /* read a uint64_t from a buffer in target memory endianness */
355 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
356 {
357 if (target->endianness == TARGET_LITTLE_ENDIAN)
358 return le_to_h_u64(buffer);
359 else
360 return be_to_h_u64(buffer);
361 }
362
363 /* read a uint32_t from a buffer in target memory endianness */
364 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
365 {
366 if (target->endianness == TARGET_LITTLE_ENDIAN)
367 return le_to_h_u32(buffer);
368 else
369 return be_to_h_u32(buffer);
370 }
371
372 /* read a uint24_t from a buffer in target memory endianness */
373 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 return le_to_h_u24(buffer);
377 else
378 return be_to_h_u24(buffer);
379 }
380
381 /* read a uint16_t from a buffer in target memory endianness */
382 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 return le_to_h_u16(buffer);
386 else
387 return be_to_h_u16(buffer);
388 }
389
390 /* write a uint64_t to a buffer in target memory endianness */
391 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 h_u64_to_le(buffer, value);
395 else
396 h_u64_to_be(buffer, value);
397 }
398
399 /* write a uint32_t to a buffer in target memory endianness */
400 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
401 {
402 if (target->endianness == TARGET_LITTLE_ENDIAN)
403 h_u32_to_le(buffer, value);
404 else
405 h_u32_to_be(buffer, value);
406 }
407
408 /* write a uint24_t to a buffer in target memory endianness */
409 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
410 {
411 if (target->endianness == TARGET_LITTLE_ENDIAN)
412 h_u24_to_le(buffer, value);
413 else
414 h_u24_to_be(buffer, value);
415 }
416
417 /* write a uint16_t to a buffer in target memory endianness */
418 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
419 {
420 if (target->endianness == TARGET_LITTLE_ENDIAN)
421 h_u16_to_le(buffer, value);
422 else
423 h_u16_to_be(buffer, value);
424 }
425
426 /* write a uint8_t to a buffer in target memory endianness */
427 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
428 {
429 *buffer = value;
430 }
431
432 /* write a uint64_t array to a buffer in target memory endianness */
433 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
434 {
435 uint32_t i;
436 for (i = 0; i < count; i++)
437 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
438 }
439
440 /* write a uint32_t array to a buffer in target memory endianness */
441 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
442 {
443 uint32_t i;
444 for (i = 0; i < count; i++)
445 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
446 }
447
448 /* write a uint16_t array to a buffer in target memory endianness */
449 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
450 {
451 uint32_t i;
452 for (i = 0; i < count; i++)
453 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
454 }
455
456 /* write a uint64_t array to a buffer in target memory endianness */
457 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
458 {
459 uint32_t i;
460 for (i = 0; i < count; i++)
461 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
462 }
463
464 /* write a uint32_t array to a buffer in target memory endianness */
465 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
466 {
467 uint32_t i;
468 for (i = 0; i < count; i++)
469 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
470 }
471
472 /* write a uint16_t array to a buffer in target memory endianness */
473 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
474 {
475 uint32_t i;
476 for (i = 0; i < count; i++)
477 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
478 }
479
480 /* return a pointer to a configured target; id is name or number */
481 struct target *get_target(const char *id)
482 {
483 struct target *target;
484
485 /* try as tcltarget name */
486 for (target = all_targets; target; target = target->next) {
487 if (target_name(target) == NULL)
488 continue;
489 if (strcmp(id, target_name(target)) == 0)
490 return target;
491 }
492
493 /* It's OK to remove this fallback sometime after August 2010 or so */
494
495 /* no match, try as number */
496 unsigned num;
497 if (parse_uint(id, &num) != ERROR_OK)
498 return NULL;
499
500 for (target = all_targets; target; target = target->next) {
501 if (target->target_number == (int)num) {
502 LOG_WARNING("use '%s' as target identifier, not '%u'",
503 target_name(target), num);
504 return target;
505 }
506 }
507
508 return NULL;
509 }
510
511 /* returns a pointer to the n-th configured target */
512 struct target *get_target_by_num(int num)
513 {
514 struct target *target = all_targets;
515
516 while (target) {
517 if (target->target_number == num)
518 return target;
519 target = target->next;
520 }
521
522 return NULL;
523 }
524
525 struct target *get_current_target(struct command_context *cmd_ctx)
526 {
527 struct target *target = get_current_target_or_null(cmd_ctx);
528
529 if (target == NULL) {
530 LOG_ERROR("BUG: current_target out of bounds");
531 exit(-1);
532 }
533
534 return target;
535 }
536
537 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
538 {
539 return cmd_ctx->current_target_override
540 ? cmd_ctx->current_target_override
541 : cmd_ctx->current_target;
542 }
543
544 int target_poll(struct target *target)
545 {
546 int retval;
547
548 /* We can't poll until after examine */
549 if (!target_was_examined(target)) {
550 /* Fail silently lest we pollute the log */
551 return ERROR_FAIL;
552 }
553
554 retval = target->type->poll(target);
555 if (retval != ERROR_OK)
556 return retval;
557
558 if (target->halt_issued) {
559 if (target->state == TARGET_HALTED)
560 target->halt_issued = false;
561 else {
562 int64_t t = timeval_ms() - target->halt_issued_time;
563 if (t > DEFAULT_HALT_TIMEOUT) {
564 target->halt_issued = false;
565 LOG_INFO("Halt timed out, wake up GDB.");
566 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
567 }
568 }
569 }
570
571 return ERROR_OK;
572 }
573
574 int target_halt(struct target *target)
575 {
576 int retval;
577 /* We can't poll until after examine */
578 if (!target_was_examined(target)) {
579 LOG_ERROR("Target not examined yet");
580 return ERROR_FAIL;
581 }
582
583 retval = target->type->halt(target);
584 if (retval != ERROR_OK)
585 return retval;
586
587 target->halt_issued = true;
588 target->halt_issued_time = timeval_ms();
589
590 return ERROR_OK;
591 }
592
593 /**
594 * Make the target (re)start executing using its saved execution
595 * context (possibly with some modifications).
596 *
597 * @param target Which target should start executing.
598 * @param current True to use the target's saved program counter instead
599 * of the address parameter
600 * @param address Optionally used as the program counter.
601 * @param handle_breakpoints True iff breakpoints at the resumption PC
602 * should be skipped. (For example, maybe execution was stopped by
603 * such a breakpoint, in which case it would be counterproductive to
604 * let it re-trigger.
605 * @param debug_execution False if all working areas allocated by OpenOCD
606 * should be released and/or restored to their original contents.
607 * (This would for example be true to run some downloaded "helper"
608 * algorithm code, which resides in one such working buffer and uses
609 * another for data storage.)
610 *
611 * @todo Resolve the ambiguity about what the "debug_execution" flag
612 * signifies. For example, Target implementations don't agree on how
613 * it relates to invalidation of the register cache, or to whether
614 * breakpoints and watchpoints should be enabled. (It would seem wrong
615 * to enable breakpoints when running downloaded "helper" algorithms
616 * (debug_execution true), since the breakpoints would be set to match
617 * target firmware being debugged, not the helper algorithm.... and
618 * enabling them could cause such helpers to malfunction (for example,
619 * by overwriting data with a breakpoint instruction. On the other
620 * hand the infrastructure for running such helpers might use this
621 * procedure but rely on hardware breakpoint to detect termination.)
622 */
623 int target_resume(struct target *target, int current, target_addr_t address,
624 int handle_breakpoints, int debug_execution)
625 {
626 int retval;
627
628 /* We can't poll until after examine */
629 if (!target_was_examined(target)) {
630 LOG_ERROR("Target not examined yet");
631 return ERROR_FAIL;
632 }
633
634 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
635
636 /* note that resume *must* be asynchronous. The CPU can halt before
637 * we poll. The CPU can even halt at the current PC as a result of
638 * a software breakpoint being inserted by (a bug?) the application.
639 */
640 /*
641 * resume() triggers the event 'resumed'. The execution of TCL commands
642 * in the event handler causes the polling of targets. If the target has
643 * already halted for a breakpoint, polling will run the 'halted' event
644 * handler before the pending 'resumed' handler.
645 * Disable polling during resume() to guarantee the execution of handlers
646 * in the correct order.
647 */
648 bool save_poll = jtag_poll_get_enabled();
649 jtag_poll_set_enabled(false);
650 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
651 jtag_poll_set_enabled(save_poll);
652 if (retval != ERROR_OK)
653 return retval;
654
655 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
656
657 return retval;
658 }
659
660 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
661 {
662 char buf[100];
663 int retval;
664 struct jim_nvp *n;
665 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
666 if (n->name == NULL) {
667 LOG_ERROR("invalid reset mode");
668 return ERROR_FAIL;
669 }
670
671 struct target *target;
672 for (target = all_targets; target; target = target->next)
673 target_call_reset_callbacks(target, reset_mode);
674
675 /* disable polling during reset to make reset event scripts
676 * more predictable, i.e. dr/irscan & pathmove in events will
677 * not have JTAG operations injected into the middle of a sequence.
678 */
679 bool save_poll = jtag_poll_get_enabled();
680
681 jtag_poll_set_enabled(false);
682
683 sprintf(buf, "ocd_process_reset %s", n->name);
684 retval = Jim_Eval(cmd->ctx->interp, buf);
685
686 jtag_poll_set_enabled(save_poll);
687
688 if (retval != JIM_OK) {
689 Jim_MakeErrorMessage(cmd->ctx->interp);
690 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
691 return ERROR_FAIL;
692 }
693
694 /* We want any events to be processed before the prompt */
695 retval = target_call_timer_callbacks_now();
696
697 for (target = all_targets; target; target = target->next) {
698 target->type->check_reset(target);
699 target->running_alg = false;
700 }
701
702 return retval;
703 }
704
705 static int identity_virt2phys(struct target *target,
706 target_addr_t virtual, target_addr_t *physical)
707 {
708 *physical = virtual;
709 return ERROR_OK;
710 }
711
712 static int no_mmu(struct target *target, int *enabled)
713 {
714 *enabled = 0;
715 return ERROR_OK;
716 }
717
718 static int default_examine(struct target *target)
719 {
720 target_set_examined(target);
721 return ERROR_OK;
722 }
723
724 /* no check by default */
725 static int default_check_reset(struct target *target)
726 {
727 return ERROR_OK;
728 }
729
730 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
731 * Keep in sync */
732 int target_examine_one(struct target *target)
733 {
734 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
735
736 int retval = target->type->examine(target);
737 if (retval != ERROR_OK) {
738 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
739 return retval;
740 }
741
742 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
743
744 return ERROR_OK;
745 }
746
747 static int jtag_enable_callback(enum jtag_event event, void *priv)
748 {
749 struct target *target = priv;
750
751 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
752 return ERROR_OK;
753
754 jtag_unregister_event_callback(jtag_enable_callback, target);
755
756 return target_examine_one(target);
757 }
758
759 /* Targets that correctly implement init + examine, i.e.
760 * no communication with target during init:
761 *
762 * XScale
763 */
764 int target_examine(void)
765 {
766 int retval = ERROR_OK;
767 struct target *target;
768
769 for (target = all_targets; target; target = target->next) {
770 /* defer examination, but don't skip it */
771 if (!target->tap->enabled) {
772 jtag_register_event_callback(jtag_enable_callback,
773 target);
774 continue;
775 }
776
777 if (target->defer_examine)
778 continue;
779
780 int retval2 = target_examine_one(target);
781 if (retval2 != ERROR_OK) {
782 LOG_WARNING("target %s examination failed", target_name(target));
783 retval = retval2;
784 }
785 }
786 return retval;
787 }
788
789 const char *target_type_name(struct target *target)
790 {
791 return target->type->name;
792 }
793
794 static int target_soft_reset_halt(struct target *target)
795 {
796 if (!target_was_examined(target)) {
797 LOG_ERROR("Target not examined yet");
798 return ERROR_FAIL;
799 }
800 if (!target->type->soft_reset_halt) {
801 LOG_ERROR("Target %s does not support soft_reset_halt",
802 target_name(target));
803 return ERROR_FAIL;
804 }
805 return target->type->soft_reset_halt(target);
806 }
807
808 /**
809 * Downloads a target-specific native code algorithm to the target,
810 * and executes it. * Note that some targets may need to set up, enable,
811 * and tear down a breakpoint (hard or * soft) to detect algorithm
812 * termination, while others may support lower overhead schemes where
813 * soft breakpoints embedded in the algorithm automatically terminate the
814 * algorithm.
815 *
816 * @param target used to run the algorithm
817 * @param num_mem_params
818 * @param mem_params
819 * @param num_reg_params
820 * @param reg_param
821 * @param entry_point
822 * @param exit_point
823 * @param timeout_ms
824 * @param arch_info target-specific description of the algorithm.
825 */
826 int target_run_algorithm(struct target *target,
827 int num_mem_params, struct mem_param *mem_params,
828 int num_reg_params, struct reg_param *reg_param,
829 uint32_t entry_point, uint32_t exit_point,
830 int timeout_ms, void *arch_info)
831 {
832 int retval = ERROR_FAIL;
833
834 if (!target_was_examined(target)) {
835 LOG_ERROR("Target not examined yet");
836 goto done;
837 }
838 if (!target->type->run_algorithm) {
839 LOG_ERROR("Target type '%s' does not support %s",
840 target_type_name(target), __func__);
841 goto done;
842 }
843
844 target->running_alg = true;
845 retval = target->type->run_algorithm(target,
846 num_mem_params, mem_params,
847 num_reg_params, reg_param,
848 entry_point, exit_point, timeout_ms, arch_info);
849 target->running_alg = false;
850
851 done:
852 return retval;
853 }
854
855 /**
856 * Executes a target-specific native code algorithm and leaves it running.
857 *
858 * @param target used to run the algorithm
859 * @param num_mem_params
860 * @param mem_params
861 * @param num_reg_params
862 * @param reg_params
863 * @param entry_point
864 * @param exit_point
865 * @param arch_info target-specific description of the algorithm.
866 */
867 int target_start_algorithm(struct target *target,
868 int num_mem_params, struct mem_param *mem_params,
869 int num_reg_params, struct reg_param *reg_params,
870 uint32_t entry_point, uint32_t exit_point,
871 void *arch_info)
872 {
873 int retval = ERROR_FAIL;
874
875 if (!target_was_examined(target)) {
876 LOG_ERROR("Target not examined yet");
877 goto done;
878 }
879 if (!target->type->start_algorithm) {
880 LOG_ERROR("Target type '%s' does not support %s",
881 target_type_name(target), __func__);
882 goto done;
883 }
884 if (target->running_alg) {
885 LOG_ERROR("Target is already running an algorithm");
886 goto done;
887 }
888
889 target->running_alg = true;
890 retval = target->type->start_algorithm(target,
891 num_mem_params, mem_params,
892 num_reg_params, reg_params,
893 entry_point, exit_point, arch_info);
894
895 done:
896 return retval;
897 }
898
899 /**
900 * Waits for an algorithm started with target_start_algorithm() to complete.
901 *
902 * @param target used to run the algorithm
903 * @param num_mem_params
904 * @param mem_params
905 * @param num_reg_params
906 * @param reg_params
907 * @param exit_point
908 * @param timeout_ms
909 * @param arch_info target-specific description of the algorithm.
910 */
911 int target_wait_algorithm(struct target *target,
912 int num_mem_params, struct mem_param *mem_params,
913 int num_reg_params, struct reg_param *reg_params,
914 uint32_t exit_point, int timeout_ms,
915 void *arch_info)
916 {
917 int retval = ERROR_FAIL;
918
919 if (!target->type->wait_algorithm) {
920 LOG_ERROR("Target type '%s' does not support %s",
921 target_type_name(target), __func__);
922 goto done;
923 }
924 if (!target->running_alg) {
925 LOG_ERROR("Target is not running an algorithm");
926 goto done;
927 }
928
929 retval = target->type->wait_algorithm(target,
930 num_mem_params, mem_params,
931 num_reg_params, reg_params,
932 exit_point, timeout_ms, arch_info);
933 if (retval != ERROR_TARGET_TIMEOUT)
934 target->running_alg = false;
935
936 done:
937 return retval;
938 }
939
940 /**
941 * Streams data to a circular buffer on target intended for consumption by code
942 * running asynchronously on target.
943 *
944 * This is intended for applications where target-specific native code runs
945 * on the target, receives data from the circular buffer, does something with
946 * it (most likely writing it to a flash memory), and advances the circular
947 * buffer pointer.
948 *
949 * This assumes that the helper algorithm has already been loaded to the target,
950 * but has not been started yet. Given memory and register parameters are passed
951 * to the algorithm.
952 *
953 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
954 * following format:
955 *
956 * [buffer_start + 0, buffer_start + 4):
957 * Write Pointer address (aka head). Written and updated by this
958 * routine when new data is written to the circular buffer.
959 * [buffer_start + 4, buffer_start + 8):
960 * Read Pointer address (aka tail). Updated by code running on the
961 * target after it consumes data.
962 * [buffer_start + 8, buffer_start + buffer_size):
963 * Circular buffer contents.
964 *
965 * See contrib/loaders/flash/stm32f1x.S for an example.
966 *
967 * @param target used to run the algorithm
968 * @param buffer address on the host where data to be sent is located
969 * @param count number of blocks to send
970 * @param block_size size in bytes of each block
971 * @param num_mem_params count of memory-based params to pass to algorithm
972 * @param mem_params memory-based params to pass to algorithm
973 * @param num_reg_params count of register-based params to pass to algorithm
974 * @param reg_params memory-based params to pass to algorithm
975 * @param buffer_start address on the target of the circular buffer structure
976 * @param buffer_size size of the circular buffer structure
977 * @param entry_point address on the target to execute to start the algorithm
978 * @param exit_point address at which to set a breakpoint to catch the
979 * end of the algorithm; can be 0 if target triggers a breakpoint itself
980 * @param arch_info
981 */
982
983 int target_run_flash_async_algorithm(struct target *target,
984 const uint8_t *buffer, uint32_t count, int block_size,
985 int num_mem_params, struct mem_param *mem_params,
986 int num_reg_params, struct reg_param *reg_params,
987 uint32_t buffer_start, uint32_t buffer_size,
988 uint32_t entry_point, uint32_t exit_point, void *arch_info)
989 {
990 int retval;
991 int timeout = 0;
992
993 const uint8_t *buffer_orig = buffer;
994
995 /* Set up working area. First word is write pointer, second word is read pointer,
996 * rest is fifo data area. */
997 uint32_t wp_addr = buffer_start;
998 uint32_t rp_addr = buffer_start + 4;
999 uint32_t fifo_start_addr = buffer_start + 8;
1000 uint32_t fifo_end_addr = buffer_start + buffer_size;
1001
1002 uint32_t wp = fifo_start_addr;
1003 uint32_t rp = fifo_start_addr;
1004
1005 /* validate block_size is 2^n */
1006 assert(!block_size || !(block_size & (block_size - 1)));
1007
1008 retval = target_write_u32(target, wp_addr, wp);
1009 if (retval != ERROR_OK)
1010 return retval;
1011 retval = target_write_u32(target, rp_addr, rp);
1012 if (retval != ERROR_OK)
1013 return retval;
1014
1015 /* Start up algorithm on target and let it idle while writing the first chunk */
1016 retval = target_start_algorithm(target, num_mem_params, mem_params,
1017 num_reg_params, reg_params,
1018 entry_point,
1019 exit_point,
1020 arch_info);
1021
1022 if (retval != ERROR_OK) {
1023 LOG_ERROR("error starting target flash write algorithm");
1024 return retval;
1025 }
1026
1027 while (count > 0) {
1028
1029 retval = target_read_u32(target, rp_addr, &rp);
1030 if (retval != ERROR_OK) {
1031 LOG_ERROR("failed to get read pointer");
1032 break;
1033 }
1034
1035 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1036 (size_t) (buffer - buffer_orig), count, wp, rp);
1037
1038 if (rp == 0) {
1039 LOG_ERROR("flash write algorithm aborted by target");
1040 retval = ERROR_FLASH_OPERATION_FAILED;
1041 break;
1042 }
1043
1044 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1045 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1046 break;
1047 }
1048
1049 /* Count the number of bytes available in the fifo without
1050 * crossing the wrap around. Make sure to not fill it completely,
1051 * because that would make wp == rp and that's the empty condition. */
1052 uint32_t thisrun_bytes;
1053 if (rp > wp)
1054 thisrun_bytes = rp - wp - block_size;
1055 else if (rp > fifo_start_addr)
1056 thisrun_bytes = fifo_end_addr - wp;
1057 else
1058 thisrun_bytes = fifo_end_addr - wp - block_size;
1059
1060 if (thisrun_bytes == 0) {
1061 /* Throttle polling a bit if transfer is (much) faster than flash
1062 * programming. The exact delay shouldn't matter as long as it's
1063 * less than buffer size / flash speed. This is very unlikely to
1064 * run when using high latency connections such as USB. */
1065 alive_sleep(2);
1066
1067 /* to stop an infinite loop on some targets check and increment a timeout
1068 * this issue was observed on a stellaris using the new ICDI interface */
1069 if (timeout++ >= 2500) {
1070 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1071 return ERROR_FLASH_OPERATION_FAILED;
1072 }
1073 continue;
1074 }
1075
1076 /* reset our timeout */
1077 timeout = 0;
1078
1079 /* Limit to the amount of data we actually want to write */
1080 if (thisrun_bytes > count * block_size)
1081 thisrun_bytes = count * block_size;
1082
1083 /* Force end of large blocks to be word aligned */
1084 if (thisrun_bytes >= 16)
1085 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1086
1087 /* Write data to fifo */
1088 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1089 if (retval != ERROR_OK)
1090 break;
1091
1092 /* Update counters and wrap write pointer */
1093 buffer += thisrun_bytes;
1094 count -= thisrun_bytes / block_size;
1095 wp += thisrun_bytes;
1096 if (wp >= fifo_end_addr)
1097 wp = fifo_start_addr;
1098
1099 /* Store updated write pointer to target */
1100 retval = target_write_u32(target, wp_addr, wp);
1101 if (retval != ERROR_OK)
1102 break;
1103
1104 /* Avoid GDB timeouts */
1105 keep_alive();
1106 }
1107
1108 if (retval != ERROR_OK) {
1109 /* abort flash write algorithm on target */
1110 target_write_u32(target, wp_addr, 0);
1111 }
1112
1113 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1114 num_reg_params, reg_params,
1115 exit_point,
1116 10000,
1117 arch_info);
1118
1119 if (retval2 != ERROR_OK) {
1120 LOG_ERROR("error waiting for target flash write algorithm");
1121 retval = retval2;
1122 }
1123
1124 if (retval == ERROR_OK) {
1125 /* check if algorithm set rp = 0 after fifo writer loop finished */
1126 retval = target_read_u32(target, rp_addr, &rp);
1127 if (retval == ERROR_OK && rp == 0) {
1128 LOG_ERROR("flash write algorithm aborted by target");
1129 retval = ERROR_FLASH_OPERATION_FAILED;
1130 }
1131 }
1132
1133 return retval;
1134 }
1135
1136 int target_run_read_async_algorithm(struct target *target,
1137 uint8_t *buffer, uint32_t count, int block_size,
1138 int num_mem_params, struct mem_param *mem_params,
1139 int num_reg_params, struct reg_param *reg_params,
1140 uint32_t buffer_start, uint32_t buffer_size,
1141 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1142 {
1143 int retval;
1144 int timeout = 0;
1145
1146 const uint8_t *buffer_orig = buffer;
1147
1148 /* Set up working area. First word is write pointer, second word is read pointer,
1149 * rest is fifo data area. */
1150 uint32_t wp_addr = buffer_start;
1151 uint32_t rp_addr = buffer_start + 4;
1152 uint32_t fifo_start_addr = buffer_start + 8;
1153 uint32_t fifo_end_addr = buffer_start + buffer_size;
1154
1155 uint32_t wp = fifo_start_addr;
1156 uint32_t rp = fifo_start_addr;
1157
1158 /* validate block_size is 2^n */
1159 assert(!block_size || !(block_size & (block_size - 1)));
1160
1161 retval = target_write_u32(target, wp_addr, wp);
1162 if (retval != ERROR_OK)
1163 return retval;
1164 retval = target_write_u32(target, rp_addr, rp);
1165 if (retval != ERROR_OK)
1166 return retval;
1167
1168 /* Start up algorithm on target */
1169 retval = target_start_algorithm(target, num_mem_params, mem_params,
1170 num_reg_params, reg_params,
1171 entry_point,
1172 exit_point,
1173 arch_info);
1174
1175 if (retval != ERROR_OK) {
1176 LOG_ERROR("error starting target flash read algorithm");
1177 return retval;
1178 }
1179
1180 while (count > 0) {
1181 retval = target_read_u32(target, wp_addr, &wp);
1182 if (retval != ERROR_OK) {
1183 LOG_ERROR("failed to get write pointer");
1184 break;
1185 }
1186
1187 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1188 (size_t)(buffer - buffer_orig), count, wp, rp);
1189
1190 if (wp == 0) {
1191 LOG_ERROR("flash read algorithm aborted by target");
1192 retval = ERROR_FLASH_OPERATION_FAILED;
1193 break;
1194 }
1195
1196 if (((wp - fifo_start_addr) & (block_size - 1)) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1197 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1198 break;
1199 }
1200
1201 /* Count the number of bytes available in the fifo without
1202 * crossing the wrap around. */
1203 uint32_t thisrun_bytes;
1204 if (wp >= rp)
1205 thisrun_bytes = wp - rp;
1206 else
1207 thisrun_bytes = fifo_end_addr - rp;
1208
1209 if (thisrun_bytes == 0) {
1210 /* Throttle polling a bit if transfer is (much) faster than flash
1211 * reading. The exact delay shouldn't matter as long as it's
1212 * less than buffer size / flash speed. This is very unlikely to
1213 * run when using high latency connections such as USB. */
1214 alive_sleep(2);
1215
1216 /* to stop an infinite loop on some targets check and increment a timeout
1217 * this issue was observed on a stellaris using the new ICDI interface */
1218 if (timeout++ >= 2500) {
1219 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1220 return ERROR_FLASH_OPERATION_FAILED;
1221 }
1222 continue;
1223 }
1224
1225 /* Reset our timeout */
1226 timeout = 0;
1227
1228 /* Limit to the amount of data we actually want to read */
1229 if (thisrun_bytes > count * block_size)
1230 thisrun_bytes = count * block_size;
1231
1232 /* Force end of large blocks to be word aligned */
1233 if (thisrun_bytes >= 16)
1234 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1235
1236 /* Read data from fifo */
1237 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1238 if (retval != ERROR_OK)
1239 break;
1240
1241 /* Update counters and wrap write pointer */
1242 buffer += thisrun_bytes;
1243 count -= thisrun_bytes / block_size;
1244 rp += thisrun_bytes;
1245 if (rp >= fifo_end_addr)
1246 rp = fifo_start_addr;
1247
1248 /* Store updated write pointer to target */
1249 retval = target_write_u32(target, rp_addr, rp);
1250 if (retval != ERROR_OK)
1251 break;
1252
1253 /* Avoid GDB timeouts */
1254 keep_alive();
1255
1256 }
1257
1258 if (retval != ERROR_OK) {
1259 /* abort flash write algorithm on target */
1260 target_write_u32(target, rp_addr, 0);
1261 }
1262
1263 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1264 num_reg_params, reg_params,
1265 exit_point,
1266 10000,
1267 arch_info);
1268
1269 if (retval2 != ERROR_OK) {
1270 LOG_ERROR("error waiting for target flash write algorithm");
1271 retval = retval2;
1272 }
1273
1274 if (retval == ERROR_OK) {
1275 /* check if algorithm set wp = 0 after fifo writer loop finished */
1276 retval = target_read_u32(target, wp_addr, &wp);
1277 if (retval == ERROR_OK && wp == 0) {
1278 LOG_ERROR("flash read algorithm aborted by target");
1279 retval = ERROR_FLASH_OPERATION_FAILED;
1280 }
1281 }
1282
1283 return retval;
1284 }
1285
1286 int target_read_memory(struct target *target,
1287 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1288 {
1289 if (!target_was_examined(target)) {
1290 LOG_ERROR("Target not examined yet");
1291 return ERROR_FAIL;
1292 }
1293 if (!target->type->read_memory) {
1294 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1295 return ERROR_FAIL;
1296 }
1297 return target->type->read_memory(target, address, size, count, buffer);
1298 }
1299
1300 int target_read_phys_memory(struct target *target,
1301 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1302 {
1303 if (!target_was_examined(target)) {
1304 LOG_ERROR("Target not examined yet");
1305 return ERROR_FAIL;
1306 }
1307 if (!target->type->read_phys_memory) {
1308 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1309 return ERROR_FAIL;
1310 }
1311 return target->type->read_phys_memory(target, address, size, count, buffer);
1312 }
1313
1314 int target_write_memory(struct target *target,
1315 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1316 {
1317 if (!target_was_examined(target)) {
1318 LOG_ERROR("Target not examined yet");
1319 return ERROR_FAIL;
1320 }
1321 if (!target->type->write_memory) {
1322 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1323 return ERROR_FAIL;
1324 }
1325 return target->type->write_memory(target, address, size, count, buffer);
1326 }
1327
1328 int target_write_phys_memory(struct target *target,
1329 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1330 {
1331 if (!target_was_examined(target)) {
1332 LOG_ERROR("Target not examined yet");
1333 return ERROR_FAIL;
1334 }
1335 if (!target->type->write_phys_memory) {
1336 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1337 return ERROR_FAIL;
1338 }
1339 return target->type->write_phys_memory(target, address, size, count, buffer);
1340 }
1341
1342 int target_add_breakpoint(struct target *target,
1343 struct breakpoint *breakpoint)
1344 {
1345 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1346 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1347 return ERROR_TARGET_NOT_HALTED;
1348 }
1349 return target->type->add_breakpoint(target, breakpoint);
1350 }
1351
1352 int target_add_context_breakpoint(struct target *target,
1353 struct breakpoint *breakpoint)
1354 {
1355 if (target->state != TARGET_HALTED) {
1356 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1357 return ERROR_TARGET_NOT_HALTED;
1358 }
1359 return target->type->add_context_breakpoint(target, breakpoint);
1360 }
1361
1362 int target_add_hybrid_breakpoint(struct target *target,
1363 struct breakpoint *breakpoint)
1364 {
1365 if (target->state != TARGET_HALTED) {
1366 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1367 return ERROR_TARGET_NOT_HALTED;
1368 }
1369 return target->type->add_hybrid_breakpoint(target, breakpoint);
1370 }
1371
1372 int target_remove_breakpoint(struct target *target,
1373 struct breakpoint *breakpoint)
1374 {
1375 return target->type->remove_breakpoint(target, breakpoint);
1376 }
1377
1378 int target_add_watchpoint(struct target *target,
1379 struct watchpoint *watchpoint)
1380 {
1381 if (target->state != TARGET_HALTED) {
1382 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1383 return ERROR_TARGET_NOT_HALTED;
1384 }
1385 return target->type->add_watchpoint(target, watchpoint);
1386 }
1387 int target_remove_watchpoint(struct target *target,
1388 struct watchpoint *watchpoint)
1389 {
1390 return target->type->remove_watchpoint(target, watchpoint);
1391 }
1392 int target_hit_watchpoint(struct target *target,
1393 struct watchpoint **hit_watchpoint)
1394 {
1395 if (target->state != TARGET_HALTED) {
1396 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1397 return ERROR_TARGET_NOT_HALTED;
1398 }
1399
1400 if (target->type->hit_watchpoint == NULL) {
1401 /* For backward compatible, if hit_watchpoint is not implemented,
1402 * return ERROR_FAIL such that gdb_server will not take the nonsense
1403 * information. */
1404 return ERROR_FAIL;
1405 }
1406
1407 return target->type->hit_watchpoint(target, hit_watchpoint);
1408 }
1409
1410 const char *target_get_gdb_arch(struct target *target)
1411 {
1412 if (target->type->get_gdb_arch == NULL)
1413 return NULL;
1414 return target->type->get_gdb_arch(target);
1415 }
1416
1417 int target_get_gdb_reg_list(struct target *target,
1418 struct reg **reg_list[], int *reg_list_size,
1419 enum target_register_class reg_class)
1420 {
1421 int result = ERROR_FAIL;
1422
1423 if (!target_was_examined(target)) {
1424 LOG_ERROR("Target not examined yet");
1425 goto done;
1426 }
1427
1428 result = target->type->get_gdb_reg_list(target, reg_list,
1429 reg_list_size, reg_class);
1430
1431 done:
1432 if (result != ERROR_OK) {
1433 *reg_list = NULL;
1434 *reg_list_size = 0;
1435 }
1436 return result;
1437 }
1438
1439 int target_get_gdb_reg_list_noread(struct target *target,
1440 struct reg **reg_list[], int *reg_list_size,
1441 enum target_register_class reg_class)
1442 {
1443 if (target->type->get_gdb_reg_list_noread &&
1444 target->type->get_gdb_reg_list_noread(target, reg_list,
1445 reg_list_size, reg_class) == ERROR_OK)
1446 return ERROR_OK;
1447 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1448 }
1449
1450 bool target_supports_gdb_connection(struct target *target)
1451 {
1452 /*
1453 * exclude all the targets that don't provide get_gdb_reg_list
1454 * or that have explicit gdb_max_connection == 0
1455 */
1456 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1457 }
1458
1459 int target_step(struct target *target,
1460 int current, target_addr_t address, int handle_breakpoints)
1461 {
1462 int retval;
1463
1464 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1465
1466 retval = target->type->step(target, current, address, handle_breakpoints);
1467 if (retval != ERROR_OK)
1468 return retval;
1469
1470 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1471
1472 return retval;
1473 }
1474
1475 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1476 {
1477 if (target->state != TARGET_HALTED) {
1478 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1479 return ERROR_TARGET_NOT_HALTED;
1480 }
1481 return target->type->get_gdb_fileio_info(target, fileio_info);
1482 }
1483
1484 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1485 {
1486 if (target->state != TARGET_HALTED) {
1487 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1488 return ERROR_TARGET_NOT_HALTED;
1489 }
1490 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1491 }
1492
1493 target_addr_t target_address_max(struct target *target)
1494 {
1495 unsigned bits = target_address_bits(target);
1496 if (sizeof(target_addr_t) * 8 == bits)
1497 return (target_addr_t) -1;
1498 else
1499 return (((target_addr_t) 1) << bits) - 1;
1500 }
1501
1502 unsigned target_address_bits(struct target *target)
1503 {
1504 if (target->type->address_bits)
1505 return target->type->address_bits(target);
1506 return 32;
1507 }
1508
1509 unsigned int target_data_bits(struct target *target)
1510 {
1511 if (target->type->data_bits)
1512 return target->type->data_bits(target);
1513 return 32;
1514 }
1515
1516 static int target_profiling(struct target *target, uint32_t *samples,
1517 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1518 {
1519 return target->type->profiling(target, samples, max_num_samples,
1520 num_samples, seconds);
1521 }
1522
1523 /**
1524 * Reset the @c examined flag for the given target.
1525 * Pure paranoia -- targets are zeroed on allocation.
1526 */
1527 static void target_reset_examined(struct target *target)
1528 {
1529 target->examined = false;
1530 }
1531
1532 static int handle_target(void *priv);
1533
1534 static int target_init_one(struct command_context *cmd_ctx,
1535 struct target *target)
1536 {
1537 target_reset_examined(target);
1538
1539 struct target_type *type = target->type;
1540 if (type->examine == NULL)
1541 type->examine = default_examine;
1542
1543 if (type->check_reset == NULL)
1544 type->check_reset = default_check_reset;
1545
1546 assert(type->init_target != NULL);
1547
1548 int retval = type->init_target(cmd_ctx, target);
1549 if (ERROR_OK != retval) {
1550 LOG_ERROR("target '%s' init failed", target_name(target));
1551 return retval;
1552 }
1553
1554 /* Sanity-check MMU support ... stub in what we must, to help
1555 * implement it in stages, but warn if we need to do so.
1556 */
1557 if (type->mmu) {
1558 if (type->virt2phys == NULL) {
1559 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1560 type->virt2phys = identity_virt2phys;
1561 }
1562 } else {
1563 /* Make sure no-MMU targets all behave the same: make no
1564 * distinction between physical and virtual addresses, and
1565 * ensure that virt2phys() is always an identity mapping.
1566 */
1567 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1568 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1569
1570 type->mmu = no_mmu;
1571 type->write_phys_memory = type->write_memory;
1572 type->read_phys_memory = type->read_memory;
1573 type->virt2phys = identity_virt2phys;
1574 }
1575
1576 if (target->type->read_buffer == NULL)
1577 target->type->read_buffer = target_read_buffer_default;
1578
1579 if (target->type->write_buffer == NULL)
1580 target->type->write_buffer = target_write_buffer_default;
1581
1582 if (target->type->get_gdb_fileio_info == NULL)
1583 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1584
1585 if (target->type->gdb_fileio_end == NULL)
1586 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1587
1588 if (target->type->profiling == NULL)
1589 target->type->profiling = target_profiling_default;
1590
1591 return ERROR_OK;
1592 }
1593
1594 static int target_init(struct command_context *cmd_ctx)
1595 {
1596 struct target *target;
1597 int retval;
1598
1599 for (target = all_targets; target; target = target->next) {
1600 retval = target_init_one(cmd_ctx, target);
1601 if (ERROR_OK != retval)
1602 return retval;
1603 }
1604
1605 if (!all_targets)
1606 return ERROR_OK;
1607
1608 retval = target_register_user_commands(cmd_ctx);
1609 if (ERROR_OK != retval)
1610 return retval;
1611
1612 retval = target_register_timer_callback(&handle_target,
1613 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1614 if (ERROR_OK != retval)
1615 return retval;
1616
1617 return ERROR_OK;
1618 }
1619
1620 COMMAND_HANDLER(handle_target_init_command)
1621 {
1622 int retval;
1623
1624 if (CMD_ARGC != 0)
1625 return ERROR_COMMAND_SYNTAX_ERROR;
1626
1627 static bool target_initialized;
1628 if (target_initialized) {
1629 LOG_INFO("'target init' has already been called");
1630 return ERROR_OK;
1631 }
1632 target_initialized = true;
1633
1634 retval = command_run_line(CMD_CTX, "init_targets");
1635 if (ERROR_OK != retval)
1636 return retval;
1637
1638 retval = command_run_line(CMD_CTX, "init_target_events");
1639 if (ERROR_OK != retval)
1640 return retval;
1641
1642 retval = command_run_line(CMD_CTX, "init_board");
1643 if (ERROR_OK != retval)
1644 return retval;
1645
1646 LOG_DEBUG("Initializing targets...");
1647 return target_init(CMD_CTX);
1648 }
1649
1650 int target_register_event_callback(int (*callback)(struct target *target,
1651 enum target_event event, void *priv), void *priv)
1652 {
1653 struct target_event_callback **callbacks_p = &target_event_callbacks;
1654
1655 if (callback == NULL)
1656 return ERROR_COMMAND_SYNTAX_ERROR;
1657
1658 if (*callbacks_p) {
1659 while ((*callbacks_p)->next)
1660 callbacks_p = &((*callbacks_p)->next);
1661 callbacks_p = &((*callbacks_p)->next);
1662 }
1663
1664 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1665 (*callbacks_p)->callback = callback;
1666 (*callbacks_p)->priv = priv;
1667 (*callbacks_p)->next = NULL;
1668
1669 return ERROR_OK;
1670 }
1671
1672 int target_register_reset_callback(int (*callback)(struct target *target,
1673 enum target_reset_mode reset_mode, void *priv), void *priv)
1674 {
1675 struct target_reset_callback *entry;
1676
1677 if (callback == NULL)
1678 return ERROR_COMMAND_SYNTAX_ERROR;
1679
1680 entry = malloc(sizeof(struct target_reset_callback));
1681 if (entry == NULL) {
1682 LOG_ERROR("error allocating buffer for reset callback entry");
1683 return ERROR_COMMAND_SYNTAX_ERROR;
1684 }
1685
1686 entry->callback = callback;
1687 entry->priv = priv;
1688 list_add(&entry->list, &target_reset_callback_list);
1689
1690
1691 return ERROR_OK;
1692 }
1693
1694 int target_register_trace_callback(int (*callback)(struct target *target,
1695 size_t len, uint8_t *data, void *priv), void *priv)
1696 {
1697 struct target_trace_callback *entry;
1698
1699 if (callback == NULL)
1700 return ERROR_COMMAND_SYNTAX_ERROR;
1701
1702 entry = malloc(sizeof(struct target_trace_callback));
1703 if (entry == NULL) {
1704 LOG_ERROR("error allocating buffer for trace callback entry");
1705 return ERROR_COMMAND_SYNTAX_ERROR;
1706 }
1707
1708 entry->callback = callback;
1709 entry->priv = priv;
1710 list_add(&entry->list, &target_trace_callback_list);
1711
1712
1713 return ERROR_OK;
1714 }
1715
1716 int target_register_timer_callback(int (*callback)(void *priv),
1717 unsigned int time_ms, enum target_timer_type type, void *priv)
1718 {
1719 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1720
1721 if (callback == NULL)
1722 return ERROR_COMMAND_SYNTAX_ERROR;
1723
1724 if (*callbacks_p) {
1725 while ((*callbacks_p)->next)
1726 callbacks_p = &((*callbacks_p)->next);
1727 callbacks_p = &((*callbacks_p)->next);
1728 }
1729
1730 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1731 (*callbacks_p)->callback = callback;
1732 (*callbacks_p)->type = type;
1733 (*callbacks_p)->time_ms = time_ms;
1734 (*callbacks_p)->removed = false;
1735
1736 gettimeofday(&(*callbacks_p)->when, NULL);
1737 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1738
1739 (*callbacks_p)->priv = priv;
1740 (*callbacks_p)->next = NULL;
1741
1742 return ERROR_OK;
1743 }
1744
1745 int target_unregister_event_callback(int (*callback)(struct target *target,
1746 enum target_event event, void *priv), void *priv)
1747 {
1748 struct target_event_callback **p = &target_event_callbacks;
1749 struct target_event_callback *c = target_event_callbacks;
1750
1751 if (callback == NULL)
1752 return ERROR_COMMAND_SYNTAX_ERROR;
1753
1754 while (c) {
1755 struct target_event_callback *next = c->next;
1756 if ((c->callback == callback) && (c->priv == priv)) {
1757 *p = next;
1758 free(c);
1759 return ERROR_OK;
1760 } else
1761 p = &(c->next);
1762 c = next;
1763 }
1764
1765 return ERROR_OK;
1766 }
1767
1768 int target_unregister_reset_callback(int (*callback)(struct target *target,
1769 enum target_reset_mode reset_mode, void *priv), void *priv)
1770 {
1771 struct target_reset_callback *entry;
1772
1773 if (callback == NULL)
1774 return ERROR_COMMAND_SYNTAX_ERROR;
1775
1776 list_for_each_entry(entry, &target_reset_callback_list, list) {
1777 if (entry->callback == callback && entry->priv == priv) {
1778 list_del(&entry->list);
1779 free(entry);
1780 break;
1781 }
1782 }
1783
1784 return ERROR_OK;
1785 }
1786
1787 int target_unregister_trace_callback(int (*callback)(struct target *target,
1788 size_t len, uint8_t *data, void *priv), void *priv)
1789 {
1790 struct target_trace_callback *entry;
1791
1792 if (callback == NULL)
1793 return ERROR_COMMAND_SYNTAX_ERROR;
1794
1795 list_for_each_entry(entry, &target_trace_callback_list, list) {
1796 if (entry->callback == callback && entry->priv == priv) {
1797 list_del(&entry->list);
1798 free(entry);
1799 break;
1800 }
1801 }
1802
1803 return ERROR_OK;
1804 }
1805
1806 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1807 {
1808 if (callback == NULL)
1809 return ERROR_COMMAND_SYNTAX_ERROR;
1810
1811 for (struct target_timer_callback *c = target_timer_callbacks;
1812 c; c = c->next) {
1813 if ((c->callback == callback) && (c->priv == priv)) {
1814 c->removed = true;
1815 return ERROR_OK;
1816 }
1817 }
1818
1819 return ERROR_FAIL;
1820 }
1821
1822 int target_call_event_callbacks(struct target *target, enum target_event event)
1823 {
1824 struct target_event_callback *callback = target_event_callbacks;
1825 struct target_event_callback *next_callback;
1826
1827 if (event == TARGET_EVENT_HALTED) {
1828 /* execute early halted first */
1829 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1830 }
1831
1832 LOG_DEBUG("target event %i (%s) for core %s", event,
1833 jim_nvp_value2name_simple(nvp_target_event, event)->name,
1834 target_name(target));
1835
1836 target_handle_event(target, event);
1837
1838 while (callback) {
1839 next_callback = callback->next;
1840 callback->callback(target, event, callback->priv);
1841 callback = next_callback;
1842 }
1843
1844 return ERROR_OK;
1845 }
1846
1847 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1848 {
1849 struct target_reset_callback *callback;
1850
1851 LOG_DEBUG("target reset %i (%s)", reset_mode,
1852 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1853
1854 list_for_each_entry(callback, &target_reset_callback_list, list)
1855 callback->callback(target, reset_mode, callback->priv);
1856
1857 return ERROR_OK;
1858 }
1859
1860 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1861 {
1862 struct target_trace_callback *callback;
1863
1864 list_for_each_entry(callback, &target_trace_callback_list, list)
1865 callback->callback(target, len, data, callback->priv);
1866
1867 return ERROR_OK;
1868 }
1869
1870 static int target_timer_callback_periodic_restart(
1871 struct target_timer_callback *cb, struct timeval *now)
1872 {
1873 cb->when = *now;
1874 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1875 return ERROR_OK;
1876 }
1877
1878 static int target_call_timer_callback(struct target_timer_callback *cb,
1879 struct timeval *now)
1880 {
1881 cb->callback(cb->priv);
1882
1883 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1884 return target_timer_callback_periodic_restart(cb, now);
1885
1886 return target_unregister_timer_callback(cb->callback, cb->priv);
1887 }
1888
1889 static int target_call_timer_callbacks_check_time(int checktime)
1890 {
1891 static bool callback_processing;
1892
1893 /* Do not allow nesting */
1894 if (callback_processing)
1895 return ERROR_OK;
1896
1897 callback_processing = true;
1898
1899 keep_alive();
1900
1901 struct timeval now;
1902 gettimeofday(&now, NULL);
1903
1904 /* Store an address of the place containing a pointer to the
1905 * next item; initially, that's a standalone "root of the
1906 * list" variable. */
1907 struct target_timer_callback **callback = &target_timer_callbacks;
1908 while (callback && *callback) {
1909 if ((*callback)->removed) {
1910 struct target_timer_callback *p = *callback;
1911 *callback = (*callback)->next;
1912 free(p);
1913 continue;
1914 }
1915
1916 bool call_it = (*callback)->callback &&
1917 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1918 timeval_compare(&now, &(*callback)->when) >= 0);
1919
1920 if (call_it)
1921 target_call_timer_callback(*callback, &now);
1922
1923 callback = &(*callback)->next;
1924 }
1925
1926 callback_processing = false;
1927 return ERROR_OK;
1928 }
1929
1930 int target_call_timer_callbacks(void)
1931 {
1932 return target_call_timer_callbacks_check_time(1);
1933 }
1934
1935 /* invoke periodic callbacks immediately */
1936 int target_call_timer_callbacks_now(void)
1937 {
1938 return target_call_timer_callbacks_check_time(0);
1939 }
1940
1941 /* Prints the working area layout for debug purposes */
1942 static void print_wa_layout(struct target *target)
1943 {
1944 struct working_area *c = target->working_areas;
1945
1946 while (c) {
1947 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1948 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1949 c->address, c->address + c->size - 1, c->size);
1950 c = c->next;
1951 }
1952 }
1953
1954 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1955 static void target_split_working_area(struct working_area *area, uint32_t size)
1956 {
1957 assert(area->free); /* Shouldn't split an allocated area */
1958 assert(size <= area->size); /* Caller should guarantee this */
1959
1960 /* Split only if not already the right size */
1961 if (size < area->size) {
1962 struct working_area *new_wa = malloc(sizeof(*new_wa));
1963
1964 if (new_wa == NULL)
1965 return;
1966
1967 new_wa->next = area->next;
1968 new_wa->size = area->size - size;
1969 new_wa->address = area->address + size;
1970 new_wa->backup = NULL;
1971 new_wa->user = NULL;
1972 new_wa->free = true;
1973
1974 area->next = new_wa;
1975 area->size = size;
1976
1977 /* If backup memory was allocated to this area, it has the wrong size
1978 * now so free it and it will be reallocated if/when needed */
1979 free(area->backup);
1980 area->backup = NULL;
1981 }
1982 }
1983
1984 /* Merge all adjacent free areas into one */
1985 static void target_merge_working_areas(struct target *target)
1986 {
1987 struct working_area *c = target->working_areas;
1988
1989 while (c && c->next) {
1990 assert(c->next->address == c->address + c->size); /* This is an invariant */
1991
1992 /* Find two adjacent free areas */
1993 if (c->free && c->next->free) {
1994 /* Merge the last into the first */
1995 c->size += c->next->size;
1996
1997 /* Remove the last */
1998 struct working_area *to_be_freed = c->next;
1999 c->next = c->next->next;
2000 free(to_be_freed->backup);
2001 free(to_be_freed);
2002
2003 /* If backup memory was allocated to the remaining area, it's has
2004 * the wrong size now */
2005 free(c->backup);
2006 c->backup = NULL;
2007 } else {
2008 c = c->next;
2009 }
2010 }
2011 }
2012
2013 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2014 {
2015 /* Reevaluate working area address based on MMU state*/
2016 if (target->working_areas == NULL) {
2017 int retval;
2018 int enabled;
2019
2020 retval = target->type->mmu(target, &enabled);
2021 if (retval != ERROR_OK)
2022 return retval;
2023
2024 if (!enabled) {
2025 if (target->working_area_phys_spec) {
2026 LOG_DEBUG("MMU disabled, using physical "
2027 "address for working memory " TARGET_ADDR_FMT,
2028 target->working_area_phys);
2029 target->working_area = target->working_area_phys;
2030 } else {
2031 LOG_ERROR("No working memory available. "
2032 "Specify -work-area-phys to target.");
2033 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2034 }
2035 } else {
2036 if (target->working_area_virt_spec) {
2037 LOG_DEBUG("MMU enabled, using virtual "
2038 "address for working memory " TARGET_ADDR_FMT,
2039 target->working_area_virt);
2040 target->working_area = target->working_area_virt;
2041 } else {
2042 LOG_ERROR("No working memory available. "
2043 "Specify -work-area-virt to target.");
2044 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2045 }
2046 }
2047
2048 /* Set up initial working area on first call */
2049 struct working_area *new_wa = malloc(sizeof(*new_wa));
2050 if (new_wa) {
2051 new_wa->next = NULL;
2052 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2053 new_wa->address = target->working_area;
2054 new_wa->backup = NULL;
2055 new_wa->user = NULL;
2056 new_wa->free = true;
2057 }
2058
2059 target->working_areas = new_wa;
2060 }
2061
2062 /* only allocate multiples of 4 byte */
2063 if (size % 4)
2064 size = (size + 3) & (~3UL);
2065
2066 struct working_area *c = target->working_areas;
2067
2068 /* Find the first large enough working area */
2069 while (c) {
2070 if (c->free && c->size >= size)
2071 break;
2072 c = c->next;
2073 }
2074
2075 if (c == NULL)
2076 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2077
2078 /* Split the working area into the requested size */
2079 target_split_working_area(c, size);
2080
2081 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2082 size, c->address);
2083
2084 if (target->backup_working_area) {
2085 if (c->backup == NULL) {
2086 c->backup = malloc(c->size);
2087 if (c->backup == NULL)
2088 return ERROR_FAIL;
2089 }
2090
2091 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2092 if (retval != ERROR_OK)
2093 return retval;
2094 }
2095
2096 /* mark as used, and return the new (reused) area */
2097 c->free = false;
2098 *area = c;
2099
2100 /* user pointer */
2101 c->user = area;
2102
2103 print_wa_layout(target);
2104
2105 return ERROR_OK;
2106 }
2107
2108 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2109 {
2110 int retval;
2111
2112 retval = target_alloc_working_area_try(target, size, area);
2113 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2114 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2115 return retval;
2116
2117 }
2118
2119 static int target_restore_working_area(struct target *target, struct working_area *area)
2120 {
2121 int retval = ERROR_OK;
2122
2123 if (target->backup_working_area && area->backup != NULL) {
2124 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2125 if (retval != ERROR_OK)
2126 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2127 area->size, area->address);
2128 }
2129
2130 return retval;
2131 }
2132
2133 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2134 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2135 {
2136 int retval = ERROR_OK;
2137
2138 if (area->free)
2139 return retval;
2140
2141 if (restore) {
2142 retval = target_restore_working_area(target, area);
2143 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2144 if (retval != ERROR_OK)
2145 return retval;
2146 }
2147
2148 area->free = true;
2149
2150 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2151 area->size, area->address);
2152
2153 /* mark user pointer invalid */
2154 /* TODO: Is this really safe? It points to some previous caller's memory.
2155 * How could we know that the area pointer is still in that place and not
2156 * some other vital data? What's the purpose of this, anyway? */
2157 *area->user = NULL;
2158 area->user = NULL;
2159
2160 target_merge_working_areas(target);
2161
2162 print_wa_layout(target);
2163
2164 return retval;
2165 }
2166
2167 int target_free_working_area(struct target *target, struct working_area *area)
2168 {
2169 return target_free_working_area_restore(target, area, 1);
2170 }
2171
2172 /* free resources and restore memory, if restoring memory fails,
2173 * free up resources anyway
2174 */
2175 static void target_free_all_working_areas_restore(struct target *target, int restore)
2176 {
2177 struct working_area *c = target->working_areas;
2178
2179 LOG_DEBUG("freeing all working areas");
2180
2181 /* Loop through all areas, restoring the allocated ones and marking them as free */
2182 while (c) {
2183 if (!c->free) {
2184 if (restore)
2185 target_restore_working_area(target, c);
2186 c->free = true;
2187 *c->user = NULL; /* Same as above */
2188 c->user = NULL;
2189 }
2190 c = c->next;
2191 }
2192
2193 /* Run a merge pass to combine all areas into one */
2194 target_merge_working_areas(target);
2195
2196 print_wa_layout(target);
2197 }
2198
2199 void target_free_all_working_areas(struct target *target)
2200 {
2201 target_free_all_working_areas_restore(target, 1);
2202
2203 /* Now we have none or only one working area marked as free */
2204 if (target->working_areas) {
2205 /* Free the last one to allow on-the-fly moving and resizing */
2206 free(target->working_areas->backup);
2207 free(target->working_areas);
2208 target->working_areas = NULL;
2209 }
2210 }
2211
2212 /* Find the largest number of bytes that can be allocated */
2213 uint32_t target_get_working_area_avail(struct target *target)
2214 {
2215 struct working_area *c = target->working_areas;
2216 uint32_t max_size = 0;
2217
2218 if (c == NULL)
2219 return target->working_area_size;
2220
2221 while (c) {
2222 if (c->free && max_size < c->size)
2223 max_size = c->size;
2224
2225 c = c->next;
2226 }
2227
2228 return max_size;
2229 }
2230
2231 static void target_destroy(struct target *target)
2232 {
2233 if (target->type->deinit_target)
2234 target->type->deinit_target(target);
2235
2236 free(target->semihosting);
2237
2238 jtag_unregister_event_callback(jtag_enable_callback, target);
2239
2240 struct target_event_action *teap = target->event_action;
2241 while (teap) {
2242 struct target_event_action *next = teap->next;
2243 Jim_DecrRefCount(teap->interp, teap->body);
2244 free(teap);
2245 teap = next;
2246 }
2247
2248 target_free_all_working_areas(target);
2249
2250 /* release the targets SMP list */
2251 if (target->smp) {
2252 struct target_list *head = target->head;
2253 while (head != NULL) {
2254 struct target_list *pos = head->next;
2255 head->target->smp = 0;
2256 free(head);
2257 head = pos;
2258 }
2259 target->smp = 0;
2260 }
2261
2262 rtos_destroy(target);
2263
2264 free(target->gdb_port_override);
2265 free(target->type);
2266 free(target->trace_info);
2267 free(target->fileio_info);
2268 free(target->cmd_name);
2269 free(target);
2270 }
2271
2272 void target_quit(void)
2273 {
2274 struct target_event_callback *pe = target_event_callbacks;
2275 while (pe) {
2276 struct target_event_callback *t = pe->next;
2277 free(pe);
2278 pe = t;
2279 }
2280 target_event_callbacks = NULL;
2281
2282 struct target_timer_callback *pt = target_timer_callbacks;
2283 while (pt) {
2284 struct target_timer_callback *t = pt->next;
2285 free(pt);
2286 pt = t;
2287 }
2288 target_timer_callbacks = NULL;
2289
2290 for (struct target *target = all_targets; target;) {
2291 struct target *tmp;
2292
2293 tmp = target->next;
2294 target_destroy(target);
2295 target = tmp;
2296 }
2297
2298 all_targets = NULL;
2299 }
2300
2301 int target_arch_state(struct target *target)
2302 {
2303 int retval;
2304 if (target == NULL) {
2305 LOG_WARNING("No target has been configured");
2306 return ERROR_OK;
2307 }
2308
2309 if (target->state != TARGET_HALTED)
2310 return ERROR_OK;
2311
2312 retval = target->type->arch_state(target);
2313 return retval;
2314 }
2315
2316 static int target_get_gdb_fileio_info_default(struct target *target,
2317 struct gdb_fileio_info *fileio_info)
2318 {
2319 /* If target does not support semi-hosting function, target
2320 has no need to provide .get_gdb_fileio_info callback.
2321 It just return ERROR_FAIL and gdb_server will return "Txx"
2322 as target halted every time. */
2323 return ERROR_FAIL;
2324 }
2325
2326 static int target_gdb_fileio_end_default(struct target *target,
2327 int retcode, int fileio_errno, bool ctrl_c)
2328 {
2329 return ERROR_OK;
2330 }
2331
2332 int target_profiling_default(struct target *target, uint32_t *samples,
2333 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2334 {
2335 struct timeval timeout, now;
2336
2337 gettimeofday(&timeout, NULL);
2338 timeval_add_time(&timeout, seconds, 0);
2339
2340 LOG_INFO("Starting profiling. Halting and resuming the"
2341 " target as often as we can...");
2342
2343 uint32_t sample_count = 0;
2344 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2345 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
2346
2347 int retval = ERROR_OK;
2348 for (;;) {
2349 target_poll(target);
2350 if (target->state == TARGET_HALTED) {
2351 uint32_t t = buf_get_u32(reg->value, 0, 32);
2352 samples[sample_count++] = t;
2353 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2354 retval = target_resume(target, 1, 0, 0, 0);
2355 target_poll(target);
2356 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2357 } else if (target->state == TARGET_RUNNING) {
2358 /* We want to quickly sample the PC. */
2359 retval = target_halt(target);
2360 } else {
2361 LOG_INFO("Target not halted or running");
2362 retval = ERROR_OK;
2363 break;
2364 }
2365
2366 if (retval != ERROR_OK)
2367 break;
2368
2369 gettimeofday(&now, NULL);
2370 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2371 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2372 break;
2373 }
2374 }
2375
2376 *num_samples = sample_count;
2377 return retval;
2378 }
2379
2380 /* Single aligned words are guaranteed to use 16 or 32 bit access
2381 * mode respectively, otherwise data is handled as quickly as
2382 * possible
2383 */
2384 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2385 {
2386 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2387 size, address);
2388
2389 if (!target_was_examined(target)) {
2390 LOG_ERROR("Target not examined yet");
2391 return ERROR_FAIL;
2392 }
2393
2394 if (size == 0)
2395 return ERROR_OK;
2396
2397 if ((address + size - 1) < address) {
2398 /* GDB can request this when e.g. PC is 0xfffffffc */
2399 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2400 address,
2401 size);
2402 return ERROR_FAIL;
2403 }
2404
2405 return target->type->write_buffer(target, address, size, buffer);
2406 }
2407
2408 static int target_write_buffer_default(struct target *target,
2409 target_addr_t address, uint32_t count, const uint8_t *buffer)
2410 {
2411 uint32_t size;
2412 unsigned int data_bytes = target_data_bits(target) / 8;
2413
2414 /* Align up to maximum bytes. The loop condition makes sure the next pass
2415 * will have something to do with the size we leave to it. */
2416 for (size = 1;
2417 size < data_bytes && count >= size * 2 + (address & size);
2418 size *= 2) {
2419 if (address & size) {
2420 int retval = target_write_memory(target, address, size, 1, buffer);
2421 if (retval != ERROR_OK)
2422 return retval;
2423 address += size;
2424 count -= size;
2425 buffer += size;
2426 }
2427 }
2428
2429 /* Write the data with as large access size as possible. */
2430 for (; size > 0; size /= 2) {
2431 uint32_t aligned = count - count % size;
2432 if (aligned > 0) {
2433 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2434 if (retval != ERROR_OK)
2435 return retval;
2436 address += aligned;
2437 count -= aligned;
2438 buffer += aligned;
2439 }
2440 }
2441
2442 return ERROR_OK;
2443 }
2444
2445 /* Single aligned words are guaranteed to use 16 or 32 bit access
2446 * mode respectively, otherwise data is handled as quickly as
2447 * possible
2448 */
2449 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2450 {
2451 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2452 size, address);
2453
2454 if (!target_was_examined(target)) {
2455 LOG_ERROR("Target not examined yet");
2456 return ERROR_FAIL;
2457 }
2458
2459 if (size == 0)
2460 return ERROR_OK;
2461
2462 if ((address + size - 1) < address) {
2463 /* GDB can request this when e.g. PC is 0xfffffffc */
2464 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2465 address,
2466 size);
2467 return ERROR_FAIL;
2468 }
2469
2470 return target->type->read_buffer(target, address, size, buffer);
2471 }
2472
2473 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2474 {
2475 uint32_t size;
2476 unsigned int data_bytes = target_data_bits(target) / 8;
2477
2478 /* Align up to maximum bytes. The loop condition makes sure the next pass
2479 * will have something to do with the size we leave to it. */
2480 for (size = 1;
2481 size < data_bytes && count >= size * 2 + (address & size);
2482 size *= 2) {
2483 if (address & size) {
2484 int retval = target_read_memory(target, address, size, 1, buffer);
2485 if (retval != ERROR_OK)
2486 return retval;
2487 address += size;
2488 count -= size;
2489 buffer += size;
2490 }
2491 }
2492
2493 /* Read the data with as large access size as possible. */
2494 for (; size > 0; size /= 2) {
2495 uint32_t aligned = count - count % size;
2496 if (aligned > 0) {
2497 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2498 if (retval != ERROR_OK)
2499 return retval;
2500 address += aligned;
2501 count -= aligned;
2502 buffer += aligned;
2503 }
2504 }
2505
2506 return ERROR_OK;
2507 }
2508
2509 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2510 {
2511 uint8_t *buffer;
2512 int retval;
2513 uint32_t i;
2514 uint32_t checksum = 0;
2515 if (!target_was_examined(target)) {
2516 LOG_ERROR("Target not examined yet");
2517 return ERROR_FAIL;
2518 }
2519
2520 retval = target->type->checksum_memory(target, address, size, &checksum);
2521 if (retval != ERROR_OK) {
2522 buffer = malloc(size);
2523 if (buffer == NULL) {
2524 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2525 return ERROR_COMMAND_SYNTAX_ERROR;
2526 }
2527 retval = target_read_buffer(target, address, size, buffer);
2528 if (retval != ERROR_OK) {
2529 free(buffer);
2530 return retval;
2531 }
2532
2533 /* convert to target endianness */
2534 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2535 uint32_t target_data;
2536 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2537 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2538 }
2539
2540 retval = image_calculate_checksum(buffer, size, &checksum);
2541 free(buffer);
2542 }
2543
2544 *crc = checksum;
2545
2546 return retval;
2547 }
2548
2549 int target_blank_check_memory(struct target *target,
2550 struct target_memory_check_block *blocks, int num_blocks,
2551 uint8_t erased_value)
2552 {
2553 if (!target_was_examined(target)) {
2554 LOG_ERROR("Target not examined yet");
2555 return ERROR_FAIL;
2556 }
2557
2558 if (target->type->blank_check_memory == NULL)
2559 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2560
2561 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2562 }
2563
2564 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2565 {
2566 uint8_t value_buf[8];
2567 if (!target_was_examined(target)) {
2568 LOG_ERROR("Target not examined yet");
2569 return ERROR_FAIL;
2570 }
2571
2572 int retval = target_read_memory(target, address, 8, 1, value_buf);
2573
2574 if (retval == ERROR_OK) {
2575 *value = target_buffer_get_u64(target, value_buf);
2576 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2577 address,
2578 *value);
2579 } else {
2580 *value = 0x0;
2581 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2582 address);
2583 }
2584
2585 return retval;
2586 }
2587
2588 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2589 {
2590 uint8_t value_buf[4];
2591 if (!target_was_examined(target)) {
2592 LOG_ERROR("Target not examined yet");
2593 return ERROR_FAIL;
2594 }
2595
2596 int retval = target_read_memory(target, address, 4, 1, value_buf);
2597
2598 if (retval == ERROR_OK) {
2599 *value = target_buffer_get_u32(target, value_buf);
2600 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2601 address,
2602 *value);
2603 } else {
2604 *value = 0x0;
2605 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2606 address);
2607 }
2608
2609 return retval;
2610 }
2611
2612 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2613 {
2614 uint8_t value_buf[2];
2615 if (!target_was_examined(target)) {
2616 LOG_ERROR("Target not examined yet");
2617 return ERROR_FAIL;
2618 }
2619
2620 int retval = target_read_memory(target, address, 2, 1, value_buf);
2621
2622 if (retval == ERROR_OK) {
2623 *value = target_buffer_get_u16(target, value_buf);
2624 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2625 address,
2626 *value);
2627 } else {
2628 *value = 0x0;
2629 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2630 address);
2631 }
2632
2633 return retval;
2634 }
2635
2636 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2637 {
2638 if (!target_was_examined(target)) {
2639 LOG_ERROR("Target not examined yet");
2640 return ERROR_FAIL;
2641 }
2642
2643 int retval = target_read_memory(target, address, 1, 1, value);
2644
2645 if (retval == ERROR_OK) {
2646 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2647 address,
2648 *value);
2649 } else {
2650 *value = 0x0;
2651 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2652 address);
2653 }
2654
2655 return retval;
2656 }
2657
2658 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2659 {
2660 int retval;
2661 uint8_t value_buf[8];
2662 if (!target_was_examined(target)) {
2663 LOG_ERROR("Target not examined yet");
2664 return ERROR_FAIL;
2665 }
2666
2667 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2668 address,
2669 value);
2670
2671 target_buffer_set_u64(target, value_buf, value);
2672 retval = target_write_memory(target, address, 8, 1, value_buf);
2673 if (retval != ERROR_OK)
2674 LOG_DEBUG("failed: %i", retval);
2675
2676 return retval;
2677 }
2678
2679 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2680 {
2681 int retval;
2682 uint8_t value_buf[4];
2683 if (!target_was_examined(target)) {
2684 LOG_ERROR("Target not examined yet");
2685 return ERROR_FAIL;
2686 }
2687
2688 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2689 address,
2690 value);
2691
2692 target_buffer_set_u32(target, value_buf, value);
2693 retval = target_write_memory(target, address, 4, 1, value_buf);
2694 if (retval != ERROR_OK)
2695 LOG_DEBUG("failed: %i", retval);
2696
2697 return retval;
2698 }
2699
2700 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2701 {
2702 int retval;
2703 uint8_t value_buf[2];
2704 if (!target_was_examined(target)) {
2705 LOG_ERROR("Target not examined yet");
2706 return ERROR_FAIL;
2707 }
2708
2709 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2710 address,
2711 value);
2712
2713 target_buffer_set_u16(target, value_buf, value);
2714 retval = target_write_memory(target, address, 2, 1, value_buf);
2715 if (retval != ERROR_OK)
2716 LOG_DEBUG("failed: %i", retval);
2717
2718 return retval;
2719 }
2720
2721 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2722 {
2723 int retval;
2724 if (!target_was_examined(target)) {
2725 LOG_ERROR("Target not examined yet");
2726 return ERROR_FAIL;
2727 }
2728
2729 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2730 address, value);
2731
2732 retval = target_write_memory(target, address, 1, 1, &value);
2733 if (retval != ERROR_OK)
2734 LOG_DEBUG("failed: %i", retval);
2735
2736 return retval;
2737 }
2738
2739 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2740 {
2741 int retval;
2742 uint8_t value_buf[8];
2743 if (!target_was_examined(target)) {
2744 LOG_ERROR("Target not examined yet");
2745 return ERROR_FAIL;
2746 }
2747
2748 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2749 address,
2750 value);
2751
2752 target_buffer_set_u64(target, value_buf, value);
2753 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2754 if (retval != ERROR_OK)
2755 LOG_DEBUG("failed: %i", retval);
2756
2757 return retval;
2758 }
2759
2760 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2761 {
2762 int retval;
2763 uint8_t value_buf[4];
2764 if (!target_was_examined(target)) {
2765 LOG_ERROR("Target not examined yet");
2766 return ERROR_FAIL;
2767 }
2768
2769 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2770 address,
2771 value);
2772
2773 target_buffer_set_u32(target, value_buf, value);
2774 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2775 if (retval != ERROR_OK)
2776 LOG_DEBUG("failed: %i", retval);
2777
2778 return retval;
2779 }
2780
2781 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2782 {
2783 int retval;
2784 uint8_t value_buf[2];
2785 if (!target_was_examined(target)) {
2786 LOG_ERROR("Target not examined yet");
2787 return ERROR_FAIL;
2788 }
2789
2790 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2791 address,
2792 value);
2793
2794 target_buffer_set_u16(target, value_buf, value);
2795 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2796 if (retval != ERROR_OK)
2797 LOG_DEBUG("failed: %i", retval);
2798
2799 return retval;
2800 }
2801
2802 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2803 {
2804 int retval;
2805 if (!target_was_examined(target)) {
2806 LOG_ERROR("Target not examined yet");
2807 return ERROR_FAIL;
2808 }
2809
2810 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2811 address, value);
2812
2813 retval = target_write_phys_memory(target, address, 1, 1, &value);
2814 if (retval != ERROR_OK)
2815 LOG_DEBUG("failed: %i", retval);
2816
2817 return retval;
2818 }
2819
2820 static int find_target(struct command_invocation *cmd, const char *name)
2821 {
2822 struct target *target = get_target(name);
2823 if (target == NULL) {
2824 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2825 return ERROR_FAIL;
2826 }
2827 if (!target->tap->enabled) {
2828 command_print(cmd, "Target: TAP %s is disabled, "
2829 "can't be the current target\n",
2830 target->tap->dotted_name);
2831 return ERROR_FAIL;
2832 }
2833
2834 cmd->ctx->current_target = target;
2835 if (cmd->ctx->current_target_override)
2836 cmd->ctx->current_target_override = target;
2837
2838 return ERROR_OK;
2839 }
2840
2841
2842 COMMAND_HANDLER(handle_targets_command)
2843 {
2844 int retval = ERROR_OK;
2845 if (CMD_ARGC == 1) {
2846 retval = find_target(CMD, CMD_ARGV[0]);
2847 if (retval == ERROR_OK) {
2848 /* we're done! */
2849 return retval;
2850 }
2851 }
2852
2853 struct target *target = all_targets;
2854 command_print(CMD, " TargetName Type Endian TapName State ");
2855 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2856 while (target) {
2857 const char *state;
2858 char marker = ' ';
2859
2860 if (target->tap->enabled)
2861 state = target_state_name(target);
2862 else
2863 state = "tap-disabled";
2864
2865 if (CMD_CTX->current_target == target)
2866 marker = '*';
2867
2868 /* keep columns lined up to match the headers above */
2869 command_print(CMD,
2870 "%2d%c %-18s %-10s %-6s %-18s %s",
2871 target->target_number,
2872 marker,
2873 target_name(target),
2874 target_type_name(target),
2875 jim_nvp_value2name_simple(nvp_target_endian,
2876 target->endianness)->name,
2877 target->tap->dotted_name,
2878 state);
2879 target = target->next;
2880 }
2881
2882 return retval;
2883 }
2884
2885 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2886
2887 static int powerDropout;
2888 static int srstAsserted;
2889
2890 static int runPowerRestore;
2891 static int runPowerDropout;
2892 static int runSrstAsserted;
2893 static int runSrstDeasserted;
2894
2895 static int sense_handler(void)
2896 {
2897 static int prevSrstAsserted;
2898 static int prevPowerdropout;
2899
2900 int retval = jtag_power_dropout(&powerDropout);
2901 if (retval != ERROR_OK)
2902 return retval;
2903
2904 int powerRestored;
2905 powerRestored = prevPowerdropout && !powerDropout;
2906 if (powerRestored)
2907 runPowerRestore = 1;
2908
2909 int64_t current = timeval_ms();
2910 static int64_t lastPower;
2911 bool waitMore = lastPower + 2000 > current;
2912 if (powerDropout && !waitMore) {
2913 runPowerDropout = 1;
2914 lastPower = current;
2915 }
2916
2917 retval = jtag_srst_asserted(&srstAsserted);
2918 if (retval != ERROR_OK)
2919 return retval;
2920
2921 int srstDeasserted;
2922 srstDeasserted = prevSrstAsserted && !srstAsserted;
2923
2924 static int64_t lastSrst;
2925 waitMore = lastSrst + 2000 > current;
2926 if (srstDeasserted && !waitMore) {
2927 runSrstDeasserted = 1;
2928 lastSrst = current;
2929 }
2930
2931 if (!prevSrstAsserted && srstAsserted)
2932 runSrstAsserted = 1;
2933
2934 prevSrstAsserted = srstAsserted;
2935 prevPowerdropout = powerDropout;
2936
2937 if (srstDeasserted || powerRestored) {
2938 /* Other than logging the event we can't do anything here.
2939 * Issuing a reset is a particularly bad idea as we might
2940 * be inside a reset already.
2941 */
2942 }
2943
2944 return ERROR_OK;
2945 }
2946
2947 /* process target state changes */
2948 static int handle_target(void *priv)
2949 {
2950 Jim_Interp *interp = (Jim_Interp *)priv;
2951 int retval = ERROR_OK;
2952
2953 if (!is_jtag_poll_safe()) {
2954 /* polling is disabled currently */
2955 return ERROR_OK;
2956 }
2957
2958 /* we do not want to recurse here... */
2959 static int recursive;
2960 if (!recursive) {
2961 recursive = 1;
2962 sense_handler();
2963 /* danger! running these procedures can trigger srst assertions and power dropouts.
2964 * We need to avoid an infinite loop/recursion here and we do that by
2965 * clearing the flags after running these events.
2966 */
2967 int did_something = 0;
2968 if (runSrstAsserted) {
2969 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2970 Jim_Eval(interp, "srst_asserted");
2971 did_something = 1;
2972 }
2973 if (runSrstDeasserted) {
2974 Jim_Eval(interp, "srst_deasserted");
2975 did_something = 1;
2976 }
2977 if (runPowerDropout) {
2978 LOG_INFO("Power dropout detected, running power_dropout proc.");
2979 Jim_Eval(interp, "power_dropout");
2980 did_something = 1;
2981 }
2982 if (runPowerRestore) {
2983 Jim_Eval(interp, "power_restore");
2984 did_something = 1;
2985 }
2986
2987 if (did_something) {
2988 /* clear detect flags */
2989 sense_handler();
2990 }
2991
2992 /* clear action flags */
2993
2994 runSrstAsserted = 0;
2995 runSrstDeasserted = 0;
2996 runPowerRestore = 0;
2997 runPowerDropout = 0;
2998
2999 recursive = 0;
3000 }
3001
3002 /* Poll targets for state changes unless that's globally disabled.
3003 * Skip targets that are currently disabled.
3004 */
3005 for (struct target *target = all_targets;
3006 is_jtag_poll_safe() && target;
3007 target = target->next) {
3008
3009 if (!target_was_examined(target))
3010 continue;
3011
3012 if (!target->tap->enabled)
3013 continue;
3014
3015 if (target->backoff.times > target->backoff.count) {
3016 /* do not poll this time as we failed previously */
3017 target->backoff.count++;
3018 continue;
3019 }
3020 target->backoff.count = 0;
3021
3022 /* only poll target if we've got power and srst isn't asserted */
3023 if (!powerDropout && !srstAsserted) {
3024 /* polling may fail silently until the target has been examined */
3025 retval = target_poll(target);
3026 if (retval != ERROR_OK) {
3027 /* 100ms polling interval. Increase interval between polling up to 5000ms */
3028 if (target->backoff.times * polling_interval < 5000) {
3029 target->backoff.times *= 2;
3030 target->backoff.times++;
3031 }
3032
3033 /* Tell GDB to halt the debugger. This allows the user to
3034 * run monitor commands to handle the situation.
3035 */
3036 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3037 }
3038 if (target->backoff.times > 0) {
3039 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3040 target_reset_examined(target);
3041 retval = target_examine_one(target);
3042 /* Target examination could have failed due to unstable connection,
3043 * but we set the examined flag anyway to repoll it later */
3044 if (retval != ERROR_OK) {
3045 target->examined = true;
3046 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3047 target->backoff.times * polling_interval);
3048 return retval;
3049 }
3050 }
3051
3052 /* Since we succeeded, we reset backoff count */
3053 target->backoff.times = 0;
3054 }
3055 }
3056
3057 return retval;
3058 }
3059
3060 COMMAND_HANDLER(handle_reg_command)
3061 {
3062 struct target *target;
3063 struct reg *reg = NULL;
3064 unsigned count = 0;
3065 char *value;
3066
3067 LOG_DEBUG("-");
3068
3069 target = get_current_target(CMD_CTX);
3070
3071 /* list all available registers for the current target */
3072 if (CMD_ARGC == 0) {
3073 struct reg_cache *cache = target->reg_cache;
3074
3075 count = 0;
3076 while (cache) {
3077 unsigned i;
3078
3079 command_print(CMD, "===== %s", cache->name);
3080
3081 for (i = 0, reg = cache->reg_list;
3082 i < cache->num_regs;
3083 i++, reg++, count++) {
3084 if (reg->exist == false || reg->hidden)
3085 continue;
3086 /* only print cached values if they are valid */
3087 if (reg->valid) {
3088 value = buf_to_hex_str(reg->value,
3089 reg->size);
3090 command_print(CMD,
3091 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3092 count, reg->name,
3093 reg->size, value,
3094 reg->dirty
3095 ? " (dirty)"
3096 : "");
3097 free(value);
3098 } else {
3099 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3100 count, reg->name,
3101 reg->size);
3102 }
3103 }
3104 cache = cache->next;
3105 }
3106
3107 return ERROR_OK;
3108 }
3109
3110 /* access a single register by its ordinal number */
3111 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3112 unsigned num;
3113 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3114
3115 struct reg_cache *cache = target->reg_cache;
3116 count = 0;
3117 while (cache) {
3118 unsigned i;
3119 for (i = 0; i < cache->num_regs; i++) {
3120 if (count++ == num) {
3121 reg = &cache->reg_list[i];
3122 break;
3123 }
3124 }
3125 if (reg)
3126 break;
3127 cache = cache->next;
3128 }
3129
3130 if (!reg) {
3131 command_print(CMD, "%i is out of bounds, the current target "
3132 "has only %i registers (0 - %i)", num, count, count - 1);
3133 return ERROR_OK;
3134 }
3135 } else {
3136 /* access a single register by its name */
3137 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
3138
3139 if (!reg)
3140 goto not_found;
3141 }
3142
3143 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
3144
3145 if (!reg->exist)
3146 goto not_found;
3147
3148 /* display a register */
3149 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3150 && (CMD_ARGV[1][0] <= '9')))) {
3151 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3152 reg->valid = 0;
3153
3154 if (reg->valid == 0)
3155 reg->type->get(reg);
3156 value = buf_to_hex_str(reg->value, reg->size);
3157 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3158 free(value);
3159 return ERROR_OK;
3160 }
3161
3162 /* set register value */
3163 if (CMD_ARGC == 2) {
3164 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3165 if (buf == NULL)
3166 return ERROR_FAIL;
3167 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3168
3169 reg->type->set(reg, buf);
3170
3171 value = buf_to_hex_str(reg->value, reg->size);
3172 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3173 free(value);
3174
3175 free(buf);
3176
3177 return ERROR_OK;
3178 }
3179
3180 return ERROR_COMMAND_SYNTAX_ERROR;
3181
3182 not_found:
3183 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3184 return ERROR_OK;
3185 }
3186
3187 COMMAND_HANDLER(handle_poll_command)
3188 {
3189 int retval = ERROR_OK;
3190 struct target *target = get_current_target(CMD_CTX);
3191
3192 if (CMD_ARGC == 0) {
3193 command_print(CMD, "background polling: %s",
3194 jtag_poll_get_enabled() ? "on" : "off");
3195 command_print(CMD, "TAP: %s (%s)",
3196 target->tap->dotted_name,
3197 target->tap->enabled ? "enabled" : "disabled");
3198 if (!target->tap->enabled)
3199 return ERROR_OK;
3200 retval = target_poll(target);
3201 if (retval != ERROR_OK)
3202 return retval;
3203 retval = target_arch_state(target);
3204 if (retval != ERROR_OK)
3205 return retval;
3206 } else if (CMD_ARGC == 1) {
3207 bool enable;
3208 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3209 jtag_poll_set_enabled(enable);
3210 } else
3211 return ERROR_COMMAND_SYNTAX_ERROR;
3212
3213 return retval;
3214 }
3215
3216 COMMAND_HANDLER(handle_wait_halt_command)
3217 {
3218 if (CMD_ARGC > 1)
3219 return ERROR_COMMAND_SYNTAX_ERROR;
3220
3221 unsigned ms = DEFAULT_HALT_TIMEOUT;
3222 if (1 == CMD_ARGC) {
3223 int retval = parse_uint(CMD_ARGV[0], &ms);
3224 if (ERROR_OK != retval)
3225 return ERROR_COMMAND_SYNTAX_ERROR;
3226 }
3227
3228 struct target *target = get_current_target(CMD_CTX);
3229 return target_wait_state(target, TARGET_HALTED, ms);
3230 }
3231
3232 /* wait for target state to change. The trick here is to have a low
3233 * latency for short waits and not to suck up all the CPU time
3234 * on longer waits.
3235 *
3236 * After 500ms, keep_alive() is invoked
3237 */
3238 int target_wait_state(struct target *target, enum target_state state, int ms)
3239 {
3240 int retval;
3241 int64_t then = 0, cur;
3242 bool once = true;
3243
3244 for (;;) {
3245 retval = target_poll(target);
3246 if (retval != ERROR_OK)
3247 return retval;
3248 if (target->state == state)
3249 break;
3250 cur = timeval_ms();
3251 if (once) {
3252 once = false;
3253 then = timeval_ms();
3254 LOG_DEBUG("waiting for target %s...",
3255 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3256 }
3257
3258 if (cur-then > 500)
3259 keep_alive();
3260
3261 if ((cur-then) > ms) {
3262 LOG_ERROR("timed out while waiting for target %s",
3263 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3264 return ERROR_FAIL;
3265 }
3266 }
3267
3268 return ERROR_OK;
3269 }
3270
3271 COMMAND_HANDLER(handle_halt_command)
3272 {
3273 LOG_DEBUG("-");
3274
3275 struct target *target = get_current_target(CMD_CTX);
3276
3277 target->verbose_halt_msg = true;
3278
3279 int retval = target_halt(target);
3280 if (ERROR_OK != retval)
3281 return retval;
3282
3283 if (CMD_ARGC == 1) {
3284 unsigned wait_local;
3285 retval = parse_uint(CMD_ARGV[0], &wait_local);
3286 if (ERROR_OK != retval)
3287 return ERROR_COMMAND_SYNTAX_ERROR;
3288 if (!wait_local)
3289 return ERROR_OK;
3290 }
3291
3292 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3293 }
3294
3295 COMMAND_HANDLER(handle_soft_reset_halt_command)
3296 {
3297 struct target *target = get_current_target(CMD_CTX);
3298
3299 LOG_USER("requesting target halt and executing a soft reset");
3300
3301 target_soft_reset_halt(target);
3302
3303 return ERROR_OK;
3304 }
3305
3306 COMMAND_HANDLER(handle_reset_command)
3307 {
3308 if (CMD_ARGC > 1)
3309 return ERROR_COMMAND_SYNTAX_ERROR;
3310
3311 enum target_reset_mode reset_mode = RESET_RUN;
3312 if (CMD_ARGC == 1) {
3313 const struct jim_nvp *n;
3314 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3315 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3316 return ERROR_COMMAND_SYNTAX_ERROR;
3317 reset_mode = n->value;
3318 }
3319
3320 /* reset *all* targets */
3321 return target_process_reset(CMD, reset_mode);
3322 }
3323
3324
3325 COMMAND_HANDLER(handle_resume_command)
3326 {
3327 int current = 1;
3328 if (CMD_ARGC > 1)
3329 return ERROR_COMMAND_SYNTAX_ERROR;
3330
3331 struct target *target = get_current_target(CMD_CTX);
3332
3333 /* with no CMD_ARGV, resume from current pc, addr = 0,
3334 * with one arguments, addr = CMD_ARGV[0],
3335 * handle breakpoints, not debugging */
3336 target_addr_t addr = 0;
3337 if (CMD_ARGC == 1) {
3338 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3339 current = 0;
3340 }
3341
3342 return target_resume(target, current, addr, 1, 0);
3343 }
3344
3345 COMMAND_HANDLER(handle_step_command)
3346 {
3347 if (CMD_ARGC > 1)
3348 return ERROR_COMMAND_SYNTAX_ERROR;
3349
3350 LOG_DEBUG("-");
3351
3352 /* with no CMD_ARGV, step from current pc, addr = 0,
3353 * with one argument addr = CMD_ARGV[0],
3354 * handle breakpoints, debugging */
3355 target_addr_t addr = 0;
3356 int current_pc = 1;
3357 if (CMD_ARGC == 1) {
3358 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3359 current_pc = 0;
3360 }
3361
3362 struct target *target = get_current_target(CMD_CTX);
3363
3364 return target_step(target, current_pc, addr, 1);
3365 }
3366
3367 void target_handle_md_output(struct command_invocation *cmd,
3368 struct target *target, target_addr_t address, unsigned size,
3369 unsigned count, const uint8_t *buffer)
3370 {
3371 const unsigned line_bytecnt = 32;
3372 unsigned line_modulo = line_bytecnt / size;
3373
3374 char output[line_bytecnt * 4 + 1];
3375 unsigned output_len = 0;
3376
3377 const char *value_fmt;
3378 switch (size) {
3379 case 8:
3380 value_fmt = "%16.16"PRIx64" ";
3381 break;
3382 case 4:
3383 value_fmt = "%8.8"PRIx64" ";
3384 break;
3385 case 2:
3386 value_fmt = "%4.4"PRIx64" ";
3387 break;
3388 case 1:
3389 value_fmt = "%2.2"PRIx64" ";
3390 break;
3391 default:
3392 /* "can't happen", caller checked */
3393 LOG_ERROR("invalid memory read size: %u", size);
3394 return;
3395 }
3396
3397 for (unsigned i = 0; i < count; i++) {
3398 if (i % line_modulo == 0) {
3399 output_len += snprintf(output + output_len,
3400 sizeof(output) - output_len,
3401 TARGET_ADDR_FMT ": ",
3402 (address + (i * size)));
3403 }
3404
3405 uint64_t value = 0;
3406 const uint8_t *value_ptr = buffer + i * size;
3407 switch (size) {
3408 case 8:
3409 value = target_buffer_get_u64(target, value_ptr);
3410 break;
3411 case 4:
3412 value = target_buffer_get_u32(target, value_ptr);
3413 break;
3414 case 2:
3415 value = target_buffer_get_u16(target, value_ptr);
3416 break;
3417 case 1:
3418 value = *value_ptr;
3419 }
3420 output_len += snprintf(output + output_len,
3421 sizeof(output) - output_len,
3422 value_fmt, value);
3423
3424 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3425 command_print(cmd, "%s", output);
3426 output_len = 0;
3427 }
3428 }
3429 }
3430
3431 COMMAND_HANDLER(handle_md_command)
3432 {
3433 if (CMD_ARGC < 1)
3434 return ERROR_COMMAND_SYNTAX_ERROR;
3435
3436 unsigned size = 0;
3437 switch (CMD_NAME[2]) {
3438 case 'd':
3439 size = 8;
3440 break;
3441 case 'w':
3442 size = 4;
3443 break;
3444 case 'h':
3445 size = 2;
3446 break;
3447 case 'b':
3448 size = 1;
3449 break;
3450 default:
3451 return ERROR_COMMAND_SYNTAX_ERROR;
3452 }
3453
3454 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3455 int (*fn)(struct target *target,
3456 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3457 if (physical) {
3458 CMD_ARGC--;
3459 CMD_ARGV++;
3460 fn = target_read_phys_memory;
3461 } else
3462 fn = target_read_memory;
3463 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3464 return ERROR_COMMAND_SYNTAX_ERROR;
3465
3466 target_addr_t address;
3467 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3468
3469 unsigned count = 1;
3470 if (CMD_ARGC == 2)
3471 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3472
3473 uint8_t *buffer = calloc(count, size);
3474 if (buffer == NULL) {
3475 LOG_ERROR("Failed to allocate md read buffer");
3476 return ERROR_FAIL;
3477 }
3478
3479 struct target *target = get_current_target(CMD_CTX);
3480 int retval = fn(target, address, size, count, buffer);
3481 if (ERROR_OK == retval)
3482 target_handle_md_output(CMD, target, address, size, count, buffer);
3483
3484 free(buffer);
3485
3486 return retval;
3487 }
3488
3489 typedef int (*target_write_fn)(struct target *target,
3490 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3491
3492 static int target_fill_mem(struct target *target,
3493 target_addr_t address,
3494 target_write_fn fn,
3495 unsigned data_size,
3496 /* value */
3497 uint64_t b,
3498 /* count */
3499 unsigned c)
3500 {
3501 /* We have to write in reasonably large chunks to be able
3502 * to fill large memory areas with any sane speed */
3503 const unsigned chunk_size = 16384;
3504 uint8_t *target_buf = malloc(chunk_size * data_size);
3505 if (target_buf == NULL) {
3506 LOG_ERROR("Out of memory");
3507 return ERROR_FAIL;
3508 }
3509
3510 for (unsigned i = 0; i < chunk_size; i++) {
3511 switch (data_size) {
3512 case 8:
3513 target_buffer_set_u64(target, target_buf + i * data_size, b);
3514 break;
3515 case 4:
3516 target_buffer_set_u32(target, target_buf + i * data_size, b);
3517 break;
3518 case 2:
3519 target_buffer_set_u16(target, target_buf + i * data_size, b);
3520 break;
3521 case 1:
3522 target_buffer_set_u8(target, target_buf + i * data_size, b);
3523 break;
3524 default:
3525 exit(-1);
3526 }
3527 }
3528
3529 int retval = ERROR_OK;
3530
3531 for (unsigned x = 0; x < c; x += chunk_size) {
3532 unsigned current;
3533 current = c - x;
3534 if (current > chunk_size)
3535 current = chunk_size;
3536 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3537 if (retval != ERROR_OK)
3538 break;
3539 /* avoid GDB timeouts */
3540 keep_alive();
3541 }
3542 free(target_buf);
3543
3544 return retval;
3545 }
3546
3547
3548 COMMAND_HANDLER(handle_mw_command)
3549 {
3550 if (CMD_ARGC < 2)
3551 return ERROR_COMMAND_SYNTAX_ERROR;
3552 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3553 target_write_fn fn;
3554 if (physical) {
3555 CMD_ARGC--;
3556 CMD_ARGV++;
3557 fn = target_write_phys_memory;
3558 } else
3559 fn = target_write_memory;
3560 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3561 return ERROR_COMMAND_SYNTAX_ERROR;
3562
3563 target_addr_t address;
3564 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3565
3566 uint64_t value;
3567 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3568
3569 unsigned count = 1;
3570 if (CMD_ARGC == 3)
3571 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3572
3573 struct target *target = get_current_target(CMD_CTX);
3574 unsigned wordsize;
3575 switch (CMD_NAME[2]) {
3576 case 'd':
3577 wordsize = 8;
3578 break;
3579 case 'w':
3580 wordsize = 4;
3581 break;
3582 case 'h':
3583 wordsize = 2;
3584 break;
3585 case 'b':
3586 wordsize = 1;
3587 break;
3588 default:
3589 return ERROR_COMMAND_SYNTAX_ERROR;
3590 }
3591
3592 return target_fill_mem(target, address, fn, wordsize, value, count);
3593 }
3594
3595 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3596 target_addr_t *min_address, target_addr_t *max_address)
3597 {
3598 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3599 return ERROR_COMMAND_SYNTAX_ERROR;
3600
3601 /* a base address isn't always necessary,
3602 * default to 0x0 (i.e. don't relocate) */
3603 if (CMD_ARGC >= 2) {
3604 target_addr_t addr;
3605 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3606 image->base_address = addr;
3607 image->base_address_set = true;
3608 } else
3609 image->base_address_set = false;
3610
3611 image->start_address_set = false;
3612
3613 if (CMD_ARGC >= 4)
3614 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3615 if (CMD_ARGC == 5) {
3616 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3617 /* use size (given) to find max (required) */
3618 *max_address += *min_address;
3619 }
3620
3621 if (*min_address > *max_address)
3622 return ERROR_COMMAND_SYNTAX_ERROR;
3623
3624 return ERROR_OK;
3625 }
3626
3627 COMMAND_HANDLER(handle_load_image_command)
3628 {
3629 uint8_t *buffer;
3630 size_t buf_cnt;
3631 uint32_t image_size;
3632 target_addr_t min_address = 0;
3633 target_addr_t max_address = -1;
3634 struct image image;
3635
3636 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3637 &image, &min_address, &max_address);
3638 if (ERROR_OK != retval)
3639 return retval;
3640
3641 struct target *target = get_current_target(CMD_CTX);
3642
3643 struct duration bench;
3644 duration_start(&bench);
3645
3646 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3647 return ERROR_FAIL;
3648
3649 image_size = 0x0;
3650 retval = ERROR_OK;
3651 for (unsigned int i = 0; i < image.num_sections; i++) {
3652 buffer = malloc(image.sections[i].size);
3653 if (buffer == NULL) {
3654 command_print(CMD,
3655 "error allocating buffer for section (%d bytes)",
3656 (int)(image.sections[i].size));
3657 retval = ERROR_FAIL;
3658 break;
3659 }
3660
3661 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3662 if (retval != ERROR_OK) {
3663 free(buffer);
3664 break;
3665 }
3666
3667 uint32_t offset = 0;
3668 uint32_t length = buf_cnt;
3669
3670 /* DANGER!!! beware of unsigned comparison here!!! */
3671
3672 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3673 (image.sections[i].base_address < max_address)) {
3674
3675 if (image.sections[i].base_address < min_address) {
3676 /* clip addresses below */
3677 offset += min_address-image.sections[i].base_address;
3678 length -= offset;
3679 }
3680
3681 if (image.sections[i].base_address + buf_cnt > max_address)
3682 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3683
3684 retval = target_write_buffer(target,
3685 image.sections[i].base_address + offset, length, buffer + offset);
3686 if (retval != ERROR_OK) {
3687 free(buffer);
3688 break;
3689 }
3690 image_size += length;
3691 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3692 (unsigned int)length,
3693 image.sections[i].base_address + offset);
3694 }
3695
3696 free(buffer);
3697 }
3698
3699 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3700 command_print(CMD, "downloaded %" PRIu32 " bytes "
3701 "in %fs (%0.3f KiB/s)", image_size,
3702 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3703 }
3704
3705 image_close(&image);
3706
3707 return retval;
3708
3709 }
3710
3711 COMMAND_HANDLER(handle_dump_image_command)
3712 {
3713 struct fileio *fileio;
3714 uint8_t *buffer;
3715 int retval, retvaltemp;
3716 target_addr_t address, size;
3717 struct duration bench;
3718 struct target *target = get_current_target(CMD_CTX);
3719
3720 if (CMD_ARGC != 3)
3721 return ERROR_COMMAND_SYNTAX_ERROR;
3722
3723 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3724 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3725
3726 uint32_t buf_size = (size > 4096) ? 4096 : size;
3727 buffer = malloc(buf_size);
3728 if (!buffer)
3729 return ERROR_FAIL;
3730
3731 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3732 if (retval != ERROR_OK) {
3733 free(buffer);
3734 return retval;
3735 }
3736
3737 duration_start(&bench);
3738
3739 while (size > 0) {
3740 size_t size_written;
3741 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3742 retval = target_read_buffer(target, address, this_run_size, buffer);
3743 if (retval != ERROR_OK)
3744 break;
3745
3746 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3747 if (retval != ERROR_OK)
3748 break;
3749
3750 size -= this_run_size;
3751 address += this_run_size;
3752 }
3753
3754 free(buffer);
3755
3756 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3757 size_t filesize;
3758 retval = fileio_size(fileio, &filesize);
3759 if (retval != ERROR_OK)
3760 return retval;
3761 command_print(CMD,
3762 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3763 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3764 }
3765
3766 retvaltemp = fileio_close(fileio);
3767 if (retvaltemp != ERROR_OK)
3768 return retvaltemp;
3769
3770 return retval;
3771 }
3772
3773 enum verify_mode {
3774 IMAGE_TEST = 0,
3775 IMAGE_VERIFY = 1,
3776 IMAGE_CHECKSUM_ONLY = 2
3777 };
3778
3779 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3780 {
3781 uint8_t *buffer;
3782 size_t buf_cnt;
3783 uint32_t image_size;
3784 int retval;
3785 uint32_t checksum = 0;
3786 uint32_t mem_checksum = 0;
3787
3788 struct image image;
3789
3790 struct target *target = get_current_target(CMD_CTX);
3791
3792 if (CMD_ARGC < 1)
3793 return ERROR_COMMAND_SYNTAX_ERROR;
3794
3795 if (!target) {
3796 LOG_ERROR("no target selected");
3797 return ERROR_FAIL;
3798 }
3799
3800 struct duration bench;
3801 duration_start(&bench);
3802
3803 if (CMD_ARGC >= 2) {
3804 target_addr_t addr;
3805 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3806 image.base_address = addr;
3807 image.base_address_set = true;
3808 } else {
3809 image.base_address_set = false;
3810 image.base_address = 0x0;
3811 }
3812
3813 image.start_address_set = false;
3814
3815 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3816 if (retval != ERROR_OK)
3817 return retval;
3818
3819 image_size = 0x0;
3820 int diffs = 0;
3821 retval = ERROR_OK;
3822 for (unsigned int i = 0; i < image.num_sections; i++) {
3823 buffer = malloc(image.sections[i].size);
3824 if (buffer == NULL) {
3825 command_print(CMD,
3826 "error allocating buffer for section (%" PRIu32 " bytes)",
3827 image.sections[i].size);
3828 break;
3829 }
3830 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3831 if (retval != ERROR_OK) {
3832 free(buffer);
3833 break;
3834 }
3835
3836 if (verify >= IMAGE_VERIFY) {
3837 /* calculate checksum of image */
3838 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3839 if (retval != ERROR_OK) {
3840 free(buffer);
3841 break;
3842 }
3843
3844 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3845 if (retval != ERROR_OK) {
3846 free(buffer);
3847 break;
3848 }
3849 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3850 LOG_ERROR("checksum mismatch");
3851 free(buffer);
3852 retval = ERROR_FAIL;
3853 goto done;
3854 }
3855 if (checksum != mem_checksum) {
3856 /* failed crc checksum, fall back to a binary compare */
3857 uint8_t *data;
3858
3859 if (diffs == 0)
3860 LOG_ERROR("checksum mismatch - attempting binary compare");
3861
3862 data = malloc(buf_cnt);
3863
3864 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3865 if (retval == ERROR_OK) {
3866 uint32_t t;
3867 for (t = 0; t < buf_cnt; t++) {
3868 if (data[t] != buffer[t]) {
3869 command_print(CMD,
3870 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3871 diffs,
3872 (unsigned)(t + image.sections[i].base_address),
3873 data[t],
3874 buffer[t]);
3875 if (diffs++ >= 127) {
3876 command_print(CMD, "More than 128 errors, the rest are not printed.");
3877 free(data);
3878 free(buffer);
3879 goto done;
3880 }
3881 }
3882 keep_alive();
3883 }
3884 }
3885 free(data);
3886 }
3887 } else {
3888 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3889 image.sections[i].base_address,
3890 buf_cnt);
3891 }
3892
3893 free(buffer);
3894 image_size += buf_cnt;
3895 }
3896 if (diffs > 0)
3897 command_print(CMD, "No more differences found.");
3898 done:
3899 if (diffs > 0)
3900 retval = ERROR_FAIL;
3901 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3902 command_print(CMD, "verified %" PRIu32 " bytes "
3903 "in %fs (%0.3f KiB/s)", image_size,
3904 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3905 }
3906
3907 image_close(&image);
3908
3909 return retval;
3910 }
3911
3912 COMMAND_HANDLER(handle_verify_image_checksum_command)
3913 {
3914 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3915 }
3916
3917 COMMAND_HANDLER(handle_verify_image_command)
3918 {
3919 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3920 }
3921
3922 COMMAND_HANDLER(handle_test_image_command)
3923 {
3924 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3925 }
3926
3927 static int handle_bp_command_list(struct command_invocation *cmd)
3928 {
3929 struct target *target = get_current_target(cmd->ctx);
3930 struct breakpoint *breakpoint = target->breakpoints;
3931 while (breakpoint) {
3932 if (breakpoint->type == BKPT_SOFT) {
3933 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3934 breakpoint->length);
3935 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3936 breakpoint->address,
3937 breakpoint->length,
3938 breakpoint->set, buf);
3939 free(buf);
3940 } else {
3941 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3942 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3943 breakpoint->asid,
3944 breakpoint->length, breakpoint->set);
3945 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3946 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3947 breakpoint->address,
3948 breakpoint->length, breakpoint->set);
3949 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3950 breakpoint->asid);
3951 } else
3952 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3953 breakpoint->address,
3954 breakpoint->length, breakpoint->set);
3955 }
3956
3957 breakpoint = breakpoint->next;
3958 }
3959 return ERROR_OK;
3960 }
3961
3962 static int handle_bp_command_set(struct command_invocation *cmd,
3963 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3964 {
3965 struct target *target = get_current_target(cmd->ctx);
3966 int retval;
3967
3968 if (asid == 0) {
3969 retval = breakpoint_add(target, addr, length, hw);
3970 /* error is always logged in breakpoint_add(), do not print it again */
3971 if (ERROR_OK == retval)
3972 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3973
3974 } else if (addr == 0) {
3975 if (target->type->add_context_breakpoint == NULL) {
3976 LOG_ERROR("Context breakpoint not available");
3977 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3978 }
3979 retval = context_breakpoint_add(target, asid, length, hw);
3980 /* error is always logged in context_breakpoint_add(), do not print it again */
3981 if (ERROR_OK == retval)
3982 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3983
3984 } else {
3985 if (target->type->add_hybrid_breakpoint == NULL) {
3986 LOG_ERROR("Hybrid breakpoint not available");
3987 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3988 }
3989 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3990 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3991 if (ERROR_OK == retval)
3992 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3993 }
3994 return retval;
3995 }
3996
3997 COMMAND_HANDLER(handle_bp_command)
3998 {
3999 target_addr_t addr;
4000 uint32_t asid;
4001 uint32_t length;
4002 int hw = BKPT_SOFT;
4003
4004 switch (CMD_ARGC) {
4005 case 0:
4006 return handle_bp_command_list(CMD);
4007
4008 case 2:
4009 asid = 0;
4010 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4011 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4012 return handle_bp_command_set(CMD, addr, asid, length, hw);
4013
4014 case 3:
4015 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4016 hw = BKPT_HARD;
4017 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4018 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4019 asid = 0;
4020 return handle_bp_command_set(CMD, addr, asid, length, hw);
4021 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4022 hw = BKPT_HARD;
4023 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4024 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4025 addr = 0;
4026 return handle_bp_command_set(CMD, addr, asid, length, hw);
4027 }
4028 /* fallthrough */
4029 case 4:
4030 hw = BKPT_HARD;
4031 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4032 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4033 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4034 return handle_bp_command_set(CMD, addr, asid, length, hw);
4035
4036 default:
4037 return ERROR_COMMAND_SYNTAX_ERROR;
4038 }
4039 }
4040
4041 COMMAND_HANDLER(handle_rbp_command)
4042 {
4043 if (CMD_ARGC != 1)
4044 return ERROR_COMMAND_SYNTAX_ERROR;
4045
4046 struct target *target = get_current_target(CMD_CTX);
4047
4048 if (!strcmp(CMD_ARGV[0], "all")) {
4049 breakpoint_remove_all(target);
4050 } else {
4051 target_addr_t addr;
4052 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4053
4054 breakpoint_remove(target, addr);
4055 }
4056
4057 return ERROR_OK;
4058 }
4059
4060 COMMAND_HANDLER(handle_wp_command)
4061 {
4062 struct target *target = get_current_target(CMD_CTX);
4063
4064 if (CMD_ARGC == 0) {
4065 struct watchpoint *watchpoint = target->watchpoints;
4066
4067 while (watchpoint) {
4068 command_print(CMD, "address: " TARGET_ADDR_FMT
4069 ", len: 0x%8.8" PRIx32
4070 ", r/w/a: %i, value: 0x%8.8" PRIx32
4071 ", mask: 0x%8.8" PRIx32,
4072 watchpoint->address,
4073 watchpoint->length,
4074 (int)watchpoint->rw,
4075 watchpoint->value,
4076 watchpoint->mask);
4077 watchpoint = watchpoint->next;
4078 }
4079 return ERROR_OK;
4080 }
4081
4082 enum watchpoint_rw type = WPT_ACCESS;
4083 target_addr_t addr = 0;
4084 uint32_t length = 0;
4085 uint32_t data_value = 0x0;
4086 uint32_t data_mask = 0xffffffff;
4087
4088 switch (CMD_ARGC) {
4089 case 5:
4090 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4091 /* fall through */
4092 case 4:
4093 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4094 /* fall through */
4095 case 3:
4096 switch (CMD_ARGV[2][0]) {
4097 case 'r':
4098 type = WPT_READ;
4099 break;
4100 case 'w':
4101 type = WPT_WRITE;
4102 break;
4103 case 'a':
4104 type = WPT_ACCESS;
4105 break;
4106 default:
4107 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4108 return ERROR_COMMAND_SYNTAX_ERROR;
4109 }
4110 /* fall through */
4111 case 2:
4112 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4113 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4114 break;
4115
4116 default:
4117 return ERROR_COMMAND_SYNTAX_ERROR;
4118 }
4119
4120 int retval = watchpoint_add(target, addr, length, type,
4121 data_value, data_mask);
4122 if (ERROR_OK != retval)
4123 LOG_ERROR("Failure setting watchpoints");
4124
4125 return retval;
4126 }
4127
4128 COMMAND_HANDLER(handle_rwp_command)
4129 {
4130 if (CMD_ARGC != 1)
4131 return ERROR_COMMAND_SYNTAX_ERROR;
4132
4133 target_addr_t addr;
4134 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4135
4136 struct target *target = get_current_target(CMD_CTX);
4137 watchpoint_remove(target, addr);
4138
4139 return ERROR_OK;
4140 }
4141
4142 /**
4143 * Translate a virtual address to a physical address.
4144 *
4145 * The low-level target implementation must have logged a detailed error
4146 * which is forwarded to telnet/GDB session.
4147 */
4148 COMMAND_HANDLER(handle_virt2phys_command)
4149 {
4150 if (CMD_ARGC != 1)
4151 return ERROR_COMMAND_SYNTAX_ERROR;
4152
4153 target_addr_t va;
4154 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4155 target_addr_t pa;
4156
4157 struct target *target = get_current_target(CMD_CTX);
4158 int retval = target->type->virt2phys(target, va, &pa);
4159 if (retval == ERROR_OK)
4160 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4161
4162 return retval;
4163 }
4164
4165 static void writeData(FILE *f, const void *data, size_t len)
4166 {
4167 size_t written = fwrite(data, 1, len, f);
4168 if (written != len)
4169 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4170 }
4171
4172 static void writeLong(FILE *f, int l, struct target *target)
4173 {
4174 uint8_t val[4];
4175
4176 target_buffer_set_u32(target, val, l);
4177 writeData(f, val, 4);
4178 }
4179
4180 static void writeString(FILE *f, char *s)
4181 {
4182 writeData(f, s, strlen(s));
4183 }
4184
4185 typedef unsigned char UNIT[2]; /* unit of profiling */
4186
4187 /* Dump a gmon.out histogram file. */
4188 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
4189 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4190 {
4191 uint32_t i;
4192 FILE *f = fopen(filename, "w");
4193 if (f == NULL)
4194 return;
4195 writeString(f, "gmon");
4196 writeLong(f, 0x00000001, target); /* Version */
4197 writeLong(f, 0, target); /* padding */
4198 writeLong(f, 0, target); /* padding */
4199 writeLong(f, 0, target); /* padding */
4200
4201 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4202 writeData(f, &zero, 1);
4203
4204 /* figure out bucket size */
4205 uint32_t min;
4206 uint32_t max;
4207 if (with_range) {
4208 min = start_address;
4209 max = end_address;
4210 } else {
4211 min = samples[0];
4212 max = samples[0];
4213 for (i = 0; i < sampleNum; i++) {
4214 if (min > samples[i])
4215 min = samples[i];
4216 if (max < samples[i])
4217 max = samples[i];
4218 }
4219
4220 /* max should be (largest sample + 1)
4221 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4222 max++;
4223 }
4224
4225 int addressSpace = max - min;
4226 assert(addressSpace >= 2);
4227
4228 /* FIXME: What is the reasonable number of buckets?
4229 * The profiling result will be more accurate if there are enough buckets. */
4230 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
4231 uint32_t numBuckets = addressSpace / sizeof(UNIT);
4232 if (numBuckets > maxBuckets)
4233 numBuckets = maxBuckets;
4234 int *buckets = malloc(sizeof(int) * numBuckets);
4235 if (buckets == NULL) {
4236 fclose(f);
4237 return;
4238 }
4239 memset(buckets, 0, sizeof(int) * numBuckets);
4240 for (i = 0; i < sampleNum; i++) {
4241 uint32_t address = samples[i];
4242
4243 if ((address < min) || (max <= address))
4244 continue;
4245
4246 long long a = address - min;
4247 long long b = numBuckets;
4248 long long c = addressSpace;
4249 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4250 buckets[index_t]++;
4251 }
4252
4253 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4254 writeLong(f, min, target); /* low_pc */
4255 writeLong(f, max, target); /* high_pc */
4256 writeLong(f, numBuckets, target); /* # of buckets */
4257 float sample_rate = sampleNum / (duration_ms / 1000.0);
4258 writeLong(f, sample_rate, target);
4259 writeString(f, "seconds");
4260 for (i = 0; i < (15-strlen("seconds")); i++)
4261 writeData(f, &zero, 1);
4262 writeString(f, "s");
4263
4264 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4265
4266 char *data = malloc(2 * numBuckets);
4267 if (data != NULL) {
4268 for (i = 0; i < numBuckets; i++) {
4269 int val;
4270 val = buckets[i];
4271 if (val > 65535)
4272 val = 65535;
4273 data[i * 2] = val&0xff;
4274 data[i * 2 + 1] = (val >> 8) & 0xff;
4275 }
4276 free(buckets);
4277 writeData(f, data, numBuckets * 2);
4278 free(data);
4279 } else
4280 free(buckets);
4281
4282 fclose(f);
4283 }
4284
4285 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4286 * which will be used as a random sampling of PC */
4287 COMMAND_HANDLER(handle_profile_command)
4288 {
4289 struct target *target = get_current_target(CMD_CTX);
4290
4291 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4292 return ERROR_COMMAND_SYNTAX_ERROR;
4293
4294 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4295 uint32_t offset;
4296 uint32_t num_of_samples;
4297 int retval = ERROR_OK;
4298 bool halted_before_profiling = target->state == TARGET_HALTED;
4299
4300 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4301
4302 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4303 if (samples == NULL) {
4304 LOG_ERROR("No memory to store samples.");
4305 return ERROR_FAIL;
4306 }
4307
4308 uint64_t timestart_ms = timeval_ms();
4309 /**
4310 * Some cores let us sample the PC without the
4311 * annoying halt/resume step; for example, ARMv7 PCSR.
4312 * Provide a way to use that more efficient mechanism.
4313 */
4314 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4315 &num_of_samples, offset);
4316 if (retval != ERROR_OK) {
4317 free(samples);
4318 return retval;
4319 }
4320 uint32_t duration_ms = timeval_ms() - timestart_ms;
4321
4322 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4323
4324 retval = target_poll(target);
4325 if (retval != ERROR_OK) {
4326 free(samples);
4327 return retval;
4328 }
4329
4330 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4331 /* The target was halted before we started and is running now. Halt it,
4332 * for consistency. */
4333 retval = target_halt(target);
4334 if (retval != ERROR_OK) {
4335 free(samples);
4336 return retval;
4337 }
4338 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4339 /* The target was running before we started and is halted now. Resume
4340 * it, for consistency. */
4341 retval = target_resume(target, 1, 0, 0, 0);
4342 if (retval != ERROR_OK) {
4343 free(samples);
4344 return retval;
4345 }
4346 }
4347
4348 retval = target_poll(target);
4349 if (retval != ERROR_OK) {
4350 free(samples);
4351 return retval;
4352 }
4353
4354 uint32_t start_address = 0;
4355 uint32_t end_address = 0;
4356 bool with_range = false;
4357 if (CMD_ARGC == 4) {
4358 with_range = true;
4359 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4360 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4361 }
4362
4363 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4364 with_range, start_address, end_address, target, duration_ms);
4365 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4366
4367 free(samples);
4368 return retval;
4369 }
4370
4371 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
4372 {
4373 char *namebuf;
4374 Jim_Obj *nameObjPtr, *valObjPtr;
4375 int result;
4376
4377 namebuf = alloc_printf("%s(%d)", varname, idx);
4378 if (!namebuf)
4379 return JIM_ERR;
4380
4381 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4382 valObjPtr = Jim_NewIntObj(interp, val);
4383 if (!nameObjPtr || !valObjPtr) {
4384 free(namebuf);
4385 return JIM_ERR;
4386 }
4387
4388 Jim_IncrRefCount(nameObjPtr);
4389 Jim_IncrRefCount(valObjPtr);
4390 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
4391 Jim_DecrRefCount(interp, nameObjPtr);
4392 Jim_DecrRefCount(interp, valObjPtr);
4393 free(namebuf);
4394 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4395 return result;
4396 }
4397
4398 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4399 {
4400 struct command_context *context;
4401 struct target *target;
4402
4403 context = current_command_context(interp);
4404 assert(context != NULL);
4405
4406 target = get_current_target(context);
4407 if (target == NULL) {
4408 LOG_ERROR("mem2array: no current target");
4409 return JIM_ERR;
4410 }
4411
4412 return target_mem2array(interp, target, argc - 1, argv + 1);
4413 }
4414
4415 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4416 {
4417 long l;
4418 uint32_t width;
4419 int len;
4420 uint32_t addr;
4421 uint32_t count;
4422 uint32_t v;
4423 const char *varname;
4424 const char *phys;
4425 bool is_phys;
4426 int n, e, retval;
4427 uint32_t i;
4428
4429 /* argv[1] = name of array to receive the data
4430 * argv[2] = desired width
4431 * argv[3] = memory address
4432 * argv[4] = count of times to read
4433 */
4434
4435 if (argc < 4 || argc > 5) {
4436 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4437 return JIM_ERR;
4438 }
4439 varname = Jim_GetString(argv[0], &len);
4440 /* given "foo" get space for worse case "foo(%d)" .. add 20 */
4441
4442 e = Jim_GetLong(interp, argv[1], &l);
4443 width = l;
4444 if (e != JIM_OK)
4445 return e;
4446
4447 e = Jim_GetLong(interp, argv[2], &l);
4448 addr = l;
4449 if (e != JIM_OK)
4450 return e;
4451 e = Jim_GetLong(interp, argv[3], &l);
4452 len = l;
4453 if (e != JIM_OK)
4454 return e;
4455 is_phys = false;
4456 if (argc > 4) {
4457 phys = Jim_GetString(argv[4], &n);
4458 if (!strncmp(phys, "phys", n))
4459 is_phys = true;
4460 else
4461 return JIM_ERR;
4462 }
4463 switch (width) {
4464 case 8:
4465 width = 1;
4466 break;
4467 case 16:
4468 width = 2;
4469 break;
4470 case 32:
4471 width = 4;
4472 break;
4473 default:
4474 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4475 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
4476 return JIM_ERR;
4477 }
4478 if (len == 0) {
4479 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4480 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4481 return JIM_ERR;
4482 }
4483 if ((addr + (len * width)) < addr) {
4484 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4485 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4486 return JIM_ERR;
4487 }
4488 /* absurd transfer size? */
4489 if (len > 65536) {
4490 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4491 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
4492 return JIM_ERR;
4493 }
4494
4495 if ((width == 1) ||
4496 ((width == 2) && ((addr & 1) == 0)) ||
4497 ((width == 4) && ((addr & 3) == 0))) {
4498 /* all is well */
4499 } else {
4500 char buf[100];
4501 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4502 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte reads",
4503 addr,
4504 width);
4505 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4506 return JIM_ERR;
4507 }
4508
4509 /* Transfer loop */
4510
4511 /* index counter */
4512 n = 0;
4513
4514 size_t buffersize = 4096;
4515 uint8_t *buffer = malloc(buffersize);
4516 if (buffer == NULL)
4517 return JIM_ERR;
4518
4519 /* assume ok */
4520 e = JIM_OK;
4521 while (len) {
4522 /* Slurp... in buffer size chunks */
4523
4524 count = len; /* in objects.. */
4525 if (count > (buffersize / width))
4526 count = (buffersize / width);
4527
4528 if (is_phys)
4529 retval = target_read_phys_memory(target, addr, width, count, buffer);
4530 else
4531 retval = target_read_memory(target, addr, width, count, buffer);
4532 if (retval != ERROR_OK) {
4533 /* BOO !*/
4534 LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4535 addr,
4536 width,
4537 count);
4538 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4539 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4540 e = JIM_ERR;
4541 break;
4542 } else {
4543 v = 0; /* shut up gcc */
4544 for (i = 0; i < count ; i++, n++) {
4545 switch (width) {
4546 case 4:
4547 v = target_buffer_get_u32(target, &buffer[i*width]);
4548 break;
4549 case 2:
4550 v = target_buffer_get_u16(target, &buffer[i*width]);
4551 break;
4552 case 1:
4553 v = buffer[i] & 0x0ff;
4554 break;
4555 }
4556 new_int_array_element(interp, varname, n, v);
4557 }
4558 len -= count;
4559 addr += count * width;
4560 }
4561 }
4562
4563 free(buffer);
4564
4565 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4566
4567 return e;
4568 }
4569
4570 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4571 {
4572 char *namebuf;
4573 Jim_Obj *nameObjPtr, *valObjPtr;
4574 int result;
4575 long l;
4576
4577 namebuf = alloc_printf("%s(%d)", varname, idx);
4578 if (!namebuf)
4579 return JIM_ERR;
4580
4581 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4582 if (!nameObjPtr) {
4583 free(namebuf);
4584 return JIM_ERR;
4585 }
4586
4587 Jim_IncrRefCount(nameObjPtr);
4588 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4589 Jim_DecrRefCount(interp, nameObjPtr);
4590 free(namebuf);
4591 if (valObjPtr == NULL)
4592 return JIM_ERR;
4593
4594 result = Jim_GetLong(interp, valObjPtr, &l);
4595 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4596 *val = l;
4597 return result;
4598 }
4599
4600 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4601 {
4602 struct command_context *context;
4603 struct target *target;
4604
4605 context = current_command_context(interp);
4606 assert(context != NULL);
4607
4608 target = get_current_target(context);
4609 if (target == NULL) {
4610 LOG_ERROR("array2mem: no current target");
4611 return JIM_ERR;
4612 }
4613
4614 return target_array2mem(interp, target, argc-1, argv + 1);
4615 }
4616
4617 static int target_array2mem(Jim_Interp *interp, struct target *target,
4618 int argc, Jim_Obj *const *argv)
4619 {
4620 long l;
4621 uint32_t width;
4622 int len;
4623 uint32_t addr;
4624 uint32_t count;
4625 uint32_t v;
4626 const char *varname;
4627 const char *phys;
4628 bool is_phys;
4629 int n, e, retval;
4630 uint32_t i;
4631
4632 /* argv[1] = name of array to get the data
4633 * argv[2] = desired width
4634 * argv[3] = memory address
4635 * argv[4] = count to write
4636 */
4637 if (argc < 4 || argc > 5) {
4638 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4639 return JIM_ERR;
4640 }
4641 varname = Jim_GetString(argv[0], &len);
4642 /* given "foo" get space for worse case "foo(%d)" .. add 20 */
4643
4644 e = Jim_GetLong(interp, argv[1], &l);
4645 width = l;
4646 if (e != JIM_OK)
4647 return e;
4648
4649 e = Jim_GetLong(interp, argv[2], &l);
4650 addr = l;
4651 if (e != JIM_OK)
4652 return e;
4653 e = Jim_GetLong(interp, argv[3], &l);
4654 len = l;
4655 if (e != JIM_OK)
4656 return e;
4657 is_phys = false;
4658 if (argc > 4) {
4659 phys = Jim_GetString(argv[4], &n);
4660 if (!strncmp(phys, "phys", n))
4661 is_phys = true;
4662 else
4663 return JIM_ERR;
4664 }
4665 switch (width) {
4666 case 8:
4667 width = 1;
4668 break;
4669 case 16:
4670 width = 2;
4671 break;
4672 case 32:
4673 width = 4;
4674 break;
4675 default:
4676 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4677 Jim_AppendStrings(interp, Jim_GetResult(interp),
4678 "Invalid width param, must be 8/16/32", NULL);
4679 return JIM_ERR;
4680 }
4681 if (len == 0) {
4682 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4683 Jim_AppendStrings(interp, Jim_GetResult(interp),
4684 "array2mem: zero width read?", NULL);
4685 return JIM_ERR;
4686 }
4687 if ((addr + (len * width)) < addr) {
4688 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4689 Jim_AppendStrings(interp, Jim_GetResult(interp),
4690 "array2mem: addr + len - wraps to zero?", NULL);
4691 return JIM_ERR;
4692 }
4693 /* absurd transfer size? */
4694 if (len > 65536) {
4695 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4696 Jim_AppendStrings(interp, Jim_GetResult(interp),
4697 "array2mem: absurd > 64K item request", NULL);
4698 return JIM_ERR;
4699 }
4700
4701 if ((width == 1) ||
4702 ((width == 2) && ((addr & 1) == 0)) ||
4703 ((width == 4) && ((addr & 3) == 0))) {
4704 /* all is well */
4705 } else {
4706 char buf[100];
4707 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4708 sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte reads",
4709 addr,
4710 width);
4711 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4712 return JIM_ERR;
4713 }
4714
4715 /* Transfer loop */
4716
4717 /* index counter */
4718 n = 0;
4719 /* assume ok */
4720 e = JIM_OK;
4721
4722 size_t buffersize = 4096;
4723 uint8_t *buffer = malloc(buffersize);
4724 if (buffer == NULL)
4725 return JIM_ERR;
4726
4727 while (len) {
4728 /* Slurp... in buffer size chunks */
4729
4730 count = len; /* in objects.. */
4731 if (count > (buffersize / width))
4732 count = (buffersize / width);
4733
4734 v = 0; /* shut up gcc */
4735 for (i = 0; i < count; i++, n++) {
4736 get_int_array_element(interp, varname, n, &v);
4737 switch (width) {
4738 case 4:
4739 target_buffer_set_u32(target, &buffer[i * width], v);
4740 break;
4741 case 2:
4742 target_buffer_set_u16(target, &buffer[i * width], v);
4743 break;
4744 case 1:
4745 buffer[i] = v & 0x0ff;
4746 break;
4747 }
4748 }
4749 len -= count;
4750
4751 if (is_phys)
4752 retval = target_write_phys_memory(target, addr, width, count, buffer);
4753 else
4754 retval = target_write_memory(target, addr, width, count, buffer);
4755 if (retval != ERROR_OK) {
4756 /* BOO !*/
4757 LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4758 addr,
4759 width,
4760 count);
4761 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4762 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4763 e = JIM_ERR;
4764 break;
4765 }
4766 addr += count * width;
4767 }
4768
4769 free(buffer);
4770
4771 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4772
4773 return e;
4774 }
4775
4776 /* FIX? should we propagate errors here rather than printing them
4777 * and continuing?
4778 */
4779 void target_handle_event(struct target *target, enum target_event e)
4780 {
4781 struct target_event_action *teap;
4782 int retval;
4783
4784 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4785 if (teap->event == e) {
4786 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4787 target->target_number,
4788 target_name(target),
4789 target_type_name(target),
4790 e,
4791 jim_nvp_value2name_simple(nvp_target_event, e)->name,
4792 Jim_GetString(teap->body, NULL));
4793
4794 /* Override current target by the target an event
4795 * is issued from (lot of scripts need it).
4796 * Return back to previous override as soon
4797 * as the handler processing is done */
4798 struct command_context *cmd_ctx = current_command_context(teap->interp);
4799 struct target *saved_target_override = cmd_ctx->current_target_override;
4800 cmd_ctx->current_target_override = target;
4801
4802 retval = Jim_EvalObj(teap->interp, teap->body);
4803
4804 cmd_ctx->current_target_override = saved_target_override;
4805
4806 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
4807 return;
4808
4809 if (retval == JIM_RETURN)
4810 retval = teap->interp->returnCode;
4811
4812 if (retval != JIM_OK) {
4813 Jim_MakeErrorMessage(teap->interp);
4814 LOG_USER("Error executing event %s on target %s:\n%s",
4815 jim_nvp_value2name_simple(nvp_target_event, e)->name,
4816 target_name(target),
4817 Jim_GetString(Jim_GetResult(teap->interp), NULL));
4818 /* clean both error code and stacktrace before return */
4819 Jim_Eval(teap->interp, "error \"\" \"\"");
4820 }
4821 }
4822 }
4823 }
4824
4825 /**
4826 * Returns true only if the target has a handler for the specified event.
4827 */
4828 bool target_has_event_action(struct target *target, enum target_event event)
4829 {
4830 struct target_event_action *teap;
4831
4832 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4833 if (teap->event == event)
4834 return true;
4835 }
4836 return false;
4837 }
4838
4839 enum target_cfg_param {
4840 TCFG_TYPE,
4841 TCFG_EVENT,
4842 TCFG_WORK_AREA_VIRT,
4843 TCFG_WORK_AREA_PHYS,
4844 TCFG_WORK_AREA_SIZE,
4845 TCFG_WORK_AREA_BACKUP,
4846 TCFG_ENDIAN,
4847 TCFG_COREID,
4848 TCFG_CHAIN_POSITION,
4849 TCFG_DBGBASE,
4850 TCFG_RTOS,
4851 TCFG_DEFER_EXAMINE,
4852 TCFG_GDB_PORT,
4853 TCFG_GDB_MAX_CONNECTIONS,
4854 };
4855
4856 static struct jim_nvp nvp_config_opts[] = {
4857 { .name = "-type", .value = TCFG_TYPE },
4858 { .name = "-event", .value = TCFG_EVENT },
4859 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4860 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4861 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4862 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4863 { .name = "-endian", .value = TCFG_ENDIAN },
4864 { .name = "-coreid", .value = TCFG_COREID },
4865 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4866 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4867 { .name = "-rtos", .value = TCFG_RTOS },
4868 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4869 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
4870 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
4871 { .name = NULL, .value = -1 }
4872 };
4873
4874 static int target_configure(struct jim_getopt_info *goi, struct target *target)
4875 {
4876 struct jim_nvp *n;
4877 Jim_Obj *o;
4878 jim_wide w;
4879 int e;
4880
4881 /* parse config or cget options ... */
4882 while (goi->argc > 0) {
4883 Jim_SetEmptyResult(goi->interp);
4884 /* jim_getopt_debug(goi); */
4885
4886 if (target->type->target_jim_configure) {
4887 /* target defines a configure function */
4888 /* target gets first dibs on parameters */
4889 e = (*(target->type->target_jim_configure))(target, goi);
4890 if (e == JIM_OK) {
4891 /* more? */
4892 continue;
4893 }
4894 if (e == JIM_ERR) {
4895 /* An error */
4896 return e;
4897 }
4898 /* otherwise we 'continue' below */
4899 }
4900 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
4901 if (e != JIM_OK) {
4902 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
4903 return e;
4904 }
4905 switch (n->value) {
4906 case TCFG_TYPE:
4907 /* not settable */
4908 if (goi->isconfigure) {
4909 Jim_SetResultFormatted(goi->interp,
4910 "not settable: %s", n->name);
4911 return JIM_ERR;
4912 } else {
4913 no_params:
4914 if (goi->argc != 0) {
4915 Jim_WrongNumArgs(goi->interp,
4916 goi->argc, goi->argv,
4917 "NO PARAMS");
4918 return JIM_ERR;
4919 }
4920 }
4921 Jim_SetResultString(goi->interp,
4922 target_type_name(target), -1);
4923 /* loop for more */
4924 break;
4925 case TCFG_EVENT:
4926 if (goi->argc == 0) {
4927 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4928 return JIM_ERR;
4929 }
4930
4931 e = jim_getopt_nvp(goi, nvp_target_event, &n);
4932 if (e != JIM_OK) {
4933 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
4934 return e;
4935 }
4936
4937 if (goi->isconfigure) {
4938 if (goi->argc != 1) {
4939 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4940 return JIM_ERR;
4941 }
4942 } else {
4943 if (goi->argc != 0) {
4944 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4945 return JIM_ERR;
4946 }
4947 }
4948
4949 {
4950 struct target_event_action *teap;
4951
4952 teap = target->event_action;
4953 /* replace existing? */
4954 while (teap) {
4955 if (teap->event == (enum target_event)n->value)
4956 break;
4957 teap = teap->next;
4958 }
4959
4960 if (goi->isconfigure) {
4961 /* START_DEPRECATED_TPIU */
4962 if (n->value == TARGET_EVENT_TRACE_CONFIG)
4963 LOG_INFO("DEPRECATED target event %s", n->name);
4964 /* END_DEPRECATED_TPIU */
4965
4966 bool replace = true;
4967 if (teap == NULL) {
4968 /* create new */
4969 teap = calloc(1, sizeof(*teap));
4970 replace = false;
4971 }
4972 teap->event = n->value;
4973 teap->interp = goi->interp;
4974 jim_getopt_obj(goi, &o);
4975 if (teap->body)
4976 Jim_DecrRefCount(teap->interp, teap->body);
4977 teap->body = Jim_DuplicateObj(goi->interp, o);
4978 /*
4979 * FIXME:
4980 * Tcl/TK - "tk events" have a nice feature.
4981 * See the "BIND" command.
4982 * We should support that here.
4983 * You can specify %X and %Y in the event code.
4984 * The idea is: %T - target name.
4985 * The idea is: %N - target number
4986 * The idea is: %E - event name.
4987 */
4988 Jim_IncrRefCount(teap->body);
4989
4990 if (!replace) {
4991 /* add to head of event list */
4992 teap->next = target->event_action;
4993 target->event_action = teap;
4994 }
4995 Jim_SetEmptyResult(goi->interp);
4996 } else {
4997 /* get */
4998 if (teap == NULL)
4999 Jim_SetEmptyResult(goi->interp);
5000 else
5001 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5002 }
5003 }
5004 /* loop for more */
5005 break;
5006
5007 case TCFG_WORK_AREA_VIRT:
5008 if (goi->isconfigure) {
5009 target_free_all_working_areas(target);
5010 e = jim_getopt_wide(goi, &w);
5011 if (e != JIM_OK)
5012 return e;
5013 target->working_area_virt = w;
5014 target->working_area_virt_spec = true;
5015 } else {
5016 if (goi->argc != 0)
5017 goto no_params;
5018 }
5019 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5020 /* loop for more */
5021 break;
5022
5023 case TCFG_WORK_AREA_PHYS:
5024 if (goi->isconfigure) {
5025 target_free_all_working_areas(target);
5026 e = jim_getopt_wide(goi, &w);
5027 if (e != JIM_OK)
5028 return e;
5029 target->working_area_phys = w;
5030 target->working_area_phys_spec = true;
5031 } else {
5032 if (goi->argc != 0)
5033 goto no_params;
5034 }
5035 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5036 /* loop for more */
5037 break;
5038
5039 case TCFG_WORK_AREA_SIZE:
5040 if (goi->isconfigure) {
5041 target_free_all_working_areas(target);
5042 e = jim_getopt_wide(goi, &w);
5043 if (e != JIM_OK)
5044 return e;
5045 target->working_area_size = w;
5046 } else {
5047 if (goi->argc != 0)
5048 goto no_params;
5049 }
5050 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5051 /* loop for more */
5052 break;
5053
5054 case TCFG_WORK_AREA_BACKUP:
5055 if (goi->isconfigure) {
5056 target_free_all_working_areas(target);
5057 e = jim_getopt_wide(goi, &w);
5058 if (e != JIM_OK)
5059 return e;
5060 /* make this exactly 1 or 0 */
5061 target->backup_working_area = (!!w);
5062 } else {
5063 if (goi->argc != 0)
5064 goto no_params;
5065 }
5066 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5067 /* loop for more e*/
5068 break;
5069
5070
5071 case TCFG_ENDIAN:
5072 if (goi->isconfigure) {
5073 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5074 if (e != JIM_OK) {
5075 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5076 return e;
5077 }
5078 target->endianness = n->value;
5079 } else {
5080 if (goi->argc != 0)
5081 goto no_params;
5082 }
5083 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5084 if (n->name == NULL) {
5085 target->endianness = TARGET_LITTLE_ENDIAN;
5086 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5087 }
5088 Jim_SetResultString(goi->interp, n->name, -1);
5089 /* loop for more */
5090 break;
5091
5092 case TCFG_COREID:
5093 if (goi->isconfigure) {
5094 e = jim_getopt_wide(goi, &w);
5095 if (e != JIM_OK)
5096 return e;
5097 target->coreid = (int32_t)w;
5098 } else {
5099 if (goi->argc != 0)
5100 goto no_params;
5101 }
5102 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5103 /* loop for more */
5104 break;
5105
5106 case TCFG_CHAIN_POSITION:
5107 if (goi->isconfigure) {
5108 Jim_Obj *o_t;
5109 struct jtag_tap *tap;
5110
5111 if (target->has_dap) {
5112 Jim_SetResultString(goi->interp,
5113 "target requires -dap parameter instead of -chain-position!", -1);
5114 return JIM_ERR;
5115 }
5116
5117 target_free_all_working_areas(target);
5118 e = jim_getopt_obj(goi, &o_t);
5119 if (e != JIM_OK)
5120 return e;
5121 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5122 if (tap == NULL)
5123 return JIM_ERR;
5124 target->tap = tap;
5125 target->tap_configured = true;
5126 } else {
5127 if (goi->argc != 0)
5128 goto no_params;
5129 }
5130 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5131 /* loop for more e*/
5132 break;
5133 case TCFG_DBGBASE:
5134 if (goi->isconfigure) {
5135 e = jim_getopt_wide(goi, &w);
5136 if (e != JIM_OK)
5137 return e;
5138 target->dbgbase = (uint32_t)w;
5139 target->dbgbase_set = true;
5140 } else {
5141 if (goi->argc != 0)
5142 goto no_params;
5143 }
5144 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5145 /* loop for more */
5146 break;
5147 case TCFG_RTOS:
5148 /* RTOS */
5149 {
5150 int result = rtos_create(goi, target);
5151 if (result != JIM_OK)
5152 return result;
5153 }
5154 /* loop for more */
5155 break;
5156
5157 case TCFG_DEFER_EXAMINE:
5158 /* DEFER_EXAMINE */
5159 target->defer_examine = true;
5160 /* loop for more */
5161 break;
5162
5163 case TCFG_GDB_PORT:
5164 if (goi->isconfigure) {
5165 struct command_context *cmd_ctx = current_command_context(goi->interp);
5166 if (cmd_ctx->mode != COMMAND_CONFIG) {
5167 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5168 return JIM_ERR;
5169 }
5170
5171 const char *s;
5172 e = jim_getopt_string(goi, &s, NULL);
5173 if (e != JIM_OK)
5174 return e;
5175 free(target->gdb_port_override);
5176 target->gdb_port_override = strdup(s);
5177 } else {
5178 if (goi->argc != 0)
5179 goto no_params;
5180 }
5181 Jim_SetResultString(goi->interp, target->gdb_port_override ? : "undefined", -1);
5182 /* loop for more */
5183 break;
5184
5185 case TCFG_GDB_MAX_CONNECTIONS:
5186 if (goi->isconfigure) {
5187 struct command_context *cmd_ctx = current_command_context(goi->interp);
5188 if (cmd_ctx->mode != COMMAND_CONFIG) {
5189 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5190 return JIM_ERR;
5191 }
5192
5193 e = jim_getopt_wide(goi, &w);
5194 if (e != JIM_OK)
5195 return e;
5196 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5197 } else {
5198 if (goi->argc != 0)
5199 goto no_params;
5200 }
5201 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5202 break;
5203 }
5204 } /* while (goi->argc) */
5205
5206
5207 /* done - we return */
5208 return JIM_OK;
5209 }
5210
5211 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5212 {
5213 struct command *c = jim_to_command(interp);
5214 struct jim_getopt_info goi;
5215
5216 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5217 goi.isconfigure = !strcmp(c->name, "configure");
5218 if (goi.argc < 1) {
5219 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5220 "missing: -option ...");
5221 return JIM_ERR;
5222 }
5223 struct command_context *cmd_ctx = current_command_context(interp);
5224 assert(cmd_ctx);
5225 struct target *target = get_current_target(cmd_ctx);
5226 return target_configure(&goi, target);
5227 }
5228
5229 static int jim_target_mem2array(Jim_Interp *interp,
5230 int argc, Jim_Obj *const *argv)
5231 {
5232 struct command_context *cmd_ctx = current_command_context(interp);
5233 assert(cmd_ctx);
5234 struct target *target = get_current_target(cmd_ctx);
5235 return target_mem2array(interp, target, argc - 1, argv + 1);
5236 }
5237
5238 static int jim_target_array2mem(Jim_Interp *interp,
5239 int argc, Jim_Obj *const *argv)
5240 {
5241 struct command_context *cmd_ctx = current_command_context(interp);
5242 assert(cmd_ctx);
5243 struct target *target = get_current_target(cmd_ctx);
5244 return target_array2mem(interp, target, argc - 1, argv + 1);
5245 }
5246
5247 static int jim_target_tap_disabled(Jim_Interp *interp)
5248 {
5249 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5250 return JIM_ERR;
5251 }
5252
5253 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5254 {
5255 bool allow_defer = false;
5256
5257 struct jim_getopt_info goi;
5258 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5259 if (goi.argc > 1) {
5260 const char *cmd_name = Jim_GetString(argv[0], NULL);
5261 Jim_SetResultFormatted(goi.interp,
5262 "usage: %s ['allow-defer']", cmd_name);
5263 return JIM_ERR;
5264 }
5265 if (goi.argc > 0 &&
5266 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5267 /* consume it */
5268 Jim_Obj *obj;
5269 int e = jim_getopt_obj(&goi, &obj);
5270 if (e != JIM_OK)
5271 return e;
5272 allow_defer = true;
5273 }
5274
5275 struct command_context *cmd_ctx = current_command_context(interp);
5276 assert(cmd_ctx);
5277 struct target *target = get_current_target(cmd_ctx);
5278 if (!target->tap->enabled)
5279 return jim_target_tap_disabled(interp);
5280
5281 if (allow_defer && target->defer_examine) {
5282 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5283 LOG_INFO("Use arp_examine command to examine it manually!");
5284 return JIM_OK;
5285 }
5286
5287 int e = target->type->examine(target);
5288 if (e != ERROR_OK)
5289 return JIM_ERR;
5290 return JIM_OK;
5291 }
5292
5293 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5294 {
5295 struct command_context *cmd_ctx = current_command_context(interp);
5296 assert(cmd_ctx);
5297 struct target *target = get_current_target(cmd_ctx);
5298
5299 Jim_SetResultBool(interp, target_was_examined(target));
5300 return JIM_OK;
5301 }
5302
5303 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5304 {
5305 struct command_context *cmd_ctx = current_command_context(interp);
5306 assert(cmd_ctx);
5307 struct target *target = get_current_target(cmd_ctx);
5308
5309 Jim_SetResultBool(interp, target->defer_examine);
5310 return JIM_OK;
5311 }
5312
5313 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5314 {
5315 if (argc != 1) {
5316 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5317 return JIM_ERR;
5318 }
5319 struct command_context *cmd_ctx = current_command_context(interp);
5320 assert(cmd_ctx);
5321 struct target *target = get_current_target(cmd_ctx);
5322
5323 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5324 return JIM_ERR;
5325
5326 return JIM_OK;
5327 }
5328
5329 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5330 {
5331 if (argc != 1) {
5332 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5333 return JIM_ERR;
5334 }
5335 struct command_context *cmd_ctx = current_command_context(interp);
5336 assert(cmd_ctx);
5337 struct target *target = get_current_target(cmd_ctx);
5338 if (!target->tap->enabled)
5339 return jim_target_tap_disabled(interp);
5340
5341 int e;
5342 if (!(target_was_examined(target)))
5343 e = ERROR_TARGET_NOT_EXAMINED;
5344 else
5345 e = target->type->poll(target);
5346 if (e != ERROR_OK)
5347 return JIM_ERR;
5348 return JIM_OK;
5349 }
5350
5351 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5352 {
5353 struct jim_getopt_info goi;
5354 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5355
5356 if (goi.argc != 2) {
5357 Jim_WrongNumArgs(interp, 0, argv,
5358 "([tT]|[fF]|assert|deassert) BOOL");
5359 return JIM_ERR;
5360 }
5361
5362 struct jim_nvp *n;
5363 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5364 if (e != JIM_OK) {
5365 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5366 return e;
5367 }
5368 /* the halt or not param */
5369 jim_wide a;
5370 e = jim_getopt_wide(&goi, &a);
5371 if (e != JIM_OK)
5372 return e;
5373
5374 struct command_context *cmd_ctx = current_command_context(interp);
5375 assert(cmd_ctx);
5376 struct target *target = get_current_target(cmd_ctx);
5377 if (!target->tap->enabled)
5378 return jim_target_tap_disabled(interp);
5379
5380 if (!target->type->assert_reset || !target->type->deassert_reset) {
5381 Jim_SetResultFormatted(interp,
5382 "No target-specific reset for %s",
5383 target_name(target));
5384 return JIM_ERR;
5385 }
5386
5387 if (target->defer_examine)
5388 target_reset_examined(target);
5389
5390 /* determine if we should halt or not. */
5391 target->reset_halt = (a != 0);
5392 /* When this happens - all workareas are invalid. */
5393 target_free_all_working_areas_restore(target, 0);
5394
5395 /* do the assert */
5396 if (n->value == NVP_ASSERT)
5397 e = target->type->assert_reset(target);
5398 else
5399 e = target->type->deassert_reset(target);
5400 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5401 }
5402
5403 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5404 {
5405 if (argc != 1) {
5406 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5407 return JIM_ERR;
5408 }
5409 struct command_context *cmd_ctx = current_command_context(interp);
5410 assert(cmd_ctx);
5411 struct target *target = get_current_target(cmd_ctx);
5412 if (!target->tap->enabled)
5413 return jim_target_tap_disabled(interp);
5414 int e = target->type->halt(target);
5415 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5416 }
5417
5418 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5419 {
5420 struct jim_getopt_info goi;
5421 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5422
5423 /* params: <name> statename timeoutmsecs */
5424 if (goi.argc != 2) {
5425 const char *cmd_name = Jim_GetString(argv[0], NULL);
5426 Jim_SetResultFormatted(goi.interp,
5427 "%s <state_name> <timeout_in_msec>", cmd_name);
5428 return JIM_ERR;
5429 }
5430
5431 struct jim_nvp *n;
5432 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5433 if (e != JIM_OK) {
5434 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5435 return e;
5436 }
5437 jim_wide a;
5438 e = jim_getopt_wide(&goi, &a);
5439 if (e != JIM_OK)
5440 return e;
5441 struct command_context *cmd_ctx = current_command_context(interp);
5442 assert(cmd_ctx);
5443 struct target *target = get_current_target(cmd_ctx);
5444 if (!target->tap->enabled)
5445 return jim_target_tap_disabled(interp);
5446
5447 e = target_wait_state(target, n->value, a);
5448 if (e != ERROR_OK) {
5449 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
5450 Jim_SetResultFormatted(goi.interp,
5451 "target: %s wait %s fails (%#s) %s",
5452 target_name(target), n->name,
5453 eObj, target_strerror_safe(e));
5454 return JIM_ERR;
5455 }
5456 return JIM_OK;
5457 }
5458 /* List for human, Events defined for this target.
5459 * scripts/programs should use 'name cget -event NAME'
5460 */
5461 COMMAND_HANDLER(handle_target_event_list)
5462 {
5463 struct target *target = get_current_target(CMD_CTX);
5464 struct target_event_action *teap = target->event_action;
5465
5466 command_print(CMD, "Event actions for target (%d) %s\n",
5467 target->target_number,
5468 target_name(target));
5469 command_print(CMD, "%-25s | Body", "Event");
5470 command_print(CMD, "------------------------- | "
5471 "----------------------------------------");
5472 while (teap) {
5473 struct jim_nvp *opt = jim_nvp_value2name_simple(nvp_target_event, teap->event);
5474 command_print(CMD, "%-25s | %s",
5475 opt->name, Jim_GetString(teap->body, NULL));
5476 teap = teap->next;
5477 }
5478 command_print(CMD, "***END***");
5479 return ERROR_OK;
5480 }
5481 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5482 {
5483 if (argc != 1) {
5484 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5485 return JIM_ERR;
5486 }
5487 struct command_context *cmd_ctx = current_command_context(interp);
5488 assert(cmd_ctx);
5489 struct target *target = get_current_target(cmd_ctx);
5490 Jim_SetResultString(interp, target_state_name(target), -1);
5491 return JIM_OK;
5492 }
5493 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5494 {
5495 struct jim_getopt_info goi;
5496 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5497 if (goi.argc != 1) {
5498 const char *cmd_name = Jim_GetString(argv[0], NULL);
5499 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5500 return JIM_ERR;
5501 }
5502 struct jim_nvp *n;
5503 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5504 if (e != JIM_OK) {
5505 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5506 return e;
5507 }
5508 struct command_context *cmd_ctx = current_command_context(interp);
5509 assert(cmd_ctx);
5510 struct target *target = get_current_target(cmd_ctx);
5511 target_handle_event(target, n->value);
5512 return JIM_OK;
5513 }
5514
5515 static const struct command_registration target_instance_command_handlers[] = {
5516 {
5517 .name = "configure",
5518 .mode = COMMAND_ANY,
5519 .jim_handler = jim_target_configure,
5520 .help = "configure a new target for use",
5521 .usage = "[target_attribute ...]",
5522 },
5523 {
5524 .name = "cget",
5525 .mode = COMMAND_ANY,
5526 .jim_handler = jim_target_configure,
5527 .help = "returns the specified target attribute",
5528 .usage = "target_attribute",
5529 },
5530 {
5531 .name = "mwd",
5532 .handler = handle_mw_command,
5533 .mode = COMMAND_EXEC,
5534 .help = "Write 64-bit word(s) to target memory",
5535 .usage = "address data [count]",
5536 },
5537 {
5538 .name = "mww",
5539 .handler = handle_mw_command,
5540 .mode = COMMAND_EXEC,
5541 .help = "Write 32-bit word(s) to target memory",
5542 .usage = "address data [count]",
5543 },
5544 {
5545 .name = "mwh",
5546 .handler = handle_mw_command,
5547 .mode = COMMAND_EXEC,
5548 .help = "Write 16-bit half-word(s) to target memory",
5549 .usage = "address data [count]",
5550 },
5551 {
5552 .name = "mwb",
5553 .handler = handle_mw_command,
5554 .mode = COMMAND_EXEC,
5555 .help = "Write byte(s) to target memory",
5556 .usage = "address data [count]",
5557 },
5558 {
5559 .name = "mdd",
5560 .handler = handle_md_command,
5561 .mode = COMMAND_EXEC,
5562 .help = "Display target memory as 64-bit words",
5563 .usage = "address [count]",
5564 },
5565 {
5566 .name = "mdw",
5567 .handler = handle_md_command,
5568 .mode = COMMAND_EXEC,
5569 .help = "Display target memory as 32-bit words",
5570 .usage = "address [count]",
5571 },
5572 {
5573 .name = "mdh",
5574 .handler = handle_md_command,
5575 .mode = COMMAND_EXEC,
5576 .help = "Display target memory as 16-bit half-words",
5577 .usage = "address [count]",
5578 },
5579 {
5580 .name = "mdb",
5581 .handler = handle_md_command,
5582 .mode = COMMAND_EXEC,
5583 .help = "Display target memory as 8-bit bytes",
5584 .usage = "address [count]",
5585 },
5586 {
5587 .name = "array2mem",
5588 .mode = COMMAND_EXEC,
5589 .jim_handler = jim_target_array2mem,
5590 .help = "Writes Tcl array of 8/16/32 bit numbers "
5591 "to target memory",
5592 .usage = "arrayname bitwidth address count",
5593 },
5594 {
5595 .name = "mem2array",
5596 .mode = COMMAND_EXEC,
5597 .jim_handler = jim_target_mem2array,
5598 .help = "Loads Tcl array of 8/16/32 bit numbers "
5599 "from target memory",
5600 .usage = "arrayname bitwidth address count",
5601 },
5602 {
5603 .name = "eventlist",
5604 .handler = handle_target_event_list,
5605 .mode = COMMAND_EXEC,
5606 .help = "displays a table of events defined for this target",
5607 .usage = "",
5608 },
5609 {
5610 .name = "curstate",
5611 .mode = COMMAND_EXEC,
5612 .jim_handler = jim_target_current_state,
5613 .help = "displays the current state of this target",
5614 },
5615 {
5616 .name = "arp_examine",
5617 .mode = COMMAND_EXEC,
5618 .jim_handler = jim_target_examine,
5619 .help = "used internally for reset processing",
5620 .usage = "['allow-defer']",
5621 },
5622 {
5623 .name = "was_examined",
5624 .mode = COMMAND_EXEC,
5625 .jim_handler = jim_target_was_examined,
5626 .help = "used internally for reset processing",
5627 },
5628 {
5629 .name = "examine_deferred",
5630 .mode = COMMAND_EXEC,
5631 .jim_handler = jim_target_examine_deferred,
5632 .help = "used internally for reset processing",
5633 },
5634 {
5635 .name = "arp_halt_gdb",
5636 .mode = COMMAND_EXEC,
5637 .jim_handler = jim_target_halt_gdb,
5638 .help = "used internally for reset processing to halt GDB",
5639 },
5640 {
5641 .name = "arp_poll",
5642 .mode = COMMAND_EXEC,
5643 .jim_handler = jim_target_poll,
5644 .help = "used internally for reset processing",
5645 },
5646 {
5647 .name = "arp_reset",
5648 .mode = COMMAND_EXEC,
5649 .jim_handler = jim_target_reset,
5650 .help = "used internally for reset processing",
5651 },
5652 {
5653 .name = "arp_halt",
5654 .mode = COMMAND_EXEC,
5655 .jim_handler = jim_target_halt,
5656 .help = "used internally for reset processing",
5657 },
5658 {
5659 .name = "arp_waitstate",
5660 .mode = COMMAND_EXEC,
5661 .jim_handler = jim_target_wait_state,
5662 .help = "used internally for reset processing",
5663 },
5664 {
5665 .name = "invoke-event",
5666 .mode = COMMAND_EXEC,
5667 .jim_handler = jim_target_invoke_event,
5668 .help = "invoke handler for specified event",
5669 .usage = "event_name",
5670 },
5671 COMMAND_REGISTRATION_DONE
5672 };
5673
5674 static int target_create(struct jim_getopt_info *goi)
5675 {
5676 Jim_Obj *new_cmd;
5677 Jim_Cmd *cmd;
5678 const char *cp;
5679 int e;
5680 int x;
5681 struct target *target;
5682 struct command_context *cmd_ctx;
5683
5684 cmd_ctx = current_command_context(goi->interp);
5685 assert(cmd_ctx != NULL);
5686
5687 if (goi->argc < 3) {
5688 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5689 return JIM_ERR;
5690 }
5691
5692 /* COMMAND */
5693 jim_getopt_obj(goi, &new_cmd);
5694 /* does this command exist? */
5695 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5696 if (cmd) {
5697 cp = Jim_GetString(new_cmd, NULL);
5698 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5699 return JIM_ERR;
5700 }
5701
5702 /* TYPE */
5703 e = jim_getopt_string(goi, &cp, NULL);
5704 if (e != JIM_OK)
5705 return e;
5706 struct transport *tr = get_current_transport();
5707 if (tr->override_target) {
5708 e = tr->override_target(&cp);
5709 if (e != ERROR_OK) {
5710 LOG_ERROR("The selected transport doesn't support this target");
5711 return JIM_ERR;
5712 }
5713 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5714 }
5715 /* now does target type exist */
5716 for (x = 0 ; target_types[x] ; x++) {
5717 if (0 == strcmp(cp, target_types[x]->name)) {
5718 /* found */
5719 break;
5720 }
5721 }
5722 if (target_types[x] == NULL) {
5723 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5724 for (x = 0 ; target_types[x] ; x++) {
5725 if (target_types[x + 1]) {
5726 Jim_AppendStrings(goi->interp,
5727 Jim_GetResult(goi->interp),
5728 target_types[x]->name,
5729 ", ", NULL);
5730 } else {
5731 Jim_AppendStrings(goi->interp,
5732 Jim_GetResult(goi->interp),
5733 " or ",
5734 target_types[x]->name, NULL);
5735 }
5736 }
5737 return JIM_ERR;
5738 }
5739
5740 /* Create it */
5741 target = calloc(1, sizeof(struct target));
5742 if (!target) {
5743 LOG_ERROR("Out of memory");
5744 return JIM_ERR;
5745 }
5746
5747 /* set target number */
5748 target->target_number = new_target_number();
5749
5750 /* allocate memory for each unique target type */
5751 target->type = malloc(sizeof(struct target_type));
5752 if (!target->type) {
5753 LOG_ERROR("Out of memory");
5754 free(target);
5755 return JIM_ERR;
5756 }
5757
5758 memcpy(target->type, target_types[x], sizeof(struct target_type));
5759
5760 /* default to first core, override with -coreid */
5761 target->coreid = 0;
5762
5763 target->working_area = 0x0;
5764 target->working_area_size = 0x0;
5765 target->working_areas = NULL;
5766 target->backup_working_area = 0;
5767
5768 target->state = TARGET_UNKNOWN;
5769 target->debug_reason = DBG_REASON_UNDEFINED;
5770 target->reg_cache = NULL;
5771 target->breakpoints = NULL;
5772 target->watchpoints = NULL;
5773 target->next = NULL;
5774 target->arch_info = NULL;
5775
5776 target->verbose_halt_msg = true;
5777
5778 target->halt_issued = false;
5779
5780 /* initialize trace information */
5781 target->trace_info = calloc(1, sizeof(struct trace));
5782 if (!target->trace_info) {
5783 LOG_ERROR("Out of memory");
5784 free(target->type);
5785 free(target);
5786 return JIM_ERR;
5787 }
5788
5789 target->dbgmsg = NULL;
5790 target->dbg_msg_enabled = 0;
5791
5792 target->endianness = TARGET_ENDIAN_UNKNOWN;
5793
5794 target->rtos = NULL;
5795 target->rtos_auto_detect = false;
5796
5797 target->gdb_port_override = NULL;
5798 target->gdb_max_connections = 1;
5799
5800 /* Do the rest as "configure" options */
5801 goi->isconfigure = 1;
5802 e = target_configure(goi, target);
5803
5804 if (e == JIM_OK) {
5805 if (target->has_dap) {
5806 if (!target->dap_configured) {
5807 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5808 e = JIM_ERR;
5809 }
5810 } else {
5811 if (!target->tap_configured) {
5812 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5813 e = JIM_ERR;
5814 }
5815 }
5816 /* tap must be set after target was configured */
5817 if (target->tap == NULL)
5818 e = JIM_ERR;
5819 }
5820
5821 if (e != JIM_OK) {
5822 rtos_destroy(target);
5823 free(target->gdb_port_override);
5824 free(target->trace_info);
5825 free(target->type);
5826 free(target);
5827 return e;
5828 }
5829
5830 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5831 /* default endian to little if not specified */
5832 target->endianness = TARGET_LITTLE_ENDIAN;
5833 }
5834
5835 cp = Jim_GetString(new_cmd, NULL);
5836 target->cmd_name = strdup(cp);
5837 if (!target->cmd_name) {
5838 LOG_ERROR("Out of memory");
5839 rtos_destroy(target);
5840 free(target->gdb_port_override);
5841 free(target->trace_info);
5842 free(target->type);
5843 free(target);
5844 return JIM_ERR;
5845 }
5846
5847 if (target->type->target_create) {
5848 e = (*(target->type->target_create))(target, goi->interp);
5849 if (e != ERROR_OK) {
5850 LOG_DEBUG("target_create failed");
5851 free(target->cmd_name);
5852 rtos_destroy(target);
5853 free(target->gdb_port_override);
5854 free(target->trace_info);
5855 free(target->type);
5856 free(target);
5857 return JIM_ERR;
5858 }
5859 }
5860
5861 /* create the target specific commands */
5862 if (target->type->commands) {
5863 e = register_commands(cmd_ctx, NULL, target->type->commands);
5864 if (ERROR_OK != e)
5865 LOG_ERROR("unable to register '%s' commands", cp);
5866 }
5867
5868 /* now - create the new target name command */
5869 const struct command_registration target_subcommands[] = {
5870 {
5871 .chain = target_instance_command_handlers,
5872 },
5873 {
5874 .chain = target->type->commands,
5875 },
5876 COMMAND_REGISTRATION_DONE
5877 };
5878 const struct command_registration target_commands[] = {
5879 {
5880 .name = cp,
5881 .mode = COMMAND_ANY,
5882 .help = "target command group",
5883 .usage = "",
5884 .chain = target_subcommands,
5885 },
5886 COMMAND_REGISTRATION_DONE
5887 };
5888 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
5889 if (e != ERROR_OK) {
5890 if (target->type->deinit_target)
5891 target->type->deinit_target(target);
5892 free(target->cmd_name);
5893 rtos_destroy(target);
5894 free(target->gdb_port_override);
5895 free(target->trace_info);
5896 free(target->type);
5897 free(target);
5898 return JIM_ERR;
5899 }
5900
5901 /* append to end of list */
5902 append_to_list_all_targets(target);
5903
5904 cmd_ctx->current_target = target;
5905 return JIM_OK;
5906 }
5907
5908 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5909 {
5910 if (argc != 1) {
5911 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5912 return JIM_ERR;
5913 }
5914 struct command_context *cmd_ctx = current_command_context(interp);
5915 assert(cmd_ctx != NULL);
5916
5917 struct target *target = get_current_target_or_null(cmd_ctx);
5918 if (target)
5919 Jim_SetResultString(interp, target_name(target), -1);
5920 return JIM_OK;
5921 }
5922
5923 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5924 {
5925 if (argc != 1) {
5926 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5927 return JIM_ERR;
5928 }
5929 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5930 for (unsigned x = 0; NULL != target_types[x]; x++) {
5931 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5932 Jim_NewStringObj(interp, target_types[x]->name, -1));
5933 }
5934 return JIM_OK;
5935 }
5936
5937 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5938 {
5939 if (argc != 1) {
5940 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5941 return JIM_ERR;
5942 }
5943 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5944 struct target *target = all_targets;
5945 while (target) {
5946 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5947 Jim_NewStringObj(interp, target_name(target), -1));
5948 target = target->next;
5949 }
5950 return JIM_OK;
5951 }
5952
5953 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5954 {
5955 int i;
5956 const char *targetname;
5957 int retval, len;
5958 struct target *target = (struct target *) NULL;
5959 struct target_list *head, *curr, *new;
5960 curr = (struct target_list *) NULL;
5961 head = (struct target_list *) NULL;
5962
5963 retval = 0;
5964 LOG_DEBUG("%d", argc);
5965 /* argv[1] = target to associate in smp
5966 * argv[2] = target to associate in smp
5967 * argv[3] ...
5968 */
5969
5970 for (i = 1; i < argc; i++) {
5971
5972 targetname = Jim_GetString(argv[i], &len);
5973 target = get_target(targetname);
5974 LOG_DEBUG("%s ", targetname);
5975 if (target) {
5976 new = malloc(sizeof(struct target_list));
5977 new->target = target;
5978 new->next = (struct target_list *)NULL;
5979 if (head == (struct target_list *)NULL) {
5980 head = new;
5981 curr = head;
5982 } else {
5983 curr->next = new;
5984 curr = new;
5985 }
5986 }
5987 }
5988 /* now parse the list of cpu and put the target in smp mode*/
5989 curr = head;
5990
5991 while (curr != (struct target_list *)NULL) {
5992 target = curr->target;
5993 target->smp = 1;
5994 target->head = head;
5995 curr = curr->next;
5996 }
5997
5998 if (target && target->rtos)
5999 retval = rtos_smp_init(head->target);
6000
6001 return retval;
6002 }
6003
6004
6005 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6006 {
6007 struct jim_getopt_info goi;
6008 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6009 if (goi.argc < 3) {
6010 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6011 "<name> <target_type> [<target_options> ...]");
6012 return JIM_ERR;
6013 }
6014 return target_create(&goi);
6015 }
6016
6017 static const struct command_registration target_subcommand_handlers[] = {
6018 {
6019 .name = "init",
6020 .mode = COMMAND_CONFIG,
6021 .handler = handle_target_init_command,
6022 .help = "initialize targets",
6023 .usage = "",
6024 },
6025 {
6026 .name = "create",
6027 .mode = COMMAND_CONFIG,
6028 .jim_handler = jim_target_create,
6029 .usage = "name type '-chain-position' name [options ...]",
6030 .help = "Creates and selects a new target",
6031 },
6032 {
6033 .name = "current",
6034 .mode = COMMAND_ANY,
6035 .jim_handler = jim_target_current,
6036 .help = "Returns the currently selected target",
6037 },
6038 {
6039 .name = "types",
6040 .mode = COMMAND_ANY,
6041 .jim_handler = jim_target_types,
6042 .help = "Returns the available target types as "
6043 "a list of strings",
6044 },
6045 {
6046 .name = "names",
6047 .mode = COMMAND_ANY,
6048 .jim_handler = jim_target_names,
6049 .help = "Returns the names of all targets as a list of strings",
6050 },
6051 {
6052 .name = "smp",
6053 .mode = COMMAND_ANY,
6054 .jim_handler = jim_target_smp,
6055 .usage = "targetname1 targetname2 ...",
6056 .help = "gather several target in a smp list"
6057 },
6058
6059 COMMAND_REGISTRATION_DONE
6060 };
6061
6062 struct FastLoad {
6063 target_addr_t address;
6064 uint8_t *data;
6065 int length;
6066
6067 };
6068
6069 static int fastload_num;
6070 static struct FastLoad *fastload;
6071
6072 static void free_fastload(void)
6073 {
6074 if (fastload != NULL) {
6075 for (int i = 0; i < fastload_num; i++)
6076 free(fastload[i].data);
6077 free(fastload);
6078 fastload = NULL;
6079 }
6080 }
6081
6082 COMMAND_HANDLER(handle_fast_load_image_command)
6083 {
6084 uint8_t *buffer;
6085 size_t buf_cnt;
6086 uint32_t image_size;
6087 target_addr_t min_address = 0;
6088 target_addr_t max_address = -1;
6089
6090 struct image image;
6091
6092 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
6093 &image, &min_address, &max_address);
6094 if (ERROR_OK != retval)
6095 return retval;
6096
6097 struct duration bench;
6098 duration_start(&bench);
6099
6100 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6101 if (retval != ERROR_OK)
6102 return retval;
6103
6104 image_size = 0x0;
6105 retval = ERROR_OK;
6106 fastload_num = image.num_sections;
6107 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
6108 if (fastload == NULL) {
6109 command_print(CMD, "out of memory");
6110 image_close(&image);
6111 return ERROR_FAIL;
6112 }
6113 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
6114 for (unsigned int i = 0; i < image.num_sections; i++) {
6115 buffer = malloc(image.sections[i].size);
6116 if (buffer == NULL) {
6117 command_print(CMD, "error allocating buffer for section (%d bytes)",
6118 (int)(image.sections[i].size));
6119 retval = ERROR_FAIL;
6120 break;
6121 }
6122
6123 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6124 if (retval != ERROR_OK) {
6125 free(buffer);
6126 break;
6127 }
6128
6129 uint32_t offset = 0;
6130 uint32_t length = buf_cnt;
6131
6132 /* DANGER!!! beware of unsigned comparison here!!! */
6133
6134 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6135 (image.sections[i].base_address < max_address)) {
6136 if (image.sections[i].base_address < min_address) {
6137 /* clip addresses below */
6138 offset += min_address-image.sections[i].base_address;
6139 length -= offset;
6140 }
6141
6142 if (image.sections[i].base_address + buf_cnt > max_address)
6143 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6144
6145 fastload[i].address = image.sections[i].base_address + offset;
6146 fastload[i].data = malloc(length);
6147 if (fastload[i].data == NULL) {
6148 free(buffer);
6149 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6150 length);
6151 retval = ERROR_FAIL;
6152 break;
6153 }
6154 memcpy(fastload[i].data, buffer + offset, length);
6155 fastload[i].length = length;
6156
6157 image_size += length;
6158 command_print(CMD, "%u bytes written at address 0x%8.8x",
6159 (unsigned int)length,
6160 ((unsigned int)(image.sections[i].base_address + offset)));
6161 }
6162
6163 free(buffer);
6164 }
6165
6166 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
6167 command_print(CMD, "Loaded %" PRIu32 " bytes "
6168 "in %fs (%0.3f KiB/s)", image_size,
6169 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6170
6171 command_print(CMD,
6172 "WARNING: image has not been loaded to target!"
6173 "You can issue a 'fast_load' to finish loading.");
6174 }
6175
6176 image_close(&image);
6177
6178 if (retval != ERROR_OK)
6179 free_fastload();
6180
6181 return retval;
6182 }
6183
6184 COMMAND_HANDLER(handle_fast_load_command)
6185 {
6186 if (CMD_ARGC > 0)
6187 return ERROR_COMMAND_SYNTAX_ERROR;
6188 if (fastload == NULL) {
6189 LOG_ERROR("No image in memory");
6190 return ERROR_FAIL;
6191 }
6192 int i;
6193 int64_t ms = timeval_ms();
6194 int size = 0;
6195 int retval = ERROR_OK;
6196 for (i = 0; i < fastload_num; i++) {
6197 struct target *target = get_current_target(CMD_CTX);
6198 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6199 (unsigned int)(fastload[i].address),
6200 (unsigned int)(fastload[i].length));
6201 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6202 if (retval != ERROR_OK)
6203 break;
6204 size += fastload[i].length;
6205 }
6206 if (retval == ERROR_OK) {
6207 int64_t after = timeval_ms();
6208 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6209 }
6210 return retval;
6211 }
6212
6213 static const struct command_registration target_command_handlers[] = {
6214 {
6215 .name = "targets",
6216 .handler = handle_targets_command,
6217 .mode = COMMAND_ANY,
6218 .help = "change current default target (one parameter) "
6219 "or prints table of all targets (no parameters)",
6220 .usage = "[target]",
6221 },
6222 {
6223 .name = "target",
6224 .mode = COMMAND_CONFIG,
6225 .help = "configure target",
6226 .chain = target_subcommand_handlers,
6227 .usage = "",
6228 },
6229 COMMAND_REGISTRATION_DONE
6230 };
6231
6232 int target_register_commands(struct command_context *cmd_ctx)
6233 {
6234 return register_commands(cmd_ctx, NULL, target_command_handlers);
6235 }
6236
6237 static bool target_reset_nag = true;
6238
6239 bool get_target_reset_nag(void)
6240 {
6241 return target_reset_nag;
6242 }
6243
6244 COMMAND_HANDLER(handle_target_reset_nag)
6245 {
6246 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6247 &target_reset_nag, "Nag after each reset about options to improve "
6248 "performance");
6249 }
6250
6251 COMMAND_HANDLER(handle_ps_command)
6252 {
6253 struct target *target = get_current_target(CMD_CTX);
6254 char *display;
6255 if (target->state != TARGET_HALTED) {
6256 LOG_INFO("target not halted !!");
6257 return ERROR_OK;
6258 }
6259
6260 if ((target->rtos) && (target->rtos->type)
6261 && (target->rtos->type->ps_command)) {
6262 display = target->rtos->type->ps_command(target);
6263 command_print(CMD, "%s", display);
6264 free(display);
6265 return ERROR_OK;
6266 } else {
6267 LOG_INFO("failed");
6268 return ERROR_TARGET_FAILURE;
6269 }
6270 }
6271
6272 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6273 {
6274 if (text != NULL)
6275 command_print_sameline(cmd, "%s", text);
6276 for (int i = 0; i < size; i++)
6277 command_print_sameline(cmd, " %02x", buf[i]);
6278 command_print(cmd, " ");
6279 }
6280
6281 COMMAND_HANDLER(handle_test_mem_access_command)
6282 {
6283 struct target *target = get_current_target(CMD_CTX);
6284 uint32_t test_size;
6285 int retval = ERROR_OK;
6286
6287 if (target->state != TARGET_HALTED) {
6288 LOG_INFO("target not halted !!");
6289 return ERROR_FAIL;
6290 }
6291
6292 if (CMD_ARGC != 1)
6293 return ERROR_COMMAND_SYNTAX_ERROR;
6294
6295 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6296
6297 /* Test reads */
6298 size_t num_bytes = test_size + 4;
6299
6300 struct working_area *wa = NULL;
6301 retval = target_alloc_working_area(target, num_bytes, &wa);
6302 if (retval != ERROR_OK) {
6303 LOG_ERROR("Not enough working area");
6304 return ERROR_FAIL;
6305 }
6306
6307 uint8_t *test_pattern = malloc(num_bytes);
6308
6309 for (size_t i = 0; i < num_bytes; i++)
6310 test_pattern[i] = rand();
6311
6312 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6313 if (retval != ERROR_OK) {
6314 LOG_ERROR("Test pattern write failed");
6315 goto out;
6316 }
6317
6318 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6319 for (int size = 1; size <= 4; size *= 2) {
6320 for (int offset = 0; offset < 4; offset++) {
6321 uint32_t count = test_size / size;
6322 size_t host_bufsiz = (count + 2) * size + host_offset;
6323 uint8_t *read_ref = malloc(host_bufsiz);
6324 uint8_t *read_buf = malloc(host_bufsiz);
6325
6326 for (size_t i = 0; i < host_bufsiz; i++) {
6327 read_ref[i] = rand();
6328 read_buf[i] = read_ref[i];
6329 }
6330 command_print_sameline(CMD,
6331 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6332 size, offset, host_offset ? "un" : "");
6333
6334 struct duration bench;
6335 duration_start(&bench);
6336
6337 retval = target_read_memory(target, wa->address + offset, size, count,
6338 read_buf + size + host_offset);
6339
6340 duration_measure(&bench);
6341
6342 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6343 command_print(CMD, "Unsupported alignment");
6344 goto next;
6345 } else if (retval != ERROR_OK) {
6346 command_print(CMD, "Memory read failed");
6347 goto next;
6348 }
6349
6350 /* replay on host */
6351 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6352
6353 /* check result */
6354 int result = memcmp(read_ref, read_buf, host_bufsiz);
6355 if (result == 0) {
6356 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6357 duration_elapsed(&bench),
6358 duration_kbps(&bench, count * size));
6359 } else {
6360 command_print(CMD, "Compare failed");
6361 binprint(CMD, "ref:", read_ref, host_bufsiz);
6362 binprint(CMD, "buf:", read_buf, host_bufsiz);
6363 }
6364 next:
6365 free(read_ref);
6366 free(read_buf);
6367 }
6368 }
6369 }
6370
6371 out:
6372 free(test_pattern);
6373
6374 if (wa != NULL)
6375 target_free_working_area(target, wa);
6376
6377 /* Test writes */
6378 num_bytes = test_size + 4 + 4 + 4;
6379
6380 retval = target_alloc_working_area(target, num_bytes, &wa);
6381 if (retval != ERROR_OK) {
6382 LOG_ERROR("Not enough working area");
6383 return ERROR_FAIL;
6384 }
6385
6386 test_pattern = malloc(num_bytes);
6387
6388 for (size_t i = 0; i < num_bytes; i++)
6389 test_pattern[i] = rand();
6390
6391 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6392 for (int size = 1; size <= 4; size *= 2) {
6393 for (int offset = 0; offset < 4; offset++) {
6394 uint32_t count = test_size / size;
6395 size_t host_bufsiz = count * size + host_offset;
6396 uint8_t *read_ref = malloc(num_bytes);
6397 uint8_t *read_buf = malloc(num_bytes);
6398 uint8_t *write_buf = malloc(host_bufsiz);
6399
6400 for (size_t i = 0; i < host_bufsiz; i++)
6401 write_buf[i] = rand();
6402 command_print_sameline(CMD,
6403 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6404 size, offset, host_offset ? "un" : "");
6405
6406 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6407 if (retval != ERROR_OK) {
6408 command_print(CMD, "Test pattern write failed");
6409 goto nextw;
6410 }
6411
6412 /* replay on host */
6413 memcpy(read_ref, test_pattern, num_bytes);
6414 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6415
6416 struct duration bench;
6417 duration_start(&bench);
6418
6419 retval = target_write_memory(target, wa->address + size + offset, size, count,
6420 write_buf + host_offset);
6421
6422 duration_measure(&bench);
6423
6424 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6425 command_print(CMD, "Unsupported alignment");
6426 goto nextw;
6427 } else if (retval != ERROR_OK) {
6428 command_print(CMD, "Memory write failed");
6429 goto nextw;
6430 }
6431
6432 /* read back */
6433 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6434 if (retval != ERROR_OK) {
6435 command_print(CMD, "Test pattern write failed");
6436 goto nextw;
6437 }
6438
6439 /* check result */
6440 int result = memcmp(read_ref, read_buf, num_bytes);
6441 if (result == 0) {
6442 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6443 duration_elapsed(&bench),
6444 duration_kbps(&bench, count * size));
6445 } else {
6446 command_print(CMD, "Compare failed");
6447 binprint(CMD, "ref:", read_ref, num_bytes);
6448 binprint(CMD, "buf:", read_buf, num_bytes);
6449 }
6450 nextw:
6451 free(read_ref);
6452 free(read_buf);
6453 }
6454 }
6455 }
6456
6457 free(test_pattern);
6458
6459 if (wa != NULL)
6460 target_free_working_area(target, wa);
6461 return retval;
6462 }
6463
6464 static const struct command_registration target_exec_command_handlers[] = {
6465 {
6466 .name = "fast_load_image",
6467 .handler = handle_fast_load_image_command,
6468 .mode = COMMAND_ANY,
6469 .help = "Load image into server memory for later use by "
6470 "fast_load; primarily for profiling",
6471 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6472 "[min_address [max_length]]",
6473 },
6474 {
6475 .name = "fast_load",
6476 .handler = handle_fast_load_command,
6477 .mode = COMMAND_EXEC,
6478 .help = "loads active fast load image to current target "
6479 "- mainly for profiling purposes",
6480 .usage = "",
6481 },
6482 {
6483 .name = "profile",
6484 .handler = handle_profile_command,
6485 .mode = COMMAND_EXEC,
6486 .usage = "seconds filename [start end]",
6487 .help = "profiling samples the CPU PC",
6488 },
6489 /** @todo don't register virt2phys() unless target supports it */
6490 {
6491 .name = "virt2phys",
6492 .handler = handle_virt2phys_command,
6493 .mode = COMMAND_ANY,
6494 .help = "translate a virtual address into a physical address",
6495 .usage = "virtual_address",
6496 },
6497 {
6498 .name = "reg",
6499 .handler = handle_reg_command,
6500 .mode = COMMAND_EXEC,
6501 .help = "display (reread from target with \"force\") or set a register; "
6502 "with no arguments, displays all registers and their values",
6503 .usage = "[(register_number|register_name) [(value|'force')]]",
6504 },
6505 {
6506 .name = "poll",
6507 .handler = handle_poll_command,
6508 .mode = COMMAND_EXEC,
6509 .help = "poll target state; or reconfigure background polling",
6510 .usage = "['on'|'off']",
6511 },
6512 {
6513 .name = "wait_halt",
6514 .handler = handle_wait_halt_command,
6515 .mode = COMMAND_EXEC,
6516 .help = "wait up to the specified number of milliseconds "
6517 "(default 5000) for a previously requested halt",
6518 .usage = "[milliseconds]",
6519 },
6520 {
6521 .name = "halt",
6522 .handler = handle_halt_command,
6523 .mode = COMMAND_EXEC,
6524 .help = "request target to halt, then wait up to the specified "
6525 "number of milliseconds (default 5000) for it to complete",
6526 .usage = "[milliseconds]",
6527 },
6528 {
6529 .name = "resume",
6530 .handler = handle_resume_command,
6531 .mode = COMMAND_EXEC,
6532 .help = "resume target execution from current PC or address",
6533 .usage = "[address]",
6534 },
6535 {
6536 .name = "reset",
6537 .handler = handle_reset_command,
6538 .mode = COMMAND_EXEC,
6539 .usage = "[run|halt|init]",
6540 .help = "Reset all targets into the specified mode. "
6541 "Default reset mode is run, if not given.",
6542 },
6543 {
6544 .name = "soft_reset_halt",
6545 .handler = handle_soft_reset_halt_command,
6546 .mode = COMMAND_EXEC,
6547 .usage = "",
6548 .help = "halt the target and do a soft reset",
6549 },
6550 {
6551 .name = "step",
6552 .handler = handle_step_command,
6553 .mode = COMMAND_EXEC,
6554 .help = "step one instruction from current PC or address",
6555 .usage = "[address]",
6556 },
6557 {
6558 .name = "mdd",
6559 .handler = handle_md_command,
6560 .mode = COMMAND_EXEC,
6561 .help = "display memory double-words",
6562 .usage = "['phys'] address [count]",
6563 },
6564 {
6565 .name = "mdw",
6566 .handler = handle_md_command,
6567 .mode = COMMAND_EXEC,
6568 .help = "display memory words",
6569 .usage = "['phys'] address [count]",
6570 },
6571 {
6572 .name = "mdh",
6573 .handler = handle_md_command,
6574 .mode = COMMAND_EXEC,
6575 .help = "display memory half-words",
6576 .usage = "['phys'] address [count]",
6577 },
6578 {
6579 .name = "mdb",
6580 .handler = handle_md_command,
6581 .mode = COMMAND_EXEC,
6582 .help = "display memory bytes",
6583 .usage = "['phys'] address [count]",
6584 },
6585 {
6586 .name = "mwd",
6587 .handler = handle_mw_command,
6588 .mode = COMMAND_EXEC,
6589 .help = "write memory double-word",
6590 .usage = "['phys'] address value [count]",
6591 },
6592 {
6593 .name = "mww",
6594 .handler = handle_mw_command,
6595 .mode = COMMAND_EXEC,
6596 .help = "write memory word",
6597 .usage = "['phys'] address value [count]",
6598 },
6599 {
6600 .name = "mwh",
6601 .handler = handle_mw_command,
6602 .mode = COMMAND_EXEC,
6603 .help = "write memory half-word",
6604 .usage = "['phys'] address value [count]",
6605 },
6606 {
6607 .name = "mwb",
6608 .handler = handle_mw_command,
6609 .mode = COMMAND_EXEC,
6610 .help = "write memory byte",
6611 .usage = "['phys'] address value [count]",
6612 },
6613 {
6614 .name = "bp",
6615 .handler = handle_bp_command,
6616 .mode = COMMAND_EXEC,
6617 .help = "list or set hardware or software breakpoint",
6618 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6619 },
6620 {
6621 .name = "rbp",
6622 .handler = handle_rbp_command,
6623 .mode = COMMAND_EXEC,
6624 .help = "remove breakpoint",
6625 .usage = "'all' | address",
6626 },
6627 {
6628 .name = "wp",
6629 .handler = handle_wp_command,
6630 .mode = COMMAND_EXEC,
6631 .help = "list (no params) or create watchpoints",
6632 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6633 },
6634 {
6635 .name = "rwp",
6636 .handler = handle_rwp_command,
6637 .mode = COMMAND_EXEC,
6638 .help = "remove watchpoint",
6639 .usage = "address",
6640 },
6641 {
6642 .name = "load_image",
6643 .handler = handle_load_image_command,
6644 .mode = COMMAND_EXEC,
6645 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6646 "[min_address] [max_length]",
6647 },
6648 {
6649 .name = "dump_image",
6650 .handler = handle_dump_image_command,
6651 .mode = COMMAND_EXEC,
6652 .usage = "filename address size",
6653 },
6654 {
6655 .name = "verify_image_checksum",
6656 .handler = handle_verify_image_checksum_command,
6657 .mode = COMMAND_EXEC,
6658 .usage = "filename [offset [type]]",
6659 },
6660 {
6661 .name = "verify_image",
6662 .handler = handle_verify_image_command,
6663 .mode = COMMAND_EXEC,
6664 .usage = "filename [offset [type]]",
6665 },
6666 {
6667 .name = "test_image",
6668 .handler = handle_test_image_command,
6669 .mode = COMMAND_EXEC,
6670 .usage = "filename [offset [type]]",
6671 },
6672 {
6673 .name = "mem2array",
6674 .mode = COMMAND_EXEC,
6675 .jim_handler = jim_mem2array,
6676 .help = "read 8/16/32 bit memory and return as a TCL array "
6677 "for script processing",
6678 .usage = "arrayname bitwidth address count",
6679 },
6680 {
6681 .name = "array2mem",
6682 .mode = COMMAND_EXEC,
6683 .jim_handler = jim_array2mem,
6684 .help = "convert a TCL array to memory locations "
6685 "and write the 8/16/32 bit values",
6686 .usage = "arrayname bitwidth address count",
6687 },
6688 {
6689 .name = "reset_nag",
6690 .handler = handle_target_reset_nag,
6691 .mode = COMMAND_ANY,
6692 .help = "Nag after each reset about options that could have been "
6693 "enabled to improve performance.",
6694 .usage = "['enable'|'disable']",
6695 },
6696 {
6697 .name = "ps",
6698 .handler = handle_ps_command,
6699 .mode = COMMAND_EXEC,
6700 .help = "list all tasks",
6701 .usage = "",
6702 },
6703 {
6704 .name = "test_mem_access",
6705 .handler = handle_test_mem_access_command,
6706 .mode = COMMAND_EXEC,
6707 .help = "Test the target's memory access functions",
6708 .usage = "size",
6709 },
6710
6711 COMMAND_REGISTRATION_DONE
6712 };
6713 static int target_register_user_commands(struct command_context *cmd_ctx)
6714 {
6715 int retval = ERROR_OK;
6716 retval = target_request_register_commands(cmd_ctx);
6717 if (retval != ERROR_OK)
6718 return retval;
6719
6720 retval = trace_register_commands(cmd_ctx);
6721 if (retval != ERROR_OK)
6722 return retval;
6723
6724
6725 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6726 }

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)