/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2007-2010 Øyvind Harboe                                 *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) 2008, Duane Ellis                                       *
 *   openocd@duaneellis.com                                                *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2008 by Rick Altherr                                    *
 *   kc8apf@kc8apf.net                                                     *
 *                                                                         *
 *   Copyright (C) 2011 by Broadcom Corporation                            *
 *   Evan Hunter - ehunter@broadcom.com                                    *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011                                     *
 *   michel.jaouen@stericsson.com : smp minimum support                    *
 *                                                                         *
 *   Copyright (C) 2011 Andreas Fritiofson                                 *
 *   andreas.fritiofson@gmail.com                                          *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/align.h>
#include <helper/time_support.h>
#include <jtag/jtag.h>
#include <flash/nor/core.h>

#include "target.h"
#include "target_type.h"
#include "target_request.h"
#include "breakpoints.h"
#include "register.h"
#include "trace.h"
#include "image.h"
#include "rtos/rtos.h"
#include "transport/transport.h"
#include "arm_cti.h"
#include "smp.h"
#include "semihosting_common.h"

/* default halt wait timeout (ms) */
#define DEFAULT_HALT_TIMEOUT 5000

static int target_read_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info);
static int target_gdb_fileio_end_default(struct target *target, int retcode,
		int fileio_errno, bool ctrl_c);

/* targets */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type cortexm_target;
extern struct target_type cortexa_target;
extern struct target_type aarch64_target;
extern struct target_type cortexr4_target;
extern struct target_type arm11_target;
extern struct target_type ls1_sap_target;
extern struct target_type mips_m4k_target;
extern struct target_type mips_mips64_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type hla_target;
extern struct target_type nds32_v2_target;
extern struct target_type nds32_v3_target;
extern struct target_type nds32_v3m_target;
extern struct target_type esp32_target;
extern struct target_type esp32s2_target;
extern struct target_type or1k_target;
extern struct target_type quark_x10xx_target;
extern struct target_type quark_d20xx_target;
extern struct target_type stm8_target;
extern struct target_type riscv_target;
extern struct target_type mem_ap_target;
extern struct target_type esirisc_target;
extern struct target_type arcv2_target;

static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&esp32_target,
	&esp32s2_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};

struct target *all_targets;
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
static LIST_HEAD(empty_smp_targets);

static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};

static const char *target_strerror_safe(int err)
{
	const struct jim_nvp *n;

	n = jim_nvp_value2name_simple(nvp_error_target, err);
	if (!n->name)
		return "unknown";
	else
		return n->name;
}

static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};

static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};

const char *debug_reason_name(struct target *t)
{
	const char *cp;

	cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
			t->debug_reason)->name;
	if (!cp) {
		LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

const char *target_state_name(struct target *t)
{
	const char *cp;
	cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
	if (!cp) {
		LOG_ERROR("Invalid target state: %d", (int)(t->state));
		cp = "(*BUG*unknown*BUG*)";
	}

	if (!target_was_examined(t) && t->defer_examine)
		cp = "examine deferred";

	return cp;
}

const char *target_event_name(enum target_event event)
{
	const char *cp;
	cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
	if (!cp) {
		LOG_ERROR("Invalid target event: %d", (int)(event));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

const char *target_reset_mode_name(enum target_reset_mode reset_mode)
{
	const char *cp;
	cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
	if (!cp) {
		LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

/* determine the number of the new target */
static int new_target_number(void)
{
	struct target *t;
	int x;

	/* number is 0 based */
	x = -1;
	t = all_targets;
	while (t) {
		if (x < t->target_number)
			x = t->target_number;
		t = t->next;
	}
	return x + 1;
}

static void append_to_list_all_targets(struct target *target)
{
	struct target **t = &all_targets;

	while (*t)
		t = &((*t)->next);
	*t = target;
}

/* read a uint64_t from a buffer in target memory endianness */
uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u64(buffer);
	else
		return be_to_h_u64(buffer);
}

/* read a uint32_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u32(buffer);
	else
		return be_to_h_u32(buffer);
}

/* read a uint24_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u24(buffer);
	else
		return be_to_h_u24(buffer);
}

/* read a uint16_t from a buffer in target memory endianness */
uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u16(buffer);
	else
		return be_to_h_u16(buffer);
}

/* write a uint64_t to a buffer in target memory endianness */
void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u64_to_le(buffer, value);
	else
		h_u64_to_be(buffer, value);
}

/* write a uint32_t to a buffer in target memory endianness */
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u32_to_le(buffer, value);
	else
		h_u32_to_be(buffer, value);
}

/* write a uint24_t to a buffer in target memory endianness */
void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u24_to_le(buffer, value);
	else
		h_u24_to_be(buffer, value);
}

/* write a uint16_t to a buffer in target memory endianness */
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u16_to_le(buffer, value);
	else
		h_u16_to_be(buffer, value);
}

/* write a uint8_t to a buffer in target memory endianness */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}

/* read a uint64_t array from a buffer in target memory endianness */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
}

/* read a uint32_t array from a buffer in target memory endianness */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}

/* read a uint16_t array from a buffer in target memory endianness */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}

/* write a uint64_t array to a buffer in target memory endianness */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
}

/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
}

/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
}
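
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * decoding a word fetched with target_read_memory() through the endianness
 * helpers above, so callers never byte-swap by hand. The helper name
 * example_read_word() is hypothetical.
 */
#if 0
static int example_read_word(struct target *target, target_addr_t address,
		uint32_t *value)
{
	uint8_t buf[4];
	int retval = target_read_memory(target, address, 4, 1, buf);
	if (retval != ERROR_OK)
		return retval;
	/* convert from target memory endianness to host order */
	*value = target_buffer_get_u32(target, buf);
	return ERROR_OK;
}
#endif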

/* return a pointer to a configured target; id is name or number */
struct target *get_target(const char *id)
{
	struct target *target;

	/* try as Tcl target name */
	for (target = all_targets; target; target = target->next) {
		if (!target_name(target))
			continue;
		if (strcmp(id, target_name(target)) == 0)
			return target;
	}

	/* It's OK to remove this fallback sometime after August 2010 or so */

	/* no match, try as number */
	unsigned num;
	if (parse_uint(id, &num) != ERROR_OK)
		return NULL;

	for (target = all_targets; target; target = target->next) {
		if (target->target_number == (int)num) {
			LOG_WARNING("use '%s' as target identifier, not '%u'",
					target_name(target), num);
			return target;
		}
	}

	return NULL;
}
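
/*
 * Illustrative sketch (editor's addition): looking a target up by its Tcl
 * name, the preferred identifier; numeric ids are a deprecated fallback.
 * The configuration name "stm32f1x.cpu" is only an example.
 */
#if 0
static struct target *example_lookup(void)
{
	return get_target("stm32f1x.cpu");
}
#endif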

/* returns a pointer to the n-th configured target */
struct target *get_target_by_num(int num)
{
	struct target *target = all_targets;

	while (target) {
		if (target->target_number == num)
			return target;
		target = target->next;
	}

	return NULL;
}

struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);

	if (!target) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}

struct target *get_current_target_or_null(struct command_context *cmd_ctx)
{
	return cmd_ctx->current_target_override
		? cmd_ctx->current_target_override
		: cmd_ctx->current_target;
}

int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			int64_t t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}

int target_halt(struct target *target)
{
	int retval;
	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->halt(target);
	if (retval != ERROR_OK)
		return retval;

	target->halt_issued = true;
	target->halt_issued_time = timeval_ms();

	return ERROR_OK;
}
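
/*
 * Illustrative sketch (editor's addition): requesting a halt and polling
 * until the target reports TARGET_HALTED, mirroring the halt_issued
 * bookkeeping above. Reusing DEFAULT_HALT_TIMEOUT here is an assumption.
 */
#if 0
static int example_halt_and_wait(struct target *target)
{
	int retval = target_halt(target);
	if (retval != ERROR_OK)
		return retval;

	int64_t deadline = timeval_ms() + DEFAULT_HALT_TIMEOUT;
	while (target->state != TARGET_HALTED) {
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > deadline)
			return ERROR_TARGET_TIMEOUT;
		alive_sleep(10);	/* don't hammer the debug adapter */
	}
	return ERROR_OK;
}
#endif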

/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped.  (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction).  On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll = jtag_poll_get_enabled();
	jtag_poll_set_enabled(false);
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_set_enabled(save_poll);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
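
/*
 * Illustrative sketch (editor's addition): the two common ways to call
 * target_resume(), matching the parameter documentation above. Both helper
 * names are hypothetical.
 */
#if 0
static int example_resume_current(struct target *target)
{
	/* current=1: address ignored, resume from the saved PC;
	 * handle_breakpoints=1: skip a breakpoint parked on the resume PC */
	return target_resume(target, 1, 0, 1, 0);
}

static int example_resume_at(struct target *target, target_addr_t entry)
{
	/* current=0: start executing at 'entry', normal (non-debug) execution */
	return target_resume(target, 0, entry, 0, 0);
}
#endif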

static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
{
	char buf[100];
	int retval;
	struct jim_nvp *n;
	n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
	if (!n->name) {
		LOG_ERROR("invalid reset mode");
		return ERROR_FAIL;
	}

	struct target *target;
	for (target = all_targets; target; target = target->next)
		target_call_reset_callbacks(target, reset_mode);

	/* disable polling during reset to make reset event scripts
	 * more predictable, i.e. dr/irscan & pathmove in events will
	 * not have JTAG operations injected into the middle of a sequence.
	 */
	bool save_poll = jtag_poll_get_enabled();

	jtag_poll_set_enabled(false);

	sprintf(buf, "ocd_process_reset %s", n->name);
	retval = Jim_Eval(cmd->ctx->interp, buf);

	jtag_poll_set_enabled(save_poll);

	if (retval != JIM_OK) {
		Jim_MakeErrorMessage(cmd->ctx->interp);
		command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
		return ERROR_FAIL;
	}

	/* We want any events to be processed before the prompt */
	retval = target_call_timer_callbacks_now();

	for (target = all_targets; target; target = target->next) {
		target->type->check_reset(target);
		target->running_alg = false;
	}

	return retval;
}

static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}

static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}

/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}

static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}

/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}

/* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
 * Keep in sync */
int target_examine_one(struct target *target)
{
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target->type->examine(target);
	if (retval != ERROR_OK) {
		target_reset_examined(target);
		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
		return retval;
	}

	target_set_examined(target);
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return ERROR_OK;
}

static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	jtag_unregister_event_callback(jtag_enable_callback, target);

	return target_examine_one(target);
}

/* Targets that correctly implement init + examine, i.e.
 * no communication with target during init:
 *
 * XScale
 */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}

		if (target->defer_examine)
			continue;

		int retval2 = target_examine_one(target);
		if (retval2 != ERROR_OK) {
			LOG_WARNING("target %s examination failed", target_name(target));
			retval = retval2;
		}
	}
	return retval;
}

const char *target_type_name(struct target *target)
{
	return target->type->name;
}

static int target_soft_reset_halt(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt(target);
}

/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_param
 * @param entry_point
 * @param exit_point
 * @param timeout_ms
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		target_addr_t entry_point, target_addr_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
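
/*
 * Illustrative sketch (editor's addition): running a small downloaded
 * algorithm with one register parameter. The ARMv7-M arch_info, register
 * name "r0" and the 1000 ms timeout are assumptions for a Cortex-M style
 * target; init_reg_param(), buf_set_u32() and destroy_reg_param() are
 * existing OpenOCD helpers.
 */
#if 0
static int example_run_algorithm(struct target *target,
		target_addr_t code_addr, target_addr_t exit_addr, uint32_t arg)
{
	struct reg_param reg_params[1];
	struct armv7m_algorithm arm_info = {
		.common_magic = ARMV7M_COMMON_MAGIC,
		.core_mode = ARM_MODE_THREAD,
	};

	/* pass 'arg' to the algorithm in r0 */
	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, arg);

	int retval = target_run_algorithm(target, 0, NULL,
			1, reg_params, code_addr, exit_addr,
			1000 /* ms, assumed */, &arm_info);

	destroy_reg_param(&reg_params[0]);
	return retval;
}
#endif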

/**
 * Executes a target-specific native code algorithm and leaves it running.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_params
 * @param entry_point
 * @param exit_point
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t entry_point, target_addr_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}

/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_params
 * @param exit_point
 * @param timeout_ms
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}

/**
 * Streams data to a circular buffer on target intended for consumption by code
 * running asynchronously on target.
 *
 * This is intended for applications where target-specific native code runs
 * on the target, receives data from the circular buffer, does something with
 * it (most likely writing it to a flash memory), and advances the circular
 * buffer pointer.
 *
 * This assumes that the helper algorithm has already been loaded to the target,
 * but has not been started yet. Given memory and register parameters are passed
 * to the algorithm.
 *
 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
 * following format:
 *
 * [buffer_start + 0, buffer_start + 4):
 *	Write Pointer address (aka head). Written and updated by this
 *	routine when new data is written to the circular buffer.
 * [buffer_start + 4, buffer_start + 8):
 *	Read Pointer address (aka tail). Updated by code running on the
 *	target after it consumes data.
 * [buffer_start + 8, buffer_start + buffer_size):
 *	Circular buffer contents.
 *
 * See contrib/loaders/flash/stm32f1x.S for an example.
 *
 * @param target used to run the algorithm
 * @param buffer address on the host where data to be sent is located
 * @param count number of blocks to send
 * @param block_size size in bytes of each block
 * @param num_mem_params count of memory-based params to pass to algorithm
 * @param mem_params memory-based params to pass to algorithm
 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params register-based params to pass to algorithm
 * @param buffer_start address on the target of the circular buffer structure
 * @param buffer_size size of the circular buffer structure
 * @param entry_point address on the target to execute to start the algorithm
 * @param exit_point address at which to set a breakpoint to catch the
 *	end of the algorithm; can be 0 if target triggers a breakpoint itself
 * @param arch_info
 */

int target_run_flash_async_algorithm(struct target *target,
		const uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
			(size_t) (buffer - buffer_orig), count, wp, rp);

		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set rp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, rp_addr, &rp);
		if (retval == ERROR_OK && rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}
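
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * roughly what the target-resident consumer of the FIFO protocol described
 * above does. Real loaders are written in assembly (see
 * contrib/loaders/flash/); program_block() and more_data_expected() are
 * hypothetical placeholders.
 */
#if 0
struct example_fifo {
	volatile uint32_t wp;	/* head: written by the host (this file) */
	volatile uint32_t rp;	/* tail: written by the target loader */
	uint8_t data[];		/* circular buffer contents */
};

static void example_loader(struct example_fifo *f, uint32_t fifo_end,
		uint32_t block_size)
{
	uint32_t start = (uint32_t)(uintptr_t)f->data;

	while (more_data_expected()) {
		while (f->rp == f->wp)
			;	/* rp == wp means empty: wait for the host */
		if (f->wp == 0)
			return;	/* host aborted the transfer (see above) */

		program_block((const void *)(uintptr_t)f->rp, block_size);

		uint32_t rp = f->rp + block_size;
		if (rp >= fifo_end)
			rp = start;	/* wrap around */
		f->rp = rp;	/* publish consumption so the host can refill */
	}
}
#endif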

int target_run_read_async_algorithm(struct target *target,
		uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash read algorithm");
		return retval;
	}

	while (count > 0) {
		retval = target_read_u32(target, wp_addr, &wp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get write pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
			(size_t)(buffer - buffer_orig), count, wp, rp);

		if (wp == 0) {
			LOG_ERROR("flash read algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. */
		uint32_t thisrun_bytes;
		if (wp >= rp)
			thisrun_bytes = wp - rp;
		else
			thisrun_bytes = fifo_end_addr - rp;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * reading. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* Reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to read */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Read data from fifo */
		retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap read pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		rp += thisrun_bytes;
		if (rp >= fifo_end_addr)
			rp = fifo_start_addr;

		/* Store updated read pointer to target */
		retval = target_write_u32(target, rp_addr, rp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash read algorithm on target */
		target_write_u32(target, rp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash read algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set wp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, wp_addr, &wp);
		if (retval == ERROR_OK && wp == 0) {
			LOG_ERROR("flash read algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}

int target_read_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->read_memory) {
		LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->read_memory(target, address, size, count, buffer);
}

int target_read_phys_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->read_phys_memory) {
		LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->read_phys_memory(target, address, size, count, buffer);
}

int target_write_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->write_memory) {
		LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->write_memory(target, address, size, count, buffer);
}

int target_write_phys_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->write_phys_memory) {
		LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->write_phys_memory(target, address, size, count, buffer);
}
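
/*
 * Illustrative sketch (editor's addition): the size/count convention of the
 * accessors above -- 'size' is the access width in bytes, 'count' the number
 * of accesses, so both calls below transfer the same 64 bytes.
 */
#if 0
static int example_reads(struct target *target, target_addr_t address)
{
	uint8_t buf[64];
	/* sixteen 32-bit accesses */
	int retval = target_read_memory(target, address, 4, 16, buf);
	if (retval != ERROR_OK)
		return retval;
	/* sixty-four 8-bit accesses of the same range */
	return target_read_memory(target, address, 1, 64, buf);
}
#endif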

int target_add_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
		LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_breakpoint(target, breakpoint);
}

int target_add_context_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_context_breakpoint(target, breakpoint);
}

int target_add_hybrid_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_hybrid_breakpoint(target, breakpoint);
}

int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}

int target_add_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_watchpoint(target, watchpoint);
}

int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}

int target_hit_watchpoint(struct target *target,
		struct watchpoint **hit_watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!target->type->hit_watchpoint) {
		/* For backward compatibility: if hit_watchpoint is not implemented,
		 * return ERROR_FAIL so that gdb_server does not report nonsense
		 * information. */
		return ERROR_FAIL;
	}

	return target->type->hit_watchpoint(target, hit_watchpoint);
}

const char *target_get_gdb_arch(struct target *target)
{
	if (!target->type->get_gdb_arch)
		return NULL;
	return target->type->get_gdb_arch(target);
}

int target_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
{
	int result = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}

	result = target->type->get_gdb_reg_list(target, reg_list,
			reg_list_size, reg_class);

done:
	if (result != ERROR_OK) {
		*reg_list = NULL;
		*reg_list_size = 0;
	}
	return result;
}

int target_get_gdb_reg_list_noread(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
{
	if (target->type->get_gdb_reg_list_noread &&
			target->type->get_gdb_reg_list_noread(target, reg_list,
				reg_list_size, reg_class) == ERROR_OK)
		return ERROR_OK;
	return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
}

bool target_supports_gdb_connection(struct target *target)
{
	/*
	 * exclude all the targets that don't provide get_gdb_reg_list
	 * or that have explicit gdb_max_connections == 0
	 */
	return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
}

int target_step(struct target *target,
		int current, target_addr_t address, int handle_breakpoints)
{
	int retval;

	target_call_event_callbacks(target, TARGET_EVENT_STEP_START);

	retval = target->type->step(target, current, address, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_STEP_END);

	return retval;
}

int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->get_gdb_fileio_info(target, fileio_info);
}

int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
}

target_addr_t target_address_max(struct target *target)
{
	unsigned bits = target_address_bits(target);
	if (sizeof(target_addr_t) * 8 == bits)
		return (target_addr_t) -1;
	else
		return (((target_addr_t) 1) << bits) - 1;
}

unsigned target_address_bits(struct target *target)
{
	if (target->type->address_bits)
		return target->type->address_bits(target);
	return 32;
}

unsigned int target_data_bits(struct target *target)
{
	if (target->type->data_bits)
		return target->type->data_bits(target);
	return 32;
}
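
/*
 * Illustrative sketch (editor's addition): range-checking an access against
 * the target's address width using the helpers above. The helper name is
 * hypothetical.
 */
#if 0
static bool example_range_ok(struct target *target,
		target_addr_t address, uint32_t size)
{
	target_addr_t max = target_address_max(target);
	/* reject accesses that would wrap past the top of the address space */
	return size != 0 && address <= max && size - 1 <= max - address;
}
#endif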

static int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}

static int handle_target(void *priv);

static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}

static int target_init(struct command_context *cmd_ctx)
{
	struct target *target;
	int retval;

	for (target = all_targets; target; target = target->next) {
		retval = target_init_one(cmd_ctx, target);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!all_targets)
		return ERROR_OK;

	retval = target_register_user_commands(cmd_ctx);
	if (retval != ERROR_OK)
		return retval;

	retval = target_register_timer_callback(&handle_target,
			polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	retval = command_run_line(CMD_CTX, "init_targets");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_target_events");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}

int target_register_event_callback(int (*callback)(struct target *target,
		enum target_event event, void *priv), void *priv)
{
	struct target_event_callback **callbacks_p = &target_event_callbacks;

	if (!callback)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (*callbacks_p) {
		while ((*callbacks_p)->next)
			callbacks_p = &((*callbacks_p)->next);
		callbacks_p = &((*callbacks_p)->next);
	}

	(*callbacks_p) = malloc(sizeof(struct target_event_callback));
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->priv = priv;
	(*callbacks_p)->next = NULL;

	return ERROR_OK;
}
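
/*
 * Illustrative sketch (editor's addition): registering an event callback
 * with the signature expected above. Both helper names are hypothetical.
 */
#if 0
static int example_on_event(struct target *target, enum target_event event,
		void *priv)
{
	if (event == TARGET_EVENT_HALTED)
		LOG_INFO("target %s halted", target_name(target));
	return ERROR_OK;
}

static int example_register(void)
{
	return target_register_event_callback(example_on_event, NULL);
}
#endif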
1691
1692 int target_register_reset_callback(int (*callback)(struct target *target,
1693 enum target_reset_mode reset_mode, void *priv), void *priv)
1694 {
1695 struct target_reset_callback *entry;
1696
1697 if (!callback)
1698 return ERROR_COMMAND_SYNTAX_ERROR;
1699
1700 entry = malloc(sizeof(struct target_reset_callback));
1701 if (!entry) {
1702 LOG_ERROR("error allocating buffer for reset callback entry");
1703 return ERROR_COMMAND_SYNTAX_ERROR;
1704 }
1705
1706 entry->callback = callback;
1707 entry->priv = priv;
1708 list_add(&entry->list, &target_reset_callback_list);
1709
1710
1711 return ERROR_OK;
1712 }
1713
1714 int target_register_trace_callback(int (*callback)(struct target *target,
1715 size_t len, uint8_t *data, void *priv), void *priv)
1716 {
1717 struct target_trace_callback *entry;
1718
1719 if (!callback)
1720 return ERROR_COMMAND_SYNTAX_ERROR;
1721
1722 entry = malloc(sizeof(struct target_trace_callback));
1723 if (!entry) {
1724 LOG_ERROR("error allocating buffer for trace callback entry");
1725 return ERROR_FAIL;
1726 }
1727
1728 entry->callback = callback;
1729 entry->priv = priv;
1730 list_add(&entry->list, &target_trace_callback_list);
1731
1733 return ERROR_OK;
1734 }
1735
1736 int target_register_timer_callback(int (*callback)(void *priv),
1737 unsigned int time_ms, enum target_timer_type type, void *priv)
1738 {
1739 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1740
1741 if (!callback)
1742 return ERROR_COMMAND_SYNTAX_ERROR;
1743
1744 if (*callbacks_p) {
1745 while ((*callbacks_p)->next)
1746 callbacks_p = &((*callbacks_p)->next);
1747 callbacks_p = &((*callbacks_p)->next);
1748 }
1749
1750 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
if (!*callbacks_p) {
LOG_ERROR("error allocating buffer for timer callback entry");
return ERROR_FAIL;
}
1751 (*callbacks_p)->callback = callback;
1752 (*callbacks_p)->type = type;
1753 (*callbacks_p)->time_ms = time_ms;
1754 (*callbacks_p)->removed = false;
1755
1756 (*callbacks_p)->when = timeval_ms() + time_ms;
1757 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1758
1759 (*callbacks_p)->priv = priv;
1760 (*callbacks_p)->next = NULL;
1761
1762 return ERROR_OK;
1763 }
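/* Illustrative sketch only: registering a periodic timer that fires
 * roughly every 100 ms until unregistered; target_init() registers
 * handle_target this way. The name my_tick is hypothetical.
 *
 *	static int my_tick(void *priv)
 *	{
 *		LOG_DEBUG("tick");
 *		return ERROR_OK;
 *	}
 *
 *	target_register_timer_callback(my_tick, 100,
 *		TARGET_TIMER_TYPE_PERIODIC, NULL);
 *	...
 *	target_unregister_timer_callback(my_tick, NULL);
 */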
1764
1765 int target_unregister_event_callback(int (*callback)(struct target *target,
1766 enum target_event event, void *priv), void *priv)
1767 {
1768 struct target_event_callback **p = &target_event_callbacks;
1769 struct target_event_callback *c = target_event_callbacks;
1770
1771 if (!callback)
1772 return ERROR_COMMAND_SYNTAX_ERROR;
1773
1774 while (c) {
1775 struct target_event_callback *next = c->next;
1776 if ((c->callback == callback) && (c->priv == priv)) {
1777 *p = next;
1778 free(c);
1779 return ERROR_OK;
1780 } else
1781 p = &(c->next);
1782 c = next;
1783 }
1784
1785 return ERROR_OK;
1786 }
1787
1788 int target_unregister_reset_callback(int (*callback)(struct target *target,
1789 enum target_reset_mode reset_mode, void *priv), void *priv)
1790 {
1791 struct target_reset_callback *entry;
1792
1793 if (!callback)
1794 return ERROR_COMMAND_SYNTAX_ERROR;
1795
1796 list_for_each_entry(entry, &target_reset_callback_list, list) {
1797 if (entry->callback == callback && entry->priv == priv) {
1798 list_del(&entry->list);
1799 free(entry);
1800 break;
1801 }
1802 }
1803
1804 return ERROR_OK;
1805 }
1806
1807 int target_unregister_trace_callback(int (*callback)(struct target *target,
1808 size_t len, uint8_t *data, void *priv), void *priv)
1809 {
1810 struct target_trace_callback *entry;
1811
1812 if (!callback)
1813 return ERROR_COMMAND_SYNTAX_ERROR;
1814
1815 list_for_each_entry(entry, &target_trace_callback_list, list) {
1816 if (entry->callback == callback && entry->priv == priv) {
1817 list_del(&entry->list);
1818 free(entry);
1819 break;
1820 }
1821 }
1822
1823 return ERROR_OK;
1824 }
1825
1826 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1827 {
1828 if (!callback)
1829 return ERROR_COMMAND_SYNTAX_ERROR;
1830
1831 for (struct target_timer_callback *c = target_timer_callbacks;
1832 c; c = c->next) {
1833 if ((c->callback == callback) && (c->priv == priv)) {
1834 c->removed = true;
1835 return ERROR_OK;
1836 }
1837 }
1838
1839 return ERROR_FAIL;
1840 }
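/* Note: the entry is only marked ->removed here; it is actually
 * unlinked and freed later in target_call_timer_callbacks_check_time().
 * This lazy deletion makes it safe for a timer callback (e.g. a
 * one-shot unregistering itself via target_call_timer_callback) to
 * drop its entry while the callback list is being walked. */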
1841
1842 int target_call_event_callbacks(struct target *target, enum target_event event)
1843 {
1844 struct target_event_callback *callback = target_event_callbacks;
1845 struct target_event_callback *next_callback;
1846
1847 if (event == TARGET_EVENT_HALTED) {
1848 /* deliver TARGET_EVENT_GDB_HALT first, so GDB learns about the halt early */
1849 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1850 }
1851
1852 LOG_DEBUG("target event %i (%s) for core %s", event,
1853 target_event_name(event),
1854 target_name(target));
1855
1856 target_handle_event(target, event);
1857
1858 while (callback) {
1859 next_callback = callback->next;
1860 callback->callback(target, event, callback->priv);
1861 callback = next_callback;
1862 }
1863
1864 return ERROR_OK;
1865 }
1866
1867 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1868 {
1869 struct target_reset_callback *callback;
1870
1871 LOG_DEBUG("target reset %i (%s)", reset_mode,
1872 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1873
1874 list_for_each_entry(callback, &target_reset_callback_list, list)
1875 callback->callback(target, reset_mode, callback->priv);
1876
1877 return ERROR_OK;
1878 }
1879
1880 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1881 {
1882 struct target_trace_callback *callback;
1883
1884 list_for_each_entry(callback, &target_trace_callback_list, list)
1885 callback->callback(target, len, data, callback->priv);
1886
1887 return ERROR_OK;
1888 }
1889
1890 static int target_timer_callback_periodic_restart(
1891 struct target_timer_callback *cb, int64_t *now)
1892 {
1893 cb->when = *now + cb->time_ms;
1894 return ERROR_OK;
1895 }
1896
1897 static int target_call_timer_callback(struct target_timer_callback *cb,
1898 int64_t *now)
1899 {
1900 cb->callback(cb->priv);
1901
1902 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1903 return target_timer_callback_periodic_restart(cb, now);
1904
1905 return target_unregister_timer_callback(cb->callback, cb->priv);
1906 }
1907
1908 static int target_call_timer_callbacks_check_time(int checktime)
1909 {
1910 static bool callback_processing;
1911
1912 /* Do not allow nesting */
1913 if (callback_processing)
1914 return ERROR_OK;
1915
1916 callback_processing = true;
1917
1918 keep_alive();
1919
1920 int64_t now = timeval_ms();
1921
1922 /* Initialize to a default value well into the future. The loop
1923 * below will move it closer to now if there are callbacks that
1924 * want to be called sooner. */
1925 target_timer_next_event_value = now + 1000;
1926
1927 /* Store an address of the place containing a pointer to the
1928 * next item; initially, that's a standalone "root of the
1929 * list" variable. */
1930 struct target_timer_callback **callback = &target_timer_callbacks;
1931 while (callback && *callback) {
1932 if ((*callback)->removed) {
1933 struct target_timer_callback *p = *callback;
1934 *callback = (*callback)->next;
1935 free(p);
1936 continue;
1937 }
1938
1939 bool call_it = (*callback)->callback &&
1940 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1941 now >= (*callback)->when);
1942
1943 if (call_it)
1944 target_call_timer_callback(*callback, &now);
1945
1946 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1947 target_timer_next_event_value = (*callback)->when;
1948
1949 callback = &(*callback)->next;
1950 }
1951
1952 callback_processing = false;
1953 return ERROR_OK;
1954 }
1955
1956 int target_call_timer_callbacks()
1957 {
1958 return target_call_timer_callbacks_check_time(1);
1959 }
1960
1961 /* invoke periodic callbacks immediately */
1962 int target_call_timer_callbacks_now()
1963 {
1964 return target_call_timer_callbacks_check_time(0);
1965 }
1966
1967 int64_t target_timer_next_event(void)
1968 {
1969 return target_timer_next_event_value;
1970 }
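/* Illustrative sketch only: a server loop can bound its wait on
 * target_timer_next_event() so periodic callbacks run on time. The
 * wait_for_io() primitive is hypothetical.
 *
 *	int64_t sleep_ms = target_timer_next_event() - timeval_ms();
 *	if (sleep_ms > 0)
 *		wait_for_io(sleep_ms);
 *	target_call_timer_callbacks();
 */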
1971
1972 /* Prints the working area layout for debug purposes */
1973 static void print_wa_layout(struct target *target)
1974 {
1975 struct working_area *c = target->working_areas;
1976
1977 while (c) {
1978 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1979 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1980 c->address, c->address + c->size - 1, c->size);
1981 c = c->next;
1982 }
1983 }
1984
1985 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1986 static void target_split_working_area(struct working_area *area, uint32_t size)
1987 {
1988 assert(area->free); /* Shouldn't split an allocated area */
1989 assert(size <= area->size); /* Caller should guarantee this */
1990
1991 /* Split only if not already the right size */
1992 if (size < area->size) {
1993 struct working_area *new_wa = malloc(sizeof(*new_wa));
1994
1995 if (!new_wa)
1996 return;
1997
1998 new_wa->next = area->next;
1999 new_wa->size = area->size - size;
2000 new_wa->address = area->address + size;
2001 new_wa->backup = NULL;
2002 new_wa->user = NULL;
2003 new_wa->free = true;
2004
2005 area->next = new_wa;
2006 area->size = size;
2007
2008 /* If backup memory was allocated to this area, it has the wrong size
2009 * now, so free it; it will be reallocated if/when needed */
2010 free(area->backup);
2011 area->backup = NULL;
2012 }
2013 }
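/* Worked example: splitting a free 1024-byte area at 0x20000000 for a
 * 256-byte request shrinks the existing entry to 256 bytes at
 * 0x20000000 and links in a new free 768-byte area at 0x20000100. */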
2014
2015 /* Merge all adjacent free areas into one */
2016 static void target_merge_working_areas(struct target *target)
2017 {
2018 struct working_area *c = target->working_areas;
2019
2020 while (c && c->next) {
2021 assert(c->next->address == c->address + c->size); /* This is an invariant */
2022
2023 /* Find two adjacent free areas */
2024 if (c->free && c->next->free) {
2025 /* Merge the last into the first */
2026 c->size += c->next->size;
2027
2028 /* Remove the last */
2029 struct working_area *to_be_freed = c->next;
2030 c->next = c->next->next;
2031 free(to_be_freed->backup);
2032 free(to_be_freed);
2033
2034 /* If backup memory was allocated to the remaining area, it has
2035 * the wrong size now */
2036 free(c->backup);
2037 c->backup = NULL;
2038 } else {
2039 c = c->next;
2040 }
2041 }
2042 }
2043
2044 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2045 {
2046 /* Reevaluate working area address based on MMU state */
2047 if (!target->working_areas) {
2048 int retval;
2049 int enabled;
2050
2051 retval = target->type->mmu(target, &enabled);
2052 if (retval != ERROR_OK)
2053 return retval;
2054
2055 if (!enabled) {
2056 if (target->working_area_phys_spec) {
2057 LOG_DEBUG("MMU disabled, using physical "
2058 "address for working memory " TARGET_ADDR_FMT,
2059 target->working_area_phys);
2060 target->working_area = target->working_area_phys;
2061 } else {
2062 LOG_ERROR("No working memory available. "
2063 "Specify -work-area-phys to target.");
2064 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2065 }
2066 } else {
2067 if (target->working_area_virt_spec) {
2068 LOG_DEBUG("MMU enabled, using virtual "
2069 "address for working memory " TARGET_ADDR_FMT,
2070 target->working_area_virt);
2071 target->working_area = target->working_area_virt;
2072 } else {
2073 LOG_ERROR("No working memory available. "
2074 "Specify -work-area-virt to target.");
2075 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2076 }
2077 }
2078
2079 /* Set up initial working area on first call */
2080 struct working_area *new_wa = malloc(sizeof(*new_wa));
2081 if (new_wa) {
2082 new_wa->next = NULL;
2083 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2084 new_wa->address = target->working_area;
2085 new_wa->backup = NULL;
2086 new_wa->user = NULL;
2087 new_wa->free = true;
2088 }
2089
2090 target->working_areas = new_wa;
2091 }
2092
2093 /* only allocate in multiples of 4 bytes */
2094 if (size % 4)
2095 size = (size + 3) & (~3UL);
2096
2097 struct working_area *c = target->working_areas;
2098
2099 /* Find the first large enough working area */
2100 while (c) {
2101 if (c->free && c->size >= size)
2102 break;
2103 c = c->next;
2104 }
2105
2106 if (!c)
2107 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2108
2109 /* Split the working area into the requested size */
2110 target_split_working_area(c, size);
2111
2112 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2113 size, c->address);
2114
2115 if (target->backup_working_area) {
2116 if (!c->backup) {
2117 c->backup = malloc(c->size);
2118 if (!c->backup)
2119 return ERROR_FAIL;
2120 }
2121
2122 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2123 if (retval != ERROR_OK)
2124 return retval;
2125 }
2126
2127 /* mark as used, and return the new (reused) area */
2128 c->free = false;
2129 *area = c;
2130
2131 /* user pointer */
2132 c->user = area;
2133
2134 print_wa_layout(target);
2135
2136 return ERROR_OK;
2137 }
2138
2139 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2140 {
2141 int retval;
2142
2143 retval = target_alloc_working_area_try(target, size, area);
2144 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2145 LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
2146 return retval;
2148 }
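/* Illustrative sketch only: the usual allocate/use/free pattern, as
 * flash drivers and algorithm runners apply it. The code[] buffer is
 * hypothetical.
 *
 *	struct working_area *wa = NULL;
 *	int retval = target_alloc_working_area(target, sizeof(code), &wa);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	retval = target_write_buffer(target, wa->address, sizeof(code), code);
 *	...
 *	target_free_working_area(target, wa);
 */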
2149
2150 static int target_restore_working_area(struct target *target, struct working_area *area)
2151 {
2152 int retval = ERROR_OK;
2153
2154 if (target->backup_working_area && area->backup) {
2155 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2156 if (retval != ERROR_OK)
2157 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2158 area->size, area->address);
2159 }
2160
2161 return retval;
2162 }
2163
2164 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2165 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2166 {
2167 if (!area || area->free)
2168 return ERROR_OK;
2169
2170 int retval = ERROR_OK;
2171 if (restore) {
2172 retval = target_restore_working_area(target, area);
2173 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2174 if (retval != ERROR_OK)
2175 return retval;
2176 }
2177
2178 area->free = true;
2179
2180 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2181 area->size, area->address);
2182
2183 /* mark user pointer invalid */
2184 /* TODO: Is this really safe? It points to some previous caller's memory.
2185 * How could we know that the area pointer is still in that place and not
2186 * some other vital data? What's the purpose of this, anyway? */
2187 *area->user = NULL;
2188 area->user = NULL;
2189
2190 target_merge_working_areas(target);
2191
2192 print_wa_layout(target);
2193
2194 return retval;
2195 }
2196
2197 int target_free_working_area(struct target *target, struct working_area *area)
2198 {
2199 return target_free_working_area_restore(target, area, 1);
2200 }
2201
2202 /* Free resources and restore memory. If restoring memory fails,
2203 * free the resources anyway.
2204 */
2205 static void target_free_all_working_areas_restore(struct target *target, int restore)
2206 {
2207 struct working_area *c = target->working_areas;
2208
2209 LOG_DEBUG("freeing all working areas");
2210
2211 /* Loop through all areas, restoring the allocated ones and marking them as free */
2212 while (c) {
2213 if (!c->free) {
2214 if (restore)
2215 target_restore_working_area(target, c);
2216 c->free = true;
2217 *c->user = NULL; /* Same as above */
2218 c->user = NULL;
2219 }
2220 c = c->next;
2221 }
2222
2223 /* Run a merge pass to combine all areas into one */
2224 target_merge_working_areas(target);
2225
2226 print_wa_layout(target);
2227 }
2228
2229 void target_free_all_working_areas(struct target *target)
2230 {
2231 target_free_all_working_areas_restore(target, 1);
2232
2233 /* Now we have none or only one working area marked as free */
2234 if (target->working_areas) {
2235 /* Free the last one to allow on-the-fly moving and resizing */
2236 free(target->working_areas->backup);
2237 free(target->working_areas);
2238 target->working_areas = NULL;
2239 }
2240 }
2241
2242 /* Find the largest number of bytes that can be allocated */
2243 uint32_t target_get_working_area_avail(struct target *target)
2244 {
2245 struct working_area *c = target->working_areas;
2246 uint32_t max_size = 0;
2247
2248 if (!c)
2249 return target->working_area_size;
2250
2251 while (c) {
2252 if (c->free && max_size < c->size)
2253 max_size = c->size;
2254
2255 c = c->next;
2256 }
2257
2258 return max_size;
2259 }
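/* Illustrative sketch only: callers that just want "as much as
 * possible" first query the largest free block, then shrink their
 * request to fit; wanted is hypothetical.
 *
 *	uint32_t size = MIN(target_get_working_area_avail(target), wanted);
 *	struct working_area *wa;
 *	int retval = target_alloc_working_area_try(target, size, &wa);
 */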
2260
2261 static void target_destroy(struct target *target)
2262 {
2263 if (target->type->deinit_target)
2264 target->type->deinit_target(target);
2265
2266 if (target->semihosting)
2267 free(target->semihosting->basedir);
2268 free(target->semihosting);
2269
2270 jtag_unregister_event_callback(jtag_enable_callback, target);
2271
2272 struct target_event_action *teap = target->event_action;
2273 while (teap) {
2274 struct target_event_action *next = teap->next;
2275 Jim_DecrRefCount(teap->interp, teap->body);
2276 free(teap);
2277 teap = next;
2278 }
2279
2280 target_free_all_working_areas(target);
2281
2282 /* release the target's SMP list */
2283 if (target->smp) {
2284 struct target_list *head, *tmp;
2285
2286 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2287 list_del(&head->lh);
2288 head->target->smp = 0;
2289 free(head);
2290 }
2291 if (target->smp_targets != &empty_smp_targets)
2292 free(target->smp_targets);
2293 target->smp = 0;
2294 }
2295
2296 rtos_destroy(target);
2297
2298 free(target->gdb_port_override);
2299 free(target->type);
2300 free(target->trace_info);
2301 free(target->fileio_info);
2302 free(target->cmd_name);
2303 free(target);
2304 }
2305
2306 void target_quit(void)
2307 {
2308 struct target_event_callback *pe = target_event_callbacks;
2309 while (pe) {
2310 struct target_event_callback *t = pe->next;
2311 free(pe);
2312 pe = t;
2313 }
2314 target_event_callbacks = NULL;
2315
2316 struct target_timer_callback *pt = target_timer_callbacks;
2317 while (pt) {
2318 struct target_timer_callback *t = pt->next;
2319 free(pt);
2320 pt = t;
2321 }
2322 target_timer_callbacks = NULL;
2323
2324 for (struct target *target = all_targets; target;) {
2325 struct target *tmp;
2326
2327 tmp = target->next;
2328 target_destroy(target);
2329 target = tmp;
2330 }
2331
2332 all_targets = NULL;
2333 }
2334
2335 int target_arch_state(struct target *target)
2336 {
2337 int retval;
2338 if (!target) {
2339 LOG_WARNING("No target has been configured");
2340 return ERROR_OK;
2341 }
2342
2343 if (target->state != TARGET_HALTED)
2344 return ERROR_OK;
2345
2346 retval = target->type->arch_state(target);
2347 return retval;
2348 }
2349
2350 static int target_get_gdb_fileio_info_default(struct target *target,
2351 struct gdb_fileio_info *fileio_info)
2352 {
2353 /* If the target does not support semihosting, it does not need to
2354 provide a .get_gdb_fileio_info callback. Simply returning
2355 ERROR_FAIL makes gdb_server report "Txx" (target halted)
2356 every time. */
2357 return ERROR_FAIL;
2358 }
2359
2360 static int target_gdb_fileio_end_default(struct target *target,
2361 int retcode, int fileio_errno, bool ctrl_c)
2362 {
2363 return ERROR_OK;
2364 }
2365
2366 int target_profiling_default(struct target *target, uint32_t *samples,
2367 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2368 {
2369 struct timeval timeout, now;
2370
2371 gettimeofday(&timeout, NULL);
2372 timeval_add_time(&timeout, seconds, 0);
2373
2374 LOG_INFO("Starting profiling. Halting and resuming the"
2375 " target as often as we can...");
2376
2377 uint32_t sample_count = 0;
2378 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2379 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2380
2381 int retval = ERROR_OK;
2382 for (;;) {
2383 target_poll(target);
2384 if (target->state == TARGET_HALTED) {
2385 uint32_t t = buf_get_u32(reg->value, 0, 32);
2386 samples[sample_count++] = t;
2387 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2388 retval = target_resume(target, 1, 0, 0, 0);
2389 target_poll(target);
2390 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2391 } else if (target->state == TARGET_RUNNING) {
2392 /* We want to quickly sample the PC. */
2393 retval = target_halt(target);
2394 } else {
2395 LOG_INFO("Target is neither halted nor running, stopping profiling");
2396 retval = ERROR_OK;
2397 break;
2398 }
2399
2400 if (retval != ERROR_OK)
2401 break;
2402
2403 gettimeofday(&now, NULL);
2404 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2405 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2406 break;
2407 }
2408 }
2409
2410 *num_samples = sample_count;
2411 return retval;
2412 }
2413
2414 /* Writes of single aligned 16- or 32-bit words are guaranteed to use
2415 * the matching access size; any other data is transferred as quickly
2416 * as possible.
2417 */
2418 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2419 {
2420 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2421 size, address);
2422
2423 if (!target_was_examined(target)) {
2424 LOG_ERROR("Target not examined yet");
2425 return ERROR_FAIL;
2426 }
2427
2428 if (size == 0)
2429 return ERROR_OK;
2430
2431 if ((address + size - 1) < address) {
2432 /* GDB can request this when e.g. PC is 0xfffffffc */
2433 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2434 address,
2435 size);
2436 return ERROR_FAIL;
2437 }
2438
2439 return target->type->write_buffer(target, address, size, buffer);
2440 }
2441
2442 static int target_write_buffer_default(struct target *target,
2443 target_addr_t address, uint32_t count, const uint8_t *buffer)
2444 {
2445 uint32_t size;
2446 unsigned int data_bytes = target_data_bits(target) / 8;
2447
2448 /* Grow the access size toward the maximum, writing odd head bytes on the
2449 * way. The loop ensures the next pass has work left at the size we leave. */
2450 for (size = 1;
2451 size < data_bytes && count >= size * 2 + (address & size);
2452 size *= 2) {
2453 if (address & size) {
2454 int retval = target_write_memory(target, address, size, 1, buffer);
2455 if (retval != ERROR_OK)
2456 return retval;
2457 address += size;
2458 count -= size;
2459 buffer += size;
2460 }
2461 }
2462
2463 /* Write the data with as large access size as possible. */
2464 for (; size > 0; size /= 2) {
2465 uint32_t aligned = count - count % size;
2466 if (aligned > 0) {
2467 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2468 if (retval != ERROR_OK)
2469 return retval;
2470 address += aligned;
2471 count -= aligned;
2472 buffer += aligned;
2473 }
2474 }
2475
2476 return ERROR_OK;
2477 }
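/* Worked example of the two loops above with 32-bit data_bytes: a
 * 10-byte write to address 0x1001 is issued as
 *	1 byte  @ 0x1001	(head, align to 2)
 *	2 bytes @ 0x1002	(head, align to 4)
 *	4 bytes @ 0x1004	(one full word)
 *	2 bytes @ 0x1008, then 1 byte @ 0x100a	(shrinking tail)
 */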
2478
2479 /* Reads of single aligned 16- or 32-bit words are guaranteed to use
2480 * the matching access size; any other data is transferred as quickly
2481 * as possible.
2482 */
2483 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2484 {
2485 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2486 size, address);
2487
2488 if (!target_was_examined(target)) {
2489 LOG_ERROR("Target not examined yet");
2490 return ERROR_FAIL;
2491 }
2492
2493 if (size == 0)
2494 return ERROR_OK;
2495
2496 if ((address + size - 1) < address) {
2497 /* GDB can request this when e.g. PC is 0xfffffffc */
2498 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2499 address,
2500 size);
2501 return ERROR_FAIL;
2502 }
2503
2504 return target->type->read_buffer(target, address, size, buffer);
2505 }
2506
2507 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2508 {
2509 uint32_t size;
2510 unsigned int data_bytes = target_data_bits(target) / 8;
2511
2512 /* Grow the access size toward the maximum, reading odd head bytes on the
2513 * way. The loop ensures the next pass has work left at the size we leave. */
2514 for (size = 1;
2515 size < data_bytes && count >= size * 2 + (address & size);
2516 size *= 2) {
2517 if (address & size) {
2518 int retval = target_read_memory(target, address, size, 1, buffer);
2519 if (retval != ERROR_OK)
2520 return retval;
2521 address += size;
2522 count -= size;
2523 buffer += size;
2524 }
2525 }
2526
2527 /* Read the data with as large access size as possible. */
2528 for (; size > 0; size /= 2) {
2529 uint32_t aligned = count - count % size;
2530 if (aligned > 0) {
2531 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2532 if (retval != ERROR_OK)
2533 return retval;
2534 address += aligned;
2535 count -= aligned;
2536 buffer += aligned;
2537 }
2538 }
2539
2540 return ERROR_OK;
2541 }
2542
2543 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2544 {
2545 uint8_t *buffer;
2546 int retval;
2547 uint32_t i;
2548 uint32_t checksum = 0;
2549 if (!target_was_examined(target)) {
2550 LOG_ERROR("Target not examined yet");
2551 return ERROR_FAIL;
2552 }
2553 if (!target->type->checksum_memory) {
2554 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2555 return ERROR_FAIL;
2556 }
2557
2558 retval = target->type->checksum_memory(target, address, size, &checksum);
2559 if (retval != ERROR_OK) {
2560 buffer = malloc(size);
2561 if (!buffer) {
2562 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2563 return ERROR_FAIL;
2564 }
2565 retval = target_read_buffer(target, address, size, buffer);
2566 if (retval != ERROR_OK) {
2567 free(buffer);
2568 return retval;
2569 }
2570
2571 /* convert to target endianness */
2572 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2573 uint32_t target_data;
2574 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2575 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2576 }
2577
2578 retval = image_calculate_checksum(buffer, size, &checksum);
2579 free(buffer);
2580 }
2581
2582 *crc = checksum;
2583
2584 return retval;
2585 }
2586
2587 int target_blank_check_memory(struct target *target,
2588 struct target_memory_check_block *blocks, int num_blocks,
2589 uint8_t erased_value)
2590 {
2591 if (!target_was_examined(target)) {
2592 LOG_ERROR("Target not examined yet");
2593 return ERROR_FAIL;
2594 }
2595
2596 if (!target->type->blank_check_memory)
2597 return ERROR_NOT_IMPLEMENTED;
2598
2599 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2600 }
2601
2602 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2603 {
2604 uint8_t value_buf[8];
2605 if (!target_was_examined(target)) {
2606 LOG_ERROR("Target not examined yet");
2607 return ERROR_FAIL;
2608 }
2609
2610 int retval = target_read_memory(target, address, 8, 1, value_buf);
2611
2612 if (retval == ERROR_OK) {
2613 *value = target_buffer_get_u64(target, value_buf);
2614 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2615 address,
2616 *value);
2617 } else {
2618 *value = 0x0;
2619 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2620 address);
2621 }
2622
2623 return retval;
2624 }
2625
2626 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2627 {
2628 uint8_t value_buf[4];
2629 if (!target_was_examined(target)) {
2630 LOG_ERROR("Target not examined yet");
2631 return ERROR_FAIL;
2632 }
2633
2634 int retval = target_read_memory(target, address, 4, 1, value_buf);
2635
2636 if (retval == ERROR_OK) {
2637 *value = target_buffer_get_u32(target, value_buf);
2638 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2639 address,
2640 *value);
2641 } else {
2642 *value = 0x0;
2643 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2644 address);
2645 }
2646
2647 return retval;
2648 }
2649
2650 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2651 {
2652 uint8_t value_buf[2];
2653 if (!target_was_examined(target)) {
2654 LOG_ERROR("Target not examined yet");
2655 return ERROR_FAIL;
2656 }
2657
2658 int retval = target_read_memory(target, address, 2, 1, value_buf);
2659
2660 if (retval == ERROR_OK) {
2661 *value = target_buffer_get_u16(target, value_buf);
2662 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2663 address,
2664 *value);
2665 } else {
2666 *value = 0x0;
2667 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2668 address);
2669 }
2670
2671 return retval;
2672 }
2673
2674 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2675 {
2676 if (!target_was_examined(target)) {
2677 LOG_ERROR("Target not examined yet");
2678 return ERROR_FAIL;
2679 }
2680
2681 int retval = target_read_memory(target, address, 1, 1, value);
2682
2683 if (retval == ERROR_OK) {
2684 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2685 address,
2686 *value);
2687 } else {
2688 *value = 0x0;
2689 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2690 address);
2691 }
2692
2693 return retval;
2694 }
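/* Illustrative sketch only: the target_read_u* helpers convert via
 * target_buffer_get_u*, so callers always see host-order values
 * regardless of target endianness. CHIP_ID_ADDR is hypothetical.
 *
 *	uint32_t id;
 *	int retval = target_read_u32(target, CHIP_ID_ADDR, &id);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	LOG_INFO("chip id: 0x%08" PRIx32, id);
 */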
2695
2696 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2697 {
2698 int retval;
2699 uint8_t value_buf[8];
2700 if (!target_was_examined(target)) {
2701 LOG_ERROR("Target not examined yet");
2702 return ERROR_FAIL;
2703 }
2704
2705 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2706 address,
2707 value);
2708
2709 target_buffer_set_u64(target, value_buf, value);
2710 retval = target_write_memory(target, address, 8, 1, value_buf);
2711 if (retval != ERROR_OK)
2712 LOG_DEBUG("failed: %i", retval);
2713
2714 return retval;
2715 }
2716
2717 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2718 {
2719 int retval;
2720 uint8_t value_buf[4];
2721 if (!target_was_examined(target)) {
2722 LOG_ERROR("Target not examined yet");
2723 return ERROR_FAIL;
2724 }
2725
2726 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2727 address,
2728 value);
2729
2730 target_buffer_set_u32(target, value_buf, value);
2731 retval = target_write_memory(target, address, 4, 1, value_buf);
2732 if (retval != ERROR_OK)
2733 LOG_DEBUG("failed: %i", retval);
2734
2735 return retval;
2736 }
2737
2738 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2739 {
2740 int retval;
2741 uint8_t value_buf[2];
2742 if (!target_was_examined(target)) {
2743 LOG_ERROR("Target not examined yet");
2744 return ERROR_FAIL;
2745 }
2746
2747 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2748 address,
2749 value);
2750
2751 target_buffer_set_u16(target, value_buf, value);
2752 retval = target_write_memory(target, address, 2, 1, value_buf);
2753 if (retval != ERROR_OK)
2754 LOG_DEBUG("failed: %i", retval);
2755
2756 return retval;
2757 }
2758
2759 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2760 {
2761 int retval;
2762 if (!target_was_examined(target)) {
2763 LOG_ERROR("Target not examined yet");
2764 return ERROR_FAIL;
2765 }
2766
2767 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2768 address, value);
2769
2770 retval = target_write_memory(target, address, 1, 1, &value);
2771 if (retval != ERROR_OK)
2772 LOG_DEBUG("failed: %i", retval);
2773
2774 return retval;
2775 }
2776
2777 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2778 {
2779 int retval;
2780 uint8_t value_buf[8];
2781 if (!target_was_examined(target)) {
2782 LOG_ERROR("Target not examined yet");
2783 return ERROR_FAIL;
2784 }
2785
2786 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2787 address,
2788 value);
2789
2790 target_buffer_set_u64(target, value_buf, value);
2791 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2792 if (retval != ERROR_OK)
2793 LOG_DEBUG("failed: %i", retval);
2794
2795 return retval;
2796 }
2797
2798 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2799 {
2800 int retval;
2801 uint8_t value_buf[4];
2802 if (!target_was_examined(target)) {
2803 LOG_ERROR("Target not examined yet");
2804 return ERROR_FAIL;
2805 }
2806
2807 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2808 address,
2809 value);
2810
2811 target_buffer_set_u32(target, value_buf, value);
2812 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2813 if (retval != ERROR_OK)
2814 LOG_DEBUG("failed: %i", retval);
2815
2816 return retval;
2817 }
2818
2819 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2820 {
2821 int retval;
2822 uint8_t value_buf[2];
2823 if (!target_was_examined(target)) {
2824 LOG_ERROR("Target not examined yet");
2825 return ERROR_FAIL;
2826 }
2827
2828 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2829 address,
2830 value);
2831
2832 target_buffer_set_u16(target, value_buf, value);
2833 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2834 if (retval != ERROR_OK)
2835 LOG_DEBUG("failed: %i", retval);
2836
2837 return retval;
2838 }
2839
2840 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2841 {
2842 int retval;
2843 if (!target_was_examined(target)) {
2844 LOG_ERROR("Target not examined yet");
2845 return ERROR_FAIL;
2846 }
2847
2848 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2849 address, value);
2850
2851 retval = target_write_phys_memory(target, address, 1, 1, &value);
2852 if (retval != ERROR_OK)
2853 LOG_DEBUG("failed: %i", retval);
2854
2855 return retval;
2856 }
2857
2858 static int find_target(struct command_invocation *cmd, const char *name)
2859 {
2860 struct target *target = get_target(name);
2861 if (!target) {
2862 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2863 return ERROR_FAIL;
2864 }
2865 if (!target->tap->enabled) {
2866 command_print(cmd, "Target: TAP %s is disabled, "
2867 "can't be the current target\n",
2868 target->tap->dotted_name);
2869 return ERROR_FAIL;
2870 }
2871
2872 cmd->ctx->current_target = target;
2873 if (cmd->ctx->current_target_override)
2874 cmd->ctx->current_target_override = target;
2875
2876 return ERROR_OK;
2877 }
2878
2880 COMMAND_HANDLER(handle_targets_command)
2881 {
2882 int retval = ERROR_OK;
2883 if (CMD_ARGC == 1) {
2884 retval = find_target(CMD, CMD_ARGV[0]);
2885 if (retval == ERROR_OK) {
2886 /* we're done! */
2887 return retval;
2888 }
2889 }
2890
2891 struct target *target = all_targets;
2892 command_print(CMD, " TargetName Type Endian TapName State ");
2893 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2894 while (target) {
2895 const char *state;
2896 char marker = ' ';
2897
2898 if (target->tap->enabled)
2899 state = target_state_name(target);
2900 else
2901 state = "tap-disabled";
2902
2903 if (CMD_CTX->current_target == target)
2904 marker = '*';
2905
2906 /* keep columns lined up to match the headers above */
2907 command_print(CMD,
2908 "%2d%c %-18s %-10s %-6s %-18s %s",
2909 target->target_number,
2910 marker,
2911 target_name(target),
2912 target_type_name(target),
2913 jim_nvp_value2name_simple(nvp_target_endian,
2914 target->endianness)->name,
2915 target->tap->dotted_name,
2916 state);
2917 target = target->next;
2918 }
2919
2920 return retval;
2921 }
2922
2923 /* Periodically check for reset & power dropout, and run the matching event procs (srst_asserted, srst_deasserted, power_dropout, power_restore) when detected. */
2924
2925 static int power_dropout;
2926 static int srst_asserted;
2927
2928 static int run_power_restore;
2929 static int run_power_dropout;
2930 static int run_srst_asserted;
2931 static int run_srst_deasserted;
2932
2933 static int sense_handler(void)
2934 {
2935 static int prev_srst_asserted;
2936 static int prev_power_dropout;
2937
2938 int retval = jtag_power_dropout(&power_dropout);
2939 if (retval != ERROR_OK)
2940 return retval;
2941
2942 int power_restored;
2943 power_restored = prev_power_dropout && !power_dropout;
2944 if (power_restored)
2945 run_power_restore = 1;
2946
2947 int64_t current = timeval_ms();
2948 static int64_t last_power;
2949 bool wait_more = last_power + 2000 > current;
2950 if (power_dropout && !wait_more) {
2951 run_power_dropout = 1;
2952 last_power = current;
2953 }
2954
2955 retval = jtag_srst_asserted(&srst_asserted);
2956 if (retval != ERROR_OK)
2957 return retval;
2958
2959 int srst_deasserted;
2960 srst_deasserted = prev_srst_asserted && !srst_asserted;
2961
2962 static int64_t last_srst;
2963 wait_more = last_srst + 2000 > current;
2964 if (srst_deasserted && !wait_more) {
2965 run_srst_deasserted = 1;
2966 last_srst = current;
2967 }
2968
2969 if (!prev_srst_asserted && srst_asserted)
2970 run_srst_asserted = 1;
2971
2972 prev_srst_asserted = srst_asserted;
2973 prev_power_dropout = power_dropout;
2974
2975 if (srst_deasserted || power_restored) {
2976 /* Other than logging the event we can't do anything here.
2977 * Issuing a reset is a particularly bad idea as we might
2978 * be inside a reset already.
2979 */
2980 }
2981
2982 return ERROR_OK;
2983 }
2984
2985 /* process target state changes */
2986 static int handle_target(void *priv)
2987 {
2988 Jim_Interp *interp = (Jim_Interp *)priv;
2989 int retval = ERROR_OK;
2990
2991 if (!is_jtag_poll_safe()) {
2992 /* polling is disabled currently */
2993 return ERROR_OK;
2994 }
2995
2996 /* we do not want to recurse here... */
2997 static int recursive;
2998 if (!recursive) {
2999 recursive = 1;
3000 sense_handler();
3001 /* danger! running these procedures can trigger srst assertions and power dropouts.
3002 * We need to avoid an infinite loop/recursion here and we do that by
3003 * clearing the flags after running these events.
3004 */
3005 int did_something = 0;
3006 if (run_srst_asserted) {
3007 LOG_INFO("srst asserted detected, running srst_asserted proc.");
3008 Jim_Eval(interp, "srst_asserted");
3009 did_something = 1;
3010 }
3011 if (run_srst_deasserted) {
3012 Jim_Eval(interp, "srst_deasserted");
3013 did_something = 1;
3014 }
3015 if (run_power_dropout) {
3016 LOG_INFO("Power dropout detected, running power_dropout proc.");
3017 Jim_Eval(interp, "power_dropout");
3018 did_something = 1;
3019 }
3020 if (run_power_restore) {
3021 Jim_Eval(interp, "power_restore");
3022 did_something = 1;
3023 }
3024
3025 if (did_something) {
3026 /* clear detect flags */
3027 sense_handler();
3028 }
3029
3030 /* clear action flags */
3031
3032 run_srst_asserted = 0;
3033 run_srst_deasserted = 0;
3034 run_power_restore = 0;
3035 run_power_dropout = 0;
3036
3037 recursive = 0;
3038 }
3039
3040 /* Poll targets for state changes unless that's globally disabled.
3041 * Skip targets that are currently disabled.
3042 */
3043 for (struct target *target = all_targets;
3044 is_jtag_poll_safe() && target;
3045 target = target->next) {
3046
3047 if (!target_was_examined(target))
3048 continue;
3049
3050 if (!target->tap->enabled)
3051 continue;
3052
3053 if (target->backoff.times > target->backoff.count) {
3054 /* do not poll this time as we failed previously */
3055 target->backoff.count++;
3056 continue;
3057 }
3058 target->backoff.count = 0;
3059
3060 /* only poll target if we've got power and srst isn't asserted */
3061 if (!power_dropout && !srst_asserted) {
3062 /* polling may fail silently until the target has been examined */
3063 retval = target_poll(target);
3064 if (retval != ERROR_OK) {
3065 /* 100ms polling interval. Increase interval between polling up to 5000ms */
3066 if (target->backoff.times * polling_interval < 5000) {
3067 target->backoff.times *= 2;
3068 target->backoff.times++;
3069 }
3070
3071 /* Tell GDB to halt the debugger. This allows the user to
3072 * run monitor commands to handle the situation.
3073 */
3074 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3075 }
3076 if (target->backoff.times > 0) {
3077 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3078 target_reset_examined(target);
3079 retval = target_examine_one(target);
3080 /* Target examination could have failed due to unstable connection,
3081 * but we set the examined flag anyway to repoll it later */
3082 if (retval != ERROR_OK) {
3083 target_set_examined(target);
3084 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3085 target->backoff.times * polling_interval);
3086 return retval;
3087 }
3088 }
3089
3090 /* Since we succeeded, we reset backoff count */
3091 target->backoff.times = 0;
3092 }
3093 }
3094
3095 return retval;
3096 }
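/* Worked example of the backoff above with the default 100 ms polling
 * interval: after consecutive poll failures, backoff.times grows
 * 0 -> 1 -> 3 -> 7 -> 15 -> 31 -> 63 and then stops doubling (since
 * 63 * 100 ms >= 5000 ms), so a dead target is re-polled and
 * re-examined only about every six seconds instead of on every tick. */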
3097
3098 COMMAND_HANDLER(handle_reg_command)
3099 {
3100 LOG_DEBUG("-");
3101
3102 struct target *target = get_current_target(CMD_CTX);
3103 struct reg *reg = NULL;
3104
3105 /* list all available registers for the current target */
3106 if (CMD_ARGC == 0) {
3107 struct reg_cache *cache = target->reg_cache;
3108
3109 unsigned int count = 0;
3110 while (cache) {
3111 unsigned i;
3112
3113 command_print(CMD, "===== %s", cache->name);
3114
3115 for (i = 0, reg = cache->reg_list;
3116 i < cache->num_regs;
3117 i++, reg++, count++) {
3118 if (!reg->exist || reg->hidden)
3119 continue;
3120 /* only print cached values if they are valid */
3121 if (reg->valid) {
3122 char *value = buf_to_hex_str(reg->value,
3123 reg->size);
3124 command_print(CMD,
3125 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3126 count, reg->name,
3127 reg->size, value,
3128 reg->dirty
3129 ? " (dirty)"
3130 : "");
3131 free(value);
3132 } else {
3133 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3134 count, reg->name,
3135 reg->size);
3136 }
3137 }
3138 cache = cache->next;
3139 }
3140
3141 return ERROR_OK;
3142 }
3143
3144 /* access a single register by its ordinal number */
3145 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3146 unsigned num;
3147 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3148
3149 struct reg_cache *cache = target->reg_cache;
3150 unsigned int count = 0;
3151 while (cache) {
3152 unsigned i;
3153 for (i = 0; i < cache->num_regs; i++) {
3154 if (count++ == num) {
3155 reg = &cache->reg_list[i];
3156 break;
3157 }
3158 }
3159 if (reg)
3160 break;
3161 cache = cache->next;
3162 }
3163
3164 if (!reg) {
3165 command_print(CMD, "%u is out of bounds, the current target "
3166 "has only %u registers (0 - %u)", num, count, count - 1);
3167 return ERROR_OK;
3168 }
3169 } else {
3170 /* access a single register by its name */
3171 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3172
3173 if (!reg)
3174 goto not_found;
3175 }
3176
3177 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3178
3179 if (!reg->exist)
3180 goto not_found;
3181
3182 /* display a register */
3183 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3184 && (CMD_ARGV[1][0] <= '9')))) {
3185 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3186 reg->valid = 0;
3187
3188 if (reg->valid == 0) {
3189 int retval = reg->type->get(reg);
3190 if (retval != ERROR_OK) {
3191 LOG_ERROR("Could not read register '%s'", reg->name);
3192 return retval;
3193 }
3194 }
3195 char *value = buf_to_hex_str(reg->value, reg->size);
3196 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3197 free(value);
3198 return ERROR_OK;
3199 }
3200
3201 /* set register value */
3202 if (CMD_ARGC == 2) {
3203 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3204 if (!buf)
3205 return ERROR_FAIL;
3206 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3207
3208 int retval = reg->type->set(reg, buf);
3209 if (retval != ERROR_OK) {
3210 LOG_ERROR("Could not write to register '%s'", reg->name);
3211 } else {
3212 char *value = buf_to_hex_str(reg->value, reg->size);
3213 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3214 free(value);
3215 }
3216
3217 free(buf);
3218
3219 return retval;
3220 }
3221
3222 return ERROR_COMMAND_SYNTAX_ERROR;
3223
3224 not_found:
3225 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3226 return ERROR_OK;
3227 }
3228
3229 COMMAND_HANDLER(handle_poll_command)
3230 {
3231 int retval = ERROR_OK;
3232 struct target *target = get_current_target(CMD_CTX);
3233
3234 if (CMD_ARGC == 0) {
3235 command_print(CMD, "background polling: %s",
3236 jtag_poll_get_enabled() ? "on" : "off");
3237 command_print(CMD, "TAP: %s (%s)",
3238 target->tap->dotted_name,
3239 target->tap->enabled ? "enabled" : "disabled");
3240 if (!target->tap->enabled)
3241 return ERROR_OK;
3242 retval = target_poll(target);
3243 if (retval != ERROR_OK)
3244 return retval;
3245 retval = target_arch_state(target);
3246 if (retval != ERROR_OK)
3247 return retval;
3248 } else if (CMD_ARGC == 1) {
3249 bool enable;
3250 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3251 jtag_poll_set_enabled(enable);
3252 } else
3253 return ERROR_COMMAND_SYNTAX_ERROR;
3254
3255 return retval;
3256 }
3257
3258 COMMAND_HANDLER(handle_wait_halt_command)
3259 {
3260 if (CMD_ARGC > 1)
3261 return ERROR_COMMAND_SYNTAX_ERROR;
3262
3263 unsigned ms = DEFAULT_HALT_TIMEOUT;
3264 if (CMD_ARGC == 1) {
3265 int retval = parse_uint(CMD_ARGV[0], &ms);
3266 if (retval != ERROR_OK)
3267 return ERROR_COMMAND_SYNTAX_ERROR;
3268 }
3269
3270 struct target *target = get_current_target(CMD_CTX);
3271 return target_wait_state(target, TARGET_HALTED, ms);
3272 }
3273
3274 /* wait for target state to change. The trick here is to have a low
3275 * latency for short waits and not to suck up all the CPU time
3276 * on longer waits.
3277 *
3278 * After 500ms, keep_alive() is invoked
3279 */
3280 int target_wait_state(struct target *target, enum target_state state, int ms)
3281 {
3282 int retval;
3283 int64_t then = 0, cur;
3284 bool once = true;
3285
3286 for (;;) {
3287 retval = target_poll(target);
3288 if (retval != ERROR_OK)
3289 return retval;
3290 if (target->state == state)
3291 break;
3292 cur = timeval_ms();
3293 if (once) {
3294 once = false;
3295 then = timeval_ms();
3296 LOG_DEBUG("waiting for target %s...",
3297 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3298 }
3299
3300 if (cur-then > 500)
3301 keep_alive();
3302
3303 if ((cur-then) > ms) {
3304 LOG_ERROR("timed out while waiting for target %s",
3305 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3306 return ERROR_FAIL;
3307 }
3308 }
3309
3310 return ERROR_OK;
3311 }
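/* Illustrative sketch only: halt-and-wait, the same pattern the "halt"
 * command below builds on.
 *
 *	int retval = target_halt(target);
 *	if (retval == ERROR_OK)
 *		retval = target_wait_state(target, TARGET_HALTED,
 *			DEFAULT_HALT_TIMEOUT);
 */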
3312
3313 COMMAND_HANDLER(handle_halt_command)
3314 {
3315 LOG_DEBUG("-");
3316
3317 struct target *target = get_current_target(CMD_CTX);
3318
3319 target->verbose_halt_msg = true;
3320
3321 int retval = target_halt(target);
3322 if (retval != ERROR_OK)
3323 return retval;
3324
3325 if (CMD_ARGC == 1) {
3326 unsigned wait_local;
3327 retval = parse_uint(CMD_ARGV[0], &wait_local);
3328 if (retval != ERROR_OK)
3329 return ERROR_COMMAND_SYNTAX_ERROR;
3330 if (!wait_local)
3331 return ERROR_OK;
3332 }
3333
3334 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3335 }
3336
3337 COMMAND_HANDLER(handle_soft_reset_halt_command)
3338 {
3339 struct target *target = get_current_target(CMD_CTX);
3340
3341 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3342
3343 return target_soft_reset_halt(target);
3346 }
3347
3348 COMMAND_HANDLER(handle_reset_command)
3349 {
3350 if (CMD_ARGC > 1)
3351 return ERROR_COMMAND_SYNTAX_ERROR;
3352
3353 enum target_reset_mode reset_mode = RESET_RUN;
3354 if (CMD_ARGC == 1) {
3355 const struct jim_nvp *n;
3356 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3357 if ((!n->name) || (n->value == RESET_UNKNOWN))
3358 return ERROR_COMMAND_SYNTAX_ERROR;
3359 reset_mode = n->value;
3360 }
3361
3362 /* reset *all* targets */
3363 return target_process_reset(CMD, reset_mode);
3364 }
3365
3367 COMMAND_HANDLER(handle_resume_command)
3368 {
3369 int current = 1;
3370 if (CMD_ARGC > 1)
3371 return ERROR_COMMAND_SYNTAX_ERROR;
3372
3373 struct target *target = get_current_target(CMD_CTX);
3374
3375 /* with no CMD_ARGV, resume from current pc, addr = 0,
3376 * with one argument, addr = CMD_ARGV[0],
3377 * handle breakpoints, not debugging */
3378 target_addr_t addr = 0;
3379 if (CMD_ARGC == 1) {
3380 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3381 current = 0;
3382 }
3383
3384 return target_resume(target, current, addr, 1, 0);
3385 }
3386
3387 COMMAND_HANDLER(handle_step_command)
3388 {
3389 if (CMD_ARGC > 1)
3390 return ERROR_COMMAND_SYNTAX_ERROR;
3391
3392 LOG_DEBUG("-");
3393
3394 /* with no CMD_ARGV, step from current pc, addr = 0,
3395 * with one argument addr = CMD_ARGV[0],
3396 * handle breakpoints, debugging */
3397 target_addr_t addr = 0;
3398 int current_pc = 1;
3399 if (CMD_ARGC == 1) {
3400 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3401 current_pc = 0;
3402 }
3403
3404 struct target *target = get_current_target(CMD_CTX);
3405
3406 return target_step(target, current_pc, addr, 1);
3407 }
3408
3409 void target_handle_md_output(struct command_invocation *cmd,
3410 struct target *target, target_addr_t address, unsigned size,
3411 unsigned count, const uint8_t *buffer)
3412 {
3413 const unsigned line_bytecnt = 32;
3414 unsigned line_modulo = line_bytecnt / size;
3415
3416 char output[line_bytecnt * 4 + 1];
3417 unsigned output_len = 0;
3418
3419 const char *value_fmt;
3420 switch (size) {
3421 case 8:
3422 value_fmt = "%16.16"PRIx64" ";
3423 break;
3424 case 4:
3425 value_fmt = "%8.8"PRIx64" ";
3426 break;
3427 case 2:
3428 value_fmt = "%4.4"PRIx64" ";
3429 break;
3430 case 1:
3431 value_fmt = "%2.2"PRIx64" ";
3432 break;
3433 default:
3434 /* "can't happen", caller checked */
3435 LOG_ERROR("invalid memory read size: %u", size);
3436 return;
3437 }
3438
3439 for (unsigned i = 0; i < count; i++) {
3440 if (i % line_modulo == 0) {
3441 output_len += snprintf(output + output_len,
3442 sizeof(output) - output_len,
3443 TARGET_ADDR_FMT ": ",
3444 (address + (i * size)));
3445 }
3446
3447 uint64_t value = 0;
3448 const uint8_t *value_ptr = buffer + i * size;
3449 switch (size) {
3450 case 8:
3451 value = target_buffer_get_u64(target, value_ptr);
3452 break;
3453 case 4:
3454 value = target_buffer_get_u32(target, value_ptr);
3455 break;
3456 case 2:
3457 value = target_buffer_get_u16(target, value_ptr);
3458 break;
3459 case 1:
3460 value = *value_ptr;
3461 }
3462 output_len += snprintf(output + output_len,
3463 sizeof(output) - output_len,
3464 value_fmt, value);
3465
3466 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3467 command_print(cmd, "%s", output);
3468 output_len = 0;
3469 }
3470 }
3471 }
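/* Example of the resulting layout for "md w 0x20000000 4" (the values
 * shown are hypothetical):
 *
 *	0x20000000: 20001000 080001ed 08000201 08000205
 */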
3472
3473 COMMAND_HANDLER(handle_md_command)
3474 {
3475 if (CMD_ARGC < 1)
3476 return ERROR_COMMAND_SYNTAX_ERROR;
3477
3478 unsigned size = 0;
3479 switch (CMD_NAME[2]) {
3480 case 'd':
3481 size = 8;
3482 break;
3483 case 'w':
3484 size = 4;
3485 break;
3486 case 'h':
3487 size = 2;
3488 break;
3489 case 'b':
3490 size = 1;
3491 break;
3492 default:
3493 return ERROR_COMMAND_SYNTAX_ERROR;
3494 }
3495
3496 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3497 int (*fn)(struct target *target,
3498 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3499 if (physical) {
3500 CMD_ARGC--;
3501 CMD_ARGV++;
3502 fn = target_read_phys_memory;
3503 } else
3504 fn = target_read_memory;
3505 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3506 return ERROR_COMMAND_SYNTAX_ERROR;
3507
3508 target_addr_t address;
3509 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3510
3511 unsigned count = 1;
3512 if (CMD_ARGC == 2)
3513 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3514
3515 uint8_t *buffer = calloc(count, size);
3516 if (!buffer) {
3517 LOG_ERROR("Failed to allocate md read buffer");
3518 return ERROR_FAIL;
3519 }
3520
3521 struct target *target = get_current_target(CMD_CTX);
3522 int retval = fn(target, address, size, count, buffer);
3523 if (retval == ERROR_OK)
3524 target_handle_md_output(CMD, target, address, size, count, buffer);
3525
3526 free(buffer);
3527
3528 return retval;
3529 }
3530
3531 typedef int (*target_write_fn)(struct target *target,
3532 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3533
3534 static int target_fill_mem(struct target *target,
3535 target_addr_t address,
3536 target_write_fn fn,
3537 unsigned data_size,
3538 /* value */
3539 uint64_t b,
3540 /* count */
3541 unsigned c)
3542 {
3543 /* We have to write in reasonably large chunks to be able
3544 * to fill large memory areas with any sane speed */
3545 const unsigned chunk_size = 16384;
3546 uint8_t *target_buf = malloc(chunk_size * data_size);
3547 if (!target_buf) {
3548 LOG_ERROR("Out of memory");
3549 return ERROR_FAIL;
3550 }
3551
3552 for (unsigned i = 0; i < chunk_size; i++) {
3553 switch (data_size) {
3554 case 8:
3555 target_buffer_set_u64(target, target_buf + i * data_size, b);
3556 break;
3557 case 4:
3558 target_buffer_set_u32(target, target_buf + i * data_size, b);
3559 break;
3560 case 2:
3561 target_buffer_set_u16(target, target_buf + i * data_size, b);
3562 break;
3563 case 1:
3564 target_buffer_set_u8(target, target_buf + i * data_size, b);
3565 break;
3566 default:
3567 exit(-1);
3568 }
3569 }
3570
3571 int retval = ERROR_OK;
3572
3573 for (unsigned x = 0; x < c; x += chunk_size) {
3574 unsigned current;
3575 current = c - x;
3576 if (current > chunk_size)
3577 current = chunk_size;
3578 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3579 if (retval != ERROR_OK)
3580 break;
3581 /* avoid GDB timeouts */
3582 keep_alive();
3583 }
3584 free(target_buf);
3585
3586 return retval;
3587 }
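/* Worked example of the chunking above: filling c = 40000 words issues
 * three writes of 16384, 16384 and 7232 words, with a keep_alive()
 * after each chunk so GDB does not time out during the long fill. */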
3588
3590 COMMAND_HANDLER(handle_mw_command)
3591 {
3592 if (CMD_ARGC < 2)
3593 return ERROR_COMMAND_SYNTAX_ERROR;
3594 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3595 target_write_fn fn;
3596 if (physical) {
3597 CMD_ARGC--;
3598 CMD_ARGV++;
3599 fn = target_write_phys_memory;
3600 } else
3601 fn = target_write_memory;
3602 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3603 return ERROR_COMMAND_SYNTAX_ERROR;
3604
3605 target_addr_t address;
3606 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3607
3608 uint64_t value;
3609 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3610
3611 unsigned count = 1;
3612 if (CMD_ARGC == 3)
3613 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3614
3615 struct target *target = get_current_target(CMD_CTX);
3616 unsigned wordsize;
3617 switch (CMD_NAME[2]) {
3618 case 'd':
3619 wordsize = 8;
3620 break;
3621 case 'w':
3622 wordsize = 4;
3623 break;
3624 case 'h':
3625 wordsize = 2;
3626 break;
3627 case 'b':
3628 wordsize = 1;
3629 break;
3630 default:
3631 return ERROR_COMMAND_SYNTAX_ERROR;
3632 }
3633
3634 return target_fill_mem(target, address, fn, wordsize, value, count);
3635 }
3636
3637 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3638 target_addr_t *min_address, target_addr_t *max_address)
3639 {
3640 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3641 return ERROR_COMMAND_SYNTAX_ERROR;
3642
3643 /* a base address isn't always necessary,
3644 * default to 0x0 (i.e. don't relocate) */
3645 if (CMD_ARGC >= 2) {
3646 target_addr_t addr;
3647 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3648 image->base_address = addr;
3649 image->base_address_set = true;
3650 } else
3651 image->base_address_set = false;
3652
3653 image->start_address_set = false;
3654
3655 if (CMD_ARGC >= 4)
3656 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3657 if (CMD_ARGC == 5) {
3658 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3659 /* use size (given) to find max (required) */
3660 *max_address += *min_address;
3661 }
3662
3663 if (*min_address > *max_address)
3664 return ERROR_COMMAND_SYNTAX_ERROR;
3665
3666 return ERROR_OK;
3667 }
3668
3669 COMMAND_HANDLER(handle_load_image_command)
3670 {
3671 uint8_t *buffer;
3672 size_t buf_cnt;
3673 uint32_t image_size;
3674 target_addr_t min_address = 0;
3675 target_addr_t max_address = -1;
3676 struct image image;
3677
3678 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3679 &image, &min_address, &max_address);
3680 if (retval != ERROR_OK)
3681 return retval;
3682
3683 struct target *target = get_current_target(CMD_CTX);
3684
3685 struct duration bench;
3686 duration_start(&bench);
3687
3688 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3689 return ERROR_FAIL;
3690
3691 image_size = 0x0;
3692 retval = ERROR_OK;
3693 for (unsigned int i = 0; i < image.num_sections; i++) {
3694 buffer = malloc(image.sections[i].size);
3695 if (!buffer) {
3696 command_print(CMD,
3697 "error allocating buffer for section (%d bytes)",
3698 (int)(image.sections[i].size));
3699 retval = ERROR_FAIL;
3700 break;
3701 }
3702
3703 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3704 if (retval != ERROR_OK) {
3705 free