semihosting: add semihosting_basedir command
[openocd.git] / src/target/target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59 #include "smp.h"
60 #include "semihosting_common.h"
61
62 /* default halt wait timeout (ms) */
63 #define DEFAULT_HALT_TIMEOUT 5000
64
65 static int target_read_buffer_default(struct target *target, target_addr_t address,
66 uint32_t count, uint8_t *buffer);
67 static int target_write_buffer_default(struct target *target, target_addr_t address,
68 uint32_t count, const uint8_t *buffer);
69 static int target_array2mem(Jim_Interp *interp, struct target *target,
70 int argc, Jim_Obj * const *argv);
71 static int target_mem2array(Jim_Interp *interp, struct target *target,
72 int argc, Jim_Obj * const *argv);
73 static int target_register_user_commands(struct command_context *cmd_ctx);
74 static int target_get_gdb_fileio_info_default(struct target *target,
75 struct gdb_fileio_info *fileio_info);
76 static int target_gdb_fileio_end_default(struct target *target, int retcode,
77 int fileio_errno, bool ctrl_c);
78
79 /* targets */
80 extern struct target_type arm7tdmi_target;
81 extern struct target_type arm720t_target;
82 extern struct target_type arm9tdmi_target;
83 extern struct target_type arm920t_target;
84 extern struct target_type arm966e_target;
85 extern struct target_type arm946e_target;
86 extern struct target_type arm926ejs_target;
87 extern struct target_type fa526_target;
88 extern struct target_type feroceon_target;
89 extern struct target_type dragonite_target;
90 extern struct target_type xscale_target;
91 extern struct target_type cortexm_target;
92 extern struct target_type cortexa_target;
93 extern struct target_type aarch64_target;
94 extern struct target_type cortexr4_target;
95 extern struct target_type arm11_target;
96 extern struct target_type ls1_sap_target;
97 extern struct target_type mips_m4k_target;
98 extern struct target_type mips_mips64_target;
99 extern struct target_type avr_target;
100 extern struct target_type dsp563xx_target;
101 extern struct target_type dsp5680xx_target;
102 extern struct target_type testee_target;
103 extern struct target_type avr32_ap7k_target;
104 extern struct target_type hla_target;
105 extern struct target_type nds32_v2_target;
106 extern struct target_type nds32_v3_target;
107 extern struct target_type nds32_v3m_target;
108 extern struct target_type or1k_target;
109 extern struct target_type quark_x10xx_target;
110 extern struct target_type quark_d20xx_target;
111 extern struct target_type stm8_target;
112 extern struct target_type riscv_target;
113 extern struct target_type mem_ap_target;
114 extern struct target_type esirisc_target;
115 extern struct target_type arcv2_target;
116
117 static struct target_type *target_types[] = {
118 &arm7tdmi_target,
119 &arm9tdmi_target,
120 &arm920t_target,
121 &arm720t_target,
122 &arm966e_target,
123 &arm946e_target,
124 &arm926ejs_target,
125 &fa526_target,
126 &feroceon_target,
127 &dragonite_target,
128 &xscale_target,
129 &cortexm_target,
130 &cortexa_target,
131 &cortexr4_target,
132 &arm11_target,
133 &ls1_sap_target,
134 &mips_m4k_target,
135 &avr_target,
136 &dsp563xx_target,
137 &dsp5680xx_target,
138 &testee_target,
139 &avr32_ap7k_target,
140 &hla_target,
141 &nds32_v2_target,
142 &nds32_v3_target,
143 &nds32_v3m_target,
144 &or1k_target,
145 &quark_x10xx_target,
146 &quark_d20xx_target,
147 &stm8_target,
148 &riscv_target,
149 &mem_ap_target,
150 &esirisc_target,
151 &arcv2_target,
152 &aarch64_target,
153 &mips_mips64_target,
154 NULL,
155 };
156
157 struct target *all_targets;
158 static struct target_event_callback *target_event_callbacks;
159 static struct target_timer_callback *target_timer_callbacks;
160 static int64_t target_timer_next_event_value;
161 static LIST_HEAD(target_reset_callback_list);
162 static LIST_HEAD(target_trace_callback_list);
163 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
164 static LIST_HEAD(empty_smp_targets);
165
166 static const struct jim_nvp nvp_assert[] = {
167 { .name = "assert", NVP_ASSERT },
168 { .name = "deassert", NVP_DEASSERT },
169 { .name = "T", NVP_ASSERT },
170 { .name = "F", NVP_DEASSERT },
171 { .name = "t", NVP_ASSERT },
172 { .name = "f", NVP_DEASSERT },
173 { .name = NULL, .value = -1 }
174 };
175
176 static const struct jim_nvp nvp_error_target[] = {
177 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
178 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
179 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
180 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
181 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
182 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
183 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
184 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
185 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
186 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
187 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
188 { .value = -1, .name = NULL }
189 };
190
191 static const char *target_strerror_safe(int err)
192 {
193 const struct jim_nvp *n;
194
195 n = jim_nvp_value2name_simple(nvp_error_target, err);
196 if (!n->name)
197 return "unknown";
198 else
199 return n->name;
200 }
201
202 static const struct jim_nvp nvp_target_event[] = {
203
204 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
205 { .value = TARGET_EVENT_HALTED, .name = "halted" },
206 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
207 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
208 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
209 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
210 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
211
212 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
213 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
214
215 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
216 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
217 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
218 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
219 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
220 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
221 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
222 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
223
224 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
225 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
226 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
227
228 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
229 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
230
231 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
232 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
233
234 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
235 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
236
237 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
238 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
239
240 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
241
242 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
243 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
244 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
245 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
246 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
247 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
248 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
249 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },
250
251 { .name = NULL, .value = -1 }
252 };
253
254 static const struct jim_nvp nvp_target_state[] = {
255 { .name = "unknown", .value = TARGET_UNKNOWN },
256 { .name = "running", .value = TARGET_RUNNING },
257 { .name = "halted", .value = TARGET_HALTED },
258 { .name = "reset", .value = TARGET_RESET },
259 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
260 { .name = NULL, .value = -1 },
261 };
262
263 static const struct jim_nvp nvp_target_debug_reason[] = {
264 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
265 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
266 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
267 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
268 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
269 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
270 { .name = "program-exit", .value = DBG_REASON_EXIT },
271 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
272 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
273 { .name = NULL, .value = -1 },
274 };
275
276 static const struct jim_nvp nvp_target_endian[] = {
277 { .name = "big", .value = TARGET_BIG_ENDIAN },
278 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
279 { .name = "be", .value = TARGET_BIG_ENDIAN },
280 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
281 { .name = NULL, .value = -1 },
282 };
283
284 static const struct jim_nvp nvp_reset_modes[] = {
285 { .name = "unknown", .value = RESET_UNKNOWN },
286 { .name = "run", .value = RESET_RUN },
287 { .name = "halt", .value = RESET_HALT },
288 { .name = "init", .value = RESET_INIT },
289 { .name = NULL, .value = -1 },
290 };
291
292 const char *debug_reason_name(struct target *t)
293 {
294 const char *cp;
295
296 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
297 t->debug_reason)->name;
298 if (!cp) {
299 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
300 cp = "(*BUG*unknown*BUG*)";
301 }
302 return cp;
303 }
304
305 const char *target_state_name(struct target *t)
306 {
307 const char *cp;
308 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
309 if (!cp) {
310 LOG_ERROR("Invalid target state: %d", (int)(t->state));
311 cp = "(*BUG*unknown*BUG*)";
312 }
313
314 if (!target_was_examined(t) && t->defer_examine)
315 cp = "examine deferred";
316
317 return cp;
318 }
319
320 const char *target_event_name(enum target_event event)
321 {
322 const char *cp;
323 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
324 if (!cp) {
325 LOG_ERROR("Invalid target event: %d", (int)(event));
326 cp = "(*BUG*unknown*BUG*)";
327 }
328 return cp;
329 }
330
331 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
332 {
333 const char *cp;
334 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
335 if (!cp) {
336 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
337 cp = "(*BUG*unknown*BUG*)";
338 }
339 return cp;
340 }
341
342 /* determine the number of the new target */
343 static int new_target_number(void)
344 {
345 struct target *t;
346 int x;
347
348 /* number is 0 based */
349 x = -1;
350 t = all_targets;
351 while (t) {
352 if (x < t->target_number)
353 x = t->target_number;
354 t = t->next;
355 }
356 return x + 1;
357 }
358
359 static void append_to_list_all_targets(struct target *target)
360 {
361 struct target **t = &all_targets;
362
363 while (*t)
364 t = &((*t)->next);
365 *t = target;
366 }
367
368 /* read a uint64_t from a buffer in target memory endianness */
369 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
370 {
371 if (target->endianness == TARGET_LITTLE_ENDIAN)
372 return le_to_h_u64(buffer);
373 else
374 return be_to_h_u64(buffer);
375 }
376
377 /* read a uint32_t from a buffer in target memory endianness */
378 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
379 {
380 if (target->endianness == TARGET_LITTLE_ENDIAN)
381 return le_to_h_u32(buffer);
382 else
383 return be_to_h_u32(buffer);
384 }
385
386 /* read a uint24_t from a buffer in target memory endianness */
387 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
388 {
389 if (target->endianness == TARGET_LITTLE_ENDIAN)
390 return le_to_h_u24(buffer);
391 else
392 return be_to_h_u24(buffer);
393 }
394
395 /* read a uint16_t from a buffer in target memory endianness */
396 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
397 {
398 if (target->endianness == TARGET_LITTLE_ENDIAN)
399 return le_to_h_u16(buffer);
400 else
401 return be_to_h_u16(buffer);
402 }
403
404 /* write a uint64_t to a buffer in target memory endianness */
405 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
406 {
407 if (target->endianness == TARGET_LITTLE_ENDIAN)
408 h_u64_to_le(buffer, value);
409 else
410 h_u64_to_be(buffer, value);
411 }
412
413 /* write a uint32_t to a buffer in target memory endianness */
414 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
415 {
416 if (target->endianness == TARGET_LITTLE_ENDIAN)
417 h_u32_to_le(buffer, value);
418 else
419 h_u32_to_be(buffer, value);
420 }
421
422 /* write a uint24_t to a buffer in target memory endianness */
423 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
424 {
425 if (target->endianness == TARGET_LITTLE_ENDIAN)
426 h_u24_to_le(buffer, value);
427 else
428 h_u24_to_be(buffer, value);
429 }
430
431 /* write a uint16_t to a buffer in target memory endianness */
432 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
433 {
434 if (target->endianness == TARGET_LITTLE_ENDIAN)
435 h_u16_to_le(buffer, value);
436 else
437 h_u16_to_be(buffer, value);
438 }
439
440 /* write a uint8_t to a buffer in target memory endianness */
441 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
442 {
443 *buffer = value;
444 }
445
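/* Illustrative sketch (not part of this file's API surface): the helpers
 * above let callers stay endianness-agnostic. Assuming "target" and "addr"
 * are supplied by the caller, a read-modify-write of one word could look
 * like:
 *
 *   uint8_t buf[4];
 *   if (target_read_memory(target, addr, 4, 1, buf) == ERROR_OK) {
 *       uint32_t value = target_buffer_get_u32(target, buf);
 *       target_buffer_set_u32(target, buf, value + 1);
 *       target_write_memory(target, addr, 4, 1, buf);
 *   }
 */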
446 /* read a uint64_t array from a buffer in target memory endianness */
447 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
448 {
449 uint32_t i;
450 for (i = 0; i < count; i++)
451 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
452 }
453
454 /* read a uint32_t array from a buffer in target memory endianness */
455 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
456 {
457 uint32_t i;
458 for (i = 0; i < count; i++)
459 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
460 }
461
462 /* read a uint16_t array from a buffer in target memory endianness */
463 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
464 {
465 uint32_t i;
466 for (i = 0; i < count; i++)
467 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
468 }
469
470 /* write a uint64_t array to a buffer in target memory endianness */
471 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
472 {
473 uint32_t i;
474 for (i = 0; i < count; i++)
475 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
476 }
477
478 /* write a uint32_t array to a buffer in target memory endianness */
479 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
480 {
481 uint32_t i;
482 for (i = 0; i < count; i++)
483 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
484 }
485
486 /* write a uint16_t array to a buffer in target memory endianness */
487 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
488 {
489 uint32_t i;
490 for (i = 0; i < count; i++)
491 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
492 }
493
494 /* return a pointer to a configured target; id is name or number */
495 struct target *get_target(const char *id)
496 {
497 struct target *target;
498
499 /* try as Tcl target name */
500 for (target = all_targets; target; target = target->next) {
501 if (!target_name(target))
502 continue;
503 if (strcmp(id, target_name(target)) == 0)
504 return target;
505 }
506
507 /* It's OK to remove this fallback sometime after August 2010 or so */
508
509 /* no match, try as number */
510 unsigned num;
511 if (parse_uint(id, &num) != ERROR_OK)
512 return NULL;
513
514 for (target = all_targets; target; target = target->next) {
515 if (target->target_number == (int)num) {
516 LOG_WARNING("use '%s' as target identifier, not '%u'",
517 target_name(target), num);
518 return target;
519 }
520 }
521
522 return NULL;
523 }
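/* Lookup sketch (the target name "stm32f1x.cpu" is only an example):
 *
 *   struct target *t = get_target("stm32f1x.cpu");
 *   if (!t)
 *       LOG_ERROR("target not found");
 */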
524
525 /* return a pointer to the target with the given target number */
526 struct target *get_target_by_num(int num)
527 {
528 struct target *target = all_targets;
529
530 while (target) {
531 if (target->target_number == num)
532 return target;
533 target = target->next;
534 }
535
536 return NULL;
537 }
538
539 struct target *get_current_target(struct command_context *cmd_ctx)
540 {
541 struct target *target = get_current_target_or_null(cmd_ctx);
542
543 if (!target) {
544 LOG_ERROR("BUG: current_target out of bounds");
545 exit(-1);
546 }
547
548 return target;
549 }
550
551 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
552 {
553 return cmd_ctx->current_target_override
554 ? cmd_ctx->current_target_override
555 : cmd_ctx->current_target;
556 }
557
558 int target_poll(struct target *target)
559 {
560 int retval;
561
562 /* We can't poll until after examine */
563 if (!target_was_examined(target)) {
564 /* Fail silently lest we pollute the log */
565 return ERROR_FAIL;
566 }
567
568 retval = target->type->poll(target);
569 if (retval != ERROR_OK)
570 return retval;
571
572 if (target->halt_issued) {
573 if (target->state == TARGET_HALTED)
574 target->halt_issued = false;
575 else {
576 int64_t t = timeval_ms() - target->halt_issued_time;
577 if (t > DEFAULT_HALT_TIMEOUT) {
578 target->halt_issued = false;
579 LOG_INFO("Halt timed out, wake up GDB.");
580 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
581 }
582 }
583 }
584
585 return ERROR_OK;
586 }
587
588 int target_halt(struct target *target)
589 {
590 int retval;
591 /* We can't halt until after examine */
592 if (!target_was_examined(target)) {
593 LOG_ERROR("Target not examined yet");
594 return ERROR_FAIL;
595 }
596
597 retval = target->type->halt(target);
598 if (retval != ERROR_OK)
599 return retval;
600
601 target->halt_issued = true;
602 target->halt_issued_time = timeval_ms();
603
604 return ERROR_OK;
605 }
606
607 /**
608 * Make the target (re)start executing using its saved execution
609 * context (possibly with some modifications).
610 *
611 * @param target Which target should start executing.
612 * @param current True to use the target's saved program counter instead
613 * of the address parameter.
614 * @param address Optionally used as the program counter.
615 * @param handle_breakpoints True iff breakpoints at the resumption PC
616 * should be skipped. (For example, maybe execution was stopped by
617 * such a breakpoint, in which case it would be counterproductive to
618 * let it re-trigger.)
619 * @param debug_execution False if all working areas allocated by OpenOCD
620 * should be released and/or restored to their original contents.
621 * (This would for example be true to run some downloaded "helper"
622 * algorithm code, which resides in one such working buffer and uses
623 * another for data storage.)
624 *
625 * @todo Resolve the ambiguity about what the "debug_execution" flag
626 * signifies. For example, Target implementations don't agree on how
627 * it relates to invalidation of the register cache, or to whether
628 * breakpoints and watchpoints should be enabled. (It would seem wrong
629 * to enable breakpoints when running downloaded "helper" algorithms
630 * (debug_execution true), since the breakpoints would be set to match
631 * target firmware being debugged, not the helper algorithm.... and
632 * enabling them could cause such helpers to malfunction (for example,
633 * by overwriting data with a breakpoint instruction.) On the other
634 * hand the infrastructure for running such helpers might use this
635 * procedure but rely on hardware breakpoint to detect termination.)
636 */
637 int target_resume(struct target *target, int current, target_addr_t address,
638 int handle_breakpoints, int debug_execution)
639 {
640 int retval;
641
642 /* We can't resume until after examine */
643 if (!target_was_examined(target)) {
644 LOG_ERROR("Target not examined yet");
645 return ERROR_FAIL;
646 }
647
648 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
649
650 /* note that resume *must* be asynchronous. The CPU can halt before
651 * we poll. The CPU can even halt at the current PC as a result of
652 * a software breakpoint inserted by the application (possibly a bug).
653 */
654 /*
655 * resume() triggers the event 'resumed'. The execution of TCL commands
656 * in the event handler causes the polling of targets. If the target has
657 * already halted for a breakpoint, polling will run the 'halted' event
658 * handler before the pending 'resumed' handler.
659 * Disable polling during resume() to guarantee the execution of handlers
660 * in the correct order.
661 */
662 bool save_poll = jtag_poll_get_enabled();
663 jtag_poll_set_enabled(false);
664 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
665 jtag_poll_set_enabled(save_poll);
666 if (retval != ERROR_OK)
667 return retval;
668
669 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
670
671 return retval;
672 }
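/* Usage sketch (illustrative only): resume from the saved program counter,
 * skipping a breakpoint at the resumption address, as a normal (non-debug)
 * execution. With current = 1 the address argument is ignored:
 *
 *   if (target_resume(target, 1, 0, 1, 0) != ERROR_OK)
 *       LOG_ERROR("resume failed");
 */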
673
674 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
675 {
676 char buf[100];
677 int retval;
678 struct jim_nvp *n;
679 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
680 if (!n->name) {
681 LOG_ERROR("invalid reset mode");
682 return ERROR_FAIL;
683 }
684
685 struct target *target;
686 for (target = all_targets; target; target = target->next)
687 target_call_reset_callbacks(target, reset_mode);
688
689 /* disable polling during reset to make reset event scripts
690 * more predictable, i.e. dr/irscan & pathmove in events will
691 * not have JTAG operations injected into the middle of a sequence.
692 */
693 bool save_poll = jtag_poll_get_enabled();
694
695 jtag_poll_set_enabled(false);
696
697 sprintf(buf, "ocd_process_reset %s", n->name);
698 retval = Jim_Eval(cmd->ctx->interp, buf);
699
700 jtag_poll_set_enabled(save_poll);
701
702 if (retval != JIM_OK) {
703 Jim_MakeErrorMessage(cmd->ctx->interp);
704 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
705 return ERROR_FAIL;
706 }
707
708 /* We want any events to be processed before the prompt */
709 retval = target_call_timer_callbacks_now();
710
711 for (target = all_targets; target; target = target->next) {
712 target->type->check_reset(target);
713 target->running_alg = false;
714 }
715
716 return retval;
717 }
718
719 static int identity_virt2phys(struct target *target,
720 target_addr_t virtual, target_addr_t *physical)
721 {
722 *physical = virtual;
723 return ERROR_OK;
724 }
725
726 static int no_mmu(struct target *target, int *enabled)
727 {
728 *enabled = 0;
729 return ERROR_OK;
730 }
731
732 /**
733 * Reset the @c examined flag for the given target.
734 * Pure paranoia -- targets are zeroed on allocation.
735 */
736 static inline void target_reset_examined(struct target *target)
737 {
738 target->examined = false;
739 }
740
741 static int default_examine(struct target *target)
742 {
743 target_set_examined(target);
744 return ERROR_OK;
745 }
746
747 /* no check by default */
748 static int default_check_reset(struct target *target)
749 {
750 return ERROR_OK;
751 }
752
753 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
754 * Keep in sync */
755 int target_examine_one(struct target *target)
756 {
757 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
758
759 int retval = target->type->examine(target);
760 if (retval != ERROR_OK) {
761 target_reset_examined(target);
762 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
763 return retval;
764 }
765
766 target_set_examined(target);
767 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
768
769 return ERROR_OK;
770 }
771
772 static int jtag_enable_callback(enum jtag_event event, void *priv)
773 {
774 struct target *target = priv;
775
776 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
777 return ERROR_OK;
778
779 jtag_unregister_event_callback(jtag_enable_callback, target);
780
781 return target_examine_one(target);
782 }
783
784 /* Targets that correctly implement init + examine, i.e.
785 * no communication with target during init:
786 *
787 * XScale
788 */
789 int target_examine(void)
790 {
791 int retval = ERROR_OK;
792 struct target *target;
793
794 for (target = all_targets; target; target = target->next) {
795 /* defer examination, but don't skip it */
796 if (!target->tap->enabled) {
797 jtag_register_event_callback(jtag_enable_callback,
798 target);
799 continue;
800 }
801
802 if (target->defer_examine)
803 continue;
804
805 int retval2 = target_examine_one(target);
806 if (retval2 != ERROR_OK) {
807 LOG_WARNING("target %s examination failed", target_name(target));
808 retval = retval2;
809 }
810 }
811 return retval;
812 }
813
814 const char *target_type_name(struct target *target)
815 {
816 return target->type->name;
817 }
818
819 static int target_soft_reset_halt(struct target *target)
820 {
821 if (!target_was_examined(target)) {
822 LOG_ERROR("Target not examined yet");
823 return ERROR_FAIL;
824 }
825 if (!target->type->soft_reset_halt) {
826 LOG_ERROR("Target %s does not support soft_reset_halt",
827 target_name(target));
828 return ERROR_FAIL;
829 }
830 return target->type->soft_reset_halt(target);
831 }
832
833 /**
834 * Downloads a target-specific native code algorithm to the target,
835 * and executes it. Note that some targets may need to set up, enable,
836 * and tear down a breakpoint (hard or soft) to detect algorithm
837 * termination, while others may support lower overhead schemes where
838 * soft breakpoints embedded in the algorithm automatically terminate the
839 * algorithm.
840 *
841 * @param target used to run the algorithm
842 * @param num_mem_params
843 * @param mem_params
844 * @param num_reg_params
845 * @param reg_param
846 * @param entry_point
847 * @param exit_point
848 * @param timeout_ms
849 * @param arch_info target-specific description of the algorithm.
850 */
851 int target_run_algorithm(struct target *target,
852 int num_mem_params, struct mem_param *mem_params,
853 int num_reg_params, struct reg_param *reg_param,
854 target_addr_t entry_point, target_addr_t exit_point,
855 int timeout_ms, void *arch_info)
856 {
857 int retval = ERROR_FAIL;
858
859 if (!target_was_examined(target)) {
860 LOG_ERROR("Target not examined yet");
861 goto done;
862 }
863 if (!target->type->run_algorithm) {
864 LOG_ERROR("Target type '%s' does not support %s",
865 target_type_name(target), __func__);
866 goto done;
867 }
868
869 target->running_alg = true;
870 retval = target->type->run_algorithm(target,
871 num_mem_params, mem_params,
872 num_reg_params, reg_param,
873 entry_point, exit_point, timeout_ms, arch_info);
874 target->running_alg = false;
875
876 done:
877 return retval;
878 }
879
880 /**
881 * Executes a target-specific native code algorithm and leaves it running.
882 *
883 * @param target used to run the algorithm
884 * @param num_mem_params
885 * @param mem_params
886 * @param num_reg_params
887 * @param reg_params
888 * @param entry_point
889 * @param exit_point
890 * @param arch_info target-specific description of the algorithm.
891 */
892 int target_start_algorithm(struct target *target,
893 int num_mem_params, struct mem_param *mem_params,
894 int num_reg_params, struct reg_param *reg_params,
895 target_addr_t entry_point, target_addr_t exit_point,
896 void *arch_info)
897 {
898 int retval = ERROR_FAIL;
899
900 if (!target_was_examined(target)) {
901 LOG_ERROR("Target not examined yet");
902 goto done;
903 }
904 if (!target->type->start_algorithm) {
905 LOG_ERROR("Target type '%s' does not support %s",
906 target_type_name(target), __func__);
907 goto done;
908 }
909 if (target->running_alg) {
910 LOG_ERROR("Target is already running an algorithm");
911 goto done;
912 }
913
914 target->running_alg = true;
915 retval = target->type->start_algorithm(target,
916 num_mem_params, mem_params,
917 num_reg_params, reg_params,
918 entry_point, exit_point, arch_info);
919
920 done:
921 return retval;
922 }
923
924 /**
925 * Waits for an algorithm started with target_start_algorithm() to complete.
926 *
927 * @param target used to run the algorithm
928 * @param num_mem_params
929 * @param mem_params
930 * @param num_reg_params
931 * @param reg_params
932 * @param exit_point
933 * @param timeout_ms
934 * @param arch_info target-specific description of the algorithm.
935 */
936 int target_wait_algorithm(struct target *target,
937 int num_mem_params, struct mem_param *mem_params,
938 int num_reg_params, struct reg_param *reg_params,
939 target_addr_t exit_point, int timeout_ms,
940 void *arch_info)
941 {
942 int retval = ERROR_FAIL;
943
944 if (!target->type->wait_algorithm) {
945 LOG_ERROR("Target type '%s' does not support %s",
946 target_type_name(target), __func__);
947 goto done;
948 }
949 if (!target->running_alg) {
950 LOG_ERROR("Target is not running an algorithm");
951 goto done;
952 }
953
954 retval = target->type->wait_algorithm(target,
955 num_mem_params, mem_params,
956 num_reg_params, reg_params,
957 exit_point, timeout_ms, arch_info);
958 if (retval != ERROR_TARGET_TIMEOUT)
959 target->running_alg = false;
960
961 done:
962 return retval;
963 }
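/* Pairing sketch (illustrative; entry_point, exit_point and the register
 * parameter array are placeholders the caller prepares): start the
 * algorithm, optionally do other work, then collect the result:
 *
 *   retval = target_start_algorithm(target, 0, NULL, num_regs, regs,
 *           entry_point, exit_point, arch_info);
 *   if (retval == ERROR_OK)
 *       retval = target_wait_algorithm(target, 0, NULL, num_regs, regs,
 *               exit_point, 1000, arch_info);
 */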
964
965 /**
966 * Streams data to a circular buffer on target intended for consumption by code
967 * running asynchronously on target.
968 *
969 * This is intended for applications where target-specific native code runs
970 * on the target, receives data from the circular buffer, does something with
971 * it (most likely writing it to a flash memory), and advances the circular
972 * buffer pointer.
973 *
974 * This assumes that the helper algorithm has already been loaded to the target,
975 * but has not been started yet. Given memory and register parameters are passed
976 * to the algorithm.
977 *
978 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
979 * following format:
980 *
981 * [buffer_start + 0, buffer_start + 4):
982 * Write Pointer address (aka head). Written and updated by this
983 * routine when new data is written to the circular buffer.
984 * [buffer_start + 4, buffer_start + 8):
985 * Read Pointer address (aka tail). Updated by code running on the
986 * target after it consumes data.
987 * [buffer_start + 8, buffer_start + buffer_size):
988 * Circular buffer contents.
989 *
990 * See contrib/loaders/flash/stm32f1x.S for an example.
991 *
992 * @param target used to run the algorithm
993 * @param buffer address on the host where data to be sent is located
994 * @param count number of blocks to send
995 * @param block_size size in bytes of each block
996 * @param num_mem_params count of memory-based params to pass to algorithm
997 * @param mem_params memory-based params to pass to algorithm
998 * @param num_reg_params count of register-based params to pass to algorithm
999 * @param reg_params register-based params to pass to algorithm
1000 * @param buffer_start address on the target of the circular buffer structure
1001 * @param buffer_size size of the circular buffer structure
1002 * @param entry_point address on the target to execute to start the algorithm
1003 * @param exit_point address at which to set a breakpoint to catch the
1004 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1005 * @param arch_info
1006 */
1007
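/* Worked example of the layout above (numbers are illustrative): with
 * buffer_start = 0x20000000 and buffer_size = 0x1000, the write pointer
 * lives at 0x20000000, the read pointer at 0x20000004, and the data area
 * spans [0x20000008, 0x20001000). The fifo is empty when wp == rp. */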
1008 int target_run_flash_async_algorithm(struct target *target,
1009 const uint8_t *buffer, uint32_t count, int block_size,
1010 int num_mem_params, struct mem_param *mem_params,
1011 int num_reg_params, struct reg_param *reg_params,
1012 uint32_t buffer_start, uint32_t buffer_size,
1013 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1014 {
1015 int retval;
1016 int timeout = 0;
1017
1018 const uint8_t *buffer_orig = buffer;
1019
1020 /* Set up working area. First word is write pointer, second word is read pointer,
1021 * rest is fifo data area. */
1022 uint32_t wp_addr = buffer_start;
1023 uint32_t rp_addr = buffer_start + 4;
1024 uint32_t fifo_start_addr = buffer_start + 8;
1025 uint32_t fifo_end_addr = buffer_start + buffer_size;
1026
1027 uint32_t wp = fifo_start_addr;
1028 uint32_t rp = fifo_start_addr;
1029
1030 /* validate block_size is 2^n */
1031 assert(IS_PWR_OF_2(block_size));
1032
1033 retval = target_write_u32(target, wp_addr, wp);
1034 if (retval != ERROR_OK)
1035 return retval;
1036 retval = target_write_u32(target, rp_addr, rp);
1037 if (retval != ERROR_OK)
1038 return retval;
1039
1040 /* Start up algorithm on target and let it idle while writing the first chunk */
1041 retval = target_start_algorithm(target, num_mem_params, mem_params,
1042 num_reg_params, reg_params,
1043 entry_point,
1044 exit_point,
1045 arch_info);
1046
1047 if (retval != ERROR_OK) {
1048 LOG_ERROR("error starting target flash write algorithm");
1049 return retval;
1050 }
1051
1052 while (count > 0) {
1053
1054 retval = target_read_u32(target, rp_addr, &rp);
1055 if (retval != ERROR_OK) {
1056 LOG_ERROR("failed to get read pointer");
1057 break;
1058 }
1059
1060 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1061 (size_t) (buffer - buffer_orig), count, wp, rp);
1062
1063 if (rp == 0) {
1064 LOG_ERROR("flash write algorithm aborted by target");
1065 retval = ERROR_FLASH_OPERATION_FAILED;
1066 break;
1067 }
1068
1069 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1070 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1071 break;
1072 }
1073
1074 /* Count the number of bytes available in the fifo without
1075 * crossing the wrap around. Make sure to not fill it completely,
1076 * because that would make wp == rp and that's the empty condition. */
1077 uint32_t thisrun_bytes;
1078 if (rp > wp)
1079 thisrun_bytes = rp - wp - block_size;
1080 else if (rp > fifo_start_addr)
1081 thisrun_bytes = fifo_end_addr - wp;
1082 else
1083 thisrun_bytes = fifo_end_addr - wp - block_size;
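		/* Worked example (illustrative): with fifo_start_addr = 0x8,
		 * fifo_end_addr = 0x100, block_size = 4 and rp == wp == 0x8
		 * (empty fifo), the else branch allows up to
		 * 0x100 - 0x8 - 0x4 = 0xf4 bytes, keeping one block free so
		 * wp never catches up with rp. */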
1084
1085 if (thisrun_bytes == 0) {
1086 /* Throttle polling a bit if transfer is (much) faster than flash
1087 * programming. The exact delay shouldn't matter as long as it's
1088 * less than buffer size / flash speed. This is very unlikely to
1089 * run when using high latency connections such as USB. */
1090 alive_sleep(2);
1091
1092 /* To stop an infinite loop on some targets, check and increment a timeout.
1093 * This issue was observed on a Stellaris using the new ICDI interface. */
1094 if (timeout++ >= 2500) {
1095 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1096 return ERROR_FLASH_OPERATION_FAILED;
1097 }
1098 continue;
1099 }
1100
1101 /* reset our timeout */
1102 timeout = 0;
1103
1104 /* Limit to the amount of data we actually want to write */
1105 if (thisrun_bytes > count * block_size)
1106 thisrun_bytes = count * block_size;
1107
1108 /* Force end of large blocks to be word aligned */
1109 if (thisrun_bytes >= 16)
1110 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1111
1112 /* Write data to fifo */
1113 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1114 if (retval != ERROR_OK)
1115 break;
1116
1117 /* Update counters and wrap write pointer */
1118 buffer += thisrun_bytes;
1119 count -= thisrun_bytes / block_size;
1120 wp += thisrun_bytes;
1121 if (wp >= fifo_end_addr)
1122 wp = fifo_start_addr;
1123
1124 /* Store updated write pointer to target */
1125 retval = target_write_u32(target, wp_addr, wp);
1126 if (retval != ERROR_OK)
1127 break;
1128
1129 /* Avoid GDB timeouts */
1130 keep_alive();
1131 }
1132
1133 if (retval != ERROR_OK) {
1134 /* abort flash write algorithm on target */
1135 target_write_u32(target, wp_addr, 0);
1136 }
1137
1138 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1139 num_reg_params, reg_params,
1140 exit_point,
1141 10000,
1142 arch_info);
1143
1144 if (retval2 != ERROR_OK) {
1145 LOG_ERROR("error waiting for target flash write algorithm");
1146 retval = retval2;
1147 }
1148
1149 if (retval == ERROR_OK) {
1150 /* check if algorithm set rp = 0 after fifo writer loop finished */
1151 retval = target_read_u32(target, rp_addr, &rp);
1152 if (retval == ERROR_OK && rp == 0) {
1153 LOG_ERROR("flash write algorithm aborted by target");
1154 retval = ERROR_FLASH_OPERATION_FAILED;
1155 }
1156 }
1157
1158 return retval;
1159 }
1160
1161 int target_run_read_async_algorithm(struct target *target,
1162 uint8_t *buffer, uint32_t count, int block_size,
1163 int num_mem_params, struct mem_param *mem_params,
1164 int num_reg_params, struct reg_param *reg_params,
1165 uint32_t buffer_start, uint32_t buffer_size,
1166 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1167 {
1168 int retval;
1169 int timeout = 0;
1170
1171 const uint8_t *buffer_orig = buffer;
1172
1173 /* Set up working area. First word is write pointer, second word is read pointer,
1174 * rest is fifo data area. */
1175 uint32_t wp_addr = buffer_start;
1176 uint32_t rp_addr = buffer_start + 4;
1177 uint32_t fifo_start_addr = buffer_start + 8;
1178 uint32_t fifo_end_addr = buffer_start + buffer_size;
1179
1180 uint32_t wp = fifo_start_addr;
1181 uint32_t rp = fifo_start_addr;
1182
1183 /* validate block_size is 2^n */
1184 assert(IS_PWR_OF_2(block_size));
1185
1186 retval = target_write_u32(target, wp_addr, wp);
1187 if (retval != ERROR_OK)
1188 return retval;
1189 retval = target_write_u32(target, rp_addr, rp);
1190 if (retval != ERROR_OK)
1191 return retval;
1192
1193 /* Start up algorithm on target */
1194 retval = target_start_algorithm(target, num_mem_params, mem_params,
1195 num_reg_params, reg_params,
1196 entry_point,
1197 exit_point,
1198 arch_info);
1199
1200 if (retval != ERROR_OK) {
1201 LOG_ERROR("error starting target flash read algorithm");
1202 return retval;
1203 }
1204
1205 while (count > 0) {
1206 retval = target_read_u32(target, wp_addr, &wp);
1207 if (retval != ERROR_OK) {
1208 LOG_ERROR("failed to get write pointer");
1209 break;
1210 }
1211
1212 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1213 (size_t)(buffer - buffer_orig), count, wp, rp);
1214
1215 if (wp == 0) {
1216 LOG_ERROR("flash read algorithm aborted by target");
1217 retval = ERROR_FLASH_OPERATION_FAILED;
1218 break;
1219 }
1220
1221 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1222 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1223 break;
1224 }
1225
1226 /* Count the number of bytes available in the fifo without
1227 * crossing the wrap around. */
1228 uint32_t thisrun_bytes;
1229 if (wp >= rp)
1230 thisrun_bytes = wp - rp;
1231 else
1232 thisrun_bytes = fifo_end_addr - rp;
1233
1234 if (thisrun_bytes == 0) {
1235 /* Throttle polling a bit if transfer is (much) faster than flash
1236 * reading. The exact delay shouldn't matter as long as it's
1237 * less than buffer size / flash speed. This is very unlikely to
1238 * run when using high latency connections such as USB. */
1239 alive_sleep(2);
1240
1241 /* To stop an infinite loop on some targets, check and increment a timeout.
1242 * This issue was observed on a Stellaris using the new ICDI interface. */
1243 if (timeout++ >= 2500) {
1244 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1245 return ERROR_FLASH_OPERATION_FAILED;
1246 }
1247 continue;
1248 }
1249
1250 /* Reset our timeout */
1251 timeout = 0;
1252
1253 /* Limit to the amount of data we actually want to read */
1254 if (thisrun_bytes > count * block_size)
1255 thisrun_bytes = count * block_size;
1256
1257 /* Force end of large blocks to be word aligned */
1258 if (thisrun_bytes >= 16)
1259 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1260
1261 /* Read data from fifo */
1262 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1263 if (retval != ERROR_OK)
1264 break;
1265
1266 /* Update counters and wrap read pointer */
1267 buffer += thisrun_bytes;
1268 count -= thisrun_bytes / block_size;
1269 rp += thisrun_bytes;
1270 if (rp >= fifo_end_addr)
1271 rp = fifo_start_addr;
1272
1273 /* Store updated read pointer to target */
1274 retval = target_write_u32(target, rp_addr, rp);
1275 if (retval != ERROR_OK)
1276 break;
1277
1278 /* Avoid GDB timeouts */
1279 keep_alive();
1280
1281 }
1282
1283 if (retval != ERROR_OK) {
1284 /* abort flash read algorithm on target */
1285 target_write_u32(target, rp_addr, 0);
1286 }
1287
1288 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1289 num_reg_params, reg_params,
1290 exit_point,
1291 10000,
1292 arch_info);
1293
1294 if (retval2 != ERROR_OK) {
1295 LOG_ERROR("error waiting for target flash write algorithm");
1296 retval = retval2;
1297 }
1298
1299 if (retval == ERROR_OK) {
1300 /* check if algorithm set wp = 0 after fifo writer loop finished */
1301 retval = target_read_u32(target, wp_addr, &wp);
1302 if (retval == ERROR_OK && wp == 0) {
1303 LOG_ERROR("flash read algorithm aborted by target");
1304 retval = ERROR_FLASH_OPERATION_FAILED;
1305 }
1306 }
1307
1308 return retval;
1309 }
1310
1311 int target_read_memory(struct target *target,
1312 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1313 {
1314 if (!target_was_examined(target)) {
1315 LOG_ERROR("Target not examined yet");
1316 return ERROR_FAIL;
1317 }
1318 if (!target->type->read_memory) {
1319 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1320 return ERROR_FAIL;
1321 }
1322 return target->type->read_memory(target, address, size, count, buffer);
1323 }
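/* Access sketch (illustrative): "size" is the access width in bytes and
 * "count" the number of elements, so sixteen 32-bit words are read as:
 *
 *   uint8_t buf[16 * 4];
 *   retval = target_read_memory(target, address, 4, 16, buf);
 */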
1324
1325 int target_read_phys_memory(struct target *target,
1326 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1327 {
1328 if (!target_was_examined(target)) {
1329 LOG_ERROR("Target not examined yet");
1330 return ERROR_FAIL;
1331 }
1332 if (!target->type->read_phys_memory) {
1333 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1334 return ERROR_FAIL;
1335 }
1336 return target->type->read_phys_memory(target, address, size, count, buffer);
1337 }
1338
1339 int target_write_memory(struct target *target,
1340 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1341 {
1342 if (!target_was_examined(target)) {
1343 LOG_ERROR("Target not examined yet");
1344 return ERROR_FAIL;
1345 }
1346 if (!target->type->write_memory) {
1347 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1348 return ERROR_FAIL;
1349 }
1350 return target->type->write_memory(target, address, size, count, buffer);
1351 }
1352
1353 int target_write_phys_memory(struct target *target,
1354 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1355 {
1356 if (!target_was_examined(target)) {
1357 LOG_ERROR("Target not examined yet");
1358 return ERROR_FAIL;
1359 }
1360 if (!target->type->write_phys_memory) {
1361 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1362 return ERROR_FAIL;
1363 }
1364 return target->type->write_phys_memory(target, address, size, count, buffer);
1365 }
1366
1367 int target_add_breakpoint(struct target *target,
1368 struct breakpoint *breakpoint)
1369 {
1370 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1371 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1372 return ERROR_TARGET_NOT_HALTED;
1373 }
1374 return target->type->add_breakpoint(target, breakpoint);
1375 }
1376
1377 int target_add_context_breakpoint(struct target *target,
1378 struct breakpoint *breakpoint)
1379 {
1380 if (target->state != TARGET_HALTED) {
1381 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1382 return ERROR_TARGET_NOT_HALTED;
1383 }
1384 return target->type->add_context_breakpoint(target, breakpoint);
1385 }
1386
1387 int target_add_hybrid_breakpoint(struct target *target,
1388 struct breakpoint *breakpoint)
1389 {
1390 if (target->state != TARGET_HALTED) {
1391 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1392 return ERROR_TARGET_NOT_HALTED;
1393 }
1394 return target->type->add_hybrid_breakpoint(target, breakpoint);
1395 }
1396
1397 int target_remove_breakpoint(struct target *target,
1398 struct breakpoint *breakpoint)
1399 {
1400 return target->type->remove_breakpoint(target, breakpoint);
1401 }
1402
1403 int target_add_watchpoint(struct target *target,
1404 struct watchpoint *watchpoint)
1405 {
1406 if (target->state != TARGET_HALTED) {
1407 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1408 return ERROR_TARGET_NOT_HALTED;
1409 }
1410 return target->type->add_watchpoint(target, watchpoint);
1411 }
1412 int target_remove_watchpoint(struct target *target,
1413 struct watchpoint *watchpoint)
1414 {
1415 return target->type->remove_watchpoint(target, watchpoint);
1416 }
1417 int target_hit_watchpoint(struct target *target,
1418 struct watchpoint **hit_watchpoint)
1419 {
1420 if (target->state != TARGET_HALTED) {
1421 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1422 return ERROR_TARGET_NOT_HALTED;
1423 }
1424
1425 if (!target->type->hit_watchpoint) {
1426 /* For backward compatibility, if hit_watchpoint is not implemented,
1427 * return ERROR_FAIL so that gdb_server will not use the nonsense
1428 * information. */
1429 return ERROR_FAIL;
1430 }
1431
1432 return target->type->hit_watchpoint(target, hit_watchpoint);
1433 }
1434
1435 const char *target_get_gdb_arch(struct target *target)
1436 {
1437 if (!target->type->get_gdb_arch)
1438 return NULL;
1439 return target->type->get_gdb_arch(target);
1440 }
1441
1442 int target_get_gdb_reg_list(struct target *target,
1443 struct reg **reg_list[], int *reg_list_size,
1444 enum target_register_class reg_class)
1445 {
1446 int result = ERROR_FAIL;
1447
1448 if (!target_was_examined(target)) {
1449 LOG_ERROR("Target not examined yet");
1450 goto done;
1451 }
1452
1453 result = target->type->get_gdb_reg_list(target, reg_list,
1454 reg_list_size, reg_class);
1455
1456 done:
1457 if (result != ERROR_OK) {
1458 *reg_list = NULL;
1459 *reg_list_size = 0;
1460 }
1461 return result;
1462 }
1463
1464 int target_get_gdb_reg_list_noread(struct target *target,
1465 struct reg **reg_list[], int *reg_list_size,
1466 enum target_register_class reg_class)
1467 {
1468 if (target->type->get_gdb_reg_list_noread &&
1469 target->type->get_gdb_reg_list_noread(target, reg_list,
1470 reg_list_size, reg_class) == ERROR_OK)
1471 return ERROR_OK;
1472 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1473 }
1474
1475 bool target_supports_gdb_connection(struct target *target)
1476 {
1477 /*
1478 * exclude all the targets that don't provide get_gdb_reg_list
1479 * or that have explicit gdb_max_connection == 0
1480 */
1481 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1482 }
1483
1484 int target_step(struct target *target,
1485 int current, target_addr_t address, int handle_breakpoints)
1486 {
1487 int retval;
1488
1489 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1490
1491 retval = target->type->step(target, current, address, handle_breakpoints);
1492 if (retval != ERROR_OK)
1493 return retval;
1494
1495 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1496
1497 return retval;
1498 }
1499
1500 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1501 {
1502 if (target->state != TARGET_HALTED) {
1503 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1504 return ERROR_TARGET_NOT_HALTED;
1505 }
1506 return target->type->get_gdb_fileio_info(target, fileio_info);
1507 }
1508
1509 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1510 {
1511 if (target->state != TARGET_HALTED) {
1512 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1513 return ERROR_TARGET_NOT_HALTED;
1514 }
1515 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1516 }
1517
1518 target_addr_t target_address_max(struct target *target)
1519 {
1520 unsigned bits = target_address_bits(target);
1521 if (sizeof(target_addr_t) * 8 == bits)
1522 return (target_addr_t) -1;
1523 else
1524 return (((target_addr_t) 1) << bits) - 1;
1525 }
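/* For example, a target reporting 32 address bits yields
 * (((target_addr_t)1) << 32) - 1 = 0xffffffff, while a target whose
 * address width equals that of target_addr_t gets the all-ones value. */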
1526
1527 unsigned target_address_bits(struct target *target)
1528 {
1529 if (target->type->address_bits)
1530 return target->type->address_bits(target);
1531 return 32;
1532 }
1533
1534 unsigned int target_data_bits(struct target *target)
1535 {
1536 if (target->type->data_bits)
1537 return target->type->data_bits(target);
1538 return 32;
1539 }
1540
1541 static int target_profiling(struct target *target, uint32_t *samples,
1542 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1543 {
1544 return target->type->profiling(target, samples, max_num_samples,
1545 num_samples, seconds);
1546 }
1547
1548 static int handle_target(void *priv);
1549
1550 static int target_init_one(struct command_context *cmd_ctx,
1551 struct target *target)
1552 {
1553 target_reset_examined(target);
1554
1555 struct target_type *type = target->type;
1556 if (!type->examine)
1557 type->examine = default_examine;
1558
1559 if (!type->check_reset)
1560 type->check_reset = default_check_reset;
1561
1562 assert(type->init_target);
1563
1564 int retval = type->init_target(cmd_ctx, target);
1565 if (retval != ERROR_OK) {
1566 LOG_ERROR("target '%s' init failed", target_name(target));
1567 return retval;
1568 }
1569
1570 /* Sanity-check MMU support ... stub in what we must, to help
1571 * implement it in stages, but warn if we need to do so.
1572 */
1573 if (type->mmu) {
1574 if (!type->virt2phys) {
1575 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1576 type->virt2phys = identity_virt2phys;
1577 }
1578 } else {
1579 /* Make sure no-MMU targets all behave the same: make no
1580 * distinction between physical and virtual addresses, and
1581 * ensure that virt2phys() is always an identity mapping.
1582 */
1583 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1584 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1585
1586 type->mmu = no_mmu;
1587 type->write_phys_memory = type->write_memory;
1588 type->read_phys_memory = type->read_memory;
1589 type->virt2phys = identity_virt2phys;
1590 }
1591
1592 if (!target->type->read_buffer)
1593 target->type->read_buffer = target_read_buffer_default;
1594
1595 if (!target->type->write_buffer)
1596 target->type->write_buffer = target_write_buffer_default;
1597
1598 if (!target->type->get_gdb_fileio_info)
1599 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1600
1601 if (!target->type->gdb_fileio_end)
1602 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1603
1604 if (!target->type->profiling)
1605 target->type->profiling = target_profiling_default;
1606
1607 return ERROR_OK;
1608 }
1609
1610 static int target_init(struct command_context *cmd_ctx)
1611 {
1612 struct target *target;
1613 int retval;
1614
1615 for (target = all_targets; target; target = target->next) {
1616 retval = target_init_one(cmd_ctx, target);
1617 if (retval != ERROR_OK)
1618 return retval;
1619 }
1620
1621 if (!all_targets)
1622 return ERROR_OK;
1623
1624 retval = target_register_user_commands(cmd_ctx);
1625 if (retval != ERROR_OK)
1626 return retval;
1627
1628 retval = target_register_timer_callback(&handle_target,
1629 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1630 if (retval != ERROR_OK)
1631 return retval;
1632
1633 return ERROR_OK;
1634 }
1635
1636 COMMAND_HANDLER(handle_target_init_command)
1637 {
1638 int retval;
1639
1640 if (CMD_ARGC != 0)
1641 return ERROR_COMMAND_SYNTAX_ERROR;
1642
1643 static bool target_initialized;
1644 if (target_initialized) {
1645 LOG_INFO("'target init' has already been called");
1646 return ERROR_OK;
1647 }
1648 target_initialized = true;
1649
1650 retval = command_run_line(CMD_CTX, "init_targets");
1651 if (retval != ERROR_OK)
1652 return retval;
1653
1654 retval = command_run_line(CMD_CTX, "init_target_events");
1655 if (retval != ERROR_OK)
1656 return retval;
1657
1658 retval = command_run_line(CMD_CTX, "init_board");
1659 if (retval != ERROR_OK)
1660 return retval;
1661
1662 LOG_DEBUG("Initializing targets...");
1663 return target_init(CMD_CTX);
1664 }
1665
1666 int target_register_event_callback(int (*callback)(struct target *target,
1667 enum target_event event, void *priv), void *priv)
1668 {
1669 struct target_event_callback **callbacks_p = &target_event_callbacks;
1670
1671 if (!callback)
1672 return ERROR_COMMAND_SYNTAX_ERROR;
1673
1674 if (*callbacks_p) {
1675 while ((*callbacks_p)->next)
1676 callbacks_p = &((*callbacks_p)->next);
1677 callbacks_p = &((*callbacks_p)->next);
1678 }
1679
1680 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1681 (*callbacks_p)->callback = callback;
1682 (*callbacks_p)->priv = priv;
1683 (*callbacks_p)->next = NULL;
1684
1685 return ERROR_OK;
1686 }
1687
1688 int target_register_reset_callback(int (*callback)(struct target *target,
1689 enum target_reset_mode reset_mode, void *priv), void *priv)
1690 {
1691 struct target_reset_callback *entry;
1692
1693 if (!callback)
1694 return ERROR_COMMAND_SYNTAX_ERROR;
1695
1696 entry = malloc(sizeof(struct target_reset_callback));
1697 if (!entry) {
1698 LOG_ERROR("error allocating buffer for reset callback entry");
1699 return ERROR_COMMAND_SYNTAX_ERROR;
1700 }
1701
1702 entry->callback = callback;
1703 entry->priv = priv;
1704 list_add(&entry->list, &target_reset_callback_list);
1705
1706
1707 return ERROR_OK;
1708 }
1709
1710 int target_register_trace_callback(int (*callback)(struct target *target,
1711 size_t len, uint8_t *data, void *priv), void *priv)
1712 {
1713 struct target_trace_callback *entry;
1714
1715 if (!callback)
1716 return ERROR_COMMAND_SYNTAX_ERROR;
1717
1718 entry = malloc(sizeof(struct target_trace_callback));
	if (!entry) {
		LOG_ERROR("error allocating buffer for trace callback entry");
		return ERROR_FAIL;
	}
1723
1724 entry->callback = callback;
1725 entry->priv = priv;
1726 list_add(&entry->list, &target_trace_callback_list);
1727
1729 return ERROR_OK;
1730 }
1731
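/* Register a timer callback. A TARGET_TIMER_TYPE_PERIODIC callback is
 * re-armed every time_ms milliseconds; a one-shot callback unregisters
 * itself after it has fired. The 'when' deadline also feeds the global
 * target_timer_next_event_value bookkeeping used by the event loop. */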
1732 int target_register_timer_callback(int (*callback)(void *priv),
1733 unsigned int time_ms, enum target_timer_type type, void *priv)
1734 {
1735 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1736
1737 if (!callback)
1738 return ERROR_COMMAND_SYNTAX_ERROR;
1739
	/* walk to the end of the list */
	while (*callbacks_p)
		callbacks_p = &((*callbacks_p)->next);
1745
	(*callbacks_p) = malloc(sizeof(struct target_timer_callback));
	if (!*callbacks_p)
		return ERROR_FAIL;
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->type = type;
	(*callbacks_p)->time_ms = time_ms;
	(*callbacks_p)->removed = false;
1751
1752 (*callbacks_p)->when = timeval_ms() + time_ms;
1753 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1754
1755 (*callbacks_p)->priv = priv;
1756 (*callbacks_p)->next = NULL;
1757
1758 return ERROR_OK;
1759 }
1760
1761 int target_unregister_event_callback(int (*callback)(struct target *target,
1762 enum target_event event, void *priv), void *priv)
1763 {
1764 struct target_event_callback **p = &target_event_callbacks;
1765 struct target_event_callback *c = target_event_callbacks;
1766
1767 if (!callback)
1768 return ERROR_COMMAND_SYNTAX_ERROR;
1769
1770 while (c) {
1771 struct target_event_callback *next = c->next;
1772 if ((c->callback == callback) && (c->priv == priv)) {
1773 *p = next;
1774 free(c);
1775 return ERROR_OK;
1776 } else
1777 p = &(c->next);
1778 c = next;
1779 }
1780
1781 return ERROR_OK;
1782 }
1783
1784 int target_unregister_reset_callback(int (*callback)(struct target *target,
1785 enum target_reset_mode reset_mode, void *priv), void *priv)
1786 {
1787 struct target_reset_callback *entry;
1788
1789 if (!callback)
1790 return ERROR_COMMAND_SYNTAX_ERROR;
1791
1792 list_for_each_entry(entry, &target_reset_callback_list, list) {
1793 if (entry->callback == callback && entry->priv == priv) {
1794 list_del(&entry->list);
1795 free(entry);
1796 break;
1797 }
1798 }
1799
1800 return ERROR_OK;
1801 }
1802
1803 int target_unregister_trace_callback(int (*callback)(struct target *target,
1804 size_t len, uint8_t *data, void *priv), void *priv)
1805 {
1806 struct target_trace_callback *entry;
1807
1808 if (!callback)
1809 return ERROR_COMMAND_SYNTAX_ERROR;
1810
1811 list_for_each_entry(entry, &target_trace_callback_list, list) {
1812 if (entry->callback == callback && entry->priv == priv) {
1813 list_del(&entry->list);
1814 free(entry);
1815 break;
1816 }
1817 }
1818
1819 return ERROR_OK;
1820 }
1821
1822 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1823 {
1824 if (!callback)
1825 return ERROR_COMMAND_SYNTAX_ERROR;
1826
1827 for (struct target_timer_callback *c = target_timer_callbacks;
1828 c; c = c->next) {
1829 if ((c->callback == callback) && (c->priv == priv)) {
1830 c->removed = true;
1831 return ERROR_OK;
1832 }
1833 }
1834
1835 return ERROR_FAIL;
1836 }
1837
1838 int target_call_event_callbacks(struct target *target, enum target_event event)
1839 {
1840 struct target_event_callback *callback = target_event_callbacks;
1841 struct target_event_callback *next_callback;
1842
1843 if (event == TARGET_EVENT_HALTED) {
		/* dispatch the GDB halt notification before the generic halted event */
1845 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1846 }
1847
1848 LOG_DEBUG("target event %i (%s) for core %s", event,
1849 target_event_name(event),
1850 target_name(target));
1851
1852 target_handle_event(target, event);
1853
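	/* Fetch the next pointer before invoking each callback, so a
	 * callback may safely unregister (and free) its own list entry. */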
1854 while (callback) {
1855 next_callback = callback->next;
1856 callback->callback(target, event, callback->priv);
1857 callback = next_callback;
1858 }
1859
1860 return ERROR_OK;
1861 }
1862
1863 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1864 {
1865 struct target_reset_callback *callback;
1866
1867 LOG_DEBUG("target reset %i (%s)", reset_mode,
1868 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1869
1870 list_for_each_entry(callback, &target_reset_callback_list, list)
1871 callback->callback(target, reset_mode, callback->priv);
1872
1873 return ERROR_OK;
1874 }
1875
1876 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1877 {
1878 struct target_trace_callback *callback;
1879
1880 list_for_each_entry(callback, &target_trace_callback_list, list)
1881 callback->callback(target, len, data, callback->priv);
1882
1883 return ERROR_OK;
1884 }
1885
1886 static int target_timer_callback_periodic_restart(
1887 struct target_timer_callback *cb, int64_t *now)
1888 {
1889 cb->when = *now + cb->time_ms;
1890 return ERROR_OK;
1891 }
1892
1893 static int target_call_timer_callback(struct target_timer_callback *cb,
1894 int64_t *now)
1895 {
1896 cb->callback(cb->priv);
1897
1898 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1899 return target_timer_callback_periodic_restart(cb, now);
1900
1901 return target_unregister_timer_callback(cb->callback, cb->priv);
1902 }
1903
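/* Walk the timer callback list once: reap entries flagged as removed,
 * fire callbacks that are due (or all periodic ones when checktime is 0),
 * and recompute the earliest pending deadline. */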
1904 static int target_call_timer_callbacks_check_time(int checktime)
1905 {
1906 static bool callback_processing;
1907
1908 /* Do not allow nesting */
1909 if (callback_processing)
1910 return ERROR_OK;
1911
1912 callback_processing = true;
1913
1914 keep_alive();
1915
1916 int64_t now = timeval_ms();
1917
	/* Initialize to a default value well into the future. The loop below
	 * will pull it closer to now if there are callbacks that want to be
	 * called sooner. */
1921 target_timer_next_event_value = now + 1000;
1922
1923 /* Store an address of the place containing a pointer to the
1924 * next item; initially, that's a standalone "root of the
1925 * list" variable. */
1926 struct target_timer_callback **callback = &target_timer_callbacks;
1927 while (callback && *callback) {
1928 if ((*callback)->removed) {
1929 struct target_timer_callback *p = *callback;
1930 *callback = (*callback)->next;
1931 free(p);
1932 continue;
1933 }
1934
1935 bool call_it = (*callback)->callback &&
1936 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1937 now >= (*callback)->when);
1938
1939 if (call_it)
1940 target_call_timer_callback(*callback, &now);
1941
1942 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1943 target_timer_next_event_value = (*callback)->when;
1944
1945 callback = &(*callback)->next;
1946 }
1947
1948 callback_processing = false;
1949 return ERROR_OK;
1950 }
1951
int target_call_timer_callbacks(void)
1953 {
1954 return target_call_timer_callbacks_check_time(1);
1955 }
1956
1957 /* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now(void)
1959 {
1960 return target_call_timer_callbacks_check_time(0);
1961 }
1962
1963 int64_t target_timer_next_event(void)
1964 {
1965 return target_timer_next_event_value;
1966 }
1967
1968 /* Prints the working area layout for debug purposes */
1969 static void print_wa_layout(struct target *target)
1970 {
1971 struct working_area *c = target->working_areas;
1972
1973 while (c) {
1974 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1975 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1976 c->address, c->address + c->size - 1, c->size);
1977 c = c->next;
1978 }
1979 }
1980
1981 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1982 static void target_split_working_area(struct working_area *area, uint32_t size)
1983 {
1984 assert(area->free); /* Shouldn't split an allocated area */
1985 assert(size <= area->size); /* Caller should guarantee this */
1986
1987 /* Split only if not already the right size */
1988 if (size < area->size) {
1989 struct working_area *new_wa = malloc(sizeof(*new_wa));
1990
1991 if (!new_wa)
1992 return;
1993
1994 new_wa->next = area->next;
1995 new_wa->size = area->size - size;
1996 new_wa->address = area->address + size;
1997 new_wa->backup = NULL;
1998 new_wa->user = NULL;
1999 new_wa->free = true;
2000
2001 area->next = new_wa;
2002 area->size = size;
2003
2004 /* If backup memory was allocated to this area, it has the wrong size
2005 * now so free it and it will be reallocated if/when needed */
2006 free(area->backup);
2007 area->backup = NULL;
2008 }
2009 }
2010
2011 /* Merge all adjacent free areas into one */
2012 static void target_merge_working_areas(struct target *target)
2013 {
2014 struct working_area *c = target->working_areas;
2015
2016 while (c && c->next) {
2017 assert(c->next->address == c->address + c->size); /* This is an invariant */
2018
2019 /* Find two adjacent free areas */
2020 if (c->free && c->next->free) {
2021 /* Merge the last into the first */
2022 c->size += c->next->size;
2023
2024 /* Remove the last */
2025 struct working_area *to_be_freed = c->next;
2026 c->next = c->next->next;
2027 free(to_be_freed->backup);
2028 free(to_be_freed);
2029
			/* If backup memory was allocated to the remaining area, it has
			 * the wrong size now */
2032 free(c->backup);
2033 c->backup = NULL;
2034 } else {
2035 c = c->next;
2036 }
2037 }
2038 }
2039
2040 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2041 {
	/* Reevaluate the working area address based on the MMU state */
2043 if (!target->working_areas) {
2044 int retval;
2045 int enabled;
2046
2047 retval = target->type->mmu(target, &enabled);
2048 if (retval != ERROR_OK)
2049 return retval;
2050
2051 if (!enabled) {
2052 if (target->working_area_phys_spec) {
2053 LOG_DEBUG("MMU disabled, using physical "
2054 "address for working memory " TARGET_ADDR_FMT,
2055 target->working_area_phys);
2056 target->working_area = target->working_area_phys;
2057 } else {
2058 LOG_ERROR("No working memory available. "
2059 "Specify -work-area-phys to target.");
2060 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2061 }
2062 } else {
2063 if (target->working_area_virt_spec) {
2064 LOG_DEBUG("MMU enabled, using virtual "
2065 "address for working memory " TARGET_ADDR_FMT,
2066 target->working_area_virt);
2067 target->working_area = target->working_area_virt;
2068 } else {
2069 LOG_ERROR("No working memory available. "
2070 "Specify -work-area-virt to target.");
2071 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2072 }
2073 }
2074
2075 /* Set up initial working area on first call */
2076 struct working_area *new_wa = malloc(sizeof(*new_wa));
2077 if (new_wa) {
2078 new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* align down to 4 bytes */
2080 new_wa->address = target->working_area;
2081 new_wa->backup = NULL;
2082 new_wa->user = NULL;
2083 new_wa->free = true;
2084 }
2085
2086 target->working_areas = new_wa;
2087 }
2088
	/* only allocate in multiples of 4 bytes */
	size = ALIGN_UP(size, 4);
2092
2093 struct working_area *c = target->working_areas;
2094
2095 /* Find the first large enough working area */
2096 while (c) {
2097 if (c->free && c->size >= size)
2098 break;
2099 c = c->next;
2100 }
2101
2102 if (!c)
2103 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2104
2105 /* Split the working area into the requested size */
2106 target_split_working_area(c, size);
2107
2108 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2109 size, c->address);
2110
2111 if (target->backup_working_area) {
2112 if (!c->backup) {
2113 c->backup = malloc(c->size);
2114 if (!c->backup)
2115 return ERROR_FAIL;
2116 }
2117
2118 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2119 if (retval != ERROR_OK)
2120 return retval;
2121 }
2122
2123 /* mark as used, and return the new (reused) area */
2124 c->free = false;
2125 *area = c;
2126
	/* remember the caller's pointer so it can be invalidated on free */
2128 c->user = area;
2129
2130 print_wa_layout(target);
2131
2132 return ERROR_OK;
2133 }
2134
2135 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2136 {
2137 int retval;
2138
2139 retval = target_alloc_working_area_try(target, size, area);
2140 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2141 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2142 return retval;
2144 }
2145
2146 static int target_restore_working_area(struct target *target, struct working_area *area)
2147 {
2148 int retval = ERROR_OK;
2149
2150 if (target->backup_working_area && area->backup) {
2151 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2152 if (retval != ERROR_OK)
2153 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2154 area->size, area->address);
2155 }
2156
2157 return retval;
2158 }
2159
2160 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2161 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2162 {
2163 if (!area || area->free)
2164 return ERROR_OK;
2165
2166 int retval = ERROR_OK;
2167 if (restore) {
2168 retval = target_restore_working_area(target, area);
2169 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2170 if (retval != ERROR_OK)
2171 return retval;
2172 }
2173
2174 area->free = true;
2175
2176 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2177 area->size, area->address);
2178
2179 /* mark user pointer invalid */
2180 /* TODO: Is this really safe? It points to some previous caller's memory.
2181 * How could we know that the area pointer is still in that place and not
2182 * some other vital data? What's the purpose of this, anyway? */
2183 *area->user = NULL;
2184 area->user = NULL;
2185
2186 target_merge_working_areas(target);
2187
2188 print_wa_layout(target);
2189
2190 return retval;
2191 }
2192
2193 int target_free_working_area(struct target *target, struct working_area *area)
2194 {
2195 return target_free_working_area_restore(target, area, 1);
2196 }
2197
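/* Typical usage sketch of the allocator above (names illustrative only):
 * allocate a scratch area on the target, run an algorithm out of it, then
 * free it so any backed-up memory is restored:
 *
 *   struct working_area *wa = NULL;
 *   if (target_alloc_working_area(target, 1024, &wa) != ERROR_OK)
 *           return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *   ...download and run code at wa->address...
 *   target_free_working_area(target, wa);
 */
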
/* Free resources and restore memory; if restoring memory fails,
 * free the resources anyway.
 */
2201 static void target_free_all_working_areas_restore(struct target *target, int restore)
2202 {
2203 struct working_area *c = target->working_areas;
2204
2205 LOG_DEBUG("freeing all working areas");
2206
2207 /* Loop through all areas, restoring the allocated ones and marking them as free */
2208 while (c) {
2209 if (!c->free) {
2210 if (restore)
2211 target_restore_working_area(target, c);
2212 c->free = true;
2213 *c->user = NULL; /* Same as above */
2214 c->user = NULL;
2215 }
2216 c = c->next;
2217 }
2218
2219 /* Run a merge pass to combine all areas into one */
2220 target_merge_working_areas(target);
2221
2222 print_wa_layout(target);
2223 }
2224
2225 void target_free_all_working_areas(struct target *target)
2226 {
2227 target_free_all_working_areas_restore(target, 1);
2228
2229 /* Now we have none or only one working area marked as free */
2230 if (target->working_areas) {
2231 /* Free the last one to allow on-the-fly moving and resizing */
2232 free(target->working_areas->backup);
2233 free(target->working_areas);
2234 target->working_areas = NULL;
2235 }
2236 }
2237
2238 /* Find the largest number of bytes that can be allocated */
2239 uint32_t target_get_working_area_avail(struct target *target)
2240 {
2241 struct working_area *c = target->working_areas;
2242 uint32_t max_size = 0;
2243
2244 if (!c)
2245 return target->working_area_size;
2246
2247 while (c) {
2248 if (c->free && max_size < c->size)
2249 max_size = c->size;
2250
2251 c = c->next;
2252 }
2253
2254 return max_size;
2255 }
2256
2257 static void target_destroy(struct target *target)
2258 {
2259 if (target->type->deinit_target)
2260 target->type->deinit_target(target);
2261
2262 if (target->semihosting)
2263 free(target->semihosting->basedir);
2264 free(target->semihosting);
2265
2266 jtag_unregister_event_callback(jtag_enable_callback, target);
2267
2268 struct target_event_action *teap = target->event_action;
2269 while (teap) {
2270 struct target_event_action *next = teap->next;
2271 Jim_DecrRefCount(teap->interp, teap->body);
2272 free(teap);
2273 teap = next;
2274 }
2275
2276 target_free_all_working_areas(target);
2277
	/* release the target's SMP list */
2279 if (target->smp) {
2280 struct target_list *head, *tmp;
2281
2282 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2283 list_del(&head->lh);
2284 head->target->smp = 0;
2285 free(head);
2286 }
2287 if (target->smp_targets != &empty_smp_targets)
2288 free(target->smp_targets);
2289 target->smp = 0;
2290 }
2291
2292 rtos_destroy(target);
2293
2294 free(target->gdb_port_override);
2295 free(target->type);
2296 free(target->trace_info);
2297 free(target->fileio_info);
2298 free(target->cmd_name);
2299 free(target);
2300 }
2301
2302 void target_quit(void)
2303 {
2304 struct target_event_callback *pe = target_event_callbacks;
2305 while (pe) {
2306 struct target_event_callback *t = pe->next;
2307 free(pe);
2308 pe = t;
2309 }
2310 target_event_callbacks = NULL;
2311
2312 struct target_timer_callback *pt = target_timer_callbacks;
2313 while (pt) {
2314 struct target_timer_callback *t = pt->next;
2315 free(pt);
2316 pt = t;
2317 }
2318 target_timer_callbacks = NULL;
2319
2320 for (struct target *target = all_targets; target;) {
2321 struct target *tmp;
2322
2323 tmp = target->next;
2324 target_destroy(target);
2325 target = tmp;
2326 }
2327
2328 all_targets = NULL;
2329 }
2330
2331 int target_arch_state(struct target *target)
2332 {
2333 int retval;
2334 if (!target) {
2335 LOG_WARNING("No target has been configured");
2336 return ERROR_OK;
2337 }
2338
2339 if (target->state != TARGET_HALTED)
2340 return ERROR_OK;
2341
2342 retval = target->type->arch_state(target);
2343 return retval;
2344 }
2345
2346 static int target_get_gdb_fileio_info_default(struct target *target,
2347 struct gdb_fileio_info *fileio_info)
2348 {
	/* If the target does not support semihosting functions, it has no
	 * need to provide a .get_gdb_fileio_info callback. Returning
	 * ERROR_FAIL makes gdb_server report "Txx" (target halted) every
	 * time. */
2353 return ERROR_FAIL;
2354 }
2355
2356 static int target_gdb_fileio_end_default(struct target *target,
2357 int retcode, int fileio_errno, bool ctrl_c)
2358 {
2359 return ERROR_OK;
2360 }
2361
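/* Naive PC-sampling profiler: repeatedly halt the target, record the
 * cached pc register value, and resume. Sampling stops once 'seconds'
 * have elapsed or 'max_num_samples' entries have been collected. */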
2362 int target_profiling_default(struct target *target, uint32_t *samples,
2363 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2364 {
2365 struct timeval timeout, now;
2366
2367 gettimeofday(&timeout, NULL);
2368 timeval_add_time(&timeout, seconds, 0);
2369
2370 LOG_INFO("Starting profiling. Halting and resuming the"
2371 " target as often as we can...");
2372
2373 uint32_t sample_count = 0;
2374 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
	struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
	if (!reg) {
		LOG_ERROR("Cannot find the pc register in the target's register cache");
		return ERROR_FAIL;
	}

2377 int retval = ERROR_OK;
2378 for (;;) {
2379 target_poll(target);
2380 if (target->state == TARGET_HALTED) {
2381 uint32_t t = buf_get_u32(reg->value, 0, 32);
2382 samples[sample_count++] = t;
2383 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2384 retval = target_resume(target, 1, 0, 0, 0);
2385 target_poll(target);
2386 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2387 } else if (target->state == TARGET_RUNNING) {
2388 /* We want to quickly sample the PC. */
2389 retval = target_halt(target);
2390 } else {
2391 LOG_INFO("Target not halted or running");
2392 retval = ERROR_OK;
2393 break;
2394 }
2395
2396 if (retval != ERROR_OK)
2397 break;
2398
2399 gettimeofday(&now, NULL);
2400 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2401 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2402 break;
2403 }
2404 }
2405
2406 *num_samples = sample_count;
2407 return retval;
2408 }
2409
/* A single aligned word is guaranteed to use a matching 16- or 32-bit
 * access; otherwise the data is transferred using the largest access
 * sizes possible.
 */
2414 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2415 {
2416 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2417 size, address);
2418
2419 if (!target_was_examined(target)) {
2420 LOG_ERROR("Target not examined yet");
2421 return ERROR_FAIL;
2422 }
2423
2424 if (size == 0)
2425 return ERROR_OK;
2426
2427 if ((address + size - 1) < address) {
2428 /* GDB can request this when e.g. PC is 0xfffffffc */
2429 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2430 address,
2431 size);
2432 return ERROR_FAIL;
2433 }
2434
2435 return target->type->write_buffer(target, address, size, buffer);
2436 }
2437
2438 static int target_write_buffer_default(struct target *target,
2439 target_addr_t address, uint32_t count, const uint8_t *buffer)
2440 {
2441 uint32_t size;
2442 unsigned int data_bytes = target_data_bits(target) / 8;
2443
	/* Align the address up to the largest access size. The loop condition
	 * makes sure the next pass will have something to do with the size we
	 * leave to it. */
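	/* Example: a 7-byte write to address 1 on a 32-bit target is issued
	 * as one 1-byte, one 2-byte and then one 4-byte access. */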
2446 for (size = 1;
2447 size < data_bytes && count >= size * 2 + (address & size);
2448 size *= 2) {
2449 if (address & size) {
2450 int retval = target_write_memory(target, address, size, 1, buffer);
2451 if (retval != ERROR_OK)
2452 return retval;
2453 address += size;
2454 count -= size;
2455 buffer += size;
2456 }
2457 }
2458
	/* Write the remaining data using the largest access sizes possible. */
2460 for (; size > 0; size /= 2) {
2461 uint32_t aligned = count - count % size;
2462 if (aligned > 0) {
2463 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2464 if (retval != ERROR_OK)
2465 return retval;
2466 address += aligned;
2467 count -= aligned;
2468 buffer += aligned;
2469 }
2470 }
2471
2472 return ERROR_OK;
2473 }
2474
/* A single aligned word is guaranteed to use a matching 16- or 32-bit
 * access; otherwise the data is transferred using the largest access
 * sizes possible.
 */
2479 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2480 {
2481 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2482 size, address);
2483
2484 if (!target_was_examined(target)) {
2485 LOG_ERROR("Target not examined yet");
2486 return ERROR_FAIL;
2487 }
2488
2489 if (size == 0)
2490 return ERROR_OK;
2491
2492 if ((address + size - 1) < address) {
2493 /* GDB can request this when e.g. PC is 0xfffffffc */
2494 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2495 address,
2496 size);
2497 return ERROR_FAIL;
2498 }
2499
2500 return target->type->read_buffer(target, address, size, buffer);
2501 }
2502
2503 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2504 {
2505 uint32_t size;
2506 unsigned int data_bytes = target_data_bits(target) / 8;
2507
	/* Align the address up to the largest access size. The loop condition
	 * makes sure the next pass will have something to do with the size we
	 * leave to it. */
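	/* Example: a 7-byte read from address 1 on a 32-bit target becomes
	 * one 1-byte, one 2-byte and then one 4-byte access, mirroring the
	 * write path above. */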
2510 for (size = 1;
2511 size < data_bytes && count >= size * 2 + (address & size);
2512 size *= 2) {
2513 if (address & size) {
2514 int retval = target_read_memory(target, address, size, 1, buffer);
2515 if (retval != ERROR_OK)
2516 return retval;
2517 address += size;
2518 count -= size;
2519 buffer += size;
2520 }
2521 }
2522
	/* Read the remaining data using the largest access sizes possible. */
2524 for (; size > 0; size /= 2) {
2525 uint32_t aligned = count - count % size;
2526 if (aligned > 0) {
2527 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2528 if (retval != ERROR_OK)
2529 return retval;
2530 address += aligned;
2531 count -= aligned;
2532 buffer += aligned;
2533 }
2534 }
2535
2536 return ERROR_OK;
2537 }
2538
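/* Use the target's checksum_memory implementation (typically a checksum
 * algorithm executed on the target itself); if that fails, fall back to
 * reading the region into a host-side buffer and checksumming it there. */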
2539 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2540 {
2541 uint8_t *buffer;
2542 int retval;
2543 uint32_t i;
2544 uint32_t checksum = 0;
2545 if (!target_was_examined(target)) {
2546 LOG_ERROR("Target not examined yet");
2547 return ERROR_FAIL;
2548 }
2549 if (!target->type->checksum_memory) {
2550 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2551 return ERROR_FAIL;
2552 }
2553
2554 retval = target->type->checksum_memory(target, address, size, &checksum);
2555 if (retval != ERROR_OK) {
2556 buffer = malloc(size);
2557 if (!buffer) {
2558 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_FAIL;
2560 }
2561 retval = target_read_buffer(target, address, size, buffer);
2562 if (retval != ERROR_OK) {
2563 free(buffer);
2564 return retval;
2565 }
2566
2567 /* convert to target endianness */
2568 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2569 uint32_t target_data;
2570 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2571 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2572 }
2573
2574 retval = image_calculate_checksum(buffer, size, &checksum);
2575 free(buffer);
2576 }
2577
2578 *crc = checksum;
2579
2580 return retval;
2581 }
2582
2583 int target_blank_check_memory(struct target *target,
2584 struct target_memory_check_block *blocks, int num_blocks,
2585 uint8_t erased_value)
2586 {
2587 if (!target_was_examined(target)) {
2588 LOG_ERROR("Target not examined yet");
2589 return ERROR_FAIL;
2590 }
2591
2592 if (!target->type->blank_check_memory)
2593 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2594
2595 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2596 }
2597
2598 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2599 {
2600 uint8_t value_buf[8];
2601 if (!target_was_examined(target)) {
2602 LOG_ERROR("Target not examined yet");
2603 return ERROR_FAIL;
2604 }
2605
2606 int retval = target_read_memory(target, address, 8, 1, value_buf);
2607
2608 if (retval == ERROR_OK) {
2609 *value = target_buffer_get_u64(target, value_buf);
2610 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2611 address,
2612 *value);
2613 } else {
2614 *value = 0x0;
2615 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2616 address);
2617 }
2618
2619 return retval;
2620 }
2621
2622 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2623 {
2624 uint8_t value_buf[4];
2625 if (!target_was_examined(target)) {
2626 LOG_ERROR("Target not examined yet");
2627 return ERROR_FAIL;
2628 }
2629
2630 int retval = target_read_memory(target, address, 4, 1, value_buf);
2631
2632 if (retval == ERROR_OK) {
2633 *value = target_buffer_get_u32(target, value_buf);
2634 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2635 address,
2636 *value);
2637 } else {
2638 *value = 0x0;
2639 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2640 address);
2641 }
2642
2643 return retval;
2644 }
2645
2646 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2647 {
2648 uint8_t value_buf[2];
2649 if (!target_was_examined(target)) {
2650 LOG_ERROR("Target not examined yet");
2651 return ERROR_FAIL;
2652 }
2653
2654 int retval = target_read_memory(target, address, 2, 1, value_buf);
2655
2656 if (retval == ERROR_OK) {
2657 *value = target_buffer_get_u16(target, value_buf);
2658 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2659 address,
2660 *value);
2661 } else {
2662 *value = 0x0;
2663 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2664 address);
2665 }
2666
2667 return retval;
2668 }
2669
2670 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2671 {
2672 if (!target_was_examined(target)) {
2673 LOG_ERROR("Target not examined yet");
2674 return ERROR_FAIL;
2675 }
2676
2677 int retval = target_read_memory(target, address, 1, 1, value);
2678
2679 if (retval == ERROR_OK) {
2680 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2681 address,
2682 *value);
2683 } else {
2684 *value = 0x0;
2685 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2686 address);
2687 }
2688
2689 return retval;
2690 }
2691
2692 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2693 {
2694 int retval;
2695 uint8_t value_buf[8];
2696 if (!target_was_examined(target)) {
2697 LOG_ERROR("Target not examined yet");
2698 return ERROR_FAIL;
2699 }
2700
2701 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2702 address,
2703 value);
2704
2705 target_buffer_set_u64(target, value_buf, value);
2706 retval = target_write_memory(target, address, 8, 1, value_buf);
2707 if (retval != ERROR_OK)
2708 LOG_DEBUG("failed: %i", retval);
2709
2710 return retval;
2711 }
2712
2713 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2714 {
2715 int retval;
2716 uint8_t value_buf[4];
2717 if (!target_was_examined(target)) {
2718 LOG_ERROR("Target not examined yet");
2719 return ERROR_FAIL;
2720 }
2721
2722 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2723 address,
2724 value);
2725
2726 target_buffer_set_u32(target, value_buf, value);
2727 retval = target_write_memory(target, address, 4, 1, value_buf);
2728 if (retval != ERROR_OK)
2729 LOG_DEBUG("failed: %i", retval);
2730
2731 return retval;
2732 }
2733
2734 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2735 {
2736 int retval;
2737 uint8_t value_buf[2];
2738 if (!target_was_examined(target)) {
2739 LOG_ERROR("Target not examined yet");
2740 return ERROR_FAIL;
2741 }
2742
2743 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2744 address,
2745 value);
2746
2747 target_buffer_set_u16(target, value_buf, value);
2748 retval = target_write_memory(target, address, 2, 1, value_buf);
2749 if (retval != ERROR_OK)
2750 LOG_DEBUG("failed: %i", retval);
2751
2752 return retval;
2753 }
2754
2755 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2756 {
2757 int retval;
2758 if (!target_was_examined(target)) {
2759 LOG_ERROR("Target not examined yet");
2760 return ERROR_FAIL;
2761 }
2762
2763 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2764 address, value);
2765
2766 retval = target_write_memory(target, address, 1, 1, &value);
2767 if (retval != ERROR_OK)
2768 LOG_DEBUG("failed: %i", retval);
2769
2770 return retval;
2771 }
2772
2773 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2774 {
2775 int retval;
2776 uint8_t value_buf[8];
2777 if (!target_was_examined(target)) {
2778 LOG_ERROR("Target not examined yet");
2779 return ERROR_FAIL;
2780 }
2781
2782 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2783 address,
2784 value);
2785
2786 target_buffer_set_u64(target, value_buf, value);
2787 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2788 if (retval != ERROR_OK)
2789 LOG_DEBUG("failed: %i", retval);
2790
2791 return retval;
2792 }
2793
2794 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2795 {
2796 int retval;
2797 uint8_t value_buf[4];
2798 if (!target_was_examined(target)) {
2799 LOG_ERROR("Target not examined yet");
2800 return ERROR_FAIL;
2801 }
2802
2803 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2804 address,
2805 value);
2806
2807 target_buffer_set_u32(target, value_buf, value);
2808 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2809 if (retval != ERROR_OK)
2810 LOG_DEBUG("failed: %i", retval);
2811
2812 return retval;
2813 }
2814
2815 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2816 {
2817 int retval;
2818 uint8_t value_buf[2];
2819 if (!target_was_examined(target)) {
2820 LOG_ERROR("Target not examined yet");
2821 return ERROR_FAIL;
2822 }
2823
2824 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2825 address,
2826 value);
2827
2828 target_buffer_set_u16(target, value_buf, value);
2829 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2830 if (retval != ERROR_OK)
2831 LOG_DEBUG("failed: %i", retval);
2832
2833 return retval;
2834 }
2835
2836 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2837 {
2838 int retval;
2839 if (!target_was_examined(target)) {
2840 LOG_ERROR("Target not examined yet");
2841 return ERROR_FAIL;
2842 }
2843
2844 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2845 address, value);
2846
2847 retval = target_write_phys_memory(target, address, 1, 1, &value);
2848 if (retval != ERROR_OK)
2849 LOG_DEBUG("failed: %i", retval);
2850
2851 return retval;
2852 }
2853
2854 static int find_target(struct command_invocation *cmd, const char *name)
2855 {
2856 struct target *target = get_target(name);
2857 if (!target) {
2858 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2859 return ERROR_FAIL;
2860 }
2861 if (!target->tap->enabled) {
2862 command_print(cmd, "Target: TAP %s is disabled, "
2863 "can't be the current target\n",
2864 target->tap->dotted_name);
2865 return ERROR_FAIL;
2866 }
2867
2868 cmd->ctx->current_target = target;
2869 if (cmd->ctx->current_target_override)
2870 cmd->ctx->current_target_override = target;
2871
2872 return ERROR_OK;
2873 }
2874
2876 COMMAND_HANDLER(handle_targets_command)
2877 {
2878 int retval = ERROR_OK;
2879 if (CMD_ARGC == 1) {
2880 retval = find_target(CMD, CMD_ARGV[0]);
2881 if (retval == ERROR_OK) {
2882 /* we're done! */
2883 return retval;
2884 }
2885 }
2886
2887 struct target *target = all_targets;
2888 command_print(CMD, " TargetName Type Endian TapName State ");
2889 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2890 while (target) {
2891 const char *state;
2892 char marker = ' ';
2893
2894 if (target->tap->enabled)
2895 state = target_state_name(target);
2896 else
2897 state = "tap-disabled";
2898
2899 if (CMD_CTX->current_target == target)
2900 marker = '*';
2901
2902 /* keep columns lined up to match the headers above */
2903 command_print(CMD,
2904 "%2d%c %-18s %-10s %-6s %-18s %s",
2905 target->target_number,
2906 marker,
2907 target_name(target),
2908 target_type_name(target),
2909 jim_nvp_value2name_simple(nvp_target_endian,
2910 target->endianness)->name,
2911 target->tap->dotted_name,
2912 state);
2913 target = target->next;
2914 }
2915
2916 return retval;
2917 }
2918
2919 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2920
2921 static int power_dropout;
2922 static int srst_asserted;
2923
2924 static int run_power_restore;
2925 static int run_power_dropout;
2926 static int run_srst_asserted;
2927 static int run_srst_deasserted;
2928
2929 static int sense_handler(void)
2930 {
2931 static int prev_srst_asserted;
2932 static int prev_power_dropout;
2933
2934 int retval = jtag_power_dropout(&power_dropout);
2935 if (retval != ERROR_OK)
2936 return retval;
2937
	int power_restored = prev_power_dropout && !power_dropout;
2940 if (power_restored)
2941 run_power_restore = 1;
2942
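	/* Debounce: report a power dropout at most once every 2000 ms. */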
2943 int64_t current = timeval_ms();
2944 static int64_t last_power;
2945 bool wait_more = last_power + 2000 > current;
2946 if (power_dropout && !wait_more) {
2947 run_power_dropout = 1;
2948 last_power = current;
2949 }
2950
2951 retval = jtag_srst_asserted(&srst_asserted);
2952 if (retval != ERROR_OK)
2953 return retval;
2954
	int srst_deasserted = prev_srst_asserted && !srst_asserted;
2957
2958 static int64_t last_srst;
2959 wait_more = last_srst + 2000 > current;
2960 if (srst_deasserted && !wait_more) {
2961 run_srst_deasserted = 1;
2962 last_srst = current;
2963 }
2964
2965 if (!prev_srst_asserted && srst_asserted)
2966 run_srst_asserted = 1;
2967
2968 prev_srst_asserted = srst_asserted;
2969 prev_power_dropout = power_dropout;
2970
2971 if (srst_deasserted || power_restored) {
2972 /* Other than logging the event we can't do anything here.
2973 * Issuing a reset is a particularly bad idea as we might
2974 * be inside a reset already.
2975 */
2976 }
2977
2978 return ERROR_OK;
2979 }
2980
2981 /* process target state changes */
2982 static int handle_target(void *priv)
2983 {
2984 Jim_Interp *interp = (Jim_Interp *)priv;
2985 int retval = ERROR_OK;
2986
2987 if (!is_jtag_poll_safe()) {
2988 /* polling is disabled currently */
2989 return ERROR_OK;
2990 }
2991
2992 /* we do not want to recurse here... */
2993 static int recursive;
2994 if (!recursive) {
2995 recursive = 1;
2996 sense_handler();
2997 /* danger! running these procedures can trigger srst assertions and power dropouts.
2998 * We need to avoid an infinite loop/recursion here and we do that by
2999 * clearing the flags after running these events.
3000 */
3001 int did_something = 0;
3002 if (run_srst_asserted) {
3003 LOG_INFO("srst asserted detected, running srst_asserted proc.");
3004 Jim_Eval(interp, "srst_asserted");
3005 did_something = 1;
3006 }
3007 if (run_srst_deasserted) {
3008 Jim_Eval(interp, "srst_deasserted");
3009 did_something = 1;
3010 }
3011 if (run_power_dropout) {
3012 LOG_INFO("Power dropout detected, running power_dropout proc.");
3013 Jim_Eval(interp, "power_dropout");
3014 did_something = 1;
3015 }
3016 if (run_power_restore) {
3017 Jim_Eval(interp, "power_restore");
3018 did_something = 1;
3019 }
3020
3021 if (did_something) {
3022 /* clear detect flags */
3023 sense_handler();
3024 }
3025
3026 /* clear action flags */
3027
3028 run_srst_asserted = 0;
3029 run_srst_deasserted = 0;
3030 run_power_restore = 0;
3031 run_power_dropout = 0;
3032
3033 recursive = 0;
3034 }
3035
3036 /* Poll targets for state changes unless that's globally disabled.
3037 * Skip targets that are currently disabled.
3038 */
3039 for (struct target *target = all_targets;
3040 is_jtag_poll_safe() && target;
3041 target = target->next) {
3042
3043 if (!target_was_examined(target))
3044 continue;
3045
3046 if (!target->tap->enabled)
3047 continue;
3048
3049 if (target->backoff.times > target->backoff.count) {
3050 /* do not poll this time as we failed previously */
3051 target->backoff.count++;
3052 continue;
3053 }
3054 target->backoff.count = 0;
3055
3056 /* only poll target if we've got power and srst isn't asserted */
3057 if (!power_dropout && !srst_asserted) {
3058 /* polling may fail silently until the target has been examined */
3059 retval = target_poll(target);
3060 if (retval != ERROR_OK) {
				/* Polling failed: back off exponentially, capping the skipped
				 * time near 5000 ms (polling_interval is nominally 100 ms). */
3062 if (target->backoff.times * polling_interval < 5000) {
3063 target->backoff.times *= 2;
3064 target->backoff.times++;
3065 }
3066
3067 /* Tell GDB to halt the debugger. This allows the user to
3068 * run monitor commands to handle the situation.
3069 */
3070 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3071 }
3072 if (target->backoff.times > 0) {
3073 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3074 target_reset_examined(target);
3075 retval = target_examine_one(target);
3076 /* Target examination could have failed due to unstable connection,
3077 * but we set the examined flag anyway to repoll it later */
3078 if (retval != ERROR_OK) {
3079 target_set_examined(target);
3080 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3081 target->backoff.times * polling_interval);
3082 return retval;
3083 }
3084 }
3085
3086 /* Since we succeeded, we reset backoff count */
3087 target->backoff.times = 0;
3088 }
3089 }
3090
3091 return retval;
3092 }
3093
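/* 'reg' command: with no arguments, list every register of the current
 * target; with a register name or ordinal number, display that register
 * (append "force" to bypass the cached value); with a name or number plus
 * a value, write the register. */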
3094 COMMAND_HANDLER(handle_reg_command)
3095 {
3096 LOG_DEBUG("-");
3097
3098 struct target *target = get_current_target(CMD_CTX);
3099 struct reg *reg = NULL;
3100
3101 /* list all available registers for the current target */
3102 if (CMD_ARGC == 0) {
3103 struct reg_cache *cache = target->reg_cache;
3104
3105 unsigned int count = 0;
3106 while (cache) {
3107 unsigned i;
3108
3109 command_print(CMD, "===== %s", cache->name);
3110
3111 for (i = 0, reg = cache->reg_list;
3112 i < cache->num_regs;
3113 i++, reg++, count++) {
				if (!reg->exist || reg->hidden)
3115 continue;
3116 /* only print cached values if they are valid */
3117 if (reg->valid) {
3118 char *value = buf_to_hex_str(reg->value,
3119 reg->size);
3120 command_print(CMD,
3121 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3122 count, reg->name,
3123 reg->size, value,
3124 reg->dirty
3125 ? " (dirty)"
3126 : "");
3127 free(value);
3128 } else {
3129 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3130 count, reg->name,
3131 reg->size);
3132 }
3133 }
3134 cache = cache->next;
3135 }
3136
3137 return ERROR_OK;
3138 }
3139
3140 /* access a single register by its ordinal number */
3141 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3142 unsigned num;
3143 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3144
3145 struct reg_cache *cache = target->reg_cache;
3146 unsigned int count = 0;
3147 while (cache) {
3148 unsigned i;
3149 for (i = 0; i < cache->num_regs; i++) {
3150 if (count++ == num) {
3151 reg = &cache->reg_list[i];
3152 break;
3153 }
3154 }
3155 if (reg)
3156 break;
3157 cache = cache->next;
3158 }
3159
3160 if (!reg) {
3161 command_print(CMD, "%i is out of bounds, the current target "
3162 "has only %i registers (0 - %i)", num, count, count - 1);
3163 return ERROR_OK;
3164 }
3165 } else {
3166 /* access a single register by its name */
3167 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3168
3169 if (!reg)
3170 goto not_found;
3171 }
3172
3173 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3174
3175 if (!reg->exist)
3176 goto not_found;
3177
3178 /* display a register */
3179 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3180 && (CMD_ARGV[1][0] <= '9')))) {
3181 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = false;

		if (!reg->valid) {
3185 int retval = reg->type->get(reg);
3186 if (retval != ERROR_OK) {
3187 LOG_ERROR("Could not read register '%s'", reg->name);
3188 return retval;
3189 }
3190 }
3191 char *value = buf_to_hex_str(reg->value, reg->size);
3192 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3193 free(value);
3194 return ERROR_OK;
3195 }
3196
3197 /* set register value */
3198 if (CMD_ARGC == 2) {
3199 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3200 if (!buf)
3201 return ERROR_FAIL;
3202 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0