target: fix clang static analyzer warning
src/target/target.c
/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2007-2010 Øyvind Harboe                                 *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) 2008, Duane Ellis                                       *
 *   openocd@duaneellis.com                                                *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2008 by Rick Altherr                                    *
 *   kc8apf@kc8apf.net                                                     *
 *                                                                         *
 *   Copyright (C) 2011 by Broadcom Corporation                            *
 *   Evan Hunter - ehunter@broadcom.com                                    *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011                                     *
 *   michel.jaouen@stericsson.com : smp minimum support                    *
 *                                                                         *
 *   Copyright (C) 2011 Andreas Fritiofson                                 *
 *   andreas.fritiofson@gmail.com                                          *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 *   GNU General Public License for more details.                         *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/align.h>
#include <helper/time_support.h>
#include <jtag/jtag.h>
#include <flash/nor/core.h>

#include "target.h"
#include "target_type.h"
#include "target_request.h"
#include "breakpoints.h"
#include "register.h"
#include "trace.h"
#include "image.h"
#include "rtos/rtos.h"
#include "transport/transport.h"
#include "arm_cti.h"
#include "smp.h"

/* default halt wait timeout (ms) */
#define DEFAULT_HALT_TIMEOUT 5000

static int target_read_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info);
static int target_gdb_fileio_end_default(struct target *target, int retcode,
		int fileio_errno, bool ctrl_c);

/* targets */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type cortexm_target;
extern struct target_type cortexa_target;
extern struct target_type aarch64_target;
extern struct target_type cortexr4_target;
extern struct target_type arm11_target;
extern struct target_type ls1_sap_target;
extern struct target_type mips_m4k_target;
extern struct target_type mips_mips64_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type hla_target;
extern struct target_type nds32_v2_target;
extern struct target_type nds32_v3_target;
extern struct target_type nds32_v3m_target;
extern struct target_type or1k_target;
extern struct target_type quark_x10xx_target;
extern struct target_type quark_d20xx_target;
extern struct target_type stm8_target;
extern struct target_type riscv_target;
extern struct target_type mem_ap_target;
extern struct target_type esirisc_target;
extern struct target_type arcv2_target;

static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};

struct target *all_targets;
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
static LIST_HEAD(empty_smp_targets);

static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};

static const char *target_strerror_safe(int err)
{
	const struct jim_nvp *n;

	n = jim_nvp_value2name_simple(nvp_error_target, err);
	if (!n->name)
		return "unknown";
	else
		return n->name;
}

static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};

static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};

const char *debug_reason_name(struct target *t)
{
	const char *cp;

	cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
			t->debug_reason)->name;
	if (!cp) {
		LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

const char *target_state_name(struct target *t)
{
	const char *cp;
	cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
	if (!cp) {
		LOG_ERROR("Invalid target state: %d", (int)(t->state));
		cp = "(*BUG*unknown*BUG*)";
	}

	if (!target_was_examined(t) && t->defer_examine)
		cp = "examine deferred";

	return cp;
}

const char *target_event_name(enum target_event event)
{
	const char *cp;
	cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
	if (!cp) {
		LOG_ERROR("Invalid target event: %d", (int)(event));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

const char *target_reset_mode_name(enum target_reset_mode reset_mode)
{
	const char *cp;
	cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
	if (!cp) {
		LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

/* determine the number of the new target */
static int new_target_number(void)
{
	struct target *t;
	int x;

	/* number is 0 based */
	x = -1;
	t = all_targets;
	while (t) {
		if (x < t->target_number)
			x = t->target_number;
		t = t->next;
	}
	return x + 1;
}

static void append_to_list_all_targets(struct target *target)
{
	struct target **t = &all_targets;

	while (*t)
		t = &((*t)->next);
	*t = target;
}

/* read a uint64_t from a buffer in target memory endianness */
uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u64(buffer);
	else
		return be_to_h_u64(buffer);
}

/* read a uint32_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u32(buffer);
	else
		return be_to_h_u32(buffer);
}

/* read a uint24_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u24(buffer);
	else
		return be_to_h_u24(buffer);
}

/* read a uint16_t from a buffer in target memory endianness */
uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u16(buffer);
	else
		return be_to_h_u16(buffer);
}

/* write a uint64_t to a buffer in target memory endianness */
void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u64_to_le(buffer, value);
	else
		h_u64_to_be(buffer, value);
}

/* write a uint32_t to a buffer in target memory endianness */
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u32_to_le(buffer, value);
	else
		h_u32_to_be(buffer, value);
}

/* write a uint24_t to a buffer in target memory endianness */
void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u24_to_le(buffer, value);
	else
		h_u24_to_be(buffer, value);
}

/* write a uint16_t to a buffer in target memory endianness */
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u16_to_le(buffer, value);
	else
		h_u16_to_be(buffer, value);
}

/* write a uint8_t to a buffer in target memory endianness */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}
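
/*
 * A minimal usage sketch: the get/set helpers above let callers stay
 * endianness-agnostic. A hypothetical caller preparing a 32-bit word for
 * the target could do:
 *
 *	uint8_t buf[4];
 *	target_buffer_set_u32(target, buf, 0xdeadbeef);
 *	// buf now holds 0xdeadbeef in target byte order, e.g.
 *	// ef be ad de on a little-endian target.
 *	uint32_t value = target_buffer_get_u32(target, buf);
 *	assert(value == 0xdeadbeef);
 */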

/* read a uint64_t array from a buffer in target memory endianness */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
}

/* read a uint32_t array from a buffer in target memory endianness */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}

/* read a uint16_t array from a buffer in target memory endianness */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}

/* write a uint64_t array to a buffer in target memory endianness */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
}

/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
}

/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
}

/* return a pointer to a configured target; id is name or number */
struct target *get_target(const char *id)
{
	struct target *target;

	/* try as Tcl target name */
	for (target = all_targets; target; target = target->next) {
		if (!target_name(target))
			continue;
		if (strcmp(id, target_name(target)) == 0)
			return target;
	}

	/* It's OK to remove this fallback sometime after August 2010 or so */

	/* no match, try as number */
	unsigned num;
	if (parse_uint(id, &num) != ERROR_OK)
		return NULL;

	for (target = all_targets; target; target = target->next) {
		if (target->target_number == (int)num) {
			LOG_WARNING("use '%s' as target identifier, not '%u'",
					target_name(target), num);
			return target;
		}
	}

	return NULL;
}

/* returns a pointer to the n-th configured target */
struct target *get_target_by_num(int num)
{
	struct target *target = all_targets;

	while (target) {
		if (target->target_number == num)
			return target;
		target = target->next;
	}

	return NULL;
}

struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);

	if (!target) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}

struct target *get_current_target_or_null(struct command_context *cmd_ctx)
{
	return cmd_ctx->current_target_override
		? cmd_ctx->current_target_override
		: cmd_ctx->current_target;
}

int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			int64_t t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}

int target_halt(struct target *target)
{
	int retval;
	/* We can't halt until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->halt(target);
	if (retval != ERROR_OK)
		return retval;

	target->halt_issued = true;
	target->halt_issued_time = timeval_ms();

	return ERROR_OK;
}

/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped. (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies. For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled. (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction). On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't resume until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll = jtag_poll_get_enabled();
	jtag_poll_set_enabled(false);
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_set_enabled(save_poll);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
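
/*
 * A minimal usage sketch for the contract documented above: a hypothetical
 * caller that wants "continue from the saved PC, skipping any breakpoint
 * at that PC" passes current=1 and lets address be ignored:
 *
 *	int retval = target_resume(target, 1, 0, 1, 0);
 *	if (retval != ERROR_OK)
 *		LOG_ERROR("resume failed: %s", target_strerror_safe(retval));
 *
 * Remember that resume is asynchronous: the target may already have halted
 * again by the time the next target_poll() runs.
 */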

static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
{
	char buf[100];
	int retval;
	struct jim_nvp *n;
	n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
	if (!n->name) {
		LOG_ERROR("invalid reset mode");
		return ERROR_FAIL;
	}

	struct target *target;
	for (target = all_targets; target; target = target->next)
		target_call_reset_callbacks(target, reset_mode);

	/* disable polling during reset to make reset event scripts
	 * more predictable, i.e. dr/irscan & pathmove in events will
	 * not have JTAG operations injected into the middle of a sequence.
	 */
	bool save_poll = jtag_poll_get_enabled();

	jtag_poll_set_enabled(false);

	sprintf(buf, "ocd_process_reset %s", n->name);
	retval = Jim_Eval(cmd->ctx->interp, buf);

	jtag_poll_set_enabled(save_poll);

	if (retval != JIM_OK) {
		Jim_MakeErrorMessage(cmd->ctx->interp);
		command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
		return ERROR_FAIL;
	}

	/* We want any events to be processed before the prompt */
	retval = target_call_timer_callbacks_now();

	for (target = all_targets; target; target = target->next) {
		target->type->check_reset(target);
		target->running_alg = false;
	}

	return retval;
}

static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}

static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}

/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}

static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}

/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}

/* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
 * Keep in sync */
int target_examine_one(struct target *target)
{
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target->type->examine(target);
	if (retval != ERROR_OK) {
		target_reset_examined(target);
		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
		return retval;
	}

	target_set_examined(target);
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return ERROR_OK;
}

static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	jtag_unregister_event_callback(jtag_enable_callback, target);

	return target_examine_one(target);
}

/* Targets that correctly implement init + examine, i.e.
 * no communication with target during init:
 *
 * XScale
 */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}

		if (target->defer_examine)
			continue;

		int retval2 = target_examine_one(target);
		if (retval2 != ERROR_OK) {
			LOG_WARNING("target %s examination failed", target_name(target));
			retval = retval2;
		}
	}
	return retval;
}

const char *target_type_name(struct target *target)
{
	return target->type->name;
}

static int target_soft_reset_halt(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt(target);
}

/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it. Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_param
 * @param entry_point
 * @param exit_point
 * @param timeout_ms
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		target_addr_t entry_point, target_addr_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}

/**
 * Executes a target-specific native code algorithm and leaves it running.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_params
 * @param entry_point
 * @param exit_point
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t entry_point, target_addr_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}

/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_params
 * @param exit_point
 * @param timeout_ms
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
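
/*
 * A minimal sketch of the split start/wait protocol above; reg_params and
 * the 1000 ms timeout are hypothetical placeholders. The split exists so
 * the host can do useful work (e.g. prepare the next data chunk) while the
 * algorithm runs:
 *
 *	retval = target_start_algorithm(target, 0, NULL, 2, reg_params,
 *			entry_point, exit_point, arch_info);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	// ... overlap host-side work with target execution here ...
 *	retval = target_wait_algorithm(target, 0, NULL, 2, reg_params,
 *			exit_point, 1000, arch_info);
 *
 * On ERROR_TARGET_TIMEOUT, running_alg stays set, so the caller may invoke
 * target_wait_algorithm() again to keep waiting.
 */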

/**
 * Streams data to a circular buffer on the target, intended for consumption
 * by code running asynchronously on the target.
 *
 * This is intended for applications where target-specific native code runs
 * on the target, receives data from the circular buffer, does something with
 * it (most likely writing it to a flash memory), and advances the circular
 * buffer pointer.
 *
 * This assumes that the helper algorithm has already been loaded to the target,
 * but has not been started yet. Given memory and register parameters are passed
 * to the algorithm.
 *
 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
 * following format:
 *
 * [buffer_start + 0, buffer_start + 4):
 *	Write Pointer address (aka head). Written and updated by this
 *	routine when new data is written to the circular buffer.
 * [buffer_start + 4, buffer_start + 8):
 *	Read Pointer address (aka tail). Updated by code running on the
 *	target after it consumes data.
 * [buffer_start + 8, buffer_start + buffer_size):
 *	Circular buffer contents.
 *
 * See contrib/loaders/flash/stm32f1x.S for an example.
 *
 * @param target used to run the algorithm
 * @param buffer address on the host where data to be sent is located
 * @param count number of blocks to send
 * @param block_size size in bytes of each block
 * @param num_mem_params count of memory-based params to pass to algorithm
 * @param mem_params memory-based params to pass to algorithm
 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params register-based params to pass to algorithm
 * @param buffer_start address on the target of the circular buffer structure
 * @param buffer_size size of the circular buffer structure
 * @param entry_point address on the target to execute to start the algorithm
 * @param exit_point address at which to set a breakpoint to catch the
 *	end of the algorithm; can be 0 if target triggers a breakpoint itself
 * @param arch_info
 */

int target_run_flash_async_algorithm(struct target *target,
		const uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
			(size_t) (buffer - buffer_orig), count, wp, rp);

		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set rp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, rp_addr, &rp);
		if (retval == ERROR_OK && rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}
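
/*
 * A worked example of the FIFO layout documented before
 * target_run_flash_async_algorithm(), assuming buffer_start = 0x20000000
 * and buffer_size = 0x1000:
 *
 *	wp_addr         = 0x20000000  // write pointer (head), host-updated
 *	rp_addr         = 0x20000004  // read pointer (tail), target-updated
 *	fifo_start_addr = 0x20000008  // first data byte
 *	fifo_end_addr   = 0x20001000  // one past the last data byte
 *
 * The FIFO counts as empty when wp == rp, so the host deliberately never
 * fills the last block: at most
 * fifo_end_addr - fifo_start_addr - block_size bytes are ever in flight.
 */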

int target_run_read_async_algorithm(struct target *target,
		uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash read algorithm");
		return retval;
	}

	while (count > 0) {
		retval = target_read_u32(target, wp_addr, &wp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get write pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
			(size_t)(buffer - buffer_orig), count, wp, rp);

		if (wp == 0) {
			LOG_ERROR("flash read algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. */
		uint32_t thisrun_bytes;
		if (wp >= rp)
			thisrun_bytes = wp - rp;
		else
			thisrun_bytes = fifo_end_addr - rp;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * reading. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* Reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to read */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Read data from fifo */
		retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap read pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		rp += thisrun_bytes;
		if (rp >= fifo_end_addr)
			rp = fifo_start_addr;

		/* Store updated read pointer to target */
		retval = target_write_u32(target, rp_addr, rp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash read algorithm on target */
		target_write_u32(target, rp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash read algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set wp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, wp_addr, &wp);
		if (retval == ERROR_OK && wp == 0) {
			LOG_ERROR("flash read algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}

int target_read_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->read_memory) {
		LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->read_memory(target, address, size, count, buffer);
}

int target_read_phys_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->read_phys_memory) {
		LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->read_phys_memory(target, address, size, count, buffer);
}

int target_write_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->write_memory) {
		LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->write_memory(target, address, size, count, buffer);
}

int target_write_phys_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->write_phys_memory) {
		LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->write_phys_memory(target, address, size, count, buffer);
}

int target_add_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
		LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_breakpoint(target, breakpoint);
}

int target_add_context_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_context_breakpoint(target, breakpoint);
}

int target_add_hybrid_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_hybrid_breakpoint(target, breakpoint);
}

int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}

int target_add_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_watchpoint(target, watchpoint);
}
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
int target_hit_watchpoint(struct target *target,
		struct watchpoint **hit_watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!target->type->hit_watchpoint) {
		/* For backward compatibility: if hit_watchpoint is not implemented,
		 * return ERROR_FAIL so that gdb_server does not rely on bogus
		 * information. */
		return ERROR_FAIL;
	}

	return target->type->hit_watchpoint(target, hit_watchpoint);
}

const char *target_get_gdb_arch(struct target *target)
{
	if (!target->type->get_gdb_arch)
		return NULL;
	return target->type->get_gdb_arch(target);
}

int target_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
{
	int result = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}

	result = target->type->get_gdb_reg_list(target, reg_list,
			reg_list_size, reg_class);

done:
	if (result != ERROR_OK) {
		*reg_list = NULL;
		*reg_list_size = 0;
	}
	return result;
}

int target_get_gdb_reg_list_noread(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
{
	if (target->type->get_gdb_reg_list_noread &&
			target->type->get_gdb_reg_list_noread(target, reg_list,
				reg_list_size, reg_class) == ERROR_OK)
		return ERROR_OK;
	return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
}

bool target_supports_gdb_connection(struct target *target)
{
	/*
	 * exclude all the targets that don't provide get_gdb_reg_list
	 * or that have explicit gdb_max_connection == 0
	 */
	return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
}

int target_step(struct target *target,
		int current, target_addr_t address, int handle_breakpoints)
{
	int retval;

	target_call_event_callbacks(target, TARGET_EVENT_STEP_START);

	retval = target->type->step(target, current, address, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_STEP_END);

	return retval;
}

int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->get_gdb_fileio_info(target, fileio_info);
}

int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
}

target_addr_t target_address_max(struct target *target)
{
	unsigned bits = target_address_bits(target);
	if (sizeof(target_addr_t) * 8 == bits)
		return (target_addr_t) -1;
	else
		return (((target_addr_t) 1) << bits) - 1;
}
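
/*
 * A quick worked example for target_address_max(): with the default 32-bit
 * address width it computes
 *
 *	(((target_addr_t)1) << 32) - 1 == 0xffffffff
 *
 * while a target whose address_bits() hook reports the full width of
 * target_addr_t would make that shift undefined behavior in C, hence the
 * (target_addr_t)-1 special case above.
 */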
1525
1526 unsigned target_address_bits(struct target *target)
1527 {
1528 if (target->type->address_bits)
1529 return target->type->address_bits(target);
1530 return 32;
1531 }
1532
1533 unsigned int target_data_bits(struct target *target)
1534 {
1535 if (target->type->data_bits)
1536 return target->type->data_bits(target);
1537 return 32;
1538 }
1539
1540 static int target_profiling(struct target *target, uint32_t *samples,
1541 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1542 {
1543 return target->type->profiling(target, samples, max_num_samples,
1544 num_samples, seconds);
1545 }
1546
1547 static int handle_target(void *priv);
1548
1549 static int target_init_one(struct command_context *cmd_ctx,
1550 struct target *target)
1551 {
1552 target_reset_examined(target);
1553
1554 struct target_type *type = target->type;
1555 if (!type->examine)
1556 type->examine = default_examine;
1557
1558 if (!type->check_reset)
1559 type->check_reset = default_check_reset;
1560
1561 assert(type->init_target);
1562
1563 int retval = type->init_target(cmd_ctx, target);
1564 if (retval != ERROR_OK) {
1565 LOG_ERROR("target '%s' init failed", target_name(target));
1566 return retval;
1567 }
1568
1569 /* Sanity-check MMU support ... stub in what we must, to help
1570 * implement it in stages, but warn if we need to do so.
1571 */
1572 if (type->mmu) {
1573 if (!type->virt2phys) {
1574 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1575 type->virt2phys = identity_virt2phys;
1576 }
1577 } else {
1578 /* Make sure no-MMU targets all behave the same: make no
1579 * distinction between physical and virtual addresses, and
1580 * ensure that virt2phys() is always an identity mapping.
1581 */
1582 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1583 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1584
1585 type->mmu = no_mmu;
1586 type->write_phys_memory = type->write_memory;
1587 type->read_phys_memory = type->read_memory;
1588 type->virt2phys = identity_virt2phys;
1589 }
1590
1591 if (!target->type->read_buffer)
1592 target->type->read_buffer = target_read_buffer_default;
1593
1594 if (!target->type->write_buffer)
1595 target->type->write_buffer = target_write_buffer_default;
1596
1597 if (!target->type->get_gdb_fileio_info)
1598 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1599
1600 if (!target->type->gdb_fileio_end)
1601 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1602
1603 if (!target->type->profiling)
1604 target->type->profiling = target_profiling_default;
1605
1606 return ERROR_OK;
1607 }
1608
1609 static int target_init(struct command_context *cmd_ctx)
1610 {
1611 struct target *target;
1612 int retval;
1613
1614 for (target = all_targets; target; target = target->next) {
1615 retval = target_init_one(cmd_ctx, target);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 }
1619
1620 if (!all_targets)
1621 return ERROR_OK;
1622
1623 retval = target_register_user_commands(cmd_ctx);
1624 if (retval != ERROR_OK)
1625 return retval;
1626
1627 retval = target_register_timer_callback(&handle_target,
1628 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1629 if (retval != ERROR_OK)
1630 return retval;
1631
1632 return ERROR_OK;
1633 }
1634
1635 COMMAND_HANDLER(handle_target_init_command)
1636 {
1637 int retval;
1638
1639 if (CMD_ARGC != 0)
1640 return ERROR_COMMAND_SYNTAX_ERROR;
1641
1642 static bool target_initialized;
1643 if (target_initialized) {
1644 LOG_INFO("'target init' has already been called");
1645 return ERROR_OK;
1646 }
1647 target_initialized = true;
1648
1649 retval = command_run_line(CMD_CTX, "init_targets");
1650 if (retval != ERROR_OK)
1651 return retval;
1652
1653 retval = command_run_line(CMD_CTX, "init_target_events");
1654 if (retval != ERROR_OK)
1655 return retval;
1656
1657 retval = command_run_line(CMD_CTX, "init_board");
1658 if (retval != ERROR_OK)
1659 return retval;
1660
1661 LOG_DEBUG("Initializing targets...");
1662 return target_init(CMD_CTX);
1663 }
1664
1665 int target_register_event_callback(int (*callback)(struct target *target,
1666 enum target_event event, void *priv), void *priv)
1667 {
1668 struct target_event_callback **callbacks_p = &target_event_callbacks;
1669
1670 if (!callback)
1671 return ERROR_COMMAND_SYNTAX_ERROR;
1672
1673 if (*callbacks_p) {
1674 while ((*callbacks_p)->next)
1675 callbacks_p = &((*callbacks_p)->next);
1676 callbacks_p = &((*callbacks_p)->next);
1677 }
1678
1679 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1680 (*callbacks_p)->callback = callback;
1681 (*callbacks_p)->priv = priv;
1682 (*callbacks_p)->next = NULL;
1683
1684 return ERROR_OK;
1685 }
1686
1687 int target_register_reset_callback(int (*callback)(struct target *target,
1688 enum target_reset_mode reset_mode, void *priv), void *priv)
1689 {
1690 struct target_reset_callback *entry;
1691
1692 if (!callback)
1693 return ERROR_COMMAND_SYNTAX_ERROR;
1694
1695 entry = malloc(sizeof(struct target_reset_callback));
1696 if (!entry) {
1697 LOG_ERROR("error allocating buffer for reset callback entry");
1698 return ERROR_COMMAND_SYNTAX_ERROR;
1699 }
1700
1701 entry->callback = callback;
1702 entry->priv = priv;
1703 list_add(&entry->list, &target_reset_callback_list);
1704
1705
1706 return ERROR_OK;
1707 }
1708
1709 int target_register_trace_callback(int (*callback)(struct target *target,
1710 size_t len, uint8_t *data, void *priv), void *priv)
1711 {
1712 struct target_trace_callback *entry;
1713
1714 if (!callback)
1715 return ERROR_COMMAND_SYNTAX_ERROR;
1716
1717 entry = malloc(sizeof(struct target_trace_callback));
1718 if (!entry) {
1719 LOG_ERROR("error allocating buffer for trace callback entry");
1720 return ERROR_FAIL;
1721 }
1722
1723 entry->callback = callback;
1724 entry->priv = priv;
1725 list_add(&entry->list, &target_trace_callback_list);
1726
1728 return ERROR_OK;
1729 }
1730
1731 int target_register_timer_callback(int (*callback)(void *priv),
1732 unsigned int time_ms, enum target_timer_type type, void *priv)
1733 {
1734 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1735
1736 if (!callback)
1737 return ERROR_COMMAND_SYNTAX_ERROR;
1738
1739 if (*callbacks_p) {
1740 while ((*callbacks_p)->next)
1741 callbacks_p = &((*callbacks_p)->next);
1742 callbacks_p = &((*callbacks_p)->next);
1743 }
1744
1745 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
if (!*callbacks_p) {
LOG_ERROR("error allocating buffer for timer callback entry");
return ERROR_FAIL;
}
1746 (*callbacks_p)->callback = callback;
1747 (*callbacks_p)->type = type;
1748 (*callbacks_p)->time_ms = time_ms;
1749 (*callbacks_p)->removed = false;
1750
1751 (*callbacks_p)->when = timeval_ms() + time_ms;
1752 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1753
1754 (*callbacks_p)->priv = priv;
1755 (*callbacks_p)->next = NULL;
1756
1757 return ERROR_OK;
1758 }
1759
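/* Usage sketch (hypothetical names): registering a 100 ms periodic
 * callback, matching how target_init() registers handle_target above.
 * A TARGET_TIMER_TYPE_ONESHOT callback would instead be unregistered
 * automatically after its first invocation (see
 * target_call_timer_callback() below). WDOG_FEED_ADDR is made up for
 * the example.
 *
 *	static int my_watchdog_kick(void *priv)
 *	{
 *		struct target *target = priv;
 *		return target_write_u32(target, WDOG_FEED_ADDR, 1);
 *	}
 *
 *	target_register_timer_callback(my_watchdog_kick, 100,
 *		TARGET_TIMER_TYPE_PERIODIC, target);
 */
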
1760 int target_unregister_event_callback(int (*callback)(struct target *target,
1761 enum target_event event, void *priv), void *priv)
1762 {
1763 struct target_event_callback **p = &target_event_callbacks;
1764 struct target_event_callback *c = target_event_callbacks;
1765
1766 if (!callback)
1767 return ERROR_COMMAND_SYNTAX_ERROR;
1768
1769 while (c) {
1770 struct target_event_callback *next = c->next;
1771 if ((c->callback == callback) && (c->priv == priv)) {
1772 *p = next;
1773 free(c);
1774 return ERROR_OK;
1775 } else
1776 p = &(c->next);
1777 c = next;
1778 }
1779
1780 return ERROR_OK;
1781 }
1782
1783 int target_unregister_reset_callback(int (*callback)(struct target *target,
1784 enum target_reset_mode reset_mode, void *priv), void *priv)
1785 {
1786 struct target_reset_callback *entry;
1787
1788 if (!callback)
1789 return ERROR_COMMAND_SYNTAX_ERROR;
1790
1791 list_for_each_entry(entry, &target_reset_callback_list, list) {
1792 if (entry->callback == callback && entry->priv == priv) {
1793 list_del(&entry->list);
1794 free(entry);
1795 break;
1796 }
1797 }
1798
1799 return ERROR_OK;
1800 }
1801
1802 int target_unregister_trace_callback(int (*callback)(struct target *target,
1803 size_t len, uint8_t *data, void *priv), void *priv)
1804 {
1805 struct target_trace_callback *entry;
1806
1807 if (!callback)
1808 return ERROR_COMMAND_SYNTAX_ERROR;
1809
1810 list_for_each_entry(entry, &target_trace_callback_list, list) {
1811 if (entry->callback == callback && entry->priv == priv) {
1812 list_del(&entry->list);
1813 free(entry);
1814 break;
1815 }
1816 }
1817
1818 return ERROR_OK;
1819 }
1820
1821 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1822 {
1823 if (!callback)
1824 return ERROR_COMMAND_SYNTAX_ERROR;
1825
1826 for (struct target_timer_callback *c = target_timer_callbacks;
1827 c; c = c->next) {
1828 if ((c->callback == callback) && (c->priv == priv)) {
1829 c->removed = true;
1830 return ERROR_OK;
1831 }
1832 }
1833
1834 return ERROR_FAIL;
1835 }
1836
1837 int target_call_event_callbacks(struct target *target, enum target_event event)
1838 {
1839 struct target_event_callback *callback = target_event_callbacks;
1840 struct target_event_callback *next_callback;
1841
1842 if (event == TARGET_EVENT_HALTED) {
1843 /* execute early halted first */
1844 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1845 }
1846
1847 LOG_DEBUG("target event %i (%s) for core %s", event,
1848 target_event_name(event),
1849 target_name(target));
1850
1851 target_handle_event(target, event);
1852
1853 while (callback) {
1854 next_callback = callback->next;
1855 callback->callback(target, event, callback->priv);
1856 callback = next_callback;
1857 }
1858
1859 return ERROR_OK;
1860 }
1861
1862 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1863 {
1864 struct target_reset_callback *callback;
1865
1866 LOG_DEBUG("target reset %i (%s)", reset_mode,
1867 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1868
1869 list_for_each_entry(callback, &target_reset_callback_list, list)
1870 callback->callback(target, reset_mode, callback->priv);
1871
1872 return ERROR_OK;
1873 }
1874
1875 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1876 {
1877 struct target_trace_callback *callback;
1878
1879 list_for_each_entry(callback, &target_trace_callback_list, list)
1880 callback->callback(target, len, data, callback->priv);
1881
1882 return ERROR_OK;
1883 }
1884
1885 static int target_timer_callback_periodic_restart(
1886 struct target_timer_callback *cb, int64_t *now)
1887 {
1888 cb->when = *now + cb->time_ms;
1889 return ERROR_OK;
1890 }
1891
1892 static int target_call_timer_callback(struct target_timer_callback *cb,
1893 int64_t *now)
1894 {
1895 cb->callback(cb->priv);
1896
1897 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1898 return target_timer_callback_periodic_restart(cb, now);
1899
1900 return target_unregister_timer_callback(cb->callback, cb->priv);
1901 }
1902
1903 static int target_call_timer_callbacks_check_time(int checktime)
1904 {
1905 static bool callback_processing;
1906
1907 /* Do not allow nesting */
1908 if (callback_processing)
1909 return ERROR_OK;
1910
1911 callback_processing = true;
1912
1913 keep_alive();
1914
1915 int64_t now = timeval_ms();
1916
1917 /* Initialize to a default value that's a ways into the future.
1918 * The loop below will make it closer to now if there are
1919 * callbacks that want to be called sooner. */
1920 target_timer_next_event_value = now + 1000;
1921
1922 /* Store an address of the place containing a pointer to the
1923 * next item; initially, that's a standalone "root of the
1924 * list" variable. */
1925 struct target_timer_callback **callback = &target_timer_callbacks;
1926 while (callback && *callback) {
1927 if ((*callback)->removed) {
1928 struct target_timer_callback *p = *callback;
1929 *callback = (*callback)->next;
1930 free(p);
1931 continue;
1932 }
1933
1934 bool call_it = (*callback)->callback &&
1935 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1936 now >= (*callback)->when);
1937
1938 if (call_it)
1939 target_call_timer_callback(*callback, &now);
1940
1941 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1942 target_timer_next_event_value = (*callback)->when;
1943
1944 callback = &(*callback)->next;
1945 }
1946
1947 callback_processing = false;
1948 return ERROR_OK;
1949 }
1950
1951 int target_call_timer_callbacks(void)
1952 {
1953 return target_call_timer_callbacks_check_time(1);
1954 }
1955
1956 /* invoke periodic callbacks immediately */
1957 int target_call_timer_callbacks_now(void)
1958 {
1959 return target_call_timer_callbacks_check_time(0);
1960 }
1961
1962 int64_t target_timer_next_event(void)
1963 {
1964 return target_timer_next_event_value;
1965 }
1966
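/* Sketch (illustrative): a serving loop can use this value to bound how
 * long it sleeps before servicing timers again. timeval_ms() comes from
 * helper/time_support.h; the loop shown is hypothetical.
 *
 *	int64_t sleep_ms = target_timer_next_event() - timeval_ms();
 *	if (sleep_ms > 0)
 *		alive_sleep(sleep_ms);
 *	target_call_timer_callbacks();
 */
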
1967 /* Prints the working area layout for debug purposes */
1968 static void print_wa_layout(struct target *target)
1969 {
1970 struct working_area *c = target->working_areas;
1971
1972 while (c) {
1973 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1974 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1975 c->address, c->address + c->size - 1, c->size);
1976 c = c->next;
1977 }
1978 }
1979
1980 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1981 static void target_split_working_area(struct working_area *area, uint32_t size)
1982 {
1983 assert(area->free); /* Shouldn't split an allocated area */
1984 assert(size <= area->size); /* Caller should guarantee this */
1985
1986 /* Split only if not already the right size */
1987 if (size < area->size) {
1988 struct working_area *new_wa = malloc(sizeof(*new_wa));
1989
1990 if (!new_wa)
1991 return;
1992
1993 new_wa->next = area->next;
1994 new_wa->size = area->size - size;
1995 new_wa->address = area->address + size;
1996 new_wa->backup = NULL;
1997 new_wa->user = NULL;
1998 new_wa->free = true;
1999
2000 area->next = new_wa;
2001 area->size = size;
2002
2003 /* If backup memory was allocated to this area, it has the wrong size
2004 * now so free it and it will be reallocated if/when needed */
2005 free(area->backup);
2006 area->backup = NULL;
2007 }
2008 }
2009
2010 /* Merge all adjacent free areas into one */
2011 static void target_merge_working_areas(struct target *target)
2012 {
2013 struct working_area *c = target->working_areas;
2014
2015 while (c && c->next) {
2016 assert(c->next->address == c->address + c->size); /* This is an invariant */
2017
2018 /* Find two adjacent free areas */
2019 if (c->free && c->next->free) {
2020 /* Merge the last into the first */
2021 c->size += c->next->size;
2022
2023 /* Remove the last */
2024 struct working_area *to_be_freed = c->next;
2025 c->next = c->next->next;
2026 free(to_be_freed->backup);
2027 free(to_be_freed);
2028
2029 /* If backup memory was allocated to the remaining area, it has
2030 * the wrong size now */
2031 free(c->backup);
2032 c->backup = NULL;
2033 } else {
2034 c = c->next;
2035 }
2036 }
2037 }
2038
2039 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2040 {
2041 /* Reevaluate working area address based on MMU state */
2042 if (!target->working_areas) {
2043 int retval;
2044 int enabled;
2045
2046 retval = target->type->mmu(target, &enabled);
2047 if (retval != ERROR_OK)
2048 return retval;
2049
2050 if (!enabled) {
2051 if (target->working_area_phys_spec) {
2052 LOG_DEBUG("MMU disabled, using physical "
2053 "address for working memory " TARGET_ADDR_FMT,
2054 target->working_area_phys);
2055 target->working_area = target->working_area_phys;
2056 } else {
2057 LOG_ERROR("No working memory available. "
2058 "Specify -work-area-phys to target.");
2059 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2060 }
2061 } else {
2062 if (target->working_area_virt_spec) {
2063 LOG_DEBUG("MMU enabled, using virtual "
2064 "address for working memory " TARGET_ADDR_FMT,
2065 target->working_area_virt);
2066 target->working_area = target->working_area_virt;
2067 } else {
2068 LOG_ERROR("No working memory available. "
2069 "Specify -work-area-virt to target.");
2070 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2071 }
2072 }
2073
2074 /* Set up initial working area on first call */
2075 struct working_area *new_wa = malloc(sizeof(*new_wa));
2076 if (new_wa) {
2077 new_wa->next = NULL;
2078 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2079 new_wa->address = target->working_area;
2080 new_wa->backup = NULL;
2081 new_wa->user = NULL;
2082 new_wa->free = true;
2083 }
2084
2085 target->working_areas = new_wa;
2086 }
2087
2088 /* only allocate multiples of 4 bytes */
2089 if (size % 4)
2090 size = (size + 3) & (~3UL);
2091
2092 struct working_area *c = target->working_areas;
2093
2094 /* Find the first large enough working area */
2095 while (c) {
2096 if (c->free && c->size >= size)
2097 break;
2098 c = c->next;
2099 }
2100
2101 if (!c)
2102 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2103
2104 /* Split the working area into the requested size */
2105 target_split_working_area(c, size);
2106
2107 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2108 size, c->address);
2109
2110 if (target->backup_working_area) {
2111 if (!c->backup) {
2112 c->backup = malloc(c->size);
2113 if (!c->backup)
2114 return ERROR_FAIL;
2115 }
2116
2117 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2118 if (retval != ERROR_OK)
2119 return retval;
2120 }
2121
2122 /* mark as used, and return the new (reused) area */
2123 c->free = false;
2124 *area = c;
2125
2126 /* user pointer */
2127 c->user = area;
2128
2129 print_wa_layout(target);
2130
2131 return ERROR_OK;
2132 }
2133
2134 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2135 {
2136 int retval;
2137
2138 retval = target_alloc_working_area_try(target, size, area);
2139 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2140 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2141 return retval;
2142
2143 }
2144
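/* Usage sketch (illustrative): the usual allocate/use/free pattern, for
 * example when staging a helper algorithm's buffer in target RAM. Error
 * handling is abbreviated and the buffer name "data" is hypothetical.
 *
 *	struct working_area *wa = NULL;
 *	int retval = target_alloc_working_area(target, 1024, &wa);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	retval = target_write_buffer(target, wa->address, 1024, data);
 *	...
 *	target_free_working_area(target, wa);
 */
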
2145 static int target_restore_working_area(struct target *target, struct working_area *area)
2146 {
2147 int retval = ERROR_OK;
2148
2149 if (target->backup_working_area && area->backup) {
2150 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2151 if (retval != ERROR_OK)
2152 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2153 area->size, area->address);
2154 }
2155
2156 return retval;
2157 }
2158
2159 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2160 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2161 {
2162 if (!area || area->free)
2163 return ERROR_OK;
2164
2165 int retval = ERROR_OK;
2166 if (restore) {
2167 retval = target_restore_working_area(target, area);
2168 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2169 if (retval != ERROR_OK)
2170 return retval;
2171 }
2172
2173 area->free = true;
2174
2175 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2176 area->size, area->address);
2177
2178 /* mark user pointer invalid */
2179 /* TODO: Is this really safe? It points to some previous caller's memory.
2180 * How could we know that the area pointer is still in that place and not
2181 * some other vital data? What's the purpose of this, anyway? */
2182 *area->user = NULL;
2183 area->user = NULL;
2184
2185 target_merge_working_areas(target);
2186
2187 print_wa_layout(target);
2188
2189 return retval;
2190 }
2191
2192 int target_free_working_area(struct target *target, struct working_area *area)
2193 {
2194 return target_free_working_area_restore(target, area, 1);
2195 }
2196
2197 /* free resources and restore memory; if restoring memory fails,
2198 * free up resources anyway
2199 */
2200 static void target_free_all_working_areas_restore(struct target *target, int restore)
2201 {
2202 struct working_area *c = target->working_areas;
2203
2204 LOG_DEBUG("freeing all working areas");
2205
2206 /* Loop through all areas, restoring the allocated ones and marking them as free */
2207 while (c) {
2208 if (!c->free) {
2209 if (restore)
2210 target_restore_working_area(target, c);
2211 c->free = true;
2212 *c->user = NULL; /* Same as above */
2213 c->user = NULL;
2214 }
2215 c = c->next;
2216 }
2217
2218 /* Run a merge pass to combine all areas into one */
2219 target_merge_working_areas(target);
2220
2221 print_wa_layout(target);
2222 }
2223
2224 void target_free_all_working_areas(struct target *target)
2225 {
2226 target_free_all_working_areas_restore(target, 1);
2227
2228 /* Now we have none or only one working area marked as free */
2229 if (target->working_areas) {
2230 /* Free the last one to allow on-the-fly moving and resizing */
2231 free(target->working_areas->backup);
2232 free(target->working_areas);
2233 target->working_areas = NULL;
2234 }
2235 }
2236
2237 /* Find the largest number of bytes that can be allocated */
2238 uint32_t target_get_working_area_avail(struct target *target)
2239 {
2240 struct working_area *c = target->working_areas;
2241 uint32_t max_size = 0;
2242
2243 if (!c)
2244 return target->working_area_size;
2245
2246 while (c) {
2247 if (c->free && max_size < c->size)
2248 max_size = c->size;
2249
2250 c = c->next;
2251 }
2252
2253 return max_size;
2254 }
2255
2256 static void target_destroy(struct target *target)
2257 {
2258 if (target->type->deinit_target)
2259 target->type->deinit_target(target);
2260
2261 free(target->semihosting);
2262
2263 jtag_unregister_event_callback(jtag_enable_callback, target);
2264
2265 struct target_event_action *teap = target->event_action;
2266 while (teap) {
2267 struct target_event_action *next = teap->next;
2268 Jim_DecrRefCount(teap->interp, teap->body);
2269 free(teap);
2270 teap = next;
2271 }
2272
2273 target_free_all_working_areas(target);
2274
2275 /* release the target's SMP list */
2276 if (target->smp) {
2277 struct target_list *head, *tmp;
2278
2279 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2280 list_del(&head->lh);
2281 head->target->smp = 0;
2282 free(head);
2283 }
2284 if (target->smp_targets != &empty_smp_targets)
2285 free(target->smp_targets);
2286 target->smp = 0;
2287 }
2288
2289 rtos_destroy(target);
2290
2291 free(target->gdb_port_override);
2292 free(target->type);
2293 free(target->trace_info);
2294 free(target->fileio_info);
2295 free(target->cmd_name);
2296 free(target);
2297 }
2298
2299 void target_quit(void)
2300 {
2301 struct target_event_callback *pe = target_event_callbacks;
2302 while (pe) {
2303 struct target_event_callback *t = pe->next;
2304 free(pe);
2305 pe = t;
2306 }
2307 target_event_callbacks = NULL;
2308
2309 struct target_timer_callback *pt = target_timer_callbacks;
2310 while (pt) {
2311 struct target_timer_callback *t = pt->next;
2312 free(pt);
2313 pt = t;
2314 }
2315 target_timer_callbacks = NULL;
2316
2317 for (struct target *target = all_targets; target;) {
2318 struct target *tmp;
2319
2320 tmp = target->next;
2321 target_destroy(target);
2322 target = tmp;
2323 }
2324
2325 all_targets = NULL;
2326 }
2327
2328 int target_arch_state(struct target *target)
2329 {
2330 int retval;
2331 if (!target) {
2332 LOG_WARNING("No target has been configured");
2333 return ERROR_OK;
2334 }
2335
2336 if (target->state != TARGET_HALTED)
2337 return ERROR_OK;
2338
2339 retval = target->type->arch_state(target);
2340 return retval;
2341 }
2342
2343 static int target_get_gdb_fileio_info_default(struct target *target,
2344 struct gdb_fileio_info *fileio_info)
2345 {
2346 /* If the target does not support semihosting functions, it has
2347 no need to provide a .get_gdb_fileio_info callback.
2348 Returning ERROR_FAIL makes gdb_server report "Txx"
2349 (target halted) every time. */
2350 return ERROR_FAIL;
2351 }
2352
2353 static int target_gdb_fileio_end_default(struct target *target,
2354 int retcode, int fileio_errno, bool ctrl_c)
2355 {
2356 return ERROR_OK;
2357 }
2358
2359 int target_profiling_default(struct target *target, uint32_t *samples,
2360 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2361 {
2362 struct timeval timeout, now;
2363
2364 gettimeofday(&timeout, NULL);
2365 timeval_add_time(&timeout, seconds, 0);
2366
2367 LOG_INFO("Starting profiling. Halting and resuming the"
2368 " target as often as we can...");
2369
2370 uint32_t sample_count = 0;
2371 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2372 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
if (!reg) {
LOG_ERROR("Cannot find pc register, aborting profiling");
return ERROR_FAIL;
}
2373
2374 int retval = ERROR_OK;
2375 for (;;) {
2376 target_poll(target);
2377 if (target->state == TARGET_HALTED) {
2378 uint32_t t = buf_get_u32(reg->value, 0, 32);
2379 samples[sample_count++] = t;
2380 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2381 retval = target_resume(target, 1, 0, 0, 0);
2382 target_poll(target);
2383 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2384 } else if (target->state == TARGET_RUNNING) {
2385 /* We want to quickly sample the PC. */
2386 retval = target_halt(target);
2387 } else {
2388 LOG_INFO("Target not halted or running");
2389 retval = ERROR_OK;
2390 break;
2391 }
2392
2393 if (retval != ERROR_OK)
2394 break;
2395
2396 gettimeofday(&now, NULL);
2397 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2398 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2399 break;
2400 }
2401 }
2402
2403 *num_samples = sample_count;
2404 return retval;
2405 }
2406
2407 /* A single aligned 16 or 32 bit word is guaranteed to use a 16 or
2408 * 32 bit access respectively; any other data is transferred as
2409 * quickly as possible
2410 */
2411 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2412 {
2413 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2414 size, address);
2415
2416 if (!target_was_examined(target)) {
2417 LOG_ERROR("Target not examined yet");
2418 return ERROR_FAIL;
2419 }
2420
2421 if (size == 0)
2422 return ERROR_OK;
2423
2424 if ((address + size - 1) < address) {
2425 /* GDB can request this when e.g. PC is 0xfffffffc */
2426 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2427 address,
2428 size);
2429 return ERROR_FAIL;
2430 }
2431
2432 return target->type->write_buffer(target, address, size, buffer);
2433 }
2434
2435 static int target_write_buffer_default(struct target *target,
2436 target_addr_t address, uint32_t count, const uint8_t *buffer)
2437 {
2438 uint32_t size;
2439 unsigned int data_bytes = target_data_bits(target) / 8;
2440
2441 /* Align up to maximum bytes. The loop condition makes sure the next pass
2442 * will have something to do with the size we leave to it. */
2443 for (size = 1;
2444 size < data_bytes && count >= size * 2 + (address & size);
2445 size *= 2) {
2446 if (address & size) {
2447 int retval = target_write_memory(target, address, size, 1, buffer);
2448 if (retval != ERROR_OK)
2449 return retval;
2450 address += size;
2451 count -= size;
2452 buffer += size;
2453 }
2454 }
2455
2456 /* Write the data with as large access size as possible. */
2457 for (; size > 0; size /= 2) {
2458 uint32_t aligned = count - count % size;
2459 if (aligned > 0) {
2460 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2461 if (retval != ERROR_OK)
2462 return retval;
2463 address += aligned;
2464 count -= aligned;
2465 buffer += aligned;
2466 }
2467 }
2468
2469 return ERROR_OK;
2470 }
2471
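/* Worked example (illustrative) of the splitting above, on a target with
 * a 32-bit data bus (data_bytes == 4): writing 7 bytes to address 0x1001
 * results in three accesses; the first loop raises alignment, the second
 * drains the remainder with decreasing access sizes.
 *
 *	target_write_memory(target, 0x1001, 1, 1, buffer);	(u8 head)
 *	target_write_memory(target, 0x1002, 2, 1, buffer + 1);	(u16 head)
 *	target_write_memory(target, 0x1004, 4, 1, buffer + 3);	(u32 tail)
 */
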
2472 /* A single aligned 16 or 32 bit word is guaranteed to use a 16 or
2473 * 32 bit access respectively; any other data is transferred as
2474 * quickly as possible
2475 */
2476 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2477 {
2478 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2479 size, address);
2480
2481 if (!target_was_examined(target)) {
2482 LOG_ERROR("Target not examined yet");
2483 return ERROR_FAIL;
2484 }
2485
2486 if (size == 0)
2487 return ERROR_OK;
2488
2489 if ((address + size - 1) < address) {
2490 /* GDB can request this when e.g. PC is 0xfffffffc */
2491 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2492 address,
2493 size);
2494 return ERROR_FAIL;
2495 }
2496
2497 return target->type->read_buffer(target, address, size, buffer);
2498 }
2499
2500 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2501 {
2502 uint32_t size;
2503 unsigned int data_bytes = target_data_bits(target) / 8;
2504
2505 /* Align up to maximum bytes. The loop condition makes sure the next pass
2506 * will have something to do with the size we leave to it. */
2507 for (size = 1;
2508 size < data_bytes && count >= size * 2 + (address & size);
2509 size *= 2) {
2510 if (address & size) {
2511 int retval = target_read_memory(target, address, size, 1, buffer);
2512 if (retval != ERROR_OK)
2513 return retval;
2514 address += size;
2515 count -= size;
2516 buffer += size;
2517 }
2518 }
2519
2520 /* Read the data with as large access size as possible. */
2521 for (; size > 0; size /= 2) {
2522 uint32_t aligned = count - count % size;
2523 if (aligned > 0) {
2524 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2525 if (retval != ERROR_OK)
2526 return retval;
2527 address += aligned;
2528 count -= aligned;
2529 buffer += aligned;
2530 }
2531 }
2532
2533 return ERROR_OK;
2534 }
2535
2536 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2537 {
2538 uint8_t *buffer;
2539 int retval;
2540 uint32_t i;
2541 uint32_t checksum = 0;
2542 if (!target_was_examined(target)) {
2543 LOG_ERROR("Target not examined yet");
2544 return ERROR_FAIL;
2545 }
2546 if (!target->type->checksum_memory) {
2547 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2548 return ERROR_FAIL;
2549 }
2550
2551 retval = target->type->checksum_memory(target, address, size, &checksum);
2552 if (retval != ERROR_OK) {
2553 buffer = malloc(size);
2554 if (!buffer) {
2555 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2556 return ERROR_FAIL;
2557 }
2558 retval = target_read_buffer(target, address, size, buffer);
2559 if (retval != ERROR_OK) {
2560 free(buffer);
2561 return retval;
2562 }
2563
2564 /* convert to target endianness */
2565 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2566 uint32_t target_data;
2567 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2568 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2569 }
2570
2571 retval = image_calculate_checksum(buffer, size, &checksum);
2572 free(buffer);
2573 }
2574
2575 *crc = checksum;
2576
2577 return retval;
2578 }
2579
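/* Usage sketch (illustrative): verifying downloaded data without reading
 * it all back, by comparing on-target and host-side CRCs.
 * image_calculate_checksum() is the same helper used in the fallback path
 * above; addr, len and host_buf are hypothetical.
 *
 *	uint32_t target_crc, host_crc;
 *	retval = target_checksum_memory(target, addr, len, &target_crc);
 *	if (retval == ERROR_OK)
 *		retval = image_calculate_checksum(host_buf, len, &host_crc);
 *	if (retval == ERROR_OK && target_crc != host_crc)
 *		LOG_ERROR("checksum mismatch, verification failed");
 */
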
2580 int target_blank_check_memory(struct target *target,
2581 struct target_memory_check_block *blocks, int num_blocks,
2582 uint8_t erased_value)
2583 {
2584 if (!target_was_examined(target)) {
2585 LOG_ERROR("Target not examined yet");
2586 return ERROR_FAIL;
2587 }
2588
2589 if (!target->type->blank_check_memory)
2590 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2591
2592 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2593 }
2594
2595 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2596 {
2597 uint8_t value_buf[8];
2598 if (!target_was_examined(target)) {
2599 LOG_ERROR("Target not examined yet");
2600 return ERROR_FAIL;
2601 }
2602
2603 int retval = target_read_memory(target, address, 8, 1, value_buf);
2604
2605 if (retval == ERROR_OK) {
2606 *value = target_buffer_get_u64(target, value_buf);
2607 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2608 address,
2609 *value);
2610 } else {
2611 *value = 0x0;
2612 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2613 address);
2614 }
2615
2616 return retval;
2617 }
2618
2619 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2620 {
2621 uint8_t value_buf[4];
2622 if (!target_was_examined(target)) {
2623 LOG_ERROR("Target not examined yet");
2624 return ERROR_FAIL;
2625 }
2626
2627 int retval = target_read_memory(target, address, 4, 1, value_buf);
2628
2629 if (retval == ERROR_OK) {
2630 *value = target_buffer_get_u32(target, value_buf);
2631 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2632 address,
2633 *value);
2634 } else {
2635 *value = 0x0;
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2637 address);
2638 }
2639
2640 return retval;
2641 }
2642
2643 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2644 {
2645 uint8_t value_buf[2];
2646 if (!target_was_examined(target)) {
2647 LOG_ERROR("Target not examined yet");
2648 return ERROR_FAIL;
2649 }
2650
2651 int retval = target_read_memory(target, address, 2, 1, value_buf);
2652
2653 if (retval == ERROR_OK) {
2654 *value = target_buffer_get_u16(target, value_buf);
2655 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2656 address,
2657 *value);
2658 } else {
2659 *value = 0x0;
2660 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2661 address);
2662 }
2663
2664 return retval;
2665 }
2666
2667 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2668 {
2669 if (!target_was_examined(target)) {
2670 LOG_ERROR("Target not examined yet");
2671 return ERROR_FAIL;
2672 }
2673
2674 int retval = target_read_memory(target, address, 1, 1, value);
2675
2676 if (retval == ERROR_OK) {
2677 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2678 address,
2679 *value);
2680 } else {
2681 *value = 0x0;
2682 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2683 address);
2684 }
2685
2686 return retval;
2687 }
2688
2689 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2690 {
2691 int retval;
2692 uint8_t value_buf[8];
2693 if (!target_was_examined(target)) {
2694 LOG_ERROR("Target not examined yet");
2695 return ERROR_FAIL;
2696 }
2697
2698 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2699 address,
2700 value);
2701
2702 target_buffer_set_u64(target, value_buf, value);
2703 retval = target_write_memory(target, address, 8, 1, value_buf);
2704 if (retval != ERROR_OK)
2705 LOG_DEBUG("failed: %i", retval);
2706
2707 return retval;
2708 }
2709
2710 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2711 {
2712 int retval;
2713 uint8_t value_buf[4];
2714 if (!target_was_examined(target)) {
2715 LOG_ERROR("Target not examined yet");
2716 return ERROR_FAIL;
2717 }
2718
2719 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2720 address,
2721 value);
2722
2723 target_buffer_set_u32(target, value_buf, value);
2724 retval = target_write_memory(target, address, 4, 1, value_buf);
2725 if (retval != ERROR_OK)
2726 LOG_DEBUG("failed: %i", retval);
2727
2728 return retval;
2729 }
2730
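/* Usage sketch (illustrative): read-modify-write of a memory-mapped
 * 32-bit register using the helpers above; CTRL_ADDR and the bit
 * position are made up for the example.
 *
 *	uint32_t ctrl;
 *	int retval = target_read_u32(target, CTRL_ADDR, &ctrl);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	ctrl |= (1u << 0);
 *	return target_write_u32(target, CTRL_ADDR, ctrl);
 */
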
2731 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2732 {
2733 int retval;
2734 uint8_t value_buf[2];
2735 if (!target_was_examined(target)) {
2736 LOG_ERROR("Target not examined yet");
2737 return ERROR_FAIL;
2738 }
2739
2740 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2741 address,
2742 value);
2743
2744 target_buffer_set_u16(target, value_buf, value);
2745 retval = target_write_memory(target, address, 2, 1, value_buf);
2746 if (retval != ERROR_OK)
2747 LOG_DEBUG("failed: %i", retval);
2748
2749 return retval;
2750 }
2751
2752 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2753 {
2754 int retval;
2755 if (!target_was_examined(target)) {
2756 LOG_ERROR("Target not examined yet");
2757 return ERROR_FAIL;
2758 }
2759
2760 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2761 address, value);
2762
2763 retval = target_write_memory(target, address, 1, 1, &value);
2764 if (retval != ERROR_OK)
2765 LOG_DEBUG("failed: %i", retval);
2766
2767 return retval;
2768 }
2769
2770 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2771 {
2772 int retval;
2773 uint8_t value_buf[8];
2774 if (!target_was_examined(target)) {
2775 LOG_ERROR("Target not examined yet");
2776 return ERROR_FAIL;
2777 }
2778
2779 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2780 address,
2781 value);
2782
2783 target_buffer_set_u64(target, value_buf, value);
2784 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2785 if (retval != ERROR_OK)
2786 LOG_DEBUG("failed: %i", retval);
2787
2788 return retval;
2789 }
2790
2791 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2792 {
2793 int retval;
2794 uint8_t value_buf[4];
2795 if (!target_was_examined(target)) {
2796 LOG_ERROR("Target not examined yet");
2797 return ERROR_FAIL;
2798 }
2799
2800 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2801 address,
2802 value);
2803
2804 target_buffer_set_u32(target, value_buf, value);
2805 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2806 if (retval != ERROR_OK)
2807 LOG_DEBUG("failed: %i", retval);
2808
2809 return retval;
2810 }
2811
2812 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2813 {
2814 int retval;
2815 uint8_t value_buf[2];
2816 if (!target_was_examined(target)) {
2817 LOG_ERROR("Target not examined yet");
2818 return ERROR_FAIL;
2819 }
2820
2821 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2822 address,
2823 value);
2824
2825 target_buffer_set_u16(target, value_buf, value);
2826 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2827 if (retval != ERROR_OK)
2828 LOG_DEBUG("failed: %i", retval);
2829
2830 return retval;
2831 }
2832
2833 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2834 {
2835 int retval;
2836 if (!target_was_examined(target)) {
2837 LOG_ERROR("Target not examined yet");
2838 return ERROR_FAIL;
2839 }
2840
2841 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2842 address, value);
2843
2844 retval = target_write_phys_memory(target, address, 1, 1, &value);
2845 if (retval != ERROR_OK)
2846 LOG_DEBUG("failed: %i", retval);
2847
2848 return retval;
2849 }
2850
2851 static int find_target(struct command_invocation *cmd, const char *name)
2852 {
2853 struct target *target = get_target(name);
2854 if (!target) {
2855 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2856 return ERROR_FAIL;
2857 }
2858 if (!target->tap->enabled) {
2859 command_print(cmd, "Target: TAP %s is disabled, "
2860 "can't be the current target\n",
2861 target->tap->dotted_name);
2862 return ERROR_FAIL;
2863 }
2864
2865 cmd->ctx->current_target = target;
2866 if (cmd->ctx->current_target_override)
2867 cmd->ctx->current_target_override = target;
2868
2869 return ERROR_OK;
2870 }
2871
2872
2873 COMMAND_HANDLER(handle_targets_command)
2874 {
2875 int retval = ERROR_OK;
2876 if (CMD_ARGC == 1) {
2877 retval = find_target(CMD, CMD_ARGV[0]);
2878 if (retval == ERROR_OK) {
2879 /* we're done! */
2880 return retval;
2881 }
2882 }
2883
2884 struct target *target = all_targets;
2885 command_print(CMD, " TargetName Type Endian TapName State ");
2886 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2887 while (target) {
2888 const char *state;
2889 char marker = ' ';
2890
2891 if (target->tap->enabled)
2892 state = target_state_name(target);
2893 else
2894 state = "tap-disabled";
2895
2896 if (CMD_CTX->current_target == target)
2897 marker = '*';
2898
2899 /* keep columns lined up to match the headers above */
2900 command_print(CMD,
2901 "%2d%c %-18s %-10s %-6s %-18s %s",
2902 target->target_number,
2903 marker,
2904 target_name(target),
2905 target_type_name(target),
2906 jim_nvp_value2name_simple(nvp_target_endian,
2907 target->endianness)->name,
2908 target->tap->dotted_name,
2909 state);
2910 target = target->next;
2911 }
2912
2913 return retval;
2914 }
2915
2916 /* every 300 ms we check for reset and power dropout and issue a "reset halt" if so. */
2917
2918 static int power_dropout;
2919 static int srst_asserted;
2920
2921 static int run_power_restore;
2922 static int run_power_dropout;
2923 static int run_srst_asserted;
2924 static int run_srst_deasserted;
2925
2926 static int sense_handler(void)
2927 {
2928 static int prev_srst_asserted;
2929 static int prev_power_dropout;
2930
2931 int retval = jtag_power_dropout(&power_dropout);
2932 if (retval != ERROR_OK)
2933 return retval;
2934
2935 int power_restored;
2936 power_restored = prev_power_dropout && !power_dropout;
2937 if (power_restored)
2938 run_power_restore = 1;
2939
2940 int64_t current = timeval_ms();
2941 static int64_t last_power;
2942 bool wait_more = last_power + 2000 > current;
2943 if (power_dropout && !wait_more) {
2944 run_power_dropout = 1;
2945 last_power = current;
2946 }
2947
2948 retval = jtag_srst_asserted(&srst_asserted);
2949 if (retval != ERROR_OK)
2950 return retval;
2951
2952 int srst_deasserted;
2953 srst_deasserted = prev_srst_asserted && !srst_asserted;
2954
2955 static int64_t last_srst;
2956 wait_more = last_srst + 2000 > current;
2957 if (srst_deasserted && !wait_more) {
2958 run_srst_deasserted = 1;
2959 last_srst = current;
2960 }
2961
2962 if (!prev_srst_asserted && srst_asserted)
2963 run_srst_asserted = 1;
2964
2965 prev_srst_asserted = srst_asserted;
2966 prev_power_dropout = power_dropout;
2967
2968 if (srst_deasserted || power_restored) {
2969 /* Other than logging the event we can't do anything here.
2970 * Issuing a reset is a particularly bad idea as we might
2971 * be inside a reset already.
2972 */
2973 }
2974
2975 return ERROR_OK;
2976 }
2977
2978 /* process target state changes */
2979 static int handle_target(void *priv)
2980 {
2981 Jim_Interp *interp = (Jim_Interp *)priv;
2982 int retval = ERROR_OK;
2983
2984 if (!is_jtag_poll_safe()) {
2985 /* polling is disabled currently */
2986 return ERROR_OK;
2987 }
2988
2989 /* we do not want to recurse here... */
2990 static int recursive;
2991 if (!recursive) {
2992 recursive = 1;
2993 sense_handler();
2994 /* danger! running these procedures can trigger srst assertions and power dropouts.
2995 * We need to avoid an infinite loop/recursion here and we do that by
2996 * clearing the flags after running these events.
2997 */
2998 int did_something = 0;
2999 if (run_srst_asserted) {
3000 LOG_INFO("srst asserted detected, running srst_asserted proc.");
3001 Jim_Eval(interp, "srst_asserted");
3002 did_something = 1;
3003 }
3004 if (run_srst_deasserted) {
3005 Jim_Eval(interp, "srst_deasserted");
3006 did_something = 1;
3007 }
3008 if (run_power_dropout) {
3009 LOG_INFO("Power dropout detected, running power_dropout proc.");
3010 Jim_Eval(interp, "power_dropout");
3011 did_something = 1;
3012 }
3013 if (run_power_restore) {
3014 Jim_Eval(interp, "power_restore");
3015 did_something = 1;
3016 }
3017
3018 if (did_something) {
3019 /* clear detect flags */
3020 sense_handler();
3021 }
3022
3023 /* clear action flags */
3024
3025 run_srst_asserted = 0;
3026 run_srst_deasserted = 0;
3027 run_power_restore = 0;
3028 run_power_dropout = 0;
3029
3030 recursive = 0;
3031 }
3032
3033 /* Poll targets for state changes unless that's globally disabled.
3034 * Skip targets that are currently disabled.
3035 */
3036 for (struct target *target = all_targets;
3037 is_jtag_poll_safe() && target;
3038 target = target->next) {
3039
3040 if (!target_was_examined(target))
3041 continue;
3042
3043 if (!target->tap->enabled)
3044 continue;
3045
3046 if (target->backoff.times > target->backoff.count) {
3047 /* do not poll this time as we failed previously */
3048 target->backoff.count++;
3049 continue;
3050 }
3051 target->backoff.count = 0;
3052
3053 /* only poll target if we've got power and srst isn't asserted */
3054 if (!power_dropout && !srst_asserted) {
3055 /* polling may fail silently until the target has been examined */
3056 retval = target_poll(target);
3057 if (retval != ERROR_OK) {
3058 /* 100ms polling interval. Increase interval between polling up to 5000ms */
3059 if (target->backoff.times * polling_interval < 5000) {
3060 target->backoff.times *= 2;
3061 target->backoff.times++;
3062 }
3063
3064 /* Tell GDB to halt the debugger. This allows the user to
3065 * run monitor commands to handle the situation.
3066 */
3067 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3068 }
3069 if (target->backoff.times > 0) {
3070 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3071 target_reset_examined(target);
3072 retval = target_examine_one(target);
3073 /* Target examination could have failed due to unstable connection,
3074 * but we set the examined flag anyway to repoll it later */
3075 if (retval != ERROR_OK) {
3076 target_set_examined(target);
3077 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3078 target->backoff.times * polling_interval);
3079 return retval;
3080 }
3081 }
3082
3083 /* Since we succeeded, we reset backoff count */
3084 target->backoff.times = 0;
3085 }
3086 }
3087
3088 return retval;
3089 }
3090
3091 COMMAND_HANDLER(handle_reg_command)
3092 {
3093 LOG_DEBUG("-");
3094
3095 struct target *target = get_current_target(CMD_CTX);
3096 struct reg *reg = NULL;
3097
3098 /* list all available registers for the current target */
3099 if (CMD_ARGC == 0) {
3100 struct reg_cache *cache = target->reg_cache;
3101
3102 unsigned int count = 0;
3103 while (cache) {
3104 unsigned i;
3105
3106 command_print(CMD, "===== %s", cache->name);
3107
3108 for (i = 0, reg = cache->reg_list;
3109 i < cache->num_regs;
3110 i++, reg++, count++) {
3111 if (!reg->exist || reg->hidden)
3112 continue;
3113 /* only print cached values if they are valid */
3114 if (reg->valid) {
3115 char *value = buf_to_hex_str(reg->value,
3116 reg->size);
3117 command_print(CMD,
3118 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3119 count, reg->name,
3120 reg->size, value,
3121 reg->dirty
3122 ? " (dirty)"
3123 : "");
3124 free(value);
3125 } else {
3126 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3127 count, reg->name,
3128 reg->size);
3129 }
3130 }
3131 cache = cache->next;
3132 }
3133
3134 return ERROR_OK;
3135 }
3136
3137 /* access a single register by its ordinal number */
3138 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3139 unsigned num;
3140 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3141
3142 struct reg_cache *cache = target->reg_cache;
3143 unsigned int count = 0;
3144 while (cache) {
3145 unsigned i;
3146 for (i = 0; i < cache->num_regs; i++) {
3147 if (count++ == num) {
3148 reg = &cache->reg_list[i];
3149 break;
3150 }
3151 }
3152 if (reg)
3153 break;
3154 cache = cache->next;
3155 }
3156
3157 if (!reg) {
3158 command_print(CMD, "%u is out of bounds, the current target "
3159 "has only %u registers (0 - %u)", num, count, count - 1);
3160 return ERROR_OK;
3161 }
3162 } else {
3163 /* access a single register by its name */
3164 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3165
3166 if (!reg)
3167 goto not_found;
3168 }
3169
3170 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3171
3172 if (!reg->exist)
3173 goto not_found;
3174
3175 /* display a register */
3176 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3177 && (CMD_ARGV[1][0] <= '9')))) {
3178 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3179 reg->valid = 0;
3180
3181 if (reg->valid == 0) {
3182 int retval = reg->type->get(reg);
3183 if (retval != ERROR_OK) {
3184 LOG_ERROR("Could not read register '%s'", reg->name);
3185 return retval;
3186 }
3187 }
3188 char *value = buf_to_hex_str(reg->value, reg->size);
3189 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3190 free(value);
3191 return ERROR_OK;
3192 }
3193
3194 /* set register value */
3195 if (CMD_ARGC == 2) {
3196 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3197 if (!buf)
3198 return ERROR_FAIL;
3199 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3200
3201 int retval = reg->type->set(reg, buf);
3202 if (retval != ERROR_OK) {