target: add Espressif ESP32-S3 basic support
src/target/target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59 #include "smp.h"
60 #include "semihosting_common.h"
61
62 /* default halt wait timeout (ms) */
63 #define DEFAULT_HALT_TIMEOUT 5000
64
65 static int target_read_buffer_default(struct target *target, target_addr_t address,
66 uint32_t count, uint8_t *buffer);
67 static int target_write_buffer_default(struct target *target, target_addr_t address,
68 uint32_t count, const uint8_t *buffer);
69 static int target_array2mem(Jim_Interp *interp, struct target *target,
70 int argc, Jim_Obj * const *argv);
71 static int target_mem2array(Jim_Interp *interp, struct target *target,
72 int argc, Jim_Obj * const *argv);
73 static int target_register_user_commands(struct command_context *cmd_ctx);
74 static int target_get_gdb_fileio_info_default(struct target *target,
75 struct gdb_fileio_info *fileio_info);
76 static int target_gdb_fileio_end_default(struct target *target, int retcode,
77 int fileio_errno, bool ctrl_c);
78
79 /* targets */
80 extern struct target_type arm7tdmi_target;
81 extern struct target_type arm720t_target;
82 extern struct target_type arm9tdmi_target;
83 extern struct target_type arm920t_target;
84 extern struct target_type arm966e_target;
85 extern struct target_type arm946e_target;
86 extern struct target_type arm926ejs_target;
87 extern struct target_type fa526_target;
88 extern struct target_type feroceon_target;
89 extern struct target_type dragonite_target;
90 extern struct target_type xscale_target;
91 extern struct target_type cortexm_target;
92 extern struct target_type cortexa_target;
93 extern struct target_type aarch64_target;
94 extern struct target_type cortexr4_target;
95 extern struct target_type arm11_target;
96 extern struct target_type ls1_sap_target;
97 extern struct target_type mips_m4k_target;
98 extern struct target_type mips_mips64_target;
99 extern struct target_type avr_target;
100 extern struct target_type dsp563xx_target;
101 extern struct target_type dsp5680xx_target;
102 extern struct target_type testee_target;
103 extern struct target_type avr32_ap7k_target;
104 extern struct target_type hla_target;
105 extern struct target_type nds32_v2_target;
106 extern struct target_type nds32_v3_target;
107 extern struct target_type nds32_v3m_target;
108 extern struct target_type esp32_target;
109 extern struct target_type esp32s2_target;
110 extern struct target_type esp32s3_target;
111 extern struct target_type or1k_target;
112 extern struct target_type quark_x10xx_target;
113 extern struct target_type quark_d20xx_target;
114 extern struct target_type stm8_target;
115 extern struct target_type riscv_target;
116 extern struct target_type mem_ap_target;
117 extern struct target_type esirisc_target;
118 extern struct target_type arcv2_target;
119
120 static struct target_type *target_types[] = {
121 &arm7tdmi_target,
122 &arm9tdmi_target,
123 &arm920t_target,
124 &arm720t_target,
125 &arm966e_target,
126 &arm946e_target,
127 &arm926ejs_target,
128 &fa526_target,
129 &feroceon_target,
130 &dragonite_target,
131 &xscale_target,
132 &cortexm_target,
133 &cortexa_target,
134 &cortexr4_target,
135 &arm11_target,
136 &ls1_sap_target,
137 &mips_m4k_target,
138 &avr_target,
139 &dsp563xx_target,
140 &dsp5680xx_target,
141 &testee_target,
142 &avr32_ap7k_target,
143 &hla_target,
144 &nds32_v2_target,
145 &nds32_v3_target,
146 &nds32_v3m_target,
147 &esp32_target,
148 &esp32s2_target,
149 &esp32s3_target,
150 &or1k_target,
151 &quark_x10xx_target,
152 &quark_d20xx_target,
153 &stm8_target,
154 &riscv_target,
155 &mem_ap_target,
156 &esirisc_target,
157 &arcv2_target,
158 &aarch64_target,
159 &mips_mips64_target,
160 NULL,
161 };
162
163 struct target *all_targets;
164 static struct target_event_callback *target_event_callbacks;
165 static struct target_timer_callback *target_timer_callbacks;
166 static int64_t target_timer_next_event_value;
167 static LIST_HEAD(target_reset_callback_list);
168 static LIST_HEAD(target_trace_callback_list);
169 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
170 static LIST_HEAD(empty_smp_targets);
171
172 static const struct jim_nvp nvp_assert[] = {
173 { .name = "assert", NVP_ASSERT },
174 { .name = "deassert", NVP_DEASSERT },
175 { .name = "T", NVP_ASSERT },
176 { .name = "F", NVP_DEASSERT },
177 { .name = "t", NVP_ASSERT },
178 { .name = "f", NVP_DEASSERT },
179 { .name = NULL, .value = -1 }
180 };
181
182 static const struct jim_nvp nvp_error_target[] = {
183 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
184 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
185 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
186 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
187 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
188 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
189 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
190 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
191 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
192 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
193 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
194 { .value = -1, .name = NULL }
195 };
196
197 static const char *target_strerror_safe(int err)
198 {
199 const struct jim_nvp *n;
200
201 n = jim_nvp_value2name_simple(nvp_error_target, err);
202 if (!n->name)
203 return "unknown";
204 else
205 return n->name;
206 }
207
208 static const struct jim_nvp nvp_target_event[] = {
209
210 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
211 { .value = TARGET_EVENT_HALTED, .name = "halted" },
212 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
213 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
214 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
215 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
216 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
217
218 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
219 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
220
221 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
222 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
223 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
224 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
225 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
226 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
227 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
228 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
229
230 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
231 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
232 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
233
234 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
235 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
236
237 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
238 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
239
240 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
241 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
242
243 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
244 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
245
246 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
247
248 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
249 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
250 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
251 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
252 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
253 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
254 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
255 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },
256
257 { .name = NULL, .value = -1 }
258 };
259
260 static const struct jim_nvp nvp_target_state[] = {
261 { .name = "unknown", .value = TARGET_UNKNOWN },
262 { .name = "running", .value = TARGET_RUNNING },
263 { .name = "halted", .value = TARGET_HALTED },
264 { .name = "reset", .value = TARGET_RESET },
265 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
266 { .name = NULL, .value = -1 },
267 };
268
269 static const struct jim_nvp nvp_target_debug_reason[] = {
270 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
271 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
272 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
273 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
274 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
275 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
276 { .name = "program-exit", .value = DBG_REASON_EXIT },
277 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
278 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
279 { .name = NULL, .value = -1 },
280 };
281
282 static const struct jim_nvp nvp_target_endian[] = {
283 { .name = "big", .value = TARGET_BIG_ENDIAN },
284 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
285 { .name = "be", .value = TARGET_BIG_ENDIAN },
286 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
287 { .name = NULL, .value = -1 },
288 };
289
290 static const struct jim_nvp nvp_reset_modes[] = {
291 { .name = "unknown", .value = RESET_UNKNOWN },
292 { .name = "run", .value = RESET_RUN },
293 { .name = "halt", .value = RESET_HALT },
294 { .name = "init", .value = RESET_INIT },
295 { .name = NULL, .value = -1 },
296 };
297
298 const char *debug_reason_name(struct target *t)
299 {
300 const char *cp;
301
302 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
303 t->debug_reason)->name;
304 if (!cp) {
305 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
306 cp = "(*BUG*unknown*BUG*)";
307 }
308 return cp;
309 }
310
311 const char *target_state_name(struct target *t)
312 {
313 const char *cp;
314 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
315 if (!cp) {
316 LOG_ERROR("Invalid target state: %d", (int)(t->state));
317 cp = "(*BUG*unknown*BUG*)";
318 }
319
320 if (!target_was_examined(t) && t->defer_examine)
321 cp = "examine deferred";
322
323 return cp;
324 }
325
326 const char *target_event_name(enum target_event event)
327 {
328 const char *cp;
329 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
330 if (!cp) {
331 LOG_ERROR("Invalid target event: %d", (int)(event));
332 cp = "(*BUG*unknown*BUG*)";
333 }
334 return cp;
335 }
336
337 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
338 {
339 const char *cp;
340 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
341 if (!cp) {
342 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
343 cp = "(*BUG*unknown*BUG*)";
344 }
345 return cp;
346 }
347
348 /* determine the number of the new target */
349 static int new_target_number(void)
350 {
351 struct target *t;
352 int x;
353
354 /* number is 0 based */
355 x = -1;
356 t = all_targets;
357 while (t) {
358 if (x < t->target_number)
359 x = t->target_number;
360 t = t->next;
361 }
362 return x + 1;
363 }
364
365 static void append_to_list_all_targets(struct target *target)
366 {
367 struct target **t = &all_targets;
368
369 while (*t)
370 t = &((*t)->next);
371 *t = target;
372 }
373
374 /* read a uint64_t from a buffer in target memory endianness */
375 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
376 {
377 if (target->endianness == TARGET_LITTLE_ENDIAN)
378 return le_to_h_u64(buffer);
379 else
380 return be_to_h_u64(buffer);
381 }
382
383 /* read a uint32_t from a buffer in target memory endianness */
384 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
385 {
386 if (target->endianness == TARGET_LITTLE_ENDIAN)
387 return le_to_h_u32(buffer);
388 else
389 return be_to_h_u32(buffer);
390 }
391
392 /* read a uint24_t from a buffer in target memory endianness */
393 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
394 {
395 if (target->endianness == TARGET_LITTLE_ENDIAN)
396 return le_to_h_u24(buffer);
397 else
398 return be_to_h_u24(buffer);
399 }
400
401 /* read a uint16_t from a buffer in target memory endianness */
402 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
403 {
404 if (target->endianness == TARGET_LITTLE_ENDIAN)
405 return le_to_h_u16(buffer);
406 else
407 return be_to_h_u16(buffer);
408 }
409
410 /* write a uint64_t to a buffer in target memory endianness */
411 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
412 {
413 if (target->endianness == TARGET_LITTLE_ENDIAN)
414 h_u64_to_le(buffer, value);
415 else
416 h_u64_to_be(buffer, value);
417 }
418
419 /* write a uint32_t to a buffer in target memory endianness */
420 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
421 {
422 if (target->endianness == TARGET_LITTLE_ENDIAN)
423 h_u32_to_le(buffer, value);
424 else
425 h_u32_to_be(buffer, value);
426 }
427
428 /* write a uint24_t to a buffer in target memory endianness */
429 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
430 {
431 if (target->endianness == TARGET_LITTLE_ENDIAN)
432 h_u24_to_le(buffer, value);
433 else
434 h_u24_to_be(buffer, value);
435 }
436
437 /* write a uint16_t to a buffer in target memory endianness */
438 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
439 {
440 if (target->endianness == TARGET_LITTLE_ENDIAN)
441 h_u16_to_le(buffer, value);
442 else
443 h_u16_to_be(buffer, value);
444 }
445
446 /* write a uint8_t to a buffer in target memory endianness */
447 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
448 {
449 *buffer = value;
450 }
451
452 /* read a uint64_t array from a buffer in target memory endianness */
453 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
454 {
455 uint32_t i;
456 for (i = 0; i < count; i++)
457 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
458 }
459
460 /* read a uint32_t array from a buffer in target memory endianness */
461 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
462 {
463 uint32_t i;
464 for (i = 0; i < count; i++)
465 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
466 }
467
468 /* read a uint16_t array from a buffer in target memory endianness */
469 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
470 {
471 uint32_t i;
472 for (i = 0; i < count; i++)
473 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
474 }
475
476 /* write a uint64_t array to a buffer in target memory endianness */
477 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
478 {
479 uint32_t i;
480 for (i = 0; i < count; i++)
481 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
482 }
483
484 /* write a uint32_t array to a buffer in target memory endianness */
485 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
486 {
487 uint32_t i;
488 for (i = 0; i < count; i++)
489 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
490 }
491
492 /* write a uint16_t array to a buffer in target memory endianness */
493 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
494 {
495 uint32_t i;
496 for (i = 0; i < count; i++)
497 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
498 }
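
/*
 * Illustrative sketch (editor's addition, not upstream code): combining the
 * endianness helpers above with target_write_memory(). Only the function
 * name example_write_word_list() is hypothetical; the APIs are real.
 */
static int example_write_word_list(struct target *target,
	target_addr_t address, const uint32_t *words, uint32_t count)
{
	uint8_t *buf = malloc(count * sizeof(uint32_t));
	if (!buf)
		return ERROR_FAIL;
	/* convert host-order words into target memory endianness ... */
	target_buffer_set_u32_array(target, buf, count, words);
	/* ... then transfer the raw bytes as 32-bit accesses */
	int retval = target_write_memory(target, address, 4, count, buf);
	free(buf);
	return retval;
}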
499
500 /* return a pointer to a configured target; id is name or number */
501 struct target *get_target(const char *id)
502 {
503 struct target *target;
504
505 /* try as Tcl target name */
506 for (target = all_targets; target; target = target->next) {
507 if (!target_name(target))
508 continue;
509 if (strcmp(id, target_name(target)) == 0)
510 return target;
511 }
512
513 /* It's OK to remove this fallback sometime after August 2010 or so */
514
515 /* no match, try as number */
516 unsigned num;
517 if (parse_uint(id, &num) != ERROR_OK)
518 return NULL;
519
520 for (target = all_targets; target; target = target->next) {
521 if (target->target_number == (int)num) {
522 LOG_WARNING("use '%s' as target identifier, not '%u'",
523 target_name(target), num);
524 return target;
525 }
526 }
527
528 return NULL;
529 }
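
/*
 * Illustrative sketch (editor's addition): callers normally pass the
 * configured target name; the name "esp32s3.cpu0" is an assumption for
 * the example.
 */
static void example_lookup(void)
{
	struct target *t = get_target("esp32s3.cpu0");
	if (!t)
		LOG_ERROR("no such target");
}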
530
531 /* returns a pointer to the n-th configured target */
532 struct target *get_target_by_num(int num)
533 {
534 struct target *target = all_targets;
535
536 while (target) {
537 if (target->target_number == num)
538 return target;
539 target = target->next;
540 }
541
542 return NULL;
543 }
544
545 struct target *get_current_target(struct command_context *cmd_ctx)
546 {
547 struct target *target = get_current_target_or_null(cmd_ctx);
548
549 if (!target) {
550 LOG_ERROR("BUG: current_target out of bounds");
551 exit(-1);
552 }
553
554 return target;
555 }
556
557 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
558 {
559 return cmd_ctx->current_target_override
560 ? cmd_ctx->current_target_override
561 : cmd_ctx->current_target;
562 }
563
564 int target_poll(struct target *target)
565 {
566 int retval;
567
568 /* We can't poll until after examine */
569 if (!target_was_examined(target)) {
570 /* Fail silently lest we pollute the log */
571 return ERROR_FAIL;
572 }
573
574 retval = target->type->poll(target);
575 if (retval != ERROR_OK)
576 return retval;
577
578 if (target->halt_issued) {
579 if (target->state == TARGET_HALTED)
580 target->halt_issued = false;
581 else {
582 int64_t t = timeval_ms() - target->halt_issued_time;
583 if (t > DEFAULT_HALT_TIMEOUT) {
584 target->halt_issued = false;
585 LOG_INFO("Halt timed out, wake up GDB.");
586 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
587 }
588 }
589 }
590
591 return ERROR_OK;
592 }
593
594 int target_halt(struct target *target)
595 {
596 int retval;
597 /* We can't poll until after examine */
598 if (!target_was_examined(target)) {
599 LOG_ERROR("Target not examined yet");
600 return ERROR_FAIL;
601 }
602
603 retval = target->type->halt(target);
604 if (retval != ERROR_OK)
605 return retval;
606
607 target->halt_issued = true;
608 target->halt_issued_time = timeval_ms();
609
610 return ERROR_OK;
611 }
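
/*
 * Illustrative sketch (editor's addition): a synchronous "halt and wait"
 * built from target_halt() + target_poll(), mirroring the halt_issued
 * timeout handling above. The helper name and poll period are assumptions.
 */
static int example_halt_and_wait(struct target *target, int64_t timeout_ms)
{
	int retval = target_halt(target);
	if (retval != ERROR_OK)
		return retval;
	int64_t start = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() - start > timeout_ms)
			return ERROR_TARGET_TIMEOUT;
		alive_sleep(10);	/* don't hammer the adapter */
	}
	return ERROR_OK;
}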
612
613 /**
614 * Make the target (re)start executing using its saved execution
615 * context (possibly with some modifications).
616 *
617 * @param target Which target should start executing.
618 * @param current True to use the target's saved program counter instead
619 * of the address parameter
620 * @param address Optionally used as the program counter.
621 * @param handle_breakpoints True iff breakpoints at the resumption PC
622 * should be skipped. (For example, maybe execution was stopped by
623 * such a breakpoint, in which case it would be counterproductive to
624 * let it re-trigger.)
625 * @param debug_execution False if all working areas allocated by OpenOCD
626 * should be released and/or restored to their original contents.
627 * (This would for example be true to run some downloaded "helper"
628 * algorithm code, which resides in one such working buffer and uses
629 * another for data storage.)
630 *
631 * @todo Resolve the ambiguity about what the "debug_execution" flag
632 * signifies. For example, Target implementations don't agree on how
633 * it relates to invalidation of the register cache, or to whether
634 * breakpoints and watchpoints should be enabled. (It would seem wrong
635 * to enable breakpoints when running downloaded "helper" algorithms
636 * (debug_execution true), since the breakpoints would be set to match
637 * target firmware being debugged, not the helper algorithm.... and
638 * enabling them could cause such helpers to malfunction (for example,
639 * by overwriting data with a breakpoint instruction). On the other
640 * hand the infrastructure for running such helpers might use this
641 * procedure but rely on hardware breakpoint to detect termination.)
642 */
643 int target_resume(struct target *target, int current, target_addr_t address,
644 int handle_breakpoints, int debug_execution)
645 {
646 int retval;
647
648 /* We can't poll until after examine */
649 if (!target_was_examined(target)) {
650 LOG_ERROR("Target not examined yet");
651 return ERROR_FAIL;
652 }
653
654 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
655
656 /* note that resume *must* be asynchronous. The CPU can halt before
657 * we poll. The CPU can even halt at the current PC as a result of
658 * a software breakpoint being inserted by (a bug?) the application.
659 */
660 /*
661 * resume() triggers the event 'resumed'. The execution of TCL commands
662 * in the event handler causes the polling of targets. If the target has
663 * already halted for a breakpoint, polling will run the 'halted' event
664 * handler before the pending 'resumed' handler.
665 * Disable polling during resume() to guarantee the execution of handlers
666 * in the correct order.
667 */
668 bool save_poll = jtag_poll_get_enabled();
669 jtag_poll_set_enabled(false);
670 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
671 jtag_poll_set_enabled(save_poll);
672 if (retval != ERROR_OK)
673 return retval;
674
675 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
676
677 return retval;
678 }
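
/*
 * Illustrative sketch (editor's addition): the two common ways to call
 * target_resume(). The helper address is an assumption, and error
 * handling is elided for brevity.
 */
static void example_resume_calls(struct target *target)
{
	/* resume at the saved program counter, honoring breakpoints normally */
	target_resume(target, 1, 0, 0, 0);
	/* resume helper code at an explicit address, skipping a breakpoint
	 * planted there, as "debug execution" so working areas stay allocated */
	target_resume(target, 0, 0x20000000, 1, 1);
}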
679
680 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
681 {
682 char buf[100];
683 int retval;
684 struct jim_nvp *n;
685 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
686 if (!n->name) {
687 LOG_ERROR("invalid reset mode");
688 return ERROR_FAIL;
689 }
690
691 struct target *target;
692 for (target = all_targets; target; target = target->next)
693 target_call_reset_callbacks(target, reset_mode);
694
695 /* disable polling during reset to make reset event scripts
696 * more predictable, i.e. dr/irscan & pathmove in events will
697 * not have JTAG operations injected into the middle of a sequence.
698 */
699 bool save_poll = jtag_poll_get_enabled();
700
701 jtag_poll_set_enabled(false);
702
703 sprintf(buf, "ocd_process_reset %s", n->name);
704 retval = Jim_Eval(cmd->ctx->interp, buf);
705
706 jtag_poll_set_enabled(save_poll);
707
708 if (retval != JIM_OK) {
709 Jim_MakeErrorMessage(cmd->ctx->interp);
710 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
711 return ERROR_FAIL;
712 }
713
714 /* We want any events to be processed before the prompt */
715 retval = target_call_timer_callbacks_now();
716
717 for (target = all_targets; target; target = target->next) {
718 target->type->check_reset(target);
719 target->running_alg = false;
720 }
721
722 return retval;
723 }
724
725 static int identity_virt2phys(struct target *target,
726 target_addr_t virtual, target_addr_t *physical)
727 {
728 *physical = virtual;
729 return ERROR_OK;
730 }
731
732 static int no_mmu(struct target *target, int *enabled)
733 {
734 *enabled = 0;
735 return ERROR_OK;
736 }
737
738 /**
739 * Reset the @c examined flag for the given target.
740 * Pure paranoia -- targets are zeroed on allocation.
741 */
742 static inline void target_reset_examined(struct target *target)
743 {
744 target->examined = false;
745 }
746
747 static int default_examine(struct target *target)
748 {
749 target_set_examined(target);
750 return ERROR_OK;
751 }
752
753 /* no check by default */
754 static int default_check_reset(struct target *target)
755 {
756 return ERROR_OK;
757 }
758
759 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
760 * Keep in sync */
761 int target_examine_one(struct target *target)
762 {
763 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
764
765 int retval = target->type->examine(target);
766 if (retval != ERROR_OK) {
767 target_reset_examined(target);
768 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
769 return retval;
770 }
771
772 target_set_examined(target);
773 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
774
775 return ERROR_OK;
776 }
777
778 static int jtag_enable_callback(enum jtag_event event, void *priv)
779 {
780 struct target *target = priv;
781
782 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
783 return ERROR_OK;
784
785 jtag_unregister_event_callback(jtag_enable_callback, target);
786
787 return target_examine_one(target);
788 }
789
790 /* Targets that correctly implement init + examine, i.e.
791 * no communication with target during init:
792 *
793 * XScale
794 */
795 int target_examine(void)
796 {
797 int retval = ERROR_OK;
798 struct target *target;
799
800 for (target = all_targets; target; target = target->next) {
801 /* defer examination, but don't skip it */
802 if (!target->tap->enabled) {
803 jtag_register_event_callback(jtag_enable_callback,
804 target);
805 continue;
806 }
807
808 if (target->defer_examine)
809 continue;
810
811 int retval2 = target_examine_one(target);
812 if (retval2 != ERROR_OK) {
813 LOG_WARNING("target %s examination failed", target_name(target));
814 retval = retval2;
815 }
816 }
817 return retval;
818 }
819
820 const char *target_type_name(struct target *target)
821 {
822 return target->type->name;
823 }
824
825 static int target_soft_reset_halt(struct target *target)
826 {
827 if (!target_was_examined(target)) {
828 LOG_ERROR("Target not examined yet");
829 return ERROR_FAIL;
830 }
831 if (!target->type->soft_reset_halt) {
832 LOG_ERROR("Target %s does not support soft_reset_halt",
833 target_name(target));
834 return ERROR_FAIL;
835 }
836 return target->type->soft_reset_halt(target);
837 }
838
839 /**
840 * Downloads a target-specific native code algorithm to the target,
841 * and executes it. Note that some targets may need to set up, enable,
842 * and tear down a breakpoint (hard or soft) to detect algorithm
843 * termination, while others may support lower overhead schemes where
844 * soft breakpoints embedded in the algorithm automatically terminate the
845 * algorithm.
846 *
847 * @param target used to run the algorithm
848 * @param num_mem_params
849 * @param mem_params
850 * @param num_reg_params
851 * @param reg_param
852 * @param entry_point
853 * @param exit_point
854 * @param timeout_ms
855 * @param arch_info target-specific description of the algorithm.
856 */
857 int target_run_algorithm(struct target *target,
858 int num_mem_params, struct mem_param *mem_params,
859 int num_reg_params, struct reg_param *reg_param,
860 target_addr_t entry_point, target_addr_t exit_point,
861 int timeout_ms, void *arch_info)
862 {
863 int retval = ERROR_FAIL;
864
865 if (!target_was_examined(target)) {
866 LOG_ERROR("Target not examined yet");
867 goto done;
868 }
869 if (!target->type->run_algorithm) {
870 LOG_ERROR("Target type '%s' does not support %s",
871 target_type_name(target), __func__);
872 goto done;
873 }
874
875 target->running_alg = true;
876 retval = target->type->run_algorithm(target,
877 num_mem_params, mem_params,
878 num_reg_params, reg_param,
879 entry_point, exit_point, timeout_ms, arch_info);
880 target->running_alg = false;
881
882 done:
883 return retval;
884 }
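
/*
 * Illustrative sketch (editor's addition): passing one register parameter
 * to target_run_algorithm(). init_reg_param()/destroy_reg_param() and
 * buf_set_u32()/buf_get_u32() are the real helpers from target/algorithm.h
 * and helper/binarybuffer.h; the register name, addresses and timeout are
 * assumptions, and most target types expect a non-NULL, type-specific
 * arch_info rather than the NULL used here.
 */
static int example_run_algorithm(struct target *target)
{
	struct reg_param reg_params[1];
	init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, 0x12345678);	/* input */

	int retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
		0x20000000 /* entry */, 0x20000040 /* exit */,
		1000 /* ms */, NULL /* arch_info, see note above */);
	if (retval == ERROR_OK)
		LOG_INFO("algorithm result 0x%08" PRIx32,
			buf_get_u32(reg_params[0].value, 0, 32));

	destroy_reg_param(&reg_params[0]);
	return retval;
}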
885
886 /**
887 * Executes a target-specific native code algorithm and leaves it running.
888 *
889 * @param target used to run the algorithm
890 * @param num_mem_params
891 * @param mem_params
892 * @param num_reg_params
893 * @param reg_params
894 * @param entry_point
895 * @param exit_point
896 * @param arch_info target-specific description of the algorithm.
897 */
898 int target_start_algorithm(struct target *target,
899 int num_mem_params, struct mem_param *mem_params,
900 int num_reg_params, struct reg_param *reg_params,
901 target_addr_t entry_point, target_addr_t exit_point,
902 void *arch_info)
903 {
904 int retval = ERROR_FAIL;
905
906 if (!target_was_examined(target)) {
907 LOG_ERROR("Target not examined yet");
908 goto done;
909 }
910 if (!target->type->start_algorithm) {
911 LOG_ERROR("Target type '%s' does not support %s",
912 target_type_name(target), __func__);
913 goto done;
914 }
915 if (target->running_alg) {
916 LOG_ERROR("Target is already running an algorithm");
917 goto done;
918 }
919
920 target->running_alg = true;
921 retval = target->type->start_algorithm(target,
922 num_mem_params, mem_params,
923 num_reg_params, reg_params,
924 entry_point, exit_point, arch_info);
925
926 done:
927 return retval;
928 }
929
930 /**
931 * Waits for an algorithm started with target_start_algorithm() to complete.
932 *
933 * @param target used to run the algorithm
934 * @param num_mem_params
935 * @param mem_params
936 * @param num_reg_params
937 * @param reg_params
938 * @param exit_point
939 * @param timeout_ms
940 * @param arch_info target-specific description of the algorithm.
941 */
942 int target_wait_algorithm(struct target *target,
943 int num_mem_params, struct mem_param *mem_params,
944 int num_reg_params, struct reg_param *reg_params,
945 target_addr_t exit_point, int timeout_ms,
946 void *arch_info)
947 {
948 int retval = ERROR_FAIL;
949
950 if (!target->type->wait_algorithm) {
951 LOG_ERROR("Target type '%s' does not support %s",
952 target_type_name(target), __func__);
953 goto done;
954 }
955 if (!target->running_alg) {
956 LOG_ERROR("Target is not running an algorithm");
957 goto done;
958 }
959
960 retval = target->type->wait_algorithm(target,
961 num_mem_params, mem_params,
962 num_reg_params, reg_params,
963 exit_point, timeout_ms, arch_info);
964 if (retval != ERROR_TARGET_TIMEOUT)
965 target->running_alg = false;
966
967 done:
968 return retval;
969 }
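
/*
 * Illustrative sketch (editor's addition): the start/wait split lets the
 * host stream data while the algorithm runs, which is exactly what
 * target_run_flash_async_algorithm() below builds on. The addresses,
 * timeout and NULL arch_info are assumptions.
 */
static int example_start_then_wait(struct target *target)
{
	int retval = target_start_algorithm(target, 0, NULL, 0, NULL,
		0x20000000 /* entry */, 0x20000040 /* exit */, NULL);
	if (retval != ERROR_OK)
		return retval;
	/* ... feed data to the running algorithm here ... */
	return target_wait_algorithm(target, 0, NULL, 0, NULL,
		0x20000040 /* exit */, 1000 /* ms */, NULL);
}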
970
971 /**
972 * Streams data to a circular buffer on target intended for consumption by code
973 * running asynchronously on target.
974 *
975 * This is intended for applications where target-specific native code runs
976 * on the target, receives data from the circular buffer, does something with
977 * it (most likely writing it to a flash memory), and advances the circular
978 * buffer pointer.
979 *
980 * This assumes that the helper algorithm has already been loaded to the target,
981 * but has not been started yet. Given memory and register parameters are passed
982 * to the algorithm.
983 *
984 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
985 * following format:
986 *
987 * [buffer_start + 0, buffer_start + 4):
988 * Write Pointer address (aka head). Written and updated by this
989 * routine when new data is written to the circular buffer.
990 * [buffer_start + 4, buffer_start + 8):
991 * Read Pointer address (aka tail). Updated by code running on the
992 * target after it consumes data.
993 * [buffer_start + 8, buffer_start + buffer_size):
994 * Circular buffer contents.
995 *
996 * See contrib/loaders/flash/stm32f1x.S for an example.
997 *
998 * @param target used to run the algorithm
999 * @param buffer address on the host where data to be sent is located
1000 * @param count number of blocks to send
1001 * @param block_size size in bytes of each block
1002 * @param num_mem_params count of memory-based params to pass to algorithm
1003 * @param mem_params memory-based params to pass to algorithm
1004 * @param num_reg_params count of register-based params to pass to algorithm
1005 * @param reg_params memory-based params to pass to algorithm
1006 * @param buffer_start address on the target of the circular buffer structure
1007 * @param buffer_size size of the circular buffer structure
1008 * @param entry_point address on the target to execute to start the algorithm
1009 * @param exit_point address at which to set a breakpoint to catch the
1010 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1011 * @param arch_info
1012 */
1013
1014 int target_run_flash_async_algorithm(struct target *target,
1015 const uint8_t *buffer, uint32_t count, int block_size,
1016 int num_mem_params, struct mem_param *mem_params,
1017 int num_reg_params, struct reg_param *reg_params,
1018 uint32_t buffer_start, uint32_t buffer_size,
1019 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1020 {
1021 int retval;
1022 int timeout = 0;
1023
1024 const uint8_t *buffer_orig = buffer;
1025
1026 /* Set up working area. First word is write pointer, second word is read pointer,
1027 * rest is fifo data area. */
1028 uint32_t wp_addr = buffer_start;
1029 uint32_t rp_addr = buffer_start + 4;
1030 uint32_t fifo_start_addr = buffer_start + 8;
1031 uint32_t fifo_end_addr = buffer_start + buffer_size;
1032
1033 uint32_t wp = fifo_start_addr;
1034 uint32_t rp = fifo_start_addr;
1035
1036 /* validate block_size is 2^n */
1037 assert(IS_PWR_OF_2(block_size));
1038
1039 retval = target_write_u32(target, wp_addr, wp);
1040 if (retval != ERROR_OK)
1041 return retval;
1042 retval = target_write_u32(target, rp_addr, rp);
1043 if (retval != ERROR_OK)
1044 return retval;
1045
1046 /* Start up algorithm on target and let it idle while writing the first chunk */
1047 retval = target_start_algorithm(target, num_mem_params, mem_params,
1048 num_reg_params, reg_params,
1049 entry_point,
1050 exit_point,
1051 arch_info);
1052
1053 if (retval != ERROR_OK) {
1054 LOG_ERROR("error starting target flash write algorithm");
1055 return retval;
1056 }
1057
1058 while (count > 0) {
1059
1060 retval = target_read_u32(target, rp_addr, &rp);
1061 if (retval != ERROR_OK) {
1062 LOG_ERROR("failed to get read pointer");
1063 break;
1064 }
1065
1066 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1067 (size_t) (buffer - buffer_orig), count, wp, rp);
1068
1069 if (rp == 0) {
1070 LOG_ERROR("flash write algorithm aborted by target");
1071 retval = ERROR_FLASH_OPERATION_FAILED;
1072 break;
1073 }
1074
1075 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1076 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1077 break;
1078 }
1079
1080 /* Count the number of bytes available in the fifo without
1081 * crossing the wrap around. Make sure to not fill it completely,
1082 * because that would make wp == rp and that's the empty condition. */
1083 uint32_t thisrun_bytes;
1084 if (rp > wp)
1085 thisrun_bytes = rp - wp - block_size;
1086 else if (rp > fifo_start_addr)
1087 thisrun_bytes = fifo_end_addr - wp;
1088 else
1089 thisrun_bytes = fifo_end_addr - wp - block_size;
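
/* Worked example (editor's addition, hypothetical numbers): with
 * buffer_start = 0x1000, buffer_size = 0x108 and block_size = 4, the
 * data area is [0x1008, 0x1108). If wp = 0x1100 and rp = 0x1008, the
 * last branch above allows 0x1108 - 0x1100 - 4 = 4 bytes; writing all
 * 8 remaining bytes would wrap wp onto rp, which reads as "empty". */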
1090
1091 if (thisrun_bytes == 0) {
1092 /* Throttle polling a bit if transfer is (much) faster than flash
1093 * programming. The exact delay shouldn't matter as long as it's
1094 * less than buffer size / flash speed. This is very unlikely to
1095 * run when using high latency connections such as USB. */
1096 alive_sleep(2);
1097
1098 /* To stop an infinite loop on some targets, check and increment a timeout;
1099 * this issue was observed on a Stellaris using the new ICDI interface. */
1100 if (timeout++ >= 2500) {
1101 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1102 return ERROR_FLASH_OPERATION_FAILED;
1103 }
1104 continue;
1105 }
1106
1107 /* reset our timeout */
1108 timeout = 0;
1109
1110 /* Limit to the amount of data we actually want to write */
1111 if (thisrun_bytes > count * block_size)
1112 thisrun_bytes = count * block_size;
1113
1114 /* Force end of large blocks to be word aligned */
1115 if (thisrun_bytes >= 16)
1116 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1117
1118 /* Write data to fifo */
1119 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1120 if (retval != ERROR_OK)
1121 break;
1122
1123 /* Update counters and wrap write pointer */
1124 buffer += thisrun_bytes;
1125 count -= thisrun_bytes / block_size;
1126 wp += thisrun_bytes;
1127 if (wp >= fifo_end_addr)
1128 wp = fifo_start_addr;
1129
1130 /* Store updated write pointer to target */
1131 retval = target_write_u32(target, wp_addr, wp);
1132 if (retval != ERROR_OK)
1133 break;
1134
1135 /* Avoid GDB timeouts */
1136 keep_alive();
1137 }
1138
1139 if (retval != ERROR_OK) {
1140 /* abort flash write algorithm on target */
1141 target_write_u32(target, wp_addr, 0);
1142 }
1143
1144 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1145 num_reg_params, reg_params,
1146 exit_point,
1147 10000,
1148 arch_info);
1149
1150 if (retval2 != ERROR_OK) {
1151 LOG_ERROR("error waiting for target flash write algorithm");
1152 retval = retval2;
1153 }
1154
1155 if (retval == ERROR_OK) {
1156 /* check if algorithm set rp = 0 after fifo writer loop finished */
1157 retval = target_read_u32(target, rp_addr, &rp);
1158 if (retval == ERROR_OK && rp == 0) {
1159 LOG_ERROR("flash write algorithm aborted by target");
1160 retval = ERROR_FLASH_OPERATION_FAILED;
1161 }
1162 }
1163
1164 return retval;
1165 }
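
/*
 * Illustrative sketch (editor's addition): how a flash driver might invoke
 * target_run_flash_async_algorithm(). The working-area calls are the real
 * API from target.h; the FIFO size, the loader entry address and the NULL
 * arch_info are assumptions.
 */
static int example_flash_write(struct target *target,
	const uint8_t *data, uint32_t byte_count)
{
	struct working_area *fifo;
	/* 8 bytes of wp/rp bookkeeping plus 1 KiB of data area */
	if (target_alloc_working_area(target, 8 + 1024, &fifo) != ERROR_OK)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	int retval = target_run_flash_async_algorithm(target,
		data, byte_count, 1 /* block_size */,
		0, NULL, 0, NULL,
		fifo->address, fifo->size,
		0x20000000 /* loader entry, assumed */,
		0 /* exit_point 0: loader triggers its own breakpoint */,
		NULL /* arch_info, assumed */);

	target_free_working_area(target, fifo);
	return retval;
}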
1166
1167 int target_run_read_async_algorithm(struct target *target,
1168 uint8_t *buffer, uint32_t count, int block_size,
1169 int num_mem_params, struct mem_param *mem_params,
1170 int num_reg_params, struct reg_param *reg_params,
1171 uint32_t buffer_start, uint32_t buffer_size,
1172 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1173 {
1174 int retval;
1175 int timeout = 0;
1176
1177 const uint8_t *buffer_orig = buffer;
1178
1179 /* Set up working area. First word is write pointer, second word is read pointer,
1180 * rest is fifo data area. */
1181 uint32_t wp_addr = buffer_start;
1182 uint32_t rp_addr = buffer_start + 4;
1183 uint32_t fifo_start_addr = buffer_start + 8;
1184 uint32_t fifo_end_addr = buffer_start + buffer_size;
1185
1186 uint32_t wp = fifo_start_addr;
1187 uint32_t rp = fifo_start_addr;
1188
1189 /* validate block_size is 2^n */
1190 assert(IS_PWR_OF_2(block_size));
1191
1192 retval = target_write_u32(target, wp_addr, wp);
1193 if (retval != ERROR_OK)
1194 return retval;
1195 retval = target_write_u32(target, rp_addr, rp);
1196 if (retval != ERROR_OK)
1197 return retval;
1198
1199 /* Start up algorithm on target */
1200 retval = target_start_algorithm(target, num_mem_params, mem_params,
1201 num_reg_params, reg_params,
1202 entry_point,
1203 exit_point,
1204 arch_info);
1205
1206 if (retval != ERROR_OK) {
1207 LOG_ERROR("error starting target flash read algorithm");
1208 return retval;
1209 }
1210
1211 while (count > 0) {
1212 retval = target_read_u32(target, wp_addr, &wp);
1213 if (retval != ERROR_OK) {
1214 LOG_ERROR("failed to get write pointer");
1215 break;
1216 }
1217
1218 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1219 (size_t)(buffer - buffer_orig), count, wp, rp);
1220
1221 if (wp == 0) {
1222 LOG_ERROR("flash read algorithm aborted by target");
1223 retval = ERROR_FLASH_OPERATION_FAILED;
1224 break;
1225 }
1226
1227 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1228 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1229 break;
1230 }
1231
1232 /* Count the number of bytes available in the fifo without
1233 * crossing the wrap around. */
1234 uint32_t thisrun_bytes;
1235 if (wp >= rp)
1236 thisrun_bytes = wp - rp;
1237 else
1238 thisrun_bytes = fifo_end_addr - rp;
1239
1240 if (thisrun_bytes == 0) {
1241 /* Throttle polling a bit if transfer is (much) faster than flash
1242 * reading. The exact delay shouldn't matter as long as it's
1243 * less than buffer size / flash speed. This is very unlikely to
1244 * run when using high latency connections such as USB. */
1245 alive_sleep(2);
1246
1247 /* To stop an infinite loop on some targets, check and increment a timeout;
1248 * this issue was observed on a Stellaris using the new ICDI interface. */
1249 if (timeout++ >= 2500) {
1250 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1251 return ERROR_FLASH_OPERATION_FAILED;
1252 }
1253 continue;
1254 }
1255
1256 /* Reset our timeout */
1257 timeout = 0;
1258
1259 /* Limit to the amount of data we actually want to read */
1260 if (thisrun_bytes > count * block_size)
1261 thisrun_bytes = count * block_size;
1262
1263 /* Force end of large blocks to be word aligned */
1264 if (thisrun_bytes >= 16)
1265 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1266
1267 /* Read data from fifo */
1268 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1269 if (retval != ERROR_OK)
1270 break;
1271
1272 /* Update counters and wrap read pointer */
1273 buffer += thisrun_bytes;
1274 count -= thisrun_bytes / block_size;
1275 rp += thisrun_bytes;
1276 if (rp >= fifo_end_addr)
1277 rp = fifo_start_addr;
1278
1279 /* Store updated read pointer to target */
1280 retval = target_write_u32(target, rp_addr, rp);
1281 if (retval != ERROR_OK)
1282 break;
1283
1284 /* Avoid GDB timeouts */
1285 keep_alive();
1286
1288
1289 if (retval != ERROR_OK) {
1290 /* abort flash read algorithm on target */
1291 target_write_u32(target, rp_addr, 0);
1292 }
1293
1294 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1295 num_reg_params, reg_params,
1296 exit_point,
1297 10000,
1298 arch_info);
1299
1300 if (retval2 != ERROR_OK) {
1301 LOG_ERROR("error waiting for target flash write algorithm");
1302 retval = retval2;
1303 }
1304
1305 if (retval == ERROR_OK) {
1306 /* check if algorithm set wp = 0 after fifo reader loop finished */
1307 retval = target_read_u32(target, wp_addr, &wp);
1308 if (retval == ERROR_OK && wp == 0) {
1309 LOG_ERROR("flash read algorithm aborted by target");
1310 retval = ERROR_FLASH_OPERATION_FAILED;
1311 }
1312 }
1313
1314 return retval;
1315 }
1316
1317 int target_read_memory(struct target *target,
1318 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1319 {
1320 if (!target_was_examined(target)) {
1321 LOG_ERROR("Target not examined yet");
1322 return ERROR_FAIL;
1323 }
1324 if (!target->type->read_memory) {
1325 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1326 return ERROR_FAIL;
1327 }
1328 return target->type->read_memory(target, address, size, count, buffer);
1329 }
1330
1331 int target_read_phys_memory(struct target *target,
1332 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1333 {
1334 if (!target_was_examined(target)) {
1335 LOG_ERROR("Target not examined yet");
1336 return ERROR_FAIL;
1337 }
1338 if (!target->type->read_phys_memory) {
1339 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1340 return ERROR_FAIL;
1341 }
1342 return target->type->read_phys_memory(target, address, size, count, buffer);
1343 }
1344
1345 int target_write_memory(struct target *target,
1346 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1347 {
1348 if (!target_was_examined(target)) {
1349 LOG_ERROR("Target not examined yet");
1350 return ERROR_FAIL;
1351 }
1352 if (!target->type->write_memory) {
1353 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1354 return ERROR_FAIL;
1355 }
1356 return target->type->write_memory(target, address, size, count, buffer);
1357 }
1358
1359 int target_write_phys_memory(struct target *target,
1360 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1361 {
1362 if (!target_was_examined(target)) {
1363 LOG_ERROR("Target not examined yet");
1364 return ERROR_FAIL;
1365 }
1366 if (!target->type->write_phys_memory) {
1367 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1368 return ERROR_FAIL;
1369 }
1370 return target->type->write_phys_memory(target, address, size, count, buffer);
1371 }
1372
1373 int target_add_breakpoint(struct target *target,
1374 struct breakpoint *breakpoint)
1375 {
1376 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1377 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1378 return ERROR_TARGET_NOT_HALTED;
1379 }
1380 return target->type->add_breakpoint(target, breakpoint);
1381 }
1382
1383 int target_add_context_breakpoint(struct target *target,
1384 struct breakpoint *breakpoint)
1385 {
1386 if (target->state != TARGET_HALTED) {
1387 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1388 return ERROR_TARGET_NOT_HALTED;
1389 }
1390 return target->type->add_context_breakpoint(target, breakpoint);
1391 }
1392
1393 int target_add_hybrid_breakpoint(struct target *target,
1394 struct breakpoint *breakpoint)
1395 {
1396 if (target->state != TARGET_HALTED) {
1397 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1398 return ERROR_TARGET_NOT_HALTED;
1399 }
1400 return target->type->add_hybrid_breakpoint(target, breakpoint);
1401 }
1402
1403 int target_remove_breakpoint(struct target *target,
1404 struct breakpoint *breakpoint)
1405 {
1406 return target->type->remove_breakpoint(target, breakpoint);
1407 }
1408
1409 int target_add_watchpoint(struct target *target,
1410 struct watchpoint *watchpoint)
1411 {
1412 if (target->state != TARGET_HALTED) {
1413 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1414 return ERROR_TARGET_NOT_HALTED;
1415 }
1416 return target->type->add_watchpoint(target, watchpoint);
1417 }
1418 int target_remove_watchpoint(struct target *target,
1419 struct watchpoint *watchpoint)
1420 {
1421 return target->type->remove_watchpoint(target, watchpoint);
1422 }
1423 int target_hit_watchpoint(struct target *target,
1424 struct watchpoint **hit_watchpoint)
1425 {
1426 if (target->state != TARGET_HALTED) {
1427 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1428 return ERROR_TARGET_NOT_HALTED;
1429 }
1430
1431 if (!target->type->hit_watchpoint) {
1432 /* For backward compatibility, if hit_watchpoint is not implemented,
1433 * return ERROR_FAIL so that gdb_server does not act on bogus
1434 * information. */
1435 return ERROR_FAIL;
1436 }
1437
1438 return target->type->hit_watchpoint(target, hit_watchpoint);
1439 }
1440
1441 const char *target_get_gdb_arch(struct target *target)
1442 {
1443 if (!target->type->get_gdb_arch)
1444 return NULL;
1445 return target->type->get_gdb_arch(target);
1446 }
1447
1448 int target_get_gdb_reg_list(struct target *target,
1449 struct reg **reg_list[], int *reg_list_size,
1450 enum target_register_class reg_class)
1451 {
1452 int result = ERROR_FAIL;
1453
1454 if (!target_was_examined(target)) {
1455 LOG_ERROR("Target not examined yet");
1456 goto done;
1457 }
1458
1459 result = target->type->get_gdb_reg_list(target, reg_list,
1460 reg_list_size, reg_class);
1461
1462 done:
1463 if (result != ERROR_OK) {
1464 *reg_list = NULL;
1465 *reg_list_size = 0;
1466 }
1467 return result;
1468 }
1469
1470 int target_get_gdb_reg_list_noread(struct target *target,
1471 struct reg **reg_list[], int *reg_list_size,
1472 enum target_register_class reg_class)
1473 {
1474 if (target->type->get_gdb_reg_list_noread &&
1475 target->type->get_gdb_reg_list_noread(target, reg_list,
1476 reg_list_size, reg_class) == ERROR_OK)
1477 return ERROR_OK;
1478 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1479 }
1480
1481 bool target_supports_gdb_connection(struct target *target)
1482 {
1483 /*
1484 * exclude all the targets that don't provide get_gdb_reg_list
1485 * or that have explicit gdb_max_connection == 0
1486 */
1487 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1488 }
1489
1490 int target_step(struct target *target,
1491 int current, target_addr_t address, int handle_breakpoints)
1492 {
1493 int retval;
1494
1495 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1496
1497 retval = target->type->step(target, current, address, handle_breakpoints);
1498 if (retval != ERROR_OK)
1499 return retval;
1500
1501 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1502
1503 return retval;
1504 }
1505
1506 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1507 {
1508 if (target->state != TARGET_HALTED) {
1509 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1510 return ERROR_TARGET_NOT_HALTED;
1511 }
1512 return target->type->get_gdb_fileio_info(target, fileio_info);
1513 }
1514
1515 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1516 {
1517 if (target->state != TARGET_HALTED) {
1518 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1519 return ERROR_TARGET_NOT_HALTED;
1520 }
1521 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1522 }
1523
1524 target_addr_t target_address_max(struct target *target)
1525 {
1526 unsigned bits = target_address_bits(target);
1527 if (sizeof(target_addr_t) * 8 == bits)
1528 return (target_addr_t) -1;
1529 else
1530 return (((target_addr_t) 1) << bits) - 1;
1531 }
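
/* Note on target_address_max() above (editor's addition): with the default
 * 32 address bits and a 64-bit target_addr_t it returns
 * (1 << 32) - 1 = 0xFFFFFFFF; when bits equals the full width of
 * target_addr_t it must take the (target_addr_t)-1 shortcut instead,
 * because shifting by the type's full width is undefined behaviour in C. */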
1532
1533 unsigned target_address_bits(struct target *target)
1534 {
1535 if (target->type->address_bits)
1536 return target->type->address_bits(target);
1537 return 32;
1538 }
1539
1540 unsigned int target_data_bits(struct target *target)
1541 {
1542 if (target->type->data_bits)
1543 return target->type->data_bits(target);
1544 return 32;
1545 }
1546
1547 static int target_profiling(struct target *target, uint32_t *samples,
1548 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1549 {
1550 return target->type->profiling(target, samples, max_num_samples,
1551 num_samples, seconds);
1552 }
1553
1554 static int handle_target(void *priv);
1555
1556 static int target_init_one(struct command_context *cmd_ctx,
1557 struct target *target)
1558 {
1559 target_reset_examined(target);
1560
1561 struct target_type *type = target->type;
1562 if (!type->examine)
1563 type->examine = default_examine;
1564
1565 if (!type->check_reset)
1566 type->check_reset = default_check_reset;
1567
1568 assert(type->init_target);
1569
1570 int retval = type->init_target(cmd_ctx, target);
1571 if (retval != ERROR_OK) {
1572 LOG_ERROR("target '%s' init failed", target_name(target));
1573 return retval;
1574 }
1575
1576 /* Sanity-check MMU support ... stub in what we must, to help
1577 * implement it in stages, but warn if we need to do so.
1578 */
1579 if (type->mmu) {
1580 if (!type->virt2phys) {
1581 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1582 type->virt2phys = identity_virt2phys;
1583 }
1584 } else {
1585 /* Make sure no-MMU targets all behave the same: make no
1586 * distinction between physical and virtual addresses, and
1587 * ensure that virt2phys() is always an identity mapping.
1588 */
1589 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1590 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1591
1592 type->mmu = no_mmu;
1593 type->write_phys_memory = type->write_memory;
1594 type->read_phys_memory = type->read_memory;
1595 type->virt2phys = identity_virt2phys;
1596 }
1597
1598 if (!target->type->read_buffer)
1599 target->type->read_buffer = target_read_buffer_default;
1600
1601 if (!target->type->write_buffer)
1602 target->type->write_buffer = target_write_buffer_default;
1603
1604 if (!target->type->get_gdb_fileio_info)
1605 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1606
1607 if (!target->type->gdb_fileio_end)
1608 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1609
1610 if (!target->type->profiling)
1611 target->type->profiling = target_profiling_default;
1612
1613 return ERROR_OK;
1614 }
1615
1616 static int target_init(struct command_context *cmd_ctx)
1617 {
1618 struct target *target;
1619 int retval;
1620
1621 for (target = all_targets; target; target = target->next) {
1622 retval = target_init_one(cmd_ctx, target);
1623 if (retval != ERROR_OK)
1624 return retval;
1625 }
1626
1627 if (!all_targets)
1628 return ERROR_OK;
1629
1630 retval = target_register_user_commands(cmd_ctx);
1631 if (retval != ERROR_OK)
1632 return retval;
1633
1634 retval = target_register_timer_callback(&handle_target,
1635 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1636 if (retval != ERROR_OK)
1637 return retval;
1638
1639 return ERROR_OK;
1640 }
1641
1642 COMMAND_HANDLER(handle_target_init_command)
1643 {
1644 int retval;
1645
1646 if (CMD_ARGC != 0)
1647 return ERROR_COMMAND_SYNTAX_ERROR;
1648
1649 static bool target_initialized;
1650 if (target_initialized) {
1651 LOG_INFO("'target init' has already been called");
1652 return ERROR_OK;
1653 }
1654 target_initialized = true;
1655
1656 retval = command_run_line(CMD_CTX, "init_targets");
1657 if (retval != ERROR_OK)
1658 return retval;
1659
1660 retval = command_run_line(CMD_CTX, "init_target_events");
1661 if (retval != ERROR_OK)
1662 return retval;
1663
1664 retval = command_run_line(CMD_CTX, "init_board");
1665 if (retval != ERROR_OK)
1666 return retval;
1667
1668 LOG_DEBUG("Initializing targets...");
1669 return target_init(CMD_CTX);
1670 }
1671
1672 int target_register_event_callback(int (*callback)(struct target *target,
1673 enum target_event event, void *priv), void *priv)
1674 {
1675 struct target_event_callback **callbacks_p = &target_event_callbacks;
1676
1677 if (!callback)
1678 return ERROR_COMMAND_SYNTAX_ERROR;
1679
1680 if (*callbacks_p) {
1681 while ((*callbacks_p)->next)
1682 callbacks_p = &((*callbacks_p)->next);
1683 callbacks_p = &((*callbacks_p)->next);
1684 }
1685
1686 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1687 (*callbacks_p)->callback = callback;
1688 (*callbacks_p)->priv = priv;
1689 (*callbacks_p)->next = NULL;
1690
1691 return ERROR_OK;
1692 }
1693
1694 int target_register_reset_callback(int (*callback)(struct target *target,
1695 enum target_reset_mode reset_mode, void *priv), void *priv)
1696 {
1697 struct target_reset_callback *entry;
1698
1699 if (!callback)
1700 return ERROR_COMMAND_SYNTAX_ERROR;
1701
1702 entry = malloc(sizeof(struct target_reset_callback));
1703 if (!entry) {
1704 LOG_ERROR("error allocating buffer for reset callback entry");
1705 return ERROR_COMMAND_SYNTAX_ERROR;
1706 }
1707
1708 entry->callback = callback;
1709 entry->priv = priv;
1710 list_add(&entry->list, &target_reset_callback_list);
1711
1712
1713 return ERROR_OK;
1714 }
1715
1716 int target_register_trace_callback(int (*callback)(struct target *target,
1717 size_t len, uint8_t *data, void *priv), void *priv)
1718 {
1719 struct target_trace_callback *entry;
1720
1721 if (!callback)
1722 return ERROR_COMMAND_SYNTAX_ERROR;
1723
1724 entry = malloc(sizeof(struct target_trace_callback));
1725 if (!entry) {
1726 LOG_ERROR("error allocating buffer for trace callback entry");
1727 return ERROR_FAIL;
1728 }
1729
1730 entry->callback = callback;
1731 entry->priv = priv;
1732 list_add(&entry->list, &target_trace_callback_list);
1733 
1735 return ERROR_OK;
1736 }
1737
1738 int target_register_timer_callback(int (*callback)(void *priv),
1739 unsigned int time_ms, enum target_timer_type type, void *priv)
1740 {
1741 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1742
1743 if (!callback)
1744 return ERROR_COMMAND_SYNTAX_ERROR;
1745
1746 if (*callbacks_p) {
1747 while ((*callbacks_p)->next)
1748 callbacks_p = &((*callbacks_p)->next);
1749 callbacks_p = &((*callbacks_p)->next);
1750 }
1751
1752 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
if (!*callbacks_p) {
LOG_ERROR("error allocating buffer for timer callback entry");
return ERROR_FAIL;
}
1753 (*callbacks_p)->callback = callback;
1754 (*callbacks_p)->type = type;
1755 (*callbacks_p)->time_ms = time_ms;
1756 (*callbacks_p)->removed = false;
1757
1758 (*callbacks_p)->when = timeval_ms() + time_ms;
1759 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1760
1761 (*callbacks_p)->priv = priv;
1762 (*callbacks_p)->next = NULL;
1763
1764 return ERROR_OK;
1765 }
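
/* Illustrative sketch (hypothetical names, disabled): a periodic callback
 * that fires roughly every 500 ms until target_unregister_timer_callback()
 * is called with the same callback/priv pair.
 */
#if 0
static unsigned int example_ticks;

static int example_tick(void *priv)
{
unsigned int *counter = priv;
(*counter)++;
return ERROR_OK;
}

static int example_start_ticking(void)
{
return target_register_timer_callback(example_tick, 500,
TARGET_TIMER_TYPE_PERIODIC, &example_ticks);
}
#endif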
1766
1767 int target_unregister_event_callback(int (*callback)(struct target *target,
1768 enum target_event event, void *priv), void *priv)
1769 {
1770 struct target_event_callback **p = &target_event_callbacks;
1771 struct target_event_callback *c = target_event_callbacks;
1772
1773 if (!callback)
1774 return ERROR_COMMAND_SYNTAX_ERROR;
1775
1776 while (c) {
1777 struct target_event_callback *next = c->next;
1778 if ((c->callback == callback) && (c->priv == priv)) {
1779 *p = next;
1780 free(c);
1781 return ERROR_OK;
1782 } else
1783 p = &(c->next);
1784 c = next;
1785 }
1786
1787 return ERROR_OK;
1788 }
1789
1790 int target_unregister_reset_callback(int (*callback)(struct target *target,
1791 enum target_reset_mode reset_mode, void *priv), void *priv)
1792 {
1793 struct target_reset_callback *entry;
1794
1795 if (!callback)
1796 return ERROR_COMMAND_SYNTAX_ERROR;
1797
1798 list_for_each_entry(entry, &target_reset_callback_list, list) {
1799 if (entry->callback == callback && entry->priv == priv) {
1800 list_del(&entry->list);
1801 free(entry);
1802 break;
1803 }
1804 }
1805
1806 return ERROR_OK;
1807 }
1808
1809 int target_unregister_trace_callback(int (*callback)(struct target *target,
1810 size_t len, uint8_t *data, void *priv), void *priv)
1811 {
1812 struct target_trace_callback *entry;
1813
1814 if (!callback)
1815 return ERROR_COMMAND_SYNTAX_ERROR;
1816
1817 list_for_each_entry(entry, &target_trace_callback_list, list) {
1818 if (entry->callback == callback && entry->priv == priv) {
1819 list_del(&entry->list);
1820 free(entry);
1821 break;
1822 }
1823 }
1824
1825 return ERROR_OK;
1826 }
1827
1828 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1829 {
1830 if (!callback)
1831 return ERROR_COMMAND_SYNTAX_ERROR;
1832
1833 for (struct target_timer_callback *c = target_timer_callbacks;
1834 c; c = c->next) {
1835 if ((c->callback == callback) && (c->priv == priv)) {
1836 c->removed = true;
1837 return ERROR_OK;
1838 }
1839 }
1840
1841 return ERROR_FAIL;
1842 }
1843
1844 int target_call_event_callbacks(struct target *target, enum target_event event)
1845 {
1846 struct target_event_callback *callback = target_event_callbacks;
1847 struct target_event_callback *next_callback;
1848
1849 if (event == TARGET_EVENT_HALTED) {
1850 /* fire the GDB halt callbacks before the generic halted ones */
1851 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1852 }
1853
1854 LOG_DEBUG("target event %i (%s) for core %s", event,
1855 target_event_name(event),
1856 target_name(target));
1857
1858 target_handle_event(target, event);
1859
1860 while (callback) {
1861 next_callback = callback->next;
1862 callback->callback(target, event, callback->priv);
1863 callback = next_callback;
1864 }
1865
1866 return ERROR_OK;
1867 }
1868
1869 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1870 {
1871 struct target_reset_callback *callback;
1872
1873 LOG_DEBUG("target reset %i (%s)", reset_mode,
1874 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1875
1876 list_for_each_entry(callback, &target_reset_callback_list, list)
1877 callback->callback(target, reset_mode, callback->priv);
1878
1879 return ERROR_OK;
1880 }
1881
1882 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1883 {
1884 struct target_trace_callback *callback;
1885
1886 list_for_each_entry(callback, &target_trace_callback_list, list)
1887 callback->callback(target, len, data, callback->priv);
1888
1889 return ERROR_OK;
1890 }
1891
1892 static int target_timer_callback_periodic_restart(
1893 struct target_timer_callback *cb, int64_t *now)
1894 {
1895 cb->when = *now + cb->time_ms;
1896 return ERROR_OK;
1897 }
1898
1899 static int target_call_timer_callback(struct target_timer_callback *cb,
1900 int64_t *now)
1901 {
1902 cb->callback(cb->priv);
1903
1904 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1905 return target_timer_callback_periodic_restart(cb, now);
1906
1907 return target_unregister_timer_callback(cb->callback, cb->priv);
1908 }
1909
1910 static int target_call_timer_callbacks_check_time(int checktime)
1911 {
1912 static bool callback_processing;
1913
1914 /* Do not allow nesting */
1915 if (callback_processing)
1916 return ERROR_OK;
1917
1918 callback_processing = true;
1919
1920 keep_alive();
1921
1922 int64_t now = timeval_ms();
1923
1924 /* Initialize to a default value well into the future.
1925 * The loop below will move it closer to now if there are
1926 * callbacks that want to be called sooner. */
1927 target_timer_next_event_value = now + 1000;
1928
1929 /* Store an address of the place containing a pointer to the
1930 * next item; initially, that's a standalone "root of the
1931 * list" variable. */
1932 struct target_timer_callback **callback = &target_timer_callbacks;
1933 while (callback && *callback) {
1934 if ((*callback)->removed) {
1935 struct target_timer_callback *p = *callback;
1936 *callback = (*callback)->next;
1937 free(p);
1938 continue;
1939 }
1940
1941 bool call_it = (*callback)->callback &&
1942 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1943 now >= (*callback)->when);
1944
1945 if (call_it)
1946 target_call_timer_callback(*callback, &now);
1947
1948 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1949 target_timer_next_event_value = (*callback)->when;
1950
1951 callback = &(*callback)->next;
1952 }
1953
1954 callback_processing = false;
1955 return ERROR_OK;
1956 }
1957
1958 int target_call_timer_callbacks()
1959 {
1960 return target_call_timer_callbacks_check_time(1);
1961 }
1962
1963 /* invoke periodic callbacks immediately */
1964 int target_call_timer_callbacks_now()
1965 {
1966 return target_call_timer_callbacks_check_time(0);
1967 }
1968
1969 int64_t target_timer_next_event(void)
1970 {
1971 return target_timer_next_event_value;
1972 }
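
/* Illustrative sketch (hypothetical, disabled): how a server loop might use
 * the two entry points above -- dispatch due timers, then sleep no longer
 * than the next scheduled expiry so periodic callbacks stay on time.
 */
#if 0
static void example_serve_once(void)
{
target_call_timer_callbacks();

int64_t timeout_ms = target_timer_next_event() - timeval_ms();
if (timeout_ms > 0)
alive_sleep(timeout_ms); /* in practice: bound a select()/poll() timeout */
}
#endif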
1973
1974 /* Prints the working area layout for debug purposes */
1975 static void print_wa_layout(struct target *target)
1976 {
1977 struct working_area *c = target->working_areas;
1978
1979 while (c) {
1980 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1981 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1982 c->address, c->address + c->size - 1, c->size);
1983 c = c->next;
1984 }
1985 }
1986
1987 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1988 static void target_split_working_area(struct working_area *area, uint32_t size)
1989 {
1990 assert(area->free); /* Shouldn't split an allocated area */
1991 assert(size <= area->size); /* Caller should guarantee this */
1992
1993 /* Split only if not already the right size */
1994 if (size < area->size) {
1995 struct working_area *new_wa = malloc(sizeof(*new_wa));
1996
1997 if (!new_wa)
1998 return;
1999
2000 new_wa->next = area->next;
2001 new_wa->size = area->size - size;
2002 new_wa->address = area->address + size;
2003 new_wa->backup = NULL;
2004 new_wa->user = NULL;
2005 new_wa->free = true;
2006
2007 area->next = new_wa;
2008 area->size = size;
2009
2010 /* If backup memory was allocated to this area, it has the wrong size
2011 * now so free it and it will be reallocated if/when needed */
2012 free(area->backup);
2013 area->backup = NULL;
2014 }
2015 }
2016
2017 /* Merge all adjacent free areas into one */
2018 static void target_merge_working_areas(struct target *target)
2019 {
2020 struct working_area *c = target->working_areas;
2021
2022 while (c && c->next) {
2023 assert(c->next->address == c->address + c->size); /* This is an invariant */
2024
2025 /* Find two adjacent free areas */
2026 if (c->free && c->next->free) {
2027 /* Merge the last into the first */
2028 c->size += c->next->size;
2029
2030 /* Remove the last */
2031 struct working_area *to_be_freed = c->next;
2032 c->next = c->next->next;
2033 free(to_be_freed->backup);
2034 free(to_be_freed);
2035
2036 /* If backup memory was allocated to the remaining area, it has
2037 * the wrong size now */
2038 free(c->backup);
2039 c->backup = NULL;
2040 } else {
2041 c = c->next;
2042 }
2043 }
2044 }
2045
2046 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2047 {
2048 /* Reevaluate working area address based on MMU state */
2049 if (!target->working_areas) {
2050 int retval;
2051 int enabled;
2052
2053 retval = target->type->mmu(target, &enabled);
2054 if (retval != ERROR_OK)
2055 return retval;
2056
2057 if (!enabled) {
2058 if (target->working_area_phys_spec) {
2059 LOG_DEBUG("MMU disabled, using physical "
2060 "address for working memory " TARGET_ADDR_FMT,
2061 target->working_area_phys);
2062 target->working_area = target->working_area_phys;
2063 } else {
2064 LOG_ERROR("No working memory available. "
2065 "Specify -work-area-phys to target.");
2066 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2067 }
2068 } else {
2069 if (target->working_area_virt_spec) {
2070 LOG_DEBUG("MMU enabled, using virtual "
2071 "address for working memory " TARGET_ADDR_FMT,
2072 target->working_area_virt);
2073 target->working_area = target->working_area_virt;
2074 } else {
2075 LOG_ERROR("No working memory available. "
2076 "Specify -work-area-virt to target.");
2077 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2078 }
2079 }
2080
2081 /* Set up initial working area on first call */
2082 struct working_area *new_wa = malloc(sizeof(*new_wa));
2083 if (new_wa) {
2084 new_wa->next = NULL;
2085 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2086 new_wa->address = target->working_area;
2087 new_wa->backup = NULL;
2088 new_wa->user = NULL;
2089 new_wa->free = true;
2090 }
2091
2092 target->working_areas = new_wa;
2093 }
2094
2095 /* only allocate in multiples of 4 bytes */
2096 if (size % 4)
2097 size = (size + 3) & (~3UL);
2098
2099 struct working_area *c = target->working_areas;
2100
2101 /* Find the first large enough working area */
2102 while (c) {
2103 if (c->free && c->size >= size)
2104 break;
2105 c = c->next;
2106 }
2107
2108 if (!c)
2109 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2110
2111 /* Split the working area into the requested size */
2112 target_split_working_area(c, size);
2113
2114 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2115 size, c->address);
2116
2117 if (target->backup_working_area) {
2118 if (!c->backup) {
2119 c->backup = malloc(c->size);
2120 if (!c->backup)
2121 return ERROR_FAIL;
2122 }
2123
2124 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2125 if (retval != ERROR_OK)
2126 return retval;
2127 }
2128
2129 /* mark as used, and return the new (reused) area */
2130 c->free = false;
2131 *area = c;
2132
2133 /* user pointer */
2134 c->user = area;
2135
2136 print_wa_layout(target);
2137
2138 return ERROR_OK;
2139 }
2140
2141 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2142 {
2143 int retval;
2144
2145 retval = target_alloc_working_area_try(target, size, area);
2146 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2147 LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
2148 return retval;
2150 }
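
/* Illustrative sketch (hypothetical, disabled): typical working-area
 * lifetime for a flash or RAM algorithm -- allocate scratch memory, use it,
 * then free it so any backed-up target memory is restored.
 */
#if 0
static int example_use_scratch(struct target *target)
{
struct working_area *scratch;
int retval = target_alloc_working_area(target, 1024, &scratch);
if (retval != ERROR_OK)
return retval;

/* ... download and run an algorithm at scratch->address ... */

return target_free_working_area(target, scratch);
}
#endif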
2151
2152 static int target_restore_working_area(struct target *target, struct working_area *area)
2153 {
2154 int retval = ERROR_OK;
2155
2156 if (target->backup_working_area && area->backup) {
2157 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2158 if (retval != ERROR_OK)
2159 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2160 area->size, area->address);
2161 }
2162
2163 return retval;
2164 }
2165
2166 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2167 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2168 {
2169 if (!area || area->free)
2170 return ERROR_OK;
2171
2172 int retval = ERROR_OK;
2173 if (restore) {
2174 retval = target_restore_working_area(target, area);
2175 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2176 if (retval != ERROR_OK)
2177 return retval;
2178 }
2179
2180 area->free = true;
2181
2182 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2183 area->size, area->address);
2184
2185 /* mark user pointer invalid */
2186 /* TODO: Is this really safe? It points to some previous caller's memory.
2187 * How could we know that the area pointer is still in that place and not
2188 * some other vital data? What's the purpose of this, anyway? */
2189 *area->user = NULL;
2190 area->user = NULL;
2191
2192 target_merge_working_areas(target);
2193
2194 print_wa_layout(target);
2195
2196 return retval;
2197 }
2198
2199 int target_free_working_area(struct target *target, struct working_area *area)
2200 {
2201 return target_free_working_area_restore(target, area, 1);
2202 }
2203
2204 /* Free resources and restore memory; if restoring memory fails,
2205 * free up the resources anyway
2206 */
2207 static void target_free_all_working_areas_restore(struct target *target, int restore)
2208 {
2209 struct working_area *c = target->working_areas;
2210
2211 LOG_DEBUG("freeing all working areas");
2212
2213 /* Loop through all areas, restoring the allocated ones and marking them as free */
2214 while (c) {
2215 if (!c->free) {
2216 if (restore)
2217 target_restore_working_area(target, c);
2218 c->free = true;
2219 *c->user = NULL; /* Same as above */
2220 c->user = NULL;
2221 }
2222 c = c->next;
2223 }
2224
2225 /* Run a merge pass to combine all areas into one */
2226 target_merge_working_areas(target);
2227
2228 print_wa_layout(target);
2229 }
2230
2231 void target_free_all_working_areas(struct target *target)
2232 {
2233 target_free_all_working_areas_restore(target, 1);
2234
2235 /* Now we have none or only one working area marked as free */
2236 if (target->working_areas) {
2237 /* Free the last one to allow on-the-fly moving and resizing */
2238 free(target->working_areas->backup);
2239 free(target->working_areas);
2240 target->working_areas = NULL;
2241 }
2242 }
2243
2244 /* Find the largest number of bytes that can be allocated */
2245 uint32_t target_get_working_area_avail(struct target *target)
2246 {
2247 struct working_area *c = target->working_areas;
2248 uint32_t max_size = 0;
2249
2250 if (!c)
2251 return target->working_area_size;
2252
2253 while (c) {
2254 if (c->free && max_size < c->size)
2255 max_size = c->size;
2256
2257 c = c->next;
2258 }
2259
2260 return max_size;
2261 }
2262
2263 static void target_destroy(struct target *target)
2264 {
2265 if (target->type->deinit_target)
2266 target->type->deinit_target(target);
2267
2268 if (target->semihosting)
2269 free(target->semihosting->basedir);
2270 free(target->semihosting);
2271
2272 jtag_unregister_event_callback(jtag_enable_callback, target);
2273
2274 struct target_event_action *teap = target->event_action;
2275 while (teap) {
2276 struct target_event_action *next = teap->next;
2277 Jim_DecrRefCount(teap->interp, teap->body);
2278 free(teap);
2279 teap = next;
2280 }
2281
2282 target_free_all_working_areas(target);
2283
2284 /* release the target's SMP list */
2285 if (target->smp) {
2286 struct target_list *head, *tmp;
2287
2288 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2289 list_del(&head->lh);
2290 head->target->smp = 0;
2291 free(head);
2292 }
2293 if (target->smp_targets != &empty_smp_targets)
2294 free(target->smp_targets);
2295 target->smp = 0;
2296 }
2297
2298 rtos_destroy(target);
2299
2300 free(target->gdb_port_override);
2301 free(target->type);
2302 free(target->trace_info);
2303 free(target->fileio_info);
2304 free(target->cmd_name);
2305 free(target);
2306 }
2307
2308 void target_quit(void)
2309 {
2310 struct target_event_callback *pe = target_event_callbacks;
2311 while (pe) {
2312 struct target_event_callback *t = pe->next;
2313 free(pe);
2314 pe = t;
2315 }
2316 target_event_callbacks = NULL;
2317
2318 struct target_timer_callback *pt = target_timer_callbacks;
2319 while (pt) {
2320 struct target_timer_callback *t = pt->next;
2321 free(pt);
2322 pt = t;
2323 }
2324 target_timer_callbacks = NULL;
2325
2326 for (struct target *target = all_targets; target;) {
2327 struct target *tmp;
2328
2329 tmp = target->next;
2330 target_destroy(target);
2331 target = tmp;
2332 }
2333
2334 all_targets = NULL;
2335 }
2336
2337 int target_arch_state(struct target *target)
2338 {
2339 int retval;
2340 if (!target) {
2341 LOG_WARNING("No target has been configured");
2342 return ERROR_OK;
2343 }
2344
2345 if (target->state != TARGET_HALTED)
2346 return ERROR_OK;
2347
2348 retval = target->type->arch_state(target);
2349 return retval;
2350 }
2351
2352 static int target_get_gdb_fileio_info_default(struct target *target,
2353 struct gdb_fileio_info *fileio_info)
2354 {
2355 /* If the target does not support semihosting functions, it has
2356 no need to provide a .get_gdb_fileio_info callback.
2357 Returning ERROR_FAIL here makes gdb_server report "Txx"
2358 (target halted) every time. */
2359 return ERROR_FAIL;
2360 }
2361
2362 static int target_gdb_fileio_end_default(struct target *target,
2363 int retcode, int fileio_errno, bool ctrl_c)
2364 {
2365 return ERROR_OK;
2366 }
2367
2368 int target_profiling_default(struct target *target, uint32_t *samples,
2369 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2370 {
2371 struct timeval timeout, now;
2372
2373 gettimeofday(&timeout, NULL);
2374 timeval_add_time(&timeout, seconds, 0);
2375
2376 LOG_INFO("Starting profiling. Halting and resuming the"
2377 " target as often as we can...");
2378
2379 uint32_t sample_count = 0;
2380 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2381 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2382
2383 int retval = ERROR_OK;
2384 for (;;) {
2385 target_poll(target);
2386 if (target->state == TARGET_HALTED) {
2387 uint32_t t = buf_get_u32(reg->value, 0, 32);
2388 samples[sample_count++] = t;
2389 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2390 retval = target_resume(target, 1, 0, 0, 0);
2391 target_poll(target);
2392 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2393 } else if (target->state == TARGET_RUNNING) {
2394 /* We want to quickly sample the PC. */
2395 retval = target_halt(target);
2396 } else {
2397 LOG_INFO("Target is neither halted nor running");
2398 retval = ERROR_OK;
2399 break;
2400 }
2401
2402 if (retval != ERROR_OK)
2403 break;
2404
2405 gettimeofday(&now, NULL);
2406 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2407 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2408 break;
2409 }
2410 }
2411
2412 *num_samples = sample_count;
2413 return retval;
2414 }
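
/* Illustrative sketch (hypothetical, disabled): collecting a PC histogram
 * with the default profiler -- at most 1024 samples or 10 seconds,
 * whichever limit is hit first.
 */
#if 0
static int example_profile(struct target *target)
{
uint32_t samples[1024];
uint32_t num_samples = 0;

int retval = target_profiling_default(target, samples, 1024,
&num_samples, 10);
if (retval == ERROR_OK)
LOG_INFO("captured %" PRIu32 " PC samples", num_samples);
return retval;
}
#endif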
2415
2416 /* A single aligned halfword or word access is guaranteed to use the
2417 * matching 16 or 32 bit access mode; otherwise data is transferred
2418 * as quickly as possible
2419 */
2420 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2421 {
2422 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2423 size, address);
2424
2425 if (!target_was_examined(target)) {
2426 LOG_ERROR("Target not examined yet");
2427 return ERROR_FAIL;
2428 }
2429
2430 if (size == 0)
2431 return ERROR_OK;
2432
2433 if ((address + size - 1) < address) {
2434 /* GDB can request this when e.g. PC is 0xfffffffc */
2435 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2436 address,
2437 size);
2438 return ERROR_FAIL;
2439 }
2440
2441 return target->type->write_buffer(target, address, size, buffer);
2442 }
2443
2444 static int target_write_buffer_default(struct target *target,
2445 target_addr_t address, uint32_t count, const uint8_t *buffer)
2446 {
2447 uint32_t size;
2448 unsigned int data_bytes = target_data_bits(target) / 8;
2449
2450 /* Align the address up to the largest access size. The loop condition
2451 * makes sure the next pass still has enough data for the size it handles. */
2452 for (size = 1;
2453 size < data_bytes && count >= size * 2 + (address & size);
2454 size *= 2) {
2455 if (address & size) {
2456 int retval = target_write_memory(target, address, size, 1, buffer);
2457 if (retval != ERROR_OK)
2458 return retval;
2459 address += size;
2460 count -= size;
2461 buffer += size;
2462 }
2463 }
2464
2465 /* Write the data with as large access size as possible. */
2466 for (; size > 0; size /= 2) {
2467 uint32_t aligned = count - count % size;
2468 if (aligned > 0) {
2469 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2470 if (retval != ERROR_OK)
2471 return retval;
2472 address += aligned;
2473 count -= aligned;
2474 buffer += aligned;
2475 }
2476 }
2477
2478 return ERROR_OK;
2479 }
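
/* Worked example of the chunking above (illustrative): with 32-bit
 * data_bits (data_bytes == 4), address == 0x1001 and count == 11, the head
 * loop emits a 1-byte access at 0x1001 and a 2-byte access at 0x1002 to
 * reach 4-byte alignment; the tail loop then emits two 4-byte accesses at
 * 0x1004, leaving nothing for the smaller tail sizes.
 */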
2480
2481 /* A single aligned halfword or word access is guaranteed to use the
2482 * matching 16 or 32 bit access mode; otherwise data is transferred
2483 * as quickly as possible
2484 */
2485 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2486 {
2487 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2488 size, address);
2489
2490 if (!target_was_examined(target)) {
2491 LOG_ERROR("Target not examined yet");
2492 return ERROR_FAIL;
2493 }
2494
2495 if (size == 0)
2496 return ERROR_OK;
2497
2498 if ((address + size - 1) < address) {
2499 /* GDB can request this when e.g. PC is 0xfffffffc */
2500 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2501 address,
2502 size);
2503 return ERROR_FAIL;
2504 }
2505
2506 return target->type->read_buffer(target, address, size, buffer);
2507 }
2508
2509 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2510 {
2511 uint32_t size;
2512 unsigned int data_bytes = target_data_bits(target) / 8;
2513
2514 /* Align the address up to the largest access size. The loop condition
2515 * makes sure the next pass still has enough data for the size it handles. */
2516 for (size = 1;
2517 size < data_bytes && count >= size * 2 + (address & size);
2518 size *= 2) {
2519 if (address & size) {
2520 int retval = target_read_memory(target, address, size, 1, buffer);
2521 if (retval != ERROR_OK)
2522 return retval;
2523 address += size;
2524 count -= size;
2525 buffer += size;
2526 }
2527 }
2528
2529 /* Read the data with as large access size as possible. */
2530 for (; size > 0; size /= 2) {
2531 uint32_t aligned = count - count % size;
2532 if (aligned > 0) {
2533 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2534 if (retval != ERROR_OK)
2535 return retval;
2536 address += aligned;
2537 count -= aligned;
2538 buffer += aligned;
2539 }
2540 }
2541
2542 return ERROR_OK;
2543 }
2544
2545 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2546 {
2547 uint8_t *buffer;
2548 int retval;
2549 uint32_t i;
2550 uint32_t checksum = 0;
2551 if (!target_was_examined(target)) {
2552 LOG_ERROR("Target not examined yet");
2553 return ERROR_FAIL;
2554 }
2555 if (!target->type->checksum_memory) {
2556 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2557 return ERROR_FAIL;
2558 }
2559
2560 retval = target->type->checksum_memory(target, address, size, &checksum);
2561 if (retval != ERROR_OK) {
2562 buffer = malloc(size);
2563 if (!buffer) {
2564 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2565 return ERROR_COMMAND_SYNTAX_ERROR;
2566 }
2567 retval = target_read_buffer(target, address, size, buffer);
2568 if (retval != ERROR_OK) {
2569 free(buffer);
2570 return retval;
2571 }
2572
2573 /* convert to target endianness */
2574 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2575 uint32_t target_data;
2576 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2577 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2578 }
2579
2580 retval = image_calculate_checksum(buffer, size, &checksum);
2581 free(buffer);
2582 }
2583
2584 *crc = checksum;
2585
2586 return retval;
2587 }
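
/* Illustrative sketch (hypothetical, disabled): verifying host data against
 * target memory without reading it back in full, by comparing CRCs.
 */
#if 0
static int example_verify_crc(struct target *target, target_addr_t address,
const uint8_t *data, uint32_t size)
{
uint32_t target_crc, host_crc;

int retval = target_checksum_memory(target, address, size, &target_crc);
if (retval != ERROR_OK)
return retval;

retval = image_calculate_checksum(data, size, &host_crc);
if (retval != ERROR_OK)
return retval;

return (target_crc == host_crc) ? ERROR_OK : ERROR_FAIL;
}
#endif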
2588
2589 int target_blank_check_memory(struct target *target,
2590 struct target_memory_check_block *blocks, int num_blocks,
2591 uint8_t erased_value)
2592 {
2593 if (!target_was_examined(target)) {
2594 LOG_ERROR("Target not examined yet");
2595 return ERROR_FAIL;
2596 }
2597
2598 if (!target->type->blank_check_memory)
2599 return ERROR_NOT_IMPLEMENTED;
2600
2601 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2602 }
2603
2604 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2605 {
2606 uint8_t value_buf[8];
2607 if (!target_was_examined(target)) {
2608 LOG_ERROR("Target not examined yet");
2609 return ERROR_FAIL;
2610 }
2611
2612 int retval = target_read_memory(target, address, 8, 1, value_buf);
2613
2614 if (retval == ERROR_OK) {
2615 *value = target_buffer_get_u64(target, value_buf);
2616 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2617 address,
2618 *value);
2619 } else {
2620 *value = 0x0;
2621 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2622 address);
2623 }
2624
2625 return retval;
2626 }
2627
2628 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2629 {
2630 uint8_t value_buf[4];
2631 if (!target_was_examined(target)) {
2632 LOG_ERROR("Target not examined yet");
2633 return ERROR_FAIL;
2634 }
2635
2636 int retval = target_read_memory(target, address, 4, 1, value_buf);
2637
2638 if (retval == ERROR_OK) {
2639 *value = target_buffer_get_u32(target, value_buf);
2640 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2641 address,
2642 *value);
2643 } else {
2644 *value = 0x0;
2645 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2646 address);
2647 }
2648
2649 return retval;
2650 }
2651
2652 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2653 {
2654 uint8_t value_buf[2];
2655 if (!target_was_examined(target)) {
2656 LOG_ERROR("Target not examined yet");
2657 return ERROR_FAIL;
2658 }
2659
2660 int retval = target_read_memory(target, address, 2, 1, value_buf);
2661
2662 if (retval == ERROR_OK) {
2663 *value = target_buffer_get_u16(target, value_buf);
2664 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2665 address,
2666 *value);
2667 } else {
2668 *value = 0x0;
2669 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2670 address);
2671 }
2672
2673 return retval;
2674 }
2675
2676 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2677 {
2678 if (!target_was_examined(target)) {
2679 LOG_ERROR("Target not examined yet");
2680 return ERROR_FAIL;
2681 }
2682
2683 int retval = target_read_memory(target, address, 1, 1, value);
2684
2685 if (retval == ERROR_OK) {
2686 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2687 address,
2688 *value);
2689 } else {
2690 *value = 0x0;
2691 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2692 address);
2693 }
2694
2695 return retval;
2696 }
2697
2698 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2699 {
2700 int retval;
2701 uint8_t value_buf[8];
2702 if (!target_was_examined(target)) {
2703 LOG_ERROR("Target not examined yet");
2704 return ERROR_FAIL;
2705 }
2706
2707 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2708 address,
2709 value);
2710
2711 target_buffer_set_u64(target, value_buf, value);
2712 retval = target_write_memory(target, address, 8, 1, value_buf);
2713 if (retval != ERROR_OK)
2714 LOG_DEBUG("failed: %i", retval);
2715
2716 return retval;
2717 }
2718
2719 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2720 {
2721 int retval;
2722 uint8_t value_buf[4];
2723 if (!target_was_examined(target)) {
2724 LOG_ERROR("Target not examined yet");
2725 return ERROR_FAIL;
2726 }
2727
2728 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2729 address,
2730 value);
2731
2732 target_buffer_set_u32(target, value_buf, value);
2733 retval = target_write_memory(target, address, 4, 1, value_buf);
2734 if (retval != ERROR_OK)
2735 LOG_DEBUG("failed: %i", retval);
2736
2737 return retval;
2738 }
2739
2740 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2741 {
2742 int retval;
2743 uint8_t value_buf[2];
2744 if (!target_was_examined(target)) {
2745 LOG_ERROR("Target not examined yet");
2746 return ERROR_FAIL;
2747 }
2748
2749 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2750 address,
2751 value);
2752
2753 target_buffer_set_u16(target, value_buf, value);
2754 retval = target_write_memory(target, address, 2, 1, value_buf);
2755 if (retval != ERROR_OK)
2756 LOG_DEBUG("failed: %i", retval);
2757
2758 return retval;
2759 }
2760
2761 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2762 {
2763 int retval;
2764 if (!target_was_examined(target)) {
2765 LOG_ERROR("Target not examined yet");
2766 return ERROR_FAIL;
2767 }
2768
2769 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2770 address, value);
2771
2772 retval = target_write_memory(target, address, 1, 1, &value);
2773 if (retval != ERROR_OK)
2774 LOG_DEBUG("failed: %i", retval);
2775
2776 return retval;
2777 }
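
/* Illustrative sketch (hypothetical, disabled): the accessors above compose
 * into a read-modify-write; target endianness is handled internally by the
 * target_buffer_get/set conversions.
 */
#if 0
static int example_set_bits_u32(struct target *target, target_addr_t address,
uint32_t mask)
{
uint32_t value;

int retval = target_read_u32(target, address, &value);
if (retval != ERROR_OK)
return retval;
return target_write_u32(target, address, value | mask);
}
#endif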
2778
2779 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2780 {
2781 int retval;
2782 uint8_t value_buf[8];
2783 if (!target_was_examined(target)) {
2784 LOG_ERROR("Target not examined yet");
2785 return ERROR_FAIL;
2786 }
2787
2788 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2789 address,
2790 value);
2791
2792 target_buffer_set_u64(target, value_buf, value);
2793 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2794 if (retval != ERROR_OK)
2795 LOG_DEBUG("failed: %i", retval);
2796
2797 return retval;
2798 }
2799
2800 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2801 {
2802 int retval;
2803 uint8_t value_buf[4];
2804 if (!target_was_examined(target)) {
2805 LOG_ERROR("Target not examined yet");
2806 return ERROR_FAIL;
2807 }
2808
2809 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2810 address,
2811 value);
2812
2813 target_buffer_set_u32(target, value_buf, value);
2814 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2815 if (retval != ERROR_OK)
2816 LOG_DEBUG("failed: %i", retval);
2817
2818 return retval;
2819 }
2820
2821 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2822 {
2823 int retval;
2824 uint8_t value_buf[2];
2825 if (!target_was_examined(target)) {
2826 LOG_ERROR("Target not examined yet");
2827 return ERROR_FAIL;
2828 }
2829
2830 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2831 address,
2832 value);
2833
2834 target_buffer_set_u16(target, value_buf, value);
2835 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2836 if (retval != ERROR_OK)
2837 LOG_DEBUG("failed: %i", retval);
2838
2839 return retval;
2840 }
2841
2842 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2843 {
2844 int retval;
2845 if (!target_was_examined(target)) {
2846 LOG_ERROR("Target not examined yet");
2847 return ERROR_FAIL;
2848 }
2849
2850 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2851 address, value);
2852
2853 retval = target_write_phys_memory(target, address, 1, 1, &value);
2854 if (retval != ERROR_OK)
2855 LOG_DEBUG("failed: %i", retval);
2856
2857 return retval;
2858 }
2859
2860 static int find_target(struct command_invocation *cmd, const char *name)
2861 {
2862 struct target *target = get_target(name);
2863 if (!target) {
2864 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2865 return ERROR_FAIL;
2866 }
2867 if (!target->tap->enabled) {
2868 command_print(cmd, "Target: TAP %s is disabled, "
2869 "can't be the current target\n",
2870 target->tap->dotted_name);
2871 return ERROR_FAIL;
2872 }
2873
2874 cmd->ctx->current_target = target;
2875 if (cmd->ctx->current_target_override)
2876 cmd->ctx->current_target_override = target;
2877
2878 return ERROR_OK;
2879 }
2880
2882 COMMAND_HANDLER(handle_targets_command)
2883 {
2884 int retval = ERROR_OK;
2885 if (CMD_ARGC == 1) {
2886 retval = find_target(CMD, CMD_ARGV[0]);
2887 if (retval == ERROR_OK) {
2888 /* we're done! */
2889 return retval;
2890 }
2891 }
2892
2893 struct target *target = all_targets;
2894 command_print(CMD, " TargetName Type Endian TapName State ");
2895 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2896 while (target) {
2897 const char *state;
2898 char marker = ' ';
2899
2900 if (target->tap->enabled)
2901 state = target_state_name(target);
2902 else
2903 state = "tap-disabled";
2904
2905 if (CMD_CTX->current_target == target)
2906 marker = '*';
2907
2908 /* keep columns lined up to match the headers above */
2909 command_print(CMD,
2910 "%2d%c %-18s %-10s %-6s %-18s %s",
2911 target->target_number,
2912 marker,
2913 target_name(target),
2914 target_type_name(target),
2915 jim_nvp_value2name_simple(nvp_target_endian,
2916 target->endianness)->name,
2917 target->tap->dotted_name,
2918 state);
2919 target = target->next;
2920 }
2921
2922 return retval;
2923 }
2924
2925 /* Periodically check for reset and power-dropout conditions and run the corresponding event procs if so. */
2926
2927 static int power_dropout;
2928 static int srst_asserted;
2929
2930 static int run_power_restore;
2931 static int run_power_dropout;
2932 static int run_srst_asserted;
2933 static int run_srst_deasserted;
2934
2935 static int sense_handler(void)
2936 {
2937 static int prev_srst_asserted;
2938 static int prev_power_dropout;
2939
2940 int retval = jtag_power_dropout(&power_dropout);
2941 if (retval != ERROR_OK)
2942 return retval;
2943
2944 int power_restored;
2945 power_restored = prev_power_dropout && !power_dropout;
2946 if (power_restored)
2947 run_power_restore = 1;
2948
2949 int64_t current = timeval_ms();
2950 static int64_t last_power;
2951 bool wait_more = last_power + 2000 > current;
2952 if (power_dropout && !wait_more) {
2953 run_power_dropout = 1;
2954 last_power = current;
2955 }
2956
2957 retval = jtag_srst_asserted(&srst_asserted);
2958 if (retval != ERROR_OK)
2959 return retval;
2960
2961 int srst_deasserted;
2962 srst_deasserted = prev_srst_asserted && !srst_asserted;
2963
2964 static int64_t last_srst;
2965 wait_more = last_srst + 2000 > current;
2966 if (srst_deasserted && !wait_more) {
2967 run_srst_deasserted = 1;
2968 last_srst = current;
2969 }
2970
2971 if (!prev_srst_asserted && srst_asserted)
2972 run_srst_asserted = 1;
2973
2974 prev_srst_asserted = srst_asserted;
2975 prev_power_dropout = power_dropout;
2976
2977 if (srst_deasserted || power_restored) {
2978 /* Other than logging the event we can't do anything here.
2979 * Issuing a reset is a particularly bad idea as we might
2980 * be inside a reset already.
2981 */
2982 }
2983
2984 return ERROR_OK;
2985 }
2986
2987 /* process target state changes */
2988 static int handle_target(void *priv)
2989 {
2990 Jim_Interp *interp = (Jim_Interp *)priv;
2991 int retval = ERROR_OK;
2992
2993 if (!is_jtag_poll_safe()) {
2994 /* polling is disabled currently */
2995 return ERROR_OK;
2996 }
2997
2998 /* we do not want to recurse here... */
2999 static int recursive;
3000 if (!recursive) {
3001 recursive = 1;
3002 sense_handler();
3003 /* danger! running these procedures can trigger srst assertions and power dropouts.
3004 * We need to avoid an infinite loop/recursion here and we do that by
3005 * clearing the flags after running these events.
3006 */
3007 int did_something = 0;
3008 if (run_srst_asserted) {
3009 LOG_INFO("srst asserted detected, running srst_asserted proc.");
3010 Jim_Eval(interp, "srst_asserted");
3011 did_something = 1;
3012 }
3013 if (run_srst_deasserted) {
3014 Jim_Eval(interp, "srst_deasserted");
3015 did_something = 1;
3016 }
3017 if (run_power_dropout) {
3018 LOG_INFO("Power dropout detected, running power_dropout proc.");
3019 Jim_Eval(interp, "power_dropout");
3020 did_something = 1;
3021 }
3022 if (run_power_restore) {
3023 Jim_Eval(interp, "power_restore");
3024 did_something = 1;
3025 }
3026
3027 if (did_something) {
3028 /* clear detect flags */
3029 sense_handler();
3030 }
3031
3032 /* clear action flags */
3033
3034 run_srst_asserted = 0;
3035 run_srst_deasserted = 0;
3036 run_power_restore = 0;
3037 run_power_dropout = 0;
3038
3039 recursive = 0;
3040 }
3041
3042 /* Poll targets for state changes unless that's globally disabled.
3043 * Skip targets that are currently disabled.
3044 */
3045 for (struct target *target = all_targets;
3046 is_jtag_poll_safe() && target;
3047 target = target->next) {
3048
3049 if (!target_was_examined(target))
3050 continue;
3051
3052 if (!target->tap->enabled)
3053 continue;
3054
3055 if (target->backoff.times > target->backoff.count) {
3056 /* do not poll this time as we failed previously */
3057 target->backoff.count++;
3058 continue;
3059 }
3060 target->backoff.count = 0;
3061
3062 /* only poll target if we've got power and srst isn't asserted */
3063 if (!power_dropout && !srst_asserted) {
3064 /* polling may fail silently until the target has been examined */
3065 retval = target_poll(target);
3066 if (retval != ERROR_OK) {
3067 /* Polling failed: back off, increasing the interval between polls up to 5000 ms */
3068 if (target->backoff.times * polling_interval < 5000) {
3069 target->backoff.times *= 2;
3070 target->backoff.times++;
3071 }
3072
3073 /* Tell GDB to halt the debugger. This allows the user to
3074 * run monitor commands to handle the situation.
3075 */
3076 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3077 }
3078 if (target->backoff.times > 0) {
3079 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3080 target_reset_examined(target);
3081 retval = target_examine_one(target);
3082 /* Target examination could have failed due to unstable connection,
3083 * but we set the examined flag anyway to repoll it later */
3084 if (retval != ERROR_OK) {
3085 target_set_examined(target);
3086 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3087 target->backoff.times * polling_interval);
3088 return retval;
3089 }
3090 }
3091
3092 /* Since we succeeded, we reset backoff count */
3093 target->backoff.times = 0;
3094 }
3095 }
3096
3097 return retval;
3098 }
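
/* Worked example of the backoff above (illustrative): starting from
 * backoff.times == 0, each failed poll grows it as times = times * 2 + 1,
 * i.e. 1, 3, 7, 15, 31, 63, and growth stops once
 * times * polling_interval >= 5000. Since backoff.times polls are skipped
 * between attempts, a 100 ms polling interval degrades to roughly one poll
 * every 6.4 s until a poll succeeds and the counter resets to 0.
 */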
3099
3100 COMMAND_HANDLER(handle_reg_command)
3101 {
3102 LOG_DEBUG("-");
3103
3104 struct target *target = get_current_target(CMD_CTX);
3105 struct reg *reg = NULL;
3106
3107 /* list all available registers for the current target */
3108 if (CMD_ARGC == 0) {
3109 struct reg_cache *cache = target->reg_cache;
3110
3111 unsigned int count = 0;
3112 while (cache) {
3113 unsigned i;
3114
3115 command_print(CMD, "===== %s", cache->name);
3116
3117 for (i = 0, reg = cache->reg_list;
3118 i < cache->num_regs;
3119 i++, reg++, count++) {
3120 if (!reg->exist || reg->hidden)
3121 continue;
3122 /* only print cached values if they are valid */
3123 if (reg->valid) {
3124 char *value = buf_to_hex_str(reg->value,
3125 reg->size);
3126 command_print(CMD,
3127 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3128 count, reg->name,
3129 reg->size, value,
3130 reg->dirty
3131 ? " (dirty)"
3132 : "");
3133 free(value);
3134 } else {
3135 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3136 count, reg->name,
3137 reg->size);
3138 }
3139 }
3140 cache = cache->next;
3141 }
3142
3143 return ERROR_OK;
3144 }
3145
3146 /* access a single register by its ordinal number */
3147 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3148 unsigned num;
3149 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3150
3151 struct reg_cache *cache = target->reg_cache;
3152 unsigned int count = 0;
3153 while (cache) {
3154 unsigned i;
3155 for (i = 0; i < cache->num_regs; i++) {
3156 if (count++ == num) {
3157 reg = &cache->reg_list[i];
3158 break;
3159 }
3160 }
3161 if (reg)
3162 break;
3163 cache = cache->next;
3164 }
3165
3166 if (!reg) {
3167 command_print(CMD, "%i is out of bounds, the current target "
3168 "has only %i registers (0 - %i)", num, count, count - 1);
3169 return ERROR_OK;
3170 }
3171 } else {
3172 /* access a single register by its name */
3173 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3174
3175 if (!reg)
3176 goto not_found;
3177 }
3178
3179 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3180
3181 if (!reg->exist)
3182 goto not_found;
3183
3184 /* display a register */
3185 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3186 && (CMD_ARGV[1][0] <= '9')))) {
3187 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3188 reg->valid = 0;
3189
3190 if (reg->valid == 0) {
3191 int retval = reg->type->get(reg);
3192 if (retval != ERROR_OK) {
3193 LOG_ERROR("Could not read register '%s'", reg->name);
3194 return retval;
3195 }
3196 }
3197 char *value = buf_to_hex_str(reg->value, reg->size);
3198 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3199 free(value);
3200 return ERROR_OK;
3201 }
3202
3203 /* set register value */
3204 if (CMD_ARGC == 2) {