target: Use target_addr_t for algorithm addresses.
[openocd.git] / src / target / target.c
/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath
 *   Dominic.Rath@gmx.de
 *
 *   Copyright (C) 2007-2010 Øyvind Harboe
 *   oyvind.harboe@zylin.com
 *
 *   Copyright (C) 2008, Duane Ellis
 *   openocd@duaneellis.com
 *
 *   Copyright (C) 2008 by Spencer Oliver
 *   spen@spen-soft.co.uk
 *
 *   Copyright (C) 2008 by Rick Altherr
 *   kc8apf@kc8apf.net
 *
 *   Copyright (C) 2011 by Broadcom Corporation
 *   Evan Hunter - ehunter@broadcom.com
 *
 *   Copyright (C) ST-Ericsson SA 2011
 *   michel.jaouen@stericsson.com : smp minimum support
 *
 *   Copyright (C) 2011 Andreas Fritiofson
 *   andreas.fritiofson@gmail.com
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/align.h>
#include <helper/time_support.h>
#include <jtag/jtag.h>
#include <flash/nor/core.h>

#include "target.h"
#include "target_type.h"
#include "target_request.h"
#include "breakpoints.h"
#include "register.h"
#include "trace.h"
#include "image.h"
#include "rtos/rtos.h"
#include "transport/transport.h"
#include "arm_cti.h"

/* default halt wait timeout (ms) */
#define DEFAULT_HALT_TIMEOUT 5000

static int target_read_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info);
static int target_gdb_fileio_end_default(struct target *target, int retcode,
		int fileio_errno, bool ctrl_c);

/* targets */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type cortexm_target;
extern struct target_type cortexa_target;
extern struct target_type aarch64_target;
extern struct target_type cortexr4_target;
extern struct target_type arm11_target;
extern struct target_type ls1_sap_target;
extern struct target_type mips_m4k_target;
extern struct target_type mips_mips64_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type hla_target;
extern struct target_type nds32_v2_target;
extern struct target_type nds32_v3_target;
extern struct target_type nds32_v3m_target;
extern struct target_type or1k_target;
extern struct target_type quark_x10xx_target;
extern struct target_type quark_d20xx_target;
extern struct target_type stm8_target;
extern struct target_type riscv_target;
extern struct target_type mem_ap_target;
extern struct target_type esirisc_target;
extern struct target_type arcv2_target;

static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};

struct target *all_targets;
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;

static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};

static const char *target_strerror_safe(int err)
{
	const struct jim_nvp *n;

	n = jim_nvp_value2name_simple(nvp_error_target, err);
	if (!n->name)
		return "unknown";
	else
		return n->name;
}

static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .name = NULL, .value = -1 }
};

static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};

const char *debug_reason_name(struct target *t)
{
	const char *cp;

	cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
			t->debug_reason)->name;
	if (!cp) {
		LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

const char *target_state_name(struct target *t)
{
	const char *cp;
	cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
	if (!cp) {
		LOG_ERROR("Invalid target state: %d", (int)(t->state));
		cp = "(*BUG*unknown*BUG*)";
	}

	if (!target_was_examined(t) && t->defer_examine)
		cp = "examine deferred";

	return cp;
}

const char *target_event_name(enum target_event event)
{
	const char *cp;
	cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
	if (!cp) {
		LOG_ERROR("Invalid target event: %d", (int)(event));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

const char *target_reset_mode_name(enum target_reset_mode reset_mode)
{
	const char *cp;
	cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
	if (!cp) {
		LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

/* determine the number of the new target */
static int new_target_number(void)
{
	struct target *t;
	int x;

	/* number is 0 based */
	x = -1;
	t = all_targets;
	while (t) {
		if (x < t->target_number)
			x = t->target_number;
		t = t->next;
	}
	return x + 1;
}

static void append_to_list_all_targets(struct target *target)
{
	struct target **t = &all_targets;

	while (*t)
		t = &((*t)->next);
	*t = target;
}

/* read a uint64_t from a buffer in target memory endianness */
uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u64(buffer);
	else
		return be_to_h_u64(buffer);
}

/* read a uint32_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u32(buffer);
	else
		return be_to_h_u32(buffer);
}

/* read a uint24_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u24(buffer);
	else
		return be_to_h_u24(buffer);
}

/* read a uint16_t from a buffer in target memory endianness */
uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u16(buffer);
	else
		return be_to_h_u16(buffer);
}

/* write a uint64_t to a buffer in target memory endianness */
void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u64_to_le(buffer, value);
	else
		h_u64_to_be(buffer, value);
}

/* write a uint32_t to a buffer in target memory endianness */
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u32_to_le(buffer, value);
	else
		h_u32_to_be(buffer, value);
}

/* write a uint24_t to a buffer in target memory endianness */
void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u24_to_le(buffer, value);
	else
		h_u24_to_be(buffer, value);
}

/* write a uint16_t to a buffer in target memory endianness */
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u16_to_le(buffer, value);
	else
		h_u16_to_be(buffer, value);
}

/* write a uint8_t to a buffer in target memory endianness */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}
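
/* Illustrative sketch of combining the helpers above with
 * target_read_memory() (error handling abbreviated; "address" is a
 * placeholder):
 *
 *	uint8_t raw[4];
 *	if (target_read_memory(target, address, 4, 1, raw) == ERROR_OK) {
 *		uint32_t value = target_buffer_get_u32(target, raw);
 *		...
 *	}
 */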

/* read a uint64_t array from a buffer in target memory endianness */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
}

/* read a uint32_t array from a buffer in target memory endianness */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}

/* read a uint16_t array from a buffer in target memory endianness */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}

/* write a uint64_t array to a buffer in target memory endianness */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
}

/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
}

/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
}

/* return a pointer to a configured target; id is name or number */
struct target *get_target(const char *id)
{
	struct target *target;

	/* try as Tcl target name */
	for (target = all_targets; target; target = target->next) {
		if (!target_name(target))
			continue;
		if (strcmp(id, target_name(target)) == 0)
			return target;
	}

	/* It's OK to remove this fallback sometime after August 2010 or so */

	/* no match, try as number */
	unsigned num;
	if (parse_uint(id, &num) != ERROR_OK)
		return NULL;

	for (target = all_targets; target; target = target->next) {
		if (target->target_number == (int)num) {
			LOG_WARNING("use '%s' as target identifier, not '%u'",
					target_name(target), num);
			return target;
		}
	}

	return NULL;
}

/* returns a pointer to the n-th configured target */
struct target *get_target_by_num(int num)
{
	struct target *target = all_targets;

	while (target) {
		if (target->target_number == num)
			return target;
		target = target->next;
	}

	return NULL;
}

struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);

	if (!target) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}

struct target *get_current_target_or_null(struct command_context *cmd_ctx)
{
	return cmd_ctx->current_target_override
		? cmd_ctx->current_target_override
		: cmd_ctx->current_target;
}

int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			int64_t t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}

int target_halt(struct target *target)
{
	int retval;
	/* We can't halt until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->halt(target);
	if (retval != ERROR_OK)
		return retval;

	target->halt_issued = true;
	target->halt_issued_time = timeval_ms();

	return ERROR_OK;
}

/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter.
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped.  (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm... and
 * enabling them could cause such helpers to malfunction, for example
 * by overwriting data with a breakpoint instruction.  On the other
 * hand, the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoints to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't resume until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll = jtag_poll_get_enabled();
	jtag_poll_set_enabled(false);
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_set_enabled(save_poll);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
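
/* A typical caller sequence, sketched for illustration (error handling
 * elided). With current == 1 the saved program counter is used, so the
 * address argument is ignored:
 *
 *	target_halt(target);
 *	... wait for TARGET_HALTED via target_poll() ...
 *	target_resume(target, 1, 0, 1, 0);
 */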

static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
{
	char buf[100];
	int retval;
	struct jim_nvp *n;
	n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
	if (!n->name) {
		LOG_ERROR("invalid reset mode");
		return ERROR_FAIL;
	}

	struct target *target;
	for (target = all_targets; target; target = target->next)
		target_call_reset_callbacks(target, reset_mode);

	/* disable polling during reset to make reset event scripts
	 * more predictable, i.e. dr/irscan & pathmove in events will
	 * not have JTAG operations injected into the middle of a sequence.
	 */
	bool save_poll = jtag_poll_get_enabled();

	jtag_poll_set_enabled(false);

	sprintf(buf, "ocd_process_reset %s", n->name);
	retval = Jim_Eval(cmd->ctx->interp, buf);

	jtag_poll_set_enabled(save_poll);

	if (retval != JIM_OK) {
		Jim_MakeErrorMessage(cmd->ctx->interp);
		command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
		return ERROR_FAIL;
	}

	/* We want any events to be processed before the prompt */
	retval = target_call_timer_callbacks_now();

	for (target = all_targets; target; target = target->next) {
		target->type->check_reset(target);
		target->running_alg = false;
	}

	return retval;
}

static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}

static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}

/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}

static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}

/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}

/* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
 * Keep in sync */
int target_examine_one(struct target *target)
{
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target->type->examine(target);
	if (retval != ERROR_OK) {
		target_reset_examined(target);
		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
		return retval;
	}

	target_set_examined(target);
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return ERROR_OK;
}

static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	jtag_unregister_event_callback(jtag_enable_callback, target);

	return target_examine_one(target);
}

/* Targets that correctly implement init + examine, i.e.
 * no communication with target during init:
 *
 * XScale
 */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}

		if (target->defer_examine)
			continue;

		int retval2 = target_examine_one(target);
		if (retval2 != ERROR_OK) {
			LOG_WARNING("target %s examination failed", target_name(target));
			retval = retval2;
		}
	}
	return retval;
}

const char *target_type_name(struct target *target)
{
	return target->type->name;
}

static int target_soft_reset_halt(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt(target);
}

/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param num_mem_params count of memory-based parameters
 * @param mem_params memory-based parameters to pass to the algorithm
 * @param num_reg_params count of register-based parameters
 * @param reg_param register-based parameters to pass to the algorithm
 * @param entry_point address on the target where execution begins
 * @param exit_point address at which execution is considered complete
 * @param timeout_ms timeout in milliseconds
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		target_addr_t entry_point, target_addr_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}

/**
 * Executes a target-specific native code algorithm and leaves it running.
 *
 * @param target used to run the algorithm
 * @param num_mem_params count of memory-based parameters
 * @param mem_params memory-based parameters to pass to the algorithm
 * @param num_reg_params count of register-based parameters
 * @param reg_params register-based parameters to pass to the algorithm
 * @param entry_point address on the target where execution begins
 * @param exit_point address at which execution is considered complete
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t entry_point, target_addr_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}

/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param num_mem_params count of memory-based parameters
 * @param mem_params memory-based parameters to pass to the algorithm
 * @param num_reg_params count of register-based parameters
 * @param reg_params register-based parameters to pass to the algorithm
 * @param exit_point address at which execution is considered complete
 * @param timeout_ms timeout in milliseconds
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
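
/* The start/wait pair above is typically used like this (illustrative
 * sketch; entry_point/exit_point come from the helper image loaded
 * beforehand):
 *
 *	retval = target_start_algorithm(target, 0, NULL, num_regs, regs,
 *			entry_point, exit_point, arch_info);
 *	if (retval == ERROR_OK)
 *		retval = target_wait_algorithm(target, 0, NULL, num_regs, regs,
 *				exit_point, timeout_ms, arch_info);
 */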

/**
 * Streams data to a circular buffer on target intended for consumption by code
 * running asynchronously on target.
 *
 * This is intended for applications where target-specific native code runs
 * on the target, receives data from the circular buffer, does something with
 * it (most likely writing it to a flash memory), and advances the circular
 * buffer pointer.
 *
 * This assumes that the helper algorithm has already been loaded to the target,
 * but has not been started yet. Given memory and register parameters are passed
 * to the algorithm.
 *
 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
 * following format:
 *
 * [buffer_start + 0, buffer_start + 4):
 *	Write Pointer address (aka head). Written and updated by this
 *	routine when new data is written to the circular buffer.
 * [buffer_start + 4, buffer_start + 8):
 *	Read Pointer address (aka tail). Updated by code running on the
 *	target after it consumes data.
 * [buffer_start + 8, buffer_start + buffer_size):
 *	Circular buffer contents.
 *
 * See contrib/loaders/flash/stm32f1x.S for an example.
 *
 * @param target used to run the algorithm
 * @param buffer address on the host where data to be sent is located
 * @param count number of blocks to send
 * @param block_size size in bytes of each block
 * @param num_mem_params count of memory-based params to pass to algorithm
 * @param mem_params memory-based params to pass to algorithm
 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params register-based params to pass to algorithm
 * @param buffer_start address on the target of the circular buffer structure
 * @param buffer_size size of the circular buffer structure
 * @param entry_point address on the target to execute to start the algorithm
 * @param exit_point address at which to set a breakpoint to catch the
 *	end of the algorithm; can be 0 if target triggers a breakpoint itself
 * @param arch_info target-specific description of the algorithm.
 */
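/* Equivalent C view of the control block described above, shown for
 * illustration only (the code below accesses the two pointers with
 * target_read_u32()/target_write_u32() rather than through a struct):
 *
 *	struct {
 *		uint32_t wp;	// head, advanced by this routine
 *		uint32_t rp;	// tail, advanced by the target algorithm
 *		uint8_t data[buffer_size - 8];	// circular data area
 *	};
 */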

int target_run_flash_async_algorithm(struct target *target,
		const uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
				(size_t)(buffer - buffer_orig), count, wp, rp);

		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;
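		/* Worked example of the arithmetic above: with buffer_size == 1024
		 * the data area spans 1016 bytes. When the FIFO is empty
		 * (rp == wp == fifo_start_addr) the final branch applies, so at
		 * most 1016 - block_size bytes may be written before wp would
		 * wrap around onto rp and look empty again. */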

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set rp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, rp_addr, &rp);
		if (retval == ERROR_OK && rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}

int target_run_read_async_algorithm(struct target *target,
		uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash read algorithm");
		return retval;
	}

	while (count > 0) {
		retval = target_read_u32(target, wp_addr, &wp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get write pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
				(size_t)(buffer - buffer_orig), count, wp, rp);

		if (wp == 0) {
			LOG_ERROR("flash read algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. */
		uint32_t thisrun_bytes;
		if (wp >= rp)
			thisrun_bytes = wp - rp;
		else
			thisrun_bytes = fifo_end_addr - rp;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * reading. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* Reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to read */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Read data from fifo */
		retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap read pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		rp += thisrun_bytes;
		if (rp >= fifo_end_addr)
			rp = fifo_start_addr;

		/* Store updated read pointer to target */
		retval = target_write_u32(target, rp_addr, rp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash read algorithm on target */
		target_write_u32(target, rp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash read algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set wp = 0 after fifo reader loop finished */
		retval = target_read_u32(target, wp_addr, &wp);
		if (retval == ERROR_OK && wp == 0) {
			LOG_ERROR("flash read algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}

int target_read_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->read_memory) {
		LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->read_memory(target, address, size, count, buffer);
}

int target_read_phys_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->read_phys_memory) {
		LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->read_phys_memory(target, address, size, count, buffer);
}

int target_write_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->write_memory) {
		LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->write_memory(target, address, size, count, buffer);
}

int target_write_phys_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->write_phys_memory) {
		LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->write_phys_memory(target, address, size, count, buffer);
}

int target_add_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
		LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_breakpoint(target, breakpoint);
}

int target_add_context_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_context_breakpoint(target, breakpoint);
}

int target_add_hybrid_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_hybrid_breakpoint(target, breakpoint);
}

int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}

int target_add_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_watchpoint(target, watchpoint);
}

int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}

int target_hit_watchpoint(struct target *target,
		struct watchpoint **hit_watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!target->type->hit_watchpoint) {
		/* For backward compatibility: if hit_watchpoint is not implemented,
		 * return ERROR_FAIL so that gdb_server does not report bogus
		 * watchpoint information. */
		return ERROR_FAIL;
	}

	return target->type->hit_watchpoint(target, hit_watchpoint);
}

const char *target_get_gdb_arch(struct target *target)
{
	if (!target->type->get_gdb_arch)
		return NULL;
	return target->type->get_gdb_arch(target);
}

int target_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
{
	int result = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}

	result = target->type->get_gdb_reg_list(target, reg_list,
			reg_list_size, reg_class);

done:
	if (result != ERROR_OK) {
		*reg_list = NULL;
		*reg_list_size = 0;
	}
	return result;
}

int target_get_gdb_reg_list_noread(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
{
	if (target->type->get_gdb_reg_list_noread &&
			target->type->get_gdb_reg_list_noread(target, reg_list,
				reg_list_size, reg_class) == ERROR_OK)
		return ERROR_OK;
	return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
}

bool target_supports_gdb_connection(struct target *target)
{
	/*
	 * Exclude all targets that don't provide get_gdb_reg_list
	 * or that have an explicit gdb_max_connections == 0.
	 */
	return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
}

int target_step(struct target *target,
		int current, target_addr_t address, int handle_breakpoints)
{
	int retval;

	target_call_event_callbacks(target, TARGET_EVENT_STEP_START);

	retval = target->type->step(target, current, address, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_STEP_END);

	return retval;
}

int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->get_gdb_fileio_info(target, fileio_info);
}

int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
}

target_addr_t target_address_max(struct target *target)
{
	unsigned bits = target_address_bits(target);
	if (sizeof(target_addr_t) * 8 == bits)
		return (target_addr_t) -1;
	else
		return (((target_addr_t) 1) << bits) - 1;
}
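
/* Example: for a 32-bit target the shift above yields 0xffffffff; a 64-bit
 * target takes the first branch instead, since shifting a 64-bit type by 64
 * bits would be undefined behaviour in C. */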

unsigned target_address_bits(struct target *target)
{
	if (target->type->address_bits)
		return target->type->address_bits(target);
	return 32;
}

unsigned int target_data_bits(struct target *target)
{
	if (target->type->data_bits)
		return target->type->data_bits(target);
	return 32;
}

static int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}

static int handle_target(void *priv);

static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}

static int target_init(struct command_context *cmd_ctx)
{
	struct target *target;
	int retval;

	for (target = all_targets; target; target = target->next) {
		retval = target_init_one(cmd_ctx, target);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!all_targets)
		return ERROR_OK;

	retval = target_register_user_commands(cmd_ctx);
	if (retval != ERROR_OK)
		return retval;

	retval = target_register_timer_callback(&handle_target,
			polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	retval = command_run_line(CMD_CTX, "init_targets");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_target_events");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}

int target_register_event_callback(int (*callback)(struct target *target,
		enum target_event event, void *priv), void *priv)
{
	struct target_event_callback **callbacks_p = &target_event_callbacks;

	if (!callback)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (*callbacks_p) {
		while ((*callbacks_p)->next)
			callbacks_p = &((*callbacks_p)->next);
		callbacks_p = &((*callbacks_p)->next);
	}

	(*callbacks_p) = malloc(sizeof(struct target_event_callback));
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->priv = priv;
	(*callbacks_p)->next = NULL;

	return ERROR_OK;
}
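
/* Registration sketch (the handler name is illustrative):
 *
 *	static int my_event_handler(struct target *target,
 *			enum target_event event, void *priv)
 *	{
 *		if (event == TARGET_EVENT_HALTED)
 *			LOG_INFO("%s halted", target_name(target));
 *		return ERROR_OK;
 *	}
 *
 *	target_register_event_callback(my_event_handler, NULL);
 */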

int target_register_reset_callback(int (*callback)(struct target *target,
		enum target_reset_mode reset_mode, void *priv), void *priv)
{
	struct target_reset_callback *entry;

	if (!callback)
		return ERROR_COMMAND_SYNTAX_ERROR;

	entry = malloc(sizeof(struct target_reset_callback));
	if (!entry) {
		LOG_ERROR("error allocating buffer for reset callback entry");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	entry->callback = callback;
	entry->priv = priv;
	list_add(&entry->list, &target_reset_callback_list);

	return ERROR_OK;
}

int target_register_trace_callback(int (*callback)(struct target *target,
		size_t len, uint8_t *data, void *priv), void *priv)
{
	struct target_trace_callback *entry;

	if (!callback)
		return ERROR_COMMAND_SYNTAX_ERROR;

	entry = malloc(sizeof(struct target_trace_callback));
	if (!entry) {
		LOG_ERROR("error allocating buffer for trace callback entry");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	entry->callback = callback;
	entry->priv = priv;
	list_add(&entry->list, &target_trace_callback_list);

	return ERROR_OK;
}

int target_register_timer_callback(int (*callback)(void *priv),
		unsigned int time_ms, enum target_timer_type type, void *priv)
{
	struct target_timer_callback **callbacks_p = &target_timer_callbacks;

	if (!callback)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (*callbacks_p) {
		while ((*callbacks_p)->next)
			callbacks_p = &((*callbacks_p)->next);
		callbacks_p = &((*callbacks_p)->next);
	}

	(*callbacks_p) = malloc(sizeof(struct target_timer_callback));
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->type = type;
	(*callbacks_p)->time_ms = time_ms;
	(*callbacks_p)->removed = false;

	(*callbacks_p)->when = timeval_ms() + time_ms;
	target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);

	(*callbacks_p)->priv = priv;
	(*callbacks_p)->next = NULL;

	return ERROR_OK;
}
1748
1749 int target_unregister_event_callback(int (*callback)(struct target *target,
1750 enum target_event event, void *priv), void *priv)
1751 {
1752 struct target_event_callback **p = &target_event_callbacks;
1753 struct target_event_callback *c = target_event_callbacks;
1754
1755 if (!callback)
1756 return ERROR_COMMAND_SYNTAX_ERROR;
1757
1758 while (c) {
1759 struct target_event_callback *next = c->next;
1760 if ((c->callback == callback) && (c->priv == priv)) {
1761 *p = next;
1762 free(c);
1763 return ERROR_OK;
		} else {
			p = &(c->next);
		}
1766 c = next;
1767 }
1768
1769 return ERROR_OK;
1770 }
1771
1772 int target_unregister_reset_callback(int (*callback)(struct target *target,
1773 enum target_reset_mode reset_mode, void *priv), void *priv)
1774 {
1775 struct target_reset_callback *entry;
1776
1777 if (!callback)
1778 return ERROR_COMMAND_SYNTAX_ERROR;
1779
1780 list_for_each_entry(entry, &target_reset_callback_list, list) {
1781 if (entry->callback == callback && entry->priv == priv) {
1782 list_del(&entry->list);
1783 free(entry);
1784 break;
1785 }
1786 }
1787
1788 return ERROR_OK;
1789 }
1790
1791 int target_unregister_trace_callback(int (*callback)(struct target *target,
1792 size_t len, uint8_t *data, void *priv), void *priv)
1793 {
1794 struct target_trace_callback *entry;
1795
1796 if (!callback)
1797 return ERROR_COMMAND_SYNTAX_ERROR;
1798
1799 list_for_each_entry(entry, &target_trace_callback_list, list) {
1800 if (entry->callback == callback && entry->priv == priv) {
1801 list_del(&entry->list);
1802 free(entry);
1803 break;
1804 }
1805 }
1806
1807 return ERROR_OK;
1808 }
1809
1810 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1811 {
1812 if (!callback)
1813 return ERROR_COMMAND_SYNTAX_ERROR;
1814
1815 for (struct target_timer_callback *c = target_timer_callbacks;
1816 c; c = c->next) {
1817 if ((c->callback == callback) && (c->priv == priv)) {
1818 c->removed = true;
1819 return ERROR_OK;
1820 }
1821 }
1822
1823 return ERROR_FAIL;
1824 }
1825
1826 int target_call_event_callbacks(struct target *target, enum target_event event)
1827 {
1828 struct target_event_callback *callback = target_event_callbacks;
1829 struct target_event_callback *next_callback;
1830
1831 if (event == TARGET_EVENT_HALTED) {
1832 		/* deliver the GDB halt event before the generic halted event */
1833 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1834 }
1835
1836 LOG_DEBUG("target event %i (%s) for core %s", event,
1837 jim_nvp_value2name_simple(nvp_target_event, event)->name,
1838 target_name(target));
1839
1840 target_handle_event(target, event);
1841
1842 while (callback) {
1843 next_callback = callback->next;
1844 callback->callback(target, event, callback->priv);
1845 callback = next_callback;
1846 }
1847
1848 return ERROR_OK;
1849 }
1850
1851 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1852 {
1853 struct target_reset_callback *callback;
1854
1855 LOG_DEBUG("target reset %i (%s)", reset_mode,
1856 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1857
1858 list_for_each_entry(callback, &target_reset_callback_list, list)
1859 callback->callback(target, reset_mode, callback->priv);
1860
1861 return ERROR_OK;
1862 }
1863
1864 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1865 {
1866 struct target_trace_callback *callback;
1867
1868 list_for_each_entry(callback, &target_trace_callback_list, list)
1869 callback->callback(target, len, data, callback->priv);
1870
1871 return ERROR_OK;
1872 }
1873
1874 static int target_timer_callback_periodic_restart(
1875 struct target_timer_callback *cb, int64_t *now)
1876 {
1877 cb->when = *now + cb->time_ms;
1878 return ERROR_OK;
1879 }
1880
1881 static int target_call_timer_callback(struct target_timer_callback *cb,
1882 int64_t *now)
1883 {
1884 cb->callback(cb->priv);
1885
1886 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1887 return target_timer_callback_periodic_restart(cb, now);
1888
1889 return target_unregister_timer_callback(cb->callback, cb->priv);
1890 }
1891
1892 static int target_call_timer_callbacks_check_time(int checktime)
1893 {
1894 static bool callback_processing;
1895
1896 /* Do not allow nesting */
1897 if (callback_processing)
1898 return ERROR_OK;
1899
1900 callback_processing = true;
1901
1902 keep_alive();
1903
1904 int64_t now = timeval_ms();
1905
1906 	/* Initialize to a default value that's well into the future.
1907 * The loop below will make it closer to now if there are
1908 * callbacks that want to be called sooner. */
1909 target_timer_next_event_value = now + 1000;
1910
1911 /* Store an address of the place containing a pointer to the
1912 * next item; initially, that's a standalone "root of the
1913 * list" variable. */
1914 struct target_timer_callback **callback = &target_timer_callbacks;
1915 while (callback && *callback) {
1916 if ((*callback)->removed) {
1917 struct target_timer_callback *p = *callback;
1918 *callback = (*callback)->next;
1919 free(p);
1920 continue;
1921 }
1922
1923 bool call_it = (*callback)->callback &&
1924 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1925 now >= (*callback)->when);
1926
1927 if (call_it)
1928 target_call_timer_callback(*callback, &now);
1929
1930 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1931 target_timer_next_event_value = (*callback)->when;
1932
1933 callback = &(*callback)->next;
1934 }
1935
1936 callback_processing = false;
1937 return ERROR_OK;
1938 }
1939
1940 int target_call_timer_callbacks(void)
1941 {
1942 return target_call_timer_callbacks_check_time(1);
1943 }
1944
1945 /* invoke periodic callbacks immediately */
1946 int target_call_timer_callbacks_now(void)
1947 {
1948 return target_call_timer_callbacks_check_time(0);
1949 }
1950
1951 int64_t target_timer_next_event(void)
1952 {
1953 return target_timer_next_event_value;
1954 }
1955
1956 /* Prints the working area layout for debug purposes */
1957 static void print_wa_layout(struct target *target)
1958 {
1959 struct working_area *c = target->working_areas;
1960
1961 while (c) {
1962 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1963 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1964 c->address, c->address + c->size - 1, c->size);
1965 c = c->next;
1966 }
1967 }
1968
1969 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1970 static void target_split_working_area(struct working_area *area, uint32_t size)
1971 {
1972 assert(area->free); /* Shouldn't split an allocated area */
1973 assert(size <= area->size); /* Caller should guarantee this */
1974
1975 /* Split only if not already the right size */
1976 if (size < area->size) {
1977 struct working_area *new_wa = malloc(sizeof(*new_wa));
1978
1979 if (!new_wa)
1980 return;
1981
1982 new_wa->next = area->next;
1983 new_wa->size = area->size - size;
1984 new_wa->address = area->address + size;
1985 new_wa->backup = NULL;
1986 new_wa->user = NULL;
1987 new_wa->free = true;
1988
1989 area->next = new_wa;
1990 area->size = size;
1991
1992 /* If backup memory was allocated to this area, it has the wrong size
1993 * now so free it and it will be reallocated if/when needed */
1994 free(area->backup);
1995 area->backup = NULL;
1996 }
1997 }
1998
1999 /* Merge all adjacent free areas into one */
2000 static void target_merge_working_areas(struct target *target)
2001 {
2002 struct working_area *c = target->working_areas;
2003
2004 while (c && c->next) {
2005 assert(c->next->address == c->address + c->size); /* This is an invariant */
2006
2007 /* Find two adjacent free areas */
2008 if (c->free && c->next->free) {
2009 /* Merge the last into the first */
2010 c->size += c->next->size;
2011
2012 /* Remove the last */
2013 struct working_area *to_be_freed = c->next;
2014 c->next = c->next->next;
2015 free(to_be_freed->backup);
2016 free(to_be_freed);
2017
2018 			/* If backup memory was allocated to the remaining area, it has
2019 			 * the wrong size now */
2020 free(c->backup);
2021 c->backup = NULL;
2022 } else {
2023 c = c->next;
2024 }
2025 }
2026 }
2027
2028 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2029 {
2030 	/* Reevaluate working area address based on MMU state */
2031 if (!target->working_areas) {
2032 int retval;
2033 int enabled;
2034
2035 retval = target->type->mmu(target, &enabled);
2036 if (retval != ERROR_OK)
2037 return retval;
2038
2039 if (!enabled) {
2040 if (target->working_area_phys_spec) {
2041 LOG_DEBUG("MMU disabled, using physical "
2042 "address for working memory " TARGET_ADDR_FMT,
2043 target->working_area_phys);
2044 target->working_area = target->working_area_phys;
2045 } else {
2046 LOG_ERROR("No working memory available. "
2047 "Specify -work-area-phys to target.");
2048 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2049 }
2050 } else {
2051 if (target->working_area_virt_spec) {
2052 LOG_DEBUG("MMU enabled, using virtual "
2053 "address for working memory " TARGET_ADDR_FMT,
2054 target->working_area_virt);
2055 target->working_area = target->working_area_virt;
2056 } else {
2057 LOG_ERROR("No working memory available. "
2058 "Specify -work-area-virt to target.");
2059 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2060 }
2061 }
2062
2063 /* Set up initial working area on first call */
2064 struct working_area *new_wa = malloc(sizeof(*new_wa));
2065 if (new_wa) {
2066 new_wa->next = NULL;
2067 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2068 new_wa->address = target->working_area;
2069 new_wa->backup = NULL;
2070 new_wa->user = NULL;
2071 new_wa->free = true;
2072 }
2073
2074 target->working_areas = new_wa;
2075 }
2076
2077 	/* only allocate multiples of 4 bytes */
2078 if (size % 4)
2079 size = (size + 3) & (~3UL);
2080
2081 struct working_area *c = target->working_areas;
2082
2083 /* Find the first large enough working area */
2084 while (c) {
2085 if (c->free && c->size >= size)
2086 break;
2087 c = c->next;
2088 }
2089
2090 if (!c)
2091 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2092
2093 /* Split the working area into the requested size */
2094 target_split_working_area(c, size);
2095
2096 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2097 size, c->address);
2098
2099 if (target->backup_working_area) {
2100 if (!c->backup) {
2101 c->backup = malloc(c->size);
2102 if (!c->backup)
2103 return ERROR_FAIL;
2104 }
2105
2106 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2107 if (retval != ERROR_OK)
2108 return retval;
2109 }
2110
2111 /* mark as used, and return the new (reused) area */
2112 c->free = false;
2113 *area = c;
2114
2115 	/* remember where the caller keeps its pointer so it can be invalidated on free */
2116 c->user = area;
2117
2118 print_wa_layout(target);
2119
2120 return ERROR_OK;
2121 }
2122
2123 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2124 {
2125 int retval;
2126
2127 retval = target_alloc_working_area_try(target, size, area);
2128 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2129 		LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
2130 return retval;
2132 }
2133
2134 static int target_restore_working_area(struct target *target, struct working_area *area)
2135 {
2136 int retval = ERROR_OK;
2137
2138 if (target->backup_working_area && area->backup) {
2139 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2140 if (retval != ERROR_OK)
2141 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2142 area->size, area->address);
2143 }
2144
2145 return retval;
2146 }
2147
2148 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2149 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2150 {
2151 int retval = ERROR_OK;
2152
2153 if (area->free)
2154 return retval;
2155
2156 if (restore) {
2157 retval = target_restore_working_area(target, area);
2158 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2159 if (retval != ERROR_OK)
2160 return retval;
2161 }
2162
2163 area->free = true;
2164
2165 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2166 area->size, area->address);
2167
2168 /* mark user pointer invalid */
2169 /* TODO: Is this really safe? It points to some previous caller's memory.
2170 * How could we know that the area pointer is still in that place and not
2171 * some other vital data? What's the purpose of this, anyway? */
2172 *area->user = NULL;
2173 area->user = NULL;
2174
2175 target_merge_working_areas(target);
2176
2177 print_wa_layout(target);
2178
2179 return retval;
2180 }
2181
2182 int target_free_working_area(struct target *target, struct working_area *area)
2183 {
2184 return target_free_working_area_restore(target, area, 1);
2185 }
2186
2187 /* Free resources and restore memory. If restoring memory fails,
2188  * free up the resources anyway.
2189  */
2190 static void target_free_all_working_areas_restore(struct target *target, int restore)
2191 {
2192 struct working_area *c = target->working_areas;
2193
2194 LOG_DEBUG("freeing all working areas");
2195
2196 /* Loop through all areas, restoring the allocated ones and marking them as free */
2197 while (c) {
2198 if (!c->free) {
2199 if (restore)
2200 target_restore_working_area(target, c);
2201 c->free = true;
2202 *c->user = NULL; /* Same as above */
2203 c->user = NULL;
2204 }
2205 c = c->next;
2206 }
2207
2208 /* Run a merge pass to combine all areas into one */
2209 target_merge_working_areas(target);
2210
2211 print_wa_layout(target);
2212 }
2213
2214 void target_free_all_working_areas(struct target *target)
2215 {
2216 target_free_all_working_areas_restore(target, 1);
2217
2218 /* Now we have none or only one working area marked as free */
2219 if (target->working_areas) {
2220 /* Free the last one to allow on-the-fly moving and resizing */
2221 free(target->working_areas->backup);
2222 free(target->working_areas);
2223 target->working_areas = NULL;
2224 }
2225 }
2226
2227 /* Find the largest number of bytes that can be allocated */
2228 uint32_t target_get_working_area_avail(struct target *target)
2229 {
2230 struct working_area *c = target->working_areas;
2231 uint32_t max_size = 0;
2232
2233 if (!c)
2234 return target->working_area_size;
2235
2236 while (c) {
2237 if (c->free && max_size < c->size)
2238 max_size = c->size;
2239
2240 c = c->next;
2241 }
2242
2243 return max_size;
2244 }
2245
2246 static void target_destroy(struct target *target)
2247 {
2248 if (target->type->deinit_target)
2249 target->type->deinit_target(target);
2250
2251 free(target->semihosting);
2252
2253 jtag_unregister_event_callback(jtag_enable_callback, target);
2254
2255 struct target_event_action *teap = target->event_action;
2256 while (teap) {
2257 struct target_event_action *next = teap->next;
2258 Jim_DecrRefCount(teap->interp, teap->body);
2259 free(teap);
2260 teap = next;
2261 }
2262
2263 target_free_all_working_areas(target);
2264
2265 	/* release the target's SMP list */
2266 if (target->smp) {
2267 struct target_list *head = target->head;
2268 while (head) {
2269 struct target_list *pos = head->next;
2270 head->target->smp = 0;
2271 free(head);
2272 head = pos;
2273 }
2274 target->smp = 0;
2275 }
2276
2277 rtos_destroy(target);
2278
2279 free(target->gdb_port_override);
2280 free(target->type);
2281 free(target->trace_info);
2282 free(target->fileio_info);
2283 free(target->cmd_name);
2284 free(target);
2285 }
2286
2287 void target_quit(void)
2288 {
2289 struct target_event_callback *pe = target_event_callbacks;
2290 while (pe) {
2291 struct target_event_callback *t = pe->next;
2292 free(pe);
2293 pe = t;
2294 }
2295 target_event_callbacks = NULL;
2296
2297 struct target_timer_callback *pt = target_timer_callbacks;
2298 while (pt) {
2299 struct target_timer_callback *t = pt->next;
2300 free(pt);
2301 pt = t;
2302 }
2303 target_timer_callbacks = NULL;
2304
2305 for (struct target *target = all_targets; target;) {
2306 struct target *tmp;
2307
2308 tmp = target->next;
2309 target_destroy(target);
2310 target = tmp;
2311 }
2312
2313 all_targets = NULL;
2314 }
2315
2316 int target_arch_state(struct target *target)
2317 {
2318 int retval;
2319 if (!target) {
2320 LOG_WARNING("No target has been configured");
2321 return ERROR_OK;
2322 }
2323
2324 if (target->state != TARGET_HALTED)
2325 return ERROR_OK;
2326
2327 retval = target->type->arch_state(target);
2328 return retval;
2329 }
2330
2331 static int target_get_gdb_fileio_info_default(struct target *target,
2332 struct gdb_fileio_info *fileio_info)
2333 {
2334 	/* If the target does not support semihosting, it does not need to
2335 	 * provide a .get_gdb_fileio_info callback; returning ERROR_FAIL here
2336 	 * makes gdb_server report "Txx" (target halted) every time. */
2338 return ERROR_FAIL;
2339 }
2340
2341 static int target_gdb_fileio_end_default(struct target *target,
2342 int retcode, int fileio_errno, bool ctrl_c)
2343 {
2344 return ERROR_OK;
2345 }
2346
2347 int target_profiling_default(struct target *target, uint32_t *samples,
2348 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2349 {
2350 struct timeval timeout, now;
2351
2352 gettimeofday(&timeout, NULL);
2353 timeval_add_time(&timeout, seconds, 0);
2354
2355 LOG_INFO("Starting profiling. Halting and resuming the"
2356 " target as often as we can...");
2357
2358 uint32_t sample_count = 0;
2359 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
	struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
	if (!reg) {
		LOG_ERROR("PC register not found in the register cache");
		return ERROR_FAIL;
	}
2361
2362 int retval = ERROR_OK;
2363 for (;;) {
2364 target_poll(target);
2365 if (target->state == TARGET_HALTED) {
2366 uint32_t t = buf_get_u32(reg->value, 0, 32);
2367 samples[sample_count++] = t;
2368 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2369 retval = target_resume(target, 1, 0, 0, 0);
2370 target_poll(target);
2371 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2372 } else if (target->state == TARGET_RUNNING) {
2373 /* We want to quickly sample the PC. */
2374 retval = target_halt(target);
2375 } else {
2376 LOG_INFO("Target not halted or running");
2377 retval = ERROR_OK;
2378 break;
2379 }
2380
2381 if (retval != ERROR_OK)
2382 break;
2383
2384 gettimeofday(&now, NULL);
2385 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2386 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2387 break;
2388 }
2389 }
2390
2391 *num_samples = sample_count;
2392 return retval;
2393 }
2394
2395 /* Single aligned words are guaranteed to use 16 or 32 bit access
2396  * mode respectively; otherwise data is transferred as quickly as
2397  * possible.
2398  */
2399 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2400 {
2401 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2402 size, address);
2403
2404 if (!target_was_examined(target)) {
2405 LOG_ERROR("Target not examined yet");
2406 return ERROR_FAIL;
2407 }
2408
2409 if (size == 0)
2410 return ERROR_OK;
2411
2412 if ((address + size - 1) < address) {
2413 /* GDB can request this when e.g. PC is 0xfffffffc */
2414 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2415 address,
2416 size);
2417 return ERROR_FAIL;
2418 }
2419
2420 return target->type->write_buffer(target, address, size, buffer);
2421 }
2422
2423 static int target_write_buffer_default(struct target *target,
2424 target_addr_t address, uint32_t count, const uint8_t *buffer)
2425 {
2426 uint32_t size;
2427 unsigned int data_bytes = target_data_bits(target) / 8;
2428
2429 	/* Align the address upward, doubling the access width each pass. The loop
2430 	 * condition guarantees the wider accesses below still have data left. */
2431 for (size = 1;
2432 size < data_bytes && count >= size * 2 + (address & size);
2433 size *= 2) {
2434 if (address & size) {
2435 int retval = target_write_memory(target, address, size, 1, buffer);
2436 if (retval != ERROR_OK)
2437 return retval;
2438 address += size;
2439 count -= size;
2440 buffer += size;
2441 }
2442 }
2443
2444 /* Write the data with as large access size as possible. */
2445 for (; size > 0; size /= 2) {
2446 uint32_t aligned = count - count % size;
2447 if (aligned > 0) {
2448 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2449 if (retval != ERROR_OK)
2450 return retval;
2451 address += aligned;
2452 count -= aligned;
2453 buffer += aligned;
2454 }
2455 }
2456
2457 return ERROR_OK;
2458 }
2459
2460 /* Single aligned words are guaranteed to use 16 or 32 bit access
2461  * mode respectively; otherwise data is transferred as quickly as
2462  * possible.
2463  */
2464 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2465 {
2466 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2467 size, address);
2468
2469 if (!target_was_examined(target)) {
2470 LOG_ERROR("Target not examined yet");
2471 return ERROR_FAIL;
2472 }
2473
2474 if (size == 0)
2475 return ERROR_OK;
2476
2477 if ((address + size - 1) < address) {
2478 /* GDB can request this when e.g. PC is 0xfffffffc */
2479 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2480 address,
2481 size);
2482 return ERROR_FAIL;
2483 }
2484
2485 return target->type->read_buffer(target, address, size, buffer);
2486 }
2487
2488 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2489 {
2490 uint32_t size;
2491 unsigned int data_bytes = target_data_bits(target) / 8;
2492
2493 	/* Align the address upward, doubling the access width each pass. The loop
2494 	 * condition guarantees the wider accesses below still have data left. */
2495 for (size = 1;
2496 size < data_bytes && count >= size * 2 + (address & size);
2497 size *= 2) {
2498 if (address & size) {
2499 int retval = target_read_memory(target, address, size, 1, buffer);
2500 if (retval != ERROR_OK)
2501 return retval;
2502 address += size;
2503 count -= size;
2504 buffer += size;
2505 }
2506 }
2507
2508 /* Read the data with as large access size as possible. */
2509 for (; size > 0; size /= 2) {
2510 uint32_t aligned = count - count % size;
2511 if (aligned > 0) {
2512 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2513 if (retval != ERROR_OK)
2514 return retval;
2515 address += aligned;
2516 count -= aligned;
2517 buffer += aligned;
2518 }
2519 }
2520
2521 return ERROR_OK;
2522 }
2523
2524 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2525 {
2526 uint8_t *buffer;
2527 int retval;
2528 uint32_t i;
2529 uint32_t checksum = 0;
2530 if (!target_was_examined(target)) {
2531 LOG_ERROR("Target not examined yet");
2532 return ERROR_FAIL;
2533 }
2534
2535 retval = target->type->checksum_memory(target, address, size, &checksum);
2536 if (retval != ERROR_OK) {
2537 buffer = malloc(size);
2538 if (!buffer) {
2539 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2540 			return ERROR_FAIL;
2541 }
2542 retval = target_read_buffer(target, address, size, buffer);
2543 if (retval != ERROR_OK) {
2544 free(buffer);
2545 return retval;
2546 }
2547
2548 /* convert to target endianness */
2549 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2550 uint32_t target_data;
2551 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2552 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2553 }
2554
2555 retval = image_calculate_checksum(buffer, size, &checksum);
2556 free(buffer);
2557 }
2558
2559 *crc = checksum;
2560
2561 return retval;
2562 }
2563
2564 int target_blank_check_memory(struct target *target,
2565 struct target_memory_check_block *blocks, int num_blocks,
2566 uint8_t erased_value)
2567 {
2568 if (!target_was_examined(target)) {
2569 LOG_ERROR("Target not examined yet");
2570 return ERROR_FAIL;
2571 }
2572
2573 if (!target->type->blank_check_memory)
2574 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2575
2576 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2577 }
2578
2579 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2580 {
2581 uint8_t value_buf[8];
2582 if (!target_was_examined(target)) {
2583 LOG_ERROR("Target not examined yet");
2584 return ERROR_FAIL;
2585 }
2586
2587 int retval = target_read_memory(target, address, 8, 1, value_buf);
2588
2589 if (retval == ERROR_OK) {
2590 *value = target_buffer_get_u64(target, value_buf);
2591 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2592 address,
2593 *value);
2594 } else {
2595 *value = 0x0;
2596 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2597 address);
2598 }
2599
2600 return retval;
2601 }
2602
2603 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2604 {
2605 uint8_t value_buf[4];
2606 if (!target_was_examined(target)) {
2607 LOG_ERROR("Target not examined yet");
2608 return ERROR_FAIL;
2609 }
2610
2611 int retval = target_read_memory(target, address, 4, 1, value_buf);
2612
2613 if (retval == ERROR_OK) {
2614 *value = target_buffer_get_u32(target, value_buf);
2615 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2616 address,
2617 *value);
2618 } else {
2619 *value = 0x0;
2620 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2621 address);
2622 }
2623
2624 return retval;
2625 }
2626
2627 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2628 {
2629 uint8_t value_buf[2];
2630 if (!target_was_examined(target)) {
2631 LOG_ERROR("Target not examined yet");
2632 return ERROR_FAIL;
2633 }
2634
2635 int retval = target_read_memory(target, address, 2, 1, value_buf);
2636
2637 if (retval == ERROR_OK) {
2638 *value = target_buffer_get_u16(target, value_buf);
2639 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2640 address,
2641 *value);
2642 } else {
2643 *value = 0x0;
2644 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2645 address);
2646 }
2647
2648 return retval;
2649 }
2650
2651 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2652 {
2653 if (!target_was_examined(target)) {
2654 LOG_ERROR("Target not examined yet");
2655 return ERROR_FAIL;
2656 }
2657
2658 int retval = target_read_memory(target, address, 1, 1, value);
2659
2660 if (retval == ERROR_OK) {
2661 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2662 address,
2663 *value);
2664 } else {
2665 *value = 0x0;
2666 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2667 address);
2668 }
2669
2670 return retval;
2671 }
2672
2673 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2674 {
2675 int retval;
2676 uint8_t value_buf[8];
2677 if (!target_was_examined(target)) {
2678 LOG_ERROR("Target not examined yet");
2679 return ERROR_FAIL;
2680 }
2681
2682 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2683 address,
2684 value);
2685
2686 target_buffer_set_u64(target, value_buf, value);
2687 retval = target_write_memory(target, address, 8, 1, value_buf);
2688 if (retval != ERROR_OK)
2689 LOG_DEBUG("failed: %i", retval);
2690
2691 return retval;
2692 }
2693
2694 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2695 {
2696 int retval;
2697 uint8_t value_buf[4];
2698 if (!target_was_examined(target)) {
2699 LOG_ERROR("Target not examined yet");
2700 return ERROR_FAIL;
2701 }
2702
2703 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2704 address,
2705 value);
2706
2707 target_buffer_set_u32(target, value_buf, value);
2708 retval = target_write_memory(target, address, 4, 1, value_buf);
2709 if (retval != ERROR_OK)
2710 LOG_DEBUG("failed: %i", retval);
2711
2712 return retval;
2713 }
2714
2715 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2716 {
2717 int retval;
2718 uint8_t value_buf[2];
2719 if (!target_was_examined(target)) {
2720 LOG_ERROR("Target not examined yet");
2721 return ERROR_FAIL;
2722 }
2723
2724 	LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2725 address,
2726 value);
2727
2728 target_buffer_set_u16(target, value_buf, value);
2729 retval = target_write_memory(target, address, 2, 1, value_buf);
2730 if (retval != ERROR_OK)
2731 LOG_DEBUG("failed: %i", retval);
2732
2733 return retval;
2734 }
2735
2736 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2737 {
2738 int retval;
2739 if (!target_was_examined(target)) {
2740 LOG_ERROR("Target not examined yet");
2741 return ERROR_FAIL;
2742 }
2743
2744 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2745 address, value);
2746
2747 retval = target_write_memory(target, address, 1, 1, &value);
2748 if (retval != ERROR_OK)
2749 LOG_DEBUG("failed: %i", retval);
2750
2751 return retval;
2752 }
2753
2754 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2755 {
2756 int retval;
2757 uint8_t value_buf[8];
2758 if (!target_was_examined(target)) {
2759 LOG_ERROR("Target not examined yet");
2760 return ERROR_FAIL;
2761 }
2762
2763 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2764 address,
2765 value);
2766
2767 target_buffer_set_u64(target, value_buf, value);
2768 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2769 if (retval != ERROR_OK)
2770 LOG_DEBUG("failed: %i", retval);
2771
2772 return retval;
2773 }
2774
2775 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2776 {
2777 int retval;
2778 uint8_t value_buf[4];
2779 if (!target_was_examined(target)) {
2780 LOG_ERROR("Target not examined yet");
2781 return ERROR_FAIL;
2782 }
2783
2784 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2785 address,
2786 value);
2787
2788 target_buffer_set_u32(target, value_buf, value);
2789 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2790 if (retval != ERROR_OK)
2791 LOG_DEBUG("failed: %i", retval);
2792
2793 return retval;
2794 }
2795
2796 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2797 {
2798 int retval;
2799 uint8_t value_buf[2];
2800 if (!target_was_examined(target)) {
2801 LOG_ERROR("Target not examined yet");
2802 return ERROR_FAIL;
2803 }
2804
2805 	LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2806 address,
2807 value);
2808
2809 target_buffer_set_u16(target, value_buf, value);
2810 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2811 if (retval != ERROR_OK)
2812 LOG_DEBUG("failed: %i", retval);
2813
2814 return retval;
2815 }
2816
2817 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2818 {
2819 int retval;
2820 if (!target_was_examined(target)) {
2821 LOG_ERROR("Target not examined yet");
2822 return ERROR_FAIL;
2823 }
2824
2825 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2826 address, value);
2827
2828 retval = target_write_phys_memory(target, address, 1, 1, &value);
2829 if (retval != ERROR_OK)
2830 LOG_DEBUG("failed: %i", retval);
2831
2832 return retval;
2833 }
2834
2835 static int find_target(struct command_invocation *cmd, const char *name)
2836 {
2837 struct target *target = get_target(name);
2838 if (!target) {
2839 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2840 return ERROR_FAIL;
2841 }
2842 if (!target->tap->enabled) {
2843 command_print(cmd, "Target: TAP %s is disabled, "
2844 "can't be the current target\n",
2845 target->tap->dotted_name);
2846 return ERROR_FAIL;
2847 }
2848
2849 cmd->ctx->current_target = target;
2850 if (cmd->ctx->current_target_override)
2851 cmd->ctx->current_target_override = target;
2852
2853 return ERROR_OK;
2854 }
2855
2857 COMMAND_HANDLER(handle_targets_command)
2858 {
2859 int retval = ERROR_OK;
2860 if (CMD_ARGC == 1) {
2861 retval = find_target(CMD, CMD_ARGV[0]);
2862 if (retval == ERROR_OK) {
2863 /* we're done! */
2864 return retval;
2865 }
2866 }
2867
2868 struct target *target = all_targets;
2869 command_print(CMD, " TargetName Type Endian TapName State ");
2870 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2871 while (target) {
2872 const char *state;
2873 char marker = ' ';
2874
2875 if (target->tap->enabled)
2876 state = target_state_name(target);
2877 else
2878 state = "tap-disabled";
2879
2880 if (CMD_CTX->current_target == target)
2881 marker = '*';
2882
2883 /* keep columns lined up to match the headers above */
2884 command_print(CMD,
2885 "%2d%c %-18s %-10s %-6s %-18s %s",
2886 target->target_number,
2887 marker,
2888 target_name(target),
2889 target_type_name(target),
2890 jim_nvp_value2name_simple(nvp_target_endian,
2891 target->endianness)->name,
2892 target->tap->dotted_name,
2893 state);
2894 target = target->next;
2895 }
2896
2897 return retval;
2898 }
2899
2900 /* Periodically check for reset & power dropout; when detected, run the matching Tcl event procs. */
2901
2902 static int power_dropout;
2903 static int srst_asserted;
2904
2905 static int run_power_restore;
2906 static int run_power_dropout;
2907 static int run_srst_asserted;
2908 static int run_srst_deasserted;
2909
2910 static int sense_handler(void)
2911 {
2912 static int prev_srst_asserted;
2913 static int prev_power_dropout;
2914
2915 int retval = jtag_power_dropout(&power_dropout);
2916 if (retval != ERROR_OK)
2917 return retval;
2918
2919 int power_restored;
2920 power_restored = prev_power_dropout && !power_dropout;
2921 if (power_restored)
2922 run_power_restore = 1;
2923
2924 int64_t current = timeval_ms();
2925 static int64_t last_power;
2926 bool wait_more = last_power + 2000 > current;
2927 if (power_dropout && !wait_more) {
2928 run_power_dropout = 1;
2929 last_power = current;
2930 }
2931
2932 retval = jtag_srst_asserted(&srst_asserted);
2933 if (retval != ERROR_OK)
2934 return retval;
2935
2936 int srst_deasserted;
2937 srst_deasserted = prev_srst_asserted && !srst_asserted;
2938
2939 static int64_t last_srst;
2940 wait_more = last_srst + 2000 > current;
2941 if (srst_deasserted && !wait_more) {
2942 run_srst_deasserted = 1;
2943 last_srst = current;
2944 }
2945
2946 if (!prev_srst_asserted && srst_asserted)
2947 run_srst_asserted = 1;
2948
2949 prev_srst_asserted = srst_asserted;
2950 prev_power_dropout = power_dropout;
2951
2952 if (srst_deasserted || power_restored) {
2953 /* Other than logging the event we can't do anything here.
2954 * Issuing a reset is a particularly bad idea as we might
2955 * be inside a reset already.
2956 */
2957 }
2958
2959 return ERROR_OK;
2960 }
2961
2962 /* process target state changes */
2963 static int handle_target(void *priv)
2964 {
2965 Jim_Interp *interp = (Jim_Interp *)priv;
2966 int retval = ERROR_OK;
2967
2968 if (!is_jtag_poll_safe()) {
2969 /* polling is disabled currently */
2970 return ERROR_OK;
2971 }
2972
2973 /* we do not want to recurse here... */
2974 static int recursive;
2975 if (!recursive) {
2976 recursive = 1;
2977 sense_handler();
2978 /* danger! running these procedures can trigger srst assertions and power dropouts.
2979 * We need to avoid an infinite loop/recursion here and we do that by
2980 * clearing the flags after running these events.
2981 */
2982 int did_something = 0;
2983 if (run_srst_asserted) {
2984 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2985 Jim_Eval(interp, "srst_asserted");
2986 did_something = 1;
2987 }
2988 if (run_srst_deasserted) {
2989 Jim_Eval(interp, "srst_deasserted");
2990 did_something = 1;
2991 }
2992 if (run_power_dropout) {
2993 LOG_INFO("Power dropout detected, running power_dropout proc.");
2994 Jim_Eval(interp, "power_dropout");
2995 did_something = 1;
2996 }
2997 if (run_power_restore) {
2998 Jim_Eval(interp, "power_restore");
2999 did_something = 1;
3000 }
3001
3002 if (did_something) {
3003 /* clear detect flags */
3004 sense_handler();
3005 }
3006
3007 /* clear action flags */
3008
3009 run_srst_asserted = 0;
3010 run_srst_deasserted = 0;
3011 run_power_restore = 0;
3012 run_power_dropout = 0;
3013
3014 recursive = 0;
3015 }
3016
3017 /* Poll targets for state changes unless that's globally disabled.
3018 * Skip targets that are currently disabled.
3019 */
3020 for (struct target *target = all_targets;
3021 is_jtag_poll_safe() && target;
3022 target = target->next) {
3023
3024 if (!target_was_examined(target))
3025 continue;
3026
3027 if (!target->tap->enabled)
3028 continue;
3029
3030 if (target->backoff.times > target->backoff.count) {
3031 /* do not poll this time as we failed previously */
3032 target->backoff.count++;
3033 continue;
3034 }
3035 target->backoff.count = 0;
3036
3037 /* only poll target if we've got power and srst isn't asserted */
3038 if (!power_dropout && !srst_asserted) {
3039 /* polling may fail silently until the target has been examined */
3040 retval = target_poll(target);
3041 if (retval != ERROR_OK) {
3042 				/* Exponential backoff: increase the interval between polls, up to 5000 ms */
3043 if (target->backoff.times * polling_interval < 5000) {
3044 target->backoff.times *= 2;
3045 target->backoff.times++;
3046 }
3047
3048 /* Tell GDB to halt the debugger. This allows the user to
3049 * run monitor commands to handle the situation.
3050 */
3051 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3052 }
3053 if (target->backoff.times > 0) {
3054 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3055 target_reset_examined(target);
3056 retval = target_examine_one(target);
3057 			/* Target examination could have failed due to an unstable connection,
3058 			 * but we set the examined flag anyway so it is re-polled later */
3059 if (retval != ERROR_OK) {
3060 target_set_examined(target);
3061 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3062 target->backoff.times * polling_interval);
3063 return retval;
3064 }
3065 }
3066
3067 /* Since we succeeded, we reset backoff count */
3068 target->backoff.times = 0;
3069 }
3070 }
3071
3072 return retval;
3073 }
3074
3075 COMMAND_HANDLER(handle_reg_command)
3076 {
3077 LOG_DEBUG("-");
3078
3079 struct target *target = get_current_target(CMD_CTX);
3080 struct reg *reg = NULL;
3081
3082 /* list all available registers for the current target */
3083 if (CMD_ARGC == 0) {
3084 struct reg_cache *cache = target->reg_cache;
3085
3086 unsigned int count = 0;
3087 while (cache) {
3088 unsigned i;
3089
3090 command_print(CMD, "===== %s", cache->name);
3091
3092 for (i = 0, reg = cache->reg_list;
3093 i < cache->num_regs;
3094 i++, reg++, count++) {
3095 				if (!reg->exist || reg->hidden)
3096 continue;
3097 /* only print cached values if they are valid */
3098 if (reg->valid) {
3099 char *value = buf_to_hex_str(reg->value,
3100 reg->size);
3101 command_print(CMD,
3102 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3103 count, reg->name,
3104 reg->size, value,
3105 reg->dirty
3106 ? " (dirty)"
3107 : "");
3108 free(value);
3109 } else {
3110 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3111 count, reg->name,
3112 reg->size);
3113 }
3114 }
3115 cache = cache->next;
3116 }
3117
3118 return ERROR_OK;
3119 }
3120
3121 /* access a single register by its ordinal number */
3122 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3123 unsigned num;
3124 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3125
3126 struct reg_cache *cache = target->reg_cache;
3127 unsigned int count = 0;
3128 while (cache) {
3129 unsigned i;
3130 for (i = 0; i < cache->num_regs; i++) {
3131 if (count++ == num) {
3132 reg = &cache->reg_list[i];
3133 break;
3134 }
3135 }
3136 if (reg)
3137 break;
3138 cache = cache->next;
3139 }
3140
3141 if (!reg) {
3142 		command_print(CMD, "%u is out of bounds, the current target "
3143 			"has only %u registers (0 - %u)", num, count, count - 1);
3144 return ERROR_OK;
3145 }
3146 } else {
3147 /* access a single register by its name */
3148 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3149
3150 if (!reg)
3151 goto not_found;
3152 }
3153
3154 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3155
3156 if (!reg->exist)
3157 goto not_found;
3158
3159 /* display a register */
3160 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3161 && (CMD_ARGV[1][0] <= '9')))) {
3162 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3163 			reg->valid = false;
3164
3165 		if (!reg->valid) {
3166 int retval = reg->type->get(reg);
3167 if (retval != ERROR_OK) {
3168 LOG_ERROR("Could not read register '%s'", reg->name);
3169 return retval;
3170 }
3171 }
3172 char *value = buf_to_hex_str(reg->value, reg->size);
3173 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3174 free(value);
3175 return ERROR_OK;
3176 }
3177
3178 /* set register value */
3179 if (CMD_ARGC == 2) {
3180 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3181 if (!buf)
3182 return ERROR_FAIL;
3183 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3184
3185 int retval = reg->type->set(reg, buf);
3186 if (retval != ERROR_OK) {
3187 LOG_ERROR("Could not write to register '%s'", reg->name);
3188 } else {
3189 char *value = buf_to_hex_str(reg->value, reg->size);
3190 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3191 free(value);
3192 }
3193
3194 free(buf);
3195
3196 return retval;
3197 }
3198
3199 return ERROR_COMMAND_SYNTAX_ERROR;
3200
3201 not_found:
3202 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3203 return ERROR_OK;
3204 }
3205
3206 COMMAND_HANDLER(handle_poll_command)
3207 {