gdb_server: add support for architecture element
openocd.git: src/target/target.c
/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2007-2010 Øyvind Harboe                                 *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) 2008, Duane Ellis                                       *
 *   openocd@duaneellis.com                                                *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2008 by Rick Altherr                                    *
 *   kc8apf@kc8apf.net                                                     *
 *                                                                         *
 *   Copyright (C) 2011 by Broadcom Corporation                            *
 *   Evan Hunter - ehunter@broadcom.com                                    *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011                                     *
 *   michel.jaouen@stericsson.com : smp minimum support                    *
 *                                                                         *
 *   Copyright (C) 2011 Andreas Fritiofson                                 *
 *   andreas.fritiofson@gmail.com                                          *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/time_support.h>
#include <jtag/jtag.h>
#include <flash/nor/core.h>

#include "target.h"
#include "target_type.h"
#include "target_request.h"
#include "breakpoints.h"
#include "register.h"
#include "trace.h"
#include "image.h"
#include "rtos/rtos.h"
#include "transport/transport.h"
#include "arm_cti.h"

/* default halt wait timeout (ms) */
#define DEFAULT_HALT_TIMEOUT 5000

static int target_read_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info);
static int target_gdb_fileio_end_default(struct target *target, int retcode,
		int fileio_errno, bool ctrl_c);
static int target_profiling_default(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);

/* targets */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type cortexm_target;
extern struct target_type cortexa_target;
extern struct target_type aarch64_target;
extern struct target_type cortexr4_target;
extern struct target_type arm11_target;
extern struct target_type ls1_sap_target;
extern struct target_type mips_m4k_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type hla_target;
extern struct target_type nds32_v2_target;
extern struct target_type nds32_v3_target;
extern struct target_type nds32_v3m_target;
extern struct target_type or1k_target;
extern struct target_type quark_x10xx_target;
extern struct target_type quark_d20xx_target;
extern struct target_type stm8_target;
extern struct target_type riscv_target;
extern struct target_type mem_ap_target;

static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
#if BUILD_TARGET64
	&aarch64_target,
#endif
	&mem_ap_target,
	NULL,
};

struct target *all_targets;
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
LIST_HEAD(target_reset_callback_list);
LIST_HEAD(target_trace_callback_list);
static const int polling_interval = 100;

static const Jim_Nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};

static const Jim_Nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};

static const char *target_strerror_safe(int err)
{
	const Jim_Nvp *n;

	n = Jim_Nvp_value2name_simple(nvp_error_target, err);
	if (n->name == NULL)
		return "unknown";
	else
		return n->name;
}

static const Jim_Nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .name = NULL, .value = -1 }
};

static const Jim_Nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};

static const Jim_Nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};

const char *debug_reason_name(struct target *t)
{
	const char *cp;

	cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
			t->debug_reason)->name;
	if (!cp) {
		LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

const char *target_state_name(struct target *t)
{
	const char *cp;
	cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
	if (!cp) {
		LOG_ERROR("Invalid target state: %d", (int)(t->state));
		cp = "(*BUG*unknown*BUG*)";
	}

	if (!target_was_examined(t) && t->defer_examine)
		cp = "examine deferred";

	return cp;
}

const char *target_event_name(enum target_event event)
{
	const char *cp;
	cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
	if (!cp) {
		LOG_ERROR("Invalid target event: %d", (int)(event));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

const char *target_reset_mode_name(enum target_reset_mode reset_mode)
{
	const char *cp;
	cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
	if (!cp) {
		LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
		cp = "(*BUG*unknown*BUG*)";
	}
	return cp;
}

/* determine the number of the new target */
static int new_target_number(void)
{
	struct target *t;
	int x;

	/* number is 0 based */
	x = -1;
	t = all_targets;
	while (t) {
		if (x < t->target_number)
			x = t->target_number;
		t = t->next;
	}
	return x + 1;
}

/* read a uint64_t from a buffer in target memory endianness */
uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u64(buffer);
	else
		return be_to_h_u64(buffer);
}

/* read a uint32_t from a buffer in target memory endianness */
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u32(buffer);
	else
		return be_to_h_u32(buffer);
}

/* read a 24-bit value from a buffer in target memory endianness */
uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u24(buffer);
	else
		return be_to_h_u24(buffer);
}

/* read a uint16_t from a buffer in target memory endianness */
uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		return le_to_h_u16(buffer);
	else
		return be_to_h_u16(buffer);
}

/* read a uint8_t from a buffer in target memory endianness */
static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
{
	return *buffer & 0x0ff;
}

/* write a uint64_t to a buffer in target memory endianness */
void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u64_to_le(buffer, value);
	else
		h_u64_to_be(buffer, value);
}

/* write a uint32_t to a buffer in target memory endianness */
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u32_to_le(buffer, value);
	else
		h_u32_to_be(buffer, value);
}

/* write a 24-bit value to a buffer in target memory endianness */
void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u24_to_le(buffer, value);
	else
		h_u24_to_be(buffer, value);
}

/* write a uint16_t to a buffer in target memory endianness */
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u16_to_le(buffer, value);
	else
		h_u16_to_be(buffer, value);
}

/* write a uint8_t to a buffer in target memory endianness */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}

/* read a uint64_t array from a buffer in target memory endianness */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
}

/* read a uint32_t array from a buffer in target memory endianness */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
}

/* read a uint16_t array from a buffer in target memory endianness */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
}

/* write a uint64_t array to a buffer in target memory endianness */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
}

/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
}

/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
}
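
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): packing values for the target through target_buffer_set_u32()
 * keeps the byte order correct for either target endianness, unlike a raw
 * host-side memcpy(). This hypothetical helper is not called anywhere.
 */
static inline void example_pack_two_words(struct target *target,
		uint8_t buf[8], uint32_t first, uint32_t second)
{
	/* bytes land in target memory order, independent of host order */
	target_buffer_set_u32(target, &buf[0], first);
	target_buffer_set_u32(target, &buf[4], second);
}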

/* return a pointer to a configured target; id is name or number */
struct target *get_target(const char *id)
{
	struct target *target;

	/* try as Tcl target name */
	for (target = all_targets; target; target = target->next) {
		if (target_name(target) == NULL)
			continue;
		if (strcmp(id, target_name(target)) == 0)
			return target;
	}

	/* It's OK to remove this fallback sometime after August 2010 or so */

	/* no match, try as number */
	unsigned num;
	if (parse_uint(id, &num) != ERROR_OK)
		return NULL;

	for (target = all_targets; target; target = target->next) {
		if (target->target_number == (int)num) {
			LOG_WARNING("use '%s' as target identifier, not '%u'",
					target_name(target), num);
			return target;
		}
	}

	return NULL;
}

/* returns a pointer to the n-th configured target */
struct target *get_target_by_num(int num)
{
	struct target *target = all_targets;

	while (target) {
		if (target->target_number == num)
			return target;
		target = target->next;
	}

	return NULL;
}

struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = cmd_ctx->current_target_override
		? cmd_ctx->current_target_override
		: cmd_ctx->current_target;

	if (target == NULL) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}

int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			int64_t t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}

int target_halt(struct target *target)
{
	int retval;
	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->halt(target);
	if (retval != ERROR_OK)
		return retval;

	target->halt_issued = true;
	target->halt_issued_time = timeval_ms();

	return ERROR_OK;
}
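
/*
 * Illustrative sketch (editor's addition): target_halt() only requests the
 * halt; completion is observed by polling, mirroring the halt_issued
 * bookkeeping above. This hypothetical helper shows the usual
 * request-then-poll pattern with a caller-side timeout.
 */
static inline int example_halt_and_wait(struct target *target, int timeout_ms)
{
	int retval = target_halt(target);
	if (retval != ERROR_OK)
		return retval;

	int64_t start = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() - start > timeout_ms)
			return ERROR_TARGET_TIMEOUT;
		alive_sleep(10);	/* yield between polls */
	}
	return ERROR_OK;
}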

/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 *	of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 *	should be skipped.  (For example, maybe execution was stopped by
 *	such a breakpoint, in which case it would be counterproductive to
 *	let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 *	should be released and/or restored to their original contents.
 *	(This would for example be true to run some downloaded "helper"
 *	algorithm code, which resides in one such working buffer and uses
 *	another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies.  For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled.  (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction).  On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
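
/*
 * Illustrative sketch (editor's addition): because resume is asynchronous,
 * callers that need to run to a known stopping point pair target_resume()
 * with a poll loop. All names here are hypothetical.
 */
static inline int example_run_to_breakpoint(struct target *target,
		target_addr_t pc, int timeout_ms)
{
	/* current = 0: start at 'pc'; skip a breakpoint at the resume address */
	int retval = target_resume(target, 0, pc, 1, 0);
	if (retval != ERROR_OK)
		return retval;

	int64_t start = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() - start > timeout_ms)
			return ERROR_TARGET_TIMEOUT;
		alive_sleep(10);
	}
	return ERROR_OK;
}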

static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
{
	char buf[100];
	int retval;
	Jim_Nvp *n;
	n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
	if (n->name == NULL) {
		LOG_ERROR("invalid reset mode");
		return ERROR_FAIL;
	}

	struct target *target;
	for (target = all_targets; target; target = target->next)
		target_call_reset_callbacks(target, reset_mode);

	/* disable polling during reset to make reset event scripts
	 * more predictable, i.e. dr/irscan & pathmove in events will
	 * not have JTAG operations injected into the middle of a sequence.
	 */
	bool save_poll = jtag_poll_get_enabled();

	jtag_poll_set_enabled(false);

	sprintf(buf, "ocd_process_reset %s", n->name);
	retval = Jim_Eval(cmd_ctx->interp, buf);

	jtag_poll_set_enabled(save_poll);

	if (retval != JIM_OK) {
		Jim_MakeErrorMessage(cmd_ctx->interp);
		command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
		return ERROR_FAIL;
	}

	/* We want any events to be processed before the prompt */
	retval = target_call_timer_callbacks_now();

	for (target = all_targets; target; target = target->next) {
		target->type->check_reset(target);
		target->running_alg = false;
	}

	return retval;
}

static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}

static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}

static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}

/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}

int target_examine_one(struct target *target)
{
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target->type->examine(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return ERROR_OK;
}

static int jtag_enable_callback(enum jtag_event event, void *priv)
{
	struct target *target = priv;

	if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
		return ERROR_OK;

	jtag_unregister_event_callback(jtag_enable_callback, target);

	return target_examine_one(target);
}

/* Targets that correctly implement init + examine, i.e.
 * no communication with target during init:
 *
 * XScale
 */
int target_examine(void)
{
	int retval = ERROR_OK;
	struct target *target;

	for (target = all_targets; target; target = target->next) {
		/* defer examination, but don't skip it */
		if (!target->tap->enabled) {
			jtag_register_event_callback(jtag_enable_callback,
					target);
			continue;
		}

		if (target->defer_examine)
			continue;

		retval = target_examine_one(target);
		if (retval != ERROR_OK)
			return retval;
	}
	return retval;
}

const char *target_type_name(struct target *target)
{
	return target->type->name;
}

static int target_soft_reset_halt(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt(target);
}

/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
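
/*
 * Illustrative sketch (editor's addition): a typical synchronous
 * target_run_algorithm() call site, in the style of the flash drivers.
 * The register names, entry/exit addresses and arch_info are hypothetical
 * and target-specific; init_reg_param(), buf_set_u32() and buf_get_u32()
 * are the existing helpers from register.h and helper/binarybuffer.h.
 */
static inline int example_run_checksum_algorithm(struct target *target,
		uint32_t code_entry, uint32_t code_exit,
		uint32_t data_addr, uint32_t data_len, void *arch_info)
{
	struct reg_param reg_params[2];

	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);	/* address */
	init_reg_param(&reg_params[1], "r1", 32, PARAM_IN_OUT);	/* len in, sum out */
	buf_set_u32(reg_params[0].value, 0, 32, data_addr);
	buf_set_u32(reg_params[1].value, 0, 32, data_len);

	int retval = target_run_algorithm(target, 0, NULL,
			2, reg_params, code_entry, code_exit,
			10000 /* ms */, arch_info);

	if (retval == ERROR_OK)
		LOG_DEBUG("checksum 0x%08" PRIx32,
				buf_get_u32(reg_params[1].value, 0, 32));

	destroy_reg_param(&reg_params[0]);
	destroy_reg_param(&reg_params[1]);
	return retval;
}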

/**
 * Executes a target-specific native code algorithm and leaves it running.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}

/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
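
/*
 * Illustrative sketch (editor's addition): the asynchronous variant pairs
 * target_start_algorithm() with target_wait_algorithm(), letting the host
 * do useful work (e.g. refill a FIFO) while the algorithm runs on the
 * target. The parameters are hypothetical pass-throughs.
 */
static inline int example_start_then_wait(struct target *target,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval = target_start_algorithm(target, 0, NULL, 0, NULL,
			entry_point, exit_point, arch_info);
	if (retval != ERROR_OK)
		return retval;

	/* ... host-side work overlapping target execution would go here ... */

	return target_wait_algorithm(target, 0, NULL, 0, NULL,
			exit_point, 500 /* ms */, arch_info);
}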

/**
 * Streams data to a circular buffer on target intended for consumption by code
 * running asynchronously on target.
 *
 * This is intended for applications where target-specific native code runs
 * on the target, receives data from the circular buffer, does something with
 * it (most likely writing it to a flash memory), and advances the circular
 * buffer pointer.
 *
 * This assumes that the helper algorithm has already been loaded to the target,
 * but has not been started yet. Given memory and register parameters are passed
 * to the algorithm.
 *
 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
 * following format:
 *
 * [buffer_start + 0, buffer_start + 4):
 *	Write Pointer address (aka head). Written and updated by this
 *	routine when new data is written to the circular buffer.
 * [buffer_start + 4, buffer_start + 8):
 *	Read Pointer address (aka tail). Updated by code running on the
 *	target after it consumes data.
 * [buffer_start + 8, buffer_start + buffer_size):
 *	Circular buffer contents.
 *
 * See contrib/loaders/flash/stm32f1x.S for an example.
 *
 * @param target used to run the algorithm
 * @param buffer address on the host where data to be sent is located
 * @param count number of blocks to send
 * @param block_size size in bytes of each block
 * @param num_mem_params count of memory-based params to pass to algorithm
 * @param mem_params memory-based params to pass to algorithm
 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params register-based params to pass to algorithm
 * @param buffer_start address on the target of the circular buffer structure
 * @param buffer_size size of the circular buffer structure
 * @param entry_point address on the target to execute to start the algorithm
 * @param exit_point address at which to set a breakpoint to catch the
 *	end of the algorithm; can be 0 if target triggers a breakpoint itself
 */

int target_run_flash_async_algorithm(struct target *target,
		const uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(!block_size || !(block_size & (block_size - 1)));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
				(size_t) (buffer - buffer_orig), count, wp, rp);

		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
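		/* Worked example (editor's addition): with fifo_start_addr = 0x108,
		 * fifo_end_addr = 0x200 and block_size = 4, rp = 0x180 and wp = 0x1f0
		 * fall in the second case below (rp <= wp, rp > fifo_start_addr), so
		 * we may fill up to the end of the fifo: thisrun_bytes = 0x200 - 0x1f0
		 * = 16, and wp wraps to fifo_start_addr afterwards. When rp equals
		 * fifo_start_addr, filling to the very end would wrap wp onto rp (the
		 * empty condition), hence the third case holds one block back. */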
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(10);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a Stellaris using the new ICDI interface */
			if (timeout++ >= 500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set rp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, rp_addr, &rp);
		if (retval == ERROR_OK && rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}
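
/*
 * Illustrative sketch (editor's addition): how a flash driver might drive
 * the async FIFO transfer above. The working-area size, the zero
 * exit_point (loader raises its own breakpoint) and the empty parameter
 * lists are hypothetical; real drivers (see contrib/loaders/flash/) also
 * download the loader code and pass register params describing the FIFO.
 */
static inline int example_flash_write_async(struct target *target,
		uint32_t loader_entry, const uint8_t *data, uint32_t word_count,
		void *arch_info)
{
	struct working_area *fifo;

	/* 8 bytes of wp/rp header plus a power-of-two data area */
	if (target_alloc_working_area(target, 8 + 1024, &fifo) != ERROR_OK)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	int retval = target_run_flash_async_algorithm(target,
			data, word_count, 4,	/* 4-byte blocks */
			0, NULL, 0, NULL,
			fifo->address, fifo->size,
			loader_entry, 0 /* loader breaks itself */,
			arch_info);

	target_free_working_area(target, fifo);	/* defined later in this file */
	return retval;
}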

int target_read_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->read_memory) {
		LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->read_memory(target, address, size, count, buffer);
}

int target_read_phys_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->read_phys_memory) {
		LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->read_phys_memory(target, address, size, count, buffer);
}

int target_write_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->write_memory) {
		LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->write_memory(target, address, size, count, buffer);
}

int target_write_phys_memory(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->write_phys_memory) {
		LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
		return ERROR_FAIL;
	}
	return target->type->write_phys_memory(target, address, size, count, buffer);
}

int target_add_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
		LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_breakpoint(target, breakpoint);
}

int target_add_context_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_context_breakpoint(target, breakpoint);
}

int target_add_hybrid_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_hybrid_breakpoint(target, breakpoint);
}

int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}

int target_add_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->add_watchpoint(target, watchpoint);
}
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
int target_hit_watchpoint(struct target *target,
		struct watchpoint **hit_watchpoint)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}

	if (target->type->hit_watchpoint == NULL) {
		/* For backward compatibility: if hit_watchpoint is not implemented,
		 * return ERROR_FAIL so that gdb_server does not act on bogus
		 * information. */
		return ERROR_FAIL;
	}

	return target->type->hit_watchpoint(target, hit_watchpoint);
}

const char *target_get_gdb_arch(struct target *target)
{
	if (target->type->get_gdb_arch == NULL)
		return NULL;
	return target->type->get_gdb_arch(target);
}

int target_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
{
	return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
}

bool target_supports_gdb_connection(struct target *target)
{
	/*
	 * based on current code, we can simply exclude all the targets that
	 * don't provide get_gdb_reg_list; this could change with new targets.
	 */
	return !!target->type->get_gdb_reg_list;
}

int target_step(struct target *target,
		int current, target_addr_t address, int handle_breakpoints)
{
	return target->type->step(target, current, address, handle_breakpoints);
}

int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->get_gdb_fileio_info(target, fileio_info);
}

int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
}

int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target %s is not halted (profiling)", target->cmd_name);
		return ERROR_TARGET_NOT_HALTED;
	}
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}

/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static void target_reset_examined(struct target *target)
{
	target->examined = false;
}

static int handle_target(void *priv);

static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (type->examine == NULL)
		type->examine = default_examine;

	if (type->check_reset == NULL)
		type->check_reset = default_check_reset;

	assert(type->init_target != NULL);

	int retval = type->init_target(cmd_ctx, target);
	if (ERROR_OK != retval) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (type->virt2phys == NULL) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same:  make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	if (target->type->read_buffer == NULL)
		target->type->read_buffer = target_read_buffer_default;

	if (target->type->write_buffer == NULL)
		target->type->write_buffer = target_write_buffer_default;

	if (target->type->get_gdb_fileio_info == NULL)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (target->type->gdb_fileio_end == NULL)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (target->type->profiling == NULL)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}

static int target_init(struct command_context *cmd_ctx)
{
	struct target *target;
	int retval;

	for (target = all_targets; target; target = target->next) {
		retval = target_init_one(cmd_ctx, target);
		if (ERROR_OK != retval)
			return retval;
	}

	if (!all_targets)
		return ERROR_OK;

	retval = target_register_user_commands(cmd_ctx);
	if (ERROR_OK != retval)
		return retval;

	retval = target_register_timer_callback(&handle_target,
			polling_interval, 1, cmd_ctx->interp);
	if (ERROR_OK != retval)
		return retval;

	return ERROR_OK;
}

COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	retval = command_run_line(CMD_CTX, "init_targets");
	if (ERROR_OK != retval)
		return retval;

	retval = command_run_line(CMD_CTX, "init_target_events");
	if (ERROR_OK != retval)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (ERROR_OK != retval)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}

int target_register_event_callback(int (*callback)(struct target *target,
		enum target_event event, void *priv), void *priv)
{
	struct target_event_callback **callbacks_p = &target_event_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (*callbacks_p) {
		while ((*callbacks_p)->next)
			callbacks_p = &((*callbacks_p)->next);
		callbacks_p = &((*callbacks_p)->next);
	}

	(*callbacks_p) = malloc(sizeof(struct target_event_callback));
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->priv = priv;
	(*callbacks_p)->next = NULL;

	return ERROR_OK;
}

int target_register_reset_callback(int (*callback)(struct target *target,
		enum target_reset_mode reset_mode, void *priv), void *priv)
{
	struct target_reset_callback *entry;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	entry = malloc(sizeof(struct target_reset_callback));
	if (entry == NULL) {
		LOG_ERROR("error allocating buffer for reset callback entry");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	entry->callback = callback;
	entry->priv = priv;
	list_add(&entry->list, &target_reset_callback_list);

	return ERROR_OK;
}

int target_register_trace_callback(int (*callback)(struct target *target,
		size_t len, uint8_t *data, void *priv), void *priv)
{
	struct target_trace_callback *entry;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	entry = malloc(sizeof(struct target_trace_callback));
	if (entry == NULL) {
		LOG_ERROR("error allocating buffer for trace callback entry");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	entry->callback = callback;
	entry->priv = priv;
	list_add(&entry->list, &target_trace_callback_list);

	return ERROR_OK;
}

int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
{
	struct target_timer_callback **callbacks_p = &target_timer_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	if (*callbacks_p) {
		while ((*callbacks_p)->next)
			callbacks_p = &((*callbacks_p)->next);
		callbacks_p = &((*callbacks_p)->next);
	}

	(*callbacks_p) = malloc(sizeof(struct target_timer_callback));
	(*callbacks_p)->callback = callback;
	(*callbacks_p)->periodic = periodic;
	(*callbacks_p)->time_ms = time_ms;
	(*callbacks_p)->removed = false;

	gettimeofday(&(*callbacks_p)->when, NULL);
	timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);

	(*callbacks_p)->priv = priv;
	(*callbacks_p)->next = NULL;

	return ERROR_OK;
}
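
/*
 * Illustrative sketch (editor's addition): registering a periodic timer
 * callback, just as target_init() above registers handle_target(). The
 * heartbeat callback and its NULL private data are hypothetical.
 */
static inline int example_heartbeat_callback(void *priv)
{
	LOG_DEBUG("heartbeat");
	return ERROR_OK;
}

static inline int example_install_heartbeat(void)
{
	/* fire every polling_interval ms; periodic = 1 re-arms it each time */
	return target_register_timer_callback(&example_heartbeat_callback,
			polling_interval, 1, NULL);
}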

int target_unregister_event_callback(int (*callback)(struct target *target,
		enum target_event event, void *priv), void *priv)
{
	struct target_event_callback **p = &target_event_callbacks;
	struct target_event_callback *c = target_event_callbacks;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	while (c) {
		struct target_event_callback *next = c->next;
		if ((c->callback == callback) && (c->priv == priv)) {
			*p = next;
			free(c);
			return ERROR_OK;
		} else
			p = &(c->next);
		c = next;
	}

	return ERROR_OK;
}

int target_unregister_reset_callback(int (*callback)(struct target *target,
		enum target_reset_mode reset_mode, void *priv), void *priv)
{
	struct target_reset_callback *entry;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	list_for_each_entry(entry, &target_reset_callback_list, list) {
		if (entry->callback == callback && entry->priv == priv) {
			list_del(&entry->list);
			free(entry);
			break;
		}
	}

	return ERROR_OK;
}

int target_unregister_trace_callback(int (*callback)(struct target *target,
		size_t len, uint8_t *data, void *priv), void *priv)
{
	struct target_trace_callback *entry;

	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	list_for_each_entry(entry, &target_trace_callback_list, list) {
		if (entry->callback == callback && entry->priv == priv) {
			list_del(&entry->list);
			free(entry);
			break;
		}
	}

	return ERROR_OK;
}

int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
{
	if (callback == NULL)
		return ERROR_COMMAND_SYNTAX_ERROR;

	for (struct target_timer_callback *c = target_timer_callbacks;
			c; c = c->next) {
		if ((c->callback == callback) && (c->priv == priv)) {
			c->removed = true;
			return ERROR_OK;
		}
	}

	return ERROR_FAIL;
}

int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED) {
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s)", event,
			Jim_Nvp_value2name_simple(nvp_target_event, event)->name);

	target_handle_event(target, event);

	while (callback) {
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}

int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
{
	struct target_reset_callback *callback;

	LOG_DEBUG("target reset %i (%s)", reset_mode,
			Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);

	list_for_each_entry(callback, &target_reset_callback_list, list)
		callback->callback(target, reset_mode, callback->priv);

	return ERROR_OK;
}

int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
{
	struct target_trace_callback *callback;

	list_for_each_entry(callback, &target_trace_callback_list, list)
		callback->callback(target, len, data, callback->priv);

	return ERROR_OK;
}

static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, struct timeval *now)
{
	cb->when = *now;
	timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
	return ERROR_OK;
}

static int target_call_timer_callback(struct target_timer_callback *cb,
		struct timeval *now)
{
	cb->callback(cb->priv);

	if (cb->periodic)
		return target_timer_callback_periodic_restart(cb, now);

	return target_unregister_timer_callback(cb->callback, cb->priv);
}

static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	struct timeval now;
	gettimeofday(&now, NULL);

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (*callback) {
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->periodic) ||
			 timeval_compare(&now, &(*callback)->when) >= 0);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}

int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}

/* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}

/* Prints the working area layout for debug purposes */
static void print_wa_layout(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c) {
		LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
			c->backup ? 'b' : ' ', c->free ? ' ' : '*',
			c->address, c->address + c->size - 1, c->size);
		c = c->next;
	}
}

/* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
static void target_split_working_area(struct working_area *area, uint32_t size)
{
	assert(area->free); /* Shouldn't split an allocated area */
	assert(size <= area->size); /* Caller should guarantee this */

	/* Split only if not already the right size */
	if (size < area->size) {
		struct working_area *new_wa = malloc(sizeof(*new_wa));

		if (new_wa == NULL)
			return;

		new_wa->next = area->next;
		new_wa->size = area->size - size;
		new_wa->address = area->address + size;
		new_wa->backup = NULL;
		new_wa->user = NULL;
		new_wa->free = true;

		area->next = new_wa;
		area->size = size;

		/* If backup memory was allocated to this area, it has the wrong size
		 * now so free it and it will be reallocated if/when needed */
		if (area->backup) {
			free(area->backup);
			area->backup = NULL;
		}
	}
}

/* Merge all adjacent free areas into one */
static void target_merge_working_areas(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c && c->next) {
		assert(c->next->address == c->address + c->size); /* This is an invariant */

		/* Find two adjacent free areas */
		if (c->free && c->next->free) {
			/* Merge the last into the first */
			c->size += c->next->size;

			/* Remove the last */
			struct working_area *to_be_freed = c->next;
			c->next = c->next->next;
			if (to_be_freed->backup)
				free(to_be_freed->backup);
			free(to_be_freed);

			/* If backup memory was allocated to the remaining area, it has
			 * the wrong size now */
1743 if (c->backup) {
1744 free(c->backup);
1745 c->backup = NULL;
1746 }
1747 } else {
1748 c = c->next;
1749 }
1750 }
1751 }
1752
1753 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1754 {
1755 /* Reevaluate working area address based on MMU state */
1756 if (target->working_areas == NULL) {
1757 int retval;
1758 int enabled;
1759
1760 retval = target->type->mmu(target, &enabled);
1761 if (retval != ERROR_OK)
1762 return retval;
1763
1764 if (!enabled) {
1765 if (target->working_area_phys_spec) {
1766 LOG_DEBUG("MMU disabled, using physical "
1767 "address for working memory " TARGET_ADDR_FMT,
1768 target->working_area_phys);
1769 target->working_area = target->working_area_phys;
1770 } else {
1771 LOG_ERROR("No working memory available. "
1772 "Specify -work-area-phys to target.");
1773 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1774 }
1775 } else {
1776 if (target->working_area_virt_spec) {
1777 LOG_DEBUG("MMU enabled, using virtual "
1778 "address for working memory " TARGET_ADDR_FMT,
1779 target->working_area_virt);
1780 target->working_area = target->working_area_virt;
1781 } else {
1782 LOG_ERROR("No working memory available. "
1783 "Specify -work-area-virt to target.");
1784 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1785 }
1786 }
1787
1788 /* Set up initial working area on first call */
1789 struct working_area *new_wa = malloc(sizeof(*new_wa));
1790 if (new_wa) {
1791 new_wa->next = NULL;
1792 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
1793 new_wa->address = target->working_area;
1794 new_wa->backup = NULL;
1795 new_wa->user = NULL;
1796 new_wa->free = true;
1797 }
1798
1799 target->working_areas = new_wa;
1800 }
1801
1802 /* only allocate in multiples of 4 bytes */
1803 if (size % 4)
1804 size = (size + 3) & (~3UL);
1805
1806 struct working_area *c = target->working_areas;
1807
1808 /* Find the first large enough working area */
1809 while (c) {
1810 if (c->free && c->size >= size)
1811 break;
1812 c = c->next;
1813 }
1814
1815 if (c == NULL)
1816 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1817
1818 /* Split the working area into the requested size */
1819 target_split_working_area(c, size);
1820
1821 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
1822 size, c->address);
1823
1824 if (target->backup_working_area) {
1825 if (c->backup == NULL) {
1826 c->backup = malloc(c->size);
1827 if (c->backup == NULL)
1828 return ERROR_FAIL;
1829 }
1830
1831 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
1832 if (retval != ERROR_OK)
1833 return retval;
1834 }
1835
1836 /* mark as used, and return the new (reused) area */
1837 c->free = false;
1838 *area = c;
1839
1840 /* user pointer */
1841 c->user = area;
1842
1843 print_wa_layout(target);
1844
1845 return ERROR_OK;
1846 }
1847
1848 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1849 {
1850 int retval;
1851
1852 retval = target_alloc_working_area_try(target, size, area);
1853 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1854 LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
1855 return retval;
1856
1857 }
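/* Usage sketch for a hypothetical flash helper: allocate scratch RAM on
 * the target, use it, then return it so other algorithms can reuse the
 * space ("code" is a placeholder buffer):
 *
 *   struct working_area *wa = NULL;
 *   int retval = target_alloc_working_area(target, 1024, &wa);
 *   if (retval != ERROR_OK)
 *       return retval;
 *   retval = target_write_buffer(target, wa->address, 1024, code);
 *   ...
 *   target_free_working_area(target, wa);
 */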
1858
1859 static int target_restore_working_area(struct target *target, struct working_area *area)
1860 {
1861 int retval = ERROR_OK;
1862
1863 if (target->backup_working_area && area->backup != NULL) {
1864 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1865 if (retval != ERROR_OK)
1866 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1867 area->size, area->address);
1868 }
1869
1870 return retval;
1871 }
1872
1873 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1874 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1875 {
1876 int retval = ERROR_OK;
1877
1878 if (area->free)
1879 return retval;
1880
1881 if (restore) {
1882 retval = target_restore_working_area(target, area);
1883 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1884 if (retval != ERROR_OK)
1885 return retval;
1886 }
1887
1888 area->free = true;
1889
1890 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1891 area->size, area->address);
1892
1893 /* mark user pointer invalid */
1894 /* TODO: Is this really safe? It points to some previous caller's memory.
1895 * How could we know that the area pointer is still in that place and not
1896 * some other vital data? What's the purpose of this, anyway? */
1897 *area->user = NULL;
1898 area->user = NULL;
1899
1900 target_merge_working_areas(target);
1901
1902 print_wa_layout(target);
1903
1904 return retval;
1905 }
1906
1907 int target_free_working_area(struct target *target, struct working_area *area)
1908 {
1909 return target_free_working_area_restore(target, area, 1);
1910 }
1911
1912 static void target_destroy(struct target *target)
1913 {
1914 if (target->type->deinit_target)
1915 target->type->deinit_target(target);
1916
1917 if (target->semihosting)
1918 free(target->semihosting);
1919
1920 jtag_unregister_event_callback(jtag_enable_callback, target);
1921
1922 struct target_event_action *teap = target->event_action;
1923 while (teap) {
1924 struct target_event_action *next = teap->next;
1925 Jim_DecrRefCount(teap->interp, teap->body);
1926 free(teap);
1927 teap = next;
1928 }
1929
1930 target_free_all_working_areas(target);
1931 /* Now we have none or only one working area marked as free */
1932 if (target->working_areas) {
1933 free(target->working_areas->backup);
1934 free(target->working_areas);
1935 }
1936
1937 /* release the target's SMP list */
1938 if (target->smp) {
1939 struct target_list *head = target->head;
1940 while (head != NULL) {
1941 struct target_list *pos = head->next;
1942 head->target->smp = 0;
1943 free(head);
1944 head = pos;
1945 }
1946 target->smp = 0;
1947 }
1948
1949 free(target->gdb_port_override);
1950 free(target->type);
1951 free(target->trace_info);
1952 free(target->fileio_info);
1953 free(target->cmd_name);
1954 free(target);
1955 }
1956
1957 void target_quit(void)
1958 {
1959 struct target_event_callback *pe = target_event_callbacks;
1960 while (pe) {
1961 struct target_event_callback *t = pe->next;
1962 free(pe);
1963 pe = t;
1964 }
1965 target_event_callbacks = NULL;
1966
1967 struct target_timer_callback *pt = target_timer_callbacks;
1968 while (pt) {
1969 struct target_timer_callback *t = pt->next;
1970 free(pt);
1971 pt = t;
1972 }
1973 target_timer_callbacks = NULL;
1974
1975 for (struct target *target = all_targets; target;) {
1976 struct target *tmp;
1977
1978 tmp = target->next;
1979 target_destroy(target);
1980 target = tmp;
1981 }
1982
1983 all_targets = NULL;
1984 }
1985
1986 /* free resources and restore memory, if restoring memory fails,
1987 * free up resources anyway
1988 */
1989 static void target_free_all_working_areas_restore(struct target *target, int restore)
1990 {
1991 struct working_area *c = target->working_areas;
1992
1993 LOG_DEBUG("freeing all working areas");
1994
1995 /* Loop through all areas, restoring the allocated ones and marking them as free */
1996 while (c) {
1997 if (!c->free) {
1998 if (restore)
1999 target_restore_working_area(target, c);
2000 c->free = true;
2001 *c->user = NULL; /* Same as above */
2002 c->user = NULL;
2003 }
2004 c = c->next;
2005 }
2006
2007 /* Run a merge pass to combine all areas into one */
2008 target_merge_working_areas(target);
2009
2010 print_wa_layout(target);
2011 }
2012
2013 void target_free_all_working_areas(struct target *target)
2014 {
2015 target_free_all_working_areas_restore(target, 1);
2016 }
2017
2018 /* Find the largest number of bytes that can be allocated */
2019 uint32_t target_get_working_area_avail(struct target *target)
2020 {
2021 struct working_area *c = target->working_areas;
2022 uint32_t max_size = 0;
2023
2024 if (c == NULL)
2025 return target->working_area_size;
2026
2027 while (c) {
2028 if (c->free && max_size < c->size)
2029 max_size = c->size;
2030
2031 c = c->next;
2032 }
2033
2034 return max_size;
2035 }
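/* Common caller pattern (sketch, assuming the MIN() helper from
 * helper/types.h): shrink a transfer buffer to whatever is currently
 * available instead of failing outright:
 *
 *   uint32_t buf_size = MIN(target_get_working_area_avail(target), wanted);
 *   if (buf_size < minimum)
 *       return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 */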
2036
2037 int target_arch_state(struct target *target)
2038 {
2039 int retval;
2040 if (target == NULL) {
2041 LOG_WARNING("No target has been configured");
2042 return ERROR_OK;
2043 }
2044
2045 if (target->state != TARGET_HALTED)
2046 return ERROR_OK;
2047
2048 retval = target->type->arch_state(target);
2049 return retval;
2050 }
2051
2052 static int target_get_gdb_fileio_info_default(struct target *target,
2053 struct gdb_fileio_info *fileio_info)
2054 {
2055 /* If the target does not support semihosting functions, it has
2056 no need to provide a .get_gdb_fileio_info callback: just return
2057 ERROR_FAIL and gdb_server will report "Txx" (target halted)
2058 every time. */
2059 return ERROR_FAIL;
2060 }
2061
2062 static int target_gdb_fileio_end_default(struct target *target,
2063 int retcode, int fileio_errno, bool ctrl_c)
2064 {
2065 return ERROR_OK;
2066 }
2067
2068 static int target_profiling_default(struct target *target, uint32_t *samples,
2069 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2070 {
2071 struct timeval timeout, now;
2072
2073 gettimeofday(&timeout, NULL);
2074 timeval_add_time(&timeout, seconds, 0);
2075
2076 LOG_INFO("Starting profiling. Halting and resuming the"
2077 " target as often as we can...");
2078
2079 uint32_t sample_count = 0;
2080 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2081 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
if (reg == NULL) {
LOG_ERROR("Cannot find 'pc' register on this target, aborting profiling");
return ERROR_FAIL;
}
2082
2083 int retval = ERROR_OK;
2084 for (;;) {
2085 target_poll(target);
2086 if (target->state == TARGET_HALTED) {
2087 uint32_t t = buf_get_u32(reg->value, 0, 32);
2088 samples[sample_count++] = t;
2089 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2090 retval = target_resume(target, 1, 0, 0, 0);
2091 target_poll(target);
2092 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2093 } else if (target->state == TARGET_RUNNING) {
2094 /* We want to quickly sample the PC. */
2095 retval = target_halt(target);
2096 } else {
2097 LOG_INFO("Target not halted or running");
2098 retval = ERROR_OK;
2099 break;
2100 }
2101
2102 if (retval != ERROR_OK)
2103 break;
2104
2105 gettimeofday(&now, NULL);
2106 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2107 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2108 break;
2109 }
2110 }
2111
2112 *num_samples = sample_count;
2113 return retval;
2114 }
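/* Sampling-rate arithmetic: the 10 ms alive_sleep() above caps the
 * halt/resume loop at roughly 100 samples per second, so a 10-second
 * run collects at most about 1000 samples (less halt/resume overhead),
 * typically well under max_num_samples. */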
2115
2116 /* Single aligned 16- or 32-bit accesses are guaranteed to use the
2117 * corresponding access width; otherwise data is transferred as
2118 * quickly as possible
2119 */
2120 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2121 {
2122 LOG_DEBUG("writing buffer of %" PRIu32 " bytes at " TARGET_ADDR_FMT,
2123 size, address);
2124
2125 if (!target_was_examined(target)) {
2126 LOG_ERROR("Target not examined yet");
2127 return ERROR_FAIL;
2128 }
2129
2130 if (size == 0)
2131 return ERROR_OK;
2132
2133 if ((address + size - 1) < address) {
2134 /* GDB can request this when e.g. PC is 0xfffffffc */
2135 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2136 address,
2137 size);
2138 return ERROR_FAIL;
2139 }
2140
2141 return target->type->write_buffer(target, address, size, buffer);
2142 }
2143
2144 static int target_write_buffer_default(struct target *target,
2145 target_addr_t address, uint32_t count, const uint8_t *buffer)
2146 {
2147 uint32_t size;
2148
2149 /* Align the address up to a 4-byte boundary using progressively wider
2150 * accesses; the loop condition ensures the wider passes below still have data left. */
2151 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2152 if (address & size) {
2153 int retval = target_write_memory(target, address, size, 1, buffer);
2154 if (retval != ERROR_OK)
2155 return retval;
2156 address += size;
2157 count -= size;
2158 buffer += size;
2159 }
2160 }
2161
2162 /* Write the data with as large access size as possible. */
2163 for (; size > 0; size /= 2) {
2164 uint32_t aligned = count - count % size;
2165 if (aligned > 0) {
2166 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2167 if (retval != ERROR_OK)
2168 return retval;
2169 address += aligned;
2170 count -= aligned;
2171 buffer += aligned;
2172 }
2173 }
2174
2175 return ERROR_OK;
2176 }
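/* Worked example: writing 7 bytes to 0x1001. The head loop issues a
 * 1-byte write at 0x1001 and a 2-byte write at 0x1002 to reach 4-byte
 * alignment; the tail loop then issues one 4-byte write at 0x1004.
 * Total: 1 + 2 + 4 = 7 bytes in three accesses. */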
2177
2178 /* Single aligned 16- or 32-bit accesses are guaranteed to use the
2179 * corresponding access width; otherwise data is transferred as
2180 * quickly as possible
2181 */
2182 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2183 {
2184 LOG_DEBUG("reading buffer of %" PRIu32 " bytes at " TARGET_ADDR_FMT,
2185 size, address);
2186
2187 if (!target_was_examined(target)) {
2188 LOG_ERROR("Target not examined yet");
2189 return ERROR_FAIL;
2190 }
2191
2192 if (size == 0)
2193 return ERROR_OK;
2194
2195 if ((address + size - 1) < address) {
2196 /* GDB can request this when e.g. PC is 0xfffffffc */
2197 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2198 address,
2199 size);
2200 return ERROR_FAIL;
2201 }
2202
2203 return target->type->read_buffer(target, address, size, buffer);
2204 }
2205
2206 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2207 {
2208 uint32_t size;
2209
2210 /* Align the address up to a 4-byte boundary using progressively wider
2211 * accesses; the loop condition ensures the wider passes below still have data left. */
2212 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2213 if (address & size) {
2214 int retval = target_read_memory(target, address, size, 1, buffer);
2215 if (retval != ERROR_OK)
2216 return retval;
2217 address += size;
2218 count -= size;
2219 buffer += size;
2220 }
2221 }
2222
2223 /* Read the data with as large access size as possible. */
2224 for (; size > 0; size /= 2) {
2225 uint32_t aligned = count - count % size;
2226 if (aligned > 0) {
2227 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2228 if (retval != ERROR_OK)
2229 return retval;
2230 address += aligned;
2231 count -= aligned;
2232 buffer += aligned;
2233 }
2234 }
2235
2236 return ERROR_OK;
2237 }
2238
2239 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t* crc)
2240 {
2241 uint8_t *buffer;
2242 int retval;
2243 uint32_t i;
2244 uint32_t checksum = 0;
2245 if (!target_was_examined(target)) {
2246 LOG_ERROR("Target not examined yet");
2247 return ERROR_FAIL;
2248 }
2249
2250 retval = target->type->checksum_memory(target, address, size, &checksum);
2251 if (retval != ERROR_OK) {
2252 buffer = malloc(size);
2253 if (buffer == NULL) {
2254 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2255 return ERROR_FAIL;
2256 }
2257 retval = target_read_buffer(target, address, size, buffer);
2258 if (retval != ERROR_OK) {
2259 free(buffer);
2260 return retval;
2261 }
2262
2263 /* convert to target endianness */
2264 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2265 uint32_t target_data;
2266 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2267 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2268 }
2269
2270 retval = image_calculate_checksum(buffer, size, &checksum);
2271 free(buffer);
2272 }
2273
2274 *crc = checksum;
2275
2276 return retval;
2277 }
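/* Usage sketch: verify an on-target region against a host-side image
 * without reading back every byte (addr, len and host_buf are
 * placeholders):
 *
 *   uint32_t target_crc, host_crc;
 *   if (target_checksum_memory(target, addr, len, &target_crc) == ERROR_OK
 *           && image_calculate_checksum(host_buf, len, &host_crc) == ERROR_OK
 *           && target_crc != host_crc)
 *       LOG_ERROR("verification failed");
 */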
2278
2279 int target_blank_check_memory(struct target *target,
2280 struct target_memory_check_block *blocks, int num_blocks,
2281 uint8_t erased_value)
2282 {
2283 if (!target_was_examined(target)) {
2284 LOG_ERROR("Target not examined yet");
2285 return ERROR_FAIL;
2286 }
2287
2288 if (target->type->blank_check_memory == NULL)
2289 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2290
2291 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2292 }
2293
2294 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2295 {
2296 uint8_t value_buf[8];
2297 if (!target_was_examined(target)) {
2298 LOG_ERROR("Target not examined yet");
2299 return ERROR_FAIL;
2300 }
2301
2302 int retval = target_read_memory(target, address, 8, 1, value_buf);
2303
2304 if (retval == ERROR_OK) {
2305 *value = target_buffer_get_u64(target, value_buf);
2306 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2307 address,
2308 *value);
2309 } else {
2310 *value = 0x0;
2311 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2312 address);
2313 }
2314
2315 return retval;
2316 }
2317
2318 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2319 {
2320 uint8_t value_buf[4];
2321 if (!target_was_examined(target)) {
2322 LOG_ERROR("Target not examined yet");
2323 return ERROR_FAIL;
2324 }
2325
2326 int retval = target_read_memory(target, address, 4, 1, value_buf);
2327
2328 if (retval == ERROR_OK) {
2329 *value = target_buffer_get_u32(target, value_buf);
2330 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2331 address,
2332 *value);
2333 } else {
2334 *value = 0x0;
2335 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2336 address);
2337 }
2338
2339 return retval;
2340 }
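/* Usage sketch (the peripheral address is hypothetical):
 *
 *   uint32_t idcode;
 *   if (target_read_u32(target, 0xe0042000, &idcode) != ERROR_OK)
 *       return ERROR_FAIL;
 */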
2341
2342 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2343 {
2344 uint8_t value_buf[2];
2345 if (!target_was_examined(target)) {
2346 LOG_ERROR("Target not examined yet");
2347 return ERROR_FAIL;
2348 }
2349
2350 int retval = target_read_memory(target, address, 2, 1, value_buf);
2351
2352 if (retval == ERROR_OK) {
2353 *value = target_buffer_get_u16(target, value_buf);
2354 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2355 address,
2356 *value);
2357 } else {
2358 *value = 0x0;
2359 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2360 address);
2361 }
2362
2363 return retval;
2364 }
2365
2366 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2367 {
2368 if (!target_was_examined(target)) {
2369 LOG_ERROR("Target not examined yet");
2370 return ERROR_FAIL;
2371 }
2372
2373 int retval = target_read_memory(target, address, 1, 1, value);
2374
2375 if (retval == ERROR_OK) {
2376 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2377 address,
2378 *value);
2379 } else {
2380 *value = 0x0;
2381 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2382 address);
2383 }
2384
2385 return retval;
2386 }
2387
2388 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2389 {
2390 int retval;
2391 uint8_t value_buf[8];
2392 if (!target_was_examined(target)) {
2393 LOG_ERROR("Target not examined yet");
2394 return ERROR_FAIL;
2395 }
2396
2397 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2398 address,
2399 value);
2400
2401 target_buffer_set_u64(target, value_buf, value);
2402 retval = target_write_memory(target, address, 8, 1, value_buf);
2403 if (retval != ERROR_OK)
2404 LOG_DEBUG("failed: %i", retval);
2405
2406 return retval;
2407 }
2408
2409 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2410 {
2411 int retval;
2412 uint8_t value_buf[4];
2413 if (!target_was_examined(target)) {
2414 LOG_ERROR("Target not examined yet");
2415 return ERROR_FAIL;
2416 }
2417
2418 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2419 address,
2420 value);
2421
2422 target_buffer_set_u32(target, value_buf, value);
2423 retval = target_write_memory(target, address, 4, 1, value_buf);
2424 if (retval != ERROR_OK)
2425 LOG_DEBUG("failed: %i", retval);
2426
2427 return retval;
2428 }
2429
2430 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2431 {
2432 int retval;
2433 uint8_t value_buf[2];
2434 if (!target_was_examined(target)) {
2435 LOG_ERROR("Target not examined yet");
2436 return ERROR_FAIL;
2437 }
2438
2439 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2440 address,
2441 value);
2442
2443 target_buffer_set_u16(target, value_buf, value);
2444 retval = target_write_memory(target, address, 2, 1, value_buf);
2445 if (retval != ERROR_OK)
2446 LOG_DEBUG("failed: %i", retval);
2447
2448 return retval;
2449 }
2450
2451 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2452 {
2453 int retval;
2454 if (!target_was_examined(target)) {
2455 LOG_ERROR("Target not examined yet");
2456 return ERROR_FAIL;
2457 }
2458
2459 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2460 address, value);
2461
2462 retval = target_write_memory(target, address, 1, 1, &value);
2463 if (retval != ERROR_OK)
2464 LOG_DEBUG("failed: %i", retval);
2465
2466 return retval;
2467 }
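/* Read-modify-write sketch built from the helpers above (ctrl_addr is a
 * placeholder register address):
 *
 *   uint32_t ctrl;
 *   int retval = target_read_u32(target, ctrl_addr, &ctrl);
 *   if (retval == ERROR_OK)
 *       retval = target_write_u32(target, ctrl_addr, ctrl | 0x1);
 */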
2468
2469 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2470 {
2471 int retval;
2472 uint8_t value_buf[8];
2473 if (!target_was_examined(target)) {
2474 LOG_ERROR("Target not examined yet");
2475 return ERROR_FAIL;
2476 }
2477
2478 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2479 address,
2480 value);
2481
2482 target_buffer_set_u64(target, value_buf, value);
2483 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2484 if (retval != ERROR_OK)
2485 LOG_DEBUG("failed: %i", retval);
2486
2487 return retval;
2488 }
2489
2490 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2491 {
2492 int retval;
2493 uint8_t value_buf[4];
2494 if (!target_was_examined(target)) {
2495 LOG_ERROR("Target not examined yet");
2496 return ERROR_FAIL;
2497 }
2498
2499 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2500 address,
2501 value);
2502
2503 target_buffer_set_u32(target, value_buf, value);
2504 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2505 if (retval != ERROR_OK)
2506 LOG_DEBUG("failed: %i", retval);
2507
2508 return retval;
2509 }
2510
2511 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2512 {
2513 int retval;
2514 uint8_t value_buf[2];
2515 if (!target_was_examined(target)) {
2516 LOG_ERROR("Target not examined yet");
2517 return ERROR_FAIL;
2518 }
2519
2520 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2521 address,
2522 value);
2523
2524 target_buffer_set_u16(target, value_buf, value);
2525 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2526 if (retval != ERROR_OK)
2527 LOG_DEBUG("failed: %i", retval);
2528
2529 return retval;
2530 }
2531
2532 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2533 {
2534 int retval;
2535 if (!target_was_examined(target)) {
2536 LOG_ERROR("Target not examined yet");
2537 return ERROR_FAIL;
2538 }
2539
2540 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2541 address, value);
2542
2543 retval = target_write_phys_memory(target, address, 1, 1, &value);
2544 if (retval != ERROR_OK)
2545 LOG_DEBUG("failed: %i", retval);
2546
2547 return retval;
2548 }
2549
2550 static int find_target(struct command_context *cmd_ctx, const char *name)
2551 {
2552 struct target *target = get_target(name);
2553 if (target == NULL) {
2554 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2555 return ERROR_FAIL;
2556 }
2557 if (!target->tap->enabled) {
2558 LOG_USER("Target: TAP %s is disabled, "
2559 "can't be the current target\n",
2560 target->tap->dotted_name);
2561 return ERROR_FAIL;
2562 }
2563
2564 cmd_ctx->current_target = target;
2565 if (cmd_ctx->current_target_override)
2566 cmd_ctx->current_target_override = target;
2567
2568 return ERROR_OK;
2569 }
2570
2571
2572 COMMAND_HANDLER(handle_targets_command)
2573 {
2574 int retval = ERROR_OK;
2575 if (CMD_ARGC == 1) {
2576 retval = find_target(CMD_CTX, CMD_ARGV[0]);
2577 if (retval == ERROR_OK) {
2578 /* we're done! */
2579 return retval;
2580 }
2581 }
2582
2583 struct target *target = all_targets;
2584 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
2585 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
2586 while (target) {
2587 const char *state;
2588 char marker = ' ';
2589
2590 if (target->tap->enabled)
2591 state = target_state_name(target);
2592 else
2593 state = "tap-disabled";
2594
2595 if (CMD_CTX->current_target == target)
2596 marker = '*';
2597
2598 /* keep columns lined up to match the headers above */
2599 command_print(CMD_CTX,
2600 "%2d%c %-18s %-10s %-6s %-18s %s",
2601 target->target_number,
2602 marker,
2603 target_name(target),
2604 target_type_name(target),
2605 Jim_Nvp_value2name_simple(nvp_target_endian,
2606 target->endianness)->name,
2607 target->tap->dotted_name,
2608 state);
2609 target = target->next;
2610 }
2611
2612 return retval;
2613 }
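/* Illustrative "targets" output (names hypothetical); '*' marks the
 * current target:
 *
 *     TargetName         Type       Endian TapName            State
 * --  ------------------ ---------- ------ ------------------ ------------
 *  0* stm32f1x.cpu       cortex_m   little stm32f1x.cpu       halted
 */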
2614
2615 /* every 300ms we check for reset & power dropout and run the corresponding event handlers if detected. */
2616
2617 static int powerDropout;
2618 static int srstAsserted;
2619
2620 static int runPowerRestore;
2621 static int runPowerDropout;
2622 static int runSrstAsserted;
2623 static int runSrstDeasserted;
2624
2625 static int sense_handler(void)
2626 {
2627 static int prevSrstAsserted;
2628 static int prevPowerdropout;
2629
2630 int retval = jtag_power_dropout(&powerDropout);
2631 if (retval != ERROR_OK)
2632 return retval;
2633
2634 int powerRestored;
2635 powerRestored = prevPowerdropout && !powerDropout;
2636 if (powerRestored)
2637 runPowerRestore = 1;
2638
2639 int64_t current = timeval_ms();
2640 static int64_t lastPower;
2641 bool waitMore = lastPower + 2000 > current;
2642 if (powerDropout && !waitMore) {
2643 runPowerDropout = 1;
2644 lastPower = current;
2645 }
2646
2647 retval = jtag_srst_asserted(&srstAsserted);
2648 if (retval != ERROR_OK)
2649 return retval;
2650
2651 int srstDeasserted;
2652 srstDeasserted = prevSrstAsserted && !srstAsserted;
2653
2654 static int64_t lastSrst;
2655 waitMore = lastSrst + 2000 > current;
2656 if (srstDeasserted && !waitMore) {
2657 runSrstDeasserted = 1;
2658 lastSrst = current;
2659 }
2660
2661 if (!prevSrstAsserted && srstAsserted)
2662 runSrstAsserted = 1;
2663
2664 prevSrstAsserted = srstAsserted;
2665 prevPowerdropout = powerDropout;
2666
2667 if (srstDeasserted || powerRestored) {
2668 /* Other than logging the event we can't do anything here.
2669 * Issuing a reset is a particularly bad idea as we might
2670 * be inside a reset already.
2671 */
2672 }
2673
2674 return ERROR_OK;
2675 }
2676
2677 /* process target state changes */
2678 static int handle_target(void *priv)
2679 {
2680 Jim_Interp *interp = (Jim_Interp *)priv;
2681 int retval = ERROR_OK;
2682
2683 if (!is_jtag_poll_safe()) {
2684 /* polling is disabled currently */
2685 return ERROR_OK;
2686 }
2687
2688 /* we do not want to recurse here... */
2689 static int recursive;
2690 if (!recursive) {
2691 recursive = 1;
2692 sense_handler();
2693 /* danger! running these procedures can trigger srst assertions and power dropouts.
2694 * We need to avoid an infinite loop/recursion here and we do that by
2695 * clearing the flags after running these events.
2696 */
2697 int did_something = 0;
2698 if (runSrstAsserted) {
2699 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2700 Jim_Eval(interp, "srst_asserted");
2701 did_something = 1;
2702 }
2703 if (runSrstDeasserted) {
2704 Jim_Eval(interp, "srst_deasserted");
2705 did_something = 1;
2706 }
2707 if (runPowerDropout) {
2708 LOG_INFO("Power dropout detected, running power_dropout proc.");
2709 Jim_Eval(interp, "power_dropout");
2710 did_something = 1;
2711 }
2712 if (runPowerRestore) {
2713 Jim_Eval(interp, "power_restore");
2714 did_something = 1;
2715 }
2716
2717 if (did_something) {
2718 /* clear detect flags */
2719 sense_handler();
2720 }
2721
2722 /* clear action flags */
2723
2724 runSrstAsserted = 0;
2725 runSrstDeasserted = 0;
2726 runPowerRestore = 0;
2727 runPowerDropout = 0;
2728
2729 recursive = 0;
2730 }
2731
2732 /* Poll targets for state changes unless that's globally disabled.
2733 * Skip targets that are currently disabled.
2734 */
2735 for (struct target *target = all_targets;
2736 is_jtag_poll_safe() && target;
2737 target = target->next) {
2738
2739 if (!target_was_examined(target))
2740 continue;
2741
2742 if (!target->tap->enabled)
2743 continue;
2744
2745 if (target->backoff.times > target->backoff.count) {
2746 /* do not poll this time as we failed previously */
2747 target->backoff.count++;
2748 continue;
2749 }
2750 target->backoff.count = 0;
2751
2752 /* only poll target if we've got power and srst isn't asserted */
2753 if (!powerDropout && !srstAsserted) {
2754 /* polling may fail silently until the target has been examined */
2755 retval = target_poll(target);
2756 if (retval != ERROR_OK) {
2757 /* 100ms polling interval. Increase interval between polling up to 5000ms */
2758 if (target->backoff.times * polling_interval < 5000) {
2759 target->backoff.times *= 2;
2760 target->backoff.times++;
2761 }
2762
2763 /* Tell GDB to halt the debugger. This allows the user to
2764 * run monitor commands to handle the situation.
2765 */
2766 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2767 }
2768 if (target->backoff.times > 0) {
2769 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
2770 target_reset_examined(target);
2771 retval = target_examine_one(target);
2772 /* Target examination could have failed due to unstable connection,
2773 * but we set the examined flag anyway to repoll it later */
2774 if (retval != ERROR_OK) {
2775 target->examined = true;
2776 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
2777 target->backoff.times * polling_interval);
2778 return retval;
2779 }
2780 }
2781
2782 /* Since we succeeded, we reset backoff count */
2783 target->backoff.times = 0;
2784 }
2785 }
2786
2787 return retval;
2788 }
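/* Backoff arithmetic: after each failed poll, backoff.times grows as
 * 0 -> 1 -> 3 -> 7 -> 15 -> 31 -> 63 (times = times * 2 + 1), and growth
 * stops once times * polling_interval reaches 5000 ms. With the default
 * 100 ms interval that means at most 63 skipped polls, i.e. roughly
 * 6.3 seconds between retries on a persistently failing target. */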
2789
2790 COMMAND_HANDLER(handle_reg_command)
2791 {
2792 struct target *target;
2793 struct reg *reg = NULL;
2794 unsigned count = 0;
2795 char *value;
2796
2797 LOG_DEBUG("-");
2798
2799 target = get_current_target(CMD_CTX);
2800
2801 /* list all available registers for the current target */
2802 if (CMD_ARGC == 0) {
2803 struct reg_cache *cache = target->reg_cache;
2804
2805 count = 0;
2806 while (cache) {
2807 unsigned i;
2808
2809 command_print(CMD_CTX, "===== %s", cache->name);
2810
2811 for (i = 0, reg = cache->reg_list;
2812 i < cache->num_regs;
2813 i++, reg++, count++) {
2814 if (reg->exist == false)
2815 continue;
2816 /* only print cached values if they are valid */
2817 if (reg->valid) {
2818 value = buf_to_str(reg->value,
2819 reg->size, 16);
2820 command_print(CMD_CTX,
2821 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2822 count, reg->name,
2823 reg->size, value,
2824 reg->dirty
2825 ? " (dirty)"
2826 : "");
2827 free(value);
2828 } else {
2829 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2830 count, reg->name,
2831 reg->size);
2832 }
2833 }
2834 cache = cache->next;
2835 }
2836
2837 return ERROR_OK;
2838 }
2839
2840 /* access a single register by its ordinal number */
2841 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2842 unsigned num;
2843 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2844
2845 struct reg_cache *cache = target->reg_cache;
2846 count = 0;
2847 while (cache) {
2848 unsigned i;
2849 for (i = 0; i < cache->num_regs; i++) {
2850 if (count++ == num) {
2851 reg = &cache->reg_list[i];
2852 break;
2853 }
2854 }
2855 if (reg)
2856 break;
2857 cache = cache->next;
2858 }
2859
2860 if (!reg) {
2861 command_print(CMD_CTX, "%u is out of bounds, the current target "
2862 "has only %u registers (0 - %u)", num, count, count - 1);
2863 return ERROR_OK;
2864 }
2865 } else {
2866 /* access a single register by its name */
2867 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2868
2869 if (!reg)
2870 goto not_found;
2871 }
2872
2873 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2874
2875 if (!reg->exist)
2876 goto not_found;
2877
2878 /* display a register */
2879 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2880 && (CMD_ARGV[1][0] <= '9')))) {
2881 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2882 reg->valid = 0;
2883
2884 if (reg->valid == 0)
2885 reg->type->get(reg);
2886 value = buf_to_str(reg->value, reg->size, 16);
2887 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2888 free(value);
2889 return ERROR_OK;
2890 }
2891
2892 /* set register value */
2893 if (CMD_ARGC == 2) {
2894 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2895 if (buf == NULL)
2896 return ERROR_FAIL;
2897 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2898
2899 reg->type->set(reg, buf);
2900
2901 value = buf_to_str(reg->value, reg->size, 16);
2902 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2903 free(value);
2904
2905 free(buf);
2906
2907 return ERROR_OK;
2908 }
2909
2910 return ERROR_COMMAND_SYNTAX_ERROR;
2911
2912 not_found:
2913 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2914 return ERROR_OK;
2915 }
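/* Illustrative invocations, matching the argument parsing above:
 *   reg              list every register in every cache
 *   reg 0            print register number 0
 *   reg pc           print one register by name
 *   reg pc force     re-read the cached value from the target
 *   reg pc 0x8000    write a new value
 */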
2916
2917 COMMAND_HANDLER(handle_poll_command)
2918 {
2919 int retval = ERROR_OK;
2920 struct target *target = get_current_target(CMD_CTX);
2921
2922 if (CMD_ARGC == 0) {
2923 command_print(CMD_CTX, "background polling: %s",
2924 jtag_poll_get_enabled() ? "on" : "off");
2925 command_print(CMD_CTX, "TAP: %s (%s)",
2926 target->tap->dotted_name,
2927 target->tap->enabled ? "enabled" : "disabled");
2928 if (!target->tap->enabled)
2929 return ERROR_OK;
2930 retval = target_poll(target);
2931 if (retval != ERROR_OK)
2932 return retval;
2933 retval = target_arch_state(target);
2934 if (retval != ERROR_OK)
2935 return retval;
2936 } else if (CMD_ARGC == 1) {
2937 bool enable;
2938 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2939 jtag_poll_set_enabled(enable);
2940 } else
2941 return ERROR_COMMAND_SYNTAX_ERROR;
2942
2943 return retval;
2944 }
2945
2946 COMMAND_HANDLER(handle_wait_halt_command)
2947 {
2948 if (CMD_ARGC > 1)
2949 return ERROR_COMMAND_SYNTAX_ERROR;
2950
2951 unsigned ms = DEFAULT_HALT_TIMEOUT;
2952 if (1 == CMD_ARGC) {
2953 int retval = parse_uint(CMD_ARGV[0], &ms);
2954 if (ERROR_OK != retval)
2955 return ERROR_COMMAND_SYNTAX_ERROR;
2956 }
2957
2958 struct target *target = get_current_target(CMD_CTX);
2959 return target_wait_state(target, TARGET_HALTED, ms);
2960 }
2961
2962 /* wait for target state to change. The trick here is to have a low
2963 * latency for short waits and not to suck up all the CPU time
2964 * on longer waits.
2965 *
2966 * After 500ms, keep_alive() is invoked
2967 */
2968 int target_wait_state(struct target *target, enum target_state state, int ms)
2969 {
2970 int retval;
2971 int64_t then = 0, cur;
2972 bool once = true;
2973
2974 for (;;) {
2975 retval = target_poll(target);
2976 if (retval != ERROR_OK)
2977 return retval;
2978 if (target->state == state)
2979 break;
2980 cur = timeval_ms();
2981 if (once) {
2982 once = false;
2983 then = timeval_ms();
2984 LOG_DEBUG("waiting for target %s...",
2985 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2986 }
2987
2988 if (cur-then > 500)
2989 keep_alive();
2990
2991 if ((cur-then) > ms) {
2992 LOG_ERROR("timed out while waiting for target %s",
2993 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2994 return ERROR_FAIL;
2995 }
2996 }
2997
2998 return ERROR_OK;
2999 }
3000
3001 COMMAND_HANDLER(handle_halt_command)
3002 {
3003 LOG_DEBUG("-");
3004
3005 struct target *target = get_current_target(CMD_CTX);
3006
3007 target->verbose_halt_msg = true;
3008
3009 int retval = target_halt(target);
3010 if (ERROR_OK != retval)
3011 return retval;
3012
3013 if (CMD_ARGC == 1) {
3014 unsigned wait_local;
3015 retval = parse_uint(CMD_ARGV[0], &wait_local);
3016 if (ERROR_OK != retval)
3017 return ERROR_COMMAND_SYNTAX_ERROR;
3018 if (!wait_local)
3019 return ERROR_OK;
3020 }
3021
3022 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3023 }
3024
3025 COMMAND_HANDLER(handle_soft_reset_halt_command)
3026 {
3027 struct target *target = get_current_target(CMD_CTX);
3028
3029 LOG_USER("requesting target halt and executing a soft reset");
3030
3031 target_soft_reset_halt(target);
3032
3033 return ERROR_OK;
3034 }
3035
3036 COMMAND_HANDLER(handle_reset_command)
3037 {
3038 if (CMD_ARGC > 1)
3039 return ERROR_COMMAND_SYNTAX_ERROR;
3040
3041 enum target_reset_mode reset_mode = RESET_RUN;
3042 if (CMD_ARGC == 1) {
3043 const Jim_Nvp *n;
3044 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3045 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3046 return ERROR_COMMAND_SYNTAX_ERROR;
3047 reset_mode = n->value;
3048 }
3049
3050 /* reset *all* targets */
3051 return target_process_reset(CMD_CTX, reset_mode);
3052 }
3053
3054
3055 COMMAND_HANDLER(handle_resume_command)
3056 {
3057 int current = 1;
3058 if (CMD_ARGC > 1)
3059 return ERROR_COMMAND_SYNTAX_ERROR;
3060
3061 struct target *target = get_current_target(CMD_CTX);
3062
3063 /* with no CMD_ARGV, resume from current pc, addr = 0,
3064 * with one arguments, addr = CMD_ARGV[0],
3065 * handle breakpoints, not debugging */
3066 target_addr_t addr = 0;
3067 if (CMD_ARGC == 1) {
3068 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3069 current = 0;
3070 }
3071
3072 return target_resume(target, current, addr, 1, 0);
3073 }
3074
3075 COMMAND_HANDLER(handle_step_command)
3076 {
3077 if (CMD_ARGC > 1)
3078 return ERROR_COMMAND_SYNTAX_ERROR;
3079
3080 LOG_DEBUG("-");
3081
3082 /* with no CMD_ARGV, step from current pc, addr = 0,
3083 * with one argument addr = CMD_ARGV[0],
3084 * handle breakpoints, debugging */
3085 target_addr_t addr = 0;
3086 int current_pc = 1;
3087 if (CMD_ARGC == 1) {
3088 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3089 current_pc = 0;
3090 }
3091
3092 struct target *target = get_current_target(CMD_CTX);
3093
3094 return target->type->step(target, current_pc, addr, 1);
3095 }
3096
3097 static void handle_md_output(struct command_context *cmd_ctx,
3098 struct target *target, target_addr_t address, unsigned size,
3099 unsigned count, const uint8_t *buffer)
3100 {
3101 const unsigned line_bytecnt = 32;
3102 unsigned line_modulo = line_bytecnt / size;
3103
3104 char output[line_bytecnt * 4 + 1];
3105 unsigned output_len = 0;
3106
3107 const char *value_fmt;
3108 switch (size) {
3109 case 8:
3110 value_fmt = "%16.16"PRIx64" ";
3111 break;
3112 case 4:
3113 value_fmt = "%8.8"PRIx64" ";
3114 break;
3115 case 2:
3116 value_fmt = "%4.4"PRIx64" ";
3117 break;
3118 case 1:
3119 value_fmt = "%2.2"PRIx64" ";
3120 break;
3121 default:
3122 /* "can't happen", caller checked */
3123 LOG_ERROR("invalid memory read size: %u", size);
3124 return;
3125 }
3126
3127 for (unsigned i = 0; i < count; i++) {
3128 if (i % line_modulo == 0) {
3129 output_len += snprintf(output + output_len,
3130 sizeof(output) - output_len,
3131 TARGET_ADDR_FMT ": ",
3132 (address + (i * size)));
3133 }
3134
3135 uint64_t value = 0;
3136 const uint8_t *value_ptr = buffer + i * size;
3137 switch (size) {
3138 case 8:
3139 value = target_buffer_get_u64(target, value_ptr);
3140 break;
3141 case 4:
3142 value = target_buffer_get_u32(target, value_ptr);
3143 break;
3144 case 2:
3145 value = target_buffer_get_u16(target, value_ptr);
3146 break;
3147 case 1:
3148 value = *value_ptr;
break;
3149 }
3150 output_len += snprintf(output + output_len,
3151 sizeof(output) - output_len,
3152 value_fmt, value);
3153
3154 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3155 command_print(cmd_ctx, "%s", output);
3156 output_len = 0;
3157 }
3158 }
3159 }
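/* Illustrative line produced for a 4-byte-wide read of four words
 * (values hypothetical):
 *
 *   0x20000000: deadbeef 00000001 20000400 08000199
 */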
3160
3161 COMMAND_HANDLER(handle_md_command)
3162 {
3163 if (CMD_ARGC < 1)
3164 return ERROR_COMMAND_SYNTAX_ERROR;
3165
3166 unsigned size = 0;
3167 switch (CMD_NAME[2]) {
3168 case 'd':
3169 size = 8;
3170 break;
3171 case 'w':
3172 size = 4;
3173 break;
3174 case 'h':
3175 size = 2;
3176 break;
3177 case 'b':
3178 size = 1;
3179 break;
3180 default:
3181 return ERROR_COMMAND_SYNTAX_ERROR;
3182 }
3183
3184 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3185 int (*fn)(struct target *target,
3186 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3187 if (physical) {
3188 CMD_ARGC--;
3189 CMD_ARGV++;
3190 fn = target_read_phys_memory;
3191 } else
3192 fn = target_read_memory;
3193 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3194 return ERROR_COMMAND_SYNTAX_ERROR;
3195
3196 target_addr_t address;
3197 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address