1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * ESP Xtensa SMP target API for OpenOCD *
5 * Copyright (C) 2020 Espressif Systems Ltd. Co *
6 ***************************************************************************/
13 #include <target/target.h>
14 #include <target/target_type.h>
15 #include <target/smp.h>
16 #include <target/semihosting_common.h>
17 #include "esp_xtensa_smp.h"
18 #include "esp_xtensa_semihosting.h"
21 Multiprocessor stuff common:
23 The ESP Xtensa chip can have several cores in it, which can run in SMP-mode if an
24 SMP-capable OS is running. The hardware has a few features which makes
25 SMP debugging much easier.
27 First of all, there's something called a 'break network', consisting of a
28 BreakIn input and a BreakOut output on each CPU. The idea is that as soon
29 as a CPU goes into debug mode for whatever reason, it'll signal that using
30 its DebugOut pin. This signal is connected to the other CPU's DebugIn
31 input, causing this CPU also to go into debugging mode. To resume execution
32 when using only this break network, we will need to manually resume both cores.
35 An alternative to this is the XOCDMode output and the RunStall (or DebugStall)
36 input. When these are cross-connected, a CPU that goes into debug mode will
37 halt execution entirely on the other CPU. Execution on the other CPU can be
38 resumed by either the first CPU going out of debug mode, or the second CPU
39 going into debug mode: the stall is temporarily lifted as long as the stalled CPU is in debug mode.
42 A third, separate, signal is CrossTrigger. This is connected in the same way
43 as the breakIn/breakOut network, but is for the TRAX (trace memory) feature;
44 it does not affect OCD in any way.
50 The ESP Xtensa chip has several Xtensa cores inside, but presents itself to the OCD
51 as one chip that works in multithreading mode under the FreeRTOS OS.
53 When one core stops, the other core will be stopped automatically by smpbreak.
54 The core that initiates the stop condition will be defined as the active core, and
55 registers of this core will be transferred.
58 #define ESP_XTENSA_SMP_EXAMINE_OTHER_CORES 5
60 static int esp_xtensa_smp_update_halt_gdb(struct target
*target
, bool *need_resume
);
62 static inline struct esp_xtensa_smp_common
*target_to_esp_xtensa_smp(struct target
*target
)
64 return container_of(target
->arch_info
, struct esp_xtensa_smp_common
, esp_xtensa
);
67 int esp_xtensa_smp_assert_reset(struct target
*target
)
72 int esp_xtensa_smp_deassert_reset(struct target
*target
)
74 LOG_TARGET_DEBUG(target
, "begin");
76 int ret
= xtensa_deassert_reset(target
);
79 /* in SMP mode when chip was running single-core app the other core can be left un-examined,
80 because examination is done before SOC reset. But after SOC reset it is functional and should be handled.
81 So try to examine un-examined core just after SOC reset */
82 if (target
->smp
&& !target_was_examined(target
))
83 ret
= xtensa_examine(target
);
87 int esp_xtensa_smp_soft_reset_halt(struct target
*target
)
90 struct target_list
*head
;
91 struct esp_xtensa_smp_common
*esp_xtensa_smp
= target_to_esp_xtensa_smp(target
);
93 LOG_TARGET_DEBUG(target
, "begin");
94 /* in SMP mode we need to ensure that at first we reset SOC on PRO-CPU
95 and then call xtensa_assert_reset() for all cores */
96 if (target
->smp
&& target
->coreid
!= 0)
98 /* Reset the SoC first */
99 if (esp_xtensa_smp
->chip_ops
->reset
) {
100 res
= esp_xtensa_smp
->chip_ops
->reset(target
);
105 return xtensa_assert_reset(target
);
107 foreach_smp_target(head
, target
->smp_targets
) {
108 res
= xtensa_assert_reset(head
->target
);
115 int esp_xtensa_smp_on_halt(struct target
*target
)
117 struct target_list
*head
;
120 return esp_xtensa_on_halt(target
);
122 foreach_smp_target(head
, target
->smp_targets
) {
123 int res
= esp_xtensa_on_halt(head
->target
);
130 static struct target
*get_halted_esp_xtensa_smp(struct target
*target
, int32_t coreid
)
132 struct target_list
*head
;
135 foreach_smp_target(head
, target
->smp_targets
) {
137 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
144 int esp_xtensa_smp_poll(struct target
*target
)
146 enum target_state old_state
= target
->state
;
147 struct esp_xtensa_smp_common
*esp_xtensa_smp
= target_to_esp_xtensa_smp(target
);
148 struct esp_xtensa_common
*esp_xtensa
= target_to_esp_xtensa(target
);
149 uint32_t old_dbg_stubs_base
= esp_xtensa
->esp
.dbg_stubs
.base
;
150 struct target_list
*head
;
152 bool other_core_resume_req
= false;
154 if (target
->state
== TARGET_HALTED
&& target
->smp
&& target
->gdb_service
&& !target
->gdb_service
->target
) {
155 target
->gdb_service
->target
= get_halted_esp_xtensa_smp(target
, target
->gdb_service
->core
[1]);
156 LOG_INFO("Switch GDB target to '%s'", target_name(target
->gdb_service
->target
));
157 if (esp_xtensa_smp
->chip_ops
->on_halt
)
158 esp_xtensa_smp
->chip_ops
->on_halt(target
);
159 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
163 int ret
= esp_xtensa_poll(target
);
167 if (esp_xtensa
->esp
.dbg_stubs
.base
&& old_dbg_stubs_base
!= esp_xtensa
->esp
.dbg_stubs
.base
) {
168 /* debug stubs base is set only in PRO-CPU TRAX register, so sync this info */
169 foreach_smp_target(head
, target
->smp_targets
) {
173 target_to_esp_xtensa(curr
)->esp
.dbg_stubs
.base
= esp_xtensa
->esp
.dbg_stubs
.base
;
178 if (target
->state
== TARGET_RESET
) {
179 esp_xtensa_smp
->examine_other_cores
= ESP_XTENSA_SMP_EXAMINE_OTHER_CORES
;
180 } else if (esp_xtensa_smp
->examine_other_cores
> 0 &&
181 (target
->state
== TARGET_RUNNING
|| target
->state
== TARGET_HALTED
)) {
182 LOG_TARGET_DEBUG(target
, "Check for unexamined cores after reset");
183 bool all_examined
= true;
184 foreach_smp_target(head
, target
->smp_targets
) {
188 if (!target_was_examined(curr
)) {
189 if (target_examine_one(curr
) != ERROR_OK
) {
190 LOG_DEBUG("Failed to examine!");
191 all_examined
= false;
196 esp_xtensa_smp
->examine_other_cores
= 0;
198 esp_xtensa_smp
->examine_other_cores
--;
202 if (old_state
!= TARGET_HALTED
&& target
->state
== TARGET_HALTED
) {
204 ret
= esp_xtensa_smp_update_halt_gdb(target
, &other_core_resume_req
);
208 /* Call any event callbacks that are applicable */
209 if (old_state
== TARGET_DEBUG_RUNNING
) {
210 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
212 if (esp_xtensa_semihosting(target
, &ret
) == SEMIHOSTING_HANDLED
) {
213 if (ret
== ERROR_OK
&& esp_xtensa
->semihost
.need_resume
&&
214 !esp_xtensa_smp
->other_core_does_resume
) {
215 esp_xtensa
->semihost
.need_resume
= false;
216 /* Resume xtensa_resume will handle BREAK instruction. */
217 ret
= target_resume(target
, 1, 0, 1, 0);
218 if (ret
!= ERROR_OK
) {
219 LOG_ERROR("Failed to resume target");
225 /* check whether any core polled by esp_xtensa_smp_update_halt_gdb() requested resume */
226 if (target
->smp
&& other_core_resume_req
) {
227 /* Resume xtensa_resume will handle BREAK instruction. */
228 ret
= target_resume(target
, 1, 0, 1, 0);
229 if (ret
!= ERROR_OK
) {
230 LOG_ERROR("Failed to resume target");
235 if (esp_xtensa_smp
->chip_ops
->on_halt
)
236 esp_xtensa_smp
->chip_ops
->on_halt(target
);
237 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
244 static int esp_xtensa_smp_update_halt_gdb(struct target
*target
, bool *need_resume
)
246 struct esp_xtensa_smp_common
*esp_xtensa_smp
;
247 struct target
*gdb_target
= NULL
;
248 struct target_list
*head
;
252 *need_resume
= false;
254 if (target
->gdb_service
&& target
->gdb_service
->target
)
255 LOG_DEBUG("GDB target '%s'", target_name(target
->gdb_service
->target
));
257 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
258 target
->gdb_service
->target
= target
;
259 target
->gdb_service
->core
[0] = target
->coreid
;
260 LOG_INFO("Set GDB target to '%s'", target_name(target
));
263 if (target
->gdb_service
)
264 gdb_target
= target
->gdb_service
->target
;
266 /* due to smpbreak config other cores can also go to HALTED state */
267 foreach_smp_target(head
, target
->smp_targets
) {
269 LOG_DEBUG("Check target '%s'", target_name(curr
));
270 /* skip calling context */
273 if (!target_was_examined(curr
)) {
274 curr
->state
= TARGET_HALTED
;
277 /* skip targets that were already halted */
278 if (curr
->state
== TARGET_HALTED
)
280 /* Skip gdb_target; it alerts GDB so has to be polled as last one */
281 if (curr
== gdb_target
)
283 LOG_DEBUG("Poll target '%s'", target_name(curr
));
285 esp_xtensa_smp
= target_to_esp_xtensa_smp(curr
);
286 /* avoid auto-resume after syscall, it will be done later */
287 esp_xtensa_smp
->other_core_does_resume
= true;
288 /* avoid recursion in esp_xtensa_smp_poll() */
290 if (esp_xtensa_smp
->chip_ops
->poll
)
291 ret
= esp_xtensa_smp
->chip_ops
->poll(curr
);
293 ret
= esp_xtensa_smp_poll(curr
);
297 esp_xtensa_smp
->other_core_does_resume
= false;
298 struct esp_xtensa_common
*curr_esp_xtensa
= target_to_esp_xtensa(curr
);
299 if (curr_esp_xtensa
->semihost
.need_resume
) {
300 curr_esp_xtensa
->semihost
.need_resume
= false;
305 /* after all targets were updated, poll the gdb serving target */
306 if (gdb_target
&& gdb_target
!= target
) {
307 esp_xtensa_smp
= target_to_esp_xtensa_smp(gdb_target
);
308 if (esp_xtensa_smp
->chip_ops
->poll
)
309 ret
= esp_xtensa_smp
->chip_ops
->poll(gdb_target
);
311 ret
= esp_xtensa_smp_poll(gdb_target
);
319 static inline int esp_xtensa_smp_smpbreak_disable(struct target
*target
, uint32_t *smp_break
)
321 int res
= xtensa_smpbreak_get(target
, smp_break
);
324 return xtensa_smpbreak_set(target
, 0);
/* Restore a BreakInOut (smpbreak) configuration previously saved by
 * esp_xtensa_smp_smpbreak_disable(). */
static inline int esp_xtensa_smp_smpbreak_restore(struct target *target, uint32_t smp_break)
{
	return xtensa_smpbreak_set(target, smp_break);
}
332 static int esp_xtensa_smp_resume_cores(struct target
*target
,
333 int handle_breakpoints
,
336 struct target_list
*head
;
339 LOG_TARGET_DEBUG(target
, "begin");
341 foreach_smp_target(head
, target
->smp_targets
) {
343 /* in single-core mode disabled core cannot be examined, but need to be resumed too*/
344 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
) && target_was_examined(curr
)) {
345 /* resume current address, not in SMP mode */
347 int res
= esp_xtensa_smp_resume(curr
, 1, 0, handle_breakpoints
, debug_execution
);
356 int esp_xtensa_smp_resume(struct target
*target
,
358 target_addr_t address
,
359 int handle_breakpoints
,
365 xtensa_smpbreak_get(target
, &smp_break
);
366 LOG_TARGET_DEBUG(target
, "smp_break=0x%" PRIx32
, smp_break
);
368 /* dummy resume for smp toggle in order to reduce gdb impact */
369 if ((target
->smp
) && (target
->gdb_service
) && (target
->gdb_service
->core
[1] != -1)) {
370 /* simulate a start and halt of target */
371 target
->gdb_service
->target
= NULL
;
372 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
373 /* fake resume at next poll we play the target core[1], see poll*/
374 LOG_TARGET_DEBUG(target
, "Fake resume");
375 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
379 /* xtensa_prepare_resume() can step over breakpoint/watchpoint and generate signals on BreakInOut circuit for
380 * other cores. So disconnect this core from BreakInOut circuit and do xtensa_prepare_resume(). */
381 res
= esp_xtensa_smp_smpbreak_disable(target
, &smp_break
);
384 res
= xtensa_prepare_resume(target
, current
, address
, handle_breakpoints
, debug_execution
);
385 /* restore configured BreakInOut signals config */
386 int ret
= esp_xtensa_smp_smpbreak_restore(target
, smp_break
);
389 if (res
!= ERROR_OK
) {
390 LOG_TARGET_ERROR(target
, "Failed to prepare for resume!");
395 if (target
->gdb_service
)
396 target
->gdb_service
->core
[0] = -1;
397 res
= esp_xtensa_smp_resume_cores(target
, handle_breakpoints
, debug_execution
);
402 res
= xtensa_do_resume(target
);
403 if (res
!= ERROR_OK
) {
404 LOG_TARGET_ERROR(target
, "Failed to resume!");
408 target
->debug_reason
= DBG_REASON_NOTHALTED
;
409 if (!debug_execution
)
410 target
->state
= TARGET_RUNNING
;
412 target
->state
= TARGET_DEBUG_RUNNING
;
414 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
418 int esp_xtensa_smp_step(struct target
*target
,
420 target_addr_t address
,
421 int handle_breakpoints
)
424 uint32_t smp_break
= 0;
425 struct esp_xtensa_smp_common
*esp_xtensa_smp
= target_to_esp_xtensa_smp(target
);
428 res
= esp_xtensa_smp_smpbreak_disable(target
, &smp_break
);
432 res
= xtensa_step(target
, current
, address
, handle_breakpoints
);
434 if (res
== ERROR_OK
) {
435 if (esp_xtensa_smp
->chip_ops
->on_halt
)
436 esp_xtensa_smp
->chip_ops
->on_halt(target
);
437 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
441 int ret
= esp_xtensa_smp_smpbreak_restore(target
, smp_break
);
449 int esp_xtensa_smp_watchpoint_add(struct target
*target
, struct watchpoint
*watchpoint
)
451 int res
= xtensa_watchpoint_add(target
, watchpoint
);
458 struct target_list
*head
;
459 foreach_smp_target(head
, target
->smp_targets
) {
460 struct target
*curr
= head
->target
;
461 if (curr
== target
|| !target_was_examined(curr
))
463 /* Need to use high level API here because every target for core contains list of watchpoints.
464 * GDB works with active core only, so we need to duplicate every watchpoint on other cores,
465 * otherwise watchpoint_free() on active core can fail if WP has been initially added on another core. */
467 res
= watchpoint_add(curr
, watchpoint
->address
, watchpoint
->length
,
468 watchpoint
->rw
, watchpoint
->value
, watchpoint
->mask
);
476 int esp_xtensa_smp_watchpoint_remove(struct target
*target
, struct watchpoint
*watchpoint
)
478 int res
= xtensa_watchpoint_remove(target
, watchpoint
);
485 struct target_list
*head
;
486 foreach_smp_target(head
, target
->smp_targets
) {
487 struct target
*curr
= head
->target
;
490 /* see big comment in esp_xtensa_smp_watchpoint_add() */
492 watchpoint_remove(curr
, watchpoint
->address
);
498 int esp_xtensa_smp_init_arch_info(struct target
*target
,
499 struct esp_xtensa_smp_common
*esp_xtensa_smp
,
500 struct xtensa_debug_module_config
*dm_cfg
,
501 const struct esp_xtensa_smp_chip_ops
*chip_ops
,
502 const struct esp_semihost_ops
*semihost_ops
)
504 int ret
= esp_xtensa_init_arch_info(target
, &esp_xtensa_smp
->esp_xtensa
, dm_cfg
, semihost_ops
);
507 esp_xtensa_smp
->chip_ops
= chip_ops
;
508 esp_xtensa_smp
->examine_other_cores
= ESP_XTENSA_SMP_EXAMINE_OTHER_CORES
;
512 int esp_xtensa_smp_target_init(struct command_context
*cmd_ctx
, struct target
*target
)
514 int ret
= esp_xtensa_target_init(cmd_ctx
, target
);
519 struct target_list
*head
;
520 foreach_smp_target(head
, target
->smp_targets
) {
521 struct target
*curr
= head
->target
;
522 ret
= esp_xtensa_semihosting_init(curr
);
527 ret
= esp_xtensa_semihosting_init(target
);
534 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtdef
)
536 struct target
*target
= get_current_target(CMD_CTX
);
537 if (target
->smp
&& CMD_ARGC
> 0) {
538 struct target_list
*head
;
540 foreach_smp_target(head
, target
->smp_targets
) {
542 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do
,
543 target_to_xtensa(curr
));
549 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do
,
550 target_to_xtensa(target
));
553 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtopt
)
555 struct target
*target
= get_current_target(CMD_CTX
);
556 if (target
->smp
&& CMD_ARGC
> 0) {
557 struct target_list
*head
;
559 foreach_smp_target(head
, target
->smp_targets
) {
561 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do
,
562 target_to_xtensa(curr
));
568 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do
,
569 target_to_xtensa(target
));
572 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtmem
)
574 struct target
*target
= get_current_target(CMD_CTX
);
575 if (target
->smp
&& CMD_ARGC
> 0) {
576 struct target_list
*head
;
578 foreach_smp_target(head
, target
->smp_targets
) {
580 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do
,
581 target_to_xtensa(curr
));
587 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do
,
588 target_to_xtensa(target
));
591 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtmpu
)
593 struct target
*target
= get_current_target(CMD_CTX
);
594 if (target
->smp
&& CMD_ARGC
> 0) {
595 struct target_list
*head
;
597 foreach_smp_target(head
, target
->smp_targets
) {
599 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do
,
600 target_to_xtensa(curr
));
606 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do
,
607 target_to_xtensa(target
));
610 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtmmu
)
612 struct target
*target
= get_current_target(CMD_CTX
);
613 if (target
->smp
&& CMD_ARGC
> 0) {
614 struct target_list
*head
;
616 foreach_smp_target(head
, target
->smp_targets
) {
618 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do
,
619 target_to_xtensa(curr
));
625 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do
,
626 target_to_xtensa(target
));
629 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtreg
)
631 struct target
*target
= get_current_target(CMD_CTX
);
632 if (target
->smp
&& CMD_ARGC
> 0) {
633 struct target_list
*head
;
635 foreach_smp_target(head
, target
->smp_targets
) {
637 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do
,
638 target_to_xtensa(curr
));
644 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do
,
645 target_to_xtensa(target
));
648 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtregfmt
)
650 struct target
*target
= get_current_target(CMD_CTX
);
651 if (target
->smp
&& CMD_ARGC
> 0) {
652 struct target_list
*head
;
654 foreach_smp_target(head
, target
->smp_targets
) {
656 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do
,
657 target_to_xtensa(curr
));
663 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do
,
664 target_to_xtensa(target
));
667 COMMAND_HANDLER(esp_xtensa_smp_cmd_permissive_mode
)
669 struct target
*target
= get_current_target(CMD_CTX
);
670 if (target
->smp
&& CMD_ARGC
> 0) {
671 struct target_list
*head
;
673 foreach_smp_target(head
, target
->smp_targets
) {
675 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do
,
676 target_to_xtensa(curr
));
682 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do
,
683 target_to_xtensa(target
));
686 COMMAND_HANDLER(esp_xtensa_smp_cmd_smpbreak
)
688 struct target
*target
= get_current_target(CMD_CTX
);
689 if (target
->smp
&& CMD_ARGC
> 0) {
690 struct target_list
*head
;
692 foreach_smp_target(head
, target
->smp_targets
) {
694 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do
, curr
);
700 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do
, target
);
703 COMMAND_HANDLER(esp_xtensa_smp_cmd_mask_interrupts
)
705 struct target
*target
= get_current_target(CMD_CTX
);
706 if (target
->smp
&& CMD_ARGC
> 0) {
707 struct target_list
*head
;
709 foreach_smp_target(head
, target
->smp_targets
) {
711 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do
,
712 target_to_xtensa(curr
));
718 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do
,
719 target_to_xtensa(target
));
722 COMMAND_HANDLER(esp_xtensa_smp_cmd_perfmon_enable
)
724 struct target
*target
= get_current_target(CMD_CTX
);
725 if (target
->smp
&& CMD_ARGC
> 0) {
726 struct target_list
*head
;
728 foreach_smp_target(head
, target
->smp_targets
) {
730 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do
,
731 target_to_xtensa(curr
));
737 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do
,
738 target_to_xtensa(target
));
741 COMMAND_HANDLER(esp_xtensa_smp_cmd_perfmon_dump
)
743 struct target
*target
= get_current_target(CMD_CTX
);
745 struct target_list
*head
;
747 foreach_smp_target(head
, target
->smp_targets
) {
749 LOG_TARGET_INFO(curr
, ":");
750 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do
,
751 target_to_xtensa(curr
));
757 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do
,
758 target_to_xtensa(target
));
761 COMMAND_HANDLER(esp_xtensa_smp_cmd_tracestart
)
763 struct target
*target
= get_current_target(CMD_CTX
);
765 struct target_list
*head
;
767 foreach_smp_target(head
, target
->smp_targets
) {
769 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do
,
770 target_to_xtensa(curr
));
776 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do
,
777 target_to_xtensa(target
));
780 COMMAND_HANDLER(esp_xtensa_smp_cmd_tracestop
)
782 struct target
*target
= get_current_target(CMD_CTX
);
784 struct target_list
*head
;
786 foreach_smp_target(head
, target
->smp_targets
) {
788 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do
,
789 target_to_xtensa(curr
));
795 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do
,
796 target_to_xtensa(target
));
799 COMMAND_HANDLER(esp_xtensa_smp_cmd_tracedump
)
801 struct target
*target
= get_current_target(CMD_CTX
);
803 struct target_list
*head
;
805 int32_t cores_max_id
= 0;
806 /* assume that core IDs are assigned to SMP targets sequentially: 0,1,2... */
807 foreach_smp_target(head
, target
->smp_targets
) {
809 if (cores_max_id
< curr
->coreid
)
810 cores_max_id
= curr
->coreid
;
812 if (CMD_ARGC
< ((uint32_t)cores_max_id
+ 1)) {
814 "Need %d filenames to dump to as output!",
818 foreach_smp_target(head
, target
->smp_targets
) {
820 int ret
= CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do
,
821 target_to_xtensa(curr
), CMD_ARGV
[curr
->coreid
]);
827 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do
,
828 target_to_xtensa(target
), CMD_ARGV
[0]);
831 const struct command_registration esp_xtensa_smp_xtensa_command_handlers
[] = {
834 .handler
= esp_xtensa_smp_cmd_xtdef
,
835 .mode
= COMMAND_CONFIG
,
836 .help
= "Configure Xtensa core type",
841 .handler
= esp_xtensa_smp_cmd_xtopt
,
842 .mode
= COMMAND_CONFIG
,
843 .help
= "Configure Xtensa core option",
844 .usage
= "<name> <value>",
848 .handler
= esp_xtensa_smp_cmd_xtmem
,
849 .mode
= COMMAND_CONFIG
,
850 .help
= "Configure Xtensa memory/cache option",
851 .usage
= "<type> [parameters]",
855 .handler
= esp_xtensa_smp_cmd_xtmmu
,
856 .mode
= COMMAND_CONFIG
,
857 .help
= "Configure Xtensa MMU option",
858 .usage
= "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
862 .handler
= esp_xtensa_smp_cmd_xtmpu
,
863 .mode
= COMMAND_CONFIG
,
864 .help
= "Configure Xtensa MPU option",
865 .usage
= "<num FG seg> <min seg size> <lockable> <executeonly>",
869 .handler
= esp_xtensa_smp_cmd_xtreg
,
870 .mode
= COMMAND_CONFIG
,
871 .help
= "Configure Xtensa register",
872 .usage
= "<regname> <regnum>",
876 .handler
= esp_xtensa_smp_cmd_xtreg
,
877 .mode
= COMMAND_CONFIG
,
878 .help
= "Configure number of Xtensa registers",
879 .usage
= "<numregs>",
883 .handler
= esp_xtensa_smp_cmd_xtregfmt
,
884 .mode
= COMMAND_CONFIG
,
885 .help
= "Configure format of Xtensa register map",
886 .usage
= "<numgregs>",
889 .name
= "set_permissive",
890 .handler
= esp_xtensa_smp_cmd_permissive_mode
,
892 .help
= "When set to 1, enable Xtensa permissive mode (less client-side checks)",
897 .handler
= esp_xtensa_smp_cmd_mask_interrupts
,
899 .help
= "mask Xtensa interrupts at step",
900 .usage
= "['on'|'off']",
904 .handler
= esp_xtensa_smp_cmd_smpbreak
,
906 .help
= "Set the way the CPU chains OCD breaks",
908 "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
911 .name
= "perfmon_enable",
912 .handler
= esp_xtensa_smp_cmd_perfmon_enable
,
913 .mode
= COMMAND_EXEC
,
914 .help
= "Enable and start performance counter",
915 .usage
= "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
918 .name
= "perfmon_dump",
919 .handler
= esp_xtensa_smp_cmd_perfmon_dump
,
920 .mode
= COMMAND_EXEC
,
922 "Dump performance counter value. If no argument specified, dumps all counters.",
923 .usage
= "[counter_id]",
926 .name
= "tracestart",
927 .handler
= esp_xtensa_smp_cmd_tracestart
,
928 .mode
= COMMAND_EXEC
,
930 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
931 .usage
= "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
935 .handler
= esp_xtensa_smp_cmd_tracestop
,
936 .mode
= COMMAND_EXEC
,
937 .help
= "Tracing: Stop current trace as started by the tracestart command",
942 .handler
= esp_xtensa_smp_cmd_tracedump
,
943 .mode
= COMMAND_EXEC
,
944 .help
= "Tracing: Dump trace memory to a files. One file per core.",
945 .usage
= "<outfile1> <outfile2>",
947 COMMAND_REGISTRATION_DONE
950 const struct command_registration esp_xtensa_smp_command_handlers
[] = {
954 .chain
= esp_xtensa_smp_xtensa_command_handlers
,
956 COMMAND_REGISTRATION_DONE
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)