1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
20 #include "xtensa_chip.h"
/* Swap 4-bit Xtensa opcodes and fields */
/* Swap the two nibbles of an 8-bit value (used to re-order opcode fields for BE hosts) */
#define XT_NIBSWAP8(V) \
	((((V) & 0x0F) << 4) \
		| (((V) & 0xF0) >> 4))

/* Reverse the nibble order of a 16-bit value */
#define XT_NIBSWAP16(V) \
	((((V) & 0x000F) << 12) \
		| (((V) & 0x00F0) << 4) \
		| (((V) & 0x0F00) >> 4) \
		| (((V) & 0xF000) >> 12))

/* Reverse the nibble order of a 24-bit value (Xtensa narrow instructions are 24-bit) */
#define XT_NIBSWAP24(V) \
	((((V) & 0x00000F) << 20) \
		| (((V) & 0x0000F0) << 12) \
		| (((V) & 0x000F00) << 4) \
		| (((V) & 0x00F000) >> 4) \
		| (((V) & 0x0F0000) >> 12) \
		| (((V) & 0xF00000) >> 20))
/* _XT_INS_FORMAT_*()
 * Instruction formatting converted from little-endian inputs
 * and shifted to the MSB-side of DIR for BE systems.
 * NOTE(review): the little-endian ':' arms of these ternaries were missing
 * (truncated source) and have been restored per the LE Xtensa encodings.
 */
#define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((SR) & 0xFF) << 8)) << 8 \
		: (OPCODE) \
			| (((SR) & 0xFF) << 8) \
			| (((T) & 0x0F) << 4))

#define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
			| (((R) & 0x0F) << 8)) << 8 \
		: (OPCODE) \
			| (((ST) & 0xFF) << 4) \
			| (((R) & 0x0F) << 12))

#define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
	(XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
			| (((T) & 0x0F) << 8) \
			| (((S) & 0x0F) << 4) \
			| ((IMM4) & 0x0F)) << 16 \
		: (OPCODE) \
			| (((T) & 0x0F) << 4) \
			| (((S) & 0x0F) << 8) \
			| (((IMM4) & 0x0F) << 12))

#define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((S) & 0x0F) << 12) \
			| (((R) & 0x0F) << 8) \
			| ((IMM8) & 0xFF)) << 8 \
		: (OPCODE) \
			| (((IMM8) & 0xFF) << 16) \
			| (((R) & 0x0F) << 12) \
			| (((S) & 0x0F) << 8) \
			| (((T) & 0x0F) << 4))

#define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((S) & 0x0F) << 12) \
			| (((R) & 0x0F) << 8)) << 8 \
			| ((IMM4) & 0x0F) \
		: (OPCODE) \
			| (((IMM4) & 0x0F) << 20) \
			| (((R) & 0x0F) << 12) \
			| (((S) & 0x0F) << 8) \
			| (((T) & 0x0F) << 4))
/* Xtensa processor instruction opcodes */
/* "Return From Debug Operation" to Normal */
#define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
/* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
#define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)

/* Load to DDR register, increase addr register */
#define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
/* Store from DDR register, increase addr register */
#define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))

/* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
#define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
/* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
#define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
/* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
#define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)

/* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
#define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
/* Store 16-bit to A(S)+2*IMM8 from A(T) */
#define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
/* Store 8-bit to A(S)+IMM8 from A(T) */
#define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)

/* Cache Instructions */
#define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
#define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
#define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
#define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)

/* Control Instructions */
#define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
#define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))

/* Read Special Register */
#define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
/* Write Special Register */
#define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
/* Swap Special Register */
#define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)

/* Rotate Window by (-8..7) */
#define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))

/* Read User Register */
#define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
/* Write User Register */
#define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)

/* Read Floating-Point Register */
#define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
/* Write Floating-Point Register */
#define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)

#define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
#define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
#define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)

#define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
#define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
#define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)

#define XT_WATCHPOINTS_NUM_MAX 2

/* Special register number macro for DDR, PS, WB, A3, A4 registers.
 * These get used a lot so making a shortcut is useful.
 */
#define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
#define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
#define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
#define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
#define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)

#define XT_PS_REG_NUM (0xe6U)
#define XT_EPS_REG_NUM_BASE (0xc0U)	/* (EPS2 - 2), for adding DBGLEVEL */
#define XT_EPC_REG_NUM_BASE (0xb0U)	/* (EPC1 - 1), for adding DBGLEVEL */
#define XT_PC_REG_NUM_VIRTUAL (0xffU)	/* Marker for computing PC (EPC[DBGLEVEL]) */
#define XT_PC_DBREG_NUM_BASE (0x20U)	/* External (i.e., GDB) access */

#define XT_SW_BREAKPOINTS_MAX_NUM 32
#define XT_HW_IBREAK_MAX_NUM 2
#define XT_HW_DBREAK_MAX_NUM 2
181 struct xtensa_reg_desc xtensa_regs
[XT_NUM_REGS
] = {
182 XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL
, XT_REG_SPECIAL
, 0),
183 XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL
, 0),
184 XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL
, 0),
185 XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL
, 0),
186 XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL
, 0),
187 XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL
, 0),
188 XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL
, 0),
189 XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL
, 0),
190 XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL
, 0),
191 XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL
, 0),
192 XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL
, 0),
193 XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL
, 0),
194 XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL
, 0),
195 XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL
, 0),
196 XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL
, 0),
197 XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL
, 0),
198 XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL
, 0),
199 XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL
, 0),
200 XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL
, 0),
201 XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL
, 0),
202 XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL
, 0),
203 XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL
, 0),
204 XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL
, 0),
205 XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL
, 0),
206 XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL
, 0),
207 XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL
, 0),
208 XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL
, 0),
209 XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL
, 0),
210 XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL
, 0),
211 XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL
, 0),
212 XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL
, 0),
213 XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL
, 0),
214 XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL
, 0),
215 XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL
, 0),
216 XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL
, 0),
217 XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL
, 0),
218 XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL
, 0),
219 XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL
, 0),
220 XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL
, 0),
221 XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL
, 0),
222 XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL
, 0),
223 XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL
, 0),
224 XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL
, 0),
225 XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL
, 0),
226 XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL
, 0),
227 XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL
, 0),
228 XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL
, 0),
229 XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL
, 0),
230 XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL
, 0),
231 XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL
, 0),
232 XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL
, 0),
233 XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL
, 0),
234 XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL
, 0),
235 XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL
, 0),
236 XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL
, 0),
237 XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL
, 0),
238 XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL
, 0),
239 XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL
, 0),
240 XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL
, 0),
241 XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL
, 0),
242 XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL
, 0),
243 XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL
, 0),
244 XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL
, 0),
245 XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL
, 0),
246 XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL
, 0),
247 XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL
, 0),
248 XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL
, 0),
249 XT_MK_REG_DESC("ps", XT_PS_REG_NUM
, XT_REG_SPECIAL
, 0), /* PS (not mapped through EPS[]) */
250 XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL
, 0),
251 XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG
, XT_REGF_NOREAD
),
252 XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL
, 0),
253 XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL
, 0),
254 XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL
, 0),
255 XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL
, 0),
256 XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL
, 0),
257 XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL
, 0),
258 XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL
, 0),
259 XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL
, 0),
260 XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL
, 0),
261 XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL
, 0),
262 XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL
, 0),
264 /* WARNING: For these registers, regnum points to the
265 * index of the corresponding ARx registers, NOT to
266 * the processor register number! */
267 XT_MK_REG_DESC("a0", XT_REG_IDX_AR0
, XT_REG_RELGEN
, 0),
268 XT_MK_REG_DESC("a1", XT_REG_IDX_AR1
, XT_REG_RELGEN
, 0),
269 XT_MK_REG_DESC("a2", XT_REG_IDX_AR2
, XT_REG_RELGEN
, 0),
270 XT_MK_REG_DESC("a3", XT_REG_IDX_AR3
, XT_REG_RELGEN
, 0),
271 XT_MK_REG_DESC("a4", XT_REG_IDX_AR4
, XT_REG_RELGEN
, 0),
272 XT_MK_REG_DESC("a5", XT_REG_IDX_AR5
, XT_REG_RELGEN
, 0),
273 XT_MK_REG_DESC("a6", XT_REG_IDX_AR6
, XT_REG_RELGEN
, 0),
274 XT_MK_REG_DESC("a7", XT_REG_IDX_AR7
, XT_REG_RELGEN
, 0),
275 XT_MK_REG_DESC("a8", XT_REG_IDX_AR8
, XT_REG_RELGEN
, 0),
276 XT_MK_REG_DESC("a9", XT_REG_IDX_AR9
, XT_REG_RELGEN
, 0),
277 XT_MK_REG_DESC("a10", XT_REG_IDX_AR10
, XT_REG_RELGEN
, 0),
278 XT_MK_REG_DESC("a11", XT_REG_IDX_AR11
, XT_REG_RELGEN
, 0),
279 XT_MK_REG_DESC("a12", XT_REG_IDX_AR12
, XT_REG_RELGEN
, 0),
280 XT_MK_REG_DESC("a13", XT_REG_IDX_AR13
, XT_REG_RELGEN
, 0),
281 XT_MK_REG_DESC("a14", XT_REG_IDX_AR14
, XT_REG_RELGEN
, 0),
282 XT_MK_REG_DESC("a15", XT_REG_IDX_AR15
, XT_REG_RELGEN
, 0),
/**
 * Types of memory used at xtensa target.
 * NOTE(review): members after IROM were truncated in the source; restored from
 * the switch in xtensa_get_mem_config() and the XTENSA_MEM_REGS_NUM loop bound.
 */
enum xtensa_mem_region_type {
	XTENSA_MEM_REG_IROM = 0x0,
	XTENSA_MEM_REG_IRAM,
	XTENSA_MEM_REG_DROM,
	XTENSA_MEM_REG_DRAM,
	XTENSA_MEM_REG_SRAM,
	XTENSA_MEM_REG_SROM,
	XTENSA_MEM_REGS_NUM
};
298 /* Register definition as union for list allocation */
299 union xtensa_reg_val_u
{
300 xtensa_reg_val_t val
;
304 static const struct xtensa_keyval_info_s xt_qerr
[XT_QERR_NUM
] = {
305 { .chrval
= "E00", .intval
= ERROR_FAIL
},
306 { .chrval
= "E01", .intval
= ERROR_FAIL
},
307 { .chrval
= "E02", .intval
= ERROR_COMMAND_ARGUMENT_INVALID
},
308 { .chrval
= "E03", .intval
= ERROR_FAIL
},
/* Set to true for extra debug logging */
/* (const by default; flip at build time when chasing low-level DM issues) */
static const bool xtensa_extra_debug_log;
315 * Gets a config for the specific mem type
317 static inline const struct xtensa_local_mem_config
*xtensa_get_mem_config(
318 struct xtensa
*xtensa
,
319 enum xtensa_mem_region_type type
)
322 case XTENSA_MEM_REG_IROM
:
323 return &xtensa
->core_config
->irom
;
324 case XTENSA_MEM_REG_IRAM
:
325 return &xtensa
->core_config
->iram
;
326 case XTENSA_MEM_REG_DROM
:
327 return &xtensa
->core_config
->drom
;
328 case XTENSA_MEM_REG_DRAM
:
329 return &xtensa
->core_config
->dram
;
330 case XTENSA_MEM_REG_SRAM
:
331 return &xtensa
->core_config
->sram
;
332 case XTENSA_MEM_REG_SROM
:
333 return &xtensa
->core_config
->srom
;
340 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
341 * for a given address
342 * Returns NULL if nothing found
344 static inline const struct xtensa_local_mem_region_config
*xtensa_memory_region_find(
345 const struct xtensa_local_mem_config
*mem
,
346 target_addr_t address
)
348 for (unsigned int i
= 0; i
< mem
->count
; i
++) {
349 const struct xtensa_local_mem_region_config
*region
= &mem
->regions
[i
];
350 if (address
>= region
->base
&& address
< (region
->base
+ region
->size
))
357 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
358 * for a given address
359 * Returns NULL if nothing found
361 static inline const struct xtensa_local_mem_region_config
*xtensa_target_memory_region_find(
362 struct xtensa
*xtensa
,
363 target_addr_t address
)
365 const struct xtensa_local_mem_region_config
*result
;
366 const struct xtensa_local_mem_config
*mcgf
;
367 for (unsigned int mtype
= 0; mtype
< XTENSA_MEM_REGS_NUM
; mtype
++) {
368 mcgf
= xtensa_get_mem_config(xtensa
, mtype
);
369 result
= xtensa_memory_region_find(mcgf
, address
);
376 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config
*cache
,
377 const struct xtensa_local_mem_config
*mem
,
378 target_addr_t address
)
382 return xtensa_memory_region_find(mem
, address
);
385 static inline bool xtensa_is_icacheable(struct xtensa
*xtensa
, target_addr_t address
)
387 return xtensa_is_cacheable(&xtensa
->core_config
->icache
, &xtensa
->core_config
->iram
, address
) ||
388 xtensa_is_cacheable(&xtensa
->core_config
->icache
, &xtensa
->core_config
->irom
, address
) ||
389 xtensa_is_cacheable(&xtensa
->core_config
->icache
, &xtensa
->core_config
->sram
, address
) ||
390 xtensa_is_cacheable(&xtensa
->core_config
->icache
, &xtensa
->core_config
->srom
, address
);
393 static inline bool xtensa_is_dcacheable(struct xtensa
*xtensa
, target_addr_t address
)
395 return xtensa_is_cacheable(&xtensa
->core_config
->dcache
, &xtensa
->core_config
->dram
, address
) ||
396 xtensa_is_cacheable(&xtensa
->core_config
->dcache
, &xtensa
->core_config
->drom
, address
) ||
397 xtensa_is_cacheable(&xtensa
->core_config
->dcache
, &xtensa
->core_config
->sram
, address
) ||
398 xtensa_is_cacheable(&xtensa
->core_config
->dcache
, &xtensa
->core_config
->srom
, address
);
401 static int xtensa_core_reg_get(struct reg
*reg
)
403 /* We don't need this because we read all registers on halt anyway. */
404 struct xtensa
*xtensa
= (struct xtensa
*)reg
->arch_info
;
405 struct target
*target
= xtensa
->target
;
407 if (target
->state
!= TARGET_HALTED
)
408 return ERROR_TARGET_NOT_HALTED
;
410 if (strncmp(reg
->name
, "?0x", 3) == 0) {
411 unsigned int regnum
= strtoul(reg
->name
+ 1, 0, 0);
412 LOG_WARNING("Read unknown register 0x%04x ignored", regnum
);
415 return ERROR_COMMAND_ARGUMENT_INVALID
;
420 static int xtensa_core_reg_set(struct reg
*reg
, uint8_t *buf
)
422 struct xtensa
*xtensa
= (struct xtensa
*)reg
->arch_info
;
423 struct target
*target
= xtensa
->target
;
425 assert(reg
->size
<= 64 && "up to 64-bit regs are supported only!");
426 if (target
->state
!= TARGET_HALTED
)
427 return ERROR_TARGET_NOT_HALTED
;
430 if (strncmp(reg
->name
, "?0x", 3) == 0) {
431 unsigned int regnum
= strtoul(reg
->name
+ 1, 0, 0);
432 LOG_WARNING("Write unknown register 0x%04x ignored", regnum
);
435 return ERROR_COMMAND_ARGUMENT_INVALID
;
438 buf_cpy(buf
, reg
->value
, reg
->size
);
440 if (xtensa
->core_config
->windowed
) {
441 /* If the user updates a potential scratch register, track for conflicts */
442 for (enum xtensa_ar_scratch_set_e s
= 0; s
< XT_AR_SCRATCH_NUM
; s
++) {
443 if (strcmp(reg
->name
, xtensa
->scratch_ars
[s
].chrval
) == 0) {
444 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32
"] set from gdb", reg
->name
,
445 buf_get_u32(reg
->value
, 0, 32));
446 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
447 xtensa
->scratch_ars
[XT_AR_SCRATCH_AR3
].chrval
,
448 xtensa
->scratch_ars
[XT_AR_SCRATCH_AR4
].chrval
);
449 xtensa
->scratch_ars
[s
].intval
= true;
460 static const struct reg_arch_type xtensa_reg_type
= {
461 .get
= xtensa_core_reg_get
,
462 .set
= xtensa_core_reg_set
,
465 /* Convert a register index that's indexed relative to windowbase, to the real address. */
466 static enum xtensa_reg_id
xtensa_windowbase_offset_to_canonical(struct xtensa
*xtensa
,
467 enum xtensa_reg_id reg_idx
,
471 if (reg_idx
>= XT_REG_IDX_AR0
&& reg_idx
<= XT_REG_IDX_ARLAST
) {
472 idx
= reg_idx
- XT_REG_IDX_AR0
;
473 } else if (reg_idx
>= XT_REG_IDX_A0
&& reg_idx
<= XT_REG_IDX_A15
) {
474 idx
= reg_idx
- XT_REG_IDX_A0
;
476 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx
);
479 return ((idx
+ windowbase
* 4) & (xtensa
->core_config
->aregs_num
- 1)) + XT_REG_IDX_AR0
;
482 static enum xtensa_reg_id
xtensa_canonical_to_windowbase_offset(struct xtensa
*xtensa
,
483 enum xtensa_reg_id reg_idx
,
486 return xtensa_windowbase_offset_to_canonical(xtensa
, reg_idx
, -windowbase
);
489 static void xtensa_mark_register_dirty(struct xtensa
*xtensa
, enum xtensa_reg_id reg_idx
)
491 struct reg
*reg_list
= xtensa
->core_cache
->reg_list
;
492 reg_list
[reg_idx
].dirty
= true;
495 static void xtensa_queue_exec_ins(struct xtensa
*xtensa
, uint32_t ins
)
497 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DIR0EXEC
, ins
);
500 static void xtensa_queue_exec_ins_wide(struct xtensa
*xtensa
, uint8_t *ops
, uint8_t oplen
)
502 const int max_oplen
= 64; /* 8 DIRx regs: max width 64B */
503 if ((oplen
> 0) && (oplen
<= max_oplen
)) {
504 uint8_t ops_padded
[max_oplen
];
505 memcpy(ops_padded
, ops
, oplen
);
506 memset(ops_padded
+ oplen
, 0, max_oplen
- oplen
);
507 unsigned int oplenw
= DIV_ROUND_UP(oplen
, sizeof(uint32_t));
508 for (int32_t i
= oplenw
- 1; i
> 0; i
--)
509 xtensa_queue_dbg_reg_write(xtensa
,
511 target_buffer_get_u32(xtensa
->target
, &ops_padded
[sizeof(uint32_t)*i
]));
512 /* Write DIR0EXEC last */
513 xtensa_queue_dbg_reg_write(xtensa
,
515 target_buffer_get_u32(xtensa
->target
, &ops_padded
[0]));
519 static int xtensa_queue_pwr_reg_write(struct xtensa
*xtensa
, unsigned int reg
, uint32_t data
)
521 struct xtensa_debug_module
*dm
= &xtensa
->dbg_mod
;
522 return dm
->pwr_ops
->queue_reg_write(dm
, reg
, data
);
525 /* NOTE: Assumes A3 has already been saved */
526 static int xtensa_window_state_save(struct target
*target
, uint32_t *woe
)
528 struct xtensa
*xtensa
= target_to_xtensa(target
);
532 if (xtensa
->core_config
->windowed
) {
533 /* Save PS (LX) and disable window overflow exceptions prior to AR save */
534 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_PS
, XT_REG_A3
));
535 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
536 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, woe_buf
);
537 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
538 if (res
!= ERROR_OK
) {
539 LOG_ERROR("Failed to read PS (%d)!", res
);
542 xtensa_core_status_check(target
);
543 *woe
= buf_get_u32(woe_buf
, 0, 32);
544 woe_dis
= *woe
& ~XT_PS_WOE_MSK
;
545 LOG_DEBUG("Clearing PS.WOE (0x%08" PRIx32
" -> 0x%08" PRIx32
")", *woe
, woe_dis
);
546 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, woe_dis
);
547 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
548 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_PS
, XT_REG_A3
));
553 /* NOTE: Assumes A3 has already been saved */
554 static void xtensa_window_state_restore(struct target
*target
, uint32_t woe
)
556 struct xtensa
*xtensa
= target_to_xtensa(target
);
557 if (xtensa
->core_config
->windowed
) {
558 /* Restore window overflow exception state */
559 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, woe
);
560 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
561 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_PS
, XT_REG_A3
));
562 LOG_DEBUG("Restored PS.WOE (0x%08" PRIx32
")", woe
);
566 static bool xtensa_reg_is_readable(int flags
, int cpenable
)
568 if (flags
& XT_REGF_NOREAD
)
570 if ((flags
& XT_REGF_COPROC0
) && (cpenable
& BIT(0)) == 0)
575 static bool xtensa_scratch_regs_fixup(struct xtensa
*xtensa
, struct reg
*reg_list
, int i
, int j
, int a_idx
, int ar_idx
)
577 int a_name
= (a_idx
== XT_AR_SCRATCH_A3
) ? 3 : 4;
578 if (xtensa
->scratch_ars
[a_idx
].intval
&& !xtensa
->scratch_ars
[ar_idx
].intval
) {
579 LOG_DEBUG("AR conflict: a%d -> ar%d", a_name
, j
- XT_REG_IDX_AR0
);
580 memcpy(reg_list
[j
].value
, reg_list
[i
].value
, sizeof(xtensa_reg_val_t
));
582 LOG_DEBUG("AR conflict: ar%d -> a%d", j
- XT_REG_IDX_AR0
, a_name
);
583 memcpy(reg_list
[i
].value
, reg_list
[j
].value
, sizeof(xtensa_reg_val_t
));
585 return xtensa
->scratch_ars
[a_idx
].intval
&& xtensa
->scratch_ars
[ar_idx
].intval
;
588 static int xtensa_write_dirty_registers(struct target
*target
)
590 struct xtensa
*xtensa
= target_to_xtensa(target
);
592 xtensa_reg_val_t regval
, windowbase
= 0;
593 bool scratch_reg_dirty
= false, delay_cpenable
= false;
594 struct reg
*reg_list
= xtensa
->core_cache
->reg_list
;
595 unsigned int reg_list_size
= xtensa
->core_cache
->num_regs
;
596 bool preserve_a3
= false;
598 xtensa_reg_val_t a3
= 0, woe
;
600 LOG_TARGET_DEBUG(target
, "start");
602 /* We need to write the dirty registers in the cache list back to the processor.
603 * Start by writing the SFR/user registers. */
604 for (unsigned int i
= 0; i
< reg_list_size
; i
++) {
605 struct xtensa_reg_desc
*rlist
= (i
< XT_NUM_REGS
) ? xtensa_regs
: xtensa
->optregs
;
606 unsigned int ridx
= (i
< XT_NUM_REGS
) ? i
: i
- XT_NUM_REGS
;
607 if (reg_list
[i
].dirty
) {
608 if (rlist
[ridx
].type
== XT_REG_SPECIAL
||
609 rlist
[ridx
].type
== XT_REG_USER
||
610 rlist
[ridx
].type
== XT_REG_FR
) {
611 scratch_reg_dirty
= true;
612 if (i
== XT_REG_IDX_CPENABLE
) {
613 delay_cpenable
= true;
616 regval
= xtensa_reg_get(target
, i
);
617 LOG_TARGET_DEBUG(target
, "Writing back reg %s (%d) val %08" PRIX32
,
621 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, regval
);
622 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
623 if (reg_list
[i
].exist
) {
624 unsigned int reg_num
= rlist
[ridx
].reg_num
;
625 if (rlist
[ridx
].type
== XT_REG_USER
) {
626 xtensa_queue_exec_ins(xtensa
, XT_INS_WUR(xtensa
, reg_num
, XT_REG_A3
));
627 } else if (rlist
[ridx
].type
== XT_REG_FR
) {
628 xtensa_queue_exec_ins(xtensa
, XT_INS_WFR(xtensa
, reg_num
, XT_REG_A3
));
630 if (reg_num
== XT_PC_REG_NUM_VIRTUAL
)
631 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL
634 (XT_EPC_REG_NUM_BASE
+
635 xtensa
->core_config
->debug
.irq_level
);
636 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, reg_num
, XT_REG_A3
));
639 reg_list
[i
].dirty
= false;
643 if (scratch_reg_dirty
)
644 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A3
);
645 if (delay_cpenable
) {
646 regval
= xtensa_reg_get(target
, XT_REG_IDX_CPENABLE
);
647 LOG_TARGET_DEBUG(target
, "Writing back reg cpenable (224) val %08" PRIX32
, regval
);
648 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, regval
);
649 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
650 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
,
651 xtensa_regs
[XT_REG_IDX_CPENABLE
].reg_num
,
653 reg_list
[XT_REG_IDX_CPENABLE
].dirty
= false;
656 preserve_a3
= (xtensa
->core_config
->windowed
);
658 /* Save (windowed) A3 for scratch use */
659 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
660 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, a3_buf
);
661 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
664 xtensa_core_status_check(target
);
665 a3
= buf_get_u32(a3_buf
, 0, 32);
668 if (xtensa
->core_config
->windowed
) {
669 res
= xtensa_window_state_save(target
, &woe
);
672 /* Grab the windowbase, we need it. */
673 windowbase
= xtensa_reg_get(target
, XT_REG_IDX_WINDOWBASE
);
674 /* Check if there are mismatches between the ARx and corresponding Ax registers.
675 * When the user sets a register on a windowed config, xt-gdb may set the ARx
676 * register directly. Thus we take ARx as priority over Ax if both are dirty
677 * and it's unclear if the user set one over the other explicitly.
679 for (unsigned int i
= XT_REG_IDX_A0
; i
<= XT_REG_IDX_A15
; i
++) {
680 unsigned int j
= xtensa_windowbase_offset_to_canonical(xtensa
, i
, windowbase
);
681 if (reg_list
[i
].dirty
&& reg_list
[j
].dirty
) {
682 if (memcmp(reg_list
[i
].value
, reg_list
[j
].value
, sizeof(xtensa_reg_val_t
)) != 0) {
683 bool show_warning
= true;
684 if (i
== XT_REG_IDX_A3
)
685 show_warning
= xtensa_scratch_regs_fixup(xtensa
,
686 reg_list
, i
, j
, XT_AR_SCRATCH_A3
, XT_AR_SCRATCH_AR3
);
687 else if (i
== XT_REG_IDX_A4
)
688 show_warning
= xtensa_scratch_regs_fixup(xtensa
,
689 reg_list
, i
, j
, XT_AR_SCRATCH_A4
, XT_AR_SCRATCH_AR4
);
692 "Warning: Both A%d [0x%08" PRIx32
693 "] as well as its underlying physical register "
694 "(AR%d) [0x%08" PRIx32
"] are dirty and differ in value",
696 buf_get_u32(reg_list
[i
].value
, 0, 32),
698 buf_get_u32(reg_list
[j
].value
, 0, 32));
705 for (unsigned int i
= 0; i
< 16; i
++) {
706 if (reg_list
[XT_REG_IDX_A0
+ i
].dirty
) {
707 regval
= xtensa_reg_get(target
, XT_REG_IDX_A0
+ i
);
708 LOG_TARGET_DEBUG(target
, "Writing back reg %s value %08" PRIX32
", num =%i",
709 xtensa_regs
[XT_REG_IDX_A0
+ i
].name
,
711 xtensa_regs
[XT_REG_IDX_A0
+ i
].reg_num
);
712 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, regval
);
713 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, i
));
714 reg_list
[XT_REG_IDX_A0
+ i
].dirty
= false;
716 /* Avoid stomping A3 during restore at end of function */
722 if (xtensa
->core_config
->windowed
) {
723 /* Now write AR registers */
724 for (unsigned int j
= 0; j
< XT_REG_IDX_ARLAST
; j
+= 16) {
725 /* Write the 16 registers we can see */
726 for (unsigned int i
= 0; i
< 16; i
++) {
727 if (i
+ j
< xtensa
->core_config
->aregs_num
) {
728 enum xtensa_reg_id realadr
=
729 xtensa_windowbase_offset_to_canonical(xtensa
, XT_REG_IDX_AR0
+ i
+ j
,
731 /* Write back any dirty un-windowed registers */
732 if (reg_list
[realadr
].dirty
) {
733 regval
= xtensa_reg_get(target
, realadr
);
736 "Writing back reg %s value %08" PRIX32
", num =%i",
737 xtensa_regs
[realadr
].name
,
739 xtensa_regs
[realadr
].reg_num
);
740 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, regval
);
741 xtensa_queue_exec_ins(xtensa
,
742 XT_INS_RSR(xtensa
, XT_SR_DDR
,
743 xtensa_regs
[XT_REG_IDX_AR0
+ i
].reg_num
));
744 reg_list
[realadr
].dirty
= false;
746 /* Avoid stomping AR during A3 restore at end of function */
751 /*Now rotate the window so we'll see the next 16 registers. The final rotate
752 * will wraparound, */
753 /*leaving us in the state we were. */
754 xtensa_queue_exec_ins(xtensa
, XT_INS_ROTW(xtensa
, 4));
757 xtensa_window_state_restore(target
, woe
);
759 for (enum xtensa_ar_scratch_set_e s
= 0; s
< XT_AR_SCRATCH_NUM
; s
++)
760 xtensa
->scratch_ars
[s
].intval
= false;
764 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, a3
);
765 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
768 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
769 xtensa_core_status_check(target
);
774 static inline bool xtensa_is_stopped(struct target
*target
)
776 struct xtensa
*xtensa
= target_to_xtensa(target
);
777 return xtensa
->dbg_mod
.core_status
.dsr
& OCDDSR_STOPPED
;
780 int xtensa_examine(struct target
*target
)
782 struct xtensa
*xtensa
= target_to_xtensa(target
);
783 unsigned int cmd
= PWRCTL_DEBUGWAKEUP(xtensa
) | PWRCTL_MEMWAKEUP(xtensa
) | PWRCTL_COREWAKEUP(xtensa
);
785 LOG_DEBUG("coreid = %d", target
->coreid
);
787 if (xtensa
->core_config
->core_type
== XT_UNDEF
) {
788 LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
792 xtensa_queue_pwr_reg_write(xtensa
, XDMREG_PWRCTL
, cmd
);
793 xtensa_queue_pwr_reg_write(xtensa
, XDMREG_PWRCTL
, cmd
| PWRCTL_JTAGDEBUGUSE(xtensa
));
794 xtensa_dm_queue_enable(&xtensa
->dbg_mod
);
795 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
796 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
799 if (!xtensa_dm_is_online(&xtensa
->dbg_mod
)) {
800 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32
, xtensa
->dbg_mod
.device_id
);
801 return ERROR_TARGET_FAILURE
;
803 LOG_DEBUG("OCD_ID = %08" PRIx32
, xtensa
->dbg_mod
.device_id
);
804 if (!target_was_examined(target
))
805 target_set_examined(target
);
806 xtensa_smpbreak_write(xtensa
, xtensa
->smp_break
);
810 int xtensa_wakeup(struct target
*target
)
812 struct xtensa
*xtensa
= target_to_xtensa(target
);
813 unsigned int cmd
= PWRCTL_DEBUGWAKEUP(xtensa
) | PWRCTL_MEMWAKEUP(xtensa
) | PWRCTL_COREWAKEUP(xtensa
);
815 if (xtensa
->reset_asserted
)
816 cmd
|= PWRCTL_CORERESET(xtensa
);
817 xtensa_queue_pwr_reg_write(xtensa
, XDMREG_PWRCTL
, cmd
);
818 /* TODO: can we join this with the write above? */
819 xtensa_queue_pwr_reg_write(xtensa
, XDMREG_PWRCTL
, cmd
| PWRCTL_JTAGDEBUGUSE(xtensa
));
820 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
821 return xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
824 int xtensa_smpbreak_write(struct xtensa
*xtensa
, uint32_t set
)
826 uint32_t dsr_data
= 0x00110000;
827 uint32_t clear
= (set
| OCDDCR_ENABLEOCD
) ^
828 (OCDDCR_BREAKINEN
| OCDDCR_BREAKOUTEN
| OCDDCR_RUNSTALLINEN
|
829 OCDDCR_DEBUGMODEOUTEN
| OCDDCR_ENABLEOCD
);
831 LOG_TARGET_DEBUG(xtensa
->target
, "write smpbreak set=0x%" PRIx32
" clear=0x%" PRIx32
, set
, clear
);
832 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DCRSET
, set
| OCDDCR_ENABLEOCD
);
833 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DCRCLR
, clear
);
834 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DSR
, dsr_data
);
835 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
836 return xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
839 int xtensa_smpbreak_set(struct target
*target
, uint32_t set
)
841 struct xtensa
*xtensa
= target_to_xtensa(target
);
844 xtensa
->smp_break
= set
;
845 if (target_was_examined(target
))
846 res
= xtensa_smpbreak_write(xtensa
, xtensa
->smp_break
);
847 LOG_TARGET_DEBUG(target
, "set smpbreak=%" PRIx32
", state=%i", set
, target
->state
);
851 int xtensa_smpbreak_read(struct xtensa
*xtensa
, uint32_t *val
)
853 uint8_t dcr_buf
[sizeof(uint32_t)];
855 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DCRSET
, dcr_buf
);
856 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
857 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
858 *val
= buf_get_u32(dcr_buf
, 0, 32);
863 int xtensa_smpbreak_get(struct target
*target
, uint32_t *val
)
865 struct xtensa
*xtensa
= target_to_xtensa(target
);
866 *val
= xtensa
->smp_break
;
/* Decode a cached register value from the reg's raw value buffer. */
static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
{
	return buf_get_u32(reg->value, 0, 32);
}
/* Encode @value into the reg's raw value buffer (cache only; no HW write). */
static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
{
	buf_set_u32(reg->value, 0, 32, value);
}
881 int xtensa_core_status_check(struct target
*target
)
883 struct xtensa
*xtensa
= target_to_xtensa(target
);
884 int res
, needclear
= 0;
886 xtensa_dm_core_status_read(&xtensa
->dbg_mod
);
887 xtensa_dsr_t dsr
= xtensa_dm_core_status_get(&xtensa
->dbg_mod
);
888 LOG_TARGET_DEBUG(target
, "DSR (%08" PRIX32
")", dsr
);
889 if (dsr
& OCDDSR_EXECBUSY
) {
890 if (!xtensa
->suppress_dsr_errors
)
891 LOG_TARGET_ERROR(target
, "DSR (%08" PRIX32
") indicates target still busy!", dsr
);
894 if (dsr
& OCDDSR_EXECEXCEPTION
) {
895 if (!xtensa
->suppress_dsr_errors
)
896 LOG_TARGET_ERROR(target
,
897 "DSR (%08" PRIX32
") indicates DIR instruction generated an exception!",
901 if (dsr
& OCDDSR_EXECOVERRUN
) {
902 if (!xtensa
->suppress_dsr_errors
)
903 LOG_TARGET_ERROR(target
,
904 "DSR (%08" PRIX32
") indicates DIR instruction generated an overrun!",
909 res
= xtensa_dm_core_status_clear(&xtensa
->dbg_mod
,
910 OCDDSR_EXECEXCEPTION
| OCDDSR_EXECOVERRUN
);
911 if (res
!= ERROR_OK
&& !xtensa
->suppress_dsr_errors
)
912 LOG_TARGET_ERROR(target
, "clearing DSR failed!");
/* Return the cached value of register @reg_id from the core register cache. */
xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
	return xtensa_reg_get_value(reg);
}
925 void xtensa_reg_set(struct target
*target
, enum xtensa_reg_id reg_id
, xtensa_reg_val_t value
)
927 struct xtensa
*xtensa
= target_to_xtensa(target
);
928 struct reg
*reg
= &xtensa
->core_cache
->reg_list
[reg_id
];
929 if (xtensa_reg_get_value(reg
) == value
)
931 xtensa_reg_set_value(reg
, value
);
/* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	/* WINDOWBASE is only meaningful on windowed configs; use 0 otherwise. */
	uint32_t windowbase = (xtensa->core_config->windowed ?
		xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE) : 0);
	/* Map the window-relative Ax index to its canonical ARx index. */
	int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
	xtensa_reg_set(target, a_idx, value);
	xtensa_reg_set(target, ar_idx, value);
}
/* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
uint32_t xtensa_cause_get(struct target *target)
{
	return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
}
/* Clear the cached DEBUGCAUSE and drop its dirty flag so the zero value is
 * not written back to the target on the next register flush. */
void xtensa_cause_clear(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
	xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
}
958 int xtensa_assert_reset(struct target
*target
)
960 struct xtensa
*xtensa
= target_to_xtensa(target
);
962 LOG_TARGET_DEBUG(target
, "target_number=%i, begin", target
->target_number
);
963 xtensa_queue_pwr_reg_write(xtensa
,
965 PWRCTL_JTAGDEBUGUSE(xtensa
) | PWRCTL_DEBUGWAKEUP(xtensa
) | PWRCTL_MEMWAKEUP(xtensa
) |
966 PWRCTL_COREWAKEUP(xtensa
) | PWRCTL_CORERESET(xtensa
));
967 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
968 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
972 /* registers are now invalid */
973 xtensa
->reset_asserted
= true;
974 register_cache_invalidate(xtensa
->core_cache
);
975 target
->state
= TARGET_RESET
;
979 int xtensa_deassert_reset(struct target
*target
)
981 struct xtensa
*xtensa
= target_to_xtensa(target
);
983 LOG_TARGET_DEBUG(target
, "halt=%d", target
->reset_halt
);
984 if (target
->reset_halt
)
985 xtensa_queue_dbg_reg_write(xtensa
,
987 OCDDCR_ENABLEOCD
| OCDDCR_DEBUGINTERRUPT
);
988 xtensa_queue_pwr_reg_write(xtensa
,
990 PWRCTL_JTAGDEBUGUSE(xtensa
) | PWRCTL_DEBUGWAKEUP(xtensa
) | PWRCTL_MEMWAKEUP(xtensa
) |
991 PWRCTL_COREWAKEUP(xtensa
));
992 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
993 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
996 target
->state
= TARGET_RUNNING
;
997 xtensa
->reset_asserted
= false;
/* "Soft" reset-halt: just assert reset here; the halt-on-reset behavior is
 * handled by the deassert path when target->reset_halt is set. */
int xtensa_soft_reset_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "begin");
	return xtensa_assert_reset(target);
}
1007 int xtensa_fetch_all_regs(struct target
*target
)
1009 struct xtensa
*xtensa
= target_to_xtensa(target
);
1010 struct reg
*reg_list
= xtensa
->core_cache
->reg_list
;
1011 unsigned int reg_list_size
= xtensa
->core_cache
->num_regs
;
1012 xtensa_reg_val_t cpenable
= 0, windowbase
= 0, a3
;
1015 bool debug_dsrs
= !xtensa
->regs_fetched
|| LOG_LEVEL_IS(LOG_LVL_DEBUG
);
1017 union xtensa_reg_val_u
*regvals
= calloc(reg_list_size
, sizeof(*regvals
));
1019 LOG_TARGET_ERROR(target
, "unable to allocate memory for regvals!");
1022 union xtensa_reg_val_u
*dsrs
= calloc(reg_list_size
, sizeof(*dsrs
));
1024 LOG_TARGET_ERROR(target
, "unable to allocate memory for dsrs!");
1029 LOG_TARGET_DEBUG(target
, "start");
1031 /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1032 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1033 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, a3_buf
);
1034 int res
= xtensa_window_state_save(target
, &woe
);
1035 if (res
!= ERROR_OK
)
1036 goto xtensa_fetch_all_regs_done
;
1038 /* Assume the CPU has just halted. We now want to fill the register cache with all the
1039 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1040 * in one go, then sort everything out from the regvals variable. */
1042 /* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
1043 for (unsigned int j
= 0; j
< XT_AREGS_NUM_MAX
; j
+= 16) {
1044 /*Grab the 16 registers we can see */
1045 for (unsigned int i
= 0; i
< 16; i
++) {
1046 if (i
+ j
< xtensa
->core_config
->aregs_num
) {
1047 xtensa_queue_exec_ins(xtensa
,
1048 XT_INS_WSR(xtensa
, XT_SR_DDR
, xtensa_regs
[XT_REG_IDX_AR0
+ i
].reg_num
));
1049 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
,
1050 regvals
[XT_REG_IDX_AR0
+ i
+ j
].buf
);
1052 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DSR
,
1053 dsrs
[XT_REG_IDX_AR0
+ i
+ j
].buf
);
1056 if (xtensa
->core_config
->windowed
)
1057 /* Now rotate the window so we'll see the next 16 registers. The final rotate
1058 * will wraparound, */
1059 /* leaving us in the state we were. */
1060 xtensa_queue_exec_ins(xtensa
, XT_INS_ROTW(xtensa
, 4));
1062 xtensa_window_state_restore(target
, woe
);
1064 if (xtensa
->core_config
->coproc
) {
1065 /* As the very first thing after AREGS, go grab CPENABLE */
1066 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, xtensa_regs
[XT_REG_IDX_CPENABLE
].reg_num
, XT_REG_A3
));
1067 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1068 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, regvals
[XT_REG_IDX_CPENABLE
].buf
);
1070 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1071 if (res
!= ERROR_OK
) {
1072 LOG_ERROR("Failed to read ARs (%d)!", res
);
1073 goto xtensa_fetch_all_regs_done
;
1075 xtensa_core_status_check(target
);
1077 a3
= buf_get_u32(a3_buf
, 0, 32);
1079 if (xtensa
->core_config
->coproc
) {
1080 cpenable
= buf_get_u32(regvals
[XT_REG_IDX_CPENABLE
].buf
, 0, 32);
1082 /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1083 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, 0xffffffff);
1084 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1085 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, xtensa_regs
[XT_REG_IDX_CPENABLE
].reg_num
, XT_REG_A3
));
1087 /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1088 LOG_TARGET_DEBUG(target
, "CPENABLE: was 0x%" PRIx32
", all enabled", cpenable
);
1089 xtensa_reg_set(target
, XT_REG_IDX_CPENABLE
, cpenable
);
1091 /* We're now free to use any of A0-A15 as scratch registers
1092 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1093 for (unsigned int i
= 0; i
< reg_list_size
; i
++) {
1094 struct xtensa_reg_desc
*rlist
= (i
< XT_NUM_REGS
) ? xtensa_regs
: xtensa
->optregs
;
1095 unsigned int ridx
= (i
< XT_NUM_REGS
) ? i
: i
- XT_NUM_REGS
;
1096 if (xtensa_reg_is_readable(rlist
[ridx
].flags
, cpenable
) && rlist
[ridx
].exist
) {
1097 bool reg_fetched
= true;
1098 unsigned int reg_num
= rlist
[ridx
].reg_num
;
1099 switch (rlist
[ridx
].type
) {
1101 xtensa_queue_exec_ins(xtensa
, XT_INS_RUR(xtensa
, reg_num
, XT_REG_A3
));
1104 xtensa_queue_exec_ins(xtensa
, XT_INS_RFR(xtensa
, reg_num
, XT_REG_A3
));
1106 case XT_REG_SPECIAL
:
1107 if (reg_num
== XT_PC_REG_NUM_VIRTUAL
) {
1108 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1109 reg_num
= XT_EPC_REG_NUM_BASE
+ xtensa
->core_config
->debug
.irq_level
;
1110 } else if (reg_num
== xtensa_regs
[XT_REG_IDX_PS
].reg_num
) {
1111 /* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
1112 reg_num
= XT_EPS_REG_NUM_BASE
+ xtensa
->core_config
->debug
.irq_level
;
1113 } else if (reg_num
== xtensa_regs
[XT_REG_IDX_CPENABLE
].reg_num
) {
1114 /* CPENABLE already read/updated; don't re-read */
1115 reg_fetched
= false;
1118 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, reg_num
, XT_REG_A3
));
1121 reg_fetched
= false;
1124 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1125 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, regvals
[i
].buf
);
1127 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DSR
, dsrs
[i
].buf
);
1131 /* Ok, send the whole mess to the CPU. */
1132 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1133 if (res
!= ERROR_OK
) {
1134 LOG_ERROR("Failed to fetch AR regs!");
1135 goto xtensa_fetch_all_regs_done
;
1137 xtensa_core_status_check(target
);
1140 /* DSR checking: follows order in which registers are requested. */
1141 for (unsigned int i
= 0; i
< reg_list_size
; i
++) {
1142 struct xtensa_reg_desc
*rlist
= (i
< XT_NUM_REGS
) ? xtensa_regs
: xtensa
->optregs
;
1143 unsigned int ridx
= (i
< XT_NUM_REGS
) ? i
: i
- XT_NUM_REGS
;
1144 if (xtensa_reg_is_readable(rlist
[ridx
].flags
, cpenable
) && rlist
[ridx
].exist
&&
1145 (rlist
[ridx
].type
!= XT_REG_DEBUG
) &&
1146 (rlist
[ridx
].type
!= XT_REG_RELGEN
) &&
1147 (rlist
[ridx
].type
!= XT_REG_TIE
) &&
1148 (rlist
[ridx
].type
!= XT_REG_OTHER
)) {
1149 if (buf_get_u32(dsrs
[i
].buf
, 0, 32) & OCDDSR_EXECEXCEPTION
) {
1150 LOG_ERROR("Exception reading %s!", reg_list
[i
].name
);
1152 goto xtensa_fetch_all_regs_done
;
1158 if (xtensa
->core_config
->windowed
)
1159 /* We need the windowbase to decode the general addresses. */
1160 windowbase
= buf_get_u32(regvals
[XT_REG_IDX_WINDOWBASE
].buf
, 0, 32);
1161 /* Decode the result and update the cache. */
1162 for (unsigned int i
= 0; i
< reg_list_size
; i
++) {
1163 struct xtensa_reg_desc
*rlist
= (i
< XT_NUM_REGS
) ? xtensa_regs
: xtensa
->optregs
;
1164 unsigned int ridx
= (i
< XT_NUM_REGS
) ? i
: i
- XT_NUM_REGS
;
1165 if (xtensa_reg_is_readable(rlist
[ridx
].flags
, cpenable
) && rlist
[ridx
].exist
) {
1166 if ((xtensa
->core_config
->windowed
) && (rlist
[ridx
].type
== XT_REG_GENERAL
)) {
1167 /* The 64-value general register set is read from (windowbase) on down.
1168 * We need to get the real register address by subtracting windowbase and
1169 * wrapping around. */
1170 enum xtensa_reg_id realadr
= xtensa_canonical_to_windowbase_offset(xtensa
, i
,
1172 buf_cpy(regvals
[realadr
].buf
, reg_list
[i
].value
, reg_list
[i
].size
);
1173 } else if (rlist
[ridx
].type
== XT_REG_RELGEN
) {
1174 buf_cpy(regvals
[rlist
[ridx
].reg_num
].buf
, reg_list
[i
].value
, reg_list
[i
].size
);
1175 if (xtensa_extra_debug_log
) {
1176 xtensa_reg_val_t regval
= buf_get_u32(regvals
[rlist
[ridx
].reg_num
].buf
, 0, 32);
1177 LOG_DEBUG("%s = 0x%x", rlist
[ridx
].name
, regval
);
1180 xtensa_reg_val_t regval
= buf_get_u32(regvals
[i
].buf
, 0, 32);
1181 bool is_dirty
= (i
== XT_REG_IDX_CPENABLE
);
1182 if (xtensa_extra_debug_log
)
1183 LOG_INFO("Register %s: 0x%X", reg_list
[i
].name
, regval
);
1184 xtensa_reg_set(target
, i
, regval
);
1185 reg_list
[i
].dirty
= is_dirty
; /*always do this _after_ xtensa_reg_set! */
1187 reg_list
[i
].valid
= true;
1189 if ((rlist
[ridx
].flags
& XT_REGF_MASK
) == XT_REGF_NOREAD
) {
1190 /* Report read-only registers all-zero but valid */
1191 reg_list
[i
].valid
= true;
1192 xtensa_reg_set(target
, i
, 0);
1194 reg_list
[i
].valid
= false;
1199 if (xtensa
->core_config
->windowed
) {
1200 /* We have used A3 as a scratch register.
1201 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and and flag for write-back.
1203 enum xtensa_reg_id ar3_idx
= xtensa_windowbase_offset_to_canonical(xtensa
, XT_REG_IDX_A3
, windowbase
);
1204 xtensa_reg_set(target
, ar3_idx
, a3
);
1205 xtensa_mark_register_dirty(xtensa
, ar3_idx
);
1207 /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1208 sprintf(xtensa
->scratch_ars
[XT_AR_SCRATCH_AR3
].chrval
, "ar%d", ar3_idx
- XT_REG_IDX_AR0
);
1209 enum xtensa_reg_id ar4_idx
= xtensa_windowbase_offset_to_canonical(xtensa
, XT_REG_IDX_A4
, windowbase
);
1210 sprintf(xtensa
->scratch_ars
[XT_AR_SCRATCH_AR4
].chrval
, "ar%d", ar4_idx
- XT_REG_IDX_AR0
);
1211 for (enum xtensa_ar_scratch_set_e s
= 0; s
< XT_AR_SCRATCH_NUM
; s
++)
1212 xtensa
->scratch_ars
[s
].intval
= false;
1215 /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1216 xtensa_reg_set(target
, XT_REG_IDX_A3
, a3
);
1217 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A3
);
1218 xtensa
->regs_fetched
= true;
1219 xtensa_fetch_all_regs_done
:
1225 int xtensa_get_gdb_reg_list(struct target
*target
,
1226 struct reg
**reg_list
[],
1228 enum target_register_class reg_class
)
1230 struct xtensa
*xtensa
= target_to_xtensa(target
);
1231 unsigned int num_regs
;
1233 if (reg_class
== REG_CLASS_GENERAL
) {
1234 if ((xtensa
->genpkt_regs_num
== 0) || !xtensa
->contiguous_regs_list
) {
1235 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class
);
1238 num_regs
= xtensa
->genpkt_regs_num
;
1240 /* Determine whether to return a contiguous or sparse register map */
1241 num_regs
= xtensa
->regmap_contiguous
? xtensa
->total_regs_num
: xtensa
->dbregs_num
;
1244 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class
, num_regs
);
1246 *reg_list
= calloc(num_regs
, sizeof(struct reg
*));
1250 *reg_list_size
= num_regs
;
1251 if (xtensa
->regmap_contiguous
) {
1252 assert((num_regs
<= xtensa
->total_regs_num
) && "contiguous regmap size internal error!");
1253 for (unsigned int i
= 0; i
< num_regs
; i
++)
1254 (*reg_list
)[i
] = xtensa
->contiguous_regs_list
[i
];
1258 for (unsigned int i
= 0; i
< num_regs
; i
++)
1259 (*reg_list
)[i
] = (struct reg
*)&xtensa
->empty_regs
[i
];
1261 for (unsigned int i
= 0; i
< xtensa
->core_cache
->num_regs
&& k
< num_regs
; i
++) {
1262 if (xtensa
->core_cache
->reg_list
[i
].exist
) {
1263 struct xtensa_reg_desc
*rlist
= (i
< XT_NUM_REGS
) ? xtensa_regs
: xtensa
->optregs
;
1264 unsigned int ridx
= (i
< XT_NUM_REGS
) ? i
: i
- XT_NUM_REGS
;
1265 int sparse_idx
= rlist
[ridx
].dbreg_num
;
1266 if (i
== XT_REG_IDX_PS
) {
1267 if (xtensa
->eps_dbglevel_idx
== 0) {
1268 LOG_ERROR("eps_dbglevel_idx not set\n");
1271 (*reg_list
)[sparse_idx
] = &xtensa
->core_cache
->reg_list
[xtensa
->eps_dbglevel_idx
];
1272 if (xtensa_extra_debug_log
)
1273 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1274 sparse_idx
, xtensa
->core_config
->debug
.irq_level
,
1275 xtensa_reg_get_value((*reg_list
)[sparse_idx
]));
1276 } else if (rlist
[ridx
].type
== XT_REG_RELGEN
) {
1277 (*reg_list
)[sparse_idx
- XT_REG_IDX_ARFIRST
] = &xtensa
->core_cache
->reg_list
[i
];
1279 (*reg_list
)[sparse_idx
] = &xtensa
->core_cache
->reg_list
[i
];
1281 if (i
== XT_REG_IDX_PC
)
1282 /* Make a duplicate copy of PC for external access */
1283 (*reg_list
)[XT_PC_DBREG_NUM_BASE
] = &xtensa
->core_cache
->reg_list
[i
];
1289 LOG_ERROR("SPARSE GDB reg list full (size %d)", k
);
1294 int xtensa_mmu_is_enabled(struct target
*target
, int *enabled
)
1296 struct xtensa
*xtensa
= target_to_xtensa(target
);
1297 *enabled
= xtensa
->core_config
->mmu
.itlb_entries_count
> 0 ||
1298 xtensa
->core_config
->mmu
.dtlb_entries_count
> 0;
1302 int xtensa_halt(struct target
*target
)
1304 struct xtensa
*xtensa
= target_to_xtensa(target
);
1306 LOG_TARGET_DEBUG(target
, "start");
1307 if (target
->state
== TARGET_HALTED
) {
1308 LOG_TARGET_DEBUG(target
, "target was already halted");
1311 /* First we have to read dsr and check if the target stopped */
1312 int res
= xtensa_dm_core_status_read(&xtensa
->dbg_mod
);
1313 if (res
!= ERROR_OK
) {
1314 LOG_TARGET_ERROR(target
, "Failed to read core status!");
1317 LOG_TARGET_DEBUG(target
, "Core status 0x%" PRIx32
, xtensa_dm_core_status_get(&xtensa
->dbg_mod
));
1318 if (!xtensa_is_stopped(target
)) {
1319 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DCRSET
, OCDDCR_ENABLEOCD
| OCDDCR_DEBUGINTERRUPT
);
1320 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
1321 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1322 if (res
!= ERROR_OK
)
1323 LOG_TARGET_ERROR(target
, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1329 int xtensa_prepare_resume(struct target
*target
,
1331 target_addr_t address
,
1332 int handle_breakpoints
,
1333 int debug_execution
)
1335 struct xtensa
*xtensa
= target_to_xtensa(target
);
1338 LOG_TARGET_DEBUG(target
,
1339 "current=%d address=" TARGET_ADDR_FMT
", handle_breakpoints=%i, debug_execution=%i)",
1345 if (target
->state
!= TARGET_HALTED
) {
1346 LOG_TARGET_WARNING(target
, "target not halted");
1347 return ERROR_TARGET_NOT_HALTED
;
1350 if (address
&& !current
) {
1351 xtensa_reg_set(target
, XT_REG_IDX_PC
, address
);
1353 uint32_t cause
= xtensa_cause_get(target
);
1354 LOG_TARGET_DEBUG(target
, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1355 cause
, (cause
& DEBUGCAUSE_DB
), (cause
& (DEBUGCAUSE_BI
| DEBUGCAUSE_BN
)));
1356 if (cause
& DEBUGCAUSE_DB
)
1357 /* We stopped due to a watchpoint. We can't just resume executing the
1358 * instruction again because */
1359 /* that would trigger the watchpoint again. To fix this, we single-step,
1360 * which ignores watchpoints. */
1361 xtensa_do_step(target
, current
, address
, handle_breakpoints
);
1362 if (cause
& (DEBUGCAUSE_BI
| DEBUGCAUSE_BN
))
1363 /* We stopped due to a break instruction. We can't just resume executing the
1364 * instruction again because */
1365 /* that would trigger the break again. To fix this, we single-step, which
1367 xtensa_do_step(target
, current
, address
, handle_breakpoints
);
1370 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1371 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1372 for (unsigned int slot
= 0; slot
< xtensa
->core_config
->debug
.ibreaks_num
; slot
++) {
1373 if (xtensa
->hw_brps
[slot
]) {
1374 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1375 xtensa_reg_set(target
, XT_REG_IDX_IBREAKA0
+ slot
, xtensa
->hw_brps
[slot
]->address
);
1379 xtensa_reg_set(target
, XT_REG_IDX_IBREAKENABLE
, bpena
);
1381 /* Here we write all registers to the targets */
1382 int res
= xtensa_write_dirty_registers(target
);
1383 if (res
!= ERROR_OK
)
1384 LOG_TARGET_ERROR(target
, "Failed to write back register cache.");
1388 int xtensa_do_resume(struct target
*target
)
1390 struct xtensa
*xtensa
= target_to_xtensa(target
);
1392 LOG_TARGET_DEBUG(target
, "start");
1394 xtensa_queue_exec_ins(xtensa
, XT_INS_RFDO(xtensa
));
1395 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1396 if (res
!= ERROR_OK
) {
1397 LOG_TARGET_ERROR(target
, "Failed to exec RFDO %d!", res
);
1400 xtensa_core_status_check(target
);
1404 int xtensa_resume(struct target
*target
,
1406 target_addr_t address
,
1407 int handle_breakpoints
,
1408 int debug_execution
)
1410 LOG_TARGET_DEBUG(target
, "start");
1411 int res
= xtensa_prepare_resume(target
, current
, address
, handle_breakpoints
, debug_execution
);
1412 if (res
!= ERROR_OK
) {
1413 LOG_TARGET_ERROR(target
, "Failed to prepare for resume!");
1416 res
= xtensa_do_resume(target
);
1417 if (res
!= ERROR_OK
) {
1418 LOG_TARGET_ERROR(target
, "Failed to resume!");
1422 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1423 if (!debug_execution
)
1424 target
->state
= TARGET_RUNNING
;
1426 target
->state
= TARGET_DEBUG_RUNNING
;
1428 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1433 static bool xtensa_pc_in_winexc(struct target
*target
, target_addr_t pc
)
1435 struct xtensa
*xtensa
= target_to_xtensa(target
);
1436 uint8_t insn_buf
[XT_ISNS_SZ_MAX
];
1437 int err
= xtensa_read_buffer(target
, pc
, sizeof(insn_buf
), insn_buf
);
1438 if (err
!= ERROR_OK
)
1441 xtensa_insn_t insn
= buf_get_u32(insn_buf
, 0, 24);
1442 xtensa_insn_t masked
= insn
& XT_INS_L32E_S32E_MASK(xtensa
);
1443 if (masked
== XT_INS_L32E(xtensa
, 0, 0, 0) || masked
== XT_INS_S32E(xtensa
, 0, 0, 0))
1446 masked
= insn
& XT_INS_RFWO_RFWU_MASK(xtensa
);
1447 if (masked
== XT_INS_RFWO(xtensa
) || masked
== XT_INS_RFWU(xtensa
))
1453 int xtensa_do_step(struct target
*target
, int current
, target_addr_t address
, int handle_breakpoints
)
1455 struct xtensa
*xtensa
= target_to_xtensa(target
);
1457 const uint32_t icount_val
= -2; /* ICOUNT value to load for 1 step */
1458 xtensa_reg_val_t dbreakc
[XT_WATCHPOINTS_NUM_MAX
];
1459 xtensa_reg_val_t icountlvl
, cause
;
1460 xtensa_reg_val_t oldps
, oldpc
, cur_pc
;
1461 bool ps_lowered
= false;
1463 LOG_TARGET_DEBUG(target
, "current=%d, address=" TARGET_ADDR_FMT
", handle_breakpoints=%i",
1464 current
, address
, handle_breakpoints
);
1466 if (target
->state
!= TARGET_HALTED
) {
1467 LOG_TARGET_WARNING(target
, "target not halted");
1468 return ERROR_TARGET_NOT_HALTED
;
1471 if (xtensa
->eps_dbglevel_idx
== 0) {
1472 LOG_ERROR("eps_dbglevel_idx not set\n");
1476 /* Save old ps (EPS[dbglvl] on LX), pc */
1477 oldps
= xtensa_reg_get(target
, xtensa
->eps_dbglevel_idx
);
1478 oldpc
= xtensa_reg_get(target
, XT_REG_IDX_PC
);
1480 cause
= xtensa_cause_get(target
);
1481 LOG_TARGET_DEBUG(target
, "oldps=%" PRIx32
", oldpc=%" PRIx32
" dbg_cause=%" PRIx32
" exc_cause=%" PRIx32
,
1485 xtensa_reg_get(target
, XT_REG_IDX_EXCCAUSE
));
1486 if (handle_breakpoints
&& (cause
& (DEBUGCAUSE_BI
| DEBUGCAUSE_BN
))) {
1487 /* handle hard-coded SW breakpoints (e.g. syscalls) */
1488 LOG_TARGET_DEBUG(target
, "Increment PC to pass break instruction...");
1489 xtensa_cause_clear(target
); /* so we don't recurse into the same routine */
1490 /* pretend that we have stepped */
1491 if (cause
& DEBUGCAUSE_BI
)
1492 xtensa_reg_set(target
, XT_REG_IDX_PC
, oldpc
+ 3); /* PC = PC+3 */
1494 xtensa_reg_set(target
, XT_REG_IDX_PC
, oldpc
+ 2); /* PC = PC+2 */
1498 /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1499 * at which the instructions are to be counted while stepping.
1501 * For example, if we need to step by 2 instructions, and an interrupt occurs
1502 * in between, the processor will trigger the interrupt and halt after the 2nd
1503 * instruction within the interrupt vector and/or handler.
1505 * However, sometimes we don't want the interrupt handlers to be executed at all
1506 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1507 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1508 * code from being counted during stepping. Note that C exception handlers must
1509 * run at level 0 and hence will be counted and stepped into, should one occur.
1511 * TODO: Certain instructions should never be single-stepped and should instead
1512 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1515 if (xtensa
->stepping_isr_mode
== XT_STEPPING_ISR_OFF
) {
1516 if (!xtensa
->core_config
->high_irq
.enabled
) {
1519 "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1522 /* Update ICOUNTLEVEL accordingly */
1523 icountlvl
= MIN((oldps
& 0xF) + 1, xtensa
->core_config
->debug
.irq_level
);
1525 icountlvl
= xtensa
->core_config
->debug
.irq_level
;
1528 if (cause
& DEBUGCAUSE_DB
) {
1529 /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1530 * that would trigger the watchpoint again. To fix this, we remove watchpoints,single-step and
1531 * re-enable the watchpoint. */
1534 "Single-stepping to get past instruction that triggered the watchpoint...");
1535 xtensa_cause_clear(target
); /* so we don't recurse into the same routine */
1536 /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1537 for (unsigned int slot
= 0; slot
< xtensa
->core_config
->debug
.dbreaks_num
; slot
++) {
1538 dbreakc
[slot
] = xtensa_reg_get(target
, XT_REG_IDX_DBREAKC0
+ slot
);
1539 xtensa_reg_set(target
, XT_REG_IDX_DBREAKC0
+ slot
, 0);
1543 if (!handle_breakpoints
&& (cause
& (DEBUGCAUSE_BI
| DEBUGCAUSE_BN
)))
1544 /* handle normal SW breakpoint */
1545 xtensa_cause_clear(target
); /* so we don't recurse into the same routine */
1546 if ((oldps
& 0xf) >= icountlvl
) {
1547 /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1549 uint32_t newps
= (oldps
& ~0xf) | (icountlvl
- 1);
1550 xtensa_reg_set(target
, xtensa
->eps_dbglevel_idx
, newps
);
1551 LOG_TARGET_DEBUG(target
,
1552 "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32
" (was 0x%08" PRIx32
")",
1553 xtensa
->core_cache
->reg_list
[xtensa
->eps_dbglevel_idx
].name
,
1558 xtensa_reg_set(target
, XT_REG_IDX_ICOUNTLEVEL
, icountlvl
);
1559 xtensa_reg_set(target
, XT_REG_IDX_ICOUNT
, icount_val
);
1561 /* Now ICOUNT is set, we can resume as if we were going to run */
1562 res
= xtensa_prepare_resume(target
, current
, address
, 0, 0);
1563 if (res
!= ERROR_OK
) {
1564 LOG_TARGET_ERROR(target
, "Failed to prepare resume for single step");
1567 res
= xtensa_do_resume(target
);
1568 if (res
!= ERROR_OK
) {
1569 LOG_TARGET_ERROR(target
, "Failed to resume after setting up single step");
1573 /* Wait for stepping to complete */
1574 long long start
= timeval_ms();
1575 while (timeval_ms() < start
+ 500) {
1576 /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1577 *until stepping is complete. */
1579 res
= xtensa_dm_core_status_read(&xtensa
->dbg_mod
);
1580 if (res
!= ERROR_OK
) {
1581 LOG_TARGET_ERROR(target
, "Failed to read core status!");
1584 if (xtensa_is_stopped(target
))
1588 LOG_TARGET_DEBUG(target
, "Finish stepping. dsr=0x%08" PRIx32
,
1589 xtensa_dm_core_status_get(&xtensa
->dbg_mod
));
1590 if (!xtensa_is_stopped(target
)) {
1593 "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32
,
1594 xtensa_dm_core_status_get(&xtensa
->dbg_mod
));
1595 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1596 target
->state
= TARGET_RUNNING
;
1600 xtensa_fetch_all_regs(target
);
1601 cur_pc
= xtensa_reg_get(target
, XT_REG_IDX_PC
);
1603 LOG_TARGET_DEBUG(target
,
1604 "cur_ps=%" PRIx32
", cur_pc=%" PRIx32
" dbg_cause=%" PRIx32
" exc_cause=%" PRIx32
,
1605 xtensa_reg_get(target
, XT_REG_IDX_PS
),
1607 xtensa_cause_get(target
),
1608 xtensa_reg_get(target
, XT_REG_IDX_EXCCAUSE
));
1610 /* Do not step into WindowOverflow if ISRs are masked.
1611 If we stop in WindowOverflow at breakpoint with masked ISRs and
1612 try to do a step it will get us out of that handler */
1613 if (xtensa
->core_config
->windowed
&&
1614 xtensa
->stepping_isr_mode
== XT_STEPPING_ISR_OFF
&&
1615 xtensa_pc_in_winexc(target
, cur_pc
)) {
1616 /* isrmask = on, need to step out of the window exception handler */
1617 LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32
, cur_pc
);
1619 address
= oldpc
+ 3;
1623 if (oldpc
== cur_pc
)
1624 LOG_TARGET_WARNING(target
, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32
,
1625 xtensa_dm_core_status_get(&xtensa
->dbg_mod
));
1627 LOG_DEBUG("Stepped from %" PRIX32
" to %" PRIX32
, oldpc
, cur_pc
);
1631 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1632 target
->state
= TARGET_HALTED
;
1633 LOG_DEBUG("Done stepping, PC=%" PRIX32
, cur_pc
);
1635 if (cause
& DEBUGCAUSE_DB
) {
1636 LOG_TARGET_DEBUG(target
, "...Done, re-installing watchpoints.");
1637 /* Restore the DBREAKCx registers */
1638 for (unsigned int slot
= 0; slot
< xtensa
->core_config
->debug
.dbreaks_num
; slot
++)
1639 xtensa_reg_set(target
, XT_REG_IDX_DBREAKC0
+ slot
, dbreakc
[slot
]);
1642 /* Restore int level */
1644 LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32
,
1645 xtensa
->core_cache
->reg_list
[xtensa
->eps_dbglevel_idx
].name
,
1647 xtensa_reg_set(target
, xtensa
->eps_dbglevel_idx
, oldps
);
1650 /* write ICOUNTLEVEL back to zero */
1651 xtensa_reg_set(target
, XT_REG_IDX_ICOUNTLEVEL
, 0);
1652 /* TODO: can we skip writing dirty registers and re-fetching them? */
1653 res
= xtensa_write_dirty_registers(target
);
1654 xtensa_fetch_all_regs(target
);
1658 int xtensa_step(struct target
*target
, int current
, target_addr_t address
, int handle_breakpoints
)
1660 int retval
= xtensa_do_step(target
, current
, address
, handle_breakpoints
);
1661 if (retval
!= ERROR_OK
)
1663 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
1669 * Returns true if two ranges are overlapping
1671 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start
,
1672 target_addr_t r1_end
,
1673 target_addr_t r2_start
,
1674 target_addr_t r2_end
)
1676 if ((r2_start
>= r1_start
) && (r2_start
< r1_end
))
1677 return true; /* r2_start is in r1 region */
1678 if ((r2_end
> r1_start
) && (r2_end
<= r1_end
))
1679 return true; /* r2_end is in r1 region */
1684 * Returns a size of overlapped region of two ranges.
1686 static inline target_addr_t
xtensa_get_overlap_size(target_addr_t r1_start
,
1687 target_addr_t r1_end
,
1688 target_addr_t r2_start
,
1689 target_addr_t r2_end
)
1691 if (xtensa_memory_regions_overlap(r1_start
, r1_end
, r2_start
, r2_end
)) {
1692 target_addr_t ov_start
= r1_start
< r2_start
? r2_start
: r1_start
;
1693 target_addr_t ov_end
= r1_end
> r2_end
? r2_end
: r1_end
;
1694 return ov_end
- ov_start
;
1700 * Check if the address gets to memory regions, and its access mode
1702 static bool xtensa_memory_op_validate_range(struct xtensa
*xtensa
, target_addr_t address
, size_t size
, int access
)
1704 target_addr_t adr_pos
= address
; /* address cursor set to the beginning start */
1705 target_addr_t adr_end
= address
+ size
; /* region end */
1706 target_addr_t overlap_size
;
1707 const struct xtensa_local_mem_region_config
*cm
; /* current mem region */
1709 while (adr_pos
< adr_end
) {
1710 cm
= xtensa_target_memory_region_find(xtensa
, adr_pos
);
1711 if (!cm
) /* address is not belong to anything */
1713 if ((cm
->access
& access
) != access
) /* access check */
1715 overlap_size
= xtensa_get_overlap_size(cm
->base
, (cm
->base
+ cm
->size
), adr_pos
, adr_end
);
1716 assert(overlap_size
!= 0);
1717 adr_pos
+= overlap_size
;
1722 int xtensa_read_memory(struct target
*target
, target_addr_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
)
1724 struct xtensa
*xtensa
= target_to_xtensa(target
);
1725 /* We are going to read memory in 32-bit increments. This may not be what the calling
1726 * function expects, so we may need to allocate a temp buffer and read into that first. */
1727 target_addr_t addrstart_al
= ALIGN_DOWN(address
, 4);
1728 target_addr_t addrend_al
= ALIGN_UP(address
+ size
* count
, 4);
1729 target_addr_t adr
= addrstart_al
;
1731 bool bswap
= xtensa
->target
->endianness
== TARGET_BIG_ENDIAN
;
1733 if (target
->state
!= TARGET_HALTED
) {
1734 LOG_TARGET_WARNING(target
, "target not halted");
1735 return ERROR_TARGET_NOT_HALTED
;
1738 if (!xtensa
->permissive_mode
) {
1739 if (!xtensa_memory_op_validate_range(xtensa
, address
, (size
* count
),
1740 XT_MEM_ACCESS_READ
)) {
1741 LOG_DEBUG("address " TARGET_ADDR_FMT
" not readable", address
);
1746 unsigned int alloc_bytes
= ALIGN_UP(addrend_al
- addrstart_al
, sizeof(uint32_t));
1747 albuff
= calloc(alloc_bytes
, 1);
1749 LOG_TARGET_ERROR(target
, "Out of memory allocating %" PRId64
" bytes!",
1750 addrend_al
- addrstart_al
);
1751 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1754 /* We're going to use A3 here */
1755 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A3
);
1756 /* Write start address to A3 */
1757 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, addrstart_al
);
1758 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1759 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1760 if (xtensa
->probe_lsddr32p
!= 0) {
1761 xtensa_queue_exec_ins(xtensa
, XT_INS_LDDR32P(xtensa
, XT_REG_A3
));
1762 for (unsigned int i
= 0; adr
!= addrend_al
; i
+= sizeof(uint32_t), adr
+= sizeof(uint32_t))
1763 xtensa_queue_dbg_reg_read(xtensa
,
1764 (adr
+ sizeof(uint32_t) == addrend_al
) ? XDMREG_DDR
: XDMREG_DDREXEC
,
1767 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A4
);
1768 for (unsigned int i
= 0; adr
!= addrend_al
; i
+= sizeof(uint32_t), adr
+= sizeof(uint32_t)) {
1769 xtensa_queue_exec_ins(xtensa
, XT_INS_L32I(xtensa
, XT_REG_A3
, XT_REG_A4
, 0));
1770 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A4
));
1771 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, &albuff
[i
]);
1772 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, adr
+ sizeof(uint32_t));
1773 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1776 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1777 if (res
== ERROR_OK
) {
1778 bool prev_suppress
= xtensa
->suppress_dsr_errors
;
1779 xtensa
->suppress_dsr_errors
= true;
1780 res
= xtensa_core_status_check(target
);
1781 if (xtensa
->probe_lsddr32p
== -1)
1782 xtensa
->probe_lsddr32p
= 1;
1783 xtensa
->suppress_dsr_errors
= prev_suppress
;
1785 if (res
!= ERROR_OK
) {
1786 if (xtensa
->probe_lsddr32p
!= 0) {
1787 /* Disable fast memory access instructions and retry before reporting an error */
1788 LOG_TARGET_DEBUG(target
, "Disabling LDDR32.P/SDDR32.P");
1789 xtensa
->probe_lsddr32p
= 0;
1790 res
= xtensa_read_memory(target
, address
, size
, count
, albuff
);
1793 LOG_TARGET_WARNING(target
, "Failed reading %d bytes at address "TARGET_ADDR_FMT
,
1794 count
* size
, address
);
1799 buf_bswap32(albuff
, albuff
, addrend_al
- addrstart_al
);
1800 memcpy(buffer
, albuff
+ (address
& 3), (size
* count
));
1805 int xtensa_read_buffer(struct target
*target
, target_addr_t address
, uint32_t count
, uint8_t *buffer
)
1807 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1808 return xtensa_read_memory(target
, address
, 1, count
, buffer
);
1811 int xtensa_write_memory(struct target
*target
,
1812 target_addr_t address
,
1815 const uint8_t *buffer
)
1817 /* This memory write function can get thrown nigh everything into it, from
1818 * aligned uint32 writes to unaligned uint8ths. The Xtensa memory doesn't always
1819 * accept anything but aligned uint32 writes, though. That is why we convert
1820 * everything into that. */
1821 struct xtensa
*xtensa
= target_to_xtensa(target
);
1822 target_addr_t addrstart_al
= ALIGN_DOWN(address
, 4);
1823 target_addr_t addrend_al
= ALIGN_UP(address
+ size
* count
, 4);
1824 target_addr_t adr
= addrstart_al
;
1827 bool fill_head_tail
= false;
1829 if (target
->state
!= TARGET_HALTED
) {
1830 LOG_TARGET_WARNING(target
, "target not halted");
1831 return ERROR_TARGET_NOT_HALTED
;
1834 if (!xtensa
->permissive_mode
) {
1835 if (!xtensa_memory_op_validate_range(xtensa
, address
, (size
* count
), XT_MEM_ACCESS_WRITE
)) {
1836 LOG_WARNING("address " TARGET_ADDR_FMT
" not writable", address
);
1841 if (size
== 0 || count
== 0 || !buffer
)
1842 return ERROR_COMMAND_SYNTAX_ERROR
;
1844 /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
1845 if (addrstart_al
== address
&& addrend_al
== address
+ (size
* count
)) {
1846 if (xtensa
->target
->endianness
== TARGET_BIG_ENDIAN
)
1847 /* Need a buffer for byte-swapping */
1848 albuff
= malloc(addrend_al
- addrstart_al
);
1850 /* We discard the const here because albuff can also be non-const */
1851 albuff
= (uint8_t *)buffer
;
1853 fill_head_tail
= true;
1854 albuff
= malloc(addrend_al
- addrstart_al
);
1857 LOG_TARGET_ERROR(target
, "Out of memory allocating %" PRId64
" bytes!",
1858 addrend_al
- addrstart_al
);
1859 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1862 /* We're going to use A3 here */
1863 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A3
);
1865 /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
1866 if (fill_head_tail
) {
1867 /* See if we need to read the first and/or last word. */
1869 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, addrstart_al
);
1870 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1871 if (xtensa
->probe_lsddr32p
== 1) {
1872 xtensa_queue_exec_ins(xtensa
, XT_INS_LDDR32P(xtensa
, XT_REG_A3
));
1874 xtensa_queue_exec_ins(xtensa
, XT_INS_L32I(xtensa
, XT_REG_A3
, XT_REG_A3
, 0));
1875 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1877 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, &albuff
[0]);
1879 if ((address
+ (size
* count
)) & 3) {
1880 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, addrend_al
- 4);
1881 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1882 if (xtensa
->probe_lsddr32p
== 1) {
1883 xtensa_queue_exec_ins(xtensa
, XT_INS_LDDR32P(xtensa
, XT_REG_A3
));
1885 xtensa_queue_exec_ins(xtensa
, XT_INS_L32I(xtensa
, XT_REG_A3
, XT_REG_A3
, 0));
1886 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1888 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
,
1889 &albuff
[addrend_al
- addrstart_al
- 4]);
1892 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1893 if (res
!= ERROR_OK
) {
1894 LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res
);
1895 if (albuff
!= buffer
)
1899 xtensa_core_status_check(target
);
1900 if (xtensa
->target
->endianness
== TARGET_BIG_ENDIAN
) {
1901 bool swapped_w0
= false;
1903 buf_bswap32(&albuff
[0], &albuff
[0], 4);
1906 if ((address
+ (size
* count
)) & 3) {
1907 if ((addrend_al
- addrstart_al
- 4 == 0) && swapped_w0
) {
1908 /* Don't double-swap if buffer start/end are within the same word */
1910 buf_bswap32(&albuff
[addrend_al
- addrstart_al
- 4],
1911 &albuff
[addrend_al
- addrstart_al
- 4], 4);
1915 /* Copy data to be written into the aligned buffer (in host-endianness) */
1916 memcpy(&albuff
[address
& 3], buffer
, size
* count
);
1917 /* Now we can write albuff in aligned uint32s. */
1920 if (xtensa
->target
->endianness
== TARGET_BIG_ENDIAN
)
1921 buf_bswap32(albuff
, fill_head_tail
? albuff
: buffer
, addrend_al
- addrstart_al
);
1923 /* Write start address to A3 */
1924 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, addrstart_al
);
1925 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1926 /* Write the aligned buffer */
1927 if (xtensa
->probe_lsddr32p
!= 0) {
1928 for (unsigned int i
= 0; adr
!= addrend_al
; i
+= sizeof(uint32_t), adr
+= sizeof(uint32_t)) {
1930 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, buf_get_u32(&albuff
[i
], 0, 32));
1931 xtensa_queue_exec_ins(xtensa
, XT_INS_SDDR32P(xtensa
, XT_REG_A3
));
1933 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDREXEC
, buf_get_u32(&albuff
[i
], 0, 32));
1937 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A4
);
1938 for (unsigned int i
= 0; adr
!= addrend_al
; i
+= sizeof(uint32_t), adr
+= sizeof(uint32_t)) {
1939 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, buf_get_u32(&albuff
[i
], 0, 32));
1940 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A4
));
1941 xtensa_queue_exec_ins(xtensa
, XT_INS_S32I(xtensa
, XT_REG_A3
, XT_REG_A4
, 0));
1942 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, adr
+ sizeof(uint32_t));
1943 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1947 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1948 if (res
== ERROR_OK
) {
1949 bool prev_suppress
= xtensa
->suppress_dsr_errors
;
1950 xtensa
->suppress_dsr_errors
= true;
1951 res
= xtensa_core_status_check(target
);
1952 if (xtensa
->probe_lsddr32p
== -1)
1953 xtensa
->probe_lsddr32p
= 1;
1954 xtensa
->suppress_dsr_errors
= prev_suppress
;
1956 if (res
!= ERROR_OK
) {
1957 if (xtensa
->probe_lsddr32p
!= 0) {
1958 /* Disable fast memory access instructions and retry before reporting an error */
1959 LOG_TARGET_INFO(target
, "Disabling LDDR32.P/SDDR32.P");
1960 xtensa
->probe_lsddr32p
= 0;
1961 res
= xtensa_write_memory(target
, address
, size
, count
, buffer
);
1963 LOG_TARGET_WARNING(target
, "Failed writing %d bytes at address "TARGET_ADDR_FMT
,
1964 count
* size
, address
);
1967 /* Invalidate ICACHE, writeback DCACHE if present */
1968 uint32_t issue_ihi
= xtensa_is_icacheable(xtensa
, address
);
1969 uint32_t issue_dhwb
= xtensa_is_dcacheable(xtensa
, address
);
1970 if (issue_ihi
|| issue_dhwb
) {
1971 uint32_t ilinesize
= issue_ihi
? xtensa
->core_config
->icache
.line_size
: UINT32_MAX
;
1972 uint32_t dlinesize
= issue_dhwb
? xtensa
->core_config
->dcache
.line_size
: UINT32_MAX
;
1973 uint32_t linesize
= MIN(ilinesize
, dlinesize
);
1977 while ((adr
+ off
) < addrend_al
) {
1979 /* Write start address to A3 */
1980 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, adr
);
1981 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1984 xtensa_queue_exec_ins(xtensa
, XT_INS_IHI(xtensa
, XT_REG_A3
, off
));
1986 xtensa_queue_exec_ins(xtensa
, XT_INS_DHWBI(xtensa
, XT_REG_A3
, off
));
1989 /* IHI, DHWB have 8-bit immediate operands (0..1020) */
1995 /* Execute cache WB/INV instructions */
1996 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1997 xtensa_core_status_check(target
);
1998 if (res
!= ERROR_OK
)
1999 LOG_TARGET_ERROR(target
,
2000 "Error issuing cache writeback/invaldate instruction(s): %d",
2004 if (albuff
!= buffer
)
2010 int xtensa_write_buffer(struct target
*target
, target_addr_t address
, uint32_t count
, const uint8_t *buffer
)
2012 /* xtensa_write_memory can handle everything. Just pass on to that. */
2013 return xtensa_write_memory(target
, address
, 1, count
, buffer
);
2016 int xtensa_checksum_memory(struct target
*target
, target_addr_t address
, uint32_t count
, uint32_t *checksum
)
2018 LOG_WARNING("not implemented yet");
2022 int xtensa_poll(struct target
*target
)
2024 struct xtensa
*xtensa
= target_to_xtensa(target
);
2025 if (xtensa_dm_poll(&xtensa
->dbg_mod
) != ERROR_OK
) {
2026 target
->state
= TARGET_UNKNOWN
;
2027 return ERROR_TARGET_NOT_EXAMINED
;
2030 int res
= xtensa_dm_power_status_read(&xtensa
->dbg_mod
, PWRSTAT_DEBUGWASRESET(xtensa
) |
2031 PWRSTAT_COREWASRESET(xtensa
));
2032 if (xtensa
->dbg_mod
.power_status
.stat
!= xtensa
->dbg_mod
.power_status
.stath
)
2033 LOG_TARGET_DEBUG(target
, "PWRSTAT: read 0x%08" PRIx32
", clear 0x%08lx, reread 0x%08" PRIx32
,
2034 xtensa
->dbg_mod
.power_status
.stat
,
2035 PWRSTAT_DEBUGWASRESET(xtensa
) | PWRSTAT_COREWASRESET(xtensa
),
2036 xtensa
->dbg_mod
.power_status
.stath
);
2037 if (res
!= ERROR_OK
)
2040 if (xtensa_dm_tap_was_reset(&xtensa
->dbg_mod
)) {
2041 LOG_TARGET_INFO(target
, "Debug controller was reset.");
2042 res
= xtensa_smpbreak_write(xtensa
, xtensa
->smp_break
);
2043 if (res
!= ERROR_OK
)
2046 if (xtensa_dm_core_was_reset(&xtensa
->dbg_mod
))
2047 LOG_TARGET_INFO(target
, "Core was reset.");
2048 xtensa_dm_power_status_cache(&xtensa
->dbg_mod
);
2049 /* Enable JTAG, set reset if needed */
2050 res
= xtensa_wakeup(target
);
2051 if (res
!= ERROR_OK
)
2054 uint32_t prev_dsr
= xtensa
->dbg_mod
.core_status
.dsr
;
2055 res
= xtensa_dm_core_status_read(&xtensa
->dbg_mod
);
2056 if (res
!= ERROR_OK
)
2058 if (prev_dsr
!= xtensa
->dbg_mod
.core_status
.dsr
)
2059 LOG_TARGET_DEBUG(target
,
2060 "DSR has changed: was 0x%08" PRIx32
" now 0x%08" PRIx32
,
2062 xtensa
->dbg_mod
.core_status
.dsr
);
2063 if (xtensa
->dbg_mod
.power_status
.stath
& PWRSTAT_COREWASRESET(xtensa
)) {
2064 /* if RESET state is persitent */
2065 target
->state
= TARGET_RESET
;
2066 } else if (!xtensa_dm_is_powered(&xtensa
->dbg_mod
)) {
2067 LOG_TARGET_DEBUG(target
, "not powered 0x%" PRIX32
"%ld",
2068 xtensa
->dbg_mod
.core_status
.dsr
,
2069 xtensa
->dbg_mod
.core_status
.dsr
& OCDDSR_STOPPED
);
2070 target
->state
= TARGET_UNKNOWN
;
2071 if (xtensa
->come_online_probes_num
== 0)
2072 target
->examined
= false;
2074 xtensa
->come_online_probes_num
--;
2075 } else if (xtensa_is_stopped(target
)) {
2076 if (target
->state
!= TARGET_HALTED
) {
2077 enum target_state oldstate
= target
->state
;
2078 target
->state
= TARGET_HALTED
;
2079 /* Examine why the target has been halted */
2080 target
->debug_reason
= DBG_REASON_DBGRQ
;
2081 xtensa_fetch_all_regs(target
);
2082 /* When setting debug reason DEBUGCAUSE events have the following
2083 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2084 /* Watchpoint and breakpoint events at the same time results in special
2085 * debug reason: DBG_REASON_WPTANDBKPT. */
2086 uint32_t halt_cause
= xtensa_cause_get(target
);
2087 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2088 if (halt_cause
& DEBUGCAUSE_IC
)
2089 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
2090 if (halt_cause
& (DEBUGCAUSE_IB
| DEBUGCAUSE_BN
| DEBUGCAUSE_BI
)) {
2091 if (halt_cause
& DEBUGCAUSE_DB
)
2092 target
->debug_reason
= DBG_REASON_WPTANDBKPT
;
2094 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
2095 } else if (halt_cause
& DEBUGCAUSE_DB
) {
2096 target
->debug_reason
= DBG_REASON_WATCHPOINT
;
2098 LOG_TARGET_DEBUG(target
, "Target halted, pc=0x%08" PRIx32
2099 ", debug_reason=%08" PRIx32
", oldstate=%08" PRIx32
,
2100 xtensa_reg_get(target
, XT_REG_IDX_PC
),
2101 target
->debug_reason
,
2103 LOG_TARGET_DEBUG(target
, "Halt reason=0x%08" PRIX32
", exc_cause=%" PRId32
", dsr=0x%08" PRIx32
,
2105 xtensa_reg_get(target
, XT_REG_IDX_EXCCAUSE
),
2106 xtensa
->dbg_mod
.core_status
.dsr
);
2107 xtensa_dm_core_status_clear(
2109 OCDDSR_DEBUGPENDBREAK
| OCDDSR_DEBUGINTBREAK
| OCDDSR_DEBUGPENDTRAX
|
2110 OCDDSR_DEBUGINTTRAX
|
2111 OCDDSR_DEBUGPENDHOST
| OCDDSR_DEBUGINTHOST
);
2114 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2115 if (target
->state
!= TARGET_RUNNING
&& target
->state
!= TARGET_DEBUG_RUNNING
) {
2116 target
->state
= TARGET_RUNNING
;
2117 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2120 if (xtensa
->trace_active
) {
2121 /* Detect if tracing was active but has stopped. */
2122 struct xtensa_trace_status trace_status
;
2123 res
= xtensa_dm_trace_status_read(&xtensa
->dbg_mod
, &trace_status
);
2124 if (res
== ERROR_OK
) {
2125 if (!(trace_status
.stat
& TRAXSTAT_TRACT
)) {
2126 LOG_INFO("Detected end of trace.");
2127 if (trace_status
.stat
& TRAXSTAT_PCMTG
)
2128 LOG_TARGET_INFO(target
, "Trace stop triggered by PC match");
2129 if (trace_status
.stat
& TRAXSTAT_PTITG
)
2130 LOG_TARGET_INFO(target
, "Trace stop triggered by Processor Trigger Input");
2131 if (trace_status
.stat
& TRAXSTAT_CTITG
)
2132 LOG_TARGET_INFO(target
, "Trace stop triggered by Cross-trigger Input");
2133 xtensa
->trace_active
= false;
2140 static int xtensa_update_instruction(struct target
*target
, target_addr_t address
, uint32_t size
, const uint8_t *buffer
)
2142 struct xtensa
*xtensa
= target_to_xtensa(target
);
2143 unsigned int issue_ihi
= xtensa_is_icacheable(xtensa
, address
);
2144 unsigned int issue_dhwbi
= xtensa_is_dcacheable(xtensa
, address
);
2145 uint32_t icache_line_size
= issue_ihi
? xtensa
->core_config
->icache
.line_size
: UINT32_MAX
;
2146 uint32_t dcache_line_size
= issue_dhwbi
? xtensa
->core_config
->dcache
.line_size
: UINT32_MAX
;
2147 unsigned int same_ic_line
= ((address
& (icache_line_size
- 1)) + size
) <= icache_line_size
;
2148 unsigned int same_dc_line
= ((address
& (dcache_line_size
- 1)) + size
) <= dcache_line_size
;
2151 if (size
> icache_line_size
)
2154 if (issue_ihi
|| issue_dhwbi
) {
2155 /* We're going to use A3 here */
2156 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A3
);
2158 /* Write start address to A3 and invalidate */
2159 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, address
);
2160 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
2161 LOG_TARGET_DEBUG(target
, "DHWBI, IHI for address "TARGET_ADDR_FMT
, address
);
2163 xtensa_queue_exec_ins(xtensa
, XT_INS_DHWBI(xtensa
, XT_REG_A3
, 0));
2164 if (!same_dc_line
) {
2165 LOG_TARGET_DEBUG(target
,
2166 "DHWBI second dcache line for address "TARGET_ADDR_FMT
,
2168 xtensa_queue_exec_ins(xtensa
, XT_INS_DHWBI(xtensa
, XT_REG_A3
, 4));
2172 xtensa_queue_exec_ins(xtensa
, XT_INS_IHI(xtensa
, XT_REG_A3
, 0));
2173 if (!same_ic_line
) {
2174 LOG_TARGET_DEBUG(target
,
2175 "IHI second icache line for address "TARGET_ADDR_FMT
,
2177 xtensa_queue_exec_ins(xtensa
, XT_INS_IHI(xtensa
, XT_REG_A3
, 4));
2181 /* Execute invalidate instructions */
2182 ret
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
2183 xtensa_core_status_check(target
);
2184 if (ret
!= ERROR_OK
) {
2185 LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret
);
2190 /* Write new instructions to memory */
2191 ret
= target_write_buffer(target
, address
, size
, buffer
);
2192 if (ret
!= ERROR_OK
) {
2193 LOG_TARGET_ERROR(target
, "Error writing instruction to memory: %d", ret
);
2198 /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2199 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, address
);
2200 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
2201 xtensa_queue_exec_ins(xtensa
, XT_INS_DHWB(xtensa
, XT_REG_A3
, 0));
2202 LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT
, address
);
2203 if (!same_dc_line
) {
2204 LOG_TARGET_DEBUG(target
, "DHWB second dcache line for address "TARGET_ADDR_FMT
, address
+ 4);
2205 xtensa_queue_exec_ins(xtensa
, XT_INS_DHWB(xtensa
, XT_REG_A3
, 4));
2208 /* Execute invalidate instructions */
2209 ret
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
2210 xtensa_core_status_check(target
);
2213 /* TODO: Handle L2 cache if present */
2217 static int xtensa_sw_breakpoint_add(struct target
*target
,
2218 struct breakpoint
*breakpoint
,
2219 struct xtensa_sw_breakpoint
*sw_bp
)
2221 struct xtensa
*xtensa
= target_to_xtensa(target
);
2222 int ret
= target_read_buffer(target
, breakpoint
->address
, XT_ISNS_SZ_MAX
, sw_bp
->insn
);
2223 if (ret
!= ERROR_OK
) {
2224 LOG_TARGET_ERROR(target
, "Failed to read original instruction (%d)!", ret
);
2228 sw_bp
->insn_sz
= MIN(XT_ISNS_SZ_MAX
, breakpoint
->length
);
2229 sw_bp
->oocd_bp
= breakpoint
;
2231 uint32_t break_insn
= sw_bp
->insn_sz
== XT_ISNS_SZ_MAX
? XT_INS_BREAK(xtensa
, 0, 0) : XT_INS_BREAKN(xtensa
, 0);
2233 /* Underlying memory write will convert instruction endianness, don't do that here */
2234 ret
= xtensa_update_instruction(target
, breakpoint
->address
, sw_bp
->insn_sz
, (uint8_t *)&break_insn
);
2235 if (ret
!= ERROR_OK
) {
2236 LOG_TARGET_ERROR(target
, "Failed to write breakpoint instruction (%d)!", ret
);
2243 static int xtensa_sw_breakpoint_remove(struct target
*target
, struct xtensa_sw_breakpoint
*sw_bp
)
2245 int ret
= xtensa_update_instruction(target
, sw_bp
->oocd_bp
->address
, sw_bp
->insn_sz
, sw_bp
->insn
);
2246 if (ret
!= ERROR_OK
) {
2247 LOG_TARGET_ERROR(target
, "Failed to write insn (%d)!", ret
);
2250 sw_bp
->oocd_bp
= NULL
;
2254 int xtensa_breakpoint_add(struct target
*target
, struct breakpoint
*breakpoint
)
2256 struct xtensa
*xtensa
= target_to_xtensa(target
);
2259 if (breakpoint
->type
== BKPT_SOFT
) {
2260 for (slot
= 0; slot
< XT_SW_BREAKPOINTS_MAX_NUM
; slot
++) {
2261 if (!xtensa
->sw_brps
[slot
].oocd_bp
||
2262 xtensa
->sw_brps
[slot
].oocd_bp
== breakpoint
)
2265 if (slot
== XT_SW_BREAKPOINTS_MAX_NUM
) {
2266 LOG_TARGET_WARNING(target
, "No free slots to add SW breakpoint!");
2267 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2269 int ret
= xtensa_sw_breakpoint_add(target
, breakpoint
, &xtensa
->sw_brps
[slot
]);
2270 if (ret
!= ERROR_OK
) {
2271 LOG_TARGET_ERROR(target
, "Failed to add SW breakpoint!");
2274 LOG_TARGET_DEBUG(target
, "placed SW breakpoint %u @ " TARGET_ADDR_FMT
,
2276 breakpoint
->address
);
2280 for (slot
= 0; slot
< xtensa
->core_config
->debug
.ibreaks_num
; slot
++) {
2281 if (!xtensa
->hw_brps
[slot
] || xtensa
->hw_brps
[slot
] == breakpoint
)
2284 if (slot
== xtensa
->core_config
->debug
.ibreaks_num
) {
2285 LOG_TARGET_ERROR(target
, "No free slots to add HW breakpoint!");
2286 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2289 xtensa
->hw_brps
[slot
] = breakpoint
;
2290 /* We will actually write the breakpoints when we resume the target. */
2291 LOG_TARGET_DEBUG(target
, "placed HW breakpoint %u @ " TARGET_ADDR_FMT
,
2293 breakpoint
->address
);
2298 int xtensa_breakpoint_remove(struct target
*target
, struct breakpoint
*breakpoint
)
2300 struct xtensa
*xtensa
= target_to_xtensa(target
);
2303 if (breakpoint
->type
== BKPT_SOFT
) {
2304 for (slot
= 0; slot
< XT_SW_BREAKPOINTS_MAX_NUM
; slot
++) {
2305 if (xtensa
->sw_brps
[slot
].oocd_bp
&& xtensa
->sw_brps
[slot
].oocd_bp
== breakpoint
)
2308 if (slot
== XT_SW_BREAKPOINTS_MAX_NUM
) {
2309 LOG_TARGET_WARNING(target
, "Max SW breakpoints slot reached, slot=%u!", slot
);
2310 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2312 int ret
= xtensa_sw_breakpoint_remove(target
, &xtensa
->sw_brps
[slot
]);
2313 if (ret
!= ERROR_OK
) {
2314 LOG_TARGET_ERROR(target
, "Failed to remove SW breakpoint (%d)!", ret
);
2317 LOG_TARGET_DEBUG(target
, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT
, slot
, breakpoint
->address
);
2321 for (slot
= 0; slot
< xtensa
->core_config
->debug
.ibreaks_num
; slot
++) {
2322 if (xtensa
->hw_brps
[slot
] == breakpoint
)
2325 if (slot
== xtensa
->core_config
->debug
.ibreaks_num
) {
2326 LOG_TARGET_ERROR(target
, "HW breakpoint not found!");
2327 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2329 xtensa
->hw_brps
[slot
] = NULL
;
2330 LOG_TARGET_DEBUG(target
, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT
, slot
, breakpoint
->address
);
2334 int xtensa_watchpoint_add(struct target
*target
, struct watchpoint
*watchpoint
)
2336 struct xtensa
*xtensa
= target_to_xtensa(target
);
2338 xtensa_reg_val_t dbreakcval
;
2340 if (target
->state
!= TARGET_HALTED
) {
2341 LOG_TARGET_WARNING(target
, "target not halted");
2342 return ERROR_TARGET_NOT_HALTED
;
2345 if (watchpoint
->mask
!= ~(uint32_t)0) {
2346 LOG_TARGET_ERROR(target
, "watchpoint value masks not supported");
2347 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2350 for (slot
= 0; slot
< xtensa
->core_config
->debug
.dbreaks_num
; slot
++) {
2351 if (!xtensa
->hw_wps
[slot
] || xtensa
->hw_wps
[slot
] == watchpoint
)
2354 if (slot
== xtensa
->core_config
->debug
.dbreaks_num
) {
2355 LOG_TARGET_WARNING(target
, "No free slots to add HW watchpoint!");
2356 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2359 /* Figure out value for dbreakc5..0
2360 * It's basically 0x3F with an incremental bit removed from the LSB for each extra length power of 2. */
2361 if (watchpoint
->length
< 1 || watchpoint
->length
> 64 ||
2362 !IS_PWR_OF_2(watchpoint
->length
) ||
2363 !IS_ALIGNED(watchpoint
->address
, watchpoint
->length
)) {
2366 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2367 " not supported by hardware.",
2369 watchpoint
->address
);
2370 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2372 dbreakcval
= ALIGN_DOWN(0x3F, watchpoint
->length
);
2374 if (watchpoint
->rw
== WPT_READ
)
2375 dbreakcval
|= BIT(30);
2376 if (watchpoint
->rw
== WPT_WRITE
)
2377 dbreakcval
|= BIT(31);
2378 if (watchpoint
->rw
== WPT_ACCESS
)
2379 dbreakcval
|= BIT(30) | BIT(31);
2381 /* Write DBREAKA[slot] and DBCREAKC[slot] */
2382 xtensa_reg_set(target
, XT_REG_IDX_DBREAKA0
+ slot
, watchpoint
->address
);
2383 xtensa_reg_set(target
, XT_REG_IDX_DBREAKC0
+ slot
, dbreakcval
);
2384 xtensa
->hw_wps
[slot
] = watchpoint
;
2385 LOG_TARGET_DEBUG(target
, "placed HW watchpoint @ " TARGET_ADDR_FMT
,
2386 watchpoint
->address
);
2390 int xtensa_watchpoint_remove(struct target
*target
, struct watchpoint
*watchpoint
)
2392 struct xtensa
*xtensa
= target_to_xtensa(target
);
2395 for (slot
= 0; slot
< xtensa
->core_config
->debug
.dbreaks_num
; slot
++) {
2396 if (xtensa
->hw_wps
[slot
] == watchpoint
)
2399 if (slot
== xtensa
->core_config
->debug
.dbreaks_num
) {
2400 LOG_TARGET_WARNING(target
, "HW watchpoint " TARGET_ADDR_FMT
" not found!", watchpoint
->address
);
2401 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2403 xtensa_reg_set(target
, XT_REG_IDX_DBREAKC0
+ slot
, 0);
2404 xtensa
->hw_wps
[slot
] = NULL
;
2405 LOG_TARGET_DEBUG(target
, "cleared HW watchpoint @ " TARGET_ADDR_FMT
,
2406 watchpoint
->address
);
2410 static int xtensa_build_reg_cache(struct target
*target
)
2412 struct xtensa
*xtensa
= target_to_xtensa(target
);
2413 struct reg_cache
**cache_p
= register_get_last_cache_p(&target
->reg_cache
);
2414 unsigned int last_dbreg_num
= 0;
2416 if (xtensa
->core_regs_num
+ xtensa
->num_optregs
!= xtensa
->total_regs_num
)
2417 LOG_TARGET_WARNING(target
, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2418 xtensa
->core_regs_num
, xtensa
->num_optregs
, xtensa
->total_regs_num
);
2420 struct reg_cache
*reg_cache
= calloc(1, sizeof(struct reg_cache
));
2423 LOG_ERROR("Failed to alloc reg cache!");
2426 reg_cache
->name
= "Xtensa registers";
2427 reg_cache
->next
= NULL
;
2429 unsigned int reg_list_size
= XT_NUM_REGS
+ xtensa
->num_optregs
;
2430 struct reg
*reg_list
= calloc(reg_list_size
, sizeof(struct reg
));
2432 LOG_ERROR("Failed to alloc reg list!");
2435 xtensa
->dbregs_num
= 0;
2436 unsigned int didx
= 0;
2437 for (unsigned int whichlist
= 0; whichlist
< 2; whichlist
++) {
2438 struct xtensa_reg_desc
*rlist
= (whichlist
== 0) ? xtensa_regs
: xtensa
->optregs
;
2439 unsigned int listsize
= (whichlist
== 0) ? XT_NUM_REGS
: xtensa
->num_optregs
;
2440 for (unsigned int i
= 0; i
< listsize
; i
++, didx
++) {
2441 reg_list
[didx
].exist
= rlist
[i
].exist
;
2442 reg_list
[didx
].name
= rlist
[i
].name
;
2443 reg_list
[didx
].size
= 32;
2444 reg_list
[didx
].value
= calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2445 if (!reg_list
[didx
].value
) {
2446 LOG_ERROR("Failed to alloc reg list value!");
2449 reg_list
[didx
].dirty
= false;
2450 reg_list
[didx
].valid
= false;
2451 reg_list
[didx
].type
= &xtensa_reg_type
;
2452 reg_list
[didx
].arch_info
= xtensa
;
2453 if (rlist
[i
].exist
&& (rlist
[i
].dbreg_num
> last_dbreg_num
))
2454 last_dbreg_num
= rlist
[i
].dbreg_num
;
2456 if (xtensa_extra_debug_log
) {
2457 LOG_TARGET_DEBUG(target
,
2458 "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2459 reg_list
[didx
].name
,
2461 reg_list
[didx
].exist
,
2464 rlist
[i
].dbreg_num
);
2469 xtensa
->dbregs_num
= last_dbreg_num
+ 1;
2470 reg_cache
->reg_list
= reg_list
;
2471 reg_cache
->num_regs
= reg_list_size
;
2473 LOG_TARGET_DEBUG(target
, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2474 xtensa
->total_regs_num
, reg_list_size
, xtensa
->dbregs_num
);
2476 /* Construct empty-register list for handling unknown register requests */
2477 xtensa
->empty_regs
= calloc(xtensa
->dbregs_num
, sizeof(struct reg
));
2478 if (!xtensa
->empty_regs
) {
2479 LOG_TARGET_ERROR(target
, "ERROR: Out of memory");
2482 for (unsigned int i
= 0; i
< xtensa
->dbregs_num
; i
++) {
2483 xtensa
->empty_regs
[i
].name
= calloc(8, sizeof(char));
2484 if (!xtensa
->empty_regs
[i
].name
) {
2485 LOG_TARGET_ERROR(target
, "ERROR: Out of memory");
2488 sprintf((char *)xtensa
->empty_regs
[i
].name
, "?0x%04x", i
& 0x0000FFFF);
2489 xtensa
->empty_regs
[i
].size
= 32;
2490 xtensa
->empty_regs
[i
].type
= &xtensa_reg_type
;
2491 xtensa
->empty_regs
[i
].value
= calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2492 if (!xtensa
->empty_regs
[i
].value
) {
2493 LOG_ERROR("Failed to alloc empty reg list value!");
2496 xtensa
->empty_regs
[i
].arch_info
= xtensa
;
2499 /* Construct contiguous register list from contiguous descriptor list */
2500 if (xtensa
->regmap_contiguous
&& xtensa
->contiguous_regs_desc
) {
2501 xtensa
->contiguous_regs_list
= calloc(xtensa
->total_regs_num
, sizeof(struct reg
*));
2502 if (!xtensa
->contiguous_regs_list
) {
2503 LOG_TARGET_ERROR(target
, "ERROR: Out of memory");
2506 for (unsigned int i
= 0; i
< xtensa
->total_regs_num
; i
++) {
2508 for (j
= 0; j
< reg_cache
->num_regs
; j
++) {
2509 if (!strcmp(reg_cache
->reg_list
[j
].name
, xtensa
->contiguous_regs_desc
[i
]->name
)) {
2510 /* Register number field is not filled above.
2511 Here we are assigning the corresponding index from the contiguous reg list.
2512 These indexes are in the same order with gdb g-packet request/response.
2513 Some more changes may be required for sparse reg lists.
2515 reg_cache
->reg_list
[j
].number
= i
;
2516 xtensa
->contiguous_regs_list
[i
] = &(reg_cache
->reg_list
[j
]);
2517 LOG_TARGET_DEBUG(target
,
2518 "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
2519 xtensa
->contiguous_regs_list
[i
]->name
,
2520 xtensa
->contiguous_regs_desc
[i
]->dbreg_num
);
2524 if (j
== reg_cache
->num_regs
)
2525 LOG_TARGET_WARNING(target
, "contiguous register %s not found",
2526 xtensa
->contiguous_regs_desc
[i
]->name
);
2530 xtensa
->algo_context_backup
= calloc(reg_cache
->num_regs
, sizeof(void *));
2531 if (!xtensa
->algo_context_backup
) {
2532 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2535 for (unsigned int i
= 0; i
< reg_cache
->num_regs
; i
++) {
2536 struct reg
*reg
= ®_cache
->reg_list
[i
];
2537 xtensa
->algo_context_backup
[i
] = calloc(1, reg
->size
/ 8);
2538 if (!xtensa
->algo_context_backup
[i
]) {
2539 LOG_ERROR("Failed to alloc mem for algorithm context!");
2543 xtensa
->core_cache
= reg_cache
;
2545 *cache_p
= reg_cache
;
2550 for (unsigned int i
= 0; i
< reg_list_size
; i
++)
2551 free(reg_list
[i
].value
);
2554 if (xtensa
->empty_regs
) {
2555 for (unsigned int i
= 0; i
< xtensa
->dbregs_num
; i
++) {
2556 free((void *)xtensa
->empty_regs
[i
].name
);
2557 free(xtensa
->empty_regs
[i
].value
);
2559 free(xtensa
->empty_regs
);
2561 if (xtensa
->algo_context_backup
) {
2562 for (unsigned int i
= 0; i
< reg_cache
->num_regs
; i
++)
2563 free(xtensa
->algo_context_backup
[i
]);
2564 free(xtensa
->algo_context_backup
);
2571 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target
*target
, char *opstr
)
2573 struct xtensa
*xtensa
= target_to_xtensa(target
);
2574 int32_t status
= ERROR_COMMAND_ARGUMENT_INVALID
;
2575 /* Process op[] list */
2576 while (opstr
&& (*opstr
== ':')) {
2578 unsigned int oplen
= strtoul(opstr
+ 1, &opstr
, 16);
2580 LOG_TARGET_ERROR(target
, "TIE access instruction too long (%d)\n", oplen
);
2584 while ((i
< oplen
) && opstr
&& (*opstr
== ':'))
2585 ops
[i
++] = strtoul(opstr
+ 1, &opstr
, 16);
2587 LOG_TARGET_ERROR(target
, "TIE access instruction malformed (%d)\n", i
);
2592 sprintf(insn_buf
, "Exec %d-byte TIE sequence: ", oplen
);
2593 for (i
= 0; i
< oplen
; i
++)
2594 sprintf(insn_buf
+ strlen(insn_buf
), "%02x:", ops
[i
]);
2595 LOG_TARGET_DEBUG(target
, "%s", insn_buf
);
2596 xtensa_queue_exec_ins_wide(xtensa
, ops
, oplen
); /* Handles endian-swap */
2602 static int xtensa_gdbqc_qxtreg(struct target
*target
, const char *packet
, char **response_p
)
2604 struct xtensa
*xtensa
= target_to_xtensa(target
);
2605 bool iswrite
= (packet
[0] == 'Q');
2606 enum xtensa_qerr_e error
;
2608 /* Read/write TIE register. Requires spill location.
2609 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
2610 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
2612 if (!(xtensa
->spill_buf
)) {
2613 LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
2614 error
= XT_QERR_FAIL
;
2615 goto xtensa_gdbqc_qxtreg_fail
;
2619 uint32_t regnum
= strtoul(packet
+ 6, &delim
, 16);
2620 if (*delim
!= ':') {
2621 LOG_ERROR("Malformed qxtreg packet");
2622 error
= XT_QERR_INVAL
;
2623 goto xtensa_gdbqc_qxtreg_fail
;
2625 uint32_t reglen
= strtoul(delim
+ 1, &delim
, 16);
2626 if (*delim
!= ':') {
2627 LOG_ERROR("Malformed qxtreg packet");
2628 error
= XT_QERR_INVAL
;
2629 goto xtensa_gdbqc_qxtreg_fail
;
2631 uint8_t regbuf
[XT_QUERYPKT_RESP_MAX
];
2632 memset(regbuf
, 0, XT_QUERYPKT_RESP_MAX
);
2633 LOG_DEBUG("TIE reg 0x%08" PRIx32
" %s (%d bytes)", regnum
, iswrite
? "write" : "read", reglen
);
2634 if (reglen
* 2 + 1 > XT_QUERYPKT_RESP_MAX
) {
2635 LOG_ERROR("TIE register too large");
2636 error
= XT_QERR_MEM
;
2637 goto xtensa_gdbqc_qxtreg_fail
;
2640 /* (1) Save spill memory, (1.5) [if write then store value to spill location],
2641 * (2) read old a4, (3) write spill address to a4.
2642 * NOTE: ensure a4 is restored properly by all error handling logic
2644 unsigned int memop_size
= (xtensa
->spill_loc
& 3) ? 1 : 4;
2645 int status
= xtensa_read_memory(target
, xtensa
->spill_loc
, memop_size
,
2646 xtensa
->spill_bytes
/ memop_size
, xtensa
->spill_buf
);
2647 if (status
!= ERROR_OK
) {
2648 LOG_ERROR("Spill memory save");
2649 error
= XT_QERR_MEM
;
2650 goto xtensa_gdbqc_qxtreg_fail
;
2653 /* Extract value and store in spill memory */
2655 char *valbuf
= strchr(delim
, '=');
2656 if (!(valbuf
&& (*valbuf
== '='))) {
2657 LOG_ERROR("Malformed Qxtreg packet");
2658 error
= XT_QERR_INVAL
;
2659 goto xtensa_gdbqc_qxtreg_fail
;
2662 while (*valbuf
&& *(valbuf
+ 1)) {
2663 char bytestr
[3] = { 0, 0, 0 };
2664 strncpy(bytestr
, valbuf
, 2);
2665 regbuf
[b
++] = strtoul(bytestr
, NULL
, 16);
2669 LOG_ERROR("Malformed Qxtreg packet");
2670 error
= XT_QERR_INVAL
;
2671 goto xtensa_gdbqc_qxtreg_fail
;
2673 status
= xtensa_write_memory(target
, xtensa
->spill_loc
, memop_size
,
2674 reglen
/ memop_size
, regbuf
);
2675 if (status
!= ERROR_OK
) {
2676 LOG_ERROR("TIE value store");
2677 error
= XT_QERR_MEM
;
2678 goto xtensa_gdbqc_qxtreg_fail
;
2681 xtensa_reg_val_t orig_a4
= xtensa_reg_get(target
, XT_REG_IDX_A4
);
2682 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, xtensa
->spill_loc
);
2683 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A4
));
2685 int32_t tieop_status
= xtensa_gdbqc_parse_exec_tie_ops(target
, delim
);
2687 /* Restore a4 but not yet spill memory. Execute it all... */
2688 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, orig_a4
);
2689 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A4
));
2690 status
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
2691 if (status
!= ERROR_OK
) {
2692 LOG_TARGET_ERROR(target
, "TIE queue execute: %d\n", status
);
2693 tieop_status
= status
;
2695 status
= xtensa_core_status_check(target
);
2696 if (status
!= ERROR_OK
) {
2697 LOG_TARGET_ERROR(target
, "TIE instr execute: %d\n", status
);
2698 tieop_status
= status
;
2701 if (tieop_status
== ERROR_OK
) {
2703 /* TIE write succeeded; send OK */
2704 strcpy(*response_p
, "OK");
2706 /* TIE read succeeded; copy result from spill memory */
2707 status
= xtensa_read_memory(target
, xtensa
->spill_loc
, memop_size
, reglen
, regbuf
);
2708 if (status
!= ERROR_OK
) {
2709 LOG_TARGET_ERROR(target
, "TIE result read");
2710 tieop_status
= status
;
2713 for (i
= 0; i
< reglen
; i
++)
2714 sprintf(*response_p
+ 2 * i
, "%02x", regbuf
[i
]);
2715 *(*response_p
+ 2 * i
) = '\0';
2716 LOG_TARGET_DEBUG(target
, "TIE response: %s", *response_p
);
2720 /* Restore spill memory first, then report any previous errors */
2721 status
= xtensa_write_memory(target
, xtensa
->spill_loc
, memop_size
,
2722 xtensa
->spill_bytes
/ memop_size
, xtensa
->spill_buf
);
2723 if (status
!= ERROR_OK
) {
2724 LOG_ERROR("Spill memory restore");
2725 error
= XT_QERR_MEM
;
2726 goto xtensa_gdbqc_qxtreg_fail
;
2728 if (tieop_status
!= ERROR_OK
) {
2729 LOG_ERROR("TIE execution");
2730 error
= XT_QERR_FAIL
;
2731 goto xtensa_gdbqc_qxtreg_fail
;
2735 xtensa_gdbqc_qxtreg_fail
:
2736 strcpy(*response_p
, xt_qerr
[error
].chrval
);
2737 return xt_qerr
[error
].intval
;
2740 int xtensa_gdb_query_custom(struct target
*target
, const char *packet
, char **response_p
)
2742 struct xtensa
*xtensa
= target_to_xtensa(target
);
2743 enum xtensa_qerr_e error
;
2744 if (!packet
|| !response_p
) {
2745 LOG_TARGET_ERROR(target
, "invalid parameter: packet %p response_p %p", packet
, response_p
);
2749 *response_p
= xtensa
->qpkt_resp
;
2750 if (strncmp(packet
, "qxtn", 4) == 0) {
2751 strcpy(*response_p
, "OpenOCD");
2753 } else if (strncasecmp(packet
, "qxtgdbversion=", 14) == 0) {
2755 } else if ((strncmp(packet
, "Qxtsis=", 7) == 0) || (strncmp(packet
, "Qxtsds=", 7) == 0)) {
2756 /* Confirm host cache params match core .cfg file */
2757 struct xtensa_cache_config
*cachep
= (packet
[4] == 'i') ?
2758 &xtensa
->core_config
->icache
: &xtensa
->core_config
->dcache
;
2759 unsigned int line_size
= 0, size
= 0, way_count
= 0;
2760 sscanf(&packet
[7], "%x,%x,%x", &line_size
, &size
, &way_count
);
2761 if ((cachep
->line_size
!= line_size
) ||
2762 (cachep
->size
!= size
) ||
2763 (cachep
->way_count
!= way_count
)) {
2764 LOG_TARGET_WARNING(target
, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2765 cachep
== &xtensa
->core_config
->icache
? 'I' : 'D');
2767 strcpy(*response_p
, "OK");
2769 } else if ((strncmp(packet
, "Qxtiram=", 8) == 0) || (strncmp(packet
, "Qxtirom=", 8) == 0)) {
2770 /* Confirm host IRAM/IROM params match core .cfg file */
2771 struct xtensa_local_mem_config
*memp
= (packet
[5] == 'a') ?
2772 &xtensa
->core_config
->iram
: &xtensa
->core_config
->irom
;
2773 unsigned int base
= 0, size
= 0, i
;
2774 char *pkt
= (char *)&packet
[7];
2777 size
= strtoul(pkt
, &pkt
, 16);
2779 base
= strtoul(pkt
, &pkt
, 16);
2780 LOG_TARGET_DEBUG(target
, "memcheck: %dB @ 0x%08x", size
, base
);
2781 for (i
= 0; i
< memp
->count
; i
++) {
2782 if ((memp
->regions
[i
].base
== base
) && (memp
->regions
[i
].size
== size
))
2785 if (i
== memp
->count
) {
2786 LOG_TARGET_WARNING(target
, "%s mismatch; check xtensa-core-XXX.cfg file",
2787 memp
== &xtensa
->core_config
->iram
? "IRAM" : "IROM");
2790 for (i
= 0; i
< 11; i
++) {
2792 strtoul(pkt
, &pkt
, 16);
2794 } while (pkt
&& (pkt
[0] == ','));
2795 strcpy(*response_p
, "OK");
2797 } else if (strncmp(packet
, "Qxtexcmlvl=", 11) == 0) {
2798 /* Confirm host EXCM_LEVEL matches core .cfg file */
2799 unsigned int excm_level
= strtoul(&packet
[11], NULL
, 0);
2800 if (!xtensa
->core_config
->high_irq
.enabled
||
2801 (excm_level
!= xtensa
->core_config
->high_irq
.excm_level
))
2802 LOG_TARGET_WARNING(target
, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
2803 strcpy(*response_p
, "OK");
2805 } else if ((strncmp(packet
, "Qxtl2cs=", 8) == 0) ||
2806 (strncmp(packet
, "Qxtl2ca=", 8) == 0) ||
2807 (strncmp(packet
, "Qxtdensity=", 11) == 0)) {
2808 strcpy(*response_p
, "OK");
2810 } else if (strncmp(packet
, "Qxtspill=", 9) == 0) {
2812 uint32_t spill_loc
= strtoul(packet
+ 9, &delim
, 16);
2813 if (*delim
!= ':') {
2814 LOG_ERROR("Malformed Qxtspill packet");
2815 error
= XT_QERR_INVAL
;
2816 goto xtensa_gdb_query_custom_fail
;
2818 xtensa
->spill_loc
= spill_loc
;
2819 xtensa
->spill_bytes
= strtoul(delim
+ 1, NULL
, 16);
2820 if (xtensa
->spill_buf
)
2821 free(xtensa
->spill_buf
);
2822 xtensa
->spill_buf
= calloc(1, xtensa
->spill_bytes
);
2823 if (!xtensa
->spill_buf
) {
2824 LOG_ERROR("Spill buf alloc");
2825 error
= XT_QERR_MEM
;
2826 goto xtensa_gdb_query_custom_fail
;
2828 LOG_TARGET_DEBUG(target
, "Set spill 0x%08" PRIx32
" (%d)", xtensa
->spill_loc
, xtensa
->spill_bytes
);
2829 strcpy(*response_p
, "OK");
2831 } else if (strncasecmp(packet
, "qxtreg", 6) == 0) {
2832 return xtensa_gdbqc_qxtreg(target
, packet
, response_p
);
2833 } else if ((strncmp(packet
, "qTStatus", 8) == 0) ||
2834 (strncmp(packet
, "qxtftie", 7) == 0) ||
2835 (strncmp(packet
, "qxtstie", 7) == 0)) {
2836 /* Return empty string to indicate trace, TIE wire debug are unsupported */
2837 strcpy(*response_p
, "");
2841 /* Warn for all other queries, but do not return errors */
2842 LOG_TARGET_WARNING(target
, "Unknown target-specific query packet: %s", packet
);
2843 strcpy(*response_p
, "");
2846 xtensa_gdb_query_custom_fail
:
2847 strcpy(*response_p
, xt_qerr
[error
].chrval
);
2848 return xt_qerr
[error
].intval
;
2851 int xtensa_init_arch_info(struct target
*target
, struct xtensa
*xtensa
,
2852 const struct xtensa_debug_module_config
*dm_cfg
)
2854 target
->arch_info
= xtensa
;
2855 xtensa
->common_magic
= XTENSA_COMMON_MAGIC
;
2856 xtensa
->target
= target
;
2857 xtensa
->stepping_isr_mode
= XT_STEPPING_ISR_ON
;
2859 xtensa
->core_config
= calloc(1, sizeof(struct xtensa_config
));
2860 if (!xtensa
->core_config
) {
2861 LOG_ERROR("Xtensa configuration alloc failed\n");
2865 /* Default cache settings are disabled with 1 way */
2866 xtensa
->core_config
->icache
.way_count
= 1;
2867 xtensa
->core_config
->dcache
.way_count
= 1;
2869 /* chrval: AR3/AR4 register names will change with window mapping.
2870 * intval: tracks whether scratch register was set through gdb P packet.
2872 for (enum xtensa_ar_scratch_set_e s
= 0; s
< XT_AR_SCRATCH_NUM
; s
++) {
2873 xtensa
->scratch_ars
[s
].chrval
= calloc(8, sizeof(char));
2874 if (!xtensa
->scratch_ars
[s
].chrval
) {
2875 for (enum xtensa_ar_scratch_set_e f
= 0; f
< s
; f
++)
2876 free(xtensa
->scratch_ars
[f
].chrval
);
2877 free(xtensa
->core_config
);
2878 LOG_ERROR("Xtensa scratch AR alloc failed\n");
2881 xtensa
->scratch_ars
[s
].intval
= false;
2882 sprintf(xtensa
->scratch_ars
[s
].chrval
, "%s%d",
2883 ((s
== XT_AR_SCRATCH_A3
) || (s
== XT_AR_SCRATCH_A4
)) ? "a" : "ar",
2884 ((s
== XT_AR_SCRATCH_A3
) || (s
== XT_AR_SCRATCH_AR3
)) ? 3 : 4);
2887 return xtensa_dm_init(&xtensa
->dbg_mod
, dm_cfg
);
2890 void xtensa_set_permissive_mode(struct target
*target
, bool state
)
2892 target_to_xtensa(target
)->permissive_mode
= state
;
2895 int xtensa_target_init(struct command_context
*cmd_ctx
, struct target
*target
)
2897 struct xtensa
*xtensa
= target_to_xtensa(target
);
2899 xtensa
->come_online_probes_num
= 3;
2900 xtensa
->hw_brps
= calloc(XT_HW_IBREAK_MAX_NUM
, sizeof(struct breakpoint
*));
2901 if (!xtensa
->hw_brps
) {
2902 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2905 xtensa
->hw_wps
= calloc(XT_HW_DBREAK_MAX_NUM
, sizeof(struct watchpoint
*));
2906 if (!xtensa
->hw_wps
) {
2907 free(xtensa
->hw_brps
);
2908 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2911 xtensa
->sw_brps
= calloc(XT_SW_BREAKPOINTS_MAX_NUM
, sizeof(struct xtensa_sw_breakpoint
));
2912 if (!xtensa
->sw_brps
) {
2913 free(xtensa
->hw_brps
);
2914 free(xtensa
->hw_wps
);
2915 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2919 xtensa
->spill_loc
= 0xffffffff;
2920 xtensa
->spill_bytes
= 0;
2921 xtensa
->spill_buf
= NULL
;
2922 xtensa
->probe_lsddr32p
= -1; /* Probe for fast load/store operations */
2924 return xtensa_build_reg_cache(target
);
2927 static void xtensa_free_reg_cache(struct target
*target
)
2929 struct xtensa
*xtensa
= target_to_xtensa(target
);
2930 struct reg_cache
*cache
= xtensa
->core_cache
;
2933 register_unlink_cache(&target
->reg_cache
, cache
);
2934 for (unsigned int i
= 0; i
< cache
->num_regs
; i
++) {
2935 free(xtensa
->algo_context_backup
[i
]);
2936 free(cache
->reg_list
[i
].value
);
2938 free(xtensa
->algo_context_backup
);
2939 free(cache
->reg_list
);
2942 xtensa
->core_cache
= NULL
;
2943 xtensa
->algo_context_backup
= NULL
;
2945 if (xtensa
->empty_regs
) {
2946 for (unsigned int i
= 0; i
< xtensa
->dbregs_num
; i
++) {
2947 free((void *)xtensa
->empty_regs
[i
].name
);
2948 free(xtensa
->empty_regs
[i
].value
);
2950 free(xtensa
->empty_regs
);
2952 xtensa
->empty_regs
= NULL
;
2953 if (xtensa
->optregs
) {
2954 for (unsigned int i
= 0; i
< xtensa
->num_optregs
; i
++)
2955 free((void *)xtensa
->optregs
[i
].name
);
2956 free(xtensa
->optregs
);
2958 xtensa
->optregs
= NULL
;
2961 void xtensa_target_deinit(struct target
*target
)
2963 struct xtensa
*xtensa
= target_to_xtensa(target
);
2967 if (target_was_examined(target
)) {
2968 int ret
= xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DCRCLR
, OCDDCR_ENABLEOCD
);
2969 if (ret
!= ERROR_OK
) {
2970 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2973 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
2974 ret
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
2975 if (ret
!= ERROR_OK
) {
2976 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2979 xtensa_dm_deinit(&xtensa
->dbg_mod
);
2981 xtensa_free_reg_cache(target
);
2982 free(xtensa
->hw_brps
);
2983 free(xtensa
->hw_wps
);
2984 free(xtensa
->sw_brps
);
2985 if (xtensa
->spill_buf
) {
2986 free(xtensa
->spill_buf
);
2987 xtensa
->spill_buf
= NULL
;
2989 for (enum xtensa_ar_scratch_set_e s
= 0; s
< XT_AR_SCRATCH_NUM
; s
++)
2990 free(xtensa
->scratch_ars
[s
].chrval
);
2991 free(xtensa
->core_config
);
/* Architecture name reported to GDB for this target. */
const char *xtensa_get_gdb_arch(struct target *target)
{
	/* NOTE(review): function body was dropped by extraction; "xtensa"
	 * matches the upstream implementation — confirm against full file. */
	return "xtensa";
}
2999 /* exe <ascii-encoded hexadecimal instruction bytes> */
3000 static COMMAND_HELPER(xtensa_cmd_exe_do
, struct target
*target
)
3002 struct xtensa
*xtensa
= target_to_xtensa(target
);
3005 return ERROR_COMMAND_SYNTAX_ERROR
;
3007 /* Process ascii-encoded hex byte string */
3008 const char *parm
= CMD_ARGV
[0];
3009 unsigned int parm_len
= strlen(parm
);
3010 if ((parm_len
>= 64) || (parm_len
& 1)) {
3011 LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len
);
3017 unsigned int oplen
= parm_len
/ 2;
3018 char encoded_byte
[3] = { 0, 0, 0 };
3019 for (unsigned int i
= 0; i
< oplen
; i
++) {
3020 encoded_byte
[0] = *parm
++;
3021 encoded_byte
[1] = *parm
++;
3022 ops
[i
] = strtoul(encoded_byte
, NULL
, 16);
3025 /* GDB must handle state save/restore.
3026 * Flush reg cache in case spill location is in an AR
3027 * Update CPENABLE only for this execution; later restore cached copy
3028 * Keep a copy of exccause in case executed code triggers an exception
3030 int status
= xtensa_write_dirty_registers(target
);
3031 if (status
!= ERROR_OK
) {
3032 LOG_ERROR("%s: Failed to write back register cache.", target_name(target
));
3035 xtensa_reg_val_t exccause
= xtensa_reg_get(target
, XT_REG_IDX_EXCCAUSE
);
3036 xtensa_reg_val_t cpenable
= xtensa_reg_get(target
, XT_REG_IDX_CPENABLE
);
3037 xtensa_reg_val_t a3
= xtensa_reg_get(target
, XT_REG_IDX_A3
);
3038 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, 0xffffffff);
3039 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
3040 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
,
3041 xtensa_regs
[XT_REG_IDX_CPENABLE
].reg_num
, XT_REG_A3
));
3042 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, a3
);
3043 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
3045 /* Queue instruction list and execute everything */
3046 LOG_TARGET_DEBUG(target
, "execute stub: %s", CMD_ARGV
[0]);
3047 xtensa_queue_exec_ins_wide(xtensa
, ops
, oplen
); /* Handles endian-swap */
3048 status
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
3049 if (status
!= ERROR_OK
)
3050 LOG_TARGET_ERROR(target
, "TIE queue execute: %d\n", status
);
3051 status
= xtensa_core_status_check(target
);
3052 if (status
!= ERROR_OK
)
3053 LOG_TARGET_ERROR(target
, "TIE instr execute: %d\n", status
);
3055 /* Reread register cache and restore saved regs after instruction execution */
3056 if (xtensa_fetch_all_regs(target
) != ERROR_OK
)
3057 LOG_TARGET_ERROR(target
, "%s: Failed to fetch register cache (post-exec).", target_name(target
));
3058 xtensa_reg_set(target
, XT_REG_IDX_EXCCAUSE
, exccause
);
3059 xtensa_reg_set(target
, XT_REG_IDX_CPENABLE
, cpenable
);
3063 COMMAND_HANDLER(xtensa_cmd_exe
)
3065 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do
, get_current_target(CMD_CTX
));
3069 COMMAND_HELPER(xtensa_cmd_xtdef_do
, struct xtensa
*xtensa
)
3072 return ERROR_COMMAND_SYNTAX_ERROR
;
3074 const char *core_name
= CMD_ARGV
[0];
3075 if (strcasecmp(core_name
, "LX") == 0) {
3076 xtensa
->core_config
->core_type
= XT_LX
;
3078 LOG_ERROR("xtdef [LX]\n");
3079 return ERROR_COMMAND_SYNTAX_ERROR
;
3084 COMMAND_HANDLER(xtensa_cmd_xtdef
)
3086 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do
,
3087 target_to_xtensa(get_current_target(CMD_CTX
)));
/* Range-check an "xtopt" value; logs an error and returns false when the
 * value lies outside [min..max], true when it is legal.
 */
static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
{
	bool in_range = (val >= min) && (val <= max);

	if (!in_range)
		LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
	return in_range;
}
3099 /* xtopt <name> <value> */
3100 COMMAND_HELPER(xtensa_cmd_xtopt_do
, struct xtensa
*xtensa
)
3103 return ERROR_COMMAND_SYNTAX_ERROR
;
3105 const char *opt_name
= CMD_ARGV
[0];
3106 int opt_val
= strtol(CMD_ARGV
[1], NULL
, 0);
3107 if (strcasecmp(opt_name
, "arnum") == 0) {
3108 if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val
, 0, 64))
3109 return ERROR_COMMAND_ARGUMENT_INVALID
;
3110 xtensa
->core_config
->aregs_num
= opt_val
;
3111 } else if (strcasecmp(opt_name
, "windowed") == 0) {
3112 if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val
, 0, 1))
3113 return ERROR_COMMAND_ARGUMENT_INVALID
;
3114 xtensa
->core_config
->windowed
= opt_val
;
3115 } else if (strcasecmp(opt_name
, "cpenable") == 0) {
3116 if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val
, 0, 1))
3117 return ERROR_COMMAND_ARGUMENT_INVALID
;
3118 xtensa
->core_config
->coproc
= opt_val
;
3119 } else if (strcasecmp(opt_name
, "exceptions") == 0) {
3120 if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val
, 0, 1))
3121 return ERROR_COMMAND_ARGUMENT_INVALID
;
3122 xtensa
->core_config
->exceptions
= opt_val
;
3123 } else if (strcasecmp(opt_name
, "intnum") == 0) {
3124 if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val
, 0, 32))
3125 return ERROR_COMMAND_ARGUMENT_INVALID
;
3126 xtensa
->core_config
->irq
.enabled
= (opt_val
> 0);
3127 xtensa
->core_config
->irq
.irq_num
= opt_val
;
3128 } else if (strcasecmp(opt_name
, "hipriints") == 0) {
3129 if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val
, 0, 1))
3130 return ERROR_COMMAND_ARGUMENT_INVALID
;
3131 xtensa
->core_config
->high_irq
.enabled
= opt_val
;
3132 } else if (strcasecmp(opt_name
, "excmlevel") == 0) {
3133 if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val
, 1, 6))
3134 return ERROR_COMMAND_ARGUMENT_INVALID
;
3135 if (!xtensa
->core_config
->high_irq
.enabled
) {
3136 LOG_ERROR("xtopt excmlevel requires hipriints\n");
3137 return ERROR_COMMAND_ARGUMENT_INVALID
;
3139 xtensa
->core_config
->high_irq
.excm_level
= opt_val
;
3140 } else if (strcasecmp(opt_name
, "intlevels") == 0) {
3141 if (xtensa
->core_config
->core_type
== XT_LX
) {
3142 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val
, 2, 6))
3143 return ERROR_COMMAND_ARGUMENT_INVALID
;
3145 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val
, 1, 255))
3146 return ERROR_COMMAND_ARGUMENT_INVALID
;
3148 if (!xtensa
->core_config
->high_irq
.enabled
) {
3149 LOG_ERROR("xtopt intlevels requires hipriints\n");
3150 return ERROR_COMMAND_ARGUMENT_INVALID
;
3152 xtensa
->core_config
->high_irq
.level_num
= opt_val
;
3153 } else if (strcasecmp(opt_name
, "debuglevel") == 0) {
3154 if (xtensa
->core_config
->core_type
== XT_LX
) {
3155 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val
, 2, 6))
3156 return ERROR_COMMAND_ARGUMENT_INVALID
;
3158 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val
, 0, 0))
3159 return ERROR_COMMAND_ARGUMENT_INVALID
;
3161 xtensa
->core_config
->debug
.enabled
= 1;
3162 xtensa
->core_config
->debug
.irq_level
= opt_val
;
3163 } else if (strcasecmp(opt_name
, "ibreaknum") == 0) {
3164 if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val
, 0, 2))
3165 return ERROR_COMMAND_ARGUMENT_INVALID
;
3166 xtensa
->core_config
->debug
.ibreaks_num
= opt_val
;
3167 } else if (strcasecmp(opt_name
, "dbreaknum") == 0) {
3168 if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val
, 0, 2))
3169 return ERROR_COMMAND_ARGUMENT_INVALID
;
3170 xtensa
->core_config
->debug
.dbreaks_num
= opt_val
;
3171 } else if (strcasecmp(opt_name
, "tracemem") == 0) {
3172 if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val
, 0, 256 * 1024))
3173 return ERROR_COMMAND_ARGUMENT_INVALID
;
3174 xtensa
->core_config
->trace
.mem_sz
= opt_val
;
3175 xtensa
->core_config
->trace
.enabled
= (opt_val
> 0);
3176 } else if (strcasecmp(opt_name
, "tracememrev") == 0) {
3177 if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val
, 0, 1))
3178 return ERROR_COMMAND_ARGUMENT_INVALID
;
3179 xtensa
->core_config
->trace
.reversed_mem_access
= opt_val
;
3180 } else if (strcasecmp(opt_name
, "perfcount") == 0) {
3181 if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val
, 0, 8))
3182 return ERROR_COMMAND_ARGUMENT_INVALID
;
3183 xtensa
->core_config
->debug
.perfcount_num
= opt_val
;
3185 LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV
[0], CMD_ARGV
[1]);
3192 COMMAND_HANDLER(xtensa_cmd_xtopt
)
3194 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do
,
3195 target_to_xtensa(get_current_target(CMD_CTX
)));
3198 /* xtmem <type> [parameters] */
3199 COMMAND_HELPER(xtensa_cmd_xtmem_do
, struct xtensa
*xtensa
)
3201 struct xtensa_cache_config
*cachep
= NULL
;
3202 struct xtensa_local_mem_config
*memp
= NULL
;
3204 bool is_dcache
= false;
3206 if (CMD_ARGC
== 0) {
3207 LOG_ERROR("xtmem <type> [parameters]\n");
3208 return ERROR_COMMAND_SYNTAX_ERROR
;
3211 const char *mem_name
= CMD_ARGV
[0];
3212 if (strcasecmp(mem_name
, "icache") == 0) {
3213 cachep
= &xtensa
->core_config
->icache
;
3214 } else if (strcasecmp(mem_name
, "dcache") == 0) {
3215 cachep
= &xtensa
->core_config
->dcache
;
3217 } else if (strcasecmp(mem_name
, "l2cache") == 0) {
3218 /* TODO: support L2 cache */
3219 } else if (strcasecmp(mem_name
, "l2addr") == 0) {
3220 /* TODO: support L2 cache */
3221 } else if (strcasecmp(mem_name
, "iram") == 0) {
3222 memp
= &xtensa
->core_config
->iram
;
3223 mem_access
= XT_MEM_ACCESS_READ
| XT_MEM_ACCESS_WRITE
;
3224 } else if (strcasecmp(mem_name
, "dram") == 0) {
3225 memp
= &xtensa
->core_config
->dram
;
3226 mem_access
= XT_MEM_ACCESS_READ
| XT_MEM_ACCESS_WRITE
;
3227 } else if (strcasecmp(mem_name
, "sram") == 0) {
3228 memp
= &xtensa
->core_config
->sram
;
3229 mem_access
= XT_MEM_ACCESS_READ
| XT_MEM_ACCESS_WRITE
;
3230 } else if (strcasecmp(mem_name
, "irom") == 0) {
3231 memp
= &xtensa
->core_config
->irom
;
3232 mem_access
= XT_MEM_ACCESS_READ
;
3233 } else if (strcasecmp(mem_name
, "drom") == 0) {
3234 memp
= &xtensa
->core_config
->drom
;
3235 mem_access
= XT_MEM_ACCESS_READ
;
3236 } else if (strcasecmp(mem_name
, "srom") == 0) {
3237 memp
= &xtensa
->core_config
->srom
;
3238 mem_access
= XT_MEM_ACCESS_READ
;
3240 LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3241 return ERROR_COMMAND_ARGUMENT_INVALID
;
3245 if ((CMD_ARGC
!= 4) && (CMD_ARGC
!= 5)) {
3246 LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
3247 return ERROR_COMMAND_SYNTAX_ERROR
;
3249 cachep
->line_size
= strtoul(CMD_ARGV
[1], NULL
, 0);
3250 cachep
->size
= strtoul(CMD_ARGV
[2], NULL
, 0);
3251 cachep
->way_count
= strtoul(CMD_ARGV
[3], NULL
, 0);
3252 cachep
->writeback
= ((CMD_ARGC
== 5) && is_dcache
) ?
3253 strtoul(CMD_ARGV
[4], NULL
, 0) : 0;
3255 if (CMD_ARGC
!= 3) {
3256 LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
3257 return ERROR_COMMAND_SYNTAX_ERROR
;
3259 struct xtensa_local_mem_region_config
*memcfgp
= &memp
->regions
[memp
->count
];
3260 memcfgp
->base
= strtoul(CMD_ARGV
[1], NULL
, 0);
3261 memcfgp
->size
= strtoul(CMD_ARGV
[2], NULL
, 0);
3262 memcfgp
->access
= mem_access
;
3269 COMMAND_HANDLER(xtensa_cmd_xtmem
)
3271 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do
,
3272 target_to_xtensa(get_current_target(CMD_CTX
)));
3275 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
3276 COMMAND_HELPER(xtensa_cmd_xtmpu_do
, struct xtensa
*xtensa
)
3278 if (CMD_ARGC
!= 4) {
3279 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3280 return ERROR_COMMAND_SYNTAX_ERROR
;
3283 unsigned int nfgseg
= strtoul(CMD_ARGV
[0], NULL
, 0);
3284 unsigned int minsegsize
= strtoul(CMD_ARGV
[1], NULL
, 0);
3285 unsigned int lockable
= strtoul(CMD_ARGV
[2], NULL
, 0);
3286 unsigned int execonly
= strtoul(CMD_ARGV
[3], NULL
, 0);
3288 if ((nfgseg
> 32)) {
3289 LOG_ERROR("<nfgseg> must be within [0..32]\n");
3290 return ERROR_COMMAND_ARGUMENT_INVALID
;
3291 } else if (minsegsize
& (minsegsize
- 1)) {
3292 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3293 return ERROR_COMMAND_ARGUMENT_INVALID
;
3294 } else if (lockable
> 1) {
3295 LOG_ERROR("<lockable> must be 0 or 1\n");
3296 return ERROR_COMMAND_ARGUMENT_INVALID
;
3297 } else if (execonly
> 1) {
3298 LOG_ERROR("<execonly> must be 0 or 1\n");
3299 return ERROR_COMMAND_ARGUMENT_INVALID
;
3302 xtensa
->core_config
->mpu
.enabled
= true;
3303 xtensa
->core_config
->mpu
.nfgseg
= nfgseg
;
3304 xtensa
->core_config
->mpu
.minsegsize
= minsegsize
;
3305 xtensa
->core_config
->mpu
.lockable
= lockable
;
3306 xtensa
->core_config
->mpu
.execonly
= execonly
;
3310 COMMAND_HANDLER(xtensa_cmd_xtmpu
)
3312 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do
,
3313 target_to_xtensa(get_current_target(CMD_CTX
)));
3316 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
3317 COMMAND_HELPER(xtensa_cmd_xtmmu_do
, struct xtensa
*xtensa
)
3319 if (CMD_ARGC
!= 2) {
3320 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3321 return ERROR_COMMAND_SYNTAX_ERROR
;
3324 unsigned int nirefillentries
= strtoul(CMD_ARGV
[0], NULL
, 0);
3325 unsigned int ndrefillentries
= strtoul(CMD_ARGV
[1], NULL
, 0);
3326 if ((nirefillentries
!= 16) && (nirefillentries
!= 32)) {
3327 LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3328 return ERROR_COMMAND_ARGUMENT_INVALID
;
3329 } else if ((ndrefillentries
!= 16) && (ndrefillentries
!= 32)) {
3330 LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3331 return ERROR_COMMAND_ARGUMENT_INVALID
;
3334 xtensa
->core_config
->mmu
.enabled
= true;
3335 xtensa
->core_config
->mmu
.itlb_entries_count
= nirefillentries
;
3336 xtensa
->core_config
->mmu
.dtlb_entries_count
= ndrefillentries
;
3340 COMMAND_HANDLER(xtensa_cmd_xtmmu
)
3342 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do
,
3343 target_to_xtensa(get_current_target(CMD_CTX
)));
3347 * xtreg <regname> <regnum> */
3348 COMMAND_HELPER(xtensa_cmd_xtreg_do
, struct xtensa
*xtensa
)
3350 if (CMD_ARGC
== 1) {
3351 int32_t numregs
= strtoul(CMD_ARGV
[0], NULL
, 0);
3352 if ((numregs
<= 0) || (numregs
> UINT16_MAX
)) {
3353 LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs
);
3354 return ERROR_COMMAND_SYNTAX_ERROR
;
3356 if ((xtensa
->genpkt_regs_num
> 0) && (numregs
< (int32_t)xtensa
->genpkt_regs_num
)) {
3357 LOG_ERROR("xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
3358 numregs
, xtensa
->genpkt_regs_num
);
3359 return ERROR_COMMAND_SYNTAX_ERROR
;
3361 xtensa
->total_regs_num
= numregs
;
3362 xtensa
->core_regs_num
= 0;
3363 xtensa
->num_optregs
= 0;
3364 /* A little more memory than required, but saves a second initialization pass */
3365 xtensa
->optregs
= calloc(xtensa
->total_regs_num
, sizeof(struct xtensa_reg_desc
));
3366 if (!xtensa
->optregs
) {
3367 LOG_ERROR("Failed to allocate xtensa->optregs!");
3371 } else if (CMD_ARGC
!= 2) {
3372 return ERROR_COMMAND_SYNTAX_ERROR
;
3375 /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3376 * if general register (g-packet) requests or contiguous register maps are supported */
3377 if (xtensa
->regmap_contiguous
&& !xtensa
->contiguous_regs_desc
) {
3378 xtensa
->contiguous_regs_desc
= calloc(xtensa
->total_regs_num
, sizeof(struct xtensa_reg_desc
*));
3379 if (!xtensa
->contiguous_regs_desc
) {
3380 LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3385 const char *regname
= CMD_ARGV
[0];
3386 unsigned int regnum
= strtoul(CMD_ARGV
[1], NULL
, 0);
3387 if (regnum
> UINT16_MAX
) {
3388 LOG_ERROR("<regnum> must be a 16-bit number");
3389 return ERROR_COMMAND_ARGUMENT_INVALID
;
3392 if ((xtensa
->num_optregs
+ xtensa
->core_regs_num
) >= xtensa
->total_regs_num
) {
3393 if (xtensa
->total_regs_num
)
3394 LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3396 xtensa
->total_regs_num
, xtensa
->core_regs_num
, xtensa
->num_optregs
);
3398 LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
3403 /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3404 struct xtensa_reg_desc
*rptr
= &xtensa
->optregs
[xtensa
->num_optregs
];
3405 bool is_extended_reg
= true;
3407 for (ridx
= 0; ridx
< XT_NUM_REGS
; ridx
++) {
3408 if (strcmp(CMD_ARGV
[0], xtensa_regs
[ridx
].name
) == 0) {
3409 /* Flag core register as defined */
3410 rptr
= &xtensa_regs
[ridx
];
3411 xtensa
->core_regs_num
++;
3412 is_extended_reg
= false;
3418 if (is_extended_reg
) {
3419 /* Register ID, debugger-visible register ID */
3420 rptr
->name
= strdup(CMD_ARGV
[0]);
3421 rptr
->dbreg_num
= regnum
;
3422 rptr
->reg_num
= (regnum
& XT_REG_INDEX_MASK
);
3423 xtensa
->num_optregs
++;
3426 if ((regnum
& XT_REG_GENERAL_MASK
) == XT_REG_GENERAL_VAL
) {
3427 rptr
->type
= XT_REG_GENERAL
;
3428 } else if ((regnum
& XT_REG_USER_MASK
) == XT_REG_USER_VAL
) {
3429 rptr
->type
= XT_REG_USER
;
3430 } else if ((regnum
& XT_REG_FR_MASK
) == XT_REG_FR_VAL
) {
3431 rptr
->type
= XT_REG_FR
;
3432 } else if ((regnum
& XT_REG_SPECIAL_MASK
) == XT_REG_SPECIAL_VAL
) {
3433 rptr
->type
= XT_REG_SPECIAL
;
3434 } else if ((regnum
& XT_REG_RELGEN_MASK
) == XT_REG_RELGEN_VAL
) {
3435 /* WARNING: For these registers, regnum points to the
3436 * index of the corresponding ARx registers, NOT to
3437 * the processor register number! */
3438 rptr
->type
= XT_REG_RELGEN
;
3439 rptr
->reg_num
+= XT_REG_IDX_ARFIRST
;
3440 rptr
->dbreg_num
+= XT_REG_IDX_ARFIRST
;
3441 } else if ((regnum
& XT_REG_TIE_MASK
) != 0) {
3442 rptr
->type
= XT_REG_TIE
;
3444 rptr
->type
= XT_REG_OTHER
;
3447 /* Register flags */
3448 if ((strcmp(rptr
->name
, "mmid") == 0) || (strcmp(rptr
->name
, "eraccess") == 0) ||
3449 (strcmp(rptr
->name
, "ddr") == 0) || (strcmp(rptr
->name
, "intset") == 0) ||
3450 (strcmp(rptr
->name
, "intclear") == 0))
3451 rptr
->flags
= XT_REGF_NOREAD
;
3455 if (rptr
->reg_num
== (XT_EPS_REG_NUM_BASE
+ xtensa
->core_config
->debug
.irq_level
) &&
3456 xtensa
->core_config
->core_type
== XT_LX
&& rptr
->type
== XT_REG_SPECIAL
) {
3457 xtensa
->eps_dbglevel_idx
= XT_NUM_REGS
+ xtensa
->num_optregs
- 1;
3458 LOG_DEBUG("Setting PS (%s) index to %d", rptr
->name
, xtensa
->eps_dbglevel_idx
);
3460 } else if (strcmp(rptr
->name
, "cpenable") == 0) {
3461 xtensa
->core_config
->coproc
= true;
3464 /* Build out list of contiguous registers in specified order */
3465 unsigned int running_reg_count
= xtensa
->num_optregs
+ xtensa
->core_regs_num
;
3466 if (xtensa
->contiguous_regs_desc
) {
3467 assert((running_reg_count
<= xtensa
->total_regs_num
) && "contiguous register address internal error!");
3468 xtensa
->contiguous_regs_desc
[running_reg_count
- 1] = rptr
;
3470 if (xtensa_extra_debug_log
)
3471 LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
3472 is_extended_reg
? "config-specific" : "core",
3473 rptr
->name
, rptr
->dbreg_num
, rptr
->reg_num
, rptr
->type
,
3474 is_extended_reg
? xtensa
->num_optregs
: ridx
,
3475 is_extended_reg
? xtensa
->total_regs_num
: XT_NUM_REGS
);
3479 COMMAND_HANDLER(xtensa_cmd_xtreg
)
3481 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do
,
3482 target_to_xtensa(get_current_target(CMD_CTX
)));
3485 /* xtregfmt <contiguous|sparse> [numgregs] */
3486 COMMAND_HELPER(xtensa_cmd_xtregfmt_do
, struct xtensa
*xtensa
)
3488 if ((CMD_ARGC
== 1) || (CMD_ARGC
== 2)) {
3489 if (!strcasecmp(CMD_ARGV
[0], "sparse")) {
3491 } else if (!strcasecmp(CMD_ARGV
[0], "contiguous")) {
3492 xtensa
->regmap_contiguous
= true;
3493 if (CMD_ARGC
== 2) {
3494 unsigned int numgregs
= strtoul(CMD_ARGV
[1], NULL
, 0);
3495 if ((numgregs
<= 0) ||
3496 ((numgregs
> xtensa
->total_regs_num
) &&
3497 (xtensa
->total_regs_num
> 0))) {
3498 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3499 numgregs
, xtensa
->total_regs_num
);
3500 return ERROR_COMMAND_SYNTAX_ERROR
;
3502 xtensa
->genpkt_regs_num
= numgregs
;
3507 return ERROR_COMMAND_SYNTAX_ERROR
;
3510 COMMAND_HANDLER(xtensa_cmd_xtregfmt
)
3512 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do
,
3513 target_to_xtensa(get_current_target(CMD_CTX
)));
3516 COMMAND_HELPER(xtensa_cmd_permissive_mode_do
, struct xtensa
*xtensa
)
3518 return CALL_COMMAND_HANDLER(handle_command_parse_bool
,
3519 &xtensa
->permissive_mode
, "xtensa permissive mode");
3522 COMMAND_HANDLER(xtensa_cmd_permissive_mode
)
3524 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do
,
3525 target_to_xtensa(get_current_target(CMD_CTX
)));
3528 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
3529 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do
, struct xtensa
*xtensa
)
3531 struct xtensa_perfmon_config config
= {
3534 .tracelevel
= -1 /* use DEBUGLEVEL by default */
3537 if (CMD_ARGC
< 2 || CMD_ARGC
> 6)
3538 return ERROR_COMMAND_SYNTAX_ERROR
;
3540 unsigned int counter_id
= strtoul(CMD_ARGV
[0], NULL
, 0);
3541 if (counter_id
>= XTENSA_MAX_PERF_COUNTERS
) {
3542 command_print(CMD
, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS
);
3543 return ERROR_COMMAND_ARGUMENT_INVALID
;
3546 config
.select
= strtoul(CMD_ARGV
[1], NULL
, 0);
3547 if (config
.select
> XTENSA_MAX_PERF_SELECT
) {
3548 command_print(CMD
, "select should be < %d", XTENSA_MAX_PERF_SELECT
);
3549 return ERROR_COMMAND_ARGUMENT_INVALID
;
3552 if (CMD_ARGC
>= 3) {
3553 config
.mask
= strtoul(CMD_ARGV
[2], NULL
, 0);
3554 if (config
.mask
> XTENSA_MAX_PERF_MASK
) {
3555 command_print(CMD
, "mask should be < %d", XTENSA_MAX_PERF_MASK
);
3556 return ERROR_COMMAND_ARGUMENT_INVALID
;
3560 if (CMD_ARGC
>= 4) {
3561 config
.kernelcnt
= strtoul(CMD_ARGV
[3], NULL
, 0);
3562 if (config
.kernelcnt
> 1) {
3563 command_print(CMD
, "kernelcnt should be 0 or 1");
3564 return ERROR_COMMAND_ARGUMENT_INVALID
;
3568 if (CMD_ARGC
>= 5) {
3569 config
.tracelevel
= strtoul(CMD_ARGV
[4], NULL
, 0);
3570 if (config
.tracelevel
> 7) {
3571 command_print(CMD
, "tracelevel should be <=7");
3572 return ERROR_COMMAND_ARGUMENT_INVALID
;
3576 if (config
.tracelevel
== -1)
3577 config
.tracelevel
= xtensa
->core_config
->debug
.irq_level
;
3579 return xtensa_dm_perfmon_enable(&xtensa
->dbg_mod
, counter_id
, &config
);
3582 COMMAND_HANDLER(xtensa_cmd_perfmon_enable
)
3584 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do
,
3585 target_to_xtensa(get_current_target(CMD_CTX
)));
3588 /* perfmon_dump [counter_id] */
3589 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do
, struct xtensa
*xtensa
)
3592 return ERROR_COMMAND_SYNTAX_ERROR
;
3594 int counter_id
= -1;
3595 if (CMD_ARGC
== 1) {
3596 counter_id
= strtol(CMD_ARGV
[0], NULL
, 0);
3597 if (counter_id
> XTENSA_MAX_PERF_COUNTERS
) {
3598 command_print(CMD
, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS
);
3599 return ERROR_COMMAND_ARGUMENT_INVALID
;
3603 unsigned int counter_start
= (counter_id
< 0) ? 0 : counter_id
;
3604 unsigned int counter_end
= (counter_id
< 0) ? XTENSA_MAX_PERF_COUNTERS
: counter_id
+ 1;
3605 for (unsigned int counter
= counter_start
; counter
< counter_end
; ++counter
) {
3606 char result_buf
[128] = { 0 };
3607 size_t result_pos
= snprintf(result_buf
, sizeof(result_buf
), "Counter %d: ", counter
);
3608 struct xtensa_perfmon_result result
;
3609 int res
= xtensa_dm_perfmon_dump(&xtensa
->dbg_mod
, counter
, &result
);
3610 if (res
!= ERROR_OK
)
3612 snprintf(result_buf
+ result_pos
, sizeof(result_buf
) - result_pos
,
3615 result
.overflow
? " (overflow)" : "");
3616 LOG_INFO("%s", result_buf
);
3622 COMMAND_HANDLER(xtensa_cmd_perfmon_dump
)
3624 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do
,
3625 target_to_xtensa(get_current_target(CMD_CTX
)));
3628 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do
, struct xtensa
*xtensa
)
3634 state
= xtensa
->stepping_isr_mode
;
3635 if (state
== XT_STEPPING_ISR_ON
)
3637 else if (state
== XT_STEPPING_ISR_OFF
)
3641 command_print(CMD
, "Current ISR step mode: %s", st
);
3644 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3645 if (!strcasecmp(CMD_ARGV
[0], "off"))
3646 state
= XT_STEPPING_ISR_ON
;
3647 else if (!strcasecmp(CMD_ARGV
[0], "on"))
3648 state
= XT_STEPPING_ISR_OFF
;
3651 command_print(CMD
, "Argument unknown. Please pick one of ON, OFF");
3654 xtensa
->stepping_isr_mode
= state
;
3658 COMMAND_HANDLER(xtensa_cmd_mask_interrupts
)
3660 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do
,
3661 target_to_xtensa(get_current_target(CMD_CTX
)));
3664 COMMAND_HELPER(xtensa_cmd_smpbreak_do
, struct target
*target
)
3669 if (CMD_ARGC
>= 1) {
3670 for (unsigned int i
= 0; i
< CMD_ARGC
; i
++) {
3671 if (!strcasecmp(CMD_ARGV
[0], "none")) {
3673 } else if (!strcasecmp(CMD_ARGV
[i
], "BreakIn")) {
3674 val
|= OCDDCR_BREAKINEN
;
3675 } else if (!strcasecmp(CMD_ARGV
[i
], "BreakOut")) {
3676 val
|= OCDDCR_BREAKOUTEN
;
3677 } else if (!strcasecmp(CMD_ARGV
[i
], "RunStallIn")) {
3678 val
|= OCDDCR_RUNSTALLINEN
;
3679 } else if (!strcasecmp(CMD_ARGV
[i
], "DebugModeOut")) {
3680 val
|= OCDDCR_DEBUGMODEOUTEN
;
3681 } else if (!strcasecmp(CMD_ARGV
[i
], "BreakInOut")) {
3682 val
|= OCDDCR_BREAKINEN
| OCDDCR_BREAKOUTEN
;
3683 } else if (!strcasecmp(CMD_ARGV
[i
], "RunStall")) {
3684 val
|= OCDDCR_RUNSTALLINEN
| OCDDCR_DEBUGMODEOUTEN
;
3686 command_print(CMD
, "Unknown arg %s", CMD_ARGV
[i
]);
3689 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3693 res
= xtensa_smpbreak_set(target
, val
);
3694 if (res
!= ERROR_OK
)
3695 command_print(CMD
, "Failed to set smpbreak config %d", res
);
3697 struct xtensa
*xtensa
= target_to_xtensa(target
);
3698 res
= xtensa_smpbreak_read(xtensa
, &val
);
3699 if (res
== ERROR_OK
)
3700 command_print(CMD
, "Current bits set:%s%s%s%s",
3701 (val
& OCDDCR_BREAKINEN
) ? " BreakIn" : "",
3702 (val
& OCDDCR_BREAKOUTEN
) ? " BreakOut" : "",
3703 (val
& OCDDCR_RUNSTALLINEN
) ? " RunStallIn" : "",
3704 (val
& OCDDCR_DEBUGMODEOUTEN
) ? " DebugModeOut" : ""
3707 command_print(CMD
, "Failed to get smpbreak config %d", res
);
3712 COMMAND_HANDLER(xtensa_cmd_smpbreak
)
3714 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do
,
3715 get_current_target(CMD_CTX
));
3718 COMMAND_HELPER(xtensa_cmd_tracestart_do
, struct xtensa
*xtensa
)
3720 struct xtensa_trace_status trace_status
;
3721 struct xtensa_trace_start_config cfg
= {
3723 .stopmask
= XTENSA_STOPMASK_DISABLED
,
3725 .after_is_words
= false
3728 /* Parse arguments */
3729 for (unsigned int i
= 0; i
< CMD_ARGC
; i
++) {
3730 if ((!strcasecmp(CMD_ARGV
[i
], "pc")) && CMD_ARGC
> i
) {
3733 cfg
.stoppc
= strtol(CMD_ARGV
[i
], &e
, 0);
3736 cfg
.stopmask
= strtol(e
, NULL
, 0);
3737 } else if ((!strcasecmp(CMD_ARGV
[i
], "after")) && CMD_ARGC
> i
) {
3739 cfg
.after
= strtol(CMD_ARGV
[i
], NULL
, 0);
3740 } else if (!strcasecmp(CMD_ARGV
[i
], "ins")) {
3741 cfg
.after_is_words
= 0;
3742 } else if (!strcasecmp(CMD_ARGV
[i
], "words")) {
3743 cfg
.after_is_words
= 1;
3745 command_print(CMD
, "Did not understand %s", CMD_ARGV
[i
]);
3750 int res
= xtensa_dm_trace_status_read(&xtensa
->dbg_mod
, &trace_status
);
3751 if (res
!= ERROR_OK
)
3753 if (trace_status
.stat
& TRAXSTAT_TRACT
) {
3754 LOG_WARNING("Silently stop active tracing!");
3755 res
= xtensa_dm_trace_stop(&xtensa
->dbg_mod
, false);
3756 if (res
!= ERROR_OK
)
3760 res
= xtensa_dm_trace_start(&xtensa
->dbg_mod
, &cfg
);
3761 if (res
!= ERROR_OK
)
3764 xtensa
->trace_active
= true;
3765 command_print(CMD
, "Trace started.");
3769 COMMAND_HANDLER(xtensa_cmd_tracestart
)
3771 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do
,
3772 target_to_xtensa(get_current_target(CMD_CTX
)));
3775 COMMAND_HELPER(xtensa_cmd_tracestop_do
, struct xtensa
*xtensa
)
3777 struct xtensa_trace_status trace_status
;
3779 int res
= xtensa_dm_trace_status_read(&xtensa
->dbg_mod
, &trace_status
);
3780 if (res
!= ERROR_OK
)
3783 if (!(trace_status
.stat
& TRAXSTAT_TRACT
)) {
3784 command_print(CMD
, "No trace is currently active.");
3788 res
= xtensa_dm_trace_stop(&xtensa
->dbg_mod
, true);
3789 if (res
!= ERROR_OK
)
3792 xtensa
->trace_active
= false;
3793 command_print(CMD
, "Trace stop triggered.");
3797 COMMAND_HANDLER(xtensa_cmd_tracestop
)
3799 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do
,
3800 target_to_xtensa(get_current_target(CMD_CTX
)));
3803 COMMAND_HELPER(xtensa_cmd_tracedump_do
, struct xtensa
*xtensa
, const char *fname
)
3805 struct xtensa_trace_config trace_config
;
3806 struct xtensa_trace_status trace_status
;
3807 uint32_t memsz
, wmem
;
3809 int res
= xtensa_dm_trace_status_read(&xtensa
->dbg_mod
, &trace_status
);
3810 if (res
!= ERROR_OK
)
3813 if (trace_status
.stat
& TRAXSTAT_TRACT
) {
3814 command_print(CMD
, "Tracing is still active. Please stop it first.");
3818 res
= xtensa_dm_trace_config_read(&xtensa
->dbg_mod
, &trace_config
);
3819 if (res
!= ERROR_OK
)
3822 if (!(trace_config
.ctrl
& TRAXCTRL_TREN
)) {
3823 command_print(CMD
, "No active trace found; nothing to dump.");
3827 memsz
= trace_config
.memaddr_end
- trace_config
.memaddr_start
+ 1;
3828 LOG_INFO("Total trace memory: %d words", memsz
);
3829 if ((trace_config
.addr
&
3830 ((TRAXADDR_TWRAP_MASK
<< TRAXADDR_TWRAP_SHIFT
) | TRAXADDR_TWSAT
)) == 0) {
3831 /*Memory hasn't overwritten itself yet. */
3832 wmem
= trace_config
.addr
& TRAXADDR_TADDR_MASK
;
3833 LOG_INFO("...but trace is only %d words", wmem
);
3837 if (trace_config
.addr
& TRAXADDR_TWSAT
) {
3838 LOG_INFO("Real trace is many times longer than that (overflow)");
3840 uint32_t trc_sz
= (trace_config
.addr
>> TRAXADDR_TWRAP_SHIFT
) & TRAXADDR_TWRAP_MASK
;
3841 trc_sz
= (trc_sz
* memsz
) + (trace_config
.addr
& TRAXADDR_TADDR_MASK
);
3842 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz
);
3846 uint8_t *tracemem
= malloc(memsz
* 4);
3848 command_print(CMD
, "Failed to alloc memory for trace data!");
3851 res
= xtensa_dm_trace_data_read(&xtensa
->dbg_mod
, tracemem
, memsz
* 4);
3852 if (res
!= ERROR_OK
) {
3857 int f
= open(fname
, O_WRONLY
| O_CREAT
| O_TRUNC
, 0666);
3860 command_print(CMD
, "Unable to open file %s", fname
);
3863 if (write(f
, tracemem
, memsz
* 4) != (int)memsz
* 4)
3864 command_print(CMD
, "Unable to write to file %s", fname
);
3866 command_print(CMD
, "Written %d bytes of trace data to %s", memsz
* 4, fname
);
3869 bool is_all_zeroes
= true;
3870 for (unsigned int i
= 0; i
< memsz
* 4; i
++) {
3871 if (tracemem
[i
] != 0) {
3872 is_all_zeroes
= false;
3880 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
3885 COMMAND_HANDLER(xtensa_cmd_tracedump
)
3887 if (CMD_ARGC
!= 1) {
3888 command_print(CMD
, "Command takes exactly 1 parameter.Need filename to dump to as output!");
3892 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do
,
3893 target_to_xtensa(get_current_target(CMD_CTX
)), CMD_ARGV
[0]);
3896 static const struct command_registration xtensa_any_command_handlers
[] = {
3899 .handler
= xtensa_cmd_xtdef
,
3900 .mode
= COMMAND_CONFIG
,
3901 .help
= "Configure Xtensa core type",
3906 .handler
= xtensa_cmd_xtopt
,
3907 .mode
= COMMAND_CONFIG
,
3908 .help
= "Configure Xtensa core option",
3909 .usage
= "<name> <value>",
3913 .handler
= xtensa_cmd_xtmem
,
3914 .mode
= COMMAND_CONFIG
,
3915 .help
= "Configure Xtensa memory/cache option",
3916 .usage
= "<type> [parameters]",
3920 .handler
= xtensa_cmd_xtmmu
,
3921 .mode
= COMMAND_CONFIG
,
3922 .help
= "Configure Xtensa MMU option",
3923 .usage
= "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
3927 .handler
= xtensa_cmd_xtmpu
,
3928 .mode
= COMMAND_CONFIG
,
3929 .help
= "Configure Xtensa MPU option",
3930 .usage
= "<num FG seg> <min seg size> <lockable> <executeonly>",
3934 .handler
= xtensa_cmd_xtreg
,
3935 .mode
= COMMAND_CONFIG
,
3936 .help
= "Configure Xtensa register",
3937 .usage
= "<regname> <regnum>",
3941 .handler
= xtensa_cmd_xtreg
,
3942 .mode
= COMMAND_CONFIG
,
3943 .help
= "Configure number of Xtensa registers",
3944 .usage
= "<numregs>",
3948 .handler
= xtensa_cmd_xtregfmt
,
3949 .mode
= COMMAND_CONFIG
,
3950 .help
= "Configure format of Xtensa register map",
3951 .usage
= "<contiguous|sparse> [numgregs]",
3954 .name
= "set_permissive",
3955 .handler
= xtensa_cmd_permissive_mode
,
3956 .mode
= COMMAND_ANY
,
3957 .help
= "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
3962 .handler
= xtensa_cmd_mask_interrupts
,
3963 .mode
= COMMAND_ANY
,
3964 .help
= "mask Xtensa interrupts at step",
3965 .usage
= "['on'|'off']",
3969 .handler
= xtensa_cmd_smpbreak
,
3970 .mode
= COMMAND_ANY
,
3971 .help
= "Set the way the CPU chains OCD breaks",
3972 .usage
= "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
3975 .name
= "perfmon_enable",
3976 .handler
= xtensa_cmd_perfmon_enable
,
3977 .mode
= COMMAND_EXEC
,
3978 .help
= "Enable and start performance counter",
3979 .usage
= "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
3982 .name
= "perfmon_dump",
3983 .handler
= xtensa_cmd_perfmon_dump
,
3984 .mode
= COMMAND_EXEC
,
3985 .help
= "Dump performance counter value. If no argument specified, dumps all counters.",
3986 .usage
= "[counter_id]",
3989 .name
= "tracestart",
3990 .handler
= xtensa_cmd_tracestart
,
3991 .mode
= COMMAND_EXEC
,
3993 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
3994 .usage
= "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
3997 .name
= "tracestop",
3998 .handler
= xtensa_cmd_tracestop
,
3999 .mode
= COMMAND_EXEC
,
4000 .help
= "Tracing: Stop current trace as started by the tracestart command",
4004 .name
= "tracedump",
4005 .handler
= xtensa_cmd_tracedump
,
4006 .mode
= COMMAND_EXEC
,
4007 .help
= "Tracing: Dump trace memory to a files. One file per core.",
4008 .usage
= "<outfile>",
4012 .handler
= xtensa_cmd_exe
,
4013 .mode
= COMMAND_ANY
,
4014 .help
= "Xtensa stub execution",
4015 .usage
= "<ascii-encoded hexadecimal instruction bytes>",
4017 COMMAND_REGISTRATION_DONE
4020 const struct command_registration xtensa_command_handlers
[] = {
4023 .mode
= COMMAND_ANY
,
4024 .help
= "Xtensa command group",
4026 .chain
= xtensa_any_command_handlers
,
4028 COMMAND_REGISTRATION_DONE