1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19 #include <target/algorithm.h>
21 #include "xtensa_chip.h"
/* Swap 4-bit Xtensa opcodes and fields */
#define XT_NIBSWAP8(V) \
	((((V) & 0x0F) << 4) \
		| (((V) & 0xF0) >> 4))

#define XT_NIBSWAP16(V) \
	((((V) & 0x000F) << 12) \
		| (((V) & 0x00F0) << 4) \
		| (((V) & 0x0F00) >> 4) \
		| (((V) & 0xF000) >> 12))

#define XT_NIBSWAP24(V) \
	((((V) & 0x00000F) << 20) \
		| (((V) & 0x0000F0) << 12) \
		| (((V) & 0x000F00) << 4) \
		| (((V) & 0x00F000) >> 4) \
		| (((V) & 0x0F0000) >> 12) \
		| (((V) & 0xF00000) >> 20))
/*
 * Instruction formatting converted from little-endian inputs
 * and shifted to the MSB-side of DIR for BE systems.
 */
#define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((SR) & 0xFF) << 8)) << 8 \
		: (OPCODE \
			| (((SR) & 0xFF) << 8) \
			| (((T) & 0x0F) << 4)))

#define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
			| (((R) & 0x0F) << 8)) << 8 \
		: (OPCODE \
			| (((ST) & 0xFF) << 4) \
			| (((R) & 0x0F) << 12)))

#define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
	(XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
			| (((T) & 0x0F) << 8) \
			| (((S) & 0x0F) << 4) \
			| ((IMM4) & 0x0F)) << 16 \
		: (OPCODE \
			| (((T) & 0x0F) << 4) \
			| (((S) & 0x0F) << 8) \
			| (((IMM4) & 0x0F) << 12)))

#define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((S) & 0x0F) << 12) \
			| (((R) & 0x0F) << 8) \
			| ((IMM8) & 0xFF)) << 8 \
		: (OPCODE \
			| (((IMM8) & 0xFF) << 16) \
			| (((R) & 0x0F) << 12) \
			| (((S) & 0x0F) << 8) \
			| (((T) & 0x0F) << 4)))

#define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((S) & 0x0F) << 12) \
			| (((R) & 0x0F) << 8)) << 8 \
		: (OPCODE \
			| (((IMM4) & 0x0F) << 20) \
			| (((R) & 0x0F) << 12) \
			| (((S) & 0x0F) << 8) \
			| (((T) & 0x0F) << 4)))
/* Xtensa processor instruction opcodes */
/* "Return From Debug Operation" to Normal */
#define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
/* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
#define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)

/* Load to DDR register, increase addr register */
#define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
/* Store from DDR register, increase addr register */
#define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))

/* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
#define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
/* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
#define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
/* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
#define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)

/* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
#define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
/* Store 16-bit to A(S)+2*IMM8 from A(T) */
#define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
/* Store 8-bit to A(S)+IMM8 from A(T) */
#define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)

/* Cache Instructions */
#define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
#define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
#define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
#define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)

/* Control Instructions */
#define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
#define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))

/* Read Special Register */
#define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
/* Write Special Register */
#define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
/* Swap Special Register */
#define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)

/* Rotate Window by (-8..7) */
#define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))

/* Read User Register */
#define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
/* Write User Register */
#define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)

/* Read Floating-Point Register */
#define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
/* Write Floating-Point Register */
#define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)

#define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
#define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
#define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)

#define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
#define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
#define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)

/* Read Protection TLB Entry Info */
#define XT_INS_PPTLB(X, S, T) _XT_INS_FORMAT_RRR(X, 0x500000, ((S) << 4) | (T), 0xD)
#define XT_TLB1_ACC_SHIFT 8
#define XT_TLB1_ACC_MSK 0xF

#define XT_WATCHPOINTS_NUM_MAX 2

/* Special register number macro for DDR, PS, WB, A3, A4 registers.
 * These get used a lot so making a shortcut is useful.
 */
#define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
#define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
#define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
#define XT_REG_A0 (xtensa_regs[XT_REG_IDX_AR0].reg_num)
#define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
#define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)

#define XT_PS_REG_NUM (0xe6U)
#define XT_EPS_REG_NUM_BASE (0xc0U)	/* (EPS2 - 2), for adding DBGLEVEL */
#define XT_EPC_REG_NUM_BASE (0xb0U)	/* (EPC1 - 1), for adding DBGLEVEL */
#define XT_PC_REG_NUM_VIRTUAL (0xffU)	/* Marker for computing PC (EPC[DBGLEVEL]) */
#define XT_PC_DBREG_NUM_BASE (0x20U)	/* External (i.e., GDB) access */
#define XT_NX_IBREAKC_BASE (0xc0U)	/* (IBREAKC0..IBREAKC1) for NX */

#define XT_SW_BREAKPOINTS_MAX_NUM 32
#define XT_HW_IBREAK_MAX_NUM 2
#define XT_HW_DBREAK_MAX_NUM 2
190 struct xtensa_reg_desc xtensa_regs
[XT_NUM_REGS
] = {
191 XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL
, XT_REG_SPECIAL
, 0),
192 XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL
, 0),
193 XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL
, 0),
194 XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL
, 0),
195 XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL
, 0),
196 XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL
, 0),
197 XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL
, 0),
198 XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL
, 0),
199 XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL
, 0),
200 XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL
, 0),
201 XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL
, 0),
202 XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL
, 0),
203 XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL
, 0),
204 XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL
, 0),
205 XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL
, 0),
206 XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL
, 0),
207 XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL
, 0),
208 XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL
, 0),
209 XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL
, 0),
210 XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL
, 0),
211 XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL
, 0),
212 XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL
, 0),
213 XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL
, 0),
214 XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL
, 0),
215 XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL
, 0),
216 XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL
, 0),
217 XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL
, 0),
218 XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL
, 0),
219 XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL
, 0),
220 XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL
, 0),
221 XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL
, 0),
222 XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL
, 0),
223 XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL
, 0),
224 XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL
, 0),
225 XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL
, 0),
226 XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL
, 0),
227 XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL
, 0),
228 XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL
, 0),
229 XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL
, 0),
230 XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL
, 0),
231 XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL
, 0),
232 XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL
, 0),
233 XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL
, 0),
234 XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL
, 0),
235 XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL
, 0),
236 XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL
, 0),
237 XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL
, 0),
238 XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL
, 0),
239 XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL
, 0),
240 XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL
, 0),
241 XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL
, 0),
242 XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL
, 0),
243 XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL
, 0),
244 XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL
, 0),
245 XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL
, 0),
246 XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL
, 0),
247 XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL
, 0),
248 XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL
, 0),
249 XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL
, 0),
250 XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL
, 0),
251 XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL
, 0),
252 XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL
, 0),
253 XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL
, 0),
254 XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL
, 0),
255 XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL
, 0),
256 XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL
, 0),
257 XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL
, 0),
258 XT_MK_REG_DESC("ps", XT_PS_REG_NUM
, XT_REG_SPECIAL
, 0), /* PS (not mapped through EPS[]) */
259 XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL
, 0),
260 XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG
, XT_REGF_NOREAD
),
261 XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL
, 0),
262 XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL
, 0),
263 XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL
, 0),
264 XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL
, 0),
265 XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL
, 0),
266 XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL
, 0),
267 XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL
, 0),
268 XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL
, 0),
269 XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL
, 0),
270 XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL
, 0),
271 XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL
, 0),
273 /* WARNING: For these registers, regnum points to the
274 * index of the corresponding ARx registers, NOT to
275 * the processor register number! */
276 XT_MK_REG_DESC("a0", XT_REG_IDX_AR0
, XT_REG_RELGEN
, 0),
277 XT_MK_REG_DESC("a1", XT_REG_IDX_AR1
, XT_REG_RELGEN
, 0),
278 XT_MK_REG_DESC("a2", XT_REG_IDX_AR2
, XT_REG_RELGEN
, 0),
279 XT_MK_REG_DESC("a3", XT_REG_IDX_AR3
, XT_REG_RELGEN
, 0),
280 XT_MK_REG_DESC("a4", XT_REG_IDX_AR4
, XT_REG_RELGEN
, 0),
281 XT_MK_REG_DESC("a5", XT_REG_IDX_AR5
, XT_REG_RELGEN
, 0),
282 XT_MK_REG_DESC("a6", XT_REG_IDX_AR6
, XT_REG_RELGEN
, 0),
283 XT_MK_REG_DESC("a7", XT_REG_IDX_AR7
, XT_REG_RELGEN
, 0),
284 XT_MK_REG_DESC("a8", XT_REG_IDX_AR8
, XT_REG_RELGEN
, 0),
285 XT_MK_REG_DESC("a9", XT_REG_IDX_AR9
, XT_REG_RELGEN
, 0),
286 XT_MK_REG_DESC("a10", XT_REG_IDX_AR10
, XT_REG_RELGEN
, 0),
287 XT_MK_REG_DESC("a11", XT_REG_IDX_AR11
, XT_REG_RELGEN
, 0),
288 XT_MK_REG_DESC("a12", XT_REG_IDX_AR12
, XT_REG_RELGEN
, 0),
289 XT_MK_REG_DESC("a13", XT_REG_IDX_AR13
, XT_REG_RELGEN
, 0),
290 XT_MK_REG_DESC("a14", XT_REG_IDX_AR14
, XT_REG_RELGEN
, 0),
291 XT_MK_REG_DESC("a15", XT_REG_IDX_AR15
, XT_REG_RELGEN
, 0),
/*
 * Types of memory used at xtensa target
 */
enum xtensa_mem_region_type {
	XTENSA_MEM_REG_IROM = 0x0,
	XTENSA_MEM_REG_IRAM,
	XTENSA_MEM_REG_DROM,
	XTENSA_MEM_REG_DRAM,
	XTENSA_MEM_REG_SRAM,
	XTENSA_MEM_REG_SROM,
	/* Total number of memory region types; used as loop bound when scanning all regions */
	XTENSA_MEM_REGS_NUM
};
308 * Types of access rights for MPU option
309 * The first block is kernel RWX ARs; the second block is user rwx ARs.
311 enum xtensa_mpu_access_type
{
312 XTENSA_ACC_00X_000
= 0x2,
328 /* Register definition as union for list allocation */
329 union xtensa_reg_val_u
{
330 xtensa_reg_val_t val
;
334 static const struct xtensa_keyval_info_s xt_qerr
[XT_QERR_NUM
] = {
335 { .chrval
= "E00", .intval
= ERROR_FAIL
},
336 { .chrval
= "E01", .intval
= ERROR_FAIL
},
337 { .chrval
= "E02", .intval
= ERROR_COMMAND_ARGUMENT_INVALID
},
338 { .chrval
= "E03", .intval
= ERROR_FAIL
},
/* Set to true for extra debug logging */
static const bool xtensa_extra_debug_log;
345 * Gets a config for the specific mem type
347 static inline const struct xtensa_local_mem_config
*xtensa_get_mem_config(
348 struct xtensa
*xtensa
,
349 enum xtensa_mem_region_type type
)
352 case XTENSA_MEM_REG_IROM
:
353 return &xtensa
->core_config
->irom
;
354 case XTENSA_MEM_REG_IRAM
:
355 return &xtensa
->core_config
->iram
;
356 case XTENSA_MEM_REG_DROM
:
357 return &xtensa
->core_config
->drom
;
358 case XTENSA_MEM_REG_DRAM
:
359 return &xtensa
->core_config
->dram
;
360 case XTENSA_MEM_REG_SRAM
:
361 return &xtensa
->core_config
->sram
;
362 case XTENSA_MEM_REG_SROM
:
363 return &xtensa
->core_config
->srom
;
370 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
371 * for a given address
372 * Returns NULL if nothing found
374 static inline const struct xtensa_local_mem_region_config
*xtensa_memory_region_find(
375 const struct xtensa_local_mem_config
*mem
,
376 target_addr_t address
)
378 for (unsigned int i
= 0; i
< mem
->count
; i
++) {
379 const struct xtensa_local_mem_region_config
*region
= &mem
->regions
[i
];
380 if (address
>= region
->base
&& address
< (region
->base
+ region
->size
))
387 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
388 * for a given address
389 * Returns NULL if nothing found
391 static inline const struct xtensa_local_mem_region_config
*xtensa_target_memory_region_find(
392 struct xtensa
*xtensa
,
393 target_addr_t address
)
395 const struct xtensa_local_mem_region_config
*result
;
396 const struct xtensa_local_mem_config
*mcgf
;
397 for (unsigned int mtype
= 0; mtype
< XTENSA_MEM_REGS_NUM
; mtype
++) {
398 mcgf
= xtensa_get_mem_config(xtensa
, mtype
);
399 result
= xtensa_memory_region_find(mcgf
, address
);
406 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config
*cache
,
407 const struct xtensa_local_mem_config
*mem
,
408 target_addr_t address
)
412 return xtensa_memory_region_find(mem
, address
);
415 static inline bool xtensa_is_icacheable(struct xtensa
*xtensa
, target_addr_t address
)
417 return xtensa_is_cacheable(&xtensa
->core_config
->icache
, &xtensa
->core_config
->iram
, address
) ||
418 xtensa_is_cacheable(&xtensa
->core_config
->icache
, &xtensa
->core_config
->irom
, address
) ||
419 xtensa_is_cacheable(&xtensa
->core_config
->icache
, &xtensa
->core_config
->sram
, address
) ||
420 xtensa_is_cacheable(&xtensa
->core_config
->icache
, &xtensa
->core_config
->srom
, address
);
423 static inline bool xtensa_is_dcacheable(struct xtensa
*xtensa
, target_addr_t address
)
425 return xtensa_is_cacheable(&xtensa
->core_config
->dcache
, &xtensa
->core_config
->dram
, address
) ||
426 xtensa_is_cacheable(&xtensa
->core_config
->dcache
, &xtensa
->core_config
->drom
, address
) ||
427 xtensa_is_cacheable(&xtensa
->core_config
->dcache
, &xtensa
->core_config
->sram
, address
) ||
428 xtensa_is_cacheable(&xtensa
->core_config
->dcache
, &xtensa
->core_config
->srom
, address
);
431 static int xtensa_core_reg_get(struct reg
*reg
)
433 /* We don't need this because we read all registers on halt anyway. */
434 struct xtensa
*xtensa
= (struct xtensa
*)reg
->arch_info
;
435 struct target
*target
= xtensa
->target
;
437 if (target
->state
!= TARGET_HALTED
)
438 return ERROR_TARGET_NOT_HALTED
;
440 if (strncmp(reg
->name
, "?0x", 3) == 0) {
441 unsigned int regnum
= strtoul(reg
->name
+ 1, NULL
, 0);
442 LOG_WARNING("Read unknown register 0x%04x ignored", regnum
);
445 return ERROR_COMMAND_ARGUMENT_INVALID
;
450 static int xtensa_core_reg_set(struct reg
*reg
, uint8_t *buf
)
452 struct xtensa
*xtensa
= (struct xtensa
*)reg
->arch_info
;
453 struct target
*target
= xtensa
->target
;
455 assert(reg
->size
<= 64 && "up to 64-bit regs are supported only!");
456 if (target
->state
!= TARGET_HALTED
)
457 return ERROR_TARGET_NOT_HALTED
;
460 if (strncmp(reg
->name
, "?0x", 3) == 0) {
461 unsigned int regnum
= strtoul(reg
->name
+ 1, NULL
, 0);
462 LOG_WARNING("Write unknown register 0x%04x ignored", regnum
);
465 return ERROR_COMMAND_ARGUMENT_INVALID
;
468 buf_cpy(buf
, reg
->value
, reg
->size
);
470 if (xtensa
->core_config
->windowed
) {
471 /* If the user updates a potential scratch register, track for conflicts */
472 for (enum xtensa_ar_scratch_set_e s
= 0; s
< XT_AR_SCRATCH_NUM
; s
++) {
473 if (strcmp(reg
->name
, xtensa
->scratch_ars
[s
].chrval
) == 0) {
474 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32
"] set from gdb", reg
->name
,
475 buf_get_u32(reg
->value
, 0, 32));
476 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
477 xtensa
->scratch_ars
[XT_AR_SCRATCH_AR3
].chrval
,
478 xtensa
->scratch_ars
[XT_AR_SCRATCH_AR4
].chrval
);
479 xtensa
->scratch_ars
[s
].intval
= true;
490 static const struct reg_arch_type xtensa_reg_type
= {
491 .get
= xtensa_core_reg_get
,
492 .set
= xtensa_core_reg_set
,
495 /* Convert a register index that's indexed relative to windowbase, to the real address. */
496 static enum xtensa_reg_id
xtensa_windowbase_offset_to_canonical(struct xtensa
*xtensa
,
497 enum xtensa_reg_id reg_idx
,
501 if (reg_idx
>= XT_REG_IDX_AR0
&& reg_idx
<= XT_REG_IDX_ARLAST
) {
502 idx
= reg_idx
- XT_REG_IDX_AR0
;
503 } else if (reg_idx
>= XT_REG_IDX_A0
&& reg_idx
<= XT_REG_IDX_A15
) {
504 idx
= reg_idx
- XT_REG_IDX_A0
;
506 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx
);
509 /* Each windowbase value represents 4 registers on LX and 8 on NX */
510 int base_inc
= (xtensa
->core_config
->core_type
== XT_LX
) ? 4 : 8;
511 return ((idx
+ windowbase
* base_inc
) & (xtensa
->core_config
->aregs_num
- 1)) + XT_REG_IDX_AR0
;
514 static enum xtensa_reg_id
xtensa_canonical_to_windowbase_offset(struct xtensa
*xtensa
,
515 enum xtensa_reg_id reg_idx
,
518 return xtensa_windowbase_offset_to_canonical(xtensa
, reg_idx
, -windowbase
);
521 static void xtensa_mark_register_dirty(struct xtensa
*xtensa
, enum xtensa_reg_id reg_idx
)
523 struct reg
*reg_list
= xtensa
->core_cache
->reg_list
;
524 reg_list
[reg_idx
].dirty
= true;
527 static void xtensa_queue_exec_ins(struct xtensa
*xtensa
, uint32_t ins
)
529 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DIR0EXEC
, ins
);
532 static void xtensa_queue_exec_ins_wide(struct xtensa
*xtensa
, uint8_t *ops
, uint8_t oplen
)
534 const int max_oplen
= 64; /* 8 DIRx regs: max width 64B */
535 if ((oplen
> 0) && (oplen
<= max_oplen
)) {
536 uint8_t ops_padded
[max_oplen
];
537 memcpy(ops_padded
, ops
, oplen
);
538 memset(ops_padded
+ oplen
, 0, max_oplen
- oplen
);
539 unsigned int oplenw
= DIV_ROUND_UP(oplen
, sizeof(uint32_t));
540 for (int32_t i
= oplenw
- 1; i
> 0; i
--)
541 xtensa_queue_dbg_reg_write(xtensa
,
543 target_buffer_get_u32(xtensa
->target
, &ops_padded
[sizeof(uint32_t)*i
]));
544 /* Write DIR0EXEC last */
545 xtensa_queue_dbg_reg_write(xtensa
,
547 target_buffer_get_u32(xtensa
->target
, &ops_padded
[0]));
551 /* NOTE: Assumes A3 has already been saved and marked dirty; A3 will be clobbered */
552 static inline bool xtensa_region_ar_exec(struct target
*target
, target_addr_t start
, target_addr_t end
)
554 struct xtensa
*xtensa
= target_to_xtensa(target
);
555 if (xtensa
->core_config
->mpu
.enabled
) {
556 /* For cores with the MPU option, issue PPTLB on start and end addresses.
557 * Parse access rights field, and confirm both have execute permissions.
559 for (int i
= 0; i
<= 1; i
++) {
563 target_addr_t addr
= i
? end
: start
;
564 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, addr
);
565 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
566 xtensa_queue_exec_ins(xtensa
, XT_INS_PPTLB(xtensa
, XT_REG_A3
, XT_REG_A3
));
567 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
568 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, at_buf
);
569 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
571 LOG_TARGET_ERROR(target
, "Error queuing PPTLB: %d", res
);
572 res
= xtensa_core_status_check(target
);
574 LOG_TARGET_ERROR(target
, "Error issuing PPTLB: %d", res
);
575 at
= buf_get_u32(at_buf
, 0, 32);
576 acc
= (at
>> XT_TLB1_ACC_SHIFT
) & XT_TLB1_ACC_MSK
;
577 exec_acc
= ((acc
== XTENSA_ACC_00X_000
) || (acc
== XTENSA_ACC_R0X_000
) ||
578 (acc
== XTENSA_ACC_RWX_000
) || (acc
== XTENSA_ACC_RWX_R0X
) ||
579 (acc
== XTENSA_ACC_R0X_R0X
) || (acc
== XTENSA_ACC_RWX_RWX
));
580 LOG_TARGET_DEBUG(target
, "PPTLB(" TARGET_ADDR_FMT
") -> 0x%08" PRIx32
" exec_acc %d",
589 static int xtensa_queue_pwr_reg_write(struct xtensa
*xtensa
, unsigned int reg
, uint32_t data
)
591 struct xtensa_debug_module
*dm
= &xtensa
->dbg_mod
;
592 return dm
->pwr_ops
->queue_reg_write(dm
, reg
, data
);
595 /* NOTE: Assumes A3 has already been saved */
596 static int xtensa_window_state_save(struct target
*target
, uint32_t *woe
)
598 struct xtensa
*xtensa
= target_to_xtensa(target
);
599 unsigned int woe_sr
= (xtensa
->core_config
->core_type
== XT_LX
) ? XT_SR_PS
: XT_SR_WB
;
603 if (xtensa
->core_config
->windowed
) {
604 /* Save PS (LX) or WB (NX) and disable window overflow exceptions prior to AR save */
605 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, woe_sr
, XT_REG_A3
));
606 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
607 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, woe_buf
);
608 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
609 if (res
!= ERROR_OK
) {
610 LOG_TARGET_ERROR(target
, "Failed to read %s (%d)!",
611 (woe_sr
== XT_SR_PS
) ? "PS" : "WB", res
);
614 xtensa_core_status_check(target
);
615 *woe
= buf_get_u32(woe_buf
, 0, 32);
616 woe_dis
= *woe
& ~((woe_sr
== XT_SR_PS
) ? XT_PS_WOE_MSK
: XT_WB_S_MSK
);
617 LOG_TARGET_DEBUG(target
, "Clearing %s (0x%08" PRIx32
" -> 0x%08" PRIx32
")",
618 (woe_sr
== XT_SR_PS
) ? "PS.WOE" : "WB.S", *woe
, woe_dis
);
619 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, woe_dis
);
620 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
621 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, woe_sr
, XT_REG_A3
));
626 /* NOTE: Assumes A3 has already been saved */
627 static void xtensa_window_state_restore(struct target
*target
, uint32_t woe
)
629 struct xtensa
*xtensa
= target_to_xtensa(target
);
630 unsigned int woe_sr
= (xtensa
->core_config
->core_type
== XT_LX
) ? XT_SR_PS
: XT_SR_WB
;
631 if (xtensa
->core_config
->windowed
) {
632 /* Restore window overflow exception state */
633 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, woe
);
634 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
635 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, woe_sr
, XT_REG_A3
));
636 LOG_TARGET_DEBUG(target
, "Restored %s (0x%08" PRIx32
")",
637 (woe_sr
== XT_SR_PS
) ? "PS.WOE" : "WB", woe
);
641 static bool xtensa_reg_is_readable(int flags
, int cpenable
)
643 if (flags
& XT_REGF_NOREAD
)
645 if ((flags
& XT_REGF_COPROC0
) && (cpenable
& BIT(0)) == 0)
650 static bool xtensa_scratch_regs_fixup(struct xtensa
*xtensa
, struct reg
*reg_list
, int i
, int j
, int a_idx
, int ar_idx
)
652 int a_name
= (a_idx
== XT_AR_SCRATCH_A3
) ? 3 : 4;
653 if (xtensa
->scratch_ars
[a_idx
].intval
&& !xtensa
->scratch_ars
[ar_idx
].intval
) {
654 LOG_DEBUG("AR conflict: a%d -> ar%d", a_name
, j
- XT_REG_IDX_AR0
);
655 memcpy(reg_list
[j
].value
, reg_list
[i
].value
, sizeof(xtensa_reg_val_t
));
657 LOG_DEBUG("AR conflict: ar%d -> a%d", j
- XT_REG_IDX_AR0
, a_name
);
658 memcpy(reg_list
[i
].value
, reg_list
[j
].value
, sizeof(xtensa_reg_val_t
));
660 return xtensa
->scratch_ars
[a_idx
].intval
&& xtensa
->scratch_ars
[ar_idx
].intval
;
663 static int xtensa_write_dirty_registers(struct target
*target
)
665 struct xtensa
*xtensa
= target_to_xtensa(target
);
667 xtensa_reg_val_t regval
, windowbase
= 0;
668 bool scratch_reg_dirty
= false, delay_cpenable
= false;
669 struct reg
*reg_list
= xtensa
->core_cache
->reg_list
;
670 unsigned int reg_list_size
= xtensa
->core_cache
->num_regs
;
671 bool preserve_a3
= false;
673 xtensa_reg_val_t a3
= 0, woe
;
674 unsigned int ms_idx
= (xtensa
->core_config
->core_type
== XT_NX
) ?
675 xtensa
->nx_reg_idx
[XT_NX_REG_IDX_MS
] : reg_list_size
;
676 xtensa_reg_val_t ms
= 0;
677 bool restore_ms
= false;
679 LOG_TARGET_DEBUG(target
, "start");
681 /* We need to write the dirty registers in the cache list back to the processor.
682 * Start by writing the SFR/user registers. */
683 for (unsigned int i
= 0; i
< reg_list_size
; i
++) {
684 struct xtensa_reg_desc
*rlist
= (i
< XT_NUM_REGS
) ? xtensa_regs
: xtensa
->optregs
;
685 unsigned int ridx
= (i
< XT_NUM_REGS
) ? i
: i
- XT_NUM_REGS
;
686 if (reg_list
[i
].dirty
) {
687 if (rlist
[ridx
].type
== XT_REG_SPECIAL
||
688 rlist
[ridx
].type
== XT_REG_USER
||
689 rlist
[ridx
].type
== XT_REG_FR
) {
690 scratch_reg_dirty
= true;
691 if (i
== XT_REG_IDX_CPENABLE
) {
692 delay_cpenable
= true;
695 regval
= xtensa_reg_get(target
, i
);
696 LOG_TARGET_DEBUG(target
, "Writing back reg %s (%d) val %08" PRIX32
,
700 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, regval
);
701 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
702 if (reg_list
[i
].exist
) {
703 unsigned int reg_num
= rlist
[ridx
].reg_num
;
704 if (rlist
[ridx
].type
== XT_REG_USER
) {
705 xtensa_queue_exec_ins(xtensa
, XT_INS_WUR(xtensa
, reg_num
, XT_REG_A3
));
706 } else if (rlist
[ridx
].type
== XT_REG_FR
) {
707 xtensa_queue_exec_ins(xtensa
, XT_INS_WFR(xtensa
, reg_num
, XT_REG_A3
));
709 if (reg_num
== XT_PC_REG_NUM_VIRTUAL
) {
710 if (xtensa
->core_config
->core_type
== XT_LX
) {
711 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
712 reg_num
= (XT_EPC_REG_NUM_BASE
+ xtensa
->core_config
->debug
.irq_level
);
713 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, reg_num
, XT_REG_A3
));
715 /* NX PC set through issuing a jump instruction */
716 xtensa_queue_exec_ins(xtensa
, XT_INS_JX(xtensa
, XT_REG_A3
));
718 } else if (i
== ms_idx
) {
719 /* MS must be restored after ARs. This ensures ARs remain in correct
720 * order even for reversed register groups (overflow/underflow).
724 LOG_TARGET_DEBUG(target
, "Delaying MS write: 0x%x", ms
);
726 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, reg_num
, XT_REG_A3
));
730 reg_list
[i
].dirty
= false;
734 if (scratch_reg_dirty
)
735 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A3
);
736 if (delay_cpenable
) {
737 regval
= xtensa_reg_get(target
, XT_REG_IDX_CPENABLE
);
738 LOG_TARGET_DEBUG(target
, "Writing back reg cpenable (224) val %08" PRIX32
, regval
);
739 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, regval
);
740 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
741 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
,
742 xtensa_regs
[XT_REG_IDX_CPENABLE
].reg_num
,
744 reg_list
[XT_REG_IDX_CPENABLE
].dirty
= false;
747 preserve_a3
= (xtensa
->core_config
->windowed
) || (xtensa
->core_config
->core_type
== XT_NX
);
749 /* Save (windowed) A3 for scratch use */
750 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
751 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, a3_buf
);
752 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
755 xtensa_core_status_check(target
);
756 a3
= buf_get_u32(a3_buf
, 0, 32);
759 if (xtensa
->core_config
->windowed
) {
760 res
= xtensa_window_state_save(target
, &woe
);
763 /* Grab the windowbase, we need it. */
764 uint32_t wb_idx
= (xtensa
->core_config
->core_type
== XT_LX
) ?
765 XT_REG_IDX_WINDOWBASE
: xtensa
->nx_reg_idx
[XT_NX_REG_IDX_WB
];
766 windowbase
= xtensa_reg_get(target
, wb_idx
);
767 if (xtensa
->core_config
->core_type
== XT_NX
)
768 windowbase
= (windowbase
& XT_WB_P_MSK
) >> XT_WB_P_SHIFT
;
770 /* Check if there are mismatches between the ARx and corresponding Ax registers.
771 * When the user sets a register on a windowed config, xt-gdb may set the ARx
772 * register directly. Thus we take ARx as priority over Ax if both are dirty
773 * and it's unclear if the user set one over the other explicitly.
775 for (unsigned int i
= XT_REG_IDX_A0
; i
<= XT_REG_IDX_A15
; i
++) {
776 unsigned int j
= xtensa_windowbase_offset_to_canonical(xtensa
, i
, windowbase
);
777 if (reg_list
[i
].dirty
&& reg_list
[j
].dirty
) {
778 if (memcmp(reg_list
[i
].value
, reg_list
[j
].value
, sizeof(xtensa_reg_val_t
)) != 0) {
779 bool show_warning
= true;
780 if (i
== XT_REG_IDX_A3
)
781 show_warning
= xtensa_scratch_regs_fixup(xtensa
,
782 reg_list
, i
, j
, XT_AR_SCRATCH_A3
, XT_AR_SCRATCH_AR3
);
783 else if (i
== XT_REG_IDX_A4
)
784 show_warning
= xtensa_scratch_regs_fixup(xtensa
,
785 reg_list
, i
, j
, XT_AR_SCRATCH_A4
, XT_AR_SCRATCH_AR4
);
788 "Warning: Both A%d [0x%08" PRIx32
789 "] as well as its underlying physical register "
790 "(AR%d) [0x%08" PRIx32
"] are dirty and differ in value",
792 buf_get_u32(reg_list
[i
].value
, 0, 32),
794 buf_get_u32(reg_list
[j
].value
, 0, 32));
801 for (unsigned int i
= 0; i
< 16; i
++) {
802 if (reg_list
[XT_REG_IDX_A0
+ i
].dirty
) {
803 regval
= xtensa_reg_get(target
, XT_REG_IDX_A0
+ i
);
804 LOG_TARGET_DEBUG(target
, "Writing back reg %s value %08" PRIX32
", num =%i",
805 xtensa_regs
[XT_REG_IDX_A0
+ i
].name
,
807 xtensa_regs
[XT_REG_IDX_A0
+ i
].reg_num
);
808 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, regval
);
809 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, i
));
810 reg_list
[XT_REG_IDX_A0
+ i
].dirty
= false;
812 /* Avoid stomping A3 during restore at end of function */
818 if (xtensa
->core_config
->windowed
) {
819 /* Now write AR registers */
820 for (unsigned int j
= 0; j
< XT_REG_IDX_ARLAST
; j
+= 16) {
821 /* Write the 16 registers we can see */
822 for (unsigned int i
= 0; i
< 16; i
++) {
823 if (i
+ j
< xtensa
->core_config
->aregs_num
) {
824 enum xtensa_reg_id realadr
=
825 xtensa_windowbase_offset_to_canonical(xtensa
, XT_REG_IDX_AR0
+ i
+ j
,
827 /* Write back any dirty un-windowed registers */
828 if (reg_list
[realadr
].dirty
) {
829 regval
= xtensa_reg_get(target
, realadr
);
832 "Writing back reg %s value %08" PRIX32
", num =%i",
833 xtensa_regs
[realadr
].name
,
835 xtensa_regs
[realadr
].reg_num
);
836 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, regval
);
837 xtensa_queue_exec_ins(xtensa
,
838 XT_INS_RSR(xtensa
, XT_SR_DDR
,
839 xtensa_regs
[XT_REG_IDX_AR0
+ i
].reg_num
));
840 reg_list
[realadr
].dirty
= false;
842 /* Avoid stomping AR during A3 restore at end of function */
848 /* Now rotate the window so we'll see the next 16 registers. The final rotate
849 * will wraparound, leaving us in the state we were.
850 * Each ROTW rotates 4 registers on LX and 8 on NX */
851 int rotw_arg
= (xtensa
->core_config
->core_type
== XT_LX
) ? 4 : 2;
852 xtensa_queue_exec_ins(xtensa
, XT_INS_ROTW(xtensa
, rotw_arg
));
855 xtensa_window_state_restore(target
, woe
);
857 for (enum xtensa_ar_scratch_set_e s
= 0; s
< XT_AR_SCRATCH_NUM
; s
++)
858 xtensa
->scratch_ars
[s
].intval
= false;
862 uint32_t ms_regno
= xtensa
->optregs
[ms_idx
- XT_NUM_REGS
].reg_num
;
863 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, ms
);
864 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
865 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, ms_regno
, XT_REG_A3
));
866 LOG_TARGET_DEBUG(target
, "Delayed MS (0x%x) write complete: 0x%x", ms_regno
, ms
);
870 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, a3
);
871 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
874 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
875 xtensa_core_status_check(target
);
880 static inline bool xtensa_is_stopped(struct target
*target
)
882 struct xtensa
*xtensa
= target_to_xtensa(target
);
883 return xtensa
->dbg_mod
.core_status
.dsr
& OCDDSR_STOPPED
;
886 int xtensa_examine(struct target
*target
)
888 struct xtensa
*xtensa
= target_to_xtensa(target
);
889 unsigned int cmd
= PWRCTL_DEBUGWAKEUP(xtensa
) | PWRCTL_MEMWAKEUP(xtensa
) | PWRCTL_COREWAKEUP(xtensa
);
891 LOG_TARGET_DEBUG(target
, "");
893 if (xtensa
->core_config
->core_type
== XT_UNDEF
) {
894 LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
898 xtensa_queue_pwr_reg_write(xtensa
, XDMREG_PWRCTL
, cmd
);
899 xtensa_queue_pwr_reg_write(xtensa
, XDMREG_PWRCTL
, cmd
| PWRCTL_JTAGDEBUGUSE(xtensa
));
900 xtensa_dm_queue_enable(&xtensa
->dbg_mod
);
901 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
902 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
905 if (!xtensa_dm_is_online(&xtensa
->dbg_mod
)) {
906 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32
, xtensa
->dbg_mod
.device_id
);
907 return ERROR_TARGET_FAILURE
;
909 LOG_DEBUG("OCD_ID = %08" PRIx32
, xtensa
->dbg_mod
.device_id
);
910 target_set_examined(target
);
911 xtensa_smpbreak_write(xtensa
, xtensa
->smp_break
);
915 int xtensa_wakeup(struct target
*target
)
917 struct xtensa
*xtensa
= target_to_xtensa(target
);
918 unsigned int cmd
= PWRCTL_DEBUGWAKEUP(xtensa
) | PWRCTL_MEMWAKEUP(xtensa
) | PWRCTL_COREWAKEUP(xtensa
);
920 if (xtensa
->reset_asserted
)
921 cmd
|= PWRCTL_CORERESET(xtensa
);
922 xtensa_queue_pwr_reg_write(xtensa
, XDMREG_PWRCTL
, cmd
);
923 /* TODO: can we join this with the write above? */
924 xtensa_queue_pwr_reg_write(xtensa
, XDMREG_PWRCTL
, cmd
| PWRCTL_JTAGDEBUGUSE(xtensa
));
925 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
926 return xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
929 int xtensa_smpbreak_write(struct xtensa
*xtensa
, uint32_t set
)
931 uint32_t dsr_data
= 0x00110000;
932 uint32_t clear
= (set
| OCDDCR_ENABLEOCD
) ^
933 (OCDDCR_BREAKINEN
| OCDDCR_BREAKOUTEN
| OCDDCR_RUNSTALLINEN
|
934 OCDDCR_DEBUGMODEOUTEN
| OCDDCR_ENABLEOCD
);
936 LOG_TARGET_DEBUG(xtensa
->target
, "write smpbreak set=0x%" PRIx32
" clear=0x%" PRIx32
, set
, clear
);
937 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DCRSET
, set
| OCDDCR_ENABLEOCD
);
938 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DCRCLR
, clear
);
939 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DSR
, dsr_data
);
940 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
941 return xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
944 int xtensa_smpbreak_set(struct target
*target
, uint32_t set
)
946 struct xtensa
*xtensa
= target_to_xtensa(target
);
949 xtensa
->smp_break
= set
;
950 if (target_was_examined(target
))
951 res
= xtensa_smpbreak_write(xtensa
, xtensa
->smp_break
);
952 LOG_TARGET_DEBUG(target
, "set smpbreak=%" PRIx32
", state=%i", set
, target
->state
);
956 int xtensa_smpbreak_read(struct xtensa
*xtensa
, uint32_t *val
)
958 uint8_t dcr_buf
[sizeof(uint32_t)];
960 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DCRSET
, dcr_buf
);
961 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
962 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
963 *val
= buf_get_u32(dcr_buf
, 0, 32);
968 int xtensa_smpbreak_get(struct target
*target
, uint32_t *val
)
970 struct xtensa
*xtensa
= target_to_xtensa(target
);
971 *val
= xtensa
->smp_break
;
975 static inline xtensa_reg_val_t
xtensa_reg_get_value(struct reg
*reg
)
977 return buf_get_u32(reg
->value
, 0, 32);
980 static inline void xtensa_reg_set_value(struct reg
*reg
, xtensa_reg_val_t value
)
982 buf_set_u32(reg
->value
, 0, 32, value
);
986 static int xtensa_imprecise_exception_occurred(struct target
*target
)
988 struct xtensa
*xtensa
= target_to_xtensa(target
);
989 for (enum xtensa_nx_reg_idx idx
= XT_NX_REG_IDX_IEVEC
; idx
<= XT_NX_REG_IDX_MESR
; idx
++) {
990 enum xtensa_reg_id ridx
= xtensa
->nx_reg_idx
[idx
];
991 if (xtensa
->nx_reg_idx
[idx
]) {
992 xtensa_reg_val_t reg
= xtensa_reg_get(target
, xtensa
->nx_reg_idx
[idx
]);
993 if (reg
& XT_IMPR_EXC_MSK
) {
994 LOG_TARGET_DEBUG(target
, "Imprecise exception: %s: 0x%x",
995 xtensa
->core_cache
->reg_list
[ridx
].name
, reg
);
1003 static void xtensa_imprecise_exception_clear(struct target
*target
)
1005 struct xtensa
*xtensa
= target_to_xtensa(target
);
1006 for (enum xtensa_nx_reg_idx idx
= XT_NX_REG_IDX_IEVEC
; idx
<= XT_NX_REG_IDX_MESRCLR
; idx
++) {
1007 enum xtensa_reg_id ridx
= xtensa
->nx_reg_idx
[idx
];
1008 if (ridx
&& idx
!= XT_NX_REG_IDX_MESR
) {
1009 xtensa_reg_val_t value
= (idx
== XT_NX_REG_IDX_MESRCLR
) ? XT_MESRCLR_IMPR_EXC_MSK
: 0;
1010 xtensa_reg_set(target
, ridx
, value
);
1011 LOG_TARGET_DEBUG(target
, "Imprecise exception: clearing %s (0x%x)",
1012 xtensa
->core_cache
->reg_list
[ridx
].name
, value
);
1017 int xtensa_core_status_check(struct target
*target
)
1019 struct xtensa
*xtensa
= target_to_xtensa(target
);
1020 int res
, needclear
= 0, needimprclear
= 0;
1022 xtensa_dm_core_status_read(&xtensa
->dbg_mod
);
1023 xtensa_dsr_t dsr
= xtensa_dm_core_status_get(&xtensa
->dbg_mod
);
1024 LOG_TARGET_DEBUG(target
, "DSR (%08" PRIX32
")", dsr
);
1025 if (dsr
& OCDDSR_EXECBUSY
) {
1026 if (!xtensa
->suppress_dsr_errors
)
1027 LOG_TARGET_ERROR(target
, "DSR (%08" PRIX32
") indicates target still busy!", dsr
);
1030 if (dsr
& OCDDSR_EXECEXCEPTION
) {
1031 if (!xtensa
->suppress_dsr_errors
)
1032 LOG_TARGET_ERROR(target
,
1033 "DSR (%08" PRIX32
") indicates DIR instruction generated an exception!",
1037 if (dsr
& OCDDSR_EXECOVERRUN
) {
1038 if (!xtensa
->suppress_dsr_errors
)
1039 LOG_TARGET_ERROR(target
,
1040 "DSR (%08" PRIX32
") indicates DIR instruction generated an overrun!",
1044 if (xtensa
->core_config
->core_type
== XT_NX
&& (xtensa_imprecise_exception_occurred(target
))) {
1045 if (!xtensa
->suppress_dsr_errors
)
1046 LOG_TARGET_ERROR(target
,
1047 "%s: Imprecise exception occurred!", target_name(target
));
1052 res
= xtensa_dm_core_status_clear(&xtensa
->dbg_mod
,
1053 OCDDSR_EXECEXCEPTION
| OCDDSR_EXECOVERRUN
);
1054 if (res
!= ERROR_OK
&& !xtensa
->suppress_dsr_errors
)
1055 LOG_TARGET_ERROR(target
, "clearing DSR failed!");
1056 if (xtensa
->core_config
->core_type
== XT_NX
&& needimprclear
)
1057 xtensa_imprecise_exception_clear(target
);
1063 xtensa_reg_val_t
xtensa_reg_get(struct target
*target
, enum xtensa_reg_id reg_id
)
1065 struct xtensa
*xtensa
= target_to_xtensa(target
);
1066 struct reg
*reg
= &xtensa
->core_cache
->reg_list
[reg_id
];
1067 return xtensa_reg_get_value(reg
);
1070 void xtensa_reg_set(struct target
*target
, enum xtensa_reg_id reg_id
, xtensa_reg_val_t value
)
1072 struct xtensa
*xtensa
= target_to_xtensa(target
);
1073 struct reg
*reg
= &xtensa
->core_cache
->reg_list
[reg_id
];
1074 if (xtensa_reg_get_value(reg
) == value
)
1076 xtensa_reg_set_value(reg
, value
);
1079 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
1080 void xtensa_reg_set_deep_relgen(struct target
*target
, enum xtensa_reg_id a_idx
, xtensa_reg_val_t value
)
1082 struct xtensa
*xtensa
= target_to_xtensa(target
);
1083 uint32_t wb_idx
= (xtensa
->core_config
->core_type
== XT_LX
) ?
1084 XT_REG_IDX_WINDOWBASE
: xtensa
->nx_reg_idx
[XT_NX_REG_IDX_WB
];
1085 uint32_t windowbase
= (xtensa
->core_config
->windowed
?
1086 xtensa_reg_get(target
, wb_idx
) : 0);
1087 if (xtensa
->core_config
->core_type
== XT_NX
)
1088 windowbase
= (windowbase
& XT_WB_P_MSK
) >> XT_WB_P_SHIFT
;
1089 int ar_idx
= xtensa_windowbase_offset_to_canonical(xtensa
, a_idx
, windowbase
);
1090 xtensa_reg_set(target
, a_idx
, value
);
1091 xtensa_reg_set(target
, ar_idx
, value
);
1094 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
1095 uint32_t xtensa_cause_get(struct target
*target
)
1097 struct xtensa
*xtensa
= target_to_xtensa(target
);
1098 if (xtensa
->core_config
->core_type
== XT_LX
) {
1099 /* LX cause in DEBUGCAUSE */
1100 return xtensa_reg_get(target
, XT_REG_IDX_DEBUGCAUSE
);
1102 if (xtensa
->nx_stop_cause
& DEBUGCAUSE_VALID
)
1103 return xtensa
->nx_stop_cause
;
1105 /* NX cause determined from DSR.StopCause */
1106 if (xtensa_dm_core_status_read(&xtensa
->dbg_mod
) != ERROR_OK
) {
1107 LOG_TARGET_ERROR(target
, "Read DSR error");
1109 uint32_t dsr
= xtensa_dm_core_status_get(&xtensa
->dbg_mod
);
1110 /* NX causes are prioritized; only 1 bit can be set */
1111 switch ((dsr
& OCDDSR_STOPCAUSE
) >> OCDDSR_STOPCAUSE_SHIFT
) {
1112 case OCDDSR_STOPCAUSE_DI
:
1113 xtensa
->nx_stop_cause
= DEBUGCAUSE_DI
;
1115 case OCDDSR_STOPCAUSE_SS
:
1116 xtensa
->nx_stop_cause
= DEBUGCAUSE_IC
;
1118 case OCDDSR_STOPCAUSE_IB
:
1119 xtensa
->nx_stop_cause
= DEBUGCAUSE_IB
;
1121 case OCDDSR_STOPCAUSE_B
:
1122 case OCDDSR_STOPCAUSE_B1
:
1123 xtensa
->nx_stop_cause
= DEBUGCAUSE_BI
;
1125 case OCDDSR_STOPCAUSE_BN
:
1126 xtensa
->nx_stop_cause
= DEBUGCAUSE_BN
;
1128 case OCDDSR_STOPCAUSE_DB0
:
1129 case OCDDSR_STOPCAUSE_DB1
:
1130 xtensa
->nx_stop_cause
= DEBUGCAUSE_DB
;
1133 LOG_TARGET_ERROR(target
, "Unknown stop cause (DSR: 0x%08x)", dsr
);
1136 if (xtensa
->nx_stop_cause
)
1137 xtensa
->nx_stop_cause
|= DEBUGCAUSE_VALID
;
1139 return xtensa
->nx_stop_cause
;
1142 void xtensa_cause_clear(struct target
*target
)
1144 struct xtensa
*xtensa
= target_to_xtensa(target
);
1145 if (xtensa
->core_config
->core_type
== XT_LX
) {
1146 xtensa_reg_set(target
, XT_REG_IDX_DEBUGCAUSE
, 0);
1147 xtensa
->core_cache
->reg_list
[XT_REG_IDX_DEBUGCAUSE
].dirty
= false;
1149 /* NX DSR.STOPCAUSE is not writeable; clear cached copy but leave it valid */
1150 xtensa
->nx_stop_cause
= DEBUGCAUSE_VALID
;
1154 void xtensa_cause_reset(struct target
*target
)
1156 /* Clear DEBUGCAUSE_VALID to trigger re-read (on NX) */
1157 struct xtensa
*xtensa
= target_to_xtensa(target
);
1158 xtensa
->nx_stop_cause
= 0;
1161 int xtensa_assert_reset(struct target
*target
)
1163 struct xtensa
*xtensa
= target_to_xtensa(target
);
1165 LOG_TARGET_DEBUG(target
, " begin");
1166 xtensa_queue_pwr_reg_write(xtensa
,
1168 PWRCTL_JTAGDEBUGUSE(xtensa
) | PWRCTL_DEBUGWAKEUP(xtensa
) | PWRCTL_MEMWAKEUP(xtensa
) |
1169 PWRCTL_COREWAKEUP(xtensa
) | PWRCTL_CORERESET(xtensa
));
1170 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
1171 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1172 if (res
!= ERROR_OK
)
1175 /* registers are now invalid */
1176 xtensa
->reset_asserted
= true;
1177 register_cache_invalidate(xtensa
->core_cache
);
1178 target
->state
= TARGET_RESET
;
1182 int xtensa_deassert_reset(struct target
*target
)
1184 struct xtensa
*xtensa
= target_to_xtensa(target
);
1186 LOG_TARGET_DEBUG(target
, "halt=%d", target
->reset_halt
);
1187 if (target
->reset_halt
)
1188 xtensa_queue_dbg_reg_write(xtensa
,
1190 OCDDCR_ENABLEOCD
| OCDDCR_DEBUGINTERRUPT
);
1191 xtensa_queue_pwr_reg_write(xtensa
,
1193 PWRCTL_JTAGDEBUGUSE(xtensa
) | PWRCTL_DEBUGWAKEUP(xtensa
) | PWRCTL_MEMWAKEUP(xtensa
) |
1194 PWRCTL_COREWAKEUP(xtensa
));
1195 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
1196 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1197 if (res
!= ERROR_OK
)
1199 target
->state
= TARGET_RUNNING
;
1200 xtensa
->reset_asserted
= false;
/* Soft reset-halt is implemented by simply asserting reset; the halt side
 * is handled by the reset-halt logic in xtensa_deassert_reset(). */
int xtensa_soft_reset_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "begin");
	return xtensa_assert_reset(target);
}
1210 int xtensa_fetch_all_regs(struct target
*target
)
1212 struct xtensa
*xtensa
= target_to_xtensa(target
);
1213 struct reg
*reg_list
= xtensa
->core_cache
->reg_list
;
1214 unsigned int reg_list_size
= xtensa
->core_cache
->num_regs
;
1215 xtensa_reg_val_t cpenable
= 0, windowbase
= 0, a0
= 0, a3
;
1216 unsigned int ms_idx
= reg_list_size
;
1219 uint8_t a0_buf
[4], a3_buf
[4], ms_buf
[4];
1220 bool debug_dsrs
= !xtensa
->regs_fetched
|| LOG_LEVEL_IS(LOG_LVL_DEBUG
);
1222 union xtensa_reg_val_u
*regvals
= calloc(reg_list_size
, sizeof(*regvals
));
1224 LOG_TARGET_ERROR(target
, "unable to allocate memory for regvals!");
1227 union xtensa_reg_val_u
*dsrs
= calloc(reg_list_size
, sizeof(*dsrs
));
1229 LOG_TARGET_ERROR(target
, "unable to allocate memory for dsrs!");
1234 LOG_TARGET_DEBUG(target
, "start");
1236 /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1237 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1238 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, a3_buf
);
1239 if (xtensa
->core_config
->core_type
== XT_NX
) {
1240 /* Save (windowed) A0 as well--it will be required for reading PC */
1241 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A0
));
1242 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, a0_buf
);
1244 /* Set MS.DispSt, clear MS.DE prior to accessing ARs. This ensures ARs remain
1245 * in correct order even for reversed register groups (overflow/underflow).
1247 ms_idx
= xtensa
->nx_reg_idx
[XT_NX_REG_IDX_MS
];
1248 uint32_t ms_regno
= xtensa
->optregs
[ms_idx
- XT_NUM_REGS
].reg_num
;
1249 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, ms_regno
, XT_REG_A3
));
1250 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1251 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, ms_buf
);
1252 LOG_TARGET_DEBUG(target
, "Overriding MS (0x%x): 0x%x", ms_regno
, XT_MS_DISPST_DBG
);
1253 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, XT_MS_DISPST_DBG
);
1254 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1255 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, ms_regno
, XT_REG_A3
));
1258 int res
= xtensa_window_state_save(target
, &woe
);
1259 if (res
!= ERROR_OK
)
1260 goto xtensa_fetch_all_regs_done
;
1262 /* Assume the CPU has just halted. We now want to fill the register cache with all the
1263 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1264 * in one go, then sort everything out from the regvals variable. */
1266 /* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
1267 for (unsigned int j
= 0; j
< XT_AREGS_NUM_MAX
; j
+= 16) {
1268 /*Grab the 16 registers we can see */
1269 for (unsigned int i
= 0; i
< 16; i
++) {
1270 if (i
+ j
< xtensa
->core_config
->aregs_num
) {
1271 xtensa_queue_exec_ins(xtensa
,
1272 XT_INS_WSR(xtensa
, XT_SR_DDR
, xtensa_regs
[XT_REG_IDX_AR0
+ i
].reg_num
));
1273 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
,
1274 regvals
[XT_REG_IDX_AR0
+ i
+ j
].buf
);
1276 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DSR
,
1277 dsrs
[XT_REG_IDX_AR0
+ i
+ j
].buf
);
1280 if (xtensa
->core_config
->windowed
) {
1281 /* Now rotate the window so we'll see the next 16 registers. The final rotate
1282 * will wraparound, leaving us in the state we were.
1283 * Each ROTW rotates 4 registers on LX and 8 on NX */
1284 int rotw_arg
= (xtensa
->core_config
->core_type
== XT_LX
) ? 4 : 2;
1285 xtensa_queue_exec_ins(xtensa
, XT_INS_ROTW(xtensa
, rotw_arg
));
1288 xtensa_window_state_restore(target
, woe
);
1290 if (xtensa
->core_config
->coproc
) {
1291 /* As the very first thing after AREGS, go grab CPENABLE */
1292 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, xtensa_regs
[XT_REG_IDX_CPENABLE
].reg_num
, XT_REG_A3
));
1293 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1294 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, regvals
[XT_REG_IDX_CPENABLE
].buf
);
1296 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1297 if (res
!= ERROR_OK
) {
1298 LOG_ERROR("Failed to read ARs (%d)!", res
);
1299 goto xtensa_fetch_all_regs_done
;
1301 xtensa_core_status_check(target
);
1303 a3
= buf_get_u32(a3_buf
, 0, 32);
1304 if (xtensa
->core_config
->core_type
== XT_NX
) {
1305 a0
= buf_get_u32(a0_buf
, 0, 32);
1306 ms
= buf_get_u32(ms_buf
, 0, 32);
1309 if (xtensa
->core_config
->coproc
) {
1310 cpenable
= buf_get_u32(regvals
[XT_REG_IDX_CPENABLE
].buf
, 0, 32);
1312 /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1313 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, 0xffffffff);
1314 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1315 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, xtensa_regs
[XT_REG_IDX_CPENABLE
].reg_num
, XT_REG_A3
));
1317 /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1318 LOG_TARGET_DEBUG(target
, "CPENABLE: was 0x%" PRIx32
", all enabled", cpenable
);
1319 xtensa_reg_set(target
, XT_REG_IDX_CPENABLE
, cpenable
);
1321 /* We're now free to use any of A0-A15 as scratch registers
1322 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1323 for (unsigned int i
= 0; i
< reg_list_size
; i
++) {
1324 struct xtensa_reg_desc
*rlist
= (i
< XT_NUM_REGS
) ? xtensa_regs
: xtensa
->optregs
;
1325 unsigned int ridx
= (i
< XT_NUM_REGS
) ? i
: i
- XT_NUM_REGS
;
1326 if (xtensa_reg_is_readable(rlist
[ridx
].flags
, cpenable
) && rlist
[ridx
].exist
) {
1327 bool reg_fetched
= true;
1328 unsigned int reg_num
= rlist
[ridx
].reg_num
;
1329 switch (rlist
[ridx
].type
) {
1331 xtensa_queue_exec_ins(xtensa
, XT_INS_RUR(xtensa
, reg_num
, XT_REG_A3
));
1334 xtensa_queue_exec_ins(xtensa
, XT_INS_RFR(xtensa
, reg_num
, XT_REG_A3
));
1336 case XT_REG_SPECIAL
:
1337 if (reg_num
== XT_PC_REG_NUM_VIRTUAL
) {
1338 if (xtensa
->core_config
->core_type
== XT_LX
) {
1339 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1340 reg_num
= XT_EPC_REG_NUM_BASE
+ xtensa
->core_config
->debug
.irq_level
;
1341 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, reg_num
, XT_REG_A3
));
1343 /* NX PC read through CALL0(0) and reading A0 */
1344 xtensa_queue_exec_ins(xtensa
, XT_INS_CALL0(xtensa
, 0));
1345 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A0
));
1346 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, regvals
[i
].buf
);
1347 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DSR
, dsrs
[i
].buf
);
1348 reg_fetched
= false;
1350 } else if ((xtensa
->core_config
->core_type
== XT_LX
)
1351 && (reg_num
== xtensa_regs
[XT_REG_IDX_PS
].reg_num
)) {
1352 /* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
1353 reg_num
= XT_EPS_REG_NUM_BASE
+ xtensa
->core_config
->debug
.irq_level
;
1354 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, reg_num
, XT_REG_A3
));
1355 } else if (reg_num
== xtensa_regs
[XT_REG_IDX_CPENABLE
].reg_num
) {
1356 /* CPENABLE already read/updated; don't re-read */
1357 reg_fetched
= false;
1360 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, reg_num
, XT_REG_A3
));
1364 reg_fetched
= false;
1367 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
1368 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, regvals
[i
].buf
);
1370 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DSR
, dsrs
[i
].buf
);
1374 /* Ok, send the whole mess to the CPU. */
1375 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1376 if (res
!= ERROR_OK
) {
1377 LOG_ERROR("Failed to fetch AR regs!");
1378 goto xtensa_fetch_all_regs_done
;
1380 xtensa_core_status_check(target
);
1383 /* DSR checking: follows order in which registers are requested. */
1384 for (unsigned int i
= 0; i
< reg_list_size
; i
++) {
1385 struct xtensa_reg_desc
*rlist
= (i
< XT_NUM_REGS
) ? xtensa_regs
: xtensa
->optregs
;
1386 unsigned int ridx
= (i
< XT_NUM_REGS
) ? i
: i
- XT_NUM_REGS
;
1387 if (xtensa_reg_is_readable(rlist
[ridx
].flags
, cpenable
) && rlist
[ridx
].exist
&&
1388 (rlist
[ridx
].type
!= XT_REG_DEBUG
) &&
1389 (rlist
[ridx
].type
!= XT_REG_RELGEN
) &&
1390 (rlist
[ridx
].type
!= XT_REG_TIE
) &&
1391 (rlist
[ridx
].type
!= XT_REG_OTHER
)) {
1392 if (buf_get_u32(dsrs
[i
].buf
, 0, 32) & OCDDSR_EXECEXCEPTION
) {
1393 LOG_ERROR("Exception reading %s!", reg_list
[i
].name
);
1395 goto xtensa_fetch_all_regs_done
;
1401 if (xtensa
->core_config
->windowed
) {
1402 /* We need the windowbase to decode the general addresses. */
1403 uint32_t wb_idx
= (xtensa
->core_config
->core_type
== XT_LX
) ?
1404 XT_REG_IDX_WINDOWBASE
: xtensa
->nx_reg_idx
[XT_NX_REG_IDX_WB
];
1405 windowbase
= buf_get_u32(regvals
[wb_idx
].buf
, 0, 32);
1406 if (xtensa
->core_config
->core_type
== XT_NX
)
1407 windowbase
= (windowbase
& XT_WB_P_MSK
) >> XT_WB_P_SHIFT
;
1410 /* Decode the result and update the cache. */
1411 for (unsigned int i
= 0; i
< reg_list_size
; i
++) {
1412 struct xtensa_reg_desc
*rlist
= (i
< XT_NUM_REGS
) ? xtensa_regs
: xtensa
->optregs
;
1413 unsigned int ridx
= (i
< XT_NUM_REGS
) ? i
: i
- XT_NUM_REGS
;
1414 if (xtensa_reg_is_readable(rlist
[ridx
].flags
, cpenable
) && rlist
[ridx
].exist
) {
1415 if ((xtensa
->core_config
->windowed
) && (rlist
[ridx
].type
== XT_REG_GENERAL
)) {
1416 /* The 64-value general register set is read from (windowbase) on down.
1417 * We need to get the real register address by subtracting windowbase and
1418 * wrapping around. */
1419 enum xtensa_reg_id realadr
= xtensa_canonical_to_windowbase_offset(xtensa
, i
,
1421 buf_cpy(regvals
[realadr
].buf
, reg_list
[i
].value
, reg_list
[i
].size
);
1422 } else if (rlist
[ridx
].type
== XT_REG_RELGEN
) {
1423 buf_cpy(regvals
[rlist
[ridx
].reg_num
].buf
, reg_list
[i
].value
, reg_list
[i
].size
);
1424 if (xtensa_extra_debug_log
) {
1425 xtensa_reg_val_t regval
= buf_get_u32(regvals
[rlist
[ridx
].reg_num
].buf
, 0, 32);
1426 LOG_DEBUG("%s = 0x%x", rlist
[ridx
].name
, regval
);
1429 xtensa_reg_val_t regval
= buf_get_u32(regvals
[i
].buf
, 0, 32);
1430 bool is_dirty
= (i
== XT_REG_IDX_CPENABLE
);
1431 if (xtensa_extra_debug_log
)
1432 LOG_INFO("Register %s: 0x%X", reg_list
[i
].name
, regval
);
1433 if (rlist
[ridx
].reg_num
== XT_PC_REG_NUM_VIRTUAL
&&
1434 xtensa
->core_config
->core_type
== XT_NX
) {
1435 /* A0 from prior CALL0 points to next instruction; decrement it */
1438 } else if (i
== ms_idx
) {
1439 LOG_TARGET_DEBUG(target
, "Caching MS: 0x%x", ms
);
1443 xtensa_reg_set(target
, i
, regval
);
1444 reg_list
[i
].dirty
= is_dirty
; /*always do this _after_ xtensa_reg_set! */
1446 reg_list
[i
].valid
= true;
1448 if ((rlist
[ridx
].flags
& XT_REGF_MASK
) == XT_REGF_NOREAD
) {
1449 /* Report read-only registers all-zero but valid */
1450 reg_list
[i
].valid
= true;
1451 xtensa_reg_set(target
, i
, 0);
1453 reg_list
[i
].valid
= false;
1458 if (xtensa
->core_config
->windowed
) {
1459 /* We have used A3 as a scratch register.
1460 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and and flag for write-back.
1462 enum xtensa_reg_id ar3_idx
= xtensa_windowbase_offset_to_canonical(xtensa
, XT_REG_IDX_A3
, windowbase
);
1463 xtensa_reg_set(target
, ar3_idx
, a3
);
1464 xtensa_mark_register_dirty(xtensa
, ar3_idx
);
1466 /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1467 sprintf(xtensa
->scratch_ars
[XT_AR_SCRATCH_AR3
].chrval
, "ar%d", ar3_idx
- XT_REG_IDX_AR0
);
1468 enum xtensa_reg_id ar4_idx
= xtensa_windowbase_offset_to_canonical(xtensa
, XT_REG_IDX_A4
, windowbase
);
1469 sprintf(xtensa
->scratch_ars
[XT_AR_SCRATCH_AR4
].chrval
, "ar%d", ar4_idx
- XT_REG_IDX_AR0
);
1470 for (enum xtensa_ar_scratch_set_e s
= 0; s
< XT_AR_SCRATCH_NUM
; s
++)
1471 xtensa
->scratch_ars
[s
].intval
= false;
1474 /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1475 xtensa_reg_set(target
, XT_REG_IDX_A3
, a3
);
1476 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A3
);
1477 if (xtensa
->core_config
->core_type
== XT_NX
) {
1478 xtensa_reg_set(target
, XT_REG_IDX_A0
, a0
);
1479 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A0
);
1482 xtensa
->regs_fetched
= true;
1483 xtensa_fetch_all_regs_done
:
1489 int xtensa_get_gdb_reg_list(struct target
*target
,
1490 struct reg
**reg_list
[],
1492 enum target_register_class reg_class
)
1494 struct xtensa
*xtensa
= target_to_xtensa(target
);
1495 unsigned int num_regs
;
1497 if (reg_class
== REG_CLASS_GENERAL
) {
1498 if ((xtensa
->genpkt_regs_num
== 0) || !xtensa
->contiguous_regs_list
) {
1499 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class
);
1502 num_regs
= xtensa
->genpkt_regs_num
;
1504 /* Determine whether to return a contiguous or sparse register map */
1505 num_regs
= xtensa
->regmap_contiguous
? xtensa
->total_regs_num
: xtensa
->dbregs_num
;
1508 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class
, num_regs
);
1510 *reg_list
= calloc(num_regs
, sizeof(struct reg
*));
1514 *reg_list_size
= num_regs
;
1515 if (xtensa
->regmap_contiguous
) {
1516 assert((num_regs
<= xtensa
->total_regs_num
) && "contiguous regmap size internal error!");
1517 for (unsigned int i
= 0; i
< num_regs
; i
++)
1518 (*reg_list
)[i
] = xtensa
->contiguous_regs_list
[i
];
1522 for (unsigned int i
= 0; i
< num_regs
; i
++)
1523 (*reg_list
)[i
] = (struct reg
*)&xtensa
->empty_regs
[i
];
1525 for (unsigned int i
= 0; i
< xtensa
->core_cache
->num_regs
&& k
< num_regs
; i
++) {
1526 if (xtensa
->core_cache
->reg_list
[i
].exist
) {
1527 struct xtensa_reg_desc
*rlist
= (i
< XT_NUM_REGS
) ? xtensa_regs
: xtensa
->optregs
;
1528 unsigned int ridx
= (i
< XT_NUM_REGS
) ? i
: i
- XT_NUM_REGS
;
1529 int sparse_idx
= rlist
[ridx
].dbreg_num
;
1530 if (i
== XT_REG_IDX_PS
&& xtensa
->core_config
->core_type
== XT_LX
) {
1531 if (xtensa
->eps_dbglevel_idx
== 0) {
1532 LOG_ERROR("eps_dbglevel_idx not set\n");
1535 (*reg_list
)[sparse_idx
] = &xtensa
->core_cache
->reg_list
[xtensa
->eps_dbglevel_idx
];
1536 if (xtensa_extra_debug_log
)
1537 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1538 sparse_idx
, xtensa
->core_config
->debug
.irq_level
,
1539 xtensa_reg_get_value((*reg_list
)[sparse_idx
]));
1540 } else if (rlist
[ridx
].type
== XT_REG_RELGEN
) {
1541 (*reg_list
)[sparse_idx
- XT_REG_IDX_ARFIRST
] = &xtensa
->core_cache
->reg_list
[i
];
1543 (*reg_list
)[sparse_idx
] = &xtensa
->core_cache
->reg_list
[i
];
1545 if (i
== XT_REG_IDX_PC
)
1546 /* Make a duplicate copy of PC for external access */
1547 (*reg_list
)[XT_PC_DBREG_NUM_BASE
] = &xtensa
->core_cache
->reg_list
[i
];
1553 LOG_ERROR("SPARSE GDB reg list full (size %d)", k
);
1558 int xtensa_mmu_is_enabled(struct target
*target
, int *enabled
)
1560 struct xtensa
*xtensa
= target_to_xtensa(target
);
1561 *enabled
= xtensa
->core_config
->mmu
.itlb_entries_count
> 0 ||
1562 xtensa
->core_config
->mmu
.dtlb_entries_count
> 0;
1566 int xtensa_halt(struct target
*target
)
1568 struct xtensa
*xtensa
= target_to_xtensa(target
);
1570 LOG_TARGET_DEBUG(target
, "start");
1571 if (target
->state
== TARGET_HALTED
) {
1572 LOG_TARGET_DEBUG(target
, "target was already halted");
1575 /* First we have to read dsr and check if the target stopped */
1576 int res
= xtensa_dm_core_status_read(&xtensa
->dbg_mod
);
1577 if (res
!= ERROR_OK
) {
1578 LOG_TARGET_ERROR(target
, "Failed to read core status!");
1581 LOG_TARGET_DEBUG(target
, "Core status 0x%" PRIx32
, xtensa_dm_core_status_get(&xtensa
->dbg_mod
));
1582 if (!xtensa_is_stopped(target
)) {
1583 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DCRSET
, OCDDCR_ENABLEOCD
| OCDDCR_DEBUGINTERRUPT
);
1584 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
1585 res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1586 if (res
!= ERROR_OK
)
1587 LOG_TARGET_ERROR(target
, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1593 int xtensa_prepare_resume(struct target
*target
,
1595 target_addr_t address
,
1596 int handle_breakpoints
,
1597 int debug_execution
)
1599 struct xtensa
*xtensa
= target_to_xtensa(target
);
1602 LOG_TARGET_DEBUG(target
,
1603 "current=%d address=" TARGET_ADDR_FMT
", handle_breakpoints=%i, debug_execution=%i)",
1609 if (target
->state
!= TARGET_HALTED
) {
1610 LOG_TARGET_ERROR(target
, "not halted");
1611 return ERROR_TARGET_NOT_HALTED
;
1613 xtensa
->halt_request
= false;
1615 if (address
&& !current
) {
1616 xtensa_reg_set(target
, XT_REG_IDX_PC
, address
);
1618 uint32_t cause
= xtensa_cause_get(target
);
1619 LOG_TARGET_DEBUG(target
, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1620 cause
, (cause
& DEBUGCAUSE_DB
), (cause
& (DEBUGCAUSE_BI
| DEBUGCAUSE_BN
)));
1621 if (cause
& DEBUGCAUSE_DB
)
1622 /* We stopped due to a watchpoint. We can't just resume executing the
1623 * instruction again because */
1624 /* that would trigger the watchpoint again. To fix this, we single-step,
1625 * which ignores watchpoints. */
1626 xtensa_do_step(target
, current
, address
, handle_breakpoints
);
1627 if (cause
& (DEBUGCAUSE_BI
| DEBUGCAUSE_BN
))
1628 /* We stopped due to a break instruction. We can't just resume executing the
1629 * instruction again because */
1630 /* that would trigger the break again. To fix this, we single-step, which
1632 xtensa_do_step(target
, current
, address
, handle_breakpoints
);
1635 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1636 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1637 for (unsigned int slot
= 0; slot
< xtensa
->core_config
->debug
.ibreaks_num
; slot
++) {
1638 if (xtensa
->hw_brps
[slot
]) {
1639 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1640 xtensa_reg_set(target
, XT_REG_IDX_IBREAKA0
+ slot
, xtensa
->hw_brps
[slot
]->address
);
1641 if (xtensa
->core_config
->core_type
== XT_NX
)
1642 xtensa_reg_set(target
, xtensa
->nx_reg_idx
[XT_NX_REG_IDX_IBREAKC0
] + slot
, XT_IBREAKC_FB
);
1646 if (xtensa
->core_config
->core_type
== XT_LX
)
1647 xtensa_reg_set(target
, XT_REG_IDX_IBREAKENABLE
, bpena
);
1649 /* Here we write all registers to the targets */
1650 int res
= xtensa_write_dirty_registers(target
);
1651 if (res
!= ERROR_OK
)
1652 LOG_TARGET_ERROR(target
, "Failed to write back register cache.");
1656 int xtensa_do_resume(struct target
*target
)
1658 struct xtensa
*xtensa
= target_to_xtensa(target
);
1660 LOG_TARGET_DEBUG(target
, "start");
1662 xtensa_cause_reset(target
);
1663 xtensa_queue_exec_ins(xtensa
, XT_INS_RFDO(xtensa
));
1664 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
1665 if (res
!= ERROR_OK
) {
1666 LOG_TARGET_ERROR(target
, "Failed to exec RFDO %d!", res
);
1669 xtensa_core_status_check(target
);
1673 int xtensa_resume(struct target
*target
,
1675 target_addr_t address
,
1676 int handle_breakpoints
,
1677 int debug_execution
)
1679 LOG_TARGET_DEBUG(target
, "start");
1680 int res
= xtensa_prepare_resume(target
, current
, address
, handle_breakpoints
, debug_execution
);
1681 if (res
!= ERROR_OK
) {
1682 LOG_TARGET_ERROR(target
, "Failed to prepare for resume!");
1685 res
= xtensa_do_resume(target
);
1686 if (res
!= ERROR_OK
) {
1687 LOG_TARGET_ERROR(target
, "Failed to resume!");
1691 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1692 if (!debug_execution
)
1693 target
->state
= TARGET_RUNNING
;
1695 target
->state
= TARGET_DEBUG_RUNNING
;
1697 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1702 static bool xtensa_pc_in_winexc(struct target
*target
, target_addr_t pc
)
1704 struct xtensa
*xtensa
= target_to_xtensa(target
);
1705 uint8_t insn_buf
[XT_ISNS_SZ_MAX
];
1706 int err
= xtensa_read_buffer(target
, pc
, sizeof(insn_buf
), insn_buf
);
1707 if (err
!= ERROR_OK
)
1710 xtensa_insn_t insn
= buf_get_u32(insn_buf
, 0, 24);
1711 xtensa_insn_t masked
= insn
& XT_INS_L32E_S32E_MASK(xtensa
);
1712 if (masked
== XT_INS_L32E(xtensa
, 0, 0, 0) || masked
== XT_INS_S32E(xtensa
, 0, 0, 0))
1715 masked
= insn
& XT_INS_RFWO_RFWU_MASK(xtensa
);
1716 if (masked
== XT_INS_RFWO(xtensa
) || masked
== XT_INS_RFWU(xtensa
))
/**
 * Execute exactly one instruction on a halted core.
 *
 * On LX cores this arms ICOUNT/ICOUNTLEVEL so the debug interrupt fires
 * after one counted instruction; on NX cores it uses DCR.StepRequest.
 * Handles several special cases: stepping past BREAK/BREAK.N, stepping
 * off a triggered watchpoint (watchpoints are temporarily disabled),
 * optionally masking interrupts during the step, and stepping out of
 * window-exception handlers.
 *
 * NOTE(review): this chunk was reconstructed from a fragmented extraction;
 * elided structural lines (braces/returns/else) follow the upstream OpenOCD
 * implementation — verify against the repository copy.
 */
int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	const uint32_t icount_val = -2;	/* ICOUNT value to load for 1 step */
	xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
	xtensa_reg_val_t icountlvl, cause;
	xtensa_reg_val_t oldps, oldpc, cur_pc;
	bool ps_lowered = false;

	LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
		current, address, handle_breakpoints);

	/* Stepping only makes sense from the halted state */
	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* LX keeps the pre-debug PS in EPS[debuglevel]; we must know its index */
	if (xtensa->eps_dbglevel_idx == 0 && xtensa->core_config->core_type == XT_LX) {
		LOG_TARGET_ERROR(target, "eps_dbglevel_idx not set\n");
		return ERROR_FAIL;
	}

	/* Save old ps (EPS[dbglvl] on LX), pc */
	oldps = xtensa_reg_get(target, (xtensa->core_config->core_type == XT_LX) ?
		xtensa->eps_dbglevel_idx : XT_REG_IDX_PS);
	oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);

	cause = xtensa_cause_get(target);
	LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
		oldps,
		oldpc,
		cause,
		xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
	if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
		/* handle hard-coded SW breakpoints (e.g. syscalls) */
		LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* pretend that we have stepped */
		if (cause & DEBUGCAUSE_BI)
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3);	/* PC = PC+3 */
		else
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2);	/* PC = PC+2 */
		return ERROR_OK;
	}

	/* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
	 * at which the instructions are to be counted while stepping.
	 *
	 * For example, if we need to step by 2 instructions, and an interrupt occurs
	 * in between, the processor will trigger the interrupt and halt after the 2nd
	 * instruction within the interrupt vector and/or handler.
	 *
	 * However, sometimes we don't want the interrupt handlers to be executed at all
	 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
	 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
	 * code from being counted during stepping. Note that C exception handlers must
	 * run at level 0 and hence will be counted and stepped into, should one occur.
	 *
	 * TODO: Certain instructions should never be single-stepped and should instead
	 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
	 * RFI >= DBGLEVEL.
	 */
	if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_TARGET_WARNING(target,
				"disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
			return ERROR_FAIL;
		}
		/* Update ICOUNTLEVEL accordingly */
		icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
	} else {
		icountlvl = xtensa->core_config->debug.irq_level;
	}

	if (cause & DEBUGCAUSE_DB) {
		/* We stopped due to a watchpoint. We can't just resume executing the instruction again because
		 * that would trigger the watchpoint again. To fix this, we remove watchpoints,single-step and
		 * re-enable the watchpoint. */
		LOG_TARGET_DEBUG(target,
			"Single-stepping to get past instruction that triggered the watchpoint...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* Save all DBREAKCx registers and set to 0 to disable watchpoints */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
			dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
		}
	}

	if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
		/* handle normal SW breakpoint */
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
	if (xtensa->core_config->core_type == XT_LX && ((oldps & 0xf) >= icountlvl)) {
		/* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
		ps_lowered = true;
		uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
		LOG_TARGET_DEBUG(target,
			"Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			newps,
			oldps);
	}
	do {
		if (xtensa->core_config->core_type == XT_LX) {
			xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
			xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
		} else {
			/* ICOUNT doesn't exist on NX; request a single step via the DCR */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_STEPREQUEST);
		}

		/* Now that ICOUNT (LX) or DCR.StepRequest (NX) is set,
		 * we can resume as if we were going to run */
		res = xtensa_prepare_resume(target, current, address, 0, 0);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
			return res;
		}
		res = xtensa_do_resume(target);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
			return res;
		}

		/* Wait for stepping to complete */
		long long start = timeval_ms();
		while (timeval_ms() < start + 500) {
			/* Do not use target_poll here, it also triggers other things... just manually read the DSR
			 *until stepping is complete. */
			usleep(1000);
			res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
			if (res != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Failed to read core status!");
				return res;
			}
			if (xtensa_is_stopped(target))
				break;
			usleep(1000);
		}
		LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
			xtensa_dm_core_status_get(&xtensa->dbg_mod));
		if (!xtensa_is_stopped(target)) {
			LOG_TARGET_WARNING(target,
				"Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
			target->debug_reason = DBG_REASON_NOTHALTED;
			target->state = TARGET_RUNNING;
			return ERROR_FAIL;
		}

		xtensa_fetch_all_regs(target);
		cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);

		LOG_TARGET_DEBUG(target,
			"cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
			xtensa_reg_get(target, XT_REG_IDX_PS),
			cur_pc,
			xtensa_cause_get(target),
			xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));

		/* Do not step into WindowOverflow if ISRs are masked.
		   If we stop in WindowOverflow at breakpoint with masked ISRs and
		   try to do a step it will get us out of that handler */
		if (xtensa->core_config->windowed &&
			xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
			xtensa_pc_in_winexc(target, cur_pc)) {
			/* isrmask = on, need to step out of the window exception handler */
			LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
			oldpc = cur_pc;
			address = oldpc + 3;
			continue;	/* step again, past the handler instruction */
		}

		if (oldpc == cur_pc)
			LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
		else
			LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
		break;
	} while (true);

	target->debug_reason = DBG_REASON_SINGLESTEP;
	target->state = TARGET_HALTED;
	LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);

	if (cause & DEBUGCAUSE_DB) {
		LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
		/* Restore the DBREAKCx registers */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
	}

	/* Restore int level */
	if (ps_lowered) {
		LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			oldps);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
	}

	/* write ICOUNTLEVEL back to zero */
	xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
	/* TODO: can we skip writing dirty registers and re-fetching them? */
	res = xtensa_write_dirty_registers(target);
	xtensa_fetch_all_regs(target);
	return res;
}
1934 int xtensa_step(struct target
*target
, int current
, target_addr_t address
, int handle_breakpoints
)
1936 int retval
= xtensa_do_step(target
, current
, address
, handle_breakpoints
);
1937 if (retval
!= ERROR_OK
)
1939 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
1945 * Returns true if two ranges are overlapping
1947 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start
,
1948 target_addr_t r1_end
,
1949 target_addr_t r2_start
,
1950 target_addr_t r2_end
)
1952 if ((r2_start
>= r1_start
) && (r2_start
< r1_end
))
1953 return true; /* r2_start is in r1 region */
1954 if ((r2_end
> r1_start
) && (r2_end
<= r1_end
))
1955 return true; /* r2_end is in r1 region */
1960 * Returns a size of overlapped region of two ranges.
1962 static inline target_addr_t
xtensa_get_overlap_size(target_addr_t r1_start
,
1963 target_addr_t r1_end
,
1964 target_addr_t r2_start
,
1965 target_addr_t r2_end
)
1967 if (xtensa_memory_regions_overlap(r1_start
, r1_end
, r2_start
, r2_end
)) {
1968 target_addr_t ov_start
= r1_start
< r2_start
? r2_start
: r1_start
;
1969 target_addr_t ov_end
= r1_end
> r2_end
? r2_end
: r1_end
;
1970 return ov_end
- ov_start
;
1976 * Check if the address gets to memory regions, and its access mode
1978 static bool xtensa_memory_op_validate_range(struct xtensa
*xtensa
, target_addr_t address
, size_t size
, int access
)
1980 target_addr_t adr_pos
= address
; /* address cursor set to the beginning start */
1981 target_addr_t adr_end
= address
+ size
; /* region end */
1982 target_addr_t overlap_size
;
1983 const struct xtensa_local_mem_region_config
*cm
; /* current mem region */
1985 while (adr_pos
< adr_end
) {
1986 cm
= xtensa_target_memory_region_find(xtensa
, adr_pos
);
1987 if (!cm
) /* address is not belong to anything */
1989 if ((cm
->access
& access
) != access
) /* access check */
1991 overlap_size
= xtensa_get_overlap_size(cm
->base
, (cm
->base
+ cm
->size
), adr_pos
, adr_end
);
1992 assert(overlap_size
!= 0);
1993 adr_pos
+= overlap_size
;
1998 int xtensa_read_memory(struct target
*target
, target_addr_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
)
2000 struct xtensa
*xtensa
= target_to_xtensa(target
);
2001 /* We are going to read memory in 32-bit increments. This may not be what the calling
2002 * function expects, so we may need to allocate a temp buffer and read into that first. */
2003 target_addr_t addrstart_al
= ALIGN_DOWN(address
, 4);
2004 target_addr_t addrend_al
= ALIGN_UP(address
+ size
* count
, 4);
2005 target_addr_t adr
= addrstart_al
;
2007 bool bswap
= xtensa
->target
->endianness
== TARGET_BIG_ENDIAN
;
2009 if (target
->state
!= TARGET_HALTED
) {
2010 LOG_TARGET_ERROR(target
, "not halted");
2011 return ERROR_TARGET_NOT_HALTED
;
2014 if (!xtensa
->permissive_mode
) {
2015 if (!xtensa_memory_op_validate_range(xtensa
, address
, (size
* count
),
2016 XT_MEM_ACCESS_READ
)) {
2017 LOG_DEBUG("address " TARGET_ADDR_FMT
" not readable", address
);
2022 unsigned int alloc_bytes
= ALIGN_UP(addrend_al
- addrstart_al
, sizeof(uint32_t));
2023 albuff
= calloc(alloc_bytes
, 1);
2025 LOG_TARGET_ERROR(target
, "Out of memory allocating %" PRId64
" bytes!",
2026 addrend_al
- addrstart_al
);
2027 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2030 /* We're going to use A3 here */
2031 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A3
);
2032 /* Write start address to A3 */
2033 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, addrstart_al
);
2034 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
2035 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
2036 if (xtensa
->probe_lsddr32p
!= 0) {
2037 xtensa_queue_exec_ins(xtensa
, XT_INS_LDDR32P(xtensa
, XT_REG_A3
));
2038 for (unsigned int i
= 0; adr
!= addrend_al
; i
+= sizeof(uint32_t), adr
+= sizeof(uint32_t))
2039 xtensa_queue_dbg_reg_read(xtensa
,
2040 (adr
+ sizeof(uint32_t) == addrend_al
) ? XDMREG_DDR
: XDMREG_DDREXEC
,
2043 xtensa_mark_register_dirty(xtensa
, XT_REG_IDX_A4
);
2044 for (unsigned int i
= 0; adr
!= addrend_al
; i
+= sizeof(uint32_t), adr
+= sizeof(uint32_t)) {
2045 xtensa_queue_exec_ins(xtensa
, XT_INS_L32I(xtensa
, XT_REG_A3
, XT_REG_A4
, 0));
2046 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
, XT_SR_DDR
, XT_REG_A4
));
2047 xtensa_queue_dbg_reg_read(xtensa
, XDMREG_DDR
, &albuff
[i
]);
2048 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, adr
+ sizeof(uint32_t));
2049 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
2052 int res
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
2053 if (res
== ERROR_OK
) {
2054 bool prev_suppress
= xtensa
->suppress_dsr_errors
;
2055 xtensa
->suppress_dsr_errors
= true;
2056 res
= xtensa_core_status_check(target
);
2057 if (xtensa
->probe_lsddr32p
== -1)
2058 xtensa
->probe_lsddr32p
= 1;
2059 xtensa
->suppress_dsr_errors
= prev_suppress
;
2061 if (res
!= ERROR_OK
) {
2062 if (xtensa
->probe_lsddr32p
!= 0) {
2063 /* Disable fast memory access instructions and retry before reporting an error */
2064 LOG_TARGET_DEBUG(target
, "Disabling LDDR32.P/SDDR32.P");
2065 xtensa
->probe_lsddr32p
= 0;
2066 res
= xtensa_read_memory(target
, address
, size
, count
, albuff
);
2069 LOG_TARGET_WARNING(target
, "Failed reading %d bytes at address "TARGET_ADDR_FMT
,
2070 count
* size
, address
);
2075 buf_bswap32(albuff
, albuff
, addrend_al
- addrstart_al
);
2076 memcpy(buffer
, albuff
+ (address
& 3), (size
* count
));
/**
 * Byte-granular read helper: delegates to xtensa_read_memory() with an
 * element size of 1, which already copes with unaligned start/length.
 */
int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	/* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
	return xtensa_read_memory(target, address, 1, count, buffer);
}
/**
 * Write target memory via the debug module's DDR register.
 *
 * Writes always happen as aligned 32-bit words. For unaligned requests
 * the surrounding head/tail words are first read back (read-modify-write)
 * into a temporary aligned buffer. The fast SDDR32.P path is probed first
 * and disabled on failure, retrying with plain S32I sequences. On success,
 * ICACHE lines covering the range are invalidated and DCACHE lines written
 * back+invalidated so the core observes the new data.
 *
 * NOTE(review): this chunk was reconstructed from a fragmented extraction;
 * elided structural lines follow the upstream OpenOCD implementation —
 * verify against the repository copy.
 */
int xtensa_write_memory(struct target *target,
	target_addr_t address,
	uint32_t size,
	uint32_t count,
	const uint8_t *buffer)
{
	/* This memory write function can get thrown nigh everything into it, from
	 * aligned uint32 writes to unaligned uint8ths. The Xtensa memory doesn't always
	 * accept anything but aligned uint32 writes, though. That is why we convert
	 * everything into that. */
	struct xtensa *xtensa = target_to_xtensa(target);
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
	target_addr_t adr = addrstart_al;
	int res;
	uint8_t *albuff;
	bool fill_head_tail = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!xtensa->permissive_mode) {
		if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
			LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
			return ERROR_FAIL;
		}
	}

	if (size == 0 || count == 0 || !buffer)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Allocate a temporary buffer to put the aligned bytes in, if needed. */
	if (addrstart_al == address && addrend_al == address + (size * count)) {
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
			/* Need a buffer for byte-swapping */
			albuff = malloc(addrend_al - addrstart_al);
		else
			/* We discard the const here because albuff can also be non-const */
			albuff = (uint8_t *)buffer;
	} else {
		fill_head_tail = true;
		albuff = malloc(addrend_al - addrstart_al);
	}
	if (!albuff) {
		LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
			addrend_al - addrstart_al);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* We're going to use A3 here */
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

	/* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
	if (fill_head_tail) {
		/* See if we need to read the first and/or last word. */
		if (address & 3) {
			/* Head word is partially overwritten: read it back first */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[0]);
		}
		if ((address + (size * count)) & 3) {
			/* Tail word is partially overwritten: read it back too */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
				&albuff[addrend_al - addrstart_al - 4]);
		}
		/* Grab bytes */
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK) {
			LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
			if (albuff != buffer)
				free(albuff);
			return res;
		}
		xtensa_core_status_check(target);
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
			bool swapped_w0 = false;
			if (address & 3) {
				buf_bswap32(&albuff[0], &albuff[0], 4);
				swapped_w0 = true;
			}
			if ((address + (size * count)) & 3) {
				if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
					/* Don't double-swap if buffer start/end are within the same word */
				} else {
					buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
						&albuff[addrend_al - addrstart_al - 4], 4);
				}
			}
		}
		/* Copy data to be written into the aligned buffer (in host-endianness) */
		memcpy(&albuff[address & 3], buffer, size * count);
		/* Now we can write albuff in aligned uint32s. */
	}

	if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
		buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);

	/* Write start address to A3 */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	/* Write the aligned buffer */
	if (xtensa->probe_lsddr32p != 0) {
		/* Fast path: first word via explicit SDDR32.P, rest via DDREXEC auto-repeat */
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			if (i == 0) {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
				xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
			}
		}
	} else {
		/* Slow path: explicit S32I through A4 with manual address advance */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
			xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		}
	}

	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res == ERROR_OK) {
		/* Suppress DSR noise while probing whether SDDR32.P is supported */
		bool prev_suppress = xtensa->suppress_dsr_errors;
		xtensa->suppress_dsr_errors = true;
		res = xtensa_core_status_check(target);
		if (xtensa->probe_lsddr32p == -1)
			xtensa->probe_lsddr32p = 1;
		xtensa->suppress_dsr_errors = prev_suppress;
	}
	if (res != ERROR_OK) {
		if (xtensa->probe_lsddr32p != 0) {
			/* Disable fast memory access instructions and retry before reporting an error */
			LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
			xtensa->probe_lsddr32p = 0;
			res = xtensa_write_memory(target, address, size, count, buffer);
		} else {
			LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
				count * size, address);
		}
	} else {
		/* Invalidate ICACHE, writeback DCACHE if present */
		bool issue_ihi = xtensa_is_icacheable(xtensa, address) &&
			xtensa_region_ar_exec(target, addrstart_al, addrend_al);
		bool issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
		LOG_TARGET_DEBUG(target, "Cache OPs: IHI %d, DHWBI %d", issue_ihi, issue_dhwbi);
		if (issue_ihi || issue_dhwbi) {
			uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
			uint32_t dlinesize = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
			uint32_t linesize = MIN(ilinesize, dlinesize);
			uint32_t off = 0;
			adr = addrstart_al;

			while ((adr + off) < addrend_al) {
				if (off == 0) {
					/* Write start address to A3 */
					xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr);
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				}
				if (issue_ihi)
					xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
				if (issue_dhwbi)
					xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
				off += linesize;
				if (off > 1020) {
					/* IHI, DHWB have 8-bit immediate operands (0..1020) */
					adr += off;
					off = 0;
				}
			}

			/* Execute cache WB/INV instructions */
			res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
			if (res != ERROR_OK)
				LOG_TARGET_ERROR(target,
					"Error queuing cache writeback/invaldate instruction(s): %d",
					res);
			res = xtensa_core_status_check(target);
			if (res != ERROR_OK)
				LOG_TARGET_ERROR(target,
					"Error issuing cache writeback/invaldate instruction(s): %d",
					res);
		}
	}
	if (albuff != buffer)
		free(albuff);

	return res;
}
/**
 * Byte-granular write helper: delegates to xtensa_write_memory() with an
 * element size of 1, which handles alignment and cache maintenance.
 */
int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	/* xtensa_write_memory can handle everything. Just pass on to that. */
	return xtensa_write_memory(target, address, 1, count, buffer);
}
/**
 * Target-side memory checksum — not implemented for Xtensa.
 * Always fails, which makes the generic code fall back to reading the
 * memory and checksumming on the host.
 */
int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
{
	LOG_WARNING("not implemented yet");
	return ERROR_FAIL;
}
/**
 * Periodic poll handler: refresh power/core status from the debug module
 * and derive the OpenOCD target state (RESET / UNKNOWN / HALTED / RUNNING).
 *
 * On a fresh halt, fetches registers, classifies DEBUGCAUSE into an OpenOCD
 * debug reason (watchpoint == breakpoint > single step > debug interrupt),
 * clears pending debug-interrupt bits, and on NX re-enables imprecise
 * exceptions by clearing PS.DIEXC. Also detects trace (TRAX) completion.
 *
 * NOTE(review): this chunk was reconstructed from a fragmented extraction;
 * elided structural lines follow the upstream OpenOCD implementation —
 * verify against the repository copy.
 */
int xtensa_poll(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return ERROR_TARGET_NOT_EXAMINED;
	}

	/* Read power status, clearing the sticky was-reset bits as we go */
	int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET(xtensa) |
		PWRSTAT_COREWASRESET(xtensa));
	if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
		LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
			xtensa->dbg_mod.power_status.stat,
			PWRSTAT_DEBUGWASRESET(xtensa) | PWRSTAT_COREWASRESET(xtensa),
			xtensa->dbg_mod.power_status.stath);
	if (res != ERROR_OK)
		return res;

	if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
		/* Debug logic lost its state; re-program the SMP break configuration */
		LOG_TARGET_INFO(target, "Debug controller was reset.");
		res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
		if (res != ERROR_OK)
			return res;
	}
	if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
		LOG_TARGET_INFO(target, "Core was reset.");
	xtensa_dm_power_status_cache(&xtensa->dbg_mod);
	/* Enable JTAG, set reset if needed */
	res = xtensa_wakeup(target);
	if (res != ERROR_OK)
		return res;

	uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
	res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
		LOG_TARGET_DEBUG(target,
			"DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
			prev_dsr,
			xtensa->dbg_mod.core_status.dsr);
	if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET(xtensa)) {
		/* if RESET state is persitent */
		target->state = TARGET_RESET;
	} else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
		LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
			xtensa->dbg_mod.core_status.dsr,
			xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
		target->state = TARGET_UNKNOWN;
		/* Allow a few polls for the core to come back before declaring it gone */
		if (xtensa->come_online_probes_num == 0)
			target->examined = false;
		else
			xtensa->come_online_probes_num--;
	} else if (xtensa_is_stopped(target)) {
		if (target->state != TARGET_HALTED) {
			enum target_state oldstate = target->state;
			target->state = TARGET_HALTED;
			/* Examine why the target has been halted */
			target->debug_reason = DBG_REASON_DBGRQ;
			xtensa_fetch_all_regs(target);
			/* When setting debug reason DEBUGCAUSE events have the following
			 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
			/* Watchpoint and breakpoint events at the same time results in special
			 * debug reason: DBG_REASON_WPTANDBKPT. */
			uint32_t halt_cause = xtensa_cause_get(target);
			/* TODO: Add handling of DBG_REASON_EXC_CATCH */
			if (halt_cause & DEBUGCAUSE_IC)
				target->debug_reason = DBG_REASON_SINGLESTEP;
			if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
				if (halt_cause & DEBUGCAUSE_DB)
					target->debug_reason = DBG_REASON_WPTANDBKPT;
				else
					target->debug_reason = DBG_REASON_BREAKPOINT;
			} else if (halt_cause & DEBUGCAUSE_DB) {
				target->debug_reason = DBG_REASON_WATCHPOINT;
			}
			LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
				", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
				xtensa_reg_get(target, XT_REG_IDX_PC),
				target->debug_reason,
				oldstate);
			LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
				halt_cause,
				xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
				xtensa->dbg_mod.core_status.dsr);
			/* Clear pending debug-interrupt sources so they don't re-trigger */
			xtensa_dm_core_status_clear(
				&xtensa->dbg_mod,
				OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
				OCDDSR_DEBUGINTTRAX |
				OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
			if (xtensa->core_config->core_type == XT_NX) {
				/* Enable imprecise exceptions while in halted state */
				xtensa_reg_val_t ps = xtensa_reg_get(target, XT_REG_IDX_PS);
				xtensa_reg_val_t newps = ps & ~(XT_PS_DIEXC_MSK);
				xtensa_mark_register_dirty(xtensa, XT_REG_IDX_PS);
				LOG_TARGET_DEBUG(target, "Enabling PS.DIEXC: 0x%08x -> 0x%08x", ps, newps);
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, newps);
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
				res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
				if (res != ERROR_OK) {
					LOG_TARGET_ERROR(target, "Failed to write PS.DIEXC (%d)!", res);
					return res;
				}
				xtensa_core_status_check(target);
			}
		}
	} else {
		target->debug_reason = DBG_REASON_NOTHALTED;
		if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
			target->state = TARGET_RUNNING;
			target->debug_reason = DBG_REASON_NOTHALTED;
		}
	}
	if (xtensa->trace_active) {
		/* Detect if tracing was active but has stopped. */
		struct xtensa_trace_status trace_status;
		res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
		if (res == ERROR_OK) {
			if (!(trace_status.stat & TRAXSTAT_TRACT)) {
				LOG_INFO("Detected end of trace.");
				if (trace_status.stat & TRAXSTAT_PCMTG)
					LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
				if (trace_status.stat & TRAXSTAT_PTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
				if (trace_status.stat & TRAXSTAT_CTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
				xtensa->trace_active = false;
			}
		}
	}
	return ERROR_OK;
}
/**
 * Patch instruction bytes in memory with proper cache maintenance.
 *
 * Sequence: write-back+invalidate the affected DCACHE line(s) and
 * invalidate the ICACHE line(s), write the new bytes via
 * target_write_buffer(), then write-back DCACHE again so the ICACHE
 * refill sees the updated data. Handles patches straddling one cache
 * line boundary (a second IHI/DHWB at offset 4); patches larger than an
 * ICACHE line are rejected.
 *
 * NOTE(review): reconstructed from a fragmented extraction; elided
 * structural lines follow the upstream OpenOCD implementation.
 */
static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address) &&
		xtensa_region_ar_exec(target, address, address + size);
	unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
	uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
	uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
	/* Does the patch fit within a single cache line? */
	unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
	unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
	int ret;

	if (size > icache_line_size)
		return ERROR_FAIL;

	if (issue_ihi || issue_dhwbi) {
		/* We're going to use A3 here */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

		/* Write start address to A3 and invalidate */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		LOG_TARGET_DEBUG(target, "IHI %d, DHWBI %d for address " TARGET_ADDR_FMT,
			issue_ihi, issue_dhwbi, address);
		if (issue_dhwbi) {
			xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
			if (!same_dc_line) {
				LOG_TARGET_DEBUG(target,
					"DHWBI second dcache line for address "TARGET_ADDR_FMT,
					address + 4);
				xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
			}
		}
		if (issue_ihi) {
			xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
			if (!same_ic_line) {
				LOG_TARGET_DEBUG(target,
					"IHI second icache line for address "TARGET_ADDR_FMT,
					address + 4);
				xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
			}
		}

		/* Execute invalidate instructions */
		ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		xtensa_core_status_check(target);
		if (ret != ERROR_OK) {
			LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
			return ret;
		}
	}

	/* Write new instructions to memory */
	ret = target_write_buffer(target, address, size, buffer);
	if (ret != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
		return ret;
	}

	if (issue_dhwbi) {
		/* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
		LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
		if (!same_dc_line) {
			LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
		}

		/* Execute invalidate instructions */
		ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		xtensa_core_status_check(target);
	}

	/* TODO: Handle L2 cache if present */
	return ret;
}
2517 static int xtensa_sw_breakpoint_add(struct target
*target
,
2518 struct breakpoint
*breakpoint
,
2519 struct xtensa_sw_breakpoint
*sw_bp
)
2521 struct xtensa
*xtensa
= target_to_xtensa(target
);
2522 int ret
= target_read_buffer(target
, breakpoint
->address
, XT_ISNS_SZ_MAX
, sw_bp
->insn
);
2523 if (ret
!= ERROR_OK
) {
2524 LOG_TARGET_ERROR(target
, "Failed to read original instruction (%d)!", ret
);
2528 sw_bp
->insn_sz
= MIN(XT_ISNS_SZ_MAX
, breakpoint
->length
);
2529 sw_bp
->oocd_bp
= breakpoint
;
2531 uint32_t break_insn
= sw_bp
->insn_sz
== XT_ISNS_SZ_MAX
? XT_INS_BREAK(xtensa
, 0, 0) : XT_INS_BREAKN(xtensa
, 0);
2533 /* Underlying memory write will convert instruction endianness, don't do that here */
2534 ret
= xtensa_update_instruction(target
, breakpoint
->address
, sw_bp
->insn_sz
, (uint8_t *)&break_insn
);
2535 if (ret
!= ERROR_OK
) {
2536 LOG_TARGET_ERROR(target
, "Failed to write breakpoint instruction (%d)!", ret
);
2543 static int xtensa_sw_breakpoint_remove(struct target
*target
, struct xtensa_sw_breakpoint
*sw_bp
)
2545 int ret
= xtensa_update_instruction(target
, sw_bp
->oocd_bp
->address
, sw_bp
->insn_sz
, sw_bp
->insn
);
2546 if (ret
!= ERROR_OK
) {
2547 LOG_TARGET_ERROR(target
, "Failed to write insn (%d)!", ret
);
2550 sw_bp
->oocd_bp
= NULL
;
2554 int xtensa_breakpoint_add(struct target
*target
, struct breakpoint
*breakpoint
)
2556 struct xtensa
*xtensa
= target_to_xtensa(target
);
2559 if (breakpoint
->type
== BKPT_SOFT
) {
2560 for (slot
= 0; slot
< XT_SW_BREAKPOINTS_MAX_NUM
; slot
++) {
2561 if (!xtensa
->sw_brps
[slot
].oocd_bp
||
2562 xtensa
->sw_brps
[slot
].oocd_bp
== breakpoint
)
2565 if (slot
== XT_SW_BREAKPOINTS_MAX_NUM
) {
2566 LOG_TARGET_WARNING(target
, "No free slots to add SW breakpoint!");
2567 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2569 int ret
= xtensa_sw_breakpoint_add(target
, breakpoint
, &xtensa
->sw_brps
[slot
]);
2570 if (ret
!= ERROR_OK
) {
2571 LOG_TARGET_ERROR(target
, "Failed to add SW breakpoint!");
2574 LOG_TARGET_DEBUG(target
, "placed SW breakpoint %u @ " TARGET_ADDR_FMT
,
2576 breakpoint
->address
);
2580 for (slot
= 0; slot
< xtensa
->core_config
->debug
.ibreaks_num
; slot
++) {
2581 if (!xtensa
->hw_brps
[slot
] || xtensa
->hw_brps
[slot
] == breakpoint
)
2584 if (slot
== xtensa
->core_config
->debug
.ibreaks_num
) {
2585 LOG_TARGET_ERROR(target
, "No free slots to add HW breakpoint!");
2586 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2589 xtensa
->hw_brps
[slot
] = breakpoint
;
2590 /* We will actually write the breakpoints when we resume the target. */
2591 LOG_TARGET_DEBUG(target
, "placed HW breakpoint %u @ " TARGET_ADDR_FMT
,
2593 breakpoint
->address
);
2598 int xtensa_breakpoint_remove(struct target
*target
, struct breakpoint
*breakpoint
)
2600 struct xtensa
*xtensa
= target_to_xtensa(target
);
2603 if (breakpoint
->type
== BKPT_SOFT
) {
2604 for (slot
= 0; slot
< XT_SW_BREAKPOINTS_MAX_NUM
; slot
++) {
2605 if (xtensa
->sw_brps
[slot
].oocd_bp
&& xtensa
->sw_brps
[slot
].oocd_bp
== breakpoint
)
2608 if (slot
== XT_SW_BREAKPOINTS_MAX_NUM
) {
2609 LOG_TARGET_WARNING(target
, "Max SW breakpoints slot reached, slot=%u!", slot
);
2610 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2612 int ret
= xtensa_sw_breakpoint_remove(target
, &xtensa
->sw_brps
[slot
]);
2613 if (ret
!= ERROR_OK
) {
2614 LOG_TARGET_ERROR(target
, "Failed to remove SW breakpoint (%d)!", ret
);
2617 LOG_TARGET_DEBUG(target
, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT
, slot
, breakpoint
->address
);
2621 for (slot
= 0; slot
< xtensa
->core_config
->debug
.ibreaks_num
; slot
++) {
2622 if (xtensa
->hw_brps
[slot
] == breakpoint
)
2625 if (slot
== xtensa
->core_config
->debug
.ibreaks_num
) {
2626 LOG_TARGET_ERROR(target
, "HW breakpoint not found!");
2627 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2629 xtensa
->hw_brps
[slot
] = NULL
;
2630 if (xtensa
->core_config
->core_type
== XT_NX
)
2631 xtensa_reg_set(target
, xtensa
->nx_reg_idx
[XT_NX_REG_IDX_IBREAKC0
] + slot
, 0);
2632 LOG_TARGET_DEBUG(target
, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT
, slot
, breakpoint
->address
);
2636 int xtensa_watchpoint_add(struct target
*target
, struct watchpoint
*watchpoint
)
2638 struct xtensa
*xtensa
= target_to_xtensa(target
);
2640 xtensa_reg_val_t dbreakcval
;
2642 if (target
->state
!= TARGET_HALTED
) {
2643 LOG_TARGET_ERROR(target
, "not halted");
2644 return ERROR_TARGET_NOT_HALTED
;
2647 if (watchpoint
->mask
!= WATCHPOINT_IGNORE_DATA_VALUE_MASK
) {
2648 LOG_TARGET_ERROR(target
, "watchpoint value masks not supported");
2649 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2652 for (slot
= 0; slot
< xtensa
->core_config
->debug
.dbreaks_num
; slot
++) {
2653 if (!xtensa
->hw_wps
[slot
] || xtensa
->hw_wps
[slot
] == watchpoint
)
2656 if (slot
== xtensa
->core_config
->debug
.dbreaks_num
) {
2657 LOG_TARGET_WARNING(target
, "No free slots to add HW watchpoint!");
2658 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2661 /* Figure out value for dbreakc5..0
2662 * It's basically 0x3F with an incremental bit removed from the LSB for each extra length power of 2. */
2663 if (watchpoint
->length
< 1 || watchpoint
->length
> 64 ||
2664 !IS_PWR_OF_2(watchpoint
->length
) ||
2665 !IS_ALIGNED(watchpoint
->address
, watchpoint
->length
)) {
2668 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2669 " not supported by hardware.",
2671 watchpoint
->address
);
2672 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2674 dbreakcval
= ALIGN_DOWN(0x3F, watchpoint
->length
);
2676 if (watchpoint
->rw
== WPT_READ
)
2677 dbreakcval
|= BIT(30);
2678 if (watchpoint
->rw
== WPT_WRITE
)
2679 dbreakcval
|= BIT(31);
2680 if (watchpoint
->rw
== WPT_ACCESS
)
2681 dbreakcval
|= BIT(30) | BIT(31);
2683 /* Write DBREAKA[slot] and DBCREAKC[slot] */
2684 xtensa_reg_set(target
, XT_REG_IDX_DBREAKA0
+ slot
, watchpoint
->address
);
2685 xtensa_reg_set(target
, XT_REG_IDX_DBREAKC0
+ slot
, dbreakcval
);
2686 xtensa
->hw_wps
[slot
] = watchpoint
;
2687 LOG_TARGET_DEBUG(target
, "placed HW watchpoint @ " TARGET_ADDR_FMT
,
2688 watchpoint
->address
);
2692 int xtensa_watchpoint_remove(struct target
*target
, struct watchpoint
*watchpoint
)
2694 struct xtensa
*xtensa
= target_to_xtensa(target
);
2697 for (slot
= 0; slot
< xtensa
->core_config
->debug
.dbreaks_num
; slot
++) {
2698 if (xtensa
->hw_wps
[slot
] == watchpoint
)
2701 if (slot
== xtensa
->core_config
->debug
.dbreaks_num
) {
2702 LOG_TARGET_WARNING(target
, "HW watchpoint " TARGET_ADDR_FMT
" not found!", watchpoint
->address
);
2703 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2705 xtensa_reg_set(target
, XT_REG_IDX_DBREAKC0
+ slot
, 0);
2706 xtensa
->hw_wps
[slot
] = NULL
;
2707 LOG_TARGET_DEBUG(target
, "cleared HW watchpoint @ " TARGET_ADDR_FMT
,
2708 watchpoint
->address
);
2712 int xtensa_start_algorithm(struct target
*target
,
2713 int num_mem_params
, struct mem_param
*mem_params
,
2714 int num_reg_params
, struct reg_param
*reg_params
,
2715 target_addr_t entry_point
, target_addr_t exit_point
,
2718 struct xtensa
*xtensa
= target_to_xtensa(target
);
2719 struct xtensa_algorithm
*algorithm_info
= arch_info
;
2720 int retval
= ERROR_OK
;
2721 bool usr_ps
= false;
2724 /* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
2725 * at the exit point */
2727 if (target
->state
!= TARGET_HALTED
) {
2728 LOG_WARNING("Target not halted!");
2729 return ERROR_TARGET_NOT_HALTED
;
2732 for (unsigned int i
= 0; i
< xtensa
->core_cache
->num_regs
; i
++) {
2733 struct reg
*reg
= &xtensa
->core_cache
->reg_list
[i
];
2734 buf_cpy(reg
->value
, xtensa
->algo_context_backup
[i
], reg
->size
);
2736 /* save debug reason, it will be changed */
2737 if (!algorithm_info
) {
2738 LOG_ERROR("BUG: arch_info not specified");
2741 algorithm_info
->ctx_debug_reason
= target
->debug_reason
;
2742 if (xtensa
->core_config
->core_type
== XT_LX
) {
2743 /* save PS and set to debug_level - 1 */
2744 algorithm_info
->ctx_ps
= xtensa_reg_get(target
, xtensa
->eps_dbglevel_idx
);
2745 newps
= (algorithm_info
->ctx_ps
& ~0xf) | (xtensa
->core_config
->debug
.irq_level
- 1);
2746 xtensa_reg_set(target
, xtensa
->eps_dbglevel_idx
, newps
);
2748 /* write mem params */
2749 for (int i
= 0; i
< num_mem_params
; i
++) {
2750 if (mem_params
[i
].direction
!= PARAM_IN
) {
2751 retval
= target_write_buffer(target
, mem_params
[i
].address
,
2753 mem_params
[i
].value
);
2754 if (retval
!= ERROR_OK
)
2758 /* write reg params */
2759 for (int i
= 0; i
< num_reg_params
; i
++) {
2760 if (reg_params
[i
].size
> 32) {
2761 LOG_ERROR("BUG: not supported register size (%d)", reg_params
[i
].size
);
2764 struct reg
*reg
= register_get_by_name(xtensa
->core_cache
, reg_params
[i
].reg_name
, 0);
2766 LOG_ERROR("BUG: register '%s' not found", reg_params
[i
].reg_name
);
2769 if (reg
->size
!= reg_params
[i
].size
) {
2770 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params
[i
].reg_name
);
2773 if (memcmp(reg_params
[i
].reg_name
, "ps", 3)) {
2775 } else if (xtensa
->core_config
->core_type
== XT_LX
) {
2776 unsigned int reg_id
= xtensa
->eps_dbglevel_idx
;
2777 assert(reg_id
< xtensa
->core_cache
->num_regs
&& "Attempt to access non-existing reg!");
2778 reg
= &xtensa
->core_cache
->reg_list
[reg_id
];
2780 xtensa_reg_set_value(reg
, buf_get_u32(reg_params
[i
].value
, 0, reg
->size
));
2783 /* ignore custom core mode if custom PS value is specified */
2784 if (!usr_ps
&& xtensa
->core_config
->core_type
== XT_LX
) {
2785 unsigned int eps_reg_idx
= xtensa
->eps_dbglevel_idx
;
2786 xtensa_reg_val_t ps
= xtensa_reg_get(target
, eps_reg_idx
);
2787 enum xtensa_mode core_mode
= XT_PS_RING_GET(ps
);
2788 if (algorithm_info
->core_mode
!= XT_MODE_ANY
&& algorithm_info
->core_mode
!= core_mode
) {
2789 LOG_DEBUG("setting core_mode: 0x%x", algorithm_info
->core_mode
);
2790 xtensa_reg_val_t new_ps
= (ps
& ~XT_PS_RING_MSK
) | XT_PS_RING(algorithm_info
->core_mode
);
2791 /* save previous core mode */
2792 /* TODO: core_mode is not restored for now. Can be added to the end of wait_algorithm */
2793 algorithm_info
->core_mode
= core_mode
;
2794 xtensa_reg_set(target
, eps_reg_idx
, new_ps
);
2795 xtensa
->core_cache
->reg_list
[eps_reg_idx
].valid
= 1;
2799 return xtensa_resume(target
, 0, entry_point
, 1, 1);
2802 /** Waits for an algorithm in the target. */
2803 int xtensa_wait_algorithm(struct target
*target
,
2804 int num_mem_params
, struct mem_param
*mem_params
,
2805 int num_reg_params
, struct reg_param
*reg_params
,
2806 target_addr_t exit_point
, unsigned int timeout_ms
,
2809 struct xtensa
*xtensa
= target_to_xtensa(target
);
2810 struct xtensa_algorithm
*algorithm_info
= arch_info
;
2811 int retval
= ERROR_OK
;
2812 xtensa_reg_val_t pc
;
2814 /* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
2815 * at the exit point */
2817 retval
= target_wait_state(target
, TARGET_HALTED
, timeout_ms
);
2818 /* If the target fails to halt due to the breakpoint, force a halt */
2819 if (retval
!= ERROR_OK
|| target
->state
!= TARGET_HALTED
) {
2820 retval
= target_halt(target
);
2821 if (retval
!= ERROR_OK
)
2823 retval
= target_wait_state(target
, TARGET_HALTED
, 500);
2824 if (retval
!= ERROR_OK
)
2826 LOG_TARGET_ERROR(target
, "not halted %d, pc 0x%" PRIx32
", ps 0x%" PRIx32
, retval
,
2827 xtensa_reg_get(target
, XT_REG_IDX_PC
),
2828 xtensa_reg_get(target
, (xtensa
->core_config
->core_type
== XT_LX
) ?
2829 xtensa
->eps_dbglevel_idx
: XT_REG_IDX_PS
));
2830 return ERROR_TARGET_TIMEOUT
;
2832 pc
= xtensa_reg_get(target
, XT_REG_IDX_PC
);
2833 if (exit_point
&& pc
!= exit_point
) {
2834 LOG_ERROR("failed algorithm halted at 0x%" PRIx32
", expected " TARGET_ADDR_FMT
, pc
, exit_point
);
2835 return ERROR_TARGET_TIMEOUT
;
2837 /* Copy core register values to reg_params[] */
2838 for (int i
= 0; i
< num_reg_params
; i
++) {
2839 if (reg_params
[i
].direction
!= PARAM_OUT
) {
2840 struct reg
*reg
= register_get_by_name(xtensa
->core_cache
, reg_params
[i
].reg_name
, 0);
2842 LOG_ERROR("BUG: register '%s' not found", reg_params
[i
].reg_name
);
2845 if (reg
->size
!= reg_params
[i
].size
) {
2846 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params
[i
].reg_name
);
2849 buf_set_u32(reg_params
[i
].value
, 0, 32, xtensa_reg_get_value(reg
));
2852 /* Read memory values to mem_params */
2853 LOG_DEBUG("Read mem params");
2854 for (int i
= 0; i
< num_mem_params
; i
++) {
2855 LOG_DEBUG("Check mem param @ " TARGET_ADDR_FMT
, mem_params
[i
].address
);
2856 if (mem_params
[i
].direction
!= PARAM_OUT
) {
2857 LOG_DEBUG("Read mem param @ " TARGET_ADDR_FMT
, mem_params
[i
].address
);
2858 retval
= target_read_buffer(target
, mem_params
[i
].address
, mem_params
[i
].size
, mem_params
[i
].value
);
2859 if (retval
!= ERROR_OK
)
2864 /* avoid gdb keep_alive warning */
2867 for (int i
= xtensa
->core_cache
->num_regs
- 1; i
>= 0; i
--) {
2868 struct reg
*reg
= &xtensa
->core_cache
->reg_list
[i
];
2869 if (i
== XT_REG_IDX_PS
) {
2870 continue; /* restore mapped reg number of PS depends on NDEBUGLEVEL */
2871 } else if (i
== XT_REG_IDX_DEBUGCAUSE
) {
2872 /*FIXME: restoring DEBUGCAUSE causes exception when executing corresponding
2873 * instruction in DIR */
2874 LOG_DEBUG("Skip restoring register %s: 0x%8.8" PRIx32
" -> 0x%8.8" PRIx32
,
2875 xtensa
->core_cache
->reg_list
[i
].name
,
2876 buf_get_u32(reg
->value
, 0, 32),
2877 buf_get_u32(xtensa
->algo_context_backup
[i
], 0, 32));
2878 buf_cpy(xtensa
->algo_context_backup
[i
], reg
->value
, reg
->size
);
2879 xtensa
->core_cache
->reg_list
[i
].dirty
= 0;
2880 xtensa
->core_cache
->reg_list
[i
].valid
= 0;
2881 } else if (memcmp(xtensa
->algo_context_backup
[i
], reg
->value
, reg
->size
/ 8)) {
2882 if (reg
->size
<= 32) {
2883 LOG_DEBUG("restoring register %s: 0x%8.8" PRIx32
" -> 0x%8.8" PRIx32
,
2884 xtensa
->core_cache
->reg_list
[i
].name
,
2885 buf_get_u32(reg
->value
, 0, reg
->size
),
2886 buf_get_u32(xtensa
->algo_context_backup
[i
], 0, reg
->size
));
2887 } else if (reg
->size
<= 64) {
2888 LOG_DEBUG("restoring register %s: 0x%8.8" PRIx64
" -> 0x%8.8" PRIx64
,
2889 xtensa
->core_cache
->reg_list
[i
].name
,
2890 buf_get_u64(reg
->value
, 0, reg
->size
),
2891 buf_get_u64(xtensa
->algo_context_backup
[i
], 0, reg
->size
));
2893 LOG_DEBUG("restoring register %s %u-bits", xtensa
->core_cache
->reg_list
[i
].name
, reg
->size
);
2895 buf_cpy(xtensa
->algo_context_backup
[i
], reg
->value
, reg
->size
);
2896 xtensa
->core_cache
->reg_list
[i
].dirty
= 1;
2897 xtensa
->core_cache
->reg_list
[i
].valid
= 1;
2900 target
->debug_reason
= algorithm_info
->ctx_debug_reason
;
2901 if (xtensa
->core_config
->core_type
== XT_LX
)
2902 xtensa_reg_set(target
, xtensa
->eps_dbglevel_idx
, algorithm_info
->ctx_ps
);
2904 retval
= xtensa_write_dirty_registers(target
);
2905 if (retval
!= ERROR_OK
)
2906 LOG_ERROR("Failed to write dirty regs (%d)!", retval
);
2911 int xtensa_run_algorithm(struct target
*target
,
2912 int num_mem_params
, struct mem_param
*mem_params
,
2913 int num_reg_params
, struct reg_param
*reg_params
,
2914 target_addr_t entry_point
, target_addr_t exit_point
,
2915 unsigned int timeout_ms
, void *arch_info
)
2917 int retval
= xtensa_start_algorithm(target
,
2918 num_mem_params
, mem_params
,
2919 num_reg_params
, reg_params
,
2920 entry_point
, exit_point
,
2923 if (retval
== ERROR_OK
) {
2924 retval
= xtensa_wait_algorithm(target
,
2925 num_mem_params
, mem_params
,
2926 num_reg_params
, reg_params
,
2927 exit_point
, timeout_ms
,
2934 static int xtensa_build_reg_cache(struct target
*target
)
2936 struct xtensa
*xtensa
= target_to_xtensa(target
);
2937 struct reg_cache
**cache_p
= register_get_last_cache_p(&target
->reg_cache
);
2938 unsigned int last_dbreg_num
= 0;
2940 if (xtensa
->core_regs_num
+ xtensa
->num_optregs
!= xtensa
->total_regs_num
)
2941 LOG_TARGET_WARNING(target
, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2942 xtensa
->core_regs_num
, xtensa
->num_optregs
, xtensa
->total_regs_num
);
2944 struct reg_cache
*reg_cache
= calloc(1, sizeof(struct reg_cache
));
2947 LOG_ERROR("Failed to alloc reg cache!");
2950 reg_cache
->name
= "Xtensa registers";
2951 reg_cache
->next
= NULL
;
2953 unsigned int reg_list_size
= XT_NUM_REGS
+ xtensa
->num_optregs
;
2954 struct reg
*reg_list
= calloc(reg_list_size
, sizeof(struct reg
));
2956 LOG_ERROR("Failed to alloc reg list!");
2959 xtensa
->dbregs_num
= 0;
2960 unsigned int didx
= 0;
2961 for (unsigned int whichlist
= 0; whichlist
< 2; whichlist
++) {
2962 struct xtensa_reg_desc
*rlist
= (whichlist
== 0) ? xtensa_regs
: xtensa
->optregs
;
2963 unsigned int listsize
= (whichlist
== 0) ? XT_NUM_REGS
: xtensa
->num_optregs
;
2964 for (unsigned int i
= 0; i
< listsize
; i
++, didx
++) {
2965 reg_list
[didx
].exist
= rlist
[i
].exist
;
2966 reg_list
[didx
].name
= rlist
[i
].name
;
2967 reg_list
[didx
].size
= 32;
2968 reg_list
[didx
].value
= calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2969 if (!reg_list
[didx
].value
) {
2970 LOG_ERROR("Failed to alloc reg list value!");
2973 reg_list
[didx
].dirty
= false;
2974 reg_list
[didx
].valid
= false;
2975 reg_list
[didx
].type
= &xtensa_reg_type
;
2976 reg_list
[didx
].arch_info
= xtensa
;
2977 if (rlist
[i
].exist
&& (rlist
[i
].dbreg_num
> last_dbreg_num
))
2978 last_dbreg_num
= rlist
[i
].dbreg_num
;
2980 if (xtensa_extra_debug_log
) {
2981 LOG_TARGET_DEBUG(target
,
2982 "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2983 reg_list
[didx
].name
,
2985 reg_list
[didx
].exist
,
2988 rlist
[i
].dbreg_num
);
2993 xtensa
->dbregs_num
= last_dbreg_num
+ 1;
2994 reg_cache
->reg_list
= reg_list
;
2995 reg_cache
->num_regs
= reg_list_size
;
2997 LOG_TARGET_DEBUG(target
, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2998 xtensa
->total_regs_num
, reg_list_size
, xtensa
->dbregs_num
);
3000 /* Construct empty-register list for handling unknown register requests */
3001 xtensa
->empty_regs
= calloc(xtensa
->dbregs_num
, sizeof(struct reg
));
3002 if (!xtensa
->empty_regs
) {
3003 LOG_TARGET_ERROR(target
, "ERROR: Out of memory");
3006 for (unsigned int i
= 0; i
< xtensa
->dbregs_num
; i
++) {
3007 xtensa
->empty_regs
[i
].name
= calloc(8, sizeof(char));
3008 if (!xtensa
->empty_regs
[i
].name
) {
3009 LOG_TARGET_ERROR(target
, "ERROR: Out of memory");
3012 sprintf((char *)xtensa
->empty_regs
[i
].name
, "?0x%04x", i
& 0x0000FFFF);
3013 xtensa
->empty_regs
[i
].size
= 32;
3014 xtensa
->empty_regs
[i
].type
= &xtensa_reg_type
;
3015 xtensa
->empty_regs
[i
].value
= calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
3016 if (!xtensa
->empty_regs
[i
].value
) {
3017 LOG_ERROR("Failed to alloc empty reg list value!");
3020 xtensa
->empty_regs
[i
].arch_info
= xtensa
;
3023 /* Construct contiguous register list from contiguous descriptor list */
3024 if (xtensa
->regmap_contiguous
&& xtensa
->contiguous_regs_desc
) {
3025 xtensa
->contiguous_regs_list
= calloc(xtensa
->total_regs_num
, sizeof(struct reg
*));
3026 if (!xtensa
->contiguous_regs_list
) {
3027 LOG_TARGET_ERROR(target
, "ERROR: Out of memory");
3030 for (unsigned int i
= 0; i
< xtensa
->total_regs_num
; i
++) {
3032 for (j
= 0; j
< reg_cache
->num_regs
; j
++) {
3033 if (!strcmp(reg_cache
->reg_list
[j
].name
, xtensa
->contiguous_regs_desc
[i
]->name
)) {
3034 /* Register number field is not filled above.
3035 Here we are assigning the corresponding index from the contiguous reg list.
3036 These indexes are in the same order with gdb g-packet request/response.
3037 Some more changes may be required for sparse reg lists.
3039 reg_cache
->reg_list
[j
].number
= i
;
3040 xtensa
->contiguous_regs_list
[i
] = &(reg_cache
->reg_list
[j
]);
3041 LOG_TARGET_DEBUG(target
,
3042 "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
3043 xtensa
->contiguous_regs_list
[i
]->name
,
3044 xtensa
->contiguous_regs_desc
[i
]->dbreg_num
);
3048 if (j
== reg_cache
->num_regs
)
3049 LOG_TARGET_WARNING(target
, "contiguous register %s not found",
3050 xtensa
->contiguous_regs_desc
[i
]->name
);
3054 xtensa
->algo_context_backup
= calloc(reg_cache
->num_regs
, sizeof(void *));
3055 if (!xtensa
->algo_context_backup
) {
3056 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
3059 for (unsigned int i
= 0; i
< reg_cache
->num_regs
; i
++) {
3060 struct reg
*reg
= ®_cache
->reg_list
[i
];
3061 xtensa
->algo_context_backup
[i
] = calloc(1, reg
->size
/ 8);
3062 if (!xtensa
->algo_context_backup
[i
]) {
3063 LOG_ERROR("Failed to alloc mem for algorithm context!");
3067 xtensa
->core_cache
= reg_cache
;
3069 *cache_p
= reg_cache
;
3074 for (unsigned int i
= 0; i
< reg_list_size
; i
++)
3075 free(reg_list
[i
].value
);
3078 if (xtensa
->empty_regs
) {
3079 for (unsigned int i
= 0; i
< xtensa
->dbregs_num
; i
++) {
3080 free((void *)xtensa
->empty_regs
[i
].name
);
3081 free(xtensa
->empty_regs
[i
].value
);
3083 free(xtensa
->empty_regs
);
3085 if (xtensa
->algo_context_backup
) {
3086 for (unsigned int i
= 0; i
< reg_cache
->num_regs
; i
++)
3087 free(xtensa
->algo_context_backup
[i
]);
3088 free(xtensa
->algo_context_backup
);
3095 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target
*target
, char *opstr
)
3097 struct xtensa
*xtensa
= target_to_xtensa(target
);
3098 int32_t status
= ERROR_COMMAND_ARGUMENT_INVALID
;
3099 /* Process op[] list */
3100 while (opstr
&& (*opstr
== ':')) {
3102 unsigned int oplen
= strtoul(opstr
+ 1, &opstr
, 16);
3104 LOG_TARGET_ERROR(target
, "TIE access instruction too long (%d)\n", oplen
);
3108 while ((i
< oplen
) && opstr
&& (*opstr
== ':'))
3109 ops
[i
++] = strtoul(opstr
+ 1, &opstr
, 16);
3111 LOG_TARGET_ERROR(target
, "TIE access instruction malformed (%d)\n", i
);
3116 sprintf(insn_buf
, "Exec %d-byte TIE sequence: ", oplen
);
3117 for (i
= 0; i
< oplen
; i
++)
3118 sprintf(insn_buf
+ strlen(insn_buf
), "%02x:", ops
[i
]);
3119 LOG_TARGET_DEBUG(target
, "%s", insn_buf
);
3120 xtensa_queue_exec_ins_wide(xtensa
, ops
, oplen
); /* Handles endian-swap */
3126 static int xtensa_gdbqc_qxtreg(struct target
*target
, const char *packet
, char **response_p
)
3128 struct xtensa
*xtensa
= target_to_xtensa(target
);
3129 bool iswrite
= (packet
[0] == 'Q');
3130 enum xtensa_qerr_e error
;
3132 /* Read/write TIE register. Requires spill location.
3133 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
3134 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
3136 if (!(xtensa
->spill_buf
)) {
3137 LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
3138 error
= XT_QERR_FAIL
;
3139 goto xtensa_gdbqc_qxtreg_fail
;
3143 uint32_t regnum
= strtoul(packet
+ 6, &delim
, 16);
3144 if (*delim
!= ':') {
3145 LOG_ERROR("Malformed qxtreg packet");
3146 error
= XT_QERR_INVAL
;
3147 goto xtensa_gdbqc_qxtreg_fail
;
3149 uint32_t reglen
= strtoul(delim
+ 1, &delim
, 16);
3150 if (*delim
!= ':') {
3151 LOG_ERROR("Malformed qxtreg packet");
3152 error
= XT_QERR_INVAL
;
3153 goto xtensa_gdbqc_qxtreg_fail
;
3155 uint8_t regbuf
[XT_QUERYPKT_RESP_MAX
];
3156 memset(regbuf
, 0, XT_QUERYPKT_RESP_MAX
);
3157 LOG_DEBUG("TIE reg 0x%08" PRIx32
" %s (%d bytes)", regnum
, iswrite
? "write" : "read", reglen
);
3158 if (reglen
* 2 + 1 > XT_QUERYPKT_RESP_MAX
) {
3159 LOG_ERROR("TIE register too large");
3160 error
= XT_QERR_MEM
;
3161 goto xtensa_gdbqc_qxtreg_fail
;
3164 /* (1) Save spill memory, (1.5) [if write then store value to spill location],
3165 * (2) read old a4, (3) write spill address to a4.
3166 * NOTE: ensure a4 is restored properly by all error handling logic
3168 unsigned int memop_size
= (xtensa
->spill_loc
& 3) ? 1 : 4;
3169 int status
= xtensa_read_memory(target
, xtensa
->spill_loc
, memop_size
,
3170 xtensa
->spill_bytes
/ memop_size
, xtensa
->spill_buf
);
3171 if (status
!= ERROR_OK
) {
3172 LOG_ERROR("Spill memory save");
3173 error
= XT_QERR_MEM
;
3174 goto xtensa_gdbqc_qxtreg_fail
;
3177 /* Extract value and store in spill memory */
3179 char *valbuf
= strchr(delim
, '=');
3180 if (!(valbuf
&& (*valbuf
== '='))) {
3181 LOG_ERROR("Malformed Qxtreg packet");
3182 error
= XT_QERR_INVAL
;
3183 goto xtensa_gdbqc_qxtreg_fail
;
3186 while (*valbuf
&& *(valbuf
+ 1)) {
3187 char bytestr
[3] = { 0, 0, 0 };
3188 strncpy(bytestr
, valbuf
, 2);
3189 regbuf
[b
++] = strtoul(bytestr
, NULL
, 16);
3193 LOG_ERROR("Malformed Qxtreg packet");
3194 error
= XT_QERR_INVAL
;
3195 goto xtensa_gdbqc_qxtreg_fail
;
3197 status
= xtensa_write_memory(target
, xtensa
->spill_loc
, memop_size
,
3198 reglen
/ memop_size
, regbuf
);
3199 if (status
!= ERROR_OK
) {
3200 LOG_ERROR("TIE value store");
3201 error
= XT_QERR_MEM
;
3202 goto xtensa_gdbqc_qxtreg_fail
;
3205 xtensa_reg_val_t orig_a4
= xtensa_reg_get(target
, XT_REG_IDX_A4
);
3206 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, xtensa
->spill_loc
);
3207 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A4
));
3209 int32_t tieop_status
= xtensa_gdbqc_parse_exec_tie_ops(target
, delim
);
3211 /* Restore a4 but not yet spill memory. Execute it all... */
3212 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, orig_a4
);
3213 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A4
));
3214 status
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
3215 if (status
!= ERROR_OK
) {
3216 LOG_TARGET_ERROR(target
, "TIE queue execute: %d\n", status
);
3217 tieop_status
= status
;
3219 status
= xtensa_core_status_check(target
);
3220 if (status
!= ERROR_OK
) {
3221 LOG_TARGET_ERROR(target
, "TIE instr execute: %d\n", status
);
3222 tieop_status
= status
;
3225 if (tieop_status
== ERROR_OK
) {
3227 /* TIE write succeeded; send OK */
3228 strcpy(*response_p
, "OK");
3230 /* TIE read succeeded; copy result from spill memory */
3231 status
= xtensa_read_memory(target
, xtensa
->spill_loc
, memop_size
, reglen
, regbuf
);
3232 if (status
!= ERROR_OK
) {
3233 LOG_TARGET_ERROR(target
, "TIE result read");
3234 tieop_status
= status
;
3237 for (i
= 0; i
< reglen
; i
++)
3238 sprintf(*response_p
+ 2 * i
, "%02x", regbuf
[i
]);
3239 *(*response_p
+ 2 * i
) = '\0';
3240 LOG_TARGET_DEBUG(target
, "TIE response: %s", *response_p
);
3244 /* Restore spill memory first, then report any previous errors */
3245 status
= xtensa_write_memory(target
, xtensa
->spill_loc
, memop_size
,
3246 xtensa
->spill_bytes
/ memop_size
, xtensa
->spill_buf
);
3247 if (status
!= ERROR_OK
) {
3248 LOG_ERROR("Spill memory restore");
3249 error
= XT_QERR_MEM
;
3250 goto xtensa_gdbqc_qxtreg_fail
;
3252 if (tieop_status
!= ERROR_OK
) {
3253 LOG_ERROR("TIE execution");
3254 error
= XT_QERR_FAIL
;
3255 goto xtensa_gdbqc_qxtreg_fail
;
3259 xtensa_gdbqc_qxtreg_fail
:
3260 strcpy(*response_p
, xt_qerr
[error
].chrval
);
3261 return xt_qerr
[error
].intval
;
3264 int xtensa_gdb_query_custom(struct target
*target
, const char *packet
, char **response_p
)
3266 struct xtensa
*xtensa
= target_to_xtensa(target
);
3267 enum xtensa_qerr_e error
;
3268 if (!packet
|| !response_p
) {
3269 LOG_TARGET_ERROR(target
, "invalid parameter: packet %p response_p %p", packet
, response_p
);
3273 *response_p
= xtensa
->qpkt_resp
;
3274 if (strncmp(packet
, "qxtn", 4) == 0) {
3275 strcpy(*response_p
, "OpenOCD");
3277 } else if (strncasecmp(packet
, "qxtgdbversion=", 14) == 0) {
3279 } else if ((strncmp(packet
, "Qxtsis=", 7) == 0) || (strncmp(packet
, "Qxtsds=", 7) == 0)) {
3280 /* Confirm host cache params match core .cfg file */
3281 struct xtensa_cache_config
*cachep
= (packet
[4] == 'i') ?
3282 &xtensa
->core_config
->icache
: &xtensa
->core_config
->dcache
;
3283 unsigned int line_size
= 0, size
= 0, way_count
= 0;
3284 sscanf(&packet
[7], "%x,%x,%x", &line_size
, &size
, &way_count
);
3285 if ((cachep
->line_size
!= line_size
) ||
3286 (cachep
->size
!= size
) ||
3287 (cachep
->way_count
!= way_count
)) {
3288 LOG_TARGET_WARNING(target
, "%cCache mismatch; check xtensa-core-XXX.cfg file",
3289 cachep
== &xtensa
->core_config
->icache
? 'I' : 'D');
3291 strcpy(*response_p
, "OK");
3293 } else if ((strncmp(packet
, "Qxtiram=", 8) == 0) || (strncmp(packet
, "Qxtirom=", 8) == 0)) {
3294 /* Confirm host IRAM/IROM params match core .cfg file */
3295 struct xtensa_local_mem_config
*memp
= (packet
[5] == 'a') ?
3296 &xtensa
->core_config
->iram
: &xtensa
->core_config
->irom
;
3297 unsigned int base
= 0, size
= 0, i
;
3298 char *pkt
= (char *)&packet
[7];
3301 size
= strtoul(pkt
, &pkt
, 16);
3303 base
= strtoul(pkt
, &pkt
, 16);
3304 LOG_TARGET_DEBUG(target
, "memcheck: %dB @ 0x%08x", size
, base
);
3305 for (i
= 0; i
< memp
->count
; i
++) {
3306 if ((memp
->regions
[i
].base
== base
) && (memp
->regions
[i
].size
== size
))
3309 if (i
== memp
->count
) {
3310 LOG_TARGET_WARNING(target
, "%s mismatch; check xtensa-core-XXX.cfg file",
3311 memp
== &xtensa
->core_config
->iram
? "IRAM" : "IROM");
3314 for (i
= 0; i
< 11; i
++) {
3316 strtoul(pkt
, &pkt
, 16);
3318 } while (pkt
&& (pkt
[0] == ','));
3319 strcpy(*response_p
, "OK");
3321 } else if (strncmp(packet
, "Qxtexcmlvl=", 11) == 0) {
3322 /* Confirm host EXCM_LEVEL matches core .cfg file */
3323 unsigned int excm_level
= strtoul(&packet
[11], NULL
, 0);
3324 if (!xtensa
->core_config
->high_irq
.enabled
||
3325 (excm_level
!= xtensa
->core_config
->high_irq
.excm_level
))
3326 LOG_TARGET_WARNING(target
, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
3327 strcpy(*response_p
, "OK");
3329 } else if ((strncmp(packet
, "Qxtl2cs=", 8) == 0) ||
3330 (strncmp(packet
, "Qxtl2ca=", 8) == 0) ||
3331 (strncmp(packet
, "Qxtdensity=", 11) == 0)) {
3332 strcpy(*response_p
, "OK");
3334 } else if (strncmp(packet
, "Qxtspill=", 9) == 0) {
3336 uint32_t spill_loc
= strtoul(packet
+ 9, &delim
, 16);
3337 if (*delim
!= ':') {
3338 LOG_ERROR("Malformed Qxtspill packet");
3339 error
= XT_QERR_INVAL
;
3340 goto xtensa_gdb_query_custom_fail
;
3342 xtensa
->spill_loc
= spill_loc
;
3343 xtensa
->spill_bytes
= strtoul(delim
+ 1, NULL
, 16);
3344 if (xtensa
->spill_buf
)
3345 free(xtensa
->spill_buf
);
3346 xtensa
->spill_buf
= calloc(1, xtensa
->spill_bytes
);
3347 if (!xtensa
->spill_buf
) {
3348 LOG_ERROR("Spill buf alloc");
3349 error
= XT_QERR_MEM
;
3350 goto xtensa_gdb_query_custom_fail
;
3352 LOG_TARGET_DEBUG(target
, "Set spill 0x%08" PRIx32
" (%d)", xtensa
->spill_loc
, xtensa
->spill_bytes
);
3353 strcpy(*response_p
, "OK");
3355 } else if (strncasecmp(packet
, "qxtreg", 6) == 0) {
3356 return xtensa_gdbqc_qxtreg(target
, packet
, response_p
);
3357 } else if ((strncmp(packet
, "qTStatus", 8) == 0) ||
3358 (strncmp(packet
, "qxtftie", 7) == 0) ||
3359 (strncmp(packet
, "qxtstie", 7) == 0)) {
3360 /* Return empty string to indicate trace, TIE wire debug are unsupported */
3361 strcpy(*response_p
, "");
3365 /* Warn for all other queries, but do not return errors */
3366 LOG_TARGET_WARNING(target
, "Unknown target-specific query packet: %s", packet
);
3367 strcpy(*response_p
, "");
3370 xtensa_gdb_query_custom_fail
:
3371 strcpy(*response_p
, xt_qerr
[error
].chrval
);
3372 return xt_qerr
[error
].intval
;
3375 int xtensa_init_arch_info(struct target
*target
, struct xtensa
*xtensa
,
3376 const struct xtensa_debug_module_config
*dm_cfg
)
3378 target
->arch_info
= xtensa
;
3379 xtensa
->common_magic
= XTENSA_COMMON_MAGIC
;
3380 xtensa
->target
= target
;
3381 xtensa
->stepping_isr_mode
= XT_STEPPING_ISR_ON
;
3383 xtensa
->core_config
= calloc(1, sizeof(struct xtensa_config
));
3384 if (!xtensa
->core_config
) {
3385 LOG_ERROR("Xtensa configuration alloc failed\n");
3389 /* Default cache settings are disabled with 1 way */
3390 xtensa
->core_config
->icache
.way_count
= 1;
3391 xtensa
->core_config
->dcache
.way_count
= 1;
3393 /* chrval: AR3/AR4 register names will change with window mapping.
3394 * intval: tracks whether scratch register was set through gdb P packet.
3396 for (enum xtensa_ar_scratch_set_e s
= 0; s
< XT_AR_SCRATCH_NUM
; s
++) {
3397 xtensa
->scratch_ars
[s
].chrval
= calloc(8, sizeof(char));
3398 if (!xtensa
->scratch_ars
[s
].chrval
) {
3399 for (enum xtensa_ar_scratch_set_e f
= 0; f
< s
; f
++)
3400 free(xtensa
->scratch_ars
[f
].chrval
);
3401 free(xtensa
->core_config
);
3402 LOG_ERROR("Xtensa scratch AR alloc failed\n");
3405 xtensa
->scratch_ars
[s
].intval
= false;
3406 sprintf(xtensa
->scratch_ars
[s
].chrval
, "%s%d",
3407 ((s
== XT_AR_SCRATCH_A3
) || (s
== XT_AR_SCRATCH_A4
)) ? "a" : "ar",
3408 ((s
== XT_AR_SCRATCH_A3
) || (s
== XT_AR_SCRATCH_AR3
)) ? 3 : 4);
3411 return xtensa_dm_init(&xtensa
->dbg_mod
, dm_cfg
);
3414 void xtensa_set_permissive_mode(struct target
*target
, bool state
)
3416 target_to_xtensa(target
)->permissive_mode
= state
;
3419 int xtensa_target_init(struct command_context
*cmd_ctx
, struct target
*target
)
3421 struct xtensa
*xtensa
= target_to_xtensa(target
);
3423 xtensa
->come_online_probes_num
= 3;
3424 xtensa
->hw_brps
= calloc(XT_HW_IBREAK_MAX_NUM
, sizeof(struct breakpoint
*));
3425 if (!xtensa
->hw_brps
) {
3426 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
3429 xtensa
->hw_wps
= calloc(XT_HW_DBREAK_MAX_NUM
, sizeof(struct watchpoint
*));
3430 if (!xtensa
->hw_wps
) {
3431 free(xtensa
->hw_brps
);
3432 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
3435 xtensa
->sw_brps
= calloc(XT_SW_BREAKPOINTS_MAX_NUM
, sizeof(struct xtensa_sw_breakpoint
));
3436 if (!xtensa
->sw_brps
) {
3437 free(xtensa
->hw_brps
);
3438 free(xtensa
->hw_wps
);
3439 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
3443 xtensa
->spill_loc
= 0xffffffff;
3444 xtensa
->spill_bytes
= 0;
3445 xtensa
->spill_buf
= NULL
;
3446 xtensa
->probe_lsddr32p
= -1; /* Probe for fast load/store operations */
3448 return xtensa_build_reg_cache(target
);
3451 static void xtensa_free_reg_cache(struct target
*target
)
3453 struct xtensa
*xtensa
= target_to_xtensa(target
);
3454 struct reg_cache
*cache
= xtensa
->core_cache
;
3457 register_unlink_cache(&target
->reg_cache
, cache
);
3458 for (unsigned int i
= 0; i
< cache
->num_regs
; i
++) {
3459 free(xtensa
->algo_context_backup
[i
]);
3460 free(cache
->reg_list
[i
].value
);
3462 free(xtensa
->algo_context_backup
);
3463 free(cache
->reg_list
);
3466 xtensa
->core_cache
= NULL
;
3467 xtensa
->algo_context_backup
= NULL
;
3469 if (xtensa
->empty_regs
) {
3470 for (unsigned int i
= 0; i
< xtensa
->dbregs_num
; i
++) {
3471 free((void *)xtensa
->empty_regs
[i
].name
);
3472 free(xtensa
->empty_regs
[i
].value
);
3474 free(xtensa
->empty_regs
);
3476 xtensa
->empty_regs
= NULL
;
3477 if (xtensa
->optregs
) {
3478 for (unsigned int i
= 0; i
< xtensa
->num_optregs
; i
++)
3479 free((void *)xtensa
->optregs
[i
].name
);
3480 free(xtensa
->optregs
);
3482 xtensa
->optregs
= NULL
;
3485 void xtensa_target_deinit(struct target
*target
)
3487 struct xtensa
*xtensa
= target_to_xtensa(target
);
3491 if (target_was_examined(target
)) {
3492 int ret
= xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DCRCLR
, OCDDCR_ENABLEOCD
);
3493 if (ret
!= ERROR_OK
) {
3494 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
3497 xtensa_dm_queue_tdi_idle(&xtensa
->dbg_mod
);
3498 ret
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
3499 if (ret
!= ERROR_OK
) {
3500 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
3503 xtensa_dm_deinit(&xtensa
->dbg_mod
);
3505 xtensa_free_reg_cache(target
);
3506 free(xtensa
->hw_brps
);
3507 free(xtensa
->hw_wps
);
3508 free(xtensa
->sw_brps
);
3509 if (xtensa
->spill_buf
) {
3510 free(xtensa
->spill_buf
);
3511 xtensa
->spill_buf
= NULL
;
3513 for (enum xtensa_ar_scratch_set_e s
= 0; s
< XT_AR_SCRATCH_NUM
; s
++)
3514 free(xtensa
->scratch_ars
[s
].chrval
);
3515 free(xtensa
->core_config
);
/* GDB architecture name reported for all Xtensa targets.
 * NOTE(review): body line was lost in extraction; restored as the constant
 * "xtensa" — confirm against upstream. */
const char *xtensa_get_gdb_arch(const struct target *target)
{
	return "xtensa";
}
3523 /* exe <ascii-encoded hexadecimal instruction bytes> */
3524 static COMMAND_HELPER(xtensa_cmd_exe_do
, struct target
*target
)
3526 struct xtensa
*xtensa
= target_to_xtensa(target
);
3529 return ERROR_COMMAND_SYNTAX_ERROR
;
3531 /* Process ascii-encoded hex byte string */
3532 const char *parm
= CMD_ARGV
[0];
3533 unsigned int parm_len
= strlen(parm
);
3534 if ((parm_len
>= 64) || (parm_len
& 1)) {
3535 command_print(CMD
, "Invalid parameter length (%d): must be even, < 64 characters", parm_len
);
3536 return ERROR_COMMAND_ARGUMENT_INVALID
;
3541 unsigned int oplen
= parm_len
/ 2;
3542 char encoded_byte
[3] = { 0, 0, 0 };
3543 for (unsigned int i
= 0; i
< oplen
; i
++) {
3544 encoded_byte
[0] = *parm
++;
3545 encoded_byte
[1] = *parm
++;
3546 ops
[i
] = strtoul(encoded_byte
, NULL
, 16);
3549 /* GDB must handle state save/restore.
3550 * Flush reg cache in case spill location is in an AR
3551 * Update CPENABLE only for this execution; later restore cached copy
3552 * Keep a copy of exccause in case executed code triggers an exception
3554 int status
= xtensa_write_dirty_registers(target
);
3555 if (status
!= ERROR_OK
) {
3556 command_print(CMD
, "%s: Failed to write back register cache.", target_name(target
));
3559 xtensa_reg_val_t exccause
= xtensa_reg_get(target
, XT_REG_IDX_EXCCAUSE
);
3560 xtensa_reg_val_t cpenable
= xtensa_reg_get(target
, XT_REG_IDX_CPENABLE
);
3561 xtensa_reg_val_t a3
= xtensa_reg_get(target
, XT_REG_IDX_A3
);
3562 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, 0xffffffff);
3563 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
3564 xtensa_queue_exec_ins(xtensa
, XT_INS_WSR(xtensa
,
3565 xtensa_regs
[XT_REG_IDX_CPENABLE
].reg_num
, XT_REG_A3
));
3566 xtensa_queue_dbg_reg_write(xtensa
, XDMREG_DDR
, a3
);
3567 xtensa_queue_exec_ins(xtensa
, XT_INS_RSR(xtensa
, XT_SR_DDR
, XT_REG_A3
));
3569 /* Queue instruction list and execute everything */
3570 LOG_TARGET_DEBUG(target
, "execute stub: %s", CMD_ARGV
[0]);
3571 xtensa_queue_exec_ins_wide(xtensa
, ops
, oplen
); /* Handles endian-swap */
3572 status
= xtensa_dm_queue_execute(&xtensa
->dbg_mod
);
3573 if (status
!= ERROR_OK
) {
3574 command_print(CMD
, "exec: queue error %d", status
);
3576 status
= xtensa_core_status_check(target
);
3577 if (status
!= ERROR_OK
)
3578 command_print(CMD
, "exec: status error %d", status
);
3581 /* Reread register cache and restore saved regs after instruction execution */
3582 if (xtensa_fetch_all_regs(target
) != ERROR_OK
)
3583 command_print(CMD
, "post-exec: register fetch error");
3584 if (status
!= ERROR_OK
) {
3585 command_print(CMD
, "post-exec: EXCCAUSE 0x%02" PRIx32
,
3586 xtensa_reg_get(target
, XT_REG_IDX_EXCCAUSE
));
3588 xtensa_reg_set(target
, XT_REG_IDX_EXCCAUSE
, exccause
);
3589 xtensa_reg_set(target
, XT_REG_IDX_CPENABLE
, cpenable
);
3593 COMMAND_HANDLER(xtensa_cmd_exe
)
3595 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do
, get_current_target(CMD_CTX
));
3599 COMMAND_HELPER(xtensa_cmd_xtdef_do
, struct xtensa
*xtensa
)
3602 return ERROR_COMMAND_SYNTAX_ERROR
;
3604 const char *core_name
= CMD_ARGV
[0];
3605 if (strcasecmp(core_name
, "LX") == 0) {
3606 xtensa
->core_config
->core_type
= XT_LX
;
3607 } else if (strcasecmp(core_name
, "NX") == 0) {
3608 xtensa
->core_config
->core_type
= XT_NX
;
3610 command_print(CMD
, "xtdef [LX|NX]\n");
3611 return ERROR_COMMAND_ARGUMENT_INVALID
;
3616 COMMAND_HANDLER(xtensa_cmd_xtdef
)
3618 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do
,
3619 target_to_xtensa(get_current_target(CMD_CTX
)));
/* Range-check an "xtopt" value; logs and returns false when out of [min..max]. */
static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
{
	if ((val < min) || (val > max)) {
		LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
		return false;
	}
	return true;
}
3631 /* xtopt <name> <value> */
3632 COMMAND_HELPER(xtensa_cmd_xtopt_do
, struct xtensa
*xtensa
)
3635 return ERROR_COMMAND_SYNTAX_ERROR
;
3637 const char *opt_name
= CMD_ARGV
[0];
3638 int opt_val
= strtol(CMD_ARGV
[1], NULL
, 0);
3639 if (strcasecmp(opt_name
, "arnum") == 0) {
3640 if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val
, 0, 64))
3641 return ERROR_COMMAND_ARGUMENT_INVALID
;
3642 xtensa
->core_config
->aregs_num
= opt_val
;
3643 } else if (strcasecmp(opt_name
, "windowed") == 0) {
3644 if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val
, 0, 1))
3645 return ERROR_COMMAND_ARGUMENT_INVALID
;
3646 xtensa
->core_config
->windowed
= opt_val
;
3647 } else if (strcasecmp(opt_name
, "cpenable") == 0) {
3648 if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val
, 0, 1))
3649 return ERROR_COMMAND_ARGUMENT_INVALID
;
3650 xtensa
->core_config
->coproc
= opt_val
;
3651 } else if (strcasecmp(opt_name
, "exceptions") == 0) {
3652 if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val
, 0, 1))
3653 return ERROR_COMMAND_ARGUMENT_INVALID
;
3654 xtensa
->core_config
->exceptions
= opt_val
;
3655 } else if (strcasecmp(opt_name
, "intnum") == 0) {
3656 if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val
, 0, 32))
3657 return ERROR_COMMAND_ARGUMENT_INVALID
;
3658 xtensa
->core_config
->irq
.enabled
= (opt_val
> 0);
3659 xtensa
->core_config
->irq
.irq_num
= opt_val
;
3660 } else if (strcasecmp(opt_name
, "hipriints") == 0) {
3661 if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val
, 0, 1))
3662 return ERROR_COMMAND_ARGUMENT_INVALID
;
3663 xtensa
->core_config
->high_irq
.enabled
= opt_val
;
3664 } else if (strcasecmp(opt_name
, "excmlevel") == 0) {
3665 if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val
, 1, 6))
3666 return ERROR_COMMAND_ARGUMENT_INVALID
;
3667 if (!xtensa
->core_config
->high_irq
.enabled
) {
3668 command_print(CMD
, "xtopt excmlevel requires hipriints\n");
3669 return ERROR_COMMAND_ARGUMENT_INVALID
;
3671 xtensa
->core_config
->high_irq
.excm_level
= opt_val
;
3672 } else if (strcasecmp(opt_name
, "intlevels") == 0) {
3673 if (xtensa
->core_config
->core_type
== XT_LX
) {
3674 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val
, 2, 6))
3675 return ERROR_COMMAND_ARGUMENT_INVALID
;
3677 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val
, 1, 255))
3678 return ERROR_COMMAND_ARGUMENT_INVALID
;
3680 if (!xtensa
->core_config
->high_irq
.enabled
) {
3681 command_print(CMD
, "xtopt intlevels requires hipriints\n");
3682 return ERROR_COMMAND_ARGUMENT_INVALID
;
3684 xtensa
->core_config
->high_irq
.level_num
= opt_val
;
3685 } else if (strcasecmp(opt_name
, "debuglevel") == 0) {
3686 if (xtensa
->core_config
->core_type
== XT_LX
) {
3687 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val
, 2, 6))
3688 return ERROR_COMMAND_ARGUMENT_INVALID
;
3690 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val
, 0, 0))
3691 return ERROR_COMMAND_ARGUMENT_INVALID
;
3693 xtensa
->core_config
->debug
.enabled
= 1;
3694 xtensa
->core_config
->debug
.irq_level
= opt_val
;
3695 } else if (strcasecmp(opt_name
, "ibreaknum") == 0) {
3696 if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val
, 0, 2))
3697 return ERROR_COMMAND_ARGUMENT_INVALID
;
3698 xtensa
->core_config
->debug
.ibreaks_num
= opt_val
;
3699 } else if (strcasecmp(opt_name
, "dbreaknum") == 0) {
3700 if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val
, 0, 2))
3701 return ERROR_COMMAND_ARGUMENT_INVALID
;
3702 xtensa
->core_config
->debug
.dbreaks_num
= opt_val
;
3703 } else if (strcasecmp(opt_name
, "tracemem") == 0) {
3704 if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val
, 0, 256 * 1024))
3705 return ERROR_COMMAND_ARGUMENT_INVALID
;
3706 xtensa
->core_config
->trace
.mem_sz
= opt_val
;
3707 xtensa
->core_config
->trace
.enabled
= (opt_val
> 0);
3708 } else if (strcasecmp(opt_name
, "tracememrev") == 0) {
3709 if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val
, 0, 1))
3710 return ERROR_COMMAND_ARGUMENT_INVALID
;
3711 xtensa
->core_config
->trace
.reversed_mem_access
= opt_val
;
3712 } else if (strcasecmp(opt_name
, "perfcount") == 0) {
3713 if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val
, 0, 8))
3714 return ERROR_COMMAND_ARGUMENT_INVALID
;
3715 xtensa
->core_config
->debug
.perfcount_num
= opt_val
;
3717 LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV
[0], CMD_ARGV
[1]);
3724 COMMAND_HANDLER(xtensa_cmd_xtopt
)
3726 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do
,
3727 target_to_xtensa(get_current_target(CMD_CTX
)));
3730 /* xtmem <type> [parameters] */
3731 COMMAND_HELPER(xtensa_cmd_xtmem_do
, struct xtensa
*xtensa
)
3733 struct xtensa_cache_config
*cachep
= NULL
;
3734 struct xtensa_local_mem_config
*memp
= NULL
;
3736 bool is_dcache
= false;
3739 return ERROR_COMMAND_SYNTAX_ERROR
;
3741 const char *mem_name
= CMD_ARGV
[0];
3742 if (strcasecmp(mem_name
, "icache") == 0) {
3743 cachep
= &xtensa
->core_config
->icache
;
3744 } else if (strcasecmp(mem_name
, "dcache") == 0) {
3745 cachep
= &xtensa
->core_config
->dcache
;
3747 } else if (strcasecmp(mem_name
, "l2cache") == 0) {
3748 /* TODO: support L2 cache */
3749 } else if (strcasecmp(mem_name
, "l2addr") == 0) {
3750 /* TODO: support L2 cache */
3751 } else if (strcasecmp(mem_name
, "iram") == 0) {
3752 memp
= &xtensa
->core_config
->iram
;
3753 mem_access
= XT_MEM_ACCESS_READ
| XT_MEM_ACCESS_WRITE
;
3754 } else if (strcasecmp(mem_name
, "dram") == 0) {
3755 memp
= &xtensa
->core_config
->dram
;
3756 mem_access
= XT_MEM_ACCESS_READ
| XT_MEM_ACCESS_WRITE
;
3757 } else if (strcasecmp(mem_name
, "sram") == 0) {
3758 memp
= &xtensa
->core_config
->sram
;
3759 mem_access
= XT_MEM_ACCESS_READ
| XT_MEM_ACCESS_WRITE
;
3760 } else if (strcasecmp(mem_name
, "irom") == 0) {
3761 memp
= &xtensa
->core_config
->irom
;
3762 mem_access
= XT_MEM_ACCESS_READ
;
3763 } else if (strcasecmp(mem_name
, "drom") == 0) {
3764 memp
= &xtensa
->core_config
->drom
;
3765 mem_access
= XT_MEM_ACCESS_READ
;
3766 } else if (strcasecmp(mem_name
, "srom") == 0) {
3767 memp
= &xtensa
->core_config
->srom
;
3768 mem_access
= XT_MEM_ACCESS_READ
;
3770 command_print(CMD
, "xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3771 return ERROR_COMMAND_ARGUMENT_INVALID
;
3775 if (CMD_ARGC
!= 4 && CMD_ARGC
!= 5)
3776 return ERROR_COMMAND_SYNTAX_ERROR
;
3777 cachep
->line_size
= strtoul(CMD_ARGV
[1], NULL
, 0);
3778 cachep
->size
= strtoul(CMD_ARGV
[2], NULL
, 0);
3779 cachep
->way_count
= strtoul(CMD_ARGV
[3], NULL
, 0);
3780 cachep
->writeback
= ((CMD_ARGC
== 5) && is_dcache
) ?
3781 strtoul(CMD_ARGV
[4], NULL
, 0) : 0;
3784 return ERROR_COMMAND_SYNTAX_ERROR
;
3785 struct xtensa_local_mem_region_config
*memcfgp
= &memp
->regions
[memp
->count
];
3786 memcfgp
->base
= strtoul(CMD_ARGV
[1], NULL
, 0);
3787 memcfgp
->size
= strtoul(CMD_ARGV
[2], NULL
, 0);
3788 memcfgp
->access
= mem_access
;
3795 COMMAND_HANDLER(xtensa_cmd_xtmem
)
3797 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do
,
3798 target_to_xtensa(get_current_target(CMD_CTX
)));
3801 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
3802 COMMAND_HELPER(xtensa_cmd_xtmpu_do
, struct xtensa
*xtensa
)
3805 return ERROR_COMMAND_SYNTAX_ERROR
;
3807 unsigned int nfgseg
= strtoul(CMD_ARGV
[0], NULL
, 0);
3808 unsigned int minsegsize
= strtoul(CMD_ARGV
[1], NULL
, 0);
3809 unsigned int lockable
= strtoul(CMD_ARGV
[2], NULL
, 0);
3810 unsigned int execonly
= strtoul(CMD_ARGV
[3], NULL
, 0);
3812 if ((nfgseg
> 32)) {
3813 command_print(CMD
, "<nfgseg> must be within [0..32]\n");
3814 return ERROR_COMMAND_ARGUMENT_INVALID
;
3815 } else if (minsegsize
& (minsegsize
- 1)) {
3816 command_print(CMD
, "<minsegsize> must be a power of 2 >= 32\n");
3817 return ERROR_COMMAND_ARGUMENT_INVALID
;
3818 } else if (lockable
> 1) {
3819 command_print(CMD
, "<lockable> must be 0 or 1\n");
3820 return ERROR_COMMAND_ARGUMENT_INVALID
;
3821 } else if (execonly
> 1) {
3822 command_print(CMD
, "<execonly> must be 0 or 1\n");
3823 return ERROR_COMMAND_ARGUMENT_INVALID
;
3826 xtensa
->core_config
->mpu
.enabled
= true;
3827 xtensa
->core_config
->mpu
.nfgseg
= nfgseg
;
3828 xtensa
->core_config
->mpu
.minsegsize
= minsegsize
;
3829 xtensa
->core_config
->mpu
.lockable
= lockable
;
3830 xtensa
->core_config
->mpu
.execonly
= execonly
;
3834 COMMAND_HANDLER(xtensa_cmd_xtmpu
)
3836 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do
,
3837 target_to_xtensa(get_current_target(CMD_CTX
)));
3840 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
3841 COMMAND_HELPER(xtensa_cmd_xtmmu_do
, struct xtensa
*xtensa
)
3844 return ERROR_COMMAND_SYNTAX_ERROR
;
3846 unsigned int nirefillentries
= strtoul(CMD_ARGV
[0], NULL
, 0);
3847 unsigned int ndrefillentries
= strtoul(CMD_ARGV
[1], NULL
, 0);
3848 if ((nirefillentries
!= 16) && (nirefillentries
!= 32)) {
3849 command_print(CMD
, "<nirefillentries> must be 16 or 32\n");
3850 return ERROR_COMMAND_ARGUMENT_INVALID
;
3851 } else if ((ndrefillentries
!= 16) && (ndrefillentries
!= 32)) {
3852 command_print(CMD
, "<ndrefillentries> must be 16 or 32\n");
3853 return ERROR_COMMAND_ARGUMENT_INVALID
;
3856 xtensa
->core_config
->mmu
.enabled
= true;
3857 xtensa
->core_config
->mmu
.itlb_entries_count
= nirefillentries
;
3858 xtensa
->core_config
->mmu
.dtlb_entries_count
= ndrefillentries
;
3862 COMMAND_HANDLER(xtensa_cmd_xtmmu
)
3864 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do
,
3865 target_to_xtensa(get_current_target(CMD_CTX
)));
3869 * xtreg <regname> <regnum> */
3870 COMMAND_HELPER(xtensa_cmd_xtreg_do
, struct xtensa
*xtensa
)
3872 if (CMD_ARGC
== 1) {
3873 int32_t numregs
= strtoul(CMD_ARGV
[0], NULL
, 0);
3874 if ((numregs
<= 0) || (numregs
> UINT16_MAX
)) {
3875 command_print(CMD
, "xtreg <numregs>: Invalid 'numregs' (%d)", numregs
);
3876 return ERROR_COMMAND_ARGUMENT_INVALID
;
3878 if ((xtensa
->genpkt_regs_num
> 0) && (numregs
< (int32_t)xtensa
->genpkt_regs_num
)) {
3879 command_print(CMD
, "xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
3880 numregs
, xtensa
->genpkt_regs_num
);
3881 return ERROR_COMMAND_ARGUMENT_INVALID
;
3883 xtensa
->total_regs_num
= numregs
;
3884 xtensa
->core_regs_num
= 0;
3885 xtensa
->num_optregs
= 0;
3886 /* A little more memory than required, but saves a second initialization pass */
3887 xtensa
->optregs
= calloc(xtensa
->total_regs_num
, sizeof(struct xtensa_reg_desc
));
3888 if (!xtensa
->optregs
) {
3889 LOG_ERROR("Failed to allocate xtensa->optregs!");
3893 } else if (CMD_ARGC
!= 2) {
3894 return ERROR_COMMAND_SYNTAX_ERROR
;
3897 /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3898 * if general register (g-packet) requests or contiguous register maps are supported */
3899 if (xtensa
->regmap_contiguous
&& !xtensa
->contiguous_regs_desc
) {
3900 xtensa
->contiguous_regs_desc
= calloc(xtensa
->total_regs_num
, sizeof(struct xtensa_reg_desc
*));
3901 if (!xtensa
->contiguous_regs_desc
) {
3902 LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3907 const char *regname
= CMD_ARGV
[0];
3908 unsigned int regnum
= strtoul(CMD_ARGV
[1], NULL
, 0);
3909 if (regnum
> UINT16_MAX
) {
3910 command_print(CMD
, "<regnum> must be a 16-bit number");
3911 return ERROR_COMMAND_ARGUMENT_INVALID
;
3914 if ((xtensa
->num_optregs
+ xtensa
->core_regs_num
) >= xtensa
->total_regs_num
) {
3915 if (xtensa
->total_regs_num
)
3916 command_print(CMD
, "'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3918 xtensa
->total_regs_num
, xtensa
->core_regs_num
, xtensa
->num_optregs
);
3920 command_print(CMD
, "'xtreg %s 0x%04x': Number of registers unspecified",
3925 /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3926 struct xtensa_reg_desc
*rptr
= &xtensa
->optregs
[xtensa
->num_optregs
];
3927 bool is_extended_reg
= true;
3929 for (ridx
= 0; ridx
< XT_NUM_REGS
; ridx
++) {
3930 if (strcmp(CMD_ARGV
[0], xtensa_regs
[ridx
].name
) == 0) {
3931 /* Flag core register as defined */
3932 rptr
= &xtensa_regs
[ridx
];
3933 xtensa
->core_regs_num
++;
3934 is_extended_reg
= false;
3940 if (is_extended_reg
) {
3941 /* Register ID, debugger-visible register ID */
3942 rptr
->name
= strdup(CMD_ARGV
[0]);
3943 rptr
->dbreg_num
= regnum
;
3944 rptr
->reg_num
= (regnum
& XT_REG_INDEX_MASK
);
3945 xtensa
->num_optregs
++;
3948 if ((regnum
& XT_REG_GENERAL_MASK
) == XT_REG_GENERAL_VAL
) {
3949 rptr
->type
= XT_REG_GENERAL
;
3950 } else if ((regnum
& XT_REG_USER_MASK
) == XT_REG_USER_VAL
) {
3951 rptr
->type
= XT_REG_USER
;
3952 } else if ((regnum
& XT_REG_FR_MASK
) == XT_REG_FR_VAL
) {
3953 rptr
->type
= XT_REG_FR
;
3954 } else if ((regnum
& XT_REG_SPECIAL_MASK
) == XT_REG_SPECIAL_VAL
) {
3955 rptr
->type
= XT_REG_SPECIAL
;
3956 } else if ((regnum
& XT_REG_RELGEN_MASK
) == XT_REG_RELGEN_VAL
) {
3957 /* WARNING: For these registers, regnum points to the
3958 * index of the corresponding ARx registers, NOT to
3959 * the processor register number! */
3960 rptr
->type
= XT_REG_RELGEN
;
3961 rptr
->reg_num
+= XT_REG_IDX_ARFIRST
;
3962 rptr
->dbreg_num
+= XT_REG_IDX_ARFIRST
;
3963 } else if ((regnum
& XT_REG_TIE_MASK
) != 0) {
3964 rptr
->type
= XT_REG_TIE
;
3966 rptr
->type
= XT_REG_OTHER
;
3969 /* Register flags */
3970 if ((strcmp(rptr
->name
, "mmid") == 0) || (strcmp(rptr
->name
, "eraccess") == 0) ||
3971 (strcmp(rptr
->name
, "ddr") == 0) || (strcmp(rptr
->name
, "intset") == 0) ||
3972 (strcmp(rptr
->name
, "intclear") == 0))
3973 rptr
->flags
= XT_REGF_NOREAD
;
3977 if (rptr
->reg_num
== (XT_EPS_REG_NUM_BASE
+ xtensa
->core_config
->debug
.irq_level
) &&
3978 xtensa
->core_config
->core_type
== XT_LX
&& rptr
->type
== XT_REG_SPECIAL
) {
3979 xtensa
->eps_dbglevel_idx
= XT_NUM_REGS
+ xtensa
->num_optregs
- 1;
3980 LOG_DEBUG("Setting PS (%s) index to %d", rptr
->name
, xtensa
->eps_dbglevel_idx
);
3982 if (xtensa
->core_config
->core_type
== XT_NX
) {
3983 enum xtensa_nx_reg_idx idx
= XT_NX_REG_IDX_NUM
;
3984 if (strcmp(rptr
->name
, "ibreakc0") == 0)
3985 idx
= XT_NX_REG_IDX_IBREAKC0
;
3986 else if (strcmp(rptr
->name
, "wb") == 0)
3987 idx
= XT_NX_REG_IDX_WB
;
3988 else if (strcmp(rptr
->name
, "ms") == 0)
3989 idx
= XT_NX_REG_IDX_MS
;
3990 else if (strcmp(rptr
->name
, "ievec") == 0)
3991 idx
= XT_NX_REG_IDX_IEVEC
;
3992 else if (strcmp(rptr
->name
, "ieextern") == 0)
3993 idx
= XT_NX_REG_IDX_IEEXTERN
;
3994 else if (strcmp(rptr
->name
, "mesr") == 0)
3995 idx
= XT_NX_REG_IDX_MESR
;
3996 else if (strcmp(rptr
->name
, "mesrclr") == 0)
3997 idx
= XT_NX_REG_IDX_MESRCLR
;
3998 if (idx
< XT_NX_REG_IDX_NUM
) {
3999 if (xtensa
->nx_reg_idx
[idx
] != 0) {
4000 command_print(CMD
, "nx_reg_idx[%d] previously set to %d",
4001 idx
, xtensa
->nx_reg_idx
[idx
]);
4004 xtensa
->nx_reg_idx
[idx
] = XT_NUM_REGS
+ xtensa
->num_optregs
- 1;
4005 LOG_DEBUG("NX reg %s: index %d (%d)",
4006 rptr
->name
, xtensa
->nx_reg_idx
[idx
], idx
);
4009 } else if (strcmp(rptr
->name
, "cpenable") == 0) {
4010 xtensa
->core_config
->coproc
= true;
4013 /* Build out list of contiguous registers in specified order */
4014 unsigned int running_reg_count
= xtensa
->num_optregs
+ xtensa
->core_regs_num
;
4015 if (xtensa
->contiguous_regs_desc
) {
4016 assert((running_reg_count
<= xtensa
->total_regs_num
) && "contiguous register address internal error!");
4017 xtensa
->contiguous_regs_desc
[running_reg_count
- 1] = rptr
;
4019 if (xtensa_extra_debug_log
)
4020 LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
4021 is_extended_reg
? "config-specific" : "core",
4022 rptr
->name
, rptr
->dbreg_num
, rptr
->reg_num
, rptr
->type
,
4023 is_extended_reg
? xtensa
->num_optregs
: ridx
,
4024 is_extended_reg
? xtensa
->total_regs_num
: XT_NUM_REGS
);
4028 COMMAND_HANDLER(xtensa_cmd_xtreg
)
4030 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do
,
4031 target_to_xtensa(get_current_target(CMD_CTX
)));
4034 /* xtregfmt <contiguous|sparse> [numgregs] */
4035 COMMAND_HELPER(xtensa_cmd_xtregfmt_do
, struct xtensa
*xtensa
)
4037 if ((CMD_ARGC
== 1) || (CMD_ARGC
== 2)) {
4038 if (!strcasecmp(CMD_ARGV
[0], "sparse")) {
4040 } else if (!strcasecmp(CMD_ARGV
[0], "contiguous")) {
4041 xtensa
->regmap_contiguous
= true;
4042 if (CMD_ARGC
== 2) {
4043 unsigned int numgregs
= strtoul(CMD_ARGV
[1], NULL
, 0);
4044 if ((numgregs
<= 0) ||
4045 ((numgregs
> xtensa
->total_regs_num
) &&
4046 (xtensa
->total_regs_num
> 0))) {
4047 command_print(CMD
, "xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
4048 numgregs
, xtensa
->total_regs_num
);
4049 return ERROR_COMMAND_ARGUMENT_INVALID
;
4051 xtensa
->genpkt_regs_num
= numgregs
;
4056 return ERROR_COMMAND_SYNTAX_ERROR
;
4059 COMMAND_HANDLER(xtensa_cmd_xtregfmt
)
4061 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do
,
4062 target_to_xtensa(get_current_target(CMD_CTX
)));
4065 COMMAND_HELPER(xtensa_cmd_permissive_mode_do
, struct xtensa
*xtensa
)
4067 return CALL_COMMAND_HANDLER(handle_command_parse_bool
,
4068 &xtensa
->permissive_mode
, "xtensa permissive mode");
4071 COMMAND_HANDLER(xtensa_cmd_permissive_mode
)
4073 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do
,
4074 target_to_xtensa(get_current_target(CMD_CTX
)));
4077 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
4078 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do
, struct xtensa
*xtensa
)
4080 struct xtensa_perfmon_config config
= {
4083 .tracelevel
= -1 /* use DEBUGLEVEL by default */
4086 if (CMD_ARGC
< 2 || CMD_ARGC
> 6)
4087 return ERROR_COMMAND_SYNTAX_ERROR
;
4089 unsigned int counter_id
= strtoul(CMD_ARGV
[0], NULL
, 0);
4090 if (counter_id
>= XTENSA_MAX_PERF_COUNTERS
) {
4091 command_print(CMD
, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS
);
4092 return ERROR_COMMAND_ARGUMENT_INVALID
;
4095 config
.select
= strtoul(CMD_ARGV
[1], NULL
, 0);
4096 if (config
.select
> XTENSA_MAX_PERF_SELECT
) {
4097 command_print(CMD
, "select should be < %d", XTENSA_MAX_PERF_SELECT
);
4098 return ERROR_COMMAND_ARGUMENT_INVALID
;
4101 if (CMD_ARGC
>= 3) {
4102 config
.mask
= strtoul(CMD_ARGV
[2], NULL
, 0);
4103 if (config
.mask
> XTENSA_MAX_PERF_MASK
) {
4104 command_print(CMD
, "mask should be < %d", XTENSA_MAX_PERF_MASK
);
4105 return ERROR_COMMAND_ARGUMENT_INVALID
;
4109 if (CMD_ARGC
>= 4) {
4110 config
.kernelcnt
= strtoul(CMD_ARGV
[3], NULL
, 0);
4111 if (config
.kernelcnt
> 1) {
4112 command_print(CMD
, "kernelcnt should be 0 or 1");
4113 return ERROR_COMMAND_ARGUMENT_INVALID
;
4117 if (CMD_ARGC
>= 5) {
4118 config
.tracelevel
= strtoul(CMD_ARGV
[4], NULL
, 0);
4119 if (config
.tracelevel
> 7) {
4120 command_print(CMD
, "tracelevel should be <=7");
4121 return ERROR_COMMAND_ARGUMENT_INVALID
;
4125 if (config
.tracelevel
== -1)
4126 config
.tracelevel
= xtensa
->core_config
->debug
.irq_level
;
4128 return xtensa_dm_perfmon_enable(&xtensa
->dbg_mod
, counter_id
, &config
);
4131 COMMAND_HANDLER(xtensa_cmd_perfmon_enable
)
4133 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do
,
4134 target_to_xtensa(get_current_target(CMD_CTX
)));
4137 /* perfmon_dump [counter_id] */
4138 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do
, struct xtensa
*xtensa
)
4141 return ERROR_COMMAND_SYNTAX_ERROR
;
4143 int counter_id
= -1;
4144 if (CMD_ARGC
== 1) {
4145 counter_id
= strtol(CMD_ARGV
[0], NULL
, 0);
4146 if (counter_id
> XTENSA_MAX_PERF_COUNTERS
) {
4147 command_print(CMD
, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS
);
4148 return ERROR_COMMAND_ARGUMENT_INVALID
;
4152 unsigned int counter_start
= (counter_id
< 0) ? 0 : counter_id
;
4153 unsigned int counter_end
= (counter_id
< 0) ? XTENSA_MAX_PERF_COUNTERS
: counter_id
+ 1;
4154 for (unsigned int counter
= counter_start
; counter
< counter_end
; ++counter
) {
4155 char result_buf
[128] = { 0 };
4156 size_t result_pos
= snprintf(result_buf
, sizeof(result_buf
), "Counter %d: ", counter
);
4157 struct xtensa_perfmon_result result
;
4158 int res
= xtensa_dm_perfmon_dump(&xtensa
->dbg_mod
, counter
, &result
);
4159 if (res
!= ERROR_OK
)
4161 snprintf(result_buf
+ result_pos
, sizeof(result_buf
) - result_pos
,
4164 result
.overflow
? " (overflow)" : "");
4165 command_print(CMD
, "%s", result_buf
);
4171 COMMAND_HANDLER(xtensa_cmd_perfmon_dump
)
4173 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do
,
4174 target_to_xtensa(get_current_target(CMD_CTX
)));
4177 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do
, struct xtensa
*xtensa
)
4183 state
= xtensa
->stepping_isr_mode
;
4184 if (state
== XT_STEPPING_ISR_ON
)
4186 else if (state
== XT_STEPPING_ISR_OFF
)
4190 command_print(CMD
, "Current ISR step mode: %s", st
);
4194 if (xtensa
->core_config
->core_type
== XT_NX
) {
4195 command_print(CMD
, "ERROR: ISR step mode only supported on Xtensa LX");
4199 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
4200 if (!strcasecmp(CMD_ARGV
[0], "off"))
4201 state
= XT_STEPPING_ISR_ON
;
4202 else if (!strcasecmp(CMD_ARGV
[0], "on"))
4203 state
= XT_STEPPING_ISR_OFF
;
4206 command_print(CMD
, "Argument unknown. Please pick one of ON, OFF");
4209 xtensa
->stepping_isr_mode
= state
;
4213 COMMAND_HANDLER(xtensa_cmd_mask_interrupts
)
4215 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do
,
4216 target_to_xtensa(get_current_target(CMD_CTX
)));
4219 COMMAND_HELPER(xtensa_cmd_smpbreak_do
, struct target
*target
)
4224 if (CMD_ARGC
>= 1) {
4225 for (unsigned int i
= 0; i
< CMD_ARGC
; i
++) {
4226 if (!strcasecmp(CMD_ARGV
[0], "none")) {
4228 } else if (!strcasecmp(CMD_ARGV
[i
], "BreakIn")) {
4229 val
|= OCDDCR_BREAKINEN
;
4230 } else if (!strcasecmp(CMD_ARGV
[i
], "BreakOut")) {
4231 val
|= OCDDCR_BREAKOUTEN
;
4232 } else if (!strcasecmp(CMD_ARGV
[i
], "RunStallIn")) {
4233 val
|= OCDDCR_RUNSTALLINEN
;
4234 } else if (!strcasecmp(CMD_ARGV
[i
], "DebugModeOut")) {
4235 val
|= OCDDCR_DEBUGMODEOUTEN
;
4236 } else if (!strcasecmp(CMD_ARGV
[i
], "BreakInOut")) {
4237 val
|= OCDDCR_BREAKINEN
| OCDDCR_BREAKOUTEN
;
4238 } else if (!strcasecmp(CMD_ARGV
[i
], "RunStall")) {
4239 val
|= OCDDCR_RUNSTALLINEN
| OCDDCR_DEBUGMODEOUTEN
;
4241 command_print(CMD
, "Unknown arg %s", CMD_ARGV
[i
]);
4244 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
4248 res
= xtensa_smpbreak_set(target
, val
);
4249 if (res
!= ERROR_OK
)
4250 command_print(CMD
, "Failed to set smpbreak config %d", res
);
4252 struct xtensa
*xtensa
= target_to_xtensa(target
);
4253 res
= xtensa_smpbreak_read(xtensa
, &val
);
4254 if (res
== ERROR_OK
)
4255 command_print(CMD
, "Current bits set:%s%s%s%s",
4256 (val
& OCDDCR_BREAKINEN
) ? " BreakIn" : "",
4257 (val
& OCDDCR_BREAKOUTEN
) ? " BreakOut" : "",
4258 (val
& OCDDCR_RUNSTALLINEN
) ? " RunStallIn" : "",
4259 (val
& OCDDCR_DEBUGMODEOUTEN
) ? " DebugModeOut" : ""
4262 command_print(CMD
, "Failed to get smpbreak config %d", res
);
4267 COMMAND_HANDLER(xtensa_cmd_smpbreak
)
4269 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do
,
4270 get_current_target(CMD_CTX
));
4273 COMMAND_HELPER(xtensa_cmd_dm_rw_do
, struct xtensa
*xtensa
)
4275 if (CMD_ARGC
== 1) {
4276 // read: xtensa dm addr
4277 uint32_t addr
= strtoul(CMD_ARGV
[0], NULL
, 0);
4279 int res
= xtensa_dm_read(&xtensa
->dbg_mod
, addr
, &val
);
4280 if (res
== ERROR_OK
)
4281 command_print(CMD
, "xtensa DM(0x%08" PRIx32
") -> 0x%08" PRIx32
, addr
, val
);
4283 command_print(CMD
, "xtensa DM(0x%08" PRIx32
") : read ERROR %" PRId32
, addr
, res
);
4285 } else if (CMD_ARGC
== 2) {
4286 // write: xtensa dm addr value
4287 uint32_t addr
= strtoul(CMD_ARGV
[0], NULL
, 0);
4288 uint32_t val
= strtoul(CMD_ARGV
[1], NULL
, 0);
4289 int res
= xtensa_dm_write(&xtensa
->dbg_mod
, addr
, val
);
4290 if (res
== ERROR_OK
)
4291 command_print(CMD
, "xtensa DM(0x%08" PRIx32
") <- 0x%08" PRIx32
, addr
, val
);
4293 command_print(CMD
, "xtensa DM(0x%08" PRIx32
") : write ERROR %" PRId32
, addr
, res
);
4296 return ERROR_COMMAND_SYNTAX_ERROR
;
4299 COMMAND_HANDLER(xtensa_cmd_dm_rw
)
4301 return CALL_COMMAND_HANDLER(xtensa_cmd_dm_rw_do
,
4302 target_to_xtensa(get_current_target(CMD_CTX
)));
4305 COMMAND_HELPER(xtensa_cmd_tracestart_do
, struct xtensa
*xtensa
)
4307 struct xtensa_trace_status trace_status
;
4308 struct xtensa_trace_start_config cfg
= {
4310 .stopmask
= XTENSA_STOPMASK_DISABLED
,
4312 .after_is_words
= false
4315 /* Parse arguments */
4316 for (unsigned int i
= 0; i
< CMD_ARGC
; i
++) {
4317 if ((!strcasecmp(CMD_ARGV
[i
], "pc")) && CMD_ARGC
> i
) {
4320 cfg
.stoppc
= strtol(CMD_ARGV
[i
], &e
, 0);
4323 cfg
.stopmask
= strtol(e
, NULL
, 0);
4324 } else if ((!strcasecmp(CMD_ARGV
[i
], "after")) && CMD_ARGC
> i
) {
4326 cfg
.after
= strtol(CMD_ARGV
[i
], NULL
, 0);
4327 } else if (!strcasecmp(CMD_ARGV
[i
], "ins")) {
4328 cfg
.after_is_words
= 0;
4329 } else if (!strcasecmp(CMD_ARGV
[i
], "words")) {
4330 cfg
.after_is_words
= 1;
4332 command_print(CMD
, "Did not understand %s", CMD_ARGV
[i
]);
4337 int res
= xtensa_dm_trace_status_read(&xtensa
->dbg_mod
, &trace_status
);
4338 if (res
!= ERROR_OK
)
4340 if (trace_status
.stat
& TRAXSTAT_TRACT
) {
4341 LOG_WARNING("Silently stop active tracing!");
4342 res
= xtensa_dm_trace_stop(&xtensa
->dbg_mod
, false);
4343 if (res
!= ERROR_OK
)
4347 res
= xtensa_dm_trace_start(&xtensa
->dbg_mod
, &cfg
);
4348 if (res
!= ERROR_OK
)
4351 xtensa
->trace_active
= true;
4352 command_print(CMD
, "Trace started.");
4356 COMMAND_HANDLER(xtensa_cmd_tracestart
)
4358 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do
,
4359 target_to_xtensa(get_current_target(CMD_CTX
)));
4362 COMMAND_HELPER(xtensa_cmd_tracestop_do
, struct xtensa
*xtensa
)
4364 struct xtensa_trace_status trace_status
;
4366 int res
= xtensa_dm_trace_status_read(&xtensa
->dbg_mod
, &trace_status
);
4367 if (res
!= ERROR_OK
)
4370 if (!(trace_status
.stat
& TRAXSTAT_TRACT
)) {
4371 command_print(CMD
, "No trace is currently active.");
4375 res
= xtensa_dm_trace_stop(&xtensa
->dbg_mod
, true);
4376 if (res
!= ERROR_OK
)
4379 xtensa
->trace_active
= false;
4380 command_print(CMD
, "Trace stop triggered.");
4384 COMMAND_HANDLER(xtensa_cmd_tracestop
)
4386 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do
,
4387 target_to_xtensa(get_current_target(CMD_CTX
)));
4390 COMMAND_HELPER(xtensa_cmd_tracedump_do
, struct xtensa
*xtensa
, const char *fname
)
4392 struct xtensa_trace_config trace_config
;
4393 struct xtensa_trace_status trace_status
;
4394 uint32_t memsz
, wmem
;
4396 int res
= xtensa_dm_trace_status_read(&xtensa
->dbg_mod
, &trace_status
);
4397 if (res
!= ERROR_OK
)
4400 if (trace_status
.stat
& TRAXSTAT_TRACT
) {
4401 command_print(CMD
, "Tracing is still active. Please stop it first.");
4405 res
= xtensa_dm_trace_config_read(&xtensa
->dbg_mod
, &trace_config
);
4406 if (res
!= ERROR_OK
)
4409 if (!(trace_config
.ctrl
& TRAXCTRL_TREN
)) {
4410 command_print(CMD
, "No active trace found; nothing to dump.");
4414 memsz
= trace_config
.memaddr_end
- trace_config
.memaddr_start
+ 1;
4415 command_print(CMD
, "Total trace memory: %d words", memsz
);
4416 if ((trace_config
.addr
&
4417 ((TRAXADDR_TWRAP_MASK
<< TRAXADDR_TWRAP_SHIFT
) | TRAXADDR_TWSAT
)) == 0) {
4418 /*Memory hasn't overwritten itself yet. */
4419 wmem
= trace_config
.addr
& TRAXADDR_TADDR_MASK
;
4420 command_print(CMD
, "...but trace is only %d words", wmem
);
4424 if (trace_config
.addr
& TRAXADDR_TWSAT
) {
4425 command_print(CMD
, "Real trace is many times longer than that (overflow)");
4427 uint32_t trc_sz
= (trace_config
.addr
>> TRAXADDR_TWRAP_SHIFT
) & TRAXADDR_TWRAP_MASK
;
4428 trc_sz
= (trc_sz
* memsz
) + (trace_config
.addr
& TRAXADDR_TADDR_MASK
);
4429 command_print(CMD
, "Real trace is %d words, but the start has been truncated.", trc_sz
);
4433 uint8_t *tracemem
= malloc(memsz
* 4);
4435 command_print(CMD
, "Failed to alloc memory for trace data!");
4438 res
= xtensa_dm_trace_data_read(&xtensa
->dbg_mod
, tracemem
, memsz
* 4);
4439 if (res
!= ERROR_OK
) {
4444 int f
= open(fname
, O_WRONLY
| O_CREAT
| O_TRUNC
, 0666);
4447 command_print(CMD
, "Unable to open file %s", fname
);
4450 if (write(f
, tracemem
, memsz
* 4) != (int)memsz
* 4)
4451 command_print(CMD
, "Unable to write to file %s", fname
);
4453 command_print(CMD
, "Written %d bytes of trace data to %s", memsz
* 4, fname
);
4456 bool is_all_zeroes
= true;
4457 for (unsigned int i
= 0; i
< memsz
* 4; i
++) {
4458 if (tracemem
[i
] != 0) {
4459 is_all_zeroes
= false;
4467 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
4472 COMMAND_HANDLER(xtensa_cmd_tracedump
)
4474 if (CMD_ARGC
!= 1) {
4475 command_print(CMD
, "Command takes exactly 1 parameter.Need filename to dump to as output!");
4479 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do
,
4480 target_to_xtensa(get_current_target(CMD_CTX
)), CMD_ARGV
[0]);
4483 static const struct command_registration xtensa_any_command_handlers
[] = {
4486 .handler
= xtensa_cmd_xtdef
,
4487 .mode
= COMMAND_CONFIG
,
4488 .help
= "Configure Xtensa core type",
4493 .handler
= xtensa_cmd_xtopt
,
4494 .mode
= COMMAND_CONFIG
,
4495 .help
= "Configure Xtensa core option",
4496 .usage
= "<name> <value>",
4500 .handler
= xtensa_cmd_xtmem
,
4501 .mode
= COMMAND_CONFIG
,
4502 .help
= "Configure Xtensa memory/cache option",
4503 .usage
= "<type> [parameters]",
4507 .handler
= xtensa_cmd_xtmmu
,
4508 .mode
= COMMAND_CONFIG
,
4509 .help
= "Configure Xtensa MMU option",
4510 .usage
= "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
4514 .handler
= xtensa_cmd_xtmpu
,
4515 .mode
= COMMAND_CONFIG
,
4516 .help
= "Configure Xtensa MPU option",
4517 .usage
= "<num FG seg> <min seg size> <lockable> <executeonly>",
4521 .handler
= xtensa_cmd_xtreg
,
4522 .mode
= COMMAND_CONFIG
,
4523 .help
= "Configure Xtensa register",
4524 .usage
= "<regname> <regnum>",
4528 .handler
= xtensa_cmd_xtreg
,
4529 .mode
= COMMAND_CONFIG
,
4530 .help
= "Configure number of Xtensa registers",
4531 .usage
= "<numregs>",
4535 .handler
= xtensa_cmd_xtregfmt
,
4536 .mode
= COMMAND_CONFIG
,
4537 .help
= "Configure format of Xtensa register map",
4538 .usage
= "<contiguous|sparse> [numgregs]",
4541 .name
= "set_permissive",
4542 .handler
= xtensa_cmd_permissive_mode
,
4543 .mode
= COMMAND_ANY
,
4544 .help
= "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
4549 .handler
= xtensa_cmd_mask_interrupts
,
4550 .mode
= COMMAND_ANY
,
4551 .help
= "mask Xtensa interrupts at step",
4552 .usage
= "['on'|'off']",
4556 .handler
= xtensa_cmd_smpbreak
,
4557 .mode
= COMMAND_ANY
,
4558 .help
= "Set the way the CPU chains OCD breaks",
4559 .usage
= "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
4563 .handler
= xtensa_cmd_dm_rw
,
4564 .mode
= COMMAND_ANY
,
4565 .help
= "Xtensa DM read/write",
4566 .usage
= "addr [value]"
4569 .name
= "perfmon_enable",
4570 .handler
= xtensa_cmd_perfmon_enable
,
4571 .mode
= COMMAND_EXEC
,
4572 .help
= "Enable and start performance counter",
4573 .usage
= "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
4576 .name
= "perfmon_dump",
4577 .handler
= xtensa_cmd_perfmon_dump
,
4578 .mode
= COMMAND_EXEC
,
4579 .help
= "Dump performance counter value. If no argument specified, dumps all counters.",
4580 .usage
= "[counter_id]",
4583 .name
= "tracestart",
4584 .handler
= xtensa_cmd_tracestart
,
4585 .mode
= COMMAND_EXEC
,
4587 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
4588 .usage
= "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
4591 .name
= "tracestop",
4592 .handler
= xtensa_cmd_tracestop
,
4593 .mode
= COMMAND_EXEC
,
4594 .help
= "Tracing: Stop current trace as started by the tracestart command",
4598 .name
= "tracedump",
4599 .handler
= xtensa_cmd_tracedump
,
4600 .mode
= COMMAND_EXEC
,
4601 .help
= "Tracing: Dump trace memory to a files. One file per core.",
4602 .usage
= "<outfile>",
4606 .handler
= xtensa_cmd_exe
,
4607 .mode
= COMMAND_ANY
,
4608 .help
= "Xtensa stub execution",
4609 .usage
= "<ascii-encoded hexadecimal instruction bytes>",
4611 COMMAND_REGISTRATION_DONE
4614 const struct command_registration xtensa_command_handlers
[] = {
4617 .mode
= COMMAND_ANY
,
4618 .help
= "Xtensa command group",
4620 .chain
= xtensa_any_command_handlers
,
4622 COMMAND_REGISTRATION_DONE