xtensa: update XDM register map for TRAX support
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
22
23 /* Swap the nibbles (4-bit fields) of Xtensa opcodes and operand fields */
24 #define XT_NIBSWAP8(V) \
25 ((((V) & 0x0F) << 4) \
26 | (((V) & 0xF0) >> 4))
27
28 #define XT_NIBSWAP16(V) \
29 ((((V) & 0x000F) << 12) \
30 | (((V) & 0x00F0) << 4) \
31 | (((V) & 0x0F00) >> 4) \
32 | (((V) & 0xF000) >> 12))
33
34 #define XT_NIBSWAP24(V) \
35 ((((V) & 0x00000F) << 20) \
36 | (((V) & 0x0000F0) << 12) \
37 | (((V) & 0x000F00) << 4) \
38 | (((V) & 0x00F000) >> 4) \
39 | (((V) & 0x0F0000) >> 12) \
40 | (((V) & 0xF00000) >> 20))
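/* Worked example: XT_NIBSWAP24() fully reverses the nibble order of a 24-bit
 * value, e.g. XT_NIBSWAP24(0x123456) == 0x654321, and XT_NIBSWAP8(0xA5) == 0x5A.
 * The _XT_INS_FORMAT_*() macros below rely on this to re-pack opcodes for
 * big-endian cores.
 */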
41
42 /* _XT_INS_FORMAT_*()
43 * Instruction formatting converted from little-endian inputs
44 * and shifted to the MSB-side of DIR for BE systems.
45 */
46 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
47 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
48 | (((T) & 0x0F) << 16) \
49 | (((SR) & 0xFF) << 8)) << 8 \
50 : (OPCODE) \
51 | (((SR) & 0xFF) << 8) \
52 | (((T) & 0x0F) << 4))
53
54 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
55 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
56 | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
57 | (((R) & 0x0F) << 8)) << 8 \
58 : (OPCODE) \
59 | (((ST) & 0xFF) << 4) \
60 | (((R) & 0x0F) << 12))
61
62 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
63 (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
64 | (((T) & 0x0F) << 8) \
65 | (((S) & 0x0F) << 4) \
66 | ((IMM4) & 0x0F)) << 16 \
67 : (OPCODE) \
68 | (((T) & 0x0F) << 4) \
69 | (((S) & 0x0F) << 8) \
70 | (((IMM4) & 0x0F) << 12))
71
72 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
73 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
74 | (((T) & 0x0F) << 16) \
75 | (((S) & 0x0F) << 12) \
76 | (((R) & 0x0F) << 8) \
77 | ((IMM8) & 0xFF)) << 8 \
78 : (OPCODE) \
79 | (((IMM8) & 0xFF) << 16) \
80 | (((R) & 0x0F) << 12) \
81 | (((S) & 0x0F) << 8) \
82 | (((T) & 0x0F) << 4))
83
84 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
85 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
86 | (((T) & 0x0F) << 16) \
87 | (((S) & 0x0F) << 12) \
88 | (((R) & 0x0F) << 8)) << 8 \
89 | ((IMM4) & 0x0F) \
90 : (OPCODE) \
91 | (((IMM4) & 0x0F) << 20) \
92 | (((R) & 0x0F) << 12) \
93 | (((S) & 0x0F) << 8) \
94 | (((T) & 0x0F) << 4))
95
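/* Worked example of the RRI8 format on a little-endian core:
 * XT_INS_L32I(xtensa, 3, 4, 0) (defined below) expands to
 *   0x002002 | (0 << 16) | (0 << 12) | (3 << 8) | (4 << 4) == 0x002342,
 * which encodes "l32i a4, a3, 0". On a big-endian core the nibble-swapped
 * opcode is used instead and the 24-bit result is shifted left by 8 so it
 * sits at the MSB side of the 32-bit DIR register.
 */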
96 /* Xtensa processor instruction opcodes
97 */
98 /* "Return From Debug Operation" to Normal */
99 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
100 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
101 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
102
103 /* Load to DDR register, increment the address register */
104 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
105 /* Store from DDR register, increment the address register */
106 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
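/* These two debug-only opcodes are what make block memory access cheap: once an
 * address has been shuttled into A(S) via DDR, each LDDR32P/SDDR32P moves one
 * 32-bit word between memory and DDR and post-increments A(S) by 4, so a
 * transfer needs only one queued DIR execution plus one DDR access per word.
 * A minimal, illustrative read sketch (buf/count are placeholders), assuming A3
 * already holds the start address:
 *
 *   for (unsigned int i = 0; i < count; i++) {
 *       xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
 *       xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &buf[i * 4]);
 *   }
 */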
107
108 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
109 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
110 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
111 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
112 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
113 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
114
115 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
116 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
117 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
118 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
119 /* Store 8-bit to A(S)+IMM8 from A(T) */
120 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
121
122 /* Cache Instructions */
123 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
124 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
125 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
126 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
127
128 /* Control Instructions */
129 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
130 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
131
132 /* Read Special Register */
133 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
134 /* Write Special Register */
135 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
136 /* Swap Special Register */
137 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
138
139 /* Rotate Window by (-8..7) */
140 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
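/* The ROTW immediate is expressed in window increments: one increment covers
 * 4 ARs on LX and 8 ARs on NX. Hence the register access loops below advance
 * the visible window by 16 ARs at a time using XT_INS_ROTW(xtensa, 4) on LX
 * and XT_INS_ROTW(xtensa, 2) on NX; after aregs_num / 16 rotations WINDOWBASE
 * wraps back to its original value.
 */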
141
142 /* Read User Register */
143 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
144 /* Write User Register */
145 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
146
147 /* Read Floating-Point Register */
148 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
149 /* Write Floating-Point Register */
150 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
151
152 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
153 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
154 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
155
156 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
157 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
158 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
159
160 #define XT_WATCHPOINTS_NUM_MAX 2
161
162 /* Register number macros for the DDR, PS, WB, A0, A3 and A4 registers.
163 * These get used a lot so making a shortcut is useful.
164 */
165 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
166 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
167 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
168 #define XT_REG_A0 (xtensa_regs[XT_REG_IDX_AR0].reg_num)
169 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
170 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
171
172 #define XT_PS_REG_NUM (0xe6U)
173 #define XT_EPS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
174 #define XT_EPC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
175 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
176 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
177 #define XT_NX_IBREAKC_BASE (0xc0U) /* (IBREAKC0..IBREAKC1) for NX */
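/* Example of how the virtual PC register resolves on LX: the debug exception
 * saves PC into EPC[DBGLEVEL], so with debug.irq_level == 6 (an illustrative
 * value; the real level comes from the core configuration) PC is accessed as
 * special register XT_EPC_REG_NUM_BASE + 6 == 0xB6 (EPC6), and PS is likewise
 * accessed as XT_EPS_REG_NUM_BASE + 6 == 0xC6 (EPS6).
 */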
178
179 #define XT_SW_BREAKPOINTS_MAX_NUM 32
180 #define XT_HW_IBREAK_MAX_NUM 2
181 #define XT_HW_DBREAK_MAX_NUM 2
182
183 struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
184 XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
185 XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
186 XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
187 XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
188 XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
189 XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
190 XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
191 XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
192 XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
193 XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
194 XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
195 XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
196 XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
197 XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
198 XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
199 XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
200 XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
201 XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
202 XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
203 XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
204 XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
205 XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
206 XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
207 XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
208 XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
209 XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
210 XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
211 XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
212 XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
213 XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
214 XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
215 XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
216 XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
217 XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
218 XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
219 XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
220 XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
221 XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
222 XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
223 XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
224 XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
225 XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
226 XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
227 XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
228 XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
229 XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
230 XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
231 XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
232 XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
233 XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
234 XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
235 XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
236 XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
237 XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
238 XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
239 XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
240 XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
241 XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
242 XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
243 XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
244 XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
245 XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
246 XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
247 XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
248 XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
249 XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
250 XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
251 XT_MK_REG_DESC("ps", XT_PS_REG_NUM, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
252 XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
253 XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
254 XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
255 XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
256 XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
257 XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
258 XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
259 XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
260 XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
261 XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
262 XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
263 XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
264 XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
265
266 /* WARNING: For these registers, regnum points to the
267 * index of the corresponding ARx registers, NOT to
268 * the processor register number! */
269 XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
270 XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
271 XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
272 XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
273 XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
274 XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
275 XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
276 XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
277 XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
278 XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
279 XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
280 XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
281 XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
282 XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
283 XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
284 XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
285 };
286
287 /**
288 * Types of memory regions on an Xtensa target
289 */
290 enum xtensa_mem_region_type {
291 XTENSA_MEM_REG_IROM = 0x0,
292 XTENSA_MEM_REG_IRAM,
293 XTENSA_MEM_REG_DROM,
294 XTENSA_MEM_REG_DRAM,
295 XTENSA_MEM_REG_SRAM,
296 XTENSA_MEM_REG_SROM,
297 XTENSA_MEM_REGS_NUM
298 };
299
300 /* Register definition as union for list allocation */
301 union xtensa_reg_val_u {
302 xtensa_reg_val_t val;
303 uint8_t buf[4];
304 };
305
306 static const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
307 { .chrval = "E00", .intval = ERROR_FAIL },
308 { .chrval = "E01", .intval = ERROR_FAIL },
309 { .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
310 { .chrval = "E03", .intval = ERROR_FAIL },
311 };
312
313 /* Set to true for extra debug logging */
314 static const bool xtensa_extra_debug_log;
315
316 /**
317 * Gets the local memory config for the given memory region type
318 */
319 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
320 struct xtensa *xtensa,
321 enum xtensa_mem_region_type type)
322 {
323 switch (type) {
324 case XTENSA_MEM_REG_IROM:
325 return &xtensa->core_config->irom;
326 case XTENSA_MEM_REG_IRAM:
327 return &xtensa->core_config->iram;
328 case XTENSA_MEM_REG_DROM:
329 return &xtensa->core_config->drom;
330 case XTENSA_MEM_REG_DRAM:
331 return &xtensa->core_config->dram;
332 case XTENSA_MEM_REG_SRAM:
333 return &xtensa->core_config->sram;
334 case XTENSA_MEM_REG_SROM:
335 return &xtensa->core_config->srom;
336 default:
337 return NULL;
338 }
339 }
340
341 /**
342 * Finds the xtensa_local_mem_region_config within an xtensa_local_mem_config
343 * that contains the given address.
344 * Returns NULL if no matching region is found
345 */
346 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
347 const struct xtensa_local_mem_config *mem,
348 target_addr_t address)
349 {
350 for (unsigned int i = 0; i < mem->count; i++) {
351 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
352 if (address >= region->base && address < (region->base + region->size))
353 return region;
354 }
355 return NULL;
356 }
357
358 /**
359 * Searches all memory regions configured on the Xtensa target for the one
360 * that contains the given address.
361 * Returns NULL if no matching region is found
362 */
363 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
364 struct xtensa *xtensa,
365 target_addr_t address)
366 {
367 const struct xtensa_local_mem_region_config *result;
368 const struct xtensa_local_mem_config *mcgf;
369 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
370 mcgf = xtensa_get_mem_config(xtensa, mtype);
371 result = xtensa_memory_region_find(mcgf, address);
372 if (result)
373 return result;
374 }
375 return NULL;
376 }
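/* Usage example (hypothetical region layout): if the core config lists an IRAM
 * region with base 0x40000000 and size 0x20000, then
 * xtensa_target_memory_region_find(xtensa, 0x40001000) returns that region,
 * while an address outside every configured IROM/IRAM/DROM/DRAM/SRAM/SROM
 * region yields NULL. xtensa_is_icacheable()/xtensa_is_dcacheable() below use
 * the same per-config lookup to decide cacheability.
 */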
377
378 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
379 const struct xtensa_local_mem_config *mem,
380 target_addr_t address)
381 {
382 if (!cache->size)
383 return false;
384 return xtensa_memory_region_find(mem, address);
385 }
386
387 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
388 {
389 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
390 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
391 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
392 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
393 }
394
395 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
396 {
397 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
398 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
399 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
400 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
401 }
402
403 static int xtensa_core_reg_get(struct reg *reg)
404 {
405 /* We don't need this because we read all registers on halt anyway. */
406 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
407 struct target *target = xtensa->target;
408
409 if (target->state != TARGET_HALTED)
410 return ERROR_TARGET_NOT_HALTED;
411 if (!reg->exist) {
412 if (strncmp(reg->name, "?0x", 3) == 0) {
413 unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
414 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
415 return ERROR_OK;
416 }
417 return ERROR_COMMAND_ARGUMENT_INVALID;
418 }
419 return ERROR_OK;
420 }
421
422 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
423 {
424 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
425 struct target *target = xtensa->target;
426
427 assert(reg->size <= 64 && "only registers up to 64 bits are supported!");
428 if (target->state != TARGET_HALTED)
429 return ERROR_TARGET_NOT_HALTED;
430
431 if (!reg->exist) {
432 if (strncmp(reg->name, "?0x", 3) == 0) {
433 unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
434 LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
435 return ERROR_OK;
436 }
437 return ERROR_COMMAND_ARGUMENT_INVALID;
438 }
439
440 buf_cpy(buf, reg->value, reg->size);
441
442 if (xtensa->core_config->windowed) {
443 /* If the user updates a potential scratch register, track for conflicts */
444 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
445 if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
446 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
447 buf_get_u32(reg->value, 0, 32));
448 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
449 xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
450 xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
451 xtensa->scratch_ars[s].intval = true;
452 break;
453 }
454 }
455 }
456 reg->dirty = true;
457 reg->valid = true;
458
459 return ERROR_OK;
460 }
461
462 static const struct reg_arch_type xtensa_reg_type = {
463 .get = xtensa_core_reg_get,
464 .set = xtensa_core_reg_set,
465 };
466
467 /* Convert a register index that's indexed relative to windowbase, to the real address. */
468 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
469 enum xtensa_reg_id reg_idx,
470 int windowbase)
471 {
472 unsigned int idx;
473 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
474 idx = reg_idx - XT_REG_IDX_AR0;
475 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
476 idx = reg_idx - XT_REG_IDX_A0;
477 } else {
478 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
479 return -1;
480 }
481 /* Each windowbase value represents 4 registers on LX and 8 on NX */
482 int base_inc = (xtensa->core_config->core_type == XT_LX) ? 4 : 8;
483 return ((idx + windowbase * base_inc) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
484 }
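/* Worked example on LX (base_inc == 4, 64 physical ARs): with WINDOWBASE == 5,
 * the window-relative register A3 maps to physical AR(3 + 5 * 4) == AR23, and
 * with WINDOWBASE == 15 it maps to AR(63 & 63) == AR63.
 * xtensa_canonical_to_windowbase_offset() below applies the inverse rotation
 * by simply negating the windowbase.
 */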
485
486 static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
487 enum xtensa_reg_id reg_idx,
488 int windowbase)
489 {
490 return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
491 }
492
493 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
494 {
495 struct reg *reg_list = xtensa->core_cache->reg_list;
496 reg_list[reg_idx].dirty = true;
497 }
498
499 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
500 {
501 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0EXEC, ins);
502 }
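/* Writing XDMREG_DIR0EXEC loads DIR0 and triggers execution of the queued
 * instruction, which is why xtensa_queue_exec_ins_wide() below fills the upper
 * DIRx registers first and writes DIR0EXEC last. Most register accesses in
 * this file follow the same "DDR shuttle" pattern, sketched here for writing
 * an arbitrary special register SR via the A3 scratch register (SR and value
 * are placeholders; the real code saves and restores A3 around the sequence):
 *
 *   xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, value);
 *   xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
 *   xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, SR, XT_REG_A3));
 */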
503
504 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
505 {
506 const int max_oplen = 64; /* 8 DIRx regs: max width 64B */
507 if ((oplen > 0) && (oplen <= max_oplen)) {
508 uint8_t ops_padded[max_oplen];
509 memcpy(ops_padded, ops, oplen);
510 memset(ops_padded + oplen, 0, max_oplen - oplen);
511 unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
512 for (int32_t i = oplenw - 1; i > 0; i--)
513 xtensa_queue_dbg_reg_write(xtensa,
514 XDMREG_DIR0 + i,
515 target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t)*i]));
516 /* Write DIR0EXEC last */
517 xtensa_queue_dbg_reg_write(xtensa,
518 XDMREG_DIR0EXEC,
519 target_buffer_get_u32(xtensa->target, &ops_padded[0]));
520 }
521 }
522
523 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
524 {
525 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
526 return dm->pwr_ops->queue_reg_write(dm, reg, data);
527 }
528
529 /* NOTE: Assumes A3 has already been saved */
530 static int xtensa_window_state_save(struct target *target, uint32_t *woe)
531 {
532 struct xtensa *xtensa = target_to_xtensa(target);
533 unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
534 uint32_t woe_dis;
535 uint8_t woe_buf[4];
536
537 if (xtensa->core_config->windowed) {
538 /* Save PS (LX) or WB (NX) and disable window overflow exceptions prior to AR save */
539 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, woe_sr, XT_REG_A3));
540 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
541 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, woe_buf);
542 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
543 if (res != ERROR_OK) {
544 LOG_TARGET_ERROR(target, "Failed to read %s (%d)!",
545 (woe_sr == XT_SR_PS) ? "PS" : "WB", res);
546 return res;
547 }
548 xtensa_core_status_check(target);
549 *woe = buf_get_u32(woe_buf, 0, 32);
550 woe_dis = *woe & ~((woe_sr == XT_SR_PS) ? XT_PS_WOE_MSK : XT_WB_S_MSK);
551 LOG_TARGET_DEBUG(target, "Clearing %s (0x%08" PRIx32 " -> 0x%08" PRIx32 ")",
552 (woe_sr == XT_SR_PS) ? "PS.WOE" : "WB.S", *woe, woe_dis);
553 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe_dis);
554 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
555 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, woe_sr, XT_REG_A3));
556 }
557 return ERROR_OK;
558 }
559
560 /* NOTE: Assumes A3 has already been saved */
561 static void xtensa_window_state_restore(struct target *target, uint32_t woe)
562 {
563 struct xtensa *xtensa = target_to_xtensa(target);
564 unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
565 if (xtensa->core_config->windowed) {
566 /* Restore window overflow exception state */
567 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe);
568 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
569 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, woe_sr, XT_REG_A3));
570 LOG_TARGET_DEBUG(target, "Restored %s (0x%08" PRIx32 ")",
571 (woe_sr == XT_SR_PS) ? "PS.WOE" : "WB", woe);
572 }
573 }
574
575 static bool xtensa_reg_is_readable(int flags, int cpenable)
576 {
577 if (flags & XT_REGF_NOREAD)
578 return false;
579 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
580 return false;
581 return true;
582 }
583
584 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
585 {
586 int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
587 if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
588 LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
589 memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
590 } else {
591 LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
592 memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
593 }
594 return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
595 }
596
597 static int xtensa_write_dirty_registers(struct target *target)
598 {
599 struct xtensa *xtensa = target_to_xtensa(target);
600 int res;
601 xtensa_reg_val_t regval, windowbase = 0;
602 bool scratch_reg_dirty = false, delay_cpenable = false;
603 struct reg *reg_list = xtensa->core_cache->reg_list;
604 unsigned int reg_list_size = xtensa->core_cache->num_regs;
605 bool preserve_a3 = false;
606 uint8_t a3_buf[4];
607 xtensa_reg_val_t a3 = 0, woe;
608 unsigned int ms_idx = (xtensa->core_config->core_type == XT_NX) ?
609 xtensa->nx_reg_idx[XT_NX_REG_IDX_MS] : reg_list_size;
610 xtensa_reg_val_t ms = 0;
611 bool restore_ms = false;
612
613 LOG_TARGET_DEBUG(target, "start");
614
615 /* We need to write the dirty registers in the cache list back to the processor.
616 * Start by writing the SFR/user registers. */
617 for (unsigned int i = 0; i < reg_list_size; i++) {
618 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
619 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
620 if (reg_list[i].dirty) {
621 if (rlist[ridx].type == XT_REG_SPECIAL ||
622 rlist[ridx].type == XT_REG_USER ||
623 rlist[ridx].type == XT_REG_FR) {
624 scratch_reg_dirty = true;
625 if (i == XT_REG_IDX_CPENABLE) {
626 delay_cpenable = true;
627 continue;
628 }
629 regval = xtensa_reg_get(target, i);
630 LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
631 reg_list[i].name,
632 rlist[ridx].reg_num,
633 regval);
634 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
635 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
636 if (reg_list[i].exist) {
637 unsigned int reg_num = rlist[ridx].reg_num;
638 if (rlist[ridx].type == XT_REG_USER) {
639 xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
640 } else if (rlist[ridx].type == XT_REG_FR) {
641 xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
642 } else {/*SFR */
643 if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
644 if (xtensa->core_config->core_type == XT_LX) {
645 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
646 reg_num = (XT_EPC_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
647 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
648 } else {
649 /* NX PC set through issuing a jump instruction */
650 xtensa_queue_exec_ins(xtensa, XT_INS_JX(xtensa, XT_REG_A3));
651 }
652 } else if (i == ms_idx) {
653 /* MS must be restored after ARs. This ensures ARs remain in correct
654 * order even for reversed register groups (overflow/underflow).
655 */
656 ms = regval;
657 restore_ms = true;
658 LOG_TARGET_DEBUG(target, "Delaying MS write: 0x%x", ms);
659 } else {
660 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
661 }
662 }
663 }
664 reg_list[i].dirty = false;
665 }
666 }
667 }
668 if (scratch_reg_dirty)
669 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
670 if (delay_cpenable) {
671 regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
672 LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
673 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
674 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
675 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
676 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
677 XT_REG_A3));
678 reg_list[XT_REG_IDX_CPENABLE].dirty = false;
679 }
680
681 preserve_a3 = (xtensa->core_config->windowed) || (xtensa->core_config->core_type == XT_NX);
682 if (preserve_a3) {
683 /* Save (windowed) A3 for scratch use */
684 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
685 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
686 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
687 if (res != ERROR_OK)
688 return res;
689 xtensa_core_status_check(target);
690 a3 = buf_get_u32(a3_buf, 0, 32);
691 }
692
693 if (xtensa->core_config->windowed) {
694 res = xtensa_window_state_save(target, &woe);
695 if (res != ERROR_OK)
696 return res;
697 /* Grab the windowbase, we need it. */
698 uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
699 XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
700 windowbase = xtensa_reg_get(target, wb_idx);
701 if (xtensa->core_config->core_type == XT_NX)
702 windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
703
704 /* Check if there are mismatches between the ARx and corresponding Ax registers.
705 * When the user sets a register on a windowed config, xt-gdb may set the ARx
706 * register directly. Thus we take ARx as priority over Ax if both are dirty
707 * and it's unclear if the user set one over the other explicitly.
708 */
709 for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
710 unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
711 if (reg_list[i].dirty && reg_list[j].dirty) {
712 if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
713 bool show_warning = true;
714 if (i == XT_REG_IDX_A3)
715 show_warning = xtensa_scratch_regs_fixup(xtensa,
716 reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
717 else if (i == XT_REG_IDX_A4)
718 show_warning = xtensa_scratch_regs_fixup(xtensa,
719 reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
720 if (show_warning)
721 LOG_WARNING(
722 "Both A%d [0x%08" PRIx32
723 "] and its underlying physical register "
724 "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
725 i - XT_REG_IDX_A0,
726 buf_get_u32(reg_list[i].value, 0, 32),
727 j - XT_REG_IDX_AR0,
728 buf_get_u32(reg_list[j].value, 0, 32));
729 }
730 }
731 }
732 }
733
734 /* Write A0-A15. */
735 for (unsigned int i = 0; i < 16; i++) {
736 if (reg_list[XT_REG_IDX_A0 + i].dirty) {
737 regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
738 LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
739 xtensa_regs[XT_REG_IDX_A0 + i].name,
740 regval,
741 xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
742 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
743 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
744 reg_list[XT_REG_IDX_A0 + i].dirty = false;
745 if (i == 3) {
746 /* Avoid stomping A3 during restore at end of function */
747 a3 = regval;
748 }
749 }
750 }
751
752 if (xtensa->core_config->windowed) {
753 /* Now write AR registers */
754 for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
755 /* Write the 16 registers we can see */
756 for (unsigned int i = 0; i < 16; i++) {
757 if (i + j < xtensa->core_config->aregs_num) {
758 enum xtensa_reg_id realadr =
759 xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
760 windowbase);
761 /* Write back any dirty un-windowed registers */
762 if (reg_list[realadr].dirty) {
763 regval = xtensa_reg_get(target, realadr);
764 LOG_TARGET_DEBUG(
765 target,
766 "Writing back reg %s value %08" PRIX32 ", num =%i",
767 xtensa_regs[realadr].name,
768 regval,
769 xtensa_regs[realadr].reg_num);
770 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
771 xtensa_queue_exec_ins(xtensa,
772 XT_INS_RSR(xtensa, XT_SR_DDR,
773 xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
774 reg_list[realadr].dirty = false;
775 if ((i + j) == 3)
776 /* Avoid stomping AR during A3 restore at end of function */
777 a3 = regval;
778 }
779 }
780 }
781
782 /* Now rotate the window so we'll see the next 16 registers. The final rotate
783 * will wrap around, leaving us in the state we started in.
784 * Each ROTW rotates 4 registers on LX and 8 on NX */
785 int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
786 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, rotw_arg));
787 }
788
789 xtensa_window_state_restore(target, woe);
790
791 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
792 xtensa->scratch_ars[s].intval = false;
793 }
794
795 if (restore_ms) {
796 uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
797 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, ms);
798 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
799 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, ms_regno, XT_REG_A3));
800 LOG_TARGET_DEBUG(target, "Delayed MS (0x%x) write complete: 0x%x", ms_regno, ms);
801 }
802
803 if (preserve_a3) {
804 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
805 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
806 }
807
808 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
809 xtensa_core_status_check(target);
810
811 return res;
812 }
813
814 static inline bool xtensa_is_stopped(struct target *target)
815 {
816 struct xtensa *xtensa = target_to_xtensa(target);
817 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
818 }
819
820 int xtensa_examine(struct target *target)
821 {
822 struct xtensa *xtensa = target_to_xtensa(target);
823 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
824
825 LOG_DEBUG("coreid = %d", target->coreid);
826
827 if (xtensa->core_config->core_type == XT_UNDEF) {
828 LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
829 return ERROR_FAIL;
830 }
831
832 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
833 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
834 xtensa_dm_queue_enable(&xtensa->dbg_mod);
835 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
836 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
837 if (res != ERROR_OK)
838 return res;
839 if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
840 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
841 return ERROR_TARGET_FAILURE;
842 }
843 LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
844 target_set_examined(target);
845 xtensa_smpbreak_write(xtensa, xtensa->smp_break);
846 return ERROR_OK;
847 }
848
849 int xtensa_wakeup(struct target *target)
850 {
851 struct xtensa *xtensa = target_to_xtensa(target);
852 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
853
854 if (xtensa->reset_asserted)
855 cmd |= PWRCTL_CORERESET(xtensa);
856 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
857 /* TODO: can we join this with the write above? */
858 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
859 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
860 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
861 }
862
863 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
864 {
865 uint32_t dsr_data = 0x00110000;
866 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
867 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
868 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
869
870 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
871 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, set | OCDDCR_ENABLEOCD);
872 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, clear);
873 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DSR, dsr_data);
874 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
875 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
876 }
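/* Worked example: any cross-trigger bits not requested in 'set' are explicitly
 * cleared. With set == OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN, the computed clear
 * mask is OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN (OCDDCR_ENABLEOCD cancels
 * out of the XOR), so DCRSET enables the break-in/break-out paths while DCRCLR
 * clears the RunStall-in and DebugMode-out enables.
 */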
877
878 int xtensa_smpbreak_set(struct target *target, uint32_t set)
879 {
880 struct xtensa *xtensa = target_to_xtensa(target);
881 int res = ERROR_OK;
882
883 xtensa->smp_break = set;
884 if (target_was_examined(target))
885 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
886 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
887 return res;
888 }
889
890 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
891 {
892 uint8_t dcr_buf[sizeof(uint32_t)];
893
894 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DCRSET, dcr_buf);
895 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
896 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
897 *val = buf_get_u32(dcr_buf, 0, 32);
898
899 return res;
900 }
901
902 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
903 {
904 struct xtensa *xtensa = target_to_xtensa(target);
905 *val = xtensa->smp_break;
906 return ERROR_OK;
907 }
908
909 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
910 {
911 return buf_get_u32(reg->value, 0, 32);
912 }
913
914 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
915 {
916 buf_set_u32(reg->value, 0, 32, value);
917 reg->dirty = true;
918 }
919
920 static int xtensa_imprecise_exception_occurred(struct target *target)
921 {
922 struct xtensa *xtensa = target_to_xtensa(target);
923 for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESR; idx++) {
924 enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
925 if (xtensa->nx_reg_idx[idx]) {
926 xtensa_reg_val_t reg = xtensa_reg_get(target, xtensa->nx_reg_idx[idx]);
927 if (reg & XT_IMPR_EXC_MSK) {
928 LOG_TARGET_DEBUG(target, "Imprecise exception: %s: 0x%x",
929 xtensa->core_cache->reg_list[ridx].name, reg);
930 return true;
931 }
932 }
933 }
934 return false;
935 }
936
937 static void xtensa_imprecise_exception_clear(struct target *target)
938 {
939 struct xtensa *xtensa = target_to_xtensa(target);
940 for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESRCLR; idx++) {
941 enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
942 if (ridx && idx != XT_NX_REG_IDX_MESR) {
943 xtensa_reg_val_t value = (idx == XT_NX_REG_IDX_MESRCLR) ? XT_MESRCLR_IMPR_EXC_MSK : 0;
944 xtensa_reg_set(target, ridx, value);
945 LOG_TARGET_DEBUG(target, "Imprecise exception: clearing %s (0x%x)",
946 xtensa->core_cache->reg_list[ridx].name, value);
947 }
948 }
949 }
950
951 int xtensa_core_status_check(struct target *target)
952 {
953 struct xtensa *xtensa = target_to_xtensa(target);
954 int res, needclear = 0, needimprclear = 0;
955
956 xtensa_dm_core_status_read(&xtensa->dbg_mod);
957 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
958 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
959 if (dsr & OCDDSR_EXECBUSY) {
960 if (!xtensa->suppress_dsr_errors)
961 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
962 needclear = 1;
963 }
964 if (dsr & OCDDSR_EXECEXCEPTION) {
965 if (!xtensa->suppress_dsr_errors)
966 LOG_TARGET_ERROR(target,
967 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
968 dsr);
969 needclear = 1;
970 }
971 if (dsr & OCDDSR_EXECOVERRUN) {
972 if (!xtensa->suppress_dsr_errors)
973 LOG_TARGET_ERROR(target,
974 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
975 dsr);
976 needclear = 1;
977 }
978 if (xtensa->core_config->core_type == XT_NX && (xtensa_imprecise_exception_occurred(target))) {
979 if (!xtensa->suppress_dsr_errors)
980 LOG_TARGET_ERROR(target,
981 "%s: Imprecise exception occurred!", target_name(target));
982 needclear = 1;
983 needimprclear = 1;
984 }
985 if (needclear) {
986 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
987 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
988 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
989 LOG_TARGET_ERROR(target, "clearing DSR failed!");
990 if (xtensa->core_config->core_type == XT_NX && needimprclear)
991 xtensa_imprecise_exception_clear(target);
992 return ERROR_FAIL;
993 }
994 return ERROR_OK;
995 }
996
997 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
998 {
999 struct xtensa *xtensa = target_to_xtensa(target);
1000 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1001 return xtensa_reg_get_value(reg);
1002 }
1003
1004 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
1005 {
1006 struct xtensa *xtensa = target_to_xtensa(target);
1007 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1008 if (xtensa_reg_get_value(reg) == value)
1009 return;
1010 xtensa_reg_set_value(reg, value);
1011 }
1012
1013 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
1014 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
1015 {
1016 struct xtensa *xtensa = target_to_xtensa(target);
1017 uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
1018 XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
1019 uint32_t windowbase = (xtensa->core_config->windowed ?
1020 xtensa_reg_get(target, wb_idx) : 0);
1021 if (xtensa->core_config->core_type == XT_NX)
1022 windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
1023 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
1024 xtensa_reg_set(target, a_idx, value);
1025 xtensa_reg_set(target, ar_idx, value);
1026 }
1027
1028 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
1029 uint32_t xtensa_cause_get(struct target *target)
1030 {
1031 struct xtensa *xtensa = target_to_xtensa(target);
1032 if (xtensa->core_config->core_type == XT_LX) {
1033 /* LX cause in DEBUGCAUSE */
1034 return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
1035 }
1036 if (xtensa->nx_stop_cause & DEBUGCAUSE_VALID)
1037 return xtensa->nx_stop_cause;
1038
1039 /* NX cause determined from DSR.StopCause */
1040 if (xtensa_dm_core_status_read(&xtensa->dbg_mod) != ERROR_OK) {
1041 LOG_TARGET_ERROR(target, "Read DSR error");
1042 } else {
1043 uint32_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
1044 /* NX causes are prioritized; only 1 bit can be set */
1045 switch ((dsr & OCDDSR_STOPCAUSE) >> OCDDSR_STOPCAUSE_SHIFT) {
1046 case OCDDSR_STOPCAUSE_DI:
1047 xtensa->nx_stop_cause = DEBUGCAUSE_DI;
1048 break;
1049 case OCDDSR_STOPCAUSE_SS:
1050 xtensa->nx_stop_cause = DEBUGCAUSE_IC;
1051 break;
1052 case OCDDSR_STOPCAUSE_IB:
1053 xtensa->nx_stop_cause = DEBUGCAUSE_IB;
1054 break;
1055 case OCDDSR_STOPCAUSE_B:
1056 case OCDDSR_STOPCAUSE_B1:
1057 xtensa->nx_stop_cause = DEBUGCAUSE_BI;
1058 break;
1059 case OCDDSR_STOPCAUSE_BN:
1060 xtensa->nx_stop_cause = DEBUGCAUSE_BN;
1061 break;
1062 case OCDDSR_STOPCAUSE_DB0:
1063 case OCDDSR_STOPCAUSE_DB1:
1064 xtensa->nx_stop_cause = DEBUGCAUSE_DB;
1065 break;
1066 default:
1067 LOG_TARGET_ERROR(target, "Unknown stop cause (DSR: 0x%08x)", dsr);
1068 break;
1069 }
1070 if (xtensa->nx_stop_cause)
1071 xtensa->nx_stop_cause |= DEBUGCAUSE_VALID;
1072 }
1073 return xtensa->nx_stop_cause;
1074 }
1075
1076 void xtensa_cause_clear(struct target *target)
1077 {
1078 struct xtensa *xtensa = target_to_xtensa(target);
1079 if (xtensa->core_config->core_type == XT_LX) {
1080 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
1081 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
1082 } else {
1083 /* NX DSR.STOPCAUSE is not writeable; clear cached copy but leave it valid */
1084 xtensa->nx_stop_cause = DEBUGCAUSE_VALID;
1085 }
1086 }
1087
1088 void xtensa_cause_reset(struct target *target)
1089 {
1090 /* Clear DEBUGCAUSE_VALID to trigger re-read (on NX) */
1091 struct xtensa *xtensa = target_to_xtensa(target);
1092 xtensa->nx_stop_cause = 0;
1093 }
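/* Illustrative lifecycle of the NX stop-cause cache implemented above (a sketch
 * of the intended call order, not a verbatim caller):
 *
 *   uint32_t cause = xtensa_cause_get(target);   // reads DSR.StopCause once and
 *                                                // caches it with DEBUGCAUSE_VALID
 *   if (cause & DEBUGCAUSE_BI)
 *       ...handle the BREAK instruction...       // placeholder for caller logic
 *   xtensa_cause_clear(target);                  // drop cause bits, keep VALID
 *   xtensa_cause_reset(target);                  // on resume: force re-read at next halt
 */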
1094
1095 int xtensa_assert_reset(struct target *target)
1096 {
1097 struct xtensa *xtensa = target_to_xtensa(target);
1098
1099 LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
1100 xtensa_queue_pwr_reg_write(xtensa,
1101 XDMREG_PWRCTL,
1102 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
1103 PWRCTL_COREWAKEUP(xtensa) | PWRCTL_CORERESET(xtensa));
1104 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1105 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1106 if (res != ERROR_OK)
1107 return res;
1108
1109 /* registers are now invalid */
1110 xtensa->reset_asserted = true;
1111 register_cache_invalidate(xtensa->core_cache);
1112 target->state = TARGET_RESET;
1113 return ERROR_OK;
1114 }
1115
1116 int xtensa_deassert_reset(struct target *target)
1117 {
1118 struct xtensa *xtensa = target_to_xtensa(target);
1119
1120 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
1121 if (target->reset_halt)
1122 xtensa_queue_dbg_reg_write(xtensa,
1123 XDMREG_DCRSET,
1124 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1125 xtensa_queue_pwr_reg_write(xtensa,
1126 XDMREG_PWRCTL,
1127 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
1128 PWRCTL_COREWAKEUP(xtensa));
1129 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1130 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1131 if (res != ERROR_OK)
1132 return res;
1133 target->state = TARGET_RUNNING;
1134 xtensa->reset_asserted = false;
1135 return res;
1136 }
1137
1138 int xtensa_soft_reset_halt(struct target *target)
1139 {
1140 LOG_TARGET_DEBUG(target, "begin");
1141 return xtensa_assert_reset(target);
1142 }
1143
1144 int xtensa_fetch_all_regs(struct target *target)
1145 {
1146 struct xtensa *xtensa = target_to_xtensa(target);
1147 struct reg *reg_list = xtensa->core_cache->reg_list;
1148 unsigned int reg_list_size = xtensa->core_cache->num_regs;
1149 xtensa_reg_val_t cpenable = 0, windowbase = 0, a0 = 0, a3;
1150 unsigned int ms_idx = reg_list_size;
1151 uint32_t ms = 0;
1152 uint32_t woe;
1153 uint8_t a0_buf[4], a3_buf[4], ms_buf[4];
1154 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1155
1156 union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
1157 if (!regvals) {
1158 LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
1159 return ERROR_FAIL;
1160 }
1161 union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
1162 if (!dsrs) {
1163 LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
1164 free(regvals);
1165 return ERROR_FAIL;
1166 }
1167
1168 LOG_TARGET_DEBUG(target, "start");
1169
1170 /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1171 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1172 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
1173 if (xtensa->core_config->core_type == XT_NX) {
1174 /* Save (windowed) A0 as well--it will be required for reading PC */
1175 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A0));
1176 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a0_buf);
1177
1178 /* Set MS.DispSt, clear MS.DE prior to accessing ARs. This ensures ARs remain
1179 * in correct order even for reversed register groups (overflow/underflow).
1180 */
1181 ms_idx = xtensa->nx_reg_idx[XT_NX_REG_IDX_MS];
1182 uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
1183 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, ms_regno, XT_REG_A3));
1184 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1185 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, ms_buf);
1186 LOG_TARGET_DEBUG(target, "Overriding MS (0x%x): 0x%x", ms_regno, XT_MS_DISPST_DBG);
1187 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, XT_MS_DISPST_DBG);
1188 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1189 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, ms_regno, XT_REG_A3));
1190 }
1191
1192 int res = xtensa_window_state_save(target, &woe);
1193 if (res != ERROR_OK)
1194 goto xtensa_fetch_all_regs_done;
1195
1196 /* Assume the CPU has just halted. We now want to fill the register cache with all the
1197 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1198 * in one go, then sort everything out from the regvals variable. */
1199
1200 /* Start out with AREGS; we can reach those immediately. Grab them 16 registers at a time. */
1201 for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
1202 /* Grab the 16 registers we can see */
1203 for (unsigned int i = 0; i < 16; i++) {
1204 if (i + j < xtensa->core_config->aregs_num) {
1205 xtensa_queue_exec_ins(xtensa,
1206 XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
1207 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
1208 regvals[XT_REG_IDX_AR0 + i + j].buf);
1209 if (debug_dsrs)
1210 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR,
1211 dsrs[XT_REG_IDX_AR0 + i + j].buf);
1212 }
1213 }
1214 if (xtensa->core_config->windowed) {
1215 /* Now rotate the window so we'll see the next 16 registers. The final rotate
1216 * will wrap around, leaving us in the state we started in.
1217 * Each ROTW rotates 4 registers on LX and 8 on NX */
1218 int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
1219 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, rotw_arg));
1220 }
1221 }
1222 xtensa_window_state_restore(target, woe);
1223
1224 if (xtensa->core_config->coproc) {
1225 /* As the very first thing after AREGS, go grab CPENABLE */
1226 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1227 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1228 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
1229 }
1230 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1231 if (res != ERROR_OK) {
1232 LOG_ERROR("Failed to read ARs (%d)!", res);
1233 goto xtensa_fetch_all_regs_done;
1234 }
1235 xtensa_core_status_check(target);
1236
1237 a3 = buf_get_u32(a3_buf, 0, 32);
1238 if (xtensa->core_config->core_type == XT_NX) {
1239 a0 = buf_get_u32(a0_buf, 0, 32);
1240 ms = buf_get_u32(ms_buf, 0, 32);
1241 }
1242
1243 if (xtensa->core_config->coproc) {
1244 cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);
1245
1246 /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1247 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
1248 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1249 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1250
1251 /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1252 LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
1253 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
1254 }
1255 /* We're now free to use any of A0-A15 as scratch registers
1256 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1257 for (unsigned int i = 0; i < reg_list_size; i++) {
1258 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1259 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1260 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1261 bool reg_fetched = true;
1262 unsigned int reg_num = rlist[ridx].reg_num;
1263 switch (rlist[ridx].type) {
1264 case XT_REG_USER:
1265 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
1266 break;
1267 case XT_REG_FR:
1268 xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
1269 break;
1270 case XT_REG_SPECIAL:
1271 if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
1272 if (xtensa->core_config->core_type == XT_LX) {
1273 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1274 reg_num = XT_EPC_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
1275 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
1276 } else {
1277 /* NX PC is read by issuing CALL0(0) and then reading A0 */
1278 xtensa_queue_exec_ins(xtensa, XT_INS_CALL0(xtensa, 0));
1279 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A0));
1280 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1281 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
1282 reg_fetched = false;
1283 }
1284 } else if ((xtensa->core_config->core_type == XT_LX)
1285 && (reg_num == xtensa_regs[XT_REG_IDX_PS].reg_num)) {
1286 /* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
1287 reg_num = XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
1288 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
1289 } else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
1290 /* CPENABLE already read/updated; don't re-read */
1291 reg_fetched = false;
1292 break;
1293 } else {
1294 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
1295 }
1296 break;
1297 default:
1298 reg_fetched = false;
1299 }
1300 if (reg_fetched) {
1301 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1302 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1303 if (debug_dsrs)
1304 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
1305 }
1306 }
1307 }
1308 /* Ok, send the whole mess to the CPU. */
1309 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1310 if (res != ERROR_OK) {
1311 LOG_ERROR("Failed to fetch AR regs!");
1312 goto xtensa_fetch_all_regs_done;
1313 }
1314 xtensa_core_status_check(target);
1315
1316 if (debug_dsrs) {
1317 /* DSR checking: follows order in which registers are requested. */
1318 for (unsigned int i = 0; i < reg_list_size; i++) {
1319 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1320 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1321 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
1322 (rlist[ridx].type != XT_REG_DEBUG) &&
1323 (rlist[ridx].type != XT_REG_RELGEN) &&
1324 (rlist[ridx].type != XT_REG_TIE) &&
1325 (rlist[ridx].type != XT_REG_OTHER)) {
1326 if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
1327 LOG_ERROR("Exception reading %s!", reg_list[i].name);
1328 res = ERROR_FAIL;
1329 goto xtensa_fetch_all_regs_done;
1330 }
1331 }
1332 }
1333 }
1334
1335 if (xtensa->core_config->windowed) {
1336 /* We need the windowbase to decode the general addresses. */
1337 uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
1338 XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
1339 windowbase = buf_get_u32(regvals[wb_idx].buf, 0, 32);
1340 if (xtensa->core_config->core_type == XT_NX)
1341 windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
1342 }
1343
1344 /* Decode the result and update the cache. */
1345 for (unsigned int i = 0; i < reg_list_size; i++) {
1346 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1347 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1348 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1349 if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
1350 /* The 64-value general register set is read from (windowbase) on down.
1351 * We need to get the real register address by subtracting windowbase and
1352 * wrapping around. */
1353 enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
1354 windowbase);
1355 buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
1356 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1357 buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
1358 if (xtensa_extra_debug_log) {
1359 xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
1360 LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
1361 }
1362 } else {
1363 xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
1364 bool is_dirty = (i == XT_REG_IDX_CPENABLE);
1365 if (xtensa_extra_debug_log)
1366 LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
1367 if (rlist[ridx].reg_num == XT_PC_REG_NUM_VIRTUAL &&
1368 xtensa->core_config->core_type == XT_NX) {
1369 /* A0 from prior CALL0 points to next instruction; decrement it */
1370 regval -= 3;
1371 is_dirty = 1;
1372 } else if (i == ms_idx) {
1373 LOG_TARGET_DEBUG(target, "Caching MS: 0x%x", ms);
1374 regval = ms;
1375 is_dirty = 1;
1376 }
1377 xtensa_reg_set(target, i, regval);
1378 reg_list[i].dirty = is_dirty; /*always do this _after_ xtensa_reg_set! */
1379 }
1380 reg_list[i].valid = true;
1381 } else {
1382 if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
1383 /* Report unreadable (XT_REGF_NOREAD) registers as all-zero but valid */
1384 reg_list[i].valid = true;
1385 xtensa_reg_set(target, i, 0);
1386 } else {
1387 reg_list[i].valid = false;
1388 }
1389 }
1390 }
1391
1392 if (xtensa->core_config->windowed) {
1393 /* We have used A3 as a scratch register.
1394 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
1395 */
1396 enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
1397 xtensa_reg_set(target, ar3_idx, a3);
1398 xtensa_mark_register_dirty(xtensa, ar3_idx);
1399
1400 /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1401 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
1402 enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
1403 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
1404 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
1405 xtensa->scratch_ars[s].intval = false;
1406 }
1407
1408 /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1409 xtensa_reg_set(target, XT_REG_IDX_A3, a3);
1410 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1411 if (xtensa->core_config->core_type == XT_NX) {
1412 xtensa_reg_set(target, XT_REG_IDX_A0, a0);
1413 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A0);
1414 }
1415
1416 xtensa->regs_fetched = true;
1417 xtensa_fetch_all_regs_done:
1418 free(regvals);
1419 free(dsrs);
1420 return res;
1421 }
1422
1423 int xtensa_get_gdb_reg_list(struct target *target,
1424 struct reg **reg_list[],
1425 int *reg_list_size,
1426 enum target_register_class reg_class)
1427 {
1428 struct xtensa *xtensa = target_to_xtensa(target);
1429 unsigned int num_regs;
1430
1431 if (reg_class == REG_CLASS_GENERAL) {
1432 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1433 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1434 return ERROR_FAIL;
1435 }
1436 num_regs = xtensa->genpkt_regs_num;
1437 } else {
1438 /* Determine whether to return a contiguous or sparse register map */
1439 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1440 }
1441
1442 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1443
1444 *reg_list = calloc(num_regs, sizeof(struct reg *));
1445 if (!*reg_list)
1446 return ERROR_FAIL;
1447
1448 *reg_list_size = num_regs;
1449 if (xtensa->regmap_contiguous) {
1450 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1451 for (unsigned int i = 0; i < num_regs; i++)
1452 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1453 return ERROR_OK;
1454 }
1455
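/* Sparse map: each existing register is placed at its debug-register (dbreg) index,
 * while unused slots continue to point at placeholder entries from empty_regs[]. */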
1456 for (unsigned int i = 0; i < num_regs; i++)
1457 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1458 unsigned int k = 0;
1459 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1460 if (xtensa->core_cache->reg_list[i].exist) {
1461 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1462 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1463 int sparse_idx = rlist[ridx].dbreg_num;
1464 if (i == XT_REG_IDX_PS && xtensa->core_config->core_type == XT_LX) {
1465 if (xtensa->eps_dbglevel_idx == 0) {
1466 LOG_ERROR("eps_dbglevel_idx not set");
1467 return ERROR_FAIL;
1468 }
1469 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1470 if (xtensa_extra_debug_log)
1471 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1472 sparse_idx, xtensa->core_config->debug.irq_level,
1473 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1474 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1475 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1476 } else {
1477 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1478 }
1479 if (i == XT_REG_IDX_PC)
1480 /* Make a duplicate copy of PC for external access */
1481 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1482 k++;
1483 }
1484 }
1485
1486 if (k == num_regs)
1487 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1488
1489 return ERROR_OK;
1490 }
1491
1492 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1493 {
1494 struct xtensa *xtensa = target_to_xtensa(target);
1495 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1496 xtensa->core_config->mmu.dtlb_entries_count > 0;
1497 return ERROR_OK;
1498 }
1499
1500 int xtensa_halt(struct target *target)
1501 {
1502 struct xtensa *xtensa = target_to_xtensa(target);
1503
1504 LOG_TARGET_DEBUG(target, "start");
1505 if (target->state == TARGET_HALTED) {
1506 LOG_TARGET_DEBUG(target, "target was already halted");
1507 return ERROR_OK;
1508 }
1509 /* First we have to read dsr and check if the target stopped */
1510 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1511 if (res != ERROR_OK) {
1512 LOG_TARGET_ERROR(target, "Failed to read core status!");
1513 return res;
1514 }
1515 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1516 if (!xtensa_is_stopped(target)) {
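/* Request a debug interrupt via DCRSET; the core halts asynchronously and the
 * halted state is picked up by a subsequent poll. */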
1517 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1518 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1519 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1520 if (res != ERROR_OK)
1521 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1522 }
1523
1524 return res;
1525 }
1526
1527 int xtensa_prepare_resume(struct target *target,
1528 int current,
1529 target_addr_t address,
1530 int handle_breakpoints,
1531 int debug_execution)
1532 {
1533 struct xtensa *xtensa = target_to_xtensa(target);
1534 uint32_t bpena = 0;
1535
1536 LOG_TARGET_DEBUG(target,
1537 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1538 current,
1539 address,
1540 handle_breakpoints,
1541 debug_execution);
1542
1543 if (target->state != TARGET_HALTED) {
1544 LOG_TARGET_ERROR(target, "not halted");
1545 return ERROR_TARGET_NOT_HALTED;
1546 }
1547 xtensa->halt_request = false;
1548
1549 if (address && !current) {
1550 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1551 } else {
1552 uint32_t cause = xtensa_cause_get(target);
1553 LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1554 cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1555 if (cause & DEBUGCAUSE_DB)
1556 /* We stopped due to a watchpoint. We can't just resume executing the
1557 * instruction again because that would trigger the watchpoint
1558 * again. To fix this, we single-step, which ignores
1559 * watchpoints. */
1560 xtensa_do_step(target, current, address, handle_breakpoints);
1561 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1562 /* We stopped due to a break instruction. We can't just resume executing
1563 * the instruction again because that would trigger the break again.
1564 * To fix this, we single-step, which ignores break
1565 * instructions. */
1566 xtensa_do_step(target, current, address, handle_breakpoints);
1567 }
1568
1569 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1570 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1571 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1572 if (xtensa->hw_brps[slot]) {
1573 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1574 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1575 if (xtensa->core_config->core_type == XT_NX)
1576 xtensa_reg_set(target, xtensa->nx_reg_idx[XT_NX_REG_IDX_IBREAKC0] + slot, XT_IBREAKC_FB);
1577 bpena |= BIT(slot);
1578 }
1579 }
1580 if (xtensa->core_config->core_type == XT_LX)
1581 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1582
1583 /* Here we write all registers to the targets */
1584 int res = xtensa_write_dirty_registers(target);
1585 if (res != ERROR_OK)
1586 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1587 return res;
1588 }
1589
1590 int xtensa_do_resume(struct target *target)
1591 {
1592 struct xtensa *xtensa = target_to_xtensa(target);
1593
1594 LOG_TARGET_DEBUG(target, "start");
1595
1596 xtensa_cause_reset(target);
1597 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1598 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1599 if (res != ERROR_OK) {
1600 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1601 return res;
1602 }
1603 xtensa_core_status_check(target);
1604 return ERROR_OK;
1605 }
1606
1607 int xtensa_resume(struct target *target,
1608 int current,
1609 target_addr_t address,
1610 int handle_breakpoints,
1611 int debug_execution)
1612 {
1613 LOG_TARGET_DEBUG(target, "start");
1614 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1615 if (res != ERROR_OK) {
1616 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1617 return res;
1618 }
1619 res = xtensa_do_resume(target);
1620 if (res != ERROR_OK) {
1621 LOG_TARGET_ERROR(target, "Failed to resume!");
1622 return res;
1623 }
1624
1625 target->debug_reason = DBG_REASON_NOTHALTED;
1626 if (!debug_execution)
1627 target->state = TARGET_RUNNING;
1628 else
1629 target->state = TARGET_DEBUG_RUNNING;
1630
1631 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1632
1633 return ERROR_OK;
1634 }
1635
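/* Heuristic check: the PC is considered to be inside a window overflow/underflow
 * exception handler if it points at an L32E/S32E or RFWO/RFWU instruction. */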
1636 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1637 {
1638 struct xtensa *xtensa = target_to_xtensa(target);
1639 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1640 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1641 if (err != ERROR_OK)
1642 return false;
1643
1644 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1645 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1646 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1647 return true;
1648
1649 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1650 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1651 return true;
1652
1653 return false;
1654 }
1655
1656 int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1657 {
1658 struct xtensa *xtensa = target_to_xtensa(target);
1659 int res;
1660 const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1661 xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
1662 xtensa_reg_val_t icountlvl, cause;
1663 xtensa_reg_val_t oldps, oldpc, cur_pc;
1664 bool ps_lowered = false;
1665
1666 LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1667 current, address, handle_breakpoints);
1668
1669 if (target->state != TARGET_HALTED) {
1670 LOG_TARGET_ERROR(target, "not halted");
1671 return ERROR_TARGET_NOT_HALTED;
1672 }
1673
1674 if (xtensa->eps_dbglevel_idx == 0 && xtensa->core_config->core_type == XT_LX) {
1675 LOG_TARGET_ERROR(target, "eps_dbglevel_idx not set");
1676 return ERROR_FAIL;
1677 }
1678
1679 /* Save old ps (EPS[dbglvl] on LX), pc */
1680 oldps = xtensa_reg_get(target, (xtensa->core_config->core_type == XT_LX) ?
1681 xtensa->eps_dbglevel_idx : XT_REG_IDX_PS);
1682 oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);
1683
1684 cause = xtensa_cause_get(target);
1685 LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1686 oldps,
1687 oldpc,
1688 cause,
1689 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1690 if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1691 /* handle hard-coded SW breakpoints (e.g. syscalls) */
1692 LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1693 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1694 /* pretend that we have stepped */
1695 if (cause & DEBUGCAUSE_BI)
1696 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1697 else
1698 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1699 return ERROR_OK;
1700 }
1701
1702 /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1703 * at which the instructions are to be counted while stepping.
1704 *
1705 * For example, if we need to step by 2 instructions, and an interrupt occurs
1706 * in between, the processor will trigger the interrupt and halt after the 2nd
1707 * instruction within the interrupt vector and/or handler.
1708 *
1709 * However, sometimes we don't want the interrupt handlers to be executed at all
1710 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1711 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1712 * code from being counted during stepping. Note that C exception handlers must
1713 * run at level 0 and hence will be counted and stepped into, should one occur.
1714 *
1715 * TODO: Certain instructions should never be single-stepped and should instead
1716 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1717 * RFI >= DBGLEVEL.
1718 */
1719 if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
1720 if (!xtensa->core_config->high_irq.enabled) {
1721 LOG_TARGET_WARNING(
1722 target,
1723 "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1724 return ERROR_FAIL;
1725 }
1726 /* Update ICOUNTLEVEL accordingly */
1727 icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
1728 } else {
1729 icountlvl = xtensa->core_config->debug.irq_level;
1730 }
1731
1732 if (cause & DEBUGCAUSE_DB) {
1733 /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1734 * that would trigger the watchpoint again. To fix this, we remove the watchpoints, single-step, and
1735 * re-enable the watchpoint. */
1736 LOG_TARGET_DEBUG(
1737 target,
1738 "Single-stepping to get past instruction that triggered the watchpoint...");
1739 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1740 /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1741 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1742 dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
1743 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
1744 }
1745 }
1746
1747 if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
1748 /* handle normal SW breakpoint */
1749 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1750 if (xtensa->core_config->core_type == XT_LX && ((oldps & 0xf) >= icountlvl)) {
1751 /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1752 ps_lowered = true;
1753 uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
1754 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
1755 LOG_TARGET_DEBUG(target,
1756 "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
1757 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1758 newps,
1759 oldps);
1760 }
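/* Step loop: normally runs once; it repeats only when the PC lands in a window
 * overflow/underflow handler while ISRs are masked, so we step back out of the handler. */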
1761 do {
1762 if (xtensa->core_config->core_type == XT_LX) {
1763 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
1764 xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1765 } else {
1766 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_STEPREQUEST);
1767 }
1768
1769 /* Now that ICOUNT (LX) or DCR.StepRequest (NX) is set,
1770 * we can resume as if we were going to run
1771 */
1772 res = xtensa_prepare_resume(target, current, address, 0, 0);
1773 if (res != ERROR_OK) {
1774 LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1775 return res;
1776 }
1777 res = xtensa_do_resume(target);
1778 if (res != ERROR_OK) {
1779 LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1780 return res;
1781 }
1782
1783 /* Wait for stepping to complete */
1784 long long start = timeval_ms();
1785 while (timeval_ms() < start + 500) {
1786 /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1787 * until stepping is complete. */
1788 usleep(1000);
1789 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1790 if (res != ERROR_OK) {
1791 LOG_TARGET_ERROR(target, "Failed to read core status!");
1792 return res;
1793 }
1794 if (xtensa_is_stopped(target))
1795 break;
1796 usleep(1000);
1797 }
1798 LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1799 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1800 if (!xtensa_is_stopped(target)) {
1801 LOG_TARGET_WARNING(
1802 target,
1803 "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1804 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1805 target->debug_reason = DBG_REASON_NOTHALTED;
1806 target->state = TARGET_RUNNING;
1807 return ERROR_FAIL;
1808 }
1809
1810 xtensa_fetch_all_regs(target);
1811 cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1812
1813 LOG_TARGET_DEBUG(target,
1814 "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1815 xtensa_reg_get(target, XT_REG_IDX_PS),
1816 cur_pc,
1817 xtensa_cause_get(target),
1818 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1819
1820 /* Do not step into WindowOverflow if ISRs are masked.
1821 If we stop in WindowOverflow at breakpoint with masked ISRs and
1822 try to do a step it will get us out of that handler */
1823 if (xtensa->core_config->windowed &&
1824 xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
1825 xtensa_pc_in_winexc(target, cur_pc)) {
1826 /* isrmask = on, need to step out of the window exception handler */
1827 LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1828 oldpc = cur_pc;
1829 address = oldpc + 3;
1830 continue;
1831 }
1832
1833 if (oldpc == cur_pc)
1834 LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1835 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1836 else
1837 LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1838 break;
1839 } while (true);
1840
1841 target->debug_reason = DBG_REASON_SINGLESTEP;
1842 target->state = TARGET_HALTED;
1843 LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1844
1845 if (cause & DEBUGCAUSE_DB) {
1846 LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1847 /* Restore the DBREAKCx registers */
1848 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1849 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
1850 }
1851
1852 /* Restore int level */
1853 if (ps_lowered) {
1854 LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
1855 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1856 oldps);
1857 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
1858 }
1859
1860 /* write ICOUNTLEVEL back to zero */
1861 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
1862 /* TODO: can we skip writing dirty registers and re-fetching them? */
1863 res = xtensa_write_dirty_registers(target);
1864 xtensa_fetch_all_regs(target);
1865 return res;
1866 }
1867
1868 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1869 {
1870 int retval = xtensa_do_step(target, current, address, handle_breakpoints);
1871 if (retval != ERROR_OK)
1872 return retval;
1873 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1874
1875 return ERROR_OK;
1876 }
1877
1878 /**
1879 * Returns true if two ranges are overlapping
1880 */
1881 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1882 target_addr_t r1_end,
1883 target_addr_t r2_start,
1884 target_addr_t r2_end)
1885 {
1886 if ((r2_start >= r1_start) && (r2_start < r1_end))
1887 return true; /* r2_start is in r1 region */
1888 if ((r2_end > r1_start) && (r2_end <= r1_end))
1889 return true; /* r2_end is in r1 region */
1890 return false;
1891 }
1892
1893 /**
1894 * Returns the size of the overlapping region of two ranges.
1895 */
1896 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1897 target_addr_t r1_end,
1898 target_addr_t r2_start,
1899 target_addr_t r2_end)
1900 {
1901 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1902 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1903 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1904 return ov_end - ov_start;
1905 }
1906 return 0;
1907 }
1908
1909 /**
1910 * Check whether the address range falls within configured memory regions with the required access mode
1911 */
1912 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1913 {
1914 target_addr_t adr_pos = address; /* address cursor, starting at the beginning of the range */
1915 target_addr_t adr_end = address + size; /* region end */
1916 target_addr_t overlap_size;
1917 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1918
1919 while (adr_pos < adr_end) {
1920 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1921 if (!cm) /* address does not belong to any configured region */
1922 return false;
1923 if ((cm->access & access) != access) /* access check */
1924 return false;
1925 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1926 assert(overlap_size != 0);
1927 adr_pos += overlap_size;
1928 }
1929 return true;
1930 }
1931
1932 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1933 {
1934 struct xtensa *xtensa = target_to_xtensa(target);
1935 /* We are going to read memory in 32-bit increments. This may not be what the calling
1936 * function expects, so we may need to allocate a temp buffer and read into that first. */
1937 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1938 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1939 target_addr_t adr = addrstart_al;
1940 uint8_t *albuff;
1941 bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1942
1943 if (target->state != TARGET_HALTED) {
1944 LOG_TARGET_ERROR(target, "not halted");
1945 return ERROR_TARGET_NOT_HALTED;
1946 }
1947
1948 if (!xtensa->permissive_mode) {
1949 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1950 XT_MEM_ACCESS_READ)) {
1951 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1952 return ERROR_FAIL;
1953 }
1954 }
1955
1956 unsigned int alloc_bytes = ALIGN_UP(addrend_al - addrstart_al, sizeof(uint32_t));
1957 albuff = calloc(alloc_bytes, 1);
1958 if (!albuff) {
1959 LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
1960 addrend_al - addrstart_al);
1961 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1962 }
1963
1964 /* We're going to use A3 here */
1965 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1966 /* Write start address to A3 */
1967 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1968 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1969 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1970 if (xtensa->probe_lsddr32p != 0) {
1971 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
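/* Each DDREXEC read returns the current word and re-executes LDDR32.P to fetch the
 * next one; the final word is read via plain DDR so no load is issued past the end. */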
1972 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1973 xtensa_queue_dbg_reg_read(xtensa,
1974 (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
1975 &albuff[i]);
1976 } else {
1977 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1978 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1979 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1980 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
1981 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[i]);
1982 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1983 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1984 }
1985 }
1986 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1987 if (res == ERROR_OK) {
1988 bool prev_suppress = xtensa->suppress_dsr_errors;
1989 xtensa->suppress_dsr_errors = true;
1990 res = xtensa_core_status_check(target);
1991 if (xtensa->probe_lsddr32p == -1)
1992 xtensa->probe_lsddr32p = 1;
1993 xtensa->suppress_dsr_errors = prev_suppress;
1994 }
1995 if (res != ERROR_OK) {
1996 if (xtensa->probe_lsddr32p != 0) {
1997 /* Disable fast memory access instructions and retry before reporting an error */
1998 LOG_TARGET_DEBUG(target, "Disabling LDDR32.P/SDDR32.P");
1999 xtensa->probe_lsddr32p = 0;
2000 res = xtensa_read_memory(target, address, size, count, albuff);
2001 bswap = false;
2002 } else {
2003 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
2004 count * size, address);
2005 }
2006 }
2007
2008 if (bswap)
2009 buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
2010 memcpy(buffer, albuff + (address & 3), (size * count));
2011 free(albuff);
2012 return res;
2013 }
2014
2015 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2016 {
2017 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
2018 return xtensa_read_memory(target, address, 1, count, buffer);
2019 }
2020
2021 int xtensa_write_memory(struct target *target,
2022 target_addr_t address,
2023 uint32_t size,
2024 uint32_t count,
2025 const uint8_t *buffer)
2026 {
2027 /* This memory write function can be handed nearly anything, from aligned
2028 * uint32 writes to unaligned uint8 accesses. Xtensa memory, however, may
2029 * only accept aligned uint32 writes. That is why we convert everything
2030 * into that form. */
2031 struct xtensa *xtensa = target_to_xtensa(target);
2032 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
2033 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
2034 target_addr_t adr = addrstart_al;
2035 int res;
2036 uint8_t *albuff;
2037 bool fill_head_tail = false;
2038
2039 if (target->state != TARGET_HALTED) {
2040 LOG_TARGET_ERROR(target, "not halted");
2041 return ERROR_TARGET_NOT_HALTED;
2042 }
2043
2044 if (!xtensa->permissive_mode) {
2045 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
2046 LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
2047 return ERROR_FAIL;
2048 }
2049 }
2050
2051 if (size == 0 || count == 0 || !buffer)
2052 return ERROR_COMMAND_SYNTAX_ERROR;
2053
2054 /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
2055 if (addrstart_al == address && addrend_al == address + (size * count)) {
2056 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
2057 /* Need a buffer for byte-swapping */
2058 albuff = malloc(addrend_al - addrstart_al);
2059 else
2060 /* We discard the const here because albuff can also be non-const */
2061 albuff = (uint8_t *)buffer;
2062 } else {
2063 fill_head_tail = true;
2064 albuff = malloc(addrend_al - addrstart_al);
2065 }
2066 if (!albuff) {
2067 LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
2068 addrend_al - addrstart_al);
2069 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2070 }
2071
2072 /* We're going to use A3 here */
2073 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
2074
2075 /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
2076 if (fill_head_tail) {
2077 /* See if we need to read the first and/or last word. */
2078 if (address & 3) {
2079 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
2080 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2081 if (xtensa->probe_lsddr32p == 1) {
2082 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
2083 } else {
2084 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
2085 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
2086 }
2087 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[0]);
2088 }
2089 if ((address + (size * count)) & 3) {
2090 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
2091 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2092 if (xtensa->probe_lsddr32p == 1) {
2093 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
2094 } else {
2095 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
2096 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
2097 }
2098 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
2099 &albuff[addrend_al - addrstart_al - 4]);
2100 }
2101 /* Grab bytes */
2102 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2103 if (res != ERROR_OK) {
2104 LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
2105 if (albuff != buffer)
2106 free(albuff);
2107 return res;
2108 }
2109 xtensa_core_status_check(target);
2110 if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
2111 bool swapped_w0 = false;
2112 if (address & 3) {
2113 buf_bswap32(&albuff[0], &albuff[0], 4);
2114 swapped_w0 = true;
2115 }
2116 if ((address + (size * count)) & 3) {
2117 if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
2118 /* Don't double-swap if buffer start/end are within the same word */
2119 } else {
2120 buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
2121 &albuff[addrend_al - addrstart_al - 4], 4);
2122 }
2123 }
2124 }
2125 /* Copy data to be written into the aligned buffer (in host-endianness) */
2126 memcpy(&albuff[address & 3], buffer, size * count);
2127 /* Now we can write albuff in aligned uint32s. */
2128 }
2129
2130 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
2131 buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
2132
2133 /* Write start address to A3 */
2134 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
2135 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2136 /* Write the aligned buffer */
2137 if (xtensa->probe_lsddr32p != 0) {
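/* First word: load DDR and execute SDDR32.P explicitly; subsequent words are written
 * via DDREXEC, which re-executes SDDR32.P and stores one word per DDR write. */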
2138 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
2139 if (i == 0) {
2140 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
2141 xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
2142 } else {
2143 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
2144 }
2145 }
2146 } else {
2147 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
2148 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
2149 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
2150 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2151 xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
2152 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
2153 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2154 }
2155 }
2156
2157 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2158 if (res == ERROR_OK) {
2159 bool prev_suppress = xtensa->suppress_dsr_errors;
2160 xtensa->suppress_dsr_errors = true;
2161 res = xtensa_core_status_check(target);
2162 if (xtensa->probe_lsddr32p == -1)
2163 xtensa->probe_lsddr32p = 1;
2164 xtensa->suppress_dsr_errors = prev_suppress;
2165 }
2166 if (res != ERROR_OK) {
2167 if (xtensa->probe_lsddr32p != 0) {
2168 /* Disable fast memory access instructions and retry before reporting an error */
2169 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
2170 xtensa->probe_lsddr32p = 0;
2171 res = xtensa_write_memory(target, address, size, count, buffer);
2172 } else {
2173 LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
2174 count * size, address);
2175 }
2176 } else {
2177 /* Invalidate ICACHE, writeback DCACHE if present */
2178 uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
2179 uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
2180 if (issue_ihi || issue_dhwb) {
2181 uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2182 uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2183 uint32_t linesize = MIN(ilinesize, dlinesize);
2184 uint32_t off = 0;
2185 adr = addrstart_al;
2186
2187 while ((adr + off) < addrend_al) {
2188 if (off == 0) {
2189 /* Write start address to A3 */
2190 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr);
2191 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2192 }
2193 if (issue_ihi)
2194 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
2195 if (issue_dhwb)
2196 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
2197 off += linesize;
2198 if (off > 1020) {
2199 /* IHI, DHWB have 8-bit immediate operands (0..1020) */
2200 adr += off;
2201 off = 0;
2202 }
2203 }
2204
2205 /* Execute cache WB/INV instructions */
2206 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2207 xtensa_core_status_check(target);
2208 if (res != ERROR_OK)
2209 LOG_TARGET_ERROR(target,
2210 "Error issuing cache writeback/invaldate instruction(s): %d",
2211 res);
2212 }
2213 }
2214 if (albuff != buffer)
2215 free(albuff);
2216
2217 return res;
2218 }
2219
2220 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2221 {
2222 /* xtensa_write_memory can handle everything. Just pass on to that. */
2223 return xtensa_write_memory(target, address, 1, count, buffer);
2224 }
2225
2226 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2227 {
2228 LOG_WARNING("not implemented yet");
2229 return ERROR_FAIL;
2230 }
2231
2232 int xtensa_poll(struct target *target)
2233 {
2234 struct xtensa *xtensa = target_to_xtensa(target);
2235 if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
2236 target->state = TARGET_UNKNOWN;
2237 return ERROR_TARGET_NOT_EXAMINED;
2238 }
2239
2240 int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET(xtensa) |
2241 PWRSTAT_COREWASRESET(xtensa));
2242 if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
2243 LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2244 xtensa->dbg_mod.power_status.stat,
2245 PWRSTAT_DEBUGWASRESET(xtensa) | PWRSTAT_COREWASRESET(xtensa),
2246 xtensa->dbg_mod.power_status.stath);
2247 if (res != ERROR_OK)
2248 return res;
2249
2250 if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
2251 LOG_TARGET_INFO(target, "Debug controller was reset.");
2252 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
2253 if (res != ERROR_OK)
2254 return res;
2255 }
2256 if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
2257 LOG_TARGET_INFO(target, "Core was reset.");
2258 xtensa_dm_power_status_cache(&xtensa->dbg_mod);
2259 /* Enable JTAG, set reset if needed */
2260 res = xtensa_wakeup(target);
2261 if (res != ERROR_OK)
2262 return res;
2263
2264 uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2265 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
2266 if (res != ERROR_OK)
2267 return res;
2268 if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2269 LOG_TARGET_DEBUG(target,
2270 "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2271 prev_dsr,
2272 xtensa->dbg_mod.core_status.dsr);
2273 if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET(xtensa)) {
2274 /* if RESET state is persistent */
2275 target->state = TARGET_RESET;
2276 } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2277 LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2278 xtensa->dbg_mod.core_status.dsr,
2279 xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
2280 target->state = TARGET_UNKNOWN;
2281 if (xtensa->come_online_probes_num == 0)
2282 target->examined = false;
2283 else
2284 xtensa->come_online_probes_num--;
2285 } else if (xtensa_is_stopped(target)) {
2286 if (target->state != TARGET_HALTED) {
2287 enum target_state oldstate = target->state;
2288 target->state = TARGET_HALTED;
2289 /* Examine why the target has been halted */
2290 target->debug_reason = DBG_REASON_DBGRQ;
2291 xtensa_fetch_all_regs(target);
2292 /* When setting debug reason DEBUGCAUSE events have the following
2293 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2294 /* Watchpoint and breakpoint events at the same time results in special
2295 * debug reason: DBG_REASON_WPTANDBKPT. */
2296 uint32_t halt_cause = xtensa_cause_get(target);
2297 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2298 if (halt_cause & DEBUGCAUSE_IC)
2299 target->debug_reason = DBG_REASON_SINGLESTEP;
2300 if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2301 if (halt_cause & DEBUGCAUSE_DB)
2302 target->debug_reason = DBG_REASON_WPTANDBKPT;
2303 else
2304 target->debug_reason = DBG_REASON_BREAKPOINT;
2305 } else if (halt_cause & DEBUGCAUSE_DB) {
2306 target->debug_reason = DBG_REASON_WATCHPOINT;
2307 }
2308 LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2309 ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2310 xtensa_reg_get(target, XT_REG_IDX_PC),
2311 target->debug_reason,
2312 oldstate);
2313 LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2314 halt_cause,
2315 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
2316 xtensa->dbg_mod.core_status.dsr);
2317 xtensa_dm_core_status_clear(
2318 &xtensa->dbg_mod,
2319 OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
2320 OCDDSR_DEBUGINTTRAX |
2321 OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
2322 if (xtensa->core_config->core_type == XT_NX) {
2323 /* Enable imprecise exceptions while in halted state */
2324 xtensa_reg_val_t ps = xtensa_reg_get(target, XT_REG_IDX_PS);
2325 xtensa_reg_val_t newps = ps & ~(XT_PS_DIEXC_MSK);
2326 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_PS);
2327 LOG_TARGET_DEBUG(target, "Enabling PS.DIEXC: 0x%08x -> 0x%08x", ps, newps);
2328 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, newps);
2329 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2330 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
2331 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2332 if (res != ERROR_OK) {
2333 LOG_TARGET_ERROR(target, "Failed to write PS.DIEXC (%d)!", res);
2334 return res;
2335 }
2336 xtensa_core_status_check(target);
2337 }
2338 }
2339 } else {
2340 target->debug_reason = DBG_REASON_NOTHALTED;
2341 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2342 target->state = TARGET_RUNNING;
2343 target->debug_reason = DBG_REASON_NOTHALTED;
2344 }
2345 }
2346 if (xtensa->trace_active) {
2347 /* Detect if tracing was active but has stopped. */
2348 struct xtensa_trace_status trace_status;
2349 res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2350 if (res == ERROR_OK) {
2351 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2352 LOG_INFO("Detected end of trace.");
2353 if (trace_status.stat & TRAXSTAT_PCMTG)
2354 LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2355 if (trace_status.stat & TRAXSTAT_PTITG)
2356 LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2357 if (trace_status.stat & TRAXSTAT_CTITG)
2358 LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2359 xtensa->trace_active = false;
2360 }
2361 }
2362 }
2363 return ERROR_OK;
2364 }
2365
2366 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2367 {
2368 struct xtensa *xtensa = target_to_xtensa(target);
2369 unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
2370 unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2371 uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2372 uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2373 unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2374 unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
2375 int ret;
2376
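/* A single instruction update can span at most two cache lines (handled by the
 * additional +4 invalidations below), so reject anything larger than one icache line. */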
2377 if (size > icache_line_size)
2378 return ERROR_FAIL;
2379
2380 if (issue_ihi || issue_dhwbi) {
2381 /* We're going to use A3 here */
2382 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
2383
2384 /* Write start address to A3 and invalidate */
2385 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2386 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2387 LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
2388 if (issue_dhwbi) {
2389 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
2390 if (!same_dc_line) {
2391 LOG_TARGET_DEBUG(target,
2392 "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2393 address + 4);
2394 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
2395 }
2396 }
2397 if (issue_ihi) {
2398 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
2399 if (!same_ic_line) {
2400 LOG_TARGET_DEBUG(target,
2401 "IHI second icache line for address "TARGET_ADDR_FMT,
2402 address + 4);
2403 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
2404 }
2405 }
2406
2407 /* Execute invalidate instructions */
2408 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2409 xtensa_core_status_check(target);
2410 if (ret != ERROR_OK) {
2411 LOG_ERROR("Error issuing cache invalidate instruction(s): %d", ret);
2412 return ret;
2413 }
2414 }
2415
2416 /* Write new instructions to memory */
2417 ret = target_write_buffer(target, address, size, buffer);
2418 if (ret != ERROR_OK) {
2419 LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2420 return ret;
2421 }
2422
2423 if (issue_dhwbi) {
2424 /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2425 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2426 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2427 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
2428 LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2429 if (!same_dc_line) {
2430 LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2431 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
2432 }
2433
2434 /* Execute invalidate instructions */
2435 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2436 xtensa_core_status_check(target);
2437 }
2438
2439 /* TODO: Handle L2 cache if present */
2440 return ret;
2441 }
2442
2443 static int xtensa_sw_breakpoint_add(struct target *target,
2444 struct breakpoint *breakpoint,
2445 struct xtensa_sw_breakpoint *sw_bp)
2446 {
2447 struct xtensa *xtensa = target_to_xtensa(target);
2448 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2449 if (ret != ERROR_OK) {
2450 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2451 return ret;
2452 }
2453
2454 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2455 sw_bp->oocd_bp = breakpoint;
2456
2457 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2458
2459 /* Underlying memory write will convert instruction endianness, don't do that here */
2460 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2461 if (ret != ERROR_OK) {
2462 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2463 return ret;
2464 }
2465
2466 return ERROR_OK;
2467 }
2468
2469 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2470 {
2471 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2472 if (ret != ERROR_OK) {
2473 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2474 return ret;
2475 }
2476 sw_bp->oocd_bp = NULL;
2477 return ERROR_OK;
2478 }
2479
2480 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2481 {
2482 struct xtensa *xtensa = target_to_xtensa(target);
2483 unsigned int slot;
2484
2485 if (breakpoint->type == BKPT_SOFT) {
2486 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2487 if (!xtensa->sw_brps[slot].oocd_bp ||
2488 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2489 break;
2490 }
2491 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2492 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2493 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2494 }
2495 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2496 if (ret != ERROR_OK) {
2497 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2498 return ret;
2499 }
2500 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2501 slot,
2502 breakpoint->address);
2503 return ERROR_OK;
2504 }
2505
2506 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2507 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2508 break;
2509 }
2510 if (slot == xtensa->core_config->debug.ibreaks_num) {
2511 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2512 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2513 }
2514
2515 xtensa->hw_brps[slot] = breakpoint;
2516 /* We will actually write the breakpoints when we resume the target. */
2517 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2518 slot,
2519 breakpoint->address);
2520
2521 return ERROR_OK;
2522 }
2523
2524 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2525 {
2526 struct xtensa *xtensa = target_to_xtensa(target);
2527 unsigned int slot;
2528
2529 if (breakpoint->type == BKPT_SOFT) {
2530 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2531 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2532 break;
2533 }
2534 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2535 LOG_TARGET_WARNING(target, "SW breakpoint not found (%u slots searched)!", slot);
2536 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2537 }
2538 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2539 if (ret != ERROR_OK) {
2540 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2541 return ret;
2542 }
2543 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2544 return ERROR_OK;
2545 }
2546
2547 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2548 if (xtensa->hw_brps[slot] == breakpoint)
2549 break;
2550 }
2551 if (slot == xtensa->core_config->debug.ibreaks_num) {
2552 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2553 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2554 }
2555 xtensa->hw_brps[slot] = NULL;
2556 if (xtensa->core_config->core_type == XT_NX)
2557 xtensa_reg_set(target, xtensa->nx_reg_idx[XT_NX_REG_IDX_IBREAKC0] + slot, 0);
2558 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2559 return ERROR_OK;
2560 }
2561
2562 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2563 {
2564 struct xtensa *xtensa = target_to_xtensa(target);
2565 unsigned int slot;
2566 xtensa_reg_val_t dbreakcval;
2567
2568 if (target->state != TARGET_HALTED) {
2569 LOG_TARGET_ERROR(target, "not halted");
2570 return ERROR_TARGET_NOT_HALTED;
2571 }
2572
2573 if (watchpoint->mask != WATCHPOINT_IGNORE_DATA_VALUE_MASK) {
2574 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2575 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2576 }
2577
2578 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2579 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2580 break;
2581 }
2582 if (slot == xtensa->core_config->debug.dbreaks_num) {
2583 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2584 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2585 }
2586
2587 /* Figure out the value for DBREAKC bits 5..0.
2588 * It's basically 0x3F with one more bit cleared from the LSB end for each doubling of the watchpoint length. */
2589 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2590 !IS_PWR_OF_2(watchpoint->length) ||
2591 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2592 LOG_TARGET_WARNING(
2593 target,
2594 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2595 " not supported by hardware.",
2596 watchpoint->length,
2597 watchpoint->address);
2598 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2599 }
2600 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
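/* e.g. length 1 -> 0x3F, length 4 -> 0x3C, length 64 -> 0x00 (low mask bits cleared
 * so the corresponding address bits are ignored by the comparison) */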
2601
2602 if (watchpoint->rw == WPT_READ)
2603 dbreakcval |= BIT(30);
2604 if (watchpoint->rw == WPT_WRITE)
2605 dbreakcval |= BIT(31);
2606 if (watchpoint->rw == WPT_ACCESS)
2607 dbreakcval |= BIT(30) | BIT(31);
2608
2609 /* Write DBREAKA[slot] and DBREAKC[slot] */
2610 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2611 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2612 xtensa->hw_wps[slot] = watchpoint;
2613 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2614 watchpoint->address);
2615 return ERROR_OK;
2616 }
2617
2618 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2619 {
2620 struct xtensa *xtensa = target_to_xtensa(target);
2621 unsigned int slot;
2622
2623 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2624 if (xtensa->hw_wps[slot] == watchpoint)
2625 break;
2626 }
2627 if (slot == xtensa->core_config->debug.dbreaks_num) {
2628 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2629 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2630 }
2631 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2632 xtensa->hw_wps[slot] = NULL;
2633 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2634 watchpoint->address);
2635 return ERROR_OK;
2636 }
2637
2638 static int xtensa_build_reg_cache(struct target *target)
2639 {
2640 struct xtensa *xtensa = target_to_xtensa(target);
2641 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2642 unsigned int last_dbreg_num = 0;
2643
2644 if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
2645 LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2646 xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);
2647
2648 struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2649
2650 if (!reg_cache) {
2651 LOG_ERROR("Failed to alloc reg cache!");
2652 return ERROR_FAIL;
2653 }
2654 reg_cache->name = "Xtensa registers";
2655 reg_cache->next = NULL;
2656 /* Init reglist */
2657 unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2658 struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2659 if (!reg_list) {
2660 LOG_ERROR("Failed to alloc reg list!");
2661 goto fail;
2662 }
2663 xtensa->dbregs_num = 0;
2664 unsigned int didx = 0;
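/* Populate the cache from both descriptor lists: first the fixed core register set
 * (xtensa_regs), then the configuration-specific optional registers; didx indexes the merged list. */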
2665 for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2666 struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2667 unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2668 for (unsigned int i = 0; i < listsize; i++, didx++) {
2669 reg_list[didx].exist = rlist[i].exist;
2670 reg_list[didx].name = rlist[i].name;
2671 reg_list[didx].size = 32;
2672 reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2673 if (!reg_list[didx].value) {
2674 LOG_ERROR("Failed to alloc reg list value!");
2675 goto fail;
2676 }
2677 reg_list[didx].dirty = false;
2678 reg_list[didx].valid = false;
2679 reg_list[didx].type = &xtensa_reg_type;
2680 reg_list[didx].arch_info = xtensa;
2681 if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2682 last_dbreg_num = rlist[i].dbreg_num;
2683
2684 if (xtensa_extra_debug_log) {
2685 LOG_TARGET_DEBUG(target,
2686 "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2687 reg_list[didx].name,
2688 whichlist,
2689 reg_list[didx].exist,
2690 didx,
2691 rlist[i].type,
2692 rlist[i].dbreg_num);
2693 }
2694 }
2695 }
2696
2697 xtensa->dbregs_num = last_dbreg_num + 1;
2698 reg_cache->reg_list = reg_list;
2699 reg_cache->num_regs = reg_list_size;
2700
2701 LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2702 xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
2703
2704 /* Construct empty-register list for handling unknown register requests */
2705 xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
2706 if (!xtensa->empty_regs) {
2707 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2708 goto fail;
2709 }
2710 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2711 xtensa->empty_regs[i].name = calloc(8, sizeof(char));
2712 if (!xtensa->empty_regs[i].name) {
2713 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2714 goto fail;
2715 }
2716 sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
2717 xtensa->empty_regs[i].size = 32;
2718 xtensa->empty_regs[i].type = &xtensa_reg_type;
2719 xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2720 if (!xtensa->empty_regs[i].value) {
2721 LOG_ERROR("Failed to alloc empty reg list value!");
2722 goto fail;
2723 }
2724 xtensa->empty_regs[i].arch_info = xtensa;
2725 }
2726
2727 /* Construct contiguous register list from contiguous descriptor list */
2728 if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
2729 xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
2730 if (!xtensa->contiguous_regs_list) {
2731 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2732 goto fail;
2733 }
2734 for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
2735 unsigned int j;
2736 for (j = 0; j < reg_cache->num_regs; j++) {
2737 if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
2738 /* Register number field is not filled above.
2739 Here we are assigning the corresponding index from the contiguous reg list.
2740 These indexes are in the same order with gdb g-packet request/response.
2741 Some more changes may be required for sparse reg lists.
2742 */
2743 reg_cache->reg_list[j].number = i;
2744 xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
2745 LOG_TARGET_DEBUG(target,
2746 "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
2747 xtensa->contiguous_regs_list[i]->name,
2748 xtensa->contiguous_regs_desc[i]->dbreg_num);
2749 break;
2750 }
2751 }
2752 if (j == reg_cache->num_regs)
2753 LOG_TARGET_WARNING(target, "contiguous register %s not found",
2754 xtensa->contiguous_regs_desc[i]->name);
2755 }
2756 }
2757
2758 xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2759 if (!xtensa->algo_context_backup) {
2760 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2761 goto fail;
2762 }
2763 for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2764 struct reg *reg = &reg_cache->reg_list[i];
2765 xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2766 if (!xtensa->algo_context_backup[i]) {
2767 LOG_ERROR("Failed to alloc mem for algorithm context!");
2768 goto fail;
2769 }
2770 }
2771 xtensa->core_cache = reg_cache;
2772 if (cache_p)
2773 *cache_p = reg_cache;
2774 return ERROR_OK;
2775
2776 fail:
2777 if (reg_list) {
2778 for (unsigned int i = 0; i < reg_list_size; i++)
2779 free(reg_list[i].value);
2780 free(reg_list);
2781 }
2782 if (xtensa->empty_regs) {
2783 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2784 free((void *)xtensa->empty_regs[i].name);
2785 free(xtensa->empty_regs[i].value);
2786 }
2787 free(xtensa->empty_regs);
2788 }
2789 if (xtensa->algo_context_backup) {
2790 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2791 free(xtensa->algo_context_backup[i]);
2792 free(xtensa->algo_context_backup);
2793 }
2794 free(reg_cache);
2795
2796 return ERROR_FAIL;
2797 }
2798
2799 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
2800 {
2801 struct xtensa *xtensa = target_to_xtensa(target);
2802 int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
2803 /* Process op[] list */
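/* Each op is encoded as ":<len>:<byte0>:<byte1>...", with multiple ops optionally chained. */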
2804 while (opstr && (*opstr == ':')) {
2805 uint8_t ops[32];
2806 unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
2807 if (oplen > 32) {
2808 LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)", oplen);
2809 break;
2810 }
2811 unsigned int i = 0;
2812 while ((i < oplen) && opstr && (*opstr == ':'))
2813 ops[i++] = strtoul(opstr + 1, &opstr, 16);
2814 if (i != oplen) {
2815 LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)", i);
2816 break;
2817 }
2818
2819 char insn_buf[128];
2820 sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
2821 for (i = 0; i < oplen; i++)
2822 sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
2823 LOG_TARGET_DEBUG(target, "%s", insn_buf);
2824 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
2825 status = ERROR_OK;
2826 }
2827 return status;
2828 }
2829
2830 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
2831 {
2832 struct xtensa *xtensa = target_to_xtensa(target);
2833 bool iswrite = (packet[0] == 'Q');
2834 enum xtensa_qerr_e error;
2835
2836 /* Read/write TIE register. Requires spill location.
2837 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
2838 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
2839 */
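/* Example packets (hypothetical register number, length and opcode bytes):
 *   qxtreg24:8:3:aa:bb:cc                    read an 8-byte TIE register
 *   Qxtreg24:8:3:aa:bb:cc=0011223344556677   write the same register
 * All fields are hexadecimal; aa:bb:cc stands in for a core-specific access opcode.
 */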
2840 if (!(xtensa->spill_buf)) {
2841 LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
2842 error = XT_QERR_FAIL;
2843 goto xtensa_gdbqc_qxtreg_fail;
2844 }
2845
2846 char *delim;
2847 uint32_t regnum = strtoul(packet + 6, &delim, 16);
2848 if (*delim != ':') {
2849 LOG_ERROR("Malformed qxtreg packet");
2850 error = XT_QERR_INVAL;
2851 goto xtensa_gdbqc_qxtreg_fail;
2852 }
2853 uint32_t reglen = strtoul(delim + 1, &delim, 16);
2854 if (*delim != ':') {
2855 LOG_ERROR("Malformed qxtreg packet");
2856 error = XT_QERR_INVAL;
2857 goto xtensa_gdbqc_qxtreg_fail;
2858 }
2859 uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
2860 memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
2861 LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
2862 if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
2863 LOG_ERROR("TIE register too large");
2864 error = XT_QERR_MEM;
2865 goto xtensa_gdbqc_qxtreg_fail;
2866 }
2867
2868 /* (1) Save spill memory, (1.5) [if write then store value to spill location],
2869 * (2) read old a4, (3) write spill address to a4.
2870 * NOTE: ensure a4 is restored properly by all error handling logic
2871 */
2872 unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
2873 int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
2874 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2875 if (status != ERROR_OK) {
2876 LOG_ERROR("Spill memory save");
2877 error = XT_QERR_MEM;
2878 goto xtensa_gdbqc_qxtreg_fail;
2879 }
2880 if (iswrite) {
2881 /* Extract value and store in spill memory */
2882 unsigned int b = 0;
2883 char *valbuf = strchr(delim, '=');
2884 if (!(valbuf && (*valbuf == '='))) {
2885 LOG_ERROR("Malformed Qxtreg packet");
2886 error = XT_QERR_INVAL;
2887 goto xtensa_gdbqc_qxtreg_fail;
2888 }
2889 valbuf++;
2890 while (*valbuf && *(valbuf + 1)) {
2891 char bytestr[3] = { 0, 0, 0 };
2892 strncpy(bytestr, valbuf, 2);
2893 regbuf[b++] = strtoul(bytestr, NULL, 16);
2894 valbuf += 2;
2895 }
2896 if (b != reglen) {
2897 LOG_ERROR("Malformed Qxtreg packet");
2898 error = XT_QERR_INVAL;
2899 goto xtensa_gdbqc_qxtreg_fail;
2900 }
2901 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2902 reglen / memop_size, regbuf);
2903 if (status != ERROR_OK) {
2904 LOG_ERROR("TIE value store");
2905 error = XT_QERR_MEM;
2906 goto xtensa_gdbqc_qxtreg_fail;
2907 }
2908 }
2909 xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
2910 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, xtensa->spill_loc);
2911 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2912
2913 int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
2914
2915 /* Restore a4 but not yet spill memory. Execute it all... */
2916 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, orig_a4);
2917 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2918 status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2919 if (status != ERROR_OK) {
2920 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
2921 tieop_status = status;
2922 }
2923 status = xtensa_core_status_check(target);
2924 if (status != ERROR_OK) {
2925 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
2926 tieop_status = status;
2927 }
2928
2929 if (tieop_status == ERROR_OK) {
2930 if (iswrite) {
2931 /* TIE write succeeded; send OK */
2932 strcpy(*response_p, "OK");
2933 } else {
2934 /* TIE read succeeded; copy result from spill memory */
2935 status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
2936 if (status != ERROR_OK) {
2937 LOG_TARGET_ERROR(target, "TIE result read");
2938 tieop_status = status;
2939 }
2940 unsigned int i;
2941 for (i = 0; i < reglen; i++)
2942 sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
2943 *(*response_p + 2 * i) = '\0';
2944 LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
2945 }
2946 }
2947
2948 /* Restore spill memory first, then report any previous errors */
2949 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2950 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2951 if (status != ERROR_OK) {
2952 LOG_ERROR("Spill memory restore");
2953 error = XT_QERR_MEM;
2954 goto xtensa_gdbqc_qxtreg_fail;
2955 }
2956 if (tieop_status != ERROR_OK) {
2957 LOG_ERROR("TIE execution");
2958 error = XT_QERR_FAIL;
2959 goto xtensa_gdbqc_qxtreg_fail;
2960 }
2961 return ERROR_OK;
2962
2963 xtensa_gdbqc_qxtreg_fail:
2964 strcpy(*response_p, xt_qerr[error].chrval);
2965 return xt_qerr[error].intval;
2966 }
2967
2968 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
2969 {
2970 struct xtensa *xtensa = target_to_xtensa(target);
2971 enum xtensa_qerr_e error;
2972 if (!packet || !response_p) {
2973 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
2974 return ERROR_FAIL;
2975 }
2976
2977 *response_p = xtensa->qpkt_resp;
2978 if (strncmp(packet, "qxtn", 4) == 0) {
2979 strcpy(*response_p, "OpenOCD");
2980 return ERROR_OK;
2981 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
2982 return ERROR_OK;
2983 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
2984 /* Confirm host cache params match core .cfg file */
2985 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
2986 &xtensa->core_config->icache : &xtensa->core_config->dcache;
2987 unsigned int line_size = 0, size = 0, way_count = 0;
2988 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
2989 if ((cachep->line_size != line_size) ||
2990 (cachep->size != size) ||
2991 (cachep->way_count != way_count)) {
2992 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2993 cachep == &xtensa->core_config->icache ? 'I' : 'D');
2994 }
2995 strcpy(*response_p, "OK");
2996 return ERROR_OK;
2997 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
2998 /* Confirm host IRAM/IROM params match core .cfg file */
2999 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
3000 &xtensa->core_config->iram : &xtensa->core_config->irom;
3001 unsigned int base = 0, size = 0, i;
3002 char *pkt = (char *)&packet[7];
3003 do {
3004 pkt++;
3005 size = strtoul(pkt, &pkt, 16);
3006 pkt++;
3007 base = strtoul(pkt, &pkt, 16);
3008 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
3009 for (i = 0; i < memp->count; i++) {
3010 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
3011 break;
3012 }
3013 if (i == memp->count) {
3014 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
3015 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
3016 break;
3017 }
3018 for (i = 0; i < 11; i++) {
3019 pkt++;
3020 strtoul(pkt, &pkt, 16);
3021 }
3022 } while (pkt && (pkt[0] == ','));
3023 strcpy(*response_p, "OK");
3024 return ERROR_OK;
3025 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
3026 /* Confirm host EXCM_LEVEL matches core .cfg file */
3027 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
3028 if (!xtensa->core_config->high_irq.enabled ||
3029 (excm_level != xtensa->core_config->high_irq.excm_level))
3030 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
3031 strcpy(*response_p, "OK");
3032 return ERROR_OK;
3033 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
3034 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
3035 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
3036 strcpy(*response_p, "OK");
3037 return ERROR_OK;
3038 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
3039 char *delim;
3040 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
3041 if (*delim != ':') {
3042 LOG_ERROR("Malformed Qxtspill packet");
3043 error = XT_QERR_INVAL;
3044 goto xtensa_gdb_query_custom_fail;
3045 }
3046 xtensa->spill_loc = spill_loc;
3047 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
3048 if (xtensa->spill_buf)
3049 free(xtensa->spill_buf);
3050 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
3051 if (!xtensa->spill_buf) {
3052 LOG_ERROR("Spill buf alloc");
3053 error = XT_QERR_MEM;
3054 goto xtensa_gdb_query_custom_fail;
3055 }
3056 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
3057 strcpy(*response_p, "OK");
3058 return ERROR_OK;
3059 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
3060 return xtensa_gdbqc_qxtreg(target, packet, response_p);
3061 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
3062 (strncmp(packet, "qxtftie", 7) == 0) ||
3063 (strncmp(packet, "qxtstie", 7) == 0)) {
3064 /* Return empty string to indicate that trace and TIE wire debug are unsupported */
3065 strcpy(*response_p, "");
3066 return ERROR_OK;
3067 }
3068
3069 /* Warn for all other queries, but do not return errors */
3070 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
3071 strcpy(*response_p, "");
3072 return ERROR_OK;
3073
3074 xtensa_gdb_query_custom_fail:
3075 strcpy(*response_p, xt_qerr[error].chrval);
3076 return xt_qerr[error].intval;
3077 }
3078
3079 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
3080 const struct xtensa_debug_module_config *dm_cfg)
3081 {
3082 target->arch_info = xtensa;
3083 xtensa->common_magic = XTENSA_COMMON_MAGIC;
3084 xtensa->target = target;
3085 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
3086
3087 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
3088 if (!xtensa->core_config) {
3089 LOG_ERROR("Xtensa configuration alloc failed\n");
3090 return ERROR_FAIL;
3091 }
3092
3093 /* By default, caches are disabled and configured with a single way */
3094 xtensa->core_config->icache.way_count = 1;
3095 xtensa->core_config->dcache.way_count = 1;
3096
3097 /* chrval: AR3/AR4 register names will change with window mapping.
3098 * intval: tracks whether scratch register was set through gdb P packet.
3099 */
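/* The loop below produces the scratch names "a3", "a4", "ar3" and "ar4",
 * assuming XT_AR_SCRATCH_NUM covers exactly those four entries. */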
3100 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
3101 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
3102 if (!xtensa->scratch_ars[s].chrval) {
3103 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
3104 free(xtensa->scratch_ars[f].chrval);
3105 free(xtensa->core_config);
3106 LOG_ERROR("Xtensa scratch AR alloc failed\n");
3107 return ERROR_FAIL;
3108 }
3109 xtensa->scratch_ars[s].intval = false;
3110 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
3111 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
3112 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
3113 }
3114
3115 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
3116 }
3117
3118 void xtensa_set_permissive_mode(struct target *target, bool state)
3119 {
3120 target_to_xtensa(target)->permissive_mode = state;
3121 }
3122
3123 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
3124 {
3125 struct xtensa *xtensa = target_to_xtensa(target);
3126
3127 xtensa->come_online_probes_num = 3;
3128 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
3129 if (!xtensa->hw_brps) {
3130 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
3131 return ERROR_FAIL;
3132 }
3133 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
3134 if (!xtensa->hw_wps) {
3135 free(xtensa->hw_brps);
3136 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
3137 return ERROR_FAIL;
3138 }
3139 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
3140 if (!xtensa->sw_brps) {
3141 free(xtensa->hw_brps);
3142 free(xtensa->hw_wps);
3143 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
3144 return ERROR_FAIL;
3145 }
3146
3147 xtensa->spill_loc = 0xffffffff;
3148 xtensa->spill_bytes = 0;
3149 xtensa->spill_buf = NULL;
3150 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
3151
3152 return xtensa_build_reg_cache(target);
3153 }
3154
3155 static void xtensa_free_reg_cache(struct target *target)
3156 {
3157 struct xtensa *xtensa = target_to_xtensa(target);
3158 struct reg_cache *cache = xtensa->core_cache;
3159
3160 if (cache) {
3161 register_unlink_cache(&target->reg_cache, cache);
3162 for (unsigned int i = 0; i < cache->num_regs; i++) {
3163 free(xtensa->algo_context_backup[i]);
3164 free(cache->reg_list[i].value);
3165 }
3166 free(xtensa->algo_context_backup);
3167 free(cache->reg_list);
3168 free(cache);
3169 }
3170 xtensa->core_cache = NULL;
3171 xtensa->algo_context_backup = NULL;
3172
3173 if (xtensa->empty_regs) {
3174 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
3175 free((void *)xtensa->empty_regs[i].name);
3176 free(xtensa->empty_regs[i].value);
3177 }
3178 free(xtensa->empty_regs);
3179 }
3180 xtensa->empty_regs = NULL;
3181 if (xtensa->optregs) {
3182 for (unsigned int i = 0; i < xtensa->num_optregs; i++)
3183 free((void *)xtensa->optregs[i].name);
3184 free(xtensa->optregs);
3185 }
3186 xtensa->optregs = NULL;
3187 }
3188
3189 void xtensa_target_deinit(struct target *target)
3190 {
3191 struct xtensa *xtensa = target_to_xtensa(target);
3192
3193 LOG_DEBUG("start");
3194
3195 if (target_was_examined(target)) {
3196 int ret = xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, OCDDCR_ENABLEOCD);
3197 if (ret != ERROR_OK) {
3198 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
3199 return;
3200 }
3201 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
3202 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
3203 if (ret != ERROR_OK) {
3204 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
3205 return;
3206 }
3207 xtensa_dm_deinit(&xtensa->dbg_mod);
3208 }
3209 xtensa_free_reg_cache(target);
3210 free(xtensa->hw_brps);
3211 free(xtensa->hw_wps);
3212 free(xtensa->sw_brps);
3213 if (xtensa->spill_buf) {
3214 free(xtensa->spill_buf);
3215 xtensa->spill_buf = NULL;
3216 }
3217 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
3218 free(xtensa->scratch_ars[s].chrval);
3219 free(xtensa->core_config);
3220 }
3221
3222 const char *xtensa_get_gdb_arch(struct target *target)
3223 {
3224 return "xtensa";
3225 }
3226
3227 /* exe <ascii-encoded hexadecimal instruction bytes> */
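/* Example (hypothetical byte string, not a real opcode for any core):
 *   xtensa exe 112233
 * queues the 3-byte sequence 0x11 0x22 0x33; the argument must have an even
 * number of hex characters and be shorter than 64 characters.
 */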
3228 static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
3229 {
3230 struct xtensa *xtensa = target_to_xtensa(target);
3231
3232 if (CMD_ARGC != 1)
3233 return ERROR_COMMAND_SYNTAX_ERROR;
3234
3235 /* Process ascii-encoded hex byte string */
3236 const char *parm = CMD_ARGV[0];
3237 unsigned int parm_len = strlen(parm);
3238 if ((parm_len >= 64) || (parm_len & 1)) {
3239 LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
3240 return ERROR_FAIL;
3241 }
3242
3243 uint8_t ops[32];
3244 memset(ops, 0, 32);
3245 unsigned int oplen = parm_len / 2;
3246 char encoded_byte[3] = { 0, 0, 0 };
3247 for (unsigned int i = 0; i < oplen; i++) {
3248 encoded_byte[0] = *parm++;
3249 encoded_byte[1] = *parm++;
3250 ops[i] = strtoul(encoded_byte, NULL, 16);
3251 }
3252
3253 /* GDB must handle state save/restore.
3254 * Flush reg cache in case spill location is in an AR
3255 * Update CPENABLE only for this execution; later restore cached copy
3256 * Keep a copy of exccause in case executed code triggers an exception
3257 */
3258 int status = xtensa_write_dirty_registers(target);
3259 if (status != ERROR_OK) {
3260 LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
3261 return ERROR_FAIL;
3262 }
3263 xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
3264 xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
3265 xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
3266 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
3267 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3268 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
3269 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
3270 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
3271 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3272
3273 /* Queue instruction list and execute everything */
3274 LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
3275 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3276 status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
3277 if (status != ERROR_OK)
3278 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
3279 status = xtensa_core_status_check(target);
3280 if (status != ERROR_OK)
3281 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
3282
3283 /* Reread register cache and restore saved regs after instruction execution */
3284 if (xtensa_fetch_all_regs(target) != ERROR_OK)
3285 LOG_TARGET_ERROR(target, "%s: Failed to fetch register cache (post-exec).", target_name(target));
3286 xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
3287 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
3288 return status;
3289 }
3290
3291 COMMAND_HANDLER(xtensa_cmd_exe)
3292 {
3293 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3294 }
3295
3296 /* xtdef <name> */
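/* Example: "xtensa xtdef LX" or "xtensa xtdef NX" selects the core type. */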
3297 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3298 {
3299 if (CMD_ARGC != 1)
3300 return ERROR_COMMAND_SYNTAX_ERROR;
3301
3302 const char *core_name = CMD_ARGV[0];
3303 if (strcasecmp(core_name, "LX") == 0) {
3304 xtensa->core_config->core_type = XT_LX;
3305 } else if (strcasecmp(core_name, "NX") == 0) {
3306 xtensa->core_config->core_type = XT_NX;
3307 } else {
3308 LOG_ERROR("xtdef [LX|NX]\n");
3309 return ERROR_COMMAND_SYNTAX_ERROR;
3310 }
3311 return ERROR_OK;
3312 }
3313
3314 COMMAND_HANDLER(xtensa_cmd_xtdef)
3315 {
3316 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3317 target_to_xtensa(get_current_target(CMD_CTX)));
3318 }
3319
3320 static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
3321 {
3322 if ((val < min) || (val > max)) {
3323 LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
3324 return false;
3325 }
3326 return true;
3327 }
3328
3329 /* xtopt <name> <value> */
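/* Example (illustrative value): "xtensa xtopt arnum 64"; each option value is
 * range-checked by xtensa_cmd_xtopt_legal_val() below. */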
3330 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3331 {
3332 if (CMD_ARGC != 2)
3333 return ERROR_COMMAND_SYNTAX_ERROR;
3334
3335 const char *opt_name = CMD_ARGV[0];
3336 int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3337 if (strcasecmp(opt_name, "arnum") == 0) {
3338 if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3339 return ERROR_COMMAND_ARGUMENT_INVALID;
3340 xtensa->core_config->aregs_num = opt_val;
3341 } else if (strcasecmp(opt_name, "windowed") == 0) {
3342 if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3343 return ERROR_COMMAND_ARGUMENT_INVALID;
3344 xtensa->core_config->windowed = opt_val;
3345 } else if (strcasecmp(opt_name, "cpenable") == 0) {
3346 if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3347 return ERROR_COMMAND_ARGUMENT_INVALID;
3348 xtensa->core_config->coproc = opt_val;
3349 } else if (strcasecmp(opt_name, "exceptions") == 0) {
3350 if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3351 return ERROR_COMMAND_ARGUMENT_INVALID;
3352 xtensa->core_config->exceptions = opt_val;
3353 } else if (strcasecmp(opt_name, "intnum") == 0) {
3354 if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3355 return ERROR_COMMAND_ARGUMENT_INVALID;
3356 xtensa->core_config->irq.enabled = (opt_val > 0);
3357 xtensa->core_config->irq.irq_num = opt_val;
3358 } else if (strcasecmp(opt_name, "hipriints") == 0) {
3359 if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3360 return ERROR_COMMAND_ARGUMENT_INVALID;
3361 xtensa->core_config->high_irq.enabled = opt_val;
3362 } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3363 if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3364 return ERROR_COMMAND_ARGUMENT_INVALID;
3365 if (!xtensa->core_config->high_irq.enabled) {
3366 LOG_ERROR("xtopt excmlevel requires hipriints\n");
3367 return ERROR_COMMAND_ARGUMENT_INVALID;
3368 }
3369 xtensa->core_config->high_irq.excm_level = opt_val;
3370 } else if (strcasecmp(opt_name, "intlevels") == 0) {
3371 if (xtensa->core_config->core_type == XT_LX) {
3372 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3373 return ERROR_COMMAND_ARGUMENT_INVALID;
3374 } else {
3375 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3376 return ERROR_COMMAND_ARGUMENT_INVALID;
3377 }
3378 if (!xtensa->core_config->high_irq.enabled) {
3379 LOG_ERROR("xtopt intlevels requires hipriints\n");
3380 return ERROR_COMMAND_ARGUMENT_INVALID;
3381 }
3382 xtensa->core_config->high_irq.level_num = opt_val;
3383 } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3384 if (xtensa->core_config->core_type == XT_LX) {
3385 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3386 return ERROR_COMMAND_ARGUMENT_INVALID;
3387 } else {
3388 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3389 return ERROR_COMMAND_ARGUMENT_INVALID;
3390 }
3391 xtensa->core_config->debug.enabled = 1;
3392 xtensa->core_config->debug.irq_level = opt_val;
3393 } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3394 if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3395 return ERROR_COMMAND_ARGUMENT_INVALID;
3396 xtensa->core_config->debug.ibreaks_num = opt_val;
3397 } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3398 if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3399 return ERROR_COMMAND_ARGUMENT_INVALID;
3400 xtensa->core_config->debug.dbreaks_num = opt_val;
3401 } else if (strcasecmp(opt_name, "tracemem") == 0) {
3402 if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3403 return ERROR_COMMAND_ARGUMENT_INVALID;
3404 xtensa->core_config->trace.mem_sz = opt_val;
3405 xtensa->core_config->trace.enabled = (opt_val > 0);
3406 } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3407 if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3408 return ERROR_COMMAND_ARGUMENT_INVALID;
3409 xtensa->core_config->trace.reversed_mem_access = opt_val;
3410 } else if (strcasecmp(opt_name, "perfcount") == 0) {
3411 if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3412 return ERROR_COMMAND_ARGUMENT_INVALID;
3413 xtensa->core_config->debug.perfcount_num = opt_val;
3414 } else {
3415 LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3416 return ERROR_OK;
3417 }
3418
3419 return ERROR_OK;
3420 }
3421
3422 COMMAND_HANDLER(xtensa_cmd_xtopt)
3423 {
3424 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3425 target_to_xtensa(get_current_target(CMD_CTX)));
3426 }
3427
3428 /* xtmem <type> [parameters] */
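/* Examples (illustrative sizes/addresses only):
 *   xtensa xtmem icache 32 16384 4          line size, total size, way count
 *   xtensa xtmem iram 0x40000000 0x20000    base address, size in bytes
 */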
3429 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3430 {
3431 struct xtensa_cache_config *cachep = NULL;
3432 struct xtensa_local_mem_config *memp = NULL;
3433 int mem_access = 0;
3434 bool is_dcache = false;
3435
3436 if (CMD_ARGC == 0) {
3437 LOG_ERROR("xtmem <type> [parameters]\n");
3438 return ERROR_COMMAND_SYNTAX_ERROR;
3439 }
3440
3441 const char *mem_name = CMD_ARGV[0];
3442 if (strcasecmp(mem_name, "icache") == 0) {
3443 cachep = &xtensa->core_config->icache;
3444 } else if (strcasecmp(mem_name, "dcache") == 0) {
3445 cachep = &xtensa->core_config->dcache;
3446 is_dcache = true;
3447 } else if (strcasecmp(mem_name, "l2cache") == 0) {
3448 /* TODO: support L2 cache */
3449 } else if (strcasecmp(mem_name, "l2addr") == 0) {
3450 /* TODO: support L2 cache */
3451 } else if (strcasecmp(mem_name, "iram") == 0) {
3452 memp = &xtensa->core_config->iram;
3453 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3454 } else if (strcasecmp(mem_name, "dram") == 0) {
3455 memp = &xtensa->core_config->dram;
3456 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3457 } else if (strcasecmp(mem_name, "sram") == 0) {
3458 memp = &xtensa->core_config->sram;
3459 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3460 } else if (strcasecmp(mem_name, "irom") == 0) {
3461 memp = &xtensa->core_config->irom;
3462 mem_access = XT_MEM_ACCESS_READ;
3463 } else if (strcasecmp(mem_name, "drom") == 0) {
3464 memp = &xtensa->core_config->drom;
3465 mem_access = XT_MEM_ACCESS_READ;
3466 } else if (strcasecmp(mem_name, "srom") == 0) {
3467 memp = &xtensa->core_config->srom;
3468 mem_access = XT_MEM_ACCESS_READ;
3469 } else {
3470 LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3471 return ERROR_COMMAND_ARGUMENT_INVALID;
3472 }
3473
3474 if (cachep) {
3475 if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
3476 LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
3477 return ERROR_COMMAND_SYNTAX_ERROR;
3478 }
3479 cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3480 cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3481 cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3482 cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3483 strtoul(CMD_ARGV[4], NULL, 0) : 0;
3484 } else if (memp) {
3485 if (CMD_ARGC != 3) {
3486 LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
3487 return ERROR_COMMAND_SYNTAX_ERROR;
3488 }
3489 struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3490 memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3491 memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3492 memcfgp->access = mem_access;
3493 memp->count++;
3494 }
3495
3496 return ERROR_OK;
3497 }
3498
3499 COMMAND_HANDLER(xtensa_cmd_xtmem)
3500 {
3501 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3502 target_to_xtensa(get_current_target(CMD_CTX)));
3503 }
3504
3505 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
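/* Example (illustrative values): "xtensa xtmpu 8 4096 1 1" -- up to 32 foreground
 * segments, a power-of-2 minimum segment size, and 0/1 flags for lockable and
 * execute-only support. */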
3506 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3507 {
3508 if (CMD_ARGC != 4) {
3509 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3510 return ERROR_COMMAND_SYNTAX_ERROR;
3511 }
3512
3513 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3514 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3515 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3516 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3517
3518 if (nfgseg > 32) {
3519 LOG_ERROR("<nfgseg> must be within [0..32]\n");
3520 return ERROR_COMMAND_ARGUMENT_INVALID;
3521 } else if (minsegsize & (minsegsize - 1)) {
3522 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3523 return ERROR_COMMAND_ARGUMENT_INVALID;
3524 } else if (lockable > 1) {
3525 LOG_ERROR("<lockable> must be 0 or 1\n");
3526 return ERROR_COMMAND_ARGUMENT_INVALID;
3527 } else if (execonly > 1) {
3528 LOG_ERROR("<execonly> must be 0 or 1\n");
3529 return ERROR_COMMAND_ARGUMENT_INVALID;
3530 }
3531
3532 xtensa->core_config->mpu.enabled = true;
3533 xtensa->core_config->mpu.nfgseg = nfgseg;
3534 xtensa->core_config->mpu.minsegsize = minsegsize;
3535 xtensa->core_config->mpu.lockable = lockable;
3536 xtensa->core_config->mpu.execonly = execonly;
3537 return ERROR_OK;
3538 }
3539
3540 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3541 {
3542 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3543 target_to_xtensa(get_current_target(CMD_CTX)));
3544 }
3545
3546 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
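/* Example: "xtensa xtmmu 32 32" -- the handler below accepts exactly two
 * refill-entry counts, each of which must be 16 or 32. */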
3547 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3548 {
3549 if (CMD_ARGC != 2) {
3550 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3551 return ERROR_COMMAND_SYNTAX_ERROR;
3552 }
3553
3554 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3555 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3556 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3557 LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3558 return ERROR_COMMAND_ARGUMENT_INVALID;
3559 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3560 LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3561 return ERROR_COMMAND_ARGUMENT_INVALID;
3562 }
3563
3564 xtensa->core_config->mmu.enabled = true;
3565 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3566 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3567 return ERROR_OK;
3568 }
3569
3570 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3571 {
3572 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3573 target_to_xtensa(get_current_target(CMD_CTX)));
3574 }
3575
3576 /* xtregs <numregs>
3577 * xtreg <regname> <regnum> */
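/* Examples (illustrative count and register number; real values come from the
 * generated xtensa-core-XXX.cfg):
 *   xtensa xtregs 176
 *   xtensa xtreg pc 0x0020
 */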
3578 COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
3579 {
3580 if (CMD_ARGC == 1) {
3581 int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
3582 if ((numregs <= 0) || (numregs > UINT16_MAX)) {
3583 LOG_ERROR("xtregs <numregs>: Invalid 'numregs' (%d)", numregs);
3584 return ERROR_COMMAND_SYNTAX_ERROR;
3585 }
3586 if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
3587 LOG_ERROR("xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
3588 numregs, xtensa->genpkt_regs_num);
3589 return ERROR_COMMAND_SYNTAX_ERROR;
3590 }
3591 xtensa->total_regs_num = numregs;
3592 xtensa->core_regs_num = 0;
3593 xtensa->num_optregs = 0;
3594 /* A little more memory than required, but saves a second initialization pass */
3595 xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
3596 if (!xtensa->optregs) {
3597 LOG_ERROR("Failed to allocate xtensa->optregs!");
3598 return ERROR_FAIL;
3599 }
3600 return ERROR_OK;
3601 } else if (CMD_ARGC != 2) {
3602 return ERROR_COMMAND_SYNTAX_ERROR;
3603 }
3604
3605 /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3606 * if general register (g-packet) requests or contiguous register maps are supported */
3607 if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
3608 xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
3609 if (!xtensa->contiguous_regs_desc) {
3610 LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3611 return ERROR_FAIL;
3612 }
3613 }
3614
3615 const char *regname = CMD_ARGV[0];
3616 unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
3617 if (regnum > UINT16_MAX) {
3618 LOG_ERROR("<regnum> must be a 16-bit number");
3619 return ERROR_COMMAND_ARGUMENT_INVALID;
3620 }
3621
3622 if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
3623 if (xtensa->total_regs_num)
3624 LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core, %d extended)",
3625 regname, regnum,
3626 xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
3627 else
3628 LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
3629 regname, regnum);
3630 return ERROR_FAIL;
3631 }
3632
3633 /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3634 struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
3635 bool is_extended_reg = true;
3636 unsigned int ridx;
3637 for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
3638 if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
3639 /* Flag core register as defined */
3640 rptr = &xtensa_regs[ridx];
3641 xtensa->core_regs_num++;
3642 is_extended_reg = false;
3643 break;
3644 }
3645 }
3646
3647 rptr->exist = true;
3648 if (is_extended_reg) {
3649 /* Register ID, debugger-visible register ID */
3650 rptr->name = strdup(CMD_ARGV[0]);
3651 rptr->dbreg_num = regnum;
3652 rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
3653 xtensa->num_optregs++;
3654
3655 /* Register type */
3656 if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
3657 rptr->type = XT_REG_GENERAL;
3658 } else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
3659 rptr->type = XT_REG_USER;
3660 } else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
3661 rptr->type = XT_REG_FR;
3662 } else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
3663 rptr->type = XT_REG_SPECIAL;
3664 } else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
3665 /* WARNING: For these registers, regnum points to the
3666 * index of the corresponding ARx registers, NOT to
3667 * the processor register number! */
3668 rptr->type = XT_REG_RELGEN;
3669 rptr->reg_num += XT_REG_IDX_ARFIRST;
3670 rptr->dbreg_num += XT_REG_IDX_ARFIRST;
3671 } else if ((regnum & XT_REG_TIE_MASK) != 0) {
3672 rptr->type = XT_REG_TIE;
3673 } else {
3674 rptr->type = XT_REG_OTHER;
3675 }
3676
3677 /* Register flags */
3678 if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
3679 (strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
3680 (strcmp(rptr->name, "intclear") == 0))
3681 rptr->flags = XT_REGF_NOREAD;
3682 else
3683 rptr->flags = 0;
3684
3685 if (rptr->reg_num == (XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level) &&
3686 xtensa->core_config->core_type == XT_LX && rptr->type == XT_REG_SPECIAL) {
3687 xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
3688 LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
3689 }
3690 if (xtensa->core_config->core_type == XT_NX) {
3691 enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_NUM;
3692 if (strcmp(rptr->name, "ibreakc0") == 0)
3693 idx = XT_NX_REG_IDX_IBREAKC0;
3694 else if (strcmp(rptr->name, "wb") == 0)
3695 idx = XT_NX_REG_IDX_WB;
3696 else if (strcmp(rptr->name, "ms") == 0)
3697 idx = XT_NX_REG_IDX_MS;
3698 else if (strcmp(rptr->name, "ievec") == 0)
3699 idx = XT_NX_REG_IDX_IEVEC;
3700 else if (strcmp(rptr->name, "ieextern") == 0)
3701 idx = XT_NX_REG_IDX_IEEXTERN;
3702 else if (strcmp(rptr->name, "mesr") == 0)
3703 idx = XT_NX_REG_IDX_MESR;
3704 else if (strcmp(rptr->name, "mesrclr") == 0)
3705 idx = XT_NX_REG_IDX_MESRCLR;
3706 if (idx < XT_NX_REG_IDX_NUM) {
3707 if (xtensa->nx_reg_idx[idx] != 0) {
3708 LOG_ERROR("nx_reg_idx[%d] previously set to %d",
3709 idx, xtensa->nx_reg_idx[idx]);
3710 return ERROR_FAIL;
3711 }
3712 xtensa->nx_reg_idx[idx] = XT_NUM_REGS + xtensa->num_optregs - 1;
3713 LOG_DEBUG("NX reg %s: index %d (%d)",
3714 rptr->name, xtensa->nx_reg_idx[idx], idx);
3715 }
3716 }
3717 } else if (strcmp(rptr->name, "cpenable") == 0) {
3718 xtensa->core_config->coproc = true;
3719 }
3720
3721 /* Build out list of contiguous registers in specified order */
3722 unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
3723 if (xtensa->contiguous_regs_desc) {
3724 assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
3725 xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
3726 }
3727 if (xtensa_extra_debug_log)
3728 LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
3729 is_extended_reg ? "config-specific" : "core",
3730 rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
3731 is_extended_reg ? xtensa->num_optregs : ridx,
3732 is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
3733 return ERROR_OK;
3734 }
3735
3736 COMMAND_HANDLER(xtensa_cmd_xtreg)
3737 {
3738 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3739 target_to_xtensa(get_current_target(CMD_CTX)));
3740 }
3741
3742 /* xtregfmt <contiguous|sparse> [numgregs] */
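/* Examples: "xtensa xtregfmt sparse" or, illustratively,
 * "xtensa xtregfmt contiguous 128" where 128 is a hypothetical g-packet
 * register count. */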
3743 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3744 {
3745 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3746 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3747 return ERROR_OK;
3748 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3749 xtensa->regmap_contiguous = true;
3750 if (CMD_ARGC == 2) {
3751 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3752 if ((numgregs <= 0) ||
3753 ((numgregs > xtensa->total_regs_num) &&
3754 (xtensa->total_regs_num > 0))) {
3755 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3756 numgregs, xtensa->total_regs_num);
3757 return ERROR_COMMAND_SYNTAX_ERROR;
3758 }
3759 xtensa->genpkt_regs_num = numgregs;
3760 }
3761 return ERROR_OK;
3762 }
3763 }
3764 return ERROR_COMMAND_SYNTAX_ERROR;
3765 }
3766
3767 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3768 {
3769 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3770 target_to_xtensa(get_current_target(CMD_CTX)));
3771 }
3772
3773 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
3774 {
3775 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
3776 &xtensa->permissive_mode, "xtensa permissive mode");
3777 }
3778
3779 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3780 {
3781 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3782 target_to_xtensa(get_current_target(CMD_CTX)));
3783 }
3784
3785 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
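/* Example (illustrative arguments): "xtensa perfmon_enable 0 0" enables counter 0
 * with event select 0; see the range checks below for valid counter_id, select,
 * mask, kernelcnt and tracelevel values. */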
3786 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3787 {
3788 struct xtensa_perfmon_config config = {
3789 .mask = 0xffff,
3790 .kernelcnt = 0,
3791 .tracelevel = -1 /* use DEBUGLEVEL by default */
3792 };
3793
3794 if (CMD_ARGC < 2 || CMD_ARGC > 6)
3795 return ERROR_COMMAND_SYNTAX_ERROR;
3796
3797 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3798 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3799 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3800 return ERROR_COMMAND_ARGUMENT_INVALID;
3801 }
3802
3803 config.select = strtoul(CMD_ARGV[1], NULL, 0);
3804 if (config.select > XTENSA_MAX_PERF_SELECT) {
3805 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3806 return ERROR_COMMAND_ARGUMENT_INVALID;
3807 }
3808
3809 if (CMD_ARGC >= 3) {
3810 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3811 if (config.mask > XTENSA_MAX_PERF_MASK) {
3812 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3813 return ERROR_COMMAND_ARGUMENT_INVALID;
3814 }
3815 }
3816
3817 if (CMD_ARGC >= 4) {
3818 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3819 if (config.kernelcnt > 1) {
3820 command_print(CMD, "kernelcnt should be 0 or 1");
3821 return ERROR_COMMAND_ARGUMENT_INVALID;
3822 }
3823 }
3824
3825 if (CMD_ARGC >= 5) {
3826 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3827 if (config.tracelevel > 7) {
3828 command_print(CMD, "tracelevel should be <=7");
3829 return ERROR_COMMAND_ARGUMENT_INVALID;
3830 }
3831 }
3832
3833 if (config.tracelevel == -1)
3834 config.tracelevel = xtensa->core_config->debug.irq_level;
3835
3836 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3837 }
3838
3839 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3840 {
3841 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3842 target_to_xtensa(get_current_target(CMD_CTX)));
3843 }
3844
3845 /* perfmon_dump [counter_id] */
3846 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3847 {
3848 if (CMD_ARGC > 1)
3849 return ERROR_COMMAND_SYNTAX_ERROR;
3850
3851 int counter_id = -1;
3852 if (CMD_ARGC == 1) {
3853 counter_id = strtol(CMD_ARGV[0], NULL, 0);
3854 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3855 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3856 return ERROR_COMMAND_ARGUMENT_INVALID;
3857 }
3858 }
3859
3860 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3861 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3862 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3863 char result_buf[128] = { 0 };
3864 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3865 struct xtensa_perfmon_result result;
3866 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3867 if (res != ERROR_OK)
3868 return res;
3869 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3870 "%-12" PRIu64 "%s",
3871 result.value,
3872 result.overflow ? " (overflow)" : "");
3873 LOG_INFO("%s", result_buf);
3874 }
3875
3876 return ERROR_OK;
3877 }
3878
3879 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3880 {
3881 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3882 target_to_xtensa(get_current_target(CMD_CTX)));
3883 }
3884
3885 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3886 {
3887 int state = -1;
3888
3889 if (CMD_ARGC < 1) {
3890 const char *st;
3891 state = xtensa->stepping_isr_mode;
3892 if (state == XT_STEPPING_ISR_ON)
3893 st = "OFF";
3894 else if (state == XT_STEPPING_ISR_OFF)
3895 st = "ON";
3896 else
3897 st = "UNKNOWN";
3898 command_print(CMD, "Current ISR step mode: %s", st);
3899 return ERROR_OK;
3900 }
3901
3902 if (xtensa->core_config->core_type == XT_NX) {
3903 command_print(CMD, "ERROR: ISR step mode only supported on Xtensa LX");
3904 return ERROR_FAIL;
3905 }
3906
3907 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3908 if (!strcasecmp(CMD_ARGV[0], "off"))
3909 state = XT_STEPPING_ISR_ON;
3910 else if (!strcasecmp(CMD_ARGV[0], "on"))
3911 state = XT_STEPPING_ISR_OFF;
3912
3913 if (state == -1) {
3914 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3915 return ERROR_FAIL;
3916 }
3917 xtensa->stepping_isr_mode = state;
3918 return ERROR_OK;
3919 }
3920
3921 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3922 {
3923 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3924 target_to_xtensa(get_current_target(CMD_CTX)));
3925 }
3926
3927 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3928 {
3929 int res;
3930 uint32_t val = 0;
3931
3932 if (CMD_ARGC >= 1) {
3933 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3934 if (!strcasecmp(CMD_ARGV[0], "none")) {
3935 val = 0;
3936 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3937 val |= OCDDCR_BREAKINEN;
3938 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3939 val |= OCDDCR_BREAKOUTEN;
3940 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3941 val |= OCDDCR_RUNSTALLINEN;
3942 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3943 val |= OCDDCR_DEBUGMODEOUTEN;
3944 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3945 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3946 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3947 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
3948 } else {
3949 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3950 command_print(
3951 CMD,
3952 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3953 return ERROR_OK;
3954 }
3955 }
3956 res = xtensa_smpbreak_set(target, val);
3957 if (res != ERROR_OK)
3958 command_print(CMD, "Failed to set smpbreak config %d", res);
3959 } else {
3960 struct xtensa *xtensa = target_to_xtensa(target);
3961 res = xtensa_smpbreak_read(xtensa, &val);
3962 if (res == ERROR_OK)
3963 command_print(CMD, "Current bits set:%s%s%s%s",
3964 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3965 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3966 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3967 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3968 );
3969 else
3970 command_print(CMD, "Failed to get smpbreak config %d", res);
3971 }
3972 return res;
3973 }
3974
3975 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3976 {
3977 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3978 get_current_target(CMD_CTX));
3979 }
3980
3981 COMMAND_HELPER(xtensa_cmd_dm_rw_do, struct xtensa *xtensa)
3982 {
3983 if (CMD_ARGC == 1) {
3984 // read: xtensa dm addr
3985 uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
3986 uint32_t val;
3987 int res = xtensa_dm_read(&xtensa->dbg_mod, addr, &val);
3988 if (res == ERROR_OK)
3989 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") -> 0x%08" PRIx32, addr, val);
3990 else
3991 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : read ERROR %" PRId32, addr, res);
3992 return res;
3993 } else if (CMD_ARGC == 2) {
3994 // write: xtensa dm addr value
3995 uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
3996 uint32_t val = strtoul(CMD_ARGV[1], NULL, 0);
3997 int res = xtensa_dm_write(&xtensa->dbg_mod, addr, val);
3998 if (res == ERROR_OK)
3999 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") <- 0x%08" PRIx32, addr, val);
4000 else
4001 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : write ERROR %" PRId32, addr, res);
4002 return res;
4003 }
4004 return ERROR_COMMAND_SYNTAX_ERROR;
4005 }
4006
4007 COMMAND_HANDLER(xtensa_cmd_dm_rw)
4008 {
4009 return CALL_COMMAND_HANDLER(xtensa_cmd_dm_rw_do,
4010 target_to_xtensa(get_current_target(CMD_CTX)));
4011 }
4012
4013 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
4014 {
4015 struct xtensa_trace_status trace_status;
4016 struct xtensa_trace_start_config cfg = {
4017 .stoppc = 0,
4018 .stopmask = XTENSA_STOPMASK_DISABLED,
4019 .after = 0,
4020 .after_is_words = false
4021 };
4022
4023 /* Parse arguments */
4024 for (unsigned int i = 0; i < CMD_ARGC; i++) {
4025 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
4026 char *e;
4027 i++;
4028 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
4029 cfg.stopmask = 0;
4030 if (*e == '/')
4031 cfg.stopmask = strtol(e, NULL, 0);
4032 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
4033 i++;
4034 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
4035 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
4036 cfg.after_is_words = 0;
4037 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
4038 cfg.after_is_words = 1;
4039 } else {
4040 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
4041 return ERROR_FAIL;
4042 }
4043 }
4044
4045 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4046 if (res != ERROR_OK)
4047 return res;
4048 if (trace_status.stat & TRAXSTAT_TRACT) {
4049 LOG_WARNING("Silently stopping active trace!");
4050 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
4051 if (res != ERROR_OK)
4052 return res;
4053 }
4054
4055 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
4056 if (res != ERROR_OK)
4057 return res;
4058
4059 xtensa->trace_active = true;
4060 command_print(CMD, "Trace started.");
4061 return ERROR_OK;
4062 }
4063
4064 COMMAND_HANDLER(xtensa_cmd_tracestart)
4065 {
4066 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
4067 target_to_xtensa(get_current_target(CMD_CTX)));
4068 }
4069
4070 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
4071 {
4072 struct xtensa_trace_status trace_status;
4073
4074 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4075 if (res != ERROR_OK)
4076 return res;
4077
4078 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
4079 command_print(CMD, "No trace is currently active.");
4080 return ERROR_FAIL;
4081 }
4082
4083 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
4084 if (res != ERROR_OK)
4085 return res;
4086
4087 xtensa->trace_active = false;
4088 command_print(CMD, "Trace stop triggered.");
4089 return ERROR_OK;
4090 }
4091
4092 COMMAND_HANDLER(xtensa_cmd_tracestop)
4093 {
4094 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
4095 target_to_xtensa(get_current_target(CMD_CTX)));
4096 }
4097
4098 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
4099 {
4100 struct xtensa_trace_config trace_config;
4101 struct xtensa_trace_status trace_status;
4102 uint32_t memsz, wmem;
4103
4104 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4105 if (res != ERROR_OK)
4106 return res;
4107
4108 if (trace_status.stat & TRAXSTAT_TRACT) {
4109 command_print(CMD, "Tracing is still active. Please stop it first.");
4110 return ERROR_FAIL;
4111 }
4112
4113 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
4114 if (res != ERROR_OK)
4115 return res;
4116
4117 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
4118 command_print(CMD, "No active trace found; nothing to dump.");
4119 return ERROR_FAIL;
4120 }
4121
4122 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
4123 LOG_INFO("Total trace memory: %d words", memsz);
4124 if ((trace_config.addr &
4125 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
4126 /* Memory hasn't overwritten itself yet. */
4127 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
4128 LOG_INFO("...but trace is only %d words", wmem);
4129 if (wmem < memsz)
4130 memsz = wmem;
4131 } else {
4132 if (trace_config.addr & TRAXADDR_TWSAT) {
4133 LOG_INFO("Real trace is many times longer than that (overflow)");
4134 } else {
4135 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
4136 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
4137 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
4138 }
4139 }
4140
4141 uint8_t *tracemem = malloc(memsz * 4);
4142 if (!tracemem) {
4143 command_print(CMD, "Failed to alloc memory for trace data!");
4144 return ERROR_FAIL;
4145 }
4146 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
4147 if (res != ERROR_OK) {
4148 free(tracemem);
4149 return res;
4150 }
4151
4152 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4153 if (f < 0) {
4154 free(tracemem);
4155 command_print(CMD, "Unable to open file %s", fname);
4156 return ERROR_FAIL;
4157 }
4158 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
4159 command_print(CMD, "Unable to write to file %s", fname);
4160 else
4161 command_print(CMD, "Wrote %d bytes of trace data to %s", memsz * 4, fname);
4162 close(f);
4163
4164 bool is_all_zeroes = true;
4165 for (unsigned int i = 0; i < memsz * 4; i++) {
4166 if (tracemem[i] != 0) {
4167 is_all_zeroes = false;
4168 break;
4169 }
4170 }
4171 free(tracemem);
4172 if (is_all_zeroes)
4173 command_print(
4174 CMD,
4175 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
4176
4177 return ERROR_OK;
4178 }
4179
4180 COMMAND_HANDLER(xtensa_cmd_tracedump)
4181 {
4182 if (CMD_ARGC != 1) {
4183 command_print(CMD, "Command takes exactly 1 parameter. Need a filename to dump trace data to!");
4184 return ERROR_FAIL;
4185 }
4186
4187 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
4188 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
4189 }
4190
4191 static const struct command_registration xtensa_any_command_handlers[] = {
4192 {
4193 .name = "xtdef",
4194 .handler = xtensa_cmd_xtdef,
4195 .mode = COMMAND_CONFIG,
4196 .help = "Configure Xtensa core type",
4197 .usage = "<type>",
4198 },
4199 {
4200 .name = "xtopt",
4201 .handler = xtensa_cmd_xtopt,
4202 .mode = COMMAND_CONFIG,
4203 .help = "Configure Xtensa core option",
4204 .usage = "<name> <value>",
4205 },
4206 {
4207 .name = "xtmem",
4208 .handler = xtensa_cmd_xtmem,
4209 .mode = COMMAND_CONFIG,
4210 .help = "Configure Xtensa memory/cache option",
4211 .usage = "<type> [parameters]",
4212 },
4213 {
4214 .name = "xtmmu",
4215 .handler = xtensa_cmd_xtmmu,
4216 .mode = COMMAND_CONFIG,
4217 .help = "Configure Xtensa MMU option",
4218 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
4219 },
4220 {
4221 .name = "xtmpu",
4222 .handler = xtensa_cmd_xtmpu,
4223 .mode = COMMAND_CONFIG,
4224 .help = "Configure Xtensa MPU option",
4225 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
4226 },
4227 {
4228 .name = "xtreg",
4229 .handler = xtensa_cmd_xtreg,
4230 .mode = COMMAND_CONFIG,
4231 .help = "Configure Xtensa register",
4232 .usage = "<regname> <regnum>",
4233 },
4234 {
4235 .name = "xtregs",
4236 .handler = xtensa_cmd_xtreg,
4237 .mode = COMMAND_CONFIG,
4238 .help = "Configure number of Xtensa registers",
4239 .usage = "<numregs>",
4240 },
4241 {
4242 .name = "xtregfmt",
4243 .handler = xtensa_cmd_xtregfmt,
4244 .mode = COMMAND_CONFIG,
4245 .help = "Configure format of Xtensa register map",
4246 .usage = "<contiguous|sparse> [numgregs]",
4247 },
4248 {
4249 .name = "set_permissive",
4250 .handler = xtensa_cmd_permissive_mode,
4251 .mode = COMMAND_ANY,
4252 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
4253 .usage = "[0|1]",
4254 },
4255 {
4256 .name = "maskisr",
4257 .handler = xtensa_cmd_mask_interrupts,
4258 .mode = COMMAND_ANY,
4259 .help = "mask Xtensa interrupts at step",
4260 .usage = "['on'|'off']",
4261 },
4262 {
4263 .name = "smpbreak",
4264 .handler = xtensa_cmd_smpbreak,
4265 .mode = COMMAND_ANY,
4266 .help = "Set the way the CPU chains OCD breaks",
4267 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
4268 },
4269 {
4270 .name = "dm",
4271 .handler = xtensa_cmd_dm_rw,
4272 .mode = COMMAND_ANY,
4273 .help = "Xtensa DM read/write",
4274 .usage = "addr [value]"
4275 },
4276 {
4277 .name = "perfmon_enable",
4278 .handler = xtensa_cmd_perfmon_enable,
4279 .mode = COMMAND_EXEC,
4280 .help = "Enable and start performance counter",
4281 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
4282 },
4283 {
4284 .name = "perfmon_dump",
4285 .handler = xtensa_cmd_perfmon_dump,
4286 .mode = COMMAND_EXEC,
4287 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
4288 .usage = "[counter_id]",
4289 },
4290 {
4291 .name = "tracestart",
4292 .handler = xtensa_cmd_tracestart,
4293 .mode = COMMAND_EXEC,
4294 .help =
4295 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
4296 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
4297 },
4298 {
4299 .name = "tracestop",
4300 .handler = xtensa_cmd_tracestop,
4301 .mode = COMMAND_EXEC,
4302 .help = "Tracing: Stop current trace as started by the tracestart command",
4303 .usage = "",
4304 },
4305 {
4306 .name = "tracedump",
4307 .handler = xtensa_cmd_tracedump,
4308 .mode = COMMAND_EXEC,
4309 .help = "Tracing: Dump trace memory to a file. One file per core.",
4310 .usage = "<outfile>",
4311 },
4312 {
4313 .name = "exe",
4314 .handler = xtensa_cmd_exe,
4315 .mode = COMMAND_ANY,
4316 .help = "Xtensa stub execution",
4317 .usage = "<ascii-encoded hexadecimal instruction bytes>",
4318 },
4319 COMMAND_REGISTRATION_DONE
4320 };
4321
4322 const struct command_registration xtensa_command_handlers[] = {
4323 {
4324 .name = "xtensa",
4325 .mode = COMMAND_ANY,
4326 .help = "Xtensa command group",
4327 .usage = "",
4328 .chain = xtensa_any_command_handlers,
4329 },
4330 COMMAND_REGISTRATION_DONE
4331 };
