target/xtensa: extra debug info for "xtensa exe" failures
[openocd.git] / src / target / xtensa / xtensa.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19 #include <target/algorithm.h>
20
21 #include "xtensa_chip.h"
22 #include "xtensa.h"
23
24 /* Swap 4-bit Xtensa opcodes and fields */
25 #define XT_NIBSWAP8(V) \
26 ((((V) & 0x0F) << 4) \
27 | (((V) & 0xF0) >> 4))
28
29 #define XT_NIBSWAP16(V) \
30 ((((V) & 0x000F) << 12) \
31 | (((V) & 0x00F0) << 4) \
32 | (((V) & 0x0F00) >> 4) \
33 | (((V) & 0xF000) >> 12))
34
35 #define XT_NIBSWAP24(V) \
36 ((((V) & 0x00000F) << 20) \
37 | (((V) & 0x0000F0) << 12) \
38 | (((V) & 0x000F00) << 4) \
39 | (((V) & 0x00F000) >> 4) \
40 | (((V) & 0x0F0000) >> 12) \
41 | (((V) & 0xF00000) >> 20))
42
43 /* _XT_INS_FORMAT_*()
44 * Instruction formatting converted from little-endian inputs
45 * and shifted to the MSB-side of DIR for BE systems.
46 */
47 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
48 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
49 | (((T) & 0x0F) << 16) \
50 | (((SR) & 0xFF) << 8)) << 8 \
51 : (OPCODE) \
52 | (((SR) & 0xFF) << 8) \
53 | (((T) & 0x0F) << 4))
54
55 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
56 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
57 | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
58 | (((R) & 0x0F) << 8)) << 8 \
59 : (OPCODE) \
60 | (((ST) & 0xFF) << 4) \
61 | (((R) & 0x0F) << 12))
62
63 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
64 (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
65 | (((T) & 0x0F) << 8) \
66 | (((S) & 0x0F) << 4) \
67 | ((IMM4) & 0x0F)) << 16 \
68 : (OPCODE) \
69 | (((T) & 0x0F) << 4) \
70 | (((S) & 0x0F) << 8) \
71 | (((IMM4) & 0x0F) << 12))
72
73 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
74 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
75 | (((T) & 0x0F) << 16) \
76 | (((S) & 0x0F) << 12) \
77 | (((R) & 0x0F) << 8) \
78 | ((IMM8) & 0xFF)) << 8 \
79 : (OPCODE) \
80 | (((IMM8) & 0xFF) << 16) \
81 | (((R) & 0x0F) << 12) \
82 | (((S) & 0x0F) << 8) \
83 | (((T) & 0x0F) << 4))
84
85 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
86 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
87 | (((T) & 0x0F) << 16) \
88 | (((S) & 0x0F) << 12) \
89 | (((R) & 0x0F) << 8)) << 8 \
90 | ((IMM4) & 0x0F) \
91 : (OPCODE) \
92 | (((IMM4) & 0x0F) << 20) \
93 | (((R) & 0x0F) << 12) \
94 | (((S) & 0x0F) << 8) \
95 | (((T) & 0x0F) << 4))
96
97 /* Xtensa processor instruction opcodes
98 */
99 /* "Return From Debug Operation" to Normal */
100 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
101 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
102 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
103
104 /* Load to DDR register, increase addr register */
105 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
106 /* Store from DDR register, increase addr register */
107 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
108
109 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
110 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
111 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
112 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
113 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
114 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
115
116 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
117 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
118 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
119 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
120 /* Store 8-bit to A(S)+IMM8 from A(T) */
121 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
122
123 /* Cache Instructions */
124 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
125 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
126 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
127 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
128
129 /* Control Instructions */
130 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
131 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
132
133 /* Read Special Register */
134 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
135 /* Write Special Register */
136 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
137 /* Swap Special Register */
138 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
139
140 /* Rotate Window by (-8..7) */
141 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
142
143 /* Read User Register */
144 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
145 /* Write User Register */
146 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
147
148 /* Read Floating-Point Register */
149 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
150 /* Write Floating-Point Register */
151 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
152
153 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
154 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
155 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
156
157 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
158 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
159 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
160
161 #define XT_WATCHPOINTS_NUM_MAX 2
162
163 /* Special register number macro for DDR, PS, WB, A3, A4 registers.
164 * These get used a lot so making a shortcut is useful.
165 */
166 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
167 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
168 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
169 #define XT_REG_A0 (xtensa_regs[XT_REG_IDX_AR0].reg_num)
170 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
171 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
172
173 #define XT_PS_REG_NUM (0xe6U)
174 #define XT_EPS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
175 #define XT_EPC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
176 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL) */
177 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
178 #define XT_NX_IBREAKC_BASE (0xc0U) /* (IBREAKC0..IBREAKC1) for NX */
179
180 #define XT_SW_BREAKPOINTS_MAX_NUM 32
181 #define XT_HW_IBREAK_MAX_NUM 2
182 #define XT_HW_DBREAK_MAX_NUM 2
183
/* Master register descriptor table for the generic Xtensa target.
 * Indexed by enum xtensa_reg_id (XT_REG_IDX_*); entry order must match that enum. */
struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
	/* PC is virtual: no single SR number; materialized from EPC[DBGLEVEL] (LX)
	 * or via a jump instruction (NX) — see xtensa_write_dirty_registers() */
	XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
	/* Physical address registers AR0..AR63 (a config may implement fewer; see aregs_num) */
	XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
	/* Windowing and debug-related special registers (SR numbers per Xtensa ISA) */
	XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ps", XT_PS_REG_NUM, XT_REG_SPECIAL, 0),	/* PS (not mapped through EPS[]) */
	XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
	/* DDR is the debug data register used as the OpenOCD<->core data conduit;
	 * flagged NOREAD: it only carries transient scratch data */
	XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
	XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),

	/* WARNING: For these registers, regnum points to the
	 * index of the corresponding ARx registers, NOT to
	 * the processor register number! */
	XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
};
287
/**
 * Types of memory used at xtensa target.
 * NOTE: the values index the per-type lookup in xtensa_get_mem_config(),
 * so the order here must stay in sync with that function.
 */
enum xtensa_mem_region_type {
	XTENSA_MEM_REG_IROM = 0x0,	/* instruction ROM */
	XTENSA_MEM_REG_IRAM,		/* instruction RAM */
	XTENSA_MEM_REG_DROM,		/* data ROM */
	XTENSA_MEM_REG_DRAM,		/* data RAM */
	XTENSA_MEM_REG_SRAM,		/* RAM reachable from both I and D sides (see xtensa_is_i/dcacheable) */
	XTENSA_MEM_REG_SROM,		/* ROM reachable from both I and D sides */
	XTENSA_MEM_REGS_NUM		/* count of region types; not a real region */
};
300
/* Register definition as union for list allocation */
union xtensa_reg_val_u {
	xtensa_reg_val_t val;	/* the 32-bit register value as an integer */
	uint8_t buf[4];		/* raw byte view of the same value, for buf_* helpers */
};
306
/* Queue-error keys ("E00".."E03") and the OpenOCD error code each maps to;
 * indexed by the XT_QERR_* enum. */
static const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
	{ .chrval = "E00", .intval = ERROR_FAIL },
	{ .chrval = "E01", .intval = ERROR_FAIL },
	{ .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
	{ .chrval = "E03", .intval = ERROR_FAIL },
};
313
/* Set to true for extra debug logging (zero-initialized, i.e. disabled by default) */
static const bool xtensa_extra_debug_log;
316
317 /**
318 * Gets a config for the specific mem type
319 */
320 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
321 struct xtensa *xtensa,
322 enum xtensa_mem_region_type type)
323 {
324 switch (type) {
325 case XTENSA_MEM_REG_IROM:
326 return &xtensa->core_config->irom;
327 case XTENSA_MEM_REG_IRAM:
328 return &xtensa->core_config->iram;
329 case XTENSA_MEM_REG_DROM:
330 return &xtensa->core_config->drom;
331 case XTENSA_MEM_REG_DRAM:
332 return &xtensa->core_config->dram;
333 case XTENSA_MEM_REG_SRAM:
334 return &xtensa->core_config->sram;
335 case XTENSA_MEM_REG_SROM:
336 return &xtensa->core_config->srom;
337 default:
338 return NULL;
339 }
340 }
341
342 /**
343 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
344 * for a given address
345 * Returns NULL if nothing found
346 */
347 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
348 const struct xtensa_local_mem_config *mem,
349 target_addr_t address)
350 {
351 for (unsigned int i = 0; i < mem->count; i++) {
352 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
353 if (address >= region->base && address < (region->base + region->size))
354 return region;
355 }
356 return NULL;
357 }
358
359 /**
360 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
361 * for a given address
362 * Returns NULL if nothing found
363 */
364 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
365 struct xtensa *xtensa,
366 target_addr_t address)
367 {
368 const struct xtensa_local_mem_region_config *result;
369 const struct xtensa_local_mem_config *mcgf;
370 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
371 mcgf = xtensa_get_mem_config(xtensa, mtype);
372 result = xtensa_memory_region_find(mcgf, address);
373 if (result)
374 return result;
375 }
376 return NULL;
377 }
378
379 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
380 const struct xtensa_local_mem_config *mem,
381 target_addr_t address)
382 {
383 if (!cache->size)
384 return false;
385 return xtensa_memory_region_find(mem, address);
386 }
387
388 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
389 {
390 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
391 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
392 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
393 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
394 }
395
396 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
397 {
398 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
399 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
400 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
401 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
402 }
403
404 static int xtensa_core_reg_get(struct reg *reg)
405 {
406 /* We don't need this because we read all registers on halt anyway. */
407 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
408 struct target *target = xtensa->target;
409
410 if (target->state != TARGET_HALTED)
411 return ERROR_TARGET_NOT_HALTED;
412 if (!reg->exist) {
413 if (strncmp(reg->name, "?0x", 3) == 0) {
414 unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
415 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
416 return ERROR_OK;
417 }
418 return ERROR_COMMAND_ARGUMENT_INVALID;
419 }
420 return ERROR_OK;
421 }
422
423 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
424 {
425 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
426 struct target *target = xtensa->target;
427
428 assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
429 if (target->state != TARGET_HALTED)
430 return ERROR_TARGET_NOT_HALTED;
431
432 if (!reg->exist) {
433 if (strncmp(reg->name, "?0x", 3) == 0) {
434 unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
435 LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
436 return ERROR_OK;
437 }
438 return ERROR_COMMAND_ARGUMENT_INVALID;
439 }
440
441 buf_cpy(buf, reg->value, reg->size);
442
443 if (xtensa->core_config->windowed) {
444 /* If the user updates a potential scratch register, track for conflicts */
445 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
446 if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
447 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
448 buf_get_u32(reg->value, 0, 32));
449 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
450 xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
451 xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
452 xtensa->scratch_ars[s].intval = true;
453 break;
454 }
455 }
456 }
457 reg->dirty = true;
458 reg->valid = true;
459
460 return ERROR_OK;
461 }
462
/* reg_arch_type hooks installed on every register in the Xtensa core cache */
static const struct reg_arch_type xtensa_reg_type = {
	.get = xtensa_core_reg_get,	/* effectively a no-op; regs are read in bulk on halt */
	.set = xtensa_core_reg_set,	/* cache write + dirty/scratch tracking */
};
467
468 /* Convert a register index that's indexed relative to windowbase, to the real address. */
469 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
470 enum xtensa_reg_id reg_idx,
471 int windowbase)
472 {
473 unsigned int idx;
474 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
475 idx = reg_idx - XT_REG_IDX_AR0;
476 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
477 idx = reg_idx - XT_REG_IDX_A0;
478 } else {
479 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
480 return -1;
481 }
482 /* Each windowbase value represents 4 registers on LX and 8 on NX */
483 int base_inc = (xtensa->core_config->core_type == XT_LX) ? 4 : 8;
484 return ((idx + windowbase * base_inc) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
485 }
486
/* Inverse of xtensa_windowbase_offset_to_canonical(): map a canonical ARx
 * index back to its windowbase-relative index by rotating by -windowbase. */
static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
	enum xtensa_reg_id reg_idx,
	int windowbase)
{
	return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
}
493
494 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
495 {
496 struct reg *reg_list = xtensa->core_cache->reg_list;
497 reg_list[reg_idx].dirty = true;
498 }
499
/* Queue one instruction word into DIR0EXEC; the core executes it when the
 * debug-module queue is flushed (see xtensa_dm_queue_execute). */
static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
{
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0EXEC, ins);
}
504
505 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
506 {
507 const int max_oplen = 64; /* 8 DIRx regs: max width 64B */
508 if ((oplen > 0) && (oplen <= max_oplen)) {
509 uint8_t ops_padded[max_oplen];
510 memcpy(ops_padded, ops, oplen);
511 memset(ops_padded + oplen, 0, max_oplen - oplen);
512 unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
513 for (int32_t i = oplenw - 1; i > 0; i--)
514 xtensa_queue_dbg_reg_write(xtensa,
515 XDMREG_DIR0 + i,
516 target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t)*i]));
517 /* Write DIR0EXEC last */
518 xtensa_queue_dbg_reg_write(xtensa,
519 XDMREG_DIR0EXEC,
520 target_buffer_get_u32(xtensa->target, &ops_padded[0]));
521 }
522 }
523
524 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
525 {
526 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
527 return dm->pwr_ops->queue_reg_write(dm, reg, data);
528 }
529
/* Read the current window-overflow-exception state into *woe and disable it,
 * so subsequent AR accesses cannot trigger overflow exceptions.
 * On LX the state lives in PS (WOE bit); on NX in WB (S field).
 * NOTE: Assumes A3 has already been saved — it is clobbered as scratch here.
 * NOTE: when the config is not windowed, *woe is left untouched (callers only
 * consume it on windowed configs). */
static int xtensa_window_state_save(struct target *target, uint32_t *woe)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
	uint32_t woe_dis;
	uint8_t woe_buf[4];

	if (xtensa->core_config->windowed) {
		/* Save PS (LX) or WB (NX) and disable window overflow exceptions prior to AR save */
		/* Route the SR value out through A3 -> DDR -> host buffer */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, woe_sr, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, woe_buf);
		int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to read %s (%d)!",
				(woe_sr == XT_SR_PS) ? "PS" : "WB", res);
			return res;
		}
		xtensa_core_status_check(target);
		*woe = buf_get_u32(woe_buf, 0, 32);
		/* Clear the overflow-enable bit(s) and push the value back via DDR -> A3 -> SR */
		woe_dis = *woe & ~((woe_sr == XT_SR_PS) ? XT_PS_WOE_MSK : XT_WB_S_MSK);
		LOG_TARGET_DEBUG(target, "Clearing %s (0x%08" PRIx32 " -> 0x%08" PRIx32 ")",
			(woe_sr == XT_SR_PS) ? "PS.WOE" : "WB.S", *woe, woe_dis);
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe_dis);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, woe_sr, XT_REG_A3));
	}
	return ERROR_OK;
}
560
/* Restore the window-overflow-exception state previously captured by
 * xtensa_window_state_save(). Queues DM operations only; the caller is
 * responsible for executing the queue.
 * NOTE: Assumes A3 has already been saved — it is clobbered as scratch here. */
static void xtensa_window_state_restore(struct target *target, uint32_t woe)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
	if (xtensa->core_config->windowed) {
		/* Restore window overflow exception state via DDR -> A3 -> PS/WB */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, woe_sr, XT_REG_A3));
		LOG_TARGET_DEBUG(target, "Restored %s (0x%08" PRIx32 ")",
			(woe_sr == XT_SR_PS) ? "PS.WOE" : "WB", woe);
	}
}
575
576 static bool xtensa_reg_is_readable(int flags, int cpenable)
577 {
578 if (flags & XT_REGF_NOREAD)
579 return false;
580 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
581 return false;
582 return true;
583 }
584
/* Resolve a value conflict between Ax (index i) and its underlying ARx (index j)
 * when both are dirty with different values. If gdb wrote the Ax side (tracked
 * via scratch_ars[a_idx].intval) but not the ARx side, Ax wins and is copied
 * into ARx; otherwise ARx wins. Returns true when BOTH sides were gdb-set,
 * i.e. the caller should still emit a conflict warning. */
static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
{
	/* a_idx is one of the two tracked scratch sets; map it to "a3"/"a4" for logging */
	int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
	if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
		LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
		memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
	} else {
		LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
		memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
	}
	return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
}
597
/* Write every cached register marked dirty back to the core.
 * Ordering constraints honored here:
 *  - SFR/user/FP registers first; CPENABLE is deferred to the end of that
 *    group (it gates access to coprocessor registers — see
 *    xtensa_reg_is_readable);
 *  - on NX, MS is deferred until after the AR file has been written;
 *  - A3 is used as the scratch register for all SR/DDR transfers and is
 *    restored last.
 * Returns the result of flushing the debug-module queue. */
static int xtensa_write_dirty_registers(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	xtensa_reg_val_t regval, windowbase = 0;
	bool scratch_reg_dirty = false, delay_cpenable = false;
	struct reg *reg_list = xtensa->core_cache->reg_list;
	unsigned int reg_list_size = xtensa->core_cache->num_regs;
	bool preserve_a3 = false;
	uint8_t a3_buf[4];
	xtensa_reg_val_t a3 = 0, woe;
	/* ms_idx is only meaningful on NX; reg_list_size is a never-matching sentinel on LX */
	unsigned int ms_idx = (xtensa->core_config->core_type == XT_NX) ?
		xtensa->nx_reg_idx[XT_NX_REG_IDX_MS] : reg_list_size;
	xtensa_reg_val_t ms = 0;
	bool restore_ms = false;

	LOG_TARGET_DEBUG(target, "start");

	/* We need to write the dirty registers in the cache list back to the processor.
	 * Start by writing the SFR/user registers. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		/* Indexes below XT_NUM_REGS are the static table; the rest are optional regs */
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (reg_list[i].dirty) {
			if (rlist[ridx].type == XT_REG_SPECIAL ||
				rlist[ridx].type == XT_REG_USER ||
				rlist[ridx].type == XT_REG_FR) {
				scratch_reg_dirty = true;
				if (i == XT_REG_IDX_CPENABLE) {
					/* Defer CPENABLE so coprocessor regs are written while still enabled */
					delay_cpenable = true;
					continue;
				}
				regval = xtensa_reg_get(target, i);
				LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
					reg_list[i].name,
					rlist[ridx].reg_num,
					regval);
				/* Stage the value: DDR -> A3, then A3 -> destination register */
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				if (reg_list[i].exist) {
					unsigned int reg_num = rlist[ridx].reg_num;
					if (rlist[ridx].type == XT_REG_USER) {
						xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
					} else if (rlist[ridx].type == XT_REG_FR) {
						xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
					} else {/*SFR */
						if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
							if (xtensa->core_config->core_type == XT_LX) {
								/* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
								reg_num = (XT_EPC_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
								xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
							} else {
								/* NX PC set through issuing a jump instruction */
								xtensa_queue_exec_ins(xtensa, XT_INS_JX(xtensa, XT_REG_A3));
							}
						} else if (i == ms_idx) {
							/* MS must be restored after ARs. This ensures ARs remain in correct
							 * order even for reversed register groups (overflow/underflow).
							 */
							ms = regval;
							restore_ms = true;
							LOG_TARGET_DEBUG(target, "Delaying MS write: 0x%x", ms);
						} else {
							xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
						}
					}
				}
				reg_list[i].dirty = false;
			}
		}
	}
	/* A3 was used as scratch above, so it must itself be written back below */
	if (scratch_reg_dirty)
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	if (delay_cpenable) {
		/* Write the deferred CPENABLE now that all coprocessor regs are done */
		regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
		LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
			xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
			XT_REG_A3));
		reg_list[XT_REG_IDX_CPENABLE].dirty = false;
	}

	preserve_a3 = (xtensa->core_config->windowed) || (xtensa->core_config->core_type == XT_NX);
	if (preserve_a3) {
		/* Save (windowed) A3 for scratch use */
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK)
			return res;
		xtensa_core_status_check(target);
		a3 = buf_get_u32(a3_buf, 0, 32);
	}

	if (xtensa->core_config->windowed) {
		/* Disable window-overflow exceptions while we manipulate the AR file */
		res = xtensa_window_state_save(target, &woe);
		if (res != ERROR_OK)
			return res;
		/* Grab the windowbase, we need it. */
		uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
			XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
		windowbase = xtensa_reg_get(target, wb_idx);
		if (xtensa->core_config->core_type == XT_NX)
			windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;

		/* Check if there are mismatches between the ARx and corresponding Ax registers.
		 * When the user sets a register on a windowed config, xt-gdb may set the ARx
		 * register directly. Thus we take ARx as priority over Ax if both are dirty
		 * and it's unclear if the user set one over the other explicitly.
		 */
		for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
			unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
			if (reg_list[i].dirty && reg_list[j].dirty) {
				if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
					bool show_warning = true;
					/* A3/A4 conflicts may be self-inflicted scratch usage; let the
					 * fixup decide whether the user really needs a warning */
					if (i == XT_REG_IDX_A3)
						show_warning = xtensa_scratch_regs_fixup(xtensa,
							reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
					else if (i == XT_REG_IDX_A4)
						show_warning = xtensa_scratch_regs_fixup(xtensa,
							reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
					if (show_warning)
						LOG_WARNING(
							"Warning: Both A%d [0x%08" PRIx32
							"] as well as its underlying physical register "
							"(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
							i - XT_REG_IDX_A0,
							buf_get_u32(reg_list[i].value, 0, 32),
							j - XT_REG_IDX_AR0,
							buf_get_u32(reg_list[j].value, 0, 32));
				}
			}
		}
	}

	/* Write A0-A15 (the windowed view of the current frame). */
	for (unsigned int i = 0; i < 16; i++) {
		if (reg_list[XT_REG_IDX_A0 + i].dirty) {
			regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
			LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
				xtensa_regs[XT_REG_IDX_A0 + i].name,
				regval,
				xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
			reg_list[XT_REG_IDX_A0 + i].dirty = false;
			if (i == 3) {
				/* Avoid stomping A3 during restore at end of function */
				a3 = regval;
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* Now write AR registers */
		for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
			/* Write the 16 registers we can see */
			for (unsigned int i = 0; i < 16; i++) {
				if (i + j < xtensa->core_config->aregs_num) {
					enum xtensa_reg_id realadr =
						xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
						windowbase);
					/* Write back any dirty un-windowed registers */
					if (reg_list[realadr].dirty) {
						regval = xtensa_reg_get(target, realadr);
						LOG_TARGET_DEBUG(
							target,
							"Writing back reg %s value %08" PRIX32 ", num =%i",
							xtensa_regs[realadr].name,
							regval,
							xtensa_regs[realadr].reg_num);
						xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
						xtensa_queue_exec_ins(xtensa,
							XT_INS_RSR(xtensa, XT_SR_DDR,
							xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
						reg_list[realadr].dirty = false;
						if ((i + j) == 3)
							/* Avoid stomping AR during A3 restore at end of function */
							a3 = regval;
					}
				}
			}

			/* Now rotate the window so we'll see the next 16 registers. The final rotate
			 * will wraparound, leaving us in the state we were.
			 * Each ROTW rotates 4 registers on LX and 8 on NX */
			int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, rotw_arg));
		}

		/* Re-enable window-overflow exceptions and reset scratch tracking */
		xtensa_window_state_restore(target, woe);

		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
			xtensa->scratch_ars[s].intval = false;
	}

	if (restore_ms) {
		/* Deferred MS write, now that the AR file is in its final order */
		uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, ms);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, ms_regno, XT_REG_A3));
		LOG_TARGET_DEBUG(target, "Delayed MS (0x%x) write complete: 0x%x", ms_regno, ms);
	}

	if (preserve_a3) {
		/* Restore the saved (possibly overridden) A3 value */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	}

	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	xtensa_core_status_check(target);

	return res;
}
814
815 static inline bool xtensa_is_stopped(struct target *target)
816 {
817 struct xtensa *xtensa = target_to_xtensa(target);
818 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
819 }
820
821 int xtensa_examine(struct target *target)
822 {
823 struct xtensa *xtensa = target_to_xtensa(target);
824 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
825
826 LOG_TARGET_DEBUG(target, "");
827
828 if (xtensa->core_config->core_type == XT_UNDEF) {
829 LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
830 return ERROR_FAIL;
831 }
832
833 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
834 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
835 xtensa_dm_queue_enable(&xtensa->dbg_mod);
836 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
837 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
838 if (res != ERROR_OK)
839 return res;
840 if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
841 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
842 return ERROR_TARGET_FAILURE;
843 }
844 LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
845 target_set_examined(target);
846 xtensa_smpbreak_write(xtensa, xtensa->smp_break);
847 return ERROR_OK;
848 }
849
/* Wake the core, memories and debug domain. If a reset is currently
 * asserted, keep CORERESET set so the wake-up does not release it. */
int xtensa_wakeup(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);

	if (xtensa->reset_asserted)
		cmd |= PWRCTL_CORERESET(xtensa);
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
	/* TODO: can we join this with the write above? */
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	return xtensa_dm_queue_execute(&xtensa->dbg_mod);
}
863
864 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
865 {
866 uint32_t dsr_data = 0x00110000;
867 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
868 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
869 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
870
871 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
872 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, set | OCDDCR_ENABLEOCD);
873 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, clear);
874 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DSR, dsr_data);
875 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
876 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
877 }
878
/* Cache the desired SMP-break configuration and, if the target has already
 * been examined, push it to hardware immediately; otherwise it is applied
 * later by xtensa_examine(). */
int xtensa_smpbreak_set(struct target *target, uint32_t set)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res = ERROR_OK;

	xtensa->smp_break = set;
	if (target_was_examined(target))
		res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
	LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
	return res;
}
890
/* Read the live SMP-break configuration from the DCRSET register.
 * NOTE(review): *val is written even when queue execution failed; callers
 * should check the return code before trusting the value. */
int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
{
	uint8_t dcr_buf[sizeof(uint32_t)];

	xtensa_queue_dbg_reg_read(xtensa, XDMREG_DCRSET, dcr_buf);
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	*val = buf_get_u32(dcr_buf, 0, 32);

	return res;
}
902
/* Return the cached SMP-break configuration (no hardware access). */
int xtensa_smpbreak_get(struct target *target, uint32_t *val)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	*val = xtensa->smp_break;
	return ERROR_OK;
}
909
/* Return the cached 32-bit value of 'reg' (no target access). */
static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
{
	return buf_get_u32(reg->value, 0, 32);
}
914
/* Update the cached value of 'reg' and mark it dirty for later write-back. */
static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
{
	buf_set_u32(reg->value, 0, 32, value);
	reg->dirty = true;
}
920
921 static int xtensa_imprecise_exception_occurred(struct target *target)
922 {
923 struct xtensa *xtensa = target_to_xtensa(target);
924 for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESR; idx++) {
925 enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
926 if (xtensa->nx_reg_idx[idx]) {
927 xtensa_reg_val_t reg = xtensa_reg_get(target, xtensa->nx_reg_idx[idx]);
928 if (reg & XT_IMPR_EXC_MSK) {
929 LOG_TARGET_DEBUG(target, "Imprecise exception: %s: 0x%x",
930 xtensa->core_cache->reg_list[ridx].name, reg);
931 return true;
932 }
933 }
934 }
935 return false;
936 }
937
938 static void xtensa_imprecise_exception_clear(struct target *target)
939 {
940 struct xtensa *xtensa = target_to_xtensa(target);
941 for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESRCLR; idx++) {
942 enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
943 if (ridx && idx != XT_NX_REG_IDX_MESR) {
944 xtensa_reg_val_t value = (idx == XT_NX_REG_IDX_MESRCLR) ? XT_MESRCLR_IMPR_EXC_MSK : 0;
945 xtensa_reg_set(target, ridx, value);
946 LOG_TARGET_DEBUG(target, "Imprecise exception: clearing %s (0x%x)",
947 xtensa->core_cache->reg_list[ridx].name, value);
948 }
949 }
950 }
951
952 int xtensa_core_status_check(struct target *target)
953 {
954 struct xtensa *xtensa = target_to_xtensa(target);
955 int res, needclear = 0, needimprclear = 0;
956
957 xtensa_dm_core_status_read(&xtensa->dbg_mod);
958 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
959 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
960 if (dsr & OCDDSR_EXECBUSY) {
961 if (!xtensa->suppress_dsr_errors)
962 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
963 needclear = 1;
964 }
965 if (dsr & OCDDSR_EXECEXCEPTION) {
966 if (!xtensa->suppress_dsr_errors)
967 LOG_TARGET_ERROR(target,
968 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
969 dsr);
970 needclear = 1;
971 }
972 if (dsr & OCDDSR_EXECOVERRUN) {
973 if (!xtensa->suppress_dsr_errors)
974 LOG_TARGET_ERROR(target,
975 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
976 dsr);
977 needclear = 1;
978 }
979 if (xtensa->core_config->core_type == XT_NX && (xtensa_imprecise_exception_occurred(target))) {
980 if (!xtensa->suppress_dsr_errors)
981 LOG_TARGET_ERROR(target,
982 "%s: Imprecise exception occurred!", target_name(target));
983 needclear = 1;
984 needimprclear = 1;
985 }
986 if (needclear) {
987 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
988 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
989 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
990 LOG_TARGET_ERROR(target, "clearing DSR failed!");
991 if (xtensa->core_config->core_type == XT_NX && needimprclear)
992 xtensa_imprecise_exception_clear(target);
993 return ERROR_FAIL;
994 }
995 return ERROR_OK;
996 }
997
998 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
999 {
1000 struct xtensa *xtensa = target_to_xtensa(target);
1001 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1002 return xtensa_reg_get_value(reg);
1003 }
1004
1005 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
1006 {
1007 struct xtensa *xtensa = target_to_xtensa(target);
1008 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1009 if (xtensa_reg_get_value(reg) == value)
1010 return;
1011 xtensa_reg_set_value(reg, value);
1012 }
1013
1014 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
1015 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
1016 {
1017 struct xtensa *xtensa = target_to_xtensa(target);
1018 uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
1019 XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
1020 uint32_t windowbase = (xtensa->core_config->windowed ?
1021 xtensa_reg_get(target, wb_idx) : 0);
1022 if (xtensa->core_config->core_type == XT_NX)
1023 windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
1024 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
1025 xtensa_reg_set(target, a_idx, value);
1026 xtensa_reg_set(target, ar_idx, value);
1027 }
1028
1029 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
1030 uint32_t xtensa_cause_get(struct target *target)
1031 {
1032 struct xtensa *xtensa = target_to_xtensa(target);
1033 if (xtensa->core_config->core_type == XT_LX) {
1034 /* LX cause in DEBUGCAUSE */
1035 return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
1036 }
1037 if (xtensa->nx_stop_cause & DEBUGCAUSE_VALID)
1038 return xtensa->nx_stop_cause;
1039
1040 /* NX cause determined from DSR.StopCause */
1041 if (xtensa_dm_core_status_read(&xtensa->dbg_mod) != ERROR_OK) {
1042 LOG_TARGET_ERROR(target, "Read DSR error");
1043 } else {
1044 uint32_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
1045 /* NX causes are prioritized; only 1 bit can be set */
1046 switch ((dsr & OCDDSR_STOPCAUSE) >> OCDDSR_STOPCAUSE_SHIFT) {
1047 case OCDDSR_STOPCAUSE_DI:
1048 xtensa->nx_stop_cause = DEBUGCAUSE_DI;
1049 break;
1050 case OCDDSR_STOPCAUSE_SS:
1051 xtensa->nx_stop_cause = DEBUGCAUSE_IC;
1052 break;
1053 case OCDDSR_STOPCAUSE_IB:
1054 xtensa->nx_stop_cause = DEBUGCAUSE_IB;
1055 break;
1056 case OCDDSR_STOPCAUSE_B:
1057 case OCDDSR_STOPCAUSE_B1:
1058 xtensa->nx_stop_cause = DEBUGCAUSE_BI;
1059 break;
1060 case OCDDSR_STOPCAUSE_BN:
1061 xtensa->nx_stop_cause = DEBUGCAUSE_BN;
1062 break;
1063 case OCDDSR_STOPCAUSE_DB0:
1064 case OCDDSR_STOPCAUSE_DB1:
1065 xtensa->nx_stop_cause = DEBUGCAUSE_DB;
1066 break;
1067 default:
1068 LOG_TARGET_ERROR(target, "Unknown stop cause (DSR: 0x%08x)", dsr);
1069 break;
1070 }
1071 if (xtensa->nx_stop_cause)
1072 xtensa->nx_stop_cause |= DEBUGCAUSE_VALID;
1073 }
1074 return xtensa->nx_stop_cause;
1075 }
1076
1077 void xtensa_cause_clear(struct target *target)
1078 {
1079 struct xtensa *xtensa = target_to_xtensa(target);
1080 if (xtensa->core_config->core_type == XT_LX) {
1081 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
1082 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
1083 } else {
1084 /* NX DSR.STOPCAUSE is not writeable; clear cached copy but leave it valid */
1085 xtensa->nx_stop_cause = DEBUGCAUSE_VALID;
1086 }
1087 }
1088
1089 void xtensa_cause_reset(struct target *target)
1090 {
1091 /* Clear DEBUGCAUSE_VALID to trigger re-read (on NX) */
1092 struct xtensa *xtensa = target_to_xtensa(target);
1093 xtensa->nx_stop_cause = 0;
1094 }
1095
/* Assert core reset while keeping debug logic powered and claimed for JTAG,
 * then invalidate all cached register state since the core will restart. */
int xtensa_assert_reset(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, " begin");
	xtensa_queue_pwr_reg_write(xtensa,
		XDMREG_PWRCTL,
		PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
		PWRCTL_COREWAKEUP(xtensa) | PWRCTL_CORERESET(xtensa));
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;

	/* registers are now invalid */
	xtensa->reset_asserted = true;
	register_cache_invalidate(xtensa->core_cache);
	target->state = TARGET_RESET;
	return ERROR_OK;
}
1116
/* Release core reset. When reset_halt is requested, a debug interrupt is
 * pre-armed in DCR so the core halts immediately after coming out of reset. */
int xtensa_deassert_reset(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
	if (target->reset_halt)
		/* Arm a debug interrupt so the core stops right after reset release */
		xtensa_queue_dbg_reg_write(xtensa,
			XDMREG_DCRSET,
			OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
	/* Same power bits as assert_reset, but without CORERESET */
	xtensa_queue_pwr_reg_write(xtensa,
		XDMREG_PWRCTL,
		PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
		PWRCTL_COREWAKEUP(xtensa));
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	target->state = TARGET_RUNNING;
	xtensa->reset_asserted = false;
	return res;
}
1138
/* Soft-reset-halt is implemented as a plain reset assertion; the halt
 * happens when reset is later deasserted with reset_halt set. */
int xtensa_soft_reset_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "begin");
	return xtensa_assert_reset(target);
}
1144
/* Read the complete register set from the target into the register cache.
 * For speed, reads are queued and executed in large batches; results are then
 * decoded from 'regvals' (plus per-read DSR snapshots in 'dsrs' when
 * debugging) into the cache. A3 - and on NX also A0 and MS - is used as
 * scratch and restored/flagged dirty at the end.
 * Returns ERROR_OK on success or an OpenOCD error code. */
int xtensa_fetch_all_regs(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg *reg_list = xtensa->core_cache->reg_list;
	unsigned int reg_list_size = xtensa->core_cache->num_regs;
	xtensa_reg_val_t cpenable = 0, windowbase = 0, a0 = 0, a3;
	unsigned int ms_idx = reg_list_size;	/* out-of-range sentinel; set properly on NX only */
	uint32_t ms = 0;
	uint32_t woe;
	uint8_t a0_buf[4], a3_buf[4], ms_buf[4];
	/* DSR capture per read is extra traffic; only do it on first fetch or when debug-logging */
	bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);

	union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
	if (!regvals) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
		return ERROR_FAIL;
	}
	union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
	if (!dsrs) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
		free(regvals);
		return ERROR_FAIL;
	}

	LOG_TARGET_DEBUG(target, "start");

	/* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
	if (xtensa->core_config->core_type == XT_NX) {
		/* Save (windowed) A0 as well--it will be required for reading PC */
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A0));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a0_buf);

		/* Set MS.DispSt, clear MS.DE prior to accessing ARs. This ensures ARs remain
		 * in correct order even for reversed register groups (overflow/underflow).
		 */
		ms_idx = xtensa->nx_reg_idx[XT_NX_REG_IDX_MS];
		uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
		/* Save the original MS value (via A3/DDR), then write XT_MS_DISPST_DBG */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, ms_regno, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, ms_buf);
		LOG_TARGET_DEBUG(target, "Overriding MS (0x%x): 0x%x", ms_regno, XT_MS_DISPST_DBG);
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, XT_MS_DISPST_DBG);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, ms_regno, XT_REG_A3));
	}

	int res = xtensa_window_state_save(target, &woe);
	if (res != ERROR_OK)
		goto xtensa_fetch_all_regs_done;

	/* Assume the CPU has just halted. We now want to fill the register cache with all the
	 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
	 * in one go, then sort everything out from the regvals variable. */

	/* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
	for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
		/*Grab the 16 registers we can see */
		for (unsigned int i = 0; i < 16; i++) {
			if (i + j < xtensa->core_config->aregs_num) {
				xtensa_queue_exec_ins(xtensa,
					XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
					regvals[XT_REG_IDX_AR0 + i + j].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR,
						dsrs[XT_REG_IDX_AR0 + i + j].buf);
			}
		}
		if (xtensa->core_config->windowed) {
			/* Now rotate the window so we'll see the next 16 registers. The final rotate
			 * will wraparound, leaving us in the state we were.
			 * Each ROTW rotates 4 registers on LX and 8 on NX */
			int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, rotw_arg));
		}
	}
	xtensa_window_state_restore(target, woe);

	if (xtensa->core_config->coproc) {
		/* As the very first thing after AREGS, go grab CPENABLE */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
	}
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to read ARs (%d)!", res);
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	/* Decode the saved scratch values captured above */
	a3 = buf_get_u32(a3_buf, 0, 32);
	if (xtensa->core_config->core_type == XT_NX) {
		a0 = buf_get_u32(a0_buf, 0, 32);
		ms = buf_get_u32(ms_buf, 0, 32);
	}

	if (xtensa->core_config->coproc) {
		cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);

		/* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));

		/* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
		LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
		xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	}
	/* We're now free to use any of A0-A15 as scratch registers
	 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			bool reg_fetched = true;
			unsigned int reg_num = rlist[ridx].reg_num;
			switch (rlist[ridx].type) {
			case XT_REG_USER:
				xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_FR:
				xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_SPECIAL:
				if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
					if (xtensa->core_config->core_type == XT_LX) {
						/* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
						reg_num = XT_EPC_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
						xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
					} else {
						/* NX PC read through CALL0(0) and reading A0 */
						xtensa_queue_exec_ins(xtensa, XT_INS_CALL0(xtensa, 0));
						xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A0));
						xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
						xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
						reg_fetched = false;
					}
				} else if ((xtensa->core_config->core_type == XT_LX)
					&& (reg_num == xtensa_regs[XT_REG_IDX_PS].reg_num)) {
					/* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
					reg_num = XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
				} else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
					/* CPENABLE already read/updated; don't re-read */
					reg_fetched = false;
					break;
				} else {
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
				}
				break;
			default:
				reg_fetched = false;
			}
			if (reg_fetched) {
				/* Common tail: move A3 (holding the value) out through DDR */
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
			}
		}
	}
	/* Ok, send the whole mess to the CPU. */
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to fetch AR regs!");
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	if (debug_dsrs) {
		/* DSR checking: follows order in which registers are requested. */
		for (unsigned int i = 0; i < reg_list_size; i++) {
			struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
			unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
			if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
				(rlist[ridx].type != XT_REG_DEBUG) &&
				(rlist[ridx].type != XT_REG_RELGEN) &&
				(rlist[ridx].type != XT_REG_TIE) &&
				(rlist[ridx].type != XT_REG_OTHER)) {
				if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
					LOG_ERROR("Exception reading %s!", reg_list[i].name);
					res = ERROR_FAIL;
					goto xtensa_fetch_all_regs_done;
				}
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* We need the windowbase to decode the general addresses. */
		uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
			XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
		windowbase = buf_get_u32(regvals[wb_idx].buf, 0, 32);
		if (xtensa->core_config->core_type == XT_NX)
			windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
	}

	/* Decode the result and update the cache. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
				/* The 64-value general register set is read from (windowbase) on down.
				 * We need to get the real register address by subtracting windowbase and
				 * wrapping around. */
				enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
					windowbase);
				buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
			} else if (rlist[ridx].type == XT_REG_RELGEN) {
				buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
				if (xtensa_extra_debug_log) {
					xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
					LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
				}
			} else {
				xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
				/* CPENABLE was force-enabled above; keep it dirty so the original is restored */
				bool is_dirty = (i == XT_REG_IDX_CPENABLE);
				if (xtensa_extra_debug_log)
					LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
				if (rlist[ridx].reg_num == XT_PC_REG_NUM_VIRTUAL &&
					xtensa->core_config->core_type == XT_NX) {
					/* A0 from prior CALL0 points to next instruction; decrement it */
					regval -= 3;
					is_dirty = 1;
				} else if (i == ms_idx) {
					/* Restore the MS value captured before the debug override */
					LOG_TARGET_DEBUG(target, "Caching MS: 0x%x", ms);
					regval = ms;
					is_dirty = 1;
				}
				xtensa_reg_set(target, i, regval);
				reg_list[i].dirty = is_dirty;	/*always do this _after_ xtensa_reg_set! */
			}
			reg_list[i].valid = true;
		} else {
			if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
				/* Report read-only registers all-zero but valid */
				reg_list[i].valid = true;
				xtensa_reg_set(target, i, 0);
			} else {
				reg_list[i].valid = false;
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* We have used A3 as a scratch register.
		 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
		 */
		enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
		xtensa_reg_set(target, ar3_idx, a3);
		xtensa_mark_register_dirty(xtensa, ar3_idx);

		/* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
		enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
			xtensa->scratch_ars[s].intval = false;
	}

	/* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
	xtensa_reg_set(target, XT_REG_IDX_A3, a3);
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	if (xtensa->core_config->core_type == XT_NX) {
		xtensa_reg_set(target, XT_REG_IDX_A0, a0);
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A0);
	}

	xtensa->regs_fetched = true;
xtensa_fetch_all_regs_done:
	free(regvals);
	free(dsrs);
	return res;
}
1423
1424 int xtensa_get_gdb_reg_list(struct target *target,
1425 struct reg **reg_list[],
1426 int *reg_list_size,
1427 enum target_register_class reg_class)
1428 {
1429 struct xtensa *xtensa = target_to_xtensa(target);
1430 unsigned int num_regs;
1431
1432 if (reg_class == REG_CLASS_GENERAL) {
1433 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1434 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1435 return ERROR_FAIL;
1436 }
1437 num_regs = xtensa->genpkt_regs_num;
1438 } else {
1439 /* Determine whether to return a contiguous or sparse register map */
1440 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1441 }
1442
1443 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1444
1445 *reg_list = calloc(num_regs, sizeof(struct reg *));
1446 if (!*reg_list)
1447 return ERROR_FAIL;
1448
1449 *reg_list_size = num_regs;
1450 if (xtensa->regmap_contiguous) {
1451 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1452 for (unsigned int i = 0; i < num_regs; i++)
1453 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1454 return ERROR_OK;
1455 }
1456
1457 for (unsigned int i = 0; i < num_regs; i++)
1458 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1459 unsigned int k = 0;
1460 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1461 if (xtensa->core_cache->reg_list[i].exist) {
1462 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1463 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1464 int sparse_idx = rlist[ridx].dbreg_num;
1465 if (i == XT_REG_IDX_PS && xtensa->core_config->core_type == XT_LX) {
1466 if (xtensa->eps_dbglevel_idx == 0) {
1467 LOG_ERROR("eps_dbglevel_idx not set\n");
1468 return ERROR_FAIL;
1469 }
1470 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1471 if (xtensa_extra_debug_log)
1472 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1473 sparse_idx, xtensa->core_config->debug.irq_level,
1474 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1475 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1476 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1477 } else {
1478 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1479 }
1480 if (i == XT_REG_IDX_PC)
1481 /* Make a duplicate copy of PC for external access */
1482 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1483 k++;
1484 }
1485 }
1486
1487 if (k == num_regs)
1488 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1489
1490 return ERROR_OK;
1491 }
1492
1493 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1494 {
1495 struct xtensa *xtensa = target_to_xtensa(target);
1496 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1497 xtensa->core_config->mmu.dtlb_entries_count > 0;
1498 return ERROR_OK;
1499 }
1500
1501 int xtensa_halt(struct target *target)
1502 {
1503 struct xtensa *xtensa = target_to_xtensa(target);
1504
1505 LOG_TARGET_DEBUG(target, "start");
1506 if (target->state == TARGET_HALTED) {
1507 LOG_TARGET_DEBUG(target, "target was already halted");
1508 return ERROR_OK;
1509 }
1510 /* First we have to read dsr and check if the target stopped */
1511 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1512 if (res != ERROR_OK) {
1513 LOG_TARGET_ERROR(target, "Failed to read core status!");
1514 return res;
1515 }
1516 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1517 if (!xtensa_is_stopped(target)) {
1518 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1519 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1520 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1521 if (res != ERROR_OK)
1522 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1523 }
1524
1525 return res;
1526 }
1527
1528 int xtensa_prepare_resume(struct target *target,
1529 int current,
1530 target_addr_t address,
1531 int handle_breakpoints,
1532 int debug_execution)
1533 {
1534 struct xtensa *xtensa = target_to_xtensa(target);
1535 uint32_t bpena = 0;
1536
1537 LOG_TARGET_DEBUG(target,
1538 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1539 current,
1540 address,
1541 handle_breakpoints,
1542 debug_execution);
1543
1544 if (target->state != TARGET_HALTED) {
1545 LOG_TARGET_ERROR(target, "not halted");
1546 return ERROR_TARGET_NOT_HALTED;
1547 }
1548 xtensa->halt_request = false;
1549
1550 if (address && !current) {
1551 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1552 } else {
1553 uint32_t cause = xtensa_cause_get(target);
1554 LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1555 cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1556 if (cause & DEBUGCAUSE_DB)
1557 /* We stopped due to a watchpoint. We can't just resume executing the
1558 * instruction again because */
1559 /* that would trigger the watchpoint again. To fix this, we single-step,
1560 * which ignores watchpoints. */
1561 xtensa_do_step(target, current, address, handle_breakpoints);
1562 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1563 /* We stopped due to a break instruction. We can't just resume executing the
1564 * instruction again because */
1565 /* that would trigger the break again. To fix this, we single-step, which
1566 * ignores break. */
1567 xtensa_do_step(target, current, address, handle_breakpoints);
1568 }
1569
1570 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1571 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1572 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1573 if (xtensa->hw_brps[slot]) {
1574 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1575 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1576 if (xtensa->core_config->core_type == XT_NX)
1577 xtensa_reg_set(target, xtensa->nx_reg_idx[XT_NX_REG_IDX_IBREAKC0] + slot, XT_IBREAKC_FB);
1578 bpena |= BIT(slot);
1579 }
1580 }
1581 if (xtensa->core_config->core_type == XT_LX)
1582 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1583
1584 /* Here we write all registers to the targets */
1585 int res = xtensa_write_dirty_registers(target);
1586 if (res != ERROR_OK)
1587 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1588 return res;
1589 }
1590
1591 int xtensa_do_resume(struct target *target)
1592 {
1593 struct xtensa *xtensa = target_to_xtensa(target);
1594
1595 LOG_TARGET_DEBUG(target, "start");
1596
1597 xtensa_cause_reset(target);
1598 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1599 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1600 if (res != ERROR_OK) {
1601 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1602 return res;
1603 }
1604 xtensa_core_status_check(target);
1605 return ERROR_OK;
1606 }
1607
1608 int xtensa_resume(struct target *target,
1609 int current,
1610 target_addr_t address,
1611 int handle_breakpoints,
1612 int debug_execution)
1613 {
1614 LOG_TARGET_DEBUG(target, "start");
1615 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1616 if (res != ERROR_OK) {
1617 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1618 return res;
1619 }
1620 res = xtensa_do_resume(target);
1621 if (res != ERROR_OK) {
1622 LOG_TARGET_ERROR(target, "Failed to resume!");
1623 return res;
1624 }
1625
1626 target->debug_reason = DBG_REASON_NOTHALTED;
1627 if (!debug_execution)
1628 target->state = TARGET_RUNNING;
1629 else
1630 target->state = TARGET_DEBUG_RUNNING;
1631
1632 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1633
1634 return ERROR_OK;
1635 }
1636
1637 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1638 {
1639 struct xtensa *xtensa = target_to_xtensa(target);
1640 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1641 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1642 if (err != ERROR_OK)
1643 return false;
1644
1645 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1646 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1647 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1648 return true;
1649
1650 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1651 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1652 return true;
1653
1654 return false;
1655 }
1656
/**
 * Execute a single instruction on the core.
 *
 * On LX this arms ICOUNT/ICOUNTLEVEL so the core halts after one counted
 * instruction; on NX it sets DCR.StepRequest instead. The core is then
 * resumed via the normal prepare/resume path and polled (DSR only, not
 * target_poll) until it halts again.
 *
 * @param current            If non-zero, step from the current PC; otherwise
 *                           set PC to @p address first (handled inside
 *                           xtensa_prepare_resume()).
 * @param address            Target PC when @p current is zero; also reused
 *                           internally to step out of window exceptions.
 * @param handle_breakpoints If non-zero and we halted on a break instruction,
 *                           skip over it (PC += insn size) instead of stepping.
 * @return ERROR_OK on success, ERROR_TARGET_NOT_HALTED / ERROR_FAIL /
 *         a propagated error code otherwise.
 */
int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	/* ICOUNT counts up and traps on overflow from -1 to 0, so loading -2
	 * makes it fire after exactly one instruction. */
	const uint32_t icount_val = -2;	/* ICOUNT value to load for 1 step */
	/* Saved DBREAKC values while watchpoints are temporarily disabled. */
	xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
	xtensa_reg_val_t icountlvl, cause;
	xtensa_reg_val_t oldps, oldpc, cur_pc;
	/* True when PS.INTLEVEL was lowered and must be restored afterwards. */
	bool ps_lowered = false;

	LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
		current, address, handle_breakpoints);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* On LX the PS seen by the debugger lives in EPS[debuglevel]; its
	 * register-cache index must have been discovered during examine. */
	if (xtensa->eps_dbglevel_idx == 0 && xtensa->core_config->core_type == XT_LX) {
		LOG_TARGET_ERROR(target, "eps_dbglevel_idx not set\n");
		return ERROR_FAIL;
	}

	/* Save old ps (EPS[dbglvl] on LX), pc */
	oldps = xtensa_reg_get(target, (xtensa->core_config->core_type == XT_LX) ?
		xtensa->eps_dbglevel_idx : XT_REG_IDX_PS);
	oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);

	cause = xtensa_cause_get(target);
	LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
		oldps,
		oldpc,
		cause,
		xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
	if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
		/* handle hard-coded SW breakpoints (e.g. syscalls) */
		LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* Pretend that we have stepped: just advance PC past the BREAK
		 * (3 bytes) or BREAK.N (2 bytes) instruction. */
		if (cause & DEBUGCAUSE_BI)
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3);	/* PC = PC+3 */
		else
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2);	/* PC = PC+2 */
		return ERROR_OK;
	}

	/* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
	 * at which the instructions are to be counted while stepping.
	 *
	 * For example, if we need to step by 2 instructions, and an interrupt occurs
	 * in between, the processor will trigger the interrupt and halt after the 2nd
	 * instruction within the interrupt vector and/or handler.
	 *
	 * However, sometimes we don't want the interrupt handlers to be executed at all
	 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
	 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
	 * code from being counted during stepping. Note that C exception handlers must
	 * run at level 0 and hence will be counted and stepped into, should one occur.
	 *
	 * TODO: Certain instructions should never be single-stepped and should instead
	 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
	 * RFI >= DBGLEVEL.
	 */
	if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_TARGET_WARNING(
				target,
				"disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
			return ERROR_FAIL;
		}
		/* Update ICOUNTLEVEL accordingly */
		icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
	} else {
		icountlvl = xtensa->core_config->debug.irq_level;
	}

	if (cause & DEBUGCAUSE_DB) {
		/* We stopped due to a watchpoint. We can't just resume executing the instruction again because
		 * that would trigger the watchpoint again. To fix this, we remove watchpoints,single-step and
		 * re-enable the watchpoint. */
		LOG_TARGET_DEBUG(
			target,
			"Single-stepping to get past instruction that triggered the watchpoint...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* Save all DBREAKCx registers and set to 0 to disable watchpoints */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
			dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
		}
	}

	if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
		/* handle normal SW breakpoint */
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
	if (xtensa->core_config->core_type == XT_LX && ((oldps & 0xf) >= icountlvl)) {
		/* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
		ps_lowered = true;
		uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
		LOG_TARGET_DEBUG(target,
			"Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			newps,
			oldps);
	}
	/* This loop normally runs once; it repeats only to step out of a
	 * window exception handler (see below). */
	do {
		if (xtensa->core_config->core_type == XT_LX) {
			xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
			xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
		} else {
			/* NX: request a single step via the debug control register. */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_STEPREQUEST);
		}

		/* Now that ICOUNT (LX) or DCR.StepRequest (NX) is set,
		 * we can resume as if we were going to run
		 */
		res = xtensa_prepare_resume(target, current, address, 0, 0);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
			return res;
		}
		res = xtensa_do_resume(target);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
			return res;
		}

		/* Wait for stepping to complete (500 ms timeout) */
		long long start = timeval_ms();
		while (timeval_ms() < start + 500) {
			/* Do not use target_poll here, it also triggers other things... just manually read the DSR
			 *until stepping is complete. */
			usleep(1000);
			res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
			if (res != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Failed to read core status!");
				return res;
			}
			if (xtensa_is_stopped(target))
				break;
			usleep(1000);
		}
		LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
			xtensa_dm_core_status_get(&xtensa->dbg_mod));
		if (!xtensa_is_stopped(target)) {
			/* Core never halted: leave it marked as running and bail out. */
			LOG_TARGET_WARNING(
				target,
				"Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
			target->debug_reason = DBG_REASON_NOTHALTED;
			target->state = TARGET_RUNNING;
			return ERROR_FAIL;
		}

		xtensa_fetch_all_regs(target);
		cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);

		LOG_TARGET_DEBUG(target,
			"cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
			xtensa_reg_get(target, XT_REG_IDX_PS),
			cur_pc,
			xtensa_cause_get(target),
			xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));

		/* Do not step into WindowOverflow if ISRs are masked.
		   If we stop in WindowOverflow at breakpoint with masked ISRs and
		   try to do a step it will get us out of that handler */
		if (xtensa->core_config->windowed &&
			xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
			xtensa_pc_in_winexc(target, cur_pc)) {
			/* isrmask = on, need to step out of the window exception handler */
			LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
			oldpc = cur_pc;
			address = oldpc + 3;
			continue;
		}

		if (oldpc == cur_pc)
			LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
		else
			LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
		break;
	} while (true);

	target->debug_reason = DBG_REASON_SINGLESTEP;
	target->state = TARGET_HALTED;
	LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);

	if (cause & DEBUGCAUSE_DB) {
		LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
		/* Restore the DBREAKCx registers */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
	}

	/* Restore int level */
	if (ps_lowered) {
		LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			oldps);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
	}

	/* write ICOUNTLEVEL back to zero */
	/* NOTE(review): done unconditionally, i.e. also on NX which used
	 * DCR.StepRequest above — confirm this is intentional/harmless there. */
	xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
	/* TODO: can we skip writing dirty registers and re-fetching them? */
	res = xtensa_write_dirty_registers(target);
	xtensa_fetch_all_regs(target);
	return res;
}
1868
1869 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1870 {
1871 int retval = xtensa_do_step(target, current, address, handle_breakpoints);
1872 if (retval != ERROR_OK)
1873 return retval;
1874 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1875
1876 return ERROR_OK;
1877 }
1878
1879 /**
1880 * Returns true if two ranges are overlapping
1881 */
1882 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1883 target_addr_t r1_end,
1884 target_addr_t r2_start,
1885 target_addr_t r2_end)
1886 {
1887 if ((r2_start >= r1_start) && (r2_start < r1_end))
1888 return true; /* r2_start is in r1 region */
1889 if ((r2_end > r1_start) && (r2_end <= r1_end))
1890 return true; /* r2_end is in r1 region */
1891 return false;
1892 }
1893
1894 /**
1895 * Returns a size of overlapped region of two ranges.
1896 */
1897 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1898 target_addr_t r1_end,
1899 target_addr_t r2_start,
1900 target_addr_t r2_end)
1901 {
1902 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1903 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1904 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1905 return ov_end - ov_start;
1906 }
1907 return 0;
1908 }
1909
1910 /**
1911 * Check if the address gets to memory regions, and its access mode
1912 */
1913 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1914 {
1915 target_addr_t adr_pos = address; /* address cursor set to the beginning start */
1916 target_addr_t adr_end = address + size; /* region end */
1917 target_addr_t overlap_size;
1918 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1919
1920 while (adr_pos < adr_end) {
1921 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1922 if (!cm) /* address is not belong to anything */
1923 return false;
1924 if ((cm->access & access) != access) /* access check */
1925 return false;
1926 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1927 assert(overlap_size != 0);
1928 adr_pos += overlap_size;
1929 }
1930 return true;
1931 }
1932
1933 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1934 {
1935 struct xtensa *xtensa = target_to_xtensa(target);
1936 /* We are going to read memory in 32-bit increments. This may not be what the calling
1937 * function expects, so we may need to allocate a temp buffer and read into that first. */
1938 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1939 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1940 target_addr_t adr = addrstart_al;
1941 uint8_t *albuff;
1942 bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1943
1944 if (target->state != TARGET_HALTED) {
1945 LOG_TARGET_ERROR(target, "not halted");
1946 return ERROR_TARGET_NOT_HALTED;
1947 }
1948
1949 if (!xtensa->permissive_mode) {
1950 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1951 XT_MEM_ACCESS_READ)) {
1952 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1953 return ERROR_FAIL;
1954 }
1955 }
1956
1957 unsigned int alloc_bytes = ALIGN_UP(addrend_al - addrstart_al, sizeof(uint32_t));
1958 albuff = calloc(alloc_bytes, 1);
1959 if (!albuff) {
1960 LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
1961 addrend_al - addrstart_al);
1962 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1963 }
1964
1965 /* We're going to use A3 here */
1966 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1967 /* Write start address to A3 */
1968 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1969 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1970 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1971 if (xtensa->probe_lsddr32p != 0) {
1972 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1973 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1974 xtensa_queue_dbg_reg_read(xtensa,
1975 (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
1976 &albuff[i]);
1977 } else {
1978 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1979 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1980 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1981 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
1982 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[i]);
1983 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1984 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1985 }
1986 }
1987 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1988 if (res == ERROR_OK) {
1989 bool prev_suppress = xtensa->suppress_dsr_errors;
1990 xtensa->suppress_dsr_errors = true;
1991 res = xtensa_core_status_check(target);
1992 if (xtensa->probe_lsddr32p == -1)
1993 xtensa->probe_lsddr32p = 1;
1994 xtensa->suppress_dsr_errors = prev_suppress;
1995 }
1996 if (res != ERROR_OK) {
1997 if (xtensa->probe_lsddr32p != 0) {
1998 /* Disable fast memory access instructions and retry before reporting an error */
1999 LOG_TARGET_DEBUG(target, "Disabling LDDR32.P/SDDR32.P");
2000 xtensa->probe_lsddr32p = 0;
2001 res = xtensa_read_memory(target, address, size, count, albuff);
2002 bswap = false;
2003 } else {
2004 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
2005 count * size, address);
2006 }
2007 }
2008
2009 if (bswap)
2010 buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
2011 memcpy(buffer, albuff + (address & 3), (size * count));
2012 free(albuff);
2013 return res;
2014 }
2015
2016 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2017 {
2018 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
2019 return xtensa_read_memory(target, address, 1, count, buffer);
2020 }
2021
/**
 * Write @p count elements of @p size bytes to target memory at @p address.
 *
 * The debug module transfers data in aligned 32-bit words, so unaligned
 * requests are widened: the partial head/tail words are first read back
 * from the target, merged with the caller's data in a temporary buffer,
 * and the whole aligned span is then written. The fast path uses SDDR32.P
 * (auto-incrementing store via DDR); on failure it falls back to S32I
 * sequences and retries once. On success, affected icache lines are
 * invalidated and dcache lines written back/invalidated.
 *
 * Clobbers A3 (and A4 on the slow path); both are marked dirty so they are
 * restored from the register cache on resume.
 *
 * @return ERROR_OK on success, or a target error code.
 */
int xtensa_write_memory(struct target *target,
	target_addr_t address,
	uint32_t size,
	uint32_t count,
	const uint8_t *buffer)
{
	/* This memory write function can get thrown nigh everything into it, from
	 * aligned uint32 writes to unaligned uint8ths. The Xtensa memory doesn't always
	 * accept anything but aligned uint32 writes, though. That is why we convert
	 * everything into that. */
	struct xtensa *xtensa = target_to_xtensa(target);
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
	target_addr_t adr = addrstart_al;
	int res;
	uint8_t *albuff;
	/* True when the request is unaligned and head/tail words must be merged. */
	bool fill_head_tail = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!xtensa->permissive_mode) {
		if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
			LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
			return ERROR_FAIL;
		}
	}

	if (size == 0 || count == 0 || !buffer)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Allocate a temporary buffer to put the aligned bytes in, if needed. */
	if (addrstart_al == address && addrend_al == address + (size * count)) {
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
			/* Need a buffer for byte-swapping */
			albuff = malloc(addrend_al - addrstart_al);
		else
			/* We discard the const here because albuff can also be non-const */
			albuff = (uint8_t *)buffer;
	} else {
		fill_head_tail = true;
		albuff = malloc(addrend_al - addrstart_al);
	}
	if (!albuff) {
		LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
			addrend_al - addrstart_al);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* We're going to use A3 here */
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

	/* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
	if (fill_head_tail) {
		/* See if we need to read the first and/or last word. */
		if (address & 3) {
			/* Read the word containing the unaligned start into albuff[0..3]. */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[0]);
		}
		if ((address + (size * count)) & 3) {
			/* Read the word containing the unaligned end into the buffer tail. */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
				&albuff[addrend_al - addrstart_al - 4]);
		}
		/* Grab bytes */
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK) {
			LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
			if (albuff != buffer)
				free(albuff);
			return res;
		}
		xtensa_core_status_check(target);
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
			/* Convert the head/tail words just read to host endianness
			 * before merging the caller's data into them. */
			bool swapped_w0 = false;
			if (address & 3) {
				buf_bswap32(&albuff[0], &albuff[0], 4);
				swapped_w0 = true;
			}
			if ((address + (size * count)) & 3) {
				if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
					/* Don't double-swap if buffer start/end are within the same word */
				} else {
					buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
						&albuff[addrend_al - addrstart_al - 4], 4);
				}
			}
		}
		/* Copy data to be written into the aligned buffer (in host-endianness) */
		memcpy(&albuff[address & 3], buffer, size * count);
		/* Now we can write albuff in aligned uint32s. */
	}

	/* For big-endian targets, byte-swap the (aligned) payload in one pass. */
	if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
		buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);

	/* Write start address to A3 */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	/* Write the aligned buffer */
	if (xtensa->probe_lsddr32p != 0) {
		/* Fast path: SDDR32.P stores DDR through A3 and post-increments;
		 * after the first store, each DDREXEC write re-executes it. */
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			if (i == 0) {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
				xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
			}
		}
	} else {
		/* Slow path: explicit S32I via A3/A4 for each word. */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
			xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		}
	}

	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res == ERROR_OK) {
		/* Suppress DSR errors while probing: a failure here may only mean
		 * SDDR32.P is unsupported, which we handle below. */
		bool prev_suppress = xtensa->suppress_dsr_errors;
		xtensa->suppress_dsr_errors = true;
		res = xtensa_core_status_check(target);
		if (xtensa->probe_lsddr32p == -1)
			xtensa->probe_lsddr32p = 1;
		xtensa->suppress_dsr_errors = prev_suppress;
	}
	if (res != ERROR_OK) {
		if (xtensa->probe_lsddr32p != 0) {
			/* Disable fast memory access instructions and retry before reporting an error */
			LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
			xtensa->probe_lsddr32p = 0;
			res = xtensa_write_memory(target, address, size, count, buffer);
		} else {
			LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
				count * size, address);
		}
	} else {
		/* Invalidate ICACHE, writeback DCACHE if present */
		uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
		uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
		if (issue_ihi || issue_dhwb) {
			/* Use the smaller line size so both caches are fully covered. */
			uint32_t ilinesize = issue_ihi ?  xtensa->core_config->icache.line_size : UINT32_MAX;
			uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
			uint32_t linesize = MIN(ilinesize, dlinesize);
			uint32_t off = 0;
			adr = addrstart_al;

			while ((adr + off) < addrend_al) {
				if (off == 0) {
					/* Write start address to A3 */
					xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr);
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				}
				if (issue_ihi)
					xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
				if (issue_dhwb)
					xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
				off += linesize;
				if (off > 1020) {
					/* IHI, DHWB have 8-bit immediate operands (0..1020) */
					adr += off;
					off = 0;
				}
			}

			/* Execute cache WB/INV instructions */
			res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
			xtensa_core_status_check(target);
			if (res != ERROR_OK)
				LOG_TARGET_ERROR(target,
					"Error issuing cache writeback/invaldate instruction(s): %d",
					res);
		}
	}
	if (albuff != buffer)
		free(albuff);

	return res;
}
2220
2221 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2222 {
2223 /* xtensa_write_memory can handle everything. Just pass on to that. */
2224 return xtensa_write_memory(target, address, 1, count, buffer);
2225 }
2226
2227 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2228 {
2229 LOG_WARNING("not implemented yet");
2230 return ERROR_FAIL;
2231 }
2232
/**
 * Periodic poll callback: refresh power/core status from the debug module
 * and update target->state / target->debug_reason accordingly.
 *
 * Handles: debug-module/core reset detection, power loss, transition into
 * the halted state (including decoding DEBUGCAUSE into a debug reason and,
 * on NX, re-enabling imprecise exceptions), and detecting the end of an
 * active TRAX trace.
 *
 * @return ERROR_OK, ERROR_TARGET_NOT_EXAMINED when the debug module cannot
 *         be polled, or a propagated error code.
 */
int xtensa_poll(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return ERROR_TARGET_NOT_EXAMINED;
	}

	/* Read PWRSTAT, clearing the sticky "was reset" bits in the process. */
	int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET(xtensa) |
		PWRSTAT_COREWASRESET(xtensa));
	if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
		LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
			xtensa->dbg_mod.power_status.stat,
			PWRSTAT_DEBUGWASRESET(xtensa) | PWRSTAT_COREWASRESET(xtensa),
			xtensa->dbg_mod.power_status.stath);
	if (res != ERROR_OK)
		return res;

	if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
		/* A TAP reset wipes the SMP-break configuration; restore it. */
		LOG_TARGET_INFO(target, "Debug controller was reset.");
		res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
		if (res != ERROR_OK)
			return res;
	}
	if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
		LOG_TARGET_INFO(target, "Core was reset.");
	xtensa_dm_power_status_cache(&xtensa->dbg_mod);
	/* Enable JTAG, set reset if needed */
	res = xtensa_wakeup(target);
	if (res != ERROR_OK)
		return res;

	uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
	res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
		LOG_TARGET_DEBUG(target,
			"DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
			prev_dsr,
			xtensa->dbg_mod.core_status.dsr);
	if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET(xtensa)) {
		/* if RESET state is persistent */
		target->state = TARGET_RESET;
	} else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
		LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
			xtensa->dbg_mod.core_status.dsr,
			xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
		target->state = TARGET_UNKNOWN;
		/* Allow a configured number of polls for the core to come back
		 * online before declaring the target unexamined. */
		if (xtensa->come_online_probes_num == 0)
			target->examined = false;
		else
			xtensa->come_online_probes_num--;
	} else if (xtensa_is_stopped(target)) {
		if (target->state != TARGET_HALTED) {
			/* Newly-halted: decode why and refresh the register cache. */
			enum target_state oldstate = target->state;
			target->state = TARGET_HALTED;
			/* Examine why the target has been halted */
			target->debug_reason = DBG_REASON_DBGRQ;
			xtensa_fetch_all_regs(target);
			/* When setting debug reason DEBUGCAUSE events have the following
			 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
			/* Watchpoint and breakpoint events at the same time results in special
			 * debug reason: DBG_REASON_WPTANDBKPT. */
			uint32_t halt_cause = xtensa_cause_get(target);
			/* TODO: Add handling of DBG_REASON_EXC_CATCH */
			if (halt_cause & DEBUGCAUSE_IC)
				target->debug_reason = DBG_REASON_SINGLESTEP;
			if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
				if (halt_cause & DEBUGCAUSE_DB)
					target->debug_reason = DBG_REASON_WPTANDBKPT;
				else
					target->debug_reason = DBG_REASON_BREAKPOINT;
			} else if (halt_cause & DEBUGCAUSE_DB) {
				target->debug_reason = DBG_REASON_WATCHPOINT;
			}
			LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
				", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
				xtensa_reg_get(target, XT_REG_IDX_PC),
				target->debug_reason,
				oldstate);
			LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
				halt_cause,
				xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
				xtensa->dbg_mod.core_status.dsr);
			/* Acknowledge the pending debug-interrupt sources in the DSR. */
			xtensa_dm_core_status_clear(
				&xtensa->dbg_mod,
				OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
				OCDDSR_DEBUGINTTRAX |
				OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
			if (xtensa->core_config->core_type == XT_NX) {
				/* Enable imprecise exceptions while in halted state
				 * by clearing PS.DIEXC (written via DDR -> A3 -> PS). */
				xtensa_reg_val_t ps = xtensa_reg_get(target, XT_REG_IDX_PS);
				xtensa_reg_val_t newps = ps & ~(XT_PS_DIEXC_MSK);
				xtensa_mark_register_dirty(xtensa, XT_REG_IDX_PS);
				LOG_TARGET_DEBUG(target, "Enabling PS.DIEXC: 0x%08x -> 0x%08x", ps, newps);
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, newps);
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
				res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
				if (res != ERROR_OK) {
					LOG_TARGET_ERROR(target, "Failed to write PS.DIEXC (%d)!", res);
					return res;
				}
				xtensa_core_status_check(target);
			}
		}
	} else {
		target->debug_reason = DBG_REASON_NOTHALTED;
		if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
			target->state = TARGET_RUNNING;
			target->debug_reason = DBG_REASON_NOTHALTED;
		}
	}
	if (xtensa->trace_active) {
		/* Detect if tracing was active but has stopped. */
		struct xtensa_trace_status trace_status;
		res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
		if (res == ERROR_OK) {
			if (!(trace_status.stat & TRAXSTAT_TRACT)) {
				LOG_INFO("Detected end of trace.");
				if (trace_status.stat & TRAXSTAT_PCMTG)
					LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
				if (trace_status.stat & TRAXSTAT_PTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
				if (trace_status.stat & TRAXSTAT_CTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
				xtensa->trace_active = false;
			}
		}
	}
	return ERROR_OK;
}
2366
2367 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2368 {
2369 struct xtensa *xtensa = target_to_xtensa(target);
2370 unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
2371 unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2372 uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2373 uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2374 unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2375 unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
2376 int ret;
2377
2378 if (size > icache_line_size)
2379 return ERROR_FAIL;
2380
2381 if (issue_ihi || issue_dhwbi) {
2382 /* We're going to use A3 here */
2383 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
2384
2385 /* Write start address to A3 and invalidate */
2386 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2387 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2388 LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
2389 if (issue_dhwbi) {
2390 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
2391 if (!same_dc_line) {
2392 LOG_TARGET_DEBUG(target,
2393 "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2394 address + 4);
2395 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
2396 }
2397 }
2398 if (issue_ihi) {
2399 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
2400 if (!same_ic_line) {
2401 LOG_TARGET_DEBUG(target,
2402 "IHI second icache line for address "TARGET_ADDR_FMT,
2403 address + 4);
2404 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
2405 }
2406 }
2407
2408 /* Execute invalidate instructions */
2409 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2410 xtensa_core_status_check(target);
2411 if (ret != ERROR_OK) {
2412 LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
2413 return ret;
2414 }
2415 }
2416
2417 /* Write new instructions to memory */
2418 ret = target_write_buffer(target, address, size, buffer);
2419 if (ret != ERROR_OK) {
2420 LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2421 return ret;
2422 }
2423
2424 if (issue_dhwbi) {
2425 /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2426 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2427 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2428 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
2429 LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2430 if (!same_dc_line) {
2431 LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2432 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
2433 }
2434
2435 /* Execute invalidate instructions */
2436 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2437 xtensa_core_status_check(target);
2438 }
2439
2440 /* TODO: Handle L2 cache if present */
2441 return ret;
2442 }
2443
2444 static int xtensa_sw_breakpoint_add(struct target *target,
2445 struct breakpoint *breakpoint,
2446 struct xtensa_sw_breakpoint *sw_bp)
2447 {
2448 struct xtensa *xtensa = target_to_xtensa(target);
2449 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2450 if (ret != ERROR_OK) {
2451 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2452 return ret;
2453 }
2454
2455 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2456 sw_bp->oocd_bp = breakpoint;
2457
2458 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2459
2460 /* Underlying memory write will convert instruction endianness, don't do that here */
2461 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2462 if (ret != ERROR_OK) {
2463 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2464 return ret;
2465 }
2466
2467 return ERROR_OK;
2468 }
2469
2470 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2471 {
2472 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2473 if (ret != ERROR_OK) {
2474 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2475 return ret;
2476 }
2477 sw_bp->oocd_bp = NULL;
2478 return ERROR_OK;
2479 }
2480
2481 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2482 {
2483 struct xtensa *xtensa = target_to_xtensa(target);
2484 unsigned int slot;
2485
2486 if (breakpoint->type == BKPT_SOFT) {
2487 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2488 if (!xtensa->sw_brps[slot].oocd_bp ||
2489 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2490 break;
2491 }
2492 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2493 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2494 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2495 }
2496 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2497 if (ret != ERROR_OK) {
2498 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2499 return ret;
2500 }
2501 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2502 slot,
2503 breakpoint->address);
2504 return ERROR_OK;
2505 }
2506
2507 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2508 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2509 break;
2510 }
2511 if (slot == xtensa->core_config->debug.ibreaks_num) {
2512 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2513 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2514 }
2515
2516 xtensa->hw_brps[slot] = breakpoint;
2517 /* We will actually write the breakpoints when we resume the target. */
2518 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2519 slot,
2520 breakpoint->address);
2521
2522 return ERROR_OK;
2523 }
2524
2525 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2526 {
2527 struct xtensa *xtensa = target_to_xtensa(target);
2528 unsigned int slot;
2529
2530 if (breakpoint->type == BKPT_SOFT) {
2531 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2532 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2533 break;
2534 }
2535 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2536 LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2537 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2538 }
2539 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2540 if (ret != ERROR_OK) {
2541 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2542 return ret;
2543 }
2544 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2545 return ERROR_OK;
2546 }
2547
2548 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2549 if (xtensa->hw_brps[slot] == breakpoint)
2550 break;
2551 }
2552 if (slot == xtensa->core_config->debug.ibreaks_num) {
2553 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2554 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2555 }
2556 xtensa->hw_brps[slot] = NULL;
2557 if (xtensa->core_config->core_type == XT_NX)
2558 xtensa_reg_set(target, xtensa->nx_reg_idx[XT_NX_REG_IDX_IBREAKC0] + slot, 0);
2559 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2560 return ERROR_OK;
2561 }
2562
2563 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2564 {
2565 struct xtensa *xtensa = target_to_xtensa(target);
2566 unsigned int slot;
2567 xtensa_reg_val_t dbreakcval;
2568
2569 if (target->state != TARGET_HALTED) {
2570 LOG_TARGET_ERROR(target, "not halted");
2571 return ERROR_TARGET_NOT_HALTED;
2572 }
2573
2574 if (watchpoint->mask != WATCHPOINT_IGNORE_DATA_VALUE_MASK) {
2575 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2576 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2577 }
2578
2579 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2580 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2581 break;
2582 }
2583 if (slot == xtensa->core_config->debug.dbreaks_num) {
2584 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2585 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2586 }
2587
2588 /* Figure out value for dbreakc5..0
2589 * It's basically 0x3F with an incremental bit removed from the LSB for each extra length power of 2. */
2590 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2591 !IS_PWR_OF_2(watchpoint->length) ||
2592 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2593 LOG_TARGET_WARNING(
2594 target,
2595 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2596 " not supported by hardware.",
2597 watchpoint->length,
2598 watchpoint->address);
2599 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2600 }
2601 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2602
2603 if (watchpoint->rw == WPT_READ)
2604 dbreakcval |= BIT(30);
2605 if (watchpoint->rw == WPT_WRITE)
2606 dbreakcval |= BIT(31);
2607 if (watchpoint->rw == WPT_ACCESS)
2608 dbreakcval |= BIT(30) | BIT(31);
2609
2610 /* Write DBREAKA[slot] and DBCREAKC[slot] */
2611 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2612 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2613 xtensa->hw_wps[slot] = watchpoint;
2614 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2615 watchpoint->address);
2616 return ERROR_OK;
2617 }
2618
2619 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2620 {
2621 struct xtensa *xtensa = target_to_xtensa(target);
2622 unsigned int slot;
2623
2624 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2625 if (xtensa->hw_wps[slot] == watchpoint)
2626 break;
2627 }
2628 if (slot == xtensa->core_config->debug.dbreaks_num) {
2629 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2630 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2631 }
2632 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2633 xtensa->hw_wps[slot] = NULL;
2634 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2635 watchpoint->address);
2636 return ERROR_OK;
2637 }
2638
2639 int xtensa_start_algorithm(struct target *target,
2640 int num_mem_params, struct mem_param *mem_params,
2641 int num_reg_params, struct reg_param *reg_params,
2642 target_addr_t entry_point, target_addr_t exit_point,
2643 void *arch_info)
2644 {
2645 struct xtensa *xtensa = target_to_xtensa(target);
2646 struct xtensa_algorithm *algorithm_info = arch_info;
2647 int retval = ERROR_OK;
2648 bool usr_ps = false;
2649
2650 /* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
2651 * at the exit point */
2652
2653 if (target->state != TARGET_HALTED) {
2654 LOG_WARNING("Target not halted!");
2655 return ERROR_TARGET_NOT_HALTED;
2656 }
2657
2658 for (unsigned int i = 0; i < xtensa->core_cache->num_regs; i++) {
2659 struct reg *reg = &xtensa->core_cache->reg_list[i];
2660 buf_cpy(reg->value, xtensa->algo_context_backup[i], reg->size);
2661 }
2662 /* save debug reason, it will be changed */
2663 algorithm_info->ctx_debug_reason = target->debug_reason;
2664 /* write mem params */
2665 for (int i = 0; i < num_mem_params; i++) {
2666 if (mem_params[i].direction != PARAM_IN) {
2667 retval = target_write_buffer(target, mem_params[i].address,
2668 mem_params[i].size,
2669 mem_params[i].value);
2670 if (retval != ERROR_OK)
2671 return retval;
2672 }
2673 }
2674 /* write reg params */
2675 for (int i = 0; i < num_reg_params; i++) {
2676 if (reg_params[i].size > 32) {
2677 LOG_ERROR("BUG: not supported register size (%d)", reg_params[i].size);
2678 return ERROR_FAIL;
2679 }
2680 struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
2681 if (!reg) {
2682 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
2683 return ERROR_FAIL;
2684 }
2685 if (reg->size != reg_params[i].size) {
2686 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
2687 return ERROR_FAIL;
2688 }
2689 if (memcmp(reg_params[i].reg_name, "ps", 3)) {
2690 usr_ps = true;
2691 } else {
2692 unsigned int reg_id = xtensa->eps_dbglevel_idx;
2693 assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
2694 reg = &xtensa->core_cache->reg_list[reg_id];
2695 }
2696 xtensa_reg_set_value(reg, buf_get_u32(reg_params[i].value, 0, reg->size));
2697 reg->valid = 1;
2698 }
2699 /* ignore custom core mode if custom PS value is specified */
2700 if (!usr_ps) {
2701 unsigned int eps_reg_idx = xtensa->eps_dbglevel_idx;
2702 xtensa_reg_val_t ps = xtensa_reg_get(target, eps_reg_idx);
2703 enum xtensa_mode core_mode = XT_PS_RING_GET(ps);
2704 if (algorithm_info->core_mode != XT_MODE_ANY && algorithm_info->core_mode != core_mode) {
2705 LOG_DEBUG("setting core_mode: 0x%x", algorithm_info->core_mode);
2706 xtensa_reg_val_t new_ps = (ps & ~XT_PS_RING_MSK) | XT_PS_RING(algorithm_info->core_mode);
2707 /* save previous core mode */
2708 /* TODO: core_mode is not restored for now. Can be added to the end of wait_algorithm */
2709 algorithm_info->core_mode = core_mode;
2710 xtensa_reg_set(target, eps_reg_idx, new_ps);
2711 xtensa->core_cache->reg_list[eps_reg_idx].valid = 1;
2712 }
2713 }
2714
2715 return xtensa_resume(target, 0, entry_point, 1, 1);
2716 }
2717
/** Waits for an algorithm started by xtensa_start_algorithm() to finish.
 *
 * Blocks (up to timeout_ms) until the target halts at the exit breakpoint,
 * copies out the requested reg_params and mem_params, then restores the
 * register context saved by xtensa_start_algorithm() and the original debug
 * reason.  If the target does not halt on its own, it is forcibly halted and
 * ERROR_TARGET_TIMEOUT is returned after logging PC/PS for diagnosis.
 */
int xtensa_wait_algorithm(struct target *target,
	int num_mem_params, struct mem_param *mem_params,
	int num_reg_params, struct reg_param *reg_params,
	target_addr_t exit_point, unsigned int timeout_ms,
	void *arch_info)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct xtensa_algorithm *algorithm_info = arch_info;
	int retval = ERROR_OK;
	xtensa_reg_val_t pc;

	/* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
	 * at the exit point */

	retval = target_wait_state(target, TARGET_HALTED, timeout_ms);
	/* If the target fails to halt due to the breakpoint, force a halt */
	if (retval != ERROR_OK || target->state != TARGET_HALTED) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
		retval = target_wait_state(target, TARGET_HALTED, 500);
		if (retval != ERROR_OK)
			return retval;
		/* Log PC and PS so a hung algorithm can be diagnosed */
		LOG_TARGET_ERROR(target, "not halted %d, pc 0x%" PRIx32 ", ps 0x%" PRIx32, retval,
			xtensa_reg_get(target, XT_REG_IDX_PC),
			xtensa_reg_get(target, xtensa->eps_dbglevel_idx));
		return ERROR_TARGET_TIMEOUT;
	}
	/* The algorithm must have stopped exactly at its exit breakpoint */
	pc = xtensa_reg_get(target, XT_REG_IDX_PC);
	if (exit_point && pc != exit_point) {
		LOG_ERROR("failed algorithm halted at 0x%" PRIx32 ", expected " TARGET_ADDR_FMT, pc, exit_point);
		return ERROR_TARGET_TIMEOUT;
	}
	/* Copy core register values to reg_params[] */
	for (int i = 0; i < num_reg_params; i++) {
		if (reg_params[i].direction != PARAM_OUT) {
			struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
			if (!reg) {
				LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
				return ERROR_FAIL;
			}
			if (reg->size != reg_params[i].size) {
				LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
				return ERROR_FAIL;
			}
			buf_set_u32(reg_params[i].value, 0, 32, xtensa_reg_get_value(reg));
		}
	}
	/* Read memory values to mem_params */
	LOG_DEBUG("Read mem params");
	for (int i = 0; i < num_mem_params; i++) {
		LOG_DEBUG("Check mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
		if (mem_params[i].direction != PARAM_OUT) {
			LOG_DEBUG("Read mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
			retval = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	/* avoid gdb keep_alive warning */
	keep_alive();

	/* Restore the pre-algorithm register context saved by xtensa_start_algorithm().
	 * Iterated in reverse so dependent registers are handled last. */
	for (int i = xtensa->core_cache->num_regs - 1; i >= 0; i--) {
		struct reg *reg = &xtensa->core_cache->reg_list[i];
		if (i == XT_REG_IDX_PS) {
			continue;	/* restore mapped reg number of PS depends on NDEBUGLEVEL */
		} else if (i == XT_REG_IDX_DEBUGCAUSE) {
			/*FIXME: restoring DEBUGCAUSE causes exception when executing corresponding
			 * instruction in DIR */
			LOG_DEBUG("Skip restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
				xtensa->core_cache->reg_list[i].name,
				buf_get_u32(reg->value, 0, 32),
				buf_get_u32(xtensa->algo_context_backup[i], 0, 32));
			/* Restore the cached copy only; mark neither dirty nor valid so it
			 * is not pushed back to the target */
			buf_cpy(xtensa->algo_context_backup[i], reg->value, reg->size);
			xtensa->core_cache->reg_list[i].dirty = 0;
			xtensa->core_cache->reg_list[i].valid = 0;
		} else if (memcmp(xtensa->algo_context_backup[i], reg->value, reg->size / 8)) {
			/* Only registers the algorithm actually changed are restored/marked dirty */
			if (reg->size <= 32) {
				LOG_DEBUG("restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
					xtensa->core_cache->reg_list[i].name,
					buf_get_u32(reg->value, 0, reg->size),
					buf_get_u32(xtensa->algo_context_backup[i], 0, reg->size));
			} else if (reg->size <= 64) {
				LOG_DEBUG("restoring register %s: 0x%8.8" PRIx64 " -> 0x%8.8" PRIx64,
					xtensa->core_cache->reg_list[i].name,
					buf_get_u64(reg->value, 0, reg->size),
					buf_get_u64(xtensa->algo_context_backup[i], 0, reg->size));
			} else {
				LOG_DEBUG("restoring register %s %u-bits", xtensa->core_cache->reg_list[i].name, reg->size);
			}
			buf_cpy(xtensa->algo_context_backup[i], reg->value, reg->size);
			xtensa->core_cache->reg_list[i].dirty = 1;
			xtensa->core_cache->reg_list[i].valid = 1;
		}
	}
	target->debug_reason = algorithm_info->ctx_debug_reason;

	/* Push the restored context back to the target */
	retval = xtensa_write_dirty_registers(target);
	if (retval != ERROR_OK)
		LOG_ERROR("Failed to write dirty regs (%d)!", retval);

	return retval;
}
2823
2824 int xtensa_run_algorithm(struct target *target,
2825 int num_mem_params, struct mem_param *mem_params,
2826 int num_reg_params, struct reg_param *reg_params,
2827 target_addr_t entry_point, target_addr_t exit_point,
2828 unsigned int timeout_ms, void *arch_info)
2829 {
2830 int retval = xtensa_start_algorithm(target,
2831 num_mem_params, mem_params,
2832 num_reg_params, reg_params,
2833 entry_point, exit_point,
2834 arch_info);
2835
2836 if (retval == ERROR_OK) {
2837 retval = xtensa_wait_algorithm(target,
2838 num_mem_params, mem_params,
2839 num_reg_params, reg_params,
2840 exit_point, timeout_ms,
2841 arch_info);
2842 }
2843
2844 return retval;
2845 }
2846
/* Build the OpenOCD register cache for this target from the static core
 * register descriptors (xtensa_regs) plus the config-supplied optional/TIE
 * registers (xtensa->optregs).  Also derives xtensa->dbregs_num (highest debug
 * register number in use + 1), allocates the "empty" placeholder register list
 * used to answer requests for unknown registers, the contiguous (gdb g-packet
 * ordered) register list when available, and the per-register backup buffers
 * used by xtensa_start/wait_algorithm.
 *
 * Returns ERROR_OK on success; on any allocation failure, everything built so
 * far is released and ERROR_FAIL is returned.
 */
static int xtensa_build_reg_cache(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	unsigned int last_dbreg_num = 0;

	/* Core + optional counts should add up to the configured total; a
	 * mismatch is logged but not fatal */
	if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
		LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
			xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);

	struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));

	if (!reg_cache) {
		LOG_ERROR("Failed to alloc reg cache!");
		return ERROR_FAIL;
	}
	reg_cache->name = "Xtensa registers";
	reg_cache->next = NULL;
	/* Init reglist */
	unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
	struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
	if (!reg_list) {
		LOG_ERROR("Failed to alloc reg list!");
		goto fail;
	}
	xtensa->dbregs_num = 0;
	unsigned int didx = 0;
	/* Pass 0 fills in the fixed core registers, pass 1 the optional ones */
	for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
		struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
		unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
		for (unsigned int i = 0; i < listsize; i++, didx++) {
			reg_list[didx].exist = rlist[i].exist;
			reg_list[didx].name = rlist[i].name;
			reg_list[didx].size = 32;
			reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/);	/* make Clang Static Analyzer happy */
			if (!reg_list[didx].value) {
				LOG_ERROR("Failed to alloc reg list value!");
				goto fail;
			}
			reg_list[didx].dirty = false;
			reg_list[didx].valid = false;
			reg_list[didx].type = &xtensa_reg_type;
			reg_list[didx].arch_info = xtensa;
			/* Track the highest debug register number among existing regs */
			if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
				last_dbreg_num = rlist[i].dbreg_num;

			if (xtensa_extra_debug_log) {
				LOG_TARGET_DEBUG(target,
					"POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
					reg_list[didx].name,
					whichlist,
					reg_list[didx].exist,
					didx,
					rlist[i].type,
					rlist[i].dbreg_num);
			}
		}
	}

	xtensa->dbregs_num = last_dbreg_num + 1;
	reg_cache->reg_list = reg_list;
	reg_cache->num_regs = reg_list_size;

	LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
		xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);

	/* Construct empty-register list for handling unknown register requests */
	xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
	if (!xtensa->empty_regs) {
		LOG_TARGET_ERROR(target, "ERROR: Out of memory");
		goto fail;
	}
	for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
		/* Placeholder name "?0xNNNN": 7 chars + NUL fits the 8-byte buffer exactly */
		xtensa->empty_regs[i].name = calloc(8, sizeof(char));
		if (!xtensa->empty_regs[i].name) {
			LOG_TARGET_ERROR(target, "ERROR: Out of memory");
			goto fail;
		}
		sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
		xtensa->empty_regs[i].size = 32;
		xtensa->empty_regs[i].type = &xtensa_reg_type;
		xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/);	/* make Clang Static Analyzer happy */
		if (!xtensa->empty_regs[i].value) {
			LOG_ERROR("Failed to alloc empty reg list value!");
			goto fail;
		}
		xtensa->empty_regs[i].arch_info = xtensa;
	}

	/* Construct contiguous register list from contiguous descriptor list */
	if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
		xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
		if (!xtensa->contiguous_regs_list) {
			LOG_TARGET_ERROR(target, "ERROR: Out of memory");
			goto fail;
		}
		for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
			unsigned int j;
			/* Linear name lookup in the cache; O(n^2), but runs once at setup */
			for (j = 0; j < reg_cache->num_regs; j++) {
				if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
					/* Register number field is not filled above.
					   Here we are assigning the corresponding index from the contiguous reg list.
					   These indexes are in the same order with gdb g-packet request/response.
					   Some more changes may be required for sparse reg lists.
					*/
					reg_cache->reg_list[j].number = i;
					xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
					LOG_TARGET_DEBUG(target,
						"POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
						xtensa->contiguous_regs_list[i]->name,
						xtensa->contiguous_regs_desc[i]->dbreg_num);
					break;
				}
			}
			if (j == reg_cache->num_regs)
				LOG_TARGET_WARNING(target, "contiguous register %s not found",
					xtensa->contiguous_regs_desc[i]->name);
		}
	}

	/* Per-register backup buffers used by xtensa_start/wait_algorithm */
	xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
	if (!xtensa->algo_context_backup) {
		LOG_ERROR("Failed to alloc mem for algorithm context backup!");
		goto fail;
	}
	for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
		struct reg *reg = &reg_cache->reg_list[i];
		xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
		if (!xtensa->algo_context_backup[i]) {
			LOG_ERROR("Failed to alloc mem for algorithm context!");
			goto fail;
		}
	}
	xtensa->core_cache = reg_cache;
	if (cache_p)
		*cache_p = reg_cache;
	return ERROR_OK;

fail:
	/* Release everything allocated so far; sub-pointers may still be NULL
	 * (free(NULL) is a no-op) */
	if (reg_list) {
		for (unsigned int i = 0; i < reg_list_size; i++)
			free(reg_list[i].value);
		free(reg_list);
	}
	if (xtensa->empty_regs) {
		for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
			free((void *)xtensa->empty_regs[i].name);
			free(xtensa->empty_regs[i].value);
		}
		free(xtensa->empty_regs);
	}
	if (xtensa->algo_context_backup) {
		for (unsigned int i = 0; i < reg_cache->num_regs; i++)
			free(xtensa->algo_context_backup[i]);
		free(xtensa->algo_context_backup);
	}
	free(reg_cache);

	return ERROR_FAIL;
}
3007
/* Parse the op-list portion of a gdb qxtreg/Qxtreg packet and queue the raw
 * TIE instruction sequences for execution.
 *
 * opstr format: ":<oplen>:<byte>:<byte>...[:<oplen>:<byte>...]" (hex fields).
 * Each sequence is only queued here (via xtensa_queue_exec_ins_wide, which
 * handles endian swapping); the caller executes the queue afterwards.
 *
 * Returns ERROR_OK once at least one well-formed sequence has been queued;
 * returns ERROR_COMMAND_ARGUMENT_INVALID if none was (empty/malformed list).
 * A malformed entry after a valid one stops parsing but leaves ERROR_OK set.
 */
static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
	/* Process op[] list */
	while (opstr && (*opstr == ':')) {
		uint8_t ops[32];
		unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
		/* Reject sequences longer than the ops[] buffer */
		if (oplen > 32) {
			LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
			break;
		}
		unsigned int i = 0;
		/* Collect exactly oplen opcode bytes, each ':'-prefixed hex */
		while ((i < oplen) && opstr && (*opstr == ':'))
			ops[i++] = strtoul(opstr + 1, &opstr, 16);
		if (i != oplen) {
			LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
			break;
		}

		/* Trace the sequence: max 27-char prefix + 32*3 hex bytes + NUL < 128 */
		char insn_buf[128];
		sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
		for (i = 0; i < oplen; i++)
			sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
		LOG_TARGET_DEBUG(target, "%s", insn_buf);
		xtensa_queue_exec_ins_wide(xtensa, ops, oplen);	/* Handles endian-swap */
		status = ERROR_OK;
	}
	return status;
}
3038
/* Handle the gdb "qxtreg" (read) / "Qxtreg" (write) custom packets, which
 * access TIE registers that are not part of the normal register cache.
 *
 * The gdb-supplied TIE instruction sequences transfer the register value via a
 * user-designated "spill" memory area: the spill region is saved, (for writes)
 * the new value is stored there, A4 is pointed at the spill area, the TIE
 * sequences run, then A4 and the spill memory are restored.  The textual
 * result ("OK", hex value, or an error code from xt_qerr[]) is written to
 * *response_p.
 */
static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	bool iswrite = (packet[0] == 'Q');
	enum xtensa_qerr_e error;

	/* Read/write TIE register. Requires spill location.
	 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
	 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
	 */
	if (!(xtensa->spill_buf)) {
		LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
		error = XT_QERR_FAIL;
		goto xtensa_gdbqc_qxtreg_fail;
	}

	/* Parse "<num>:<len>:"; both fields are hex */
	char *delim;
	uint32_t regnum = strtoul(packet + 6, &delim, 16);
	if (*delim != ':') {
		LOG_ERROR("Malformed qxtreg packet");
		error = XT_QERR_INVAL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	uint32_t reglen = strtoul(delim + 1, &delim, 16);
	if (*delim != ':') {
		LOG_ERROR("Malformed qxtreg packet");
		error = XT_QERR_INVAL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
	memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
	LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
	/* The hex-encoded response (2 chars/byte + NUL) must fit the response buffer */
	if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
		LOG_ERROR("TIE register too large");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}

	/* (1) Save spill memory, (1.5) [if write then store value to spill location],
	 * (2) read old a4, (3) write spill address to a4.
	 * NOTE: ensure a4 is restored properly by all error handling logic
	 */
	/* Byte accesses if the spill area is unaligned, word accesses otherwise */
	unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
	int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
		xtensa->spill_bytes / memop_size, xtensa->spill_buf);
	if (status != ERROR_OK) {
		LOG_ERROR("Spill memory save");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	if (iswrite) {
		/* Extract value and store in spill memory */
		unsigned int b = 0;
		char *valbuf = strchr(delim, '=');
		if (!(valbuf && (*valbuf == '='))) {
			LOG_ERROR("Malformed Qxtreg packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdbqc_qxtreg_fail;
		}
		valbuf++;
		/* Hex-decode the '=' payload two characters per byte */
		while (*valbuf && *(valbuf + 1)) {
			char bytestr[3] = { 0, 0, 0 };
			strncpy(bytestr, valbuf, 2);
			regbuf[b++] = strtoul(bytestr, NULL, 16);
			valbuf += 2;
		}
		if (b != reglen) {
			LOG_ERROR("Malformed Qxtreg packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdbqc_qxtreg_fail;
		}
		status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
			reglen / memop_size, regbuf);
		if (status != ERROR_OK) {
			LOG_ERROR("TIE value store");
			error = XT_QERR_MEM;
			goto xtensa_gdbqc_qxtreg_fail;
		}
	}
	/* Point A4 at the spill area so the TIE load/store sequences can use it */
	xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, xtensa->spill_loc);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));

	int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);

	/* Restore a4 but not yet spill memory. Execute it all... */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, orig_a4);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
	status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (status != ERROR_OK) {
		LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
		tieop_status = status;
	}
	status = xtensa_core_status_check(target);
	if (status != ERROR_OK) {
		LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
		tieop_status = status;
	}

	if (tieop_status == ERROR_OK) {
		if (iswrite) {
			/* TIE write succeeded; send OK */
			strcpy(*response_p, "OK");
		} else {
			/* TIE read succeeded; copy result from spill memory */
			/* NOTE(review): count here is 'reglen' units of 'memop_size' bytes,
			 * unlike the writes above which use 'reglen / memop_size' units --
			 * confirm this asymmetry is intentional for word-sized accesses */
			status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
			if (status != ERROR_OK) {
				LOG_TARGET_ERROR(target, "TIE result read");
				tieop_status = status;
			}
			/* Hex-encode the value into the response, NUL-terminated */
			unsigned int i;
			for (i = 0; i < reglen; i++)
				sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
			*(*response_p + 2 * i) = '\0';
			LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
		}
	}

	/* Restore spill memory first, then report any previous errors */
	status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
		xtensa->spill_bytes / memop_size, xtensa->spill_buf);
	if (status != ERROR_OK) {
		LOG_ERROR("Spill memory restore");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	if (tieop_status != ERROR_OK) {
		LOG_ERROR("TIE execution");
		error = XT_QERR_FAIL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	return ERROR_OK;

xtensa_gdbqc_qxtreg_fail:
	strcpy(*response_p, xt_qerr[error].chrval);
	return xt_qerr[error].intval;
}
3176
3177 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
3178 {
3179 struct xtensa *xtensa = target_to_xtensa(target);
3180 enum xtensa_qerr_e error;
3181 if (!packet || !response_p) {
3182 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
3183 return ERROR_FAIL;
3184 }
3185
3186 *response_p = xtensa->qpkt_resp;
3187 if (strncmp(packet, "qxtn", 4) == 0) {
3188 strcpy(*response_p, "OpenOCD");
3189 return ERROR_OK;
3190 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
3191 return ERROR_OK;
3192 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
3193 /* Confirm host cache params match core .cfg file */
3194 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
3195 &xtensa->core_config->icache : &xtensa->core_config->dcache;
3196 unsigned int line_size = 0, size = 0, way_count = 0;
3197 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
3198 if ((cachep->line_size != line_size) ||
3199 (cachep->size != size) ||
3200 (cachep->way_count != way_count)) {
3201 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
3202 cachep == &xtensa->core_config->icache ? 'I' : 'D');
3203 }
3204 strcpy(*response_p, "OK");
3205 return ERROR_OK;
3206 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
3207 /* Confirm host IRAM/IROM params match core .cfg file */
3208 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
3209 &xtensa->core_config->iram : &xtensa->core_config->irom;
3210 unsigned int base = 0, size = 0, i;
3211 char *pkt = (char *)&packet[7];
3212 do {
3213 pkt++;
3214 size = strtoul(pkt, &pkt, 16);
3215 pkt++;
3216 base = strtoul(pkt, &pkt, 16);
3217 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
3218 for (i = 0; i < memp->count; i++) {
3219 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
3220 break;
3221 }
3222 if (i == memp->count) {
3223 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
3224 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
3225 break;
3226 }
3227 for (i = 0; i < 11; i++) {
3228 pkt++;
3229 strtoul(pkt, &pkt, 16);
3230 }
3231 } while (pkt && (pkt[0] == ','));
3232 strcpy(*response_p, "OK");
3233 return ERROR_OK;
3234 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
3235 /* Confirm host EXCM_LEVEL matches core .cfg file */
3236 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
3237 if (!xtensa->core_config->high_irq.enabled ||
3238 (excm_level != xtensa->core_config->high_irq.excm_level))
3239 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
3240 strcpy(*response_p, "OK");
3241 return ERROR_OK;
3242 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
3243 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
3244 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
3245 strcpy(*response_p, "OK");
3246 return ERROR_OK;
3247 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
3248 char *delim;
3249 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
3250 if (*delim != ':') {
3251 LOG_ERROR("Malformed Qxtspill packet");
3252 error = XT_QERR_INVAL;
3253 goto xtensa_gdb_query_custom_fail;
3254 }
3255 xtensa->spill_loc = spill_loc;
3256 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
3257 if (xtensa->spill_buf)
3258 free(xtensa->spill_buf);
3259 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
3260 if (!xtensa->spill_buf) {
3261 LOG_ERROR("Spill buf alloc");
3262 error = XT_QERR_MEM;
3263 goto xtensa_gdb_query_custom_fail;
3264 }
3265 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
3266 strcpy(*response_p, "OK");
3267 return ERROR_OK;
3268 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
3269 return xtensa_gdbqc_qxtreg(target, packet, response_p);
3270 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
3271 (strncmp(packet, "qxtftie", 7) == 0) ||
3272 (strncmp(packet, "qxtstie", 7) == 0)) {
3273 /* Return empty string to indicate trace, TIE wire debug are unsupported */
3274 strcpy(*response_p, "");
3275 return ERROR_OK;
3276 }
3277
3278 /* Warn for all other queries, but do not return errors */
3279 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
3280 strcpy(*response_p, "");
3281 return ERROR_OK;
3282
3283 xtensa_gdb_query_custom_fail:
3284 strcpy(*response_p, xt_qerr[error].chrval);
3285 return xt_qerr[error].intval;
3286 }
3287
3288 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
3289 const struct xtensa_debug_module_config *dm_cfg)
3290 {
3291 target->arch_info = xtensa;
3292 xtensa->common_magic = XTENSA_COMMON_MAGIC;
3293 xtensa->target = target;
3294 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
3295
3296 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
3297 if (!xtensa->core_config) {
3298 LOG_ERROR("Xtensa configuration alloc failed\n");
3299 return ERROR_FAIL;
3300 }
3301
3302 /* Default cache settings are disabled with 1 way */
3303 xtensa->core_config->icache.way_count = 1;
3304 xtensa->core_config->dcache.way_count = 1;
3305
3306 /* chrval: AR3/AR4 register names will change with window mapping.
3307 * intval: tracks whether scratch register was set through gdb P packet.
3308 */
3309 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
3310 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
3311 if (!xtensa->scratch_ars[s].chrval) {
3312 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
3313 free(xtensa->scratch_ars[f].chrval);
3314 free(xtensa->core_config);
3315 LOG_ERROR("Xtensa scratch AR alloc failed\n");
3316 return ERROR_FAIL;
3317 }
3318 xtensa->scratch_ars[s].intval = false;
3319 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
3320 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
3321 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
3322 }
3323
3324 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
3325 }
3326
3327 void xtensa_set_permissive_mode(struct target *target, bool state)
3328 {
3329 target_to_xtensa(target)->permissive_mode = state;
3330 }
3331
3332 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
3333 {
3334 struct xtensa *xtensa = target_to_xtensa(target);
3335
3336 xtensa->come_online_probes_num = 3;
3337 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
3338 if (!xtensa->hw_brps) {
3339 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
3340 return ERROR_FAIL;
3341 }
3342 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
3343 if (!xtensa->hw_wps) {
3344 free(xtensa->hw_brps);
3345 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
3346 return ERROR_FAIL;
3347 }
3348 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
3349 if (!xtensa->sw_brps) {
3350 free(xtensa->hw_brps);
3351 free(xtensa->hw_wps);
3352 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
3353 return ERROR_FAIL;
3354 }
3355
3356 xtensa->spill_loc = 0xffffffff;
3357 xtensa->spill_bytes = 0;
3358 xtensa->spill_buf = NULL;
3359 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
3360
3361 return xtensa_build_reg_cache(target);
3362 }
3363
/* Release the register cache and all per-register allocations: the cached
 * register values, the algorithm context backup, the placeholder ("empty")
 * register metadata, and the optional register descriptors.  All freed
 * pointers in the xtensa struct are reset to NULL. */
static void xtensa_free_reg_cache(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg_cache *cache = xtensa->core_cache;

	if (cache) {
		/* Detach from the target's cache list before freeing */
		register_unlink_cache(&target->reg_cache, cache);
		for (unsigned int i = 0; i < cache->num_regs; i++) {
			/* NOTE(review): assumes algo_context_backup was allocated
			 * alongside the cache with num_regs entries -- confirm in
			 * xtensa_build_reg_cache() */
			free(xtensa->algo_context_backup[i]);
			free(cache->reg_list[i].value);
		}
		free(xtensa->algo_context_backup);
		free(cache->reg_list);
		free(cache);
	}
	xtensa->core_cache = NULL;
	xtensa->algo_context_backup = NULL;

	if (xtensa->empty_regs) {
		for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
			/* name is heap-allocated; cast away const for free() */
			free((void *)xtensa->empty_regs[i].name);
			free(xtensa->empty_regs[i].value);
		}
		free(xtensa->empty_regs);
	}
	xtensa->empty_regs = NULL;
	if (xtensa->optregs) {
		for (unsigned int i = 0; i < xtensa->num_optregs; i++)
			free((void *)xtensa->optregs[i].name);
		free(xtensa->optregs);
	}
	xtensa->optregs = NULL;
}
3397
3398 void xtensa_target_deinit(struct target *target)
3399 {
3400 struct xtensa *xtensa = target_to_xtensa(target);
3401
3402 LOG_DEBUG("start");
3403
3404 if (target_was_examined(target)) {
3405 int ret = xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, OCDDCR_ENABLEOCD);
3406 if (ret != ERROR_OK) {
3407 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
3408 return;
3409 }
3410 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
3411 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
3412 if (ret != ERROR_OK) {
3413 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
3414 return;
3415 }
3416 xtensa_dm_deinit(&xtensa->dbg_mod);
3417 }
3418 xtensa_free_reg_cache(target);
3419 free(xtensa->hw_brps);
3420 free(xtensa->hw_wps);
3421 free(xtensa->sw_brps);
3422 if (xtensa->spill_buf) {
3423 free(xtensa->spill_buf);
3424 xtensa->spill_buf = NULL;
3425 }
3426 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
3427 free(xtensa->scratch_ars[s].chrval);
3428 free(xtensa->core_config);
3429 }
3430
/* Report the GDB architecture name; identical for every Xtensa target. */
const char *xtensa_get_gdb_arch(struct target *target)
{
	(void)target;	/* the architecture string is target-independent */
	return "xtensa";
}
3435
/* exe <ascii-encoded hexadecimal instruction bytes>
 *
 * Execute an arbitrary instruction stub on the halted target.  The single
 * argument is an even-length hex string (< 64 chars, i.e. up to 31 bytes of
 * opcode).  GDB is responsible for state save/restore; here we only preserve
 * EXCCAUSE and CPENABLE around the stub and enable all coprocessors for its
 * duration.  Returns the queue/core status of the execution. */
static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Process ascii-encoded hex byte string */
	const char *parm = CMD_ARGV[0];
	unsigned int parm_len = strlen(parm);
	if ((parm_len >= 64) || (parm_len & 1)) {
		LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
		return ERROR_FAIL;
	}

	/* Decode two hex chars per opcode byte into ops[] */
	uint8_t ops[32];
	memset(ops, 0, 32);
	unsigned int oplen = parm_len / 2;
	char encoded_byte[3] = { 0, 0, 0 };
	for (unsigned int i = 0; i < oplen; i++) {
		encoded_byte[0] = *parm++;
		encoded_byte[1] = *parm++;
		ops[i] = strtoul(encoded_byte, NULL, 16);
	}

	/* GDB must handle state save/restore.
	 * Flush reg cache in case spill location is in an AR
	 * Update CPENABLE only for this execution; later restore cached copy
	 * Keep a copy of exccause in case executed code triggers an exception
	 */
	int status = xtensa_write_dirty_registers(target);
	if (status != ERROR_OK) {
		LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
		return ERROR_FAIL;
	}
	xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
	xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
	xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
	/* Set CPENABLE to all-ones via A3 (DDR -> A3 -> CPENABLE), then restore
	 * A3's original value the same way */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
		xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));

	/* Queue instruction list and execute everything */
	LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
	xtensa_queue_exec_ins_wide(xtensa, ops, oplen);	/* Handles endian-swap */
	status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (status != ERROR_OK) {
		LOG_TARGET_ERROR(target, "exec: queue error %d", status);
	} else {
		status = xtensa_core_status_check(target);
		if (status != ERROR_OK)
			LOG_TARGET_ERROR(target, "exec: status error %d", status);
	}

	/* Reread register cache and restore saved regs after instruction execution */
	if (xtensa_fetch_all_regs(target) != ERROR_OK)
		LOG_TARGET_ERROR(target, "post-exec: register fetch error");
	if (status != ERROR_OK) {
		/* Extra debug info: report the exception cause of the failed stub */
		LOG_TARGET_ERROR(target, "post-exec: EXCCAUSE 0x%02" PRIx32,
			xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
	}
	xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
	xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	return status;
}
3505
3506 COMMAND_HANDLER(xtensa_cmd_exe)
3507 {
3508 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3509 }
3510
3511 /* xtdef <name> */
3512 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3513 {
3514 if (CMD_ARGC != 1)
3515 return ERROR_COMMAND_SYNTAX_ERROR;
3516
3517 const char *core_name = CMD_ARGV[0];
3518 if (strcasecmp(core_name, "LX") == 0) {
3519 xtensa->core_config->core_type = XT_LX;
3520 } else if (strcasecmp(core_name, "NX") == 0) {
3521 xtensa->core_config->core_type = XT_NX;
3522 } else {
3523 LOG_ERROR("xtdef [LX|NX]\n");
3524 return ERROR_COMMAND_SYNTAX_ERROR;
3525 }
3526 return ERROR_OK;
3527 }
3528
3529 COMMAND_HANDLER(xtensa_cmd_xtdef)
3530 {
3531 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3532 target_to_xtensa(get_current_target(CMD_CTX)));
3533 }
3534
/* Validate that an "xtopt" value lies within [min..max].
 * Logs an error naming the offending option and returns false when out of
 * range; returns true otherwise.  @opt is const-qualified since all callers
 * pass string literals that are never modified here. */
static inline bool xtensa_cmd_xtopt_legal_val(const char *opt, int val, int min, int max)
{
	if ((val < min) || (val > max)) {
		LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
		return false;
	}
	return true;
}
3543
/* xtopt <name> <value>
 *
 * Record one core-configuration option into xtensa->core_config (presumably
 * issued by the auto-generated xtensa-core-XXX.cfg script -- see warnings
 * elsewhere in this file).  Values are range-checked per option; unknown
 * option names are ignored with a warning so newer .cfg files still load. */
COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
{
	if (CMD_ARGC != 2)
		return ERROR_COMMAND_SYNTAX_ERROR;

	const char *opt_name = CMD_ARGV[0];
	int opt_val = strtol(CMD_ARGV[1], NULL, 0);
	if (strcasecmp(opt_name, "arnum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->aregs_num = opt_val;
	} else if (strcasecmp(opt_name, "windowed") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->windowed = opt_val;
	} else if (strcasecmp(opt_name, "cpenable") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->coproc = opt_val;
	} else if (strcasecmp(opt_name, "exceptions") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->exceptions = opt_val;
	} else if (strcasecmp(opt_name, "intnum") == 0) {
		/* intnum == 0 disables interrupt support entirely */
		if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->irq.enabled = (opt_val > 0);
		xtensa->core_config->irq.irq_num = opt_val;
	} else if (strcasecmp(opt_name, "hipriints") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->high_irq.enabled = opt_val;
	} else if (strcasecmp(opt_name, "excmlevel") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		/* excmlevel only makes sense with high-priority interrupts; the
		 * .cfg must set hipriints before this option */
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_ERROR("xtopt excmlevel requires hipriints\n");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->high_irq.excm_level = opt_val;
	} else if (strcasecmp(opt_name, "intlevels") == 0) {
		/* LX and NX cores use different valid ranges */
		if (xtensa->core_config->core_type == XT_LX) {
			if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		} else {
			if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_ERROR("xtopt intlevels requires hipriints\n");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->high_irq.level_num = opt_val;
	} else if (strcasecmp(opt_name, "debuglevel") == 0) {
		if (xtensa->core_config->core_type == XT_LX) {
			if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		} else {
			/* NX accepts only debuglevel 0 */
			if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->debug.enabled = 1;
		xtensa->core_config->debug.irq_level = opt_val;
	} else if (strcasecmp(opt_name, "ibreaknum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.ibreaks_num = opt_val;
	} else if (strcasecmp(opt_name, "dbreaknum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.dbreaks_num = opt_val;
	} else if (strcasecmp(opt_name, "tracemem") == 0) {
		/* tracemem == 0 disables trace support */
		if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->trace.mem_sz = opt_val;
		xtensa->core_config->trace.enabled = (opt_val > 0);
	} else if (strcasecmp(opt_name, "tracememrev") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->trace.reversed_mem_access = opt_val;
	} else if (strcasecmp(opt_name, "perfcount") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.perfcount_num = opt_val;
	} else {
		/* Ignore (don't fail on) unknown options for forward compatibility */
		LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
		return ERROR_OK;
	}

	return ERROR_OK;
}
3636
3637 COMMAND_HANDLER(xtensa_cmd_xtopt)
3638 {
3639 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3640 target_to_xtensa(get_current_target(CMD_CTX)));
3641 }
3642
3643 /* xtmem <type> [parameters] */
3644 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3645 {
3646 struct xtensa_cache_config *cachep = NULL;
3647 struct xtensa_local_mem_config *memp = NULL;
3648 int mem_access = 0;
3649 bool is_dcache = false;
3650
3651 if (CMD_ARGC == 0) {
3652 LOG_ERROR("xtmem <type> [parameters]\n");
3653 return ERROR_COMMAND_SYNTAX_ERROR;
3654 }
3655
3656 const char *mem_name = CMD_ARGV[0];
3657 if (strcasecmp(mem_name, "icache") == 0) {
3658 cachep = &xtensa->core_config->icache;
3659 } else if (strcasecmp(mem_name, "dcache") == 0) {
3660 cachep = &xtensa->core_config->dcache;
3661 is_dcache = true;
3662 } else if (strcasecmp(mem_name, "l2cache") == 0) {
3663 /* TODO: support L2 cache */
3664 } else if (strcasecmp(mem_name, "l2addr") == 0) {
3665 /* TODO: support L2 cache */
3666 } else if (strcasecmp(mem_name, "iram") == 0) {
3667 memp = &xtensa->core_config->iram;
3668 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3669 } else if (strcasecmp(mem_name, "dram") == 0) {
3670 memp = &xtensa->core_config->dram;
3671 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3672 } else if (strcasecmp(mem_name, "sram") == 0) {
3673 memp = &xtensa->core_config->sram;
3674 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3675 } else if (strcasecmp(mem_name, "irom") == 0) {
3676 memp = &xtensa->core_config->irom;
3677 mem_access = XT_MEM_ACCESS_READ;
3678 } else if (strcasecmp(mem_name, "drom") == 0) {
3679 memp = &xtensa->core_config->drom;
3680 mem_access = XT_MEM_ACCESS_READ;
3681 } else if (strcasecmp(mem_name, "srom") == 0) {
3682 memp = &xtensa->core_config->srom;
3683 mem_access = XT_MEM_ACCESS_READ;
3684 } else {
3685 LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3686 return ERROR_COMMAND_ARGUMENT_INVALID;
3687 }
3688
3689 if (cachep) {
3690 if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
3691 LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
3692 return ERROR_COMMAND_SYNTAX_ERROR;
3693 }
3694 cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3695 cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3696 cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3697 cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3698 strtoul(CMD_ARGV[4], NULL, 0) : 0;
3699 } else if (memp) {
3700 if (CMD_ARGC != 3) {
3701 LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
3702 return ERROR_COMMAND_SYNTAX_ERROR;
3703 }
3704 struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3705 memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3706 memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3707 memcfgp->access = mem_access;
3708 memp->count++;
3709 }
3710
3711 return ERROR_OK;
3712 }
3713
3714 COMMAND_HANDLER(xtensa_cmd_xtmem)
3715 {
3716 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3717 target_to_xtensa(get_current_target(CMD_CTX)));
3718 }
3719
3720 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
3721 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3722 {
3723 if (CMD_ARGC != 4) {
3724 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3725 return ERROR_COMMAND_SYNTAX_ERROR;
3726 }
3727
3728 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3729 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3730 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3731 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3732
3733 if ((nfgseg > 32)) {
3734 LOG_ERROR("<nfgseg> must be within [0..32]\n");
3735 return ERROR_COMMAND_ARGUMENT_INVALID;
3736 } else if (minsegsize & (minsegsize - 1)) {
3737 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3738 return ERROR_COMMAND_ARGUMENT_INVALID;
3739 } else if (lockable > 1) {
3740 LOG_ERROR("<lockable> must be 0 or 1\n");
3741 return ERROR_COMMAND_ARGUMENT_INVALID;
3742 } else if (execonly > 1) {
3743 LOG_ERROR("<execonly> must be 0 or 1\n");
3744 return ERROR_COMMAND_ARGUMENT_INVALID;
3745 }
3746
3747 xtensa->core_config->mpu.enabled = true;
3748 xtensa->core_config->mpu.nfgseg = nfgseg;
3749 xtensa->core_config->mpu.minsegsize = minsegsize;
3750 xtensa->core_config->mpu.lockable = lockable;
3751 xtensa->core_config->mpu.execonly = execonly;
3752 return ERROR_OK;
3753 }
3754
3755 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3756 {
3757 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3758 target_to_xtensa(get_current_target(CMD_CTX)));
3759 }
3760
3761 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
3762 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3763 {
3764 if (CMD_ARGC != 2) {
3765 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3766 return ERROR_COMMAND_SYNTAX_ERROR;
3767 }
3768
3769 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3770 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3771 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3772 LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3773 return ERROR_COMMAND_ARGUMENT_INVALID;
3774 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3775 LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3776 return ERROR_COMMAND_ARGUMENT_INVALID;
3777 }
3778
3779 xtensa->core_config->mmu.enabled = true;
3780 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3781 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3782 return ERROR_OK;
3783 }
3784
3785 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3786 {
3787 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3788 target_to_xtensa(get_current_target(CMD_CTX)));
3789 }
3790
/* xtregs <numregs>
 * xtreg <regname> <regnum>
 *
 * One-argument form pre-sizes the register tables ("xtregs <numregs>");
 * two-argument form registers a single named register.  Core registers
 * (present in xtensa_regs[]) are flagged as existing; all others become
 * "optional" registers whose type is decoded from the regnum encoding. */
COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
{
	if (CMD_ARGC == 1) {
		/* "xtregs <numregs>": reset counters and allocate the optreg table */
		int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
		if ((numregs <= 0) || (numregs > UINT16_MAX)) {
			LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
			LOG_ERROR("xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
				numregs, xtensa->genpkt_regs_num);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		xtensa->total_regs_num = numregs;
		xtensa->core_regs_num = 0;
		xtensa->num_optregs = 0;
		/* A little more memory than required, but saves a second initialization pass */
		xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
		if (!xtensa->optregs) {
			LOG_ERROR("Failed to allocate xtensa->optregs!");
			return ERROR_FAIL;
		}
		return ERROR_OK;
	} else if (CMD_ARGC != 2) {
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	/* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
	 * if general register (g-packet) requests or contiguous register maps are supported */
	if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
		xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
		if (!xtensa->contiguous_regs_desc) {
			LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
			return ERROR_FAIL;
		}
	}

	const char *regname = CMD_ARGV[0];
	unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
	if (regnum > UINT16_MAX) {
		LOG_ERROR("<regnum> must be a 16-bit number");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	/* Reject registrations beyond the count declared via "xtregs" */
	if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
		if (xtensa->total_regs_num)
			LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
				regname, regnum,
				xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
		else
			LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
				regname, regnum);
		return ERROR_FAIL;
	}

	/* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
	struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
	bool is_extended_reg = true;
	unsigned int ridx;
	for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
		if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
			/* Flag core register as defined */
			rptr = &xtensa_regs[ridx];
			xtensa->core_regs_num++;
			is_extended_reg = false;
			break;
		}
	}

	rptr->exist = true;
	if (is_extended_reg) {
		/* Register ID, debugger-visible register ID */
		rptr->name = strdup(CMD_ARGV[0]);
		rptr->dbreg_num = regnum;
		rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
		xtensa->num_optregs++;

		/* Register type is encoded in the upper bits of regnum */
		if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
			rptr->type = XT_REG_GENERAL;
		} else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
			rptr->type = XT_REG_USER;
		} else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
			rptr->type = XT_REG_FR;
		} else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
			rptr->type = XT_REG_SPECIAL;
		} else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
			/* WARNING: For these registers, regnum points to the
			 * index of the corresponding ARx registers, NOT to
			 * the processor register number! */
			rptr->type = XT_REG_RELGEN;
			rptr->reg_num += XT_REG_IDX_ARFIRST;
			rptr->dbreg_num += XT_REG_IDX_ARFIRST;
		} else if ((regnum & XT_REG_TIE_MASK) != 0) {
			rptr->type = XT_REG_TIE;
		} else {
			rptr->type = XT_REG_OTHER;
		}

		/* Register flags: registers that must never be read directly */
		if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
			(strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
			(strcmp(rptr->name, "intclear") == 0))
			rptr->flags = XT_REGF_NOREAD;
		else
			rptr->flags = 0;

		/* On LX, remember which EPSx register matches the debug IRQ level */
		if (rptr->reg_num == (XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level) &&
			xtensa->core_config->core_type == XT_LX && rptr->type == XT_REG_SPECIAL) {
			xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
			LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
		}
		if (xtensa->core_config->core_type == XT_NX) {
			/* Cache the cache-index of NX-specific registers for fast lookup */
			enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_NUM;
			if (strcmp(rptr->name, "ibreakc0") == 0)
				idx = XT_NX_REG_IDX_IBREAKC0;
			else if (strcmp(rptr->name, "wb") == 0)
				idx = XT_NX_REG_IDX_WB;
			else if (strcmp(rptr->name, "ms") == 0)
				idx = XT_NX_REG_IDX_MS;
			else if (strcmp(rptr->name, "ievec") == 0)
				idx = XT_NX_REG_IDX_IEVEC;
			else if (strcmp(rptr->name, "ieextern") == 0)
				idx = XT_NX_REG_IDX_IEEXTERN;
			else if (strcmp(rptr->name, "mesr") == 0)
				idx = XT_NX_REG_IDX_MESR;
			else if (strcmp(rptr->name, "mesrclr") == 0)
				idx = XT_NX_REG_IDX_MESRCLR;
			if (idx < XT_NX_REG_IDX_NUM) {
				if (xtensa->nx_reg_idx[idx] != 0) {
					LOG_ERROR("nx_reg_idx[%d] previously set to %d",
						idx, xtensa->nx_reg_idx[idx]);
					return ERROR_FAIL;
				}
				xtensa->nx_reg_idx[idx] = XT_NUM_REGS + xtensa->num_optregs - 1;
				LOG_DEBUG("NX reg %s: index %d (%d)",
					rptr->name, xtensa->nx_reg_idx[idx], idx);
			}
		}
	} else if (strcmp(rptr->name, "cpenable") == 0) {
		xtensa->core_config->coproc = true;
	}

	/* Build out list of contiguous registers in specified order */
	unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
	if (xtensa->contiguous_regs_desc) {
		assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
		xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
	}
	if (xtensa_extra_debug_log)
		LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
			is_extended_reg ? "config-specific" : "core",
			rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
			is_extended_reg ? xtensa->num_optregs : ridx,
			is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
	return ERROR_OK;
}
3950
3951 COMMAND_HANDLER(xtensa_cmd_xtreg)
3952 {
3953 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3954 target_to_xtensa(get_current_target(CMD_CTX)));
3955 }
3956
3957 /* xtregfmt <contiguous|sparse> [numgregs] */
3958 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3959 {
3960 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3961 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3962 return ERROR_OK;
3963 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3964 xtensa->regmap_contiguous = true;
3965 if (CMD_ARGC == 2) {
3966 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3967 if ((numgregs <= 0) ||
3968 ((numgregs > xtensa->total_regs_num) &&
3969 (xtensa->total_regs_num > 0))) {
3970 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3971 numgregs, xtensa->total_regs_num);
3972 return ERROR_COMMAND_SYNTAX_ERROR;
3973 }
3974 xtensa->genpkt_regs_num = numgregs;
3975 }
3976 return ERROR_OK;
3977 }
3978 }
3979 return ERROR_COMMAND_SYNTAX_ERROR;
3980 }
3981
3982 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3983 {
3984 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3985 target_to_xtensa(get_current_target(CMD_CTX)));
3986 }
3987
3988 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
3989 {
3990 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
3991 &xtensa->permissive_mode, "xtensa permissive mode");
3992 }
3993
3994 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3995 {
3996 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3997 target_to_xtensa(get_current_target(CMD_CTX)));
3998 }
3999
4000 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
4001 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
4002 {
4003 struct xtensa_perfmon_config config = {
4004 .mask = 0xffff,
4005 .kernelcnt = 0,
4006 .tracelevel = -1 /* use DEBUGLEVEL by default */
4007 };
4008
4009 if (CMD_ARGC < 2 || CMD_ARGC > 6)
4010 return ERROR_COMMAND_SYNTAX_ERROR;
4011
4012 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
4013 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
4014 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
4015 return ERROR_COMMAND_ARGUMENT_INVALID;
4016 }
4017
4018 config.select = strtoul(CMD_ARGV[1], NULL, 0);
4019 if (config.select > XTENSA_MAX_PERF_SELECT) {
4020 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
4021 return ERROR_COMMAND_ARGUMENT_INVALID;
4022 }
4023
4024 if (CMD_ARGC >= 3) {
4025 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
4026 if (config.mask > XTENSA_MAX_PERF_MASK) {
4027 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
4028 return ERROR_COMMAND_ARGUMENT_INVALID;
4029 }
4030 }
4031
4032 if (CMD_ARGC >= 4) {
4033 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
4034 if (config.kernelcnt > 1) {
4035 command_print(CMD, "kernelcnt should be 0 or 1");
4036 return ERROR_COMMAND_ARGUMENT_INVALID;
4037 }
4038 }
4039
4040 if (CMD_ARGC >= 5) {
4041 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
4042 if (config.tracelevel > 7) {
4043 command_print(CMD, "tracelevel should be <=7");
4044 return ERROR_COMMAND_ARGUMENT_INVALID;
4045 }
4046 }
4047
4048 if (config.tracelevel == -1)
4049 config.tracelevel = xtensa->core_config->debug.irq_level;
4050
4051 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
4052 }
4053
4054 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
4055 {
4056 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
4057 target_to_xtensa(get_current_target(CMD_CTX)));
4058 }
4059
4060 /* perfmon_dump [counter_id] */
4061 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
4062 {
4063 if (CMD_ARGC > 1)
4064 return ERROR_COMMAND_SYNTAX_ERROR;
4065
4066 int counter_id = -1;
4067 if (CMD_ARGC == 1) {
4068 counter_id = strtol(CMD_ARGV[0], NULL, 0);
4069 if (counter_id > XTENSA_MAX_PERF_COUNTERS) {
4070 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
4071 return ERROR_COMMAND_ARGUMENT_INVALID;
4072 }
4073 }
4074
4075 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
4076 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
4077 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
4078 char result_buf[128] = { 0 };
4079 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
4080 struct xtensa_perfmon_result result;
4081 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
4082 if (res != ERROR_OK)
4083 return res;
4084 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
4085 "%-12" PRIu64 "%s",
4086 result.value,
4087 result.overflow ? " (overflow)" : "");
4088 LOG_INFO("%s", result_buf);
4089 }
4090
4091 return ERROR_OK;
4092 }
4093
4094 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
4095 {
4096 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
4097 target_to_xtensa(get_current_target(CMD_CTX)));
4098 }
4099
4100 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
4101 {
4102 int state = -1;
4103
4104 if (CMD_ARGC < 1) {
4105 const char *st;
4106 state = xtensa->stepping_isr_mode;
4107 if (state == XT_STEPPING_ISR_ON)
4108 st = "OFF";
4109 else if (state == XT_STEPPING_ISR_OFF)
4110 st = "ON";
4111 else
4112 st = "UNKNOWN";
4113 command_print(CMD, "Current ISR step mode: %s", st);
4114 return ERROR_OK;
4115 }
4116
4117 if (xtensa->core_config->core_type == XT_NX) {
4118 command_print(CMD, "ERROR: ISR step mode only supported on Xtensa LX");
4119 return ERROR_FAIL;
4120 }
4121
4122 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
4123 if (!strcasecmp(CMD_ARGV[0], "off"))
4124 state = XT_STEPPING_ISR_ON;
4125 else if (!strcasecmp(CMD_ARGV[0], "on"))
4126 state = XT_STEPPING_ISR_OFF;
4127
4128 if (state == -1) {
4129 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
4130 return ERROR_FAIL;
4131 }
4132 xtensa->stepping_isr_mode = state;
4133 return ERROR_OK;
4134 }
4135
4136 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
4137 {
4138 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
4139 target_to_xtensa(get_current_target(CMD_CTX)));
4140 }
4141
4142 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
4143 {
4144 int res;
4145 uint32_t val = 0;
4146
4147 if (CMD_ARGC >= 1) {
4148 for (unsigned int i = 0; i < CMD_ARGC; i++) {
4149 if (!strcasecmp(CMD_ARGV[0], "none")) {
4150 val = 0;
4151 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
4152 val |= OCDDCR_BREAKINEN;
4153 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
4154 val |= OCDDCR_BREAKOUTEN;
4155 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
4156 val |= OCDDCR_RUNSTALLINEN;
4157 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
4158 val |= OCDDCR_DEBUGMODEOUTEN;
4159 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
4160 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
4161 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
4162 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
4163 } else {
4164 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
4165 command_print(
4166 CMD,
4167 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
4168 return ERROR_OK;
4169 }
4170 }
4171 res = xtensa_smpbreak_set(target, val);
4172 if (res != ERROR_OK)
4173 command_print(CMD, "Failed to set smpbreak config %d", res);
4174 } else {
4175 struct xtensa *xtensa = target_to_xtensa(target);
4176 res = xtensa_smpbreak_read(xtensa, &val);
4177 if (res == ERROR_OK)
4178 command_print(CMD, "Current bits set:%s%s%s%s",
4179 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
4180 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
4181 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
4182 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
4183 );
4184 else
4185 command_print(CMD, "Failed to get smpbreak config %d", res);
4186 }
4187 return res;
4188 }
4189
4190 COMMAND_HANDLER(xtensa_cmd_smpbreak)
4191 {
4192 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
4193 get_current_target(CMD_CTX));
4194 }
4195
4196 COMMAND_HELPER(xtensa_cmd_dm_rw_do, struct xtensa *xtensa)
4197 {
4198 if (CMD_ARGC == 1) {
4199 // read: xtensa dm addr
4200 uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
4201 uint32_t val;
4202 int res = xtensa_dm_read(&xtensa->dbg_mod, addr, &val);
4203 if (res == ERROR_OK)
4204 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") -> 0x%08" PRIx32, addr, val);
4205 else
4206 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : read ERROR %" PRId32, addr, res);
4207 return res;
4208 } else if (CMD_ARGC == 2) {
4209 // write: xtensa dm addr value
4210 uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
4211 uint32_t val = strtoul(CMD_ARGV[1], NULL, 0);
4212 int res = xtensa_dm_write(&xtensa->dbg_mod, addr, val);
4213 if (res == ERROR_OK)
4214 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") <- 0x%08" PRIx32, addr, val);
4215 else
4216 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : write ERROR %" PRId32, addr, res);
4217 return res;
4218 }
4219 return ERROR_COMMAND_SYNTAX_ERROR;
4220 }
4221
4222 COMMAND_HANDLER(xtensa_cmd_dm_rw)
4223 {
4224 return CALL_COMMAND_HANDLER(xtensa_cmd_dm_rw_do,
4225 target_to_xtensa(get_current_target(CMD_CTX)));
4226 }
4227
4228 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
4229 {
4230 struct xtensa_trace_status trace_status;
4231 struct xtensa_trace_start_config cfg = {
4232 .stoppc = 0,
4233 .stopmask = XTENSA_STOPMASK_DISABLED,
4234 .after = 0,
4235 .after_is_words = false
4236 };
4237
4238 /* Parse arguments */
4239 for (unsigned int i = 0; i < CMD_ARGC; i++) {
4240 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
4241 char *e;
4242 i++;
4243 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
4244 cfg.stopmask = 0;
4245 if (*e == '/')
4246 cfg.stopmask = strtol(e, NULL, 0);
4247 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
4248 i++;
4249 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
4250 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
4251 cfg.after_is_words = 0;
4252 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
4253 cfg.after_is_words = 1;
4254 } else {
4255 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
4256 return ERROR_FAIL;
4257 }
4258 }
4259
4260 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4261 if (res != ERROR_OK)
4262 return res;
4263 if (trace_status.stat & TRAXSTAT_TRACT) {
4264 LOG_WARNING("Silently stop active tracing!");
4265 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
4266 if (res != ERROR_OK)
4267 return res;
4268 }
4269
4270 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
4271 if (res != ERROR_OK)
4272 return res;
4273
4274 xtensa->trace_active = true;
4275 command_print(CMD, "Trace started.");
4276 return ERROR_OK;
4277 }
4278
4279 COMMAND_HANDLER(xtensa_cmd_tracestart)
4280 {
4281 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
4282 target_to_xtensa(get_current_target(CMD_CTX)));
4283 }
4284
4285 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
4286 {
4287 struct xtensa_trace_status trace_status;
4288
4289 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4290 if (res != ERROR_OK)
4291 return res;
4292
4293 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
4294 command_print(CMD, "No trace is currently active.");
4295 return ERROR_FAIL;
4296 }
4297
4298 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
4299 if (res != ERROR_OK)
4300 return res;
4301
4302 xtensa->trace_active = false;
4303 command_print(CMD, "Trace stop triggered.");
4304 return ERROR_OK;
4305 }
4306
4307 COMMAND_HANDLER(xtensa_cmd_tracestop)
4308 {
4309 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
4310 target_to_xtensa(get_current_target(CMD_CTX)));
4311 }
4312
4313 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
4314 {
4315 struct xtensa_trace_config trace_config;
4316 struct xtensa_trace_status trace_status;
4317 uint32_t memsz, wmem;
4318
4319 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4320 if (res != ERROR_OK)
4321 return res;
4322
4323 if (trace_status.stat & TRAXSTAT_TRACT) {
4324 command_print(CMD, "Tracing is still active. Please stop it first.");
4325 return ERROR_FAIL;
4326 }
4327
4328 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
4329 if (res != ERROR_OK)
4330 return res;
4331
4332 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
4333 command_print(CMD, "No active trace found; nothing to dump.");
4334 return ERROR_FAIL;
4335 }
4336
4337 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
4338 LOG_INFO("Total trace memory: %d words", memsz);
4339 if ((trace_config.addr &
4340 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
4341 /*Memory hasn't overwritten itself yet. */
4342 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
4343 LOG_INFO("...but trace is only %d words", wmem);
4344 if (wmem < memsz)
4345 memsz = wmem;
4346 } else {
4347 if (trace_config.addr & TRAXADDR_TWSAT) {
4348 LOG_INFO("Real trace is many times longer than that (overflow)");
4349 } else {
4350 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
4351 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
4352 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
4353 }
4354 }
4355
4356 uint8_t *tracemem = malloc(memsz * 4);
4357 if (!tracemem) {
4358 command_print(CMD, "Failed to alloc memory for trace data!");
4359 return ERROR_FAIL;
4360 }
4361 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
4362 if (res != ERROR_OK) {
4363 free(tracemem);
4364 return res;
4365 }
4366
4367 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4368 if (f <= 0) {
4369 free(tracemem);
4370 command_print(CMD, "Unable to open file %s", fname);
4371 return ERROR_FAIL;
4372 }
4373 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
4374 command_print(CMD, "Unable to write to file %s", fname);
4375 else
4376 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
4377 close(f);
4378
4379 bool is_all_zeroes = true;
4380 for (unsigned int i = 0; i < memsz * 4; i++) {
4381 if (tracemem[i] != 0) {
4382 is_all_zeroes = false;
4383 break;
4384 }
4385 }
4386 free(tracemem);
4387 if (is_all_zeroes)
4388 command_print(
4389 CMD,
4390 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
4391
4392 return ERROR_OK;
4393 }
4394
4395 COMMAND_HANDLER(xtensa_cmd_tracedump)
4396 {
4397 if (CMD_ARGC != 1) {
4398 command_print(CMD, "Command takes exactly 1 parameter.Need filename to dump to as output!");
4399 return ERROR_FAIL;
4400 }
4401
4402 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
4403 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
4404 }
4405
4406 static const struct command_registration xtensa_any_command_handlers[] = {
4407 {
4408 .name = "xtdef",
4409 .handler = xtensa_cmd_xtdef,
4410 .mode = COMMAND_CONFIG,
4411 .help = "Configure Xtensa core type",
4412 .usage = "<type>",
4413 },
4414 {
4415 .name = "xtopt",
4416 .handler = xtensa_cmd_xtopt,
4417 .mode = COMMAND_CONFIG,
4418 .help = "Configure Xtensa core option",
4419 .usage = "<name> <value>",
4420 },
4421 {
4422 .name = "xtmem",
4423 .handler = xtensa_cmd_xtmem,
4424 .mode = COMMAND_CONFIG,
4425 .help = "Configure Xtensa memory/cache option",
4426 .usage = "<type> [parameters]",
4427 },
4428 {
4429 .name = "xtmmu",
4430 .handler = xtensa_cmd_xtmmu,
4431 .mode = COMMAND_CONFIG,
4432 .help = "Configure Xtensa MMU option",
4433 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
4434 },
4435 {
4436 .name = "xtmpu",
4437 .handler = xtensa_cmd_xtmpu,
4438 .mode = COMMAND_CONFIG,
4439 .help = "Configure Xtensa MPU option",
4440 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
4441 },
4442 {
4443 .name = "xtreg",
4444 .handler = xtensa_cmd_xtreg,
4445 .mode = COMMAND_CONFIG,
4446 .help = "Configure Xtensa register",
4447 .usage = "<regname> <regnum>",
4448 },
4449 {
4450 .name = "xtregs",
4451 .handler = xtensa_cmd_xtreg,
4452 .mode = COMMAND_CONFIG,
4453 .help = "Configure number of Xtensa registers",
4454 .usage = "<numregs>",
4455 },
4456 {
4457 .name = "xtregfmt",
4458 .handler = xtensa_cmd_xtregfmt,
4459 .mode = COMMAND_CONFIG,
4460 .help = "Configure format of Xtensa register map",
4461 .usage = "<contiguous|sparse> [numgregs]",
4462 },
4463 {
4464 .name = "set_permissive",
4465 .handler = xtensa_cmd_permissive_mode,
4466 .mode = COMMAND_ANY,
4467 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
4468 .usage = "[0|1]",
4469 },
4470 {
4471 .name = "maskisr",
4472 .handler = xtensa_cmd_mask_interrupts,
4473 .mode = COMMAND_ANY,
4474 .help = "mask Xtensa interrupts at step",
4475 .usage = "['on'|'off']",
4476 },
4477 {
4478 .name = "smpbreak",
4479 .handler = xtensa_cmd_smpbreak,
4480 .mode = COMMAND_ANY,
4481 .help = "Set the way the CPU chains OCD breaks",
4482 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
4483 },
4484 {
4485 .name = "dm",
4486 .handler = xtensa_cmd_dm_rw,
4487 .mode = COMMAND_ANY,
4488 .help = "Xtensa DM read/write",
4489 .usage = "addr [value]"
4490 },
4491 {
4492 .name = "perfmon_enable",
4493 .handler = xtensa_cmd_perfmon_enable,
4494 .mode = COMMAND_EXEC,
4495 .help = "Enable and start performance counter",
4496 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
4497 },
4498 {
4499 .name = "perfmon_dump",
4500 .handler = xtensa_cmd_perfmon_dump,
4501 .mode = COMMAND_EXEC,
4502 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
4503 .usage = "[counter_id]",
4504 },
4505 {
4506 .name = "tracestart",
4507 .handler = xtensa_cmd_tracestart,
4508 .mode = COMMAND_EXEC,
4509 .help =
4510 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
4511 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
4512 },
4513 {
4514 .name = "tracestop",
4515 .handler = xtensa_cmd_tracestop,
4516 .mode = COMMAND_EXEC,
4517 .help = "Tracing: Stop current trace as started by the tracestart command",
4518 .usage = "",
4519 },
4520 {
4521 .name = "tracedump",
4522 .handler = xtensa_cmd_tracedump,
4523 .mode = COMMAND_EXEC,
4524 .help = "Tracing: Dump trace memory to a files. One file per core.",
4525 .usage = "<outfile>",
4526 },
4527 {
4528 .name = "exe",
4529 .handler = xtensa_cmd_exe,
4530 .mode = COMMAND_ANY,
4531 .help = "Xtensa stub execution",
4532 .usage = "<ascii-encoded hexadecimal instruction bytes>",
4533 },
4534 COMMAND_REGISTRATION_DONE
4535 };
4536
/* Top-level "xtensa" command group; the concrete subcommands are chained
 * in from xtensa_any_command_handlers. */
const struct command_registration xtensa_command_handlers[] = {
	{
		.name = "xtensa",
		.mode = COMMAND_ANY,
		.help = "Xtensa command group",
		.usage = "",
		.chain = xtensa_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)