target/xtensa: remove needless target_was_examined check
[openocd.git] / src / target / xtensa / xtensa.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
22
23 /* Swap 4-bit Xtensa opcodes and fields */
24 #define XT_NIBSWAP8(V) \
25 ((((V) & 0x0F) << 4) \
26 | (((V) & 0xF0) >> 4))
27
28 #define XT_NIBSWAP16(V) \
29 ((((V) & 0x000F) << 12) \
30 | (((V) & 0x00F0) << 4) \
31 | (((V) & 0x0F00) >> 4) \
32 | (((V) & 0xF000) >> 12))
33
34 #define XT_NIBSWAP24(V) \
35 ((((V) & 0x00000F) << 20) \
36 | (((V) & 0x0000F0) << 12) \
37 | (((V) & 0x000F00) << 4) \
38 | (((V) & 0x00F000) >> 4) \
39 | (((V) & 0x0F0000) >> 12) \
40 | (((V) & 0xF00000) >> 20))
41
42 /* _XT_INS_FORMAT_*()
43 * Instruction formatting converted from little-endian inputs
44 * and shifted to the MSB-side of DIR for BE systems.
45 */
46 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
47 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
48 | (((T) & 0x0F) << 16) \
49 | (((SR) & 0xFF) << 8)) << 8 \
50 : (OPCODE) \
51 | (((SR) & 0xFF) << 8) \
52 | (((T) & 0x0F) << 4))
53
54 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
55 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
56 | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
57 | (((R) & 0x0F) << 8)) << 8 \
58 : (OPCODE) \
59 | (((ST) & 0xFF) << 4) \
60 | (((R) & 0x0F) << 12))
61
62 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
63 (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
64 | (((T) & 0x0F) << 8) \
65 | (((S) & 0x0F) << 4) \
66 | ((IMM4) & 0x0F)) << 16 \
67 : (OPCODE) \
68 | (((T) & 0x0F) << 4) \
69 | (((S) & 0x0F) << 8) \
70 | (((IMM4) & 0x0F) << 12))
71
72 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
73 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
74 | (((T) & 0x0F) << 16) \
75 | (((S) & 0x0F) << 12) \
76 | (((R) & 0x0F) << 8) \
77 | ((IMM8) & 0xFF)) << 8 \
78 : (OPCODE) \
79 | (((IMM8) & 0xFF) << 16) \
80 | (((R) & 0x0F) << 12) \
81 | (((S) & 0x0F) << 8) \
82 | (((T) & 0x0F) << 4))
83
84 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
85 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
86 | (((T) & 0x0F) << 16) \
87 | (((S) & 0x0F) << 12) \
88 | (((R) & 0x0F) << 8)) << 8 \
89 | ((IMM4) & 0x0F) \
90 : (OPCODE) \
91 | (((IMM4) & 0x0F) << 20) \
92 | (((R) & 0x0F) << 12) \
93 | (((S) & 0x0F) << 8) \
94 | (((T) & 0x0F) << 4))
95
96 /* Xtensa processor instruction opcodes
97 */
98 /* "Return From Debug Operation" to Normal */
99 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
100 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
101 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
102
103 /* Load to DDR register, increase addr register */
104 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
105 /* Store from DDR register, increase addr register */
106 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
107
108 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
109 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
110 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
111 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
112 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
113 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
114
115 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
116 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
117 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
118 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
119 /* Store 8-bit to A(S)+IMM8 from A(T) */
120 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
121
122 /* Cache Instructions */
123 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
124 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
125 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
126 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
127
128 /* Control Instructions */
129 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
130 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
131
132 /* Read Special Register */
133 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
134 /* Write Special Register */
135 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
136 /* Swap Special Register */
137 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
138
139 /* Rotate Window by (-8..7) */
140 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
141
142 /* Read User Register */
143 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
144 /* Write User Register */
145 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
146
147 /* Read Floating-Point Register */
148 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
149 /* Write Floating-Point Register */
150 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
151
152 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
153 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
154 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
155
156 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
157 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
158 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
159
160 #define XT_WATCHPOINTS_NUM_MAX 2
161
162 /* Special register number macro for DDR, PS, WB, A3, A4 registers.
163 * These get used a lot so making a shortcut is useful.
164 */
165 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
166 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
167 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
168 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
169 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
170
171 #define XT_PS_REG_NUM (0xe6U)
172 #define XT_EPS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
173 #define XT_EPC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
174 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL) */
175 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
176
177 #define XT_SW_BREAKPOINTS_MAX_NUM 32
178 #define XT_HW_IBREAK_MAX_NUM 2
179 #define XT_HW_DBREAK_MAX_NUM 2
180
181 struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
182 XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
183 XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
184 XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
185 XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
186 XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
187 XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
188 XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
189 XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
190 XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
191 XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
192 XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
193 XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
194 XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
195 XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
196 XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
197 XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
198 XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
199 XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
200 XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
201 XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
202 XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
203 XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
204 XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
205 XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
206 XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
207 XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
208 XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
209 XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
210 XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
211 XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
212 XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
213 XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
214 XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
215 XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
216 XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
217 XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
218 XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
219 XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
220 XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
221 XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
222 XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
223 XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
224 XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
225 XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
226 XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
227 XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
228 XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
229 XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
230 XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
231 XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
232 XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
233 XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
234 XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
235 XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
236 XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
237 XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
238 XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
239 XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
240 XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
241 XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
242 XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
243 XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
244 XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
245 XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
246 XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
247 XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
248 XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
249 XT_MK_REG_DESC("ps", XT_PS_REG_NUM, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
250 XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
251 XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
252 XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
253 XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
254 XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
255 XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
256 XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
257 XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
258 XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
259 XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
260 XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
261 XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
262 XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
263
264 /* WARNING: For these registers, regnum points to the
265 * index of the corresponding ARx registers, NOT to
266 * the processor register number! */
267 XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
268 XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
269 XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
270 XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
271 XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
272 XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
273 XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
274 XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
275 XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
276 XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
277 XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
278 XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
279 XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
280 XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
281 XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
282 XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
283 };
284
/**
 * Types of memory used at xtensa target.
 * Order matters only in that XTENSA_MEM_REGS_NUM must stay last; it is used
 * as the iteration bound when scanning all region types.
 */
enum xtensa_mem_region_type {
	XTENSA_MEM_REG_IROM = 0x0,	/* instruction ROM */
	XTENSA_MEM_REG_IRAM,		/* instruction RAM */
	XTENSA_MEM_REG_DROM,		/* data ROM */
	XTENSA_MEM_REG_DRAM,		/* data RAM */
	XTENSA_MEM_REG_SRAM,		/* system RAM */
	XTENSA_MEM_REG_SROM,		/* system ROM */
	XTENSA_MEM_REGS_NUM		/* count of region types, keep last */
};
297
/* Register definition as union for list allocation.
 * Allows the same storage to be viewed as a 32-bit value or as the
 * 4-byte buffer the register-cache API works with. */
union xtensa_reg_val_u {
	xtensa_reg_val_t val;	/* value view */
	uint8_t buf[4];		/* raw byte view (register-cache buffer) */
};
303
/* Mapping of "Exx" error strings reported by the target-side stub to
 * OpenOCD error codes (indexed by the XT_QERR_* enum). */
static const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
	{ .chrval = "E00", .intval = ERROR_FAIL },
	{ .chrval = "E01", .intval = ERROR_FAIL },
	{ .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
	{ .chrval = "E03", .intval = ERROR_FAIL },
};
310
311 /* Set to true for extra debug logging */
312 static const bool xtensa_extra_debug_log;
313
314 /**
315 * Gets a config for the specific mem type
316 */
317 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
318 struct xtensa *xtensa,
319 enum xtensa_mem_region_type type)
320 {
321 switch (type) {
322 case XTENSA_MEM_REG_IROM:
323 return &xtensa->core_config->irom;
324 case XTENSA_MEM_REG_IRAM:
325 return &xtensa->core_config->iram;
326 case XTENSA_MEM_REG_DROM:
327 return &xtensa->core_config->drom;
328 case XTENSA_MEM_REG_DRAM:
329 return &xtensa->core_config->dram;
330 case XTENSA_MEM_REG_SRAM:
331 return &xtensa->core_config->sram;
332 case XTENSA_MEM_REG_SROM:
333 return &xtensa->core_config->srom;
334 default:
335 return NULL;
336 }
337 }
338
339 /**
340 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
341 * for a given address
342 * Returns NULL if nothing found
343 */
344 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
345 const struct xtensa_local_mem_config *mem,
346 target_addr_t address)
347 {
348 for (unsigned int i = 0; i < mem->count; i++) {
349 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
350 if (address >= region->base && address < (region->base + region->size))
351 return region;
352 }
353 return NULL;
354 }
355
356 /**
357 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
358 * for a given address
359 * Returns NULL if nothing found
360 */
361 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
362 struct xtensa *xtensa,
363 target_addr_t address)
364 {
365 const struct xtensa_local_mem_region_config *result;
366 const struct xtensa_local_mem_config *mcgf;
367 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
368 mcgf = xtensa_get_mem_config(xtensa, mtype);
369 result = xtensa_memory_region_find(mcgf, address);
370 if (result)
371 return result;
372 }
373 return NULL;
374 }
375
376 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
377 const struct xtensa_local_mem_config *mem,
378 target_addr_t address)
379 {
380 if (!cache->size)
381 return false;
382 return xtensa_memory_region_find(mem, address);
383 }
384
385 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
386 {
387 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
388 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
389 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
390 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
391 }
392
393 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
394 {
395 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
396 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
397 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
398 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
399 }
400
401 static int xtensa_core_reg_get(struct reg *reg)
402 {
403 /* We don't need this because we read all registers on halt anyway. */
404 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
405 struct target *target = xtensa->target;
406
407 if (target->state != TARGET_HALTED)
408 return ERROR_TARGET_NOT_HALTED;
409 if (!reg->exist) {
410 if (strncmp(reg->name, "?0x", 3) == 0) {
411 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
412 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
413 return ERROR_OK;
414 }
415 return ERROR_COMMAND_ARGUMENT_INVALID;
416 }
417 return ERROR_OK;
418 }
419
420 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
421 {
422 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
423 struct target *target = xtensa->target;
424
425 assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
426 if (target->state != TARGET_HALTED)
427 return ERROR_TARGET_NOT_HALTED;
428
429 if (!reg->exist) {
430 if (strncmp(reg->name, "?0x", 3) == 0) {
431 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
432 LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
433 return ERROR_OK;
434 }
435 return ERROR_COMMAND_ARGUMENT_INVALID;
436 }
437
438 buf_cpy(buf, reg->value, reg->size);
439
440 if (xtensa->core_config->windowed) {
441 /* If the user updates a potential scratch register, track for conflicts */
442 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
443 if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
444 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
445 buf_get_u32(reg->value, 0, 32));
446 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
447 xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
448 xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
449 xtensa->scratch_ars[s].intval = true;
450 break;
451 }
452 }
453 }
454 reg->dirty = true;
455 reg->valid = true;
456
457 return ERROR_OK;
458 }
459
/* Accessor vtable installed into every entry of the Xtensa register cache. */
static const struct reg_arch_type xtensa_reg_type = {
	.get = xtensa_core_reg_get,
	.set = xtensa_core_reg_set,
};
464
465 /* Convert a register index that's indexed relative to windowbase, to the real address. */
466 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
467 enum xtensa_reg_id reg_idx,
468 int windowbase)
469 {
470 unsigned int idx;
471 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
472 idx = reg_idx - XT_REG_IDX_AR0;
473 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
474 idx = reg_idx - XT_REG_IDX_A0;
475 } else {
476 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
477 return -1;
478 }
479 return ((idx + windowbase * 4) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
480 }
481
482 static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
483 enum xtensa_reg_id reg_idx,
484 int windowbase)
485 {
486 return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
487 }
488
489 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
490 {
491 struct reg *reg_list = xtensa->core_cache->reg_list;
492 reg_list[reg_idx].dirty = true;
493 }
494
/* Queue one instruction for execution on the core by writing it to the
 * DIR0EXEC debug register (the write triggers execution when the queue runs). */
static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
{
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0EXEC, ins);
}
499
500 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
501 {
502 const int max_oplen = 64; /* 8 DIRx regs: max width 64B */
503 if ((oplen > 0) && (oplen <= max_oplen)) {
504 uint8_t ops_padded[max_oplen];
505 memcpy(ops_padded, ops, oplen);
506 memset(ops_padded + oplen, 0, max_oplen - oplen);
507 unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
508 for (int32_t i = oplenw - 1; i > 0; i--)
509 xtensa_queue_dbg_reg_write(xtensa,
510 XDMREG_DIR0 + i,
511 target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t)*i]));
512 /* Write DIR0EXEC last */
513 xtensa_queue_dbg_reg_write(xtensa,
514 XDMREG_DIR0EXEC,
515 target_buffer_get_u32(xtensa->target, &ops_padded[0]));
516 }
517 }
518
519 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
520 {
521 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
522 return dm->pwr_ops->queue_reg_write(dm, reg, data);
523 }
524
525 /* NOTE: Assumes A3 has already been saved */
526 static int xtensa_window_state_save(struct target *target, uint32_t *woe)
527 {
528 struct xtensa *xtensa = target_to_xtensa(target);
529 int woe_dis;
530 uint8_t woe_buf[4];
531
532 if (xtensa->core_config->windowed) {
533 /* Save PS (LX) and disable window overflow exceptions prior to AR save */
534 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_PS, XT_REG_A3));
535 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
536 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, woe_buf);
537 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
538 if (res != ERROR_OK) {
539 LOG_ERROR("Failed to read PS (%d)!", res);
540 return res;
541 }
542 xtensa_core_status_check(target);
543 *woe = buf_get_u32(woe_buf, 0, 32);
544 woe_dis = *woe & ~XT_PS_WOE_MSK;
545 LOG_DEBUG("Clearing PS.WOE (0x%08" PRIx32 " -> 0x%08" PRIx32 ")", *woe, woe_dis);
546 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe_dis);
547 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
548 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
549 }
550 return ERROR_OK;
551 }
552
/* Restore the PS value saved by xtensa_window_state_save() (re-enabling
 * window overflow exceptions). No-op on non-windowed configs.
 * NOTE: Assumes A3 has already been saved (it is used as scratch here). */
static void xtensa_window_state_restore(struct target *target, uint32_t woe)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	if (xtensa->core_config->windowed) {
		/* Restore window overflow exception state:
		 * saved PS goes DDR -> A3 -> PS */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
		LOG_DEBUG("Restored PS.WOE (0x%08" PRIx32 ")", woe);
	}
}
565
566 static bool xtensa_reg_is_readable(int flags, int cpenable)
567 {
568 if (flags & XT_REGF_NOREAD)
569 return false;
570 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
571 return false;
572 return true;
573 }
574
575 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
576 {
577 int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
578 if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
579 LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
580 memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
581 } else {
582 LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
583 memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
584 }
585 return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
586 }
587
/* Push every dirty register in the OpenOCD register cache back to the core.
 *
 * Write-back order is significant:
 *   1. SFR/user/FP registers (via DDR -> A3 -> WSR/WUR/WFR), with CPENABLE
 *      deferred so coprocessor registers written above it still succeed;
 *   2. the visible A0..A15 window;
 *   3. on windowed configs, the full physical AR file, rotating the window
 *      by 4 (16 regs) per pass until it wraps back to the original state.
 * A3 is used as scratch throughout and is saved/restored around the whole
 * sequence on windowed configs.
 * Returns ERROR_OK or the first queue-execution error. */
static int xtensa_write_dirty_registers(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	xtensa_reg_val_t regval, windowbase = 0;
	bool scratch_reg_dirty = false, delay_cpenable = false;
	struct reg *reg_list = xtensa->core_cache->reg_list;
	unsigned int reg_list_size = xtensa->core_cache->num_regs;
	bool preserve_a3 = false;
	uint8_t a3_buf[4];
	xtensa_reg_val_t a3 = 0, woe;

	LOG_TARGET_DEBUG(target, "start");

	/* We need to write the dirty registers in the cache list back to the processor.
	 * Start by writing the SFR/user registers. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		/* Indices beyond XT_NUM_REGS address the per-config optional register list */
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (reg_list[i].dirty) {
			if (rlist[ridx].type == XT_REG_SPECIAL ||
				rlist[ridx].type == XT_REG_USER ||
				rlist[ridx].type == XT_REG_FR) {
				scratch_reg_dirty = true;
				if (i == XT_REG_IDX_CPENABLE) {
					/* Defer CPENABLE so coproc regs can be written first */
					delay_cpenable = true;
					continue;
				}
				regval = xtensa_reg_get(target, i);
				LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
					reg_list[i].name,
					rlist[ridx].reg_num,
					regval);
				/* Stage the value in DDR, pull it into scratch A3... */
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				if (reg_list[i].exist) {
					unsigned int reg_num = rlist[ridx].reg_num;
					/* ...then move A3 into the destination register by class */
					if (rlist[ridx].type == XT_REG_USER) {
						xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
					} else if (rlist[ridx].type == XT_REG_FR) {
						xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
					} else {/*SFR */
						if (reg_num == XT_PC_REG_NUM_VIRTUAL)
							/* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
							reg_num =
								(XT_EPC_REG_NUM_BASE +
								xtensa->core_config->debug.irq_level);
						xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
					}
				}
				reg_list[i].dirty = false;
			}
		}
	}
	/* A3 was clobbered as scratch above; force it to be written back too */
	if (scratch_reg_dirty)
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	if (delay_cpenable) {
		/* Deferred CPENABLE write, after all coprocessor registers */
		regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
		LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
			xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
			XT_REG_A3));
		reg_list[XT_REG_IDX_CPENABLE].dirty = false;
	}

	preserve_a3 = (xtensa->core_config->windowed);
	if (preserve_a3) {
		/* Save (windowed) A3 for scratch use */
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK)
			return res;
		xtensa_core_status_check(target);
		a3 = buf_get_u32(a3_buf, 0, 32);
	}

	if (xtensa->core_config->windowed) {
		res = xtensa_window_state_save(target, &woe);
		if (res != ERROR_OK)
			return res;
		/* Grab the windowbase, we need it. */
		windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
		/* Check if there are mismatches between the ARx and corresponding Ax registers.
		 * When the user sets a register on a windowed config, xt-gdb may set the ARx
		 * register directly. Thus we take ARx as priority over Ax if both are dirty
		 * and it's unclear if the user set one over the other explicitly.
		 */
		for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
			unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
			if (reg_list[i].dirty && reg_list[j].dirty) {
				if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
					bool show_warning = true;
					if (i == XT_REG_IDX_A3)
						show_warning = xtensa_scratch_regs_fixup(xtensa,
							reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
					else if (i == XT_REG_IDX_A4)
						show_warning = xtensa_scratch_regs_fixup(xtensa,
							reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
					if (show_warning)
						LOG_WARNING(
							"Warning: Both A%d [0x%08" PRIx32
							"] as well as its underlying physical register "
							"(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
							i - XT_REG_IDX_A0,
							buf_get_u32(reg_list[i].value, 0, 32),
							j - XT_REG_IDX_AR0,
							buf_get_u32(reg_list[j].value, 0, 32));
				}
			}
		}
	}

	/* Write A0-A16. */
	for (unsigned int i = 0; i < 16; i++) {
		if (reg_list[XT_REG_IDX_A0 + i].dirty) {
			regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
			LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
				xtensa_regs[XT_REG_IDX_A0 + i].name,
				regval,
				xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
			reg_list[XT_REG_IDX_A0 + i].dirty = false;
			if (i == 3) {
				/* Avoid stomping A3 during restore at end of function */
				a3 = regval;
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* Now write AR registers */
		for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
			/* Write the 16 registers we can see */
			for (unsigned int i = 0; i < 16; i++) {
				if (i + j < xtensa->core_config->aregs_num) {
					enum xtensa_reg_id realadr =
						xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
						windowbase);
					/* Write back any dirty un-windowed registers */
					if (reg_list[realadr].dirty) {
						regval = xtensa_reg_get(target, realadr);
						LOG_TARGET_DEBUG(
							target,
							"Writing back reg %s value %08" PRIX32 ", num =%i",
							xtensa_regs[realadr].name,
							regval,
							xtensa_regs[realadr].reg_num);
						xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
						xtensa_queue_exec_ins(xtensa,
							XT_INS_RSR(xtensa, XT_SR_DDR,
								xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
						reg_list[realadr].dirty = false;
						if ((i + j) == 3)
							/* Avoid stomping AR during A3 restore at end of function */
							a3 = regval;
					}
				}
			}
			/* Now rotate the window so we'll see the next 16 registers. The final rotate
			 * will wraparound, leaving us in the state we were. */
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
		}

		xtensa_window_state_restore(target, woe);

		/* All Ax/ARx conflicts were resolved above; clear the tracking flags */
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
			xtensa->scratch_ars[s].intval = false;
	}

	if (preserve_a3) {
		/* Put the saved (or freshly-written) A3 value back */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	}

	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	xtensa_core_status_check(target);

	return res;
}
773
774 static inline bool xtensa_is_stopped(struct target *target)
775 {
776 struct xtensa *xtensa = target_to_xtensa(target);
777 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
778 }
779
/* Examine the target: wake the debug module/memory/core, enable the debug
 * module, and verify it is reachable (OCD_ID sanity). On success the target
 * is marked examined and the cached smp_break setting is pushed to hardware.
 * Returns ERROR_FAIL when no core config was loaded, ERROR_TARGET_FAILURE
 * when the debug module is offline, or a queue-execution error. */
int xtensa_examine(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);

	LOG_DEBUG("coreid = %d", target->coreid);

	if (xtensa->core_config->core_type == XT_UNDEF) {
		LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
		return ERROR_FAIL;
	}

	/* Wake first, then additionally claim JTAG debug use in a second write */
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
	xtensa_dm_queue_enable(&xtensa->dbg_mod);
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
		LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
	target_set_examined(target);
	/* Apply the smp_break setting cached before examine completed */
	xtensa_smpbreak_write(xtensa, xtensa->smp_break);
	return ERROR_OK;
}
808
809 int xtensa_wakeup(struct target *target)
810 {
811 struct xtensa *xtensa = target_to_xtensa(target);
812 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
813
814 if (xtensa->reset_asserted)
815 cmd |= PWRCTL_CORERESET(xtensa);
816 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
817 /* TODO: can we join this with the write above? */
818 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
819 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
820 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
821 }
822
823 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
824 {
825 uint32_t dsr_data = 0x00110000;
826 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
827 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
828 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
829
830 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
831 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, set | OCDDCR_ENABLEOCD);
832 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, clear);
833 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DSR, dsr_data);
834 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
835 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
836 }
837
838 int xtensa_smpbreak_set(struct target *target, uint32_t set)
839 {
840 struct xtensa *xtensa = target_to_xtensa(target);
841 int res = ERROR_OK;
842
843 xtensa->smp_break = set;
844 if (target_was_examined(target))
845 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
846 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
847 return res;
848 }
849
850 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
851 {
852 uint8_t dcr_buf[sizeof(uint32_t)];
853
854 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DCRSET, dcr_buf);
855 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
856 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
857 *val = buf_get_u32(dcr_buf, 0, 32);
858
859 return res;
860 }
861
862 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
863 {
864 struct xtensa *xtensa = target_to_xtensa(target);
865 *val = xtensa->smp_break;
866 return ERROR_OK;
867 }
868
/* Decode a cached register value; reg->value holds a little-endian 32-bit buffer. */
static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
{
	return buf_get_u32(reg->value, 0, 32);
}
873
/* Store a value into the register cache and mark the register dirty so it is
 * written back to the target by xtensa_write_dirty_registers(). */
static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
{
	buf_set_u32(reg->value, 0, 32, value);
	reg->dirty = true;
}
879
880 int xtensa_core_status_check(struct target *target)
881 {
882 struct xtensa *xtensa = target_to_xtensa(target);
883 int res, needclear = 0;
884
885 xtensa_dm_core_status_read(&xtensa->dbg_mod);
886 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
887 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
888 if (dsr & OCDDSR_EXECBUSY) {
889 if (!xtensa->suppress_dsr_errors)
890 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
891 needclear = 1;
892 }
893 if (dsr & OCDDSR_EXECEXCEPTION) {
894 if (!xtensa->suppress_dsr_errors)
895 LOG_TARGET_ERROR(target,
896 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
897 dsr);
898 needclear = 1;
899 }
900 if (dsr & OCDDSR_EXECOVERRUN) {
901 if (!xtensa->suppress_dsr_errors)
902 LOG_TARGET_ERROR(target,
903 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
904 dsr);
905 needclear = 1;
906 }
907 if (needclear) {
908 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
909 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
910 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
911 LOG_TARGET_ERROR(target, "clearing DSR failed!");
912 return ERROR_FAIL;
913 }
914 return ERROR_OK;
915 }
916
917 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
918 {
919 struct xtensa *xtensa = target_to_xtensa(target);
920 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
921 return xtensa_reg_get_value(reg);
922 }
923
924 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
925 {
926 struct xtensa *xtensa = target_to_xtensa(target);
927 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
928 if (xtensa_reg_get_value(reg) == value)
929 return;
930 xtensa_reg_set_value(reg, value);
931 }
932
933 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
934 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
935 {
936 struct xtensa *xtensa = target_to_xtensa(target);
937 uint32_t windowbase = (xtensa->core_config->windowed ?
938 xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE) : 0);
939 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
940 xtensa_reg_set(target, a_idx, value);
941 xtensa_reg_set(target, ar_idx, value);
942 }
943
944 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
uint32_t xtensa_cause_get(struct target *target)
{
	/* Served from the register cache; valid after xtensa_fetch_all_regs(). */
	return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
}
949
950 void xtensa_cause_clear(struct target *target)
951 {
952 struct xtensa *xtensa = target_to_xtensa(target);
953 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
954 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
955 }
956
957 int xtensa_assert_reset(struct target *target)
958 {
959 struct xtensa *xtensa = target_to_xtensa(target);
960
961 LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
962 xtensa_queue_pwr_reg_write(xtensa,
963 XDMREG_PWRCTL,
964 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
965 PWRCTL_COREWAKEUP(xtensa) | PWRCTL_CORERESET(xtensa));
966 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
967 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
968 if (res != ERROR_OK)
969 return res;
970
971 /* registers are now invalid */
972 xtensa->reset_asserted = true;
973 register_cache_invalidate(xtensa->core_cache);
974 target->state = TARGET_RESET;
975 return ERROR_OK;
976 }
977
978 int xtensa_deassert_reset(struct target *target)
979 {
980 struct xtensa *xtensa = target_to_xtensa(target);
981
982 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
983 if (target->reset_halt)
984 xtensa_queue_dbg_reg_write(xtensa,
985 XDMREG_DCRSET,
986 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
987 xtensa_queue_pwr_reg_write(xtensa,
988 XDMREG_PWRCTL,
989 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
990 PWRCTL_COREWAKEUP(xtensa));
991 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
992 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
993 if (res != ERROR_OK)
994 return res;
995 target->state = TARGET_RUNNING;
996 xtensa->reset_asserted = false;
997 return res;
998 }
999
int xtensa_soft_reset_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "begin");
	/* No dedicated soft-reset path here; fall back to asserting hardware reset. */
	return xtensa_assert_reset(target);
}
1005
/**
 * Read the entire register set of a halted target into the register cache.
 *
 * Strategy: all hardware reads are queued and executed in large batches for
 * speed, then decoded from the 'regvals' scratch array. A3 is used as the
 * scratch register for moving SR/user/FP values through DDR; its original
 * value is captured first and restored into the cache (marked dirty) at the
 * end so it gets written back. Per-register DSR snapshots are captured only
 * on the first fetch or at debug log level (debug_dsrs) to detect read
 * exceptions.
 *
 * Returns ERROR_OK, or ERROR_FAIL on allocation failure / read exception,
 * or the error from queue execution.
 */
int xtensa_fetch_all_regs(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg *reg_list = xtensa->core_cache->reg_list;
	unsigned int reg_list_size = xtensa->core_cache->num_regs;
	xtensa_reg_val_t cpenable = 0, windowbase = 0, a3;
	uint32_t woe;
	uint8_t a3_buf[4];
	/* Capture per-register DSR only when useful: first fetch or debug logging */
	bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);

	union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
	if (!regvals) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
		return ERROR_FAIL;
	}
	union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
	if (!dsrs) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
		free(regvals);
		return ERROR_FAIL;
	}

	LOG_TARGET_DEBUG(target, "start");

	/* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
	int res = xtensa_window_state_save(target, &woe);
	if (res != ERROR_OK)
		goto xtensa_fetch_all_regs_done;

	/* Assume the CPU has just halted. We now want to fill the register cache with all the
	 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
	 * in one go, then sort everything out from the regvals variable. */

	/* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
	for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
		/* Grab the 16 registers we can see */
		for (unsigned int i = 0; i < 16; i++) {
			if (i + j < xtensa->core_config->aregs_num) {
				xtensa_queue_exec_ins(xtensa,
					XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
					regvals[XT_REG_IDX_AR0 + i + j].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR,
						dsrs[XT_REG_IDX_AR0 + i + j].buf);
			}
		}
		if (xtensa->core_config->windowed)
			/* Now rotate the window so we'll see the next 16 registers. The final rotate
			 * will wraparound, */
			/* leaving us in the state we were. */
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
	}
	xtensa_window_state_restore(target, woe);

	if (xtensa->core_config->coproc) {
		/* As the very first thing after AREGS, go grab CPENABLE */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
	}
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to read ARs (%d)!", res);
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	a3 = buf_get_u32(a3_buf, 0, 32);

	if (xtensa->core_config->coproc) {
		cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);

		/* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));

		/* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
		LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
		xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	}
	/* We're now free to use any of A0-A15 as scratch registers
	 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			bool reg_fetched = true;
			unsigned int reg_num = rlist[ridx].reg_num;
			switch (rlist[ridx].type) {
			case XT_REG_USER:
				xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_FR:
				xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_SPECIAL:
				if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
					/* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
					reg_num = XT_EPC_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
				} else if (reg_num == xtensa_regs[XT_REG_IDX_PS].reg_num) {
					/* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
					reg_num = XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
				} else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
					/* CPENABLE already read/updated; don't re-read */
					reg_fetched = false;
					break;
				}
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
				break;
			default:
				reg_fetched = false;
			}
			if (reg_fetched) {
				/* Move the value A3 now holds out through DDR */
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
			}
		}
	}
	/* Ok, send the whole mess to the CPU. */
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to fetch AR regs!");
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	if (debug_dsrs) {
		/* DSR checking: follows order in which registers are requested. */
		for (unsigned int i = 0; i < reg_list_size; i++) {
			struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
			unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
			if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
				(rlist[ridx].type != XT_REG_DEBUG) &&
				(rlist[ridx].type != XT_REG_RELGEN) &&
				(rlist[ridx].type != XT_REG_TIE) &&
				(rlist[ridx].type != XT_REG_OTHER)) {
				if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
					LOG_ERROR("Exception reading %s!", reg_list[i].name);
					res = ERROR_FAIL;
					goto xtensa_fetch_all_regs_done;
				}
			}
		}
	}

	if (xtensa->core_config->windowed)
		/* We need the windowbase to decode the general addresses. */
		windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE].buf, 0, 32);
	/* Decode the result and update the cache. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
				/* The 64-value general register set is read from (windowbase) on down.
				 * We need to get the real register address by subtracting windowbase and
				 * wrapping around. */
				enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
					windowbase);
				buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
			} else if (rlist[ridx].type == XT_REG_RELGEN) {
				buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
				if (xtensa_extra_debug_log) {
					xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
					LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
				}
			} else {
				xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
				/* CPENABLE must stay dirty so the saved value is restored on resume */
				bool is_dirty = (i == XT_REG_IDX_CPENABLE);
				if (xtensa_extra_debug_log)
					LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
				xtensa_reg_set(target, i, regval);
				reg_list[i].dirty = is_dirty;	/*always do this _after_ xtensa_reg_set! */
			}
			reg_list[i].valid = true;
		} else {
			if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
				/* Report read-only registers all-zero but valid */
				reg_list[i].valid = true;
				xtensa_reg_set(target, i, 0);
			} else {
				reg_list[i].valid = false;
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* We have used A3 as a scratch register.
		 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
		 */
		enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
		xtensa_reg_set(target, ar3_idx, a3);
		xtensa_mark_register_dirty(xtensa, ar3_idx);

		/* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
		enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
			xtensa->scratch_ars[s].intval = false;
	}

	/* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
	xtensa_reg_set(target, XT_REG_IDX_A3, a3);
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	xtensa->regs_fetched = true;
xtensa_fetch_all_regs_done:
	free(regvals);
	free(dsrs);
	return res;
}
1223
1224 int xtensa_get_gdb_reg_list(struct target *target,
1225 struct reg **reg_list[],
1226 int *reg_list_size,
1227 enum target_register_class reg_class)
1228 {
1229 struct xtensa *xtensa = target_to_xtensa(target);
1230 unsigned int num_regs;
1231
1232 if (reg_class == REG_CLASS_GENERAL) {
1233 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1234 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1235 return ERROR_FAIL;
1236 }
1237 num_regs = xtensa->genpkt_regs_num;
1238 } else {
1239 /* Determine whether to return a contiguous or sparse register map */
1240 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1241 }
1242
1243 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1244
1245 *reg_list = calloc(num_regs, sizeof(struct reg *));
1246 if (!*reg_list)
1247 return ERROR_FAIL;
1248
1249 *reg_list_size = num_regs;
1250 if (xtensa->regmap_contiguous) {
1251 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1252 for (unsigned int i = 0; i < num_regs; i++)
1253 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1254 return ERROR_OK;
1255 }
1256
1257 for (unsigned int i = 0; i < num_regs; i++)
1258 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1259 unsigned int k = 0;
1260 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1261 if (xtensa->core_cache->reg_list[i].exist) {
1262 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1263 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1264 int sparse_idx = rlist[ridx].dbreg_num;
1265 if (i == XT_REG_IDX_PS) {
1266 if (xtensa->eps_dbglevel_idx == 0) {
1267 LOG_ERROR("eps_dbglevel_idx not set\n");
1268 return ERROR_FAIL;
1269 }
1270 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1271 if (xtensa_extra_debug_log)
1272 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1273 sparse_idx, xtensa->core_config->debug.irq_level,
1274 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1275 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1276 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1277 } else {
1278 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1279 }
1280 if (i == XT_REG_IDX_PC)
1281 /* Make a duplicate copy of PC for external access */
1282 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1283 k++;
1284 }
1285 }
1286
1287 if (k == num_regs)
1288 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1289
1290 return ERROR_OK;
1291 }
1292
1293 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1294 {
1295 struct xtensa *xtensa = target_to_xtensa(target);
1296 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1297 xtensa->core_config->mmu.dtlb_entries_count > 0;
1298 return ERROR_OK;
1299 }
1300
1301 int xtensa_halt(struct target *target)
1302 {
1303 struct xtensa *xtensa = target_to_xtensa(target);
1304
1305 LOG_TARGET_DEBUG(target, "start");
1306 if (target->state == TARGET_HALTED) {
1307 LOG_TARGET_DEBUG(target, "target was already halted");
1308 return ERROR_OK;
1309 }
1310 /* First we have to read dsr and check if the target stopped */
1311 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1312 if (res != ERROR_OK) {
1313 LOG_TARGET_ERROR(target, "Failed to read core status!");
1314 return res;
1315 }
1316 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1317 if (!xtensa_is_stopped(target)) {
1318 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1319 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1320 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1321 if (res != ERROR_OK)
1322 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1323 }
1324
1325 return res;
1326 }
1327
/**
 * Prepare a halted target for resuming: set the resume PC (or single-step
 * past a watchpoint/break the core is currently sitting on so it does not
 * re-trigger immediately), re-install the hardware breakpoints GDB owns, and
 * flush the dirty register cache to the target.
 *
 * Does not actually restart the core; xtensa_do_resume() does that.
 * Returns ERROR_TARGET_NOT_HALTED if not halted, else the result of the
 * register write-back.
 */
int xtensa_prepare_resume(struct target *target,
	int current,
	target_addr_t address,
	int handle_breakpoints,
	int debug_execution)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	uint32_t bpena = 0;

	LOG_TARGET_DEBUG(target,
		"current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
		current,
		address,
		handle_breakpoints,
		debug_execution);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (address && !current) {
		/* Caller supplied an explicit resume address. */
		xtensa_reg_set(target, XT_REG_IDX_PC, address);
	} else {
		uint32_t cause = xtensa_cause_get(target);
		LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
			cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
		if (cause & DEBUGCAUSE_DB)
			/* We stopped due to a watchpoint. We can't just resume executing the
			 * instruction again because */
			/* that would trigger the watchpoint again. To fix this, we single-step,
			 * which ignores watchpoints. */
			xtensa_do_step(target, current, address, handle_breakpoints);
		if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
			/* We stopped due to a break instruction. We can't just resume executing the
			 * instruction again because */
			/* that would trigger the break again. To fix this, we single-step, which
			 * ignores break. */
			xtensa_do_step(target, current, address, handle_breakpoints);
	}

	/* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
	 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
	for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
		if (xtensa->hw_brps[slot]) {
			/* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
			xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
			bpena |= BIT(slot);
		}
	}
	xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);

	/* Here we write all registers to the targets */
	int res = xtensa_write_dirty_registers(target);
	if (res != ERROR_OK)
		LOG_TARGET_ERROR(target, "Failed to write back register cache.");
	return res;
}
1386
1387 int xtensa_do_resume(struct target *target)
1388 {
1389 struct xtensa *xtensa = target_to_xtensa(target);
1390
1391 LOG_TARGET_DEBUG(target, "start");
1392
1393 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1394 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1395 if (res != ERROR_OK) {
1396 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1397 return res;
1398 }
1399 xtensa_core_status_check(target);
1400 return ERROR_OK;
1401 }
1402
1403 int xtensa_resume(struct target *target,
1404 int current,
1405 target_addr_t address,
1406 int handle_breakpoints,
1407 int debug_execution)
1408 {
1409 LOG_TARGET_DEBUG(target, "start");
1410 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1411 if (res != ERROR_OK) {
1412 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1413 return res;
1414 }
1415 res = xtensa_do_resume(target);
1416 if (res != ERROR_OK) {
1417 LOG_TARGET_ERROR(target, "Failed to resume!");
1418 return res;
1419 }
1420
1421 target->debug_reason = DBG_REASON_NOTHALTED;
1422 if (!debug_execution)
1423 target->state = TARGET_RUNNING;
1424 else
1425 target->state = TARGET_DEBUG_RUNNING;
1426
1427 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1428
1429 return ERROR_OK;
1430 }
1431
1432 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1433 {
1434 struct xtensa *xtensa = target_to_xtensa(target);
1435 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1436 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1437 if (err != ERROR_OK)
1438 return false;
1439
1440 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1441 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1442 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1443 return true;
1444
1445 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1446 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1447 return true;
1448
1449 return false;
1450 }
1451
/**
 * Single-step a halted target by one instruction using ICOUNT/ICOUNTLEVEL.
 *
 * Handles the awkward cases: stepping off a break instruction (just advances
 * PC), stepping off a watchpoint (temporarily disables DBREAKC), optionally
 * masking interrupts during the step (XT_STEPPING_ISR_OFF, which may lower
 * PS.INTLEVEL and restore it afterwards), and stepping out of window
 * exception handlers when ISRs are masked. Waits up to ~500 ms for the step
 * to complete, then re-fetches all registers.
 *
 * Returns ERROR_OK on a completed step, ERROR_TARGET_NOT_HALTED if the
 * target was not halted, or ERROR_FAIL on unsupported config / timeout.
 */
int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	const uint32_t icount_val = -2;	/* ICOUNT value to load for 1 step */
	xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
	xtensa_reg_val_t icountlvl, cause;
	xtensa_reg_val_t oldps, oldpc, cur_pc;
	bool ps_lowered = false;

	LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
		current, address, handle_breakpoints);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (xtensa->eps_dbglevel_idx == 0) {
		LOG_ERROR("eps_dbglevel_idx not set\n");
		return ERROR_FAIL;
	}

	/* Save old ps (EPS[dbglvl] on LX), pc */
	oldps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
	oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);

	cause = xtensa_cause_get(target);
	LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
		oldps,
		oldpc,
		cause,
		xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
	if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
		/* handle hard-coded SW breakpoints (e.g. syscalls) */
		LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* pretend that we have stepped */
		if (cause & DEBUGCAUSE_BI)
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3);	/* PC = PC+3 */
		else
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2);	/* PC = PC+2 */
		return ERROR_OK;
	}

	/* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
	 * at which the instructions are to be counted while stepping.
	 *
	 * For example, if we need to step by 2 instructions, and an interrupt occurs
	 * in between, the processor will trigger the interrupt and halt after the 2nd
	 * instruction within the interrupt vector and/or handler.
	 *
	 * However, sometimes we don't want the interrupt handlers to be executed at all
	 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
	 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
	 * code from being counted during stepping. Note that C exception handlers must
	 * run at level 0 and hence will be counted and stepped into, should one occur.
	 *
	 * TODO: Certain instructions should never be single-stepped and should instead
	 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
	 * RFI >= DBGLEVEL.
	 */
	if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_TARGET_WARNING(
				target,
				"disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
			return ERROR_FAIL;
		}
		/* Update ICOUNTLEVEL accordingly */
		icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
	} else {
		icountlvl = xtensa->core_config->debug.irq_level;
	}

	if (cause & DEBUGCAUSE_DB) {
		/* We stopped due to a watchpoint. We can't just resume executing the instruction again because
		 * that would trigger the watchpoint again. To fix this, we remove watchpoints,single-step and
		 * re-enable the watchpoint. */
		LOG_TARGET_DEBUG(
			target,
			"Single-stepping to get past instruction that triggered the watchpoint...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* Save all DBREAKCx registers and set to 0 to disable watchpoints */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
			dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
		}
	}

	if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
		/* handle normal SW breakpoint */
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
	if ((oldps & 0xf) >= icountlvl) {
		/* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
		ps_lowered = true;
		uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
		LOG_TARGET_DEBUG(target,
			"Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			newps,
			oldps);
	}
	/* Loop until one real step has happened (may need to step out of window exceptions first) */
	do {
		xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
		xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);

		/* Now ICOUNT is set, we can resume as if we were going to run */
		res = xtensa_prepare_resume(target, current, address, 0, 0);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
			return res;
		}
		res = xtensa_do_resume(target);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
			return res;
		}

		/* Wait for stepping to complete */
		long long start = timeval_ms();
		while (timeval_ms() < start + 500) {
			/* Do not use target_poll here, it also triggers other things... just manually read the DSR
			 *until stepping is complete. */
			usleep(1000);
			res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
			if (res != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Failed to read core status!");
				return res;
			}
			if (xtensa_is_stopped(target))
				break;
			usleep(1000);
		}
		LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
			xtensa_dm_core_status_get(&xtensa->dbg_mod));
		if (!xtensa_is_stopped(target)) {
			LOG_TARGET_WARNING(
				target,
				"Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
			target->debug_reason = DBG_REASON_NOTHALTED;
			target->state = TARGET_RUNNING;
			return ERROR_FAIL;
		}

		xtensa_fetch_all_regs(target);
		cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);

		LOG_TARGET_DEBUG(target,
			"cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
			xtensa_reg_get(target, XT_REG_IDX_PS),
			cur_pc,
			xtensa_cause_get(target),
			xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));

		/* Do not step into WindowOverflow if ISRs are masked.
		   If we stop in WindowOverflow at breakpoint with masked ISRs and
		   try to do a step it will get us out of that handler */
		if (xtensa->core_config->windowed &&
			xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
			xtensa_pc_in_winexc(target, cur_pc)) {
			/* isrmask = on, need to step out of the window exception handler */
			LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
			oldpc = cur_pc;
			address = oldpc + 3;
			continue;
		}

		if (oldpc == cur_pc)
			LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
		else
			LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
		break;
	} while (true);

	target->debug_reason = DBG_REASON_SINGLESTEP;
	target->state = TARGET_HALTED;
	LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);

	if (cause & DEBUGCAUSE_DB) {
		LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
		/* Restore the DBREAKCx registers */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
	}

	/* Restore int level */
	if (ps_lowered) {
		LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			oldps);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
	}

	/* write ICOUNTLEVEL back to zero */
	xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
	/* TODO: can we skip writing dirty registers and re-fetching them? */
	res = xtensa_write_dirty_registers(target);
	xtensa_fetch_all_regs(target);
	return res;
}
1656
1657 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1658 {
1659 int retval = xtensa_do_step(target, current, address, handle_breakpoints);
1660 if (retval != ERROR_OK)
1661 return retval;
1662 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1663
1664 return ERROR_OK;
1665 }
1666
1667 /**
1668 * Returns true if two ranges are overlapping
1669 */
1670 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1671 target_addr_t r1_end,
1672 target_addr_t r2_start,
1673 target_addr_t r2_end)
1674 {
1675 if ((r2_start >= r1_start) && (r2_start < r1_end))
1676 return true; /* r2_start is in r1 region */
1677 if ((r2_end > r1_start) && (r2_end <= r1_end))
1678 return true; /* r2_end is in r1 region */
1679 return false;
1680 }
1681
1682 /**
1683 * Returns a size of overlapped region of two ranges.
1684 */
1685 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1686 target_addr_t r1_end,
1687 target_addr_t r2_start,
1688 target_addr_t r2_end)
1689 {
1690 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1691 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1692 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1693 return ov_end - ov_start;
1694 }
1695 return 0;
1696 }
1697
1698 /**
1699 * Check if the address gets to memory regions, and its access mode
1700 */
1701 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1702 {
1703 target_addr_t adr_pos = address; /* address cursor set to the beginning start */
1704 target_addr_t adr_end = address + size; /* region end */
1705 target_addr_t overlap_size;
1706 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1707
1708 while (adr_pos < adr_end) {
1709 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1710 if (!cm) /* address is not belong to anything */
1711 return false;
1712 if ((cm->access & access) != access) /* access check */
1713 return false;
1714 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1715 assert(overlap_size != 0);
1716 adr_pos += overlap_size;
1717 }
1718 return true;
1719 }
1720
1721 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1722 {
1723 struct xtensa *xtensa = target_to_xtensa(target);
1724 /* We are going to read memory in 32-bit increments. This may not be what the calling
1725 * function expects, so we may need to allocate a temp buffer and read into that first. */
1726 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1727 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1728 target_addr_t adr = addrstart_al;
1729 uint8_t *albuff;
1730 bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1731
1732 if (target->state != TARGET_HALTED) {
1733 LOG_TARGET_WARNING(target, "target not halted");
1734 return ERROR_TARGET_NOT_HALTED;
1735 }
1736
1737 if (!xtensa->permissive_mode) {
1738 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1739 XT_MEM_ACCESS_READ)) {
1740 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1741 return ERROR_FAIL;
1742 }
1743 }
1744
1745 unsigned int alloc_bytes = ALIGN_UP(addrend_al - addrstart_al, sizeof(uint32_t));
1746 albuff = calloc(alloc_bytes, 1);
1747 if (!albuff) {
1748 LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
1749 addrend_al - addrstart_al);
1750 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1751 }
1752
1753 /* We're going to use A3 here */
1754 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1755 /* Write start address to A3 */
1756 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1757 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1758 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1759 if (xtensa->probe_lsddr32p != 0) {
1760 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1761 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1762 xtensa_queue_dbg_reg_read(xtensa,
1763 (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
1764 &albuff[i]);
1765 } else {
1766 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1767 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1768 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1769 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
1770 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[i]);
1771 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1772 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1773 }
1774 }
1775 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1776 if (res == ERROR_OK) {
1777 bool prev_suppress = xtensa->suppress_dsr_errors;
1778 xtensa->suppress_dsr_errors = true;
1779 res = xtensa_core_status_check(target);
1780 if (xtensa->probe_lsddr32p == -1)
1781 xtensa->probe_lsddr32p = 1;
1782 xtensa->suppress_dsr_errors = prev_suppress;
1783 }
1784 if (res != ERROR_OK) {
1785 if (xtensa->probe_lsddr32p != 0) {
1786 /* Disable fast memory access instructions and retry before reporting an error */
1787 LOG_TARGET_DEBUG(target, "Disabling LDDR32.P/SDDR32.P");
1788 xtensa->probe_lsddr32p = 0;
1789 res = xtensa_read_memory(target, address, size, count, albuff);
1790 bswap = false;
1791 } else {
1792 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
1793 count * size, address);
1794 }
1795 }
1796
1797 if (bswap)
1798 buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
1799 memcpy(buffer, albuff + (address & 3), (size * count));
1800 free(albuff);
1801 return res;
1802 }
1803
1804 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1805 {
1806 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1807 return xtensa_read_memory(target, address, 1, count, buffer);
1808 }
1809
/**
 * Write memory on a halted Xtensa target.
 *
 * All bus accesses are aligned 32-bit writes via the debug module's DDR
 * register. Unaligned head/tail bytes are handled read-modify-write style:
 * the covering words are first read into a temporary aligned buffer, the
 * caller's data is merged in, and the whole aligned window is written back.
 * On success, affected icache lines are invalidated and dcache lines written
 * back/invalidated so the core observes the new data.
 *
 * Fails with ERROR_TARGET_NOT_HALTED when not halted; with ERROR_FAIL when
 * the range is not writable (unless permissive_mode); with
 * ERROR_COMMAND_SYNTAX_ERROR on empty input.
 */
int xtensa_write_memory(struct target *target,
	target_addr_t address,
	uint32_t size,
	uint32_t count,
	const uint8_t *buffer)
{
	/* This memory write function can get thrown nigh everything into it, from
	 * aligned uint32 writes to unaligned uint8ths. The Xtensa memory doesn't always
	 * accept anything but aligned uint32 writes, though. That is why we convert
	 * everything into that. */
	struct xtensa *xtensa = target_to_xtensa(target);
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
	target_addr_t adr = addrstart_al;
	int res;
	uint8_t *albuff;
	bool fill_head_tail = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!xtensa->permissive_mode) {
		if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
			LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
			return ERROR_FAIL;
		}
	}

	if (size == 0 || count == 0 || !buffer)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Allocate a temporary buffer to put the aligned bytes in, if needed. */
	if (addrstart_al == address && addrend_al == address + (size * count)) {
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
			/* Need a buffer for byte-swapping */
			albuff = malloc(addrend_al - addrstart_al);
		else
			/* We discard the const here because albuff can also be non-const */
			albuff = (uint8_t *)buffer;
	} else {
		fill_head_tail = true;
		albuff = malloc(addrend_al - addrstart_al);
	}
	if (!albuff) {
		LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
			addrend_al - addrstart_al);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* We're going to use A3 here */
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

	/* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
	if (fill_head_tail) {
		/* See if we need to read the first and/or last word. */
		if (address & 3) {
			/* Unaligned head: fetch the first covering word into albuff[0] */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[0]);
		}
		if ((address + (size * count)) & 3) {
			/* Unaligned tail: fetch the last covering word into the buffer end */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
				&albuff[addrend_al - addrstart_al - 4]);
		}
		/* Grab bytes */
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK) {
			LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
			if (albuff != buffer)
				free(albuff);
			return res;
		}
		xtensa_core_status_check(target);
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
			/* Bring the freshly-read head/tail words into host byte order
			 * before merging the caller's data */
			bool swapped_w0 = false;
			if (address & 3) {
				buf_bswap32(&albuff[0], &albuff[0], 4);
				swapped_w0 = true;
			}
			if ((address + (size * count)) & 3) {
				if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
					/* Don't double-swap if buffer start/end are within the same word */
				} else {
					buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
						&albuff[addrend_al - addrstart_al - 4], 4);
				}
			}
		}
		/* Copy data to be written into the aligned buffer (in host-endianness) */
		memcpy(&albuff[address & 3], buffer, size * count);
		/* Now we can write albuff in aligned uint32s. */
	}

	/* Convert the whole aligned window to target byte order in one go */
	if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
		buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);

	/* Write start address to A3 */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	/* Write the aligned buffer */
	if (xtensa->probe_lsddr32p != 0) {
		/* Fast path: SDDR32.P auto-increments A3; writing DDREXEC re-executes it */
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			if (i == 0) {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
				xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
			}
		}
	} else {
		/* Slow path: explicit S32I per word; A4 is used as scratch */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
			xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		}
	}

	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res == ERROR_OK) {
		/* DSR errors during the probe attempt are expected; suppress them while
		 * checking, and latch the probe result on first success */
		bool prev_suppress = xtensa->suppress_dsr_errors;
		xtensa->suppress_dsr_errors = true;
		res = xtensa_core_status_check(target);
		if (xtensa->probe_lsddr32p == -1)
			xtensa->probe_lsddr32p = 1;
		xtensa->suppress_dsr_errors = prev_suppress;
	}
	if (res != ERROR_OK) {
		if (xtensa->probe_lsddr32p != 0) {
			/* Disable fast memory access instructions and retry before reporting an error */
			LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
			xtensa->probe_lsddr32p = 0;
			res = xtensa_write_memory(target, address, size, count, buffer);
		} else {
			LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
				count * size, address);
		}
	} else {
		/* Invalidate ICACHE, writeback DCACHE if present */
		uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
		uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
		if (issue_ihi || issue_dhwb) {
			uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
			uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
			uint32_t linesize = MIN(ilinesize, dlinesize);
			uint32_t off = 0;
			adr = addrstart_al;

			/* Walk the written window one cache line at a time */
			while ((adr + off) < addrend_al) {
				if (off == 0) {
					/* Write start address to A3 */
					xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr);
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				}
				if (issue_ihi)
					xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
				if (issue_dhwb)
					xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
				off += linesize;
				if (off > 1020) {
					/* IHI, DHWB have 8-bit immediate operands (0..1020) */
					adr += off;
					off = 0;
				}
			}

			/* Execute cache WB/INV instructions */
			res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
			xtensa_core_status_check(target);
			if (res != ERROR_OK)
				LOG_TARGET_ERROR(target,
					"Error issuing cache writeback/invaldate instruction(s): %d",
					res);
		}
	}
	/* albuff aliases the caller's buffer on the aligned little-endian fast path */
	if (albuff != buffer)
		free(albuff);

	return res;
}
2008
2009 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2010 {
2011 /* xtensa_write_memory can handle everything. Just pass on to that. */
2012 return xtensa_write_memory(target, address, 1, count, buffer);
2013 }
2014
2015 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2016 {
2017 LOG_WARNING("not implemented yet");
2018 return ERROR_FAIL;
2019 }
2020
/**
 * Periodic poll handler: refreshes debug-module power and core status and
 * derives the OpenOCD target state (RESET / UNKNOWN / HALTED / RUNNING).
 * On a newly-detected halt it fetches registers, decodes DEBUGCAUSE into a
 * debug_reason, and acknowledges the pending debug interrupt bits. Also
 * detects trace completion when tracing was active.
 */
int xtensa_poll(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return ERROR_TARGET_NOT_EXAMINED;
	}

	/* Read power status; the mask passed in also clears the sticky
	 * "was reset" bits so the next poll starts fresh */
	int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET(xtensa) |
		PWRSTAT_COREWASRESET(xtensa));
	if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
		LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
			xtensa->dbg_mod.power_status.stat,
			PWRSTAT_DEBUGWASRESET(xtensa) | PWRSTAT_COREWASRESET(xtensa),
			xtensa->dbg_mod.power_status.stath);
	if (res != ERROR_OK)
		return res;

	if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
		LOG_TARGET_INFO(target, "Debug controller was reset.");
		/* Debug logic lost its config; re-apply the SMP break settings */
		res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
		if (res != ERROR_OK)
			return res;
	}
	if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
		LOG_TARGET_INFO(target, "Core was reset.");
	xtensa_dm_power_status_cache(&xtensa->dbg_mod);
	/* Enable JTAG, set reset if needed */
	res = xtensa_wakeup(target);
	if (res != ERROR_OK)
		return res;

	uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
	res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
		LOG_TARGET_DEBUG(target,
			"DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
			prev_dsr,
			xtensa->dbg_mod.core_status.dsr);
	if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET(xtensa)) {
		/* if RESET state is persistent */
		target->state = TARGET_RESET;
	} else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
		LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
			xtensa->dbg_mod.core_status.dsr,
			xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
		target->state = TARGET_UNKNOWN;
		/* Allow a configured number of polls for the core to come back
		 * online before declaring the target unexamined */
		if (xtensa->come_online_probes_num == 0)
			target->examined = false;
		else
			xtensa->come_online_probes_num--;
	} else if (xtensa_is_stopped(target)) {
		if (target->state != TARGET_HALTED) {
			enum target_state oldstate = target->state;
			target->state = TARGET_HALTED;
			/* Examine why the target has been halted */
			target->debug_reason = DBG_REASON_DBGRQ;
			xtensa_fetch_all_regs(target);
			/* When setting debug reason DEBUGCAUSE events have the following
			 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
			/* Watchpoint and breakpoint events at the same time results in special
			 * debug reason: DBG_REASON_WPTANDBKPT. */
			uint32_t halt_cause = xtensa_cause_get(target);
			/* TODO: Add handling of DBG_REASON_EXC_CATCH */
			if (halt_cause & DEBUGCAUSE_IC)
				target->debug_reason = DBG_REASON_SINGLESTEP;
			if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
				if (halt_cause & DEBUGCAUSE_DB)
					target->debug_reason = DBG_REASON_WPTANDBKPT;
				else
					target->debug_reason = DBG_REASON_BREAKPOINT;
			} else if (halt_cause & DEBUGCAUSE_DB) {
				target->debug_reason = DBG_REASON_WATCHPOINT;
			}
			LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
				", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
				xtensa_reg_get(target, XT_REG_IDX_PC),
				target->debug_reason,
				oldstate);
			LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
				halt_cause,
				xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
				xtensa->dbg_mod.core_status.dsr);
			/* Acknowledge the debug interrupt so it does not re-trigger */
			xtensa_dm_core_status_clear(
				&xtensa->dbg_mod,
				OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
				OCDDSR_DEBUGINTTRAX |
				OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
		}
	} else {
		target->debug_reason = DBG_REASON_NOTHALTED;
		if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
			target->state = TARGET_RUNNING;
			target->debug_reason = DBG_REASON_NOTHALTED;
		}
	}
	if (xtensa->trace_active) {
		/* Detect if tracing was active but has stopped. */
		struct xtensa_trace_status trace_status;
		res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
		if (res == ERROR_OK) {
			if (!(trace_status.stat & TRAXSTAT_TRACT)) {
				LOG_INFO("Detected end of trace.");
				if (trace_status.stat & TRAXSTAT_PCMTG)
					LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
				if (trace_status.stat & TRAXSTAT_PTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
				if (trace_status.stat & TRAXSTAT_CTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
				xtensa->trace_active = false;
			}
		}
	}
	return ERROR_OK;
}
2138
/**
 * Patch instruction bytes in target memory while keeping caches coherent:
 * writeback/invalidate the affected dcache line(s) and invalidate the
 * icache line(s) before the write, then flush the dcache again afterwards
 * so the new instruction is visible to fetch. Used by the SW breakpoint
 * code to plant/remove BREAK instructions.
 *
 * @param address instruction address; @param size patch length (must not
 * exceed one icache line); @param buffer new instruction bytes.
 */
static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
	unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
	/* UINT32_MAX line size makes the "same line" checks below always true
	 * when the corresponding cache is absent */
	uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
	uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
	/* A patch may straddle at most two cache lines; these flags tell us
	 * whether the second line needs maintenance too */
	unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
	unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
	int ret;

	if (size > icache_line_size)
		return ERROR_FAIL;

	if (issue_ihi || issue_dhwbi) {
		/* We're going to use A3 here */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

		/* Write start address to A3 and invalidate */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
		if (issue_dhwbi) {
			xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
			if (!same_dc_line) {
				LOG_TARGET_DEBUG(target,
					"DHWBI second dcache line for address "TARGET_ADDR_FMT,
					address + 4);
				xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
			}
		}
		if (issue_ihi) {
			xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
			if (!same_ic_line) {
				LOG_TARGET_DEBUG(target,
					"IHI second icache line for address "TARGET_ADDR_FMT,
					address + 4);
				xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
			}
		}

		/* Execute invalidate instructions */
		ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		xtensa_core_status_check(target);
		if (ret != ERROR_OK) {
			LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
			return ret;
		}
	}

	/* Write new instructions to memory */
	ret = target_write_buffer(target, address, size, buffer);
	if (ret != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
		return ret;
	}

	if (issue_dhwbi) {
		/* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
		LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
		if (!same_dc_line) {
			LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
		}

		/* Execute invalidate instructions */
		ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		xtensa_core_status_check(target);
	}

	/* TODO: Handle L2 cache if present */
	return ret;
}
2215
2216 static int xtensa_sw_breakpoint_add(struct target *target,
2217 struct breakpoint *breakpoint,
2218 struct xtensa_sw_breakpoint *sw_bp)
2219 {
2220 struct xtensa *xtensa = target_to_xtensa(target);
2221 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2222 if (ret != ERROR_OK) {
2223 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2224 return ret;
2225 }
2226
2227 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2228 sw_bp->oocd_bp = breakpoint;
2229
2230 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2231
2232 /* Underlying memory write will convert instruction endianness, don't do that here */
2233 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2234 if (ret != ERROR_OK) {
2235 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2236 return ret;
2237 }
2238
2239 return ERROR_OK;
2240 }
2241
2242 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2243 {
2244 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2245 if (ret != ERROR_OK) {
2246 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2247 return ret;
2248 }
2249 sw_bp->oocd_bp = NULL;
2250 return ERROR_OK;
2251 }
2252
2253 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2254 {
2255 struct xtensa *xtensa = target_to_xtensa(target);
2256 unsigned int slot;
2257
2258 if (breakpoint->type == BKPT_SOFT) {
2259 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2260 if (!xtensa->sw_brps[slot].oocd_bp ||
2261 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2262 break;
2263 }
2264 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2265 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2266 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2267 }
2268 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2269 if (ret != ERROR_OK) {
2270 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2271 return ret;
2272 }
2273 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2274 slot,
2275 breakpoint->address);
2276 return ERROR_OK;
2277 }
2278
2279 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2280 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2281 break;
2282 }
2283 if (slot == xtensa->core_config->debug.ibreaks_num) {
2284 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2285 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2286 }
2287
2288 xtensa->hw_brps[slot] = breakpoint;
2289 /* We will actually write the breakpoints when we resume the target. */
2290 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2291 slot,
2292 breakpoint->address);
2293
2294 return ERROR_OK;
2295 }
2296
2297 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2298 {
2299 struct xtensa *xtensa = target_to_xtensa(target);
2300 unsigned int slot;
2301
2302 if (breakpoint->type == BKPT_SOFT) {
2303 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2304 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2305 break;
2306 }
2307 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2308 LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2309 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2310 }
2311 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2312 if (ret != ERROR_OK) {
2313 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2314 return ret;
2315 }
2316 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2317 return ERROR_OK;
2318 }
2319
2320 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2321 if (xtensa->hw_brps[slot] == breakpoint)
2322 break;
2323 }
2324 if (slot == xtensa->core_config->debug.ibreaks_num) {
2325 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2326 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2327 }
2328 xtensa->hw_brps[slot] = NULL;
2329 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2330 return ERROR_OK;
2331 }
2332
2333 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2334 {
2335 struct xtensa *xtensa = target_to_xtensa(target);
2336 unsigned int slot;
2337 xtensa_reg_val_t dbreakcval;
2338
2339 if (target->state != TARGET_HALTED) {
2340 LOG_TARGET_WARNING(target, "target not halted");
2341 return ERROR_TARGET_NOT_HALTED;
2342 }
2343
2344 if (watchpoint->mask != ~(uint32_t)0) {
2345 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2346 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2347 }
2348
2349 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2350 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2351 break;
2352 }
2353 if (slot == xtensa->core_config->debug.dbreaks_num) {
2354 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2355 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2356 }
2357
2358 /* Figure out value for dbreakc5..0
2359 * It's basically 0x3F with an incremental bit removed from the LSB for each extra length power of 2. */
2360 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2361 !IS_PWR_OF_2(watchpoint->length) ||
2362 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2363 LOG_TARGET_WARNING(
2364 target,
2365 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2366 " not supported by hardware.",
2367 watchpoint->length,
2368 watchpoint->address);
2369 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2370 }
2371 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2372
2373 if (watchpoint->rw == WPT_READ)
2374 dbreakcval |= BIT(30);
2375 if (watchpoint->rw == WPT_WRITE)
2376 dbreakcval |= BIT(31);
2377 if (watchpoint->rw == WPT_ACCESS)
2378 dbreakcval |= BIT(30) | BIT(31);
2379
2380 /* Write DBREAKA[slot] and DBCREAKC[slot] */
2381 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2382 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2383 xtensa->hw_wps[slot] = watchpoint;
2384 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2385 watchpoint->address);
2386 return ERROR_OK;
2387 }
2388
2389 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2390 {
2391 struct xtensa *xtensa = target_to_xtensa(target);
2392 unsigned int slot;
2393
2394 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2395 if (xtensa->hw_wps[slot] == watchpoint)
2396 break;
2397 }
2398 if (slot == xtensa->core_config->debug.dbreaks_num) {
2399 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2400 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2401 }
2402 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2403 xtensa->hw_wps[slot] = NULL;
2404 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2405 watchpoint->address);
2406 return ERROR_OK;
2407 }
2408
2409 static int xtensa_build_reg_cache(struct target *target)
2410 {
2411 struct xtensa *xtensa = target_to_xtensa(target);
2412 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2413 unsigned int last_dbreg_num = 0;
2414
2415 if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
2416 LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2417 xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);
2418
2419 struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2420
2421 if (!reg_cache) {
2422 LOG_ERROR("Failed to alloc reg cache!");
2423 return ERROR_FAIL;
2424 }
2425 reg_cache->name = "Xtensa registers";
2426 reg_cache->next = NULL;
2427 /* Init reglist */
2428 unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2429 struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2430 if (!reg_list) {
2431 LOG_ERROR("Failed to alloc reg list!");
2432 goto fail;
2433 }
2434 xtensa->dbregs_num = 0;
2435 unsigned int didx = 0;
2436 for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2437 struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2438 unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2439 for (unsigned int i = 0; i < listsize; i++, didx++) {
2440 reg_list[didx].exist = rlist[i].exist;
2441 reg_list[didx].name = rlist[i].name;
2442 reg_list[didx].size = 32;
2443 reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2444 if (!reg_list[didx].value) {
2445 LOG_ERROR("Failed to alloc reg list value!");
2446 goto fail;
2447 }
2448 reg_list[didx].dirty = false;
2449 reg_list[didx].valid = false;
2450 reg_list[didx].type = &xtensa_reg_type;
2451 reg_list[didx].arch_info = xtensa;
2452 if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2453 last_dbreg_num = rlist[i].dbreg_num;
2454
2455 if (xtensa_extra_debug_log) {
2456 LOG_TARGET_DEBUG(target,
2457 "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2458 reg_list[didx].name,
2459 whichlist,
2460 reg_list[didx].exist,
2461 didx,
2462 rlist[i].type,
2463 rlist[i].dbreg_num);
2464 }
2465 }
2466 }
2467
2468 xtensa->dbregs_num = last_dbreg_num + 1;
2469 reg_cache->reg_list = reg_list;
2470 reg_cache->num_regs = reg_list_size;
2471
2472 LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2473 xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
2474
2475 /* Construct empty-register list for handling unknown register requests */
2476 xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
2477 if (!xtensa->empty_regs) {
2478 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2479 goto fail;
2480 }
2481 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2482 xtensa->empty_regs[i].name = calloc(8, sizeof(char));
2483 if (!xtensa->empty_regs[i].name) {
2484 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2485 goto fail;
2486 }
2487 sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
2488 xtensa->empty_regs[i].size = 32;
2489 xtensa->empty_regs[i].type = &xtensa_reg_type;
2490 xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2491 if (!xtensa->empty_regs[i].value) {
2492 LOG_ERROR("Failed to alloc empty reg list value!");
2493 goto fail;
2494 }
2495 xtensa->empty_regs[i].arch_info = xtensa;
2496 }
2497
2498 /* Construct contiguous register list from contiguous descriptor list */
2499 if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
2500 xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
2501 if (!xtensa->contiguous_regs_list) {
2502 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2503 goto fail;
2504 }
2505 for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
2506 unsigned int j;
2507 for (j = 0; j < reg_cache->num_regs; j++) {
2508 if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
2509 /* Register number field is not filled above.
2510 Here we are assigning the corresponding index from the contiguous reg list.
2511 These indexes are in the same order with gdb g-packet request/response.
2512 Some more changes may be required for sparse reg lists.
2513 */
2514 reg_cache->reg_list[j].number = i;
2515 xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
2516 LOG_TARGET_DEBUG(target,
2517 "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
2518 xtensa->contiguous_regs_list[i]->name,
2519 xtensa->contiguous_regs_desc[i]->dbreg_num);
2520 break;
2521 }
2522 }
2523 if (j == reg_cache->num_regs)
2524 LOG_TARGET_WARNING(target, "contiguous register %s not found",
2525 xtensa->contiguous_regs_desc[i]->name);
2526 }
2527 }
2528
2529 xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2530 if (!xtensa->algo_context_backup) {
2531 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2532 goto fail;
2533 }
2534 for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2535 struct reg *reg = &reg_cache->reg_list[i];
2536 xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2537 if (!xtensa->algo_context_backup[i]) {
2538 LOG_ERROR("Failed to alloc mem for algorithm context!");
2539 goto fail;
2540 }
2541 }
2542 xtensa->core_cache = reg_cache;
2543 if (cache_p)
2544 *cache_p = reg_cache;
2545 return ERROR_OK;
2546
2547 fail:
2548 if (reg_list) {
2549 for (unsigned int i = 0; i < reg_list_size; i++)
2550 free(reg_list[i].value);
2551 free(reg_list);
2552 }
2553 if (xtensa->empty_regs) {
2554 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2555 free((void *)xtensa->empty_regs[i].name);
2556 free(xtensa->empty_regs[i].value);
2557 }
2558 free(xtensa->empty_regs);
2559 }
2560 if (xtensa->algo_context_backup) {
2561 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2562 free(xtensa->algo_context_backup[i]);
2563 free(xtensa->algo_context_backup);
2564 }
2565 free(reg_cache);
2566
2567 return ERROR_FAIL;
2568 }
2569
2570 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
2571 {
2572 struct xtensa *xtensa = target_to_xtensa(target);
2573 int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
2574 /* Process op[] list */
2575 while (opstr && (*opstr == ':')) {
2576 uint8_t ops[32];
2577 unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
2578 if (oplen > 32) {
2579 LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
2580 break;
2581 }
2582 unsigned int i = 0;
2583 while ((i < oplen) && opstr && (*opstr == ':'))
2584 ops[i++] = strtoul(opstr + 1, &opstr, 16);
2585 if (i != oplen) {
2586 LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
2587 break;
2588 }
2589
2590 char insn_buf[128];
2591 sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
2592 for (i = 0; i < oplen; i++)
2593 sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
2594 LOG_TARGET_DEBUG(target, "%s", insn_buf);
2595 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
2596 status = ERROR_OK;
2597 }
2598 return status;
2599 }
2600
/* Handle gdb custom packets qxtreg (read) / Qxtreg (write) for TIE registers.
 * The transfer happens through a host-designated spill memory area: spill
 * memory is saved on entry and restored before returning, and a4 (used as the
 * spill pointer by the TIE op sequence) is saved/restored around execution.
 * On success *response_p holds "OK" (write) or the hex-encoded value (read);
 * on failure a gdb error string from xt_qerr[] is copied into *response_p
 * and its error code returned.
 */
static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	/* First character distinguishes write ('Q') from read ('q'). */
	bool iswrite = (packet[0] == 'Q');
	enum xtensa_qerr_e error;

	/* Read/write TIE register. Requires spill location.
	 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
	 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
	 */
	if (!(xtensa->spill_buf)) {
		LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
		error = XT_QERR_FAIL;
		goto xtensa_gdbqc_qxtreg_fail;
	}

	/* Parse "<num>:<len>:" header fields (hex). */
	char *delim;
	uint32_t regnum = strtoul(packet + 6, &delim, 16);
	if (*delim != ':') {
		LOG_ERROR("Malformed qxtreg packet");
		error = XT_QERR_INVAL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	uint32_t reglen = strtoul(delim + 1, &delim, 16);
	if (*delim != ':') {
		LOG_ERROR("Malformed qxtreg packet");
		error = XT_QERR_INVAL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
	memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
	LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
	/* Hex encoding needs 2 chars per byte plus NUL in the response buffer. */
	if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
		LOG_ERROR("TIE register too large");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}

	/* (1) Save spill memory, (1.5) [if write then store value to spill location],
	 * (2) read old a4, (3) write spill address to a4.
	 * NOTE: ensure a4 is restored properly by all error handling logic
	 */
	/* Use 32-bit accesses when the spill area is word-aligned, else bytes. */
	unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
	int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
		xtensa->spill_bytes / memop_size, xtensa->spill_buf);
	if (status != ERROR_OK) {
		LOG_ERROR("Spill memory save");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	if (iswrite) {
		/* Extract value and store in spill memory */
		unsigned int b = 0;
		char *valbuf = strchr(delim, '=');
		if (!(valbuf && (*valbuf == '='))) {
			LOG_ERROR("Malformed Qxtreg packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdbqc_qxtreg_fail;
		}
		valbuf++;
		/* Decode hex byte pairs until the end of the value string. */
		while (*valbuf && *(valbuf + 1)) {
			char bytestr[3] = { 0, 0, 0 };
			strncpy(bytestr, valbuf, 2);
			regbuf[b++] = strtoul(bytestr, NULL, 16);
			valbuf += 2;
		}
		if (b != reglen) {
			LOG_ERROR("Malformed Qxtreg packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdbqc_qxtreg_fail;
		}
		status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
			reglen / memop_size, regbuf);
		if (status != ERROR_OK) {
			LOG_ERROR("TIE value store");
			error = XT_QERR_MEM;
			goto xtensa_gdbqc_qxtreg_fail;
		}
	}
	/* Load the spill address into a4 via DDR; a4 is restored below. */
	xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, xtensa->spill_loc);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));

	int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);

	/* Restore a4 but not yet spill memory. Execute it all... */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, orig_a4);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
	status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (status != ERROR_OK) {
		LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
		tieop_status = status;
	}
	status = xtensa_core_status_check(target);
	if (status != ERROR_OK) {
		LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
		tieop_status = status;
	}

	if (tieop_status == ERROR_OK) {
		if (iswrite) {
			/* TIE write succeeded; send OK */
			strcpy(*response_p, "OK");
		} else {
			/* TIE read succeeded; copy result from spill memory */
			status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
			if (status != ERROR_OK) {
				LOG_TARGET_ERROR(target, "TIE result read");
				tieop_status = status;
			}
			/* Hex-encode the register value into the response. */
			unsigned int i;
			for (i = 0; i < reglen; i++)
				sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
			*(*response_p + 2 * i) = '\0';
			LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
		}
	}

	/* Restore spill memory first, then report any previous errors */
	status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
		xtensa->spill_bytes / memop_size, xtensa->spill_buf);
	if (status != ERROR_OK) {
		LOG_ERROR("Spill memory restore");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	if (tieop_status != ERROR_OK) {
		LOG_ERROR("TIE execution");
		error = XT_QERR_FAIL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	return ERROR_OK;

xtensa_gdbqc_qxtreg_fail:
	strcpy(*response_p, xt_qerr[error].chrval);
	return xt_qerr[error].intval;
}
2738
2739 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
2740 {
2741 struct xtensa *xtensa = target_to_xtensa(target);
2742 enum xtensa_qerr_e error;
2743 if (!packet || !response_p) {
2744 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
2745 return ERROR_FAIL;
2746 }
2747
2748 *response_p = xtensa->qpkt_resp;
2749 if (strncmp(packet, "qxtn", 4) == 0) {
2750 strcpy(*response_p, "OpenOCD");
2751 return ERROR_OK;
2752 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
2753 return ERROR_OK;
2754 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
2755 /* Confirm host cache params match core .cfg file */
2756 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
2757 &xtensa->core_config->icache : &xtensa->core_config->dcache;
2758 unsigned int line_size = 0, size = 0, way_count = 0;
2759 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
2760 if ((cachep->line_size != line_size) ||
2761 (cachep->size != size) ||
2762 (cachep->way_count != way_count)) {
2763 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2764 cachep == &xtensa->core_config->icache ? 'I' : 'D');
2765 }
2766 strcpy(*response_p, "OK");
2767 return ERROR_OK;
2768 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
2769 /* Confirm host IRAM/IROM params match core .cfg file */
2770 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
2771 &xtensa->core_config->iram : &xtensa->core_config->irom;
2772 unsigned int base = 0, size = 0, i;
2773 char *pkt = (char *)&packet[7];
2774 do {
2775 pkt++;
2776 size = strtoul(pkt, &pkt, 16);
2777 pkt++;
2778 base = strtoul(pkt, &pkt, 16);
2779 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
2780 for (i = 0; i < memp->count; i++) {
2781 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
2782 break;
2783 }
2784 if (i == memp->count) {
2785 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
2786 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
2787 break;
2788 }
2789 for (i = 0; i < 11; i++) {
2790 pkt++;
2791 strtoul(pkt, &pkt, 16);
2792 }
2793 } while (pkt && (pkt[0] == ','));
2794 strcpy(*response_p, "OK");
2795 return ERROR_OK;
2796 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
2797 /* Confirm host EXCM_LEVEL matches core .cfg file */
2798 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
2799 if (!xtensa->core_config->high_irq.enabled ||
2800 (excm_level != xtensa->core_config->high_irq.excm_level))
2801 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
2802 strcpy(*response_p, "OK");
2803 return ERROR_OK;
2804 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
2805 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
2806 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
2807 strcpy(*response_p, "OK");
2808 return ERROR_OK;
2809 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
2810 char *delim;
2811 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
2812 if (*delim != ':') {
2813 LOG_ERROR("Malformed Qxtspill packet");
2814 error = XT_QERR_INVAL;
2815 goto xtensa_gdb_query_custom_fail;
2816 }
2817 xtensa->spill_loc = spill_loc;
2818 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
2819 if (xtensa->spill_buf)
2820 free(xtensa->spill_buf);
2821 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
2822 if (!xtensa->spill_buf) {
2823 LOG_ERROR("Spill buf alloc");
2824 error = XT_QERR_MEM;
2825 goto xtensa_gdb_query_custom_fail;
2826 }
2827 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
2828 strcpy(*response_p, "OK");
2829 return ERROR_OK;
2830 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
2831 return xtensa_gdbqc_qxtreg(target, packet, response_p);
2832 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
2833 (strncmp(packet, "qxtftie", 7) == 0) ||
2834 (strncmp(packet, "qxtstie", 7) == 0)) {
2835 /* Return empty string to indicate trace, TIE wire debug are unsupported */
2836 strcpy(*response_p, "");
2837 return ERROR_OK;
2838 }
2839
2840 /* Warn for all other queries, but do not return errors */
2841 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
2842 strcpy(*response_p, "");
2843 return ERROR_OK;
2844
2845 xtensa_gdb_query_custom_fail:
2846 strcpy(*response_p, xt_qerr[error].chrval);
2847 return xt_qerr[error].intval;
2848 }
2849
2850 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2851 const struct xtensa_debug_module_config *dm_cfg)
2852 {
2853 target->arch_info = xtensa;
2854 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2855 xtensa->target = target;
2856 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2857
2858 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
2859 if (!xtensa->core_config) {
2860 LOG_ERROR("Xtensa configuration alloc failed\n");
2861 return ERROR_FAIL;
2862 }
2863
2864 /* Default cache settings are disabled with 1 way */
2865 xtensa->core_config->icache.way_count = 1;
2866 xtensa->core_config->dcache.way_count = 1;
2867
2868 /* chrval: AR3/AR4 register names will change with window mapping.
2869 * intval: tracks whether scratch register was set through gdb P packet.
2870 */
2871 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
2872 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
2873 if (!xtensa->scratch_ars[s].chrval) {
2874 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
2875 free(xtensa->scratch_ars[f].chrval);
2876 free(xtensa->core_config);
2877 LOG_ERROR("Xtensa scratch AR alloc failed\n");
2878 return ERROR_FAIL;
2879 }
2880 xtensa->scratch_ars[s].intval = false;
2881 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
2882 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
2883 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
2884 }
2885
2886 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2887 }
2888
2889 void xtensa_set_permissive_mode(struct target *target, bool state)
2890 {
2891 target_to_xtensa(target)->permissive_mode = state;
2892 }
2893
2894 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2895 {
2896 struct xtensa *xtensa = target_to_xtensa(target);
2897
2898 xtensa->come_online_probes_num = 3;
2899 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
2900 if (!xtensa->hw_brps) {
2901 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2902 return ERROR_FAIL;
2903 }
2904 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
2905 if (!xtensa->hw_wps) {
2906 free(xtensa->hw_brps);
2907 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2908 return ERROR_FAIL;
2909 }
2910 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2911 if (!xtensa->sw_brps) {
2912 free(xtensa->hw_brps);
2913 free(xtensa->hw_wps);
2914 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2915 return ERROR_FAIL;
2916 }
2917
2918 xtensa->spill_loc = 0xffffffff;
2919 xtensa->spill_bytes = 0;
2920 xtensa->spill_buf = NULL;
2921 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
2922
2923 return xtensa_build_reg_cache(target);
2924 }
2925
2926 static void xtensa_free_reg_cache(struct target *target)
2927 {
2928 struct xtensa *xtensa = target_to_xtensa(target);
2929 struct reg_cache *cache = xtensa->core_cache;
2930
2931 if (cache) {
2932 register_unlink_cache(&target->reg_cache, cache);
2933 for (unsigned int i = 0; i < cache->num_regs; i++) {
2934 free(xtensa->algo_context_backup[i]);
2935 free(cache->reg_list[i].value);
2936 }
2937 free(xtensa->algo_context_backup);
2938 free(cache->reg_list);
2939 free(cache);
2940 }
2941 xtensa->core_cache = NULL;
2942 xtensa->algo_context_backup = NULL;
2943
2944 if (xtensa->empty_regs) {
2945 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2946 free((void *)xtensa->empty_regs[i].name);
2947 free(xtensa->empty_regs[i].value);
2948 }
2949 free(xtensa->empty_regs);
2950 }
2951 xtensa->empty_regs = NULL;
2952 if (xtensa->optregs) {
2953 for (unsigned int i = 0; i < xtensa->num_optregs; i++)
2954 free((void *)xtensa->optregs[i].name);
2955 free(xtensa->optregs);
2956 }
2957 xtensa->optregs = NULL;
2958 }
2959
2960 void xtensa_target_deinit(struct target *target)
2961 {
2962 struct xtensa *xtensa = target_to_xtensa(target);
2963
2964 LOG_DEBUG("start");
2965
2966 if (target_was_examined(target)) {
2967 int ret = xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, OCDDCR_ENABLEOCD);
2968 if (ret != ERROR_OK) {
2969 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2970 return;
2971 }
2972 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2973 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2974 if (ret != ERROR_OK) {
2975 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2976 return;
2977 }
2978 xtensa_dm_deinit(&xtensa->dbg_mod);
2979 }
2980 xtensa_free_reg_cache(target);
2981 free(xtensa->hw_brps);
2982 free(xtensa->hw_wps);
2983 free(xtensa->sw_brps);
2984 if (xtensa->spill_buf) {
2985 free(xtensa->spill_buf);
2986 xtensa->spill_buf = NULL;
2987 }
2988 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
2989 free(xtensa->scratch_ars[s].chrval);
2990 free(xtensa->core_config);
2991 }
2992
/* Return the architecture name reported to gdb; identical for all Xtensa
 * targets regardless of core configuration.
 */
const char *xtensa_get_gdb_arch(struct target *target)
{
	static const char * const gdb_arch_name = "xtensa";

	return gdb_arch_name;
}
2997
/* exe <ascii-encoded hexadecimal instruction bytes>
 * Execute an arbitrary instruction sequence (up to 31 bytes, since the
 * argument must be an even number of hex chars below 64) on the halted core.
 * GDB is responsible for saving/restoring most state; this helper itself
 * preserves EXCCAUSE, CPENABLE and a3 around the execution and temporarily
 * enables all coprocessors.
 */
static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Process ascii-encoded hex byte string */
	const char *parm = CMD_ARGV[0];
	unsigned int parm_len = strlen(parm);
	if ((parm_len >= 64) || (parm_len & 1)) {
		LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
		return ERROR_FAIL;
	}

	/* Decode two hex chars per opcode byte. */
	uint8_t ops[32];
	memset(ops, 0, 32);
	unsigned int oplen = parm_len / 2;
	char encoded_byte[3] = { 0, 0, 0 };
	for (unsigned int i = 0; i < oplen; i++) {
		encoded_byte[0] = *parm++;
		encoded_byte[1] = *parm++;
		ops[i] = strtoul(encoded_byte, NULL, 16);
	}

	/* GDB must handle state save/restore.
	 * Flush reg cache in case spill location is in an AR
	 * Update CPENABLE only for this execution; later restore cached copy
	 * Keep a copy of exccause in case executed code triggers an exception
	 */
	int status = xtensa_write_dirty_registers(target);
	if (status != ERROR_OK) {
		LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
		return ERROR_FAIL;
	}
	xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
	xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
	xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
	/* Set CPENABLE to all-ones via a3 (DDR -> a3 -> CPENABLE), then restore a3. */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
		xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));

	/* Queue instruction list and execute everything */
	LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
	xtensa_queue_exec_ins_wide(xtensa, ops, oplen);	/* Handles endian-swap */
	status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (status != ERROR_OK)
		LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
	status = xtensa_core_status_check(target);
	if (status != ERROR_OK)
		LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);

	/* Reread register cache and restore saved regs after instruction execution */
	if (xtensa_fetch_all_regs(target) != ERROR_OK)
		LOG_TARGET_ERROR(target, "%s: Failed to fetch register cache (post-exec).", target_name(target));
	xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
	xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	return status;
}
3061
3062 COMMAND_HANDLER(xtensa_cmd_exe)
3063 {
3064 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3065 }
3066
3067 /* xtdef <name> */
3068 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3069 {
3070 if (CMD_ARGC != 1)
3071 return ERROR_COMMAND_SYNTAX_ERROR;
3072
3073 const char *core_name = CMD_ARGV[0];
3074 if (strcasecmp(core_name, "LX") == 0) {
3075 xtensa->core_config->core_type = XT_LX;
3076 } else {
3077 LOG_ERROR("xtdef [LX]\n");
3078 return ERROR_COMMAND_SYNTAX_ERROR;
3079 }
3080 return ERROR_OK;
3081 }
3082
3083 COMMAND_HANDLER(xtensa_cmd_xtdef)
3084 {
3085 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3086 target_to_xtensa(get_current_target(CMD_CTX)));
3087 }
3088
/* Check that an xtopt value lies within [min..max]; log an error and return
 * false when it does not.
 */
static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
{
	bool in_range = (val >= min) && (val <= max);

	if (!in_range)
		LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
	return in_range;
}
3097
/* xtopt <name> <value>
 * Set one scalar core-configuration option (invoked from the generated
 * xtensa-core-XXX.cfg file). Each value is range-checked via
 * xtensa_cmd_xtopt_legal_val(); unknown option names are ignored with a
 * warning so newer cfg files still load on older OpenOCD versions.
 */
COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
{
	if (CMD_ARGC != 2)
		return ERROR_COMMAND_SYNTAX_ERROR;

	const char *opt_name = CMD_ARGV[0];
	/* base 0 accepts decimal, 0x-hex and octal notation */
	int opt_val = strtol(CMD_ARGV[1], NULL, 0);
	if (strcasecmp(opt_name, "arnum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->aregs_num = opt_val;
	} else if (strcasecmp(opt_name, "windowed") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->windowed = opt_val;
	} else if (strcasecmp(opt_name, "cpenable") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->coproc = opt_val;
	} else if (strcasecmp(opt_name, "exceptions") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->exceptions = opt_val;
	} else if (strcasecmp(opt_name, "intnum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		/* intnum 0 means "no interrupt option" */
		xtensa->core_config->irq.enabled = (opt_val > 0);
		xtensa->core_config->irq.irq_num = opt_val;
	} else if (strcasecmp(opt_name, "hipriints") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->high_irq.enabled = opt_val;
	} else if (strcasecmp(opt_name, "excmlevel") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		/* excmlevel is only meaningful with high-priority interrupts */
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_ERROR("xtopt excmlevel requires hipriints\n");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->high_irq.excm_level = opt_val;
	} else if (strcasecmp(opt_name, "intlevels") == 0) {
		/* valid range depends on the core family */
		if (xtensa->core_config->core_type == XT_LX) {
			if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		} else {
			if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_ERROR("xtopt intlevels requires hipriints\n");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->high_irq.level_num = opt_val;
	} else if (strcasecmp(opt_name, "debuglevel") == 0) {
		/* valid range depends on the core family */
		if (xtensa->core_config->core_type == XT_LX) {
			if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		} else {
			if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->debug.enabled = 1;
		xtensa->core_config->debug.irq_level = opt_val;
	} else if (strcasecmp(opt_name, "ibreaknum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.ibreaks_num = opt_val;
	} else if (strcasecmp(opt_name, "dbreaknum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.dbreaks_num = opt_val;
	} else if (strcasecmp(opt_name, "tracemem") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		/* tracemem 0 disables the trace option */
		xtensa->core_config->trace.mem_sz = opt_val;
		xtensa->core_config->trace.enabled = (opt_val > 0);
	} else if (strcasecmp(opt_name, "tracememrev") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->trace.reversed_mem_access = opt_val;
	} else if (strcasecmp(opt_name, "perfcount") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.perfcount_num = opt_val;
	} else {
		/* Tolerate unknown options for forward compatibility of cfg files */
		LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
		return ERROR_OK;
	}

	return ERROR_OK;
}
3190
3191 COMMAND_HANDLER(xtensa_cmd_xtopt)
3192 {
3193 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3194 target_to_xtensa(get_current_target(CMD_CTX)));
3195 }
3196
/* xtmem <type> [parameters]
 * Declare one memory or cache present on the core (from xtensa-core-XXX.cfg).
 * Cache form:  xtmem <icache|dcache> <linebytes> <cachebytes> <ways> [writeback]
 * Memory form: xtmem <iram|irom|dram|drom|sram|srom> <baseaddr> <bytes>
 * l2cache/l2addr are accepted but currently ignored (TODO).
 */
COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
{
	struct xtensa_cache_config *cachep = NULL;
	struct xtensa_local_mem_config *memp = NULL;
	int mem_access = 0;
	bool is_dcache = false;

	if (CMD_ARGC == 0) {
		LOG_ERROR("xtmem <type> [parameters]\n");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	/* Map the type keyword to either a cache or a local-memory config. */
	const char *mem_name = CMD_ARGV[0];
	if (strcasecmp(mem_name, "icache") == 0) {
		cachep = &xtensa->core_config->icache;
	} else if (strcasecmp(mem_name, "dcache") == 0) {
		cachep = &xtensa->core_config->dcache;
		is_dcache = true;
	} else if (strcasecmp(mem_name, "l2cache") == 0) {
		/* TODO: support L2 cache */
	} else if (strcasecmp(mem_name, "l2addr") == 0) {
		/* TODO: support L2 cache */
	} else if (strcasecmp(mem_name, "iram") == 0) {
		memp = &xtensa->core_config->iram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "dram") == 0) {
		memp = &xtensa->core_config->dram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "sram") == 0) {
		memp = &xtensa->core_config->sram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "irom") == 0) {
		memp = &xtensa->core_config->irom;
		mem_access = XT_MEM_ACCESS_READ;
	} else if (strcasecmp(mem_name, "drom") == 0) {
		memp = &xtensa->core_config->drom;
		mem_access = XT_MEM_ACCESS_READ;
	} else if (strcasecmp(mem_name, "srom") == 0) {
		memp = &xtensa->core_config->srom;
		mem_access = XT_MEM_ACCESS_READ;
	} else {
		LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	if (cachep) {
		if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
			LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
		cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
		cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
		/* The optional writeback flag only applies to the dcache. */
		cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
			strtoul(CMD_ARGV[4], NULL, 0) : 0;
	} else if (memp) {
		if (CMD_ARGC != 3) {
			LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* NOTE(review): memp->count is not bounds-checked against the
		 * regions[] array capacity here; assumes generated cfg files stay
		 * within the supported region count — verify the array bound. */
		struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
		memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
		memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
		memcfgp->access = mem_access;
		memp->count++;
	}

	return ERROR_OK;
}
3267
3268 COMMAND_HANDLER(xtensa_cmd_xtmem)
3269 {
3270 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3271 target_to_xtensa(get_current_target(CMD_CTX)));
3272 }
3273
3274 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
3275 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3276 {
3277 if (CMD_ARGC != 4) {
3278 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3279 return ERROR_COMMAND_SYNTAX_ERROR;
3280 }
3281
3282 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3283 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3284 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3285 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3286
3287 if ((nfgseg > 32)) {
3288 LOG_ERROR("<nfgseg> must be within [0..32]\n");
3289 return ERROR_COMMAND_ARGUMENT_INVALID;
3290 } else if (minsegsize & (minsegsize - 1)) {
3291 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3292 return ERROR_COMMAND_ARGUMENT_INVALID;
3293 } else if (lockable > 1) {
3294 LOG_ERROR("<lockable> must be 0 or 1\n");
3295 return ERROR_COMMAND_ARGUMENT_INVALID;
3296 } else if (execonly > 1) {
3297 LOG_ERROR("<execonly> must be 0 or 1\n");
3298 return ERROR_COMMAND_ARGUMENT_INVALID;
3299 }
3300
3301 xtensa->core_config->mpu.enabled = true;
3302 xtensa->core_config->mpu.nfgseg = nfgseg;
3303 xtensa->core_config->mpu.minsegsize = minsegsize;
3304 xtensa->core_config->mpu.lockable = lockable;
3305 xtensa->core_config->mpu.execonly = execonly;
3306 return ERROR_OK;
3307 }
3308
3309 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3310 {
3311 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3312 target_to_xtensa(get_current_target(CMD_CTX)));
3313 }
3314
/* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>
 * (The IVARWAY56/DVARWAY56 arguments once listed here are not parsed:
 * the handler below requires exactly two arguments.) */
COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
{
	if (CMD_ARGC != 2) {
		LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	/* Both TLB refill-entry counts must be exactly 16 or 32. */
	unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
	unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
	if ((nirefillentries != 16) && (nirefillentries != 32)) {
		LOG_ERROR("<nirefillentries> must be 16 or 32\n");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	} else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
		LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	/* Record the validated MMU option set on the core configuration. */
	xtensa->core_config->mmu.enabled = true;
	xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
	xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
	return ERROR_OK;
}
3338
3339 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3340 {
3341 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3342 target_to_xtensa(get_current_target(CMD_CTX)));
3343 }
3344
/* xtregs <numregs>
 * xtreg <regname> <regnum>
 *
 * Single-argument form ("xtregs") declares the total register count and
 * allocates the optional-register table; two-argument form ("xtreg")
 * registers one named register, classifying it as either a core register
 * (present in the static xtensa_regs[] table) or a config-specific
 * "extended" register stored in xtensa->optregs[]. */
COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
{
	if (CMD_ARGC == 1) {
		/* "xtregs <numregs>": set total count and reset per-class counters. */
		int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
		if ((numregs <= 0) || (numregs > UINT16_MAX)) {
			LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* If "xtregfmt ... numgregs" was given first, the total must cover it. */
		if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
			LOG_ERROR("xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
				numregs, xtensa->genpkt_regs_num);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		xtensa->total_regs_num = numregs;
		xtensa->core_regs_num = 0;
		xtensa->num_optregs = 0;
		/* A little more memory than required, but saves a second initialization pass */
		xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
		if (!xtensa->optregs) {
			LOG_ERROR("Failed to allocate xtensa->optregs!");
			return ERROR_FAIL;
		}
		return ERROR_OK;
	} else if (CMD_ARGC != 2) {
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	/* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
	 * if general register (g-packet) requests or contiguous register maps are supported */
	if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
		xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
		if (!xtensa->contiguous_regs_desc) {
			LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
			return ERROR_FAIL;
		}
	}

	const char *regname = CMD_ARGV[0];
	unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
	if (regnum > UINT16_MAX) {
		LOG_ERROR("<regnum> must be a 16-bit number");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	/* Reject definitions beyond the total declared via "xtregs <numregs>". */
	if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
		if (xtensa->total_regs_num)
			LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
				regname, regnum,
				xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
		else
			LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
				regname, regnum);
		return ERROR_FAIL;
	}

	/* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
	struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
	bool is_extended_reg = true;
	unsigned int ridx;
	for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
		if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
			/* Flag core register as defined */
			rptr = &xtensa_regs[ridx];
			xtensa->core_regs_num++;
			is_extended_reg = false;
			break;
		}
	}

	rptr->exist = true;
	if (is_extended_reg) {
		/* Register ID, debugger-visible register ID */
		rptr->name = strdup(CMD_ARGV[0]);
		rptr->dbreg_num = regnum;
		rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
		xtensa->num_optregs++;

		/* Register type: classify by the mask/value encodings in regnum. */
		if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
			rptr->type = XT_REG_GENERAL;
		} else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
			rptr->type = XT_REG_USER;
		} else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
			rptr->type = XT_REG_FR;
		} else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
			rptr->type = XT_REG_SPECIAL;
		} else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
			/* WARNING: For these registers, regnum points to the
			 * index of the corresponding ARx registers, NOT to
			 * the processor register number! */
			rptr->type = XT_REG_RELGEN;
			rptr->reg_num += XT_REG_IDX_ARFIRST;
			rptr->dbreg_num += XT_REG_IDX_ARFIRST;
		} else if ((regnum & XT_REG_TIE_MASK) != 0) {
			rptr->type = XT_REG_TIE;
		} else {
			rptr->type = XT_REG_OTHER;
		}

		/* Register flags: these registers have side effects when read. */
		if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
			(strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
			(strcmp(rptr->name, "intclear") == 0))
			rptr->flags = XT_REGF_NOREAD;
		else
			rptr->flags = 0;

		/* On LX cores, remember where EPS for the debug IRQ level lives. */
		if (rptr->reg_num == (XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level) &&
			xtensa->core_config->core_type == XT_LX && rptr->type == XT_REG_SPECIAL) {
			xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
			LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
		}
	} else if (strcmp(rptr->name, "cpenable") == 0) {
		/* Presence of cpenable implies the coprocessor option is configured. */
		xtensa->core_config->coproc = true;
	}

	/* Build out list of contiguous registers in specified order */
	unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
	if (xtensa->contiguous_regs_desc) {
		assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
		xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
	}
	if (xtensa_extra_debug_log)
		LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
			is_extended_reg ? "config-specific" : "core",
			rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
			is_extended_reg ? xtensa->num_optregs : ridx,
			is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
	return ERROR_OK;
}
3477
3478 COMMAND_HANDLER(xtensa_cmd_xtreg)
3479 {
3480 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3481 target_to_xtensa(get_current_target(CMD_CTX)));
3482 }
3483
3484 /* xtregfmt <contiguous|sparse> [numgregs] */
3485 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3486 {
3487 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3488 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3489 return ERROR_OK;
3490 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3491 xtensa->regmap_contiguous = true;
3492 if (CMD_ARGC == 2) {
3493 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3494 if ((numgregs <= 0) ||
3495 ((numgregs > xtensa->total_regs_num) &&
3496 (xtensa->total_regs_num > 0))) {
3497 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3498 numgregs, xtensa->total_regs_num);
3499 return ERROR_COMMAND_SYNTAX_ERROR;
3500 }
3501 xtensa->genpkt_regs_num = numgregs;
3502 }
3503 return ERROR_OK;
3504 }
3505 }
3506 return ERROR_COMMAND_SYNTAX_ERROR;
3507 }
3508
3509 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3510 {
3511 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3512 target_to_xtensa(get_current_target(CMD_CTX)));
3513 }
3514
/* Parse an on/off argument (or report current state) for permissive mode,
 * which relaxes client-side checks on memory accesses. */
COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
		&xtensa->permissive_mode, "xtensa permissive mode");
}
3520
3521 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3522 {
3523 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3524 target_to_xtensa(get_current_target(CMD_CTX)));
3525 }
3526
3527 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
3528 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3529 {
3530 struct xtensa_perfmon_config config = {
3531 .mask = 0xffff,
3532 .kernelcnt = 0,
3533 .tracelevel = -1 /* use DEBUGLEVEL by default */
3534 };
3535
3536 if (CMD_ARGC < 2 || CMD_ARGC > 6)
3537 return ERROR_COMMAND_SYNTAX_ERROR;
3538
3539 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3540 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3541 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3542 return ERROR_COMMAND_ARGUMENT_INVALID;
3543 }
3544
3545 config.select = strtoul(CMD_ARGV[1], NULL, 0);
3546 if (config.select > XTENSA_MAX_PERF_SELECT) {
3547 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3548 return ERROR_COMMAND_ARGUMENT_INVALID;
3549 }
3550
3551 if (CMD_ARGC >= 3) {
3552 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3553 if (config.mask > XTENSA_MAX_PERF_MASK) {
3554 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3555 return ERROR_COMMAND_ARGUMENT_INVALID;
3556 }
3557 }
3558
3559 if (CMD_ARGC >= 4) {
3560 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3561 if (config.kernelcnt > 1) {
3562 command_print(CMD, "kernelcnt should be 0 or 1");
3563 return ERROR_COMMAND_ARGUMENT_INVALID;
3564 }
3565 }
3566
3567 if (CMD_ARGC >= 5) {
3568 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3569 if (config.tracelevel > 7) {
3570 command_print(CMD, "tracelevel should be <=7");
3571 return ERROR_COMMAND_ARGUMENT_INVALID;
3572 }
3573 }
3574
3575 if (config.tracelevel == -1)
3576 config.tracelevel = xtensa->core_config->debug.irq_level;
3577
3578 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3579 }
3580
3581 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3582 {
3583 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3584 target_to_xtensa(get_current_target(CMD_CTX)));
3585 }
3586
3587 /* perfmon_dump [counter_id] */
3588 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3589 {
3590 if (CMD_ARGC > 1)
3591 return ERROR_COMMAND_SYNTAX_ERROR;
3592
3593 int counter_id = -1;
3594 if (CMD_ARGC == 1) {
3595 counter_id = strtol(CMD_ARGV[0], NULL, 0);
3596 if (counter_id > XTENSA_MAX_PERF_COUNTERS) {
3597 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3598 return ERROR_COMMAND_ARGUMENT_INVALID;
3599 }
3600 }
3601
3602 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3603 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3604 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3605 char result_buf[128] = { 0 };
3606 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3607 struct xtensa_perfmon_result result;
3608 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3609 if (res != ERROR_OK)
3610 return res;
3611 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3612 "%-12" PRIu64 "%s",
3613 result.value,
3614 result.overflow ? " (overflow)" : "");
3615 LOG_INFO("%s", result_buf);
3616 }
3617
3618 return ERROR_OK;
3619 }
3620
3621 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3622 {
3623 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3624 target_to_xtensa(get_current_target(CMD_CTX)));
3625 }
3626
3627 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3628 {
3629 int state = -1;
3630
3631 if (CMD_ARGC < 1) {
3632 const char *st;
3633 state = xtensa->stepping_isr_mode;
3634 if (state == XT_STEPPING_ISR_ON)
3635 st = "OFF";
3636 else if (state == XT_STEPPING_ISR_OFF)
3637 st = "ON";
3638 else
3639 st = "UNKNOWN";
3640 command_print(CMD, "Current ISR step mode: %s", st);
3641 return ERROR_OK;
3642 }
3643 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3644 if (!strcasecmp(CMD_ARGV[0], "off"))
3645 state = XT_STEPPING_ISR_ON;
3646 else if (!strcasecmp(CMD_ARGV[0], "on"))
3647 state = XT_STEPPING_ISR_OFF;
3648
3649 if (state == -1) {
3650 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3651 return ERROR_FAIL;
3652 }
3653 xtensa->stepping_isr_mode = state;
3654 return ERROR_OK;
3655 }
3656
3657 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3658 {
3659 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3660 target_to_xtensa(get_current_target(CMD_CTX)));
3661 }
3662
3663 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3664 {
3665 int res;
3666 uint32_t val = 0;
3667
3668 if (CMD_ARGC >= 1) {
3669 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3670 if (!strcasecmp(CMD_ARGV[0], "none")) {
3671 val = 0;
3672 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3673 val |= OCDDCR_BREAKINEN;
3674 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3675 val |= OCDDCR_BREAKOUTEN;
3676 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3677 val |= OCDDCR_RUNSTALLINEN;
3678 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3679 val |= OCDDCR_DEBUGMODEOUTEN;
3680 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3681 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3682 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3683 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
3684 } else {
3685 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3686 command_print(
3687 CMD,
3688 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3689 return ERROR_OK;
3690 }
3691 }
3692 res = xtensa_smpbreak_set(target, val);
3693 if (res != ERROR_OK)
3694 command_print(CMD, "Failed to set smpbreak config %d", res);
3695 } else {
3696 struct xtensa *xtensa = target_to_xtensa(target);
3697 res = xtensa_smpbreak_read(xtensa, &val);
3698 if (res == ERROR_OK)
3699 command_print(CMD, "Current bits set:%s%s%s%s",
3700 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3701 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3702 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3703 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3704 );
3705 else
3706 command_print(CMD, "Failed to get smpbreak config %d", res);
3707 }
3708 return res;
3709 }
3710
3711 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3712 {
3713 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3714 get_current_target(CMD_CTX));
3715 }
3716
3717 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
3718 {
3719 struct xtensa_trace_status trace_status;
3720 struct xtensa_trace_start_config cfg = {
3721 .stoppc = 0,
3722 .stopmask = XTENSA_STOPMASK_DISABLED,
3723 .after = 0,
3724 .after_is_words = false
3725 };
3726
3727 /* Parse arguments */
3728 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3729 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
3730 char *e;
3731 i++;
3732 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
3733 cfg.stopmask = 0;
3734 if (*e == '/')
3735 cfg.stopmask = strtol(e, NULL, 0);
3736 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
3737 i++;
3738 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
3739 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
3740 cfg.after_is_words = 0;
3741 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
3742 cfg.after_is_words = 1;
3743 } else {
3744 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
3745 return ERROR_FAIL;
3746 }
3747 }
3748
3749 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3750 if (res != ERROR_OK)
3751 return res;
3752 if (trace_status.stat & TRAXSTAT_TRACT) {
3753 LOG_WARNING("Silently stop active tracing!");
3754 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
3755 if (res != ERROR_OK)
3756 return res;
3757 }
3758
3759 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
3760 if (res != ERROR_OK)
3761 return res;
3762
3763 xtensa->trace_active = true;
3764 command_print(CMD, "Trace started.");
3765 return ERROR_OK;
3766 }
3767
3768 COMMAND_HANDLER(xtensa_cmd_tracestart)
3769 {
3770 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
3771 target_to_xtensa(get_current_target(CMD_CTX)));
3772 }
3773
3774 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
3775 {
3776 struct xtensa_trace_status trace_status;
3777
3778 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3779 if (res != ERROR_OK)
3780 return res;
3781
3782 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
3783 command_print(CMD, "No trace is currently active.");
3784 return ERROR_FAIL;
3785 }
3786
3787 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
3788 if (res != ERROR_OK)
3789 return res;
3790
3791 xtensa->trace_active = false;
3792 command_print(CMD, "Trace stop triggered.");
3793 return ERROR_OK;
3794 }
3795
3796 COMMAND_HANDLER(xtensa_cmd_tracestop)
3797 {
3798 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
3799 target_to_xtensa(get_current_target(CMD_CTX)));
3800 }
3801
3802 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
3803 {
3804 struct xtensa_trace_config trace_config;
3805 struct xtensa_trace_status trace_status;
3806 uint32_t memsz, wmem;
3807
3808 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3809 if (res != ERROR_OK)
3810 return res;
3811
3812 if (trace_status.stat & TRAXSTAT_TRACT) {
3813 command_print(CMD, "Tracing is still active. Please stop it first.");
3814 return ERROR_FAIL;
3815 }
3816
3817 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
3818 if (res != ERROR_OK)
3819 return res;
3820
3821 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
3822 command_print(CMD, "No active trace found; nothing to dump.");
3823 return ERROR_FAIL;
3824 }
3825
3826 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
3827 LOG_INFO("Total trace memory: %d words", memsz);
3828 if ((trace_config.addr &
3829 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
3830 /*Memory hasn't overwritten itself yet. */
3831 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
3832 LOG_INFO("...but trace is only %d words", wmem);
3833 if (wmem < memsz)
3834 memsz = wmem;
3835 } else {
3836 if (trace_config.addr & TRAXADDR_TWSAT) {
3837 LOG_INFO("Real trace is many times longer than that (overflow)");
3838 } else {
3839 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
3840 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
3841 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
3842 }
3843 }
3844
3845 uint8_t *tracemem = malloc(memsz * 4);
3846 if (!tracemem) {
3847 command_print(CMD, "Failed to alloc memory for trace data!");
3848 return ERROR_FAIL;
3849 }
3850 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
3851 if (res != ERROR_OK) {
3852 free(tracemem);
3853 return res;
3854 }
3855
3856 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
3857 if (f <= 0) {
3858 free(tracemem);
3859 command_print(CMD, "Unable to open file %s", fname);
3860 return ERROR_FAIL;
3861 }
3862 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
3863 command_print(CMD, "Unable to write to file %s", fname);
3864 else
3865 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
3866 close(f);
3867
3868 bool is_all_zeroes = true;
3869 for (unsigned int i = 0; i < memsz * 4; i++) {
3870 if (tracemem[i] != 0) {
3871 is_all_zeroes = false;
3872 break;
3873 }
3874 }
3875 free(tracemem);
3876 if (is_all_zeroes)
3877 command_print(
3878 CMD,
3879 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
3880
3881 return ERROR_OK;
3882 }
3883
3884 COMMAND_HANDLER(xtensa_cmd_tracedump)
3885 {
3886 if (CMD_ARGC != 1) {
3887 command_print(CMD, "Command takes exactly 1 parameter.Need filename to dump to as output!");
3888 return ERROR_FAIL;
3889 }
3890
3891 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
3892 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
3893 }
3894
3895 static const struct command_registration xtensa_any_command_handlers[] = {
3896 {
3897 .name = "xtdef",
3898 .handler = xtensa_cmd_xtdef,
3899 .mode = COMMAND_CONFIG,
3900 .help = "Configure Xtensa core type",
3901 .usage = "<type>",
3902 },
3903 {
3904 .name = "xtopt",
3905 .handler = xtensa_cmd_xtopt,
3906 .mode = COMMAND_CONFIG,
3907 .help = "Configure Xtensa core option",
3908 .usage = "<name> <value>",
3909 },
3910 {
3911 .name = "xtmem",
3912 .handler = xtensa_cmd_xtmem,
3913 .mode = COMMAND_CONFIG,
3914 .help = "Configure Xtensa memory/cache option",
3915 .usage = "<type> [parameters]",
3916 },
3917 {
3918 .name = "xtmmu",
3919 .handler = xtensa_cmd_xtmmu,
3920 .mode = COMMAND_CONFIG,
3921 .help = "Configure Xtensa MMU option",
3922 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
3923 },
3924 {
3925 .name = "xtmpu",
3926 .handler = xtensa_cmd_xtmpu,
3927 .mode = COMMAND_CONFIG,
3928 .help = "Configure Xtensa MPU option",
3929 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
3930 },
3931 {
3932 .name = "xtreg",
3933 .handler = xtensa_cmd_xtreg,
3934 .mode = COMMAND_CONFIG,
3935 .help = "Configure Xtensa register",
3936 .usage = "<regname> <regnum>",
3937 },
3938 {
3939 .name = "xtregs",
3940 .handler = xtensa_cmd_xtreg,
3941 .mode = COMMAND_CONFIG,
3942 .help = "Configure number of Xtensa registers",
3943 .usage = "<numregs>",
3944 },
3945 {
3946 .name = "xtregfmt",
3947 .handler = xtensa_cmd_xtregfmt,
3948 .mode = COMMAND_CONFIG,
3949 .help = "Configure format of Xtensa register map",
3950 .usage = "<contiguous|sparse> [numgregs]",
3951 },
3952 {
3953 .name = "set_permissive",
3954 .handler = xtensa_cmd_permissive_mode,
3955 .mode = COMMAND_ANY,
3956 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
3957 .usage = "[0|1]",
3958 },
3959 {
3960 .name = "maskisr",
3961 .handler = xtensa_cmd_mask_interrupts,
3962 .mode = COMMAND_ANY,
3963 .help = "mask Xtensa interrupts at step",
3964 .usage = "['on'|'off']",
3965 },
3966 {
3967 .name = "smpbreak",
3968 .handler = xtensa_cmd_smpbreak,
3969 .mode = COMMAND_ANY,
3970 .help = "Set the way the CPU chains OCD breaks",
3971 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
3972 },
3973 {
3974 .name = "perfmon_enable",
3975 .handler = xtensa_cmd_perfmon_enable,
3976 .mode = COMMAND_EXEC,
3977 .help = "Enable and start performance counter",
3978 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
3979 },
3980 {
3981 .name = "perfmon_dump",
3982 .handler = xtensa_cmd_perfmon_dump,
3983 .mode = COMMAND_EXEC,
3984 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
3985 .usage = "[counter_id]",
3986 },
3987 {
3988 .name = "tracestart",
3989 .handler = xtensa_cmd_tracestart,
3990 .mode = COMMAND_EXEC,
3991 .help =
3992 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
3993 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
3994 },
3995 {
3996 .name = "tracestop",
3997 .handler = xtensa_cmd_tracestop,
3998 .mode = COMMAND_EXEC,
3999 .help = "Tracing: Stop current trace as started by the tracestart command",
4000 .usage = "",
4001 },
4002 {
4003 .name = "tracedump",
4004 .handler = xtensa_cmd_tracedump,
4005 .mode = COMMAND_EXEC,
4006 .help = "Tracing: Dump trace memory to a files. One file per core.",
4007 .usage = "<outfile>",
4008 },
4009 {
4010 .name = "exe",
4011 .handler = xtensa_cmd_exe,
4012 .mode = COMMAND_ANY,
4013 .help = "Xtensa stub execution",
4014 .usage = "<ascii-encoded hexadecimal instruction bytes>",
4015 },
4016 COMMAND_REGISTRATION_DONE
4017 };
4018
/* Top-level "xtensa" command group; all subcommands are chained in from
 * xtensa_any_command_handlers above. */
const struct command_registration xtensa_command_handlers[] = {
	{
		.name = "xtensa",
		.mode = COMMAND_ANY,
		.help = "Xtensa command group",
		.usage = "",
		.chain = xtensa_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)