target/xtensa: fix clang analyzer warning
[openocd.git] / src / target / xtensa / xtensa.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
22
23 /* Swap 4-bit Xtensa opcodes and fields */
24 #define XT_NIBSWAP8(V) \
25 ((((V) & 0x0F) << 4) \
26 | (((V) & 0xF0) >> 4))
27
28 #define XT_NIBSWAP16(V) \
29 ((((V) & 0x000F) << 12) \
30 | (((V) & 0x00F0) << 4) \
31 | (((V) & 0x0F00) >> 4) \
32 | (((V) & 0xF000) >> 12))
33
34 #define XT_NIBSWAP24(V) \
35 ((((V) & 0x00000F) << 20) \
36 | (((V) & 0x0000F0) << 12) \
37 | (((V) & 0x000F00) << 4) \
38 | (((V) & 0x00F000) >> 4) \
39 | (((V) & 0x0F0000) >> 12) \
40 | (((V) & 0xF00000) >> 20))
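/* Editor's note (not in the upstream file): these macros reverse the nibble
 * order of an opcode so it can be placed in the Debug Instruction Register
 * on big-endian cores. A worked example with the RSR opcode used below:
 * XT_NIBSWAP24(0x030000) == 0x000030, i.e. the six nibbles 0,0,0,0,3,0
 * (least-significant first) come back out as 0,3,0,0,0,0. */
#if 0 /* illustrative compile-time check only; hypothetical, not built */
_Static_assert(XT_NIBSWAP24(0x030000) == 0x000030, "nibble order is reversed");
#endif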
41
42 /* _XT_INS_FORMAT_*()
43 * Instruction formatting converted from little-endian inputs
44 * and shifted to the MSB-side of DIR for BE systems.
45 */
46 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
47 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
48 | (((T) & 0x0F) << 16) \
49 | (((SR) & 0xFF) << 8)) << 8 \
50 : (OPCODE) \
51 | (((SR) & 0xFF) << 8) \
52 | (((T) & 0x0F) << 4))
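/* Worked example (editor's addition, little-endian branch only): with the RSR
 * opcode 0x030000, SR = 0x68 (DDR) and T = 3, the macro evaluates to
 * 0x030000 | (0x68 << 8) | (3 << 4) == 0x036830, the 24-bit encoding of
 * "rsr a3, ddr" that is queued throughout this file. */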
53
54 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
55 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
56 | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
57 | (((R) & 0x0F) << 8)) << 8 \
58 : (OPCODE) \
59 | (((ST) & 0xFF) << 4) \
60 | (((R) & 0x0F) << 12))
61
62 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
63 (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
64 | (((T) & 0x0F) << 8) \
65 | (((S) & 0x0F) << 4) \
66 | ((IMM4) & 0x0F)) << 16 \
67 : (OPCODE) \
68 | (((T) & 0x0F) << 4) \
69 | (((S) & 0x0F) << 8) \
70 | (((IMM4) & 0x0F) << 12))
71
72 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
73 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
74 | (((T) & 0x0F) << 16) \
75 | (((S) & 0x0F) << 12) \
76 | (((R) & 0x0F) << 8) \
77 | ((IMM8) & 0xFF)) << 8 \
78 : (OPCODE) \
79 | (((IMM8) & 0xFF) << 16) \
80 | (((R) & 0x0F) << 12) \
81 | (((S) & 0x0F) << 8) \
82 | (((T) & 0x0F) << 4))
83
84 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
85 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
86 | (((T) & 0x0F) << 16) \
87 | (((S) & 0x0F) << 12) \
88 | (((R) & 0x0F) << 8)) << 8 \
89 | ((IMM4) & 0x0F) \
90 : (OPCODE) \
91 | (((IMM4) & 0x0F) << 20) \
92 | (((R) & 0x0F) << 12) \
93 | (((S) & 0x0F) << 8) \
94 | (((T) & 0x0F) << 4))
95
96 /* Xtensa processor instruction opcodes
97 */
98 /* "Return From Debug Operation" to Normal */
99 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
100 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
101 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
102
103 /* Load to DDR register, increase addr register */
104 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
105 /* Store from DDR register, increase addr register */
106 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
107
108 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
109 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
110 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
111 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
112 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
113 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
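/* Worked example (editor's addition, little-endian branch only): XT_INS_L32I
 * with S = 3, T = 4, IMM8 = 0 expands to
 * 0x002002 | (0 << 16) | (0 << 12) | (3 << 8) | (4 << 4) == 0x002342,
 * i.e. "l32i a4, a3, 0", which loads A4 from the address held in A3. */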
114
115 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
116 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
117 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
118 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
119 /* Store 8-bit to A(S)+IMM8 from A(T) */
120 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
121
122 /* Cache Instructions */
123 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
124 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
125 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
126 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
127
128 /* Control Instructions */
129 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
130 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
131
132 /* Read Special Register */
133 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
134 /* Write Special Register */
135 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
136 /* Swap Special Register */
137 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
138
139 /* Rotate Window by (-8..7) */
140 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
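/* Editor's note: WINDOWBASE counts in units of four AR registers, so the
 * XT_INS_ROTW(xtensa, 4) issued by the register save/restore loops below
 * advances the visible a0..a15 window by 16 physical ARs per iteration. */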
141
142 /* Read User Register */
143 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
144 /* Write User Register */
145 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
146
147 /* Read Floating-Point Register */
148 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
149 /* Write Floating-Point Register */
150 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
151
152 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
153 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
154 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
155
156 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
157 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
158 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
159
160 #define XT_WATCHPOINTS_NUM_MAX 2
161
162 /* Special register number macro for DDR, PS, WB, A3, A4 registers.
163 * These get used a lot so making a shortcut is useful.
164 */
165 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
166 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
167 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
168 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
169 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
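/* Editor's sketch of the data-exchange pattern used throughout this driver:
 * DDR is the debug data register shared between the core and the OCD module.
 * Sampling an AR queues "wsr aN, ddr" followed by a NAR read of XDMREG_DDR;
 * loading an AR queues a NAR write of XDMREG_DDR followed by "rsr aN, ddr".
 * For example ("buf" is a placeholder):
 *
 *   xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
 *   xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, buf);
 */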
170
171 #define XT_PS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
172 #define XT_PC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
173 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
174 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
175
176 #define XT_SW_BREAKPOINTS_MAX_NUM 32
177 #define XT_HW_IBREAK_MAX_NUM 2
178 #define XT_HW_DBREAK_MAX_NUM 2
179
180 struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
181 XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
182 XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
183 XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
184 XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
185 XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
186 XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
187 XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
188 XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
189 XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
190 XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
191 XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
192 XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
193 XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
194 XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
195 XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
196 XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
197 XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
198 XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
199 XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
200 XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
201 XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
202 XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
203 XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
204 XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
205 XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
206 XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
207 XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
208 XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
209 XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
210 XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
211 XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
212 XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
213 XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
214 XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
215 XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
216 XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
217 XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
218 XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
219 XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
220 XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
221 XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
222 XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
223 XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
224 XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
225 XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
226 XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
227 XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
228 XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
229 XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
230 XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
231 XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
232 XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
233 XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
234 XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
235 XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
236 XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
237 XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
238 XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
239 XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
240 XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
241 XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
242 XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
243 XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
244 XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
245 XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
246 XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
247 XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
248 XT_MK_REG_DESC("ps", 0xE6, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
249 XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
250 XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
251 XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
252 XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
253 XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
254 XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
255 XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
256 XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
257 XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
258 XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
259 XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
260 XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
261 XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
262
263 /* WARNING: For these registers, regnum points to the
264 * index of the corresponding ARx registers, NOT to
265 * the processor register number! */
266 XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
267 XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
268 XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
269 XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
270 XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
271 XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
272 XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
273 XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
274 XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
275 XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
276 XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
277 XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
278 XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
279 XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
280 XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
281 XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
282 };
283
284 /**
285 * Types of memory used by the Xtensa target
286 */
287 enum xtensa_mem_region_type {
288 XTENSA_MEM_REG_IROM = 0x0,
289 XTENSA_MEM_REG_IRAM,
290 XTENSA_MEM_REG_DROM,
291 XTENSA_MEM_REG_DRAM,
292 XTENSA_MEM_REG_SRAM,
293 XTENSA_MEM_REG_SROM,
294 XTENSA_MEM_REGS_NUM
295 };
296
297 /* Register definition as union for list allocation */
298 union xtensa_reg_val_u {
299 xtensa_reg_val_t val;
300 uint8_t buf[4];
301 };
302
303 static const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
304 { .chrval = "E00", .intval = ERROR_FAIL },
305 { .chrval = "E01", .intval = ERROR_FAIL },
306 { .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
307 { .chrval = "E03", .intval = ERROR_FAIL },
308 };
309
310 /* Set to true for extra debug logging */
311 static const bool xtensa_extra_debug_log;
312
313 /**
314 * Gets a config for the specific mem type
315 */
316 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
317 struct xtensa *xtensa,
318 enum xtensa_mem_region_type type)
319 {
320 switch (type) {
321 case XTENSA_MEM_REG_IROM:
322 return &xtensa->core_config->irom;
323 case XTENSA_MEM_REG_IRAM:
324 return &xtensa->core_config->iram;
325 case XTENSA_MEM_REG_DROM:
326 return &xtensa->core_config->drom;
327 case XTENSA_MEM_REG_DRAM:
328 return &xtensa->core_config->dram;
329 case XTENSA_MEM_REG_SRAM:
330 return &xtensa->core_config->sram;
331 case XTENSA_MEM_REG_SROM:
332 return &xtensa->core_config->srom;
333 default:
334 return NULL;
335 }
336 }
337
338 /**
339 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
340 * for a given address
341 * Returns NULL if nothing found
342 */
343 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
344 const struct xtensa_local_mem_config *mem,
345 target_addr_t address)
346 {
347 for (unsigned int i = 0; i < mem->count; i++) {
348 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
349 if (address >= region->base && address < (region->base + region->size))
350 return region;
351 }
352 return NULL;
353 }
354
355 /**
356 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
357 * for a given address
358 * Returns NULL if nothing found
359 */
360 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
361 struct xtensa *xtensa,
362 target_addr_t address)
363 {
364 const struct xtensa_local_mem_region_config *result;
365 const struct xtensa_local_mem_config *mcgf;
366 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
367 mcgf = xtensa_get_mem_config(xtensa, mtype);
368 result = xtensa_memory_region_find(mcgf, address);
369 if (result)
370 return result;
371 }
372 return NULL;
373 }
374
375 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
376 const struct xtensa_local_mem_config *mem,
377 target_addr_t address)
378 {
379 if (!cache->size)
380 return false;
381 return xtensa_memory_region_find(mem, address);
382 }
383
384 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
385 {
386 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
387 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
388 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
389 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
390 }
391
392 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
393 {
394 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
395 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
396 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
397 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
398 }
399
400 static int xtensa_core_reg_get(struct reg *reg)
401 {
402 /* We don't need this because we read all registers on halt anyway. */
403 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
404 struct target *target = xtensa->target;
405
406 if (target->state != TARGET_HALTED)
407 return ERROR_TARGET_NOT_HALTED;
408 if (!reg->exist) {
409 if (strncmp(reg->name, "?0x", 3) == 0) {
410 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
411 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
412 return ERROR_OK;
413 }
414 return ERROR_COMMAND_ARGUMENT_INVALID;
415 }
416 return ERROR_OK;
417 }
418
419 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
420 {
421 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
422 struct target *target = xtensa->target;
423
424 assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
425 if (target->state != TARGET_HALTED)
426 return ERROR_TARGET_NOT_HALTED;
427
428 if (!reg->exist) {
429 if (strncmp(reg->name, "?0x", 3) == 0) {
430 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
431 LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
432 return ERROR_OK;
433 }
434 return ERROR_COMMAND_ARGUMENT_INVALID;
435 }
436
437 buf_cpy(buf, reg->value, reg->size);
438
439 if (xtensa->core_config->windowed) {
440 /* If the user updates a potential scratch register, track for conflicts */
441 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
442 if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
443 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
444 buf_get_u32(reg->value, 0, 32));
445 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
446 xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
447 xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
448 xtensa->scratch_ars[s].intval = true;
449 break;
450 }
451 }
452 }
453 reg->dirty = true;
454 reg->valid = true;
455
456 return ERROR_OK;
457 }
458
459 static const struct reg_arch_type xtensa_reg_type = {
460 .get = xtensa_core_reg_get,
461 .set = xtensa_core_reg_set,
462 };
463
464 /* Convert a register index that's indexed relative to windowbase, to the real address. */
465 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
466 enum xtensa_reg_id reg_idx,
467 int windowbase)
468 {
469 unsigned int idx;
470 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
471 idx = reg_idx - XT_REG_IDX_AR0;
472 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
473 idx = reg_idx - XT_REG_IDX_A0;
474 } else {
475 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
476 return -1;
477 }
478 return ((idx + windowbase * 4) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
479 }
480
481 static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
482 enum xtensa_reg_id reg_idx,
483 int windowbase)
484 {
485 return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
486 }
487
488 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
489 {
490 struct reg *reg_list = xtensa->core_cache->reg_list;
491 reg_list[reg_idx].dirty = true;
492 }
493
494 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
495 {
496 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0EXEC, ins);
497 }
498
499 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
500 {
501 const int max_oplen = 64; /* 8 DIRx regs: max width 64B */
502 if ((oplen > 0) && (oplen <= max_oplen)) {
503 uint8_t ops_padded[max_oplen];
504 memcpy(ops_padded, ops, oplen);
505 memset(ops_padded + oplen, 0, max_oplen - oplen);
506 unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
507 for (int32_t i = oplenw - 1; i > 0; i--)
508 xtensa_queue_dbg_reg_write(xtensa,
509 XDMREG_DIR0 + i,
510 target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t)*i]));
511 /* Write DIR0EXEC last */
512 xtensa_queue_dbg_reg_write(xtensa,
513 XDMREG_DIR0EXEC,
514 target_buffer_get_u32(xtensa->target, &ops_padded[0]));
515 }
516 }
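/* Editor's note: for a multi-word instruction the trailing words are written
 * to DIR1..DIR(n-1) first and the first word goes to DIR0EXEC last, which
 * triggers execution. E.g. a 12-byte opcode (oplen == 12, oplenw == 3)
 * produces writes to XDMREG_DIR0 + 2, XDMREG_DIR0 + 1 and then XDMREG_DIR0EXEC. */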
517
518 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
519 {
520 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
521 return dm->pwr_ops->queue_reg_write(dm, reg, data);
522 }
523
524 /* NOTE: Assumes A3 has already been saved */
525 static int xtensa_window_state_save(struct target *target, uint32_t *woe)
526 {
527 struct xtensa *xtensa = target_to_xtensa(target);
528 int woe_dis;
529 uint8_t woe_buf[4];
530
531 if (xtensa->core_config->windowed) {
532 /* Save PS (LX) and disable window overflow exceptions prior to AR save */
533 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_PS, XT_REG_A3));
534 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
535 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, woe_buf);
536 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
537 if (res != ERROR_OK) {
538 LOG_ERROR("Failed to read PS (%d)!", res);
539 return res;
540 }
541 xtensa_core_status_check(target);
542 *woe = buf_get_u32(woe_buf, 0, 32);
543 woe_dis = *woe & ~XT_PS_WOE_MSK;
544 LOG_DEBUG("Clearing PS.WOE (0x%08" PRIx32 " -> 0x%08" PRIx32 ")", *woe, woe_dis);
545 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe_dis);
546 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
547 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
548 }
549 return ERROR_OK;
550 }
551
552 /* NOTE: Assumes A3 has already been saved */
553 static void xtensa_window_state_restore(struct target *target, uint32_t woe)
554 {
555 struct xtensa *xtensa = target_to_xtensa(target);
556 if (xtensa->core_config->windowed) {
557 /* Restore window overflow exception state */
558 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe);
559 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
560 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
561 LOG_DEBUG("Restored PS.WOE (0x%08" PRIx32 ")", woe);
562 }
563 }
564
565 static bool xtensa_reg_is_readable(int flags, int cpenable)
566 {
567 if (flags & XT_REGF_NOREAD)
568 return false;
569 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
570 return false;
571 return true;
572 }
573
574 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
575 {
576 int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
577 if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
578 LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
579 memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
580 } else {
581 LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
582 memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
583 }
584 return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
585 }
586
587 static int xtensa_write_dirty_registers(struct target *target)
588 {
589 struct xtensa *xtensa = target_to_xtensa(target);
590 int res;
591 xtensa_reg_val_t regval, windowbase = 0;
592 bool scratch_reg_dirty = false, delay_cpenable = false;
593 struct reg *reg_list = xtensa->core_cache->reg_list;
594 unsigned int reg_list_size = xtensa->core_cache->num_regs;
595 bool preserve_a3 = false;
596 uint8_t a3_buf[4];
597 xtensa_reg_val_t a3 = 0, woe;
598
599 LOG_TARGET_DEBUG(target, "start");
600
601 /* We need to write the dirty registers in the cache list back to the processor.
602 * Start by writing the SFR/user registers. */
603 for (unsigned int i = 0; i < reg_list_size; i++) {
604 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
605 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
606 if (reg_list[i].dirty) {
607 if (rlist[ridx].type == XT_REG_SPECIAL ||
608 rlist[ridx].type == XT_REG_USER ||
609 rlist[ridx].type == XT_REG_FR) {
610 scratch_reg_dirty = true;
611 if (i == XT_REG_IDX_CPENABLE) {
612 delay_cpenable = true;
613 continue;
614 }
615 regval = xtensa_reg_get(target, i);
616 LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
617 reg_list[i].name,
618 rlist[ridx].reg_num,
619 regval);
620 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
621 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
622 if (reg_list[i].exist) {
623 unsigned int reg_num = rlist[ridx].reg_num;
624 if (rlist[ridx].type == XT_REG_USER) {
625 xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
626 } else if (rlist[ridx].type == XT_REG_FR) {
627 xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
628 } else {/*SFR */
629 if (reg_num == XT_PC_REG_NUM_VIRTUAL)
630 /* reg number of PC for debug interrupt
631 * depends on NDEBUGLEVEL */
632 reg_num =
633 (XT_PC_REG_NUM_BASE +
634 xtensa->core_config->debug.irq_level);
635 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
636 }
637 }
638 reg_list[i].dirty = false;
639 }
640 }
641 }
642 if (scratch_reg_dirty)
643 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
644 if (delay_cpenable) {
645 regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
646 LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
647 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
648 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
649 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
650 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
651 XT_REG_A3));
652 reg_list[XT_REG_IDX_CPENABLE].dirty = false;
653 }
654
655 preserve_a3 = (xtensa->core_config->windowed);
656 if (preserve_a3) {
657 /* Save (windowed) A3 for scratch use */
658 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
659 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
660 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
661 if (res != ERROR_OK)
662 return res;
663 xtensa_core_status_check(target);
664 a3 = buf_get_u32(a3_buf, 0, 32);
665 }
666
667 if (xtensa->core_config->windowed) {
668 res = xtensa_window_state_save(target, &woe);
669 if (res != ERROR_OK)
670 return res;
671 /* Grab the windowbase, we need it. */
672 windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
673 /* Check if there are mismatches between the ARx and corresponding Ax registers.
674 * When the user sets a register on a windowed config, xt-gdb may set the ARx
675 * register directly. Thus we take ARx as priority over Ax if both are dirty
676 * and it's unclear if the user set one over the other explicitly.
677 */
678 for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
679 unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
680 if (reg_list[i].dirty && reg_list[j].dirty) {
681 if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
682 bool show_warning = true;
683 if (i == XT_REG_IDX_A3)
684 show_warning = xtensa_scratch_regs_fixup(xtensa,
685 reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
686 else if (i == XT_REG_IDX_A4)
687 show_warning = xtensa_scratch_regs_fixup(xtensa,
688 reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
689 if (show_warning)
690 LOG_WARNING(
691 "Warning: Both A%d [0x%08" PRIx32
692 "] as well as its underlying physical register "
693 "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
694 i - XT_REG_IDX_A0,
695 buf_get_u32(reg_list[i].value, 0, 32),
696 j - XT_REG_IDX_AR0,
697 buf_get_u32(reg_list[j].value, 0, 32));
698 }
699 }
700 }
701 }
702
703 /* Write A0-A15. */
704 for (unsigned int i = 0; i < 16; i++) {
705 if (reg_list[XT_REG_IDX_A0 + i].dirty) {
706 regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
707 LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
708 xtensa_regs[XT_REG_IDX_A0 + i].name,
709 regval,
710 xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
711 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
712 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
713 reg_list[XT_REG_IDX_A0 + i].dirty = false;
714 if (i == 3) {
715 /* Avoid stomping A3 during restore at end of function */
716 a3 = regval;
717 }
718 }
719 }
720
721 if (xtensa->core_config->windowed) {
722 /* Now write AR registers */
723 for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
724 /* Write the 16 registers we can see */
725 for (unsigned int i = 0; i < 16; i++) {
726 if (i + j < xtensa->core_config->aregs_num) {
727 enum xtensa_reg_id realadr =
728 xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
729 windowbase);
730 /* Write back any dirty un-windowed registers */
731 if (reg_list[realadr].dirty) {
732 regval = xtensa_reg_get(target, realadr);
733 LOG_TARGET_DEBUG(
734 target,
735 "Writing back reg %s value %08" PRIX32 ", num =%i",
736 xtensa_regs[realadr].name,
737 regval,
738 xtensa_regs[realadr].reg_num);
739 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
740 xtensa_queue_exec_ins(xtensa,
741 XT_INS_RSR(xtensa, XT_SR_DDR,
742 xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
743 reg_list[realadr].dirty = false;
744 if ((i + j) == 3)
745 /* Avoid stomping AR during A3 restore at end of function */
746 a3 = regval;
747 }
748 }
749 }
750 /* Now rotate the window so we'll see the next 16 registers.
751 * The final rotate will wrap around, leaving us in the
752 * state we were in. */
753 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
754 }
755
756 xtensa_window_state_restore(target, woe);
757
758 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
759 xtensa->scratch_ars[s].intval = false;
760 }
761
762 if (preserve_a3) {
763 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
764 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
765 }
766
767 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
768 xtensa_core_status_check(target);
769
770 return res;
771 }
772
773 static inline bool xtensa_is_stopped(struct target *target)
774 {
775 struct xtensa *xtensa = target_to_xtensa(target);
776 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
777 }
778
779 int xtensa_examine(struct target *target)
780 {
781 struct xtensa *xtensa = target_to_xtensa(target);
782 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
783
784 LOG_DEBUG("coreid = %d", target->coreid);
785
786 if (xtensa->core_config->core_type == XT_UNDEF) {
787 LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
788 return ERROR_FAIL;
789 }
790
791 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
792 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
793 xtensa_dm_queue_enable(&xtensa->dbg_mod);
794 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
795 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
796 if (res != ERROR_OK)
797 return res;
798 if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
799 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
800 return ERROR_TARGET_FAILURE;
801 }
802 LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
803 if (!target_was_examined(target))
804 target_set_examined(target);
805 xtensa_smpbreak_write(xtensa, xtensa->smp_break);
806 return ERROR_OK;
807 }
808
809 int xtensa_wakeup(struct target *target)
810 {
811 struct xtensa *xtensa = target_to_xtensa(target);
812 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
813
814 if (xtensa->reset_asserted)
815 cmd |= PWRCTL_CORERESET(xtensa);
816 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
817 /* TODO: can we join this with the write above? */
818 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
819 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
820 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
821 }
822
823 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
824 {
825 uint32_t dsr_data = 0x00110000;
826 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
827 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
828 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
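/* Editor's note: "clear" holds whichever of the four break/stall enables are
 * absent from "set", while OCDDCR_ENABLEOCD is excluded from the clear mask.
 * E.g. set == OCDDCR_BREAKINEN yields
 * clear == OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN. */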
829
830 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
831 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, set | OCDDCR_ENABLEOCD);
832 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, clear);
833 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DSR, dsr_data);
834 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
835 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
836 }
837
838 int xtensa_smpbreak_set(struct target *target, uint32_t set)
839 {
840 struct xtensa *xtensa = target_to_xtensa(target);
841 int res = ERROR_OK;
842
843 xtensa->smp_break = set;
844 if (target_was_examined(target))
845 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
846 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
847 return res;
848 }
849
850 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
851 {
852 uint8_t dcr_buf[sizeof(uint32_t)];
853
854 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DCRSET, dcr_buf);
855 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
856 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
857 *val = buf_get_u32(dcr_buf, 0, 32);
858
859 return res;
860 }
861
862 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
863 {
864 struct xtensa *xtensa = target_to_xtensa(target);
865 *val = xtensa->smp_break;
866 return ERROR_OK;
867 }
868
869 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
870 {
871 return buf_get_u32(reg->value, 0, 32);
872 }
873
874 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
875 {
876 buf_set_u32(reg->value, 0, 32, value);
877 reg->dirty = true;
878 }
879
880 int xtensa_core_status_check(struct target *target)
881 {
882 struct xtensa *xtensa = target_to_xtensa(target);
883 int res, needclear = 0;
884
885 xtensa_dm_core_status_read(&xtensa->dbg_mod);
886 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
887 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
888 if (dsr & OCDDSR_EXECBUSY) {
889 if (!xtensa->suppress_dsr_errors)
890 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
891 needclear = 1;
892 }
893 if (dsr & OCDDSR_EXECEXCEPTION) {
894 if (!xtensa->suppress_dsr_errors)
895 LOG_TARGET_ERROR(target,
896 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
897 dsr);
898 needclear = 1;
899 }
900 if (dsr & OCDDSR_EXECOVERRUN) {
901 if (!xtensa->suppress_dsr_errors)
902 LOG_TARGET_ERROR(target,
903 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
904 dsr);
905 needclear = 1;
906 }
907 if (needclear) {
908 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
909 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
910 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
911 LOG_TARGET_ERROR(target, "clearing DSR failed!");
912 return ERROR_FAIL;
913 }
914 return ERROR_OK;
915 }
916
917 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
918 {
919 struct xtensa *xtensa = target_to_xtensa(target);
920 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
921 return xtensa_reg_get_value(reg);
922 }
923
924 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
925 {
926 struct xtensa *xtensa = target_to_xtensa(target);
927 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
928 if (xtensa_reg_get_value(reg) == value)
929 return;
930 xtensa_reg_set_value(reg, value);
931 }
932
933 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
934 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
935 {
936 struct xtensa *xtensa = target_to_xtensa(target);
937 uint32_t windowbase = (xtensa->core_config->windowed ?
938 xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE) : 0);
939 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
940 xtensa_reg_set(target, a_idx, value);
941 xtensa_reg_set(target, ar_idx, value);
942 }
943
944 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
945 uint32_t xtensa_cause_get(struct target *target)
946 {
947 return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
948 }
949
950 void xtensa_cause_clear(struct target *target)
951 {
952 struct xtensa *xtensa = target_to_xtensa(target);
953 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
954 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
955 }
956
957 int xtensa_assert_reset(struct target *target)
958 {
959 struct xtensa *xtensa = target_to_xtensa(target);
960
961 LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
962 target->state = TARGET_RESET;
963 xtensa_queue_pwr_reg_write(xtensa,
964 XDMREG_PWRCTL,
965 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
966 PWRCTL_COREWAKEUP(xtensa) | PWRCTL_CORERESET(xtensa));
967 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
968 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
969 if (res != ERROR_OK)
970 return res;
971 xtensa->reset_asserted = true;
972 return res;
973 }
974
975 int xtensa_deassert_reset(struct target *target)
976 {
977 struct xtensa *xtensa = target_to_xtensa(target);
978
979 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
980 if (target->reset_halt)
981 xtensa_queue_dbg_reg_write(xtensa,
982 XDMREG_DCRSET,
983 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
984 xtensa_queue_pwr_reg_write(xtensa,
985 XDMREG_PWRCTL,
986 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
987 PWRCTL_COREWAKEUP(xtensa));
988 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
989 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
990 if (res != ERROR_OK)
991 return res;
992 target->state = TARGET_RUNNING;
993 xtensa->reset_asserted = false;
994 return res;
995 }
996
997 int xtensa_soft_reset_halt(struct target *target)
998 {
999 LOG_TARGET_DEBUG(target, "begin");
1000 return xtensa_assert_reset(target);
1001 }
1002
1003 int xtensa_fetch_all_regs(struct target *target)
1004 {
1005 struct xtensa *xtensa = target_to_xtensa(target);
1006 struct reg *reg_list = xtensa->core_cache->reg_list;
1007 unsigned int reg_list_size = xtensa->core_cache->num_regs;
1008 xtensa_reg_val_t cpenable = 0, windowbase = 0, a3;
1009 uint32_t woe;
1010 uint8_t a3_buf[4];
1011 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1012
1013 union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
1014 if (!regvals) {
1015 LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
1016 return ERROR_FAIL;
1017 }
1018 union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
1019 if (!dsrs) {
1020 LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
1021 free(regvals);
1022 return ERROR_FAIL;
1023 }
1024
1025 LOG_TARGET_DEBUG(target, "start");
1026
1027 /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1028 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1029 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
1030 int res = xtensa_window_state_save(target, &woe);
1031 if (res != ERROR_OK)
1032 goto xtensa_fetch_all_regs_done;
1033
1034 /* Assume the CPU has just halted. We now want to fill the register cache with all the
1035 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1036 * in one go, then sort everything out from the regvals variable. */
1037
1038 /* Start out with AREGS; we can reach those immediately. Grab them 16 registers at a time. */
1039 for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
1040 /* Grab the 16 registers we can see */
1041 for (unsigned int i = 0; i < 16; i++) {
1042 if (i + j < xtensa->core_config->aregs_num) {
1043 xtensa_queue_exec_ins(xtensa,
1044 XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
1045 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
1046 regvals[XT_REG_IDX_AR0 + i + j].buf);
1047 if (debug_dsrs)
1048 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR,
1049 dsrs[XT_REG_IDX_AR0 + i + j].buf);
1050 }
1051 }
1052 if (xtensa->core_config->windowed)
1053 /* Now rotate the window so we'll see the next 16 registers.
1054 * The final rotate will wrap around, leaving us in the
1055 * state we were in. */
1056 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
1057 }
1058 xtensa_window_state_restore(target, woe);
1059
1060 if (xtensa->core_config->coproc) {
1061 /* As the very first thing after AREGS, go grab CPENABLE */
1062 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1063 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1064 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
1065 }
1066 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1067 if (res != ERROR_OK) {
1068 LOG_ERROR("Failed to read ARs (%d)!", res);
1069 goto xtensa_fetch_all_regs_done;
1070 }
1071 xtensa_core_status_check(target);
1072
1073 a3 = buf_get_u32(a3_buf, 0, 32);
1074
1075 if (xtensa->core_config->coproc) {
1076 cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);
1077
1078 /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1079 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
1080 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1081 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1082
1083 /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1084 LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
1085 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
1086 }
1087 /* We're now free to use any of A0-A15 as scratch registers
1088 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1089 for (unsigned int i = 0; i < reg_list_size; i++) {
1090 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1091 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1092 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1093 bool reg_fetched = true;
1094 unsigned int reg_num = rlist[ridx].reg_num;
1095 switch (rlist[ridx].type) {
1096 case XT_REG_USER:
1097 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
1098 break;
1099 case XT_REG_FR:
1100 xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
1101 break;
1102 case XT_REG_SPECIAL:
1103 if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
1104 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1105 reg_num = (XT_PC_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
1106 } else if (reg_num == xtensa_regs[XT_REG_IDX_PS].reg_num) {
1107 /* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
1108 reg_num = (XT_PS_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
1109 } else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
1110 /* CPENABLE already read/updated; don't re-read */
1111 reg_fetched = false;
1112 break;
1113 }
1114 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
1115 break;
1116 default:
1117 reg_fetched = false;
1118 }
1119 if (reg_fetched) {
1120 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1121 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1122 if (debug_dsrs)
1123 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
1124 }
1125 }
1126 }
1127 /* Ok, send the whole mess to the CPU. */
1128 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1129 if (res != ERROR_OK) {
1130 LOG_ERROR("Failed to fetch SFR/user regs!");
1131 goto xtensa_fetch_all_regs_done;
1132 }
1133 xtensa_core_status_check(target);
1134
1135 if (debug_dsrs) {
1136 /* DSR checking: follows order in which registers are requested. */
1137 for (unsigned int i = 0; i < reg_list_size; i++) {
1138 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1139 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1140 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
1141 (rlist[ridx].type != XT_REG_DEBUG) &&
1142 (rlist[ridx].type != XT_REG_RELGEN) &&
1143 (rlist[ridx].type != XT_REG_TIE) &&
1144 (rlist[ridx].type != XT_REG_OTHER)) {
1145 if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
1146 LOG_ERROR("Exception reading %s!", reg_list[i].name);
1147 res = ERROR_FAIL;
1148 goto xtensa_fetch_all_regs_done;
1149 }
1150 }
1151 }
1152 }
1153
1154 if (xtensa->core_config->windowed)
1155 /* We need the windowbase to decode the general addresses. */
1156 windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE].buf, 0, 32);
1157 /* Decode the result and update the cache. */
1158 for (unsigned int i = 0; i < reg_list_size; i++) {
1159 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1160 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1161 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1162 if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
1163 /* The 64-value general register set is read from (windowbase) on down.
1164 * We need to get the real register address by subtracting windowbase and
1165 * wrapping around. */
1166 enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
1167 windowbase);
1168 buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
1169 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1170 buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
1171 if (xtensa_extra_debug_log) {
1172 xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
1173 LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
1174 }
1175 } else {
1176 xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
1177 bool is_dirty = (i == XT_REG_IDX_CPENABLE);
1178 if (xtensa_extra_debug_log)
1179 LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
1180 xtensa_reg_set(target, i, regval);
1181 reg_list[i].dirty = is_dirty; /*always do this _after_ xtensa_reg_set! */
1182 }
1183 reg_list[i].valid = true;
1184 } else {
1185 if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
1186 /* Report non-readable registers as all-zero but valid */
1187 reg_list[i].valid = true;
1188 xtensa_reg_set(target, i, 0);
1189 } else {
1190 reg_list[i].valid = false;
1191 }
1192 }
1193 }
1194
1195 if (xtensa->core_config->windowed) {
1196 /* We have used A3 as a scratch register.
1197 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag it for write-back.
1198 */
1199 enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
1200 xtensa_reg_set(target, ar3_idx, a3);
1201 xtensa_mark_register_dirty(xtensa, ar3_idx);
1202
1203 /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1204 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
1205 enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
1206 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
1207 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
1208 xtensa->scratch_ars[s].intval = false;
1209 }
1210
1211 /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1212 xtensa_reg_set(target, XT_REG_IDX_A3, a3);
1213 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1214 xtensa->regs_fetched = true;
1215 xtensa_fetch_all_regs_done:
1216 free(regvals);
1217 free(dsrs);
1218 return res;
1219 }
1220
1221 int xtensa_get_gdb_reg_list(struct target *target,
1222 struct reg **reg_list[],
1223 int *reg_list_size,
1224 enum target_register_class reg_class)
1225 {
1226 struct xtensa *xtensa = target_to_xtensa(target);
1227 unsigned int num_regs;
1228
1229 if (reg_class == REG_CLASS_GENERAL) {
1230 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1231 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1232 return ERROR_FAIL;
1233 }
1234 num_regs = xtensa->genpkt_regs_num;
1235 } else {
1236 /* Determine whether to return a contiguous or sparse register map */
1237 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1238 }
1239
1240 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1241
1242 *reg_list = calloc(num_regs, sizeof(struct reg *));
1243 if (!*reg_list)
1244 return ERROR_FAIL;
1245
1246 *reg_list_size = num_regs;
1247 if (xtensa->regmap_contiguous) {
1248 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1249 for (unsigned int i = 0; i < num_regs; i++)
1250 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1251 return ERROR_OK;
1252 }
1253
1254 for (unsigned int i = 0; i < num_regs; i++)
1255 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1256 unsigned int k = 0;
1257 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1258 if (xtensa->core_cache->reg_list[i].exist) {
1259 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1260 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1261 int sparse_idx = rlist[ridx].dbreg_num;
1262 if (i == XT_REG_IDX_PS) {
1263 if (xtensa->eps_dbglevel_idx == 0) {
1264 LOG_ERROR("eps_dbglevel_idx not set\n");
1265 return ERROR_FAIL;
1266 }
1267 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1268 if (xtensa_extra_debug_log)
1269 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1270 sparse_idx, xtensa->core_config->debug.irq_level,
1271 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1272 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1273 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1274 } else {
1275 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1276 }
1277 if (i == XT_REG_IDX_PC)
1278 /* Make a duplicate copy of PC for external access */
1279 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1280 k++;
1281 }
1282 }
1283
1284 if (k == num_regs)
1285 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1286
1287 return ERROR_OK;
1288 }
1289
1290 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1291 {
1292 struct xtensa *xtensa = target_to_xtensa(target);
1293 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1294 xtensa->core_config->mmu.dtlb_entries_count > 0;
1295 return ERROR_OK;
1296 }
1297
1298 int xtensa_halt(struct target *target)
1299 {
1300 struct xtensa *xtensa = target_to_xtensa(target);
1301
1302 LOG_TARGET_DEBUG(target, "start");
1303 if (target->state == TARGET_HALTED) {
1304 LOG_TARGET_DEBUG(target, "target was already halted");
1305 return ERROR_OK;
1306 }
1307 /* First we have to read dsr and check if the target stopped */
1308 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1309 if (res != ERROR_OK) {
1310 LOG_TARGET_ERROR(target, "Failed to read core status!");
1311 return res;
1312 }
1313 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1314 if (!xtensa_is_stopped(target)) {
1315 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1316 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1317 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1318 if (res != ERROR_OK)
1319 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1320 }
1321
1322 return res;
1323 }
1324
1325 int xtensa_prepare_resume(struct target *target,
1326 int current,
1327 target_addr_t address,
1328 int handle_breakpoints,
1329 int debug_execution)
1330 {
1331 struct xtensa *xtensa = target_to_xtensa(target);
1332 uint32_t bpena = 0;
1333
1334 LOG_TARGET_DEBUG(target,
1335 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i",
1336 current,
1337 address,
1338 handle_breakpoints,
1339 debug_execution);
1340
1341 if (target->state != TARGET_HALTED) {
1342 LOG_TARGET_WARNING(target, "target not halted");
1343 return ERROR_TARGET_NOT_HALTED;
1344 }
1345
1346 if (address && !current) {
1347 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1348 } else {
1349 uint32_t cause = xtensa_cause_get(target);
1350 LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%" PRIx32 " (watchpoint 0x%" PRIx32 ") (break 0x%" PRIx32 ")",
1351 cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1352 if (cause & DEBUGCAUSE_DB)
1353 /* We stopped due to a watchpoint. We can't just resume executing
1354 * the instruction again because that would trigger the watchpoint
1355 * again. To fix this, we single-step, which ignores
1356 * watchpoints. */
1357 xtensa_do_step(target, current, address, handle_breakpoints);
1358 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1359 /* We stopped due to a break instruction. We can't just resume
1360 * executing the instruction again because that would trigger the
1361 * break again. To fix this, we single-step, which ignores
1362 * break instructions. */
1363 xtensa_do_step(target, current, address, handle_breakpoints);
1364 }
1365
1366 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1367 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1368 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1369 if (xtensa->hw_brps[slot]) {
1370 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1371 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1372 bpena |= BIT(slot);
1373 }
1374 }
1375 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1376
1377 /* Here we write all registers to the targets */
1378 int res = xtensa_write_dirty_registers(target);
1379 if (res != ERROR_OK)
1380 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1381 return res;
1382 }
1383
1384 int xtensa_do_resume(struct target *target)
1385 {
1386 struct xtensa *xtensa = target_to_xtensa(target);
1387
1388 LOG_TARGET_DEBUG(target, "start");
1389
1390 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1391 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1392 if (res != ERROR_OK) {
1393 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1394 return res;
1395 }
1396 xtensa_core_status_check(target);
1397 return ERROR_OK;
1398 }
1399
1400 int xtensa_resume(struct target *target,
1401 int current,
1402 target_addr_t address,
1403 int handle_breakpoints,
1404 int debug_execution)
1405 {
1406 LOG_TARGET_DEBUG(target, "start");
1407 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1408 if (res != ERROR_OK) {
1409 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1410 return res;
1411 }
1412 res = xtensa_do_resume(target);
1413 if (res != ERROR_OK) {
1414 LOG_TARGET_ERROR(target, "Failed to resume!");
1415 return res;
1416 }
1417
1418 target->debug_reason = DBG_REASON_NOTHALTED;
1419 if (!debug_execution)
1420 target->state = TARGET_RUNNING;
1421 else
1422 target->state = TARGET_DEBUG_RUNNING;
1423
1424 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1425
1426 return ERROR_OK;
1427 }
1428
1429 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1430 {
1431 struct xtensa *xtensa = target_to_xtensa(target);
1432 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1433 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1434 if (err != ERROR_OK)
1435 return false;
1436
1437 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1438 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1439 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1440 return true;
1441
1442 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1443 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1444 return true;
1445
1446 return false;
1447 }
1448
1449 int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1450 {
1451 struct xtensa *xtensa = target_to_xtensa(target);
1452 int res;
1453 const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1454 xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
1455 xtensa_reg_val_t icountlvl, cause;
1456 xtensa_reg_val_t oldps, oldpc, cur_pc;
1457 bool ps_lowered = false;
1458
1459 LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1460 current, address, handle_breakpoints);
1461
1462 if (target->state != TARGET_HALTED) {
1463 LOG_TARGET_WARNING(target, "target not halted");
1464 return ERROR_TARGET_NOT_HALTED;
1465 }
1466
1467 if (xtensa->eps_dbglevel_idx == 0) {
1468 LOG_ERROR("eps_dbglevel_idx not set");
1469 return ERROR_FAIL;
1470 }
1471
1472 /* Save old ps (EPS[dbglvl] on LX), pc */
1473 oldps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
1474 oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);
1475
1476 cause = xtensa_cause_get(target);
1477 LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1478 oldps,
1479 oldpc,
1480 cause,
1481 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1482 if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1483 /* handle hard-coded SW breakpoints (e.g. syscalls) */
1484 LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1485 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1486 /* pretend that we have stepped */
1487 if (cause & DEBUGCAUSE_BI)
1488 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1489 else
1490 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1491 return ERROR_OK;
1492 }
1493
1494 /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1495 * at which the instructions are to be counted while stepping.
1496 *
1497 * For example, if we need to step by 2 instructions, and an interrupt occurs
1498 * in between, the processor will trigger the interrupt and halt after the 2nd
1499 * instruction within the interrupt vector and/or handler.
1500 *
1501 * However, sometimes we don't want the interrupt handlers to be executed at all
1502 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1503 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1504 * code from being counted during stepping. Note that C exception handlers must
1505 * run at level 0 and hence will be counted and stepped into, should one occur.
1506 *
1507 * TODO: Certain instructions should never be single-stepped and should instead
1508 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1509 * RFI >= DBGLEVEL.
1510 */
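/* Worked example of the computation below: code halted at PS.INTLEVEL 0 with
 * debug.irq_level 6. XT_STEPPING_ISR_OFF yields ICOUNTLEVEL = MIN(0 + 1, 6) = 1, so only
 * level-0 (non-ISR) instructions are counted; XT_STEPPING_ISR_ON uses ICOUNTLEVEL = 6,
 * so instructions in lower-priority interrupt handlers are counted and stepped into too. */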
1511 if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
1512 if (!xtensa->core_config->high_irq.enabled) {
1513 LOG_TARGET_WARNING(
1514 target,
1515 "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1516 return ERROR_FAIL;
1517 }
1518 /* Update ICOUNTLEVEL accordingly */
1519 icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
1520 } else {
1521 icountlvl = xtensa->core_config->debug.irq_level;
1522 }
1523
1524 if (cause & DEBUGCAUSE_DB) {
1525 /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1526 * that would trigger the watchpoint again. To fix this, we remove the watchpoints, single-step and
1527 * re-enable the watchpoints. */
1528 LOG_TARGET_DEBUG(
1529 target,
1530 "Single-stepping to get past instruction that triggered the watchpoint...");
1531 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1532 /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1533 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1534 dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
1535 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
1536 }
1537 }
1538
1539 if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
1540 /* handle normal SW breakpoint */
1541 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1542 if ((oldps & 0xf) >= icountlvl) {
1543 /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1544 ps_lowered = true;
1545 uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
1546 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
1547 LOG_TARGET_DEBUG(target,
1548 "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
1549 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1550 newps,
1551 oldps);
1552 }
1553 do {
1554 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
1555 xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1556
1557 /* Now ICOUNT is set, we can resume as if we were going to run */
1558 res = xtensa_prepare_resume(target, current, address, 0, 0);
1559 if (res != ERROR_OK) {
1560 LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1561 return res;
1562 }
1563 res = xtensa_do_resume(target);
1564 if (res != ERROR_OK) {
1565 LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1566 return res;
1567 }
1568
1569 /* Wait for stepping to complete */
1570 long long start = timeval_ms();
1571 while (timeval_ms() < start + 500) {
1572 /* Do not use target_poll here; it also triggers other things. Just manually read the DSR
1573 * until stepping is complete. */
1574 usleep(1000);
1575 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1576 if (res != ERROR_OK) {
1577 LOG_TARGET_ERROR(target, "Failed to read core status!");
1578 return res;
1579 }
1580 if (xtensa_is_stopped(target))
1581 break;
1582 usleep(1000);
1583 }
1584 LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1585 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1586 if (!xtensa_is_stopped(target)) {
1587 LOG_TARGET_WARNING(
1588 target,
1589 "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1590 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1591 target->debug_reason = DBG_REASON_NOTHALTED;
1592 target->state = TARGET_RUNNING;
1593 return ERROR_FAIL;
1594 }
1595
1596 xtensa_fetch_all_regs(target);
1597 cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1598
1599 LOG_TARGET_DEBUG(target,
1600 "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1601 xtensa_reg_get(target, XT_REG_IDX_PS),
1602 cur_pc,
1603 xtensa_cause_get(target),
1604 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1605
1606 /* Do not step into WindowOverflow if ISRs are masked.
1607 If we stop at a breakpoint inside WindowOverflow with ISRs masked and
1608 then try to step, this code steps us out of that handler. */
1609 if (xtensa->core_config->windowed &&
1610 xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
1611 xtensa_pc_in_winexc(target, cur_pc)) {
1612 /* isrmask = on, need to step out of the window exception handler */
1613 LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1614 oldpc = cur_pc;
1615 address = oldpc + 3;
1616 continue;
1617 }
1618
1619 if (oldpc == cur_pc)
1620 LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1621 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1622 else
1623 LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1624 break;
1625 } while (true);
1626
1627 target->debug_reason = DBG_REASON_SINGLESTEP;
1628 target->state = TARGET_HALTED;
1629 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1630 LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1631
1632 if (cause & DEBUGCAUSE_DB) {
1633 LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1634 /* Restore the DBREAKCx registers */
1635 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1636 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
1637 }
1638
1639 /* Restore int level */
1640 if (ps_lowered) {
1641 LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
1642 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1643 oldps);
1644 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
1645 }
1646
1647 /* write ICOUNTLEVEL back to zero */
1648 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
1649 /* TODO: can we skip writing dirty registers and re-fetching them? */
1650 res = xtensa_write_dirty_registers(target);
1651 xtensa_fetch_all_regs(target);
1652 return res;
1653 }
1654
1655 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1656 {
1657 return xtensa_do_step(target, current, address, handle_breakpoints);
1658 }
1659
1660 /**
1661 * Returns true if two ranges are overlapping
1662 */
1663 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1664 target_addr_t r1_end,
1665 target_addr_t r2_start,
1666 target_addr_t r2_end)
1667 {
1668 if ((r2_start >= r1_start) && (r2_start < r1_end))
1669 return true; /* r2_start is in r1 region */
1670 if ((r2_end > r1_start) && (r2_end <= r1_end))
1671 return true; /* r2_end is in r1 region */
1672 return false;
1673 }
1674
1675 /**
1676 * Returns the size of the overlapping region of two ranges.
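 * For example, the ranges [0x0, 0x100) and [0x80, 0x200) overlap by 0x80 bytes.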
1677 */
1678 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1679 target_addr_t r1_end,
1680 target_addr_t r2_start,
1681 target_addr_t r2_end)
1682 {
1683 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1684 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1685 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1686 return ov_end - ov_start;
1687 }
1688 return 0;
1689 }
1690
1691 /**
1692 * Check that the address range lies within known memory regions and permits the requested access mode
1693 */
1694 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1695 {
1696 target_addr_t adr_pos = address; /* address cursor, starting at the beginning of the range */
1697 target_addr_t adr_end = address + size; /* region end */
1698 target_addr_t overlap_size;
1699 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1700
1701 while (adr_pos < adr_end) {
1702 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1703 if (!cm) /* address does not belong to any configured region */
1704 return false;
1705 if ((cm->access & access) != access) /* access check */
1706 return false;
1707 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1708 assert(overlap_size != 0);
1709 adr_pos += overlap_size;
1710 }
1711 return true;
1712 }
1713
1714 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1715 {
1716 struct xtensa *xtensa = target_to_xtensa(target);
1717 /* We are going to read memory in 32-bit increments. This may not be what the calling
1718 * function expects, so we may need to allocate a temp buffer and read into that first. */
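/* For example, a 3-byte read at an address ending in ...2 is widened to two aligned words;
 * the requested bytes are later copied out of the temp buffer at offset (address & 3). */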
1719 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1720 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1721 target_addr_t adr = addrstart_al;
1722 uint8_t *albuff;
1723 bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1724
1725 if (target->state != TARGET_HALTED) {
1726 LOG_TARGET_WARNING(target, "target not halted");
1727 return ERROR_TARGET_NOT_HALTED;
1728 }
1729
1730 if (!xtensa->permissive_mode) {
1731 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1732 XT_MEM_ACCESS_READ)) {
1733 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1734 return ERROR_FAIL;
1735 }
1736 }
1737
1738 if (addrstart_al == address && addrend_al == address + (size * count)) {
1739 albuff = buffer;
1740 } else {
1741 albuff = malloc(addrend_al - addrstart_al);
1742 if (!albuff) {
1743 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1744 addrend_al - addrstart_al);
1745 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1746 }
1747 }
1748
1749 /* We're going to use A3 here */
1750 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1751 /* Write start address to A3 */
1752 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1753 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1754 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1755 if (xtensa->probe_lsddr32p != 0) {
1756 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
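/* LDDR32.P loads the word at A3 into DDR and increments A3. Each DDREXEC read below returns
 * the current DDR value and re-executes that instruction to fetch the next word; the final
 * word is read via plain DDR so no extra load past the requested range is issued. */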
1757 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1758 xtensa_queue_dbg_reg_read(xtensa,
1759 (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
1760 &albuff[i]);
1761 } else {
1762 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1763 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1764 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1765 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
1766 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[i]);
1767 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1768 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1769 }
1770 }
1771 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1772 if (res == ERROR_OK) {
1773 bool prev_suppress = xtensa->suppress_dsr_errors;
1774 xtensa->suppress_dsr_errors = true;
1775 res = xtensa_core_status_check(target);
1776 if (xtensa->probe_lsddr32p == -1)
1777 xtensa->probe_lsddr32p = 1;
1778 xtensa->suppress_dsr_errors = prev_suppress;
1779 }
1780 if (res != ERROR_OK) {
1781 if (xtensa->probe_lsddr32p != 0) {
1782 /* Disable fast memory access instructions and retry before reporting an error */
1783 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1784 xtensa->probe_lsddr32p = 0;
1785 res = xtensa_read_memory(target, address, size, count, buffer);
1786 bswap = false;
1787 } else {
1788 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
1789 count * size, address);
1790 }
1791 }
1792
1793 if (bswap)
1794 buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
1795 if (albuff != buffer) {
1796 memcpy(buffer, albuff + (address & 3), (size * count));
1797 free(albuff);
1798 }
1799
1800 return res;
1801 }
1802
1803 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1804 {
1805 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1806 return xtensa_read_memory(target, address, 1, count, buffer);
1807 }
1808
1809 int xtensa_write_memory(struct target *target,
1810 target_addr_t address,
1811 uint32_t size,
1812 uint32_t count,
1813 const uint8_t *buffer)
1814 {
1815 /* This memory write function can be handed almost anything, from aligned
1816 * uint32 writes to unaligned uint8 accesses. However, Xtensa memory does not
1817 * always accept anything other than aligned uint32 writes. That is why we
1818 * convert everything into aligned uint32 writes here. */
1819 struct xtensa *xtensa = target_to_xtensa(target);
1820 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1821 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1822 target_addr_t adr = addrstart_al;
1823 int res;
1824 uint8_t *albuff;
1825 bool fill_head_tail = false;
1826
1827 if (target->state != TARGET_HALTED) {
1828 LOG_TARGET_WARNING(target, "target not halted");
1829 return ERROR_TARGET_NOT_HALTED;
1830 }
1831
1832 if (!xtensa->permissive_mode) {
1833 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
1834 LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
1835 return ERROR_FAIL;
1836 }
1837 }
1838
1839 if (size == 0 || count == 0 || !buffer)
1840 return ERROR_COMMAND_SYNTAX_ERROR;
1841
1842 /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
1843 if (addrstart_al == address && addrend_al == address + (size * count)) {
1844 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1845 /* Need a buffer for byte-swapping */
1846 albuff = malloc(addrend_al - addrstart_al);
1847 else
1848 /* We discard the const here because albuff can also be non-const */
1849 albuff = (uint8_t *)buffer;
1850 } else {
1851 fill_head_tail = true;
1852 albuff = malloc(addrend_al - addrstart_al);
1853 }
1854 if (!albuff) {
1855 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1856 addrend_al - addrstart_al);
1857 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1858 }
1859
1860 /* We're going to use A3 here */
1861 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1862
1863 /* If we're using a temp aligned buffer, we need to fill in its head and/or tail portion. */
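/* Example: writing 2 bytes at an address ending in ...3 touches two aligned words; both the
 * first and the last word are read back below so the surrounding bytes are preserved. */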
1864 if (fill_head_tail) {
1865 /* See if we need to read the first and/or last word. */
1866 if (address & 3) {
1867 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1868 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1869 if (xtensa->probe_lsddr32p == 1) {
1870 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1871 } else {
1872 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1873 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1874 }
1875 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[0]);
1876 }
1877 if ((address + (size * count)) & 3) {
1878 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
1879 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1880 if (xtensa->probe_lsddr32p == 1) {
1881 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1882 } else {
1883 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1884 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1885 }
1886 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
1887 &albuff[addrend_al - addrstart_al - 4]);
1888 }
1889 /* Grab bytes */
1890 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1891 if (res != ERROR_OK) {
1892 LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
1893 if (albuff != buffer)
1894 free(albuff);
1895 return res;
1896 }
1897 xtensa_core_status_check(target);
1898 if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
1899 bool swapped_w0 = false;
1900 if (address & 3) {
1901 buf_bswap32(&albuff[0], &albuff[0], 4);
1902 swapped_w0 = true;
1903 }
1904 if ((address + (size * count)) & 3) {
1905 if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
1906 /* Don't double-swap if buffer start/end are within the same word */
1907 } else {
1908 buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
1909 &albuff[addrend_al - addrstart_al - 4], 4);
1910 }
1911 }
1912 }
1913 /* Copy data to be written into the aligned buffer (in host-endianness) */
1914 memcpy(&albuff[address & 3], buffer, size * count);
1915 /* Now we can write albuff in aligned uint32s. */
1916 }
1917
1918 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1919 buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
1920
1921 /* Write start address to A3 */
1922 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1923 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1924 /* Write the aligned buffer */
1925 if (xtensa->probe_lsddr32p != 0) {
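/* Fast path: the first word is placed in DDR and stored with SDDR32.P (which also
 * increments A3); each subsequent DDREXEC write updates DDR and re-executes
 * SDDR32.P, storing one word per write. */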
1926 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1927 if (i == 0) {
1928 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
1929 xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
1930 } else {
1931 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
1932 }
1933 }
1934 } else {
1935 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1936 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1937 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
1938 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
1939 xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1940 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1941 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1942 }
1943 }
1944
1945 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1946 if (res == ERROR_OK) {
1947 bool prev_suppress = xtensa->suppress_dsr_errors;
1948 xtensa->suppress_dsr_errors = true;
1949 res = xtensa_core_status_check(target);
1950 if (xtensa->probe_lsddr32p == -1)
1951 xtensa->probe_lsddr32p = 1;
1952 xtensa->suppress_dsr_errors = prev_suppress;
1953 }
1954 if (res != ERROR_OK) {
1955 if (xtensa->probe_lsddr32p != 0) {
1956 /* Disable fast memory access instructions and retry before reporting an error */
1957 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1958 xtensa->probe_lsddr32p = 0;
1959 res = xtensa_write_memory(target, address, size, count, buffer);
1960 } else {
1961 LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
1962 count * size, address);
1963 }
1964 } else {
1965 /* Invalidate ICACHE, writeback DCACHE if present */
1966 uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
1967 uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
1968 if (issue_ihi || issue_dhwb) {
1969 uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
1970 uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
1971 uint32_t linesize = MIN(ilinesize, dlinesize);
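/* Step by the smaller of the two line sizes so every affected line in both caches is hit */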
1972 uint32_t off = 0;
1973 adr = addrstart_al;
1974
1975 while ((adr + off) < addrend_al) {
1976 if (off == 0) {
1977 /* Write start address to A3 */
1978 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr);
1979 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1980 }
1981 if (issue_ihi)
1982 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
1983 if (issue_dhwb)
1984 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
1985 off += linesize;
1986 if (off > 1020) {
1987 /* IHI, DHWB have 8-bit immediate operands (0..1020) */
1988 adr += off;
1989 off = 0;
1990 }
1991 }
1992
1993 /* Execute cache WB/INV instructions */
1994 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1995 xtensa_core_status_check(target);
1996 if (res != ERROR_OK)
1997 LOG_TARGET_ERROR(target,
1998 "Error issuing cache writeback/invaldate instruction(s): %d",
1999 res);
2000 }
2001 }
2002 if (albuff != buffer)
2003 free(albuff);
2004
2005 return res;
2006 }
2007
2008 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2009 {
2010 /* xtensa_write_memory can handle everything. Just pass on to that. */
2011 return xtensa_write_memory(target, address, 1, count, buffer);
2012 }
2013
2014 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2015 {
2016 LOG_WARNING("not implemented yet");
2017 return ERROR_FAIL;
2018 }
2019
2020 int xtensa_poll(struct target *target)
2021 {
2022 struct xtensa *xtensa = target_to_xtensa(target);
2023 if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
2024 target->state = TARGET_UNKNOWN;
2025 return ERROR_TARGET_NOT_EXAMINED;
2026 }
2027
2028 int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET(xtensa) |
2029 PWRSTAT_COREWASRESET(xtensa));
2030 if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
2031 LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2032 xtensa->dbg_mod.power_status.stat,
2033 PWRSTAT_DEBUGWASRESET(xtensa) | PWRSTAT_COREWASRESET(xtensa),
2034 xtensa->dbg_mod.power_status.stath);
2035 if (res != ERROR_OK)
2036 return res;
2037
2038 if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
2039 LOG_TARGET_INFO(target, "Debug controller was reset.");
2040 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
2041 if (res != ERROR_OK)
2042 return res;
2043 }
2044 if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
2045 LOG_TARGET_INFO(target, "Core was reset.");
2046 xtensa_dm_power_status_cache(&xtensa->dbg_mod);
2047 /* Enable JTAG, set reset if needed */
2048 res = xtensa_wakeup(target);
2049 if (res != ERROR_OK)
2050 return res;
2051
2052 uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2053 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
2054 if (res != ERROR_OK)
2055 return res;
2056 if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2057 LOG_TARGET_DEBUG(target,
2058 "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2059 prev_dsr,
2060 xtensa->dbg_mod.core_status.dsr);
2061 if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET(xtensa)) {
2062 /* if RESET state is persistent */
2063 target->state = TARGET_RESET;
2064 } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2065 LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2066 xtensa->dbg_mod.core_status.dsr,
2067 xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
2068 target->state = TARGET_UNKNOWN;
2069 if (xtensa->come_online_probes_num == 0)
2070 target->examined = false;
2071 else
2072 xtensa->come_online_probes_num--;
2073 } else if (xtensa_is_stopped(target)) {
2074 if (target->state != TARGET_HALTED) {
2075 enum target_state oldstate = target->state;
2076 target->state = TARGET_HALTED;
2077 /* Examine why the target has been halted */
2078 target->debug_reason = DBG_REASON_DBGRQ;
2079 xtensa_fetch_all_regs(target);
2080 /* When setting debug reason DEBUGCAUSE events have the following
2081 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2082 /* Simultaneous watchpoint and breakpoint events result in a special
2083 * debug reason: DBG_REASON_WPTANDBKPT. */
2084 uint32_t halt_cause = xtensa_cause_get(target);
2085 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2086 if (halt_cause & DEBUGCAUSE_IC)
2087 target->debug_reason = DBG_REASON_SINGLESTEP;
2088 if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2089 if (halt_cause & DEBUGCAUSE_DB)
2090 target->debug_reason = DBG_REASON_WPTANDBKPT;
2091 else
2092 target->debug_reason = DBG_REASON_BREAKPOINT;
2093 } else if (halt_cause & DEBUGCAUSE_DB) {
2094 target->debug_reason = DBG_REASON_WATCHPOINT;
2095 }
2096 LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2097 ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2098 xtensa_reg_get(target, XT_REG_IDX_PC),
2099 target->debug_reason,
2100 oldstate);
2101 LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2102 halt_cause,
2103 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
2104 xtensa->dbg_mod.core_status.dsr);
2105 xtensa_dm_core_status_clear(
2106 &xtensa->dbg_mod,
2107 OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
2108 OCDDSR_DEBUGINTTRAX |
2109 OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
2110 }
2111 } else {
2112 target->debug_reason = DBG_REASON_NOTHALTED;
2113 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2114 target->state = TARGET_RUNNING;
2115 target->debug_reason = DBG_REASON_NOTHALTED;
2116 }
2117 }
2118 if (xtensa->trace_active) {
2119 /* Detect if tracing was active but has stopped. */
2120 struct xtensa_trace_status trace_status;
2121 res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2122 if (res == ERROR_OK) {
2123 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2124 LOG_INFO("Detected end of trace.");
2125 if (trace_status.stat & TRAXSTAT_PCMTG)
2126 LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2127 if (trace_status.stat & TRAXSTAT_PTITG)
2128 LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2129 if (trace_status.stat & TRAXSTAT_CTITG)
2130 LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2131 xtensa->trace_active = false;
2132 }
2133 }
2134 }
2135 return ERROR_OK;
2136 }
2137
2138 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2139 {
2140 struct xtensa *xtensa = target_to_xtensa(target);
2141 unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
2142 unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2143 uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2144 uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2145 unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2146 unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
2147 int ret;
2148
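/* Callers patch at most a few bytes (a BREAK/BREAK.N opcode or the instruction being
 * restored), so the write spans at most two cache lines; when it does, the second line
 * is reached via the offset-4 accesses below. */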
2149 if (size > icache_line_size)
2150 return ERROR_FAIL;
2151
2152 if (issue_ihi || issue_dhwbi) {
2153 /* We're going to use A3 here */
2154 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
2155
2156 /* Write start address to A3 and invalidate */
2157 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2158 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2159 LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
2160 if (issue_dhwbi) {
2161 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
2162 if (!same_dc_line) {
2163 LOG_TARGET_DEBUG(target,
2164 "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2165 address + 4);
2166 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
2167 }
2168 }
2169 if (issue_ihi) {
2170 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
2171 if (!same_ic_line) {
2172 LOG_TARGET_DEBUG(target,
2173 "IHI second icache line for address "TARGET_ADDR_FMT,
2174 address + 4);
2175 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
2176 }
2177 }
2178
2179 /* Execute invalidate instructions */
2180 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2181 xtensa_core_status_check(target);
2182 if (ret != ERROR_OK) {
2183 LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
2184 return ret;
2185 }
2186 }
2187
2188 /* Write new instructions to memory */
2189 ret = target_write_buffer(target, address, size, buffer);
2190 if (ret != ERROR_OK) {
2191 LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2192 return ret;
2193 }
2194
2195 if (issue_dhwbi) {
2196 /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2197 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2198 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2199 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
2200 LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2201 if (!same_dc_line) {
2202 LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2203 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
2204 }
2205
2206 /* Execute writeback instructions */
2207 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2208 xtensa_core_status_check(target);
2209 }
2210
2211 /* TODO: Handle L2 cache if present */
2212 return ret;
2213 }
2214
2215 static int xtensa_sw_breakpoint_add(struct target *target,
2216 struct breakpoint *breakpoint,
2217 struct xtensa_sw_breakpoint *sw_bp)
2218 {
2219 struct xtensa *xtensa = target_to_xtensa(target);
2220 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2221 if (ret != ERROR_OK) {
2222 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2223 return ret;
2224 }
2225
2226 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2227 sw_bp->oocd_bp = breakpoint;
2228
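/* Pick the breakpoint opcode to match the slot being replaced: a full-size BREAK 0,0 when
 * insn_sz is XT_ISNS_SZ_MAX, otherwise the narrow (density-option) BREAK.N 0. */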
2229 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2230
2231 /* Underlying memory write will convert instruction endianness, don't do that here */
2232 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2233 if (ret != ERROR_OK) {
2234 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2235 return ret;
2236 }
2237
2238 return ERROR_OK;
2239 }
2240
2241 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2242 {
2243 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2244 if (ret != ERROR_OK) {
2245 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2246 return ret;
2247 }
2248 sw_bp->oocd_bp = NULL;
2249 return ERROR_OK;
2250 }
2251
2252 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2253 {
2254 struct xtensa *xtensa = target_to_xtensa(target);
2255 unsigned int slot;
2256
2257 if (breakpoint->type == BKPT_SOFT) {
2258 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2259 if (!xtensa->sw_brps[slot].oocd_bp ||
2260 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2261 break;
2262 }
2263 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2264 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2265 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2266 }
2267 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2268 if (ret != ERROR_OK) {
2269 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2270 return ret;
2271 }
2272 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2273 slot,
2274 breakpoint->address);
2275 return ERROR_OK;
2276 }
2277
2278 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2279 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2280 break;
2281 }
2282 if (slot == xtensa->core_config->debug.ibreaks_num) {
2283 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2284 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2285 }
2286
2287 xtensa->hw_brps[slot] = breakpoint;
2288 /* We will actually write the breakpoints when we resume the target. */
2289 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2290 slot,
2291 breakpoint->address);
2292
2293 return ERROR_OK;
2294 }
2295
2296 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2297 {
2298 struct xtensa *xtensa = target_to_xtensa(target);
2299 unsigned int slot;
2300
2301 if (breakpoint->type == BKPT_SOFT) {
2302 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2303 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2304 break;
2305 }
2306 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2307 LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2308 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2309 }
2310 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2311 if (ret != ERROR_OK) {
2312 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2313 return ret;
2314 }
2315 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2316 return ERROR_OK;
2317 }
2318
2319 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2320 if (xtensa->hw_brps[slot] == breakpoint)
2321 break;
2322 }
2323 if (slot == xtensa->core_config->debug.ibreaks_num) {
2324 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2325 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2326 }
2327 xtensa->hw_brps[slot] = NULL;
2328 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2329 return ERROR_OK;
2330 }
2331
2332 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2333 {
2334 struct xtensa *xtensa = target_to_xtensa(target);
2335 unsigned int slot;
2336 xtensa_reg_val_t dbreakcval;
2337
2338 if (target->state != TARGET_HALTED) {
2339 LOG_TARGET_WARNING(target, "target not halted");
2340 return ERROR_TARGET_NOT_HALTED;
2341 }
2342
2343 if (watchpoint->mask != ~(uint32_t)0) {
2344 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2345 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2346 }
2347
2348 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2349 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2350 break;
2351 }
2352 if (slot == xtensa->core_config->debug.dbreaks_num) {
2353 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2354 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2355 }
2356
2357 /* Figure out the value for DBREAKC bits 5..0.
2358 * It's basically 0x3F with one more LSB cleared for each doubling of the watchpoint length. */
2359 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2360 !IS_PWR_OF_2(watchpoint->length) ||
2361 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2362 LOG_TARGET_WARNING(
2363 target,
2364 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2365 " not supported by hardware.",
2366 watchpoint->length,
2367 watchpoint->address);
2368 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2369 }
2370 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
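/* For example, a 1-byte watchpoint gives a mask of 0x3F, a 4-byte watchpoint gives
 * ALIGN_DOWN(0x3F, 4) = 0x3C, and a 64-byte watchpoint gives 0x00. Bits 30/31 below
 * select matching on loads, stores, or both. */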
2371
2372 if (watchpoint->rw == WPT_READ)
2373 dbreakcval |= BIT(30);
2374 if (watchpoint->rw == WPT_WRITE)
2375 dbreakcval |= BIT(31);
2376 if (watchpoint->rw == WPT_ACCESS)
2377 dbreakcval |= BIT(30) | BIT(31);
2378
2379 /* Write DBREAKA[slot] and DBREAKC[slot] */
2380 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2381 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2382 xtensa->hw_wps[slot] = watchpoint;
2383 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2384 watchpoint->address);
2385 return ERROR_OK;
2386 }
2387
2388 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2389 {
2390 struct xtensa *xtensa = target_to_xtensa(target);
2391 unsigned int slot;
2392
2393 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2394 if (xtensa->hw_wps[slot] == watchpoint)
2395 break;
2396 }
2397 if (slot == xtensa->core_config->debug.dbreaks_num) {
2398 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2399 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2400 }
2401 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2402 xtensa->hw_wps[slot] = NULL;
2403 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2404 watchpoint->address);
2405 return ERROR_OK;
2406 }
2407
2408 static int xtensa_build_reg_cache(struct target *target)
2409 {
2410 struct xtensa *xtensa = target_to_xtensa(target);
2411 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2412 unsigned int last_dbreg_num = 0;
2413
2414 if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
2415 LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2416 xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);
2417
2418 struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2419
2420 if (!reg_cache) {
2421 LOG_ERROR("Failed to alloc reg cache!");
2422 return ERROR_FAIL;
2423 }
2424 reg_cache->name = "Xtensa registers";
2425 reg_cache->next = NULL;
2426 /* Init reglist */
2427 unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2428 struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2429 if (!reg_list) {
2430 LOG_ERROR("Failed to alloc reg list!");
2431 goto fail;
2432 }
2433 xtensa->dbregs_num = 0;
2434 unsigned int didx = 0;
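/* Populate in two passes: pass 0 walks the fixed core register descriptors (xtensa_regs),
 * pass 1 walks the optional registers (xtensa->optregs). */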
2435 for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2436 struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2437 unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2438 for (unsigned int i = 0; i < listsize; i++, didx++) {
2439 reg_list[didx].exist = rlist[i].exist;
2440 reg_list[didx].name = rlist[i].name;
2441 reg_list[didx].size = 32;
2442 reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2443 if (!reg_list[didx].value) {
2444 LOG_ERROR("Failed to alloc reg list value!");
2445 goto fail;
2446 }
2447 reg_list[didx].dirty = false;
2448 reg_list[didx].valid = false;
2449 reg_list[didx].type = &xtensa_reg_type;
2450 reg_list[didx].arch_info = xtensa;
2451 if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2452 last_dbreg_num = rlist[i].dbreg_num;
2453
2454 if (xtensa_extra_debug_log) {
2455 LOG_TARGET_DEBUG(target,
2456 "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2457 reg_list[didx].name,
2458 whichlist,
2459 reg_list[didx].exist,
2460 didx,
2461 rlist[i].type,
2462 rlist[i].dbreg_num);
2463 }
2464 }
2465 }
2466
2467 xtensa->dbregs_num = last_dbreg_num + 1;
2468 reg_cache->reg_list = reg_list;
2469 reg_cache->num_regs = reg_list_size;
2470
2471 LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2472 xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
2473
2474 /* Construct empty-register list for handling unknown register requests */
2475 xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
2476 if (!xtensa->empty_regs) {
2477 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2478 goto fail;
2479 }
2480 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2481 xtensa->empty_regs[i].name = calloc(8, sizeof(char));
2482 if (!xtensa->empty_regs[i].name) {
2483 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2484 goto fail;
2485 }
2486 sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
2487 xtensa->empty_regs[i].size = 32;
2488 xtensa->empty_regs[i].type = &xtensa_reg_type;
2489 xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2490 if (!xtensa->empty_regs[i].value) {
2491 LOG_ERROR("Failed to alloc empty reg list value!");
2492 goto fail;
2493 }
2494 xtensa->empty_regs[i].arch_info = xtensa;
2495 }
2496
2497 /* Construct contiguous register list from contiguous descriptor list */
2498 if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
2499 xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
2500 if (!xtensa->contiguous_regs_list) {
2501 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2502 goto fail;
2503 }
2504 for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
2505 unsigned int j;
2506 for (j = 0; j < reg_cache->num_regs; j++) {
2507 if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
2508 xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
2509 LOG_TARGET_DEBUG(target,
2510 "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
2511 xtensa->contiguous_regs_list[i]->name,
2512 xtensa->contiguous_regs_desc[i]->dbreg_num);
2513 break;
2514 }
2515 }
2516 if (j == reg_cache->num_regs)
2517 LOG_TARGET_WARNING(target, "contiguous register %s not found",
2518 xtensa->contiguous_regs_desc[i]->name);
2519 }
2520 }
2521
2522 xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2523 if (!xtensa->algo_context_backup) {
2524 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2525 goto fail;
2526 }
2527 for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2528 struct reg *reg = &reg_cache->reg_list[i];
2529 xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2530 if (!xtensa->algo_context_backup[i]) {
2531 LOG_ERROR("Failed to alloc mem for algorithm context!");
2532 goto fail;
2533 }
2534 }
2535 xtensa->core_cache = reg_cache;
2536 if (cache_p)
2537 *cache_p = reg_cache;
2538 return ERROR_OK;
2539
2540 fail:
2541 if (reg_list) {
2542 for (unsigned int i = 0; i < reg_list_size; i++)
2543 free(reg_list[i].value);
2544 free(reg_list);
2545 }
2546 if (xtensa->empty_regs) {
2547 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2548 free((void *)xtensa->empty_regs[i].name);
2549 free(xtensa->empty_regs[i].value);
2550 }
2551 free(xtensa->empty_regs);
2552 }
2553 if (xtensa->algo_context_backup) {
2554 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2555 free(xtensa->algo_context_backup[i]);
2556 free(xtensa->algo_context_backup);
2557 }
2558 free(reg_cache);
2559
2560 return ERROR_FAIL;
2561 }
2562
2563 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
2564 {
2565 struct xtensa *xtensa = target_to_xtensa(target);
2566 int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
2567 /* Process op[] list */
2568 while (opstr && (*opstr == ':')) {
2569 uint8_t ops[32];
2570 unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
2571 if (oplen > 32) {
2572 LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
2573 break;
2574 }
2575 unsigned int i = 0;
2576 while ((i < oplen) && opstr && (*opstr == ':'))
2577 ops[i++] = strtoul(opstr + 1, &opstr, 16);
2578 if (i != oplen) {
2579 LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
2580 break;
2581 }
2582
2583 char insn_buf[128];
2584 sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
2585 for (i = 0; i < oplen; i++)
2586 sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
2587 LOG_TARGET_DEBUG(target, "%s", insn_buf);
2588 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
2589 status = ERROR_OK;
2590 }
2591 return status;
2592 }
2593
2594 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
2595 {
2596 struct xtensa *xtensa = target_to_xtensa(target);
2597 bool iswrite = (packet[0] == 'Q');
2598 enum xtensa_qerr_e error;
2599
2600 /* Read/write TIE register. Requires spill location.
2601 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
2602 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
2603 */
2604 if (!(xtensa->spill_buf)) {
2605 LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
2606 error = XT_QERR_FAIL;
2607 goto xtensa_gdbqc_qxtreg_fail;
2608 }
2609
2610 char *delim;
2611 uint32_t regnum = strtoul(packet + 6, &delim, 16);
2612 if (*delim != ':') {
2613 LOG_ERROR("Malformed qxtreg packet");
2614 error = XT_QERR_INVAL;
2615 goto xtensa_gdbqc_qxtreg_fail;
2616 }
2617 uint32_t reglen = strtoul(delim + 1, &delim, 16);
2618 if (*delim != ':') {
2619 LOG_ERROR("Malformed qxtreg packet");
2620 error = XT_QERR_INVAL;
2621 goto xtensa_gdbqc_qxtreg_fail;
2622 }
2623 uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
2624 memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
2625 LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
2626 if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
2627 LOG_ERROR("TIE register too large");
2628 error = XT_QERR_MEM;
2629 goto xtensa_gdbqc_qxtreg_fail;
2630 }
2631
2632 /* (1) Save spill memory, (1.5) [if write then store value to spill location],
2633 * (2) read old a4, (3) write spill address to a4.
2634 * NOTE: ensure a4 is restored properly by all error handling logic
2635 */
2636 unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
2637 int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
2638 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2639 if (status != ERROR_OK) {
2640 LOG_ERROR("Spill memory save");
2641 error = XT_QERR_MEM;
2642 goto xtensa_gdbqc_qxtreg_fail;
2643 }
2644 if (iswrite) {
2645 /* Extract value and store in spill memory */
2646 unsigned int b = 0;
2647 char *valbuf = strchr(delim, '=');
2648 if (!(valbuf && (*valbuf == '='))) {
2649 LOG_ERROR("Malformed Qxtreg packet");
2650 error = XT_QERR_INVAL;
2651 goto xtensa_gdbqc_qxtreg_fail;
2652 }
2653 valbuf++;
2654 while (*valbuf && *(valbuf + 1)) {
2655 char bytestr[3] = { 0, 0, 0 };
2656 strncpy(bytestr, valbuf, 2);
2657 regbuf[b++] = strtoul(bytestr, NULL, 16);
2658 valbuf += 2;
2659 }
2660 if (b != reglen) {
2661 LOG_ERROR("Malformed Qxtreg packet");
2662 error = XT_QERR_INVAL;
2663 goto xtensa_gdbqc_qxtreg_fail;
2664 }
2665 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2666 reglen / memop_size, regbuf);
2667 if (status != ERROR_OK) {
2668 LOG_ERROR("TIE value store");
2669 error = XT_QERR_MEM;
2670 goto xtensa_gdbqc_qxtreg_fail;
2671 }
2672 }
2673 xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
2674 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, xtensa->spill_loc);
2675 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2676
2677 int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
2678
2679 /* Restore a4 but not yet spill memory. Execute it all... */
2680 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, orig_a4);
2681 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2682 status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2683 if (status != ERROR_OK) {
2684 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
2685 tieop_status = status;
2686 }
2687 status = xtensa_core_status_check(target);
2688 if (status != ERROR_OK) {
2689 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
2690 tieop_status = status;
2691 }
2692
2693 if (tieop_status == ERROR_OK) {
2694 if (iswrite) {
2695 /* TIE write succeeded; send OK */
2696 strcpy(*response_p, "OK");
2697 } else {
2698 /* TIE read succeeded; copy result from spill memory */
2699 status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
2700 if (status != ERROR_OK) {
2701 LOG_TARGET_ERROR(target, "TIE result read");
2702 tieop_status = status;
2703 }
2704 unsigned int i;
2705 for (i = 0; i < reglen; i++)
2706 sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
2707 *(*response_p + 2 * i) = '\0';
2708 LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
2709 }
2710 }
2711
2712 /* Restore spill memory first, then report any previous errors */
2713 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2714 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2715 if (status != ERROR_OK) {
2716 LOG_ERROR("Spill memory restore");
2717 error = XT_QERR_MEM;
2718 goto xtensa_gdbqc_qxtreg_fail;
2719 }
2720 if (tieop_status != ERROR_OK) {
2721 LOG_ERROR("TIE execution");
2722 error = XT_QERR_FAIL;
2723 goto xtensa_gdbqc_qxtreg_fail;
2724 }
2725 return ERROR_OK;
2726
2727 xtensa_gdbqc_qxtreg_fail:
2728 strcpy(*response_p, xt_qerr[error].chrval);
2729 return xt_qerr[error].intval;
2730 }
2731
2732 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
2733 {
2734 struct xtensa *xtensa = target_to_xtensa(target);
2735 enum xtensa_qerr_e error;
2736 if (!packet || !response_p) {
2737 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
2738 return ERROR_FAIL;
2739 }
2740
2741 *response_p = xtensa->qpkt_resp;
2742 if (strncmp(packet, "qxtn", 4) == 0) {
2743 strcpy(*response_p, "OpenOCD");
2744 return ERROR_OK;
2745 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
2746 return ERROR_OK;
2747 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
2748 /* Confirm host cache params match core .cfg file */
2749 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
2750 &xtensa->core_config->icache : &xtensa->core_config->dcache;
2751 unsigned int line_size = 0, size = 0, way_count = 0;
2752 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
2753 if ((cachep->line_size != line_size) ||
2754 (cachep->size != size) ||
2755 (cachep->way_count != way_count)) {
2756 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2757 cachep == &xtensa->core_config->icache ? 'I' : 'D');
2758 }
2759 strcpy(*response_p, "OK");
2760 return ERROR_OK;
2761 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
2762 /* Confirm host IRAM/IROM params match core .cfg file */
2763 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
2764 &xtensa->core_config->iram : &xtensa->core_config->irom;
2765 unsigned int base = 0, size = 0, i;
2766 char *pkt = (char *)&packet[7];
2767 do {
2768 pkt++;
2769 size = strtoul(pkt, &pkt, 16);
2770 pkt++;
2771 base = strtoul(pkt, &pkt, 16);
2772 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
2773 for (i = 0; i < memp->count; i++) {
2774 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
2775 break;
2776 }
2777 if (i == memp->count) {
2778 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
2779 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
2780 break;
2781 }
2782 for (i = 0; i < 11; i++) {
2783 pkt++;
2784 strtoul(pkt, &pkt, 16);
2785 }
2786 } while (pkt && (pkt[0] == ','));
2787 strcpy(*response_p, "OK");
2788 return ERROR_OK;
2789 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
2790 /* Confirm host EXCM_LEVEL matches core .cfg file */
2791 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
2792 if (!xtensa->core_config->high_irq.enabled ||
2793 (excm_level != xtensa->core_config->high_irq.excm_level))
2794 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
2795 strcpy(*response_p, "OK");
2796 return ERROR_OK;
2797 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
2798 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
2799 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
2800 strcpy(*response_p, "OK");
2801 return ERROR_OK;
2802 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
2803 char *delim;
2804 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
2805 if (*delim != ':') {
2806 LOG_ERROR("Malformed Qxtspill packet");
2807 error = XT_QERR_INVAL;
2808 goto xtensa_gdb_query_custom_fail;
2809 }
2810 xtensa->spill_loc = spill_loc;
2811 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
2812 if (xtensa->spill_buf)
2813 free(xtensa->spill_buf);
2814 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
2815 if (!xtensa->spill_buf) {
2816 LOG_ERROR("Spill buf alloc");
2817 error = XT_QERR_MEM;
2818 goto xtensa_gdb_query_custom_fail;
2819 }
2820 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
2821 strcpy(*response_p, "OK");
2822 return ERROR_OK;
2823 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
2824 return xtensa_gdbqc_qxtreg(target, packet, response_p);
2825 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
2826 (strncmp(packet, "qxtftie", 7) == 0) ||
2827 (strncmp(packet, "qxtstie", 7) == 0)) {
2828 /* Return empty string to indicate trace, TIE wire debug are unsupported */
2829 strcpy(*response_p, "");
2830 return ERROR_OK;
2831 }
2832
2833 /* Warn for all other queries, but do not return errors */
2834 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
2835 strcpy(*response_p, "");
2836 return ERROR_OK;
2837
2838 xtensa_gdb_query_custom_fail:
2839 strcpy(*response_p, xt_qerr[error].chrval);
2840 return xt_qerr[error].intval;
2841 }
2842
2843 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2844 const struct xtensa_debug_module_config *dm_cfg)
2845 {
2846 target->arch_info = xtensa;
2847 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2848 xtensa->target = target;
2849 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2850
2851 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
2852 if (!xtensa->core_config) {
2853 LOG_ERROR("Xtensa configuration alloc failed\n");
2854 return ERROR_FAIL;
2855 }
2856
2857 /* By default, caches are disabled and configured with 1 way */
2858 xtensa->core_config->icache.way_count = 1;
2859 xtensa->core_config->dcache.way_count = 1;
2860
2861 /* chrval: AR3/AR4 register names will change with window mapping.
2862 * intval: tracks whether scratch register was set through gdb P packet.
2863 */
2864 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
2865 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
2866 if (!xtensa->scratch_ars[s].chrval) {
2867 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
2868 free(xtensa->scratch_ars[f].chrval);
2869 free(xtensa->core_config);
2870 LOG_ERROR("Xtensa scratch AR alloc failed\n");
2871 return ERROR_FAIL;
2872 }
2873 xtensa->scratch_ars[s].intval = false;
2874 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
2875 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
2876 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
2877 }
2878
2879 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2880 }
2881
2882 void xtensa_set_permissive_mode(struct target *target, bool state)
2883 {
2884 target_to_xtensa(target)->permissive_mode = state;
2885 }
2886
2887 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2888 {
2889 struct xtensa *xtensa = target_to_xtensa(target);
2890
2891 xtensa->come_online_probes_num = 3;
2892 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
2893 if (!xtensa->hw_brps) {
2894 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2895 return ERROR_FAIL;
2896 }
2897 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
2898 if (!xtensa->hw_wps) {
2899 free(xtensa->hw_brps);
2900 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2901 return ERROR_FAIL;
2902 }
2903 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2904 if (!xtensa->sw_brps) {
2905 free(xtensa->hw_brps);
2906 free(xtensa->hw_wps);
2907 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2908 return ERROR_FAIL;
2909 }
2910
2911 xtensa->spill_loc = 0xffffffff;
2912 xtensa->spill_bytes = 0;
2913 xtensa->spill_buf = NULL;
2914 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
2915
2916 return xtensa_build_reg_cache(target);
2917 }
2918
2919 static void xtensa_free_reg_cache(struct target *target)
2920 {
2921 struct xtensa *xtensa = target_to_xtensa(target);
2922 struct reg_cache *cache = xtensa->core_cache;
2923
2924 if (cache) {
2925 register_unlink_cache(&target->reg_cache, cache);
2926 for (unsigned int i = 0; i < cache->num_regs; i++) {
2927 free(xtensa->algo_context_backup[i]);
2928 free(cache->reg_list[i].value);
2929 }
2930 free(xtensa->algo_context_backup);
2931 free(cache->reg_list);
2932 free(cache);
2933 }
2934 xtensa->core_cache = NULL;
2935 xtensa->algo_context_backup = NULL;
2936
2937 if (xtensa->empty_regs) {
2938 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2939 free((void *)xtensa->empty_regs[i].name);
2940 free(xtensa->empty_regs[i].value);
2941 }
2942 free(xtensa->empty_regs);
2943 }
2944 xtensa->empty_regs = NULL;
2945 if (xtensa->optregs) {
2946 for (unsigned int i = 0; i < xtensa->num_optregs; i++)
2947 free((void *)xtensa->optregs[i].name);
2948 free(xtensa->optregs);
2949 }
2950 xtensa->optregs = NULL;
2951 }
2952
2953 void xtensa_target_deinit(struct target *target)
2954 {
2955 struct xtensa *xtensa = target_to_xtensa(target);
2956
2957 LOG_DEBUG("start");
2958
2959 if (target_was_examined(target)) {
2960 int ret = xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, OCDDCR_ENABLEOCD);
2961 if (ret != ERROR_OK) {
2962 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2963 return;
2964 }
2965 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2966 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2967 if (ret != ERROR_OK) {
2968 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2969 return;
2970 }
2971 xtensa_dm_deinit(&xtensa->dbg_mod);
2972 }
2973 xtensa_free_reg_cache(target);
2974 free(xtensa->hw_brps);
2975 free(xtensa->hw_wps);
2976 free(xtensa->sw_brps);
2977 if (xtensa->spill_buf) {
2978 free(xtensa->spill_buf);
2979 xtensa->spill_buf = NULL;
2980 }
2981 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
2982 free(xtensa->scratch_ars[s].chrval);
2983 free(xtensa->core_config);
2984 }
2985
2986 const char *xtensa_get_gdb_arch(struct target *target)
2987 {
2988 return "xtensa";
2989 }
2990
2991 /* exe <ascii-encoded hexadecimal instruction bytes> */
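/* Illustrative invocation from the OpenOCD prompt (the hex payload is a placeholder,
 * not a real stub):
 *   xtensa exe 00112233
 * The single argument is an even-length string of fewer than 64 hex characters;
 * each pair of characters is decoded into one opcode byte in ops[] below. */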
2992 static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
2993 {
2994 struct xtensa *xtensa = target_to_xtensa(target);
2995
2996 if (CMD_ARGC != 1)
2997 return ERROR_COMMAND_SYNTAX_ERROR;
2998
2999 /* Process ascii-encoded hex byte string */
3000 const char *parm = CMD_ARGV[0];
3001 unsigned int parm_len = strlen(parm);
3002 if ((parm_len >= 64) || (parm_len & 1)) {
3003 LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
3004 return ERROR_FAIL;
3005 }
3006
3007 uint8_t ops[32];
3008 memset(ops, 0, 32);
3009 unsigned int oplen = parm_len / 2;
3010 char encoded_byte[3] = { 0, 0, 0 };
3011 for (unsigned int i = 0; i < oplen; i++) {
3012 encoded_byte[0] = *parm++;
3013 encoded_byte[1] = *parm++;
3014 ops[i] = strtoul(encoded_byte, NULL, 16);
3015 }
3016
3017 /* GDB must handle state save/restore.
3018 * Flush reg cache in case spill location is in an AR
3019 * Update CPENABLE only for this execution; later restore cached copy
3020 * Keep a copy of exccause in case executed code triggers an exception
3021 */
3022 int status = xtensa_write_dirty_registers(target);
3023 if (status != ERROR_OK) {
3024 LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
3025 return ERROR_FAIL;
3026 }
3027 xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
3028 xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
3029 xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
3030 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
3031 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3032 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
3033 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
3034 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
3035 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3036
3037 /* Queue instruction list and execute everything */
3038 LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
3039 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3040 status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
3041 if (status != ERROR_OK)
3042 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
3043 status = xtensa_core_status_check(target);
3044 if (status != ERROR_OK)
3045 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
3046
3047 /* Reread register cache and restore saved regs after instruction execution */
3048 if (xtensa_fetch_all_regs(target) != ERROR_OK)
3049 		LOG_TARGET_ERROR(target, "Failed to fetch register cache (post-exec).");
3050 xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
3051 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
3052 return status;
3053 }
3054
3055 COMMAND_HANDLER(xtensa_cmd_exe)
3056 {
3057 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3058 }
3059
3060 /* xtdef <name> */
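/* Illustrative example: "xtensa xtdef LX" -- LX is currently the only core type
 * accepted by the parser below. */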
3061 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3062 {
3063 if (CMD_ARGC != 1)
3064 return ERROR_COMMAND_SYNTAX_ERROR;
3065
3066 const char *core_name = CMD_ARGV[0];
3067 if (strcasecmp(core_name, "LX") == 0) {
3068 xtensa->core_config->core_type = XT_LX;
3069 } else {
3070 LOG_ERROR("xtdef [LX]\n");
3071 return ERROR_COMMAND_SYNTAX_ERROR;
3072 }
3073 return ERROR_OK;
3074 }
3075
3076 COMMAND_HANDLER(xtensa_cmd_xtdef)
3077 {
3078 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3079 target_to_xtensa(get_current_target(CMD_CTX)));
3080 }
3081
3082 static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
3083 {
3084 if ((val < min) || (val > max)) {
3085 LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
3086 return false;
3087 }
3088 return true;
3089 }
3090
3091 /* xtopt <name> <value> */
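/* Illustrative examples as they might appear in a generated xtensa-core-XXX.cfg
 * (values are placeholders, not from any real core):
 *   xtensa xtopt arnum 64
 *   xtensa xtopt windowed 1
 *   xtensa xtopt debuglevel 6
 * Option names are matched case-insensitively below; unknown options only warn. */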
3092 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3093 {
3094 if (CMD_ARGC != 2)
3095 return ERROR_COMMAND_SYNTAX_ERROR;
3096
3097 const char *opt_name = CMD_ARGV[0];
3098 int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3099 if (strcasecmp(opt_name, "arnum") == 0) {
3100 if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3101 return ERROR_COMMAND_ARGUMENT_INVALID;
3102 xtensa->core_config->aregs_num = opt_val;
3103 } else if (strcasecmp(opt_name, "windowed") == 0) {
3104 if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3105 return ERROR_COMMAND_ARGUMENT_INVALID;
3106 xtensa->core_config->windowed = opt_val;
3107 } else if (strcasecmp(opt_name, "cpenable") == 0) {
3108 if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3109 return ERROR_COMMAND_ARGUMENT_INVALID;
3110 xtensa->core_config->coproc = opt_val;
3111 } else if (strcasecmp(opt_name, "exceptions") == 0) {
3112 if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3113 return ERROR_COMMAND_ARGUMENT_INVALID;
3114 xtensa->core_config->exceptions = opt_val;
3115 } else if (strcasecmp(opt_name, "intnum") == 0) {
3116 if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3117 return ERROR_COMMAND_ARGUMENT_INVALID;
3118 xtensa->core_config->irq.enabled = (opt_val > 0);
3119 xtensa->core_config->irq.irq_num = opt_val;
3120 } else if (strcasecmp(opt_name, "hipriints") == 0) {
3121 if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3122 return ERROR_COMMAND_ARGUMENT_INVALID;
3123 xtensa->core_config->high_irq.enabled = opt_val;
3124 } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3125 if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3126 return ERROR_COMMAND_ARGUMENT_INVALID;
3127 if (!xtensa->core_config->high_irq.enabled) {
3128 LOG_ERROR("xtopt excmlevel requires hipriints\n");
3129 return ERROR_COMMAND_ARGUMENT_INVALID;
3130 }
3131 xtensa->core_config->high_irq.excm_level = opt_val;
3132 } else if (strcasecmp(opt_name, "intlevels") == 0) {
3133 if (xtensa->core_config->core_type == XT_LX) {
3134 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3135 return ERROR_COMMAND_ARGUMENT_INVALID;
3136 } else {
3137 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3138 return ERROR_COMMAND_ARGUMENT_INVALID;
3139 }
3140 if (!xtensa->core_config->high_irq.enabled) {
3141 LOG_ERROR("xtopt intlevels requires hipriints\n");
3142 return ERROR_COMMAND_ARGUMENT_INVALID;
3143 }
3144 xtensa->core_config->high_irq.level_num = opt_val;
3145 } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3146 if (xtensa->core_config->core_type == XT_LX) {
3147 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3148 return ERROR_COMMAND_ARGUMENT_INVALID;
3149 } else {
3150 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3151 return ERROR_COMMAND_ARGUMENT_INVALID;
3152 }
3153 xtensa->core_config->debug.enabled = 1;
3154 xtensa->core_config->debug.irq_level = opt_val;
3155 } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3156 if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3157 return ERROR_COMMAND_ARGUMENT_INVALID;
3158 xtensa->core_config->debug.ibreaks_num = opt_val;
3159 } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3160 if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3161 return ERROR_COMMAND_ARGUMENT_INVALID;
3162 xtensa->core_config->debug.dbreaks_num = opt_val;
3163 } else if (strcasecmp(opt_name, "tracemem") == 0) {
3164 if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3165 return ERROR_COMMAND_ARGUMENT_INVALID;
3166 xtensa->core_config->trace.mem_sz = opt_val;
3167 xtensa->core_config->trace.enabled = (opt_val > 0);
3168 } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3169 if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3170 return ERROR_COMMAND_ARGUMENT_INVALID;
3171 xtensa->core_config->trace.reversed_mem_access = opt_val;
3172 } else if (strcasecmp(opt_name, "perfcount") == 0) {
3173 if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3174 return ERROR_COMMAND_ARGUMENT_INVALID;
3175 xtensa->core_config->debug.perfcount_num = opt_val;
3176 } else {
3177 LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3178 return ERROR_OK;
3179 }
3180
3181 return ERROR_OK;
3182 }
3183
3184 COMMAND_HANDLER(xtensa_cmd_xtopt)
3185 {
3186 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3187 target_to_xtensa(get_current_target(CMD_CTX)));
3188 }
3189
3190 /* xtmem <type> [parameters] */
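/* Illustrative examples (addresses and sizes are placeholders, not from a real core):
 *   xtensa xtmem icache 32 8192 4          <linebytes> <cachebytes> <ways>
 *   xtensa xtmem dcache 32 16384 4 1       optional trailing arg enables dcache writeback
 *   xtensa xtmem iram 0x40000000 0x20000   <baseaddr> <bytes>
 */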
3191 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3192 {
3193 struct xtensa_cache_config *cachep = NULL;
3194 struct xtensa_local_mem_config *memp = NULL;
3195 int mem_access = 0;
3196 bool is_dcache = false;
3197
3198 if (CMD_ARGC == 0) {
3199 LOG_ERROR("xtmem <type> [parameters]\n");
3200 return ERROR_COMMAND_SYNTAX_ERROR;
3201 }
3202
3203 const char *mem_name = CMD_ARGV[0];
3204 if (strcasecmp(mem_name, "icache") == 0) {
3205 cachep = &xtensa->core_config->icache;
3206 } else if (strcasecmp(mem_name, "dcache") == 0) {
3207 cachep = &xtensa->core_config->dcache;
3208 is_dcache = true;
3209 } else if (strcasecmp(mem_name, "l2cache") == 0) {
3210 /* TODO: support L2 cache */
3211 } else if (strcasecmp(mem_name, "l2addr") == 0) {
3212 /* TODO: support L2 cache */
3213 } else if (strcasecmp(mem_name, "iram") == 0) {
3214 memp = &xtensa->core_config->iram;
3215 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3216 } else if (strcasecmp(mem_name, "dram") == 0) {
3217 memp = &xtensa->core_config->dram;
3218 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3219 } else if (strcasecmp(mem_name, "sram") == 0) {
3220 memp = &xtensa->core_config->sram;
3221 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3222 } else if (strcasecmp(mem_name, "irom") == 0) {
3223 memp = &xtensa->core_config->irom;
3224 mem_access = XT_MEM_ACCESS_READ;
3225 } else if (strcasecmp(mem_name, "drom") == 0) {
3226 memp = &xtensa->core_config->drom;
3227 mem_access = XT_MEM_ACCESS_READ;
3228 } else if (strcasecmp(mem_name, "srom") == 0) {
3229 memp = &xtensa->core_config->srom;
3230 mem_access = XT_MEM_ACCESS_READ;
3231 } else {
3232 LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3233 return ERROR_COMMAND_ARGUMENT_INVALID;
3234 }
3235
3236 if (cachep) {
3237 if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
3238 LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
3239 return ERROR_COMMAND_SYNTAX_ERROR;
3240 }
3241 cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3242 cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3243 cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3244 cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3245 strtoul(CMD_ARGV[4], NULL, 0) : 0;
3246 } else if (memp) {
3247 if (CMD_ARGC != 3) {
3248 LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
3249 return ERROR_COMMAND_SYNTAX_ERROR;
3250 }
3251 struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3252 memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3253 memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3254 memcfgp->access = mem_access;
3255 memp->count++;
3256 }
3257
3258 return ERROR_OK;
3259 }
3260
3261 COMMAND_HANDLER(xtensa_cmd_xtmem)
3262 {
3263 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3264 target_to_xtensa(get_current_target(CMD_CTX)));
3265 }
3266
3267 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
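/* Illustrative example (placeholder values): "xtensa xtmpu 8 0x1000 1 0"
 * -- 8 foreground segments, 4 KB minimum segment size, lockable, not execute-only. */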
3268 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3269 {
3270 if (CMD_ARGC != 4) {
3271 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3272 return ERROR_COMMAND_SYNTAX_ERROR;
3273 }
3274
3275 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3276 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3277 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3278 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3279
3280 	if (nfgseg > 32) {
3281 LOG_ERROR("<nfgseg> must be within [0..32]\n");
3282 return ERROR_COMMAND_ARGUMENT_INVALID;
3283 } else if (minsegsize & (minsegsize - 1)) {
3284 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3285 return ERROR_COMMAND_ARGUMENT_INVALID;
3286 } else if (lockable > 1) {
3287 LOG_ERROR("<lockable> must be 0 or 1\n");
3288 return ERROR_COMMAND_ARGUMENT_INVALID;
3289 } else if (execonly > 1) {
3290 LOG_ERROR("<execonly> must be 0 or 1\n");
3291 return ERROR_COMMAND_ARGUMENT_INVALID;
3292 }
3293
3294 xtensa->core_config->mpu.enabled = true;
3295 xtensa->core_config->mpu.nfgseg = nfgseg;
3296 xtensa->core_config->mpu.minsegsize = minsegsize;
3297 xtensa->core_config->mpu.lockable = lockable;
3298 xtensa->core_config->mpu.execonly = execonly;
3299 return ERROR_OK;
3300 }
3301
3302 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3303 {
3304 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3305 target_to_xtensa(get_current_target(CMD_CTX)));
3306 }
3307
3308 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
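/* Illustrative example: "xtensa xtmmu 16 16" -- the parser below accepts exactly
 * two arguments, the ITLB and DTLB refill entry counts (each must be 16 or 32). */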
3309 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3310 {
3311 if (CMD_ARGC != 2) {
3312 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3313 return ERROR_COMMAND_SYNTAX_ERROR;
3314 }
3315
3316 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3317 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3318 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3319 LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3320 return ERROR_COMMAND_ARGUMENT_INVALID;
3321 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3322 LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3323 return ERROR_COMMAND_ARGUMENT_INVALID;
3324 }
3325
3326 xtensa->core_config->mmu.enabled = true;
3327 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3328 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3329 return ERROR_OK;
3330 }
3331
3332 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3333 {
3334 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3335 target_to_xtensa(get_current_target(CMD_CTX)));
3336 }
3337
3338 /* xtregs <numregs>
3339 * xtreg <regname> <regnum> */
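/* Illustrative usage (register name and number are placeholders):
 *   xtensa xtregs 176
 *   xtensa xtreg pc 0x0020
 * One argument sets the expected total register count; two arguments define a
 * single register by name and debugger-visible register number. */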
3340 COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
3341 {
3342 if (CMD_ARGC == 1) {
3343 int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
3344 if ((numregs <= 0) || (numregs > UINT16_MAX)) {
3345 LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
3346 return ERROR_COMMAND_SYNTAX_ERROR;
3347 }
3348 if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
3349 			LOG_ERROR("xtregs (%d) must be at least numgregs (%d) (if xtregfmt specified)",
3350 numregs, xtensa->genpkt_regs_num);
3351 return ERROR_COMMAND_SYNTAX_ERROR;
3352 }
3353 xtensa->total_regs_num = numregs;
3354 xtensa->core_regs_num = 0;
3355 xtensa->num_optregs = 0;
3356 /* A little more memory than required, but saves a second initialization pass */
3357 xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
3358 if (!xtensa->optregs) {
3359 LOG_ERROR("Failed to allocate xtensa->optregs!");
3360 return ERROR_FAIL;
3361 }
3362 return ERROR_OK;
3363 } else if (CMD_ARGC != 2) {
3364 return ERROR_COMMAND_SYNTAX_ERROR;
3365 }
3366
3367 /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3368 * if general register (g-packet) requests or contiguous register maps are supported */
3369 if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
3370 xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
3371 if (!xtensa->contiguous_regs_desc) {
3372 LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3373 return ERROR_FAIL;
3374 }
3375 }
3376
3377 const char *regname = CMD_ARGV[0];
3378 unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
3379 if (regnum > UINT16_MAX) {
3380 LOG_ERROR("<regnum> must be a 16-bit number");
3381 return ERROR_COMMAND_ARGUMENT_INVALID;
3382 }
3383
3384 if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
3385 if (xtensa->total_regs_num)
3386 LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3387 regname, regnum,
3388 xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
3389 else
3390 LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
3391 regname, regnum);
3392 return ERROR_FAIL;
3393 }
3394
3395 /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3396 struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
3397 bool is_extended_reg = true;
3398 unsigned int ridx;
3399 for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
3400 if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
3401 /* Flag core register as defined */
3402 rptr = &xtensa_regs[ridx];
3403 xtensa->core_regs_num++;
3404 is_extended_reg = false;
3405 break;
3406 }
3407 }
3408
3409 rptr->exist = true;
3410 if (is_extended_reg) {
3411 /* Register ID, debugger-visible register ID */
3412 rptr->name = strdup(CMD_ARGV[0]);
3413 rptr->dbreg_num = regnum;
3414 rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
3415 xtensa->num_optregs++;
3416
3417 /* Register type */
3418 if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
3419 rptr->type = XT_REG_GENERAL;
3420 } else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
3421 rptr->type = XT_REG_USER;
3422 } else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
3423 rptr->type = XT_REG_FR;
3424 } else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
3425 rptr->type = XT_REG_SPECIAL;
3426 } else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
3427 /* WARNING: For these registers, regnum points to the
3428 * index of the corresponding ARx registers, NOT to
3429 * the processor register number! */
3430 rptr->type = XT_REG_RELGEN;
3431 rptr->reg_num += XT_REG_IDX_ARFIRST;
3432 rptr->dbreg_num += XT_REG_IDX_ARFIRST;
3433 } else if ((regnum & XT_REG_TIE_MASK) != 0) {
3434 rptr->type = XT_REG_TIE;
3435 } else {
3436 rptr->type = XT_REG_OTHER;
3437 }
3438
3439 /* Register flags */
3440 if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
3441 (strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
3442 (strcmp(rptr->name, "intclear") == 0))
3443 rptr->flags = XT_REGF_NOREAD;
3444 else
3445 rptr->flags = 0;
3446
3447 if ((rptr->reg_num == (XT_PS_REG_NUM_BASE + xtensa->core_config->debug.irq_level)) &&
3448 (xtensa->core_config->core_type == XT_LX) && (rptr->type == XT_REG_SPECIAL)) {
3449 xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
3450 LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
3451 }
3452 } else if (strcmp(rptr->name, "cpenable") == 0) {
3453 xtensa->core_config->coproc = true;
3454 }
3455
3456 /* Build out list of contiguous registers in specified order */
3457 unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
3458 if (xtensa->contiguous_regs_desc) {
3459 assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
3460 xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
3461 }
3462 if (xtensa_extra_debug_log)
3463 LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
3464 is_extended_reg ? "config-specific" : "core",
3465 rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
3466 is_extended_reg ? xtensa->num_optregs : ridx,
3467 is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
3468 return ERROR_OK;
3469 }
3470
3471 COMMAND_HANDLER(xtensa_cmd_xtreg)
3472 {
3473 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3474 target_to_xtensa(get_current_target(CMD_CTX)));
3475 }
3476
3477 /* xtregfmt <contiguous|sparse> [numgregs] */
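/* Illustrative example (the count is a placeholder and must not exceed numregs):
 *   xtensa xtregfmt contiguous 128
 * The optional count sets how many registers are reported in GDB g-packets. */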
3478 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3479 {
3480 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3481 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3482 return ERROR_OK;
3483 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3484 xtensa->regmap_contiguous = true;
3485 if (CMD_ARGC == 2) {
3486 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3487 if ((numgregs <= 0) ||
3488 ((numgregs > xtensa->total_regs_num) &&
3489 (xtensa->total_regs_num > 0))) {
3490 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3491 numgregs, xtensa->total_regs_num);
3492 return ERROR_COMMAND_SYNTAX_ERROR;
3493 }
3494 xtensa->genpkt_regs_num = numgregs;
3495 }
3496 return ERROR_OK;
3497 }
3498 }
3499 return ERROR_COMMAND_SYNTAX_ERROR;
3500 }
3501
3502 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3503 {
3504 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3505 target_to_xtensa(get_current_target(CMD_CTX)));
3506 }
3507
3508 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
3509 {
3510 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
3511 &xtensa->permissive_mode, "xtensa permissive mode");
3512 }
3513
3514 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3515 {
3516 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3517 target_to_xtensa(get_current_target(CMD_CTX)));
3518 }
3519
3520 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
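/* Illustrative example (event select and mask values are placeholders):
 *   xtensa perfmon_enable 0 2 0xffff 0 6
 * -- counter 0, select 2, mask 0xffff, kernelcnt 0, tracelevel 6. */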
3521 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3522 {
3523 struct xtensa_perfmon_config config = {
3524 .mask = 0xffff,
3525 .kernelcnt = 0,
3526 .tracelevel = -1 /* use DEBUGLEVEL by default */
3527 };
3528
3529 if (CMD_ARGC < 2 || CMD_ARGC > 6)
3530 return ERROR_COMMAND_SYNTAX_ERROR;
3531
3532 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3533 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3534 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3535 return ERROR_COMMAND_ARGUMENT_INVALID;
3536 }
3537
3538 config.select = strtoul(CMD_ARGV[1], NULL, 0);
3539 if (config.select > XTENSA_MAX_PERF_SELECT) {
3540 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3541 return ERROR_COMMAND_ARGUMENT_INVALID;
3542 }
3543
3544 if (CMD_ARGC >= 3) {
3545 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3546 if (config.mask > XTENSA_MAX_PERF_MASK) {
3547 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3548 return ERROR_COMMAND_ARGUMENT_INVALID;
3549 }
3550 }
3551
3552 if (CMD_ARGC >= 4) {
3553 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3554 if (config.kernelcnt > 1) {
3555 command_print(CMD, "kernelcnt should be 0 or 1");
3556 return ERROR_COMMAND_ARGUMENT_INVALID;
3557 }
3558 }
3559
3560 if (CMD_ARGC >= 5) {
3561 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3562 if (config.tracelevel > 7) {
3563 command_print(CMD, "tracelevel should be <=7");
3564 return ERROR_COMMAND_ARGUMENT_INVALID;
3565 }
3566 }
3567
3568 if (config.tracelevel == -1)
3569 config.tracelevel = xtensa->core_config->debug.irq_level;
3570
3571 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3572 }
3573
3574 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3575 {
3576 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3577 target_to_xtensa(get_current_target(CMD_CTX)));
3578 }
3579
3580 /* perfmon_dump [counter_id] */
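/* Illustrative examples: "xtensa perfmon_dump" dumps every counter;
 * "xtensa perfmon_dump 0" dumps only counter 0. */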
3581 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3582 {
3583 if (CMD_ARGC > 1)
3584 return ERROR_COMMAND_SYNTAX_ERROR;
3585
3586 int counter_id = -1;
3587 if (CMD_ARGC == 1) {
3588 counter_id = strtol(CMD_ARGV[0], NULL, 0);
3589 		if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3590 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3591 return ERROR_COMMAND_ARGUMENT_INVALID;
3592 }
3593 }
3594
3595 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3596 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3597 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3598 char result_buf[128] = { 0 };
3599 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3600 struct xtensa_perfmon_result result;
3601 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3602 if (res != ERROR_OK)
3603 return res;
3604 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3605 "%-12" PRIu64 "%s",
3606 result.value,
3607 result.overflow ? " (overflow)" : "");
3608 LOG_INFO("%s", result_buf);
3609 }
3610
3611 return ERROR_OK;
3612 }
3613
3614 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3615 {
3616 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3617 target_to_xtensa(get_current_target(CMD_CTX)));
3618 }
3619
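/* maskisr ['on'|'off'] -- e.g. "xtensa maskisr on" masks interrupts while
 * single-stepping; with no argument the current mode is printed. */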
3620 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3621 {
3622 int state = -1;
3623
3624 if (CMD_ARGC < 1) {
3625 const char *st;
3626 state = xtensa->stepping_isr_mode;
3627 if (state == XT_STEPPING_ISR_ON)
3628 st = "OFF";
3629 else if (state == XT_STEPPING_ISR_OFF)
3630 st = "ON";
3631 else
3632 st = "UNKNOWN";
3633 command_print(CMD, "Current ISR step mode: %s", st);
3634 return ERROR_OK;
3635 }
3636 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3637 if (!strcasecmp(CMD_ARGV[0], "off"))
3638 state = XT_STEPPING_ISR_ON;
3639 else if (!strcasecmp(CMD_ARGV[0], "on"))
3640 state = XT_STEPPING_ISR_OFF;
3641
3642 if (state == -1) {
3643 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3644 return ERROR_FAIL;
3645 }
3646 xtensa->stepping_isr_mode = state;
3647 return ERROR_OK;
3648 }
3649
3650 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3651 {
3652 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3653 target_to_xtensa(get_current_target(CMD_CTX)));
3654 }
3655
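/* smpbreak [None|BreakInOut|RunStall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]
 * Illustrative example: "xtensa smpbreak BreakIn BreakOut"; with no argument the
 * currently enabled break/stall signals are printed. */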
3656 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3657 {
3658 int res;
3659 uint32_t val = 0;
3660
3661 if (CMD_ARGC >= 1) {
3662 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3663 			if (!strcasecmp(CMD_ARGV[i], "none")) {
3664 val = 0;
3665 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3666 val |= OCDDCR_BREAKINEN;
3667 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3668 val |= OCDDCR_BREAKOUTEN;
3669 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3670 val |= OCDDCR_RUNSTALLINEN;
3671 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3672 val |= OCDDCR_DEBUGMODEOUTEN;
3673 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3674 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3675 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3676 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
3677 } else {
3678 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3679 command_print(
3680 CMD,
3681 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3682 return ERROR_OK;
3683 }
3684 }
3685 res = xtensa_smpbreak_set(target, val);
3686 if (res != ERROR_OK)
3687 command_print(CMD, "Failed to set smpbreak config %d", res);
3688 } else {
3689 struct xtensa *xtensa = target_to_xtensa(target);
3690 res = xtensa_smpbreak_read(xtensa, &val);
3691 if (res == ERROR_OK)
3692 command_print(CMD, "Current bits set:%s%s%s%s",
3693 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3694 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3695 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3696 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3697 );
3698 else
3699 command_print(CMD, "Failed to get smpbreak config %d", res);
3700 }
3701 return res;
3702 }
3703
3704 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3705 {
3706 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3707 get_current_target(CMD_CTX));
3708 }
3709
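/* tracestart [pc <pcval>[/<maskbitcount>]] [after <n> [ins|words]]
 * Illustrative example (address and counts are placeholders):
 *   xtensa tracestart pc 0x40080400/4 after 256 words */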
3710 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
3711 {
3712 struct xtensa_trace_status trace_status;
3713 struct xtensa_trace_start_config cfg = {
3714 .stoppc = 0,
3715 .stopmask = XTENSA_STOPMASK_DISABLED,
3716 .after = 0,
3717 .after_is_words = false
3718 };
3719
3720 /* Parse arguments */
3721 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3722 		if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > (i + 1)) {
3723 char *e;
3724 i++;
3725 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
3726 cfg.stopmask = 0;
3727 if (*e == '/')
3728 				cfg.stopmask = strtol(e + 1, NULL, 0);
3729 		} else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > (i + 1)) {
3730 i++;
3731 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
3732 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
3733 cfg.after_is_words = 0;
3734 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
3735 cfg.after_is_words = 1;
3736 } else {
3737 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
3738 return ERROR_FAIL;
3739 }
3740 }
3741
3742 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3743 if (res != ERROR_OK)
3744 return res;
3745 if (trace_status.stat & TRAXSTAT_TRACT) {
3746 		LOG_WARNING("Silently stopping active trace!");
3747 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
3748 if (res != ERROR_OK)
3749 return res;
3750 }
3751
3752 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
3753 if (res != ERROR_OK)
3754 return res;
3755
3756 xtensa->trace_active = true;
3757 command_print(CMD, "Trace started.");
3758 return ERROR_OK;
3759 }
3760
3761 COMMAND_HANDLER(xtensa_cmd_tracestart)
3762 {
3763 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
3764 target_to_xtensa(get_current_target(CMD_CTX)));
3765 }
3766
3767 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
3768 {
3769 struct xtensa_trace_status trace_status;
3770
3771 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3772 if (res != ERROR_OK)
3773 return res;
3774
3775 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
3776 command_print(CMD, "No trace is currently active.");
3777 return ERROR_FAIL;
3778 }
3779
3780 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
3781 if (res != ERROR_OK)
3782 return res;
3783
3784 xtensa->trace_active = false;
3785 command_print(CMD, "Trace stop triggered.");
3786 return ERROR_OK;
3787 }
3788
3789 COMMAND_HANDLER(xtensa_cmd_tracestop)
3790 {
3791 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
3792 target_to_xtensa(get_current_target(CMD_CTX)));
3793 }
3794
3795 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
3796 {
3797 struct xtensa_trace_config trace_config;
3798 struct xtensa_trace_status trace_status;
3799 uint32_t memsz, wmem;
3800
3801 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3802 if (res != ERROR_OK)
3803 return res;
3804
3805 if (trace_status.stat & TRAXSTAT_TRACT) {
3806 command_print(CMD, "Tracing is still active. Please stop it first.");
3807 return ERROR_FAIL;
3808 }
3809
3810 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
3811 if (res != ERROR_OK)
3812 return res;
3813
3814 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
3815 command_print(CMD, "No active trace found; nothing to dump.");
3816 return ERROR_FAIL;
3817 }
3818
3819 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
3820 LOG_INFO("Total trace memory: %d words", memsz);
3821 if ((trace_config.addr &
3822 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
3823 		/* Memory hasn't overwritten itself yet. */
3824 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
3825 LOG_INFO("...but trace is only %d words", wmem);
3826 if (wmem < memsz)
3827 memsz = wmem;
3828 } else {
3829 if (trace_config.addr & TRAXADDR_TWSAT) {
3830 LOG_INFO("Real trace is many times longer than that (overflow)");
3831 } else {
3832 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
3833 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
3834 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
3835 }
3836 }
3837
3838 uint8_t *tracemem = malloc(memsz * 4);
3839 if (!tracemem) {
3840 command_print(CMD, "Failed to alloc memory for trace data!");
3841 return ERROR_FAIL;
3842 }
3843 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
3844 if (res != ERROR_OK) {
3845 free(tracemem);
3846 return res;
3847 }
3848
3849 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
3850 	if (f < 0) {
3851 free(tracemem);
3852 command_print(CMD, "Unable to open file %s", fname);
3853 return ERROR_FAIL;
3854 }
3855 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
3856 command_print(CMD, "Unable to write to file %s", fname);
3857 else
3858 		command_print(CMD, "Wrote %d bytes of trace data to %s", memsz * 4, fname);
3859 close(f);
3860
3861 bool is_all_zeroes = true;
3862 for (unsigned int i = 0; i < memsz * 4; i++) {
3863 if (tracemem[i] != 0) {
3864 is_all_zeroes = false;
3865 break;
3866 }
3867 }
3868 free(tracemem);
3869 if (is_all_zeroes)
3870 command_print(
3871 CMD,
3872 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
3873
3874 return ERROR_OK;
3875 }
3876
3877 COMMAND_HANDLER(xtensa_cmd_tracedump)
3878 {
3879 if (CMD_ARGC != 1) {
3880 		command_print(CMD, "Command takes exactly 1 parameter. Need a filename to dump to as output!");
3881 return ERROR_FAIL;
3882 }
3883
3884 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
3885 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
3886 }
3887
3888 static const struct command_registration xtensa_any_command_handlers[] = {
3889 {
3890 .name = "xtdef",
3891 .handler = xtensa_cmd_xtdef,
3892 .mode = COMMAND_CONFIG,
3893 .help = "Configure Xtensa core type",
3894 .usage = "<type>",
3895 },
3896 {
3897 .name = "xtopt",
3898 .handler = xtensa_cmd_xtopt,
3899 .mode = COMMAND_CONFIG,
3900 .help = "Configure Xtensa core option",
3901 .usage = "<name> <value>",
3902 },
3903 {
3904 .name = "xtmem",
3905 .handler = xtensa_cmd_xtmem,
3906 .mode = COMMAND_CONFIG,
3907 .help = "Configure Xtensa memory/cache option",
3908 .usage = "<type> [parameters]",
3909 },
3910 {
3911 .name = "xtmmu",
3912 .handler = xtensa_cmd_xtmmu,
3913 .mode = COMMAND_CONFIG,
3914 .help = "Configure Xtensa MMU option",
3915 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
3916 },
3917 {
3918 .name = "xtmpu",
3919 .handler = xtensa_cmd_xtmpu,
3920 .mode = COMMAND_CONFIG,
3921 .help = "Configure Xtensa MPU option",
3922 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
3923 },
3924 {
3925 .name = "xtreg",
3926 .handler = xtensa_cmd_xtreg,
3927 .mode = COMMAND_CONFIG,
3928 .help = "Configure Xtensa register",
3929 .usage = "<regname> <regnum>",
3930 },
3931 {
3932 .name = "xtregs",
3933 .handler = xtensa_cmd_xtreg,
3934 .mode = COMMAND_CONFIG,
3935 .help = "Configure number of Xtensa registers",
3936 .usage = "<numregs>",
3937 },
3938 {
3939 .name = "xtregfmt",
3940 .handler = xtensa_cmd_xtregfmt,
3941 .mode = COMMAND_CONFIG,
3942 .help = "Configure format of Xtensa register map",
3943 .usage = "<contiguous|sparse> [numgregs]",
3944 },
3945 {
3946 .name = "set_permissive",
3947 .handler = xtensa_cmd_permissive_mode,
3948 .mode = COMMAND_ANY,
3949 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
3950 .usage = "[0|1]",
3951 },
3952 {
3953 .name = "maskisr",
3954 .handler = xtensa_cmd_mask_interrupts,
3955 .mode = COMMAND_ANY,
3956 .help = "mask Xtensa interrupts at step",
3957 .usage = "['on'|'off']",
3958 },
3959 {
3960 .name = "smpbreak",
3961 .handler = xtensa_cmd_smpbreak,
3962 .mode = COMMAND_ANY,
3963 .help = "Set the way the CPU chains OCD breaks",
3964 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
3965 },
3966 {
3967 .name = "perfmon_enable",
3968 .handler = xtensa_cmd_perfmon_enable,
3969 .mode = COMMAND_EXEC,
3970 .help = "Enable and start performance counter",
3971 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
3972 },
3973 {
3974 .name = "perfmon_dump",
3975 .handler = xtensa_cmd_perfmon_dump,
3976 .mode = COMMAND_EXEC,
3977 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
3978 .usage = "[counter_id]",
3979 },
3980 {
3981 .name = "tracestart",
3982 .handler = xtensa_cmd_tracestart,
3983 .mode = COMMAND_EXEC,
3984 .help =
3985 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
3986 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
3987 },
3988 {
3989 .name = "tracestop",
3990 .handler = xtensa_cmd_tracestop,
3991 .mode = COMMAND_EXEC,
3992 .help = "Tracing: Stop current trace as started by the tracestart command",
3993 .usage = "",
3994 },
3995 {
3996 .name = "tracedump",
3997 .handler = xtensa_cmd_tracedump,
3998 .mode = COMMAND_EXEC,
3999 		.help = "Tracing: Dump trace memory to a file. One file per core.",
4000 .usage = "<outfile>",
4001 },
4002 {
4003 .name = "exe",
4004 .handler = xtensa_cmd_exe,
4005 .mode = COMMAND_ANY,
4006 .help = "Xtensa stub execution",
4007 .usage = "<ascii-encoded hexadecimal instruction bytes>",
4008 },
4009 COMMAND_REGISTRATION_DONE
4010 };
4011
4012 const struct command_registration xtensa_command_handlers[] = {
4013 {
4014 .name = "xtensa",
4015 .mode = COMMAND_ANY,
4016 .help = "Xtensa command group",
4017 .usage = "",
4018 .chain = xtensa_any_command_handlers,
4019 },
4020 COMMAND_REGISTRATION_DONE
4021 };
