target/xtensa: fix final clang analyzer warning
[openocd.git] / src / target / xtensa / xtensa.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
22
23 /* Swap 4-bit Xtensa opcodes and fields */
24 #define XT_NIBSWAP8(V) \
25 ((((V) & 0x0F) << 4) \
26 | (((V) & 0xF0) >> 4))
27
28 #define XT_NIBSWAP16(V) \
29 ((((V) & 0x000F) << 12) \
30 | (((V) & 0x00F0) << 4) \
31 | (((V) & 0x0F00) >> 4) \
32 | (((V) & 0xF000) >> 12))
33
34 #define XT_NIBSWAP24(V) \
35 ((((V) & 0x00000F) << 20) \
36 | (((V) & 0x0000F0) << 12) \
37 | (((V) & 0x000F00) << 4) \
38 | (((V) & 0x00F000) >> 4) \
39 | (((V) & 0x0F0000) >> 12) \
40 | (((V) & 0xF00000) >> 20))
41
42 /* _XT_INS_FORMAT_*()
43 * Instruction formatting converted from little-endian inputs
44 * and shifted to the MSB side of DIR for BE systems (see the worked example below).
45 */
46 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
47 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
48 | (((T) & 0x0F) << 16) \
49 | (((SR) & 0xFF) << 8)) << 8 \
50 : (OPCODE) \
51 | (((SR) & 0xFF) << 8) \
52 | (((T) & 0x0F) << 4))
53
54 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
55 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
56 | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
57 | (((R) & 0x0F) << 8)) << 8 \
58 : (OPCODE) \
59 | (((ST) & 0xFF) << 4) \
60 | (((R) & 0x0F) << 12))
61
62 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
63 (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
64 | (((T) & 0x0F) << 8) \
65 | (((S) & 0x0F) << 4) \
66 | ((IMM4) & 0x0F)) << 16 \
67 : (OPCODE) \
68 | (((T) & 0x0F) << 4) \
69 | (((S) & 0x0F) << 8) \
70 | (((IMM4) & 0x0F) << 12))
71
72 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
73 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
74 | (((T) & 0x0F) << 16) \
75 | (((S) & 0x0F) << 12) \
76 | (((R) & 0x0F) << 8) \
77 | ((IMM8) & 0xFF)) << 8 \
78 : (OPCODE) \
79 | (((IMM8) & 0xFF) << 16) \
80 | (((R) & 0x0F) << 12) \
81 | (((S) & 0x0F) << 8) \
82 | (((T) & 0x0F) << 4))
83
84 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
85 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
86 | (((T) & 0x0F) << 16) \
87 | (((S) & 0x0F) << 12) \
88 | (((R) & 0x0F) << 8)) << 8 \
89 | ((IMM4) & 0x0F) \
90 : (OPCODE) \
91 | (((IMM4) & 0x0F) << 20) \
92 | (((R) & 0x0F) << 12) \
93 | (((S) & 0x0F) << 8) \
94 | (((T) & 0x0F) << 4))
95
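/* Worked example (illustrative only; the values come from the macros above and
 * the register table later in this file): on a little-endian core, reading DDR
 * (special register 0x68) into a3 with XT_INS_RSR() is plain shift/OR
 * arithmetic:
 *
 *     XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3)
 *         == 0x030000 | (0x68 << 8) | (0x3 << 4)
 *         == 0x036830
 *
 * On a big-endian core the same macro nibble-swaps the opcode and shifts the
 * 24-bit result into the upper bits of the DIR word (0x03683000 here).
 */
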
96 /* Xtensa processor instruction opcodes
97 */
98 /* "Return From Debug Operation" to Normal */
99 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
100 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
101 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
102
103 /* Load to DDR register, increase addr register */
104 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
105 /* Store from DDR register, increase addr register */
106 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
107
108 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
109 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
110 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
111 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
112 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
113 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
114
115 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
116 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
117 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
118 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
119 /* Store 8-bit to A(S)+IMM8 from A(T) */
120 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
121
122 /* Cache Instructions */
123 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
124 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
125 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
126 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
127
128 /* Control Instructions */
129 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
130 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
131
132 /* Read Special Register */
133 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
134 /* Write Special Register */
135 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
136 /* Swap Special Register */
137 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
138
139 /* Rotate Window by (-8..7) */
140 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
141
142 /* Read User Register */
143 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
144 /* Write User Register */
145 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
146
147 /* Read Floating-Point Register */
148 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
149 /* Write Floating-Point Register */
150 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
151
152 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
153 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
154 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
155
156 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
157 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
158 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
159
160 #define XT_WATCHPOINTS_NUM_MAX 2
161
162 /* Special register number macros for the DDR, PS, WB, A3 and A4 registers.
163 * These get used a lot, so shortcuts for them are useful.
164 */
165 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
166 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
167 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
168 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
169 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
170
171 #define XT_PS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
172 #define XT_PC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
173 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
174 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
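
/* Worked example (illustrative): on a core whose debug interrupt is configured
 * at level 6 (xtensa->core_config->debug.irq_level == 6), the PC saved on debug
 * entry is accessed via EPC6 = XT_PC_REG_NUM_BASE + 6 = 0xB6, and the saved PS
 * via EPS6 = XT_PS_REG_NUM_BASE + 6 = 0xC6.  XT_PC_REG_NUM_VIRTUAL is only a
 * marker telling the code below to perform exactly this substitution.
 */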
175
176 #define XT_SW_BREAKPOINTS_MAX_NUM 32
177 #define XT_HW_IBREAK_MAX_NUM 2
178 #define XT_HW_DBREAK_MAX_NUM 2
179
180 struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
181 XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
182 XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
183 XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
184 XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
185 XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
186 XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
187 XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
188 XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
189 XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
190 XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
191 XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
192 XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
193 XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
194 XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
195 XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
196 XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
197 XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
198 XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
199 XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
200 XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
201 XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
202 XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
203 XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
204 XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
205 XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
206 XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
207 XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
208 XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
209 XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
210 XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
211 XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
212 XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
213 XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
214 XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
215 XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
216 XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
217 XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
218 XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
219 XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
220 XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
221 XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
222 XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
223 XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
224 XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
225 XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
226 XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
227 XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
228 XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
229 XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
230 XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
231 XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
232 XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
233 XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
234 XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
235 XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
236 XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
237 XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
238 XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
239 XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
240 XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
241 XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
242 XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
243 XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
244 XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
245 XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
246 XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
247 XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
248 XT_MK_REG_DESC("ps", 0xE6, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
249 XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
250 XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
251 XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
252 XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
253 XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
254 XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
255 XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
256 XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
257 XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
258 XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
259 XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
260 XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
261 XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
262
263 /* WARNING: For these registers, regnum points to the
264 * index of the corresponding ARx registers, NOT to
265 * the processor register number! */
266 XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
267 XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
268 XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
269 XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
270 XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
271 XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
272 XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
273 XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
274 XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
275 XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
276 XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
277 XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
278 XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
279 XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
280 XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
281 XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
282 };
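
/* Note on the a0..a15 entries above (illustrative): their reg_num fields hold
 * register-cache indices (XT_REG_IDX_ARx), not hardware register numbers.  The
 * physical AR backing an Ax register depends on WINDOWBASE and is resolved at
 * runtime by xtensa_windowbase_offset_to_canonical() below; e.g. with
 * WINDOWBASE == 2, a3 maps onto physical ar11.
 */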
283
284 /**
285 * Types of memory used by the Xtensa target
286 */
287 enum xtensa_mem_region_type {
288 XTENSA_MEM_REG_IROM = 0x0,
289 XTENSA_MEM_REG_IRAM,
290 XTENSA_MEM_REG_DROM,
291 XTENSA_MEM_REG_DRAM,
292 XTENSA_MEM_REG_SRAM,
293 XTENSA_MEM_REG_SROM,
294 XTENSA_MEM_REGS_NUM
295 };
296
297 /* Register definition as union for list allocation */
298 union xtensa_reg_val_u {
299 xtensa_reg_val_t val;
300 uint8_t buf[4];
301 };
302
303 static const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
304 { .chrval = "E00", .intval = ERROR_FAIL },
305 { .chrval = "E01", .intval = ERROR_FAIL },
306 { .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
307 { .chrval = "E03", .intval = ERROR_FAIL },
308 };
309
310 /* Change to true (at build time) for extra debug logging */
311 static const bool xtensa_extra_debug_log;
312
313 /**
314 * Returns the config for the specified memory region type
315 */
316 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
317 struct xtensa *xtensa,
318 enum xtensa_mem_region_type type)
319 {
320 switch (type) {
321 case XTENSA_MEM_REG_IROM:
322 return &xtensa->core_config->irom;
323 case XTENSA_MEM_REG_IRAM:
324 return &xtensa->core_config->iram;
325 case XTENSA_MEM_REG_DROM:
326 return &xtensa->core_config->drom;
327 case XTENSA_MEM_REG_DRAM:
328 return &xtensa->core_config->dram;
329 case XTENSA_MEM_REG_SRAM:
330 return &xtensa->core_config->sram;
331 case XTENSA_MEM_REG_SROM:
332 return &xtensa->core_config->srom;
333 default:
334 return NULL;
335 }
336 }
337
338 /**
339 * Finds the exact xtensa_local_mem_region_config within an xtensa_local_mem_config
340 * that contains the given address.
341 * Returns NULL if no region matches.
342 */
343 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
344 const struct xtensa_local_mem_config *mem,
345 target_addr_t address)
346 {
347 for (unsigned int i = 0; i < mem->count; i++) {
348 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
349 if (address >= region->base && address < (region->base + region->size))
350 return region;
351 }
352 return NULL;
353 }
354
355 /**
356 * Returns the xtensa_local_mem_region_config of the xtensa target that contains
357 * the given address.
358 * Returns NULL if no region matches.
359 */
360 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
361 struct xtensa *xtensa,
362 target_addr_t address)
363 {
364 const struct xtensa_local_mem_region_config *result;
365 const struct xtensa_local_mem_config *mcgf;
366 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
367 mcgf = xtensa_get_mem_config(xtensa, mtype);
368 result = xtensa_memory_region_find(mcgf, address);
369 if (result)
370 return result;
371 }
372 return NULL;
373 }
374
375 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
376 const struct xtensa_local_mem_config *mem,
377 target_addr_t address)
378 {
379 if (!cache->size)
380 return false;
381 return xtensa_memory_region_find(mem, address);
382 }
383
384 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
385 {
386 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
387 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
388 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
389 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
390 }
391
392 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
393 {
394 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
395 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
396 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
397 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
398 }
399
400 static int xtensa_core_reg_get(struct reg *reg)
401 {
402 /* We don't need this because we read all registers on halt anyway. */
403 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
404 struct target *target = xtensa->target;
405
406 if (target->state != TARGET_HALTED)
407 return ERROR_TARGET_NOT_HALTED;
408 if (!reg->exist) {
409 if (strncmp(reg->name, "?0x", 3) == 0) {
410 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
411 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
412 return ERROR_OK;
413 }
414 return ERROR_COMMAND_ARGUMENT_INVALID;
415 }
416 return ERROR_OK;
417 }
418
419 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
420 {
421 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
422 struct target *target = xtensa->target;
423
424 assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
425 if (target->state != TARGET_HALTED)
426 return ERROR_TARGET_NOT_HALTED;
427
428 if (!reg->exist) {
429 if (strncmp(reg->name, "?0x", 3) == 0) {
430 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
431 LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
432 return ERROR_OK;
433 }
434 return ERROR_COMMAND_ARGUMENT_INVALID;
435 }
436
437 buf_cpy(buf, reg->value, reg->size);
438
439 if (xtensa->core_config->windowed) {
440 /* If the user updates a potential scratch register, track for conflicts */
441 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
442 if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
443 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
444 buf_get_u32(reg->value, 0, 32));
445 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
446 xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
447 xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
448 xtensa->scratch_ars[s].intval = true;
449 break;
450 }
451 }
452 }
453 reg->dirty = true;
454 reg->valid = true;
455
456 return ERROR_OK;
457 }
458
459 static const struct reg_arch_type xtensa_reg_type = {
460 .get = xtensa_core_reg_get,
461 .set = xtensa_core_reg_set,
462 };
463
464 /* Convert a register index that's indexed relative to windowbase, to the real address. */
465 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
466 enum xtensa_reg_id reg_idx,
467 int windowbase)
468 {
469 unsigned int idx;
470 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
471 idx = reg_idx - XT_REG_IDX_AR0;
472 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
473 idx = reg_idx - XT_REG_IDX_A0;
474 } else {
475 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
476 return -1;
477 }
478 return ((idx + windowbase * 4) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
479 }
480
481 static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
482 enum xtensa_reg_id reg_idx,
483 int windowbase)
484 {
485 return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
486 }
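
/* Worked example (illustrative, assuming a 64-entry AR file): with
 * WINDOWBASE == 2, the window-relative register a3 (offset 3) maps to physical
 * ar11, since (3 + 2 * 4) & 63 == 11.  The inverse helper,
 * xtensa_canonical_to_windowbase_offset(), applies the same formula with
 * -WINDOWBASE, mapping ar11 back to window offset 3.
 */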
487
488 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
489 {
490 struct reg *reg_list = xtensa->core_cache->reg_list;
491 reg_list[reg_idx].dirty = true;
492 }
493
494 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
495 {
496 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0EXEC, ins);
497 }
498
499 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
500 {
501 const int max_oplen = 64; /* 8 DIRx regs: max width 64B */
502 if ((oplen > 0) && (oplen <= max_oplen)) {
503 uint8_t ops_padded[max_oplen];
504 memcpy(ops_padded, ops, oplen);
505 memset(ops_padded + oplen, 0, max_oplen - oplen);
506 unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
507 for (int32_t i = oplenw - 1; i > 0; i--)
508 xtensa_queue_dbg_reg_write(xtensa,
509 XDMREG_DIR0 + i,
510 target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t)*i]));
511 /* Write DIR0EXEC last */
512 xtensa_queue_dbg_reg_write(xtensa,
513 XDMREG_DIR0EXEC,
514 target_buffer_get_u32(xtensa->target, &ops_padded[0]));
515 }
516 }
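
/* Illustrative sketch of the DIR split above: a 6-byte (48-bit) opcode gives
 * oplenw == DIV_ROUND_UP(6, 4) == 2, so bytes 4..5 (zero-padded to a full
 * word) are written to DIR1 first and bytes 0..3 go to DIR0EXEC last, which is
 * what actually triggers execution of the queued instruction.
 */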
517
518 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
519 {
520 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
521 return dm->pwr_ops->queue_reg_write(dm, reg, data);
522 }
523
524 /* NOTE: Assumes A3 has already been saved */
525 static int xtensa_window_state_save(struct target *target, uint32_t *woe)
526 {
527 struct xtensa *xtensa = target_to_xtensa(target);
528 int woe_dis;
529 uint8_t woe_buf[4];
530
531 if (xtensa->core_config->windowed) {
532 /* Save PS (LX) and disable window overflow exceptions prior to AR save */
533 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_PS, XT_REG_A3));
534 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
535 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, woe_buf);
536 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
537 if (res != ERROR_OK) {
538 LOG_ERROR("Failed to read PS (%d)!", res);
539 return res;
540 }
541 xtensa_core_status_check(target);
542 *woe = buf_get_u32(woe_buf, 0, 32);
543 woe_dis = *woe & ~XT_PS_WOE_MSK;
544 LOG_DEBUG("Clearing PS.WOE (0x%08" PRIx32 " -> 0x%08" PRIx32 ")", *woe, woe_dis);
545 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe_dis);
546 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
547 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
548 }
549 return ERROR_OK;
550 }
551
552 /* NOTE: Assumes A3 has already been saved */
553 static void xtensa_window_state_restore(struct target *target, uint32_t woe)
554 {
555 struct xtensa *xtensa = target_to_xtensa(target);
556 if (xtensa->core_config->windowed) {
557 /* Restore window overflow exception state */
558 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe);
559 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
560 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
561 LOG_DEBUG("Restored PS.WOE (0x%08" PRIx32 ")", woe);
562 }
563 }
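
/* Sketch of the DDR-based access pattern used throughout this file (not new
 * functionality, just a summary of the two sequences above):
 *
 *   read  SR -> host:  RSR a3, <SR>;  WSR a3 -> DDR;  queue a DDR read
 *   write host -> SR:  queue a DDR write;  RSR a3 <- DDR;  WSR a3 -> <SR>
 *
 * Both directions clobber a3, which is why callers save and later restore A3
 * around these helpers.
 */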
564
565 static bool xtensa_reg_is_readable(int flags, int cpenable)
566 {
567 if (flags & XT_REGF_NOREAD)
568 return false;
569 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
570 return false;
571 return true;
572 }
573
574 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
575 {
576 int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
577 if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
578 LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
579 memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
580 } else {
581 LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
582 memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
583 }
584 return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
585 }
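
/* Illustrative example of the fixup above: if GDB wrote a3 (tracked via
 * scratch_ars[XT_AR_SCRATCH_A3].intval in xtensa_core_reg_set()) but not its
 * currently mapped arN, the a3 value is copied into the AR cache entry;
 * otherwise the AR value wins.  The caller shows a warning only when both the
 * Ax and the ARx entries were written with differing values.
 */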
586
587 static int xtensa_write_dirty_registers(struct target *target)
588 {
589 struct xtensa *xtensa = target_to_xtensa(target);
590 int res;
591 xtensa_reg_val_t regval, windowbase = 0;
592 bool scratch_reg_dirty = false, delay_cpenable = false;
593 struct reg *reg_list = xtensa->core_cache->reg_list;
594 unsigned int reg_list_size = xtensa->core_cache->num_regs;
595 bool preserve_a3 = false;
596 uint8_t a3_buf[4];
597 xtensa_reg_val_t a3 = 0, woe;
598
599 LOG_TARGET_DEBUG(target, "start");
600
601 /* We need to write the dirty registers in the cache list back to the processor.
602 * Start by writing the SFR/user registers. */
603 for (unsigned int i = 0; i < reg_list_size; i++) {
604 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
605 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
606 if (reg_list[i].dirty) {
607 if (rlist[ridx].type == XT_REG_SPECIAL ||
608 rlist[ridx].type == XT_REG_USER ||
609 rlist[ridx].type == XT_REG_FR) {
610 scratch_reg_dirty = true;
611 if (i == XT_REG_IDX_CPENABLE) {
612 delay_cpenable = true;
613 continue;
614 }
615 regval = xtensa_reg_get(target, i);
616 LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
617 reg_list[i].name,
618 rlist[ridx].reg_num,
619 regval);
620 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
621 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
622 if (reg_list[i].exist) {
623 unsigned int reg_num = rlist[ridx].reg_num;
624 if (rlist[ridx].type == XT_REG_USER) {
625 xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
626 } else if (rlist[ridx].type == XT_REG_FR) {
627 xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
628 } else {/*SFR */
629 if (reg_num == XT_PC_REG_NUM_VIRTUAL)
630 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
632 reg_num =
633 (XT_PC_REG_NUM_BASE +
634 xtensa->core_config->debug.irq_level);
635 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
636 }
637 }
638 reg_list[i].dirty = false;
639 }
640 }
641 }
642 if (scratch_reg_dirty)
643 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
644 if (delay_cpenable) {
645 regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
646 LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
647 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
648 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
649 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
650 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
651 XT_REG_A3));
652 reg_list[XT_REG_IDX_CPENABLE].dirty = false;
653 }
654
655 preserve_a3 = (xtensa->core_config->windowed);
656 if (preserve_a3) {
657 /* Save (windowed) A3 for scratch use */
658 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
659 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
660 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
661 if (res != ERROR_OK)
662 return res;
663 xtensa_core_status_check(target);
664 a3 = buf_get_u32(a3_buf, 0, 32);
665 }
666
667 if (xtensa->core_config->windowed) {
668 res = xtensa_window_state_save(target, &woe);
669 if (res != ERROR_OK)
670 return res;
671 /* Grab the windowbase, we need it. */
672 windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
673 /* Check if there are mismatches between the ARx and corresponding Ax registers.
674 * When the user sets a register on a windowed config, xt-gdb may set the ARx
675 * register directly. Thus we take ARx as priority over Ax if both are dirty
676 * and it's unclear if the user set one over the other explicitly.
677 */
678 for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
679 unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
680 if (reg_list[i].dirty && reg_list[j].dirty) {
681 if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
682 bool show_warning = true;
683 if (i == XT_REG_IDX_A3)
684 show_warning = xtensa_scratch_regs_fixup(xtensa,
685 reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
686 else if (i == XT_REG_IDX_A4)
687 show_warning = xtensa_scratch_regs_fixup(xtensa,
688 reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
689 if (show_warning)
690 LOG_WARNING(
691 "Warning: Both A%d [0x%08" PRIx32
692 "] as well as its underlying physical register "
693 "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
694 i - XT_REG_IDX_A0,
695 buf_get_u32(reg_list[i].value, 0, 32),
696 j - XT_REG_IDX_AR0,
697 buf_get_u32(reg_list[j].value, 0, 32));
698 }
699 }
700 }
701 }
702
703 /* Write A0-A15. */
704 for (unsigned int i = 0; i < 16; i++) {
705 if (reg_list[XT_REG_IDX_A0 + i].dirty) {
706 regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
707 LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
708 xtensa_regs[XT_REG_IDX_A0 + i].name,
709 regval,
710 xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
711 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
712 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
713 reg_list[XT_REG_IDX_A0 + i].dirty = false;
714 if (i == 3) {
715 /* Avoid stomping A3 during restore at end of function */
716 a3 = regval;
717 }
718 }
719 }
720
721 if (xtensa->core_config->windowed) {
722 /* Now write AR registers */
723 for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
724 /* Write the 16 registers we can see */
725 for (unsigned int i = 0; i < 16; i++) {
726 if (i + j < xtensa->core_config->aregs_num) {
727 enum xtensa_reg_id realadr =
728 xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
729 windowbase);
730 /* Write back any dirty un-windowed registers */
731 if (reg_list[realadr].dirty) {
732 regval = xtensa_reg_get(target, realadr);
733 LOG_TARGET_DEBUG(
734 target,
735 "Writing back reg %s value %08" PRIX32 ", num =%i",
736 xtensa_regs[realadr].name,
737 regval,
738 xtensa_regs[realadr].reg_num);
739 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
740 xtensa_queue_exec_ins(xtensa,
741 XT_INS_RSR(xtensa, XT_SR_DDR,
742 xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
743 reg_list[realadr].dirty = false;
744 if ((i + j) == 3)
745 /* Avoid stomping AR during A3 restore at end of function */
746 a3 = regval;
747 }
748 }
749 }
750 /* Now rotate the window so we'll see the next 16 registers. The final rotate
751  * will wrap around, leaving us in the state we started in. */
753 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
754 }
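
/* Window-rotation arithmetic for the loop above (illustrative): WindowBase is
 * counted in units of 4 ARs, so each XT_INS_ROTW(xtensa, 4) advances the view
 * by 16 physical ARs.  With, e.g., 64 ARs, four such rotations bring
 * WindowBase back to its starting value, which is why the final rotate simply
 * wraps around.
 */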
755
756 xtensa_window_state_restore(target, woe);
757
758 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
759 xtensa->scratch_ars[s].intval = false;
760 }
761
762 if (preserve_a3) {
763 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
764 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
765 }
766
767 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
768 xtensa_core_status_check(target);
769
770 return res;
771 }
772
773 static inline bool xtensa_is_stopped(struct target *target)
774 {
775 struct xtensa *xtensa = target_to_xtensa(target);
776 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
777 }
778
779 int xtensa_examine(struct target *target)
780 {
781 struct xtensa *xtensa = target_to_xtensa(target);
782 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
783
784 LOG_DEBUG("coreid = %d", target->coreid);
785
786 if (xtensa->core_config->core_type == XT_UNDEF) {
787 LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
788 return ERROR_FAIL;
789 }
790
791 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
792 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
793 xtensa_dm_queue_enable(&xtensa->dbg_mod);
794 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
795 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
796 if (res != ERROR_OK)
797 return res;
798 if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
799 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
800 return ERROR_TARGET_FAILURE;
801 }
802 LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
803 if (!target_was_examined(target))
804 target_set_examined(target);
805 xtensa_smpbreak_write(xtensa, xtensa->smp_break);
806 return ERROR_OK;
807 }
808
809 int xtensa_wakeup(struct target *target)
810 {
811 struct xtensa *xtensa = target_to_xtensa(target);
812 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
813
814 if (xtensa->reset_asserted)
815 cmd |= PWRCTL_CORERESET(xtensa);
816 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
817 /* TODO: can we join this with the write above? */
818 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
819 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
820 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
821 }
822
823 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
824 {
825 uint32_t dsr_data = 0x00110000;
826 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
827 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
828 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
829
830 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
831 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, set | OCDDCR_ENABLEOCD);
832 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, clear);
833 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DSR, dsr_data);
834 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
835 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
836 }
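
/* Worked example (illustrative): with set == OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN,
 * the XOR above yields clear == OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN, so
 * DCRSET enables OCD plus the requested cross-trigger bits and DCRCLR drops
 * the remaining ones.
 */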
837
838 int xtensa_smpbreak_set(struct target *target, uint32_t set)
839 {
840 struct xtensa *xtensa = target_to_xtensa(target);
841 int res = ERROR_OK;
842
843 xtensa->smp_break = set;
844 if (target_was_examined(target))
845 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
846 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
847 return res;
848 }
849
850 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
851 {
852 uint8_t dcr_buf[sizeof(uint32_t)];
853
854 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DCRSET, dcr_buf);
855 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
856 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
857 *val = buf_get_u32(dcr_buf, 0, 32);
858
859 return res;
860 }
861
862 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
863 {
864 struct xtensa *xtensa = target_to_xtensa(target);
865 *val = xtensa->smp_break;
866 return ERROR_OK;
867 }
868
869 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
870 {
871 return buf_get_u32(reg->value, 0, 32);
872 }
873
874 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
875 {
876 buf_set_u32(reg->value, 0, 32, value);
877 reg->dirty = true;
878 }
879
880 int xtensa_core_status_check(struct target *target)
881 {
882 struct xtensa *xtensa = target_to_xtensa(target);
883 int res, needclear = 0;
884
885 xtensa_dm_core_status_read(&xtensa->dbg_mod);
886 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
887 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
888 if (dsr & OCDDSR_EXECBUSY) {
889 if (!xtensa->suppress_dsr_errors)
890 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
891 needclear = 1;
892 }
893 if (dsr & OCDDSR_EXECEXCEPTION) {
894 if (!xtensa->suppress_dsr_errors)
895 LOG_TARGET_ERROR(target,
896 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
897 dsr);
898 needclear = 1;
899 }
900 if (dsr & OCDDSR_EXECOVERRUN) {
901 if (!xtensa->suppress_dsr_errors)
902 LOG_TARGET_ERROR(target,
903 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
904 dsr);
905 needclear = 1;
906 }
907 if (needclear) {
908 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
909 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
910 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
911 LOG_TARGET_ERROR(target, "clearing DSR failed!");
912 return ERROR_FAIL;
913 }
914 return ERROR_OK;
915 }
916
917 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
918 {
919 struct xtensa *xtensa = target_to_xtensa(target);
920 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
921 return xtensa_reg_get_value(reg);
922 }
923
924 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
925 {
926 struct xtensa *xtensa = target_to_xtensa(target);
927 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
928 if (xtensa_reg_get_value(reg) == value)
929 return;
930 xtensa_reg_set_value(reg, value);
931 }
932
933 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
934 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
935 {
936 struct xtensa *xtensa = target_to_xtensa(target);
937 uint32_t windowbase = (xtensa->core_config->windowed ?
938 xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE) : 0);
939 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
940 xtensa_reg_set(target, a_idx, value);
941 xtensa_reg_set(target, ar_idx, value);
942 }
943
944 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
945 uint32_t xtensa_cause_get(struct target *target)
946 {
947 return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
948 }
949
950 void xtensa_cause_clear(struct target *target)
951 {
952 struct xtensa *xtensa = target_to_xtensa(target);
953 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
954 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
955 }
956
957 int xtensa_assert_reset(struct target *target)
958 {
959 struct xtensa *xtensa = target_to_xtensa(target);
960
961 LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
962 xtensa_queue_pwr_reg_write(xtensa,
963 XDMREG_PWRCTL,
964 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
965 PWRCTL_COREWAKEUP(xtensa) | PWRCTL_CORERESET(xtensa));
966 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
967 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
968 if (res != ERROR_OK)
969 return res;
970
971 /* registers are now invalid */
972 xtensa->reset_asserted = true;
973 register_cache_invalidate(xtensa->core_cache);
974 target->state = TARGET_RESET;
975 return ERROR_OK;
976 }
977
978 int xtensa_deassert_reset(struct target *target)
979 {
980 struct xtensa *xtensa = target_to_xtensa(target);
981
982 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
983 if (target->reset_halt)
984 xtensa_queue_dbg_reg_write(xtensa,
985 XDMREG_DCRSET,
986 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
987 xtensa_queue_pwr_reg_write(xtensa,
988 XDMREG_PWRCTL,
989 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
990 PWRCTL_COREWAKEUP(xtensa));
991 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
992 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
993 if (res != ERROR_OK)
994 return res;
995 target->state = TARGET_RUNNING;
996 xtensa->reset_asserted = false;
997 return res;
998 }
999
1000 int xtensa_soft_reset_halt(struct target *target)
1001 {
1002 LOG_TARGET_DEBUG(target, "begin");
1003 return xtensa_assert_reset(target);
1004 }
1005
1006 int xtensa_fetch_all_regs(struct target *target)
1007 {
1008 struct xtensa *xtensa = target_to_xtensa(target);
1009 struct reg *reg_list = xtensa->core_cache->reg_list;
1010 unsigned int reg_list_size = xtensa->core_cache->num_regs;
1011 xtensa_reg_val_t cpenable = 0, windowbase = 0, a3;
1012 uint32_t woe;
1013 uint8_t a3_buf[4];
1014 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1015
1016 union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
1017 if (!regvals) {
1018 LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
1019 return ERROR_FAIL;
1020 }
1021 union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
1022 if (!dsrs) {
1023 LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
1024 free(regvals);
1025 return ERROR_FAIL;
1026 }
1027
1028 LOG_TARGET_DEBUG(target, "start");
1029
1030 /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1031 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1032 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
1033 int res = xtensa_window_state_save(target, &woe);
1034 if (res != ERROR_OK)
1035 goto xtensa_fetch_all_regs_done;
1036
1037 /* Assume the CPU has just halted. We now want to fill the register cache with all the
1038 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1039 * in one go, then sort everything out from the regvals variable. */
1040
1041 /* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
1042 for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
1043 /* Grab the 16 registers we can see */
1044 for (unsigned int i = 0; i < 16; i++) {
1045 if (i + j < xtensa->core_config->aregs_num) {
1046 xtensa_queue_exec_ins(xtensa,
1047 XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
1048 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
1049 regvals[XT_REG_IDX_AR0 + i + j].buf);
1050 if (debug_dsrs)
1051 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR,
1052 dsrs[XT_REG_IDX_AR0 + i + j].buf);
1053 }
1054 }
1055 if (xtensa->core_config->windowed)
1056 /* Now rotate the window so we'll see the next 16 registers. The final rotate
1057  * will wrap around, leaving us in the state we started in. */
1059 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
1060 }
1061 xtensa_window_state_restore(target, woe);
1062
1063 if (xtensa->core_config->coproc) {
1064 /* As the very first thing after AREGS, go grab CPENABLE */
1065 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1066 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1067 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
1068 }
1069 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1070 if (res != ERROR_OK) {
1071 LOG_ERROR("Failed to read ARs (%d)!", res);
1072 goto xtensa_fetch_all_regs_done;
1073 }
1074 xtensa_core_status_check(target);
1075
1076 a3 = buf_get_u32(a3_buf, 0, 32);
1077
1078 if (xtensa->core_config->coproc) {
1079 cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);
1080
1081 /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1082 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
1083 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1084 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1085
1086 /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1087 LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
1088 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
1089 }
1090 /* We're now free to use any of A0-A15 as scratch registers.
1091  * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1092 for (unsigned int i = 0; i < reg_list_size; i++) {
1093 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1094 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1095 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1096 bool reg_fetched = true;
1097 unsigned int reg_num = rlist[ridx].reg_num;
1098 switch (rlist[ridx].type) {
1099 case XT_REG_USER:
1100 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
1101 break;
1102 case XT_REG_FR:
1103 xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
1104 break;
1105 case XT_REG_SPECIAL:
1106 if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
1107 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1108 reg_num = (XT_PC_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
1109 } else if (reg_num == xtensa_regs[XT_REG_IDX_PS].reg_num) {
1110 /* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
1111 reg_num = (XT_PS_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
1112 } else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
1113 /* CPENABLE already read/updated; don't re-read */
1114 reg_fetched = false;
1115 break;
1116 }
1117 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
1118 break;
1119 default:
1120 reg_fetched = false;
1121 }
1122 if (reg_fetched) {
1123 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1124 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1125 if (debug_dsrs)
1126 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
1127 }
1128 }
1129 }
1130 /* Ok, send the whole mess to the CPU. */
1131 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1132 if (res != ERROR_OK) {
1133 LOG_ERROR("Failed to fetch AR regs!");
1134 goto xtensa_fetch_all_regs_done;
1135 }
1136 xtensa_core_status_check(target);
1137
1138 if (debug_dsrs) {
1139 /* DSR checking: follows order in which registers are requested. */
1140 for (unsigned int i = 0; i < reg_list_size; i++) {
1141 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1142 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1143 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
1144 (rlist[ridx].type != XT_REG_DEBUG) &&
1145 (rlist[ridx].type != XT_REG_RELGEN) &&
1146 (rlist[ridx].type != XT_REG_TIE) &&
1147 (rlist[ridx].type != XT_REG_OTHER)) {
1148 if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
1149 LOG_ERROR("Exception reading %s!", reg_list[i].name);
1150 res = ERROR_FAIL;
1151 goto xtensa_fetch_all_regs_done;
1152 }
1153 }
1154 }
1155 }
1156
1157 if (xtensa->core_config->windowed)
1158 /* We need the windowbase to decode the general addresses. */
1159 windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE].buf, 0, 32);
1160 /* Decode the result and update the cache. */
1161 for (unsigned int i = 0; i < reg_list_size; i++) {
1162 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1163 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1164 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1165 if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
1166 /* The 64-value general register set is read from (windowbase) on down.
1167 * We need to get the real register address by subtracting windowbase and
1168 * wrapping around. */
1169 enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
1170 windowbase);
1171 buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
1172 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1173 buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
1174 if (xtensa_extra_debug_log) {
1175 xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
1176 LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
1177 }
1178 } else {
1179 xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
1180 bool is_dirty = (i == XT_REG_IDX_CPENABLE);
1181 if (xtensa_extra_debug_log)
1182 LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
1183 xtensa_reg_set(target, i, regval);
1184 reg_list[i].dirty = is_dirty; /*always do this _after_ xtensa_reg_set! */
1185 }
1186 reg_list[i].valid = true;
1187 } else {
1188 if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
1189 /* Report unreadable (NOREAD) registers as all-zero but valid */
1190 reg_list[i].valid = true;
1191 xtensa_reg_set(target, i, 0);
1192 } else {
1193 reg_list[i].valid = false;
1194 }
1195 }
1196 }
1197
1198 if (xtensa->core_config->windowed) {
1199 /* We have used A3 as a scratch register.
1200 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag it for write-back.
1201 */
1202 enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
1203 xtensa_reg_set(target, ar3_idx, a3);
1204 xtensa_mark_register_dirty(xtensa, ar3_idx);
1205
1206 /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1207 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
1208 enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
1209 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
1210 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
1211 xtensa->scratch_ars[s].intval = false;
1212 }
1213
1214 /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1215 xtensa_reg_set(target, XT_REG_IDX_A3, a3);
1216 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1217 xtensa->regs_fetched = true;
1218 xtensa_fetch_all_regs_done:
1219 free(regvals);
1220 free(dsrs);
1221 return res;
1222 }
1223
1224 int xtensa_get_gdb_reg_list(struct target *target,
1225 struct reg **reg_list[],
1226 int *reg_list_size,
1227 enum target_register_class reg_class)
1228 {
1229 struct xtensa *xtensa = target_to_xtensa(target);
1230 unsigned int num_regs;
1231
1232 if (reg_class == REG_CLASS_GENERAL) {
1233 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1234 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1235 return ERROR_FAIL;
1236 }
1237 num_regs = xtensa->genpkt_regs_num;
1238 } else {
1239 /* Determine whether to return a contiguous or sparse register map */
1240 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1241 }
1242
1243 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1244
1245 *reg_list = calloc(num_regs, sizeof(struct reg *));
1246 if (!*reg_list)
1247 return ERROR_FAIL;
1248
1249 *reg_list_size = num_regs;
1250 if (xtensa->regmap_contiguous) {
1251 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1252 for (unsigned int i = 0; i < num_regs; i++)
1253 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1254 return ERROR_OK;
1255 }
1256
1257 for (unsigned int i = 0; i < num_regs; i++)
1258 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1259 unsigned int k = 0;
1260 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1261 if (xtensa->core_cache->reg_list[i].exist) {
1262 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1263 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1264 int sparse_idx = rlist[ridx].dbreg_num;
1265 if (i == XT_REG_IDX_PS) {
1266 if (xtensa->eps_dbglevel_idx == 0) {
1267 LOG_ERROR("eps_dbglevel_idx not set");
1268 return ERROR_FAIL;
1269 }
1270 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1271 if (xtensa_extra_debug_log)
1272 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1273 sparse_idx, xtensa->core_config->debug.irq_level,
1274 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1275 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1276 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1277 } else {
1278 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1279 }
1280 if (i == XT_REG_IDX_PC)
1281 /* Make a duplicate copy of PC for external access */
1282 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1283 k++;
1284 }
1285 }
1286
1287 if (k == num_regs)
1288 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1289
1290 return ERROR_OK;
1291 }
1292
1293 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1294 {
1295 struct xtensa *xtensa = target_to_xtensa(target);
1296 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1297 xtensa->core_config->mmu.dtlb_entries_count > 0;
1298 return ERROR_OK;
1299 }
1300
1301 int xtensa_halt(struct target *target)
1302 {
1303 struct xtensa *xtensa = target_to_xtensa(target);
1304
1305 LOG_TARGET_DEBUG(target, "start");
1306 if (target->state == TARGET_HALTED) {
1307 LOG_TARGET_DEBUG(target, "target was already halted");
1308 return ERROR_OK;
1309 }
1310 /* First we have to read the DSR and check whether the target has stopped */
1311 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1312 if (res != ERROR_OK) {
1313 LOG_TARGET_ERROR(target, "Failed to read core status!");
1314 return res;
1315 }
1316 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1317 if (!xtensa_is_stopped(target)) {
1318 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1319 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1320 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1321 if (res != ERROR_OK)
1322 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1323 }
1324
1325 return res;
1326 }
1327
1328 int xtensa_prepare_resume(struct target *target,
1329 int current,
1330 target_addr_t address,
1331 int handle_breakpoints,
1332 int debug_execution)
1333 {
1334 struct xtensa *xtensa = target_to_xtensa(target);
1335 uint32_t bpena = 0;
1336
1337 LOG_TARGET_DEBUG(target,
1338 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1339 current,
1340 address,
1341 handle_breakpoints,
1342 debug_execution);
1343
1344 if (target->state != TARGET_HALTED) {
1345 LOG_TARGET_WARNING(target, "target not halted");
1346 return ERROR_TARGET_NOT_HALTED;
1347 }
1348
1349 if (address && !current) {
1350 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1351 } else {
1352 uint32_t cause = xtensa_cause_get(target);
1353 LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1354 cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1355 if (cause & DEBUGCAUSE_DB)
1356 /* We stopped due to a watchpoint. We can't just resume executing the
1357  * instruction again because that would trigger the watchpoint again.
1358  * To fix this, we single-step, which ignores watchpoints. */
1360 xtensa_do_step(target, current, address, handle_breakpoints);
1361 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1362 /* We stopped due to a break instruction. We can't just resume executing
1363  * the instruction again because that would trigger the break again.
1364  * To fix this, we single-step, which ignores the break. */
1366 xtensa_do_step(target, current, address, handle_breakpoints);
1367 }
1368
1369 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1370 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1371 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1372 if (xtensa->hw_brps[slot]) {
1373 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1374 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1375 bpena |= BIT(slot);
1376 }
1377 }
1378 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1379
1380 /* Here we write all registers to the target */
1381 int res = xtensa_write_dirty_registers(target);
1382 if (res != ERROR_OK)
1383 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1384 return res;
1385 }
1386
1387 int xtensa_do_resume(struct target *target)
1388 {
1389 struct xtensa *xtensa = target_to_xtensa(target);
1390
1391 LOG_TARGET_DEBUG(target, "start");
1392
1393 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1394 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1395 if (res != ERROR_OK) {
1396 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1397 return res;
1398 }
1399 xtensa_core_status_check(target);
1400 return ERROR_OK;
1401 }
1402
1403 int xtensa_resume(struct target *target,
1404 int current,
1405 target_addr_t address,
1406 int handle_breakpoints,
1407 int debug_execution)
1408 {
1409 LOG_TARGET_DEBUG(target, "start");
1410 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1411 if (res != ERROR_OK) {
1412 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1413 return res;
1414 }
1415 res = xtensa_do_resume(target);
1416 if (res != ERROR_OK) {
1417 LOG_TARGET_ERROR(target, "Failed to resume!");
1418 return res;
1419 }
1420
1421 target->debug_reason = DBG_REASON_NOTHALTED;
1422 if (!debug_execution)
1423 target->state = TARGET_RUNNING;
1424 else
1425 target->state = TARGET_DEBUG_RUNNING;
1426
1427 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1428
1429 return ERROR_OK;
1430 }
1431
1432 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1433 {
1434 struct xtensa *xtensa = target_to_xtensa(target);
1435 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1436 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1437 if (err != ERROR_OK)
1438 return false;
1439
1440 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1441 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1442 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1443 return true;
1444
1445 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1446 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1447 return true;
1448
1449 return false;
1450 }
1451
1452 int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1453 {
1454 struct xtensa *xtensa = target_to_xtensa(target);
1455 int res;
1456 const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1457 xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
1458 xtensa_reg_val_t icountlvl, cause;
1459 xtensa_reg_val_t oldps, oldpc, cur_pc;
1460 bool ps_lowered = false;
1461
1462 LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1463 current, address, handle_breakpoints);
1464
1465 if (target->state != TARGET_HALTED) {
1466 LOG_TARGET_WARNING(target, "target not halted");
1467 return ERROR_TARGET_NOT_HALTED;
1468 }
1469
1470 if (xtensa->eps_dbglevel_idx == 0) {
1471 LOG_ERROR("eps_dbglevel_idx not set\n");
1472 return ERROR_FAIL;
1473 }
1474
1475 /* Save old ps (EPS[dbglvl] on LX), pc */
1476 oldps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
1477 oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);
1478
1479 cause = xtensa_cause_get(target);
1480 LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1481 oldps,
1482 oldpc,
1483 cause,
1484 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1485 if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1486 /* handle hard-coded SW breakpoints (e.g. syscalls) */
1487 LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1488 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1489 /* pretend that we have stepped */
1490 if (cause & DEBUGCAUSE_BI)
1491 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1492 else
1493 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1494 return ERROR_OK;
1495 }
1496
1497 /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1498 * at which the instructions are to be counted while stepping.
1499 *
1500 * For example, if we need to step by 2 instructions, and an interrupt occurs
1501 * in between, the processor will trigger the interrupt and halt after the 2nd
1502 * instruction within the interrupt vector and/or handler.
1503 *
1504 * However, sometimes we don't want the interrupt handlers to be executed at all
1505 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1506 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1507 * code from being counted during stepping. Note that C exception handlers must
1508 * run at level 0 and hence will be counted and stepped into, should one occur.
1509 *
1510 * TODO: Certain instructions should never be single-stepped and should instead
1511 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1512 * RFI >= DBGLEVEL.
1513 */
1514 if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
1515 if (!xtensa->core_config->high_irq.enabled) {
1516 LOG_TARGET_WARNING(
1517 target,
1518 "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1519 return ERROR_FAIL;
1520 }
1521 /* Update ICOUNTLEVEL accordingly */
1522 icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
1523 } else {
1524 icountlvl = xtensa->core_config->debug.irq_level;
1525 }
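	/* Illustration (assuming the ICOUNTLEVEL semantics described above): if the target
	 * stopped at PS.INTLEVEL=3 and debug.irq_level is 6, XT_STEPPING_ISR_OFF yields
	 * ICOUNTLEVEL=4, so code running at interrupt level 4 or higher is not counted and
	 * stepping will not halt inside such handlers. */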
1526
1527 if (cause & DEBUGCAUSE_DB) {
1528 		/* We stopped due to a watchpoint. We can't simply resume, because executing the
1529 		 * instruction again would trigger the watchpoint again. To fix this, we disable the
1530 		 * watchpoints, single-step, and then re-enable them. */
1531 LOG_TARGET_DEBUG(
1532 target,
1533 "Single-stepping to get past instruction that triggered the watchpoint...");
1534 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1535 /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1536 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1537 dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
1538 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
1539 }
1540 }
1541
1542 if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
1543 /* handle normal SW breakpoint */
1544 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1545 if ((oldps & 0xf) >= icountlvl) {
1546 /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1547 ps_lowered = true;
1548 uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
1549 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
1550 LOG_TARGET_DEBUG(target,
1551 "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
1552 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1553 newps,
1554 oldps);
1555 }
1556 do {
1557 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
1558 xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1559
1560 /* Now ICOUNT is set, we can resume as if we were going to run */
1561 res = xtensa_prepare_resume(target, current, address, 0, 0);
1562 if (res != ERROR_OK) {
1563 LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1564 return res;
1565 }
1566 res = xtensa_do_resume(target);
1567 if (res != ERROR_OK) {
1568 LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1569 return res;
1570 }
1571
1572 /* Wait for stepping to complete */
1573 long long start = timeval_ms();
1574 while (timeval_ms() < start + 500) {
1575 			/* Do not use target_poll here, it also triggers other things... just manually read the DSR
1576 			 * until stepping is complete. */
1577 usleep(1000);
1578 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1579 if (res != ERROR_OK) {
1580 LOG_TARGET_ERROR(target, "Failed to read core status!");
1581 return res;
1582 }
1583 if (xtensa_is_stopped(target))
1584 break;
1585 usleep(1000);
1586 }
1587 LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1588 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1589 if (!xtensa_is_stopped(target)) {
1590 LOG_TARGET_WARNING(
1591 target,
1592 "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1593 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1594 target->debug_reason = DBG_REASON_NOTHALTED;
1595 target->state = TARGET_RUNNING;
1596 return ERROR_FAIL;
1597 }
1598
1599 xtensa_fetch_all_regs(target);
1600 cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1601
1602 LOG_TARGET_DEBUG(target,
1603 "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1604 xtensa_reg_get(target, XT_REG_IDX_PS),
1605 cur_pc,
1606 xtensa_cause_get(target),
1607 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1608
1609 		/* Do not step into WindowOverflow if ISRs are masked.
1610 		   If we stopped in WindowOverflow at a breakpoint with masked ISRs and
1611 		   tried to step, it would take us out of that handler. */
1612 if (xtensa->core_config->windowed &&
1613 xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
1614 xtensa_pc_in_winexc(target, cur_pc)) {
1615 /* isrmask = on, need to step out of the window exception handler */
1616 LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1617 oldpc = cur_pc;
1618 address = oldpc + 3;
1619 continue;
1620 }
1621
1622 if (oldpc == cur_pc)
1623 LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1624 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1625 else
1626 LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1627 break;
1628 } while (true);
1629
1630 target->debug_reason = DBG_REASON_SINGLESTEP;
1631 target->state = TARGET_HALTED;
1632 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1633 LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1634
1635 if (cause & DEBUGCAUSE_DB) {
1636 LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1637 /* Restore the DBREAKCx registers */
1638 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1639 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
1640 }
1641
1642 /* Restore int level */
1643 if (ps_lowered) {
1644 LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
1645 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1646 oldps);
1647 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
1648 }
1649
1650 /* write ICOUNTLEVEL back to zero */
1651 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
1652 /* TODO: can we skip writing dirty registers and re-fetching them? */
1653 res = xtensa_write_dirty_registers(target);
1654 xtensa_fetch_all_regs(target);
1655 return res;
1656 }
1657
1658 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1659 {
1660 return xtensa_do_step(target, current, address, handle_breakpoints);
1661 }
1662
1663 /**
1664  * Returns true if the two ranges overlap
1665 */
1666 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1667 target_addr_t r1_end,
1668 target_addr_t r2_start,
1669 target_addr_t r2_end)
1670 {
1671 if ((r2_start >= r1_start) && (r2_start < r1_end))
1672 return true; /* r2_start is in r1 region */
1673 if ((r2_end > r1_start) && (r2_end <= r1_end))
1674 return true; /* r2_end is in r1 region */
1675 return false;
1676 }
1677
1678 /**
1679  * Returns the size of the overlapping region of the two ranges.
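 * E.g. the ranges [0x100, 0x200) and [0x180, 0x300) overlap by 0x80 bytes.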
1680 */
1681 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1682 target_addr_t r1_end,
1683 target_addr_t r2_start,
1684 target_addr_t r2_end)
1685 {
1686 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1687 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1688 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1689 return ov_end - ov_start;
1690 }
1691 return 0;
1692 }
1693
1694 /**
1695  * Check that the address range is fully covered by configured memory regions and that they permit the requested access mode
1696 */
1697 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1698 {
1699 	target_addr_t adr_pos = address;	/* address cursor, set to the start of the range */
1700 target_addr_t adr_end = address + size; /* region end */
1701 target_addr_t overlap_size;
1702 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1703
1704 while (adr_pos < adr_end) {
1705 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1706 		if (!cm)	/* address does not belong to any configured region */
1707 return false;
1708 if ((cm->access & access) != access) /* access check */
1709 return false;
1710 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1711 assert(overlap_size != 0);
1712 adr_pos += overlap_size;
1713 }
1714 return true;
1715 }
1716
1717 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1718 {
1719 struct xtensa *xtensa = target_to_xtensa(target);
1720 /* We are going to read memory in 32-bit increments. This may not be what the calling
1721 * function expects, so we may need to allocate a temp buffer and read into that first. */
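	/* For example, a 6-byte read starting at offset 2 within a word becomes an aligned
	 * 8-byte read of the two surrounding words; the requested bytes are then copied out
	 * of the temporary buffer at 'address & 3' below. */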
1722 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1723 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1724 target_addr_t adr = addrstart_al;
1725 uint8_t *albuff;
1726 bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1727
1728 if (target->state != TARGET_HALTED) {
1729 LOG_TARGET_WARNING(target, "target not halted");
1730 return ERROR_TARGET_NOT_HALTED;
1731 }
1732
1733 if (!xtensa->permissive_mode) {
1734 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1735 XT_MEM_ACCESS_READ)) {
1736 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1737 return ERROR_FAIL;
1738 }
1739 }
1740
1741 unsigned int alloc_bytes = ALIGN_UP(addrend_al - addrstart_al, sizeof(uint32_t));
1742 albuff = calloc(alloc_bytes, 1);
1743 if (!albuff) {
1744 LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
1745 addrend_al - addrstart_al);
1746 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1747 }
1748
1749 /* We're going to use A3 here */
1750 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1751 /* Write start address to A3 */
1752 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1753 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1754 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1755 if (xtensa->probe_lsddr32p != 0) {
1756 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1757 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1758 xtensa_queue_dbg_reg_read(xtensa,
1759 (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
1760 &albuff[i]);
1761 } else {
1762 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1763 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1764 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1765 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
1766 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[i]);
1767 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1768 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1769 }
1770 }
1771 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1772 if (res == ERROR_OK) {
1773 bool prev_suppress = xtensa->suppress_dsr_errors;
1774 xtensa->suppress_dsr_errors = true;
1775 res = xtensa_core_status_check(target);
1776 if (xtensa->probe_lsddr32p == -1)
1777 xtensa->probe_lsddr32p = 1;
1778 xtensa->suppress_dsr_errors = prev_suppress;
1779 }
1780 if (res != ERROR_OK) {
1781 if (xtensa->probe_lsddr32p != 0) {
1782 /* Disable fast memory access instructions and retry before reporting an error */
1783 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1784 xtensa->probe_lsddr32p = 0;
1785 res = xtensa_read_memory(target, address, size, count, buffer);
1786 bswap = false;
1787 } else {
1788 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
1789 count * size, address);
1790 }
1791 }
1792
1793 if (bswap)
1794 buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
1795 memcpy(buffer, albuff + (address & 3), (size * count));
1796 free(albuff);
1797 return res;
1798 }
1799
1800 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1801 {
1802 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1803 return xtensa_read_memory(target, address, 1, count, buffer);
1804 }
1805
1806 int xtensa_write_memory(struct target *target,
1807 target_addr_t address,
1808 uint32_t size,
1809 uint32_t count,
1810 const uint8_t *buffer)
1811 {
1812 	/* This memory write function can have nearly anything thrown at it, from
1813 	 * aligned uint32 writes down to unaligned uint8 writes. Xtensa memory, however,
1814 	 * does not always accept anything but aligned uint32 writes, so we convert
1815 	 * everything into that form. */
1816 struct xtensa *xtensa = target_to_xtensa(target);
1817 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1818 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1819 target_addr_t adr = addrstart_al;
1820 int res;
1821 uint8_t *albuff;
1822 bool fill_head_tail = false;
1823
1824 if (target->state != TARGET_HALTED) {
1825 LOG_TARGET_WARNING(target, "target not halted");
1826 return ERROR_TARGET_NOT_HALTED;
1827 }
1828
1829 if (!xtensa->permissive_mode) {
1830 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
1831 LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
1832 return ERROR_FAIL;
1833 }
1834 }
1835
1836 if (size == 0 || count == 0 || !buffer)
1837 return ERROR_COMMAND_SYNTAX_ERROR;
1838
1839 /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
1840 if (addrstart_al == address && addrend_al == address + (size * count)) {
1841 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1842 /* Need a buffer for byte-swapping */
1843 albuff = malloc(addrend_al - addrstart_al);
1844 else
1845 /* We discard the const here because albuff can also be non-const */
1846 albuff = (uint8_t *)buffer;
1847 } else {
1848 fill_head_tail = true;
1849 albuff = malloc(addrend_al - addrstart_al);
1850 }
1851 if (!albuff) {
1852 LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
1853 addrend_al - addrstart_al);
1854 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1855 }
1856
1857 /* We're going to use A3 here */
1858 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1859
1860 /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
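	/* For example, a 2-byte write starting at offset 3 within a word touches two words;
	 * both are read back below so the surrounding bytes are preserved when the full
	 * aligned words are written out. */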
1861 if (fill_head_tail) {
1862 /* See if we need to read the first and/or last word. */
1863 if (address & 3) {
1864 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1865 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1866 if (xtensa->probe_lsddr32p == 1) {
1867 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1868 } else {
1869 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1870 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1871 }
1872 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[0]);
1873 }
1874 if ((address + (size * count)) & 3) {
1875 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
1876 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1877 if (xtensa->probe_lsddr32p == 1) {
1878 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1879 } else {
1880 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1881 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1882 }
1883 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
1884 &albuff[addrend_al - addrstart_al - 4]);
1885 }
1886 /* Grab bytes */
1887 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1888 if (res != ERROR_OK) {
1889 LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
1890 if (albuff != buffer)
1891 free(albuff);
1892 return res;
1893 }
1894 xtensa_core_status_check(target);
1895 if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
1896 bool swapped_w0 = false;
1897 if (address & 3) {
1898 buf_bswap32(&albuff[0], &albuff[0], 4);
1899 swapped_w0 = true;
1900 }
1901 if ((address + (size * count)) & 3) {
1902 if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
1903 /* Don't double-swap if buffer start/end are within the same word */
1904 } else {
1905 buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
1906 &albuff[addrend_al - addrstart_al - 4], 4);
1907 }
1908 }
1909 }
1910 /* Copy data to be written into the aligned buffer (in host-endianness) */
1911 memcpy(&albuff[address & 3], buffer, size * count);
1912 /* Now we can write albuff in aligned uint32s. */
1913 }
1914
1915 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1916 buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
1917
1918 /* Write start address to A3 */
1919 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1920 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1921 /* Write the aligned buffer */
1922 if (xtensa->probe_lsddr32p != 0) {
1923 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1924 if (i == 0) {
1925 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
1926 xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
1927 } else {
1928 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
1929 }
1930 }
1931 } else {
1932 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1933 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1934 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
1935 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
1936 xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1937 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1938 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1939 }
1940 }
1941
1942 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1943 if (res == ERROR_OK) {
1944 bool prev_suppress = xtensa->suppress_dsr_errors;
1945 xtensa->suppress_dsr_errors = true;
1946 res = xtensa_core_status_check(target);
1947 if (xtensa->probe_lsddr32p == -1)
1948 xtensa->probe_lsddr32p = 1;
1949 xtensa->suppress_dsr_errors = prev_suppress;
1950 }
1951 if (res != ERROR_OK) {
1952 if (xtensa->probe_lsddr32p != 0) {
1953 /* Disable fast memory access instructions and retry before reporting an error */
1954 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1955 xtensa->probe_lsddr32p = 0;
1956 res = xtensa_write_memory(target, address, size, count, buffer);
1957 } else {
1958 LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
1959 count * size, address);
1960 }
1961 } else {
1962 /* Invalidate ICACHE, writeback DCACHE if present */
1963 uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
1964 uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
1965 if (issue_ihi || issue_dhwb) {
1966 uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
1967 uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
1968 uint32_t linesize = MIN(ilinesize, dlinesize);
1969 uint32_t off = 0;
1970 adr = addrstart_al;
1971
1972 while ((adr + off) < addrend_al) {
1973 if (off == 0) {
1974 /* Write start address to A3 */
1975 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr);
1976 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1977 }
1978 if (issue_ihi)
1979 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
1980 if (issue_dhwb)
1981 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
1982 off += linesize;
1983 if (off > 1020) {
1984 /* IHI, DHWB have 8-bit immediate operands (0..1020) */
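					/* 1020 = 255 * 4: the byte offset is the 8-bit field scaled by 4, so
					 * re-seed A3 with a new base address and restart the offset here. */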
1985 adr += off;
1986 off = 0;
1987 }
1988 }
1989
1990 /* Execute cache WB/INV instructions */
1991 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1992 xtensa_core_status_check(target);
1993 if (res != ERROR_OK)
1994 LOG_TARGET_ERROR(target,
1995 "Error issuing cache writeback/invaldate instruction(s): %d",
1996 res);
1997 }
1998 }
1999 if (albuff != buffer)
2000 free(albuff);
2001
2002 return res;
2003 }
2004
2005 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2006 {
2007 /* xtensa_write_memory can handle everything. Just pass on to that. */
2008 return xtensa_write_memory(target, address, 1, count, buffer);
2009 }
2010
2011 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2012 {
2013 LOG_WARNING("not implemented yet");
2014 return ERROR_FAIL;
2015 }
2016
2017 int xtensa_poll(struct target *target)
2018 {
2019 struct xtensa *xtensa = target_to_xtensa(target);
2020 if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
2021 target->state = TARGET_UNKNOWN;
2022 return ERROR_TARGET_NOT_EXAMINED;
2023 }
2024
2025 int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET(xtensa) |
2026 PWRSTAT_COREWASRESET(xtensa));
2027 if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
2028 LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2029 xtensa->dbg_mod.power_status.stat,
2030 PWRSTAT_DEBUGWASRESET(xtensa) | PWRSTAT_COREWASRESET(xtensa),
2031 xtensa->dbg_mod.power_status.stath);
2032 if (res != ERROR_OK)
2033 return res;
2034
2035 if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
2036 LOG_TARGET_INFO(target, "Debug controller was reset.");
2037 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
2038 if (res != ERROR_OK)
2039 return res;
2040 }
2041 if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
2042 LOG_TARGET_INFO(target, "Core was reset.");
2043 xtensa_dm_power_status_cache(&xtensa->dbg_mod);
2044 /* Enable JTAG, set reset if needed */
2045 res = xtensa_wakeup(target);
2046 if (res != ERROR_OK)
2047 return res;
2048
2049 uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2050 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
2051 if (res != ERROR_OK)
2052 return res;
2053 if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2054 LOG_TARGET_DEBUG(target,
2055 "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2056 prev_dsr,
2057 xtensa->dbg_mod.core_status.dsr);
2058 if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET(xtensa)) {
2059 		/* if the RESET state is persistent */
2060 target->state = TARGET_RESET;
2061 } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2062 LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2063 xtensa->dbg_mod.core_status.dsr,
2064 xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
2065 target->state = TARGET_UNKNOWN;
2066 if (xtensa->come_online_probes_num == 0)
2067 target->examined = false;
2068 else
2069 xtensa->come_online_probes_num--;
2070 } else if (xtensa_is_stopped(target)) {
2071 if (target->state != TARGET_HALTED) {
2072 enum target_state oldstate = target->state;
2073 target->state = TARGET_HALTED;
2074 /* Examine why the target has been halted */
2075 target->debug_reason = DBG_REASON_DBGRQ;
2076 xtensa_fetch_all_regs(target);
2077 			/* When setting the debug reason, DEBUGCAUSE events have the following
2078 			 * priorities: watchpoint == breakpoint > single step > debug interrupt.
2079 			 * Simultaneous watchpoint and breakpoint events result in the special
2080 			 * debug reason DBG_REASON_WPTANDBKPT. */
2081 uint32_t halt_cause = xtensa_cause_get(target);
2082 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2083 if (halt_cause & DEBUGCAUSE_IC)
2084 target->debug_reason = DBG_REASON_SINGLESTEP;
2085 if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2086 if (halt_cause & DEBUGCAUSE_DB)
2087 target->debug_reason = DBG_REASON_WPTANDBKPT;
2088 else
2089 target->debug_reason = DBG_REASON_BREAKPOINT;
2090 } else if (halt_cause & DEBUGCAUSE_DB) {
2091 target->debug_reason = DBG_REASON_WATCHPOINT;
2092 }
2093 LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2094 ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2095 xtensa_reg_get(target, XT_REG_IDX_PC),
2096 target->debug_reason,
2097 oldstate);
2098 LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2099 halt_cause,
2100 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
2101 xtensa->dbg_mod.core_status.dsr);
2102 xtensa_dm_core_status_clear(
2103 &xtensa->dbg_mod,
2104 OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
2105 OCDDSR_DEBUGINTTRAX |
2106 OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
2107 }
2108 } else {
2109 target->debug_reason = DBG_REASON_NOTHALTED;
2110 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2111 target->state = TARGET_RUNNING;
2112 target->debug_reason = DBG_REASON_NOTHALTED;
2113 }
2114 }
2115 if (xtensa->trace_active) {
2116 /* Detect if tracing was active but has stopped. */
2117 struct xtensa_trace_status trace_status;
2118 res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2119 if (res == ERROR_OK) {
2120 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2121 LOG_INFO("Detected end of trace.");
2122 if (trace_status.stat & TRAXSTAT_PCMTG)
2123 LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2124 if (trace_status.stat & TRAXSTAT_PTITG)
2125 LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2126 if (trace_status.stat & TRAXSTAT_CTITG)
2127 LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2128 xtensa->trace_active = false;
2129 }
2130 }
2131 }
2132 return ERROR_OK;
2133 }
2134
2135 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2136 {
2137 struct xtensa *xtensa = target_to_xtensa(target);
2138 unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
2139 unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2140 uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2141 uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2142 unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2143 unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
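	/* The written instruction may straddle a cache-line boundary; if so, the following
	 * line must also be invalidated / written back (the offset-4 variants below). */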
2144 int ret;
2145
2146 if (size > icache_line_size)
2147 return ERROR_FAIL;
2148
2149 if (issue_ihi || issue_dhwbi) {
2150 /* We're going to use A3 here */
2151 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
2152
2153 /* Write start address to A3 and invalidate */
2154 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2155 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2156 LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
2157 if (issue_dhwbi) {
2158 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
2159 if (!same_dc_line) {
2160 LOG_TARGET_DEBUG(target,
2161 "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2162 address + 4);
2163 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
2164 }
2165 }
2166 if (issue_ihi) {
2167 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
2168 if (!same_ic_line) {
2169 LOG_TARGET_DEBUG(target,
2170 "IHI second icache line for address "TARGET_ADDR_FMT,
2171 address + 4);
2172 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
2173 }
2174 }
2175
2176 /* Execute invalidate instructions */
2177 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2178 xtensa_core_status_check(target);
2179 if (ret != ERROR_OK) {
2180 LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
2181 return ret;
2182 }
2183 }
2184
2185 /* Write new instructions to memory */
2186 ret = target_write_buffer(target, address, size, buffer);
2187 if (ret != ERROR_OK) {
2188 LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2189 return ret;
2190 }
2191
2192 if (issue_dhwbi) {
2193 /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2194 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2195 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2196 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
2197 LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2198 if (!same_dc_line) {
2199 LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2200 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
2201 }
2202
2203 		/* Execute writeback instructions */
2204 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2205 xtensa_core_status_check(target);
2206 }
2207
2208 /* TODO: Handle L2 cache if present */
2209 return ret;
2210 }
2211
2212 static int xtensa_sw_breakpoint_add(struct target *target,
2213 struct breakpoint *breakpoint,
2214 struct xtensa_sw_breakpoint *sw_bp)
2215 {
2216 struct xtensa *xtensa = target_to_xtensa(target);
2217 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2218 if (ret != ERROR_OK) {
2219 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2220 return ret;
2221 }
2222
2223 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2224 sw_bp->oocd_bp = breakpoint;
2225
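	/* Use the 3-byte BREAK for standard instructions and the 2-byte BREAK.N when replacing
	 * a narrow (density) instruction, so the original instruction length is preserved. */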
2226 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2227
2228 /* Underlying memory write will convert instruction endianness, don't do that here */
2229 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2230 if (ret != ERROR_OK) {
2231 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2232 return ret;
2233 }
2234
2235 return ERROR_OK;
2236 }
2237
2238 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2239 {
2240 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2241 if (ret != ERROR_OK) {
2242 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2243 return ret;
2244 }
2245 sw_bp->oocd_bp = NULL;
2246 return ERROR_OK;
2247 }
2248
2249 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2250 {
2251 struct xtensa *xtensa = target_to_xtensa(target);
2252 unsigned int slot;
2253
2254 if (breakpoint->type == BKPT_SOFT) {
2255 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2256 if (!xtensa->sw_brps[slot].oocd_bp ||
2257 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2258 break;
2259 }
2260 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2261 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2262 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2263 }
2264 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2265 if (ret != ERROR_OK) {
2266 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2267 return ret;
2268 }
2269 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2270 slot,
2271 breakpoint->address);
2272 return ERROR_OK;
2273 }
2274
2275 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2276 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2277 break;
2278 }
2279 if (slot == xtensa->core_config->debug.ibreaks_num) {
2280 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2281 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2282 }
2283
2284 xtensa->hw_brps[slot] = breakpoint;
2285 /* We will actually write the breakpoints when we resume the target. */
2286 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2287 slot,
2288 breakpoint->address);
2289
2290 return ERROR_OK;
2291 }
2292
2293 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2294 {
2295 struct xtensa *xtensa = target_to_xtensa(target);
2296 unsigned int slot;
2297
2298 if (breakpoint->type == BKPT_SOFT) {
2299 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2300 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2301 break;
2302 }
2303 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2304 LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2305 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2306 }
2307 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2308 if (ret != ERROR_OK) {
2309 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2310 return ret;
2311 }
2312 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2313 return ERROR_OK;
2314 }
2315
2316 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2317 if (xtensa->hw_brps[slot] == breakpoint)
2318 break;
2319 }
2320 if (slot == xtensa->core_config->debug.ibreaks_num) {
2321 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2322 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2323 }
2324 xtensa->hw_brps[slot] = NULL;
2325 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2326 return ERROR_OK;
2327 }
2328
2329 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2330 {
2331 struct xtensa *xtensa = target_to_xtensa(target);
2332 unsigned int slot;
2333 xtensa_reg_val_t dbreakcval;
2334
2335 if (target->state != TARGET_HALTED) {
2336 LOG_TARGET_WARNING(target, "target not halted");
2337 return ERROR_TARGET_NOT_HALTED;
2338 }
2339
2340 if (watchpoint->mask != ~(uint32_t)0) {
2341 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2342 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2343 }
2344
2345 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2346 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2347 break;
2348 }
2349 if (slot == xtensa->core_config->debug.dbreaks_num) {
2350 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2351 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2352 }
2353
2354 	/* Figure out the value for DBREAKC bits 5..0:
2355 	 * it is basically 0x3F with one more bit cleared at the LSB end for each doubling of the watchpoint length. */
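	/* For example: length 1 -> 0x3F, length 4 -> 0x3C, length 64 -> 0x00 (via ALIGN_DOWN below). */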
2356 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2357 !IS_PWR_OF_2(watchpoint->length) ||
2358 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2359 LOG_TARGET_WARNING(
2360 target,
2361 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2362 " not supported by hardware.",
2363 watchpoint->length,
2364 watchpoint->address);
2365 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2366 }
2367 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2368
2369 if (watchpoint->rw == WPT_READ)
2370 dbreakcval |= BIT(30);
2371 if (watchpoint->rw == WPT_WRITE)
2372 dbreakcval |= BIT(31);
2373 if (watchpoint->rw == WPT_ACCESS)
2374 dbreakcval |= BIT(30) | BIT(31);
2375
2376 /* Write DBREAKA[slot] and DBCREAKC[slot] */
2377 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2378 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2379 xtensa->hw_wps[slot] = watchpoint;
2380 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2381 watchpoint->address);
2382 return ERROR_OK;
2383 }
2384
2385 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2386 {
2387 struct xtensa *xtensa = target_to_xtensa(target);
2388 unsigned int slot;
2389
2390 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2391 if (xtensa->hw_wps[slot] == watchpoint)
2392 break;
2393 }
2394 if (slot == xtensa->core_config->debug.dbreaks_num) {
2395 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2396 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2397 }
2398 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2399 xtensa->hw_wps[slot] = NULL;
2400 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2401 watchpoint->address);
2402 return ERROR_OK;
2403 }
2404
2405 static int xtensa_build_reg_cache(struct target *target)
2406 {
2407 struct xtensa *xtensa = target_to_xtensa(target);
2408 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2409 unsigned int last_dbreg_num = 0;
2410
2411 if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
2412 LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2413 xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);
2414
2415 struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2416
2417 if (!reg_cache) {
2418 LOG_ERROR("Failed to alloc reg cache!");
2419 return ERROR_FAIL;
2420 }
2421 reg_cache->name = "Xtensa registers";
2422 reg_cache->next = NULL;
2423 /* Init reglist */
2424 unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2425 struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2426 if (!reg_list) {
2427 LOG_ERROR("Failed to alloc reg list!");
2428 goto fail;
2429 }
2430 xtensa->dbregs_num = 0;
2431 unsigned int didx = 0;
2432 for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2433 struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2434 unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2435 for (unsigned int i = 0; i < listsize; i++, didx++) {
2436 reg_list[didx].exist = rlist[i].exist;
2437 reg_list[didx].name = rlist[i].name;
2438 reg_list[didx].size = 32;
2439 reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2440 if (!reg_list[didx].value) {
2441 LOG_ERROR("Failed to alloc reg list value!");
2442 goto fail;
2443 }
2444 reg_list[didx].dirty = false;
2445 reg_list[didx].valid = false;
2446 reg_list[didx].type = &xtensa_reg_type;
2447 reg_list[didx].arch_info = xtensa;
2448 if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2449 last_dbreg_num = rlist[i].dbreg_num;
2450
2451 if (xtensa_extra_debug_log) {
2452 LOG_TARGET_DEBUG(target,
2453 "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2454 reg_list[didx].name,
2455 whichlist,
2456 reg_list[didx].exist,
2457 didx,
2458 rlist[i].type,
2459 rlist[i].dbreg_num);
2460 }
2461 }
2462 }
2463
2464 xtensa->dbregs_num = last_dbreg_num + 1;
2465 reg_cache->reg_list = reg_list;
2466 reg_cache->num_regs = reg_list_size;
2467
2468 LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2469 xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
2470
2471 /* Construct empty-register list for handling unknown register requests */
2472 xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
2473 if (!xtensa->empty_regs) {
2474 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2475 goto fail;
2476 }
2477 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2478 xtensa->empty_regs[i].name = calloc(8, sizeof(char));
2479 if (!xtensa->empty_regs[i].name) {
2480 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2481 goto fail;
2482 }
2483 sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
2484 xtensa->empty_regs[i].size = 32;
2485 xtensa->empty_regs[i].type = &xtensa_reg_type;
2486 xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2487 if (!xtensa->empty_regs[i].value) {
2488 LOG_ERROR("Failed to alloc empty reg list value!");
2489 goto fail;
2490 }
2491 xtensa->empty_regs[i].arch_info = xtensa;
2492 }
2493
2494 /* Construct contiguous register list from contiguous descriptor list */
2495 if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
2496 xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
2497 if (!xtensa->contiguous_regs_list) {
2498 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2499 goto fail;
2500 }
2501 for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
2502 unsigned int j;
2503 for (j = 0; j < reg_cache->num_regs; j++) {
2504 if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
2505 xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
2506 LOG_TARGET_DEBUG(target,
2507 "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
2508 xtensa->contiguous_regs_list[i]->name,
2509 xtensa->contiguous_regs_desc[i]->dbreg_num);
2510 break;
2511 }
2512 }
2513 if (j == reg_cache->num_regs)
2514 LOG_TARGET_WARNING(target, "contiguous register %s not found",
2515 xtensa->contiguous_regs_desc[i]->name);
2516 }
2517 }
2518
2519 xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2520 if (!xtensa->algo_context_backup) {
2521 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2522 goto fail;
2523 }
2524 for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2525 struct reg *reg = &reg_cache->reg_list[i];
2526 xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2527 if (!xtensa->algo_context_backup[i]) {
2528 LOG_ERROR("Failed to alloc mem for algorithm context!");
2529 goto fail;
2530 }
2531 }
2532 xtensa->core_cache = reg_cache;
2533 if (cache_p)
2534 *cache_p = reg_cache;
2535 return ERROR_OK;
2536
2537 fail:
2538 if (reg_list) {
2539 for (unsigned int i = 0; i < reg_list_size; i++)
2540 free(reg_list[i].value);
2541 free(reg_list);
2542 }
2543 if (xtensa->empty_regs) {
2544 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2545 free((void *)xtensa->empty_regs[i].name);
2546 free(xtensa->empty_regs[i].value);
2547 }
2548 free(xtensa->empty_regs);
2549 }
2550 if (xtensa->algo_context_backup) {
2551 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2552 free(xtensa->algo_context_backup[i]);
2553 free(xtensa->algo_context_backup);
2554 }
2555 free(reg_cache);
2556
2557 return ERROR_FAIL;
2558 }
2559
2560 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
2561 {
2562 struct xtensa *xtensa = target_to_xtensa(target);
2563 int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
2564 /* Process op[] list */
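	/* Each op group is encoded as ':<oplen>:<op[0]>:<op[1]>...' (see the qxtreg/Qxtreg
	 * packet format documented in xtensa_gdbqc_qxtreg() below). */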
2565 while (opstr && (*opstr == ':')) {
2566 uint8_t ops[32];
2567 unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
2568 if (oplen > 32) {
2569 LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
2570 break;
2571 }
2572 unsigned int i = 0;
2573 while ((i < oplen) && opstr && (*opstr == ':'))
2574 ops[i++] = strtoul(opstr + 1, &opstr, 16);
2575 if (i != oplen) {
2576 LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
2577 break;
2578 }
2579
2580 char insn_buf[128];
2581 sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
2582 for (i = 0; i < oplen; i++)
2583 sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
2584 LOG_TARGET_DEBUG(target, "%s", insn_buf);
2585 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
2586 status = ERROR_OK;
2587 }
2588 return status;
2589 }
2590
2591 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
2592 {
2593 struct xtensa *xtensa = target_to_xtensa(target);
2594 bool iswrite = (packet[0] == 'Q');
2595 enum xtensa_qerr_e error;
2596
2597 /* Read/write TIE register. Requires spill location.
2598 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
2599 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
2600 */
2601 if (!(xtensa->spill_buf)) {
2602 LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
2603 error = XT_QERR_FAIL;
2604 goto xtensa_gdbqc_qxtreg_fail;
2605 }
2606
2607 char *delim;
2608 uint32_t regnum = strtoul(packet + 6, &delim, 16);
2609 if (*delim != ':') {
2610 LOG_ERROR("Malformed qxtreg packet");
2611 error = XT_QERR_INVAL;
2612 goto xtensa_gdbqc_qxtreg_fail;
2613 }
2614 uint32_t reglen = strtoul(delim + 1, &delim, 16);
2615 if (*delim != ':') {
2616 LOG_ERROR("Malformed qxtreg packet");
2617 error = XT_QERR_INVAL;
2618 goto xtensa_gdbqc_qxtreg_fail;
2619 }
2620 uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
2621 memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
2622 LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
2623 if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
2624 LOG_ERROR("TIE register too large");
2625 error = XT_QERR_MEM;
2626 goto xtensa_gdbqc_qxtreg_fail;
2627 }
2628
2629 /* (1) Save spill memory, (1.5) [if write then store value to spill location],
2630 * (2) read old a4, (3) write spill address to a4.
2631 * NOTE: ensure a4 is restored properly by all error handling logic
2632 */
2633 unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
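	/* Use byte accesses if the spill location is not word-aligned, word accesses otherwise. */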
2634 int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
2635 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2636 if (status != ERROR_OK) {
2637 LOG_ERROR("Spill memory save");
2638 error = XT_QERR_MEM;
2639 goto xtensa_gdbqc_qxtreg_fail;
2640 }
2641 if (iswrite) {
2642 /* Extract value and store in spill memory */
2643 unsigned int b = 0;
2644 char *valbuf = strchr(delim, '=');
2645 if (!(valbuf && (*valbuf == '='))) {
2646 LOG_ERROR("Malformed Qxtreg packet");
2647 error = XT_QERR_INVAL;
2648 goto xtensa_gdbqc_qxtreg_fail;
2649 }
2650 valbuf++;
2651 while (*valbuf && *(valbuf + 1)) {
2652 char bytestr[3] = { 0, 0, 0 };
2653 strncpy(bytestr, valbuf, 2);
2654 regbuf[b++] = strtoul(bytestr, NULL, 16);
2655 valbuf += 2;
2656 }
2657 if (b != reglen) {
2658 LOG_ERROR("Malformed Qxtreg packet");
2659 error = XT_QERR_INVAL;
2660 goto xtensa_gdbqc_qxtreg_fail;
2661 }
2662 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2663 reglen / memop_size, regbuf);
2664 if (status != ERROR_OK) {
2665 LOG_ERROR("TIE value store");
2666 error = XT_QERR_MEM;
2667 goto xtensa_gdbqc_qxtreg_fail;
2668 }
2669 }
2670 xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
2671 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, xtensa->spill_loc);
2672 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2673
2674 int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
2675
2676 /* Restore a4 but not yet spill memory. Execute it all... */
2677 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, orig_a4);
2678 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2679 status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2680 if (status != ERROR_OK) {
2681 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
2682 tieop_status = status;
2683 }
2684 status = xtensa_core_status_check(target);
2685 if (status != ERROR_OK) {
2686 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
2687 tieop_status = status;
2688 }
2689
2690 if (tieop_status == ERROR_OK) {
2691 if (iswrite) {
2692 /* TIE write succeeded; send OK */
2693 strcpy(*response_p, "OK");
2694 } else {
2695 /* TIE read succeeded; copy result from spill memory */
2696 status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
2697 if (status != ERROR_OK) {
2698 LOG_TARGET_ERROR(target, "TIE result read");
2699 tieop_status = status;
2700 }
2701 unsigned int i;
2702 for (i = 0; i < reglen; i++)
2703 sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
2704 *(*response_p + 2 * i) = '\0';
2705 LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
2706 }
2707 }
2708
2709 /* Restore spill memory first, then report any previous errors */
2710 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2711 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2712 if (status != ERROR_OK) {
2713 LOG_ERROR("Spill memory restore");
2714 error = XT_QERR_MEM;
2715 goto xtensa_gdbqc_qxtreg_fail;
2716 }
2717 if (tieop_status != ERROR_OK) {
2718 LOG_ERROR("TIE execution");
2719 error = XT_QERR_FAIL;
2720 goto xtensa_gdbqc_qxtreg_fail;
2721 }
2722 return ERROR_OK;
2723
2724 xtensa_gdbqc_qxtreg_fail:
2725 strcpy(*response_p, xt_qerr[error].chrval);
2726 return xt_qerr[error].intval;
2727 }
2728
2729 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
2730 {
2731 struct xtensa *xtensa = target_to_xtensa(target);
2732 enum xtensa_qerr_e error;
2733 if (!packet || !response_p) {
2734 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
2735 return ERROR_FAIL;
2736 }
2737
2738 *response_p = xtensa->qpkt_resp;
2739 if (strncmp(packet, "qxtn", 4) == 0) {
2740 strcpy(*response_p, "OpenOCD");
2741 return ERROR_OK;
2742 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
2743 return ERROR_OK;
2744 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
2745 /* Confirm host cache params match core .cfg file */
2746 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
2747 &xtensa->core_config->icache : &xtensa->core_config->dcache;
2748 unsigned int line_size = 0, size = 0, way_count = 0;
2749 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
2750 if ((cachep->line_size != line_size) ||
2751 (cachep->size != size) ||
2752 (cachep->way_count != way_count)) {
2753 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2754 cachep == &xtensa->core_config->icache ? 'I' : 'D');
2755 }
2756 strcpy(*response_p, "OK");
2757 return ERROR_OK;
2758 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
2759 /* Confirm host IRAM/IROM params match core .cfg file */
2760 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
2761 &xtensa->core_config->iram : &xtensa->core_config->irom;
2762 unsigned int base = 0, size = 0, i;
2763 char *pkt = (char *)&packet[7];
2764 do {
2765 pkt++;
2766 size = strtoul(pkt, &pkt, 16);
2767 pkt++;
2768 base = strtoul(pkt, &pkt, 16);
2769 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
2770 for (i = 0; i < memp->count; i++) {
2771 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
2772 break;
2773 }
2774 if (i == memp->count) {
2775 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
2776 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
2777 break;
2778 }
2779 for (i = 0; i < 11; i++) {
2780 pkt++;
2781 strtoul(pkt, &pkt, 16);
2782 }
2783 } while (pkt && (pkt[0] == ','));
2784 strcpy(*response_p, "OK");
2785 return ERROR_OK;
2786 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
2787 /* Confirm host EXCM_LEVEL matches core .cfg file */
2788 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
2789 if (!xtensa->core_config->high_irq.enabled ||
2790 (excm_level != xtensa->core_config->high_irq.excm_level))
2791 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
2792 strcpy(*response_p, "OK");
2793 return ERROR_OK;
2794 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
2795 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
2796 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
2797 strcpy(*response_p, "OK");
2798 return ERROR_OK;
2799 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
2800 char *delim;
2801 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
2802 if (*delim != ':') {
2803 LOG_ERROR("Malformed Qxtspill packet");
2804 error = XT_QERR_INVAL;
2805 goto xtensa_gdb_query_custom_fail;
2806 }
2807 xtensa->spill_loc = spill_loc;
2808 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
2809 if (xtensa->spill_buf)
2810 free(xtensa->spill_buf);
2811 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
2812 if (!xtensa->spill_buf) {
2813 LOG_ERROR("Spill buf alloc");
2814 error = XT_QERR_MEM;
2815 goto xtensa_gdb_query_custom_fail;
2816 }
2817 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
2818 strcpy(*response_p, "OK");
2819 return ERROR_OK;
2820 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
2821 return xtensa_gdbqc_qxtreg(target, packet, response_p);
2822 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
2823 (strncmp(packet, "qxtftie", 7) == 0) ||
2824 (strncmp(packet, "qxtstie", 7) == 0)) {
2825 /* Return empty string to indicate trace, TIE wire debug are unsupported */
2826 strcpy(*response_p, "");
2827 return ERROR_OK;
2828 }
2829
2830 /* Warn for all other queries, but do not return errors */
2831 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
2832 strcpy(*response_p, "");
2833 return ERROR_OK;
2834
2835 xtensa_gdb_query_custom_fail:
2836 strcpy(*response_p, xt_qerr[error].chrval);
2837 return xt_qerr[error].intval;
2838 }
2839
2840 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2841 const struct xtensa_debug_module_config *dm_cfg)
2842 {
2843 target->arch_info = xtensa;
2844 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2845 xtensa->target = target;
2846 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2847
2848 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
2849 if (!xtensa->core_config) {
2850 LOG_ERROR("Xtensa configuration alloc failed\n");
2851 return ERROR_FAIL;
2852 }
2853
2854 /* Default cache settings are disabled with 1 way */
2855 xtensa->core_config->icache.way_count = 1;
2856 xtensa->core_config->dcache.way_count = 1;
2857
2858 /* chrval: AR3/AR4 register names will change with window mapping.
2859 * intval: tracks whether scratch register was set through gdb P packet.
2860 */
2861 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
2862 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
2863 if (!xtensa->scratch_ars[s].chrval) {
2864 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
2865 free(xtensa->scratch_ars[f].chrval);
2866 free(xtensa->core_config);
2867 LOG_ERROR("Xtensa scratch AR alloc failed\n");
2868 return ERROR_FAIL;
2869 }
2870 xtensa->scratch_ars[s].intval = false;
2871 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
2872 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
2873 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
2874 }
2875
2876 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2877 }
2878
2879 void xtensa_set_permissive_mode(struct target *target, bool state)
2880 {
2881 target_to_xtensa(target)->permissive_mode = state;
2882 }
2883
2884 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2885 {
2886 struct xtensa *xtensa = target_to_xtensa(target);
2887
2888 xtensa->come_online_probes_num = 3;
2889 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
2890 if (!xtensa->hw_brps) {
2891 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2892 return ERROR_FAIL;
2893 }
2894 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
2895 if (!xtensa->hw_wps) {
2896 free(xtensa->hw_brps);
2897 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2898 return ERROR_FAIL;
2899 }
2900 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2901 if (!xtensa->sw_brps) {
2902 free(xtensa->hw_brps);
2903 free(xtensa->hw_wps);
2904 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2905 return ERROR_FAIL;
2906 }
2907
2908 xtensa->spill_loc = 0xffffffff;
2909 xtensa->spill_bytes = 0;
2910 xtensa->spill_buf = NULL;
2911 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
2912
2913 return xtensa_build_reg_cache(target);
2914 }
2915
2916 static void xtensa_free_reg_cache(struct target *target)
2917 {
2918 struct xtensa *xtensa = target_to_xtensa(target);
2919 struct reg_cache *cache = xtensa->core_cache;
2920
2921 if (cache) {
2922 register_unlink_cache(&target->reg_cache, cache);
2923 for (unsigned int i = 0; i < cache->num_regs; i++) {
2924 free(xtensa->algo_context_backup[i]);
2925 free(cache->reg_list[i].value);
2926 }
2927 free(xtensa->algo_context_backup);
2928 free(cache->reg_list);
2929 free(cache);
2930 }
2931 xtensa->core_cache = NULL;
2932 xtensa->algo_context_backup = NULL;
2933
2934 if (xtensa->empty_regs) {
2935 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2936 free((void *)xtensa->empty_regs[i].name);
2937 free(xtensa->empty_regs[i].value);
2938 }
2939 free(xtensa->empty_regs);
2940 }
2941 xtensa->empty_regs = NULL;
2942 if (xtensa->optregs) {
2943 for (unsigned int i = 0; i < xtensa->num_optregs; i++)
2944 free((void *)xtensa->optregs[i].name);
2945 free(xtensa->optregs);
2946 }
2947 xtensa->optregs = NULL;
2948 }
2949
2950 void xtensa_target_deinit(struct target *target)
2951 {
2952 struct xtensa *xtensa = target_to_xtensa(target);
2953
2954 LOG_DEBUG("start");
2955
2956 if (target_was_examined(target)) {
2957 int ret = xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, OCDDCR_ENABLEOCD);
2958 if (ret != ERROR_OK) {
2959 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2960 return;
2961 }
2962 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2963 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2964 if (ret != ERROR_OK) {
2965 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2966 return;
2967 }
2968 xtensa_dm_deinit(&xtensa->dbg_mod);
2969 }
2970 xtensa_free_reg_cache(target);
2971 free(xtensa->hw_brps);
2972 free(xtensa->hw_wps);
2973 free(xtensa->sw_brps);
2974 if (xtensa->spill_buf) {
2975 free(xtensa->spill_buf);
2976 xtensa->spill_buf = NULL;
2977 }
2978 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
2979 free(xtensa->scratch_ars[s].chrval);
2980 free(xtensa->core_config);
2981 }
2982
2983 const char *xtensa_get_gdb_arch(struct target *target)
2984 {
2985 return "xtensa";
2986 }
2987
2988 /* exe <ascii-encoded hexadecimal instruction bytes> */
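/* Illustrative invocation (opcode bytes hypothetical):
 *   xtensa exe 112233
 * queues the three bytes 0x11 0x22 0x33 as one instruction. The argument must
 * contain an even number of hex characters, fewer than 64 in total. */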
2989 static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
2990 {
2991 struct xtensa *xtensa = target_to_xtensa(target);
2992
2993 if (CMD_ARGC != 1)
2994 return ERROR_COMMAND_SYNTAX_ERROR;
2995
2996 /* Process ascii-encoded hex byte string */
2997 const char *parm = CMD_ARGV[0];
2998 unsigned int parm_len = strlen(parm);
2999 if ((parm_len >= 64) || (parm_len & 1)) {
3000 LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
3001 return ERROR_FAIL;
3002 }
3003
3004 uint8_t ops[32];
3005 memset(ops, 0, 32);
3006 unsigned int oplen = parm_len / 2;
3007 char encoded_byte[3] = { 0, 0, 0 };
3008 for (unsigned int i = 0; i < oplen; i++) {
3009 encoded_byte[0] = *parm++;
3010 encoded_byte[1] = *parm++;
3011 ops[i] = strtoul(encoded_byte, NULL, 16);
3012 }
3013
3014 /* GDB must handle state save/restore.
3015 * Flush reg cache in case spill location is in an AR
3016 * Update CPENABLE only for this execution; later restore cached copy
3017 * Keep a copy of exccause in case executed code triggers an exception
3018 */
3019 int status = xtensa_write_dirty_registers(target);
3020 if (status != ERROR_OK) {
3021 LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
3022 return ERROR_FAIL;
3023 }
3024 xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
3025 xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
3026 xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
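/* Temporarily enable all coprocessors: route 0xffffffff through DDR and a3
 * into CPENABLE, then restore a3 from its saved value via DDR. The saved
 * CPENABLE and EXCCAUSE values are written back after the stub runs. */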
3027 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
3028 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3029 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
3030 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
3031 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
3032 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3033
3034 /* Queue instruction list and execute everything */
3035 LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
3036 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3037 status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
3038 if (status != ERROR_OK)
3039 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
3040 status = xtensa_core_status_check(target);
3041 if (status != ERROR_OK)
3042 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
3043
3044 /* Reread register cache and restore saved regs after instruction execution */
3045 if (xtensa_fetch_all_regs(target) != ERROR_OK)
3046 LOG_TARGET_ERROR(target, "%s: Failed to fetch register cache (post-exec).", target_name(target));
3047 xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
3048 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
3049 return status;
3050 }
3051
3052 COMMAND_HANDLER(xtensa_cmd_exe)
3053 {
3054 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3055 }
3056
3057 /* xtdef <name> */
3058 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3059 {
3060 if (CMD_ARGC != 1)
3061 return ERROR_COMMAND_SYNTAX_ERROR;
3062
3063 const char *core_name = CMD_ARGV[0];
3064 if (strcasecmp(core_name, "LX") == 0) {
3065 xtensa->core_config->core_type = XT_LX;
3066 } else {
3067 LOG_ERROR("xtdef [LX]\n");
3068 return ERROR_COMMAND_SYNTAX_ERROR;
3069 }
3070 return ERROR_OK;
3071 }
3072
3073 COMMAND_HANDLER(xtensa_cmd_xtdef)
3074 {
3075 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3076 target_to_xtensa(get_current_target(CMD_CTX)));
3077 }
3078
3079 static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
3080 {
3081 if ((val < min) || (val > max)) {
3082 LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
3083 return false;
3084 }
3085 return true;
3086 }
3087
3088 /* xtopt <name> <value> */
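/* Illustrative settings (values hypothetical; see the range checks below):
 *   xtensa xtopt arnum 64
 *   xtensa xtopt windowed 1
 *   xtensa xtopt debuglevel 6
 */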
3089 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3090 {
3091 if (CMD_ARGC != 2)
3092 return ERROR_COMMAND_SYNTAX_ERROR;
3093
3094 const char *opt_name = CMD_ARGV[0];
3095 int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3096 if (strcasecmp(opt_name, "arnum") == 0) {
3097 if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3098 return ERROR_COMMAND_ARGUMENT_INVALID;
3099 xtensa->core_config->aregs_num = opt_val;
3100 } else if (strcasecmp(opt_name, "windowed") == 0) {
3101 if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3102 return ERROR_COMMAND_ARGUMENT_INVALID;
3103 xtensa->core_config->windowed = opt_val;
3104 } else if (strcasecmp(opt_name, "cpenable") == 0) {
3105 if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3106 return ERROR_COMMAND_ARGUMENT_INVALID;
3107 xtensa->core_config->coproc = opt_val;
3108 } else if (strcasecmp(opt_name, "exceptions") == 0) {
3109 if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3110 return ERROR_COMMAND_ARGUMENT_INVALID;
3111 xtensa->core_config->exceptions = opt_val;
3112 } else if (strcasecmp(opt_name, "intnum") == 0) {
3113 if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3114 return ERROR_COMMAND_ARGUMENT_INVALID;
3115 xtensa->core_config->irq.enabled = (opt_val > 0);
3116 xtensa->core_config->irq.irq_num = opt_val;
3117 } else if (strcasecmp(opt_name, "hipriints") == 0) {
3118 if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3119 return ERROR_COMMAND_ARGUMENT_INVALID;
3120 xtensa->core_config->high_irq.enabled = opt_val;
3121 } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3122 if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3123 return ERROR_COMMAND_ARGUMENT_INVALID;
3124 if (!xtensa->core_config->high_irq.enabled) {
3125 LOG_ERROR("xtopt excmlevel requires hipriints\n");
3126 return ERROR_COMMAND_ARGUMENT_INVALID;
3127 }
3128 xtensa->core_config->high_irq.excm_level = opt_val;
3129 } else if (strcasecmp(opt_name, "intlevels") == 0) {
3130 if (xtensa->core_config->core_type == XT_LX) {
3131 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3132 return ERROR_COMMAND_ARGUMENT_INVALID;
3133 } else {
3134 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3135 return ERROR_COMMAND_ARGUMENT_INVALID;
3136 }
3137 if (!xtensa->core_config->high_irq.enabled) {
3138 LOG_ERROR("xtopt intlevels requires hipriints\n");
3139 return ERROR_COMMAND_ARGUMENT_INVALID;
3140 }
3141 xtensa->core_config->high_irq.level_num = opt_val;
3142 } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3143 if (xtensa->core_config->core_type == XT_LX) {
3144 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3145 return ERROR_COMMAND_ARGUMENT_INVALID;
3146 } else {
3147 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3148 return ERROR_COMMAND_ARGUMENT_INVALID;
3149 }
3150 xtensa->core_config->debug.enabled = 1;
3151 xtensa->core_config->debug.irq_level = opt_val;
3152 } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3153 if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3154 return ERROR_COMMAND_ARGUMENT_INVALID;
3155 xtensa->core_config->debug.ibreaks_num = opt_val;
3156 } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3157 if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3158 return ERROR_COMMAND_ARGUMENT_INVALID;
3159 xtensa->core_config->debug.dbreaks_num = opt_val;
3160 } else if (strcasecmp(opt_name, "tracemem") == 0) {
3161 if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3162 return ERROR_COMMAND_ARGUMENT_INVALID;
3163 xtensa->core_config->trace.mem_sz = opt_val;
3164 xtensa->core_config->trace.enabled = (opt_val > 0);
3165 } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3166 if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3167 return ERROR_COMMAND_ARGUMENT_INVALID;
3168 xtensa->core_config->trace.reversed_mem_access = opt_val;
3169 } else if (strcasecmp(opt_name, "perfcount") == 0) {
3170 if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3171 return ERROR_COMMAND_ARGUMENT_INVALID;
3172 xtensa->core_config->debug.perfcount_num = opt_val;
3173 } else {
3174 LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3175 return ERROR_OK;
3176 }
3177
3178 return ERROR_OK;
3179 }
3180
3181 COMMAND_HANDLER(xtensa_cmd_xtopt)
3182 {
3183 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3184 target_to_xtensa(get_current_target(CMD_CTX)));
3185 }
3186
3187 /* xtmem <type> [parameters] */
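/* Illustrative settings (sizes and addresses hypothetical):
 *   xtensa xtmem icache 32 16384 2        (32-byte lines, 16 KB, 2 ways)
 *   xtensa xtmem dram 0x3ffe0000 0x20000  (base address, size in bytes)
 */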
3188 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3189 {
3190 struct xtensa_cache_config *cachep = NULL;
3191 struct xtensa_local_mem_config *memp = NULL;
3192 int mem_access = 0;
3193 bool is_dcache = false;
3194
3195 if (CMD_ARGC == 0) {
3196 LOG_ERROR("xtmem <type> [parameters]\n");
3197 return ERROR_COMMAND_SYNTAX_ERROR;
3198 }
3199
3200 const char *mem_name = CMD_ARGV[0];
3201 if (strcasecmp(mem_name, "icache") == 0) {
3202 cachep = &xtensa->core_config->icache;
3203 } else if (strcasecmp(mem_name, "dcache") == 0) {
3204 cachep = &xtensa->core_config->dcache;
3205 is_dcache = true;
3206 } else if (strcasecmp(mem_name, "l2cache") == 0) {
3207 /* TODO: support L2 cache */
3208 } else if (strcasecmp(mem_name, "l2addr") == 0) {
3209 /* TODO: support L2 cache */
3210 } else if (strcasecmp(mem_name, "iram") == 0) {
3211 memp = &xtensa->core_config->iram;
3212 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3213 } else if (strcasecmp(mem_name, "dram") == 0) {
3214 memp = &xtensa->core_config->dram;
3215 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3216 } else if (strcasecmp(mem_name, "sram") == 0) {
3217 memp = &xtensa->core_config->sram;
3218 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3219 } else if (strcasecmp(mem_name, "irom") == 0) {
3220 memp = &xtensa->core_config->irom;
3221 mem_access = XT_MEM_ACCESS_READ;
3222 } else if (strcasecmp(mem_name, "drom") == 0) {
3223 memp = &xtensa->core_config->drom;
3224 mem_access = XT_MEM_ACCESS_READ;
3225 } else if (strcasecmp(mem_name, "srom") == 0) {
3226 memp = &xtensa->core_config->srom;
3227 mem_access = XT_MEM_ACCESS_READ;
3228 } else {
3229 LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3230 return ERROR_COMMAND_ARGUMENT_INVALID;
3231 }
3232
3233 if (cachep) {
3234 if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
3235 LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
3236 return ERROR_COMMAND_SYNTAX_ERROR;
3237 }
3238 cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3239 cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3240 cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3241 cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3242 strtoul(CMD_ARGV[4], NULL, 0) : 0;
3243 } else if (memp) {
3244 if (CMD_ARGC != 3) {
3245 LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
3246 return ERROR_COMMAND_SYNTAX_ERROR;
3247 }
3248 struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3249 memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3250 memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3251 memcfgp->access = mem_access;
3252 memp->count++;
3253 }
3254
3255 return ERROR_OK;
3256 }
3257
3258 COMMAND_HANDLER(xtensa_cmd_xtmem)
3259 {
3260 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3261 target_to_xtensa(get_current_target(CMD_CTX)));
3262 }
3263
3264 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
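/* Illustrative setting (values hypothetical):
 *   xtensa xtmpu 8 0x1000 1 0
 * i.e. 8 foreground segments, 4 KB minimum segment size, lockable, not execute-only. */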
3265 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3266 {
3267 if (CMD_ARGC != 4) {
3268 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3269 return ERROR_COMMAND_SYNTAX_ERROR;
3270 }
3271
3272 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3273 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3274 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3275 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3276
3277 if (nfgseg > 32) {
3278 LOG_ERROR("<nfgseg> must be within [0..32]\n");
3279 return ERROR_COMMAND_ARGUMENT_INVALID;
3280 } else if (minsegsize < 32 || (minsegsize & (minsegsize - 1))) {
3281 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3282 return ERROR_COMMAND_ARGUMENT_INVALID;
3283 } else if (lockable > 1) {
3284 LOG_ERROR("<lockable> must be 0 or 1\n");
3285 return ERROR_COMMAND_ARGUMENT_INVALID;
3286 } else if (execonly > 1) {
3287 LOG_ERROR("<execonly> must be 0 or 1\n");
3288 return ERROR_COMMAND_ARGUMENT_INVALID;
3289 }
3290
3291 xtensa->core_config->mpu.enabled = true;
3292 xtensa->core_config->mpu.nfgseg = nfgseg;
3293 xtensa->core_config->mpu.minsegsize = minsegsize;
3294 xtensa->core_config->mpu.lockable = lockable;
3295 xtensa->core_config->mpu.execonly = execonly;
3296 return ERROR_OK;
3297 }
3298
3299 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3300 {
3301 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3302 target_to_xtensa(get_current_target(CMD_CTX)));
3303 }
3304
3305 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> */
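/* Illustrative setting: "xtensa xtmmu 16 16" (each refill-entry count must be 16 or 32). */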
3306 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3307 {
3308 if (CMD_ARGC != 2) {
3309 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3310 return ERROR_COMMAND_SYNTAX_ERROR;
3311 }
3312
3313 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3314 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3315 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3316 LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3317 return ERROR_COMMAND_ARGUMENT_INVALID;
3318 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3319 LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3320 return ERROR_COMMAND_ARGUMENT_INVALID;
3321 }
3322
3323 xtensa->core_config->mmu.enabled = true;
3324 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3325 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3326 return ERROR_OK;
3327 }
3328
3329 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3330 {
3331 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3332 target_to_xtensa(get_current_target(CMD_CTX)));
3333 }
3334
3335 /* xtregs <numregs>
3336 * xtreg <regname> <regnum> */
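/* Illustrative definitions (register numbers hypothetical):
 *   xtensa xtregs 176
 *   xtensa xtreg pc 0x0020
 * "xtregs <numregs>" must come first; each subsequent "xtreg" entry is matched
 * against the core register table or added as a config-specific register. */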
3337 COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
3338 {
3339 if (CMD_ARGC == 1) {
3340 int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
3341 if ((numregs <= 0) || (numregs > UINT16_MAX)) {
3342 LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
3343 return ERROR_COMMAND_SYNTAX_ERROR;
3344 }
3345 if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
3346 LOG_ERROR("xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
3347 numregs, xtensa->genpkt_regs_num);
3348 return ERROR_COMMAND_SYNTAX_ERROR;
3349 }
3350 xtensa->total_regs_num = numregs;
3351 xtensa->core_regs_num = 0;
3352 xtensa->num_optregs = 0;
3353 /* A little more memory than required, but saves a second initialization pass */
3354 xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
3355 if (!xtensa->optregs) {
3356 LOG_ERROR("Failed to allocate xtensa->optregs!");
3357 return ERROR_FAIL;
3358 }
3359 return ERROR_OK;
3360 } else if (CMD_ARGC != 2) {
3361 return ERROR_COMMAND_SYNTAX_ERROR;
3362 }
3363
3364 /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3365 * if general register (g-packet) requests or contiguous register maps are supported */
3366 if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
3367 xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
3368 if (!xtensa->contiguous_regs_desc) {
3369 LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3370 return ERROR_FAIL;
3371 }
3372 }
3373
3374 const char *regname = CMD_ARGV[0];
3375 unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
3376 if (regnum > UINT16_MAX) {
3377 LOG_ERROR("<regnum> must be a 16-bit number");
3378 return ERROR_COMMAND_ARGUMENT_INVALID;
3379 }
3380
3381 if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
3382 if (xtensa->total_regs_num)
3383 LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3384 regname, regnum,
3385 xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
3386 else
3387 LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
3388 regname, regnum);
3389 return ERROR_FAIL;
3390 }
3391
3392 /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3393 struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
3394 bool is_extended_reg = true;
3395 unsigned int ridx;
3396 for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
3397 if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
3398 /* Flag core register as defined */
3399 rptr = &xtensa_regs[ridx];
3400 xtensa->core_regs_num++;
3401 is_extended_reg = false;
3402 break;
3403 }
3404 }
3405
3406 rptr->exist = true;
3407 if (is_extended_reg) {
3408 /* Register ID, debugger-visible register ID */
3409 rptr->name = strdup(CMD_ARGV[0]);
3410 rptr->dbreg_num = regnum;
3411 rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
3412 xtensa->num_optregs++;
3413
3414 /* Register type */
3415 if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
3416 rptr->type = XT_REG_GENERAL;
3417 } else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
3418 rptr->type = XT_REG_USER;
3419 } else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
3420 rptr->type = XT_REG_FR;
3421 } else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
3422 rptr->type = XT_REG_SPECIAL;
3423 } else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
3424 /* WARNING: For these registers, regnum points to the
3425 * index of the corresponding ARx registers, NOT to
3426 * the processor register number! */
3427 rptr->type = XT_REG_RELGEN;
3428 rptr->reg_num += XT_REG_IDX_ARFIRST;
3429 rptr->dbreg_num += XT_REG_IDX_ARFIRST;
3430 } else if ((regnum & XT_REG_TIE_MASK) != 0) {
3431 rptr->type = XT_REG_TIE;
3432 } else {
3433 rptr->type = XT_REG_OTHER;
3434 }
3435
3436 /* Register flags */
3437 if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
3438 (strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
3439 (strcmp(rptr->name, "intclear") == 0))
3440 rptr->flags = XT_REGF_NOREAD;
3441 else
3442 rptr->flags = 0;
3443
3444 if ((rptr->reg_num == (XT_PS_REG_NUM_BASE + xtensa->core_config->debug.irq_level)) &&
3445 (xtensa->core_config->core_type == XT_LX) && (rptr->type == XT_REG_SPECIAL)) {
3446 xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
3447 LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
3448 }
3449 } else if (strcmp(rptr->name, "cpenable") == 0) {
3450 xtensa->core_config->coproc = true;
3451 }
3452
3453 /* Build out list of contiguous registers in specified order */
3454 unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
3455 if (xtensa->contiguous_regs_desc) {
3456 assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
3457 xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
3458 }
3459 if (xtensa_extra_debug_log)
3460 LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
3461 is_extended_reg ? "config-specific" : "core",
3462 rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
3463 is_extended_reg ? xtensa->num_optregs : ridx,
3464 is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
3465 return ERROR_OK;
3466 }
3467
3468 COMMAND_HANDLER(xtensa_cmd_xtreg)
3469 {
3470 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3471 target_to_xtensa(get_current_target(CMD_CTX)));
3472 }
3473
3474 /* xtregfmt <contiguous|sparse> [numgregs] */
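/* Illustrative setting: "xtensa xtregfmt contiguous 72" (numgregs hypothetical).
 * Must be issued before the first "xtreg" definition when a contiguous map is used. */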
3475 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3476 {
3477 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3478 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3479 return ERROR_OK;
3480 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3481 xtensa->regmap_contiguous = true;
3482 if (CMD_ARGC == 2) {
3483 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3484 if ((numgregs <= 0) ||
3485 ((numgregs > xtensa->total_regs_num) &&
3486 (xtensa->total_regs_num > 0))) {
3487 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3488 numgregs, xtensa->total_regs_num);
3489 return ERROR_COMMAND_SYNTAX_ERROR;
3490 }
3491 xtensa->genpkt_regs_num = numgregs;
3492 }
3493 return ERROR_OK;
3494 }
3495 }
3496 return ERROR_COMMAND_SYNTAX_ERROR;
3497 }
3498
3499 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3500 {
3501 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3502 target_to_xtensa(get_current_target(CMD_CTX)));
3503 }
3504
3505 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
3506 {
3507 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
3508 &xtensa->permissive_mode, "xtensa permissive mode");
3509 }
3510
3511 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3512 {
3513 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3514 target_to_xtensa(get_current_target(CMD_CTX)));
3515 }
3516
3517 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
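/* Illustrative invocation (select value hypothetical):
 *   xtensa perfmon_enable 0 2
 * enables counter 0 with event select 2, the default mask 0xffff, and the
 * trace level taken from DEBUGLEVEL. */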
3518 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3519 {
3520 struct xtensa_perfmon_config config = {
3521 .mask = 0xffff,
3522 .kernelcnt = 0,
3523 .tracelevel = -1 /* use DEBUGLEVEL by default */
3524 };
3525
3526 if (CMD_ARGC < 2 || CMD_ARGC > 6)
3527 return ERROR_COMMAND_SYNTAX_ERROR;
3528
3529 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3530 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3531 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3532 return ERROR_COMMAND_ARGUMENT_INVALID;
3533 }
3534
3535 config.select = strtoul(CMD_ARGV[1], NULL, 0);
3536 if (config.select > XTENSA_MAX_PERF_SELECT) {
3537 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3538 return ERROR_COMMAND_ARGUMENT_INVALID;
3539 }
3540
3541 if (CMD_ARGC >= 3) {
3542 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3543 if (config.mask > XTENSA_MAX_PERF_MASK) {
3544 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3545 return ERROR_COMMAND_ARGUMENT_INVALID;
3546 }
3547 }
3548
3549 if (CMD_ARGC >= 4) {
3550 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3551 if (config.kernelcnt > 1) {
3552 command_print(CMD, "kernelcnt should be 0 or 1");
3553 return ERROR_COMMAND_ARGUMENT_INVALID;
3554 }
3555 }
3556
3557 if (CMD_ARGC >= 5) {
3558 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3559 if (config.tracelevel > 7) {
3560 command_print(CMD, "tracelevel should be <=7");
3561 return ERROR_COMMAND_ARGUMENT_INVALID;
3562 }
3563 }
3564
3565 if (config.tracelevel == -1)
3566 config.tracelevel = xtensa->core_config->debug.irq_level;
3567
3568 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3569 }
3570
3571 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3572 {
3573 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3574 target_to_xtensa(get_current_target(CMD_CTX)));
3575 }
3576
3577 /* perfmon_dump [counter_id] */
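/* Illustrative invocations: "xtensa perfmon_dump" prints every counter,
 * "xtensa perfmon_dump 0" prints only counter 0. */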
3578 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3579 {
3580 if (CMD_ARGC > 1)
3581 return ERROR_COMMAND_SYNTAX_ERROR;
3582
3583 int counter_id = -1;
3584 if (CMD_ARGC == 1) {
3585 counter_id = strtol(CMD_ARGV[0], NULL, 0);
3586 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3587 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3588 return ERROR_COMMAND_ARGUMENT_INVALID;
3589 }
3590 }
3591
3592 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3593 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3594 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3595 char result_buf[128] = { 0 };
3596 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3597 struct xtensa_perfmon_result result;
3598 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3599 if (res != ERROR_OK)
3600 return res;
3601 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3602 "%-12" PRIu64 "%s",
3603 result.value,
3604 result.overflow ? " (overflow)" : "");
3605 LOG_INFO("%s", result_buf);
3606 }
3607
3608 return ERROR_OK;
3609 }
3610
3611 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3612 {
3613 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3614 target_to_xtensa(get_current_target(CMD_CTX)));
3615 }
3616
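/* maskisr ['on'|'off']: with no argument, report the current ISR step mode.
 * e.g. "xtensa maskisr on" masks interrupts while single-stepping. */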
3617 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3618 {
3619 int state = -1;
3620
3621 if (CMD_ARGC < 1) {
3622 const char *st;
3623 state = xtensa->stepping_isr_mode;
3624 if (state == XT_STEPPING_ISR_ON)
3625 st = "OFF";
3626 else if (state == XT_STEPPING_ISR_OFF)
3627 st = "ON";
3628 else
3629 st = "UNKNOWN";
3630 command_print(CMD, "Current ISR step mode: %s", st);
3631 return ERROR_OK;
3632 }
3633 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3634 if (!strcasecmp(CMD_ARGV[0], "off"))
3635 state = XT_STEPPING_ISR_ON;
3636 else if (!strcasecmp(CMD_ARGV[0], "on"))
3637 state = XT_STEPPING_ISR_OFF;
3638
3639 if (state == -1) {
3640 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3641 return ERROR_FAIL;
3642 }
3643 xtensa->stepping_isr_mode = state;
3644 return ERROR_OK;
3645 }
3646
3647 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3648 {
3649 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3650 target_to_xtensa(get_current_target(CMD_CTX)));
3651 }
3652
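/* smpbreak [<signal list>]: with no arguments, report the current cross-trigger
 * configuration. Illustrative setting: "xtensa smpbreak BreakIn BreakOut". */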
3653 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3654 {
3655 int res;
3656 uint32_t val = 0;
3657
3658 if (CMD_ARGC >= 1) {
3659 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3660 if (!strcasecmp(CMD_ARGV[i], "none")) {
3661 val = 0;
3662 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3663 val |= OCDDCR_BREAKINEN;
3664 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3665 val |= OCDDCR_BREAKOUTEN;
3666 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3667 val |= OCDDCR_RUNSTALLINEN;
3668 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3669 val |= OCDDCR_DEBUGMODEOUTEN;
3670 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3671 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3672 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3673 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
3674 } else {
3675 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3676 command_print(
3677 CMD,
3678 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3679 return ERROR_OK;
3680 }
3681 }
3682 res = xtensa_smpbreak_set(target, val);
3683 if (res != ERROR_OK)
3684 command_print(CMD, "Failed to set smpbreak config %d", res);
3685 } else {
3686 struct xtensa *xtensa = target_to_xtensa(target);
3687 res = xtensa_smpbreak_read(xtensa, &val);
3688 if (res == ERROR_OK)
3689 command_print(CMD, "Current bits set:%s%s%s%s",
3690 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3691 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3692 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3693 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3694 );
3695 else
3696 command_print(CMD, "Failed to get smpbreak config %d", res);
3697 }
3698 return res;
3699 }
3700
3701 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3702 {
3703 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3704 get_current_target(CMD_CTX));
3705 }
3706
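/* tracestart [pc <pcval>/[maskbitcount]] [after <n> [ins|words]]
 * Illustrative invocation (addresses hypothetical):
 *   xtensa tracestart pc 0x40000400/4 after 256 words */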
3707 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
3708 {
3709 struct xtensa_trace_status trace_status;
3710 struct xtensa_trace_start_config cfg = {
3711 .stoppc = 0,
3712 .stopmask = XTENSA_STOPMASK_DISABLED,
3713 .after = 0,
3714 .after_is_words = false
3715 };
3716
3717 /* Parse arguments */
3718 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3719 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i + 1) {
3720 char *e;
3721 i++;
3722 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
3723 cfg.stopmask = 0;
3724 if (*e == '/')
3725 cfg.stopmask = strtol(e + 1, NULL, 0);
3726 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i + 1) {
3727 i++;
3728 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
3729 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
3730 cfg.after_is_words = 0;
3731 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
3732 cfg.after_is_words = 1;
3733 } else {
3734 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
3735 return ERROR_FAIL;
3736 }
3737 }
3738
3739 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3740 if (res != ERROR_OK)
3741 return res;
3742 if (trace_status.stat & TRAXSTAT_TRACT) {
3743 LOG_WARNING("Silently stop active tracing!");
3744 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
3745 if (res != ERROR_OK)
3746 return res;
3747 }
3748
3749 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
3750 if (res != ERROR_OK)
3751 return res;
3752
3753 xtensa->trace_active = true;
3754 command_print(CMD, "Trace started.");
3755 return ERROR_OK;
3756 }
3757
3758 COMMAND_HANDLER(xtensa_cmd_tracestart)
3759 {
3760 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
3761 target_to_xtensa(get_current_target(CMD_CTX)));
3762 }
3763
3764 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
3765 {
3766 struct xtensa_trace_status trace_status;
3767
3768 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3769 if (res != ERROR_OK)
3770 return res;
3771
3772 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
3773 command_print(CMD, "No trace is currently active.");
3774 return ERROR_FAIL;
3775 }
3776
3777 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
3778 if (res != ERROR_OK)
3779 return res;
3780
3781 xtensa->trace_active = false;
3782 command_print(CMD, "Trace stop triggered.");
3783 return ERROR_OK;
3784 }
3785
3786 COMMAND_HANDLER(xtensa_cmd_tracestop)
3787 {
3788 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
3789 target_to_xtensa(get_current_target(CMD_CTX)));
3790 }
3791
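/* tracedump <outfile>: write the captured trace memory to a file, e.g.
 * "xtensa tracedump /tmp/trace.bin" (path illustrative). */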
3792 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
3793 {
3794 struct xtensa_trace_config trace_config;
3795 struct xtensa_trace_status trace_status;
3796 uint32_t memsz, wmem;
3797
3798 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3799 if (res != ERROR_OK)
3800 return res;
3801
3802 if (trace_status.stat & TRAXSTAT_TRACT) {
3803 command_print(CMD, "Tracing is still active. Please stop it first.");
3804 return ERROR_FAIL;
3805 }
3806
3807 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
3808 if (res != ERROR_OK)
3809 return res;
3810
3811 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
3812 command_print(CMD, "No active trace found; nothing to dump.");
3813 return ERROR_FAIL;
3814 }
3815
3816 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
3817 LOG_INFO("Total trace memory: %d words", memsz);
3818 if ((trace_config.addr &
3819 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
3820 /* Memory hasn't overwritten itself yet. */
3821 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
3822 LOG_INFO("...but trace is only %d words", wmem);
3823 if (wmem < memsz)
3824 memsz = wmem;
3825 } else {
3826 if (trace_config.addr & TRAXADDR_TWSAT) {
3827 LOG_INFO("Real trace is many times longer than that (overflow)");
3828 } else {
3829 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
3830 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
3831 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
3832 }
3833 }
3834
3835 uint8_t *tracemem = malloc(memsz * 4);
3836 if (!tracemem) {
3837 command_print(CMD, "Failed to alloc memory for trace data!");
3838 return ERROR_FAIL;
3839 }
3840 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
3841 if (res != ERROR_OK) {
3842 free(tracemem);
3843 return res;
3844 }
3845
3846 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
3847 if (f < 0) {
3848 free(tracemem);
3849 command_print(CMD, "Unable to open file %s", fname);
3850 return ERROR_FAIL;
3851 }
3852 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
3853 command_print(CMD, "Unable to write to file %s", fname);
3854 else
3855 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
3856 close(f);
3857
3858 bool is_all_zeroes = true;
3859 for (unsigned int i = 0; i < memsz * 4; i++) {
3860 if (tracemem[i] != 0) {
3861 is_all_zeroes = false;
3862 break;
3863 }
3864 }
3865 free(tracemem);
3866 if (is_all_zeroes)
3867 command_print(
3868 CMD,
3869 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
3870
3871 return ERROR_OK;
3872 }
3873
3874 COMMAND_HANDLER(xtensa_cmd_tracedump)
3875 {
3876 if (CMD_ARGC != 1) {
3877 command_print(CMD, "Command takes exactly 1 parameter.Need filename to dump to as output!");
3878 return ERROR_FAIL;
3879 }
3880
3881 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
3882 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
3883 }
3884
3885 static const struct command_registration xtensa_any_command_handlers[] = {
3886 {
3887 .name = "xtdef",
3888 .handler = xtensa_cmd_xtdef,
3889 .mode = COMMAND_CONFIG,
3890 .help = "Configure Xtensa core type",
3891 .usage = "<type>",
3892 },
3893 {
3894 .name = "xtopt",
3895 .handler = xtensa_cmd_xtopt,
3896 .mode = COMMAND_CONFIG,
3897 .help = "Configure Xtensa core option",
3898 .usage = "<name> <value>",
3899 },
3900 {
3901 .name = "xtmem",
3902 .handler = xtensa_cmd_xtmem,
3903 .mode = COMMAND_CONFIG,
3904 .help = "Configure Xtensa memory/cache option",
3905 .usage = "<type> [parameters]",
3906 },
3907 {
3908 .name = "xtmmu",
3909 .handler = xtensa_cmd_xtmmu,
3910 .mode = COMMAND_CONFIG,
3911 .help = "Configure Xtensa MMU option",
3912 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES>",
3913 },
3914 {
3915 .name = "xtmpu",
3916 .handler = xtensa_cmd_xtmpu,
3917 .mode = COMMAND_CONFIG,
3918 .help = "Configure Xtensa MPU option",
3919 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
3920 },
3921 {
3922 .name = "xtreg",
3923 .handler = xtensa_cmd_xtreg,
3924 .mode = COMMAND_CONFIG,
3925 .help = "Configure Xtensa register",
3926 .usage = "<regname> <regnum>",
3927 },
3928 {
3929 .name = "xtregs",
3930 .handler = xtensa_cmd_xtreg,
3931 .mode = COMMAND_CONFIG,
3932 .help = "Configure number of Xtensa registers",
3933 .usage = "<numregs>",
3934 },
3935 {
3936 .name = "xtregfmt",
3937 .handler = xtensa_cmd_xtregfmt,
3938 .mode = COMMAND_CONFIG,
3939 .help = "Configure format of Xtensa register map",
3940 .usage = "<contiguous|sparse> [numgregs]",
3941 },
3942 {
3943 .name = "set_permissive",
3944 .handler = xtensa_cmd_permissive_mode,
3945 .mode = COMMAND_ANY,
3946 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
3947 .usage = "[0|1]",
3948 },
3949 {
3950 .name = "maskisr",
3951 .handler = xtensa_cmd_mask_interrupts,
3952 .mode = COMMAND_ANY,
3953 .help = "mask Xtensa interrupts at step",
3954 .usage = "['on'|'off']",
3955 },
3956 {
3957 .name = "smpbreak",
3958 .handler = xtensa_cmd_smpbreak,
3959 .mode = COMMAND_ANY,
3960 .help = "Set the way the CPU chains OCD breaks",
3961 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
3962 },
3963 {
3964 .name = "perfmon_enable",
3965 .handler = xtensa_cmd_perfmon_enable,
3966 .mode = COMMAND_EXEC,
3967 .help = "Enable and start performance counter",
3968 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
3969 },
3970 {
3971 .name = "perfmon_dump",
3972 .handler = xtensa_cmd_perfmon_dump,
3973 .mode = COMMAND_EXEC,
3974 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
3975 .usage = "[counter_id]",
3976 },
3977 {
3978 .name = "tracestart",
3979 .handler = xtensa_cmd_tracestart,
3980 .mode = COMMAND_EXEC,
3981 .help =
3982 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
3983 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
3984 },
3985 {
3986 .name = "tracestop",
3987 .handler = xtensa_cmd_tracestop,
3988 .mode = COMMAND_EXEC,
3989 .help = "Tracing: Stop current trace as started by the tracestart command",
3990 .usage = "",
3991 },
3992 {
3993 .name = "tracedump",
3994 .handler = xtensa_cmd_tracedump,
3995 .mode = COMMAND_EXEC,
3996 .help = "Tracing: Dump trace memory to a files. One file per core.",
3997 .usage = "<outfile>",
3998 },
3999 {
4000 .name = "exe",
4001 .handler = xtensa_cmd_exe,
4002 .mode = COMMAND_ANY,
4003 .help = "Xtensa stub execution",
4004 .usage = "<ascii-encoded hexadecimal instruction bytes>",
4005 },
4006 COMMAND_REGISTRATION_DONE
4007 };
4008
4009 const struct command_registration xtensa_command_handlers[] = {
4010 {
4011 .name = "xtensa",
4012 .mode = COMMAND_ANY,
4013 .help = "Xtensa command group",
4014 .usage = "",
4015 .chain = xtensa_any_command_handlers,
4016 },
4017 COMMAND_REGISTRATION_DONE
4018 };
