openocd: fix SPDX tag format for files .c
[openocd.git] / src / target / xtensa / xtensa.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
22
/* Swap 4-bit Xtensa opcodes and fields.
 * On big-endian cores the instruction bytes are mirrored nibble-wise,
 * so each LE-encoded opcode below is nibble-swapped before use. */
#define XT_NIBSWAP8(V) \
	((((V) & 0x0F) << 4) \
		| (((V) & 0xF0) >> 4))

#define XT_NIBSWAP16(V) \
	((((V) & 0x000F) << 12) \
		| (((V) & 0x00F0) << 4) \
		| (((V) & 0x0F00) >> 4) \
		| (((V) & 0xF000) >> 12))

#define XT_NIBSWAP24(V) \
	((((V) & 0x00000F) << 20) \
		| (((V) & 0x0000F0) << 12) \
		| (((V) & 0x000F00) << 4) \
		| (((V) & 0x00F000) >> 4) \
		| (((V) & 0x0F0000) >> 12) \
		| (((V) & 0xF00000) >> 20))

/* _XT_INS_FORMAT_*()
 * Instruction formatting converted from little-endian inputs
 * and shifted to the MSB-side of DIR for BE systems.
 * X is the xtensa instance (for endianness), OPCODE the base encoding;
 * remaining arguments are the instruction's register/immediate fields.
 */
#define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((SR) & 0xFF) << 8)) << 8 \
		: (OPCODE) \
		| (((SR) & 0xFF) << 8) \
		| (((T) & 0x0F) << 4))

#define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
			| (((R) & 0x0F) << 8)) << 8 \
		: (OPCODE) \
		| (((ST) & 0xFF) << 4) \
		| (((R) & 0x0F) << 12))

/* 16-bit narrow format; result is shifted to the top half for BE */
#define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
	(XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
			| (((T) & 0x0F) << 8) \
			| (((S) & 0x0F) << 4) \
			| ((IMM4) & 0x0F)) << 16 \
		: (OPCODE) \
		| (((T) & 0x0F) << 4) \
		| (((S) & 0x0F) << 8) \
		| (((IMM4) & 0x0F) << 12))

#define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((S) & 0x0F) << 12) \
			| (((R) & 0x0F) << 8) \
			| ((IMM8) & 0xFF)) << 8 \
		: (OPCODE) \
		| (((IMM8) & 0xFF) << 16) \
		| (((R) & 0x0F) << 12) \
		| (((S) & 0x0F) << 8) \
		| (((T) & 0x0F) << 4))

#define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
	(XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
			| (((T) & 0x0F) << 16) \
			| (((S) & 0x0F) << 12) \
			| (((R) & 0x0F) << 8)) << 8 \
		| ((IMM4) & 0x0F) \
		: (OPCODE) \
		| (((IMM4) & 0x0F) << 20) \
		| (((R) & 0x0F) << 12) \
		| (((S) & 0x0F) << 8) \
		| (((T) & 0x0F) << 4))
/* Xtensa processor instruction opcodes
 * Each macro yields a ready-to-queue encoding; X selects LE/BE form.
 */
/* "Return From Debug Operation" to Normal */
#define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
/* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
#define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)

/* Load to DDR register, increase addr register */
#define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
/* Store from DDR register, increase addr register */
#define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))

/* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
#define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
/* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
#define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
/* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
#define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)

/* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
#define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
/* Store 16-bit to A(S)+2*IMM8 from A(T) */
#define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
/* Store 8-bit to A(S)+IMM8 from A(T) */
#define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)

/* Cache Instructions: instruction-cache hit-invalidate, data-cache
 * writeback-invalidate and writeback, plus instruction-fetch sync */
#define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
#define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
#define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
#define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)

/* Control Instructions */
#define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
#define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))

/* Read Special Register */
#define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
/* Write Special Register */
#define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
/* Swap Special Register */
#define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)

/* Rotate Window by (-8..7) */
#define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))

/* Read User Register */
#define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
/* Write User Register */
#define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)

/* Read Floating-Point Register */
#define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
/* Write Floating-Point Register */
#define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)

/* Windowed load/store from/to exception save area, and the mask used to
 * recognize either encoding when scanning instructions */
#define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
#define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
#define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)

/* Return-from-window-overflow/underflow, and the match-all mask for them */
#define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
#define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
#define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)

#define XT_WATCHPOINTS_NUM_MAX 2

/* Special register number macro for DDR, PS, WB, A3, A4 registers.
 * These get used a lot so making a shortcut is useful.
 */
#define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
#define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
#define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
#define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
#define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)

#define XT_PS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
#define XT_PC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
#define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL) */
#define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */

#define XT_SW_BREAKPOINTS_MAX_NUM 32
#define XT_HW_IBREAK_MAX_NUM 2
#define XT_HW_DBREAK_MAX_NUM 2
/* Canonical Xtensa register descriptor table, indexed by XT_REG_IDX_*.
 * Each entry carries the GDB-visible name, the hardware register number,
 * the access class (general/special/user/debug) and access flags. */
struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
	XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ps", 0xE6, XT_REG_SPECIAL, 0),	/* PS (not mapped through EPS[]) */
	XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),	/* debug data reg: write-only from host side */
	XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),

	/* WARNING: For these registers, regnum points to the
	 * index of the corresponding ARx registers, NOT to
	 * the processor register number! */
	XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
};
283
/**
 * Types of memory used at xtensa target
 */
enum xtensa_mem_region_type {
	XTENSA_MEM_REG_IROM = 0x0,
	XTENSA_MEM_REG_IRAM,
	XTENSA_MEM_REG_DROM,
	XTENSA_MEM_REG_DRAM,
	XTENSA_MEM_REG_SRAM,
	XTENSA_MEM_REG_SROM,
	XTENSA_MEM_REGS_NUM	/* count of region types, not a real type */
};

/* Register definition as union for list allocation:
 * lets callers view one 32-bit register value either numerically
 * or as its 4 raw bytes. */
union xtensa_reg_val_u {
	xtensa_reg_val_t val;
	uint8_t buf[4];
};

/* Mapping from GDB-style "Exx" error strings to OpenOCD error codes,
 * indexed by the XT_QERR_* enum. */
static const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
	{ .chrval = "E00", .intval = ERROR_FAIL },
	{ .chrval = "E01", .intval = ERROR_FAIL },
	{ .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
	{ .chrval = "E03", .intval = ERROR_FAIL },
};

/* Set to true for extra debug logging */
static const bool xtensa_extra_debug_log;
312
313 /**
314 * Gets a config for the specific mem type
315 */
316 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
317 struct xtensa *xtensa,
318 enum xtensa_mem_region_type type)
319 {
320 switch (type) {
321 case XTENSA_MEM_REG_IROM:
322 return &xtensa->core_config->irom;
323 case XTENSA_MEM_REG_IRAM:
324 return &xtensa->core_config->iram;
325 case XTENSA_MEM_REG_DROM:
326 return &xtensa->core_config->drom;
327 case XTENSA_MEM_REG_DRAM:
328 return &xtensa->core_config->dram;
329 case XTENSA_MEM_REG_SRAM:
330 return &xtensa->core_config->sram;
331 case XTENSA_MEM_REG_SROM:
332 return &xtensa->core_config->srom;
333 default:
334 return NULL;
335 }
336 }
337
338 /**
339 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
340 * for a given address
341 * Returns NULL if nothing found
342 */
343 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
344 const struct xtensa_local_mem_config *mem,
345 target_addr_t address)
346 {
347 for (unsigned int i = 0; i < mem->count; i++) {
348 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
349 if (address >= region->base && address < (region->base + region->size))
350 return region;
351 }
352 return NULL;
353 }
354
355 /**
356 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
357 * for a given address
358 * Returns NULL if nothing found
359 */
360 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
361 struct xtensa *xtensa,
362 target_addr_t address)
363 {
364 const struct xtensa_local_mem_region_config *result;
365 const struct xtensa_local_mem_config *mcgf;
366 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
367 mcgf = xtensa_get_mem_config(xtensa, mtype);
368 result = xtensa_memory_region_find(mcgf, address);
369 if (result)
370 return result;
371 }
372 return NULL;
373 }
374
375 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
376 const struct xtensa_local_mem_config *mem,
377 target_addr_t address)
378 {
379 if (!cache->size)
380 return false;
381 return xtensa_memory_region_find(mem, address);
382 }
383
384 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
385 {
386 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
387 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
388 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
389 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
390 }
391
392 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
393 {
394 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
395 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
396 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
397 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
398 }
399
400 static int xtensa_core_reg_get(struct reg *reg)
401 {
402 /* We don't need this because we read all registers on halt anyway. */
403 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
404 struct target *target = xtensa->target;
405
406 if (target->state != TARGET_HALTED)
407 return ERROR_TARGET_NOT_HALTED;
408 if (!reg->exist) {
409 if (strncmp(reg->name, "?0x", 3) == 0) {
410 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
411 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
412 return ERROR_OK;
413 }
414 return ERROR_COMMAND_ARGUMENT_INVALID;
415 }
416 return ERROR_OK;
417 }
418
419 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
420 {
421 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
422 struct target *target = xtensa->target;
423
424 assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
425 if (target->state != TARGET_HALTED)
426 return ERROR_TARGET_NOT_HALTED;
427
428 if (!reg->exist) {
429 if (strncmp(reg->name, "?0x", 3) == 0) {
430 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
431 LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
432 return ERROR_OK;
433 }
434 return ERROR_COMMAND_ARGUMENT_INVALID;
435 }
436
437 buf_cpy(buf, reg->value, reg->size);
438
439 if (xtensa->core_config->windowed) {
440 /* If the user updates a potential scratch register, track for conflicts */
441 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
442 if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
443 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
444 buf_get_u32(reg->value, 0, 32));
445 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
446 xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
447 xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
448 xtensa->scratch_ars[s].intval = true;
449 break;
450 }
451 }
452 }
453 reg->dirty = true;
454 reg->valid = true;
455
456 return ERROR_OK;
457 }
458
/* Register access vtable installed on every cached Xtensa register. */
static const struct reg_arch_type xtensa_reg_type = {
	.get = xtensa_core_reg_get,
	.set = xtensa_core_reg_set,
};
463
/* Convert a register index that's indexed relative to windowbase, to the real address.
 * Accepts either an ARx index or an Ax (window-relative) index; any other
 * index is an error and yields -1.
 * The result wraps modulo the AR file size (aregs_num is a power of two,
 * hence the & (aregs_num - 1) mask); windowbase counts in units of 4 ARs. */
static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
	enum xtensa_reg_id reg_idx,
	int windowbase)
{
	unsigned int idx;
	if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
		idx = reg_idx - XT_REG_IDX_AR0;
	} else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
		idx = reg_idx - XT_REG_IDX_A0;
	} else {
		LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
		return -1;
	}
	return ((idx + windowbase * 4) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
}
480
/* Inverse of xtensa_windowbase_offset_to_canonical: map a canonical ARx
 * index back to its window-relative index by rotating the other way. */
static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
	enum xtensa_reg_id reg_idx,
	int windowbase)
{
	return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
}
487
488 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
489 {
490 struct reg *reg_list = xtensa->core_cache->reg_list;
491 reg_list[reg_idx].dirty = true;
492 }
493
/* Queue a single (up to 24-bit) instruction for execution on the core:
 * writing DIR0EXEC both loads and triggers execution of the opcode. */
static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
{
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0EXEC, ins);
}
498
/* Queue a wide instruction (up to 64 bytes) for execution.
 * The opcode bytes are zero-padded to a whole number of 32-bit words and
 * written into DIR1..DIRn first; DIR0EXEC is written last because that
 * write triggers execution.
 * NOTE: out-of-range oplen (0 or > 64) is silently ignored. */
static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
{
	const int max_oplen = 64;	/* 8 DIRx regs: max width 64B */
	if ((oplen > 0) && (oplen <= max_oplen)) {
		uint8_t ops_padded[max_oplen];
		memcpy(ops_padded, ops, oplen);
		memset(ops_padded + oplen, 0, max_oplen - oplen);
		unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
		/* Upper words first (DIRn..DIR1) */
		for (int32_t i = oplenw - 1; i > 0; i--)
			xtensa_queue_dbg_reg_write(xtensa,
				XDMREG_DIR0 + i,
				target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t)*i]));
		/* Write DIR0EXEC last */
		xtensa_queue_dbg_reg_write(xtensa,
			XDMREG_DIR0EXEC,
			target_buffer_get_u32(xtensa->target, &ops_padded[0]));
	}
}
517
518 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
519 {
520 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
521 return dm->pwr_ops->queue_reg_write(dm, reg, data);
522 }
523
524 /* NOTE: Assumes A3 has already been saved */
525 static int xtensa_window_state_save(struct target *target, uint32_t *woe)
526 {
527 struct xtensa *xtensa = target_to_xtensa(target);
528 int woe_dis;
529 uint8_t woe_buf[4];
530
531 if (xtensa->core_config->windowed) {
532 /* Save PS (LX) and disable window overflow exceptions prior to AR save */
533 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_PS, XT_REG_A3));
534 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
535 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, woe_buf);
536 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
537 if (res != ERROR_OK) {
538 LOG_ERROR("Failed to read PS (%d)!", res);
539 return res;
540 }
541 xtensa_core_status_check(target);
542 *woe = buf_get_u32(woe_buf, 0, 32);
543 woe_dis = *woe & ~XT_PS_WOE_MSK;
544 LOG_DEBUG("Clearing PS.WOE (0x%08" PRIx32 " -> 0x%08" PRIx32 ")", *woe, woe_dis);
545 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe_dis);
546 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
547 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
548 }
549 return ERROR_OK;
550 }
551
/* Re-enable the window-overflow-exception state previously captured by
 * xtensa_window_state_save(): write the saved PS value back via DDR/A3.
 * NOTE: Assumes A3 has already been saved (it is used as scratch).
 * Operations are only queued; the caller flushes the DM queue. */
static void xtensa_window_state_restore(struct target *target, uint32_t woe)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	if (xtensa->core_config->windowed) {
		/* Restore window overflow exception state */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
		LOG_DEBUG("Restored PS.WOE (0x%08" PRIx32 ")", woe);
	}
}
564
565 static bool xtensa_reg_is_readable(int flags, int cpenable)
566 {
567 if (flags & XT_REGF_NOREAD)
568 return false;
569 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
570 return false;
571 return true;
572 }
573
/* Resolve a dirty-value conflict between an Ax register (index i) and its
 * underlying ARx register (index j).
 * If only the Ax side was set via gdb (tracked in scratch_ars), copy Ax
 * into ARx; otherwise copy ARx into Ax (ARx takes priority).
 * Returns true when BOTH were gdb-set, i.e. the caller should still warn. */
static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
{
	int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
	if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
		LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
		memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
	} else {
		LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
		memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
	}
	return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
}
586
/* Flush every dirty register in the cache back to the core.
 * Order matters: SFR/user/FP registers first (via DDR + A3 scratch),
 * CPENABLE deferred to last among them, then A0-A15, then (windowed
 * configs) the full AR file by rotating the window in steps of 4.
 * A3 is preserved around the whole sequence since it is the scratch
 * register for all DDR transfers. */
static int xtensa_write_dirty_registers(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	xtensa_reg_val_t regval, windowbase = 0;
	bool scratch_reg_dirty = false, delay_cpenable = false;
	struct reg *reg_list = xtensa->core_cache->reg_list;
	unsigned int reg_list_size = xtensa->core_cache->num_regs;
	bool preserve_a3 = false;
	uint8_t a3_buf[4];
	xtensa_reg_val_t a3 = 0, woe;

	LOG_TARGET_DEBUG(target, "start");

	/* We need to write the dirty registers in the cache list back to the processor.
	 * Start by writing the SFR/user registers. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		/* Indices beyond XT_NUM_REGS address the optional-register table */
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (reg_list[i].dirty) {
			if (rlist[ridx].type == XT_REG_SPECIAL ||
				rlist[ridx].type == XT_REG_USER ||
				rlist[ridx].type == XT_REG_FR) {
				scratch_reg_dirty = true;
				/* CPENABLE must be written after all coprocessor regs */
				if (i == XT_REG_IDX_CPENABLE) {
					delay_cpenable = true;
					continue;
				}
				regval = xtensa_reg_get(target, i);
				LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
					reg_list[i].name,
					rlist[ridx].reg_num,
					regval);
				/* Value goes DDR -> A3 -> destination register */
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				if (reg_list[i].exist) {
					unsigned int reg_num = rlist[ridx].reg_num;
					if (rlist[ridx].type == XT_REG_USER) {
						xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
					} else if (rlist[ridx].type == XT_REG_FR) {
						xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
					} else {/*SFR */
						if (reg_num == XT_PC_REG_NUM_VIRTUAL)
							/* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
							reg_num =
								(XT_PC_REG_NUM_BASE +
								xtensa->core_config->debug.irq_level);
						xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
					}
				}
				reg_list[i].dirty = false;
			}
		}
	}
	/* A3 was clobbered as scratch above, so it must be rewritten */
	if (scratch_reg_dirty)
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	if (delay_cpenable) {
		regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
		LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
			xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
			XT_REG_A3));
		reg_list[XT_REG_IDX_CPENABLE].dirty = false;
	}

	preserve_a3 = (xtensa->core_config->windowed);
	if (preserve_a3) {
		/* Save (windowed) A3 for scratch use */
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK)
			return res;
		xtensa_core_status_check(target);
		a3 = buf_get_u32(a3_buf, 0, 32);
	}

	if (xtensa->core_config->windowed) {
		res = xtensa_window_state_save(target, &woe);
		if (res != ERROR_OK)
			return res;
		/* Grab the windowbase, we need it. */
		windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
		/* Check if there are mismatches between the ARx and corresponding Ax registers.
		 * When the user sets a register on a windowed config, xt-gdb may set the ARx
		 * register directly. Thus we take ARx as priority over Ax if both are dirty
		 * and it's unclear if the user set one over the other explicitly.
		 */
		for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
			unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
			if (reg_list[i].dirty && reg_list[j].dirty) {
				if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
					bool show_warning = true;
					if (i == XT_REG_IDX_A3)
						show_warning = xtensa_scratch_regs_fixup(xtensa,
							reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
					else if (i == XT_REG_IDX_A4)
						show_warning = xtensa_scratch_regs_fixup(xtensa,
							reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
					if (show_warning)
						LOG_WARNING(
							"Warning: Both A%d [0x%08" PRIx32
							"] as well as its underlying physical register "
							"(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
							i - XT_REG_IDX_A0,
							buf_get_u32(reg_list[i].value, 0, 32),
							j - XT_REG_IDX_AR0,
							buf_get_u32(reg_list[j].value, 0, 32));
				}
			}
		}
	}

	/* Write A0-A15 (window-relative registers). */
	for (unsigned int i = 0; i < 16; i++) {
		if (reg_list[XT_REG_IDX_A0 + i].dirty) {
			regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
			LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
				xtensa_regs[XT_REG_IDX_A0 + i].name,
				regval,
				xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
			reg_list[XT_REG_IDX_A0 + i].dirty = false;
			if (i == 3) {
				/* Avoid stomping A3 during restore at end of function */
				a3 = regval;
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* Now write AR registers */
		for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
			/* Write the 16 registers we can see */
			for (unsigned int i = 0; i < 16; i++) {
				if (i + j < xtensa->core_config->aregs_num) {
					enum xtensa_reg_id realadr =
						xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
						windowbase);
					/* Write back any dirty un-windowed registers */
					if (reg_list[realadr].dirty) {
						regval = xtensa_reg_get(target, realadr);
						LOG_TARGET_DEBUG(
							target,
							"Writing back reg %s value %08" PRIX32 ", num =%i",
							xtensa_regs[realadr].name,
							regval,
							xtensa_regs[realadr].reg_num);
						xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
						xtensa_queue_exec_ins(xtensa,
							XT_INS_RSR(xtensa, XT_SR_DDR,
								xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
						reg_list[realadr].dirty = false;
						if ((i + j) == 3)
							/* Avoid stomping AR during A3 restore at end of function */
							a3 = regval;
					}
				}
			}
			/* Now rotate the window so we'll see the next 16 registers. The final
			 * rotate will wrap around, leaving us in the state we were. */
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
		}

		xtensa_window_state_restore(target, woe);

		/* Conflicts are resolved; clear the gdb-set tracking flags */
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
			xtensa->scratch_ars[s].intval = false;
	}

	if (preserve_a3) {
		/* Put the saved (or freshly written) A3 value back */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	}

	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	xtensa_core_status_check(target);

	return res;
}
772
773 static inline bool xtensa_is_stopped(struct target *target)
774 {
775 struct xtensa *xtensa = target_to_xtensa(target);
776 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
777 }
778
/* Target 'examine' hook: wake the debug/memory/core power domains, take
 * JTAG ownership of the debug module, verify the module answers with a
 * valid OCD ID, mark the target examined and apply the cached smpbreak
 * configuration.
 * Returns ERROR_OK, ERROR_FAIL if no core config was loaded, or a
 * debug-module error. */
int xtensa_examine(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);

	LOG_DEBUG("coreid = %d", target->coreid);

	if (xtensa->core_config->core_type == XT_UNDEF) {
		LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
		return ERROR_FAIL;
	}

	/* Wake first, then additionally claim JTAG debug use in a second write */
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
	xtensa_dm_queue_enable(&xtensa->dbg_mod);
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
		LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
	if (!target_was_examined(target))
		target_set_examined(target);
	xtensa_smpbreak_write(xtensa, xtensa->smp_break);
	return ERROR_OK;
}
808
809 int xtensa_wakeup(struct target *target)
810 {
811 struct xtensa *xtensa = target_to_xtensa(target);
812 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
813
814 if (xtensa->reset_asserted)
815 cmd |= PWRCTL_CORERESET(xtensa);
816 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
817 /* TODO: can we join this with the write above? */
818 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
819 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
820 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
821 }
822
/* Program the cross-trigger (SMP break) routing bits in DCR.  Bits present
 * in @set are enabled via DCRSET; the remaining break-in/out, run-stall and
 * debug-mode-out routing bits are explicitly disabled via DCRCLR.
 * OCDDCR_ENABLEOCD is always kept set.  Finally DSR is written. */
int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
{
	/* NOTE(review): 0x00110000 presumably clears sticky break-in/out status
	 * bits in DSR — confirm against the Xtensa debug module documentation. */
	uint32_t dsr_data = 0x00110000;
	/* XOR against the full routing mask yields the bits NOT requested,
	 * i.e. the ones to clear; ENABLEOCD cancels out and is never cleared. */
	uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
		(OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
		OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);

	LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, set | OCDDCR_ENABLEOCD);
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, clear);
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DSR, dsr_data);
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	return xtensa_dm_queue_execute(&xtensa->dbg_mod);
}
837
838 int xtensa_smpbreak_set(struct target *target, uint32_t set)
839 {
840 struct xtensa *xtensa = target_to_xtensa(target);
841 int res = ERROR_OK;
842
843 xtensa->smp_break = set;
844 if (target_was_examined(target))
845 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
846 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
847 return res;
848 }
849
850 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
851 {
852 uint8_t dcr_buf[sizeof(uint32_t)];
853
854 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DCRSET, dcr_buf);
855 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
856 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
857 *val = buf_get_u32(dcr_buf, 0, 32);
858
859 return res;
860 }
861
862 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
863 {
864 struct xtensa *xtensa = target_to_xtensa(target);
865 *val = xtensa->smp_break;
866 return ERROR_OK;
867 }
868
869 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
870 {
871 return buf_get_u32(reg->value, 0, 32);
872 }
873
874 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
875 {
876 buf_set_u32(reg->value, 0, 32, value);
877 reg->dirty = true;
878 }
879
880 int xtensa_core_status_check(struct target *target)
881 {
882 struct xtensa *xtensa = target_to_xtensa(target);
883 int res, needclear = 0;
884
885 xtensa_dm_core_status_read(&xtensa->dbg_mod);
886 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
887 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
888 if (dsr & OCDDSR_EXECBUSY) {
889 if (!xtensa->suppress_dsr_errors)
890 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
891 needclear = 1;
892 }
893 if (dsr & OCDDSR_EXECEXCEPTION) {
894 if (!xtensa->suppress_dsr_errors)
895 LOG_TARGET_ERROR(target,
896 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
897 dsr);
898 needclear = 1;
899 }
900 if (dsr & OCDDSR_EXECOVERRUN) {
901 if (!xtensa->suppress_dsr_errors)
902 LOG_TARGET_ERROR(target,
903 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
904 dsr);
905 needclear = 1;
906 }
907 if (needclear) {
908 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
909 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
910 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
911 LOG_TARGET_ERROR(target, "clearing DSR failed!");
912 return ERROR_FAIL;
913 }
914 return ERROR_OK;
915 }
916
917 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
918 {
919 struct xtensa *xtensa = target_to_xtensa(target);
920 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
921 return xtensa_reg_get_value(reg);
922 }
923
924 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
925 {
926 struct xtensa *xtensa = target_to_xtensa(target);
927 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
928 if (xtensa_reg_get_value(reg) == value)
929 return;
930 xtensa_reg_set_value(reg, value);
931 }
932
933 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
934 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
935 {
936 struct xtensa *xtensa = target_to_xtensa(target);
937 uint32_t windowbase = (xtensa->core_config->windowed ?
938 xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE) : 0);
939 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
940 xtensa_reg_set(target, a_idx, value);
941 xtensa_reg_set(target, ar_idx, value);
942 }
943
/* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format.
 * Value comes from the cached DEBUGCAUSE register, so it is meaningful only
 * after a register fetch. */
uint32_t xtensa_cause_get(struct target *target)
{
	return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
}
949
950 void xtensa_cause_clear(struct target *target)
951 {
952 struct xtensa *xtensa = target_to_xtensa(target);
953 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
954 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
955 }
956
/* Put the core into reset via PWRCTL while keeping the debug domain awake.
 * The register cache is invalidated because core state is lost in reset. */
int xtensa_assert_reset(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
	/* Wake all domains and assert core reset in a single PWRCTL write */
	xtensa_queue_pwr_reg_write(xtensa,
		XDMREG_PWRCTL,
		PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
		PWRCTL_COREWAKEUP(xtensa) | PWRCTL_CORERESET(xtensa));
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;

	/* registers are now invalid */
	xtensa->reset_asserted = true;
	register_cache_invalidate(xtensa->core_cache);
	target->state = TARGET_RESET;
	return ERROR_OK;
}
977
/* Release the core from reset.  If reset_halt was requested, a debug
 * interrupt is armed first so the core halts immediately after reset. */
int xtensa_deassert_reset(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
	if (target->reset_halt)
		/* Arm DEBUGINTERRUPT so the core traps into OCD straight out of reset */
		xtensa_queue_dbg_reg_write(xtensa,
			XDMREG_DCRSET,
			OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
	/* Same wakeup set as xtensa_assert_reset() but without CORERESET */
	xtensa_queue_pwr_reg_write(xtensa,
		XDMREG_PWRCTL,
		PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
		PWRCTL_COREWAKEUP(xtensa));
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	target->state = TARGET_RUNNING;
	xtensa->reset_asserted = false;
	return res;
}
999
/* Soft-reset-halt entry point: implemented by asserting reset; halting
 * after reset is handled by the reset-halt machinery elsewhere. */
int xtensa_soft_reset_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "begin");
	return xtensa_assert_reset(target);
}
1005
/* Read the complete register set from the target into the core register
 * cache.  All debug-module reads are queued and executed in large batches
 * for speed: first the physical ARs (rotating the window to reach all of
 * them), then CPENABLE (coprocessors are force-enabled so FP/user registers
 * become readable), then all SFR/user/FP registers using A3 as scratch.
 * Finally the raw results are decoded into the cache, honoring WINDOWBASE
 * on windowed configs.  Returns ERROR_OK or an OpenOCD error code. */
int xtensa_fetch_all_regs(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg *reg_list = xtensa->core_cache->reg_list;
	unsigned int reg_list_size = xtensa->core_cache->num_regs;
	xtensa_reg_val_t cpenable = 0, windowbase = 0, a3;
	uint32_t woe;
	uint8_t a3_buf[4];
	/* Capturing a DSR per register is expensive; only do it on the first
	 * fetch or when debug logging is enabled. */
	bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);

	union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
	if (!regvals) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
		return ERROR_FAIL;
	}
	union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
	if (!dsrs) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
		free(regvals);
		return ERROR_FAIL;
	}

	LOG_TARGET_DEBUG(target, "start");

	/* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
	int res = xtensa_window_state_save(target, &woe);
	if (res != ERROR_OK)
		goto xtensa_fetch_all_regs_done;

	/* Assume the CPU has just halted. We now want to fill the register cache with all the
	 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
	 * in one go, then sort everything out from the regvals variable. */

	/* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
	for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
		/* Grab the 16 registers we can see */
		for (unsigned int i = 0; i < 16; i++) {
			if (i + j < xtensa->core_config->aregs_num) {
				xtensa_queue_exec_ins(xtensa,
					XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
					regvals[XT_REG_IDX_AR0 + i + j].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR,
						dsrs[XT_REG_IDX_AR0 + i + j].buf);
			}
		}
		if (xtensa->core_config->windowed)
			/* Now rotate the window so we'll see the next 16 registers. The final rotate
			 * will wraparound, leaving us in the state we were. */
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
	}
	xtensa_window_state_restore(target, woe);

	if (xtensa->core_config->coproc) {
		/* As the very first thing after AREGS, go grab CPENABLE */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
	}
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to read ARs (%d)!", res);
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	a3 = buf_get_u32(a3_buf, 0, 32);

	if (xtensa->core_config->coproc) {
		cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);

		/* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));

		/* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
		LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
		xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	}
	/* We're now free to use any of A0-A15 as scratch registers
	 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		/* Indices below XT_NUM_REGS come from the static table; the rest
		 * are config-specific optional registers. */
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			bool reg_fetched = true;
			unsigned int reg_num = rlist[ridx].reg_num;
			switch (rlist[ridx].type) {
			case XT_REG_USER:
				xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_FR:
				xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_SPECIAL:
				if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
					/* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
					reg_num = (XT_PC_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
				} else if (reg_num == xtensa_regs[XT_REG_IDX_PS].reg_num) {
					/* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
					reg_num = (XT_PS_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
				} else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
					/* CPENABLE already read/updated; don't re-read */
					reg_fetched = false;
					break;
				}
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
				break;
			default:
				reg_fetched = false;
			}
			if (reg_fetched) {
				/* Move the value out through A3 -> DDR -> host buffer */
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
			}
		}
	}
	/* Ok, send the whole mess to the CPU. */
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to fetch AR regs!");
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	if (debug_dsrs) {
		/* DSR checking: follows order in which registers are requested. */
		for (unsigned int i = 0; i < reg_list_size; i++) {
			struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
			unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
			if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
				(rlist[ridx].type != XT_REG_DEBUG) &&
				(rlist[ridx].type != XT_REG_RELGEN) &&
				(rlist[ridx].type != XT_REG_TIE) &&
				(rlist[ridx].type != XT_REG_OTHER)) {
				if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
					LOG_ERROR("Exception reading %s!", reg_list[i].name);
					res = ERROR_FAIL;
					goto xtensa_fetch_all_regs_done;
				}
			}
		}
	}

	if (xtensa->core_config->windowed)
		/* We need the windowbase to decode the general addresses. */
		windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE].buf, 0, 32);
	/* Decode the result and update the cache. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
				/* The 64-value general register set is read from (windowbase) on down.
				 * We need to get the real register address by subtracting windowbase and
				 * wrapping around. */
				enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
					windowbase);
				buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
			} else if (rlist[ridx].type == XT_REG_RELGEN) {
				buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
				if (xtensa_extra_debug_log) {
					xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
					LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
				}
			} else {
				xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
				bool is_dirty = (i == XT_REG_IDX_CPENABLE);
				if (xtensa_extra_debug_log)
					LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
				xtensa_reg_set(target, i, regval);
				reg_list[i].dirty = is_dirty;	/* always do this _after_ xtensa_reg_set! */
			}
			reg_list[i].valid = true;
		} else {
			if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
				/* Report read-only registers all-zero but valid */
				reg_list[i].valid = true;
				xtensa_reg_set(target, i, 0);
			} else {
				reg_list[i].valid = false;
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* We have used A3 as a scratch register.
		 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
		 */
		enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
		xtensa_reg_set(target, ar3_idx, a3);
		xtensa_mark_register_dirty(xtensa, ar3_idx);

		/* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
		enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
			xtensa->scratch_ars[s].intval = false;
	}

	/* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
	xtensa_reg_set(target, XT_REG_IDX_A3, a3);
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	xtensa->regs_fetched = true;
xtensa_fetch_all_regs_done:
	free(regvals);
	free(dsrs);
	return res;
}
1223
1224 int xtensa_get_gdb_reg_list(struct target *target,
1225 struct reg **reg_list[],
1226 int *reg_list_size,
1227 enum target_register_class reg_class)
1228 {
1229 struct xtensa *xtensa = target_to_xtensa(target);
1230 unsigned int num_regs;
1231
1232 if (reg_class == REG_CLASS_GENERAL) {
1233 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1234 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1235 return ERROR_FAIL;
1236 }
1237 num_regs = xtensa->genpkt_regs_num;
1238 } else {
1239 /* Determine whether to return a contiguous or sparse register map */
1240 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1241 }
1242
1243 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1244
1245 *reg_list = calloc(num_regs, sizeof(struct reg *));
1246 if (!*reg_list)
1247 return ERROR_FAIL;
1248
1249 *reg_list_size = num_regs;
1250 if (xtensa->regmap_contiguous) {
1251 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1252 for (unsigned int i = 0; i < num_regs; i++)
1253 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1254 return ERROR_OK;
1255 }
1256
1257 for (unsigned int i = 0; i < num_regs; i++)
1258 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1259 unsigned int k = 0;
1260 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1261 if (xtensa->core_cache->reg_list[i].exist) {
1262 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1263 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1264 int sparse_idx = rlist[ridx].dbreg_num;
1265 if (i == XT_REG_IDX_PS) {
1266 if (xtensa->eps_dbglevel_idx == 0) {
1267 LOG_ERROR("eps_dbglevel_idx not set\n");
1268 return ERROR_FAIL;
1269 }
1270 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1271 if (xtensa_extra_debug_log)
1272 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1273 sparse_idx, xtensa->core_config->debug.irq_level,
1274 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1275 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1276 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1277 } else {
1278 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1279 }
1280 if (i == XT_REG_IDX_PC)
1281 /* Make a duplicate copy of PC for external access */
1282 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1283 k++;
1284 }
1285 }
1286
1287 if (k == num_regs)
1288 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1289
1290 return ERROR_OK;
1291 }
1292
1293 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1294 {
1295 struct xtensa *xtensa = target_to_xtensa(target);
1296 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1297 xtensa->core_config->mmu.dtlb_entries_count > 0;
1298 return ERROR_OK;
1299 }
1300
/* Request a halt.  If the core is not already stopped, raise a debug
 * interrupt via DCRSET; the resulting halted state is picked up later by
 * polling.  Returns ERROR_OK immediately when already halted. */
int xtensa_halt(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "start");
	if (target->state == TARGET_HALTED) {
		LOG_TARGET_DEBUG(target, "target was already halted");
		return ERROR_OK;
	}
	/* First we have to read dsr and check if the target stopped */
	int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to read core status!");
		return res;
	}
	LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
	if (!xtensa_is_stopped(target)) {
		/* Raise a debug interrupt to force the core into debug mode */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
		xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK)
			LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
	}

	return res;
}
1327
1328 int xtensa_prepare_resume(struct target *target,
1329 int current,
1330 target_addr_t address,
1331 int handle_breakpoints,
1332 int debug_execution)
1333 {
1334 struct xtensa *xtensa = target_to_xtensa(target);
1335 uint32_t bpena = 0;
1336
1337 LOG_TARGET_DEBUG(target,
1338 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1339 current,
1340 address,
1341 handle_breakpoints,
1342 debug_execution);
1343
1344 if (target->state != TARGET_HALTED) {
1345 LOG_TARGET_WARNING(target, "target not halted");
1346 return ERROR_TARGET_NOT_HALTED;
1347 }
1348
1349 if (address && !current) {
1350 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1351 } else {
1352 uint32_t cause = xtensa_cause_get(target);
1353 LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1354 cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1355 if (cause & DEBUGCAUSE_DB)
1356 /* We stopped due to a watchpoint. We can't just resume executing the
1357 * instruction again because */
1358 /* that would trigger the watchpoint again. To fix this, we single-step,
1359 * which ignores watchpoints. */
1360 xtensa_do_step(target, current, address, handle_breakpoints);
1361 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1362 /* We stopped due to a break instruction. We can't just resume executing the
1363 * instruction again because */
1364 /* that would trigger the break again. To fix this, we single-step, which
1365 * ignores break. */
1366 xtensa_do_step(target, current, address, handle_breakpoints);
1367 }
1368
1369 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1370 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1371 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1372 if (xtensa->hw_brps[slot]) {
1373 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1374 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1375 bpena |= BIT(slot);
1376 }
1377 }
1378 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1379
1380 /* Here we write all registers to the targets */
1381 int res = xtensa_write_dirty_registers(target);
1382 if (res != ERROR_OK)
1383 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1384 return res;
1385 }
1386
1387 int xtensa_do_resume(struct target *target)
1388 {
1389 struct xtensa *xtensa = target_to_xtensa(target);
1390
1391 LOG_TARGET_DEBUG(target, "start");
1392
1393 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1394 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1395 if (res != ERROR_OK) {
1396 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1397 return res;
1398 }
1399 xtensa_core_status_check(target);
1400 return ERROR_OK;
1401 }
1402
1403 int xtensa_resume(struct target *target,
1404 int current,
1405 target_addr_t address,
1406 int handle_breakpoints,
1407 int debug_execution)
1408 {
1409 LOG_TARGET_DEBUG(target, "start");
1410 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1411 if (res != ERROR_OK) {
1412 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1413 return res;
1414 }
1415 res = xtensa_do_resume(target);
1416 if (res != ERROR_OK) {
1417 LOG_TARGET_ERROR(target, "Failed to resume!");
1418 return res;
1419 }
1420
1421 target->debug_reason = DBG_REASON_NOTHALTED;
1422 if (!debug_execution)
1423 target->state = TARGET_RUNNING;
1424 else
1425 target->state = TARGET_DEBUG_RUNNING;
1426
1427 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1428
1429 return ERROR_OK;
1430 }
1431
1432 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1433 {
1434 struct xtensa *xtensa = target_to_xtensa(target);
1435 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1436 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1437 if (err != ERROR_OK)
1438 return false;
1439
1440 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1441 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1442 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1443 return true;
1444
1445 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1446 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1447 return true;
1448
1449 return false;
1450 }
1451
/* Execute a single instruction on the halted core.
 *
 * Stepping is done by programming ICOUNT/ICOUNTLEVEL so the core halts again
 * after one instruction, resuming, then polling DSR (500 ms timeout).  The
 * awkward cases handled here: hard-coded software breakpoints (PC is simply
 * advanced past the BREAK), watchpoints (DBREAKC registers are temporarily
 * cleared and restored), PS.INTLEVEL at or above the step level (temporarily
 * lowered and restored), and landing inside a window exception handler while
 * ISRs are masked (the step is retried past it).
 *
 * @param current            Nonzero: step from current PC; zero: from @address.
 * @param address            Start PC when @current is zero.
 * @param handle_breakpoints Nonzero to advance PC over a BREAK instead of stepping.
 * @return ERROR_OK on success, otherwise an OpenOCD error code.
 */
int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	const uint32_t icount_val = -2;	/* ICOUNT value to load for 1 step */
	xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
	xtensa_reg_val_t icountlvl, cause;
	xtensa_reg_val_t oldps, oldpc, cur_pc;
	bool ps_lowered = false;

	LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
		current, address, handle_breakpoints);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (xtensa->eps_dbglevel_idx == 0) {
		LOG_ERROR("eps_dbglevel_idx not set\n");
		return ERROR_FAIL;
	}

	/* Save old ps (EPS[dbglvl] on LX), pc */
	oldps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
	oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);

	cause = xtensa_cause_get(target);
	LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
		oldps,
		oldpc,
		cause,
		xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
	if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
		/* handle hard-coded SW breakpoints (e.g. syscalls) */
		LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* pretend that we have stepped */
		if (cause & DEBUGCAUSE_BI)
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3);	/* PC = PC+3 */
		else
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2);	/* PC = PC+2 */
		return ERROR_OK;
	}

	/* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
	 * at which the instructions are to be counted while stepping.
	 *
	 * For example, if we need to step by 2 instructions, and an interrupt occurs
	 * in between, the processor will trigger the interrupt and halt after the 2nd
	 * instruction within the interrupt vector and/or handler.
	 *
	 * However, sometimes we don't want the interrupt handlers to be executed at all
	 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
	 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
	 * code from being counted during stepping. Note that C exception handlers must
	 * run at level 0 and hence will be counted and stepped into, should one occur.
	 *
	 * TODO: Certain instructions should never be single-stepped and should instead
	 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
	 * RFI >= DBGLEVEL.
	 */
	if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_TARGET_WARNING(
				target,
				"disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
			return ERROR_FAIL;
		}
		/* Update ICOUNTLEVEL accordingly */
		icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
	} else {
		icountlvl = xtensa->core_config->debug.irq_level;
	}

	if (cause & DEBUGCAUSE_DB) {
		/* We stopped due to a watchpoint. We can't just resume executing the instruction again because
		 * that would trigger the watchpoint again. To fix this, we remove watchpoints,single-step and
		 * re-enable the watchpoint. */
		LOG_TARGET_DEBUG(
			target,
			"Single-stepping to get past instruction that triggered the watchpoint...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* Save all DBREAKCx registers and set to 0 to disable watchpoints */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
			dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
		}
	}

	if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
		/* handle normal SW breakpoint */
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
	if ((oldps & 0xf) >= icountlvl) {
		/* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
		ps_lowered = true;
		uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
		LOG_TARGET_DEBUG(target,
			"Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			newps,
			oldps);
	}
	do {
		/* Arm single-step: halt again once one instruction has retired */
		xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
		xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);

		/* Now ICOUNT is set, we can resume as if we were going to run */
		res = xtensa_prepare_resume(target, current, address, 0, 0);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
			return res;
		}
		res = xtensa_do_resume(target);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
			return res;
		}

		/* Wait for stepping to complete */
		long long start = timeval_ms();
		while (timeval_ms() < start + 500) {
			/* Do not use target_poll here, it also triggers other things... just manually read the DSR
			 *until stepping is complete. */
			usleep(1000);
			res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
			if (res != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Failed to read core status!");
				return res;
			}
			if (xtensa_is_stopped(target))
				break;
			usleep(1000);
		}
		LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
			xtensa_dm_core_status_get(&xtensa->dbg_mod));
		if (!xtensa_is_stopped(target)) {
			LOG_TARGET_WARNING(
				target,
				"Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
			target->debug_reason = DBG_REASON_NOTHALTED;
			target->state = TARGET_RUNNING;
			return ERROR_FAIL;
		}

		xtensa_fetch_all_regs(target);
		cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);

		LOG_TARGET_DEBUG(target,
			"cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
			xtensa_reg_get(target, XT_REG_IDX_PS),
			cur_pc,
			xtensa_cause_get(target),
			xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));

		/* Do not step into WindowOverflow if ISRs are masked.
		   If we stop in WindowOverflow at breakpoint with masked ISRs and
		   try to do a step it will get us out of that handler */
		if (xtensa->core_config->windowed &&
			xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
			xtensa_pc_in_winexc(target, cur_pc)) {
			/* isrmask = on, need to step out of the window exception handler */
			LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
			oldpc = cur_pc;
			address = oldpc + 3;
			continue;
		}

		if (oldpc == cur_pc)
			LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
		else
			LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
		break;
	} while (true);

	target->debug_reason = DBG_REASON_SINGLESTEP;
	target->state = TARGET_HALTED;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
	LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);

	if (cause & DEBUGCAUSE_DB) {
		LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
		/* Restore the DBREAKCx registers */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
	}

	/* Restore int level */
	if (ps_lowered) {
		LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			oldps);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
	}

	/* write ICOUNTLEVEL back to zero */
	xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
	/* TODO: can we skip writing dirty registers and re-fetching them? */
	res = xtensa_write_dirty_registers(target);
	xtensa_fetch_all_regs(target);
	return res;
}
1657
1658 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1659 {
1660 return xtensa_do_step(target, current, address, handle_breakpoints);
1661 }
1662
1663 /**
1664 * Returns true if two ranges are overlapping
1665 */
1666 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1667 target_addr_t r1_end,
1668 target_addr_t r2_start,
1669 target_addr_t r2_end)
1670 {
1671 if ((r2_start >= r1_start) && (r2_start < r1_end))
1672 return true; /* r2_start is in r1 region */
1673 if ((r2_end > r1_start) && (r2_end <= r1_end))
1674 return true; /* r2_end is in r1 region */
1675 return false;
1676 }
1677
1678 /**
1679 * Returns a size of overlapped region of two ranges.
1680 */
1681 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1682 target_addr_t r1_end,
1683 target_addr_t r2_start,
1684 target_addr_t r2_end)
1685 {
1686 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1687 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1688 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1689 return ov_end - ov_start;
1690 }
1691 return 0;
1692 }
1693
1694 /**
1695 * Check if the address gets to memory regions, and its access mode
1696 */
1697 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1698 {
1699 target_addr_t adr_pos = address; /* address cursor set to the beginning start */
1700 target_addr_t adr_end = address + size; /* region end */
1701 target_addr_t overlap_size;
1702 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1703
1704 while (adr_pos < adr_end) {
1705 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1706 if (!cm) /* address is not belong to anything */
1707 return false;
1708 if ((cm->access & access) != access) /* access check */
1709 return false;
1710 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1711 assert(overlap_size != 0);
1712 adr_pos += overlap_size;
1713 }
1714 return true;
1715 }
1716
1717 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1718 {
1719 struct xtensa *xtensa = target_to_xtensa(target);
1720 /* We are going to read memory in 32-bit increments. This may not be what the calling
1721 * function expects, so we may need to allocate a temp buffer and read into that first. */
1722 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1723 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1724 target_addr_t adr = addrstart_al;
1725 uint8_t *albuff;
1726 bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1727
1728 if (target->state != TARGET_HALTED) {
1729 LOG_TARGET_WARNING(target, "target not halted");
1730 return ERROR_TARGET_NOT_HALTED;
1731 }
1732
1733 if (!xtensa->permissive_mode) {
1734 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1735 XT_MEM_ACCESS_READ)) {
1736 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1737 return ERROR_FAIL;
1738 }
1739 }
1740
1741 if (addrstart_al == address && addrend_al == address + (size * count)) {
1742 albuff = buffer;
1743 } else {
1744 albuff = malloc(addrend_al - addrstart_al);
1745 if (!albuff) {
1746 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1747 addrend_al - addrstart_al);
1748 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1749 }
1750 }
1751
1752 /* We're going to use A3 here */
1753 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1754 /* Write start address to A3 */
1755 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1756 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1757 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1758 if (xtensa->probe_lsddr32p != 0) {
1759 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1760 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1761 xtensa_queue_dbg_reg_read(xtensa,
1762 (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
1763 &albuff[i]);
1764 } else {
1765 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1766 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1767 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1768 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
1769 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[i]);
1770 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1771 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1772 }
1773 }
1774 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1775 if (res == ERROR_OK) {
1776 bool prev_suppress = xtensa->suppress_dsr_errors;
1777 xtensa->suppress_dsr_errors = true;
1778 res = xtensa_core_status_check(target);
1779 if (xtensa->probe_lsddr32p == -1)
1780 xtensa->probe_lsddr32p = 1;
1781 xtensa->suppress_dsr_errors = prev_suppress;
1782 }
1783 if (res != ERROR_OK) {
1784 if (xtensa->probe_lsddr32p != 0) {
1785 /* Disable fast memory access instructions and retry before reporting an error */
1786 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1787 xtensa->probe_lsddr32p = 0;
1788 res = xtensa_read_memory(target, address, size, count, buffer);
1789 bswap = false;
1790 } else {
1791 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
1792 count * size, address);
1793 }
1794 }
1795
1796 if (bswap)
1797 buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
1798 if (albuff != buffer) {
1799 memcpy(buffer, albuff + (address & 3), (size * count));
1800 free(albuff);
1801 }
1802
1803 return res;
1804 }
1805
1806 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1807 {
1808 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1809 return xtensa_read_memory(target, address, 1, count, buffer);
1810 }
1811
1812 int xtensa_write_memory(struct target *target,
1813 target_addr_t address,
1814 uint32_t size,
1815 uint32_t count,
1816 const uint8_t *buffer)
1817 {
1818 /* This memory write function can get thrown nigh everything into it, from
1819 * aligned uint32 writes to unaligned uint8ths. The Xtensa memory doesn't always
1820 * accept anything but aligned uint32 writes, though. That is why we convert
1821 * everything into that. */
1822 struct xtensa *xtensa = target_to_xtensa(target);
1823 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1824 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1825 target_addr_t adr = addrstart_al;
1826 int res;
1827 uint8_t *albuff;
1828 bool fill_head_tail = false;
1829
1830 if (target->state != TARGET_HALTED) {
1831 LOG_TARGET_WARNING(target, "target not halted");
1832 return ERROR_TARGET_NOT_HALTED;
1833 }
1834
1835 if (!xtensa->permissive_mode) {
1836 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
1837 LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
1838 return ERROR_FAIL;
1839 }
1840 }
1841
1842 if (size == 0 || count == 0 || !buffer)
1843 return ERROR_COMMAND_SYNTAX_ERROR;
1844
1845 /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
1846 if (addrstart_al == address && addrend_al == address + (size * count)) {
1847 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1848 /* Need a buffer for byte-swapping */
1849 albuff = malloc(addrend_al - addrstart_al);
1850 else
1851 /* We discard the const here because albuff can also be non-const */
1852 albuff = (uint8_t *)buffer;
1853 } else {
1854 fill_head_tail = true;
1855 albuff = malloc(addrend_al - addrstart_al);
1856 }
1857 if (!albuff) {
1858 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1859 addrend_al - addrstart_al);
1860 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1861 }
1862
1863 /* We're going to use A3 here */
1864 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1865
1866 /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
1867 if (fill_head_tail) {
1868 /* See if we need to read the first and/or last word. */
1869 if (address & 3) {
1870 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1871 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1872 if (xtensa->probe_lsddr32p == 1) {
1873 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1874 } else {
1875 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1876 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1877 }
1878 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[0]);
1879 }
1880 if ((address + (size * count)) & 3) {
1881 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
1882 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1883 if (xtensa->probe_lsddr32p == 1) {
1884 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1885 } else {
1886 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1887 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1888 }
1889 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
1890 &albuff[addrend_al - addrstart_al - 4]);
1891 }
1892 /* Grab bytes */
1893 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1894 if (res != ERROR_OK) {
1895 LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
1896 if (albuff != buffer)
1897 free(albuff);
1898 return res;
1899 }
1900 xtensa_core_status_check(target);
1901 if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
1902 bool swapped_w0 = false;
1903 if (address & 3) {
1904 buf_bswap32(&albuff[0], &albuff[0], 4);
1905 swapped_w0 = true;
1906 }
1907 if ((address + (size * count)) & 3) {
1908 if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
1909 /* Don't double-swap if buffer start/end are within the same word */
1910 } else {
1911 buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
1912 &albuff[addrend_al - addrstart_al - 4], 4);
1913 }
1914 }
1915 }
1916 /* Copy data to be written into the aligned buffer (in host-endianness) */
1917 memcpy(&albuff[address & 3], buffer, size * count);
1918 /* Now we can write albuff in aligned uint32s. */
1919 }
1920
1921 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1922 buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
1923
1924 /* Write start address to A3 */
1925 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1926 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1927 /* Write the aligned buffer */
1928 if (xtensa->probe_lsddr32p != 0) {
1929 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1930 if (i == 0) {
1931 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
1932 xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
1933 } else {
1934 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
1935 }
1936 }
1937 } else {
1938 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1939 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1940 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
1941 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
1942 xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1943 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1944 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1945 }
1946 }
1947
1948 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1949 if (res == ERROR_OK) {
1950 bool prev_suppress = xtensa->suppress_dsr_errors;
1951 xtensa->suppress_dsr_errors = true;
1952 res = xtensa_core_status_check(target);
1953 if (xtensa->probe_lsddr32p == -1)
1954 xtensa->probe_lsddr32p = 1;
1955 xtensa->suppress_dsr_errors = prev_suppress;
1956 }
1957 if (res != ERROR_OK) {
1958 if (xtensa->probe_lsddr32p != 0) {
1959 /* Disable fast memory access instructions and retry before reporting an error */
1960 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1961 xtensa->probe_lsddr32p = 0;
1962 res = xtensa_write_memory(target, address, size, count, buffer);
1963 } else {
1964 LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
1965 count * size, address);
1966 }
1967 } else {
1968 /* Invalidate ICACHE, writeback DCACHE if present */
1969 uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
1970 uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
1971 if (issue_ihi || issue_dhwb) {
1972 uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
1973 uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
1974 uint32_t linesize = MIN(ilinesize, dlinesize);
1975 uint32_t off = 0;
1976 adr = addrstart_al;
1977
1978 while ((adr + off) < addrend_al) {
1979 if (off == 0) {
1980 /* Write start address to A3 */
1981 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr);
1982 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1983 }
1984 if (issue_ihi)
1985 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
1986 if (issue_dhwb)
1987 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
1988 off += linesize;
1989 if (off > 1020) {
1990 /* IHI, DHWB have 8-bit immediate operands (0..1020) */
1991 adr += off;
1992 off = 0;
1993 }
1994 }
1995
1996 /* Execute cache WB/INV instructions */
1997 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1998 xtensa_core_status_check(target);
1999 if (res != ERROR_OK)
2000 LOG_TARGET_ERROR(target,
2001 "Error issuing cache writeback/invaldate instruction(s): %d",
2002 res);
2003 }
2004 }
2005 if (albuff != buffer)
2006 free(albuff);
2007
2008 return res;
2009 }
2010
2011 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2012 {
2013 /* xtensa_write_memory can handle everything. Just pass on to that. */
2014 return xtensa_write_memory(target, address, 1, count, buffer);
2015 }
2016
2017 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2018 {
2019 LOG_WARNING("not implemented yet");
2020 return ERROR_FAIL;
2021 }
2022
/**
 * Periodic poll handler: refreshes debug-module power/core status, detects
 * debug-controller and core resets, updates the OpenOCD target state,
 * derives target->debug_reason from DEBUGCAUSE when a halt is detected,
 * and watches for TRAX trace completion.
 */
int xtensa_poll(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return ERROR_TARGET_NOT_EXAMINED;
	}

	/* Read power status; the mask passed in is also the set of sticky
	 * *WASRESET bits to clear (see the "clear" field in the debug log) */
	int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET(xtensa) |
		PWRSTAT_COREWASRESET(xtensa));
	if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
		/* NOTE(review): the mask is printed with %08lx — confirm the PWRSTAT_*
		 * macros expand to a long-sized value on all supported hosts */
		LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
			xtensa->dbg_mod.power_status.stat,
			PWRSTAT_DEBUGWASRESET(xtensa) | PWRSTAT_COREWASRESET(xtensa),
			xtensa->dbg_mod.power_status.stath);
	if (res != ERROR_OK)
		return res;

	if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
		LOG_TARGET_INFO(target, "Debug controller was reset.");
		/* Re-apply the SMP break configuration after a debug controller reset */
		res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
		if (res != ERROR_OK)
			return res;
	}
	if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
		LOG_TARGET_INFO(target, "Core was reset.");
	xtensa_dm_power_status_cache(&xtensa->dbg_mod);
	/* Enable JTAG, set reset if needed */
	res = xtensa_wakeup(target);
	if (res != ERROR_OK)
		return res;

	uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
	res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
		LOG_TARGET_DEBUG(target,
			"DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
			prev_dsr,
			xtensa->dbg_mod.core_status.dsr);
	if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET(xtensa)) {
		/* if RESET state is persistent */
		target->state = TARGET_RESET;
	} else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
		LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
			xtensa->dbg_mod.core_status.dsr,
			xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
		target->state = TARGET_UNKNOWN;
		/* Give the core a number of polls to come back online before
		 * marking the target unexamined */
		if (xtensa->come_online_probes_num == 0)
			target->examined = false;
		else
			xtensa->come_online_probes_num--;
	} else if (xtensa_is_stopped(target)) {
		if (target->state != TARGET_HALTED) {
			enum target_state oldstate = target->state;
			target->state = TARGET_HALTED;
			/* Examine why the target has been halted */
			target->debug_reason = DBG_REASON_DBGRQ;
			xtensa_fetch_all_regs(target);
			/* When setting debug reason DEBUGCAUSE events have the following
			 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
			/* Watchpoint and breakpoint events at the same time results in special
			 * debug reason: DBG_REASON_WPTANDBKPT. */
			uint32_t halt_cause = xtensa_cause_get(target);
			/* TODO: Add handling of DBG_REASON_EXC_CATCH */
			if (halt_cause & DEBUGCAUSE_IC)
				target->debug_reason = DBG_REASON_SINGLESTEP;
			if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
				if (halt_cause & DEBUGCAUSE_DB)
					target->debug_reason = DBG_REASON_WPTANDBKPT;
				else
					target->debug_reason = DBG_REASON_BREAKPOINT;
			} else if (halt_cause & DEBUGCAUSE_DB) {
				target->debug_reason = DBG_REASON_WATCHPOINT;
			}
			LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
				", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
				xtensa_reg_get(target, XT_REG_IDX_PC),
				target->debug_reason,
				oldstate);
			LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
				halt_cause,
				xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
				xtensa->dbg_mod.core_status.dsr);
			/* Clear the pending/latched debug interrupt status bits in the DSR */
			xtensa_dm_core_status_clear(
				&xtensa->dbg_mod,
				OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
				OCDDSR_DEBUGINTTRAX |
				OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
		}
	} else {
		target->debug_reason = DBG_REASON_NOTHALTED;
		if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
			target->state = TARGET_RUNNING;
			target->debug_reason = DBG_REASON_NOTHALTED;
		}
	}
	if (xtensa->trace_active) {
		/* Detect if tracing was active but has stopped. */
		struct xtensa_trace_status trace_status;
		res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
		if (res == ERROR_OK) {
			if (!(trace_status.stat & TRAXSTAT_TRACT)) {
				LOG_INFO("Detected end of trace.");
				if (trace_status.stat & TRAXSTAT_PCMTG)
					LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
				if (trace_status.stat & TRAXSTAT_PTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
				if (trace_status.stat & TRAXSTAT_CTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
				xtensa->trace_active = false;
			}
		}
	}
	return ERROR_OK;
}
2140
2141 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2142 {
2143 struct xtensa *xtensa = target_to_xtensa(target);
2144 unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
2145 unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2146 uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2147 uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2148 unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2149 unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
2150 int ret;
2151
2152 if (size > icache_line_size)
2153 return ERROR_FAIL;
2154
2155 if (issue_ihi || issue_dhwbi) {
2156 /* We're going to use A3 here */
2157 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
2158
2159 /* Write start address to A3 and invalidate */
2160 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2161 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2162 LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
2163 if (issue_dhwbi) {
2164 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
2165 if (!same_dc_line) {
2166 LOG_TARGET_DEBUG(target,
2167 "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2168 address + 4);
2169 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
2170 }
2171 }
2172 if (issue_ihi) {
2173 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
2174 if (!same_ic_line) {
2175 LOG_TARGET_DEBUG(target,
2176 "IHI second icache line for address "TARGET_ADDR_FMT,
2177 address + 4);
2178 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
2179 }
2180 }
2181
2182 /* Execute invalidate instructions */
2183 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2184 xtensa_core_status_check(target);
2185 if (ret != ERROR_OK) {
2186 LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
2187 return ret;
2188 }
2189 }
2190
2191 /* Write new instructions to memory */
2192 ret = target_write_buffer(target, address, size, buffer);
2193 if (ret != ERROR_OK) {
2194 LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2195 return ret;
2196 }
2197
2198 if (issue_dhwbi) {
2199 /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2200 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2201 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2202 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
2203 LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2204 if (!same_dc_line) {
2205 LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2206 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
2207 }
2208
2209 /* Execute invalidate instructions */
2210 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2211 xtensa_core_status_check(target);
2212 }
2213
2214 /* TODO: Handle L2 cache if present */
2215 return ret;
2216 }
2217
2218 static int xtensa_sw_breakpoint_add(struct target *target,
2219 struct breakpoint *breakpoint,
2220 struct xtensa_sw_breakpoint *sw_bp)
2221 {
2222 struct xtensa *xtensa = target_to_xtensa(target);
2223 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2224 if (ret != ERROR_OK) {
2225 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2226 return ret;
2227 }
2228
2229 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2230 sw_bp->oocd_bp = breakpoint;
2231
2232 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2233
2234 /* Underlying memory write will convert instruction endianness, don't do that here */
2235 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2236 if (ret != ERROR_OK) {
2237 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2238 return ret;
2239 }
2240
2241 return ERROR_OK;
2242 }
2243
2244 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2245 {
2246 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2247 if (ret != ERROR_OK) {
2248 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2249 return ret;
2250 }
2251 sw_bp->oocd_bp = NULL;
2252 return ERROR_OK;
2253 }
2254
2255 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2256 {
2257 struct xtensa *xtensa = target_to_xtensa(target);
2258 unsigned int slot;
2259
2260 if (breakpoint->type == BKPT_SOFT) {
2261 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2262 if (!xtensa->sw_brps[slot].oocd_bp ||
2263 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2264 break;
2265 }
2266 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2267 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2268 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2269 }
2270 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2271 if (ret != ERROR_OK) {
2272 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2273 return ret;
2274 }
2275 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2276 slot,
2277 breakpoint->address);
2278 return ERROR_OK;
2279 }
2280
2281 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2282 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2283 break;
2284 }
2285 if (slot == xtensa->core_config->debug.ibreaks_num) {
2286 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2287 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2288 }
2289
2290 xtensa->hw_brps[slot] = breakpoint;
2291 /* We will actually write the breakpoints when we resume the target. */
2292 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2293 slot,
2294 breakpoint->address);
2295
2296 return ERROR_OK;
2297 }
2298
2299 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2300 {
2301 struct xtensa *xtensa = target_to_xtensa(target);
2302 unsigned int slot;
2303
2304 if (breakpoint->type == BKPT_SOFT) {
2305 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2306 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2307 break;
2308 }
2309 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2310 LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2311 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2312 }
2313 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2314 if (ret != ERROR_OK) {
2315 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2316 return ret;
2317 }
2318 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2319 return ERROR_OK;
2320 }
2321
2322 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2323 if (xtensa->hw_brps[slot] == breakpoint)
2324 break;
2325 }
2326 if (slot == xtensa->core_config->debug.ibreaks_num) {
2327 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2328 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2329 }
2330 xtensa->hw_brps[slot] = NULL;
2331 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2332 return ERROR_OK;
2333 }
2334
2335 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2336 {
2337 struct xtensa *xtensa = target_to_xtensa(target);
2338 unsigned int slot;
2339 xtensa_reg_val_t dbreakcval;
2340
2341 if (target->state != TARGET_HALTED) {
2342 LOG_TARGET_WARNING(target, "target not halted");
2343 return ERROR_TARGET_NOT_HALTED;
2344 }
2345
2346 if (watchpoint->mask != ~(uint32_t)0) {
2347 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2348 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2349 }
2350
2351 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2352 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2353 break;
2354 }
2355 if (slot == xtensa->core_config->debug.dbreaks_num) {
2356 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2357 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2358 }
2359
2360 /* Figure out value for dbreakc5..0
2361 * It's basically 0x3F with an incremental bit removed from the LSB for each extra length power of 2. */
2362 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2363 !IS_PWR_OF_2(watchpoint->length) ||
2364 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2365 LOG_TARGET_WARNING(
2366 target,
2367 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2368 " not supported by hardware.",
2369 watchpoint->length,
2370 watchpoint->address);
2371 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2372 }
2373 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2374
2375 if (watchpoint->rw == WPT_READ)
2376 dbreakcval |= BIT(30);
2377 if (watchpoint->rw == WPT_WRITE)
2378 dbreakcval |= BIT(31);
2379 if (watchpoint->rw == WPT_ACCESS)
2380 dbreakcval |= BIT(30) | BIT(31);
2381
2382 /* Write DBREAKA[slot] and DBCREAKC[slot] */
2383 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2384 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2385 xtensa->hw_wps[slot] = watchpoint;
2386 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2387 watchpoint->address);
2388 return ERROR_OK;
2389 }
2390
2391 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2392 {
2393 struct xtensa *xtensa = target_to_xtensa(target);
2394 unsigned int slot;
2395
2396 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2397 if (xtensa->hw_wps[slot] == watchpoint)
2398 break;
2399 }
2400 if (slot == xtensa->core_config->debug.dbreaks_num) {
2401 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2402 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2403 }
2404 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2405 xtensa->hw_wps[slot] = NULL;
2406 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2407 watchpoint->address);
2408 return ERROR_OK;
2409 }
2410
/* Build the OpenOCD register cache for this Xtensa target.
 *
 * The cache is assembled from two descriptor lists: the fixed core register
 * set (xtensa_regs) and the configuration-derived optional registers
 * (xtensa->optregs).  Also allocates:
 *  - empty_regs: placeholder entries used to answer requests for unknown
 *    debugger register numbers,
 *  - contiguous_regs_list: reg pointers ordered per the contiguous
 *    descriptor list (only when the register map is contiguous),
 *  - algo_context_backup: per-register value storage used to save/restore
 *    register context around algorithm execution.
 *
 * Returns ERROR_OK on success; on any failure releases all partial
 * allocations and returns ERROR_FAIL.
 */
static int xtensa_build_reg_cache(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	unsigned int last_dbreg_num = 0;

	if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
		LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
			xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);

	struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));

	if (!reg_cache) {
		LOG_ERROR("Failed to alloc reg cache!");
		return ERROR_FAIL;
	}
	reg_cache->name = "Xtensa registers";
	reg_cache->next = NULL;
	/* Init reglist */
	unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
	struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
	if (!reg_list) {
		LOG_ERROR("Failed to alloc reg list!");
		goto fail;
	}
	xtensa->dbregs_num = 0;
	unsigned int didx = 0;
	/* Pass 0 populates the fixed core registers, pass 1 the optional ones */
	for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
		struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
		unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
		for (unsigned int i = 0; i < listsize; i++, didx++) {
			reg_list[didx].exist = rlist[i].exist;
			reg_list[didx].name = rlist[i].name;
			reg_list[didx].size = 32;
			reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/);	/* make Clang Static Analyzer happy */
			if (!reg_list[didx].value) {
				LOG_ERROR("Failed to alloc reg list value!");
				goto fail;
			}
			reg_list[didx].dirty = false;
			reg_list[didx].valid = false;
			reg_list[didx].type = &xtensa_reg_type;
			reg_list[didx].arch_info = xtensa;
			/* Track the highest debugger register number in use */
			if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
				last_dbreg_num = rlist[i].dbreg_num;

			if (xtensa_extra_debug_log) {
				LOG_TARGET_DEBUG(target,
					"POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
					reg_list[didx].name,
					whichlist,
					reg_list[didx].exist,
					didx,
					rlist[i].type,
					rlist[i].dbreg_num);
			}
		}
	}

	xtensa->dbregs_num = last_dbreg_num + 1;
	reg_cache->reg_list = reg_list;
	reg_cache->num_regs = reg_list_size;

	LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
		xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);

	/* Construct empty-register list for handling unknown register requests */
	xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
	if (!xtensa->empty_regs) {
		LOG_TARGET_ERROR(target, "ERROR: Out of memory");
		goto fail;
	}
	for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
		/* 8 bytes: '?' + "0x" + 4 hex digits + NUL terminator */
		xtensa->empty_regs[i].name = calloc(8, sizeof(char));
		if (!xtensa->empty_regs[i].name) {
			LOG_TARGET_ERROR(target, "ERROR: Out of memory");
			goto fail;
		}
		sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
		xtensa->empty_regs[i].size = 32;
		xtensa->empty_regs[i].type = &xtensa_reg_type;
		xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/);	/* make Clang Static Analyzer happy */
		if (!xtensa->empty_regs[i].value) {
			LOG_ERROR("Failed to alloc empty reg list value!");
			goto fail;
		}
		xtensa->empty_regs[i].arch_info = xtensa;
	}

	/* Construct contiguous register list from contiguous descriptor list */
	if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
		xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
		if (!xtensa->contiguous_regs_list) {
			LOG_TARGET_ERROR(target, "ERROR: Out of memory");
			goto fail;
		}
		for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
			unsigned int j;
			/* Match each contiguous descriptor to its cache entry by name */
			for (j = 0; j < reg_cache->num_regs; j++) {
				if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
					xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
					LOG_TARGET_DEBUG(target,
						"POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
						xtensa->contiguous_regs_list[i]->name,
						xtensa->contiguous_regs_desc[i]->dbreg_num);
					break;
				}
			}
			if (j == reg_cache->num_regs)
				LOG_TARGET_WARNING(target, "contiguous register %s not found",
					xtensa->contiguous_regs_desc[i]->name);
		}
	}

	/* Per-register backing store for algorithm context save/restore */
	xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
	if (!xtensa->algo_context_backup) {
		LOG_ERROR("Failed to alloc mem for algorithm context backup!");
		goto fail;
	}
	for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
		struct reg *reg = &reg_cache->reg_list[i];
		xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
		if (!xtensa->algo_context_backup[i]) {
			LOG_ERROR("Failed to alloc mem for algorithm context!");
			goto fail;
		}
	}
	xtensa->core_cache = reg_cache;
	if (cache_p)
		*cache_p = reg_cache;
	return ERROR_OK;

fail:
	/* Release everything allocated so far.  The arrays are calloc'ed, so
	 * entries never populated hold NULL and free(NULL) is harmless. */
	if (reg_list) {
		for (unsigned int i = 0; i < reg_list_size; i++)
			free(reg_list[i].value);
		free(reg_list);
	}
	if (xtensa->empty_regs) {
		for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
			free((void *)xtensa->empty_regs[i].name);
			free(xtensa->empty_regs[i].value);
		}
		free(xtensa->empty_regs);
	}
	if (xtensa->algo_context_backup) {
		for (unsigned int i = 0; i < reg_cache->num_regs; i++)
			free(xtensa->algo_context_backup[i]);
		free(xtensa->algo_context_backup);
	}
	free(reg_cache);

	return ERROR_FAIL;
}
2565
2566 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
2567 {
2568 struct xtensa *xtensa = target_to_xtensa(target);
2569 int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
2570 /* Process op[] list */
2571 while (opstr && (*opstr == ':')) {
2572 uint8_t ops[32];
2573 unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
2574 if (oplen > 32) {
2575 LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
2576 break;
2577 }
2578 unsigned int i = 0;
2579 while ((i < oplen) && opstr && (*opstr == ':'))
2580 ops[i++] = strtoul(opstr + 1, &opstr, 16);
2581 if (i != oplen) {
2582 LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
2583 break;
2584 }
2585
2586 char insn_buf[128];
2587 sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
2588 for (i = 0; i < oplen; i++)
2589 sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
2590 LOG_TARGET_DEBUG(target, "%s", insn_buf);
2591 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
2592 status = ERROR_OK;
2593 }
2594 return status;
2595 }
2596
2597 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
2598 {
2599 struct xtensa *xtensa = target_to_xtensa(target);
2600 bool iswrite = (packet[0] == 'Q');
2601 enum xtensa_qerr_e error;
2602
2603 /* Read/write TIE register. Requires spill location.
2604 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
2605 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
2606 */
2607 if (!(xtensa->spill_buf)) {
2608 LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
2609 error = XT_QERR_FAIL;
2610 goto xtensa_gdbqc_qxtreg_fail;
2611 }
2612
2613 char *delim;
2614 uint32_t regnum = strtoul(packet + 6, &delim, 16);
2615 if (*delim != ':') {
2616 LOG_ERROR("Malformed qxtreg packet");
2617 error = XT_QERR_INVAL;
2618 goto xtensa_gdbqc_qxtreg_fail;
2619 }
2620 uint32_t reglen = strtoul(delim + 1, &delim, 16);
2621 if (*delim != ':') {
2622 LOG_ERROR("Malformed qxtreg packet");
2623 error = XT_QERR_INVAL;
2624 goto xtensa_gdbqc_qxtreg_fail;
2625 }
2626 uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
2627 memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
2628 LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
2629 if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
2630 LOG_ERROR("TIE register too large");
2631 error = XT_QERR_MEM;
2632 goto xtensa_gdbqc_qxtreg_fail;
2633 }
2634
2635 /* (1) Save spill memory, (1.5) [if write then store value to spill location],
2636 * (2) read old a4, (3) write spill address to a4.
2637 * NOTE: ensure a4 is restored properly by all error handling logic
2638 */
2639 unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
2640 int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
2641 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2642 if (status != ERROR_OK) {
2643 LOG_ERROR("Spill memory save");
2644 error = XT_QERR_MEM;
2645 goto xtensa_gdbqc_qxtreg_fail;
2646 }
2647 if (iswrite) {
2648 /* Extract value and store in spill memory */
2649 unsigned int b = 0;
2650 char *valbuf = strchr(delim, '=');
2651 if (!(valbuf && (*valbuf == '='))) {
2652 LOG_ERROR("Malformed Qxtreg packet");
2653 error = XT_QERR_INVAL;
2654 goto xtensa_gdbqc_qxtreg_fail;
2655 }
2656 valbuf++;
2657 while (*valbuf && *(valbuf + 1)) {
2658 char bytestr[3] = { 0, 0, 0 };
2659 strncpy(bytestr, valbuf, 2);
2660 regbuf[b++] = strtoul(bytestr, NULL, 16);
2661 valbuf += 2;
2662 }
2663 if (b != reglen) {
2664 LOG_ERROR("Malformed Qxtreg packet");
2665 error = XT_QERR_INVAL;
2666 goto xtensa_gdbqc_qxtreg_fail;
2667 }
2668 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2669 reglen / memop_size, regbuf);
2670 if (status != ERROR_OK) {
2671 LOG_ERROR("TIE value store");
2672 error = XT_QERR_MEM;
2673 goto xtensa_gdbqc_qxtreg_fail;
2674 }
2675 }
2676 xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
2677 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, xtensa->spill_loc);
2678 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2679
2680 int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
2681
2682 /* Restore a4 but not yet spill memory. Execute it all... */
2683 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, orig_a4);
2684 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2685 status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2686 if (status != ERROR_OK) {
2687 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
2688 tieop_status = status;
2689 }
2690 status = xtensa_core_status_check(target);
2691 if (status != ERROR_OK) {
2692 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
2693 tieop_status = status;
2694 }
2695
2696 if (tieop_status == ERROR_OK) {
2697 if (iswrite) {
2698 /* TIE write succeeded; send OK */
2699 strcpy(*response_p, "OK");
2700 } else {
2701 /* TIE read succeeded; copy result from spill memory */
2702 status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
2703 if (status != ERROR_OK) {
2704 LOG_TARGET_ERROR(target, "TIE result read");
2705 tieop_status = status;
2706 }
2707 unsigned int i;
2708 for (i = 0; i < reglen; i++)
2709 sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
2710 *(*response_p + 2 * i) = '\0';
2711 LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
2712 }
2713 }
2714
2715 /* Restore spill memory first, then report any previous errors */
2716 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2717 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2718 if (status != ERROR_OK) {
2719 LOG_ERROR("Spill memory restore");
2720 error = XT_QERR_MEM;
2721 goto xtensa_gdbqc_qxtreg_fail;
2722 }
2723 if (tieop_status != ERROR_OK) {
2724 LOG_ERROR("TIE execution");
2725 error = XT_QERR_FAIL;
2726 goto xtensa_gdbqc_qxtreg_fail;
2727 }
2728 return ERROR_OK;
2729
2730 xtensa_gdbqc_qxtreg_fail:
2731 strcpy(*response_p, xt_qerr[error].chrval);
2732 return xt_qerr[error].intval;
2733 }
2734
2735 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
2736 {
2737 struct xtensa *xtensa = target_to_xtensa(target);
2738 enum xtensa_qerr_e error;
2739 if (!packet || !response_p) {
2740 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
2741 return ERROR_FAIL;
2742 }
2743
2744 *response_p = xtensa->qpkt_resp;
2745 if (strncmp(packet, "qxtn", 4) == 0) {
2746 strcpy(*response_p, "OpenOCD");
2747 return ERROR_OK;
2748 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
2749 return ERROR_OK;
2750 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
2751 /* Confirm host cache params match core .cfg file */
2752 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
2753 &xtensa->core_config->icache : &xtensa->core_config->dcache;
2754 unsigned int line_size = 0, size = 0, way_count = 0;
2755 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
2756 if ((cachep->line_size != line_size) ||
2757 (cachep->size != size) ||
2758 (cachep->way_count != way_count)) {
2759 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2760 cachep == &xtensa->core_config->icache ? 'I' : 'D');
2761 }
2762 strcpy(*response_p, "OK");
2763 return ERROR_OK;
2764 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
2765 /* Confirm host IRAM/IROM params match core .cfg file */
2766 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
2767 &xtensa->core_config->iram : &xtensa->core_config->irom;
2768 unsigned int base = 0, size = 0, i;
2769 char *pkt = (char *)&packet[7];
2770 do {
2771 pkt++;
2772 size = strtoul(pkt, &pkt, 16);
2773 pkt++;
2774 base = strtoul(pkt, &pkt, 16);
2775 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
2776 for (i = 0; i < memp->count; i++) {
2777 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
2778 break;
2779 }
2780 if (i == memp->count) {
2781 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
2782 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
2783 break;
2784 }
2785 for (i = 0; i < 11; i++) {
2786 pkt++;
2787 strtoul(pkt, &pkt, 16);
2788 }
2789 } while (pkt && (pkt[0] == ','));
2790 strcpy(*response_p, "OK");
2791 return ERROR_OK;
2792 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
2793 /* Confirm host EXCM_LEVEL matches core .cfg file */
2794 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
2795 if (!xtensa->core_config->high_irq.enabled ||
2796 (excm_level != xtensa->core_config->high_irq.excm_level))
2797 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
2798 strcpy(*response_p, "OK");
2799 return ERROR_OK;
2800 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
2801 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
2802 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
2803 strcpy(*response_p, "OK");
2804 return ERROR_OK;
2805 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
2806 char *delim;
2807 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
2808 if (*delim != ':') {
2809 LOG_ERROR("Malformed Qxtspill packet");
2810 error = XT_QERR_INVAL;
2811 goto xtensa_gdb_query_custom_fail;
2812 }
2813 xtensa->spill_loc = spill_loc;
2814 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
2815 if (xtensa->spill_buf)
2816 free(xtensa->spill_buf);
2817 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
2818 if (!xtensa->spill_buf) {
2819 LOG_ERROR("Spill buf alloc");
2820 error = XT_QERR_MEM;
2821 goto xtensa_gdb_query_custom_fail;
2822 }
2823 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
2824 strcpy(*response_p, "OK");
2825 return ERROR_OK;
2826 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
2827 return xtensa_gdbqc_qxtreg(target, packet, response_p);
2828 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
2829 (strncmp(packet, "qxtftie", 7) == 0) ||
2830 (strncmp(packet, "qxtstie", 7) == 0)) {
2831 /* Return empty string to indicate trace, TIE wire debug are unsupported */
2832 strcpy(*response_p, "");
2833 return ERROR_OK;
2834 }
2835
2836 /* Warn for all other queries, but do not return errors */
2837 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
2838 strcpy(*response_p, "");
2839 return ERROR_OK;
2840
2841 xtensa_gdb_query_custom_fail:
2842 strcpy(*response_p, xt_qerr[error].chrval);
2843 return xt_qerr[error].intval;
2844 }
2845
2846 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2847 const struct xtensa_debug_module_config *dm_cfg)
2848 {
2849 target->arch_info = xtensa;
2850 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2851 xtensa->target = target;
2852 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2853
2854 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
2855 if (!xtensa->core_config) {
2856 LOG_ERROR("Xtensa configuration alloc failed\n");
2857 return ERROR_FAIL;
2858 }
2859
2860 /* Default cache settings are disabled with 1 way */
2861 xtensa->core_config->icache.way_count = 1;
2862 xtensa->core_config->dcache.way_count = 1;
2863
2864 /* chrval: AR3/AR4 register names will change with window mapping.
2865 * intval: tracks whether scratch register was set through gdb P packet.
2866 */
2867 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
2868 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
2869 if (!xtensa->scratch_ars[s].chrval) {
2870 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
2871 free(xtensa->scratch_ars[f].chrval);
2872 free(xtensa->core_config);
2873 LOG_ERROR("Xtensa scratch AR alloc failed\n");
2874 return ERROR_FAIL;
2875 }
2876 xtensa->scratch_ars[s].intval = false;
2877 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
2878 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
2879 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
2880 }
2881
2882 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2883 }
2884
2885 void xtensa_set_permissive_mode(struct target *target, bool state)
2886 {
2887 target_to_xtensa(target)->permissive_mode = state;
2888 }
2889
2890 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2891 {
2892 struct xtensa *xtensa = target_to_xtensa(target);
2893
2894 xtensa->come_online_probes_num = 3;
2895 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
2896 if (!xtensa->hw_brps) {
2897 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2898 return ERROR_FAIL;
2899 }
2900 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
2901 if (!xtensa->hw_wps) {
2902 free(xtensa->hw_brps);
2903 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2904 return ERROR_FAIL;
2905 }
2906 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2907 if (!xtensa->sw_brps) {
2908 free(xtensa->hw_brps);
2909 free(xtensa->hw_wps);
2910 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2911 return ERROR_FAIL;
2912 }
2913
2914 xtensa->spill_loc = 0xffffffff;
2915 xtensa->spill_bytes = 0;
2916 xtensa->spill_buf = NULL;
2917 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
2918
2919 return xtensa_build_reg_cache(target);
2920 }
2921
2922 static void xtensa_free_reg_cache(struct target *target)
2923 {
2924 struct xtensa *xtensa = target_to_xtensa(target);
2925 struct reg_cache *cache = xtensa->core_cache;
2926
2927 if (cache) {
2928 register_unlink_cache(&target->reg_cache, cache);
2929 for (unsigned int i = 0; i < cache->num_regs; i++) {
2930 free(xtensa->algo_context_backup[i]);
2931 free(cache->reg_list[i].value);
2932 }
2933 free(xtensa->algo_context_backup);
2934 free(cache->reg_list);
2935 free(cache);
2936 }
2937 xtensa->core_cache = NULL;
2938 xtensa->algo_context_backup = NULL;
2939
2940 if (xtensa->empty_regs) {
2941 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2942 free((void *)xtensa->empty_regs[i].name);
2943 free(xtensa->empty_regs[i].value);
2944 }
2945 free(xtensa->empty_regs);
2946 }
2947 xtensa->empty_regs = NULL;
2948 if (xtensa->optregs) {
2949 for (unsigned int i = 0; i < xtensa->num_optregs; i++)
2950 free((void *)xtensa->optregs[i].name);
2951 free(xtensa->optregs);
2952 }
2953 xtensa->optregs = NULL;
2954 }
2955
2956 void xtensa_target_deinit(struct target *target)
2957 {
2958 struct xtensa *xtensa = target_to_xtensa(target);
2959
2960 LOG_DEBUG("start");
2961
2962 if (target_was_examined(target)) {
2963 int ret = xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, OCDDCR_ENABLEOCD);
2964 if (ret != ERROR_OK) {
2965 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2966 return;
2967 }
2968 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2969 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2970 if (ret != ERROR_OK) {
2971 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2972 return;
2973 }
2974 xtensa_dm_deinit(&xtensa->dbg_mod);
2975 }
2976 xtensa_free_reg_cache(target);
2977 free(xtensa->hw_brps);
2978 free(xtensa->hw_wps);
2979 free(xtensa->sw_brps);
2980 if (xtensa->spill_buf) {
2981 free(xtensa->spill_buf);
2982 xtensa->spill_buf = NULL;
2983 }
2984 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
2985 free(xtensa->scratch_ars[s].chrval);
2986 free(xtensa->core_config);
2987 }
2988
/* Report the gdb architecture name for Xtensa targets. */
const char *xtensa_get_gdb_arch(struct target *target)
{
	static const char * const arch_name = "xtensa";

	return arch_name;
}
2993
/* exe <ascii-encoded hexadecimal instruction bytes>
 * Execute an arbitrary instruction sequence on the target, supplied as an
 * even-length hex string of fewer than 64 characters (up to 31 bytes).
 */
static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Process ascii-encoded hex byte string */
	const char *parm = CMD_ARGV[0];
	unsigned int parm_len = strlen(parm);
	if ((parm_len >= 64) || (parm_len & 1)) {
		LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
		return ERROR_FAIL;
	}

	/* Decode two hex characters per opcode byte */
	uint8_t ops[32];
	memset(ops, 0, 32);
	unsigned int oplen = parm_len / 2;
	char encoded_byte[3] = { 0, 0, 0 };
	for (unsigned int i = 0; i < oplen; i++) {
		encoded_byte[0] = *parm++;
		encoded_byte[1] = *parm++;
		ops[i] = strtoul(encoded_byte, NULL, 16);
	}

	/* GDB must handle state save/restore.
	 * Flush reg cache in case spill location is in an AR
	 * Update CPENABLE only for this execution; later restore cached copy
	 * Keep a copy of exccause in case executed code triggers an exception
	 */
	int status = xtensa_write_dirty_registers(target);
	if (status != ERROR_OK) {
		LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
		return ERROR_FAIL;
	}
	xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
	xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
	xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
	/* Enable all coprocessors via CPENABLE, staging the value through a3 */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
			xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
	/* Put the saved a3 value back */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));

	/* Queue instruction list and execute everything */
	LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
	xtensa_queue_exec_ins_wide(xtensa, ops, oplen);	/* Handles endian-swap */
	status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (status != ERROR_OK)
		LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
	status = xtensa_core_status_check(target);
	if (status != ERROR_OK)
		LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);

	/* Reread register cache and restore saved regs after instruction execution */
	if (xtensa_fetch_all_regs(target) != ERROR_OK)
		LOG_TARGET_ERROR(target, "%s: Failed to fetch register cache (post-exec).", target_name(target));
	xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
	xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	return status;
}
3057
3058 COMMAND_HANDLER(xtensa_cmd_exe)
3059 {
3060 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3061 }
3062
3063 /* xtdef <name> */
3064 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3065 {
3066 if (CMD_ARGC != 1)
3067 return ERROR_COMMAND_SYNTAX_ERROR;
3068
3069 const char *core_name = CMD_ARGV[0];
3070 if (strcasecmp(core_name, "LX") == 0) {
3071 xtensa->core_config->core_type = XT_LX;
3072 } else {
3073 LOG_ERROR("xtdef [LX]\n");
3074 return ERROR_COMMAND_SYNTAX_ERROR;
3075 }
3076 return ERROR_OK;
3077 }
3078
3079 COMMAND_HANDLER(xtensa_cmd_xtdef)
3080 {
3081 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3082 target_to_xtensa(get_current_target(CMD_CTX)));
3083 }
3084
/* Check that an xtopt value lies within [min..max]; log an error and
 * return false if it does not.
 *
 * Fixes: 'opt' is now const-qualified (every caller passes a string
 * literal, and the string is never modified); trailing '\n' removed from
 * the LOG_ERROR message per OpenOCD logging style.
 */
static inline bool xtensa_cmd_xtopt_legal_val(const char *opt, int val, int min, int max)
{
	if ((val < min) || (val > max)) {
		LOG_ERROR("xtopt %s (%d) out of range [%d..%d]", opt, val, min, max);
		return false;
	}
	return true;
}
3093
3094 /* xtopt <name> <value> */
3095 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3096 {
3097 if (CMD_ARGC != 2)
3098 return ERROR_COMMAND_SYNTAX_ERROR;
3099
3100 const char *opt_name = CMD_ARGV[0];
3101 int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3102 if (strcasecmp(opt_name, "arnum") == 0) {
3103 if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3104 return ERROR_COMMAND_ARGUMENT_INVALID;
3105 xtensa->core_config->aregs_num = opt_val;
3106 } else if (strcasecmp(opt_name, "windowed") == 0) {
3107 if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3108 return ERROR_COMMAND_ARGUMENT_INVALID;
3109 xtensa->core_config->windowed = opt_val;
3110 } else if (strcasecmp(opt_name, "cpenable") == 0) {
3111 if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3112 return ERROR_COMMAND_ARGUMENT_INVALID;
3113 xtensa->core_config->coproc = opt_val;
3114 } else if (strcasecmp(opt_name, "exceptions") == 0) {
3115 if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3116 return ERROR_COMMAND_ARGUMENT_INVALID;
3117 xtensa->core_config->exceptions = opt_val;
3118 } else if (strcasecmp(opt_name, "intnum") == 0) {
3119 if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3120 return ERROR_COMMAND_ARGUMENT_INVALID;
3121 xtensa->core_config->irq.enabled = (opt_val > 0);
3122 xtensa->core_config->irq.irq_num = opt_val;
3123 } else if (strcasecmp(opt_name, "hipriints") == 0) {
3124 if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3125 return ERROR_COMMAND_ARGUMENT_INVALID;
3126 xtensa->core_config->high_irq.enabled = opt_val;
3127 } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3128 if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3129 return ERROR_COMMAND_ARGUMENT_INVALID;
3130 if (!xtensa->core_config->high_irq.enabled) {
3131 LOG_ERROR("xtopt excmlevel requires hipriints\n");
3132 return ERROR_COMMAND_ARGUMENT_INVALID;
3133 }
3134 xtensa->core_config->high_irq.excm_level = opt_val;
3135 } else if (strcasecmp(opt_name, "intlevels") == 0) {
3136 if (xtensa->core_config->core_type == XT_LX) {
3137 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3138 return ERROR_COMMAND_ARGUMENT_INVALID;
3139 } else {
3140 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3141 return ERROR_COMMAND_ARGUMENT_INVALID;
3142 }
3143 if (!xtensa->core_config->high_irq.enabled) {
3144 LOG_ERROR("xtopt intlevels requires hipriints\n");
3145 return ERROR_COMMAND_ARGUMENT_INVALID;
3146 }
3147 xtensa->core_config->high_irq.level_num = opt_val;
3148 } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3149 if (xtensa->core_config->core_type == XT_LX) {
3150 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3151 return ERROR_COMMAND_ARGUMENT_INVALID;
3152 } else {
3153 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3154 return ERROR_COMMAND_ARGUMENT_INVALID;
3155 }
3156 xtensa->core_config->debug.enabled = 1;
3157 xtensa->core_config->debug.irq_level = opt_val;
3158 } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3159 if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3160 return ERROR_COMMAND_ARGUMENT_INVALID;
3161 xtensa->core_config->debug.ibreaks_num = opt_val;
3162 } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3163 if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3164 return ERROR_COMMAND_ARGUMENT_INVALID;
3165 xtensa->core_config->debug.dbreaks_num = opt_val;
3166 } else if (strcasecmp(opt_name, "tracemem") == 0) {
3167 if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3168 return ERROR_COMMAND_ARGUMENT_INVALID;
3169 xtensa->core_config->trace.mem_sz = opt_val;
3170 xtensa->core_config->trace.enabled = (opt_val > 0);
3171 } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3172 if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3173 return ERROR_COMMAND_ARGUMENT_INVALID;
3174 xtensa->core_config->trace.reversed_mem_access = opt_val;
3175 } else if (strcasecmp(opt_name, "perfcount") == 0) {
3176 if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3177 return ERROR_COMMAND_ARGUMENT_INVALID;
3178 xtensa->core_config->debug.perfcount_num = opt_val;
3179 } else {
3180 LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3181 return ERROR_OK;
3182 }
3183
3184 return ERROR_OK;
3185 }
3186
3187 COMMAND_HANDLER(xtensa_cmd_xtopt)
3188 {
3189 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3190 target_to_xtensa(get_current_target(CMD_CTX)));
3191 }
3192
3193 /* xtmem <type> [parameters] */
3194 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3195 {
3196 struct xtensa_cache_config *cachep = NULL;
3197 struct xtensa_local_mem_config *memp = NULL;
3198 int mem_access = 0;
3199 bool is_dcache = false;
3200
3201 if (CMD_ARGC == 0) {
3202 LOG_ERROR("xtmem <type> [parameters]\n");
3203 return ERROR_COMMAND_SYNTAX_ERROR;
3204 }
3205
3206 const char *mem_name = CMD_ARGV[0];
3207 if (strcasecmp(mem_name, "icache") == 0) {
3208 cachep = &xtensa->core_config->icache;
3209 } else if (strcasecmp(mem_name, "dcache") == 0) {
3210 cachep = &xtensa->core_config->dcache;
3211 is_dcache = true;
3212 } else if (strcasecmp(mem_name, "l2cache") == 0) {
3213 /* TODO: support L2 cache */
3214 } else if (strcasecmp(mem_name, "l2addr") == 0) {
3215 /* TODO: support L2 cache */
3216 } else if (strcasecmp(mem_name, "iram") == 0) {
3217 memp = &xtensa->core_config->iram;
3218 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3219 } else if (strcasecmp(mem_name, "dram") == 0) {
3220 memp = &xtensa->core_config->dram;
3221 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3222 } else if (strcasecmp(mem_name, "sram") == 0) {
3223 memp = &xtensa->core_config->sram;
3224 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3225 } else if (strcasecmp(mem_name, "irom") == 0) {
3226 memp = &xtensa->core_config->irom;
3227 mem_access = XT_MEM_ACCESS_READ;
3228 } else if (strcasecmp(mem_name, "drom") == 0) {
3229 memp = &xtensa->core_config->drom;
3230 mem_access = XT_MEM_ACCESS_READ;
3231 } else if (strcasecmp(mem_name, "srom") == 0) {
3232 memp = &xtensa->core_config->srom;
3233 mem_access = XT_MEM_ACCESS_READ;
3234 } else {
3235 LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3236 return ERROR_COMMAND_ARGUMENT_INVALID;
3237 }
3238
3239 if (cachep) {
3240 if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
3241 LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
3242 return ERROR_COMMAND_SYNTAX_ERROR;
3243 }
3244 cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3245 cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3246 cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3247 cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3248 strtoul(CMD_ARGV[4], NULL, 0) : 0;
3249 } else if (memp) {
3250 if (CMD_ARGC != 3) {
3251 LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
3252 return ERROR_COMMAND_SYNTAX_ERROR;
3253 }
3254 struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3255 memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3256 memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3257 memcfgp->access = mem_access;
3258 memp->count++;
3259 }
3260
3261 return ERROR_OK;
3262 }
3263
3264 COMMAND_HANDLER(xtensa_cmd_xtmem)
3265 {
3266 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3267 target_to_xtensa(get_current_target(CMD_CTX)));
3268 }
3269
3270 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
3271 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3272 {
3273 if (CMD_ARGC != 4) {
3274 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3275 return ERROR_COMMAND_SYNTAX_ERROR;
3276 }
3277
3278 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3279 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3280 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3281 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3282
3283 if ((nfgseg > 32)) {
3284 LOG_ERROR("<nfgseg> must be within [0..32]\n");
3285 return ERROR_COMMAND_ARGUMENT_INVALID;
3286 } else if (minsegsize & (minsegsize - 1)) {
3287 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3288 return ERROR_COMMAND_ARGUMENT_INVALID;
3289 } else if (lockable > 1) {
3290 LOG_ERROR("<lockable> must be 0 or 1\n");
3291 return ERROR_COMMAND_ARGUMENT_INVALID;
3292 } else if (execonly > 1) {
3293 LOG_ERROR("<execonly> must be 0 or 1\n");
3294 return ERROR_COMMAND_ARGUMENT_INVALID;
3295 }
3296
3297 xtensa->core_config->mpu.enabled = true;
3298 xtensa->core_config->mpu.nfgseg = nfgseg;
3299 xtensa->core_config->mpu.minsegsize = minsegsize;
3300 xtensa->core_config->mpu.lockable = lockable;
3301 xtensa->core_config->mpu.execonly = execonly;
3302 return ERROR_OK;
3303 }
3304
3305 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3306 {
3307 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3308 target_to_xtensa(get_current_target(CMD_CTX)));
3309 }
3310
3311 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
3312 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3313 {
3314 if (CMD_ARGC != 2) {
3315 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3316 return ERROR_COMMAND_SYNTAX_ERROR;
3317 }
3318
3319 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3320 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3321 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3322 LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3323 return ERROR_COMMAND_ARGUMENT_INVALID;
3324 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3325 LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3326 return ERROR_COMMAND_ARGUMENT_INVALID;
3327 }
3328
3329 xtensa->core_config->mmu.enabled = true;
3330 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3331 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3332 return ERROR_OK;
3333 }
3334
3335 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3336 {
3337 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3338 target_to_xtensa(get_current_target(CMD_CTX)));
3339 }
3340
/* xtregs <numregs>
 * xtreg <regname> <regnum>
 *
 * Two forms share this helper:
 *  - one argument ("xtregs"): declare the total register count and allocate
 *    the optional-register table;
 *  - two arguments ("xtreg"): define one register by name and debugger ID,
 *    classifying it as a core register (from xtensa_regs[]) or an extended,
 *    config-specific register. */
COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
{
	if (CMD_ARGC == 1) {
		/* "xtregs <numregs>" form: set total count and allocate tables. */
		int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
		if ((numregs <= 0) || (numregs > UINT16_MAX)) {
			LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* If "xtregfmt ... [numgregs]" already ran, total must cover it. */
		if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
			LOG_ERROR("xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
				numregs, xtensa->genpkt_regs_num);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		xtensa->total_regs_num = numregs;
		xtensa->core_regs_num = 0;
		xtensa->num_optregs = 0;
		/* A little more memory than required, but saves a second initialization pass */
		xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
		if (!xtensa->optregs) {
			LOG_ERROR("Failed to allocate xtensa->optregs!");
			return ERROR_FAIL;
		}
		return ERROR_OK;
	} else if (CMD_ARGC != 2) {
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	/* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
	 * if general register (g-packet) requests or contiguous register maps are supported */
	if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
		xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
		if (!xtensa->contiguous_regs_desc) {
			LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
			return ERROR_FAIL;
		}
	}

	const char *regname = CMD_ARGV[0];
	unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
	if (regnum > UINT16_MAX) {
		LOG_ERROR("<regnum> must be a 16-bit number");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	/* Reject definitions beyond the count declared via "xtregs". */
	if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
		if (xtensa->total_regs_num)
			LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
				regname, regnum,
				xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
		else
			LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
				regname, regnum);
		return ERROR_FAIL;
	}

	/* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
	struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
	bool is_extended_reg = true;
	unsigned int ridx;
	for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
		if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
			/* Flag core register as defined */
			rptr = &xtensa_regs[ridx];
			xtensa->core_regs_num++;
			is_extended_reg = false;
			break;
		}
	}

	rptr->exist = true;
	if (is_extended_reg) {
		/* Register ID, debugger-visible register ID */
		rptr->name = strdup(CMD_ARGV[0]);
		rptr->dbreg_num = regnum;
		rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
		xtensa->num_optregs++;

		/* Register type: classify by the masked bit pattern of regnum. */
		if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
			rptr->type = XT_REG_GENERAL;
		} else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
			rptr->type = XT_REG_USER;
		} else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
			rptr->type = XT_REG_FR;
		} else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
			rptr->type = XT_REG_SPECIAL;
		} else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
			/* WARNING: For these registers, regnum points to the
			 * index of the corresponding ARx registers, NOT to
			 * the processor register number! */
			rptr->type = XT_REG_RELGEN;
			rptr->reg_num += XT_REG_IDX_ARFIRST;
			rptr->dbreg_num += XT_REG_IDX_ARFIRST;
		} else if ((regnum & XT_REG_TIE_MASK) != 0) {
			rptr->type = XT_REG_TIE;
		} else {
			rptr->type = XT_REG_OTHER;
		}

		/* Register flags: registers with side effects on read are
		 * marked no-read so the debugger never fetches them. */
		if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
			(strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
			(strcmp(rptr->name, "intclear") == 0))
			rptr->flags = XT_REGF_NOREAD;
		else
			rptr->flags = 0;

		/* On Xtensa LX, remember the index of EPS at the debug IRQ level
		 * so PS can be accessed through it while halted. */
		if ((rptr->reg_num == (XT_PS_REG_NUM_BASE + xtensa->core_config->debug.irq_level)) &&
			(xtensa->core_config->core_type == XT_LX) && (rptr->type == XT_REG_SPECIAL)) {
			xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
			LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
		}
	} else if (strcmp(rptr->name, "cpenable") == 0) {
		/* Presence of CPENABLE implies the coprocessor option. */
		xtensa->core_config->coproc = true;
	}

	/* Build out list of contiguous registers in specified order */
	unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
	if (xtensa->contiguous_regs_desc) {
		assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
		xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
	}
	if (xtensa_extra_debug_log)
		LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
			is_extended_reg ? "config-specific" : "core",
			rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
			is_extended_reg ? xtensa->num_optregs : ridx,
			is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
	return ERROR_OK;
}
3473
3474 COMMAND_HANDLER(xtensa_cmd_xtreg)
3475 {
3476 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3477 target_to_xtensa(get_current_target(CMD_CTX)));
3478 }
3479
3480 /* xtregfmt <contiguous|sparse> [numgregs] */
3481 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3482 {
3483 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3484 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3485 return ERROR_OK;
3486 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3487 xtensa->regmap_contiguous = true;
3488 if (CMD_ARGC == 2) {
3489 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3490 if ((numgregs <= 0) ||
3491 ((numgregs > xtensa->total_regs_num) &&
3492 (xtensa->total_regs_num > 0))) {
3493 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3494 numgregs, xtensa->total_regs_num);
3495 return ERROR_COMMAND_SYNTAX_ERROR;
3496 }
3497 xtensa->genpkt_regs_num = numgregs;
3498 }
3499 return ERROR_OK;
3500 }
3501 }
3502 return ERROR_COMMAND_SYNTAX_ERROR;
3503 }
3504
3505 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3506 {
3507 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3508 target_to_xtensa(get_current_target(CMD_CTX)));
3509 }
3510
/* Toggle/query permissive mode (fewer client-side checks) via the generic
 * boolean-option parser. */
COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
		&xtensa->permissive_mode, "xtensa permissive mode");
}
3516
3517 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3518 {
3519 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3520 target_to_xtensa(get_current_target(CMD_CTX)));
3521 }
3522
3523 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
3524 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3525 {
3526 struct xtensa_perfmon_config config = {
3527 .mask = 0xffff,
3528 .kernelcnt = 0,
3529 .tracelevel = -1 /* use DEBUGLEVEL by default */
3530 };
3531
3532 if (CMD_ARGC < 2 || CMD_ARGC > 6)
3533 return ERROR_COMMAND_SYNTAX_ERROR;
3534
3535 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3536 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3537 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3538 return ERROR_COMMAND_ARGUMENT_INVALID;
3539 }
3540
3541 config.select = strtoul(CMD_ARGV[1], NULL, 0);
3542 if (config.select > XTENSA_MAX_PERF_SELECT) {
3543 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3544 return ERROR_COMMAND_ARGUMENT_INVALID;
3545 }
3546
3547 if (CMD_ARGC >= 3) {
3548 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3549 if (config.mask > XTENSA_MAX_PERF_MASK) {
3550 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3551 return ERROR_COMMAND_ARGUMENT_INVALID;
3552 }
3553 }
3554
3555 if (CMD_ARGC >= 4) {
3556 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3557 if (config.kernelcnt > 1) {
3558 command_print(CMD, "kernelcnt should be 0 or 1");
3559 return ERROR_COMMAND_ARGUMENT_INVALID;
3560 }
3561 }
3562
3563 if (CMD_ARGC >= 5) {
3564 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3565 if (config.tracelevel > 7) {
3566 command_print(CMD, "tracelevel should be <=7");
3567 return ERROR_COMMAND_ARGUMENT_INVALID;
3568 }
3569 }
3570
3571 if (config.tracelevel == -1)
3572 config.tracelevel = xtensa->core_config->debug.irq_level;
3573
3574 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3575 }
3576
3577 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3578 {
3579 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3580 target_to_xtensa(get_current_target(CMD_CTX)));
3581 }
3582
3583 /* perfmon_dump [counter_id] */
3584 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3585 {
3586 if (CMD_ARGC > 1)
3587 return ERROR_COMMAND_SYNTAX_ERROR;
3588
3589 int counter_id = -1;
3590 if (CMD_ARGC == 1) {
3591 counter_id = strtol(CMD_ARGV[0], NULL, 0);
3592 if (counter_id > XTENSA_MAX_PERF_COUNTERS) {
3593 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3594 return ERROR_COMMAND_ARGUMENT_INVALID;
3595 }
3596 }
3597
3598 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3599 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3600 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3601 char result_buf[128] = { 0 };
3602 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3603 struct xtensa_perfmon_result result;
3604 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3605 if (res != ERROR_OK)
3606 return res;
3607 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3608 "%-12" PRIu64 "%s",
3609 result.value,
3610 result.overflow ? " (overflow)" : "");
3611 LOG_INFO("%s", result_buf);
3612 }
3613
3614 return ERROR_OK;
3615 }
3616
3617 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3618 {
3619 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3620 target_to_xtensa(get_current_target(CMD_CTX)));
3621 }
3622
3623 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3624 {
3625 int state = -1;
3626
3627 if (CMD_ARGC < 1) {
3628 const char *st;
3629 state = xtensa->stepping_isr_mode;
3630 if (state == XT_STEPPING_ISR_ON)
3631 st = "OFF";
3632 else if (state == XT_STEPPING_ISR_OFF)
3633 st = "ON";
3634 else
3635 st = "UNKNOWN";
3636 command_print(CMD, "Current ISR step mode: %s", st);
3637 return ERROR_OK;
3638 }
3639 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3640 if (!strcasecmp(CMD_ARGV[0], "off"))
3641 state = XT_STEPPING_ISR_ON;
3642 else if (!strcasecmp(CMD_ARGV[0], "on"))
3643 state = XT_STEPPING_ISR_OFF;
3644
3645 if (state == -1) {
3646 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3647 return ERROR_FAIL;
3648 }
3649 xtensa->stepping_isr_mode = state;
3650 return ERROR_OK;
3651 }
3652
3653 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3654 {
3655 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3656 target_to_xtensa(get_current_target(CMD_CTX)));
3657 }
3658
3659 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3660 {
3661 int res;
3662 uint32_t val = 0;
3663
3664 if (CMD_ARGC >= 1) {
3665 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3666 if (!strcasecmp(CMD_ARGV[0], "none")) {
3667 val = 0;
3668 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3669 val |= OCDDCR_BREAKINEN;
3670 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3671 val |= OCDDCR_BREAKOUTEN;
3672 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3673 val |= OCDDCR_RUNSTALLINEN;
3674 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3675 val |= OCDDCR_DEBUGMODEOUTEN;
3676 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3677 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3678 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3679 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
3680 } else {
3681 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3682 command_print(
3683 CMD,
3684 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3685 return ERROR_OK;
3686 }
3687 }
3688 res = xtensa_smpbreak_set(target, val);
3689 if (res != ERROR_OK)
3690 command_print(CMD, "Failed to set smpbreak config %d", res);
3691 } else {
3692 struct xtensa *xtensa = target_to_xtensa(target);
3693 res = xtensa_smpbreak_read(xtensa, &val);
3694 if (res == ERROR_OK)
3695 command_print(CMD, "Current bits set:%s%s%s%s",
3696 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3697 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3698 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3699 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3700 );
3701 else
3702 command_print(CMD, "Failed to get smpbreak config %d", res);
3703 }
3704 return res;
3705 }
3706
3707 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3708 {
3709 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3710 get_current_target(CMD_CTX));
3711 }
3712
3713 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
3714 {
3715 struct xtensa_trace_status trace_status;
3716 struct xtensa_trace_start_config cfg = {
3717 .stoppc = 0,
3718 .stopmask = XTENSA_STOPMASK_DISABLED,
3719 .after = 0,
3720 .after_is_words = false
3721 };
3722
3723 /* Parse arguments */
3724 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3725 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
3726 char *e;
3727 i++;
3728 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
3729 cfg.stopmask = 0;
3730 if (*e == '/')
3731 cfg.stopmask = strtol(e, NULL, 0);
3732 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
3733 i++;
3734 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
3735 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
3736 cfg.after_is_words = 0;
3737 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
3738 cfg.after_is_words = 1;
3739 } else {
3740 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
3741 return ERROR_FAIL;
3742 }
3743 }
3744
3745 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3746 if (res != ERROR_OK)
3747 return res;
3748 if (trace_status.stat & TRAXSTAT_TRACT) {
3749 LOG_WARNING("Silently stop active tracing!");
3750 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
3751 if (res != ERROR_OK)
3752 return res;
3753 }
3754
3755 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
3756 if (res != ERROR_OK)
3757 return res;
3758
3759 xtensa->trace_active = true;
3760 command_print(CMD, "Trace started.");
3761 return ERROR_OK;
3762 }
3763
3764 COMMAND_HANDLER(xtensa_cmd_tracestart)
3765 {
3766 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
3767 target_to_xtensa(get_current_target(CMD_CTX)));
3768 }
3769
3770 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
3771 {
3772 struct xtensa_trace_status trace_status;
3773
3774 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3775 if (res != ERROR_OK)
3776 return res;
3777
3778 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
3779 command_print(CMD, "No trace is currently active.");
3780 return ERROR_FAIL;
3781 }
3782
3783 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
3784 if (res != ERROR_OK)
3785 return res;
3786
3787 xtensa->trace_active = false;
3788 command_print(CMD, "Trace stop triggered.");
3789 return ERROR_OK;
3790 }
3791
3792 COMMAND_HANDLER(xtensa_cmd_tracestop)
3793 {
3794 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
3795 target_to_xtensa(get_current_target(CMD_CTX)));
3796 }
3797
3798 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
3799 {
3800 struct xtensa_trace_config trace_config;
3801 struct xtensa_trace_status trace_status;
3802 uint32_t memsz, wmem;
3803
3804 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3805 if (res != ERROR_OK)
3806 return res;
3807
3808 if (trace_status.stat & TRAXSTAT_TRACT) {
3809 command_print(CMD, "Tracing is still active. Please stop it first.");
3810 return ERROR_FAIL;
3811 }
3812
3813 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
3814 if (res != ERROR_OK)
3815 return res;
3816
3817 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
3818 command_print(CMD, "No active trace found; nothing to dump.");
3819 return ERROR_FAIL;
3820 }
3821
3822 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
3823 LOG_INFO("Total trace memory: %d words", memsz);
3824 if ((trace_config.addr &
3825 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
3826 /*Memory hasn't overwritten itself yet. */
3827 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
3828 LOG_INFO("...but trace is only %d words", wmem);
3829 if (wmem < memsz)
3830 memsz = wmem;
3831 } else {
3832 if (trace_config.addr & TRAXADDR_TWSAT) {
3833 LOG_INFO("Real trace is many times longer than that (overflow)");
3834 } else {
3835 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
3836 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
3837 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
3838 }
3839 }
3840
3841 uint8_t *tracemem = malloc(memsz * 4);
3842 if (!tracemem) {
3843 command_print(CMD, "Failed to alloc memory for trace data!");
3844 return ERROR_FAIL;
3845 }
3846 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
3847 if (res != ERROR_OK) {
3848 free(tracemem);
3849 return res;
3850 }
3851
3852 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
3853 if (f <= 0) {
3854 free(tracemem);
3855 command_print(CMD, "Unable to open file %s", fname);
3856 return ERROR_FAIL;
3857 }
3858 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
3859 command_print(CMD, "Unable to write to file %s", fname);
3860 else
3861 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
3862 close(f);
3863
3864 bool is_all_zeroes = true;
3865 for (unsigned int i = 0; i < memsz * 4; i++) {
3866 if (tracemem[i] != 0) {
3867 is_all_zeroes = false;
3868 break;
3869 }
3870 }
3871 free(tracemem);
3872 if (is_all_zeroes)
3873 command_print(
3874 CMD,
3875 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
3876
3877 return ERROR_OK;
3878 }
3879
3880 COMMAND_HANDLER(xtensa_cmd_tracedump)
3881 {
3882 if (CMD_ARGC != 1) {
3883 command_print(CMD, "Command takes exactly 1 parameter.Need filename to dump to as output!");
3884 return ERROR_FAIL;
3885 }
3886
3887 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
3888 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
3889 }
3890
3891 static const struct command_registration xtensa_any_command_handlers[] = {
3892 {
3893 .name = "xtdef",
3894 .handler = xtensa_cmd_xtdef,
3895 .mode = COMMAND_CONFIG,
3896 .help = "Configure Xtensa core type",
3897 .usage = "<type>",
3898 },
3899 {
3900 .name = "xtopt",
3901 .handler = xtensa_cmd_xtopt,
3902 .mode = COMMAND_CONFIG,
3903 .help = "Configure Xtensa core option",
3904 .usage = "<name> <value>",
3905 },
3906 {
3907 .name = "xtmem",
3908 .handler = xtensa_cmd_xtmem,
3909 .mode = COMMAND_CONFIG,
3910 .help = "Configure Xtensa memory/cache option",
3911 .usage = "<type> [parameters]",
3912 },
3913 {
3914 .name = "xtmmu",
3915 .handler = xtensa_cmd_xtmmu,
3916 .mode = COMMAND_CONFIG,
3917 .help = "Configure Xtensa MMU option",
3918 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
3919 },
3920 {
3921 .name = "xtmpu",
3922 .handler = xtensa_cmd_xtmpu,
3923 .mode = COMMAND_CONFIG,
3924 .help = "Configure Xtensa MPU option",
3925 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
3926 },
3927 {
3928 .name = "xtreg",
3929 .handler = xtensa_cmd_xtreg,
3930 .mode = COMMAND_CONFIG,
3931 .help = "Configure Xtensa register",
3932 .usage = "<regname> <regnum>",
3933 },
3934 {
3935 .name = "xtregs",
3936 .handler = xtensa_cmd_xtreg,
3937 .mode = COMMAND_CONFIG,
3938 .help = "Configure number of Xtensa registers",
3939 .usage = "<numregs>",
3940 },
3941 {
3942 .name = "xtregfmt",
3943 .handler = xtensa_cmd_xtregfmt,
3944 .mode = COMMAND_CONFIG,
3945 .help = "Configure format of Xtensa register map",
3946 .usage = "<contiguous|sparse> [numgregs]",
3947 },
3948 {
3949 .name = "set_permissive",
3950 .handler = xtensa_cmd_permissive_mode,
3951 .mode = COMMAND_ANY,
3952 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
3953 .usage = "[0|1]",
3954 },
3955 {
3956 .name = "maskisr",
3957 .handler = xtensa_cmd_mask_interrupts,
3958 .mode = COMMAND_ANY,
3959 .help = "mask Xtensa interrupts at step",
3960 .usage = "['on'|'off']",
3961 },
3962 {
3963 .name = "smpbreak",
3964 .handler = xtensa_cmd_smpbreak,
3965 .mode = COMMAND_ANY,
3966 .help = "Set the way the CPU chains OCD breaks",
3967 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
3968 },
3969 {
3970 .name = "perfmon_enable",
3971 .handler = xtensa_cmd_perfmon_enable,
3972 .mode = COMMAND_EXEC,
3973 .help = "Enable and start performance counter",
3974 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
3975 },
3976 {
3977 .name = "perfmon_dump",
3978 .handler = xtensa_cmd_perfmon_dump,
3979 .mode = COMMAND_EXEC,
3980 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
3981 .usage = "[counter_id]",
3982 },
3983 {
3984 .name = "tracestart",
3985 .handler = xtensa_cmd_tracestart,
3986 .mode = COMMAND_EXEC,
3987 .help =
3988 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
3989 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
3990 },
3991 {
3992 .name = "tracestop",
3993 .handler = xtensa_cmd_tracestop,
3994 .mode = COMMAND_EXEC,
3995 .help = "Tracing: Stop current trace as started by the tracestart command",
3996 .usage = "",
3997 },
3998 {
3999 .name = "tracedump",
4000 .handler = xtensa_cmd_tracedump,
4001 .mode = COMMAND_EXEC,
4002 .help = "Tracing: Dump trace memory to a files. One file per core.",
4003 .usage = "<outfile>",
4004 },
4005 {
4006 .name = "exe",
4007 .handler = xtensa_cmd_exe,
4008 .mode = COMMAND_ANY,
4009 .help = "Xtensa stub execution",
4010 .usage = "<ascii-encoded hexadecimal instruction bytes>",
4011 },
4012 COMMAND_REGISTRATION_DONE
4013 };
4014
/* Public registration table: exposes the "xtensa" command group, with all
 * subcommands chained from xtensa_any_command_handlers. */
const struct command_registration xtensa_command_handlers[] = {
	{
		.name = "xtensa",
		.mode = COMMAND_ANY,
		.help = "Xtensa command group",
		.usage = "",
		.chain = xtensa_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)