target/xtensa: rename pc and ps macro names
[openocd.git] / src / target / xtensa / xtensa.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
22
23 /* Swap 4-bit Xtensa opcodes and fields */
24 #define XT_NIBSWAP8(V) \
25 ((((V) & 0x0F) << 4) \
26 | (((V) & 0xF0) >> 4))
27
28 #define XT_NIBSWAP16(V) \
29 ((((V) & 0x000F) << 12) \
30 | (((V) & 0x00F0) << 4) \
31 | (((V) & 0x0F00) >> 4) \
32 | (((V) & 0xF000) >> 12))
33
34 #define XT_NIBSWAP24(V) \
35 ((((V) & 0x00000F) << 20) \
36 | (((V) & 0x0000F0) << 12) \
37 | (((V) & 0x000F00) << 4) \
38 | (((V) & 0x00F000) >> 4) \
39 | (((V) & 0x0F0000) >> 12) \
40 | (((V) & 0xF00000) >> 20))
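/* For example, XT_NIBSWAP24(0xF1E000) evaluates to 0x000E1F, i.e. the little-endian
 * RFDO opcode with its nibbles reversed, which is the value XT_INS_RFDO() below
 * uses for big-endian targets (before its << 8 shift into the DIR register). */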
41
42 /* _XT_INS_FORMAT_*()
43 * Instruction formatting converted from little-endian inputs
44 * and shifted to the MSB-side of DIR for BE systems.
45 */
46 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
47 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
48 | (((T) & 0x0F) << 16) \
49 | (((SR) & 0xFF) << 8)) << 8 \
50 : (OPCODE) \
51 | (((SR) & 0xFF) << 8) \
52 | (((T) & 0x0F) << 4))
53
54 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
55 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
56 | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
57 | (((R) & 0x0F) << 8)) << 8 \
58 : (OPCODE) \
59 | (((ST) & 0xFF) << 4) \
60 | (((R) & 0x0F) << 12))
61
62 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
63 (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
64 | (((T) & 0x0F) << 8) \
65 | (((S) & 0x0F) << 4) \
66 | ((IMM4) & 0x0F)) << 16 \
67 : (OPCODE) \
68 | (((T) & 0x0F) << 4) \
69 | (((S) & 0x0F) << 8) \
70 | (((IMM4) & 0x0F) << 12))
71
72 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
73 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
74 | (((T) & 0x0F) << 16) \
75 | (((S) & 0x0F) << 12) \
76 | (((R) & 0x0F) << 8) \
77 | ((IMM8) & 0xFF)) << 8 \
78 : (OPCODE) \
79 | (((IMM8) & 0xFF) << 16) \
80 | (((R) & 0x0F) << 12) \
81 | (((S) & 0x0F) << 8) \
82 | (((T) & 0x0F) << 4))
83
84 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
85 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
86 | (((T) & 0x0F) << 16) \
87 | (((S) & 0x0F) << 12) \
88 | (((R) & 0x0F) << 8)) << 8 \
89 | ((IMM4) & 0x0F) \
90 : (OPCODE) \
91 | (((IMM4) & 0x0F) << 20) \
92 | (((R) & 0x0F) << 12) \
93 | (((S) & 0x0F) << 8) \
94 | (((T) & 0x0F) << 4))
95
96 /* Xtensa processor instruction opcodes
97 */
98 /* "Return From Debug Operation" to Normal */
99 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
100 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
101 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
102
103 /* Load to DDR register, increase addr register */
104 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
105 /* Store from DDR register, increase addr register */
106 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
107
108 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
109 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
110 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
111 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
112 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
113 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
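/* For example, XT_INS_L32I(xtensa, 1, 2, 0) assembles "l32i a2, a1, 0"
 * (0x002122 in the little-endian encoding). */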
114
115 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
116 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
117 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
118 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
119 /* Store 8-bit to A(S)+IMM8 from A(T) */
120 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
121
122 /* Cache Instructions */
123 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
124 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
125 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
126 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
127
128 /* Control Instructions */
129 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
130 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
131
132 /* Read Special Register */
133 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
134 /* Write Special Register */
135 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
136 /* Swap Special Register */
137 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
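/* For example, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3) assembles "wsr a3, ddr"
 * (0x136830 in the little-endian encoding), the sequence used throughout this
 * file to move values between AR registers and the debug module's DDR. */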
138
139 /* Rotate Window by (-8..7) */
140 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
141
142 /* Read User Register */
143 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
144 /* Write User Register */
145 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
146
147 /* Read Floating-Point Register */
148 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
149 /* Write Floating-Point Register */
150 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
151
152 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
153 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
154 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
155
156 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
157 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
158 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
159
160 #define XT_WATCHPOINTS_NUM_MAX 2
161
162 /* Special register number macro for DDR, PS, WB, A3, A4 registers.
163 * These get used a lot so making a shortcut is useful.
164 */
165 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
166 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
167 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
168 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
169 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
170
171 #define XT_PS_REG_NUM (0xe6U)
172 #define XT_EPS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
173 #define XT_EPC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
174 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
175 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
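/* PC and PS are not read directly while halted: a debug interrupt saves them in
 * EPC[DBGLEVEL] and EPS[DBGLEVEL], so accesses to the virtual PC/PS are mapped to
 * EPC/EPS at core_config->debug.irq_level (see xtensa_fetch_all_regs() and
 * xtensa_write_dirty_registers()). */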
176
177 #define XT_SW_BREAKPOINTS_MAX_NUM 32
178 #define XT_HW_IBREAK_MAX_NUM 2
179 #define XT_HW_DBREAK_MAX_NUM 2
180
181 struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
182 XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
183 XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
184 XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
185 XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
186 XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
187 XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
188 XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
189 XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
190 XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
191 XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
192 XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
193 XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
194 XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
195 XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
196 XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
197 XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
198 XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
199 XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
200 XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
201 XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
202 XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
203 XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
204 XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
205 XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
206 XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
207 XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
208 XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
209 XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
210 XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
211 XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
212 XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
213 XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
214 XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
215 XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
216 XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
217 XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
218 XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
219 XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
220 XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
221 XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
222 XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
223 XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
224 XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
225 XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
226 XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
227 XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
228 XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
229 XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
230 XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
231 XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
232 XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
233 XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
234 XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
235 XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
236 XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
237 XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
238 XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
239 XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
240 XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
241 XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
242 XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
243 XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
244 XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
245 XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
246 XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
247 XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
248 XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
249 XT_MK_REG_DESC("ps", XT_PS_REG_NUM, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
250 XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
251 XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
252 XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
253 XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
254 XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
255 XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
256 XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
257 XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
258 XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
259 XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
260 XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
261 XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
262 XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
263
264 /* WARNING: For these registers, regnum points to the
265 * index of the corresponding ARx registers, NOT to
266 * the processor register number! */
267 XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
268 XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
269 XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
270 XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
271 XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
272 XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
273 XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
274 XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
275 XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
276 XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
277 XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
278 XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
279 XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
280 XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
281 XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
282 XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
283 };
284
285 /**
286 * Types of memory used by the Xtensa target
287 */
288 enum xtensa_mem_region_type {
289 XTENSA_MEM_REG_IROM = 0x0,
290 XTENSA_MEM_REG_IRAM,
291 XTENSA_MEM_REG_DROM,
292 XTENSA_MEM_REG_DRAM,
293 XTENSA_MEM_REG_SRAM,
294 XTENSA_MEM_REG_SROM,
295 XTENSA_MEM_REGS_NUM
296 };
297
298 /* Register definition as union for list allocation */
299 union xtensa_reg_val_u {
300 xtensa_reg_val_t val;
301 uint8_t buf[4];
302 };
303
304 static const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
305 { .chrval = "E00", .intval = ERROR_FAIL },
306 { .chrval = "E01", .intval = ERROR_FAIL },
307 { .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
308 { .chrval = "E03", .intval = ERROR_FAIL },
309 };
310
311 /* Set to true for extra debug logging */
312 static const bool xtensa_extra_debug_log;
313
314 /**
315 * Gets a config for the specific mem type
316 */
317 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
318 struct xtensa *xtensa,
319 enum xtensa_mem_region_type type)
320 {
321 switch (type) {
322 case XTENSA_MEM_REG_IROM:
323 return &xtensa->core_config->irom;
324 case XTENSA_MEM_REG_IRAM:
325 return &xtensa->core_config->iram;
326 case XTENSA_MEM_REG_DROM:
327 return &xtensa->core_config->drom;
328 case XTENSA_MEM_REG_DRAM:
329 return &xtensa->core_config->dram;
330 case XTENSA_MEM_REG_SRAM:
331 return &xtensa->core_config->sram;
332 case XTENSA_MEM_REG_SROM:
333 return &xtensa->core_config->srom;
334 default:
335 return NULL;
336 }
337 }
338
339 /**
340 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
341 * for a given address
342 * Returns NULL if nothing found
343 */
344 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
345 const struct xtensa_local_mem_config *mem,
346 target_addr_t address)
347 {
348 for (unsigned int i = 0; i < mem->count; i++) {
349 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
350 if (address >= region->base && address < (region->base + region->size))
351 return region;
352 }
353 return NULL;
354 }
355
356 /**
357 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
358 * for a given address
359 * Returns NULL if nothing found
360 */
361 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
362 struct xtensa *xtensa,
363 target_addr_t address)
364 {
365 const struct xtensa_local_mem_region_config *result;
366 const struct xtensa_local_mem_config *mcgf;
367 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
368 mcgf = xtensa_get_mem_config(xtensa, mtype);
369 result = xtensa_memory_region_find(mcgf, address);
370 if (result)
371 return result;
372 }
373 return NULL;
374 }
375
376 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
377 const struct xtensa_local_mem_config *mem,
378 target_addr_t address)
379 {
380 if (!cache->size)
381 return false;
382 return xtensa_memory_region_find(mem, address);
383 }
384
385 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
386 {
387 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
388 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
389 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
390 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
391 }
392
393 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
394 {
395 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
396 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
397 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
398 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
399 }
400
401 static int xtensa_core_reg_get(struct reg *reg)
402 {
403 /* We don't need this because we read all registers on halt anyway. */
404 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
405 struct target *target = xtensa->target;
406
407 if (target->state != TARGET_HALTED)
408 return ERROR_TARGET_NOT_HALTED;
409 if (!reg->exist) {
410 if (strncmp(reg->name, "?0x", 3) == 0) {
411 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
412 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
413 return ERROR_OK;
414 }
415 return ERROR_COMMAND_ARGUMENT_INVALID;
416 }
417 return ERROR_OK;
418 }
419
420 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
421 {
422 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
423 struct target *target = xtensa->target;
424
425 assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
426 if (target->state != TARGET_HALTED)
427 return ERROR_TARGET_NOT_HALTED;
428
429 if (!reg->exist) {
430 if (strncmp(reg->name, "?0x", 3) == 0) {
431 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
432 LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
433 return ERROR_OK;
434 }
435 return ERROR_COMMAND_ARGUMENT_INVALID;
436 }
437
438 buf_cpy(buf, reg->value, reg->size);
439
440 if (xtensa->core_config->windowed) {
441 /* If the user updates a potential scratch register, track for conflicts */
442 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
443 if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
444 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
445 buf_get_u32(reg->value, 0, 32));
446 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
447 xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
448 xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
449 xtensa->scratch_ars[s].intval = true;
450 break;
451 }
452 }
453 }
454 reg->dirty = true;
455 reg->valid = true;
456
457 return ERROR_OK;
458 }
459
460 static const struct reg_arch_type xtensa_reg_type = {
461 .get = xtensa_core_reg_get,
462 .set = xtensa_core_reg_set,
463 };
464
465 /* Convert a register index that's indexed relative to windowbase, to the real address. */
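/* The physical AR index is (Ax + WINDOWBASE * 4) modulo the number of physical ARs;
 * the bitwise AND below relies on aregs_num being a power of two (typically 32 or 64). */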
466 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
467 enum xtensa_reg_id reg_idx,
468 int windowbase)
469 {
470 unsigned int idx;
471 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
472 idx = reg_idx - XT_REG_IDX_AR0;
473 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
474 idx = reg_idx - XT_REG_IDX_A0;
475 } else {
476 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
477 return -1;
478 }
479 return ((idx + windowbase * 4) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
480 }
481
482 static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
483 enum xtensa_reg_id reg_idx,
484 int windowbase)
485 {
486 return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
487 }
488
489 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
490 {
491 struct reg *reg_list = xtensa->core_cache->reg_list;
492 reg_list[reg_idx].dirty = true;
493 }
494
495 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
496 {
497 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0EXEC, ins);
498 }
499
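/* Queue a wide (up to 64-byte) instruction for execution: the opcode bytes are
 * zero-padded, loaded into DIR1..DIRn first, and DIR0EXEC is written last because
 * that write triggers execution. */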
500 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
501 {
502 const int max_oplen = 64; /* 8 DIRx regs: max width 64B */
503 if ((oplen > 0) && (oplen <= max_oplen)) {
504 uint8_t ops_padded[max_oplen];
505 memcpy(ops_padded, ops, oplen);
506 memset(ops_padded + oplen, 0, max_oplen - oplen);
507 unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
508 for (int32_t i = oplenw - 1; i > 0; i--)
509 xtensa_queue_dbg_reg_write(xtensa,
510 XDMREG_DIR0 + i,
511 target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t)*i]));
512 /* Write DIR0EXEC last */
513 xtensa_queue_dbg_reg_write(xtensa,
514 XDMREG_DIR0EXEC,
515 target_buffer_get_u32(xtensa->target, &ops_padded[0]));
516 }
517 }
518
519 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
520 {
521 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
522 return dm->pwr_ops->queue_reg_write(dm, reg, data);
523 }
524
525 /* NOTE: Assumes A3 has already been saved */
526 static int xtensa_window_state_save(struct target *target, uint32_t *woe)
527 {
528 struct xtensa *xtensa = target_to_xtensa(target);
529 int woe_dis;
530 uint8_t woe_buf[4];
531
532 if (xtensa->core_config->windowed) {
533 /* Save PS (LX) and disable window overflow exceptions prior to AR save */
534 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_PS, XT_REG_A3));
535 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
536 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, woe_buf);
537 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
538 if (res != ERROR_OK) {
539 LOG_ERROR("Failed to read PS (%d)!", res);
540 return res;
541 }
542 xtensa_core_status_check(target);
543 *woe = buf_get_u32(woe_buf, 0, 32);
544 woe_dis = *woe & ~XT_PS_WOE_MSK;
545 LOG_DEBUG("Clearing PS.WOE (0x%08" PRIx32 " -> 0x%08" PRIx32 ")", *woe, woe_dis);
546 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe_dis);
547 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
548 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
549 }
550 return ERROR_OK;
551 }
552
553 /* NOTE: Assumes A3 has already been saved */
554 static void xtensa_window_state_restore(struct target *target, uint32_t woe)
555 {
556 struct xtensa *xtensa = target_to_xtensa(target);
557 if (xtensa->core_config->windowed) {
558 /* Restore window overflow exception state */
559 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe);
560 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
561 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
562 LOG_DEBUG("Restored PS.WOE (0x%08" PRIx32 ")", woe);
563 }
564 }
565
566 static bool xtensa_reg_is_readable(int flags, int cpenable)
567 {
568 if (flags & XT_REGF_NOREAD)
569 return false;
570 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
571 return false;
572 return true;
573 }
574
575 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
576 {
577 int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
578 if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
579 LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
580 memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
581 } else {
582 LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
583 memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
584 }
585 return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
586 }
587
588 static int xtensa_write_dirty_registers(struct target *target)
589 {
590 struct xtensa *xtensa = target_to_xtensa(target);
591 int res;
592 xtensa_reg_val_t regval, windowbase = 0;
593 bool scratch_reg_dirty = false, delay_cpenable = false;
594 struct reg *reg_list = xtensa->core_cache->reg_list;
595 unsigned int reg_list_size = xtensa->core_cache->num_regs;
596 bool preserve_a3 = false;
597 uint8_t a3_buf[4];
598 xtensa_reg_val_t a3 = 0, woe;
599
600 LOG_TARGET_DEBUG(target, "start");
601
602 /* We need to write the dirty registers in the cache list back to the processor.
603 * Start by writing the SFR/user registers. */
604 for (unsigned int i = 0; i < reg_list_size; i++) {
605 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
606 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
607 if (reg_list[i].dirty) {
608 if (rlist[ridx].type == XT_REG_SPECIAL ||
609 rlist[ridx].type == XT_REG_USER ||
610 rlist[ridx].type == XT_REG_FR) {
611 scratch_reg_dirty = true;
612 if (i == XT_REG_IDX_CPENABLE) {
613 delay_cpenable = true;
614 continue;
615 }
616 regval = xtensa_reg_get(target, i);
617 LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
618 reg_list[i].name,
619 rlist[ridx].reg_num,
620 regval);
621 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
622 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
623 if (reg_list[i].exist) {
624 unsigned int reg_num = rlist[ridx].reg_num;
625 if (rlist[ridx].type == XT_REG_USER) {
626 xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
627 } else if (rlist[ridx].type == XT_REG_FR) {
628 xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
629 } else {/*SFR */
630 if (reg_num == XT_PC_REG_NUM_VIRTUAL)
631 /* reg number of PC for debug interrupt
632 * depends on NDEBUGLEVEL */
633 reg_num =
634 (XT_EPC_REG_NUM_BASE +
635 xtensa->core_config->debug.irq_level);
636 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
637 }
638 }
639 reg_list[i].dirty = false;
640 }
641 }
642 }
643 if (scratch_reg_dirty)
644 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
645 if (delay_cpenable) {
646 regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
647 LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
648 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
649 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
650 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
651 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
652 XT_REG_A3));
653 reg_list[XT_REG_IDX_CPENABLE].dirty = false;
654 }
655
656 preserve_a3 = (xtensa->core_config->windowed);
657 if (preserve_a3) {
658 /* Save (windowed) A3 for scratch use */
659 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
660 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
661 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
662 if (res != ERROR_OK)
663 return res;
664 xtensa_core_status_check(target);
665 a3 = buf_get_u32(a3_buf, 0, 32);
666 }
667
668 if (xtensa->core_config->windowed) {
669 res = xtensa_window_state_save(target, &woe);
670 if (res != ERROR_OK)
671 return res;
672 /* Grab the windowbase, we need it. */
673 windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
674 /* Check if there are mismatches between the ARx and corresponding Ax registers.
675 * When the user sets a register on a windowed config, xt-gdb may set the ARx
676 * register directly. Thus we take ARx as priority over Ax if both are dirty
677 * and it's unclear if the user set one over the other explicitly.
678 */
679 for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
680 unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
681 if (reg_list[i].dirty && reg_list[j].dirty) {
682 if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
683 bool show_warning = true;
684 if (i == XT_REG_IDX_A3)
685 show_warning = xtensa_scratch_regs_fixup(xtensa,
686 reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
687 else if (i == XT_REG_IDX_A4)
688 show_warning = xtensa_scratch_regs_fixup(xtensa,
689 reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
690 if (show_warning)
691 LOG_WARNING(
692 "Warning: Both A%d [0x%08" PRIx32
693 "] as well as its underlying physical register "
694 "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
695 i - XT_REG_IDX_A0,
696 buf_get_u32(reg_list[i].value, 0, 32),
697 j - XT_REG_IDX_AR0,
698 buf_get_u32(reg_list[j].value, 0, 32));
699 }
700 }
701 }
702 }
703
704 /* Write A0-A15. */
705 for (unsigned int i = 0; i < 16; i++) {
706 if (reg_list[XT_REG_IDX_A0 + i].dirty) {
707 regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
708 LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
709 xtensa_regs[XT_REG_IDX_A0 + i].name,
710 regval,
711 xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
712 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
713 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
714 reg_list[XT_REG_IDX_A0 + i].dirty = false;
715 if (i == 3) {
716 /* Avoid stomping A3 during restore at end of function */
717 a3 = regval;
718 }
719 }
720 }
721
722 if (xtensa->core_config->windowed) {
723 /* Now write AR registers */
724 for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
725 /* Write the 16 registers we can see */
726 for (unsigned int i = 0; i < 16; i++) {
727 if (i + j < xtensa->core_config->aregs_num) {
728 enum xtensa_reg_id realadr =
729 xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
730 windowbase);
731 /* Write back any dirty un-windowed registers */
732 if (reg_list[realadr].dirty) {
733 regval = xtensa_reg_get(target, realadr);
734 LOG_TARGET_DEBUG(
735 target,
736 "Writing back reg %s value %08" PRIX32 ", num =%i",
737 xtensa_regs[realadr].name,
738 regval,
739 xtensa_regs[realadr].reg_num);
740 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
741 xtensa_queue_exec_ins(xtensa,
742 XT_INS_RSR(xtensa, XT_SR_DDR,
743 xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
744 reg_list[realadr].dirty = false;
745 if ((i + j) == 3)
746 /* Avoid stomping AR during A3 restore at end of function */
747 a3 = regval;
748 }
749 }
750 }
751 /* Now rotate the window so we'll see the next 16 registers.
752 * The final rotate will wrap around,
753 * leaving us in the state we were in. */
754 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
755 }
756
757 xtensa_window_state_restore(target, woe);
758
759 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
760 xtensa->scratch_ars[s].intval = false;
761 }
762
763 if (preserve_a3) {
764 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
765 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
766 }
767
768 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
769 xtensa_core_status_check(target);
770
771 return res;
772 }
773
774 static inline bool xtensa_is_stopped(struct target *target)
775 {
776 struct xtensa *xtensa = target_to_xtensa(target);
777 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
778 }
779
780 int xtensa_examine(struct target *target)
781 {
782 struct xtensa *xtensa = target_to_xtensa(target);
783 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
784
785 LOG_DEBUG("coreid = %d", target->coreid);
786
787 if (xtensa->core_config->core_type == XT_UNDEF) {
788 LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
789 return ERROR_FAIL;
790 }
791
792 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
793 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
794 xtensa_dm_queue_enable(&xtensa->dbg_mod);
795 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
796 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
797 if (res != ERROR_OK)
798 return res;
799 if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
800 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
801 return ERROR_TARGET_FAILURE;
802 }
803 LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
804 if (!target_was_examined(target))
805 target_set_examined(target);
806 xtensa_smpbreak_write(xtensa, xtensa->smp_break);
807 return ERROR_OK;
808 }
809
810 int xtensa_wakeup(struct target *target)
811 {
812 struct xtensa *xtensa = target_to_xtensa(target);
813 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
814
815 if (xtensa->reset_asserted)
816 cmd |= PWRCTL_CORERESET(xtensa);
817 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
818 /* TODO: can we join this with the write above? */
819 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
820 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
821 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
822 }
823
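/* Program the cross-trigger and run-stall signal enables: bits in 'set' are applied
 * through DCRSET, while the remaining BreakIn/BreakOut/RunStallIn/DebugModeOut
 * enables are cleared through DCRCLR. OCDDCR_ENABLEOCD is always left set. */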
824 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
825 {
826 uint32_t dsr_data = 0x00110000;
827 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
828 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
829 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
830
831 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
832 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, set | OCDDCR_ENABLEOCD);
833 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, clear);
834 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DSR, dsr_data);
835 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
836 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
837 }
838
839 int xtensa_smpbreak_set(struct target *target, uint32_t set)
840 {
841 struct xtensa *xtensa = target_to_xtensa(target);
842 int res = ERROR_OK;
843
844 xtensa->smp_break = set;
845 if (target_was_examined(target))
846 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
847 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
848 return res;
849 }
850
851 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
852 {
853 uint8_t dcr_buf[sizeof(uint32_t)];
854
855 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DCRSET, dcr_buf);
856 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
857 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
858 *val = buf_get_u32(dcr_buf, 0, 32);
859
860 return res;
861 }
862
863 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
864 {
865 struct xtensa *xtensa = target_to_xtensa(target);
866 *val = xtensa->smp_break;
867 return ERROR_OK;
868 }
869
870 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
871 {
872 return buf_get_u32(reg->value, 0, 32);
873 }
874
875 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
876 {
877 buf_set_u32(reg->value, 0, 32, value);
878 reg->dirty = true;
879 }
880
881 int xtensa_core_status_check(struct target *target)
882 {
883 struct xtensa *xtensa = target_to_xtensa(target);
884 int res, needclear = 0;
885
886 xtensa_dm_core_status_read(&xtensa->dbg_mod);
887 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
888 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
889 if (dsr & OCDDSR_EXECBUSY) {
890 if (!xtensa->suppress_dsr_errors)
891 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
892 needclear = 1;
893 }
894 if (dsr & OCDDSR_EXECEXCEPTION) {
895 if (!xtensa->suppress_dsr_errors)
896 LOG_TARGET_ERROR(target,
897 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
898 dsr);
899 needclear = 1;
900 }
901 if (dsr & OCDDSR_EXECOVERRUN) {
902 if (!xtensa->suppress_dsr_errors)
903 LOG_TARGET_ERROR(target,
904 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
905 dsr);
906 needclear = 1;
907 }
908 if (needclear) {
909 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
910 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
911 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
912 LOG_TARGET_ERROR(target, "clearing DSR failed!");
913 return ERROR_FAIL;
914 }
915 return ERROR_OK;
916 }
917
918 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
919 {
920 struct xtensa *xtensa = target_to_xtensa(target);
921 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
922 return xtensa_reg_get_value(reg);
923 }
924
925 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
926 {
927 struct xtensa *xtensa = target_to_xtensa(target);
928 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
929 if (xtensa_reg_get_value(reg) == value)
930 return;
931 xtensa_reg_set_value(reg, value);
932 }
933
934 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
935 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
936 {
937 struct xtensa *xtensa = target_to_xtensa(target);
938 uint32_t windowbase = (xtensa->core_config->windowed ?
939 xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE) : 0);
940 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
941 xtensa_reg_set(target, a_idx, value);
942 xtensa_reg_set(target, ar_idx, value);
943 }
944
945 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
946 uint32_t xtensa_cause_get(struct target *target)
947 {
948 return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
949 }
950
951 void xtensa_cause_clear(struct target *target)
952 {
953 struct xtensa *xtensa = target_to_xtensa(target);
954 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
955 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
956 }
957
958 int xtensa_assert_reset(struct target *target)
959 {
960 struct xtensa *xtensa = target_to_xtensa(target);
961
962 LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
963 xtensa_queue_pwr_reg_write(xtensa,
964 XDMREG_PWRCTL,
965 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
966 PWRCTL_COREWAKEUP(xtensa) | PWRCTL_CORERESET(xtensa));
967 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
968 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
969 if (res != ERROR_OK)
970 return res;
971
972 /* registers are now invalid */
973 xtensa->reset_asserted = true;
974 register_cache_invalidate(xtensa->core_cache);
975 target->state = TARGET_RESET;
976 return ERROR_OK;
977 }
978
979 int xtensa_deassert_reset(struct target *target)
980 {
981 struct xtensa *xtensa = target_to_xtensa(target);
982
983 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
984 if (target->reset_halt)
985 xtensa_queue_dbg_reg_write(xtensa,
986 XDMREG_DCRSET,
987 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
988 xtensa_queue_pwr_reg_write(xtensa,
989 XDMREG_PWRCTL,
990 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
991 PWRCTL_COREWAKEUP(xtensa));
992 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
993 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
994 if (res != ERROR_OK)
995 return res;
996 target->state = TARGET_RUNNING;
997 xtensa->reset_asserted = false;
998 return res;
999 }
1000
1001 int xtensa_soft_reset_halt(struct target *target)
1002 {
1003 LOG_TARGET_DEBUG(target, "begin");
1004 return xtensa_assert_reset(target);
1005 }
1006
1007 int xtensa_fetch_all_regs(struct target *target)
1008 {
1009 struct xtensa *xtensa = target_to_xtensa(target);
1010 struct reg *reg_list = xtensa->core_cache->reg_list;
1011 unsigned int reg_list_size = xtensa->core_cache->num_regs;
1012 xtensa_reg_val_t cpenable = 0, windowbase = 0, a3;
1013 uint32_t woe;
1014 uint8_t a3_buf[4];
1015 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1016
1017 union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
1018 if (!regvals) {
1019 LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
1020 return ERROR_FAIL;
1021 }
1022 union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
1023 if (!dsrs) {
1024 LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
1025 free(regvals);
1026 return ERROR_FAIL;
1027 }
1028
1029 LOG_TARGET_DEBUG(target, "start");
1030
1031 /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1032 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1033 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
1034 int res = xtensa_window_state_save(target, &woe);
1035 if (res != ERROR_OK)
1036 goto xtensa_fetch_all_regs_done;
1037
1038 /* Assume the CPU has just halted. We now want to fill the register cache with all the
1039 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1040 * in one go, then sort everything out from the regvals variable. */
1041
1042 /* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
1043 for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
1044 /* Grab the 16 registers we can see */
1045 for (unsigned int i = 0; i < 16; i++) {
1046 if (i + j < xtensa->core_config->aregs_num) {
1047 xtensa_queue_exec_ins(xtensa,
1048 XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
1049 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
1050 regvals[XT_REG_IDX_AR0 + i + j].buf);
1051 if (debug_dsrs)
1052 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR,
1053 dsrs[XT_REG_IDX_AR0 + i + j].buf);
1054 }
1055 }
1056 if (xtensa->core_config->windowed)
1057 /* Now rotate the window so we'll see the next 16 registers.
1058 * The final rotate will wrap around,
1059 * leaving us in the state we were in. */
1060 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
1061 }
1062 xtensa_window_state_restore(target, woe);
1063
1064 if (xtensa->core_config->coproc) {
1065 /* As the very first thing after AREGS, go grab CPENABLE */
1066 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1067 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1068 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
1069 }
1070 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1071 if (res != ERROR_OK) {
1072 LOG_ERROR("Failed to read ARs (%d)!", res);
1073 goto xtensa_fetch_all_regs_done;
1074 }
1075 xtensa_core_status_check(target);
1076
1077 a3 = buf_get_u32(a3_buf, 0, 32);
1078
1079 if (xtensa->core_config->coproc) {
1080 cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);
1081
1082 /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1083 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
1084 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1085 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1086
1087 /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1088 LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
1089 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
1090 }
1091 /* We're now free to use any of A0-A15 as scratch registers.
1092 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1093 for (unsigned int i = 0; i < reg_list_size; i++) {
1094 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1095 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1096 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1097 bool reg_fetched = true;
1098 unsigned int reg_num = rlist[ridx].reg_num;
1099 switch (rlist[ridx].type) {
1100 case XT_REG_USER:
1101 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
1102 break;
1103 case XT_REG_FR:
1104 xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
1105 break;
1106 case XT_REG_SPECIAL:
1107 if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
1108 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1109 reg_num = XT_EPC_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
1110 } else if (reg_num == xtensa_regs[XT_REG_IDX_PS].reg_num) {
1111 /* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
1112 reg_num = XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
1113 } else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
1114 /* CPENABLE already read/updated; don't re-read */
1115 reg_fetched = false;
1116 break;
1117 }
1118 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
1119 break;
1120 default:
1121 reg_fetched = false;
1122 }
1123 if (reg_fetched) {
1124 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1125 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1126 if (debug_dsrs)
1127 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
1128 }
1129 }
1130 }
1131 /* Ok, send the whole mess to the CPU. */
1132 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1133 if (res != ERROR_OK) {
1134 LOG_ERROR("Failed to fetch AR regs!");
1135 goto xtensa_fetch_all_regs_done;
1136 }
1137 xtensa_core_status_check(target);
1138
1139 if (debug_dsrs) {
1140 /* DSR checking: follows order in which registers are requested. */
1141 for (unsigned int i = 0; i < reg_list_size; i++) {
1142 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1143 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1144 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
1145 (rlist[ridx].type != XT_REG_DEBUG) &&
1146 (rlist[ridx].type != XT_REG_RELGEN) &&
1147 (rlist[ridx].type != XT_REG_TIE) &&
1148 (rlist[ridx].type != XT_REG_OTHER)) {
1149 if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
1150 LOG_ERROR("Exception reading %s!", reg_list[i].name);
1151 res = ERROR_FAIL;
1152 goto xtensa_fetch_all_regs_done;
1153 }
1154 }
1155 }
1156 }
1157
1158 if (xtensa->core_config->windowed)
1159 /* We need the windowbase to decode the general addresses. */
1160 windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE].buf, 0, 32);
1161 /* Decode the result and update the cache. */
1162 for (unsigned int i = 0; i < reg_list_size; i++) {
1163 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1164 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1165 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1166 if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
1167 /* The 64-value general register set is read from (windowbase) on down.
1168 * We need to get the real register address by subtracting windowbase and
1169 * wrapping around. */
1170 enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
1171 windowbase);
1172 buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
1173 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1174 buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
1175 if (xtensa_extra_debug_log) {
1176 xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
1177 LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
1178 }
1179 } else {
1180 xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
1181 bool is_dirty = (i == XT_REG_IDX_CPENABLE);
1182 if (xtensa_extra_debug_log)
1183 LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
1184 xtensa_reg_set(target, i, regval);
1185 reg_list[i].dirty = is_dirty; /*always do this _after_ xtensa_reg_set! */
1186 }
1187 reg_list[i].valid = true;
1188 } else {
1189 if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
1190 /* Report non-readable (XT_REGF_NOREAD) registers as all-zero but valid */
1191 reg_list[i].valid = true;
1192 xtensa_reg_set(target, i, 0);
1193 } else {
1194 reg_list[i].valid = false;
1195 }
1196 }
1197 }
1198
1199 if (xtensa->core_config->windowed) {
1200 /* We have used A3 as a scratch register.
1201 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
1202 */
1203 enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
1204 xtensa_reg_set(target, ar3_idx, a3);
1205 xtensa_mark_register_dirty(xtensa, ar3_idx);
1206
1207 /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1208 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
1209 enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
1210 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
1211 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
1212 xtensa->scratch_ars[s].intval = false;
1213 }
1214
1215 /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1216 xtensa_reg_set(target, XT_REG_IDX_A3, a3);
1217 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1218 xtensa->regs_fetched = true;
1219 xtensa_fetch_all_regs_done:
1220 free(regvals);
1221 free(dsrs);
1222 return res;
1223 }
1224
1225 int xtensa_get_gdb_reg_list(struct target *target,
1226 struct reg **reg_list[],
1227 int *reg_list_size,
1228 enum target_register_class reg_class)
1229 {
1230 struct xtensa *xtensa = target_to_xtensa(target);
1231 unsigned int num_regs;
1232
1233 if (reg_class == REG_CLASS_GENERAL) {
1234 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1235 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1236 return ERROR_FAIL;
1237 }
1238 num_regs = xtensa->genpkt_regs_num;
1239 } else {
1240 /* Determine whether to return a contiguous or sparse register map */
1241 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1242 }
1243
1244 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1245
1246 *reg_list = calloc(num_regs, sizeof(struct reg *));
1247 if (!*reg_list)
1248 return ERROR_FAIL;
1249
1250 *reg_list_size = num_regs;
1251 if (xtensa->regmap_contiguous) {
1252 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1253 for (unsigned int i = 0; i < num_regs; i++)
1254 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1255 return ERROR_OK;
1256 }
1257
1258 for (unsigned int i = 0; i < num_regs; i++)
1259 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1260 unsigned int k = 0;
1261 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1262 if (xtensa->core_cache->reg_list[i].exist) {
1263 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1264 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1265 int sparse_idx = rlist[ridx].dbreg_num;
1266 if (i == XT_REG_IDX_PS) {
1267 if (xtensa->eps_dbglevel_idx == 0) {
1268 LOG_ERROR("eps_dbglevel_idx not set\n");
1269 return ERROR_FAIL;
1270 }
1271 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1272 if (xtensa_extra_debug_log)
1273 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1274 sparse_idx, xtensa->core_config->debug.irq_level,
1275 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1276 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1277 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1278 } else {
1279 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1280 }
1281 if (i == XT_REG_IDX_PC)
1282 /* Make a duplicate copy of PC for external access */
1283 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1284 k++;
1285 }
1286 }
1287
1288 if (k == num_regs)
1289 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1290
1291 return ERROR_OK;
1292 }
1293
1294 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1295 {
1296 struct xtensa *xtensa = target_to_xtensa(target);
1297 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1298 xtensa->core_config->mmu.dtlb_entries_count > 0;
1299 return ERROR_OK;
1300 }
1301
1302 int xtensa_halt(struct target *target)
1303 {
1304 struct xtensa *xtensa = target_to_xtensa(target);
1305
1306 LOG_TARGET_DEBUG(target, "start");
1307 if (target->state == TARGET_HALTED) {
1308 LOG_TARGET_DEBUG(target, "target was already halted");
1309 return ERROR_OK;
1310 }
1311 /* First we have to read dsr and check if the target stopped */
1312 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1313 if (res != ERROR_OK) {
1314 LOG_TARGET_ERROR(target, "Failed to read core status!");
1315 return res;
1316 }
1317 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1318 if (!xtensa_is_stopped(target)) {
1319 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1320 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1321 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1322 if (res != ERROR_OK)
1323 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1324 }
1325
1326 return res;
1327 }
1328
1329 int xtensa_prepare_resume(struct target *target,
1330 int current,
1331 target_addr_t address,
1332 int handle_breakpoints,
1333 int debug_execution)
1334 {
1335 struct xtensa *xtensa = target_to_xtensa(target);
1336 uint32_t bpena = 0;
1337
1338 LOG_TARGET_DEBUG(target,
1339 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1340 current,
1341 address,
1342 handle_breakpoints,
1343 debug_execution);
1344
1345 if (target->state != TARGET_HALTED) {
1346 LOG_TARGET_WARNING(target, "target not halted");
1347 return ERROR_TARGET_NOT_HALTED;
1348 }
1349
1350 if (address && !current) {
1351 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1352 } else {
1353 uint32_t cause = xtensa_cause_get(target);
1354 LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1355 cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1356 if (cause & DEBUGCAUSE_DB)
1357 /* We stopped due to a watchpoint. We can't just resume executing the
1358 * instruction again because */
1359 /* that would trigger the watchpoint again. To fix this, we single-step,
1360 * which ignores watchpoints. */
1361 xtensa_do_step(target, current, address, handle_breakpoints);
1362 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1363 /* We stopped due to a break instruction. We can't just resume executing
1364 * the instruction again because that would trigger the break again.
1365 * To fix this, we single-step,
1366 * which ignores break instructions. */
1367 xtensa_do_step(target, current, address, handle_breakpoints);
1368 }
1369
1370 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1371 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1372 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1373 if (xtensa->hw_brps[slot]) {
1374 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1375 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1376 bpena |= BIT(slot);
1377 }
1378 }
1379 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1380
1381 /* Here we write all registers to the targets */
1382 int res = xtensa_write_dirty_registers(target);
1383 if (res != ERROR_OK)
1384 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1385 return res;
1386 }
1387
1388 int xtensa_do_resume(struct target *target)
1389 {
1390 struct xtensa *xtensa = target_to_xtensa(target);
1391
1392 LOG_TARGET_DEBUG(target, "start");
1393
1394 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1395 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1396 if (res != ERROR_OK) {
1397 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1398 return res;
1399 }
1400 xtensa_core_status_check(target);
1401 return ERROR_OK;
1402 }
1403
1404 int xtensa_resume(struct target *target,
1405 int current,
1406 target_addr_t address,
1407 int handle_breakpoints,
1408 int debug_execution)
1409 {
1410 LOG_TARGET_DEBUG(target, "start");
1411 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1412 if (res != ERROR_OK) {
1413 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1414 return res;
1415 }
1416 res = xtensa_do_resume(target);
1417 if (res != ERROR_OK) {
1418 LOG_TARGET_ERROR(target, "Failed to resume!");
1419 return res;
1420 }
1421
1422 target->debug_reason = DBG_REASON_NOTHALTED;
1423 if (!debug_execution)
1424 target->state = TARGET_RUNNING;
1425 else
1426 target->state = TARGET_DEBUG_RUNNING;
1427
1428 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1429
1430 return ERROR_OK;
1431 }
1432
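/* Returns true if the instruction at 'pc' is part of a window exception handler,
 * i.e. an L32E/S32E spill/reload or an RFWO/RFWU return. */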
1433 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1434 {
1435 struct xtensa *xtensa = target_to_xtensa(target);
1436 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1437 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1438 if (err != ERROR_OK)
1439 return false;
1440
1441 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1442 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1443 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1444 return true;
1445
1446 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1447 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1448 return true;
1449
1450 return false;
1451 }
1452
1453 int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1454 {
1455 struct xtensa *xtensa = target_to_xtensa(target);
1456 int res;
1457 const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1458 xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
1459 xtensa_reg_val_t icountlvl, cause;
1460 xtensa_reg_val_t oldps, oldpc, cur_pc;
1461 bool ps_lowered = false;
1462
1463 LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1464 current, address, handle_breakpoints);
1465
1466 if (target->state != TARGET_HALTED) {
1467 LOG_TARGET_WARNING(target, "target not halted");
1468 return ERROR_TARGET_NOT_HALTED;
1469 }
1470
1471 if (xtensa->eps_dbglevel_idx == 0) {
1472 LOG_ERROR("eps_dbglevel_idx not set");
1473 return ERROR_FAIL;
1474 }
1475
1476 /* Save old ps (EPS[dbglvl] on LX), pc */
1477 oldps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
1478 oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);
1479
1480 cause = xtensa_cause_get(target);
1481 LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1482 oldps,
1483 oldpc,
1484 cause,
1485 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1486 if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1487 /* handle hard-coded SW breakpoints (e.g. syscalls) */
1488 LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1489 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1490 /* pretend that we have stepped */
1491 if (cause & DEBUGCAUSE_BI)
1492 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1493 else
1494 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1495 return ERROR_OK;
1496 }
1497
1498 /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1499 * at which the instructions are to be counted while stepping.
1500 *
1501 * For example, if we need to step by 2 instructions, and an interrupt occurs
1502 * in between, the processor will trigger the interrupt and halt after the 2nd
1503 * instruction within the interrupt vector and/or handler.
1504 *
1505 * However, sometimes we don't want the interrupt handlers to be executed at all
1506 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1507 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1508 * code from being counted during stepping. Note that C exception handlers must
1509 * run at level 0 and hence will be counted and stepped into, should one occur.
1510 *
1511 * TODO: Certain instructions should never be single-stepped and should instead
1512 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1513 * RFI >= DBGLEVEL.
1514 */
1515 if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
1516 if (!xtensa->core_config->high_irq.enabled) {
1517 LOG_TARGET_WARNING(
1518 target,
1519 "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1520 return ERROR_FAIL;
1521 }
1522 /* Update ICOUNTLEVEL accordingly */
1523 icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
1524 } else {
1525 icountlvl = xtensa->core_config->debug.irq_level;
1526 }
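/* Example: with XT_STEPPING_ISR_OFF and the core running at PS.INTLEVEL 2, icountlvl becomes
 * min(3, debug.irq_level); only instructions completing below that interrupt level are counted
 * by ICOUNT, so higher-priority ISR code is not stepped into. */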
1527
1528 if (cause & DEBUGCAUSE_DB) {
1529 /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1530 * that would trigger the watchpoint again. To fix this, we remove the watchpoints, single-step
1531 * and re-enable the watchpoints afterwards. */
1532 LOG_TARGET_DEBUG(
1533 target,
1534 "Single-stepping to get past instruction that triggered the watchpoint...");
1535 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1536 /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1537 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1538 dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
1539 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
1540 }
1541 }
1542
1543 if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
1544 /* handle normal SW breakpoint */
1545 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1546 if ((oldps & 0xf) >= icountlvl) {
1547 /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1548 ps_lowered = true;
1549 uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
1550 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
1551 LOG_TARGET_DEBUG(target,
1552 "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
1553 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1554 newps,
1555 oldps);
1556 }
1557 do {
1558 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
1559 xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1560
1561 /* Now ICOUNT is set, we can resume as if we were going to run */
1562 res = xtensa_prepare_resume(target, current, address, 0, 0);
1563 if (res != ERROR_OK) {
1564 LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1565 return res;
1566 }
1567 res = xtensa_do_resume(target);
1568 if (res != ERROR_OK) {
1569 LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1570 return res;
1571 }
1572
1573 /* Wait for stepping to complete */
1574 long long start = timeval_ms();
1575 while (timeval_ms() < start + 500) {
1576 /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1577 *until stepping is complete. */
1578 usleep(1000);
1579 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1580 if (res != ERROR_OK) {
1581 LOG_TARGET_ERROR(target, "Failed to read core status!");
1582 return res;
1583 }
1584 if (xtensa_is_stopped(target))
1585 break;
1586 usleep(1000);
1587 }
1588 LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1589 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1590 if (!xtensa_is_stopped(target)) {
1591 LOG_TARGET_WARNING(
1592 target,
1593 "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1594 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1595 target->debug_reason = DBG_REASON_NOTHALTED;
1596 target->state = TARGET_RUNNING;
1597 return ERROR_FAIL;
1598 }
1599
1600 xtensa_fetch_all_regs(target);
1601 cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1602
1603 LOG_TARGET_DEBUG(target,
1604 "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1605 xtensa_reg_get(target, XT_REG_IDX_PS),
1606 cur_pc,
1607 xtensa_cause_get(target),
1608 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1609
1610 /* Do not step into WindowOverflow if ISRs are masked.
1611 If we stop in WindowOverflow at a breakpoint with masked ISRs and
1612 try to do a step, it will get us out of that handler. */
1613 if (xtensa->core_config->windowed &&
1614 xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
1615 xtensa_pc_in_winexc(target, cur_pc)) {
1616 /* isrmask = on, need to step out of the window exception handler */
1617 LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1618 oldpc = cur_pc;
1619 address = oldpc + 3;
1620 continue;
1621 }
1622
1623 if (oldpc == cur_pc)
1624 LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1625 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1626 else
1627 LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1628 break;
1629 } while (true);
1630
1631 target->debug_reason = DBG_REASON_SINGLESTEP;
1632 target->state = TARGET_HALTED;
1633 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1634 LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1635
1636 if (cause & DEBUGCAUSE_DB) {
1637 LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1638 /* Restore the DBREAKCx registers */
1639 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1640 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
1641 }
1642
1643 /* Restore int level */
1644 if (ps_lowered) {
1645 LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
1646 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1647 oldps);
1648 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
1649 }
1650
1651 /* write ICOUNTLEVEL back to zero */
1652 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
1653 /* TODO: can we skip writing dirty registers and re-fetching them? */
1654 res = xtensa_write_dirty_registers(target);
1655 xtensa_fetch_all_regs(target);
1656 return res;
1657 }
1658
1659 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1660 {
1661 return xtensa_do_step(target, current, address, handle_breakpoints);
1662 }
1663
1664 /**
1665 * Returns true if two ranges are overlapping
1666 */
1667 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1668 target_addr_t r1_end,
1669 target_addr_t r2_start,
1670 target_addr_t r2_end)
1671 {
1672 if ((r2_start >= r1_start) && (r2_start < r1_end))
1673 return true; /* r2_start is in r1 region */
1674 if ((r2_end > r1_start) && (r2_end <= r1_end))
1675 return true; /* r2_end is in r1 region */
1676 return false;
1677 }
1678
1679 /**
1680 * Returns the size of the overlapping region of two ranges.
1681 */
1682 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1683 target_addr_t r1_end,
1684 target_addr_t r2_start,
1685 target_addr_t r2_end)
1686 {
1687 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1688 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1689 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1690 return ov_end - ov_start;
1691 }
1692 return 0;
1693 }
1694
1695 /**
1696 * Check that the address range falls within configured memory regions that permit the requested access mode
1697 */
1698 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1699 {
1700 target_addr_t adr_pos = address; /* address cursor, set to the range start */
1701 target_addr_t adr_end = address + size; /* region end */
1702 target_addr_t overlap_size;
1703 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1704
1705 while (adr_pos < adr_end) {
1706 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1707 if (!cm) /* address does not belong to any configured region */
1708 return false;
1709 if ((cm->access & access) != access) /* access check */
1710 return false;
1711 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1712 assert(overlap_size != 0);
1713 adr_pos += overlap_size;
1714 }
1715 return true;
1716 }
1717
1718 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1719 {
1720 struct xtensa *xtensa = target_to_xtensa(target);
1721 /* We are going to read memory in 32-bit increments. This may not be what the calling
1722 * function expects, so we may need to allocate a temp buffer and read into that first. */
1723 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1724 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1725 target_addr_t adr = addrstart_al;
1726 uint8_t *albuff;
1727 bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1728
1729 if (target->state != TARGET_HALTED) {
1730 LOG_TARGET_WARNING(target, "target not halted");
1731 return ERROR_TARGET_NOT_HALTED;
1732 }
1733
1734 if (!xtensa->permissive_mode) {
1735 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1736 XT_MEM_ACCESS_READ)) {
1737 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1738 return ERROR_FAIL;
1739 }
1740 }
1741
1742 unsigned int alloc_bytes = ALIGN_UP(addrend_al - addrstart_al, sizeof(uint32_t));
1743 albuff = calloc(alloc_bytes, 1);
1744 if (!albuff) {
1745 LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
1746 addrend_al - addrstart_al);
1747 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1748 }
1749
1750 /* We're going to use A3 here */
1751 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1752 /* Write start address to A3 */
1753 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1754 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1755 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1756 if (xtensa->probe_lsddr32p != 0) {
1757 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
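/* Each DDREXEC read below returns the word currently in DDR and re-executes LDDR32P, which
 * loads the next word and post-increments A3; the final word is read via plain DDR so no
 * load is issued beyond the requested range. */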
1758 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1759 xtensa_queue_dbg_reg_read(xtensa,
1760 (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
1761 &albuff[i]);
1762 } else {
1763 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1764 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1765 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1766 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
1767 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[i]);
1768 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1769 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1770 }
1771 }
1772 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1773 if (res == ERROR_OK) {
1774 bool prev_suppress = xtensa->suppress_dsr_errors;
1775 xtensa->suppress_dsr_errors = true;
1776 res = xtensa_core_status_check(target);
1777 if (xtensa->probe_lsddr32p == -1)
1778 xtensa->probe_lsddr32p = 1;
1779 xtensa->suppress_dsr_errors = prev_suppress;
1780 }
1781 if (res != ERROR_OK) {
1782 if (xtensa->probe_lsddr32p != 0) {
1783 /* Disable fast memory access instructions and retry before reporting an error */
1784 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1785 xtensa->probe_lsddr32p = 0;
1786 res = xtensa_read_memory(target, address, size, count, buffer);
1787 bswap = false;
1788 } else {
1789 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
1790 count * size, address);
1791 }
1792 }
1793
1794 if (bswap)
1795 buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
1796 memcpy(buffer, albuff + (address & 3), (size * count));
1797 free(albuff);
1798 return res;
1799 }
1800
1801 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1802 {
1803 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1804 return xtensa_read_memory(target, address, 1, count, buffer);
1805 }
1806
1807 int xtensa_write_memory(struct target *target,
1808 target_addr_t address,
1809 uint32_t size,
1810 uint32_t count,
1811 const uint8_t *buffer)
1812 {
1813 /* This memory write function can be handed almost anything, from aligned
1814 * uint32 writes to unaligned uint8 accesses. Xtensa memory, however, may only
1815 * accept aligned uint32 writes, which is why we convert everything into
1816 * aligned 32-bit operations. */
1817 struct xtensa *xtensa = target_to_xtensa(target);
1818 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1819 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1820 target_addr_t adr = addrstart_al;
1821 int res;
1822 uint8_t *albuff;
1823 bool fill_head_tail = false;
1824
1825 if (target->state != TARGET_HALTED) {
1826 LOG_TARGET_WARNING(target, "target not halted");
1827 return ERROR_TARGET_NOT_HALTED;
1828 }
1829
1830 if (!xtensa->permissive_mode) {
1831 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
1832 LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
1833 return ERROR_FAIL;
1834 }
1835 }
1836
1837 if (size == 0 || count == 0 || !buffer)
1838 return ERROR_COMMAND_SYNTAX_ERROR;
1839
1840 /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
1841 if (addrstart_al == address && addrend_al == address + (size * count)) {
1842 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1843 /* Need a buffer for byte-swapping */
1844 albuff = malloc(addrend_al - addrstart_al);
1845 else
1846 /* We discard the const here because albuff can also be non-const */
1847 albuff = (uint8_t *)buffer;
1848 } else {
1849 fill_head_tail = true;
1850 albuff = malloc(addrend_al - addrstart_al);
1851 }
1852 if (!albuff) {
1853 LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
1854 addrend_al - addrstart_al);
1855 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1856 }
1857
1858 /* We're going to use A3 here */
1859 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1860
1861 /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
1862 if (fill_head_tail) {
1863 /* See if we need to read the first and/or last word. */
1864 if (address & 3) {
1865 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1866 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1867 if (xtensa->probe_lsddr32p == 1) {
1868 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1869 } else {
1870 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1871 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1872 }
1873 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[0]);
1874 }
1875 if ((address + (size * count)) & 3) {
1876 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
1877 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1878 if (xtensa->probe_lsddr32p == 1) {
1879 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1880 } else {
1881 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1882 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1883 }
1884 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
1885 &albuff[addrend_al - addrstart_al - 4]);
1886 }
1887 /* Grab bytes */
1888 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1889 if (res != ERROR_OK) {
1890 LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
1891 if (albuff != buffer)
1892 free(albuff);
1893 return res;
1894 }
1895 xtensa_core_status_check(target);
1896 if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
1897 bool swapped_w0 = false;
1898 if (address & 3) {
1899 buf_bswap32(&albuff[0], &albuff[0], 4);
1900 swapped_w0 = true;
1901 }
1902 if ((address + (size * count)) & 3) {
1903 if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
1904 /* Don't double-swap if buffer start/end are within the same word */
1905 } else {
1906 buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
1907 &albuff[addrend_al - addrstart_al - 4], 4);
1908 }
1909 }
1910 }
1911 /* Copy data to be written into the aligned buffer (in host-endianness) */
1912 memcpy(&albuff[address & 3], buffer, size * count);
1913 /* Now we can write albuff in aligned uint32s. */
1914 }
1915
1916 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1917 buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
1918
1919 /* Write start address to A3 */
1920 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1921 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1922 /* Write the aligned buffer */
1923 if (xtensa->probe_lsddr32p != 0) {
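/* The first word is written to DDR and stored with an explicit SDDR32P; subsequent words go
 * through DDREXEC, which re-executes SDDR32P and post-increments A3 automatically. */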
1924 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1925 if (i == 0) {
1926 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
1927 xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
1928 } else {
1929 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
1930 }
1931 }
1932 } else {
1933 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1934 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1935 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
1936 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
1937 xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1938 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1939 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1940 }
1941 }
1942
1943 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1944 if (res == ERROR_OK) {
1945 bool prev_suppress = xtensa->suppress_dsr_errors;
1946 xtensa->suppress_dsr_errors = true;
1947 res = xtensa_core_status_check(target);
1948 if (xtensa->probe_lsddr32p == -1)
1949 xtensa->probe_lsddr32p = 1;
1950 xtensa->suppress_dsr_errors = prev_suppress;
1951 }
1952 if (res != ERROR_OK) {
1953 if (xtensa->probe_lsddr32p != 0) {
1954 /* Disable fast memory access instructions and retry before reporting an error */
1955 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1956 xtensa->probe_lsddr32p = 0;
1957 res = xtensa_write_memory(target, address, size, count, buffer);
1958 } else {
1959 LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
1960 count * size, address);
1961 }
1962 } else {
1963 /* Invalidate ICACHE, writeback DCACHE if present */
1964 uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
1965 uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
1966 if (issue_ihi || issue_dhwb) {
1967 uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
1968 uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
1969 uint32_t linesize = MIN(ilinesize, dlinesize);
1970 uint32_t off = 0;
1971 adr = addrstart_al;
1972
1973 while ((adr + off) < addrend_al) {
1974 if (off == 0) {
1975 /* Write start address to A3 */
1976 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr);
1977 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1978 }
1979 if (issue_ihi)
1980 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
1981 if (issue_dhwb)
1982 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
1983 off += linesize;
1984 if (off > 1020) {
1985 /* IHI, DHWB have 8-bit immediate operands (0..1020) */
1986 adr += off;
1987 off = 0;
1988 }
1989 }
1990
1991 /* Execute cache WB/INV instructions */
1992 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1993 xtensa_core_status_check(target);
1994 if (res != ERROR_OK)
1995 LOG_TARGET_ERROR(target,
1996 "Error issuing cache writeback/invalidate instruction(s): %d",
1997 res);
1998 }
1999 }
2000 if (albuff != buffer)
2001 free(albuff);
2002
2003 return res;
2004 }
2005
2006 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2007 {
2008 /* xtensa_write_memory can handle everything. Just pass on to that. */
2009 return xtensa_write_memory(target, address, 1, count, buffer);
2010 }
2011
2012 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2013 {
2014 LOG_WARNING("not implemented yet");
2015 return ERROR_FAIL;
2016 }
2017
2018 int xtensa_poll(struct target *target)
2019 {
2020 struct xtensa *xtensa = target_to_xtensa(target);
2021 if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
2022 target->state = TARGET_UNKNOWN;
2023 return ERROR_TARGET_NOT_EXAMINED;
2024 }
2025
2026 int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET(xtensa) |
2027 PWRSTAT_COREWASRESET(xtensa));
2028 if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
2029 LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2030 xtensa->dbg_mod.power_status.stat,
2031 PWRSTAT_DEBUGWASRESET(xtensa) | PWRSTAT_COREWASRESET(xtensa),
2032 xtensa->dbg_mod.power_status.stath);
2033 if (res != ERROR_OK)
2034 return res;
2035
2036 if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
2037 LOG_TARGET_INFO(target, "Debug controller was reset.");
2038 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
2039 if (res != ERROR_OK)
2040 return res;
2041 }
2042 if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
2043 LOG_TARGET_INFO(target, "Core was reset.");
2044 xtensa_dm_power_status_cache(&xtensa->dbg_mod);
2045 /* Enable JTAG, set reset if needed */
2046 res = xtensa_wakeup(target);
2047 if (res != ERROR_OK)
2048 return res;
2049
2050 uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2051 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
2052 if (res != ERROR_OK)
2053 return res;
2054 if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2055 LOG_TARGET_DEBUG(target,
2056 "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2057 prev_dsr,
2058 xtensa->dbg_mod.core_status.dsr);
2059 if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET(xtensa)) {
2060 /* if RESET state is persistent */
2061 target->state = TARGET_RESET;
2062 } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2063 LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2064 xtensa->dbg_mod.core_status.dsr,
2065 xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
2066 target->state = TARGET_UNKNOWN;
2067 if (xtensa->come_online_probes_num == 0)
2068 target->examined = false;
2069 else
2070 xtensa->come_online_probes_num--;
2071 } else if (xtensa_is_stopped(target)) {
2072 if (target->state != TARGET_HALTED) {
2073 enum target_state oldstate = target->state;
2074 target->state = TARGET_HALTED;
2075 /* Examine why the target has been halted */
2076 target->debug_reason = DBG_REASON_DBGRQ;
2077 xtensa_fetch_all_regs(target);
2078 /* When setting debug reason DEBUGCAUSE events have the following
2079 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2080 /* Watchpoint and breakpoint events at the same time results in special
2081 * debug reason: DBG_REASON_WPTANDBKPT. */
2082 uint32_t halt_cause = xtensa_cause_get(target);
2083 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2084 if (halt_cause & DEBUGCAUSE_IC)
2085 target->debug_reason = DBG_REASON_SINGLESTEP;
2086 if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2087 if (halt_cause & DEBUGCAUSE_DB)
2088 target->debug_reason = DBG_REASON_WPTANDBKPT;
2089 else
2090 target->debug_reason = DBG_REASON_BREAKPOINT;
2091 } else if (halt_cause & DEBUGCAUSE_DB) {
2092 target->debug_reason = DBG_REASON_WATCHPOINT;
2093 }
2094 LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2095 ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2096 xtensa_reg_get(target, XT_REG_IDX_PC),
2097 target->debug_reason,
2098 oldstate);
2099 LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2100 halt_cause,
2101 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
2102 xtensa->dbg_mod.core_status.dsr);
2103 xtensa_dm_core_status_clear(
2104 &xtensa->dbg_mod,
2105 OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
2106 OCDDSR_DEBUGINTTRAX |
2107 OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
2108 }
2109 } else {
2110 target->debug_reason = DBG_REASON_NOTHALTED;
2111 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2112 target->state = TARGET_RUNNING;
2113 target->debug_reason = DBG_REASON_NOTHALTED;
2114 }
2115 }
2116 if (xtensa->trace_active) {
2117 /* Detect if tracing was active but has stopped. */
2118 struct xtensa_trace_status trace_status;
2119 res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2120 if (res == ERROR_OK) {
2121 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2122 LOG_INFO("Detected end of trace.");
2123 if (trace_status.stat & TRAXSTAT_PCMTG)
2124 LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2125 if (trace_status.stat & TRAXSTAT_PTITG)
2126 LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2127 if (trace_status.stat & TRAXSTAT_CTITG)
2128 LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2129 xtensa->trace_active = false;
2130 }
2131 }
2132 }
2133 return ERROR_OK;
2134 }
2135
2136 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2137 {
2138 struct xtensa *xtensa = target_to_xtensa(target);
2139 unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
2140 unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2141 uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2142 uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2143 unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2144 unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
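/* Instruction patches are small (a BREAK opcode is at most 3 bytes), so a misaligned update
 * can straddle into at most the next cache line; the second IHI/DHWBI at offset +4 below
 * covers that case. */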
2145 int ret;
2146
2147 if (size > icache_line_size)
2148 return ERROR_FAIL;
2149
2150 if (issue_ihi || issue_dhwbi) {
2151 /* We're going to use A3 here */
2152 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
2153
2154 /* Write start address to A3 and invalidate */
2155 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2156 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2157 LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
2158 if (issue_dhwbi) {
2159 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
2160 if (!same_dc_line) {
2161 LOG_TARGET_DEBUG(target,
2162 "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2163 address + 4);
2164 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
2165 }
2166 }
2167 if (issue_ihi) {
2168 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
2169 if (!same_ic_line) {
2170 LOG_TARGET_DEBUG(target,
2171 "IHI second icache line for address "TARGET_ADDR_FMT,
2172 address + 4);
2173 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
2174 }
2175 }
2176
2177 /* Execute invalidate instructions */
2178 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2179 xtensa_core_status_check(target);
2180 if (ret != ERROR_OK) {
2181 LOG_ERROR("Error issuing cache invalidate instruction(s): %d", ret);
2182 return ret;
2183 }
2184 }
2185
2186 /* Write new instructions to memory */
2187 ret = target_write_buffer(target, address, size, buffer);
2188 if (ret != ERROR_OK) {
2189 LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2190 return ret;
2191 }
2192
2193 if (issue_dhwbi) {
2194 /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2195 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
2196 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2197 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
2198 LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2199 if (!same_dc_line) {
2200 LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2201 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
2202 }
2203
2204 /* Execute writeback instructions */
2205 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2206 xtensa_core_status_check(target);
2207 }
2208
2209 /* TODO: Handle L2 cache if present */
2210 return ret;
2211 }
2212
2213 static int xtensa_sw_breakpoint_add(struct target *target,
2214 struct breakpoint *breakpoint,
2215 struct xtensa_sw_breakpoint *sw_bp)
2216 {
2217 struct xtensa *xtensa = target_to_xtensa(target);
2218 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2219 if (ret != ERROR_OK) {
2220 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2221 return ret;
2222 }
2223
2224 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2225 sw_bp->oocd_bp = breakpoint;
2226
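/* Use the 3-byte BREAK opcode for standard slots, or the 2-byte BREAK.N (density) opcode
 * when replacing a narrow instruction. */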
2227 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2228
2229 /* Underlying memory write will convert instruction endianness, don't do that here */
2230 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2231 if (ret != ERROR_OK) {
2232 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2233 return ret;
2234 }
2235
2236 return ERROR_OK;
2237 }
2238
2239 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2240 {
2241 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2242 if (ret != ERROR_OK) {
2243 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2244 return ret;
2245 }
2246 sw_bp->oocd_bp = NULL;
2247 return ERROR_OK;
2248 }
2249
2250 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2251 {
2252 struct xtensa *xtensa = target_to_xtensa(target);
2253 unsigned int slot;
2254
2255 if (breakpoint->type == BKPT_SOFT) {
2256 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2257 if (!xtensa->sw_brps[slot].oocd_bp ||
2258 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2259 break;
2260 }
2261 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2262 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2263 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2264 }
2265 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2266 if (ret != ERROR_OK) {
2267 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2268 return ret;
2269 }
2270 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2271 slot,
2272 breakpoint->address);
2273 return ERROR_OK;
2274 }
2275
2276 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2277 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2278 break;
2279 }
2280 if (slot == xtensa->core_config->debug.ibreaks_num) {
2281 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2282 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2283 }
2284
2285 xtensa->hw_brps[slot] = breakpoint;
2286 /* We will actually write the breakpoints when we resume the target. */
2287 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2288 slot,
2289 breakpoint->address);
2290
2291 return ERROR_OK;
2292 }
2293
2294 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2295 {
2296 struct xtensa *xtensa = target_to_xtensa(target);
2297 unsigned int slot;
2298
2299 if (breakpoint->type == BKPT_SOFT) {
2300 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2301 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2302 break;
2303 }
2304 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2305 LOG_TARGET_WARNING(target, "SW breakpoint not found, checked %u slots!", slot);
2306 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2307 }
2308 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2309 if (ret != ERROR_OK) {
2310 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2311 return ret;
2312 }
2313 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2314 return ERROR_OK;
2315 }
2316
2317 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2318 if (xtensa->hw_brps[slot] == breakpoint)
2319 break;
2320 }
2321 if (slot == xtensa->core_config->debug.ibreaks_num) {
2322 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2323 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2324 }
2325 xtensa->hw_brps[slot] = NULL;
2326 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2327 return ERROR_OK;
2328 }
2329
2330 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2331 {
2332 struct xtensa *xtensa = target_to_xtensa(target);
2333 unsigned int slot;
2334 xtensa_reg_val_t dbreakcval;
2335
2336 if (target->state != TARGET_HALTED) {
2337 LOG_TARGET_WARNING(target, "target not halted");
2338 return ERROR_TARGET_NOT_HALTED;
2339 }
2340
2341 if (watchpoint->mask != ~(uint32_t)0) {
2342 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2343 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2344 }
2345
2346 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2347 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2348 break;
2349 }
2350 if (slot == xtensa->core_config->debug.dbreaks_num) {
2351 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2352 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2353 }
2354
2355 /* Figure out the value for DBREAKC bits 5..0.
2356 * It is basically 0x3F with one more bit cleared from the LSB end for each power-of-two increase in length. */
2357 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2358 !IS_PWR_OF_2(watchpoint->length) ||
2359 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2360 LOG_TARGET_WARNING(
2361 target,
2362 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2363 " not supported by hardware.",
2364 watchpoint->length,
2365 watchpoint->address);
2366 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2367 }
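/* Resulting mask examples: length 1 -> 0x3F, length 4 -> 0x3C, length 64 -> 0x00.
 * Bits 30 (load) and 31 (store) set below select which access types trigger. */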
2368 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2369
2370 if (watchpoint->rw == WPT_READ)
2371 dbreakcval |= BIT(30);
2372 if (watchpoint->rw == WPT_WRITE)
2373 dbreakcval |= BIT(31);
2374 if (watchpoint->rw == WPT_ACCESS)
2375 dbreakcval |= BIT(30) | BIT(31);
2376
2377 /* Write DBREAKA[slot] and DBREAKC[slot] */
2378 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2379 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2380 xtensa->hw_wps[slot] = watchpoint;
2381 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2382 watchpoint->address);
2383 return ERROR_OK;
2384 }
2385
2386 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2387 {
2388 struct xtensa *xtensa = target_to_xtensa(target);
2389 unsigned int slot;
2390
2391 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2392 if (xtensa->hw_wps[slot] == watchpoint)
2393 break;
2394 }
2395 if (slot == xtensa->core_config->debug.dbreaks_num) {
2396 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2397 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2398 }
2399 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2400 xtensa->hw_wps[slot] = NULL;
2401 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2402 watchpoint->address);
2403 return ERROR_OK;
2404 }
2405
2406 static int xtensa_build_reg_cache(struct target *target)
2407 {
2408 struct xtensa *xtensa = target_to_xtensa(target);
2409 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2410 unsigned int last_dbreg_num = 0;
2411
2412 if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
2413 LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2414 xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);
2415
2416 struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2417
2418 if (!reg_cache) {
2419 LOG_ERROR("Failed to alloc reg cache!");
2420 return ERROR_FAIL;
2421 }
2422 reg_cache->name = "Xtensa registers";
2423 reg_cache->next = NULL;
2424 /* Init reglist */
2425 unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2426 struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2427 if (!reg_list) {
2428 LOG_ERROR("Failed to alloc reg list!");
2429 goto fail;
2430 }
2431 xtensa->dbregs_num = 0;
2432 unsigned int didx = 0;
2433 for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2434 struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2435 unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2436 for (unsigned int i = 0; i < listsize; i++, didx++) {
2437 reg_list[didx].exist = rlist[i].exist;
2438 reg_list[didx].name = rlist[i].name;
2439 reg_list[didx].size = 32;
2440 reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2441 if (!reg_list[didx].value) {
2442 LOG_ERROR("Failed to alloc reg list value!");
2443 goto fail;
2444 }
2445 reg_list[didx].dirty = false;
2446 reg_list[didx].valid = false;
2447 reg_list[didx].type = &xtensa_reg_type;
2448 reg_list[didx].arch_info = xtensa;
2449 if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2450 last_dbreg_num = rlist[i].dbreg_num;
2451
2452 if (xtensa_extra_debug_log) {
2453 LOG_TARGET_DEBUG(target,
2454 "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2455 reg_list[didx].name,
2456 whichlist,
2457 reg_list[didx].exist,
2458 didx,
2459 rlist[i].type,
2460 rlist[i].dbreg_num);
2461 }
2462 }
2463 }
2464
2465 xtensa->dbregs_num = last_dbreg_num + 1;
2466 reg_cache->reg_list = reg_list;
2467 reg_cache->num_regs = reg_list_size;
2468
2469 LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2470 xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
2471
2472 /* Construct empty-register list for handling unknown register requests */
2473 xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
2474 if (!xtensa->empty_regs) {
2475 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2476 goto fail;
2477 }
2478 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2479 xtensa->empty_regs[i].name = calloc(8, sizeof(char));
2480 if (!xtensa->empty_regs[i].name) {
2481 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2482 goto fail;
2483 }
2484 sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
2485 xtensa->empty_regs[i].size = 32;
2486 xtensa->empty_regs[i].type = &xtensa_reg_type;
2487 xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2488 if (!xtensa->empty_regs[i].value) {
2489 LOG_ERROR("Failed to alloc empty reg list value!");
2490 goto fail;
2491 }
2492 xtensa->empty_regs[i].arch_info = xtensa;
2493 }
2494
2495 /* Construct contiguous register list from contiguous descriptor list */
2496 if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
2497 xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
2498 if (!xtensa->contiguous_regs_list) {
2499 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2500 goto fail;
2501 }
2502 for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
2503 unsigned int j;
2504 for (j = 0; j < reg_cache->num_regs; j++) {
2505 if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
2506 xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
2507 LOG_TARGET_DEBUG(target,
2508 "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
2509 xtensa->contiguous_regs_list[i]->name,
2510 xtensa->contiguous_regs_desc[i]->dbreg_num);
2511 break;
2512 }
2513 }
2514 if (j == reg_cache->num_regs)
2515 LOG_TARGET_WARNING(target, "contiguous register %s not found",
2516 xtensa->contiguous_regs_desc[i]->name);
2517 }
2518 }
2519
2520 xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2521 if (!xtensa->algo_context_backup) {
2522 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2523 goto fail;
2524 }
2525 for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2526 struct reg *reg = &reg_cache->reg_list[i];
2527 xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2528 if (!xtensa->algo_context_backup[i]) {
2529 LOG_ERROR("Failed to alloc mem for algorithm context!");
2530 goto fail;
2531 }
2532 }
2533 xtensa->core_cache = reg_cache;
2534 if (cache_p)
2535 *cache_p = reg_cache;
2536 return ERROR_OK;
2537
2538 fail:
2539 if (reg_list) {
2540 for (unsigned int i = 0; i < reg_list_size; i++)
2541 free(reg_list[i].value);
2542 free(reg_list);
2543 }
2544 if (xtensa->empty_regs) {
2545 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2546 free((void *)xtensa->empty_regs[i].name);
2547 free(xtensa->empty_regs[i].value);
2548 }
2549 free(xtensa->empty_regs);
2550 }
2551 if (xtensa->algo_context_backup) {
2552 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2553 free(xtensa->algo_context_backup[i]);
2554 free(xtensa->algo_context_backup);
2555 }
2556 free(reg_cache);
2557
2558 return ERROR_FAIL;
2559 }
2560
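/* Parse one or more ":<oplen>:<op[0]>:..." groups from a qxtreg/Qxtreg packet and queue each
 * opcode sequence for execution on the target. */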
2561 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
2562 {
2563 struct xtensa *xtensa = target_to_xtensa(target);
2564 int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
2565 /* Process op[] list */
2566 while (opstr && (*opstr == ':')) {
2567 uint8_t ops[32];
2568 unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
2569 if (oplen > 32) {
2570 LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)", oplen);
2571 break;
2572 }
2573 unsigned int i = 0;
2574 while ((i < oplen) && opstr && (*opstr == ':'))
2575 ops[i++] = strtoul(opstr + 1, &opstr, 16);
2576 if (i != oplen) {
2577 LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)", i);
2578 break;
2579 }
2580
2581 char insn_buf[128];
2582 sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
2583 for (i = 0; i < oplen; i++)
2584 sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
2585 LOG_TARGET_DEBUG(target, "%s", insn_buf);
2586 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
2587 status = ERROR_OK;
2588 }
2589 return status;
2590 }
2591
2592 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
2593 {
2594 struct xtensa *xtensa = target_to_xtensa(target);
2595 bool iswrite = (packet[0] == 'Q');
2596 enum xtensa_qerr_e error;
2597
2598 /* Read/write TIE register. Requires spill location.
2599 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
2600 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
2601 */
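/* For instance, a hypothetical "qxtreg10:4:3:aa:bb:cc" requests a 4-byte read of TIE register
 * 0x10 via a single 3-byte opcode sequence (all values illustrative only). */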
2602 if (!(xtensa->spill_buf)) {
2603 LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
2604 error = XT_QERR_FAIL;
2605 goto xtensa_gdbqc_qxtreg_fail;
2606 }
2607
2608 char *delim;
2609 uint32_t regnum = strtoul(packet + 6, &delim, 16);
2610 if (*delim != ':') {
2611 LOG_ERROR("Malformed qxtreg packet");
2612 error = XT_QERR_INVAL;
2613 goto xtensa_gdbqc_qxtreg_fail;
2614 }
2615 uint32_t reglen = strtoul(delim + 1, &delim, 16);
2616 if (*delim != ':') {
2617 LOG_ERROR("Malformed qxtreg packet");
2618 error = XT_QERR_INVAL;
2619 goto xtensa_gdbqc_qxtreg_fail;
2620 }
2621 uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
2622 memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
2623 LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
2624 if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
2625 LOG_ERROR("TIE register too large");
2626 error = XT_QERR_MEM;
2627 goto xtensa_gdbqc_qxtreg_fail;
2628 }
2629
2630 /* (1) Save spill memory, (1.5) [if write then store value to spill location],
2631 * (2) read old a4, (3) write spill address to a4.
2632 * NOTE: ensure a4 is restored properly by all error handling logic
2633 */
2634 unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
2635 int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
2636 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2637 if (status != ERROR_OK) {
2638 LOG_ERROR("Spill memory save");
2639 error = XT_QERR_MEM;
2640 goto xtensa_gdbqc_qxtreg_fail;
2641 }
2642 if (iswrite) {
2643 /* Extract value and store in spill memory */
2644 unsigned int b = 0;
2645 char *valbuf = strchr(delim, '=');
2646 if (!(valbuf && (*valbuf == '='))) {
2647 LOG_ERROR("Malformed Qxtreg packet");
2648 error = XT_QERR_INVAL;
2649 goto xtensa_gdbqc_qxtreg_fail;
2650 }
2651 valbuf++;
2652 while (*valbuf && *(valbuf + 1)) {
2653 char bytestr[3] = { 0, 0, 0 };
2654 strncpy(bytestr, valbuf, 2);
2655 regbuf[b++] = strtoul(bytestr, NULL, 16);
2656 valbuf += 2;
2657 }
2658 if (b != reglen) {
2659 LOG_ERROR("Malformed Qxtreg packet");
2660 error = XT_QERR_INVAL;
2661 goto xtensa_gdbqc_qxtreg_fail;
2662 }
2663 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2664 reglen / memop_size, regbuf);
2665 if (status != ERROR_OK) {
2666 LOG_ERROR("TIE value store");
2667 error = XT_QERR_MEM;
2668 goto xtensa_gdbqc_qxtreg_fail;
2669 }
2670 }
2671 xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
2672 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, xtensa->spill_loc);
2673 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2674
2675 int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
2676
2677 /* Restore a4 but not yet spill memory. Execute it all... */
2678 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, orig_a4);
2679 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2680 status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2681 if (status != ERROR_OK) {
2682 LOG_TARGET_ERROR(target, "TIE queue execute: %d", status);
2683 tieop_status = status;
2684 }
2685 status = xtensa_core_status_check(target);
2686 if (status != ERROR_OK) {
2687 LOG_TARGET_ERROR(target, "TIE instr execute: %d", status);
2688 tieop_status = status;
2689 }
2690
2691 if (tieop_status == ERROR_OK) {
2692 if (iswrite) {
2693 /* TIE write succeeded; send OK */
2694 strcpy(*response_p, "OK");
2695 } else {
2696 /* TIE read succeeded; copy result from spill memory */
2697 status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
2698 if (status != ERROR_OK) {
2699 LOG_TARGET_ERROR(target, "TIE result read");
2700 tieop_status = status;
2701 }
2702 unsigned int i;
2703 for (i = 0; i < reglen; i++)
2704 sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
2705 *(*response_p + 2 * i) = '\0';
2706 LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
2707 }
2708 }
2709
2710 /* Restore spill memory first, then report any previous errors */
2711 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2712 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2713 if (status != ERROR_OK) {
2714 LOG_ERROR("Spill memory restore");
2715 error = XT_QERR_MEM;
2716 goto xtensa_gdbqc_qxtreg_fail;
2717 }
2718 if (tieop_status != ERROR_OK) {
2719 LOG_ERROR("TIE execution");
2720 error = XT_QERR_FAIL;
2721 goto xtensa_gdbqc_qxtreg_fail;
2722 }
2723 return ERROR_OK;
2724
2725 xtensa_gdbqc_qxtreg_fail:
2726 strcpy(*response_p, xt_qerr[error].chrval);
2727 return xt_qerr[error].intval;
2728 }
2729
2730 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
2731 {
2732 struct xtensa *xtensa = target_to_xtensa(target);
2733 enum xtensa_qerr_e error;
2734 if (!packet || !response_p) {
2735 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
2736 return ERROR_FAIL;
2737 }
2738
2739 *response_p = xtensa->qpkt_resp;
2740 if (strncmp(packet, "qxtn", 4) == 0) {
2741 strcpy(*response_p, "OpenOCD");
2742 return ERROR_OK;
2743 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
2744 return ERROR_OK;
2745 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
2746 /* Confirm host cache params match core .cfg file */
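/* Packet payload is three hex fields: <line_size>,<size>,<way_count> (parsed by sscanf below) */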
2747 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
2748 &xtensa->core_config->icache : &xtensa->core_config->dcache;
2749 unsigned int line_size = 0, size = 0, way_count = 0;
2750 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
2751 if ((cachep->line_size != line_size) ||
2752 (cachep->size != size) ||
2753 (cachep->way_count != way_count)) {
2754 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2755 cachep == &xtensa->core_config->icache ? 'I' : 'D');
2756 }
2757 strcpy(*response_p, "OK");
2758 return ERROR_OK;
2759 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
2760 /* Confirm host IRAM/IROM params match core .cfg file */
2761 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
2762 &xtensa->core_config->iram : &xtensa->core_config->irom;
2763 unsigned int base = 0, size = 0, i;
2764 char *pkt = (char *)&packet[7];
2765 do {
2766 pkt++;
2767 size = strtoul(pkt, &pkt, 16);
2768 pkt++;
2769 base = strtoul(pkt, &pkt, 16);
2770 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
2771 for (i = 0; i < memp->count; i++) {
2772 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
2773 break;
2774 }
2775 if (i == memp->count) {
2776 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
2777 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
2778 break;
2779 }
2780 for (i = 0; i < 11; i++) {
2781 pkt++;
2782 strtoul(pkt, &pkt, 16);
2783 }
2784 } while (pkt && (pkt[0] == ','));
2785 strcpy(*response_p, "OK");
2786 return ERROR_OK;
2787 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
2788 /* Confirm host EXCM_LEVEL matches core .cfg file */
2789 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
2790 if (!xtensa->core_config->high_irq.enabled ||
2791 (excm_level != xtensa->core_config->high_irq.excm_level))
2792 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
2793 strcpy(*response_p, "OK");
2794 return ERROR_OK;
2795 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
2796 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
2797 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
2798 strcpy(*response_p, "OK");
2799 return ERROR_OK;
2800 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
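/* Packet format: Qxtspill=<address>:<num_bytes>, both fields in hex */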
2801 char *delim;
2802 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
2803 if (*delim != ':') {
2804 LOG_ERROR("Malformed Qxtspill packet");
2805 error = XT_QERR_INVAL;
2806 goto xtensa_gdb_query_custom_fail;
2807 }
2808 xtensa->spill_loc = spill_loc;
2809 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
2810 if (xtensa->spill_buf)
2811 free(xtensa->spill_buf);
2812 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
2813 if (!xtensa->spill_buf) {
2814 LOG_ERROR("Spill buf alloc");
2815 error = XT_QERR_MEM;
2816 goto xtensa_gdb_query_custom_fail;
2817 }
2818 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
2819 strcpy(*response_p, "OK");
2820 return ERROR_OK;
2821 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
2822 return xtensa_gdbqc_qxtreg(target, packet, response_p);
2823 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
2824 (strncmp(packet, "qxtftie", 7) == 0) ||
2825 (strncmp(packet, "qxtstie", 7) == 0)) {
2826 /* Return empty string to indicate trace, TIE wire debug are unsupported */
2827 strcpy(*response_p, "");
2828 return ERROR_OK;
2829 }
2830
2831 /* Warn for all other queries, but do not return errors */
2832 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
2833 strcpy(*response_p, "");
2834 return ERROR_OK;
2835
2836 xtensa_gdb_query_custom_fail:
2837 strcpy(*response_p, xt_qerr[error].chrval);
2838 return xt_qerr[error].intval;
2839 }
2840
2841 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2842 const struct xtensa_debug_module_config *dm_cfg)
2843 {
2844 target->arch_info = xtensa;
2845 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2846 xtensa->target = target;
2847 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2848
2849 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
2850 if (!xtensa->core_config) {
2851 LOG_ERROR("Xtensa configuration alloc failed");
2852 return ERROR_FAIL;
2853 }
2854
2855 /* Default cache settings are disabled with 1 way */
2856 xtensa->core_config->icache.way_count = 1;
2857 xtensa->core_config->dcache.way_count = 1;
2858
2859 /* chrval: AR3/AR4 register names will change with window mapping.
2860 * intval: tracks whether scratch register was set through gdb P packet.
2861 */
2862 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
2863 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
2864 if (!xtensa->scratch_ars[s].chrval) {
2865 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
2866 free(xtensa->scratch_ars[f].chrval);
2867 free(xtensa->core_config);
2868 LOG_ERROR("Xtensa scratch AR alloc failed");
2869 return ERROR_FAIL;
2870 }
2871 xtensa->scratch_ars[s].intval = false;
2872 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
2873 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
2874 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
2875 }
2876
2877 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2878 }
2879
2880 void xtensa_set_permissive_mode(struct target *target, bool state)
2881 {
2882 target_to_xtensa(target)->permissive_mode = state;
2883 }
2884
2885 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2886 {
2887 struct xtensa *xtensa = target_to_xtensa(target);
2888
2889 xtensa->come_online_probes_num = 3;
2890 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
2891 if (!xtensa->hw_brps) {
2892 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2893 return ERROR_FAIL;
2894 }
2895 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
2896 if (!xtensa->hw_wps) {
2897 free(xtensa->hw_brps);
2898 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2899 return ERROR_FAIL;
2900 }
2901 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2902 if (!xtensa->sw_brps) {
2903 free(xtensa->hw_brps);
2904 free(xtensa->hw_wps);
2905 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2906 return ERROR_FAIL;
2907 }
2908
2909 xtensa->spill_loc = 0xffffffff;
2910 xtensa->spill_bytes = 0;
2911 xtensa->spill_buf = NULL;
2912 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
2913
2914 return xtensa_build_reg_cache(target);
2915 }
2916
2917 static void xtensa_free_reg_cache(struct target *target)
2918 {
2919 struct xtensa *xtensa = target_to_xtensa(target);
2920 struct reg_cache *cache = xtensa->core_cache;
2921
2922 if (cache) {
2923 register_unlink_cache(&target->reg_cache, cache);
2924 for (unsigned int i = 0; i < cache->num_regs; i++) {
2925 free(xtensa->algo_context_backup[i]);
2926 free(cache->reg_list[i].value);
2927 }
2928 free(xtensa->algo_context_backup);
2929 free(cache->reg_list);
2930 free(cache);
2931 }
2932 xtensa->core_cache = NULL;
2933 xtensa->algo_context_backup = NULL;
2934
2935 if (xtensa->empty_regs) {
2936 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2937 free((void *)xtensa->empty_regs[i].name);
2938 free(xtensa->empty_regs[i].value);
2939 }
2940 free(xtensa->empty_regs);
2941 }
2942 xtensa->empty_regs = NULL;
2943 if (xtensa->optregs) {
2944 for (unsigned int i = 0; i < xtensa->num_optregs; i++)
2945 free((void *)xtensa->optregs[i].name);
2946 free(xtensa->optregs);
2947 }
2948 xtensa->optregs = NULL;
2949 }
2950
2951 void xtensa_target_deinit(struct target *target)
2952 {
2953 struct xtensa *xtensa = target_to_xtensa(target);
2954
2955 LOG_DEBUG("start");
2956
2957 if (target_was_examined(target)) {
2958 int ret = xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, OCDDCR_ENABLEOCD);
2959 if (ret != ERROR_OK) {
2960 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2961 return;
2962 }
2963 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2964 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2965 if (ret != ERROR_OK) {
2966 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2967 return;
2968 }
2969 xtensa_dm_deinit(&xtensa->dbg_mod);
2970 }
2971 xtensa_free_reg_cache(target);
2972 free(xtensa->hw_brps);
2973 free(xtensa->hw_wps);
2974 free(xtensa->sw_brps);
2975 if (xtensa->spill_buf) {
2976 free(xtensa->spill_buf);
2977 xtensa->spill_buf = NULL;
2978 }
2979 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
2980 free(xtensa->scratch_ars[s].chrval);
2981 free(xtensa->core_config);
2982 }
2983
2984 const char *xtensa_get_gdb_arch(struct target *target)
2985 {
2986 return "xtensa";
2987 }
2988
2989 /* exe <ascii-encoded hexadecimal instruction bytes> */
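/* Example (the byte string below is a placeholder, not a meaningful opcode):
 *   xtensa exe 112233
 * parses into the three instruction bytes { 0x11, 0x22, 0x33 } and queues them
 * for execution on the target. */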
2990 static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
2991 {
2992 struct xtensa *xtensa = target_to_xtensa(target);
2993
2994 if (CMD_ARGC != 1)
2995 return ERROR_COMMAND_SYNTAX_ERROR;
2996
2997 /* Process ascii-encoded hex byte string */
2998 const char *parm = CMD_ARGV[0];
2999 unsigned int parm_len = strlen(parm);
3000 if ((parm_len >= 64) || (parm_len & 1)) {
3001 LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
3002 return ERROR_FAIL;
3003 }
3004
3005 uint8_t ops[32];
3006 memset(ops, 0, 32);
3007 unsigned int oplen = parm_len / 2;
3008 char encoded_byte[3] = { 0, 0, 0 };
3009 for (unsigned int i = 0; i < oplen; i++) {
3010 encoded_byte[0] = *parm++;
3011 encoded_byte[1] = *parm++;
3012 ops[i] = strtoul(encoded_byte, NULL, 16);
3013 }
3014
3015 /* GDB must handle state save/restore.
3016 * Flush reg cache in case spill location is in an AR
3017 * Update CPENABLE only for this execution; later restore cached copy
3018 * Keep a copy of exccause in case executed code triggers an exception
3019 */
3020 int status = xtensa_write_dirty_registers(target);
3021 if (status != ERROR_OK) {
3022 LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
3023 return ERROR_FAIL;
3024 }
3025 xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
3026 xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
3027 xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
3028 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
3029 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3030 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
3031 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
3032 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
3033 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3034
3035 /* Queue instruction list and execute everything */
3036 LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
3037 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3038 status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
3039 if (status != ERROR_OK)
3040 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
3041 status = xtensa_core_status_check(target);
3042 if (status != ERROR_OK)
3043 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
3044
3045 /* Reread register cache and restore saved regs after instruction execution */
3046 if (xtensa_fetch_all_regs(target) != ERROR_OK)
3047 LOG_TARGET_ERROR(target, "%s: Failed to fetch register cache (post-exec).", target_name(target));
3048 xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
3049 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
3050 return status;
3051 }
3052
3053 COMMAND_HANDLER(xtensa_cmd_exe)
3054 {
3055 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3056 }
3057
3058 /* xtdef <name> */
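/* Example: "xtdef LX" selects the LX core type, the only value this handler
 * currently accepts. */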
3059 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3060 {
3061 if (CMD_ARGC != 1)
3062 return ERROR_COMMAND_SYNTAX_ERROR;
3063
3064 const char *core_name = CMD_ARGV[0];
3065 if (strcasecmp(core_name, "LX") == 0) {
3066 xtensa->core_config->core_type = XT_LX;
3067 } else {
3068 LOG_ERROR("xtdef [LX]\n");
3069 return ERROR_COMMAND_SYNTAX_ERROR;
3070 }
3071 return ERROR_OK;
3072 }
3073
3074 COMMAND_HANDLER(xtensa_cmd_xtdef)
3075 {
3076 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3077 target_to_xtensa(get_current_target(CMD_CTX)));
3078 }
3079
3080 static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
3081 {
3082 if ((val < min) || (val > max)) {
3083 LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
3084 return false;
3085 }
3086 return true;
3087 }
3088
3089 /* xtopt <name> <value> */
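/* Example settings (values are illustrative, not taken from any particular core):
 *   xtopt arnum 64
 *   xtopt windowed 1
 *   xtopt debuglevel 6
 */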
3090 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3091 {
3092 if (CMD_ARGC != 2)
3093 return ERROR_COMMAND_SYNTAX_ERROR;
3094
3095 const char *opt_name = CMD_ARGV[0];
3096 int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3097 if (strcasecmp(opt_name, "arnum") == 0) {
3098 if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3099 return ERROR_COMMAND_ARGUMENT_INVALID;
3100 xtensa->core_config->aregs_num = opt_val;
3101 } else if (strcasecmp(opt_name, "windowed") == 0) {
3102 if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3103 return ERROR_COMMAND_ARGUMENT_INVALID;
3104 xtensa->core_config->windowed = opt_val;
3105 } else if (strcasecmp(opt_name, "cpenable") == 0) {
3106 if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3107 return ERROR_COMMAND_ARGUMENT_INVALID;
3108 xtensa->core_config->coproc = opt_val;
3109 } else if (strcasecmp(opt_name, "exceptions") == 0) {
3110 if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3111 return ERROR_COMMAND_ARGUMENT_INVALID;
3112 xtensa->core_config->exceptions = opt_val;
3113 } else if (strcasecmp(opt_name, "intnum") == 0) {
3114 if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3115 return ERROR_COMMAND_ARGUMENT_INVALID;
3116 xtensa->core_config->irq.enabled = (opt_val > 0);
3117 xtensa->core_config->irq.irq_num = opt_val;
3118 } else if (strcasecmp(opt_name, "hipriints") == 0) {
3119 if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3120 return ERROR_COMMAND_ARGUMENT_INVALID;
3121 xtensa->core_config->high_irq.enabled = opt_val;
3122 } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3123 if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3124 return ERROR_COMMAND_ARGUMENT_INVALID;
3125 if (!xtensa->core_config->high_irq.enabled) {
3126 LOG_ERROR("xtopt excmlevel requires hipriints\n");
3127 return ERROR_COMMAND_ARGUMENT_INVALID;
3128 }
3129 xtensa->core_config->high_irq.excm_level = opt_val;
3130 } else if (strcasecmp(opt_name, "intlevels") == 0) {
3131 if (xtensa->core_config->core_type == XT_LX) {
3132 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3133 return ERROR_COMMAND_ARGUMENT_INVALID;
3134 } else {
3135 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3136 return ERROR_COMMAND_ARGUMENT_INVALID;
3137 }
3138 if (!xtensa->core_config->high_irq.enabled) {
3139 LOG_ERROR("xtopt intlevels requires hipriints\n");
3140 return ERROR_COMMAND_ARGUMENT_INVALID;
3141 }
3142 xtensa->core_config->high_irq.level_num = opt_val;
3143 } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3144 if (xtensa->core_config->core_type == XT_LX) {
3145 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3146 return ERROR_COMMAND_ARGUMENT_INVALID;
3147 } else {
3148 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3149 return ERROR_COMMAND_ARGUMENT_INVALID;
3150 }
3151 xtensa->core_config->debug.enabled = 1;
3152 xtensa->core_config->debug.irq_level = opt_val;
3153 } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3154 if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3155 return ERROR_COMMAND_ARGUMENT_INVALID;
3156 xtensa->core_config->debug.ibreaks_num = opt_val;
3157 } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3158 if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3159 return ERROR_COMMAND_ARGUMENT_INVALID;
3160 xtensa->core_config->debug.dbreaks_num = opt_val;
3161 } else if (strcasecmp(opt_name, "tracemem") == 0) {
3162 if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3163 return ERROR_COMMAND_ARGUMENT_INVALID;
3164 xtensa->core_config->trace.mem_sz = opt_val;
3165 xtensa->core_config->trace.enabled = (opt_val > 0);
3166 } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3167 if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3168 return ERROR_COMMAND_ARGUMENT_INVALID;
3169 xtensa->core_config->trace.reversed_mem_access = opt_val;
3170 } else if (strcasecmp(opt_name, "perfcount") == 0) {
3171 if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3172 return ERROR_COMMAND_ARGUMENT_INVALID;
3173 xtensa->core_config->debug.perfcount_num = opt_val;
3174 } else {
3175 LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3176 return ERROR_OK;
3177 }
3178
3179 return ERROR_OK;
3180 }
3181
3182 COMMAND_HANDLER(xtensa_cmd_xtopt)
3183 {
3184 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3185 target_to_xtensa(get_current_target(CMD_CTX)));
3186 }
3187
3188 /* xtmem <type> [parameters] */
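/* Examples (addresses and sizes are illustrative only):
 *   xtmem icache 32 16384 2          <linebytes> <cachebytes> <ways>
 *   xtmem dcache 32 16384 2 1        trailing 1 enables writeback
 *   xtmem iram 0x40000000 0x20000    <baseaddr> <bytes>
 */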
3189 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3190 {
3191 struct xtensa_cache_config *cachep = NULL;
3192 struct xtensa_local_mem_config *memp = NULL;
3193 int mem_access = 0;
3194 bool is_dcache = false;
3195
3196 if (CMD_ARGC == 0) {
3197 LOG_ERROR("xtmem <type> [parameters]\n");
3198 return ERROR_COMMAND_SYNTAX_ERROR;
3199 }
3200
3201 const char *mem_name = CMD_ARGV[0];
3202 if (strcasecmp(mem_name, "icache") == 0) {
3203 cachep = &xtensa->core_config->icache;
3204 } else if (strcasecmp(mem_name, "dcache") == 0) {
3205 cachep = &xtensa->core_config->dcache;
3206 is_dcache = true;
3207 } else if (strcasecmp(mem_name, "l2cache") == 0) {
3208 /* TODO: support L2 cache */
3209 } else if (strcasecmp(mem_name, "l2addr") == 0) {
3210 /* TODO: support L2 cache */
3211 } else if (strcasecmp(mem_name, "iram") == 0) {
3212 memp = &xtensa->core_config->iram;
3213 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3214 } else if (strcasecmp(mem_name, "dram") == 0) {
3215 memp = &xtensa->core_config->dram;
3216 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3217 } else if (strcasecmp(mem_name, "sram") == 0) {
3218 memp = &xtensa->core_config->sram;
3219 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3220 } else if (strcasecmp(mem_name, "irom") == 0) {
3221 memp = &xtensa->core_config->irom;
3222 mem_access = XT_MEM_ACCESS_READ;
3223 } else if (strcasecmp(mem_name, "drom") == 0) {
3224 memp = &xtensa->core_config->drom;
3225 mem_access = XT_MEM_ACCESS_READ;
3226 } else if (strcasecmp(mem_name, "srom") == 0) {
3227 memp = &xtensa->core_config->srom;
3228 mem_access = XT_MEM_ACCESS_READ;
3229 } else {
3230 LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3231 return ERROR_COMMAND_ARGUMENT_INVALID;
3232 }
3233
3234 if (cachep) {
3235 if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
3236 LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
3237 return ERROR_COMMAND_SYNTAX_ERROR;
3238 }
3239 cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3240 cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3241 cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3242 cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3243 strtoul(CMD_ARGV[4], NULL, 0) : 0;
3244 } else if (memp) {
3245 if (CMD_ARGC != 3) {
3246 LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
3247 return ERROR_COMMAND_SYNTAX_ERROR;
3248 }
3249 struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3250 memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3251 memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3252 memcfgp->access = mem_access;
3253 memp->count++;
3254 }
3255
3256 return ERROR_OK;
3257 }
3258
3259 COMMAND_HANDLER(xtensa_cmd_xtmem)
3260 {
3261 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3262 target_to_xtensa(get_current_target(CMD_CTX)));
3263 }
3264
3265 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
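/* Example (illustrative): "xtmpu 8 32 1 0" declares 8 foreground segments,
 * a 32-byte minimum segment size, lockable segments, and no execute-only support. */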
3266 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3267 {
3268 if (CMD_ARGC != 4) {
3269 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3270 return ERROR_COMMAND_SYNTAX_ERROR;
3271 }
3272
3273 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3274 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3275 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3276 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3277
3278 if ((nfgseg > 32)) {
3279 LOG_ERROR("<nfgseg> must be within [0..32]\n");
3280 return ERROR_COMMAND_ARGUMENT_INVALID;
3281 } else if (minsegsize & (minsegsize - 1)) {
3282 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3283 return ERROR_COMMAND_ARGUMENT_INVALID;
3284 } else if (lockable > 1) {
3285 LOG_ERROR("<lockable> must be 0 or 1\n");
3286 return ERROR_COMMAND_ARGUMENT_INVALID;
3287 } else if (execonly > 1) {
3288 LOG_ERROR("<execonly> must be 0 or 1\n");
3289 return ERROR_COMMAND_ARGUMENT_INVALID;
3290 }
3291
3292 xtensa->core_config->mpu.enabled = true;
3293 xtensa->core_config->mpu.nfgseg = nfgseg;
3294 xtensa->core_config->mpu.minsegsize = minsegsize;
3295 xtensa->core_config->mpu.lockable = lockable;
3296 xtensa->core_config->mpu.execonly = execonly;
3297 return ERROR_OK;
3298 }
3299
3300 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3301 {
3302 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3303 target_to_xtensa(get_current_target(CMD_CTX)));
3304 }
3305
3306 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
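/* Example (illustrative): "xtmmu 16 32" declares 16 ITLB and 32 DTLB auto-refill
 * entries; note the handler below accepts exactly two arguments. */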
3307 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3308 {
3309 if (CMD_ARGC != 2) {
3310 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3311 return ERROR_COMMAND_SYNTAX_ERROR;
3312 }
3313
3314 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3315 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3316 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3317 LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3318 return ERROR_COMMAND_ARGUMENT_INVALID;
3319 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3320 LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3321 return ERROR_COMMAND_ARGUMENT_INVALID;
3322 }
3323
3324 xtensa->core_config->mmu.enabled = true;
3325 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3326 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3327 return ERROR_OK;
3328 }
3329
3330 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3331 {
3332 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3333 target_to_xtensa(get_current_target(CMD_CTX)));
3334 }
3335
3336 /* xtregs <numregs>
3337 * xtreg <regname> <regnum> */
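/* Example (register count and numbers are illustrative):
 *   xtregs 176
 *   xtreg pc 0x0020
 */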
3338 COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
3339 {
3340 if (CMD_ARGC == 1) {
3341 int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
3342 if ((numregs <= 0) || (numregs > UINT16_MAX)) {
3343 LOG_ERROR("xtregs <numregs>: Invalid 'numregs' (%d)", numregs);
3344 return ERROR_COMMAND_SYNTAX_ERROR;
3345 }
3346 if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
3347 LOG_ERROR("xtregs (%d) must be at least numgenregs (%d) (if xtregfmt specified)",
3348 numregs, xtensa->genpkt_regs_num);
3349 return ERROR_COMMAND_SYNTAX_ERROR;
3350 }
3351 xtensa->total_regs_num = numregs;
3352 xtensa->core_regs_num = 0;
3353 xtensa->num_optregs = 0;
3354 /* A little more memory than required, but saves a second initialization pass */
3355 xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
3356 if (!xtensa->optregs) {
3357 LOG_ERROR("Failed to allocate xtensa->optregs!");
3358 return ERROR_FAIL;
3359 }
3360 return ERROR_OK;
3361 } else if (CMD_ARGC != 2) {
3362 return ERROR_COMMAND_SYNTAX_ERROR;
3363 }
3364
3365 /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3366 * if general register (g-packet) requests or contiguous register maps are supported */
3367 if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
3368 xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
3369 if (!xtensa->contiguous_regs_desc) {
3370 LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3371 return ERROR_FAIL;
3372 }
3373 }
3374
3375 const char *regname = CMD_ARGV[0];
3376 unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
3377 if (regnum > UINT16_MAX) {
3378 LOG_ERROR("<regnum> must be a 16-bit number");
3379 return ERROR_COMMAND_ARGUMENT_INVALID;
3380 }
3381
3382 if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
3383 if (xtensa->total_regs_num)
3384 LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3385 regname, regnum,
3386 xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
3387 else
3388 LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
3389 regname, regnum);
3390 return ERROR_FAIL;
3391 }
3392
3393 /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3394 struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
3395 bool is_extended_reg = true;
3396 unsigned int ridx;
3397 for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
3398 if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
3399 /* Flag core register as defined */
3400 rptr = &xtensa_regs[ridx];
3401 xtensa->core_regs_num++;
3402 is_extended_reg = false;
3403 break;
3404 }
3405 }
3406
3407 rptr->exist = true;
3408 if (is_extended_reg) {
3409 /* Register ID, debugger-visible register ID */
3410 rptr->name = strdup(CMD_ARGV[0]);
3411 rptr->dbreg_num = regnum;
3412 rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
3413 xtensa->num_optregs++;
3414
3415 /* Register type */
3416 if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
3417 rptr->type = XT_REG_GENERAL;
3418 } else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
3419 rptr->type = XT_REG_USER;
3420 } else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
3421 rptr->type = XT_REG_FR;
3422 } else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
3423 rptr->type = XT_REG_SPECIAL;
3424 } else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
3425 /* WARNING: For these registers, regnum points to the
3426 * index of the corresponding ARx registers, NOT to
3427 * the processor register number! */
3428 rptr->type = XT_REG_RELGEN;
3429 rptr->reg_num += XT_REG_IDX_ARFIRST;
3430 rptr->dbreg_num += XT_REG_IDX_ARFIRST;
3431 } else if ((regnum & XT_REG_TIE_MASK) != 0) {
3432 rptr->type = XT_REG_TIE;
3433 } else {
3434 rptr->type = XT_REG_OTHER;
3435 }
3436
3437 /* Register flags */
3438 if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
3439 (strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
3440 (strcmp(rptr->name, "intclear") == 0))
3441 rptr->flags = XT_REGF_NOREAD;
3442 else
3443 rptr->flags = 0;
3444
3445 if (rptr->reg_num == (XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level) &&
3446 xtensa->core_config->core_type == XT_LX && rptr->type == XT_REG_SPECIAL) {
3447 xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
3448 LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
3449 }
3450 } else if (strcmp(rptr->name, "cpenable") == 0) {
3451 xtensa->core_config->coproc = true;
3452 }
3453
3454 /* Build out list of contiguous registers in specified order */
3455 unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
3456 if (xtensa->contiguous_regs_desc) {
3457 assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
3458 xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
3459 }
3460 if (xtensa_extra_debug_log)
3461 LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
3462 is_extended_reg ? "config-specific" : "core",
3463 rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
3464 is_extended_reg ? xtensa->num_optregs : ridx,
3465 is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
3466 return ERROR_OK;
3467 }
3468
3469 COMMAND_HANDLER(xtensa_cmd_xtreg)
3470 {
3471 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3472 target_to_xtensa(get_current_target(CMD_CTX)));
3473 }
3474
3475 /* xtregfmt <contiguous|sparse> [numgregs] */
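/* Example: "xtregfmt contiguous 105" selects a contiguous register map with
 * 105 registers reported in gdb g-packets (the count is illustrative). */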
3476 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3477 {
3478 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3479 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3480 return ERROR_OK;
3481 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3482 xtensa->regmap_contiguous = true;
3483 if (CMD_ARGC == 2) {
3484 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3485 if ((numgregs <= 0) ||
3486 ((numgregs > xtensa->total_regs_num) &&
3487 (xtensa->total_regs_num > 0))) {
3488 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3489 numgregs, xtensa->total_regs_num);
3490 return ERROR_COMMAND_SYNTAX_ERROR;
3491 }
3492 xtensa->genpkt_regs_num = numgregs;
3493 }
3494 return ERROR_OK;
3495 }
3496 }
3497 return ERROR_COMMAND_SYNTAX_ERROR;
3498 }
3499
3500 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3501 {
3502 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3503 target_to_xtensa(get_current_target(CMD_CTX)));
3504 }
3505
3506 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
3507 {
3508 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
3509 &xtensa->permissive_mode, "xtensa permissive mode");
3510 }
3511
3512 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3513 {
3514 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3515 target_to_xtensa(get_current_target(CMD_CTX)));
3516 }
3517
3518 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
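/* Example (event select values are core-specific and illustrative here):
 *   xtensa perfmon_enable 0 0
 * enables counter 0 with event select 0; mask, kernelcnt and tracelevel keep
 * the defaults from the config struct below. */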
3519 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3520 {
3521 struct xtensa_perfmon_config config = {
3522 .mask = 0xffff,
3523 .kernelcnt = 0,
3524 .tracelevel = -1 /* use DEBUGLEVEL by default */
3525 };
3526
3527 if (CMD_ARGC < 2 || CMD_ARGC > 6)
3528 return ERROR_COMMAND_SYNTAX_ERROR;
3529
3530 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3531 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3532 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3533 return ERROR_COMMAND_ARGUMENT_INVALID;
3534 }
3535
3536 config.select = strtoul(CMD_ARGV[1], NULL, 0);
3537 if (config.select > XTENSA_MAX_PERF_SELECT) {
3538 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3539 return ERROR_COMMAND_ARGUMENT_INVALID;
3540 }
3541
3542 if (CMD_ARGC >= 3) {
3543 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3544 if (config.mask > XTENSA_MAX_PERF_MASK) {
3545 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3546 return ERROR_COMMAND_ARGUMENT_INVALID;
3547 }
3548 }
3549
3550 if (CMD_ARGC >= 4) {
3551 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3552 if (config.kernelcnt > 1) {
3553 command_print(CMD, "kernelcnt should be 0 or 1");
3554 return ERROR_COMMAND_ARGUMENT_INVALID;
3555 }
3556 }
3557
3558 if (CMD_ARGC >= 5) {
3559 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3560 if (config.tracelevel > 7) {
3561 command_print(CMD, "tracelevel should be <=7");
3562 return ERROR_COMMAND_ARGUMENT_INVALID;
3563 }
3564 }
3565
3566 if (config.tracelevel == -1)
3567 config.tracelevel = xtensa->core_config->debug.irq_level;
3568
3569 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3570 }
3571
3572 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3573 {
3574 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3575 target_to_xtensa(get_current_target(CMD_CTX)));
3576 }
3577
3578 /* perfmon_dump [counter_id] */
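/* Example: "xtensa perfmon_dump" prints all counters;
 * "xtensa perfmon_dump 0" prints only counter 0. */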
3579 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3580 {
3581 if (CMD_ARGC > 1)
3582 return ERROR_COMMAND_SYNTAX_ERROR;
3583
3584 int counter_id = -1;
3585 if (CMD_ARGC == 1) {
3586 counter_id = strtol(CMD_ARGV[0], NULL, 0);
3587 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3588 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3589 return ERROR_COMMAND_ARGUMENT_INVALID;
3590 }
3591 }
3592
3593 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3594 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3595 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3596 char result_buf[128] = { 0 };
3597 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3598 struct xtensa_perfmon_result result;
3599 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3600 if (res != ERROR_OK)
3601 return res;
3602 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3603 "%-12" PRIu64 "%s",
3604 result.value,
3605 result.overflow ? " (overflow)" : "");
3606 LOG_INFO("%s", result_buf);
3607 }
3608
3609 return ERROR_OK;
3610 }
3611
3612 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3613 {
3614 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3615 target_to_xtensa(get_current_target(CMD_CTX)));
3616 }
3617
3618 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3619 {
3620 int state = -1;
3621
3622 if (CMD_ARGC < 1) {
3623 const char *st;
3624 state = xtensa->stepping_isr_mode;
3625 if (state == XT_STEPPING_ISR_ON)
3626 st = "OFF";
3627 else if (state == XT_STEPPING_ISR_OFF)
3628 st = "ON";
3629 else
3630 st = "UNKNOWN";
3631 command_print(CMD, "Current ISR step mode: %s", st);
3632 return ERROR_OK;
3633 }
3634 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
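/* e.g. "xtensa maskisr on" selects XT_STEPPING_ISR_OFF below, i.e. interrupts
 * are masked (not serviced) while single-stepping */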
3635 if (!strcasecmp(CMD_ARGV[0], "off"))
3636 state = XT_STEPPING_ISR_ON;
3637 else if (!strcasecmp(CMD_ARGV[0], "on"))
3638 state = XT_STEPPING_ISR_OFF;
3639
3640 if (state == -1) {
3641 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3642 return ERROR_FAIL;
3643 }
3644 xtensa->stepping_isr_mode = state;
3645 return ERROR_OK;
3646 }
3647
3648 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3649 {
3650 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3651 target_to_xtensa(get_current_target(CMD_CTX)));
3652 }
3653
3654 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3655 {
3656 int res;
3657 uint32_t val = 0;
3658
3659 if (CMD_ARGC >= 1) {
3660 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3661 if (!strcasecmp(CMD_ARGV[0], "none")) {
3662 val = 0;
3663 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3664 val |= OCDDCR_BREAKINEN;
3665 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3666 val |= OCDDCR_BREAKOUTEN;
3667 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3668 val |= OCDDCR_RUNSTALLINEN;
3669 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3670 val |= OCDDCR_DEBUGMODEOUTEN;
3671 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3672 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3673 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3674 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
3675 } else {
3676 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3677 command_print(
3678 CMD,
3679 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3680 return ERROR_OK;
3681 }
3682 }
3683 res = xtensa_smpbreak_set(target, val);
3684 if (res != ERROR_OK)
3685 command_print(CMD, "Failed to set smpbreak config %d", res);
3686 } else {
3687 struct xtensa *xtensa = target_to_xtensa(target);
3688 res = xtensa_smpbreak_read(xtensa, &val);
3689 if (res == ERROR_OK)
3690 command_print(CMD, "Current bits set:%s%s%s%s",
3691 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3692 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3693 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3694 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3695 );
3696 else
3697 command_print(CMD, "Failed to get smpbreak config %d", res);
3698 }
3699 return res;
3700 }
3701
3702 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3703 {
3704 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3705 get_current_target(CMD_CTX));
3706 }
3707
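/* tracestart [pc <pcval>[/<maskbitcount>]] [after <n> [ins|words]]
 * Example (address is illustrative): "xtensa tracestart pc 0x40001234 after 256 words"
 * arms a stop trigger on PC 0x40001234 and keeps capturing 256 more words after
 * the trigger hits. */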
3708 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
3709 {
3710 struct xtensa_trace_status trace_status;
3711 struct xtensa_trace_start_config cfg = {
3712 .stoppc = 0,
3713 .stopmask = XTENSA_STOPMASK_DISABLED,
3714 .after = 0,
3715 .after_is_words = false
3716 };
3717
3718 /* Parse arguments */
3719 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3720 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
3721 char *e;
3722 i++;
3723 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
3724 cfg.stopmask = 0;
3725 if (*e == '/')
3726 cfg.stopmask = strtol(e + 1, NULL, 0);
3727 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
3728 i++;
3729 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
3730 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
3731 cfg.after_is_words = 0;
3732 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
3733 cfg.after_is_words = 1;
3734 } else {
3735 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
3736 return ERROR_FAIL;
3737 }
3738 }
3739
3740 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3741 if (res != ERROR_OK)
3742 return res;
3743 if (trace_status.stat & TRAXSTAT_TRACT) {
3744 LOG_WARNING("Stopping active trace before starting a new one");
3745 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
3746 if (res != ERROR_OK)
3747 return res;
3748 }
3749
3750 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
3751 if (res != ERROR_OK)
3752 return res;
3753
3754 xtensa->trace_active = true;
3755 command_print(CMD, "Trace started.");
3756 return ERROR_OK;
3757 }
3758
3759 COMMAND_HANDLER(xtensa_cmd_tracestart)
3760 {
3761 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
3762 target_to_xtensa(get_current_target(CMD_CTX)));
3763 }
3764
3765 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
3766 {
3767 struct xtensa_trace_status trace_status;
3768
3769 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3770 if (res != ERROR_OK)
3771 return res;
3772
3773 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
3774 command_print(CMD, "No trace is currently active.");
3775 return ERROR_FAIL;
3776 }
3777
3778 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
3779 if (res != ERROR_OK)
3780 return res;
3781
3782 xtensa->trace_active = false;
3783 command_print(CMD, "Trace stop triggered.");
3784 return ERROR_OK;
3785 }
3786
3787 COMMAND_HANDLER(xtensa_cmd_tracestop)
3788 {
3789 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
3790 target_to_xtensa(get_current_target(CMD_CTX)));
3791 }
3792
3793 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
3794 {
3795 struct xtensa_trace_config trace_config;
3796 struct xtensa_trace_status trace_status;
3797 uint32_t memsz, wmem;
3798
3799 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3800 if (res != ERROR_OK)
3801 return res;
3802
3803 if (trace_status.stat & TRAXSTAT_TRACT) {
3804 command_print(CMD, "Tracing is still active. Please stop it first.");
3805 return ERROR_FAIL;
3806 }
3807
3808 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
3809 if (res != ERROR_OK)
3810 return res;
3811
3812 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
3813 command_print(CMD, "No active trace found; nothing to dump.");
3814 return ERROR_FAIL;
3815 }
3816
3817 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
3818 LOG_INFO("Total trace memory: %d words", memsz);
3819 if ((trace_config.addr &
3820 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
3821 /* Memory hasn't overwritten itself yet. */
3822 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
3823 LOG_INFO("...but trace is only %d words", wmem);
3824 if (wmem < memsz)
3825 memsz = wmem;
3826 } else {
3827 if (trace_config.addr & TRAXADDR_TWSAT) {
3828 LOG_INFO("Real trace is many times longer than that (overflow)");
3829 } else {
3830 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
3831 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
3832 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
3833 }
3834 }
3835
3836 uint8_t *tracemem = malloc(memsz * 4);
3837 if (!tracemem) {
3838 command_print(CMD, "Failed to alloc memory for trace data!");
3839 return ERROR_FAIL;
3840 }
3841 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
3842 if (res != ERROR_OK) {
3843 free(tracemem);
3844 return res;
3845 }
3846
3847 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
3848 if (f < 0) {
3849 free(tracemem);
3850 command_print(CMD, "Unable to open file %s", fname);
3851 return ERROR_FAIL;
3852 }
3853 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
3854 command_print(CMD, "Unable to write to file %s", fname);
3855 else
3856 command_print(CMD, "Wrote %d bytes of trace data to %s", memsz * 4, fname);
3857 close(f);
3858
3859 bool is_all_zeroes = true;
3860 for (unsigned int i = 0; i < memsz * 4; i++) {
3861 if (tracemem[i] != 0) {
3862 is_all_zeroes = false;
3863 break;
3864 }
3865 }
3866 free(tracemem);
3867 if (is_all_zeroes)
3868 command_print(
3869 CMD,
3870 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
3871
3872 return ERROR_OK;
3873 }
3874
3875 COMMAND_HANDLER(xtensa_cmd_tracedump)
3876 {
3877 if (CMD_ARGC != 1) {
3878 command_print(CMD, "Command takes exactly 1 parameter. Need filename to dump to as output!");
3879 return ERROR_FAIL;
3880 }
3881
3882 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
3883 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
3884 }
3885
3886 static const struct command_registration xtensa_any_command_handlers[] = {
3887 {
3888 .name = "xtdef",
3889 .handler = xtensa_cmd_xtdef,
3890 .mode = COMMAND_CONFIG,
3891 .help = "Configure Xtensa core type",
3892 .usage = "<type>",
3893 },
3894 {
3895 .name = "xtopt",
3896 .handler = xtensa_cmd_xtopt,
3897 .mode = COMMAND_CONFIG,
3898 .help = "Configure Xtensa core option",
3899 .usage = "<name> <value>",
3900 },
3901 {
3902 .name = "xtmem",
3903 .handler = xtensa_cmd_xtmem,
3904 .mode = COMMAND_CONFIG,
3905 .help = "Configure Xtensa memory/cache option",
3906 .usage = "<type> [parameters]",
3907 },
3908 {
3909 .name = "xtmmu",
3910 .handler = xtensa_cmd_xtmmu,
3911 .mode = COMMAND_CONFIG,
3912 .help = "Configure Xtensa MMU option",
3913 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
3914 },
3915 {
3916 .name = "xtmpu",
3917 .handler = xtensa_cmd_xtmpu,
3918 .mode = COMMAND_CONFIG,
3919 .help = "Configure Xtensa MPU option",
3920 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
3921 },
3922 {
3923 .name = "xtreg",
3924 .handler = xtensa_cmd_xtreg,
3925 .mode = COMMAND_CONFIG,
3926 .help = "Configure Xtensa register",
3927 .usage = "<regname> <regnum>",
3928 },
3929 {
3930 .name = "xtregs",
3931 .handler = xtensa_cmd_xtreg,
3932 .mode = COMMAND_CONFIG,
3933 .help = "Configure number of Xtensa registers",
3934 .usage = "<numregs>",
3935 },
3936 {
3937 .name = "xtregfmt",
3938 .handler = xtensa_cmd_xtregfmt,
3939 .mode = COMMAND_CONFIG,
3940 .help = "Configure format of Xtensa register map",
3941 .usage = "<contiguous|sparse> [numgregs]",
3942 },
3943 {
3944 .name = "set_permissive",
3945 .handler = xtensa_cmd_permissive_mode,
3946 .mode = COMMAND_ANY,
3947 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
3948 .usage = "[0|1]",
3949 },
3950 {
3951 .name = "maskisr",
3952 .handler = xtensa_cmd_mask_interrupts,
3953 .mode = COMMAND_ANY,
3954 .help = "mask Xtensa interrupts at step",
3955 .usage = "['on'|'off']",
3956 },
3957 {
3958 .name = "smpbreak",
3959 .handler = xtensa_cmd_smpbreak,
3960 .mode = COMMAND_ANY,
3961 .help = "Set the way the CPU chains OCD breaks",
3962 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
3963 },
3964 {
3965 .name = "perfmon_enable",
3966 .handler = xtensa_cmd_perfmon_enable,
3967 .mode = COMMAND_EXEC,
3968 .help = "Enable and start performance counter",
3969 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
3970 },
3971 {
3972 .name = "perfmon_dump",
3973 .handler = xtensa_cmd_perfmon_dump,
3974 .mode = COMMAND_EXEC,
3975 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
3976 .usage = "[counter_id]",
3977 },
3978 {
3979 .name = "tracestart",
3980 .handler = xtensa_cmd_tracestart,
3981 .mode = COMMAND_EXEC,
3982 .help =
3983 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
3984 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
3985 },
3986 {
3987 .name = "tracestop",
3988 .handler = xtensa_cmd_tracestop,
3989 .mode = COMMAND_EXEC,
3990 .help = "Tracing: Stop current trace as started by the tracestart command",
3991 .usage = "",
3992 },
3993 {
3994 .name = "tracedump",
3995 .handler = xtensa_cmd_tracedump,
3996 .mode = COMMAND_EXEC,
3997 .help = "Tracing: Dump trace memory to a file. One file per core.",
3998 .usage = "<outfile>",
3999 },
4000 {
4001 .name = "exe",
4002 .handler = xtensa_cmd_exe,
4003 .mode = COMMAND_ANY,
4004 .help = "Xtensa stub execution",
4005 .usage = "<ascii-encoded hexadecimal instruction bytes>",
4006 },
4007 COMMAND_REGISTRATION_DONE
4008 };
4009
4010 const struct command_registration xtensa_command_handlers[] = {
4011 {
4012 .name = "xtensa",
4013 .mode = COMMAND_ANY,
4014 .help = "Xtensa command group",
4015 .usage = "",
4016 .chain = xtensa_any_command_handlers,
4017 },
4018 COMMAND_REGISTRATION_DONE
4019 };
