target: add generic Xtensa LX support
[openocd.git] / src / target / xtensa / xtensa.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
22
23 /* Nibble (4-bit) swap macros for Xtensa opcodes and opcode fields */
24 #define XT_NIBSWAP8(V) \
25 ((((V) & 0x0F) << 4) \
26 | (((V) & 0xF0) >> 4))
27
28 #define XT_NIBSWAP16(V) \
29 ((((V) & 0x000F) << 12) \
30 | (((V) & 0x00F0) << 4) \
31 | (((V) & 0x0F00) >> 4) \
32 | (((V) & 0xF000) >> 12))
33
34 #define XT_NIBSWAP24(V) \
35 ((((V) & 0x00000F) << 20) \
36 | (((V) & 0x0000F0) << 12) \
37 | (((V) & 0x000F00) << 4) \
38 | (((V) & 0x00F000) >> 4) \
39 | (((V) & 0x0F0000) >> 12) \
40 | (((V) & 0xF00000) >> 20))
41
42 /* _XT_INS_FORMAT_*()
43 * Instruction formatting converted from little-endian inputs
44 * and shifted to the MSB-side of DIR for BE systems.
45 */
46 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
47 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
48 | (((T) & 0x0F) << 16) \
49 | (((SR) & 0xFF) << 8)) << 8 \
50 : (OPCODE) \
51 | (((SR) & 0xFF) << 8) \
52 | (((T) & 0x0F) << 4))
53
54 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
55 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
56 | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
57 | (((R) & 0x0F) << 8)) << 8 \
58 : (OPCODE) \
59 | (((ST) & 0xFF) << 4) \
60 | (((R) & 0x0F) << 12))
61
62 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
63 (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
64 | (((T) & 0x0F) << 8) \
65 | (((S) & 0x0F) << 4) \
66 | ((IMM4) & 0x0F)) << 16 \
67 : (OPCODE) \
68 | (((T) & 0x0F) << 4) \
69 | (((S) & 0x0F) << 8) \
70 | (((IMM4) & 0x0F) << 12))
71
72 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
73 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
74 | (((T) & 0x0F) << 16) \
75 | (((S) & 0x0F) << 12) \
76 | (((R) & 0x0F) << 8) \
77 | ((IMM8) & 0xFF)) << 8 \
78 : (OPCODE) \
79 | (((IMM8) & 0xFF) << 16) \
80 | (((R) & 0x0F) << 12) \
81 | (((S) & 0x0F) << 8) \
82 | (((T) & 0x0F) << 4))
83
84 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
85 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
86 | (((T) & 0x0F) << 16) \
87 | (((S) & 0x0F) << 12) \
88 | (((R) & 0x0F) << 8)) << 8 \
89 | ((IMM4) & 0x0F) \
90 : (OPCODE) \
91 | (((IMM4) & 0x0F) << 20) \
92 | (((R) & 0x0F) << 12) \
93 | (((S) & 0x0F) << 8) \
94 | (((T) & 0x0F) << 4))
95
96 /* Xtensa processor instruction opcodes
97 */
98 /* "Return From Debug Operation" to Normal */
99 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
100 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
101 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
102
103 /* Load to DDR register, increase addr register */
104 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
105 /* Store from DDR register, increase addr register */
106 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
107
108 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
109 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
110 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
111 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
112 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
113 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
114
115 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
116 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
117 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
118 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
119 /* Store 8-bit to A(S)+IMM8 from A(T) */
120 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
121
122 /* Cache Instructions */
123 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
124 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
125 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
126 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
127
128 /* Control Instructions */
129 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
130 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
131
132 /* Read Special Register */
133 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
134 /* Write Special Register */
135 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
136 /* Swap Special Register */
137 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
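/* Example (little-endian): XT_INS_RSR(X, XT_SR_DDR, XT_REG_A3), with DDR = 0x68 and AR3 = 3,
 * expands to 0x030000 | (0x68 << 8) | (3 << 4) = 0x036830, i.e. "rsr a3, ddr". */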
138
139 /* Rotate Window by (-8..7) */
140 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
141
142 /* Read User Register */
143 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
144 /* Write User Register */
145 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
146
147 /* Read Floating-Point Register */
148 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
149 /* Write Floating-Point Register */
150 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
151
152 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
153 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
154 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
155
156 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
157 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
158 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
159
160 #define XT_WATCHPOINTS_NUM_MAX 2
161
162 /* Register-number shortcuts for the DDR, PS and WINDOWBASE special registers
163  * and for the A3/A4 scratch registers. These get used a lot, so shortcuts are useful.
164 */
165 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
166 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
167 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
168 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
169 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
170
171 #define XT_PS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
172 #define XT_PC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
173 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
174 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
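/* Illustration: with a debug interrupt level of 6, the halted PC is accessed as
 * EPC6 (0xb0 + 6 = 0xb6) and the halted PS as EPS6 (0xc0 + 6 = 0xc6). */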
175
176 #define XT_SW_BREAKPOINTS_MAX_NUM 32
177 #define XT_HW_IBREAK_MAX_NUM 2
178 #define XT_HW_DBREAK_MAX_NUM 2
179
180 struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
181 XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
182 XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
183 XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
184 XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
185 XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
186 XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
187 XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
188 XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
189 XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
190 XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
191 XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
192 XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
193 XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
194 XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
195 XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
196 XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
197 XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
198 XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
199 XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
200 XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
201 XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
202 XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
203 XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
204 XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
205 XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
206 XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
207 XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
208 XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
209 XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
210 XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
211 XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
212 XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
213 XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
214 XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
215 XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
216 XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
217 XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
218 XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
219 XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
220 XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
221 XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
222 XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
223 XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
224 XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
225 XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
226 XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
227 XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
228 XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
229 XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
230 XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
231 XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
232 XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
233 XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
234 XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
235 XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
236 XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
237 XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
238 XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
239 XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
240 XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
241 XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
242 XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
243 XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
244 XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
245 XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
246 XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
247 XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
248 XT_MK_REG_DESC("ps", 0xE6, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
249 XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
250 XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
251 XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
252 XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
253 XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
254 XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
255 XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
256 XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
257 XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
258 XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
259 XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
260 XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
261 XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
262
263 /* WARNING: For these registers, regnum points to the
264 * index of the corresponding ARx registers, NOT to
265 * the processor register number! */
266 XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
267 XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
268 XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
269 XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
270 XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
271 XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
272 XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
273 XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
274 XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
275 XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
276 XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
277 XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
278 XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
279 XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
280 XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
281 XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
282 };
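/* Note: the table above describes the fixed core register set (XT_NUM_REGS entries).
 * Cache indices beyond XT_NUM_REGS refer to optional/configuration-specific registers
 * held in xtensa->optregs; see the rlist/ridx selection in xtensa_write_dirty_registers()
 * and xtensa_fetch_all_regs(). */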
283
284 /**
285 * Types of memory regions used by the Xtensa target
286 */
287 enum xtensa_mem_region_type {
288 XTENSA_MEM_REG_IROM = 0x0,
289 XTENSA_MEM_REG_IRAM,
290 XTENSA_MEM_REG_DROM,
291 XTENSA_MEM_REG_DRAM,
292 XTENSA_MEM_REG_SRAM,
293 XTENSA_MEM_REG_SROM,
294 XTENSA_MEM_REGS_NUM
295 };
296
297 /* Register definition as union for list allocation */
298 union xtensa_reg_val_u {
299 xtensa_reg_val_t val;
300 uint8_t buf[4];
301 };
302
303 const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
304 { .chrval = "E00", .intval = ERROR_FAIL },
305 { .chrval = "E01", .intval = ERROR_FAIL },
306 { .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
307 { .chrval = "E03", .intval = ERROR_FAIL },
308 };
309
310 /* Set to true for extra debug logging */
311 static const bool xtensa_extra_debug_log;
312
313 /**
314 * Returns the configuration for the given memory region type
315 */
316 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
317 struct xtensa *xtensa,
318 enum xtensa_mem_region_type type)
319 {
320 switch (type) {
321 case XTENSA_MEM_REG_IROM:
322 return &xtensa->core_config->irom;
323 case XTENSA_MEM_REG_IRAM:
324 return &xtensa->core_config->iram;
325 case XTENSA_MEM_REG_DROM:
326 return &xtensa->core_config->drom;
327 case XTENSA_MEM_REG_DRAM:
328 return &xtensa->core_config->dram;
329 case XTENSA_MEM_REG_SRAM:
330 return &xtensa->core_config->sram;
331 case XTENSA_MEM_REG_SROM:
332 return &xtensa->core_config->srom;
333 default:
334 return NULL;
335 }
336 }
337
338 /**
339 * Finds the xtensa_local_mem_region_config within a xtensa_local_mem_config
340 * that contains the given address.
341 * Returns NULL if nothing is found.
342 */
343 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
344 const struct xtensa_local_mem_config *mem,
345 target_addr_t address)
346 {
347 for (unsigned int i = 0; i < mem->count; i++) {
348 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
349 if (address >= region->base && address < (region->base + region->size))
350 return region;
351 }
352 return NULL;
353 }
354
355 /**
356 * Returns the xtensa_local_mem_region_config from the xtensa target
357 * that contains the given address.
358 * Returns NULL if nothing is found.
359 */
360 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
361 struct xtensa *xtensa,
362 target_addr_t address)
363 {
364 const struct xtensa_local_mem_region_config *result;
365 const struct xtensa_local_mem_config *mcgf;
366 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
367 mcgf = xtensa_get_mem_config(xtensa, mtype);
368 result = xtensa_memory_region_find(mcgf, address);
369 if (result)
370 return result;
371 }
372 return NULL;
373 }
374
375 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
376 const struct xtensa_local_mem_config *mem,
377 target_addr_t address)
378 {
379 if (!cache->size)
380 return false;
381 return xtensa_memory_region_find(mem, address);
382 }
383
384 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
385 {
386 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
387 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
388 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
389 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
390 }
391
392 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
393 {
394 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
395 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
396 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
397 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
398 }
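/* An address is considered cacheable only if the corresponding cache is configured
 * (non-zero size) and the address falls within one of the listed memory regions. */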
399
400 static int xtensa_core_reg_get(struct reg *reg)
401 {
402 /* We don't need this because we read all registers on halt anyway. */
403 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
404 struct target *target = xtensa->target;
405
406 if (target->state != TARGET_HALTED)
407 return ERROR_TARGET_NOT_HALTED;
408 if (!reg->exist) {
409 if (strncmp(reg->name, "?0x", 3) == 0) {
410 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
411 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
412 return ERROR_OK;
413 }
414 return ERROR_COMMAND_ARGUMENT_INVALID;
415 }
416 return ERROR_OK;
417 }
418
419 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
420 {
421 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
422 struct target *target = xtensa->target;
423
424 assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
425 if (target->state != TARGET_HALTED)
426 return ERROR_TARGET_NOT_HALTED;
427
428 if (!reg->exist) {
429 if (strncmp(reg->name, "?0x", 3) == 0) {
430 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
431 LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
432 return ERROR_OK;
433 }
434 return ERROR_COMMAND_ARGUMENT_INVALID;
435 }
436
437 buf_cpy(buf, reg->value, reg->size);
438
439 if (xtensa->core_config->windowed) {
440 /* If the user updates a potential scratch register, track for conflicts */
441 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
442 if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
443 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
444 buf_get_u32(reg->value, 0, 32));
445 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
446 xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
447 xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
448 xtensa->scratch_ars[s].intval = true;
449 break;
450 }
451 }
452 }
453 reg->dirty = true;
454 reg->valid = true;
455
456 return ERROR_OK;
457 }
458
459 static const struct reg_arch_type xtensa_reg_type = {
460 .get = xtensa_core_reg_get,
461 .set = xtensa_core_reg_set,
462 };
463
464 /* Convert a register index that's indexed relative to windowbase, to the real address. */
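/* For example, with WINDOWBASE == 2 and 64 physical ARs, a0 maps to ar8:
 * the window rotates in units of 4 registers, so idx 0 + 2 * 4 = 8. */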
465 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
466 enum xtensa_reg_id reg_idx,
467 int windowbase)
468 {
469 unsigned int idx;
470 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
471 idx = reg_idx - XT_REG_IDX_AR0;
472 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
473 idx = reg_idx - XT_REG_IDX_A0;
474 } else {
475 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
476 return -1;
477 }
478 return ((idx + windowbase * 4) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
479 }
480
481 static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
482 enum xtensa_reg_id reg_idx,
483 int windowbase)
484 {
485 return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
486 }
487
488 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
489 {
490 struct reg *reg_list = xtensa->core_cache->reg_list;
491 reg_list[reg_idx].dirty = true;
492 }
493
494 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
495 {
496 xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0EXEC, ins);
497 }
498
499 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
500 {
501 if ((oplen > 0) && (oplen <= 64)) {
502 uint32_t opsw[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; /* 8 DIRx regs: max width 64B */
503 uint8_t oplenw = (oplen + 3) / 4;
504 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
505 buf_bswap32((uint8_t *)opsw, ops, oplenw * 4);
506 else
507 memcpy(opsw, ops, oplen);
508 for (int32_t i = oplenw - 1; i > 0; i--)
509 xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0 + i, opsw[i]);
510 /* Write DIR0EXEC last */
511 xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0EXEC, opsw[0]);
512 }
513 }
514
515 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
516 {
517 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
518 return dm->pwr_ops->queue_reg_write(dm, reg, data);
519 }
520
521 /* NOTE: Assumes A3 has already been saved */
522 int xtensa_window_state_save(struct target *target, uint32_t *woe)
523 {
524 struct xtensa *xtensa = target_to_xtensa(target);
525 int woe_dis;
526 uint8_t woe_buf[4];
527
528 if (xtensa->core_config->windowed) {
529 /* Save PS (LX) and disable window overflow exceptions prior to AR save */
530 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_PS, XT_REG_A3));
531 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
532 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, woe_buf);
533 int res = jtag_execute_queue();
534 if (res != ERROR_OK) {
535 LOG_ERROR("Failed to read PS (%d)!", res);
536 return res;
537 }
538 xtensa_core_status_check(target);
539 *woe = buf_get_u32(woe_buf, 0, 32);
540 woe_dis = *woe & ~XT_PS_WOE_MSK;
541 LOG_DEBUG("Clearing PS.WOE (0x%08" PRIx32 " -> 0x%08" PRIx32 ")", *woe, woe_dis);
542 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, woe_dis);
543 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
544 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
545 }
546 return ERROR_OK;
547 }
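/* Callers pair xtensa_window_state_save() with xtensa_window_state_restore() around
 * DIR sequences that touch AR registers, e.g. in xtensa_write_dirty_registers() and
 * xtensa_fetch_all_regs(), so PS.WOE is cleared during the access and restored afterwards. */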
548
549 /* NOTE: Assumes A3 has already been saved */
550 void xtensa_window_state_restore(struct target *target, uint32_t woe)
551 {
552 struct xtensa *xtensa = target_to_xtensa(target);
553 if (xtensa->core_config->windowed) {
554 /* Restore window overflow exception state */
555 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, woe);
556 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
557 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
558 LOG_DEBUG("Restored PS.WOE (0x%08" PRIx32 ")", woe);
559 }
560 }
561
562 static bool xtensa_reg_is_readable(int flags, int cpenable)
563 {
564 if (flags & XT_REGF_NOREAD)
565 return false;
566 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
567 return false;
568 return true;
569 }
570
571 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
572 {
573 int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
574 if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
575 LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
576 memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
577 } else {
578 LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
579 memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
580 }
581 return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
582 }
583
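/* Flush every dirty register in the cache back to the target: special/user/FP registers
 * first (using A3 as scratch via DDR), then the visible A0-A15, then - on windowed
 * configs - each bank of physical ARs by rotating the window, finally restoring PS.WOE
 * and the saved A3. */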
584 static int xtensa_write_dirty_registers(struct target *target)
585 {
586 struct xtensa *xtensa = target_to_xtensa(target);
587 int res;
588 xtensa_reg_val_t regval, windowbase = 0;
589 bool scratch_reg_dirty = false, delay_cpenable = false;
590 struct reg *reg_list = xtensa->core_cache->reg_list;
591 unsigned int reg_list_size = xtensa->core_cache->num_regs;
592 bool preserve_a3 = false;
593 uint8_t a3_buf[4];
594 xtensa_reg_val_t a3, woe;
595
596 LOG_TARGET_DEBUG(target, "start");
597
598 /* We need to write the dirty registers in the cache list back to the processor.
599 * Start by writing the SFR/user registers. */
600 for (unsigned int i = 0; i < reg_list_size; i++) {
601 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
602 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
603 if (reg_list[i].dirty) {
604 if (rlist[ridx].type == XT_REG_SPECIAL ||
605 rlist[ridx].type == XT_REG_USER ||
606 rlist[ridx].type == XT_REG_FR) {
607 scratch_reg_dirty = true;
608 if (i == XT_REG_IDX_CPENABLE) {
609 delay_cpenable = true;
610 continue;
611 }
612 regval = xtensa_reg_get(target, i);
613 LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
614 reg_list[i].name,
615 rlist[ridx].reg_num,
616 regval);
617 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
618 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
619 if (reg_list[i].exist) {
620 unsigned int reg_num = rlist[ridx].reg_num;
621 if (rlist[ridx].type == XT_REG_USER) {
622 xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
623 } else if (rlist[ridx].type == XT_REG_FR) {
624 xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
625 } else {/*SFR */
626 if (reg_num == XT_PC_REG_NUM_VIRTUAL)
627 /* reg number of PC for debug interrupt depends on
628  * NDEBUGLEVEL */
629 reg_num =
630 (XT_PC_REG_NUM_BASE +
631 xtensa->core_config->debug.irq_level);
632 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
633 }
634 }
635 reg_list[i].dirty = false;
636 }
637 }
638 }
639 if (scratch_reg_dirty)
640 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
641 if (delay_cpenable) {
642 regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
643 LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
644 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
645 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
646 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
647 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
648 XT_REG_A3));
649 reg_list[XT_REG_IDX_CPENABLE].dirty = false;
650 }
651
652 preserve_a3 = (xtensa->core_config->windowed);
653 if (preserve_a3) {
654 /* Save (windowed) A3 for scratch use */
655 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
656 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, a3_buf);
657 res = jtag_execute_queue();
658 xtensa_core_status_check(target);
659 a3 = buf_get_u32(a3_buf, 0, 32);
660 }
661
662 if (xtensa->core_config->windowed) {
663 res = xtensa_window_state_save(target, &woe);
664 if (res != ERROR_OK)
665 return res;
666 /* Grab the windowbase, we need it. */
667 windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
668 /* Check if there are mismatches between the ARx and corresponding Ax registers.
669 * When the user sets a register on a windowed config, xt-gdb may set the ARx
670 * register directly. Thus we take ARx as priority over Ax if both are dirty
671 * and it's unclear if the user set one over the other explicitly.
672 */
673 for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
674 unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
675 if (reg_list[i].dirty && reg_list[j].dirty) {
676 if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
677 bool show_warning = true;
678 if (i == XT_REG_IDX_A3)
679 show_warning = xtensa_scratch_regs_fixup(xtensa,
680 reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
681 else if (i == XT_REG_IDX_A4)
682 show_warning = xtensa_scratch_regs_fixup(xtensa,
683 reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
684 if (show_warning)
685 LOG_WARNING(
686 "Warning: Both A%d [0x%08" PRIx32
687 "] as well as its underlying physical register "
688 "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
689 i - XT_REG_IDX_A0,
690 buf_get_u32(reg_list[i].value, 0, 32),
691 j - XT_REG_IDX_AR0,
692 buf_get_u32(reg_list[j].value, 0, 32));
693 }
694 }
695 }
696 }
697
698 /* Write A0-A15. */
699 for (unsigned int i = 0; i < 16; i++) {
700 if (reg_list[XT_REG_IDX_A0 + i].dirty) {
701 regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
702 LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
703 xtensa_regs[XT_REG_IDX_A0 + i].name,
704 regval,
705 xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
706 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
707 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
708 reg_list[XT_REG_IDX_A0 + i].dirty = false;
709 if (i == 3) {
710 /* Avoid stomping A3 during restore at end of function */
711 a3 = regval;
712 }
713 }
714 }
715
716 if (xtensa->core_config->windowed) {
717 /* Now write AR registers */
718 for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
719 /* Write the 16 registers we can see */
720 for (unsigned int i = 0; i < 16; i++) {
721 if (i + j < xtensa->core_config->aregs_num) {
722 enum xtensa_reg_id realadr =
723 xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
724 windowbase);
725 /* Write back any dirty un-windowed registers */
726 if (reg_list[realadr].dirty) {
727 regval = xtensa_reg_get(target, realadr);
728 LOG_TARGET_DEBUG(
729 target,
730 "Writing back reg %s value %08" PRIX32 ", num =%i",
731 xtensa_regs[realadr].name,
732 regval,
733 xtensa_regs[realadr].reg_num);
734 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
735 xtensa_queue_exec_ins(xtensa,
736 XT_INS_RSR(xtensa, XT_SR_DDR,
737 xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
738 reg_list[realadr].dirty = false;
739 if ((i + j) == 3)
740 /* Avoid stomping AR during A3 restore at end of function */
741 a3 = regval;
742 }
743 }
744 }
745 /* Now rotate the window so we'll see the next 16 registers.
746  * The final rotate will wrap around, leaving us in the state
747  * we started in. */
748 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
749 }
750
751 xtensa_window_state_restore(target, woe);
752
753 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
754 xtensa->scratch_ars[s].intval = false;
755 }
756
757 if (preserve_a3) {
758 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, a3);
759 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
760 }
761
762 res = jtag_execute_queue();
763 xtensa_core_status_check(target);
764
765 return res;
766 }
767
768 static inline bool xtensa_is_stopped(struct target *target)
769 {
770 struct xtensa *xtensa = target_to_xtensa(target);
771 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
772 }
773
774 int xtensa_examine(struct target *target)
775 {
776 struct xtensa *xtensa = target_to_xtensa(target);
777 unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;
778
779 LOG_DEBUG("coreid = %d", target->coreid);
780
781 if (xtensa->core_config->core_type == XT_UNDEF) {
782 LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
783 return ERROR_FAIL;
784 }
785
786 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
787 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
788 xtensa_dm_queue_enable(&xtensa->dbg_mod);
789 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
790 int res = jtag_execute_queue();
791 if (res != ERROR_OK)
792 return res;
793 if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
794 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
795 return ERROR_TARGET_FAILURE;
796 }
797 LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
798 if (!target_was_examined(target))
799 target_set_examined(target);
800 xtensa_smpbreak_write(xtensa, xtensa->smp_break);
801 return ERROR_OK;
802 }
803
804 int xtensa_wakeup(struct target *target)
805 {
806 struct xtensa *xtensa = target_to_xtensa(target);
807 unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;
808
809 if (xtensa->reset_asserted)
810 cmd |= PWRCTL_CORERESET;
811 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
812 /* TODO: can we join this with the write above? */
813 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
814 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
815 return jtag_execute_queue();
816 }
817
818 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
819 {
820 uint32_t dsr_data = 0x00110000;
821 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
822 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
823 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
824
825 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
826 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, set | OCDDCR_ENABLEOCD);
827 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, clear);
828 xtensa_queue_dbg_reg_write(xtensa, NARADR_DSR, dsr_data);
829 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
830 return jtag_execute_queue();
831 }
832
833 int xtensa_smpbreak_set(struct target *target, uint32_t set)
834 {
835 struct xtensa *xtensa = target_to_xtensa(target);
836 int res = ERROR_OK;
837
838 xtensa->smp_break = set;
839 if (target_was_examined(target))
840 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
841 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
842 return res;
843 }
844
845 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
846 {
847 uint8_t dcr_buf[sizeof(uint32_t)];
848
849 xtensa_queue_dbg_reg_read(xtensa, NARADR_DCRSET, dcr_buf);
850 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
851 int res = jtag_execute_queue();
852 *val = buf_get_u32(dcr_buf, 0, 32);
853
854 return res;
855 }
856
857 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
858 {
859 struct xtensa *xtensa = target_to_xtensa(target);
860 *val = xtensa->smp_break;
861 return ERROR_OK;
862 }
863
864 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
865 {
866 return buf_get_u32(reg->value, 0, 32);
867 }
868
869 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
870 {
871 buf_set_u32(reg->value, 0, 32, value);
872 reg->dirty = true;
873 }
874
875 int xtensa_core_status_check(struct target *target)
876 {
877 struct xtensa *xtensa = target_to_xtensa(target);
878 int res, needclear = 0;
879
880 xtensa_dm_core_status_read(&xtensa->dbg_mod);
881 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
882 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
883 if (dsr & OCDDSR_EXECBUSY) {
884 if (!xtensa->suppress_dsr_errors)
885 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
886 needclear = 1;
887 }
888 if (dsr & OCDDSR_EXECEXCEPTION) {
889 if (!xtensa->suppress_dsr_errors)
890 LOG_TARGET_ERROR(target,
891 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
892 dsr);
893 needclear = 1;
894 }
895 if (dsr & OCDDSR_EXECOVERRUN) {
896 if (!xtensa->suppress_dsr_errors)
897 LOG_TARGET_ERROR(target,
898 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
899 dsr);
900 needclear = 1;
901 }
902 if (needclear) {
903 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
904 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
905 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
906 LOG_TARGET_ERROR(target, "clearing DSR failed!");
907 return ERROR_FAIL;
908 }
909 return ERROR_OK;
910 }
911
912 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
913 {
914 struct xtensa *xtensa = target_to_xtensa(target);
915 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
916 return xtensa_reg_get_value(reg);
917 }
918
919 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
920 {
921 struct xtensa *xtensa = target_to_xtensa(target);
922 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
923 if (xtensa_reg_get_value(reg) == value)
924 return;
925 xtensa_reg_set_value(reg, value);
926 }
927
928 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
929 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
930 {
931 struct xtensa *xtensa = target_to_xtensa(target);
932 uint32_t windowbase = (xtensa->core_config->windowed ?
933 xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE) : 0);
934 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
935 xtensa_reg_set(target, a_idx, value);
936 xtensa_reg_set(target, ar_idx, value);
937 }
938
939 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
940 uint32_t xtensa_cause_get(struct target *target)
941 {
942 return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
943 }
944
945 void xtensa_cause_clear(struct target *target)
946 {
947 struct xtensa *xtensa = target_to_xtensa(target);
948 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
949 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
950 }
951
952 int xtensa_assert_reset(struct target *target)
953 {
954 struct xtensa *xtensa = target_to_xtensa(target);
955
956 LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
957 target->state = TARGET_RESET;
958 xtensa_queue_pwr_reg_write(xtensa,
959 DMREG_PWRCTL,
960 PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP |
961 PWRCTL_CORERESET);
962 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
963 int res = jtag_execute_queue();
964 if (res != ERROR_OK)
965 return res;
966 xtensa->reset_asserted = true;
967 return res;
968 }
969
970 int xtensa_deassert_reset(struct target *target)
971 {
972 struct xtensa *xtensa = target_to_xtensa(target);
973
974 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
975 if (target->reset_halt)
976 xtensa_queue_dbg_reg_write(xtensa,
977 NARADR_DCRSET,
978 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
979 xtensa_queue_pwr_reg_write(xtensa,
980 DMREG_PWRCTL,
981 PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP);
982 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
983 int res = jtag_execute_queue();
984 if (res != ERROR_OK)
985 return res;
986 target->state = TARGET_RUNNING;
987 xtensa->reset_asserted = false;
988 return res;
989 }
990
991 int xtensa_soft_reset_halt(struct target *target)
992 {
993 LOG_TARGET_DEBUG(target, "begin");
994 return xtensa_assert_reset(target);
995 }
996
997 int xtensa_fetch_all_regs(struct target *target)
998 {
999 struct xtensa *xtensa = target_to_xtensa(target);
1000 struct reg *reg_list = xtensa->core_cache->reg_list;
1001 unsigned int reg_list_size = xtensa->core_cache->num_regs;
1002 xtensa_reg_val_t cpenable = 0, windowbase = 0, a3;
1003 uint32_t woe;
1004 uint8_t a3_buf[4];
1005 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1006
1007 union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
1008 if (!regvals) {
1009 LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
1010 return ERROR_FAIL;
1011 }
1012 union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
1013 if (!dsrs) {
1014 LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
1015 free(regvals);
1016 return ERROR_FAIL;
1017 }
1018
1019 LOG_TARGET_DEBUG(target, "start");
1020
1021 /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1022 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1023 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, a3_buf);
1024 int res = xtensa_window_state_save(target, &woe);
1025 if (res != ERROR_OK)
1026 goto xtensa_fetch_all_regs_done;
1027
1028 /* Assume the CPU has just halted. We now want to fill the register cache with all the
1029 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1030 * in one go, then sort everything out from the regvals variable. */
1031
1032 /* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
1033 for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
1034 /* Grab the 16 registers we can see */
1035 for (unsigned int i = 0; i < 16; i++) {
1036 if (i + j < xtensa->core_config->aregs_num) {
1037 xtensa_queue_exec_ins(xtensa,
1038 XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
1039 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR,
1040 regvals[XT_REG_IDX_AR0 + i + j].buf);
1041 if (debug_dsrs)
1042 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR,
1043 dsrs[XT_REG_IDX_AR0 + i + j].buf);
1044 }
1045 }
1046 if (xtensa->core_config->windowed)
1047 /* Now rotate the window so we'll see the next 16 registers.
1048  * The final rotate will wrap around, leaving us in the state
1049  * we started in. */
1050 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
1051 }
1052 xtensa_window_state_restore(target, woe);
1053
1054 if (xtensa->core_config->coproc) {
1055 /* As the very first thing after AREGS, go grab CPENABLE */
1056 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1057 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1058 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
1059 }
1060 res = jtag_execute_queue();
1061 if (res != ERROR_OK) {
1062 LOG_ERROR("Failed to read ARs (%d)!", res);
1063 goto xtensa_fetch_all_regs_done;
1064 }
1065 xtensa_core_status_check(target);
1066
1067 a3 = buf_get_u32(a3_buf, 0, 32);
1068
1069 if (xtensa->core_config->coproc) {
1070 cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);
1071
1072 /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1073 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, 0xffffffff);
1074 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1075 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1076
1077 /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1078 LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
1079 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
1080 }
1081 /* We're now free to use any of A0-A15 as scratch registers.
1082  * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1083 for (unsigned int i = 0; i < reg_list_size; i++) {
1084 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1085 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1086 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1087 bool reg_fetched = true;
1088 unsigned int reg_num = rlist[ridx].reg_num;
1089 switch (rlist[ridx].type) {
1090 case XT_REG_USER:
1091 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
1092 break;
1093 case XT_REG_FR:
1094 xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
1095 break;
1096 case XT_REG_SPECIAL:
1097 if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
1098 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1099 reg_num = (XT_PC_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
1100 } else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
1101 /* CPENABLE already read/updated; don't re-read */
1102 reg_fetched = false;
1103 break;
1104 }
1105 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
1106 break;
1107 default:
1108 reg_fetched = false;
1109 }
1110 if (reg_fetched) {
1111 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1112 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[i].buf);
1113 if (debug_dsrs)
1114 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[i].buf);
1115 }
1116 }
1117 }
1118 /* Ok, send the whole mess to the CPU. */
1119 res = jtag_execute_queue();
1120 if (res != ERROR_OK) {
1121 LOG_ERROR("Failed to fetch AR regs!");
1122 goto xtensa_fetch_all_regs_done;
1123 }
1124 xtensa_core_status_check(target);
1125
1126 if (debug_dsrs) {
1127 /* DSR checking: follows order in which registers are requested. */
1128 for (unsigned int i = 0; i < reg_list_size; i++) {
1129 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1130 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1131 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
1132 (rlist[ridx].type != XT_REG_DEBUG) &&
1133 (rlist[ridx].type != XT_REG_RELGEN) &&
1134 (rlist[ridx].type != XT_REG_TIE) &&
1135 (rlist[ridx].type != XT_REG_OTHER)) {
1136 if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
1137 LOG_ERROR("Exception reading %s!", reg_list[i].name);
1138 res = ERROR_FAIL;
1139 goto xtensa_fetch_all_regs_done;
1140 }
1141 }
1142 }
1143 }
1144
1145 if (xtensa->core_config->windowed)
1146 /* We need the windowbase to decode the general addresses. */
1147 windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE].buf, 0, 32);
1148 /* Decode the result and update the cache. */
1149 for (unsigned int i = 0; i < reg_list_size; i++) {
1150 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1151 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1152 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1153 if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
1154 /* The 64-value general register set is read from (windowbase) on down.
1155 * We need to get the real register address by subtracting windowbase and
1156 * wrapping around. */
1157 enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
1158 windowbase);
1159 buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
1160 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1161 buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
1162 if (xtensa_extra_debug_log) {
1163 xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
1164 LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
1165 }
1166 } else {
1167 xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
1168 bool is_dirty = (i == XT_REG_IDX_CPENABLE);
1169 if (xtensa_extra_debug_log)
1170 LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
1171 xtensa_reg_set(target, i, regval);
1172 reg_list[i].dirty = is_dirty; /*always do this _after_ xtensa_reg_set! */
1173 }
1174 reg_list[i].valid = true;
1175 } else {
1176 if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
1177 /* Report unreadable (NOREAD) registers as all-zero but valid */
1178 reg_list[i].valid = true;
1179 xtensa_reg_set(target, i, 0);
1180 } else {
1181 reg_list[i].valid = false;
1182 }
1183 }
1184 }
1185
1186 if (xtensa->core_config->windowed) {
1187 /* We have used A3 as a scratch register.
1188 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
1189 */
1190 enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
1191 xtensa_reg_set(target, ar3_idx, a3);
1192 xtensa_mark_register_dirty(xtensa, ar3_idx);
1193
1194 /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1195 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
1196 enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
1197 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
1198 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
1199 xtensa->scratch_ars[s].intval = false;
1200 }
1201
1202 /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1203 xtensa_reg_set(target, XT_REG_IDX_A3, a3);
1204 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1205 xtensa->regs_fetched = true;
1206 xtensa_fetch_all_regs_done:
1207 free(regvals);
1208 free(dsrs);
1209 return res;
1210 }
1211
1212 int xtensa_get_gdb_reg_list(struct target *target,
1213 struct reg **reg_list[],
1214 int *reg_list_size,
1215 enum target_register_class reg_class)
1216 {
1217 struct xtensa *xtensa = target_to_xtensa(target);
1218 unsigned int num_regs;
1219
1220 if (reg_class == REG_CLASS_GENERAL) {
1221 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1222 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1223 return ERROR_FAIL;
1224 }
1225 num_regs = xtensa->genpkt_regs_num;
1226 } else {
1227 /* Determine whether to return a contiguous or sparse register map */
1228 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1229 }
1230
1231 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1232
1233 *reg_list = calloc(num_regs, sizeof(struct reg *));
1234 if (!*reg_list)
1235 return ERROR_FAIL;
1236
1237 *reg_list_size = num_regs;
1238 if (xtensa->regmap_contiguous) {
1239 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1240 for (unsigned int i = 0; i < num_regs; i++)
1241 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1242 return ERROR_OK;
1243 }
1244
1245 for (unsigned int i = 0; i < num_regs; i++)
1246 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1247 unsigned int k = 0;
1248 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1249 if (xtensa->core_cache->reg_list[i].exist) {
1250 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1251 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1252 int sparse_idx = rlist[ridx].dbreg_num;
1253 if (i == XT_REG_IDX_PS) {
1254 if (xtensa->eps_dbglevel_idx == 0) {
1255 LOG_ERROR("eps_dbglevel_idx not set\n");
1256 return ERROR_FAIL;
1257 }
1258 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1259 if (xtensa_extra_debug_log)
1260 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1261 sparse_idx, xtensa->core_config->debug.irq_level,
1262 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1263 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1264 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1265 } else {
1266 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1267 }
1268 if (i == XT_REG_IDX_PC)
1269 /* Make a duplicate copy of PC for external access */
1270 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1271 k++;
1272 }
1273 }
1274
1275 if (k == num_regs)
1276 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1277
1278 return ERROR_OK;
1279 }
1280
1281 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1282 {
1283 struct xtensa *xtensa = target_to_xtensa(target);
1284 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1285 xtensa->core_config->mmu.dtlb_entries_count > 0;
1286 return ERROR_OK;
1287 }
1288
1289 int xtensa_halt(struct target *target)
1290 {
1291 struct xtensa *xtensa = target_to_xtensa(target);
1292
1293 LOG_TARGET_DEBUG(target, "start");
1294 if (target->state == TARGET_HALTED) {
1295 LOG_TARGET_DEBUG(target, "target was already halted");
1296 return ERROR_OK;
1297 }
1298 /* First we have to read dsr and check if the target stopped */
1299 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1300 if (res != ERROR_OK) {
1301 LOG_TARGET_ERROR(target, "Failed to read core status!");
1302 return res;
1303 }
1304 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1305 if (!xtensa_is_stopped(target)) {
1306 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1307 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1308 res = jtag_execute_queue();
1309 if (res != ERROR_OK)
1310 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1311 }
1312
1313 return res;
1314 }
1315
1316 int xtensa_prepare_resume(struct target *target,
1317 int current,
1318 target_addr_t address,
1319 int handle_breakpoints,
1320 int debug_execution)
1321 {
1322 struct xtensa *xtensa = target_to_xtensa(target);
1323 uint32_t bpena = 0;
1324
1325 LOG_TARGET_DEBUG(target,
1326 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1327 current,
1328 address,
1329 handle_breakpoints,
1330 debug_execution);
1331
1332 if (target->state != TARGET_HALTED) {
1333 LOG_TARGET_WARNING(target, "target not halted");
1334 return ERROR_TARGET_NOT_HALTED;
1335 }
1336
1337 if (address && !current) {
1338 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1339 } else {
1340 uint32_t cause = xtensa_cause_get(target);
1341 LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1342 cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1343 if (cause & DEBUGCAUSE_DB)
1344 /* We stopped due to a watchpoint. We can't just resume executing the
1345  * instruction again because that would trigger the watchpoint again.
1346  * To fix this, we single-step, which ignores watchpoints. */
1347
1348 xtensa_do_step(target, current, address, handle_breakpoints);
1349 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1350 /* We stopped due to a break instruction. We can't just resume executing the
1351  * instruction again because that would trigger the break again.
1352  * To fix this, we single-step, which ignores break. */
1353
1354 xtensa_do_step(target, current, address, handle_breakpoints);
1355 }
1356
1357 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1358 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1359 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1360 if (xtensa->hw_brps[slot]) {
1361 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1362 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1363 bpena |= BIT(slot);
1364 }
1365 }
1366 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1367
1368 /* Here we write all registers to the targets */
1369 int res = xtensa_write_dirty_registers(target);
1370 if (res != ERROR_OK)
1371 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1372 return res;
1373 }
1374
1375 int xtensa_do_resume(struct target *target)
1376 {
1377 struct xtensa *xtensa = target_to_xtensa(target);
1378
1379 LOG_TARGET_DEBUG(target, "start");
1380
1381 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1382 int res = jtag_execute_queue();
1383 if (res != ERROR_OK) {
1384 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1385 return res;
1386 }
1387 xtensa_core_status_check(target);
1388 return ERROR_OK;
1389 }
1390
1391 int xtensa_resume(struct target *target,
1392 int current,
1393 target_addr_t address,
1394 int handle_breakpoints,
1395 int debug_execution)
1396 {
1397 LOG_TARGET_DEBUG(target, "start");
1398 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1399 if (res != ERROR_OK) {
1400 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1401 return res;
1402 }
1403 res = xtensa_do_resume(target);
1404 if (res != ERROR_OK) {
1405 LOG_TARGET_ERROR(target, "Failed to resume!");
1406 return res;
1407 }
1408
1409 target->debug_reason = DBG_REASON_NOTHALTED;
1410 if (!debug_execution)
1411 target->state = TARGET_RUNNING;
1412 else
1413 target->state = TARGET_DEBUG_RUNNING;
1414
1415 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1416
1417 return ERROR_OK;
1418 }
1419
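/* Heuristic: returns true if the opcode at 'pc' looks like window-exception handler code,
 * i.e. an L32E/S32E access or an RFWO/RFWU return used by the window overflow/underflow
 * handlers. */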
1420 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1421 {
1422 struct xtensa *xtensa = target_to_xtensa(target);
1423 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1424 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1425 if (err != ERROR_OK)
1426 return false;
1427
1428 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1429 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1430 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1431 return true;
1432
1433 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1434 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1435 return true;
1436
1437 return false;
1438 }
1439
1440 int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1441 {
1442 struct xtensa *xtensa = target_to_xtensa(target);
1443 int res;
1444 const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1445 xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
1446 xtensa_reg_val_t icountlvl, cause;
1447 xtensa_reg_val_t oldps, oldpc, cur_pc;
1448 bool ps_lowered = false;
1449
1450 LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1451 current, address, handle_breakpoints);
1452
1453 if (target->state != TARGET_HALTED) {
1454 LOG_TARGET_WARNING(target, "target not halted");
1455 return ERROR_TARGET_NOT_HALTED;
1456 }
1457
1458 if (xtensa->eps_dbglevel_idx == 0) {
1459 LOG_ERROR("eps_dbglevel_idx not set\n");
1460 return ERROR_FAIL;
1461 }
1462
1463 /* Save old ps (EPS[dbglvl] on LX), pc */
1464 oldps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
1465 oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);
1466
1467 cause = xtensa_cause_get(target);
1468 LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1469 oldps,
1470 oldpc,
1471 cause,
1472 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1473 if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1474 /* handle hard-coded SW breakpoints (e.g. syscalls) */
1475 LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1476 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1477 /* pretend that we have stepped */
1478 if (cause & DEBUGCAUSE_BI)
1479 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1480 else
1481 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1482 return ERROR_OK;
1483 }
1484
1485 /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1486 * at which the instructions are to be counted while stepping.
1487 *
1488 * For example, if we need to step by 2 instructions, and an interrupt occurs
1489 * in between, the processor will trigger the interrupt and halt after the 2nd
1490 * instruction within the interrupt vector and/or handler.
1491 *
1492 * However, sometimes we don't want the interrupt handlers to be executed at all
1493 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1494 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1495 * code from being counted during stepping. Note that C exception handlers must
1496 * run at level 0 and hence will be counted and stepped into, should one occur.
1497 *
1498 * TODO: Certain instructions should never be single-stepped and should instead
1499 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1500 * RFI >= DBGLEVEL.
1501 */
1502 if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
1503 if (!xtensa->core_config->high_irq.enabled) {
1504 LOG_TARGET_WARNING(
1505 target,
1506 "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1507 return ERROR_FAIL;
1508 }
1509 /* Update ICOUNTLEVEL accordingly */
1510 icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
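/* Example: if the halted code was running at PS.INTLEVEL 0, ICOUNTLEVEL becomes 1,
 * so only level-0 instructions are counted and pending higher-level interrupt
 * handlers run without being stepped into. */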
1511 } else {
1512 icountlvl = xtensa->core_config->debug.irq_level;
1513 }
1514
1515 if (cause & DEBUGCAUSE_DB) {
1516 /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1517  * that would trigger the watchpoint again. To fix this, we remove the watchpoints, single-step,
1518  * and then re-enable them. */
1519 LOG_TARGET_DEBUG(
1520 target,
1521 "Single-stepping to get past instruction that triggered the watchpoint...");
1522 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1523 /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1524 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1525 dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
1526 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
1527 }
1528 }
1529
1530 if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
1531 /* handle normal SW breakpoint */
1532 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1533 if ((oldps & 0xf) >= icountlvl) {
1534 /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1535 ps_lowered = true;
1536 uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
1537 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
1538 LOG_TARGET_DEBUG(target,
1539 "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
1540 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1541 newps,
1542 oldps);
1543 }
1544 do {
1545 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
1546 xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1547
1548 /* Now ICOUNT is set, we can resume as if we were going to run */
1549 res = xtensa_prepare_resume(target, current, address, 0, 0);
1550 if (res != ERROR_OK) {
1551 LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1552 return res;
1553 }
1554 res = xtensa_do_resume(target);
1555 if (res != ERROR_OK) {
1556 LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1557 return res;
1558 }
1559
1560 /* Wait for stepping to complete */
1561 long long start = timeval_ms();
1562 while (timeval_ms() < start + 500) {
1563 /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1564  * until stepping is complete. */
1565 usleep(1000);
1566 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1567 if (res != ERROR_OK) {
1568 LOG_TARGET_ERROR(target, "Failed to read core status!");
1569 return res;
1570 }
1571 if (xtensa_is_stopped(target))
1572 break;
1573 usleep(1000);
1574 }
1575 LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1576 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1577 if (!xtensa_is_stopped(target)) {
1578 LOG_TARGET_WARNING(
1579 target,
1580 "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1581 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1582 target->debug_reason = DBG_REASON_NOTHALTED;
1583 target->state = TARGET_RUNNING;
1584 return ERROR_FAIL;
1585 }
1586 target->debug_reason = DBG_REASON_SINGLESTEP;
1587 target->state = TARGET_HALTED;
1588
1589 xtensa_fetch_all_regs(target);
1590
1591 cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1592
1593 LOG_TARGET_DEBUG(target,
1594 "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1595 xtensa_reg_get(target, XT_REG_IDX_PS),
1596 cur_pc,
1597 xtensa_cause_get(target),
1598 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1599
1600 /* Do not step into WindowOverflow if ISRs are masked.
1601    If we stopped at a breakpoint in WindowOverflow with ISRs masked,
1602    stepping again here gets us out of that handler. */
1603 if (xtensa->core_config->windowed &&
1604 xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
1605 xtensa_pc_in_winexc(target, cur_pc)) {
1606 /* isrmask = on, need to step out of the window exception handler */
1607 LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1608 oldpc = cur_pc;
1609 address = oldpc + 3;
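/* Window exception handler instructions (L32E/S32E, RFWO/RFWU) are 24-bit,
 * hence the 3-byte advance past the current one. */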
1610 continue;
1611 }
1612
1613 if (oldpc == cur_pc)
1614 LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1615 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1616 else
1617 LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1618 break;
1619 } while (true);
1620 LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1621
1622 if (cause & DEBUGCAUSE_DB) {
1623 LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1624 /* Restore the DBREAKCx registers */
1625 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1626 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
1627 }
1628
1629 /* Restore int level */
1630 if (ps_lowered) {
1631 LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
1632 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1633 oldps);
1634 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
1635 }
1636
1637 /* write ICOUNTLEVEL back to zero */
1638 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
1639 /* TODO: can we skip writing dirty registers and re-fetching them? */
1640 res = xtensa_write_dirty_registers(target);
1641 xtensa_fetch_all_regs(target);
1642 return res;
1643 }
1644
1645 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1646 {
1647 return xtensa_do_step(target, current, address, handle_breakpoints);
1648 }
1649
1650 /**
1651 * Returns true if two ranges are overlapping
1652 */
1653 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1654 target_addr_t r1_end,
1655 target_addr_t r2_start,
1656 target_addr_t r2_end)
1657 {
1658 if ((r2_start >= r1_start) && (r2_start < r1_end))
1659 return true; /* r2_start is in r1 region */
1660 if ((r2_end > r1_start) && (r2_end <= r1_end))
1661 return true; /* r2_end is in r1 region */
1662 return false;
1663 }
1664
1665 /**
1666  * Returns the size of the overlapping region of two ranges.
1667 */
1668 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1669 target_addr_t r1_end,
1670 target_addr_t r2_start,
1671 target_addr_t r2_end)
1672 {
1673 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1674 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1675 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1676 return ov_end - ov_start;
1677 }
1678 return 0;
1679 }
1680
1681 /**
1682  * Check whether the address range falls within the configured memory regions and allows the requested access mode
1683 */
1684 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1685 {
1686 target_addr_t adr_pos = address; /* address cursor, set to the range start */
1687 target_addr_t adr_end = address + size; /* region end */
1688 target_addr_t overlap_size;
1689 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1690
1691 while (adr_pos < adr_end) {
1692 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1693 if (!cm) /* address does not belong to any configured region */
1694 return false;
1695 if ((cm->access & access) != access) /* access check */
1696 return false;
1697 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1698 assert(overlap_size != 0);
1699 adr_pos += overlap_size;
1700 }
1701 return true;
1702 }
1703
1704 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1705 {
1706 struct xtensa *xtensa = target_to_xtensa(target);
1707 /* We are going to read memory in 32-bit increments. This may not be what the calling
1708 * function expects, so we may need to allocate a temp buffer and read into that first. */
1709 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1710 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
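/* Example: a 3-byte read at offset 2 of a word becomes an aligned window of two
 * words; the requested bytes are later copied out of the temporary buffer at
 * offset (address & 3). */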
1711 target_addr_t adr = addrstart_al;
1712 uint8_t *albuff;
1713 bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1714
1715 if (target->state != TARGET_HALTED) {
1716 LOG_TARGET_WARNING(target, "target not halted");
1717 return ERROR_TARGET_NOT_HALTED;
1718 }
1719
1720 if (!xtensa->permissive_mode) {
1721 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1722 XT_MEM_ACCESS_READ)) {
1723 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1724 return ERROR_FAIL;
1725 }
1726 }
1727
1728 if (addrstart_al == address && addrend_al == address + (size * count)) {
1729 albuff = buffer;
1730 } else {
1731 albuff = malloc(addrend_al - addrstart_al);
1732 if (!albuff) {
1733 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1734 addrend_al - addrstart_al);
1735 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1736 }
1737 }
1738
1739 /* We're going to use A3 here */
1740 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1741 /* Write start address to A3 */
1742 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1743 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1744 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1745 if (xtensa->probe_lsddr32p != 0) {
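/* Fast path: LDDR32.P loads the word at *A3 into DDR and post-increments A3.
 * A DDREXEC access returns DDR and re-executes the queued instruction, so each
 * read below fetches the next word; the last word is read through plain DDR so
 * no extra load runs past the end of the range. */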
1746 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1747 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1748 xtensa_queue_dbg_reg_read(xtensa,
1749 (adr + sizeof(uint32_t) == addrend_al) ? NARADR_DDR : NARADR_DDREXEC,
1750 &albuff[i]);
1751 } else {
1752 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1753 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1754 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1755 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
1756 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[i]);
1757 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, adr + sizeof(uint32_t));
1758 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1759 }
1760 }
1761 int res = jtag_execute_queue();
1762 if (res == ERROR_OK) {
1763 bool prev_suppress = xtensa->suppress_dsr_errors;
1764 xtensa->suppress_dsr_errors = true;
1765 res = xtensa_core_status_check(target);
1766 if (xtensa->probe_lsddr32p == -1)
1767 xtensa->probe_lsddr32p = 1;
1768 xtensa->suppress_dsr_errors = prev_suppress;
1769 }
1770 if (res != ERROR_OK) {
1771 if (xtensa->probe_lsddr32p != 0) {
1772 /* Disable fast memory access instructions and retry before reporting an error */
1773 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1774 xtensa->probe_lsddr32p = 0;
1775 res = xtensa_read_memory(target, address, size, count, buffer);
1776 bswap = false;
1777 } else {
1778 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
1779 count * size, address);
1780 }
1781 }
1782
1783 if (bswap)
1784 buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
1785 if (albuff != buffer) {
1786 memcpy(buffer, albuff + (address & 3), (size * count));
1787 free(albuff);
1788 }
1789
1790 return res;
1791 }
1792
1793 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1794 {
1795 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1796 return xtensa_read_memory(target, address, 1, count, buffer);
1797 }
1798
1799 int xtensa_write_memory(struct target *target,
1800 target_addr_t address,
1801 uint32_t size,
1802 uint32_t count,
1803 const uint8_t *buffer)
1804 {
1805 /* This memory write function can be handed almost anything, from aligned
1806  * uint32 writes to unaligned uint8 accesses. Xtensa memory, however, may only
1807  * accept aligned uint32 writes. That is why we convert everything into
1808  * those. */
1809 struct xtensa *xtensa = target_to_xtensa(target);
1810 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1811 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1812 target_addr_t adr = addrstart_al;
1813 int res;
1814 uint8_t *albuff;
1815 bool fill_head_tail = false;
1816
1817 if (target->state != TARGET_HALTED) {
1818 LOG_TARGET_WARNING(target, "target not halted");
1819 return ERROR_TARGET_NOT_HALTED;
1820 }
1821
1822 if (!xtensa->permissive_mode) {
1823 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
1824 LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
1825 return ERROR_FAIL;
1826 }
1827 }
1828
1829 if (size == 0 || count == 0 || !buffer)
1830 return ERROR_COMMAND_SYNTAX_ERROR;
1831
1832 /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
1833 if (addrstart_al == address && addrend_al == address + (size * count)) {
1834 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1835 /* Need a buffer for byte-swapping */
1836 albuff = malloc(addrend_al - addrstart_al);
1837 else
1838 /* We discard the const here because albuff can also be non-const */
1839 albuff = (uint8_t *)buffer;
1840 } else {
1841 fill_head_tail = true;
1842 albuff = malloc(addrend_al - addrstart_al);
1843 }
1844 if (!albuff) {
1845 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1846 addrend_al - addrstart_al);
1847 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1848 }
1849
1850 /* We're going to use A3 here */
1851 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1852
1853 /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
1854 if (fill_head_tail) {
1855 /* See if we need to read the first and/or last word. */
1856 if (address & 3) {
1857 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1858 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1859 if (xtensa->probe_lsddr32p == 1) {
1860 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1861 } else {
1862 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1863 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1864 }
1865 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[0]);
1866 }
1867 if ((address + (size * count)) & 3) {
1868 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrend_al - 4);
1869 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1870 if (xtensa->probe_lsddr32p == 1) {
1871 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1872 } else {
1873 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1874 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1875 }
1876 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR,
1877 &albuff[addrend_al - addrstart_al - 4]);
1878 }
1879 /* Grab bytes */
1880 res = jtag_execute_queue();
1881 if (res != ERROR_OK) {
1882 LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
1883 if (albuff != buffer)
1884 free(albuff);
1885 return res;
1886 }
1887 xtensa_core_status_check(target);
1888 if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
1889 bool swapped_w0 = false;
1890 if (address & 3) {
1891 buf_bswap32(&albuff[0], &albuff[0], 4);
1892 swapped_w0 = true;
1893 }
1894 if ((address + (size * count)) & 3) {
1895 if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
1896 /* Don't double-swap if buffer start/end are within the same word */
1897 } else {
1898 buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
1899 &albuff[addrend_al - addrstart_al - 4], 4);
1900 }
1901 }
1902 }
1903 /* Copy data to be written into the aligned buffer (in host-endianness) */
1904 memcpy(&albuff[address & 3], buffer, size * count);
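/* albuff now holds a read-modify-write image of the aligned window:
 * original head bytes, caller data, original tail bytes. */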
1905 /* Now we can write albuff in aligned uint32s. */
1906 }
1907
1908 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1909 buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
1910
1911 /* Write start address to A3 */
1912 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1913 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1914 /* Write the aligned buffer */
1915 if (xtensa->probe_lsddr32p != 0) {
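/* Fast path: SDDR32.P stores DDR to *A3 and post-increments A3. After the
 * first explicit store, each DDREXEC write below loads DDR and re-executes the
 * store, streaming one word per debug register access. */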
1916 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1917 if (i == 0) {
1918 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, buf_get_u32(&albuff[i], 0, 32));
1919 xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
1920 } else {
1921 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
1922 }
1923 }
1924 } else {
1925 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1926 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1927 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, buf_get_u32(&albuff[i], 0, 32));
1928 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
1929 xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1930 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, adr + sizeof(uint32_t));
1931 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1932 }
1933 }
1934
1935 res = jtag_execute_queue();
1936 if (res == ERROR_OK) {
1937 bool prev_suppress = xtensa->suppress_dsr_errors;
1938 xtensa->suppress_dsr_errors = true;
1939 res = xtensa_core_status_check(target);
1940 if (xtensa->probe_lsddr32p == -1)
1941 xtensa->probe_lsddr32p = 1;
1942 xtensa->suppress_dsr_errors = prev_suppress;
1943 }
1944 if (res != ERROR_OK) {
1945 if (xtensa->probe_lsddr32p != 0) {
1946 /* Disable fast memory access instructions and retry before reporting an error */
1947 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1948 xtensa->probe_lsddr32p = 0;
1949 res = xtensa_write_memory(target, address, size, count, buffer);
1950 } else {
1951 LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
1952 count * size, address);
1953 }
1954 } else {
1955 /* Invalidate ICACHE, writeback DCACHE if present */
1956 uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
1957 uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
1958 if (issue_ihi || issue_dhwb) {
1959 uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
1960 uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
1961 uint32_t linesize = MIN(ilinesize, dlinesize);
1962 uint32_t off = 0;
1963 adr = addrstart_al;
1964
1965 while ((adr + off) < addrend_al) {
1966 if (off == 0) {
1967 /* Write start address to A3 */
1968 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, adr);
1969 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1970 }
1971 if (issue_ihi)
1972 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
1973 if (issue_dhwb)
1974 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
1975 off += linesize;
1976 if (off > 1020) {
1977 /* IHI, DHWB have 8-bit immediate operands (0..1020) */
1978 adr += off;
1979 off = 0;
1980 }
1981 }
1982
1983 /* Execute cache WB/INV instructions */
1984 res = jtag_execute_queue();
1985 xtensa_core_status_check(target);
1986 if (res != ERROR_OK)
1987 LOG_TARGET_ERROR(target,
1988 "Error issuing cache writeback/invaldate instruction(s): %d",
1989 res);
1990 }
1991 }
1992 if (albuff != buffer)
1993 free(albuff);
1994
1995 return res;
1996 }
1997
1998 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
1999 {
2000 /* xtensa_write_memory can handle everything. Just pass on to that. */
2001 return xtensa_write_memory(target, address, 1, count, buffer);
2002 }
2003
2004 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2005 {
2006 LOG_WARNING("not implemented yet");
2007 return ERROR_FAIL;
2008 }
2009
2010 int xtensa_poll(struct target *target)
2011 {
2012 struct xtensa *xtensa = target_to_xtensa(target);
2013
2014 int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET | PWRSTAT_COREWASRESET);
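/* The *WASRESET bits are sticky; passing them as the clear mask acknowledges any
 * debug/core reset seen since the previous poll. stat holds the value as read,
 * stath the value re-read after clearing. */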
2015 if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
2016 LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2017 xtensa->dbg_mod.power_status.stat,
2018 PWRSTAT_DEBUGWASRESET | PWRSTAT_COREWASRESET,
2019 xtensa->dbg_mod.power_status.stath);
2020 if (res != ERROR_OK)
2021 return res;
2022
2023 if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
2024 LOG_TARGET_INFO(target, "Debug controller was reset.");
2025 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
2026 if (res != ERROR_OK)
2027 return res;
2028 }
2029 if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
2030 LOG_TARGET_INFO(target, "Core was reset.");
2031 xtensa_dm_power_status_cache(&xtensa->dbg_mod);
2032 /* Enable JTAG, set reset if needed */
2033 res = xtensa_wakeup(target);
2034 if (res != ERROR_OK)
2035 return res;
2036
2037 uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2038 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
2039 if (res != ERROR_OK)
2040 return res;
2041 if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2042 LOG_TARGET_DEBUG(target,
2043 "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2044 prev_dsr,
2045 xtensa->dbg_mod.core_status.dsr);
2046 if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET) {
2047 /* if RESET state is persistent */
2048 target->state = TARGET_RESET;
2049 } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2050 LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2051 xtensa->dbg_mod.core_status.dsr,
2052 xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
2053 target->state = TARGET_UNKNOWN;
2054 if (xtensa->come_online_probes_num == 0)
2055 target->examined = false;
2056 else
2057 xtensa->come_online_probes_num--;
2058 } else if (xtensa_is_stopped(target)) {
2059 if (target->state != TARGET_HALTED) {
2060 enum target_state oldstate = target->state;
2061 target->state = TARGET_HALTED;
2062 /* Examine why the target has been halted */
2063 target->debug_reason = DBG_REASON_DBGRQ;
2064 xtensa_fetch_all_regs(target);
2065 /* When setting debug reason DEBUGCAUSE events have the following
2066 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2067 /* Watchpoint and breakpoint events at the same time results in special
2068 * debug reason: DBG_REASON_WPTANDBKPT. */
2069 uint32_t halt_cause = xtensa_cause_get(target);
2070 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2071 if (halt_cause & DEBUGCAUSE_IC)
2072 target->debug_reason = DBG_REASON_SINGLESTEP;
2073 if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2074 if (halt_cause & DEBUGCAUSE_DB)
2075 target->debug_reason = DBG_REASON_WPTANDBKPT;
2076 else
2077 target->debug_reason = DBG_REASON_BREAKPOINT;
2078 } else if (halt_cause & DEBUGCAUSE_DB) {
2079 target->debug_reason = DBG_REASON_WATCHPOINT;
2080 }
2081 LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2082 ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2083 xtensa_reg_get(target, XT_REG_IDX_PC),
2084 target->debug_reason,
2085 oldstate);
2086 LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2087 halt_cause,
2088 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
2089 xtensa->dbg_mod.core_status.dsr);
2090 xtensa_dm_core_status_clear(
2091 &xtensa->dbg_mod,
2092 OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
2093 OCDDSR_DEBUGINTTRAX |
2094 OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
2095 }
2096 } else {
2097 target->debug_reason = DBG_REASON_NOTHALTED;
2098 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2099 target->state = TARGET_RUNNING;
2100 target->debug_reason = DBG_REASON_NOTHALTED;
2101 }
2102 }
2103 if (xtensa->trace_active) {
2104 /* Detect if tracing was active but has stopped. */
2105 struct xtensa_trace_status trace_status;
2106 res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2107 if (res == ERROR_OK) {
2108 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2109 LOG_INFO("Detected end of trace.");
2110 if (trace_status.stat & TRAXSTAT_PCMTG)
2111 LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2112 if (trace_status.stat & TRAXSTAT_PTITG)
2113 LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2114 if (trace_status.stat & TRAXSTAT_CTITG)
2115 LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2116 xtensa->trace_active = false;
2117 }
2118 }
2119 }
2120 return ERROR_OK;
2121 }
2122
2123 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2124 {
2125 struct xtensa *xtensa = target_to_xtensa(target);
2126 unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
2127 unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2128 uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2129 uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2130 unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2131 unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
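/* size is capped at one icache line below, so the write spans at most two cache
 * lines; when it does, a second writeback/invalidate is issued at offset +4 to
 * cover the following line. */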
2132 int ret;
2133
2134 if (size > icache_line_size)
2135 return ERROR_FAIL;
2136
2137 if (issue_ihi || issue_dhwbi) {
2138 /* We're going to use A3 here */
2139 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
2140
2141 /* Write start address to A3 and invalidate */
2142 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, address);
2143 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2144 LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
2145 if (issue_dhwbi) {
2146 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
2147 if (!same_dc_line) {
2148 LOG_TARGET_DEBUG(target,
2149 "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2150 address + 4);
2151 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
2152 }
2153 }
2154 if (issue_ihi) {
2155 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
2156 if (!same_ic_line) {
2157 LOG_TARGET_DEBUG(target,
2158 "IHI second icache line for address "TARGET_ADDR_FMT,
2159 address + 4);
2160 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
2161 }
2162 }
2163
2164 /* Execute invalidate instructions */
2165 ret = jtag_execute_queue();
2166 xtensa_core_status_check(target);
2167 if (ret != ERROR_OK) {
2168 LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
2169 return ret;
2170 }
2171 }
2172
2173 /* Write new instructions to memory */
2174 ret = target_write_buffer(target, address, size, buffer);
2175 if (ret != ERROR_OK) {
2176 LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2177 return ret;
2178 }
2179
2180 if (issue_dhwbi) {
2181 /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2182 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, address);
2183 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2184 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
2185 LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2186 if (!same_dc_line) {
2187 LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2188 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
2189 }
2190
2191 /* Execute invalidate instructions */
2192 ret = jtag_execute_queue();
2193 xtensa_core_status_check(target);
2194 }
2195
2196 /* TODO: Handle L2 cache if present */
2197 return ret;
2198 }
2199
2200 static int xtensa_sw_breakpoint_add(struct target *target,
2201 struct breakpoint *breakpoint,
2202 struct xtensa_sw_breakpoint *sw_bp)
2203 {
2204 struct xtensa *xtensa = target_to_xtensa(target);
2205 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2206 if (ret != ERROR_OK) {
2207 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2208 return ret;
2209 }
2210
2211 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2212 sw_bp->oocd_bp = breakpoint;
2213
2214 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
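/* Use the 3-byte BREAK for full-size slots and the 2-byte BREAK.N (density
 * option) when gdb plants a 2-byte breakpoint. */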
2215
2216 /* Underlying memory write will convert instruction endianness, don't do that here */
2217 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2218 if (ret != ERROR_OK) {
2219 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2220 return ret;
2221 }
2222
2223 return ERROR_OK;
2224 }
2225
2226 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2227 {
2228 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2229 if (ret != ERROR_OK) {
2230 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2231 return ret;
2232 }
2233 sw_bp->oocd_bp = NULL;
2234 return ERROR_OK;
2235 }
2236
2237 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2238 {
2239 struct xtensa *xtensa = target_to_xtensa(target);
2240 unsigned int slot;
2241
2242 if (breakpoint->type == BKPT_SOFT) {
2243 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2244 if (!xtensa->sw_brps[slot].oocd_bp ||
2245 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2246 break;
2247 }
2248 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2249 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2250 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2251 }
2252 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2253 if (ret != ERROR_OK) {
2254 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2255 return ret;
2256 }
2257 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2258 slot,
2259 breakpoint->address);
2260 return ERROR_OK;
2261 }
2262
2263 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2264 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2265 break;
2266 }
2267 if (slot == xtensa->core_config->debug.ibreaks_num) {
2268 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2269 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2270 }
2271
2272 xtensa->hw_brps[slot] = breakpoint;
2273 /* We will actually write the breakpoints when we resume the target. */
2274 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2275 slot,
2276 breakpoint->address);
2277
2278 return ERROR_OK;
2279 }
2280
2281 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2282 {
2283 struct xtensa *xtensa = target_to_xtensa(target);
2284 unsigned int slot;
2285
2286 if (breakpoint->type == BKPT_SOFT) {
2287 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2288 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2289 break;
2290 }
2291 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2292 LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2293 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2294 }
2295 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2296 if (ret != ERROR_OK) {
2297 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2298 return ret;
2299 }
2300 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2301 return ERROR_OK;
2302 }
2303
2304 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2305 if (xtensa->hw_brps[slot] == breakpoint)
2306 break;
2307 }
2308 if (slot == xtensa->core_config->debug.ibreaks_num) {
2309 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2310 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2311 }
2312 xtensa->hw_brps[slot] = NULL;
2313 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2314 return ERROR_OK;
2315 }
2316
2317 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2318 {
2319 struct xtensa *xtensa = target_to_xtensa(target);
2320 unsigned int slot;
2321 xtensa_reg_val_t dbreakcval;
2322
2323 if (target->state != TARGET_HALTED) {
2324 LOG_TARGET_WARNING(target, "target not halted");
2325 return ERROR_TARGET_NOT_HALTED;
2326 }
2327
2328 if (watchpoint->mask != ~(uint32_t)0) {
2329 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2330 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2331 }
2332
2333 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2334 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2335 break;
2336 }
2337 if (slot == xtensa->core_config->debug.dbreaks_num) {
2338 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2339 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2340 }
2341
2342 /* Figure out the value for DBREAKC bits 5..0:
2343  * start from 0x3F and clear one more LSB for each doubling of the watchpoint length. */
2344 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2345 !IS_PWR_OF_2(watchpoint->length) ||
2346 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2347 LOG_TARGET_WARNING(
2348 target,
2349 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2350 " not supported by hardware.",
2351 watchpoint->length,
2352 watchpoint->address);
2353 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2354 }
2355 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
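/* Resulting DBREAKC mask examples: length 1 -> 0x3F, 2 -> 0x3E, 4 -> 0x3C,
 * 8 -> 0x38, 64 -> 0x00 (all six mask bits cleared). */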
2356
2357 if (watchpoint->rw == WPT_READ)
2358 dbreakcval |= BIT(30);
2359 if (watchpoint->rw == WPT_WRITE)
2360 dbreakcval |= BIT(31);
2361 if (watchpoint->rw == WPT_ACCESS)
2362 dbreakcval |= BIT(30) | BIT(31);
2363
2364 /* Write DBREAKA[slot] and DBREAKC[slot] */
2365 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2366 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2367 xtensa->hw_wps[slot] = watchpoint;
2368 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2369 watchpoint->address);
2370 return ERROR_OK;
2371 }
2372
2373 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2374 {
2375 struct xtensa *xtensa = target_to_xtensa(target);
2376 unsigned int slot;
2377
2378 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2379 if (xtensa->hw_wps[slot] == watchpoint)
2380 break;
2381 }
2382 if (slot == xtensa->core_config->debug.dbreaks_num) {
2383 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2384 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2385 }
2386 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2387 xtensa->hw_wps[slot] = NULL;
2388 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2389 watchpoint->address);
2390 return ERROR_OK;
2391 }
2392
2393 static int xtensa_build_reg_cache(struct target *target)
2394 {
2395 struct xtensa *xtensa = target_to_xtensa(target);
2396 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2397 unsigned int last_dbreg_num = 0;
2398
2399 if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
2400 LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2401 xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);
2402
2403 struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2404
2405 if (!reg_cache) {
2406 LOG_ERROR("Failed to alloc reg cache!");
2407 return ERROR_FAIL;
2408 }
2409 reg_cache->name = "Xtensa registers";
2410 reg_cache->next = NULL;
2411 /* Init reglist */
2412 unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2413 struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2414 if (!reg_list) {
2415 LOG_ERROR("Failed to alloc reg list!");
2416 goto fail;
2417 }
2418 xtensa->dbregs_num = 0;
2419 unsigned int didx = 0;
2420 for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2421 struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2422 unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2423 for (unsigned int i = 0; i < listsize; i++, didx++) {
2424 reg_list[didx].exist = rlist[i].exist;
2425 reg_list[didx].name = rlist[i].name;
2426 reg_list[didx].size = 32;
2427 reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2428 if (!reg_list[didx].value) {
2429 LOG_ERROR("Failed to alloc reg list value!");
2430 goto fail;
2431 }
2432 reg_list[didx].dirty = false;
2433 reg_list[didx].valid = false;
2434 reg_list[didx].type = &xtensa_reg_type;
2435 reg_list[didx].arch_info = xtensa;
2436 if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2437 last_dbreg_num = rlist[i].dbreg_num;
2438
2439 if (xtensa_extra_debug_log) {
2440 LOG_TARGET_DEBUG(target,
2441 "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2442 reg_list[didx].name,
2443 whichlist,
2444 reg_list[didx].exist,
2445 didx,
2446 rlist[i].type,
2447 rlist[i].dbreg_num);
2448 }
2449 }
2450 }
2451
2452 xtensa->dbregs_num = last_dbreg_num + 1;
2453 reg_cache->reg_list = reg_list;
2454 reg_cache->num_regs = reg_list_size;
2455
2456 LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2457 xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
2458
2459 /* Construct empty-register list for handling unknown register requests */
2460 xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
2461 if (!xtensa->empty_regs) {
2462 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2463 goto fail;
2464 }
2465 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2466 xtensa->empty_regs[i].name = calloc(8, sizeof(char));
2467 if (!xtensa->empty_regs[i].name) {
2468 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2469 goto fail;
2470 }
2471 sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i);
2472 xtensa->empty_regs[i].size = 32;
2473 xtensa->empty_regs[i].type = &xtensa_reg_type;
2474 xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2475 if (!xtensa->empty_regs[i].value) {
2476 LOG_ERROR("Failed to alloc empty reg list value!");
2477 goto fail;
2478 }
2479 xtensa->empty_regs[i].arch_info = xtensa;
2480 }
2481
2482 /* Construct contiguous register list from contiguous descriptor list */
2483 if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
2484 xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
2485 if (!xtensa->contiguous_regs_list) {
2486 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2487 goto fail;
2488 }
2489 for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
2490 unsigned int j;
2491 for (j = 0; j < reg_cache->num_regs; j++) {
2492 if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
2493 xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
2494 LOG_TARGET_DEBUG(target,
2495 "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
2496 xtensa->contiguous_regs_list[i]->name,
2497 xtensa->contiguous_regs_desc[i]->dbreg_num);
2498 break;
2499 }
2500 }
2501 if (j == reg_cache->num_regs)
2502 LOG_TARGET_WARNING(target, "contiguous register %s not found",
2503 xtensa->contiguous_regs_desc[i]->name);
2504 }
2505 }
2506
2507 xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2508 if (!xtensa->algo_context_backup) {
2509 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2510 goto fail;
2511 }
2512 for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2513 struct reg *reg = &reg_cache->reg_list[i];
2514 xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2515 if (!xtensa->algo_context_backup[i]) {
2516 LOG_ERROR("Failed to alloc mem for algorithm context!");
2517 goto fail;
2518 }
2519 }
2520 xtensa->core_cache = reg_cache;
2521 if (cache_p)
2522 *cache_p = reg_cache;
2523 return ERROR_OK;
2524
2525 fail:
2526 if (reg_list) {
2527 for (unsigned int i = 0; i < reg_list_size; i++)
2528 free(reg_list[i].value);
2529 }
2530 if (xtensa->empty_regs) {
2531 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2532 free((void *)xtensa->empty_regs[i].name);
2533 free(xtensa->empty_regs[i].value);
2534 }
2535 free(xtensa->empty_regs);
2536 }
2537 if (xtensa->algo_context_backup) {
2538 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2539 free(xtensa->algo_context_backup[i]);
2540 free(xtensa->algo_context_backup);
2541 }
2542 free(reg_cache);
2543
2544 return ERROR_FAIL;
2545 }
2546
2547 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
2548 {
2549 struct xtensa *xtensa = target_to_xtensa(target);
2550 int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
2551 /* Process op[] list */
2552 while (opstr && (*opstr == ':')) {
2553 uint8_t ops[32];
2554 unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
2555 if (oplen > 32) {
2556 LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
2557 break;
2558 }
2559 unsigned int i = 0;
2560 while ((i < oplen) && opstr && (*opstr == ':'))
2561 ops[i++] = strtoul(opstr + 1, &opstr, 16);
2562 if (i != oplen) {
2563 LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
2564 break;
2565 }
2566
2567 char insn_buf[128];
2568 sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
2569 for (i = 0; i < oplen; i++)
2570 sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
2571 LOG_TARGET_DEBUG(target, "%s", insn_buf);
2572 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
2573 status = ERROR_OK;
2574 }
2575 return status;
2576 }
2577
2578 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
2579 {
2580 struct xtensa *xtensa = target_to_xtensa(target);
2581 bool iswrite = (packet[0] == 'Q');
2582 enum xtensa_qerr_e error;
2583
2584 /* Read/write TIE register. Requires spill location.
2585 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
2586 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
2587 */
2588 if (!(xtensa->spill_buf)) {
2589 LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
2590 error = XT_QERR_FAIL;
2591 goto xtensa_gdbqc_qxtreg_fail;
2592 }
2593
2594 char *delim;
2595 uint32_t regnum = strtoul(packet + 6, &delim, 16);
2596 if (*delim != ':') {
2597 LOG_ERROR("Malformed qxtreg packet");
2598 error = XT_QERR_INVAL;
2599 goto xtensa_gdbqc_qxtreg_fail;
2600 }
2601 uint32_t reglen = strtoul(delim + 1, &delim, 16);
2602 if (*delim != ':') {
2603 LOG_ERROR("Malformed qxtreg packet");
2604 error = XT_QERR_INVAL;
2605 goto xtensa_gdbqc_qxtreg_fail;
2606 }
2607 uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
2608 LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
2609 if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
2610 LOG_ERROR("TIE register too large");
2611 error = XT_QERR_MEM;
2612 goto xtensa_gdbqc_qxtreg_fail;
2613 }
2614
2615 /* (1) Save spill memory, (1.5) [if write then store value to spill location],
2616 * (2) read old a4, (3) write spill address to a4.
2617 * NOTE: ensure a4 is restored properly by all error handling logic
2618 */
2619 unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
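/* Use word accesses when the spill location is word-aligned, byte accesses
 * otherwise; the spill area is saved and restored in these units. */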
2620 int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
2621 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2622 if (status != ERROR_OK) {
2623 LOG_ERROR("Spill memory save");
2624 error = XT_QERR_MEM;
2625 goto xtensa_gdbqc_qxtreg_fail;
2626 }
2627 if (iswrite) {
2628 /* Extract value and store in spill memory */
2629 unsigned int b = 0;
2630 char *valbuf = strchr(delim, '=');
2631 if (!(valbuf && (*valbuf == '='))) {
2632 LOG_ERROR("Malformed Qxtreg packet");
2633 error = XT_QERR_INVAL;
2634 goto xtensa_gdbqc_qxtreg_fail;
2635 }
2636 valbuf++;
2637 while (*valbuf && *(valbuf + 1)) {
2638 char bytestr[3] = { 0, 0, 0 };
2639 strncpy(bytestr, valbuf, 2);
2640 regbuf[b++] = strtoul(bytestr, NULL, 16);
2641 valbuf += 2;
2642 }
2643 if (b != reglen) {
2644 LOG_ERROR("Malformed Qxtreg packet");
2645 error = XT_QERR_INVAL;
2646 goto xtensa_gdbqc_qxtreg_fail;
2647 }
2648 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2649 reglen / memop_size, regbuf);
2650 if (status != ERROR_OK) {
2651 LOG_ERROR("TIE value store");
2652 error = XT_QERR_MEM;
2653 goto xtensa_gdbqc_qxtreg_fail;
2654 }
2655 }
2656 xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
2657 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, xtensa->spill_loc);
2658 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2659
2660 int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
2661
2662 /* Restore a4 but not yet spill memory. Execute it all... */
2663 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, orig_a4);
2664 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2665 status = jtag_execute_queue();
2666 if (status != ERROR_OK) {
2667 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
2668 tieop_status = status;
2669 }
2670 status = xtensa_core_status_check(target);
2671 if (status != ERROR_OK) {
2672 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
2673 tieop_status = status;
2674 }
2675
2676 if (tieop_status == ERROR_OK) {
2677 if (iswrite) {
2678 /* TIE write succeeded; send OK */
2679 strcpy(*response_p, "OK");
2680 } else {
2681 /* TIE read succeeded; copy result from spill memory */
2682 status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
2683 if (status != ERROR_OK) {
2684 LOG_TARGET_ERROR(target, "TIE result read");
2685 tieop_status = status;
2686 }
2687 unsigned int i;
2688 for (i = 0; i < reglen; i++)
2689 sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
2690 *(*response_p + 2 * i) = '\0';
2691 LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
2692 }
2693 }
2694
2695 /* Restore spill memory first, then report any previous errors */
2696 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2697 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2698 if (status != ERROR_OK) {
2699 LOG_ERROR("Spill memory restore");
2700 error = XT_QERR_MEM;
2701 goto xtensa_gdbqc_qxtreg_fail;
2702 }
2703 if (tieop_status != ERROR_OK) {
2704 LOG_ERROR("TIE execution");
2705 error = XT_QERR_FAIL;
2706 goto xtensa_gdbqc_qxtreg_fail;
2707 }
2708 return ERROR_OK;
2709
2710 xtensa_gdbqc_qxtreg_fail:
2711 strcpy(*response_p, xt_qerr[error].chrval);
2712 return xt_qerr[error].intval;
2713 }
2714
2715 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
2716 {
2717 struct xtensa *xtensa = target_to_xtensa(target);
2718 enum xtensa_qerr_e error;
2719 if (!packet || !response_p) {
2720 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
2721 return ERROR_FAIL;
2722 }
2723
2724 *response_p = xtensa->qpkt_resp;
2725 if (strncmp(packet, "qxtn", 4) == 0) {
2726 strcpy(*response_p, "OpenOCD");
2727 return ERROR_OK;
2728 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
2729 return ERROR_OK;
2730 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
2731 /* Confirm host cache params match core .cfg file */
2732 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
2733 &xtensa->core_config->icache : &xtensa->core_config->dcache;
2734 unsigned int line_size = 0, size = 0, way_count = 0;
2735 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
2736 if ((cachep->line_size != line_size) ||
2737 (cachep->size != size) ||
2738 (cachep->way_count != way_count)) {
2739 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2740 cachep == &xtensa->core_config->icache ? 'I' : 'D');
2741 }
2742 strcpy(*response_p, "OK");
2743 return ERROR_OK;
2744 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
2745 /* Confirm host IRAM/IROM params match core .cfg file */
2746 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
2747 &xtensa->core_config->iram : &xtensa->core_config->irom;
2748 unsigned int base = 0, size = 0, i;
2749 char *pkt = (char *)&packet[7];
2750 do {
2751 pkt++;
2752 size = strtoul(pkt, &pkt, 16);
2753 pkt++;
2754 base = strtoul(pkt, &pkt, 16);
2755 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
2756 for (i = 0; i < memp->count; i++) {
2757 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
2758 break;
2759 }
2760 if (i == memp->count) {
2761 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
2762 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
2763 break;
2764 }
2765 for (i = 0; i < 11; i++) {
2766 pkt++;
2767 strtoul(pkt, &pkt, 16);
2768 }
2769 } while (pkt && (pkt[0] == ','));
2770 strcpy(*response_p, "OK");
2771 return ERROR_OK;
2772 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
2773 /* Confirm host EXCM_LEVEL matches core .cfg file */
2774 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
2775 if (!xtensa->core_config->high_irq.enabled ||
2776 (excm_level != xtensa->core_config->high_irq.excm_level))
2777 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
2778 strcpy(*response_p, "OK");
2779 return ERROR_OK;
2780 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
2781 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
2782 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
2783 strcpy(*response_p, "OK");
2784 return ERROR_OK;
2785 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
2786 char *delim;
2787 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
2788 if (*delim != ':') {
2789 LOG_ERROR("Malformed Qxtspill packet");
2790 error = XT_QERR_INVAL;
2791 goto xtensa_gdb_query_custom_fail;
2792 }
2793 xtensa->spill_loc = spill_loc;
2794 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
2795 if (xtensa->spill_buf)
2796 free(xtensa->spill_buf);
2797 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
2798 if (!xtensa->spill_buf) {
2799 LOG_ERROR("Spill buf alloc");
2800 error = XT_QERR_MEM;
2801 goto xtensa_gdb_query_custom_fail;
2802 }
2803 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
2804 strcpy(*response_p, "OK");
2805 return ERROR_OK;
2806 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
2807 return xtensa_gdbqc_qxtreg(target, packet, response_p);
2808 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
2809 (strncmp(packet, "qxtftie", 7) == 0) ||
2810 (strncmp(packet, "qxtstie", 7) == 0)) {
2811 /* Return empty string to indicate trace, TIE wire debug are unsupported */
2812 strcpy(*response_p, "");
2813 return ERROR_OK;
2814 }
2815
2816 /* Warn for all other queries, but do not return errors */
2817 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
2818 strcpy(*response_p, "");
2819 return ERROR_OK;
2820
2821 xtensa_gdb_query_custom_fail:
2822 strcpy(*response_p, xt_qerr[error].chrval);
2823 return xt_qerr[error].intval;
2824 }
2825
2826 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2827 const struct xtensa_debug_module_config *dm_cfg)
2828 {
2829 target->arch_info = xtensa;
2830 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2831 xtensa->target = target;
2832 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2833
2834 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
2835 if (!xtensa->core_config) {
2836 LOG_ERROR("Xtensa configuration alloc failed\n");
2837 return ERROR_FAIL;
2838 }
2839
2840 /* Default cache settings are disabled with 1 way */
2841 xtensa->core_config->icache.way_count = 1;
2842 xtensa->core_config->dcache.way_count = 1;
2843
2844 /* chrval: AR3/AR4 register names will change with window mapping.
2845 * intval: tracks whether scratch register was set through gdb P packet.
2846 */
2847 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
2848 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
2849 if (!xtensa->scratch_ars[s].chrval) {
2850 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
2851 free(xtensa->scratch_ars[f].chrval);
2852 free(xtensa->core_config);
2853 LOG_ERROR("Xtensa scratch AR alloc failed");
2854 return ERROR_FAIL;
2855 }
2856 xtensa->scratch_ars[s].intval = false;
2857 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
2858 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
2859 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
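/* This yields "a3"/"a4" for the windowed views and "ar3"/"ar4" for the
 * physical registers; the 8-byte allocation covers either form. */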
2860 }
2861
2862 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2863 }
2864
2865 void xtensa_set_permissive_mode(struct target *target, bool state)
2866 {
2867 target_to_xtensa(target)->permissive_mode = state;
2868 }
2869
2870 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2871 {
2872 struct xtensa *xtensa = target_to_xtensa(target);
2873
2874 xtensa->come_online_probes_num = 3;
2875 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
2876 if (!xtensa->hw_brps) {
2877 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2878 return ERROR_FAIL;
2879 }
2880 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
2881 if (!xtensa->hw_wps) {
2882 free(xtensa->hw_brps);
2883 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2884 return ERROR_FAIL;
2885 }
2886 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2887 if (!xtensa->sw_brps) {
2888 free(xtensa->hw_brps);
2889 free(xtensa->hw_wps);
2890 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2891 return ERROR_FAIL;
2892 }
2893
2894 xtensa->spill_loc = 0xffffffff;
2895 xtensa->spill_bytes = 0;
2896 xtensa->spill_buf = NULL;
2897 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
2898
2899 return xtensa_build_reg_cache(target);
2900 }
2901
2902 static void xtensa_free_reg_cache(struct target *target)
2903 {
2904 struct xtensa *xtensa = target_to_xtensa(target);
2905 struct reg_cache *cache = xtensa->core_cache;
2906
2907 if (cache) {
2908 register_unlink_cache(&target->reg_cache, cache);
2909 for (unsigned int i = 0; i < cache->num_regs; i++) {
2910 free(xtensa->algo_context_backup[i]);
2911 free(cache->reg_list[i].value);
2912 }
2913 free(xtensa->algo_context_backup);
2914 free(cache->reg_list);
2915 free(cache);
2916 }
2917 xtensa->core_cache = NULL;
2918 xtensa->algo_context_backup = NULL;
2919
2920 if (xtensa->empty_regs) {
2921 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2922 free((void *)xtensa->empty_regs[i].name);
2923 free(xtensa->empty_regs[i].value);
2924 }
2925 free(xtensa->empty_regs);
2926 }
2927 xtensa->empty_regs = NULL;
2928 if (xtensa->optregs) {
2929 for (unsigned int i = 0; i < xtensa->num_optregs; i++)
2930 free((void *)xtensa->optregs[i].name);
2931 free(xtensa->optregs);
2932 }
2933 xtensa->optregs = NULL;
2934 }
2935
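/* Target teardown: clear OCDDCR_ENABLEOCD on an examined target, then release
 * everything allocated by xtensa_target_init() and xtensa_init_arch_info(). */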
2936 void xtensa_target_deinit(struct target *target)
2937 {
2938 struct xtensa *xtensa = target_to_xtensa(target);
2939
2940 LOG_DEBUG("start");
2941
2942 if (target_was_examined(target)) {
2943 int ret = xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, OCDDCR_ENABLEOCD);
2944 if (ret != ERROR_OK) {
2945 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2946 return;
2947 }
2948 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2949 ret = jtag_execute_queue();
2950 if (ret != ERROR_OK) {
2951 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2952 return;
2953 }
2954 }
2955 xtensa_free_reg_cache(target);
2956 free(xtensa->hw_brps);
2957 free(xtensa->hw_wps);
2958 free(xtensa->sw_brps);
2959 if (xtensa->spill_buf) {
2960 free(xtensa->spill_buf);
2961 xtensa->spill_buf = NULL;
2962 }
2963 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
2964 free(xtensa->scratch_ars[s].chrval);
2965 free(xtensa->core_config);
2966 }
2967
2968 const char *xtensa_get_gdb_arch(struct target *target)
2969 {
2970 return "xtensa";
2971 }
2972
2973 /* exe <ascii-encoded hexadecimal instruction bytes> */
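/* The argument is a string of hex digit pairs; e.g. one 24-bit Xtensa
 * instruction is encoded as 6 characters. Up to 31 bytes (62 characters)
 * may be supplied per invocation. */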
2974 COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
2975 {
2976 struct xtensa *xtensa = target_to_xtensa(target);
2977
2978 if (CMD_ARGC != 1)
2979 return ERROR_COMMAND_SYNTAX_ERROR;
2980
2981 /* Process ascii-encoded hex byte string */
2982 const char *parm = CMD_ARGV[0];
2983 unsigned int parm_len = strlen(parm);
2984 if ((parm_len >= 64) || (parm_len & 1)) {
2985 LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
2986 return ERROR_FAIL;
2987 }
2988
2989 uint8_t ops[32];
2990 unsigned int oplen = parm_len / 2;
2991 char encoded_byte[3] = { 0, 0, 0 };
2992 for (unsigned int i = 0; i < oplen; i++) {
2993 encoded_byte[0] = *parm++;
2994 encoded_byte[1] = *parm++;
2995 ops[i] = strtoul(encoded_byte, NULL, 16);
2996 }
2997
2998 /* GDB must handle state save/restore.
2999 * Flush reg cache in case spill location is in an AR
3000 * Update CPENABLE only for this execution; later restore cached copy
3001 * Keep a copy of exccause in case executed code triggers an exception
3002 */
3003 int status = xtensa_write_dirty_registers(target);
3004 if (status != ERROR_OK) {
3005 LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
3006 return ERROR_FAIL;
3007 }
3008 xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
3009 xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
3010 xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
3011 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, 0xffffffff);
3012 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3013 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
3014 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
3015 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, a3);
3016 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3017
3018 /* Queue instruction list and execute everything */
3019 LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
3020 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3021 status = jtag_execute_queue();
3022 if (status != ERROR_OK)
3023 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
3024 status = xtensa_core_status_check(target);
3025 if (status != ERROR_OK)
3026 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
3027
3028 /* Reread register cache and restore saved regs after instruction execution */
3029 if (xtensa_fetch_all_regs(target) != ERROR_OK)
3030 LOG_TARGET_ERROR(target, "%s: Failed to fetch register cache (post-exec).", target_name(target));
3031 xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
3032 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
3033 return status;
3034 }
3035
3036 COMMAND_HANDLER(xtensa_cmd_exe)
3037 {
3038 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3039 }
3040
3041 /* xtdef <name> */
3042 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3043 {
3044 if (CMD_ARGC != 1)
3045 return ERROR_COMMAND_SYNTAX_ERROR;
3046
3047 const char *core_name = CMD_ARGV[0];
3048 if (strcasecmp(core_name, "LX") == 0) {
3049 xtensa->core_config->core_type = XT_LX;
3050 } else {
3051 LOG_ERROR("xtdef [LX]\n");
3052 return ERROR_COMMAND_SYNTAX_ERROR;
3053 }
3054 return ERROR_OK;
3055 }
3056
3057 COMMAND_HANDLER(xtensa_cmd_xtdef)
3058 {
3059 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3060 target_to_xtensa(get_current_target(CMD_CTX)));
3061 }
3062
3063 static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
3064 {
3065 if ((val < min) || (val > max)) {
3066 LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
3067 return false;
3068 }
3069 return true;
3070 }
3071
3072 /* xtopt <name> <value> */
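/* Typical config-stage usage (illustrative values only):
 *   xtensa xtopt arnum 64
 *   xtensa xtopt windowed 1
 *   xtensa xtopt debuglevel 6
 */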
3073 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3074 {
3075 if (CMD_ARGC != 2)
3076 return ERROR_COMMAND_SYNTAX_ERROR;
3077
3078 const char *opt_name = CMD_ARGV[0];
3079 int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3080 if (strcasecmp(opt_name, "arnum") == 0) {
3081 if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3082 return ERROR_COMMAND_ARGUMENT_INVALID;
3083 xtensa->core_config->aregs_num = opt_val;
3084 } else if (strcasecmp(opt_name, "windowed") == 0) {
3085 if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3086 return ERROR_COMMAND_ARGUMENT_INVALID;
3087 xtensa->core_config->windowed = opt_val;
3088 } else if (strcasecmp(opt_name, "cpenable") == 0) {
3089 if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3090 return ERROR_COMMAND_ARGUMENT_INVALID;
3091 xtensa->core_config->coproc = opt_val;
3092 } else if (strcasecmp(opt_name, "exceptions") == 0) {
3093 if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3094 return ERROR_COMMAND_ARGUMENT_INVALID;
3095 xtensa->core_config->exceptions = opt_val;
3096 } else if (strcasecmp(opt_name, "intnum") == 0) {
3097 if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3098 return ERROR_COMMAND_ARGUMENT_INVALID;
3099 xtensa->core_config->irq.enabled = (opt_val > 0);
3100 xtensa->core_config->irq.irq_num = opt_val;
3101 } else if (strcasecmp(opt_name, "hipriints") == 0) {
3102 if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3103 return ERROR_COMMAND_ARGUMENT_INVALID;
3104 xtensa->core_config->high_irq.enabled = opt_val;
3105 } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3106 if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3107 return ERROR_COMMAND_ARGUMENT_INVALID;
3108 if (!xtensa->core_config->high_irq.enabled) {
3109 LOG_ERROR("xtopt excmlevel requires hipriints\n");
3110 return ERROR_COMMAND_ARGUMENT_INVALID;
3111 }
3112 xtensa->core_config->high_irq.excm_level = opt_val;
3113 } else if (strcasecmp(opt_name, "intlevels") == 0) {
3114 if (xtensa->core_config->core_type == XT_LX) {
3115 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3116 return ERROR_COMMAND_ARGUMENT_INVALID;
3117 } else {
3118 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3119 return ERROR_COMMAND_ARGUMENT_INVALID;
3120 }
3121 if (!xtensa->core_config->high_irq.enabled) {
3122 LOG_ERROR("xtopt intlevels requires hipriints\n");
3123 return ERROR_COMMAND_ARGUMENT_INVALID;
3124 }
3125 xtensa->core_config->high_irq.level_num = opt_val;
3126 } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3127 if (xtensa->core_config->core_type == XT_LX) {
3128 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3129 return ERROR_COMMAND_ARGUMENT_INVALID;
3130 } else {
3131 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3132 return ERROR_COMMAND_ARGUMENT_INVALID;
3133 }
3134 xtensa->core_config->debug.enabled = 1;
3135 xtensa->core_config->debug.irq_level = opt_val;
3136 } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3137 if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3138 return ERROR_COMMAND_ARGUMENT_INVALID;
3139 xtensa->core_config->debug.ibreaks_num = opt_val;
3140 } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3141 if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3142 return ERROR_COMMAND_ARGUMENT_INVALID;
3143 xtensa->core_config->debug.dbreaks_num = opt_val;
3144 } else if (strcasecmp(opt_name, "tracemem") == 0) {
3145 if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3146 return ERROR_COMMAND_ARGUMENT_INVALID;
3147 xtensa->core_config->trace.mem_sz = opt_val;
3148 xtensa->core_config->trace.enabled = (opt_val > 0);
3149 } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3150 if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3151 return ERROR_COMMAND_ARGUMENT_INVALID;
3152 xtensa->core_config->trace.reversed_mem_access = opt_val;
3153 } else if (strcasecmp(opt_name, "perfcount") == 0) {
3154 if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3155 return ERROR_COMMAND_ARGUMENT_INVALID;
3156 xtensa->core_config->debug.perfcount_num = opt_val;
3157 } else {
3158 LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3159 return ERROR_OK;
3160 }
3161
3162 return ERROR_OK;
3163 }
3164
3165 COMMAND_HANDLER(xtensa_cmd_xtopt)
3166 {
3167 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3168 target_to_xtensa(get_current_target(CMD_CTX)));
3169 }
3170
3171 /* xtmem <type> [parameters] */
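/* Illustrative config examples (sizes and addresses are hypothetical):
 *   xtensa xtmem icache 32 16384 2
 *   xtensa xtmem dcache 32 16384 2 1
 *   xtensa xtmem iram 0x40000000 0x20000
 */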
3172 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3173 {
3174 struct xtensa_cache_config *cachep = NULL;
3175 struct xtensa_local_mem_config *memp = NULL;
3176 int mem_access = 0;
3177 bool is_dcache = false;
3178
3179 if (CMD_ARGC == 0) {
3180 LOG_ERROR("xtmem <type> [parameters]\n");
3181 return ERROR_COMMAND_SYNTAX_ERROR;
3182 }
3183
3184 const char *mem_name = CMD_ARGV[0];
3185 if (strcasecmp(mem_name, "icache") == 0) {
3186 cachep = &xtensa->core_config->icache;
3187 } else if (strcasecmp(mem_name, "dcache") == 0) {
3188 cachep = &xtensa->core_config->dcache;
3189 is_dcache = true;
3190 } else if (strcasecmp(mem_name, "l2cache") == 0) {
3191 /* TODO: support L2 cache */
3192 } else if (strcasecmp(mem_name, "l2addr") == 0) {
3193 /* TODO: support L2 cache */
3194 } else if (strcasecmp(mem_name, "iram") == 0) {
3195 memp = &xtensa->core_config->iram;
3196 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3197 } else if (strcasecmp(mem_name, "dram") == 0) {
3198 memp = &xtensa->core_config->dram;
3199 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3200 } else if (strcasecmp(mem_name, "sram") == 0) {
3201 memp = &xtensa->core_config->sram;
3202 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3203 } else if (strcasecmp(mem_name, "irom") == 0) {
3204 memp = &xtensa->core_config->irom;
3205 mem_access = XT_MEM_ACCESS_READ;
3206 } else if (strcasecmp(mem_name, "drom") == 0) {
3207 memp = &xtensa->core_config->drom;
3208 mem_access = XT_MEM_ACCESS_READ;
3209 } else if (strcasecmp(mem_name, "srom") == 0) {
3210 memp = &xtensa->core_config->srom;
3211 mem_access = XT_MEM_ACCESS_READ;
3212 } else {
3213 LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3214 return ERROR_COMMAND_ARGUMENT_INVALID;
3215 }
3216
3217 if (cachep) {
3218 if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
3219 LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
3220 return ERROR_COMMAND_SYNTAX_ERROR;
3221 }
3222 cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3223 cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3224 cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3225 cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3226 strtoul(CMD_ARGV[4], NULL, 0) : 0;
3227 } else if (memp) {
3228 if (CMD_ARGC != 3) {
3229 LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
3230 return ERROR_COMMAND_SYNTAX_ERROR;
3231 }
3232 struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3233 memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3234 memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3235 memcfgp->access = mem_access;
3236 memp->count++;
3237 }
3238
3239 return ERROR_OK;
3240 }
3241
3242 COMMAND_HANDLER(xtensa_cmd_xtmem)
3243 {
3244 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3245 target_to_xtensa(get_current_target(CMD_CTX)));
3246 }
3247
3248 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
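/* Illustrative example (hypothetical values): xtensa xtmpu 8 32 1 0
 * -> 8 foreground segments, 32-byte minimum segment size, lockable, not execute-only. */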
3249 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3250 {
3251 if (CMD_ARGC != 4) {
3252 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3253 return ERROR_COMMAND_SYNTAX_ERROR;
3254 }
3255
3256 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3257 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3258 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3259 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3260
3261 	if (nfgseg > 32) {
3262 		LOG_ERROR("<nfgseg> must be within [0..32]\n");
3263 		return ERROR_COMMAND_ARGUMENT_INVALID;
3264 	} else if ((minsegsize < 32) || (minsegsize & (minsegsize - 1))) {
3265 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3266 return ERROR_COMMAND_ARGUMENT_INVALID;
3267 } else if (lockable > 1) {
3268 LOG_ERROR("<lockable> must be 0 or 1\n");
3269 return ERROR_COMMAND_ARGUMENT_INVALID;
3270 } else if (execonly > 1) {
3271 LOG_ERROR("<execonly> must be 0 or 1\n");
3272 return ERROR_COMMAND_ARGUMENT_INVALID;
3273 }
3274
3275 xtensa->core_config->mpu.enabled = true;
3276 xtensa->core_config->mpu.nfgseg = nfgseg;
3277 xtensa->core_config->mpu.minsegsize = minsegsize;
3278 xtensa->core_config->mpu.lockable = lockable;
3279 xtensa->core_config->mpu.execonly = execonly;
3280 return ERROR_OK;
3281 }
3282
3283 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3284 {
3285 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3286 target_to_xtensa(get_current_target(CMD_CTX)));
3287 }
3288
3289 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
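/* Note: only the first two parameters are parsed. Illustrative example
 * (hypothetical values): xtensa xtmmu 16 32
 * -> 16 ITLB and 32 DTLB auto-refill entries. */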
3290 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3291 {
3292 if (CMD_ARGC != 2) {
3293 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3294 return ERROR_COMMAND_SYNTAX_ERROR;
3295 }
3296
3297 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3298 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3299 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3300 LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3301 return ERROR_COMMAND_ARGUMENT_INVALID;
3302 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3303 LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3304 return ERROR_COMMAND_ARGUMENT_INVALID;
3305 }
3306
3307 xtensa->core_config->mmu.enabled = true;
3308 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3309 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3310 return ERROR_OK;
3311 }
3312
3313 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3314 {
3315 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3316 target_to_xtensa(get_current_target(CMD_CTX)));
3317 }
3318
3319 /* xtregs <numregs>
3320 * xtreg <regname> <regnum> */
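/* Typical config-stage usage (register numbers are core-configuration
 * specific; values below are illustrative):
 *   xtensa xtregs 176
 *   xtensa xtreg pc 0x0020
 */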
3321 COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
3322 {
3323 if (CMD_ARGC == 1) {
3324 int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
3325 if ((numregs <= 0) || (numregs > UINT16_MAX)) {
3326 LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
3327 return ERROR_COMMAND_SYNTAX_ERROR;
3328 }
3329 if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
3330 			LOG_ERROR("xtregs (%d) must be at least numgenregs (%d) (if xtregfmt specified)",
3331 numregs, xtensa->genpkt_regs_num);
3332 return ERROR_COMMAND_SYNTAX_ERROR;
3333 }
3334 xtensa->total_regs_num = numregs;
3335 xtensa->core_regs_num = 0;
3336 xtensa->num_optregs = 0;
3337 /* A little more memory than required, but saves a second initialization pass */
3338 xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
3339 if (!xtensa->optregs) {
3340 LOG_ERROR("Failed to allocate xtensa->optregs!");
3341 return ERROR_FAIL;
3342 }
3343 return ERROR_OK;
3344 } else if (CMD_ARGC != 2)
3345 return ERROR_COMMAND_SYNTAX_ERROR;
3346
3347 /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3348 * if general register (g-packet) requests or contiguous register maps are supported */
3349 if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
3350 xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
3351 if (!xtensa->contiguous_regs_desc) {
3352 LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3353 return ERROR_FAIL;
3354 }
3355 }
3356
3357 const char *regname = CMD_ARGV[0];
3358 unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
3359 if (regnum > UINT16_MAX) {
3360 LOG_ERROR("<regnum> must be a 16-bit number");
3361 return ERROR_COMMAND_ARGUMENT_INVALID;
3362 }
3363
3364 if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
3365 if (xtensa->total_regs_num)
3366 LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3367 regname, regnum,
3368 xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
3369 else
3370 LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
3371 regname, regnum);
3372 return ERROR_FAIL;
3373 }
3374
3375 /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3376 struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
3377 bool is_extended_reg = true;
3378 unsigned int ridx;
3379 for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
3380 if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
3381 /* Flag core register as defined */
3382 rptr = &xtensa_regs[ridx];
3383 xtensa->core_regs_num++;
3384 is_extended_reg = false;
3385 break;
3386 }
3387 }
3388
3389 rptr->exist = true;
3390 if (is_extended_reg) {
3391 /* Register ID, debugger-visible register ID */
3392 rptr->name = strdup(CMD_ARGV[0]);
3393 rptr->dbreg_num = regnum;
3394 rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
3395 xtensa->num_optregs++;
3396
3397 /* Register type */
3398 if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
3399 rptr->type = XT_REG_GENERAL;
3400 } else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
3401 rptr->type = XT_REG_USER;
3402 } else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
3403 rptr->type = XT_REG_FR;
3404 } else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
3405 rptr->type = XT_REG_SPECIAL;
3406 } else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
3407 /* WARNING: For these registers, regnum points to the
3408 * index of the corresponding ARx registers, NOT to
3409 * the processor register number! */
3410 rptr->type = XT_REG_RELGEN;
3411 rptr->reg_num += XT_REG_IDX_ARFIRST;
3412 rptr->dbreg_num += XT_REG_IDX_ARFIRST;
3413 } else if ((regnum & XT_REG_TIE_MASK) != 0) {
3414 rptr->type = XT_REG_TIE;
3415 } else {
3416 rptr->type = XT_REG_OTHER;
3417 }
3418
3419 /* Register flags */
3420 if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
3421 (strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
3422 (strcmp(rptr->name, "intclear") == 0))
3423 rptr->flags = XT_REGF_NOREAD;
3424 else
3425 rptr->flags = 0;
3426
3427 if ((rptr->reg_num == (XT_PS_REG_NUM_BASE + xtensa->core_config->debug.irq_level)) &&
3428 (xtensa->core_config->core_type == XT_LX) && (rptr->type == XT_REG_SPECIAL)) {
3429 xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
3430 LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
3431 }
3432 } else if (strcmp(rptr->name, "cpenable") == 0) {
3433 xtensa->core_config->coproc = true;
3434 }
3435
3436 /* Build out list of contiguous registers in specified order */
3437 unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
3438 if (xtensa->contiguous_regs_desc) {
3439 assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
3440 xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
3441 }
3442 if (xtensa_extra_debug_log)
3443 LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
3444 is_extended_reg ? "config-specific" : "core",
3445 rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
3446 is_extended_reg ? xtensa->num_optregs : ridx,
3447 is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
3448 return ERROR_OK;
3449 }
3450
3451 COMMAND_HANDLER(xtensa_cmd_xtreg)
3452 {
3453 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3454 target_to_xtensa(get_current_target(CMD_CTX)));
3455 }
3456
3457 /* xtregfmt <contiguous|sparse> [numgregs] */
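/* Illustrative example (hypothetical count): xtensa xtregfmt contiguous 105
 * declares a contiguous register map with 105 registers reported in the GDB g-packet. */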
3458 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3459 {
3460 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3461 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3462 return ERROR_OK;
3463 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3464 xtensa->regmap_contiguous = true;
3465 if (CMD_ARGC == 2) {
3466 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3467 if ((numgregs <= 0) ||
3468 ((numgregs > xtensa->total_regs_num) &&
3469 (xtensa->total_regs_num > 0))) {
3470 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3471 numgregs, xtensa->total_regs_num);
3472 return ERROR_COMMAND_SYNTAX_ERROR;
3473 }
3474 xtensa->genpkt_regs_num = numgregs;
3475 }
3476 return ERROR_OK;
3477 }
3478 }
3479 return ERROR_COMMAND_SYNTAX_ERROR;
3480 }
3481
3482 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3483 {
3484 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3485 target_to_xtensa(get_current_target(CMD_CTX)));
3486 }
3487
3488 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
3489 {
3490 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
3491 &xtensa->permissive_mode, "xtensa permissive mode");
3492 }
3493
3494 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3495 {
3496 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3497 target_to_xtensa(get_current_target(CMD_CTX)));
3498 }
3499
3500 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
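/* Illustrative example (hypothetical selection): xtensa perfmon_enable 0 0
 * programs counter 0 with select 0 and the default mask/kernelcnt/tracelevel. */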
3501 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3502 {
3503 struct xtensa_perfmon_config config = {
3504 .mask = 0xffff,
3505 .kernelcnt = 0,
3506 .tracelevel = -1 /* use DEBUGLEVEL by default */
3507 };
3508
3509 if (CMD_ARGC < 2 || CMD_ARGC > 6)
3510 return ERROR_COMMAND_SYNTAX_ERROR;
3511
3512 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3513 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3514 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3515 return ERROR_COMMAND_ARGUMENT_INVALID;
3516 }
3517
3518 config.select = strtoul(CMD_ARGV[1], NULL, 0);
3519 if (config.select > XTENSA_MAX_PERF_SELECT) {
3520 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3521 return ERROR_COMMAND_ARGUMENT_INVALID;
3522 }
3523
3524 if (CMD_ARGC >= 3) {
3525 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3526 if (config.mask > XTENSA_MAX_PERF_MASK) {
3527 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3528 return ERROR_COMMAND_ARGUMENT_INVALID;
3529 }
3530 }
3531
3532 if (CMD_ARGC >= 4) {
3533 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3534 if (config.kernelcnt > 1) {
3535 command_print(CMD, "kernelcnt should be 0 or 1");
3536 return ERROR_COMMAND_ARGUMENT_INVALID;
3537 }
3538 }
3539
3540 if (CMD_ARGC >= 5) {
3541 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3542 if (config.tracelevel > 7) {
3543 command_print(CMD, "tracelevel should be <=7");
3544 return ERROR_COMMAND_ARGUMENT_INVALID;
3545 }
3546 }
3547
3548 if (config.tracelevel == -1)
3549 config.tracelevel = xtensa->core_config->debug.irq_level;
3550
3551 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3552 }
3553
3554 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3555 {
3556 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3557 target_to_xtensa(get_current_target(CMD_CTX)));
3558 }
3559
3560 /* perfmon_dump [counter_id] */
3561 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3562 {
3563 if (CMD_ARGC > 1)
3564 return ERROR_COMMAND_SYNTAX_ERROR;
3565
3566 int counter_id = -1;
3567 if (CMD_ARGC == 1) {
3568 counter_id = strtol(CMD_ARGV[0], NULL, 0);
3569 		if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3570 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3571 return ERROR_COMMAND_ARGUMENT_INVALID;
3572 }
3573 }
3574
3575 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3576 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3577 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3578 char result_buf[128] = { 0 };
3579 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3580 struct xtensa_perfmon_result result;
3581 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3582 if (res != ERROR_OK)
3583 return res;
3584 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3585 "%-12" PRIu64 "%s",
3586 result.value,
3587 result.overflow ? " (overflow)" : "");
3588 LOG_INFO("%s", result_buf);
3589 }
3590
3591 return ERROR_OK;
3592 }
3593
3594 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3595 {
3596 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3597 target_to_xtensa(get_current_target(CMD_CTX)));
3598 }
3599
3600 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3601 {
3602 int state = -1;
3603
3604 if (CMD_ARGC < 1) {
3605 const char *st;
3606 state = xtensa->stepping_isr_mode;
3607 if (state == XT_STEPPING_ISR_ON)
3608 st = "OFF";
3609 else if (state == XT_STEPPING_ISR_OFF)
3610 st = "ON";
3611 else
3612 st = "UNKNOWN";
3613 command_print(CMD, "Current ISR step mode: %s", st);
3614 return ERROR_OK;
3615 }
3616 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3617 if (!strcasecmp(CMD_ARGV[0], "off"))
3618 state = XT_STEPPING_ISR_ON;
3619 else if (!strcasecmp(CMD_ARGV[0], "on"))
3620 state = XT_STEPPING_ISR_OFF;
3621
3622 if (state == -1) {
3623 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3624 return ERROR_FAIL;
3625 }
3626 xtensa->stepping_isr_mode = state;
3627 return ERROR_OK;
3628 }
3629
3630 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3631 {
3632 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3633 target_to_xtensa(get_current_target(CMD_CTX)));
3634 }
3635
3636 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3637 {
3638 int res;
3639 uint32_t val = 0;
3640
3641 if (CMD_ARGC >= 1) {
3642 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3643 			if (!strcasecmp(CMD_ARGV[i], "none")) {
3644 val = 0;
3645 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3646 val |= OCDDCR_BREAKINEN;
3647 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3648 val |= OCDDCR_BREAKOUTEN;
3649 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3650 val |= OCDDCR_RUNSTALLINEN;
3651 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3652 val |= OCDDCR_DEBUGMODEOUTEN;
3653 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3654 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3655 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3656 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
3657 } else {
3658 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3659 command_print(
3660 CMD,
3661 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3662 return ERROR_OK;
3663 }
3664 }
3665 res = xtensa_smpbreak_set(target, val);
3666 if (res != ERROR_OK)
3667 command_print(CMD, "Failed to set smpbreak config %d", res);
3668 } else {
3669 struct xtensa *xtensa = target_to_xtensa(target);
3670 res = xtensa_smpbreak_read(xtensa, &val);
3671 if (res == ERROR_OK)
3672 command_print(CMD, "Current bits set:%s%s%s%s",
3673 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3674 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3675 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3676 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3677 );
3678 else
3679 command_print(CMD, "Failed to get smpbreak config %d", res);
3680 }
3681 return res;
3682 }
3683
3684 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3685 {
3686 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3687 get_current_target(CMD_CTX));
3688 }
3689
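/* tracestart [pc <pcval>[/<maskbitcount>]] [after <n> [ins|words]]
 * Configure the optional PC stop trigger and the amount of data captured after
 * the trigger, then start tracing; any already-active trace is stopped first. */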
3690 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
3691 {
3692 struct xtensa_trace_status trace_status;
3693 struct xtensa_trace_start_config cfg = {
3694 .stoppc = 0,
3695 .stopmask = XTENSA_STOPMASK_DISABLED,
3696 .after = 0,
3697 .after_is_words = false
3698 };
3699
3700 /* Parse arguments */
3701 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3702 		if ((!strcasecmp(CMD_ARGV[i], "pc")) && (CMD_ARGC > i + 1)) {
3703 			char *e;
3704 			i++;
3705 			cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
3706 			cfg.stopmask = 0;
3707 			if (*e == '/')
3708 				cfg.stopmask = strtol(e + 1, NULL, 0);
3709 		} else if ((!strcasecmp(CMD_ARGV[i], "after")) && (CMD_ARGC > i + 1)) {
3710 i++;
3711 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
3712 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
3713 cfg.after_is_words = 0;
3714 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
3715 cfg.after_is_words = 1;
3716 } else {
3717 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
3718 return ERROR_FAIL;
3719 }
3720 }
3721
3722 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3723 if (res != ERROR_OK)
3724 return res;
3725 if (trace_status.stat & TRAXSTAT_TRACT) {
3726 		LOG_WARNING("Stopping active trace before starting a new one");
3727 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
3728 if (res != ERROR_OK)
3729 return res;
3730 }
3731
3732 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
3733 if (res != ERROR_OK)
3734 return res;
3735
3736 xtensa->trace_active = true;
3737 command_print(CMD, "Trace started.");
3738 return ERROR_OK;
3739 }
3740
3741 COMMAND_HANDLER(xtensa_cmd_tracestart)
3742 {
3743 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
3744 target_to_xtensa(get_current_target(CMD_CTX)));
3745 }
3746
3747 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
3748 {
3749 struct xtensa_trace_status trace_status;
3750
3751 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3752 if (res != ERROR_OK)
3753 return res;
3754
3755 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
3756 command_print(CMD, "No trace is currently active.");
3757 return ERROR_FAIL;
3758 }
3759
3760 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
3761 if (res != ERROR_OK)
3762 return res;
3763
3764 xtensa->trace_active = false;
3765 command_print(CMD, "Trace stop triggered.");
3766 return ERROR_OK;
3767 }
3768
3769 COMMAND_HANDLER(xtensa_cmd_tracestop)
3770 {
3771 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
3772 target_to_xtensa(get_current_target(CMD_CTX)));
3773 }
3774
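/* tracedump <outfile>: read the captured TRAX data from trace memory and write
 * it to a raw binary file, warning if the capture wrapped or is all zeroes. */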
3775 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
3776 {
3777 struct xtensa_trace_config trace_config;
3778 struct xtensa_trace_status trace_status;
3779 uint32_t memsz, wmem;
3780
3781 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3782 if (res != ERROR_OK)
3783 return res;
3784
3785 if (trace_status.stat & TRAXSTAT_TRACT) {
3786 command_print(CMD, "Tracing is still active. Please stop it first.");
3787 return ERROR_FAIL;
3788 }
3789
3790 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
3791 if (res != ERROR_OK)
3792 return res;
3793
3794 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
3795 command_print(CMD, "No active trace found; nothing to dump.");
3796 return ERROR_FAIL;
3797 }
3798
3799 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
3800 LOG_INFO("Total trace memory: %d words", memsz);
3801 if ((trace_config.addr &
3802 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
3803 		/* Memory hasn't overwritten itself yet. */
3804 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
3805 LOG_INFO("...but trace is only %d words", wmem);
3806 if (wmem < memsz)
3807 memsz = wmem;
3808 } else {
3809 if (trace_config.addr & TRAXADDR_TWSAT) {
3810 LOG_INFO("Real trace is many times longer than that (overflow)");
3811 } else {
3812 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
3813 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
3814 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
3815 }
3816 }
3817
3818 uint8_t *tracemem = malloc(memsz * 4);
3819 if (!tracemem) {
3820 command_print(CMD, "Failed to alloc memory for trace data!");
3821 return ERROR_FAIL;
3822 }
3823 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
3824 if (res != ERROR_OK) {
3825 free(tracemem);
3826 return res;
3827 }
3828
3829 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
3830 	if (f < 0) {
3831 free(tracemem);
3832 command_print(CMD, "Unable to open file %s", fname);
3833 return ERROR_FAIL;
3834 }
3835 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
3836 command_print(CMD, "Unable to write to file %s", fname);
3837 else
3838 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
3839 close(f);
3840
3841 bool is_all_zeroes = true;
3842 for (unsigned int i = 0; i < memsz * 4; i++) {
3843 if (tracemem[i] != 0) {
3844 is_all_zeroes = false;
3845 break;
3846 }
3847 }
3848 free(tracemem);
3849 if (is_all_zeroes)
3850 command_print(
3851 CMD,
3852 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
3853
3854 return ERROR_OK;
3855 }
3856
3857 COMMAND_HANDLER(xtensa_cmd_tracedump)
3858 {
3859 if (CMD_ARGC != 1) {
3860 		command_print(CMD, "Command takes exactly one parameter: the output file name for the trace dump.");
3861 return ERROR_FAIL;
3862 }
3863
3864 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
3865 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
3866 }
3867
3868 static const struct command_registration xtensa_any_command_handlers[] = {
3869 {
3870 .name = "xtdef",
3871 .handler = xtensa_cmd_xtdef,
3872 .mode = COMMAND_CONFIG,
3873 .help = "Configure Xtensa core type",
3874 .usage = "<type>",
3875 },
3876 {
3877 .name = "xtopt",
3878 .handler = xtensa_cmd_xtopt,
3879 .mode = COMMAND_CONFIG,
3880 .help = "Configure Xtensa core option",
3881 .usage = "<name> <value>",
3882 },
3883 {
3884 .name = "xtmem",
3885 .handler = xtensa_cmd_xtmem,
3886 .mode = COMMAND_CONFIG,
3887 .help = "Configure Xtensa memory/cache option",
3888 .usage = "<type> [parameters]",
3889 },
3890 {
3891 .name = "xtmmu",
3892 .handler = xtensa_cmd_xtmmu,
3893 .mode = COMMAND_CONFIG,
3894 .help = "Configure Xtensa MMU option",
3895 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
3896 },
3897 {
3898 .name = "xtmpu",
3899 .handler = xtensa_cmd_xtmpu,
3900 .mode = COMMAND_CONFIG,
3901 .help = "Configure Xtensa MPU option",
3902 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
3903 },
3904 {
3905 .name = "xtreg",
3906 .handler = xtensa_cmd_xtreg,
3907 .mode = COMMAND_CONFIG,
3908 .help = "Configure Xtensa register",
3909 .usage = "<regname> <regnum>",
3910 },
3911 {
3912 .name = "xtregs",
3913 .handler = xtensa_cmd_xtreg,
3914 .mode = COMMAND_CONFIG,
3915 .help = "Configure number of Xtensa registers",
3916 .usage = "<numregs>",
3917 },
3918 {
3919 .name = "xtregfmt",
3920 .handler = xtensa_cmd_xtregfmt,
3921 .mode = COMMAND_CONFIG,
3922 .help = "Configure format of Xtensa register map",
3923 .usage = "<contiguous|sparse> [numgregs]",
3924 },
3925 {
3926 .name = "set_permissive",
3927 .handler = xtensa_cmd_permissive_mode,
3928 .mode = COMMAND_ANY,
3929 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
3930 .usage = "[0|1]",
3931 },
3932 {
3933 .name = "maskisr",
3934 .handler = xtensa_cmd_mask_interrupts,
3935 .mode = COMMAND_ANY,
3936 .help = "mask Xtensa interrupts at step",
3937 .usage = "['on'|'off']",
3938 },
3939 {
3940 .name = "smpbreak",
3941 .handler = xtensa_cmd_smpbreak,
3942 .mode = COMMAND_ANY,
3943 .help = "Set the way the CPU chains OCD breaks",
3944 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
3945 },
3946 {
3947 .name = "perfmon_enable",
3948 .handler = xtensa_cmd_perfmon_enable,
3949 .mode = COMMAND_EXEC,
3950 .help = "Enable and start performance counter",
3951 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
3952 },
3953 {
3954 .name = "perfmon_dump",
3955 .handler = xtensa_cmd_perfmon_dump,
3956 .mode = COMMAND_EXEC,
3957 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
3958 .usage = "[counter_id]",
3959 },
3960 {
3961 .name = "tracestart",
3962 .handler = xtensa_cmd_tracestart,
3963 .mode = COMMAND_EXEC,
3964 .help =
3965 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
3966 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
3967 },
3968 {
3969 .name = "tracestop",
3970 .handler = xtensa_cmd_tracestop,
3971 .mode = COMMAND_EXEC,
3972 .help = "Tracing: Stop current trace as started by the tracestart command",
3973 .usage = "",
3974 },
3975 {
3976 .name = "tracedump",
3977 .handler = xtensa_cmd_tracedump,
3978 .mode = COMMAND_EXEC,
3979 		.help = "Tracing: Dump trace memory to a file. One file per core.",
3980 .usage = "<outfile>",
3981 },
3982 {
3983 .name = "exe",
3984 .handler = xtensa_cmd_exe,
3985 .mode = COMMAND_ANY,
3986 .help = "Xtensa stub execution",
3987 .usage = "<ascii-encoded hexadecimal instruction bytes>",
3988 },
3989 COMMAND_REGISTRATION_DONE
3990 };
3991
3992 const struct command_registration xtensa_command_handlers[] = {
3993 {
3994 .name = "xtensa",
3995 .mode = COMMAND_ANY,
3996 .help = "Xtensa command group",
3997 .usage = "",
3998 .chain = xtensa_any_command_handlers,
3999 },
4000 COMMAND_REGISTRATION_DONE
4001 };
