target/xtensa: fix clang analyzer warnings
src/target/xtensa/xtensa.c (openocd.git)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
22
23 /* Reverse the 4-bit nibble order of 8/16/24-bit Xtensa opcodes and fields (used for big-endian cores) */
24 #define XT_NIBSWAP8(V) \
25 ((((V) & 0x0F) << 4) \
26 | (((V) & 0xF0) >> 4))
27
28 #define XT_NIBSWAP16(V) \
29 ((((V) & 0x000F) << 12) \
30 | (((V) & 0x00F0) << 4) \
31 | (((V) & 0x0F00) >> 4) \
32 | (((V) & 0xF000) >> 12))
33
34 #define XT_NIBSWAP24(V) \
35 ((((V) & 0x00000F) << 20) \
36 | (((V) & 0x0000F0) << 12) \
37 | (((V) & 0x000F00) << 4) \
38 | (((V) & 0x00F000) >> 4) \
39 | (((V) & 0x0F0000) >> 12) \
40 | (((V) & 0xF00000) >> 20))
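/* For illustration: these macros reverse the nibble order so an opcode assembled
 * for a little-endian core can be pushed into the Debug Instruction Register (DIR)
 * on a big-endian core. E.g. XT_NIBSWAP24(0x130000), the WSR opcode used below,
 * evaluates to 0x000031 (nibbles 1,3,0,0,0,0 come back out as 0,0,0,0,3,1). */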
41
42 /* _XT_INS_FORMAT_*()
43 * Instruction formatting converted from little-endian inputs
44 * and shifted to the MSB-side of DIR for BE systems.
45 */
46 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
47 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
48 | (((T) & 0x0F) << 16) \
49 | (((SR) & 0xFF) << 8)) << 8 \
50 : (OPCODE) \
51 | (((SR) & 0xFF) << 8) \
52 | (((T) & 0x0F) << 4))
53
54 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
55 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
56 | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
57 | (((R) & 0x0F) << 8)) << 8 \
58 : (OPCODE) \
59 | (((ST) & 0xFF) << 4) \
60 | (((R) & 0x0F) << 12))
61
62 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
63 (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
64 | (((T) & 0x0F) << 8) \
65 | (((S) & 0x0F) << 4) \
66 | ((IMM4) & 0x0F)) << 16 \
67 : (OPCODE) \
68 | (((T) & 0x0F) << 4) \
69 | (((S) & 0x0F) << 8) \
70 | (((IMM4) & 0x0F) << 12))
71
72 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
73 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
74 | (((T) & 0x0F) << 16) \
75 | (((S) & 0x0F) << 12) \
76 | (((R) & 0x0F) << 8) \
77 | ((IMM8) & 0xFF)) << 8 \
78 : (OPCODE) \
79 | (((IMM8) & 0xFF) << 16) \
80 | (((R) & 0x0F) << 12) \
81 | (((S) & 0x0F) << 8) \
82 | (((T) & 0x0F) << 4))
83
84 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
85 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
86 | (((T) & 0x0F) << 16) \
87 | (((S) & 0x0F) << 12) \
88 | (((R) & 0x0F) << 8)) << 8 \
89 | ((IMM4) & 0x0F) \
90 : (OPCODE) \
91 | (((IMM4) & 0x0F) << 20) \
92 | (((R) & 0x0F) << 12) \
93 | (((S) & 0x0F) << 8) \
94 | (((T) & 0x0F) << 4))
95
96 /* Xtensa processor instruction opcodes
97 */
98 /* "Return From Debug Operation" to Normal */
99 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
100 /* "Return From Debug and Dispatch" - allow software debug code to take over */
101 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
102
103 /* Load 32 bits from memory at a(S) into DDR; post-increment a(S) */
104 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
105 /* Store 32 bits from DDR to memory at a(S); post-increment a(S) */
106 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
107
108 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
109 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
110 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
111 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
112 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
113 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
114
115 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
116 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
117 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
118 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
119 /* Store 8-bit to A(S)+IMM8 from A(T) */
120 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
121
122 /* Cache Instructions */
123 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
124 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
125 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
126 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
127
128 /* Control Instructions */
129 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
130 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
131
132 /* Read Special Register */
133 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
134 /* Write Special Register */
135 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
136 /* Swap Special Register */
137 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
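/* Worked example (little-endian case): XT_INS_RSR(X, XT_SR_DDR, XT_REG_A3), with DDR
 * being special register 0x68 and A3 register number 3, expands to
 * 0x030000 | (0x68 << 8) | (3 << 4) = 0x036830, i.e. "rsr a3, ddr". The big-endian
 * branch emits the same instruction nibble-swapped and shifted up for DIR. */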
138
139 /* Rotate Window by (-8..7) */
140 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
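/* ROTW adds its signed immediate to WINDOWBASE (which counts in units of four ARs),
 * so XT_INS_ROTW(x, 4) moves the visible a0..a15 window forward by 16 physical
 * registers. The AR read/write loops below rely on the final rotation wrapping
 * WINDOWBASE back to its starting value. */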
141
142 /* Read User Register */
143 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
144 /* Write User Register */
145 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
146
147 /* Read Floating-Point Register */
148 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
149 /* Write Floating-Point Register */
150 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
151
152 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
153 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
154 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
155
156 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
157 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
158 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
159
160 #define XT_WATCHPOINTS_NUM_MAX 2
161
162 /* Special register number macro for DDR, PS, WB, A3, A4 registers.
163 * These get used a lot so making a shortcut is useful.
164 */
165 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
166 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
167 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
168 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
169 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
170
171 #define XT_PS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
172 #define XT_PC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
173 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
174 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
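/* Example of how these bases are used: on a core configured with
 * debug.irq_level == 6, the PC saved on debug entry is accessed as EPC6
 * (XT_PC_REG_NUM_BASE + 6 = 0xb6) and the saved PS as EPS6
 * (XT_PS_REG_NUM_BASE + 6 = 0xc6); XT_PC_REG_NUM_VIRTUAL is only a placeholder
 * until that addition is performed. */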
175
176 #define XT_SW_BREAKPOINTS_MAX_NUM 32
177 #define XT_HW_IBREAK_MAX_NUM 2
178 #define XT_HW_DBREAK_MAX_NUM 2
179
180 struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
181 XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
182 XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
183 XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
184 XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
185 XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
186 XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
187 XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
188 XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
189 XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
190 XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
191 XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
192 XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
193 XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
194 XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
195 XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
196 XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
197 XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
198 XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
199 XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
200 XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
201 XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
202 XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
203 XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
204 XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
205 XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
206 XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
207 XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
208 XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
209 XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
210 XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
211 XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
212 XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
213 XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
214 XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
215 XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
216 XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
217 XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
218 XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
219 XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
220 XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
221 XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
222 XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
223 XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
224 XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
225 XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
226 XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
227 XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
228 XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
229 XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
230 XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
231 XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
232 XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
233 XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
234 XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
235 XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
236 XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
237 XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
238 XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
239 XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
240 XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
241 XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
242 XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
243 XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
244 XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
245 XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
246 XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
247 XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
248 XT_MK_REG_DESC("ps", 0xE6, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
249 XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
250 XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
251 XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
252 XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
253 XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
254 XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
255 XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
256 XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
257 XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
258 XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
259 XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
260 XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
261 XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
262
263 /* WARNING: For these registers, regnum points to the
264 * index of the corresponding ARx registers, NOT to
265 * the processor register number! */
266 XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
267 XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
268 XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
269 XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
270 XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
271 XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
272 XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
273 XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
274 XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
275 XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
276 XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
277 XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
278 XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
279 XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
280 XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
281 XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
282 };
283
284 /**
285 * Types of memory regions used by the Xtensa target
286 */
287 enum xtensa_mem_region_type {
288 XTENSA_MEM_REG_IROM = 0x0,
289 XTENSA_MEM_REG_IRAM,
290 XTENSA_MEM_REG_DROM,
291 XTENSA_MEM_REG_DRAM,
292 XTENSA_MEM_REG_SRAM,
293 XTENSA_MEM_REG_SROM,
294 XTENSA_MEM_REGS_NUM
295 };
296
297 /* Register definition as union for list allocation */
298 union xtensa_reg_val_u {
299 xtensa_reg_val_t val;
300 uint8_t buf[4];
301 };
302
303 const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
304 { .chrval = "E00", .intval = ERROR_FAIL },
305 { .chrval = "E01", .intval = ERROR_FAIL },
306 { .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
307 { .chrval = "E03", .intval = ERROR_FAIL },
308 };
309
310 /* Set to true for extra debug logging */
311 static const bool xtensa_extra_debug_log;
312
313 /**
314 * Returns the local memory config for the given memory region type
315 */
316 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
317 struct xtensa *xtensa,
318 enum xtensa_mem_region_type type)
319 {
320 switch (type) {
321 case XTENSA_MEM_REG_IROM:
322 return &xtensa->core_config->irom;
323 case XTENSA_MEM_REG_IRAM:
324 return &xtensa->core_config->iram;
325 case XTENSA_MEM_REG_DROM:
326 return &xtensa->core_config->drom;
327 case XTENSA_MEM_REG_DRAM:
328 return &xtensa->core_config->dram;
329 case XTENSA_MEM_REG_SRAM:
330 return &xtensa->core_config->sram;
331 case XTENSA_MEM_REG_SROM:
332 return &xtensa->core_config->srom;
333 default:
334 return NULL;
335 }
336 }
337
338 /**
339 * Finds the xtensa_local_mem_region_config within a xtensa_local_mem_config
340 * that contains the given address.
341 * Returns NULL if no region matches.
342 */
343 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
344 const struct xtensa_local_mem_config *mem,
345 target_addr_t address)
346 {
347 for (unsigned int i = 0; i < mem->count; i++) {
348 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
349 if (address >= region->base && address < (region->base + region->size))
350 return region;
351 }
352 return NULL;
353 }
354
355 /**
356 * Searches all memory region types of the Xtensa target for the region
357 * that contains the given address.
358 * Returns NULL if no region matches.
359 */
360 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
361 struct xtensa *xtensa,
362 target_addr_t address)
363 {
364 const struct xtensa_local_mem_region_config *result;
365 const struct xtensa_local_mem_config *mcgf;
366 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
367 mcgf = xtensa_get_mem_config(xtensa, mtype);
368 result = xtensa_memory_region_find(mcgf, address);
369 if (result)
370 return result;
371 }
372 return NULL;
373 }
374
375 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
376 const struct xtensa_local_mem_config *mem,
377 target_addr_t address)
378 {
379 if (!cache->size)
380 return false;
381 return xtensa_memory_region_find(mem, address);
382 }
383
384 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
385 {
386 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
387 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
388 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
389 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
390 }
391
392 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
393 {
394 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
395 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
396 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
397 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
398 }
399
400 static int xtensa_core_reg_get(struct reg *reg)
401 {
402 /* We don't need this because we read all registers on halt anyway. */
403 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
404 struct target *target = xtensa->target;
405
406 if (target->state != TARGET_HALTED)
407 return ERROR_TARGET_NOT_HALTED;
408 if (!reg->exist) {
409 if (strncmp(reg->name, "?0x", 3) == 0) {
410 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
411 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
412 return ERROR_OK;
413 }
414 return ERROR_COMMAND_ARGUMENT_INVALID;
415 }
416 return ERROR_OK;
417 }
418
419 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
420 {
421 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
422 struct target *target = xtensa->target;
423
424 assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
425 if (target->state != TARGET_HALTED)
426 return ERROR_TARGET_NOT_HALTED;
427
428 if (!reg->exist) {
429 if (strncmp(reg->name, "?0x", 3) == 0) {
430 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
431 LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
432 return ERROR_OK;
433 }
434 return ERROR_COMMAND_ARGUMENT_INVALID;
435 }
436
437 buf_cpy(buf, reg->value, reg->size);
438
439 if (xtensa->core_config->windowed) {
440 /* If the user updates a potential scratch register, track for conflicts */
441 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
442 if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
443 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
444 buf_get_u32(reg->value, 0, 32));
445 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
446 xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
447 xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
448 xtensa->scratch_ars[s].intval = true;
449 break;
450 }
451 }
452 }
453 reg->dirty = true;
454 reg->valid = true;
455
456 return ERROR_OK;
457 }
458
459 static const struct reg_arch_type xtensa_reg_type = {
460 .get = xtensa_core_reg_get,
461 .set = xtensa_core_reg_set,
462 };
463
464 /* Convert a register index that's indexed relative to windowbase, to the real address. */
465 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
466 enum xtensa_reg_id reg_idx,
467 int windowbase)
468 {
469 unsigned int idx;
470 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
471 idx = reg_idx - XT_REG_IDX_AR0;
472 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
473 idx = reg_idx - XT_REG_IDX_A0;
474 } else {
475 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
476 return -1;
477 }
478 return ((idx + windowbase * 4) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
479 }
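/* Example of the mapping above: with WINDOWBASE == 2 on a 64-AR core, the
 * window-relative a3 resolves to physical ar11, since (3 + 2 * 4) & (64 - 1) = 11.
 * xtensa_canonical_to_windowbase_offset() below computes the inverse mapping by
 * passing in -windowbase. */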
480
481 static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
482 enum xtensa_reg_id reg_idx,
483 int windowbase)
484 {
485 return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
486 }
487
488 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
489 {
490 struct reg *reg_list = xtensa->core_cache->reg_list;
491 reg_list[reg_idx].dirty = true;
492 }
493
494 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
495 {
496 xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0EXEC, ins);
497 }
498
499 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
500 {
501 if ((oplen > 0) && (oplen <= 64)) {
502 uint32_t opsw[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; /* 8 DIRx regs: max width 64B */
503 uint8_t oplenw = (oplen + 3) / 4;
504 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
505 buf_bswap32((uint8_t *)opsw, ops, oplenw * 4);
506 else
507 memcpy(opsw, ops, oplen);
508 for (int32_t i = oplenw - 1; i > 0; i--)
509 xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0 + i, opsw[i]);
510 /* Write DIR0EXEC last */
511 xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0EXEC, opsw[0]);
512 }
513 }
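/* A wide instruction (up to 64 bytes) is spread across DIR0..DIR7; DIR0 is written
 * last through DIR0EXEC so the bundle only executes once all of its words have been
 * loaded. */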
514
515 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
516 {
517 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
518 return dm->pwr_ops->queue_reg_write(dm, reg, data);
519 }
520
521 /* NOTE: Assumes A3 has already been saved */
522 int xtensa_window_state_save(struct target *target, uint32_t *woe)
523 {
524 struct xtensa *xtensa = target_to_xtensa(target);
525 int woe_dis;
526 uint8_t woe_buf[4];
527
528 if (xtensa->core_config->windowed) {
529 /* Save PS (LX) and disable window overflow exceptions prior to AR save */
530 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_PS, XT_REG_A3));
531 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
532 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, woe_buf);
533 int res = jtag_execute_queue();
534 if (res != ERROR_OK) {
535 LOG_ERROR("Failed to read PS (%d)!", res);
536 return res;
537 }
538 xtensa_core_status_check(target);
539 *woe = buf_get_u32(woe_buf, 0, 32);
540 woe_dis = *woe & ~XT_PS_WOE_MSK;
541 LOG_DEBUG("Clearing PS.WOE (0x%08" PRIx32 " -> 0x%08" PRIx32 ")", *woe, woe_dis);
542 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, woe_dis);
543 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
544 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
545 }
546 return ERROR_OK;
547 }
548
549 /* NOTE: Assumes A3 has already been saved */
550 void xtensa_window_state_restore(struct target *target, uint32_t woe)
551 {
552 struct xtensa *xtensa = target_to_xtensa(target);
553 if (xtensa->core_config->windowed) {
554 /* Restore window overflow exception state */
555 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, woe);
556 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
557 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
558 LOG_DEBUG("Restored PS.WOE (0x%08" PRIx32 ")", woe);
559 }
560 }
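/* Typical usage of the two helpers above (sketch, as in xtensa_write_dirty_registers()
 * and xtensa_fetch_all_regs() below): the caller first spills A3 through DDR, calls
 * xtensa_window_state_save() to clear PS.WOE, performs its AR accesses, then calls
 * xtensa_window_state_restore() and finally rewrites A3 from the saved value. */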
561
562 static bool xtensa_reg_is_readable(int flags, int cpenable)
563 {
564 if (flags & XT_REGF_NOREAD)
565 return false;
566 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
567 return false;
568 return true;
569 }
570
571 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
572 {
573 int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
574 if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
575 LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
576 memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
577 } else {
578 LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
579 memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
580 }
581 return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
582 }
583
584 static int xtensa_write_dirty_registers(struct target *target)
585 {
586 struct xtensa *xtensa = target_to_xtensa(target);
587 int res;
588 xtensa_reg_val_t regval, windowbase = 0;
589 bool scratch_reg_dirty = false, delay_cpenable = false;
590 struct reg *reg_list = xtensa->core_cache->reg_list;
591 unsigned int reg_list_size = xtensa->core_cache->num_regs;
592 bool preserve_a3 = false;
593 uint8_t a3_buf[4];
594 xtensa_reg_val_t a3 = 0, woe;
595
596 LOG_TARGET_DEBUG(target, "start");
597
598 /* We need to write the dirty registers in the cache list back to the processor.
599 * Start by writing the SFR/user registers. */
600 for (unsigned int i = 0; i < reg_list_size; i++) {
601 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
602 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
603 if (reg_list[i].dirty) {
604 if (rlist[ridx].type == XT_REG_SPECIAL ||
605 rlist[ridx].type == XT_REG_USER ||
606 rlist[ridx].type == XT_REG_FR) {
607 scratch_reg_dirty = true;
608 if (i == XT_REG_IDX_CPENABLE) {
609 delay_cpenable = true;
610 continue;
611 }
612 regval = xtensa_reg_get(target, i);
613 LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
614 reg_list[i].name,
615 rlist[ridx].reg_num,
616 regval);
617 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
618 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
619 if (reg_list[i].exist) {
620 unsigned int reg_num = rlist[ridx].reg_num;
621 if (rlist[ridx].type == XT_REG_USER) {
622 xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
623 } else if (rlist[ridx].type == XT_REG_FR) {
624 xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
625 } else {/*SFR */
626 if (reg_num == XT_PC_REG_NUM_VIRTUAL)
627 /* reg number of PC for debug interrupt
628 * depends on NDEBUGLEVEL */
629 reg_num =
630 (XT_PC_REG_NUM_BASE +
631 xtensa->core_config->debug.irq_level);
632 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
633 }
634 }
635 reg_list[i].dirty = false;
636 }
637 }
638 }
639 if (scratch_reg_dirty)
640 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
641 if (delay_cpenable) {
642 regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
643 LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
644 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
645 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
646 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
647 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
648 XT_REG_A3));
649 reg_list[XT_REG_IDX_CPENABLE].dirty = false;
650 }
651
652 preserve_a3 = (xtensa->core_config->windowed);
653 if (preserve_a3) {
654 /* Save (windowed) A3 for scratch use */
655 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
656 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, a3_buf);
657 res = jtag_execute_queue();
658 if (res != ERROR_OK)
659 return res;
660 xtensa_core_status_check(target);
661 a3 = buf_get_u32(a3_buf, 0, 32);
662 }
663
664 if (xtensa->core_config->windowed) {
665 res = xtensa_window_state_save(target, &woe);
666 if (res != ERROR_OK)
667 return res;
668 /* Grab the windowbase, we need it. */
669 windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
670 /* Check if there are mismatches between the ARx and corresponding Ax registers.
671 * When the user sets a register on a windowed config, xt-gdb may set the ARx
672 * register directly. Thus we take ARx as priority over Ax if both are dirty
673 * and it's unclear if the user set one over the other explicitly.
674 */
675 for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
676 unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
677 if (reg_list[i].dirty && reg_list[j].dirty) {
678 if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
679 bool show_warning = true;
680 if (i == XT_REG_IDX_A3)
681 show_warning = xtensa_scratch_regs_fixup(xtensa,
682 reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
683 else if (i == XT_REG_IDX_A4)
684 show_warning = xtensa_scratch_regs_fixup(xtensa,
685 reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
686 if (show_warning)
687 LOG_WARNING(
688 "Warning: Both A%d [0x%08" PRIx32
689 "] as well as its underlying physical register "
690 "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
691 i - XT_REG_IDX_A0,
692 buf_get_u32(reg_list[i].value, 0, 32),
693 j - XT_REG_IDX_AR0,
694 buf_get_u32(reg_list[j].value, 0, 32));
695 }
696 }
697 }
698 }
699
700 /* Write A0-A15. */
701 for (unsigned int i = 0; i < 16; i++) {
702 if (reg_list[XT_REG_IDX_A0 + i].dirty) {
703 regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
704 LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
705 xtensa_regs[XT_REG_IDX_A0 + i].name,
706 regval,
707 xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
708 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
709 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
710 reg_list[XT_REG_IDX_A0 + i].dirty = false;
711 if (i == 3) {
712 /* Avoid stomping A3 during restore at end of function */
713 a3 = regval;
714 }
715 }
716 }
717
718 if (xtensa->core_config->windowed) {
719 /* Now write AR registers */
720 for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
721 /* Write the 16 registers we can see */
722 for (unsigned int i = 0; i < 16; i++) {
723 if (i + j < xtensa->core_config->aregs_num) {
724 enum xtensa_reg_id realadr =
725 xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
726 windowbase);
727 /* Write back any dirty un-windowed registers */
728 if (reg_list[realadr].dirty) {
729 regval = xtensa_reg_get(target, realadr);
730 LOG_TARGET_DEBUG(
731 target,
732 "Writing back reg %s value %08" PRIX32 ", num =%i",
733 xtensa_regs[realadr].name,
734 regval,
735 xtensa_regs[realadr].reg_num);
736 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
737 xtensa_queue_exec_ins(xtensa,
738 XT_INS_RSR(xtensa, XT_SR_DDR,
739 xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
740 reg_list[realadr].dirty = false;
741 if ((i + j) == 3)
742 /* Avoid stomping AR during A3 restore at end of function */
743 a3 = regval;
744 }
745 }
746 }
747 /* Now rotate the window so we'll see the next 16 registers.
748 * The final rotate will wrap around,
749 * leaving us in the state we were in. */
750 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
751 }
752
753 xtensa_window_state_restore(target, woe);
754
755 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
756 xtensa->scratch_ars[s].intval = false;
757 }
758
759 if (preserve_a3) {
760 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, a3);
761 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
762 }
763
764 res = jtag_execute_queue();
765 xtensa_core_status_check(target);
766
767 return res;
768 }
769
770 static inline bool xtensa_is_stopped(struct target *target)
771 {
772 struct xtensa *xtensa = target_to_xtensa(target);
773 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
774 }
775
776 int xtensa_examine(struct target *target)
777 {
778 struct xtensa *xtensa = target_to_xtensa(target);
779 unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;
780
781 LOG_DEBUG("coreid = %d", target->coreid);
782
783 if (xtensa->core_config->core_type == XT_UNDEF) {
784 LOG_ERROR("Xtensa core not configured; is xtensa-core-openocd.cfg missing?");
785 return ERROR_FAIL;
786 }
787
788 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
789 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
790 xtensa_dm_queue_enable(&xtensa->dbg_mod);
791 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
792 int res = jtag_execute_queue();
793 if (res != ERROR_OK)
794 return res;
795 if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
796 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
797 return ERROR_TARGET_FAILURE;
798 }
799 LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
800 if (!target_was_examined(target))
801 target_set_examined(target);
802 xtensa_smpbreak_write(xtensa, xtensa->smp_break);
803 return ERROR_OK;
804 }
805
806 int xtensa_wakeup(struct target *target)
807 {
808 struct xtensa *xtensa = target_to_xtensa(target);
809 unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;
810
811 if (xtensa->reset_asserted)
812 cmd |= PWRCTL_CORERESET;
813 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
814 /* TODO: can we join this with the write above? */
815 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
816 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
817 return jtag_execute_queue();
818 }
819
820 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
821 {
822 uint32_t dsr_data = 0x00110000;
823 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
824 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
825 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
826
827 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
828 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, set | OCDDCR_ENABLEOCD);
829 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, clear);
830 xtensa_queue_dbg_reg_write(xtensa, NARADR_DSR, dsr_data);
831 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
832 return jtag_execute_queue();
833 }
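/* Sketch of the effect: the bits passed in 'set' (plus OCDDCR_ENABLEOCD) go to DCRSET,
 * while whichever of BREAKINEN/BREAKOUTEN/RUNSTALLINEN/DEBUGMODEOUTEN were not
 * requested go to DCRCLR. E.g. set == OCDDCR_BREAKINEN enables only the break-in
 * signal and clears the other three enables. */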
834
835 int xtensa_smpbreak_set(struct target *target, uint32_t set)
836 {
837 struct xtensa *xtensa = target_to_xtensa(target);
838 int res = ERROR_OK;
839
840 xtensa->smp_break = set;
841 if (target_was_examined(target))
842 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
843 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
844 return res;
845 }
846
847 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
848 {
849 uint8_t dcr_buf[sizeof(uint32_t)];
850
851 xtensa_queue_dbg_reg_read(xtensa, NARADR_DCRSET, dcr_buf);
852 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
853 int res = jtag_execute_queue();
854 *val = buf_get_u32(dcr_buf, 0, 32);
855
856 return res;
857 }
858
859 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
860 {
861 struct xtensa *xtensa = target_to_xtensa(target);
862 *val = xtensa->smp_break;
863 return ERROR_OK;
864 }
865
866 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
867 {
868 return buf_get_u32(reg->value, 0, 32);
869 }
870
871 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
872 {
873 buf_set_u32(reg->value, 0, 32, value);
874 reg->dirty = true;
875 }
876
877 int xtensa_core_status_check(struct target *target)
878 {
879 struct xtensa *xtensa = target_to_xtensa(target);
880 int res, needclear = 0;
881
882 xtensa_dm_core_status_read(&xtensa->dbg_mod);
883 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
884 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
885 if (dsr & OCDDSR_EXECBUSY) {
886 if (!xtensa->suppress_dsr_errors)
887 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
888 needclear = 1;
889 }
890 if (dsr & OCDDSR_EXECEXCEPTION) {
891 if (!xtensa->suppress_dsr_errors)
892 LOG_TARGET_ERROR(target,
893 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
894 dsr);
895 needclear = 1;
896 }
897 if (dsr & OCDDSR_EXECOVERRUN) {
898 if (!xtensa->suppress_dsr_errors)
899 LOG_TARGET_ERROR(target,
900 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
901 dsr);
902 needclear = 1;
903 }
904 if (needclear) {
905 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
906 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
907 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
908 LOG_TARGET_ERROR(target, "clearing DSR failed!");
909 return ERROR_FAIL;
910 }
911 return ERROR_OK;
912 }
913
914 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
915 {
916 struct xtensa *xtensa = target_to_xtensa(target);
917 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
918 return xtensa_reg_get_value(reg);
919 }
920
921 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
922 {
923 struct xtensa *xtensa = target_to_xtensa(target);
924 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
925 if (xtensa_reg_get_value(reg) == value)
926 return;
927 xtensa_reg_set_value(reg, value);
928 }
929
930 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
931 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
932 {
933 struct xtensa *xtensa = target_to_xtensa(target);
934 uint32_t windowbase = (xtensa->core_config->windowed ?
935 xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE) : 0);
936 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
937 xtensa_reg_set(target, a_idx, value);
938 xtensa_reg_set(target, ar_idx, value);
939 }
940
941 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
942 uint32_t xtensa_cause_get(struct target *target)
943 {
944 return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
945 }
946
947 void xtensa_cause_clear(struct target *target)
948 {
949 struct xtensa *xtensa = target_to_xtensa(target);
950 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
951 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
952 }
953
954 int xtensa_assert_reset(struct target *target)
955 {
956 struct xtensa *xtensa = target_to_xtensa(target);
957
958 LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
959 target->state = TARGET_RESET;
960 xtensa_queue_pwr_reg_write(xtensa,
961 DMREG_PWRCTL,
962 PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP |
963 PWRCTL_CORERESET);
964 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
965 int res = jtag_execute_queue();
966 if (res != ERROR_OK)
967 return res;
968 xtensa->reset_asserted = true;
969 return res;
970 }
971
972 int xtensa_deassert_reset(struct target *target)
973 {
974 struct xtensa *xtensa = target_to_xtensa(target);
975
976 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
977 if (target->reset_halt)
978 xtensa_queue_dbg_reg_write(xtensa,
979 NARADR_DCRSET,
980 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
981 xtensa_queue_pwr_reg_write(xtensa,
982 DMREG_PWRCTL,
983 PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP);
984 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
985 int res = jtag_execute_queue();
986 if (res != ERROR_OK)
987 return res;
988 target->state = TARGET_RUNNING;
989 xtensa->reset_asserted = false;
990 return res;
991 }
992
993 int xtensa_soft_reset_halt(struct target *target)
994 {
995 LOG_TARGET_DEBUG(target, "begin");
996 return xtensa_assert_reset(target);
997 }
998
999 int xtensa_fetch_all_regs(struct target *target)
1000 {
1001 struct xtensa *xtensa = target_to_xtensa(target);
1002 struct reg *reg_list = xtensa->core_cache->reg_list;
1003 unsigned int reg_list_size = xtensa->core_cache->num_regs;
1004 xtensa_reg_val_t cpenable = 0, windowbase = 0, a3;
1005 uint32_t woe;
1006 uint8_t a3_buf[4];
1007 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1008
1009 union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
1010 if (!regvals) {
1011 LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
1012 return ERROR_FAIL;
1013 }
1014 union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
1015 if (!dsrs) {
1016 LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
1017 free(regvals);
1018 return ERROR_FAIL;
1019 }
1020
1021 LOG_TARGET_DEBUG(target, "start");
1022
1023 /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1024 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1025 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, a3_buf);
1026 int res = xtensa_window_state_save(target, &woe);
1027 if (res != ERROR_OK)
1028 goto xtensa_fetch_all_regs_done;
1029
1030 /* Assume the CPU has just halted. We now want to fill the register cache with all the
1031 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1032 * in one go, then sort everything out from the regvals variable. */
1033
1034 /* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
1035 for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
1036 /* Grab the 16 registers we can see */
1037 for (unsigned int i = 0; i < 16; i++) {
1038 if (i + j < xtensa->core_config->aregs_num) {
1039 xtensa_queue_exec_ins(xtensa,
1040 XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
1041 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR,
1042 regvals[XT_REG_IDX_AR0 + i + j].buf);
1043 if (debug_dsrs)
1044 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR,
1045 dsrs[XT_REG_IDX_AR0 + i + j].buf);
1046 }
1047 }
1048 if (xtensa->core_config->windowed)
1049 /* Now rotate the window so we'll see the next 16 registers.
1050 * The final rotate will wrap around,
1051 * leaving us in the state we were in. */
1052 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
1053 }
1054 xtensa_window_state_restore(target, woe);
1055
1056 if (xtensa->core_config->coproc) {
1057 /* As the very first thing after AREGS, go grab CPENABLE */
1058 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1059 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1060 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
1061 }
1062 res = jtag_execute_queue();
1063 if (res != ERROR_OK) {
1064 LOG_ERROR("Failed to read ARs (%d)!", res);
1065 goto xtensa_fetch_all_regs_done;
1066 }
1067 xtensa_core_status_check(target);
1068
1069 a3 = buf_get_u32(a3_buf, 0, 32);
1070
1071 if (xtensa->core_config->coproc) {
1072 cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);
1073
1074 /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1075 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, 0xffffffff);
1076 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1077 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1078
1079 /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1080 LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
1081 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
1082 }
1083 /* We're now free to use any of A0-A15 as scratch registers
1084 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1085 for (unsigned int i = 0; i < reg_list_size; i++) {
1086 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1087 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1088 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1089 bool reg_fetched = true;
1090 unsigned int reg_num = rlist[ridx].reg_num;
1091 switch (rlist[ridx].type) {
1092 case XT_REG_USER:
1093 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
1094 break;
1095 case XT_REG_FR:
1096 xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
1097 break;
1098 case XT_REG_SPECIAL:
1099 if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
1100 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1101 reg_num = (XT_PC_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
1102 } else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
1103 /* CPENABLE already read/updated; don't re-read */
1104 reg_fetched = false;
1105 break;
1106 }
1107 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
1108 break;
1109 default:
1110 reg_fetched = false;
1111 }
1112 if (reg_fetched) {
1113 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1114 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[i].buf);
1115 if (debug_dsrs)
1116 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[i].buf);
1117 }
1118 }
1119 }
1120 /* Ok, send the whole mess to the CPU. */
1121 res = jtag_execute_queue();
1122 if (res != ERROR_OK) {
1123 LOG_ERROR("Failed to fetch special/user regs!");
1124 goto xtensa_fetch_all_regs_done;
1125 }
1126 xtensa_core_status_check(target);
1127
1128 if (debug_dsrs) {
1129 /* DSR checking: follows order in which registers are requested. */
1130 for (unsigned int i = 0; i < reg_list_size; i++) {
1131 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1132 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1133 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
1134 (rlist[ridx].type != XT_REG_DEBUG) &&
1135 (rlist[ridx].type != XT_REG_RELGEN) &&
1136 (rlist[ridx].type != XT_REG_TIE) &&
1137 (rlist[ridx].type != XT_REG_OTHER)) {
1138 if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
1139 LOG_ERROR("Exception reading %s!", reg_list[i].name);
1140 res = ERROR_FAIL;
1141 goto xtensa_fetch_all_regs_done;
1142 }
1143 }
1144 }
1145 }
1146
1147 if (xtensa->core_config->windowed)
1148 /* We need the windowbase to decode the general addresses. */
1149 windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE].buf, 0, 32);
1150 /* Decode the result and update the cache. */
1151 for (unsigned int i = 0; i < reg_list_size; i++) {
1152 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1153 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1154 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1155 if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
1156 /* The 64-value general register set is read from (windowbase) on down.
1157 * We need to get the real register address by subtracting windowbase and
1158 * wrapping around. */
1159 enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
1160 windowbase);
1161 buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
1162 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1163 buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
1164 if (xtensa_extra_debug_log) {
1165 xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
1166 LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
1167 }
1168 } else {
1169 xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
1170 bool is_dirty = (i == XT_REG_IDX_CPENABLE);
1171 if (xtensa_extra_debug_log)
1172 LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
1173 xtensa_reg_set(target, i, regval);
1174 reg_list[i].dirty = is_dirty; /*always do this _after_ xtensa_reg_set! */
1175 }
1176 reg_list[i].valid = true;
1177 } else {
1178 if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
1179 /* Report read-only registers all-zero but valid */
1180 reg_list[i].valid = true;
1181 xtensa_reg_set(target, i, 0);
1182 } else {
1183 reg_list[i].valid = false;
1184 }
1185 }
1186 }
1187
1188 if (xtensa->core_config->windowed) {
1189 /* We have used A3 as a scratch register.
1190 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
1191 */
1192 enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
1193 xtensa_reg_set(target, ar3_idx, a3);
1194 xtensa_mark_register_dirty(xtensa, ar3_idx);
1195
1196 /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1197 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
1198 enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
1199 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
1200 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
1201 xtensa->scratch_ars[s].intval = false;
1202 }
1203
1204 /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1205 xtensa_reg_set(target, XT_REG_IDX_A3, a3);
1206 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1207 xtensa->regs_fetched = true;
1208 xtensa_fetch_all_regs_done:
1209 free(regvals);
1210 free(dsrs);
1211 return res;
1212 }
1213
1214 int xtensa_get_gdb_reg_list(struct target *target,
1215 struct reg **reg_list[],
1216 int *reg_list_size,
1217 enum target_register_class reg_class)
1218 {
1219 struct xtensa *xtensa = target_to_xtensa(target);
1220 unsigned int num_regs;
1221
1222 if (reg_class == REG_CLASS_GENERAL) {
1223 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1224 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1225 return ERROR_FAIL;
1226 }
1227 num_regs = xtensa->genpkt_regs_num;
1228 } else {
1229 /* Determine whether to return a contiguous or sparse register map */
1230 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1231 }
1232
1233 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1234
1235 *reg_list = calloc(num_regs, sizeof(struct reg *));
1236 if (!*reg_list)
1237 return ERROR_FAIL;
1238
1239 *reg_list_size = num_regs;
1240 if (xtensa->regmap_contiguous) {
1241 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1242 for (unsigned int i = 0; i < num_regs; i++)
1243 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1244 return ERROR_OK;
1245 }
1246
1247 for (unsigned int i = 0; i < num_regs; i++)
1248 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1249 unsigned int k = 0;
1250 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1251 if (xtensa->core_cache->reg_list[i].exist) {
1252 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1253 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1254 int sparse_idx = rlist[ridx].dbreg_num;
1255 if (i == XT_REG_IDX_PS) {
1256 if (xtensa->eps_dbglevel_idx == 0) {
1257 LOG_ERROR("eps_dbglevel_idx not set\n");
1258 return ERROR_FAIL;
1259 }
1260 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1261 if (xtensa_extra_debug_log)
1262 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1263 sparse_idx, xtensa->core_config->debug.irq_level,
1264 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1265 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1266 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1267 } else {
1268 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1269 }
1270 if (i == XT_REG_IDX_PC)
1271 /* Make a duplicate copy of PC for external access */
1272 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1273 k++;
1274 }
1275 }
1276
1277 if (k == num_regs)
1278 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1279
1280 return ERROR_OK;
1281 }
1282
1283 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1284 {
1285 struct xtensa *xtensa = target_to_xtensa(target);
1286 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1287 xtensa->core_config->mmu.dtlb_entries_count > 0;
1288 return ERROR_OK;
1289 }
1290
1291 int xtensa_halt(struct target *target)
1292 {
1293 struct xtensa *xtensa = target_to_xtensa(target);
1294
1295 LOG_TARGET_DEBUG(target, "start");
1296 if (target->state == TARGET_HALTED) {
1297 LOG_TARGET_DEBUG(target, "target was already halted");
1298 return ERROR_OK;
1299 }
1300 /* First we have to read dsr and check if the target stopped */
1301 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1302 if (res != ERROR_OK) {
1303 LOG_TARGET_ERROR(target, "Failed to read core status!");
1304 return res;
1305 }
1306 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1307 if (!xtensa_is_stopped(target)) {
1308 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1309 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1310 res = jtag_execute_queue();
1311 if (res != ERROR_OK)
1312 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1313 }
1314
1315 return res;
1316 }
1317
1318 int xtensa_prepare_resume(struct target *target,
1319 int current,
1320 target_addr_t address,
1321 int handle_breakpoints,
1322 int debug_execution)
1323 {
1324 struct xtensa *xtensa = target_to_xtensa(target);
1325 uint32_t bpena = 0;
1326
1327 LOG_TARGET_DEBUG(target,
1328 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1329 current,
1330 address,
1331 handle_breakpoints,
1332 debug_execution);
1333
1334 if (target->state != TARGET_HALTED) {
1335 LOG_TARGET_WARNING(target, "target not halted");
1336 return ERROR_TARGET_NOT_HALTED;
1337 }
1338
1339 if (address && !current) {
1340 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1341 } else {
1342 uint32_t cause = xtensa_cause_get(target);
1343 LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1344 cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1345 if (cause & DEBUGCAUSE_DB)
1346 /* We stopped due to a watchpoint. We can't just resume executing the
1347 * instruction again because that would trigger the watchpoint again.
1348 * To fix this, we single-step,
1349 * which ignores watchpoints. */
1350 xtensa_do_step(target, current, address, handle_breakpoints);
1351 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1352 /* We stopped due to a break instruction. We can't just resume executing the
1353 * instruction again because that would trigger the break again.
1354 * To fix this, we single-step,
1355 * which ignores break. */
1356 xtensa_do_step(target, current, address, handle_breakpoints);
1357 }
1358
1359 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1360 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1361 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1362 if (xtensa->hw_brps[slot]) {
1363 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1364 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1365 bpena |= BIT(slot);
1366 }
1367 }
1368 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1369
1370 /* Here we write all registers to the targets */
1371 int res = xtensa_write_dirty_registers(target);
1372 if (res != ERROR_OK)
1373 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1374 return res;
1375 }
1376
1377 int xtensa_do_resume(struct target *target)
1378 {
1379 struct xtensa *xtensa = target_to_xtensa(target);
1380
1381 LOG_TARGET_DEBUG(target, "start");
1382
1383 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1384 int res = jtag_execute_queue();
1385 if (res != ERROR_OK) {
1386 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1387 return res;
1388 }
1389 xtensa_core_status_check(target);
1390 return ERROR_OK;
1391 }
1392
1393 int xtensa_resume(struct target *target,
1394 int current,
1395 target_addr_t address,
1396 int handle_breakpoints,
1397 int debug_execution)
1398 {
1399 LOG_TARGET_DEBUG(target, "start");
1400 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1401 if (res != ERROR_OK) {
1402 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1403 return res;
1404 }
1405 res = xtensa_do_resume(target);
1406 if (res != ERROR_OK) {
1407 LOG_TARGET_ERROR(target, "Failed to resume!");
1408 return res;
1409 }
1410
1411 target->debug_reason = DBG_REASON_NOTHALTED;
1412 if (!debug_execution)
1413 target->state = TARGET_RUNNING;
1414 else
1415 target->state = TARGET_DEBUG_RUNNING;
1416
1417 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1418
1419 return ERROR_OK;
1420 }
1421
1422 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1423 {
1424 struct xtensa *xtensa = target_to_xtensa(target);
1425 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1426 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1427 if (err != ERROR_OK)
1428 return false;
1429
1430 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1431 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1432 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1433 return true;
1434
1435 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1436 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1437 return true;
1438
1439 return false;
1440 }
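/* In other words, the PC is treated as being inside a window exception handler when
 * it points at an L32E/S32E spill/reload or at an RFWO/RFWU return - the
 * instructions characteristic of the window overflow/underflow vectors. */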
1441
1442 int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1443 {
1444 struct xtensa *xtensa = target_to_xtensa(target);
1445 int res;
1446 const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1447 xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
1448 xtensa_reg_val_t icountlvl, cause;
1449 xtensa_reg_val_t oldps, oldpc, cur_pc;
1450 bool ps_lowered = false;
1451
1452 LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1453 current, address, handle_breakpoints);
1454
1455 if (target->state != TARGET_HALTED) {
1456 LOG_TARGET_WARNING(target, "target not halted");
1457 return ERROR_TARGET_NOT_HALTED;
1458 }
1459
1460 if (xtensa->eps_dbglevel_idx == 0) {
1461 LOG_ERROR("eps_dbglevel_idx not set\n");
1462 return ERROR_FAIL;
1463 }
1464
1465 /* Save old ps (EPS[dbglvl] on LX), pc */
1466 oldps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
1467 oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);
1468
1469 cause = xtensa_cause_get(target);
1470 LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1471 oldps,
1472 oldpc,
1473 cause,
1474 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1475 if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1476 /* handle hard-coded SW breakpoints (e.g. syscalls) */
1477 LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1478 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1479 /* pretend that we have stepped */
1480 if (cause & DEBUGCAUSE_BI)
1481 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1482 else
1483 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1484 return ERROR_OK;
1485 }
1486
1487 /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1488 * at which the instructions are to be counted while stepping.
1489 *
1490 * For example, if we need to step by 2 instructions, and an interrupt occurs
1491 * in between, the processor will trigger the interrupt and halt after the 2nd
1492 * instruction within the interrupt vector and/or handler.
1493 *
1494 * However, sometimes we don't want the interrupt handlers to be executed at all
1495 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1496 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1497 * code from being counted during stepping. Note that C exception handlers must
1498 * run at level 0 and hence will be counted and stepped into, should one occur.
1499 *
1500 * TODO: Certain instructions should never be single-stepped and should instead
1501 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1502 * RFI >= DBGLEVEL.
1503 */
1504 if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
1505 if (!xtensa->core_config->high_irq.enabled) {
1506 LOG_TARGET_WARNING(
1507 target,
1508 "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1509 return ERROR_FAIL;
1510 }
1511 /* Update ICOUNTLEVEL accordingly */
1512 icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
1513 } else {
1514 icountlvl = xtensa->core_config->debug.irq_level;
1515 }
1516
1517 if (cause & DEBUGCAUSE_DB) {
1518 /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1519 * that would trigger the watchpoint again. To fix this, we remove the watchpoints, single-step and
1520 * re-enable the watchpoint. */
1521 LOG_TARGET_DEBUG(
1522 target,
1523 "Single-stepping to get past instruction that triggered the watchpoint...");
1524 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1525 /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1526 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1527 dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
1528 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
1529 }
1530 }
1531
1532 if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
1533 /* handle normal SW breakpoint */
1534 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1535 if ((oldps & 0xf) >= icountlvl) {
1536 /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1537 ps_lowered = true;
1538 uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
1539 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
1540 LOG_TARGET_DEBUG(target,
1541 "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
1542 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1543 newps,
1544 oldps);
1545 }
1546 do {
1547 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
1548 xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1549
1550 /* Now ICOUNT is set, we can resume as if we were going to run */
1551 res = xtensa_prepare_resume(target, current, address, 0, 0);
1552 if (res != ERROR_OK) {
1553 LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1554 return res;
1555 }
1556 res = xtensa_do_resume(target);
1557 if (res != ERROR_OK) {
1558 LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1559 return res;
1560 }
1561
1562 /* Wait for stepping to complete */
1563 long long start = timeval_ms();
1564 while (timeval_ms() < start + 500) {
1565 /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1566 * until stepping is complete. */
1567 usleep(1000);
1568 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1569 if (res != ERROR_OK) {
1570 LOG_TARGET_ERROR(target, "Failed to read core status!");
1571 return res;
1572 }
1573 if (xtensa_is_stopped(target))
1574 break;
1575 usleep(1000);
1576 }
1577 LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1578 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1579 if (!xtensa_is_stopped(target)) {
1580 LOG_TARGET_WARNING(
1581 target,
1582 "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1583 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1584 target->debug_reason = DBG_REASON_NOTHALTED;
1585 target->state = TARGET_RUNNING;
1586 return ERROR_FAIL;
1587 }
1588 target->debug_reason = DBG_REASON_SINGLESTEP;
1589 target->state = TARGET_HALTED;
1590
1591 xtensa_fetch_all_regs(target);
1592
1593 cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1594
1595 LOG_TARGET_DEBUG(target,
1596 "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1597 xtensa_reg_get(target, XT_REG_IDX_PS),
1598 cur_pc,
1599 xtensa_cause_get(target),
1600 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1601
1602 /* Do not step into WindowOverflow if ISRs are masked.
1603 If we stopped in WindowOverflow at a breakpoint with masked ISRs,
1604 a step must first take us out of that handler. */
1605 if (xtensa->core_config->windowed &&
1606 xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
1607 xtensa_pc_in_winexc(target, cur_pc)) {
1608 /* isrmask = on, need to step out of the window exception handler */
1609 LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1610 oldpc = cur_pc;
1611 address = oldpc + 3;
1612 continue;
1613 }
1614
1615 if (oldpc == cur_pc)
1616 LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1617 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1618 else
1619 LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1620 break;
1621 } while (true);
1622 LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1623
1624 if (cause & DEBUGCAUSE_DB) {
1625 LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1626 /* Restore the DBREAKCx registers */
1627 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1628 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
1629 }
1630
1631 /* Restore int level */
1632 if (ps_lowered) {
1633 LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
1634 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1635 oldps);
1636 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
1637 }
1638
1639 /* write ICOUNTLEVEL back to zero */
1640 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
1641 /* TODO: can we skip writing dirty registers and re-fetching them? */
1642 res = xtensa_write_dirty_registers(target);
1643 xtensa_fetch_all_regs(target);
1644 return res;
1645 }
1646
1647 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1648 {
1649 return xtensa_do_step(target, current, address, handle_breakpoints);
1650 }
1651
1652 /**
1653 * Returns true if two ranges are overlapping
1654 */
1655 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1656 target_addr_t r1_end,
1657 target_addr_t r2_start,
1658 target_addr_t r2_end)
1659 {
1660 if ((r2_start >= r1_start) && (r2_start < r1_end))
1661 return true; /* r2_start is in r1 region */
1662 if ((r2_end > r1_start) && (r2_end <= r1_end))
1663 return true; /* r2_end is in r1 region */
1664 return false;
1665 }
1666
1667 /**
1668 * Returns the size of the overlapping region of two ranges.
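 * For example, ranges [0x100, 0x200) and [0x180, 0x280) overlap by 0x80 bytes.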
1669 */
1670 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1671 target_addr_t r1_end,
1672 target_addr_t r2_start,
1673 target_addr_t r2_end)
1674 {
1675 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1676 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1677 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1678 return ov_end - ov_start;
1679 }
1680 return 0;
1681 }
1682
1683 /**
1684 * Check whether the address range lies within configured memory regions and allows the requested access mode
1685 */
1686 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1687 {
1688 target_addr_t adr_pos = address; /* address cursor, starting at the beginning of the range */
1689 target_addr_t adr_end = address + size; /* region end */
1690 target_addr_t overlap_size;
1691 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1692
1693 while (adr_pos < adr_end) {
1694 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1695 if (!cm) /* address does not belong to any configured region */
1696 return false;
1697 if ((cm->access & access) != access) /* access check */
1698 return false;
1699 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1700 assert(overlap_size != 0);
1701 adr_pos += overlap_size;
1702 }
1703 return true;
1704 }
1705
1706 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1707 {
1708 struct xtensa *xtensa = target_to_xtensa(target);
1709 /* We are going to read memory in 32-bit increments. This may not be what the calling
1710 * function expects, so we may need to allocate a temp buffer and read into that first. */
1711 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1712 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1713 target_addr_t adr = addrstart_al;
1714 uint8_t *albuff;
1715 bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1716
1717 if (target->state != TARGET_HALTED) {
1718 LOG_TARGET_WARNING(target, "target not halted");
1719 return ERROR_TARGET_NOT_HALTED;
1720 }
1721
1722 if (!xtensa->permissive_mode) {
1723 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1724 XT_MEM_ACCESS_READ)) {
1725 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1726 return ERROR_FAIL;
1727 }
1728 }
1729
1730 if (addrstart_al == address && addrend_al == address + (size * count)) {
1731 albuff = buffer;
1732 } else {
1733 albuff = malloc(addrend_al - addrstart_al);
1734 if (!albuff) {
1735 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1736 addrend_al - addrstart_al);
1737 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1738 }
1739 }
1740
1741 /* We're going to use A3 here */
1742 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1743 /* Write start address to A3 */
1744 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1745 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1746 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1747 if (xtensa->probe_lsddr32p != 0) {
1748 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
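/* Each DDREXEC read below returns the current DDR value and re-executes the queued
 * LDDR32.P, fetching the next word and post-incrementing A3; the final word is read
 * via plain DDR so no load past the end of the range is issued. */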
1749 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1750 xtensa_queue_dbg_reg_read(xtensa,
1751 (adr + sizeof(uint32_t) == addrend_al) ? NARADR_DDR : NARADR_DDREXEC,
1752 &albuff[i]);
1753 } else {
1754 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1755 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1756 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1757 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
1758 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[i]);
1759 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, adr + sizeof(uint32_t));
1760 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1761 }
1762 }
1763 int res = jtag_execute_queue();
1764 if (res == ERROR_OK) {
1765 bool prev_suppress = xtensa->suppress_dsr_errors;
1766 xtensa->suppress_dsr_errors = true;
1767 res = xtensa_core_status_check(target);
1768 if (xtensa->probe_lsddr32p == -1)
1769 xtensa->probe_lsddr32p = 1;
1770 xtensa->suppress_dsr_errors = prev_suppress;
1771 }
1772 if (res != ERROR_OK) {
1773 if (xtensa->probe_lsddr32p != 0) {
1774 /* Disable fast memory access instructions and retry before reporting an error */
1775 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1776 xtensa->probe_lsddr32p = 0;
1777 res = xtensa_read_memory(target, address, size, count, buffer);
1778 bswap = false;
1779 } else {
1780 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
1781 count * size, address);
1782 }
1783 }
1784
1785 if (bswap)
1786 buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
1787 if (albuff != buffer) {
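/* Copy the requested, possibly unaligned, window out of the aligned buffer */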
1788 memcpy(buffer, albuff + (address & 3), (size * count));
1789 free(albuff);
1790 }
1791
1792 return res;
1793 }
1794
1795 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1796 {
1797 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1798 return xtensa_read_memory(target, address, 1, count, buffer);
1799 }
1800
1801 int xtensa_write_memory(struct target *target,
1802 target_addr_t address,
1803 uint32_t size,
1804 uint32_t count,
1805 const uint8_t *buffer)
1806 {
1807 /* This memory write function can be handed almost anything, from
1808 * aligned uint32 writes to unaligned uint8 accesses. The Xtensa memory, however,
1809 * may only accept aligned uint32 writes. That is why we convert
1810 * everything into that form. */
1811 struct xtensa *xtensa = target_to_xtensa(target);
1812 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1813 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1814 target_addr_t adr = addrstart_al;
1815 int res;
1816 uint8_t *albuff;
1817 bool fill_head_tail = false;
1818
1819 if (target->state != TARGET_HALTED) {
1820 LOG_TARGET_WARNING(target, "target not halted");
1821 return ERROR_TARGET_NOT_HALTED;
1822 }
1823
1824 if (!xtensa->permissive_mode) {
1825 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
1826 LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
1827 return ERROR_FAIL;
1828 }
1829 }
1830
1831 if (size == 0 || count == 0 || !buffer)
1832 return ERROR_COMMAND_SYNTAX_ERROR;
1833
1834 /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
1835 if (addrstart_al == address && addrend_al == address + (size * count)) {
1836 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1837 /* Need a buffer for byte-swapping */
1838 albuff = malloc(addrend_al - addrstart_al);
1839 else
1840 /* We discard the const here because albuff can also be non-const */
1841 albuff = (uint8_t *)buffer;
1842 } else {
1843 fill_head_tail = true;
1844 albuff = malloc(addrend_al - addrstart_al);
1845 }
1846 if (!albuff) {
1847 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1848 addrend_al - addrstart_al);
1849 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1850 }
1851
1852 /* We're going to use A3 here */
1853 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1854
1855 /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
1856 if (fill_head_tail) {
1857 /* See if we need to read the first and/or last word. */
1858 if (address & 3) {
1859 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1860 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1861 if (xtensa->probe_lsddr32p == 1) {
1862 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1863 } else {
1864 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1865 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1866 }
1867 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[0]);
1868 }
1869 if ((address + (size * count)) & 3) {
1870 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrend_al - 4);
1871 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1872 if (xtensa->probe_lsddr32p == 1) {
1873 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1874 } else {
1875 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1876 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1877 }
1878 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR,
1879 &albuff[addrend_al - addrstart_al - 4]);
1880 }
1881 /* Grab bytes */
1882 res = jtag_execute_queue();
1883 if (res != ERROR_OK) {
1884 LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
1885 if (albuff != buffer)
1886 free(albuff);
1887 return res;
1888 }
1889 xtensa_core_status_check(target);
1890 if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
1891 bool swapped_w0 = false;
1892 if (address & 3) {
1893 buf_bswap32(&albuff[0], &albuff[0], 4);
1894 swapped_w0 = true;
1895 }
1896 if ((address + (size * count)) & 3) {
1897 if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
1898 /* Don't double-swap if buffer start/end are within the same word */
1899 } else {
1900 buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
1901 &albuff[addrend_al - addrstart_al - 4], 4);
1902 }
1903 }
1904 }
1905 /* Copy data to be written into the aligned buffer (in host-endianness) */
1906 memcpy(&albuff[address & 3], buffer, size * count);
1907 /* Now we can write albuff in aligned uint32s. */
1908 }
1909
1910 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1911 buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
1912
1913 /* Write start address to A3 */
1914 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1915 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1916 /* Write the aligned buffer */
1917 if (xtensa->probe_lsddr32p != 0) {
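/* The first word is written to DDR and stored with an explicit SDDR32.P; each
 * subsequent DDREXEC write re-executes SDDR32.P, storing the new DDR value and
 * post-incrementing A3. */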
1918 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1919 if (i == 0) {
1920 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, buf_get_u32(&albuff[i], 0, 32));
1921 xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
1922 } else {
1923 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
1924 }
1925 }
1926 } else {
1927 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1928 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1929 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, buf_get_u32(&albuff[i], 0, 32));
1930 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
1931 xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1932 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, adr + sizeof(uint32_t));
1933 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1934 }
1935 }
1936
1937 res = jtag_execute_queue();
1938 if (res == ERROR_OK) {
1939 bool prev_suppress = xtensa->suppress_dsr_errors;
1940 xtensa->suppress_dsr_errors = true;
1941 res = xtensa_core_status_check(target);
1942 if (xtensa->probe_lsddr32p == -1)
1943 xtensa->probe_lsddr32p = 1;
1944 xtensa->suppress_dsr_errors = prev_suppress;
1945 }
1946 if (res != ERROR_OK) {
1947 if (xtensa->probe_lsddr32p != 0) {
1948 /* Disable fast memory access instructions and retry before reporting an error */
1949 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1950 xtensa->probe_lsddr32p = 0;
1951 res = xtensa_write_memory(target, address, size, count, buffer);
1952 } else {
1953 LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
1954 count * size, address);
1955 }
1956 } else {
1957 /* Invalidate ICACHE, writeback DCACHE if present */
1958 uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
1959 uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
1960 if (issue_ihi || issue_dhwb) {
1961 uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
1962 uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
1963 uint32_t linesize = MIN(ilinesize, dlinesize);
1964 uint32_t off = 0;
1965 adr = addrstart_al;
1966
1967 while ((adr + off) < addrend_al) {
1968 if (off == 0) {
1969 /* Write start address to A3 */
1970 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, adr);
1971 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1972 }
1973 if (issue_ihi)
1974 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
1975 if (issue_dhwb)
1976 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
1977 off += linesize;
1978 if (off > 1020) {
1979 /* IHI, DHWB take an 8-bit immediate, scaled by 4 (offset range 0..1020) */
1980 adr += off;
1981 off = 0;
1982 }
1983 }
1984
1985 /* Execute cache WB/INV instructions */
1986 res = jtag_execute_queue();
1987 xtensa_core_status_check(target);
1988 if (res != ERROR_OK)
1989 LOG_TARGET_ERROR(target,
1990 "Error issuing cache writeback/invaldate instruction(s): %d",
1991 res);
1992 }
1993 }
1994 if (albuff != buffer)
1995 free(albuff);
1996
1997 return res;
1998 }
1999
2000 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2001 {
2002 /* xtensa_write_memory can handle everything. Just pass on to that. */
2003 return xtensa_write_memory(target, address, 1, count, buffer);
2004 }
2005
2006 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2007 {
2008 LOG_WARNING("not implemented yet");
2009 return ERROR_FAIL;
2010 }
2011
2012 int xtensa_poll(struct target *target)
2013 {
2014 struct xtensa *xtensa = target_to_xtensa(target);
2015
2016 int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET | PWRSTAT_COREWASRESET);
2017 if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
2018 LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2019 xtensa->dbg_mod.power_status.stat,
2020 PWRSTAT_DEBUGWASRESET | PWRSTAT_COREWASRESET,
2021 xtensa->dbg_mod.power_status.stath);
2022 if (res != ERROR_OK)
2023 return res;
2024
2025 if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
2026 LOG_TARGET_INFO(target, "Debug controller was reset.");
2027 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
2028 if (res != ERROR_OK)
2029 return res;
2030 }
2031 if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
2032 LOG_TARGET_INFO(target, "Core was reset.");
2033 xtensa_dm_power_status_cache(&xtensa->dbg_mod);
2034 /* Enable JTAG, set reset if needed */
2035 res = xtensa_wakeup(target);
2036 if (res != ERROR_OK)
2037 return res;
2038
2039 uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2040 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
2041 if (res != ERROR_OK)
2042 return res;
2043 if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2044 LOG_TARGET_DEBUG(target,
2045 "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2046 prev_dsr,
2047 xtensa->dbg_mod.core_status.dsr);
2048 if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET) {
2049 /* if RESET state is persistent */
2050 target->state = TARGET_RESET;
2051 } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2052 LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2053 xtensa->dbg_mod.core_status.dsr,
2054 xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
2055 target->state = TARGET_UNKNOWN;
2056 if (xtensa->come_online_probes_num == 0)
2057 target->examined = false;
2058 else
2059 xtensa->come_online_probes_num--;
2060 } else if (xtensa_is_stopped(target)) {
2061 if (target->state != TARGET_HALTED) {
2062 enum target_state oldstate = target->state;
2063 target->state = TARGET_HALTED;
2064 /* Examine why the target has been halted */
2065 target->debug_reason = DBG_REASON_DBGRQ;
2066 xtensa_fetch_all_regs(target);
2067 /* When setting debug reason DEBUGCAUSE events have the following
2068 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2069 /* Watchpoint and breakpoint events at the same time results in special
2070 * debug reason: DBG_REASON_WPTANDBKPT. */
2071 uint32_t halt_cause = xtensa_cause_get(target);
2072 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2073 if (halt_cause & DEBUGCAUSE_IC)
2074 target->debug_reason = DBG_REASON_SINGLESTEP;
2075 if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2076 if (halt_cause & DEBUGCAUSE_DB)
2077 target->debug_reason = DBG_REASON_WPTANDBKPT;
2078 else
2079 target->debug_reason = DBG_REASON_BREAKPOINT;
2080 } else if (halt_cause & DEBUGCAUSE_DB) {
2081 target->debug_reason = DBG_REASON_WATCHPOINT;
2082 }
2083 LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2084 ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2085 xtensa_reg_get(target, XT_REG_IDX_PC),
2086 target->debug_reason,
2087 oldstate);
2088 LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2089 halt_cause,
2090 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
2091 xtensa->dbg_mod.core_status.dsr);
2092 xtensa_dm_core_status_clear(
2093 &xtensa->dbg_mod,
2094 OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
2095 OCDDSR_DEBUGINTTRAX |
2096 OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
2097 }
2098 } else {
2099 target->debug_reason = DBG_REASON_NOTHALTED;
2100 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2101 target->state = TARGET_RUNNING;
2102 target->debug_reason = DBG_REASON_NOTHALTED;
2103 }
2104 }
2105 if (xtensa->trace_active) {
2106 /* Detect if tracing was active but has stopped. */
2107 struct xtensa_trace_status trace_status;
2108 res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2109 if (res == ERROR_OK) {
2110 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2111 LOG_INFO("Detected end of trace.");
2112 if (trace_status.stat & TRAXSTAT_PCMTG)
2113 LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2114 if (trace_status.stat & TRAXSTAT_PTITG)
2115 LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2116 if (trace_status.stat & TRAXSTAT_CTITG)
2117 LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2118 xtensa->trace_active = false;
2119 }
2120 }
2121 }
2122 return ERROR_OK;
2123 }
2124
2125 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2126 {
2127 struct xtensa *xtensa = target_to_xtensa(target);
2128 unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
2129 unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2130 uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2131 uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2132 unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2133 unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
2134 int ret;
2135
2136 if (size > icache_line_size)
2137 return ERROR_FAIL;
2138
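/* The patched region is a single instruction (at most XT_ISNS_SZ_MAX bytes), so it
 * can span at most two cache lines; when it does, the extra invalidate/writeback at
 * offset 4 below covers the second line. */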
2139 if (issue_ihi || issue_dhwbi) {
2140 /* We're going to use A3 here */
2141 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
2142
2143 /* Write start address to A3 and invalidate */
2144 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, address);
2145 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2146 LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
2147 if (issue_dhwbi) {
2148 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
2149 if (!same_dc_line) {
2150 LOG_TARGET_DEBUG(target,
2151 "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2152 address + 4);
2153 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
2154 }
2155 }
2156 if (issue_ihi) {
2157 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
2158 if (!same_ic_line) {
2159 LOG_TARGET_DEBUG(target,
2160 "IHI second icache line for address "TARGET_ADDR_FMT,
2161 address + 4);
2162 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
2163 }
2164 }
2165
2166 /* Execute invalidate instructions */
2167 ret = jtag_execute_queue();
2168 xtensa_core_status_check(target);
2169 if (ret != ERROR_OK) {
2170 LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
2171 return ret;
2172 }
2173 }
2174
2175 /* Write new instructions to memory */
2176 ret = target_write_buffer(target, address, size, buffer);
2177 if (ret != ERROR_OK) {
2178 LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2179 return ret;
2180 }
2181
2182 if (issue_dhwbi) {
2183 /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2184 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, address);
2185 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2186 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
2187 LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2188 if (!same_dc_line) {
2189 LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2190 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
2191 }
2192
2193 /* Execute invalidate instructions */
2194 ret = jtag_execute_queue();
2195 xtensa_core_status_check(target);
2196 }
2197
2198 /* TODO: Handle L2 cache if present */
2199 return ret;
2200 }
2201
2202 static int xtensa_sw_breakpoint_add(struct target *target,
2203 struct breakpoint *breakpoint,
2204 struct xtensa_sw_breakpoint *sw_bp)
2205 {
2206 struct xtensa *xtensa = target_to_xtensa(target);
2207 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2208 if (ret != ERROR_OK) {
2209 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2210 return ret;
2211 }
2212
2213 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2214 sw_bp->oocd_bp = breakpoint;
2215
2216 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2217
2218 /* Underlying memory write will convert instruction endianness, don't do that here */
2219 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2220 if (ret != ERROR_OK) {
2221 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2222 return ret;
2223 }
2224
2225 return ERROR_OK;
2226 }
2227
2228 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2229 {
2230 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2231 if (ret != ERROR_OK) {
2232 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2233 return ret;
2234 }
2235 sw_bp->oocd_bp = NULL;
2236 return ERROR_OK;
2237 }
2238
2239 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2240 {
2241 struct xtensa *xtensa = target_to_xtensa(target);
2242 unsigned int slot;
2243
2244 if (breakpoint->type == BKPT_SOFT) {
2245 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2246 if (!xtensa->sw_brps[slot].oocd_bp ||
2247 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2248 break;
2249 }
2250 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2251 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2252 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2253 }
2254 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2255 if (ret != ERROR_OK) {
2256 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2257 return ret;
2258 }
2259 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2260 slot,
2261 breakpoint->address);
2262 return ERROR_OK;
2263 }
2264
2265 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2266 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2267 break;
2268 }
2269 if (slot == xtensa->core_config->debug.ibreaks_num) {
2270 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2271 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2272 }
2273
2274 xtensa->hw_brps[slot] = breakpoint;
2275 /* We will actually write the breakpoints when we resume the target. */
2276 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2277 slot,
2278 breakpoint->address);
2279
2280 return ERROR_OK;
2281 }
2282
2283 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2284 {
2285 struct xtensa *xtensa = target_to_xtensa(target);
2286 unsigned int slot;
2287
2288 if (breakpoint->type == BKPT_SOFT) {
2289 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2290 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2291 break;
2292 }
2293 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2294 LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2295 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2296 }
2297 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2298 if (ret != ERROR_OK) {
2299 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2300 return ret;
2301 }
2302 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2303 return ERROR_OK;
2304 }
2305
2306 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2307 if (xtensa->hw_brps[slot] == breakpoint)
2308 break;
2309 }
2310 if (slot == xtensa->core_config->debug.ibreaks_num) {
2311 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2312 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2313 }
2314 xtensa->hw_brps[slot] = NULL;
2315 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2316 return ERROR_OK;
2317 }
2318
2319 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2320 {
2321 struct xtensa *xtensa = target_to_xtensa(target);
2322 unsigned int slot;
2323 xtensa_reg_val_t dbreakcval;
2324
2325 if (target->state != TARGET_HALTED) {
2326 LOG_TARGET_WARNING(target, "target not halted");
2327 return ERROR_TARGET_NOT_HALTED;
2328 }
2329
2330 if (watchpoint->mask != ~(uint32_t)0) {
2331 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2332 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2333 }
2334
2335 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2336 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2337 break;
2338 }
2339 if (slot == xtensa->core_config->debug.dbreaks_num) {
2340 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2341 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2342 }
2343
2344 /* Figure out the value for DBREAKC bits 5..0:
2345 * start from 0x3F and clear one additional LSB for each doubling of the watchpoint length. */
2346 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2347 !IS_PWR_OF_2(watchpoint->length) ||
2348 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2349 LOG_TARGET_WARNING(
2350 target,
2351 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2352 " not supported by hardware.",
2353 watchpoint->length,
2354 watchpoint->address);
2355 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2356 }
2357 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
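/* e.g. length 1 -> 0x3F, length 4 -> 0x3C, length 64 -> 0x00 */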
2358
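/* DBREAKC bits 30 and 31 enable breaking on loads and stores, respectively */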
2359 if (watchpoint->rw == WPT_READ)
2360 dbreakcval |= BIT(30);
2361 if (watchpoint->rw == WPT_WRITE)
2362 dbreakcval |= BIT(31);
2363 if (watchpoint->rw == WPT_ACCESS)
2364 dbreakcval |= BIT(30) | BIT(31);
2365
2366 /* Write DBREAKA[slot] and DBREAKC[slot] */
2367 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2368 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2369 xtensa->hw_wps[slot] = watchpoint;
2370 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2371 watchpoint->address);
2372 return ERROR_OK;
2373 }
2374
2375 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2376 {
2377 struct xtensa *xtensa = target_to_xtensa(target);
2378 unsigned int slot;
2379
2380 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2381 if (xtensa->hw_wps[slot] == watchpoint)
2382 break;
2383 }
2384 if (slot == xtensa->core_config->debug.dbreaks_num) {
2385 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2386 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2387 }
2388 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2389 xtensa->hw_wps[slot] = NULL;
2390 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2391 watchpoint->address);
2392 return ERROR_OK;
2393 }
2394
2395 static int xtensa_build_reg_cache(struct target *target)
2396 {
2397 struct xtensa *xtensa = target_to_xtensa(target);
2398 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2399 unsigned int last_dbreg_num = 0;
2400
2401 if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
2402 LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2403 xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);
2404
2405 struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2406
2407 if (!reg_cache) {
2408 LOG_ERROR("Failed to alloc reg cache!");
2409 return ERROR_FAIL;
2410 }
2411 reg_cache->name = "Xtensa registers";
2412 reg_cache->next = NULL;
2413 /* Init reglist */
2414 unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2415 struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2416 if (!reg_list) {
2417 LOG_ERROR("Failed to alloc reg list!");
2418 goto fail;
2419 }
2420 xtensa->dbregs_num = 0;
2421 unsigned int didx = 0;
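/* Pass 0 (whichlist == 0) populates the architecturally defined core registers from
 * xtensa_regs; pass 1 appends the optional registers from xtensa->optregs. */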
2422 for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2423 struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2424 unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2425 for (unsigned int i = 0; i < listsize; i++, didx++) {
2426 reg_list[didx].exist = rlist[i].exist;
2427 reg_list[didx].name = rlist[i].name;
2428 reg_list[didx].size = 32;
2429 reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2430 if (!reg_list[didx].value) {
2431 LOG_ERROR("Failed to alloc reg list value!");
2432 goto fail;
2433 }
2434 reg_list[didx].dirty = false;
2435 reg_list[didx].valid = false;
2436 reg_list[didx].type = &xtensa_reg_type;
2437 reg_list[didx].arch_info = xtensa;
2438 if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2439 last_dbreg_num = rlist[i].dbreg_num;
2440
2441 if (xtensa_extra_debug_log) {
2442 LOG_TARGET_DEBUG(target,
2443 "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2444 reg_list[didx].name,
2445 whichlist,
2446 reg_list[didx].exist,
2447 didx,
2448 rlist[i].type,
2449 rlist[i].dbreg_num);
2450 }
2451 }
2452 }
2453
2454 xtensa->dbregs_num = last_dbreg_num + 1;
2455 reg_cache->reg_list = reg_list;
2456 reg_cache->num_regs = reg_list_size;
2457
2458 LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2459 xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
2460
2461 /* Construct empty-register list for handling unknown register requests */
2462 xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
2463 if (!xtensa->empty_regs) {
2464 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2465 goto fail;
2466 }
2467 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2468 xtensa->empty_regs[i].name = calloc(8, sizeof(char));
2469 if (!xtensa->empty_regs[i].name) {
2470 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2471 goto fail;
2472 }
2473 sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
2474 xtensa->empty_regs[i].size = 32;
2475 xtensa->empty_regs[i].type = &xtensa_reg_type;
2476 xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2477 if (!xtensa->empty_regs[i].value) {
2478 LOG_ERROR("Failed to alloc empty reg list value!");
2479 goto fail;
2480 }
2481 xtensa->empty_regs[i].arch_info = xtensa;
2482 }
2483
2484 /* Construct contiguous register list from contiguous descriptor list */
2485 if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
2486 xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
2487 if (!xtensa->contiguous_regs_list) {
2488 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2489 goto fail;
2490 }
2491 for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
2492 unsigned int j;
2493 for (j = 0; j < reg_cache->num_regs; j++) {
2494 if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
2495 xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
2496 LOG_TARGET_DEBUG(target,
2497 "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
2498 xtensa->contiguous_regs_list[i]->name,
2499 xtensa->contiguous_regs_desc[i]->dbreg_num);
2500 break;
2501 }
2502 }
2503 if (j == reg_cache->num_regs)
2504 LOG_TARGET_WARNING(target, "contiguous register %s not found",
2505 xtensa->contiguous_regs_desc[i]->name);
2506 }
2507 }
2508
2509 xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2510 if (!xtensa->algo_context_backup) {
2511 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2512 goto fail;
2513 }
2514 for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2515 struct reg *reg = &reg_cache->reg_list[i];
2516 xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2517 if (!xtensa->algo_context_backup[i]) {
2518 LOG_ERROR("Failed to alloc mem for algorithm context!");
2519 goto fail;
2520 }
2521 }
2522 xtensa->core_cache = reg_cache;
2523 if (cache_p)
2524 *cache_p = reg_cache;
2525 return ERROR_OK;
2526
2527 fail:
2528 if (reg_list) {
2529 for (unsigned int i = 0; i < reg_list_size; i++)
2530 free(reg_list[i].value);
2531 free(reg_list);
2532 }
2533 if (xtensa->empty_regs) {
2534 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2535 free((void *)xtensa->empty_regs[i].name);
2536 free(xtensa->empty_regs[i].value);
2537 }
2538 free(xtensa->empty_regs);
2539 }
2540 if (xtensa->algo_context_backup) {
2541 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2542 free(xtensa->algo_context_backup[i]);
2543 free(xtensa->algo_context_backup);
2544 }
2545 free(reg_cache);
2546
2547 return ERROR_FAIL;
2548 }
2549
2550 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
2551 {
2552 struct xtensa *xtensa = target_to_xtensa(target);
2553 int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
2554 /* Process op[] list */
2555 while (opstr && (*opstr == ':')) {
2556 uint8_t ops[32];
2557 unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
2558 if (oplen > 32) {
2559 LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
2560 break;
2561 }
2562 unsigned int i = 0;
2563 while ((i < oplen) && opstr && (*opstr == ':'))
2564 ops[i++] = strtoul(opstr + 1, &opstr, 16);
2565 if (i != oplen) {
2566 LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
2567 break;
2568 }
2569
2570 char insn_buf[128];
2571 sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
2572 for (i = 0; i < oplen; i++)
2573 sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
2574 LOG_TARGET_DEBUG(target, "%s", insn_buf);
2575 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
2576 status = ERROR_OK;
2577 }
2578 return status;
2579 }
2580
2581 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
2582 {
2583 struct xtensa *xtensa = target_to_xtensa(target);
2584 bool iswrite = (packet[0] == 'Q');
2585 enum xtensa_qerr_e error;
2586
2587 /* Read/write TIE register. Requires spill location.
2588 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
2589 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
2590 */
2591 if (!(xtensa->spill_buf)) {
2592 LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
2593 error = XT_QERR_FAIL;
2594 goto xtensa_gdbqc_qxtreg_fail;
2595 }
2596
2597 char *delim;
2598 uint32_t regnum = strtoul(packet + 6, &delim, 16);
2599 if (*delim != ':') {
2600 LOG_ERROR("Malformed qxtreg packet");
2601 error = XT_QERR_INVAL;
2602 goto xtensa_gdbqc_qxtreg_fail;
2603 }
2604 uint32_t reglen = strtoul(delim + 1, &delim, 16);
2605 if (*delim != ':') {
2606 LOG_ERROR("Malformed qxtreg packet");
2607 error = XT_QERR_INVAL;
2608 goto xtensa_gdbqc_qxtreg_fail;
2609 }
2610 uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
2611 memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
2612 LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
2613 if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
2614 LOG_ERROR("TIE register too large");
2615 error = XT_QERR_MEM;
2616 goto xtensa_gdbqc_qxtreg_fail;
2617 }
2618
2619 /* (1) Save spill memory, (1.5) [if write then store value to spill location],
2620 * (2) read old a4, (3) write spill address to a4.
2621 * NOTE: ensure a4 is restored properly by all error handling logic
2622 */
2623 unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
2624 int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
2625 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2626 if (status != ERROR_OK) {
2627 LOG_ERROR("Spill memory save");
2628 error = XT_QERR_MEM;
2629 goto xtensa_gdbqc_qxtreg_fail;
2630 }
2631 if (iswrite) {
2632 /* Extract value and store in spill memory */
2633 unsigned int b = 0;
2634 char *valbuf = strchr(delim, '=');
2635 if (!(valbuf && (*valbuf == '='))) {
2636 LOG_ERROR("Malformed Qxtreg packet");
2637 error = XT_QERR_INVAL;
2638 goto xtensa_gdbqc_qxtreg_fail;
2639 }
2640 valbuf++;
2641 while (*valbuf && *(valbuf + 1)) {
2642 char bytestr[3] = { 0, 0, 0 };
2643 strncpy(bytestr, valbuf, 2);
2644 regbuf[b++] = strtoul(bytestr, NULL, 16);
2645 valbuf += 2;
2646 }
2647 if (b != reglen) {
2648 LOG_ERROR("Malformed Qxtreg packet");
2649 error = XT_QERR_INVAL;
2650 goto xtensa_gdbqc_qxtreg_fail;
2651 }
2652 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2653 reglen / memop_size, regbuf);
2654 if (status != ERROR_OK) {
2655 LOG_ERROR("TIE value store");
2656 error = XT_QERR_MEM;
2657 goto xtensa_gdbqc_qxtreg_fail;
2658 }
2659 }
2660 xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
2661 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, xtensa->spill_loc);
2662 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2663
2664 int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
2665
2666 /* Restore a4 but not yet spill memory. Execute it all... */
2667 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, orig_a4);
2668 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2669 status = jtag_execute_queue();
2670 if (status != ERROR_OK) {
2671 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
2672 tieop_status = status;
2673 }
2674 status = xtensa_core_status_check(target);
2675 if (status != ERROR_OK) {
2676 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
2677 tieop_status = status;
2678 }
2679
2680 if (tieop_status == ERROR_OK) {
2681 if (iswrite) {
2682 /* TIE write succeeded; send OK */
2683 strcpy(*response_p, "OK");
2684 } else {
2685 /* TIE read succeeded; copy result from spill memory */
2686 status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
2687 if (status != ERROR_OK) {
2688 LOG_TARGET_ERROR(target, "TIE result read");
2689 tieop_status = status;
2690 }
2691 unsigned int i;
2692 for (i = 0; i < reglen; i++)
2693 sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
2694 *(*response_p + 2 * i) = '\0';
2695 LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
2696 }
2697 }
2698
2699 /* Restore spill memory first, then report any previous errors */
2700 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2701 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2702 if (status != ERROR_OK) {
2703 LOG_ERROR("Spill memory restore");
2704 error = XT_QERR_MEM;
2705 goto xtensa_gdbqc_qxtreg_fail;
2706 }
2707 if (tieop_status != ERROR_OK) {
2708 LOG_ERROR("TIE execution");
2709 error = XT_QERR_FAIL;
2710 goto xtensa_gdbqc_qxtreg_fail;
2711 }
2712 return ERROR_OK;
2713
2714 xtensa_gdbqc_qxtreg_fail:
2715 strcpy(*response_p, xt_qerr[error].chrval);
2716 return xt_qerr[error].intval;
2717 }
2718
2719 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
2720 {
2721 struct xtensa *xtensa = target_to_xtensa(target);
2722 enum xtensa_qerr_e error;
2723 if (!packet || !response_p) {
2724 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
2725 return ERROR_FAIL;
2726 }
2727
2728 *response_p = xtensa->qpkt_resp;
2729 if (strncmp(packet, "qxtn", 4) == 0) {
2730 strcpy(*response_p, "OpenOCD");
2731 return ERROR_OK;
2732 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
2733 return ERROR_OK;
2734 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
2735 /* Confirm host cache params match core .cfg file */
2736 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
2737 &xtensa->core_config->icache : &xtensa->core_config->dcache;
2738 unsigned int line_size = 0, size = 0, way_count = 0;
2739 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
2740 if ((cachep->line_size != line_size) ||
2741 (cachep->size != size) ||
2742 (cachep->way_count != way_count)) {
2743 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2744 cachep == &xtensa->core_config->icache ? 'I' : 'D');
2745 }
2746 strcpy(*response_p, "OK");
2747 return ERROR_OK;
2748 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
2749 /* Confirm host IRAM/IROM params match core .cfg file */
2750 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
2751 &xtensa->core_config->iram : &xtensa->core_config->irom;
2752 unsigned int base = 0, size = 0, i;
2753 char *pkt = (char *)&packet[7];
2754 do {
2755 pkt++;
2756 size = strtoul(pkt, &pkt, 16);
2757 pkt++;
2758 base = strtoul(pkt, &pkt, 16);
2759 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
2760 for (i = 0; i < memp->count; i++) {
2761 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
2762 break;
2763 }
2764 if (i == memp->count) {
2765 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
2766 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
2767 break;
2768 }
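/* Skip the remaining comma-separated fields of this memory-region descriptor */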
2769 for (i = 0; i < 11; i++) {
2770 pkt++;
2771 strtoul(pkt, &pkt, 16);
2772 }
2773 } while (pkt && (pkt[0] == ','));
2774 strcpy(*response_p, "OK");
2775 return ERROR_OK;
2776 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
2777 /* Confirm host EXCM_LEVEL matches core .cfg file */
2778 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
2779 if (!xtensa->core_config->high_irq.enabled ||
2780 (excm_level != xtensa->core_config->high_irq.excm_level))
2781 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
2782 strcpy(*response_p, "OK");
2783 return ERROR_OK;
2784 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
2785 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
2786 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
2787 strcpy(*response_p, "OK");
2788 return ERROR_OK;
2789 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
2790 char *delim;
2791 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
2792 if (*delim != ':') {
2793 LOG_ERROR("Malformed Qxtspill packet");
2794 error = XT_QERR_INVAL;
2795 goto xtensa_gdb_query_custom_fail;
2796 }
2797 xtensa->spill_loc = spill_loc;
2798 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
2799 if (xtensa->spill_buf)
2800 free(xtensa->spill_buf);
2801 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
2802 if (!xtensa->spill_buf) {
2803 LOG_ERROR("Spill buf alloc");
2804 error = XT_QERR_MEM;
2805 goto xtensa_gdb_query_custom_fail;
2806 }
2807 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
2808 strcpy(*response_p, "OK");
2809 return ERROR_OK;
2810 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
2811 return xtensa_gdbqc_qxtreg(target, packet, response_p);
2812 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
2813 (strncmp(packet, "qxtftie", 7) == 0) ||
2814 (strncmp(packet, "qxtstie", 7) == 0)) {
2815 /* Return empty string to indicate trace, TIE wire debug are unsupported */
2816 strcpy(*response_p, "");
2817 return ERROR_OK;
2818 }
2819
2820 /* Warn for all other queries, but do not return errors */
2821 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
2822 strcpy(*response_p, "");
2823 return ERROR_OK;
2824
2825 xtensa_gdb_query_custom_fail:
2826 strcpy(*response_p, xt_qerr[error].chrval);
2827 return xt_qerr[error].intval;
2828 }
2829
2830 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2831 const struct xtensa_debug_module_config *dm_cfg)
2832 {
2833 target->arch_info = xtensa;
2834 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2835 xtensa->target = target;
2836 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2837
2838 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
2839 if (!xtensa->core_config) {
2840 LOG_ERROR("Xtensa configuration alloc failed\n");
2841 return ERROR_FAIL;
2842 }
2843
2844 /* Default cache settings are disabled with 1 way */
2845 xtensa->core_config->icache.way_count = 1;
2846 xtensa->core_config->dcache.way_count = 1;
2847
2848 /* chrval: AR3/AR4 register names will change with window mapping.
2849 * intval: tracks whether scratch register was set through gdb P packet.
2850 */
2851 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
2852 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
2853 if (!xtensa->scratch_ars[s].chrval) {
2854 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
2855 free(xtensa->scratch_ars[f].chrval);
2856 free(xtensa->core_config);
2857 LOG_ERROR("Xtensa scratch AR alloc failed\n");
2858 return ERROR_FAIL;
2859 }
2860 xtensa->scratch_ars[s].intval = false;
2861 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
2862 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
2863 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
2864 }
2865
2866 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2867 }
2868
2869 void xtensa_set_permissive_mode(struct target *target, bool state)
2870 {
2871 target_to_xtensa(target)->permissive_mode = state;
2872 }
2873
2874 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2875 {
2876 struct xtensa *xtensa = target_to_xtensa(target);
2877
2878 xtensa->come_online_probes_num = 3;
2879 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
2880 if (!xtensa->hw_brps) {
2881 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2882 return ERROR_FAIL;
2883 }
2884 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
2885 if (!xtensa->hw_wps) {
2886 free(xtensa->hw_brps);
2887 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2888 return ERROR_FAIL;
2889 }
2890 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2891 if (!xtensa->sw_brps) {
2892 free(xtensa->hw_brps);
2893 free(xtensa->hw_wps);
2894 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2895 return ERROR_FAIL;
2896 }
2897
2898 xtensa->spill_loc = 0xffffffff;
2899 xtensa->spill_bytes = 0;
2900 xtensa->spill_buf = NULL;
2901 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
2902
2903 return xtensa_build_reg_cache(target);
2904 }
2905
2906 static void xtensa_free_reg_cache(struct target *target)
2907 {
2908 struct xtensa *xtensa = target_to_xtensa(target);
2909 struct reg_cache *cache = xtensa->core_cache;
2910
2911 if (cache) {
2912 register_unlink_cache(&target->reg_cache, cache);
2913 for (unsigned int i = 0; i < cache->num_regs; i++) {
2914 free(xtensa->algo_context_backup[i]);
2915 free(cache->reg_list[i].value);
2916 }
2917 free(xtensa->algo_context_backup);
2918 free(cache->reg_list);
2919 free(cache);
2920 }
2921 xtensa->core_cache = NULL;
2922 xtensa->algo_context_backup = NULL;
2923
2924 if (xtensa->empty_regs) {
2925 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2926 free((void *)xtensa->empty_regs[i].name);
2927 free(xtensa->empty_regs[i].value);
2928 }
2929 free(xtensa->empty_regs);
2930 }
2931 xtensa->empty_regs = NULL;
2932 if (xtensa->optregs) {
2933 for (unsigned int i = 0; i < xtensa->num_optregs; i++)
2934 free((void *)xtensa->optregs[i].name);
2935 free(xtensa->optregs);
2936 }
2937 xtensa->optregs = NULL;
2938 }
2939
2940 void xtensa_target_deinit(struct target *target)
2941 {
2942 struct xtensa *xtensa = target_to_xtensa(target);
2943
2944 LOG_DEBUG("start");
2945
2946 if (target_was_examined(target)) {
2947 int ret = xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, OCDDCR_ENABLEOCD);
2948 if (ret != ERROR_OK) {
2949 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2950 return;
2951 }
2952 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2953 ret = jtag_execute_queue();
2954 if (ret != ERROR_OK) {
2955 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2956 return;
2957 }
2958 }
2959 xtensa_free_reg_cache(target);
2960 free(xtensa->hw_brps);
2961 free(xtensa->hw_wps);
2962 free(xtensa->sw_brps);
2963 if (xtensa->spill_buf) {
2964 free(xtensa->spill_buf);
2965 xtensa->spill_buf = NULL;
2966 }
2967 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
2968 free(xtensa->scratch_ars[s].chrval);
2969 free(xtensa->core_config);
2970 }
2971
2972 const char *xtensa_get_gdb_arch(struct target *target)
2973 {
2974 return "xtensa";
2975 }
2976
2977 /* exe <ascii-encoded hexadecimal instruction bytes> */
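/* Illustrative invocation (the byte string below is a placeholder, not a meaningful
 * opcode): "xtensa exe 112233". The argument is an even-length string of hex digits,
 * shorter than 64 characters, decoded two characters per byte.
 */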
2978 COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
2979 {
2980 struct xtensa *xtensa = target_to_xtensa(target);
2981
2982 if (CMD_ARGC != 1)
2983 return ERROR_COMMAND_SYNTAX_ERROR;
2984
2985 /* Process ascii-encoded hex byte string */
2986 const char *parm = CMD_ARGV[0];
2987 unsigned int parm_len = strlen(parm);
2988 if ((parm_len >= 64) || (parm_len & 1)) {
2989 LOG_ERROR("Invalid parameter length (%u): must be even, < 64 characters", parm_len);
2990 return ERROR_FAIL;
2991 }
2992
2993 uint8_t ops[32];
2994 memset(ops, 0, 32);
2995 unsigned int oplen = parm_len / 2;
2996 char encoded_byte[3] = { 0, 0, 0 };
2997 for (unsigned int i = 0; i < oplen; i++) {
2998 encoded_byte[0] = *parm++;
2999 encoded_byte[1] = *parm++;
3000 ops[i] = strtoul(encoded_byte, NULL, 16);
3001 }
3002
3003 /* GDB must handle state save/restore.
3004 * Flush reg cache in case spill location is in an AR
3005 * Update CPENABLE only for this execution; later restore cached copy
3006 * Keep a copy of exccause in case executed code triggers an exception
3007 */
3008 int status = xtensa_write_dirty_registers(target);
3009 if (status != ERROR_OK) {
3010 LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
3011 return ERROR_FAIL;
3012 }
3013 xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
3014 xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
3015 xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
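/* The queued sequence below temporarily enables all coprocessors without losing A3:
 * DDR is preloaded with 0xffffffff, copied into A3 (RSR), written to CPENABLE (WSR),
 * then A3 is restored by round-tripping its saved value through DDR. The cached
 * CPENABLE value is written back once execution completes.
 */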
3016 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, 0xffffffff);
3017 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3018 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
3019 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
3020 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, a3);
3021 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3022
3023 /* Queue instruction list and execute everything */
3024 LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
3025 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3026 status = jtag_execute_queue();
3027 if (status != ERROR_OK)
3028 LOG_TARGET_ERROR(target, "TIE queue execute: %d", status);
3029 status = xtensa_core_status_check(target);
3030 if (status != ERROR_OK)
3031 LOG_TARGET_ERROR(target, "TIE instr execute: %d", status);
3032
3033 /* Reread register cache and restore saved regs after instruction execution */
3034 if (xtensa_fetch_all_regs(target) != ERROR_OK)
3035 LOG_TARGET_ERROR(target, "Failed to fetch register cache (post-exec).");
3036 xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
3037 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
3038 return status;
3039 }
3040
3041 COMMAND_HANDLER(xtensa_cmd_exe)
3042 {
3043 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3044 }
3045
3046 /* xtdef <name> */
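/* Illustrative config usage (only the LX core type is currently accepted):
 *   xtensa xtdef LX
 */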
3047 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3048 {
3049 if (CMD_ARGC != 1)
3050 return ERROR_COMMAND_SYNTAX_ERROR;
3051
3052 const char *core_name = CMD_ARGV[0];
3053 if (strcasecmp(core_name, "LX") == 0) {
3054 xtensa->core_config->core_type = XT_LX;
3055 } else {
3056 LOG_ERROR("xtdef [LX]");
3057 return ERROR_COMMAND_SYNTAX_ERROR;
3058 }
3059 return ERROR_OK;
3060 }
3061
3062 COMMAND_HANDLER(xtensa_cmd_xtdef)
3063 {
3064 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3065 target_to_xtensa(get_current_target(CMD_CTX)));
3066 }
3067
3068 static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
3069 {
3070 if ((val < min) || (val > max)) {
3071 LOG_ERROR("xtopt %s (%d) out of range [%d..%d]", opt, val, min, max);
3072 return false;
3073 }
3074 return true;
3075 }
3076
3077 /* xtopt <name> <value> */
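/* Illustrative config usage; values are examples only and are range-checked below:
 *   xtensa xtopt arnum 64
 *   xtensa xtopt windowed 1
 *   xtensa xtopt ibreaknum 2
 */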
3078 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3079 {
3080 if (CMD_ARGC != 2)
3081 return ERROR_COMMAND_SYNTAX_ERROR;
3082
3083 const char *opt_name = CMD_ARGV[0];
3084 int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3085 if (strcasecmp(opt_name, "arnum") == 0) {
3086 if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3087 return ERROR_COMMAND_ARGUMENT_INVALID;
3088 xtensa->core_config->aregs_num = opt_val;
3089 } else if (strcasecmp(opt_name, "windowed") == 0) {
3090 if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3091 return ERROR_COMMAND_ARGUMENT_INVALID;
3092 xtensa->core_config->windowed = opt_val;
3093 } else if (strcasecmp(opt_name, "cpenable") == 0) {
3094 if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3095 return ERROR_COMMAND_ARGUMENT_INVALID;
3096 xtensa->core_config->coproc = opt_val;
3097 } else if (strcasecmp(opt_name, "exceptions") == 0) {
3098 if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3099 return ERROR_COMMAND_ARGUMENT_INVALID;
3100 xtensa->core_config->exceptions = opt_val;
3101 } else if (strcasecmp(opt_name, "intnum") == 0) {
3102 if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3103 return ERROR_COMMAND_ARGUMENT_INVALID;
3104 xtensa->core_config->irq.enabled = (opt_val > 0);
3105 xtensa->core_config->irq.irq_num = opt_val;
3106 } else if (strcasecmp(opt_name, "hipriints") == 0) {
3107 if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3108 return ERROR_COMMAND_ARGUMENT_INVALID;
3109 xtensa->core_config->high_irq.enabled = opt_val;
3110 } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3111 if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3112 return ERROR_COMMAND_ARGUMENT_INVALID;
3113 if (!xtensa->core_config->high_irq.enabled) {
3114 LOG_ERROR("xtopt excmlevel requires hipriints");
3115 return ERROR_COMMAND_ARGUMENT_INVALID;
3116 }
3117 xtensa->core_config->high_irq.excm_level = opt_val;
3118 } else if (strcasecmp(opt_name, "intlevels") == 0) {
3119 if (xtensa->core_config->core_type == XT_LX) {
3120 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3121 return ERROR_COMMAND_ARGUMENT_INVALID;
3122 } else {
3123 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3124 return ERROR_COMMAND_ARGUMENT_INVALID;
3125 }
3126 if (!xtensa->core_config->high_irq.enabled) {
3127 LOG_ERROR("xtopt intlevels requires hipriints");
3128 return ERROR_COMMAND_ARGUMENT_INVALID;
3129 }
3130 xtensa->core_config->high_irq.level_num = opt_val;
3131 } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3132 if (xtensa->core_config->core_type == XT_LX) {
3133 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3134 return ERROR_COMMAND_ARGUMENT_INVALID;
3135 } else {
3136 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3137 return ERROR_COMMAND_ARGUMENT_INVALID;
3138 }
3139 xtensa->core_config->debug.enabled = 1;
3140 xtensa->core_config->debug.irq_level = opt_val;
3141 } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3142 if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3143 return ERROR_COMMAND_ARGUMENT_INVALID;
3144 xtensa->core_config->debug.ibreaks_num = opt_val;
3145 } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3146 if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3147 return ERROR_COMMAND_ARGUMENT_INVALID;
3148 xtensa->core_config->debug.dbreaks_num = opt_val;
3149 } else if (strcasecmp(opt_name, "tracemem") == 0) {
3150 if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3151 return ERROR_COMMAND_ARGUMENT_INVALID;
3152 xtensa->core_config->trace.mem_sz = opt_val;
3153 xtensa->core_config->trace.enabled = (opt_val > 0);
3154 } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3155 if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3156 return ERROR_COMMAND_ARGUMENT_INVALID;
3157 xtensa->core_config->trace.reversed_mem_access = opt_val;
3158 } else if (strcasecmp(opt_name, "perfcount") == 0) {
3159 if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3160 return ERROR_COMMAND_ARGUMENT_INVALID;
3161 xtensa->core_config->debug.perfcount_num = opt_val;
3162 } else {
3163 LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3164 return ERROR_OK;
3165 }
3166
3167 return ERROR_OK;
3168 }
3169
3170 COMMAND_HANDLER(xtensa_cmd_xtopt)
3171 {
3172 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3173 target_to_xtensa(get_current_target(CMD_CTX)));
3174 }
3175
3176 /* xtmem <type> [parameters] */
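/* Illustrative config usage; sizes and addresses are hypothetical examples:
 *   xtensa xtmem icache 32 8192 4
 *   xtensa xtmem dcache 32 16384 4 1
 *   xtensa xtmem iram 0x40000000 0x20000
 */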
3177 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3178 {
3179 struct xtensa_cache_config *cachep = NULL;
3180 struct xtensa_local_mem_config *memp = NULL;
3181 int mem_access = 0;
3182 bool is_dcache = false;
3183
3184 if (CMD_ARGC == 0) {
3185 LOG_ERROR("xtmem <type> [parameters]");
3186 return ERROR_COMMAND_SYNTAX_ERROR;
3187 }
3188
3189 const char *mem_name = CMD_ARGV[0];
3190 if (strcasecmp(mem_name, "icache") == 0) {
3191 cachep = &xtensa->core_config->icache;
3192 } else if (strcasecmp(mem_name, "dcache") == 0) {
3193 cachep = &xtensa->core_config->dcache;
3194 is_dcache = true;
3195 } else if (strcasecmp(mem_name, "l2cache") == 0) {
3196 /* TODO: support L2 cache */
3197 } else if (strcasecmp(mem_name, "l2addr") == 0) {
3198 /* TODO: support L2 cache */
3199 } else if (strcasecmp(mem_name, "iram") == 0) {
3200 memp = &xtensa->core_config->iram;
3201 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3202 } else if (strcasecmp(mem_name, "dram") == 0) {
3203 memp = &xtensa->core_config->dram;
3204 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3205 } else if (strcasecmp(mem_name, "sram") == 0) {
3206 memp = &xtensa->core_config->sram;
3207 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3208 } else if (strcasecmp(mem_name, "irom") == 0) {
3209 memp = &xtensa->core_config->irom;
3210 mem_access = XT_MEM_ACCESS_READ;
3211 } else if (strcasecmp(mem_name, "drom") == 0) {
3212 memp = &xtensa->core_config->drom;
3213 mem_access = XT_MEM_ACCESS_READ;
3214 } else if (strcasecmp(mem_name, "srom") == 0) {
3215 memp = &xtensa->core_config->srom;
3216 mem_access = XT_MEM_ACCESS_READ;
3217 } else {
3218 LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>");
3219 return ERROR_COMMAND_ARGUMENT_INVALID;
3220 }
3221
3222 if (cachep) {
3223 if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
3224 LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]");
3225 return ERROR_COMMAND_SYNTAX_ERROR;
3226 }
3227 cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3228 cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3229 cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3230 cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3231 strtoul(CMD_ARGV[4], NULL, 0) : 0;
3232 } else if (memp) {
3233 if (CMD_ARGC != 3) {
3234 LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>");
3235 return ERROR_COMMAND_SYNTAX_ERROR;
3236 }
3237 struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3238 memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3239 memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3240 memcfgp->access = mem_access;
3241 memp->count++;
3242 }
3243
3244 return ERROR_OK;
3245 }
3246
3247 COMMAND_HANDLER(xtensa_cmd_xtmem)
3248 {
3249 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3250 target_to_xtensa(get_current_target(CMD_CTX)));
3251 }
3252
3253 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
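/* Illustrative config usage (values are examples only): 8 foreground segments,
 * 4 KB minimum segment size, lockable segments, no execute-only support:
 *   xtensa xtmpu 8 4096 1 0
 */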
3254 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3255 {
3256 if (CMD_ARGC != 4) {
3257 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>");
3258 return ERROR_COMMAND_SYNTAX_ERROR;
3259 }
3260
3261 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3262 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3263 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3264 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3265
3266 if (nfgseg > 32) {
3267 LOG_ERROR("<nfgseg> must be within [0..32]");
3268 return ERROR_COMMAND_ARGUMENT_INVALID;
3269 } else if (minsegsize & (minsegsize - 1)) {
3270 LOG_ERROR("<minsegsize> must be a power of 2 >= 32");
3271 return ERROR_COMMAND_ARGUMENT_INVALID;
3272 } else if (lockable > 1) {
3273 LOG_ERROR("<lockable> must be 0 or 1");
3274 return ERROR_COMMAND_ARGUMENT_INVALID;
3275 } else if (execonly > 1) {
3276 LOG_ERROR("<execonly> must be 0 or 1");
3277 return ERROR_COMMAND_ARGUMENT_INVALID;
3278 }
3279
3280 xtensa->core_config->mpu.enabled = true;
3281 xtensa->core_config->mpu.nfgseg = nfgseg;
3282 xtensa->core_config->mpu.minsegsize = minsegsize;
3283 xtensa->core_config->mpu.lockable = lockable;
3284 xtensa->core_config->mpu.execonly = execonly;
3285 return ERROR_OK;
3286 }
3287
3288 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3289 {
3290 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3291 target_to_xtensa(get_current_target(CMD_CTX)));
3292 }
3293
3294 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
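/* Illustrative config usage; the helper accepts exactly two arguments,
 * each of which must be 16 or 32:
 *   xtensa xtmmu 32 32
 */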
3295 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3296 {
3297 if (CMD_ARGC != 2) {
3298 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>");
3299 return ERROR_COMMAND_SYNTAX_ERROR;
3300 }
3301
3302 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3303 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3304 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3305 LOG_ERROR("<nirefillentries> must be 16 or 32");
3306 return ERROR_COMMAND_ARGUMENT_INVALID;
3307 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3308 LOG_ERROR("<ndrefillentries> must be 16 or 32");
3309 return ERROR_COMMAND_ARGUMENT_INVALID;
3310 }
3311
3312 xtensa->core_config->mmu.enabled = true;
3313 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3314 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3315 return ERROR_OK;
3316 }
3317
3318 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3319 {
3320 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3321 target_to_xtensa(get_current_target(CMD_CTX)));
3322 }
3323
3324 /* xtregs <numregs>
3325 * xtreg <regname> <regnum> */
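/* Illustrative config usage (names and numbers are examples only): declare the total
 * register count first, then each register with its debugger-visible ID:
 *   xtensa xtregs 128
 *   xtensa xtreg pc 0x0020
 *   xtensa xtreg ar0 0x0100
 */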
3326 COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
3327 {
3328 if (CMD_ARGC == 1) {
3329 int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
3330 if ((numregs <= 0) || (numregs > UINT16_MAX)) {
3331 LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
3332 return ERROR_COMMAND_SYNTAX_ERROR;
3333 }
3334 if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
3335 LOG_ERROR("xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
3336 numregs, xtensa->genpkt_regs_num);
3337 return ERROR_COMMAND_SYNTAX_ERROR;
3338 }
3339 xtensa->total_regs_num = numregs;
3340 xtensa->core_regs_num = 0;
3341 xtensa->num_optregs = 0;
3342 /* A little more memory than required, but saves a second initialization pass */
3343 xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
3344 if (!xtensa->optregs) {
3345 LOG_ERROR("Failed to allocate xtensa->optregs!");
3346 return ERROR_FAIL;
3347 }
3348 return ERROR_OK;
3349 } else if (CMD_ARGC != 2)
3350 return ERROR_COMMAND_SYNTAX_ERROR;
3351
3352 /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3353 * if general register (g-packet) requests or contiguous register maps are supported */
3354 if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
3355 xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
3356 if (!xtensa->contiguous_regs_desc) {
3357 LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3358 return ERROR_FAIL;
3359 }
3360 }
3361
3362 const char *regname = CMD_ARGV[0];
3363 unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
3364 if (regnum > UINT16_MAX) {
3365 LOG_ERROR("<regnum> must be a 16-bit number");
3366 return ERROR_COMMAND_ARGUMENT_INVALID;
3367 }
3368
3369 if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
3370 if (xtensa->total_regs_num)
3371 LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3372 regname, regnum,
3373 xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
3374 else
3375 LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
3376 regname, regnum);
3377 return ERROR_FAIL;
3378 }
3379
3380 /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3381 struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
3382 bool is_extended_reg = true;
3383 unsigned int ridx;
3384 for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
3385 if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
3386 /* Flag core register as defined */
3387 rptr = &xtensa_regs[ridx];
3388 xtensa->core_regs_num++;
3389 is_extended_reg = false;
3390 break;
3391 }
3392 }
3393
3394 rptr->exist = true;
3395 if (is_extended_reg) {
3396 /* Register ID, debugger-visible register ID */
3397 rptr->name = strdup(CMD_ARGV[0]);
3398 rptr->dbreg_num = regnum;
3399 rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
3400 xtensa->num_optregs++;
3401
3402 /* Register type */
3403 if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
3404 rptr->type = XT_REG_GENERAL;
3405 } else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
3406 rptr->type = XT_REG_USER;
3407 } else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
3408 rptr->type = XT_REG_FR;
3409 } else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
3410 rptr->type = XT_REG_SPECIAL;
3411 } else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
3412 /* WARNING: For these registers, regnum points to the
3413 * index of the corresponding ARx registers, NOT to
3414 * the processor register number! */
3415 rptr->type = XT_REG_RELGEN;
3416 rptr->reg_num += XT_REG_IDX_ARFIRST;
3417 rptr->dbreg_num += XT_REG_IDX_ARFIRST;
3418 } else if ((regnum & XT_REG_TIE_MASK) != 0) {
3419 rptr->type = XT_REG_TIE;
3420 } else {
3421 rptr->type = XT_REG_OTHER;
3422 }
3423
3424 /* Register flags */
3425 if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
3426 (strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
3427 (strcmp(rptr->name, "intclear") == 0))
3428 rptr->flags = XT_REGF_NOREAD;
3429 else
3430 rptr->flags = 0;
3431
3432 if ((rptr->reg_num == (XT_PS_REG_NUM_BASE + xtensa->core_config->debug.irq_level)) &&
3433 (xtensa->core_config->core_type == XT_LX) && (rptr->type == XT_REG_SPECIAL)) {
3434 xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
3435 LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
3436 }
3437 } else if (strcmp(rptr->name, "cpenable") == 0) {
3438 xtensa->core_config->coproc = true;
3439 }
3440
3441 /* Build out list of contiguous registers in specified order */
3442 unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
3443 if (xtensa->contiguous_regs_desc) {
3444 assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
3445 xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
3446 }
3447 if (xtensa_extra_debug_log)
3448 LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
3449 is_extended_reg ? "config-specific" : "core",
3450 rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
3451 is_extended_reg ? xtensa->num_optregs : ridx,
3452 is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
3453 return ERROR_OK;
3454 }
3455
3456 COMMAND_HANDLER(xtensa_cmd_xtreg)
3457 {
3458 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3459 target_to_xtensa(get_current_target(CMD_CTX)));
3460 }
3461
3462 /* xtregfmt <contiguous|sparse> [numgregs] */
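/* Illustrative config usage (the numgregs value is an example only); when a contiguous
 * map is used this must precede the first "xtreg" definition:
 *   xtensa xtregfmt contiguous 72
 */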
3463 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3464 {
3465 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3466 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3467 return ERROR_OK;
3468 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3469 xtensa->regmap_contiguous = true;
3470 if (CMD_ARGC == 2) {
3471 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3472 if ((numgregs <= 0) ||
3473 ((numgregs > xtensa->total_regs_num) &&
3474 (xtensa->total_regs_num > 0))) {
3475 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3476 numgregs, xtensa->total_regs_num);
3477 return ERROR_COMMAND_SYNTAX_ERROR;
3478 }
3479 xtensa->genpkt_regs_num = numgregs;
3480 }
3481 return ERROR_OK;
3482 }
3483 }
3484 return ERROR_COMMAND_SYNTAX_ERROR;
3485 }
3486
3487 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3488 {
3489 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3490 target_to_xtensa(get_current_target(CMD_CTX)));
3491 }
3492
3493 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
3494 {
3495 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
3496 &xtensa->permissive_mode, "xtensa permissive mode");
3497 }
3498
3499 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3500 {
3501 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3502 target_to_xtensa(get_current_target(CMD_CTX)));
3503 }
3504
3505 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
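/* Illustrative runtime usage; select/mask values are examples only and are
 * bounds-checked below:
 *   xtensa perfmon_enable 0 0
 *   xtensa perfmon_enable 1 2 0xffff 0 6
 */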
3506 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3507 {
3508 struct xtensa_perfmon_config config = {
3509 .mask = 0xffff,
3510 .kernelcnt = 0,
3511 .tracelevel = -1 /* use DEBUGLEVEL by default */
3512 };
3513
3514 if (CMD_ARGC < 2 || CMD_ARGC > 6)
3515 return ERROR_COMMAND_SYNTAX_ERROR;
3516
3517 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3518 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3519 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3520 return ERROR_COMMAND_ARGUMENT_INVALID;
3521 }
3522
3523 config.select = strtoul(CMD_ARGV[1], NULL, 0);
3524 if (config.select > XTENSA_MAX_PERF_SELECT) {
3525 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3526 return ERROR_COMMAND_ARGUMENT_INVALID;
3527 }
3528
3529 if (CMD_ARGC >= 3) {
3530 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3531 if (config.mask > XTENSA_MAX_PERF_MASK) {
3532 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3533 return ERROR_COMMAND_ARGUMENT_INVALID;
3534 }
3535 }
3536
3537 if (CMD_ARGC >= 4) {
3538 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3539 if (config.kernelcnt > 1) {
3540 command_print(CMD, "kernelcnt should be 0 or 1");
3541 return ERROR_COMMAND_ARGUMENT_INVALID;
3542 }
3543 }
3544
3545 if (CMD_ARGC >= 5) {
3546 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3547 if (config.tracelevel > 7) {
3548 command_print(CMD, "tracelevel should be <=7");
3549 return ERROR_COMMAND_ARGUMENT_INVALID;
3550 }
3551 }
3552
3553 if (config.tracelevel == -1)
3554 config.tracelevel = xtensa->core_config->debug.irq_level;
3555
3556 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3557 }
3558
3559 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3560 {
3561 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3562 target_to_xtensa(get_current_target(CMD_CTX)));
3563 }
3564
3565 /* perfmon_dump [counter_id] */
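/* Illustrative runtime usage: "xtensa perfmon_dump" prints all counters,
 * "xtensa perfmon_dump 0" prints only counter 0.
 */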
3566 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3567 {
3568 if (CMD_ARGC > 1)
3569 return ERROR_COMMAND_SYNTAX_ERROR;
3570
3571 int counter_id = -1;
3572 if (CMD_ARGC == 1) {
3573 counter_id = strtol(CMD_ARGV[0], NULL, 0);
3574 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3575 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3576 return ERROR_COMMAND_ARGUMENT_INVALID;
3577 }
3578 }
3579
3580 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3581 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3582 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3583 char result_buf[128] = { 0 };
3584 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3585 struct xtensa_perfmon_result result;
3586 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3587 if (res != ERROR_OK)
3588 return res;
3589 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3590 "%-12" PRIu64 "%s",
3591 result.value,
3592 result.overflow ? " (overflow)" : "");
3593 LOG_INFO("%s", result_buf);
3594 }
3595
3596 return ERROR_OK;
3597 }
3598
3599 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3600 {
3601 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3602 target_to_xtensa(get_current_target(CMD_CTX)));
3603 }
3604
3605 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3606 {
3607 int state = -1;
3608
3609 if (CMD_ARGC < 1) {
3610 const char *st;
3611 state = xtensa->stepping_isr_mode;
3612 if (state == XT_STEPPING_ISR_ON)
3613 st = "OFF";
3614 else if (state == XT_STEPPING_ISR_OFF)
3615 st = "ON";
3616 else
3617 st = "UNKNOWN";
3618 command_print(CMD, "Current ISR step mode: %s", st);
3619 return ERROR_OK;
3620 }
3621 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3622 if (!strcasecmp(CMD_ARGV[0], "off"))
3623 state = XT_STEPPING_ISR_ON;
3624 else if (!strcasecmp(CMD_ARGV[0], "on"))
3625 state = XT_STEPPING_ISR_OFF;
3626
3627 if (state == -1) {
3628 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3629 return ERROR_FAIL;
3630 }
3631 xtensa->stepping_isr_mode = state;
3632 return ERROR_OK;
3633 }
3634
3635 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3636 {
3637 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3638 target_to_xtensa(get_current_target(CMD_CTX)));
3639 }
3640
3641 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3642 {
3643 int res;
3644 uint32_t val = 0;
3645
3646 if (CMD_ARGC >= 1) {
3647 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3648 if (!strcasecmp(CMD_ARGV[0], "none")) {
3649 val = 0;
3650 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3651 val |= OCDDCR_BREAKINEN;
3652 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3653 val |= OCDDCR_BREAKOUTEN;
3654 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3655 val |= OCDDCR_RUNSTALLINEN;
3656 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3657 val |= OCDDCR_DEBUGMODEOUTEN;
3658 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3659 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3660 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3661 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
3662 } else {
3663 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3664 command_print(
3665 CMD,
3666 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3667 return ERROR_OK;
3668 }
3669 }
3670 res = xtensa_smpbreak_set(target, val);
3671 if (res != ERROR_OK)
3672 command_print(CMD, "Failed to set smpbreak config %d", res);
3673 } else {
3674 struct xtensa *xtensa = target_to_xtensa(target);
3675 res = xtensa_smpbreak_read(xtensa, &val);
3676 if (res == ERROR_OK)
3677 command_print(CMD, "Current bits set:%s%s%s%s",
3678 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3679 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3680 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3681 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3682 );
3683 else
3684 command_print(CMD, "Failed to get smpbreak config %d", res);
3685 }
3686 return res;
3687 }
3688
3689 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3690 {
3691 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3692 get_current_target(CMD_CTX));
3693 }
3694
3695 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
3696 {
3697 struct xtensa_trace_status trace_status;
3698 struct xtensa_trace_start_config cfg = {
3699 .stoppc = 0,
3700 .stopmask = XTENSA_STOPMASK_DISABLED,
3701 .after = 0,
3702 .after_is_words = false
3703 };
3704
3705 /* Parse arguments */
3706 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3707 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i + 1) {
3708 char *e;
3709 i++;
3710 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
3711 cfg.stopmask = 0;
3712 if (*e == '/')
3713 cfg.stopmask = strtol(e + 1, NULL, 0);
3714 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i + 1) {
3715 i++;
3716 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
3717 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
3718 cfg.after_is_words = 0;
3719 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
3720 cfg.after_is_words = 1;
3721 } else {
3722 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
3723 return ERROR_FAIL;
3724 }
3725 }
3726
3727 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3728 if (res != ERROR_OK)
3729 return res;
3730 if (trace_status.stat & TRAXSTAT_TRACT) {
3731 LOG_WARNING("Stopping active trace before starting a new one");
3732 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
3733 if (res != ERROR_OK)
3734 return res;
3735 }
3736
3737 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
3738 if (res != ERROR_OK)
3739 return res;
3740
3741 xtensa->trace_active = true;
3742 command_print(CMD, "Trace started.");
3743 return ERROR_OK;
3744 }
3745
3746 COMMAND_HANDLER(xtensa_cmd_tracestart)
3747 {
3748 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
3749 target_to_xtensa(get_current_target(CMD_CTX)));
3750 }
3751
3752 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
3753 {
3754 struct xtensa_trace_status trace_status;
3755
3756 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3757 if (res != ERROR_OK)
3758 return res;
3759
3760 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
3761 command_print(CMD, "No trace is currently active.");
3762 return ERROR_FAIL;
3763 }
3764
3765 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
3766 if (res != ERROR_OK)
3767 return res;
3768
3769 xtensa->trace_active = false;
3770 command_print(CMD, "Trace stop triggered.");
3771 return ERROR_OK;
3772 }
3773
3774 COMMAND_HANDLER(xtensa_cmd_tracestop)
3775 {
3776 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
3777 target_to_xtensa(get_current_target(CMD_CTX)));
3778 }
3779
3780 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
3781 {
3782 struct xtensa_trace_config trace_config;
3783 struct xtensa_trace_status trace_status;
3784 uint32_t memsz, wmem;
3785
3786 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3787 if (res != ERROR_OK)
3788 return res;
3789
3790 if (trace_status.stat & TRAXSTAT_TRACT) {
3791 command_print(CMD, "Tracing is still active. Please stop it first.");
3792 return ERROR_FAIL;
3793 }
3794
3795 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
3796 if (res != ERROR_OK)
3797 return res;
3798
3799 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
3800 command_print(CMD, "No active trace found; nothing to dump.");
3801 return ERROR_FAIL;
3802 }
3803
3804 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
3805 LOG_INFO("Total trace memory: %d words", memsz);
3806 if ((trace_config.addr &
3807 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
3808 /* Memory hasn't overwritten itself yet. */
3809 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
3810 LOG_INFO("...but trace is only %d words", wmem);
3811 if (wmem < memsz)
3812 memsz = wmem;
3813 } else {
3814 if (trace_config.addr & TRAXADDR_TWSAT) {
3815 LOG_INFO("Real trace is many times longer than that (overflow)");
3816 } else {
3817 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
3818 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
3819 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
3820 }
3821 }
3822
3823 uint8_t *tracemem = malloc(memsz * 4);
3824 if (!tracemem) {
3825 command_print(CMD, "Failed to alloc memory for trace data!");
3826 return ERROR_FAIL;
3827 }
3828 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
3829 if (res != ERROR_OK) {
3830 free(tracemem);
3831 return res;
3832 }
3833
3834 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
3835 if (f < 0) {
3836 free(tracemem);
3837 command_print(CMD, "Unable to open file %s", fname);
3838 return ERROR_FAIL;
3839 }
3840 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
3841 command_print(CMD, "Unable to write to file %s", fname);
3842 else
3843 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
3844 close(f);
3845
3846 bool is_all_zeroes = true;
3847 for (unsigned int i = 0; i < memsz * 4; i++) {
3848 if (tracemem[i] != 0) {
3849 is_all_zeroes = false;
3850 break;
3851 }
3852 }
3853 free(tracemem);
3854 if (is_all_zeroes)
3855 command_print(
3856 CMD,
3857 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
3858
3859 return ERROR_OK;
3860 }
3861
3862 COMMAND_HANDLER(xtensa_cmd_tracedump)
3863 {
3864 if (CMD_ARGC != 1) {
3865 command_print(CMD, "Command takes exactly 1 parameter. Need filename to dump to as output!");
3866 return ERROR_FAIL;
3867 }
3868
3869 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
3870 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
3871 }
3872
3873 static const struct command_registration xtensa_any_command_handlers[] = {
3874 {
3875 .name = "xtdef",
3876 .handler = xtensa_cmd_xtdef,
3877 .mode = COMMAND_CONFIG,
3878 .help = "Configure Xtensa core type",
3879 .usage = "<type>",
3880 },
3881 {
3882 .name = "xtopt",
3883 .handler = xtensa_cmd_xtopt,
3884 .mode = COMMAND_CONFIG,
3885 .help = "Configure Xtensa core option",
3886 .usage = "<name> <value>",
3887 },
3888 {
3889 .name = "xtmem",
3890 .handler = xtensa_cmd_xtmem,
3891 .mode = COMMAND_CONFIG,
3892 .help = "Configure Xtensa memory/cache option",
3893 .usage = "<type> [parameters]",
3894 },
3895 {
3896 .name = "xtmmu",
3897 .handler = xtensa_cmd_xtmmu,
3898 .mode = COMMAND_CONFIG,
3899 .help = "Configure Xtensa MMU option",
3900 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
3901 },
3902 {
3903 .name = "xtmpu",
3904 .handler = xtensa_cmd_xtmpu,
3905 .mode = COMMAND_CONFIG,
3906 .help = "Configure Xtensa MPU option",
3907 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
3908 },
3909 {
3910 .name = "xtreg",
3911 .handler = xtensa_cmd_xtreg,
3912 .mode = COMMAND_CONFIG,
3913 .help = "Configure Xtensa register",
3914 .usage = "<regname> <regnum>",
3915 },
3916 {
3917 .name = "xtregs",
3918 .handler = xtensa_cmd_xtreg,
3919 .mode = COMMAND_CONFIG,
3920 .help = "Configure number of Xtensa registers",
3921 .usage = "<numregs>",
3922 },
3923 {
3924 .name = "xtregfmt",
3925 .handler = xtensa_cmd_xtregfmt,
3926 .mode = COMMAND_CONFIG,
3927 .help = "Configure format of Xtensa register map",
3928 .usage = "<contiguous|sparse> [numgregs]",
3929 },
3930 {
3931 .name = "set_permissive",
3932 .handler = xtensa_cmd_permissive_mode,
3933 .mode = COMMAND_ANY,
3934 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
3935 .usage = "[0|1]",
3936 },
3937 {
3938 .name = "maskisr",
3939 .handler = xtensa_cmd_mask_interrupts,
3940 .mode = COMMAND_ANY,
3941 .help = "mask Xtensa interrupts at step",
3942 .usage = "['on'|'off']",
3943 },
3944 {
3945 .name = "smpbreak",
3946 .handler = xtensa_cmd_smpbreak,
3947 .mode = COMMAND_ANY,
3948 .help = "Set the way the CPU chains OCD breaks",
3949 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
3950 },
3951 {
3952 .name = "perfmon_enable",
3953 .handler = xtensa_cmd_perfmon_enable,
3954 .mode = COMMAND_EXEC,
3955 .help = "Enable and start performance counter",
3956 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
3957 },
3958 {
3959 .name = "perfmon_dump",
3960 .handler = xtensa_cmd_perfmon_dump,
3961 .mode = COMMAND_EXEC,
3962 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
3963 .usage = "[counter_id]",
3964 },
3965 {
3966 .name = "tracestart",
3967 .handler = xtensa_cmd_tracestart,
3968 .mode = COMMAND_EXEC,
3969 .help =
3970 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
3971 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
3972 },
3973 {
3974 .name = "tracestop",
3975 .handler = xtensa_cmd_tracestop,
3976 .mode = COMMAND_EXEC,
3977 .help = "Tracing: Stop current trace as started by the tracestart command",
3978 .usage = "",
3979 },
3980 {
3981 .name = "tracedump",
3982 .handler = xtensa_cmd_tracedump,
3983 .mode = COMMAND_EXEC,
3984 .help = "Tracing: Dump trace memory to a file. One file per core.",
3985 .usage = "<outfile>",
3986 },
3987 {
3988 .name = "exe",
3989 .handler = xtensa_cmd_exe,
3990 .mode = COMMAND_ANY,
3991 .help = "Xtensa stub execution",
3992 .usage = "<ascii-encoded hexadecimal instruction bytes>",
3993 },
3994 COMMAND_REGISTRATION_DONE
3995 };
3996
3997 const struct command_registration xtensa_command_handlers[] = {
3998 {
3999 .name = "xtensa",
4000 .mode = COMMAND_ANY,
4001 .help = "Xtensa command group",
4002 .usage = "",
4003 .chain = xtensa_any_command_handlers,
4004 },
4005 COMMAND_REGISTRATION_DONE
4006 };
