a6e50ccc71f8a4c4811c36ac8bb8adbe1c5ba292
[openocd.git] / src / target / xtensa / xtensa.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
22
23 /* Swap 4-bit Xtensa opcodes and fields */
24 #define XT_NIBSWAP8(V) \
25 ((((V) & 0x0F) << 4) \
26 | (((V) & 0xF0) >> 4))
27
28 #define XT_NIBSWAP16(V) \
29 ((((V) & 0x000F) << 12) \
30 | (((V) & 0x00F0) << 4) \
31 | (((V) & 0x0F00) >> 4) \
32 | (((V) & 0xF000) >> 12))
33
34 #define XT_NIBSWAP24(V) \
35 ((((V) & 0x00000F) << 20) \
36 | (((V) & 0x0000F0) << 12) \
37 | (((V) & 0x000F00) << 4) \
38 | (((V) & 0x00F000) >> 4) \
39 | (((V) & 0x0F0000) >> 12) \
40 | (((V) & 0xF00000) >> 20))
41
42 /* _XT_INS_FORMAT_*()
43 * Instruction formatting converted from little-endian inputs
44 * and shifted to the MSB-side of DIR for BE systems.
45 */
46 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
47 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
48 | (((T) & 0x0F) << 16) \
49 | (((SR) & 0xFF) << 8)) << 8 \
50 : (OPCODE) \
51 | (((SR) & 0xFF) << 8) \
52 | (((T) & 0x0F) << 4))
53
54 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
55 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
56 | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
57 | (((R) & 0x0F) << 8)) << 8 \
58 : (OPCODE) \
59 | (((ST) & 0xFF) << 4) \
60 | (((R) & 0x0F) << 12))
61
62 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
63 (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
64 | (((T) & 0x0F) << 8) \
65 | (((S) & 0x0F) << 4) \
66 | ((IMM4) & 0x0F)) << 16 \
67 : (OPCODE) \
68 | (((T) & 0x0F) << 4) \
69 | (((S) & 0x0F) << 8) \
70 | (((IMM4) & 0x0F) << 12))
71
72 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
73 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
74 | (((T) & 0x0F) << 16) \
75 | (((S) & 0x0F) << 12) \
76 | (((R) & 0x0F) << 8) \
77 | ((IMM8) & 0xFF)) << 8 \
78 : (OPCODE) \
79 | (((IMM8) & 0xFF) << 16) \
80 | (((R) & 0x0F) << 12) \
81 | (((S) & 0x0F) << 8) \
82 | (((T) & 0x0F) << 4))
83
84 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
85 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
86 | (((T) & 0x0F) << 16) \
87 | (((S) & 0x0F) << 12) \
88 | (((R) & 0x0F) << 8)) << 8 \
89 | ((IMM4) & 0x0F) \
90 : (OPCODE) \
91 | (((IMM4) & 0x0F) << 20) \
92 | (((R) & 0x0F) << 12) \
93 | (((S) & 0x0F) << 8) \
94 | (((T) & 0x0F) << 4))
95
96 /* Xtensa processor instruction opcodes
97 */
98 /* "Return From Debug Operation" to Normal */
99 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
100 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
101 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
102
103 /* Load to DDR register, increase addr register */
104 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
105 /* Store from DDR register, increase addr register */
106 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
107
108 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
109 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
110 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
111 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
112 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
113 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
114
115 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
116 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
117 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
118 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
119 /* Store 8-bit to A(S)+IMM8 from A(T) */
120 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
121
122 /* Cache Instructions */
123 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
124 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
125 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
126 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
127
128 /* Control Instructions */
129 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
130 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
131
132 /* Read Special Register */
133 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
134 /* Write Special Register */
135 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
136 /* Swap Special Register */
137 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
138
139 /* Rotate Window by (-8..7) */
140 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
141
142 /* Read User Register */
143 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
144 /* Write User Register */
145 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
146
147 /* Read Floating-Point Register */
148 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
149 /* Write Floating-Point Register */
150 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
151
152 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
153 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
154 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
155
156 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
157 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
158 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
159
160 #define XT_WATCHPOINTS_NUM_MAX 2
161
162 /* Special register number macro for DDR, PS, WB, A3, A4 registers.
163 * These get used a lot so making a shortcut is useful.
164 */
165 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
166 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
167 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
168 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
169 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
170
171 #define XT_PS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
172 #define XT_PC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
173 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL) */
174 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
175
176 #define XT_SW_BREAKPOINTS_MAX_NUM 32
177 #define XT_HW_IBREAK_MAX_NUM 2
178 #define XT_HW_DBREAK_MAX_NUM 2
179
/* Master table of the always-present Xtensa core registers.
 * Each entry maps a register name to its target-side register number, its
 * access class (general/special/debug/relative-general) and access flags.
 * Configuration-dependent optional registers are kept separately in
 * xtensa->optregs, so only the fixed core set is described here. */
struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
	/* PC is virtual: marker value XT_PC_REG_NUM_VIRTUAL is resolved to
	 * EPC[DBGLEVEL] when the register is actually accessed. */
	XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
	/* Physical address-register file AR0..AR63. */
	XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
	/* Window control and core special registers. */
	XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ps", 0xE6, XT_REG_SPECIAL, 0),	/* PS (not mapped through EPS[]) */
	XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
	/* DDR is the debug data register; flagged no-read on this side. */
	XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
	XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),

	/* WARNING: For these registers, regnum points to the
	 * index of the corresponding ARx registers, NOT to
	 * the processor register number! */
	XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
};
283
/**
 * Types of memory regions an Xtensa target may be configured with.
 * The enumerator order is significant: xtensa_target_memory_region_find()
 * iterates [0, XTENSA_MEM_REGS_NUM) and maps each value to the matching
 * member of the core configuration in xtensa_get_mem_config().
 */
enum xtensa_mem_region_type {
	XTENSA_MEM_REG_IROM = 0x0,	/* instruction ROM */
	XTENSA_MEM_REG_IRAM,		/* instruction RAM */
	XTENSA_MEM_REG_DROM,		/* data ROM */
	XTENSA_MEM_REG_DRAM,		/* data RAM */
	XTENSA_MEM_REG_SRAM,		/* system RAM */
	XTENSA_MEM_REG_SROM,		/* system ROM */
	XTENSA_MEM_REGS_NUM		/* number of region types (iteration bound) */
};
296
/* Register value as a union so the same storage can be viewed either as a
 * 32-bit value or as the raw 4-byte buffer the register cache works with. */
union xtensa_reg_val_u {
	xtensa_reg_val_t val;	/* value view */
	uint8_t buf[4];		/* raw byte view (register-cache buffer format) */
};
302
/* Lookup table pairing "E0x" key strings with OpenOCD error codes.
 * NOTE(review): the consumers of xt_qerr are not visible in this chunk;
 * presumably these strings are matched against error replies from the
 * target — confirm against the code that indexes XT_QERR_*. */
static const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
	{ .chrval = "E00", .intval = ERROR_FAIL },
	{ .chrval = "E01", .intval = ERROR_FAIL },
	{ .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
	{ .chrval = "E03", .intval = ERROR_FAIL },
};
309
/* Compile-time switch for extra debug logging; default-initialized to false.
 * Set to true (and rebuild) to enable the additional traces. */
static const bool xtensa_extra_debug_log;
312
313 /**
314 * Gets a config for the specific mem type
315 */
316 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
317 struct xtensa *xtensa,
318 enum xtensa_mem_region_type type)
319 {
320 switch (type) {
321 case XTENSA_MEM_REG_IROM:
322 return &xtensa->core_config->irom;
323 case XTENSA_MEM_REG_IRAM:
324 return &xtensa->core_config->iram;
325 case XTENSA_MEM_REG_DROM:
326 return &xtensa->core_config->drom;
327 case XTENSA_MEM_REG_DRAM:
328 return &xtensa->core_config->dram;
329 case XTENSA_MEM_REG_SRAM:
330 return &xtensa->core_config->sram;
331 case XTENSA_MEM_REG_SROM:
332 return &xtensa->core_config->srom;
333 default:
334 return NULL;
335 }
336 }
337
338 /**
339 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
340 * for a given address
341 * Returns NULL if nothing found
342 */
343 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
344 const struct xtensa_local_mem_config *mem,
345 target_addr_t address)
346 {
347 for (unsigned int i = 0; i < mem->count; i++) {
348 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
349 if (address >= region->base && address < (region->base + region->size))
350 return region;
351 }
352 return NULL;
353 }
354
355 /**
356 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
357 * for a given address
358 * Returns NULL if nothing found
359 */
360 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
361 struct xtensa *xtensa,
362 target_addr_t address)
363 {
364 const struct xtensa_local_mem_region_config *result;
365 const struct xtensa_local_mem_config *mcgf;
366 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
367 mcgf = xtensa_get_mem_config(xtensa, mtype);
368 result = xtensa_memory_region_find(mcgf, address);
369 if (result)
370 return result;
371 }
372 return NULL;
373 }
374
375 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
376 const struct xtensa_local_mem_config *mem,
377 target_addr_t address)
378 {
379 if (!cache->size)
380 return false;
381 return xtensa_memory_region_find(mem, address);
382 }
383
384 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
385 {
386 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
387 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
388 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
389 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
390 }
391
392 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
393 {
394 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
395 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
396 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
397 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
398 }
399
400 static int xtensa_core_reg_get(struct reg *reg)
401 {
402 /* We don't need this because we read all registers on halt anyway. */
403 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
404 struct target *target = xtensa->target;
405
406 if (target->state != TARGET_HALTED)
407 return ERROR_TARGET_NOT_HALTED;
408 if (!reg->exist) {
409 if (strncmp(reg->name, "?0x", 3) == 0) {
410 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
411 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
412 return ERROR_OK;
413 }
414 return ERROR_COMMAND_ARGUMENT_INVALID;
415 }
416 return ERROR_OK;
417 }
418
419 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
420 {
421 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
422 struct target *target = xtensa->target;
423
424 assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
425 if (target->state != TARGET_HALTED)
426 return ERROR_TARGET_NOT_HALTED;
427
428 if (!reg->exist) {
429 if (strncmp(reg->name, "?0x", 3) == 0) {
430 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
431 LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
432 return ERROR_OK;
433 }
434 return ERROR_COMMAND_ARGUMENT_INVALID;
435 }
436
437 buf_cpy(buf, reg->value, reg->size);
438
439 if (xtensa->core_config->windowed) {
440 /* If the user updates a potential scratch register, track for conflicts */
441 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
442 if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
443 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
444 buf_get_u32(reg->value, 0, 32));
445 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
446 xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
447 xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
448 xtensa->scratch_ars[s].intval = true;
449 break;
450 }
451 }
452 }
453 reg->dirty = true;
454 reg->valid = true;
455
456 return ERROR_OK;
457 }
458
/* Register accessors handed to the OpenOCD register framework for every
 * entry in the Xtensa register cache. */
static const struct reg_arch_type xtensa_reg_type = {
	.get = xtensa_core_reg_get,
	.set = xtensa_core_reg_set,
};
463
464 /* Convert a register index that's indexed relative to windowbase, to the real address. */
465 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
466 enum xtensa_reg_id reg_idx,
467 int windowbase)
468 {
469 unsigned int idx;
470 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
471 idx = reg_idx - XT_REG_IDX_AR0;
472 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
473 idx = reg_idx - XT_REG_IDX_A0;
474 } else {
475 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
476 return -1;
477 }
478 return ((idx + windowbase * 4) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
479 }
480
481 static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
482 enum xtensa_reg_id reg_idx,
483 int windowbase)
484 {
485 return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
486 }
487
488 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
489 {
490 struct reg *reg_list = xtensa->core_cache->reg_list;
491 reg_list[reg_idx].dirty = true;
492 }
493
/* Queue a (narrow) instruction for execution on the core by writing it to
 * DIR0EXEC; the write triggers execution when the queue is run. */
static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
{
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0EXEC, ins);
}
498
/* Queue a wide instruction (up to 64 bytes) for execution by filling the
 * DIRx register bank.  DIR1..DIRn are written first; DIR0EXEC must be
 * written last because that write triggers execution. */
static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
{
	if ((oplen > 0) && (oplen <= 64)) {
		uint32_t opsw[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };	/* 8 DIRx regs: max width 64B */
		uint8_t oplenw = (oplen + 3) / 4;	/* opcode length in 32-bit words, rounded up */
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
			/* NOTE(review): buf_bswap32 consumes oplenw*4 bytes from ops; when
			 * oplen is not a multiple of 4 this reads up to 3 bytes past the
			 * opcode — confirm callers provide padded buffers. */
			buf_bswap32((uint8_t *)opsw, ops, oplenw * 4);
		else
			memcpy(opsw, ops, oplen);
		for (int32_t i = oplenw - 1; i > 0; i--)
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0 + i, opsw[i]);
		/* Write DIR0EXEC last */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0EXEC, opsw[0]);
	}
}
514
515 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
516 {
517 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
518 return dm->pwr_ops->queue_reg_write(dm, reg, data);
519 }
520
521 /* NOTE: Assumes A3 has already been saved */
522 static int xtensa_window_state_save(struct target *target, uint32_t *woe)
523 {
524 struct xtensa *xtensa = target_to_xtensa(target);
525 int woe_dis;
526 uint8_t woe_buf[4];
527
528 if (xtensa->core_config->windowed) {
529 /* Save PS (LX) and disable window overflow exceptions prior to AR save */
530 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_PS, XT_REG_A3));
531 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
532 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, woe_buf);
533 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
534 if (res != ERROR_OK) {
535 LOG_ERROR("Failed to read PS (%d)!", res);
536 return res;
537 }
538 xtensa_core_status_check(target);
539 *woe = buf_get_u32(woe_buf, 0, 32);
540 woe_dis = *woe & ~XT_PS_WOE_MSK;
541 LOG_DEBUG("Clearing PS.WOE (0x%08" PRIx32 " -> 0x%08" PRIx32 ")", *woe, woe_dis);
542 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe_dis);
543 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
544 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
545 }
546 return ERROR_OK;
547 }
548
/* Restore the PS value previously captured by xtensa_window_state_save(),
 * re-enabling window overflow exceptions.  The value travels DDR -> A3 -> PS.
 * No-op on non-windowed configurations.
 * NOTE: Assumes A3 has already been saved */
static void xtensa_window_state_restore(struct target *target, uint32_t woe)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	if (xtensa->core_config->windowed) {
		/* Restore window overflow exception state */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
		LOG_DEBUG("Restored PS.WOE (0x%08" PRIx32 ")", woe);
	}
}
561
562 static bool xtensa_reg_is_readable(int flags, int cpenable)
563 {
564 if (flags & XT_REGF_NOREAD)
565 return false;
566 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
567 return false;
568 return true;
569 }
570
571 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
572 {
573 int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
574 if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
575 LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
576 memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
577 } else {
578 LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
579 memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
580 }
581 return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
582 }
583
/**
 * Flush every dirty register in the cache back to the processor.
 *
 * Order matters throughout: special/user/FP registers are written first
 * (via A3 as scratch), then CPENABLE (delayed so coprocessor registers can
 * still be written while it is enabled in the cache), then the visible
 * A0-A15, then — on windowed configs — the whole AR file by rotating the
 * window in steps of four.  A3 is preserved around the scratch usage and
 * restored at the end.
 *
 * @return ERROR_OK or the first queue-execution error encountered.
 */
static int xtensa_write_dirty_registers(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	xtensa_reg_val_t regval, windowbase = 0;
	bool scratch_reg_dirty = false, delay_cpenable = false;
	struct reg *reg_list = xtensa->core_cache->reg_list;
	unsigned int reg_list_size = xtensa->core_cache->num_regs;
	bool preserve_a3 = false;
	uint8_t a3_buf[4];
	xtensa_reg_val_t a3 = 0, woe;

	LOG_TARGET_DEBUG(target, "start");

	/* We need to write the dirty registers in the cache list back to the processor.
	 * Start by writing the SFR/user registers. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		/* Indexes >= XT_NUM_REGS refer to the optional-register table. */
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (reg_list[i].dirty) {
			if (rlist[ridx].type == XT_REG_SPECIAL ||
				rlist[ridx].type == XT_REG_USER ||
				rlist[ridx].type == XT_REG_FR) {
				/* A3 is used as scratch below, so it must be rewritten later. */
				scratch_reg_dirty = true;
				if (i == XT_REG_IDX_CPENABLE) {
					/* Defer CPENABLE until all coprocessor regs are flushed. */
					delay_cpenable = true;
					continue;
				}
				regval = xtensa_reg_get(target, i);
				LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
					reg_list[i].name,
					rlist[ridx].reg_num,
					regval);
				/* Value path: DDR -> A3 -> destination register. */
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				if (reg_list[i].exist) {
					unsigned int reg_num = rlist[ridx].reg_num;
					if (rlist[ridx].type == XT_REG_USER) {
						xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
					} else if (rlist[ridx].type == XT_REG_FR) {
						xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
					} else {/*SFR */
						if (reg_num == XT_PC_REG_NUM_VIRTUAL)
							/* reg number of PC for debug interrupt depends on NDEBUGLEVEL
							**/
							reg_num =
								(XT_PC_REG_NUM_BASE +
								xtensa->core_config->debug.irq_level);
						xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
					}
				}
				reg_list[i].dirty = false;
			}
		}
	}
	if (scratch_reg_dirty)
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	if (delay_cpenable) {
		/* Flush the deferred CPENABLE now that coprocessor regs are done. */
		regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
		LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
			xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
			XT_REG_A3));
		reg_list[XT_REG_IDX_CPENABLE].dirty = false;
	}

	preserve_a3 = (xtensa->core_config->windowed);
	if (preserve_a3) {
		/* Save (windowed) A3 for scratch use */
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK)
			return res;
		xtensa_core_status_check(target);
		a3 = buf_get_u32(a3_buf, 0, 32);
	}

	if (xtensa->core_config->windowed) {
		/* Disable window-overflow exceptions while touching ARs directly. */
		res = xtensa_window_state_save(target, &woe);
		if (res != ERROR_OK)
			return res;
		/* Grab the windowbase, we need it. */
		windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
		/* Check if there are mismatches between the ARx and corresponding Ax registers.
		 * When the user sets a register on a windowed config, xt-gdb may set the ARx
		 * register directly. Thus we take ARx as priority over Ax if both are dirty
		 * and it's unclear if the user set one over the other explicitly.
		 */
		for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
			unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
			if (reg_list[i].dirty && reg_list[j].dirty) {
				if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
					bool show_warning = true;
					if (i == XT_REG_IDX_A3)
						show_warning = xtensa_scratch_regs_fixup(xtensa,
							reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
					else if (i == XT_REG_IDX_A4)
						show_warning = xtensa_scratch_regs_fixup(xtensa,
							reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
					if (show_warning)
						LOG_WARNING(
							"Warning: Both A%d [0x%08" PRIx32
							"] as well as its underlying physical register "
							"(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
							i - XT_REG_IDX_A0,
							buf_get_u32(reg_list[i].value, 0, 32),
							j - XT_REG_IDX_AR0,
							buf_get_u32(reg_list[j].value, 0, 32));
				}
			}
		}
	}

	/* Write A0-A16. */
	for (unsigned int i = 0; i < 16; i++) {
		if (reg_list[XT_REG_IDX_A0 + i].dirty) {
			regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
			LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
				xtensa_regs[XT_REG_IDX_A0 + i].name,
				regval,
				xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
			reg_list[XT_REG_IDX_A0 + i].dirty = false;
			if (i == 3) {
				/* Avoid stomping A3 during restore at end of function */
				a3 = regval;
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* Now write AR registers */
		for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
			/* Write the 16 registers we can see */
			for (unsigned int i = 0; i < 16; i++) {
				if (i + j < xtensa->core_config->aregs_num) {
					enum xtensa_reg_id realadr =
						xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
						windowbase);
					/* Write back any dirty un-windowed registers */
					if (reg_list[realadr].dirty) {
						regval = xtensa_reg_get(target, realadr);
						LOG_TARGET_DEBUG(
							target,
							"Writing back reg %s value %08" PRIX32 ", num =%i",
							xtensa_regs[realadr].name,
							regval,
							xtensa_regs[realadr].reg_num);
						xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
						xtensa_queue_exec_ins(xtensa,
							XT_INS_RSR(xtensa, XT_SR_DDR,
								xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
						reg_list[realadr].dirty = false;
						if ((i + j) == 3)
							/* Avoid stomping AR during A3 restore at end of function */
							a3 = regval;
					}
				}
			}
			/*Now rotate the window so we'll see the next 16 registers. The final rotate
			 * will wraparound, */
			/*leaving us in the state we were. */
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
		}

		xtensa_window_state_restore(target, woe);

		/* Scratch-conflict tracking is consumed by the flush; reset it. */
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
			xtensa->scratch_ars[s].intval = false;
	}

	if (preserve_a3) {
		/* Restore the saved A3 value via DDR. */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	}

	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	xtensa_core_status_check(target);

	return res;
}
769
770 static inline bool xtensa_is_stopped(struct target *target)
771 {
772 struct xtensa *xtensa = target_to_xtensa(target);
773 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
774 }
775
/**
 * Target examine handler: power up the debug/memory/core domains, enable
 * the debug module, verify it responds with a valid OCD ID, mark the
 * target examined and re-apply the cached smpbreak configuration.
 *
 * @return ERROR_OK, ERROR_FAIL if no core config was loaded, a queue
 *         error, or ERROR_TARGET_FAILURE if the debug module is offline.
 */
int xtensa_examine(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);

	LOG_DEBUG("coreid = %d", target->coreid);

	if (xtensa->core_config->core_type == XT_UNDEF) {
		/* Core parameters must have been set by a config script before examine. */
		LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
		return ERROR_FAIL;
	}

	/* Wake the domains first, then additionally claim JTAG debug use. */
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
	xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
	xtensa_dm_queue_enable(&xtensa->dbg_mod);
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
		LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
	if (!target_was_examined(target))
		target_set_examined(target);
	/* Re-apply cached cross-trigger settings now that the DM is reachable. */
	xtensa_smpbreak_write(xtensa, xtensa->smp_break);
	return ERROR_OK;
}
805
806 int xtensa_wakeup(struct target *target)
807 {
808 struct xtensa *xtensa = target_to_xtensa(target);
809 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
810
811 if (xtensa->reset_asserted)
812 cmd |= PWRCTL_CORERESET(xtensa);
813 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
814 /* TODO: can we join this with the write above? */
815 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
816 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
817 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
818 }
819
820 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
821 {
822 uint32_t dsr_data = 0x00110000;
823 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
824 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
825 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
826
827 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
828 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, set | OCDDCR_ENABLEOCD);
829 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, clear);
830 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DSR, dsr_data);
831 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
832 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
833 }
834
835 int xtensa_smpbreak_set(struct target *target, uint32_t set)
836 {
837 struct xtensa *xtensa = target_to_xtensa(target);
838 int res = ERROR_OK;
839
840 xtensa->smp_break = set;
841 if (target_was_examined(target))
842 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
843 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
844 return res;
845 }
846
847 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
848 {
849 uint8_t dcr_buf[sizeof(uint32_t)];
850
851 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DCRSET, dcr_buf);
852 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
853 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
854 *val = buf_get_u32(dcr_buf, 0, 32);
855
856 return res;
857 }
858
859 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
860 {
861 struct xtensa *xtensa = target_to_xtensa(target);
862 *val = xtensa->smp_break;
863 return ERROR_OK;
864 }
865
866 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
867 {
868 return buf_get_u32(reg->value, 0, 32);
869 }
870
871 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
872 {
873 buf_set_u32(reg->value, 0, 32, value);
874 reg->dirty = true;
875 }
876
877 int xtensa_core_status_check(struct target *target)
878 {
879 struct xtensa *xtensa = target_to_xtensa(target);
880 int res, needclear = 0;
881
882 xtensa_dm_core_status_read(&xtensa->dbg_mod);
883 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
884 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
885 if (dsr & OCDDSR_EXECBUSY) {
886 if (!xtensa->suppress_dsr_errors)
887 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
888 needclear = 1;
889 }
890 if (dsr & OCDDSR_EXECEXCEPTION) {
891 if (!xtensa->suppress_dsr_errors)
892 LOG_TARGET_ERROR(target,
893 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
894 dsr);
895 needclear = 1;
896 }
897 if (dsr & OCDDSR_EXECOVERRUN) {
898 if (!xtensa->suppress_dsr_errors)
899 LOG_TARGET_ERROR(target,
900 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
901 dsr);
902 needclear = 1;
903 }
904 if (needclear) {
905 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
906 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
907 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
908 LOG_TARGET_ERROR(target, "clearing DSR failed!");
909 return ERROR_FAIL;
910 }
911 return ERROR_OK;
912 }
913
914 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
915 {
916 struct xtensa *xtensa = target_to_xtensa(target);
917 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
918 return xtensa_reg_get_value(reg);
919 }
920
921 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
922 {
923 struct xtensa *xtensa = target_to_xtensa(target);
924 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
925 if (xtensa_reg_get_value(reg) == value)
926 return;
927 xtensa_reg_set_value(reg, value);
928 }
929
930 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
931 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
932 {
933 struct xtensa *xtensa = target_to_xtensa(target);
934 uint32_t windowbase = (xtensa->core_config->windowed ?
935 xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE) : 0);
936 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
937 xtensa_reg_set(target, a_idx, value);
938 xtensa_reg_set(target, ar_idx, value);
939 }
940
941 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
942 uint32_t xtensa_cause_get(struct target *target)
943 {
944 return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
945 }
946
947 void xtensa_cause_clear(struct target *target)
948 {
949 struct xtensa *xtensa = target_to_xtensa(target);
950 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
951 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
952 }
953
954 int xtensa_assert_reset(struct target *target)
955 {
956 struct xtensa *xtensa = target_to_xtensa(target);
957
958 LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
959 target->state = TARGET_RESET;
960 xtensa_queue_pwr_reg_write(xtensa,
961 XDMREG_PWRCTL,
962 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
963 PWRCTL_COREWAKEUP(xtensa) | PWRCTL_CORERESET(xtensa));
964 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
965 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
966 if (res != ERROR_OK)
967 return res;
968 xtensa->reset_asserted = true;
969 return res;
970 }
971
972 int xtensa_deassert_reset(struct target *target)
973 {
974 struct xtensa *xtensa = target_to_xtensa(target);
975
976 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
977 if (target->reset_halt)
978 xtensa_queue_dbg_reg_write(xtensa,
979 XDMREG_DCRSET,
980 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
981 xtensa_queue_pwr_reg_write(xtensa,
982 XDMREG_PWRCTL,
983 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
984 PWRCTL_COREWAKEUP(xtensa));
985 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
986 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
987 if (res != ERROR_OK)
988 return res;
989 target->state = TARGET_RUNNING;
990 xtensa->reset_asserted = false;
991 return res;
992 }
993
/* Soft reset-halt maps directly onto asserting reset; the halt-on-reset debug
 * interrupt is armed later by xtensa_deassert_reset() when reset_halt is set. */
int xtensa_soft_reset_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "begin");
	int res = xtensa_assert_reset(target);
	return res;
}
999
/* Read every core register from the target into the OpenOCD register cache.
 *
 * Strategy: batch as many debug-module operations as possible into single
 * queue executions. First the physical AR file is read 16 registers at a
 * time (rotating the window with ROTW on windowed configs), then CPENABLE
 * is read and all coprocessors are temporarily enabled so FP/user registers
 * become readable, then the remaining special/user/FP registers are read
 * using A3 as a scratch register. Finally the raw buffers are decoded into
 * the cache, mapping windowed general registers through WINDOWBASE.
 *
 * Returns ERROR_OK on success, or an error code on allocation or
 * debug-queue failure. */
int xtensa_fetch_all_regs(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg *reg_list = xtensa->core_cache->reg_list;
	unsigned int reg_list_size = xtensa->core_cache->num_regs;
	xtensa_reg_val_t cpenable = 0, windowbase = 0, a3;
	uint32_t woe;
	uint8_t a3_buf[4];
	/* Read per-register DSRs only on the first fetch or when debug logging is on */
	bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);

	/* Scratch buffers: raw register values and (optionally) the DSR after each read */
	union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
	if (!regvals) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
		return ERROR_FAIL;
	}
	union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
	if (!dsrs) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
		free(regvals);
		return ERROR_FAIL;
	}

	LOG_TARGET_DEBUG(target, "start");

	/* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
	int res = xtensa_window_state_save(target, &woe);
	if (res != ERROR_OK)
		goto xtensa_fetch_all_regs_done;

	/* Assume the CPU has just halted. We now want to fill the register cache with all the
	 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
	 * in one go, then sort everything out from the regvals variable. */

	/* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
	for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
		/* Grab the 16 registers we can see */
		for (unsigned int i = 0; i < 16; i++) {
			if (i + j < xtensa->core_config->aregs_num) {
				xtensa_queue_exec_ins(xtensa,
					XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
					regvals[XT_REG_IDX_AR0 + i + j].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR,
						dsrs[XT_REG_IDX_AR0 + i + j].buf);
			}
		}
		if (xtensa->core_config->windowed)
			/* Now rotate the window so we'll see the next 16 registers. The final
			 * rotate will wraparound, leaving us in the state we were. */
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
	}
	xtensa_window_state_restore(target, woe);

	if (xtensa->core_config->coproc) {
		/* As the very first thing after AREGS, go grab CPENABLE */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
	}
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to read ARs (%d)!", res);
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	/* Original A3 value, captured by the very first WSR/DDR read above */
	a3 = buf_get_u32(a3_buf, 0, 32);

	if (xtensa->core_config->coproc) {
		cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);

		/* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));

		/* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
		LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
		xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	}
	/* We're now free to use any of A0-A15 as scratch registers
	 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		/* Index i spans the core regs first, then the optional regs */
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			bool reg_fetched = true;
			unsigned int reg_num = rlist[ridx].reg_num;
			/* Queue the read instruction appropriate for the register class */
			switch (rlist[ridx].type) {
			case XT_REG_USER:
				xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_FR:
				xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_SPECIAL:
				if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
					/* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
					reg_num = (XT_PC_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
				} else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
					/* CPENABLE already read/updated; don't re-read */
					reg_fetched = false;
					break;
				}
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
				break;
			default:
				reg_fetched = false;
			}
			if (reg_fetched) {
				/* Move the value from A3 into DDR and capture it (plus DSR if enabled) */
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
			}
		}
	}
	/* Ok, send the whole mess to the CPU. */
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to fetch AR regs!");
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	if (debug_dsrs) {
		/* DSR checking: follows order in which registers are requested. */
		for (unsigned int i = 0; i < reg_list_size; i++) {
			struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
			unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
			if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
				(rlist[ridx].type != XT_REG_DEBUG) &&
				(rlist[ridx].type != XT_REG_RELGEN) &&
				(rlist[ridx].type != XT_REG_TIE) &&
				(rlist[ridx].type != XT_REG_OTHER)) {
				if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
					LOG_ERROR("Exception reading %s!", reg_list[i].name);
					res = ERROR_FAIL;
					goto xtensa_fetch_all_regs_done;
				}
			}
		}
	}

	if (xtensa->core_config->windowed)
		/* We need the windowbase to decode the general addresses. */
		windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE].buf, 0, 32);
	/* Decode the result and update the cache. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
				/* The 64-value general register set is read from (windowbase) on down.
				 * We need to get the real register address by subtracting windowbase and
				 * wrapping around. */
				enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
					windowbase);
				buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
			} else if (rlist[ridx].type == XT_REG_RELGEN) {
				/* Ax aliases: copy straight from the corresponding AR slot */
				buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
				if (xtensa_extra_debug_log) {
					xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
					LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
				}
			} else {
				xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
				/* CPENABLE stays dirty so its saved value is restored on write-back */
				bool is_dirty = (i == XT_REG_IDX_CPENABLE);
				if (xtensa_extra_debug_log)
					LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
				xtensa_reg_set(target, i, regval);
				reg_list[i].dirty = is_dirty;	/* always do this _after_ xtensa_reg_set! */
			}
			reg_list[i].valid = true;
		} else {
			if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
				/* Report read-only registers all-zero but valid */
				reg_list[i].valid = true;
				xtensa_reg_set(target, i, 0);
			} else {
				reg_list[i].valid = false;
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* We have used A3 as a scratch register.
		 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
		 */
		enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
		xtensa_reg_set(target, ar3_idx, a3);
		xtensa_mark_register_dirty(xtensa, ar3_idx);

		/* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
		enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
			xtensa->scratch_ars[s].intval = false;
	}

	/* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
	xtensa_reg_set(target, XT_REG_IDX_A3, a3);
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	xtensa->regs_fetched = true;
xtensa_fetch_all_regs_done:
	free(regvals);
	free(dsrs);
	return res;
}
1214
1215 int xtensa_get_gdb_reg_list(struct target *target,
1216 struct reg **reg_list[],
1217 int *reg_list_size,
1218 enum target_register_class reg_class)
1219 {
1220 struct xtensa *xtensa = target_to_xtensa(target);
1221 unsigned int num_regs;
1222
1223 if (reg_class == REG_CLASS_GENERAL) {
1224 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1225 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1226 return ERROR_FAIL;
1227 }
1228 num_regs = xtensa->genpkt_regs_num;
1229 } else {
1230 /* Determine whether to return a contiguous or sparse register map */
1231 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1232 }
1233
1234 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1235
1236 *reg_list = calloc(num_regs, sizeof(struct reg *));
1237 if (!*reg_list)
1238 return ERROR_FAIL;
1239
1240 *reg_list_size = num_regs;
1241 if (xtensa->regmap_contiguous) {
1242 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1243 for (unsigned int i = 0; i < num_regs; i++)
1244 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1245 return ERROR_OK;
1246 }
1247
1248 for (unsigned int i = 0; i < num_regs; i++)
1249 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1250 unsigned int k = 0;
1251 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1252 if (xtensa->core_cache->reg_list[i].exist) {
1253 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1254 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1255 int sparse_idx = rlist[ridx].dbreg_num;
1256 if (i == XT_REG_IDX_PS) {
1257 if (xtensa->eps_dbglevel_idx == 0) {
1258 LOG_ERROR("eps_dbglevel_idx not set\n");
1259 return ERROR_FAIL;
1260 }
1261 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1262 if (xtensa_extra_debug_log)
1263 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1264 sparse_idx, xtensa->core_config->debug.irq_level,
1265 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1266 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1267 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1268 } else {
1269 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1270 }
1271 if (i == XT_REG_IDX_PC)
1272 /* Make a duplicate copy of PC for external access */
1273 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1274 k++;
1275 }
1276 }
1277
1278 if (k == num_regs)
1279 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1280
1281 return ERROR_OK;
1282 }
1283
1284 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1285 {
1286 struct xtensa *xtensa = target_to_xtensa(target);
1287 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1288 xtensa->core_config->mmu.dtlb_entries_count > 0;
1289 return ERROR_OK;
1290 }
1291
1292 int xtensa_halt(struct target *target)
1293 {
1294 struct xtensa *xtensa = target_to_xtensa(target);
1295
1296 LOG_TARGET_DEBUG(target, "start");
1297 if (target->state == TARGET_HALTED) {
1298 LOG_TARGET_DEBUG(target, "target was already halted");
1299 return ERROR_OK;
1300 }
1301 /* First we have to read dsr and check if the target stopped */
1302 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1303 if (res != ERROR_OK) {
1304 LOG_TARGET_ERROR(target, "Failed to read core status!");
1305 return res;
1306 }
1307 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1308 if (!xtensa_is_stopped(target)) {
1309 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1310 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1311 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1312 if (res != ERROR_OK)
1313 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1314 }
1315
1316 return res;
1317 }
1318
1319 int xtensa_prepare_resume(struct target *target,
1320 int current,
1321 target_addr_t address,
1322 int handle_breakpoints,
1323 int debug_execution)
1324 {
1325 struct xtensa *xtensa = target_to_xtensa(target);
1326 uint32_t bpena = 0;
1327
1328 LOG_TARGET_DEBUG(target,
1329 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1330 current,
1331 address,
1332 handle_breakpoints,
1333 debug_execution);
1334
1335 if (target->state != TARGET_HALTED) {
1336 LOG_TARGET_WARNING(target, "target not halted");
1337 return ERROR_TARGET_NOT_HALTED;
1338 }
1339
1340 if (address && !current) {
1341 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1342 } else {
1343 uint32_t cause = xtensa_cause_get(target);
1344 LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1345 cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1346 if (cause & DEBUGCAUSE_DB)
1347 /* We stopped due to a watchpoint. We can't just resume executing the
1348 * instruction again because */
1349 /* that would trigger the watchpoint again. To fix this, we single-step,
1350 * which ignores watchpoints. */
1351 xtensa_do_step(target, current, address, handle_breakpoints);
1352 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1353 /* We stopped due to a break instruction. We can't just resume executing the
1354 * instruction again because */
1355 /* that would trigger the break again. To fix this, we single-step, which
1356 * ignores break. */
1357 xtensa_do_step(target, current, address, handle_breakpoints);
1358 }
1359
1360 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1361 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1362 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1363 if (xtensa->hw_brps[slot]) {
1364 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1365 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1366 bpena |= BIT(slot);
1367 }
1368 }
1369 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1370
1371 /* Here we write all registers to the targets */
1372 int res = xtensa_write_dirty_registers(target);
1373 if (res != ERROR_OK)
1374 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1375 return res;
1376 }
1377
1378 int xtensa_do_resume(struct target *target)
1379 {
1380 struct xtensa *xtensa = target_to_xtensa(target);
1381
1382 LOG_TARGET_DEBUG(target, "start");
1383
1384 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1385 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1386 if (res != ERROR_OK) {
1387 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1388 return res;
1389 }
1390 xtensa_core_status_check(target);
1391 return ERROR_OK;
1392 }
1393
1394 int xtensa_resume(struct target *target,
1395 int current,
1396 target_addr_t address,
1397 int handle_breakpoints,
1398 int debug_execution)
1399 {
1400 LOG_TARGET_DEBUG(target, "start");
1401 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1402 if (res != ERROR_OK) {
1403 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1404 return res;
1405 }
1406 res = xtensa_do_resume(target);
1407 if (res != ERROR_OK) {
1408 LOG_TARGET_ERROR(target, "Failed to resume!");
1409 return res;
1410 }
1411
1412 target->debug_reason = DBG_REASON_NOTHALTED;
1413 if (!debug_execution)
1414 target->state = TARGET_RUNNING;
1415 else
1416 target->state = TARGET_DEBUG_RUNNING;
1417
1418 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1419
1420 return ERROR_OK;
1421 }
1422
1423 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1424 {
1425 struct xtensa *xtensa = target_to_xtensa(target);
1426 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1427 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1428 if (err != ERROR_OK)
1429 return false;
1430
1431 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1432 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1433 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1434 return true;
1435
1436 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1437 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1438 return true;
1439
1440 return false;
1441 }
1442
/* Execute a single machine instruction on the target.
 *
 * current: non-zero to step from the current PC, zero to step from 'address'.
 * handle_breakpoints: when set and the halt was caused by a break instruction,
 *   PC is simply advanced past the BREAK/BREAK.N without executing anything.
 *
 * Mechanism: ICOUNT is loaded with -2 and ICOUNTLEVEL is raised so that one
 * instruction triggers a debug interrupt. PS.INTLEVEL may be temporarily
 * lowered (and DBREAK watchpoints temporarily disabled) so the step can
 * proceed; both are restored before returning. A retry loop steps out of
 * window-exception handlers when ISR stepping is off. */
int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	const uint32_t icount_val = -2;	/* ICOUNT value to load for 1 step */
	/* Saved DBREAKC values while watchpoints are temporarily disabled */
	xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
	xtensa_reg_val_t icountlvl, cause;
	xtensa_reg_val_t oldps, oldpc, cur_pc;
	bool ps_lowered = false;

	LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
		current, address, handle_breakpoints);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (xtensa->eps_dbglevel_idx == 0) {
		LOG_ERROR("eps_dbglevel_idx not set\n");
		return ERROR_FAIL;
	}

	/* Save old ps (EPS[dbglvl] on LX), pc */
	oldps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
	oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);

	cause = xtensa_cause_get(target);
	LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
		oldps,
		oldpc,
		cause,
		xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
	if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
		/* handle hard-coded SW breakpoints (e.g. syscalls) */
		LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* pretend that we have stepped: skip the 3-byte BREAK or 2-byte BREAK.N */
		if (cause & DEBUGCAUSE_BI)
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3);	/* PC = PC+3 */
		else
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2);	/* PC = PC+2 */
		return ERROR_OK;
	}

	/* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
	 * at which the instructions are to be counted while stepping.
	 *
	 * For example, if we need to step by 2 instructions, and an interrupt occurs
	 * in between, the processor will trigger the interrupt and halt after the 2nd
	 * instruction within the interrupt vector and/or handler.
	 *
	 * However, sometimes we don't want the interrupt handlers to be executed at all
	 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
	 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
	 * code from being counted during stepping. Note that C exception handlers must
	 * run at level 0 and hence will be counted and stepped into, should one occur.
	 *
	 * TODO: Certain instructions should never be single-stepped and should instead
	 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
	 * RFI >= DBGLEVEL.
	 */
	if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_TARGET_WARNING(
				target,
				"disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
			return ERROR_FAIL;
		}
		/* Update ICOUNTLEVEL accordingly */
		icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
	} else {
		icountlvl = xtensa->core_config->debug.irq_level;
	}

	if (cause & DEBUGCAUSE_DB) {
		/* We stopped due to a watchpoint. We can't just resume executing the instruction again because
		 * that would trigger the watchpoint again. To fix this, we remove watchpoints,single-step and
		 * re-enable the watchpoint. */
		LOG_TARGET_DEBUG(
			target,
			"Single-stepping to get past instruction that triggered the watchpoint...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* Save all DBREAKCx registers and set to 0 to disable watchpoints */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
			dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
		}
	}

	if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
		/* handle normal SW breakpoint */
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
	if ((oldps & 0xf) >= icountlvl) {
		/* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
		ps_lowered = true;
		uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
		LOG_TARGET_DEBUG(target,
			"Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			newps,
			oldps);
	}
	/* Retry loop: repeats only when stepping lands inside a window-exception
	 * handler while ISR stepping is off (see 'continue' below). */
	do {
		xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
		xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);

		/* Now ICOUNT is set, we can resume as if we were going to run */
		res = xtensa_prepare_resume(target, current, address, 0, 0);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
			return res;
		}
		res = xtensa_do_resume(target);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
			return res;
		}

		/* Wait for stepping to complete (500 ms poll window) */
		long long start = timeval_ms();
		while (timeval_ms() < start + 500) {
			/* Do not use target_poll here, it also triggers other things... just manually read the DSR
			 *until stepping is complete. */
			usleep(1000);
			res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
			if (res != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Failed to read core status!");
				return res;
			}
			if (xtensa_is_stopped(target))
				break;
			usleep(1000);
		}
		LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
			xtensa_dm_core_status_get(&xtensa->dbg_mod));
		if (!xtensa_is_stopped(target)) {
			LOG_TARGET_WARNING(
				target,
				"Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
			target->debug_reason = DBG_REASON_NOTHALTED;
			target->state = TARGET_RUNNING;
			return ERROR_FAIL;
		}

		xtensa_fetch_all_regs(target);
		cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);

		LOG_TARGET_DEBUG(target,
			"cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
			xtensa_reg_get(target, XT_REG_IDX_PS),
			cur_pc,
			xtensa_cause_get(target),
			xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));

		/* Do not step into WindowOverflow if ISRs are masked.
		   If we stop in WindowOverflow at breakpoint with masked ISRs and
		   try to do a step it will get us out of that handler */
		if (xtensa->core_config->windowed &&
			xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
			xtensa_pc_in_winexc(target, cur_pc)) {
			/* isrmask = on, need to step out of the window exception handler */
			LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
			oldpc = cur_pc;
			address = oldpc + 3;
			continue;
		}

		if (oldpc == cur_pc)
			LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
		else
			LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
		break;
	} while (true);

	target->debug_reason = DBG_REASON_SINGLESTEP;
	target->state = TARGET_HALTED;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
	LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);

	if (cause & DEBUGCAUSE_DB) {
		LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
		/* Restore the DBREAKCx registers */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
	}

	/* Restore int level */
	if (ps_lowered) {
		LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			oldps);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
	}

	/* write ICOUNTLEVEL back to zero */
	xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
	/* TODO: can we skip writing dirty registers and re-fetching them? */
	res = xtensa_write_dirty_registers(target);
	xtensa_fetch_all_regs(target);
	return res;
}
1648
1649 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1650 {
1651 return xtensa_do_step(target, current, address, handle_breakpoints);
1652 }
1653
1654 /**
1655 * Returns true if two ranges are overlapping
1656 */
1657 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1658 target_addr_t r1_end,
1659 target_addr_t r2_start,
1660 target_addr_t r2_end)
1661 {
1662 if ((r2_start >= r1_start) && (r2_start < r1_end))
1663 return true; /* r2_start is in r1 region */
1664 if ((r2_end > r1_start) && (r2_end <= r1_end))
1665 return true; /* r2_end is in r1 region */
1666 return false;
1667 }
1668
1669 /**
1670 * Returns a size of overlapped region of two ranges.
1671 */
1672 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1673 target_addr_t r1_end,
1674 target_addr_t r2_start,
1675 target_addr_t r2_end)
1676 {
1677 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1678 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1679 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1680 return ov_end - ov_start;
1681 }
1682 return 0;
1683 }
1684
1685 /**
1686 * Check if the address gets to memory regions, and its access mode
1687 */
1688 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1689 {
1690 target_addr_t adr_pos = address; /* address cursor set to the beginning start */
1691 target_addr_t adr_end = address + size; /* region end */
1692 target_addr_t overlap_size;
1693 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1694
1695 while (adr_pos < adr_end) {
1696 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1697 if (!cm) /* address is not belong to anything */
1698 return false;
1699 if ((cm->access & access) != access) /* access check */
1700 return false;
1701 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1702 assert(overlap_size != 0);
1703 adr_pos += overlap_size;
1704 }
1705 return true;
1706 }
1707
1708 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1709 {
1710 struct xtensa *xtensa = target_to_xtensa(target);
1711 /* We are going to read memory in 32-bit increments. This may not be what the calling
1712 * function expects, so we may need to allocate a temp buffer and read into that first. */
1713 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1714 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1715 target_addr_t adr = addrstart_al;
1716 uint8_t *albuff;
1717 bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1718
1719 if (target->state != TARGET_HALTED) {
1720 LOG_TARGET_WARNING(target, "target not halted");
1721 return ERROR_TARGET_NOT_HALTED;
1722 }
1723
1724 if (!xtensa->permissive_mode) {
1725 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1726 XT_MEM_ACCESS_READ)) {
1727 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1728 return ERROR_FAIL;
1729 }
1730 }
1731
1732 if (addrstart_al == address && addrend_al == address + (size * count)) {
1733 albuff = buffer;
1734 } else {
1735 albuff = malloc(addrend_al - addrstart_al);
1736 if (!albuff) {
1737 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1738 addrend_al - addrstart_al);
1739 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1740 }
1741 }
1742
1743 /* We're going to use A3 here */
1744 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1745 /* Write start address to A3 */
1746 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
1747 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1748 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1749 if (xtensa->probe_lsddr32p != 0) {
1750 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1751 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1752 xtensa_queue_dbg_reg_read(xtensa,
1753 (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
1754 &albuff[i]);
1755 } else {
1756 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1757 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1758 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1759 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
1760 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[i]);
1761 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1762 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1763 }
1764 }
1765 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1766 if (res == ERROR_OK) {
1767 bool prev_suppress = xtensa->suppress_dsr_errors;
1768 xtensa->suppress_dsr_errors = true;
1769 res = xtensa_core_status_check(target);
1770 if (xtensa->probe_lsddr32p == -1)
1771 xtensa->probe_lsddr32p = 1;
1772 xtensa->suppress_dsr_errors = prev_suppress;
1773 }
1774 if (res != ERROR_OK) {
1775 if (xtensa->probe_lsddr32p != 0) {
1776 /* Disable fast memory access instructions and retry before reporting an error */
1777 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1778 xtensa->probe_lsddr32p = 0;
1779 res = xtensa_read_memory(target, address, size, count, buffer);
1780 bswap = false;
1781 } else {
1782 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
1783 count * size, address);
1784 }
1785 }
1786
1787 if (bswap)
1788 buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
1789 if (albuff != buffer) {
1790 memcpy(buffer, albuff + (address & 3), (size * count));
1791 free(albuff);
1792 }
1793
1794 return res;
1795 }
1796
1797 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1798 {
1799 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1800 return xtensa_read_memory(target, address, 1, count, buffer);
1801 }
1802
int xtensa_write_memory(struct target *target,
	target_addr_t address,
	uint32_t size,
	uint32_t count,
	const uint8_t *buffer)
{
	/* This memory write function can get thrown nigh everything into it, from
	 * aligned uint32 writes to unaligned uint8ths. The Xtensa memory doesn't always
	 * accept anything but aligned uint32 writes, though. That is why we convert
	 * everything into that. */
	struct xtensa *xtensa = target_to_xtensa(target);
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);	/* word-aligned start of the affected span */
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);	/* word-aligned end of the affected span */
	target_addr_t adr = addrstart_al;
	int res;
	uint8_t *albuff;	/* aligned bounce buffer (may alias 'buffer' when no conversion needed) */
	bool fill_head_tail = false;	/* true when the request is unaligned and partial words must be read back first */

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Unless permissive mode is on, refuse writes outside configured writable regions */
	if (!xtensa->permissive_mode) {
		if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
			LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
			return ERROR_FAIL;
		}
	}

	if (size == 0 || count == 0 || !buffer)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Allocate a temporary buffer to put the aligned bytes in, if needed. */
	if (addrstart_al == address && addrend_al == address + (size * count)) {
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
			/* Need a buffer for byte-swapping */
			albuff = malloc(addrend_al - addrstart_al);
		else
			/* We discard the const here because albuff can also be non-const */
			albuff = (uint8_t *)buffer;
	} else {
		fill_head_tail = true;
		albuff = malloc(addrend_al - addrstart_al);
	}
	if (!albuff) {
		LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
			addrend_al - addrstart_al);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* We're going to use A3 here */
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

	/* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
	if (fill_head_tail) {
		/* See if we need to read the first and/or last word.
		 * Those reads preserve the target bytes outside the caller's range so the
		 * later whole-word writes do not corrupt them. */
		if (address & 3) {
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[0]);
		}
		if ((address + (size * count)) & 3) {
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
				&albuff[addrend_al - addrstart_al - 4]);
		}
		/* Grab bytes */
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK) {
			LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
			if (albuff != buffer)
				free(albuff);
			return res;
		}
		xtensa_core_status_check(target);
		/* The head/tail words arrive in target byte order; on BE targets swap
		 * them to host order before splicing the caller's data in. */
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
			bool swapped_w0 = false;
			if (address & 3) {
				buf_bswap32(&albuff[0], &albuff[0], 4);
				swapped_w0 = true;
			}
			if ((address + (size * count)) & 3) {
				if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
					/* Don't double-swap if buffer start/end are within the same word */
				} else {
					buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
						&albuff[addrend_al - addrstart_al - 4], 4);
				}
			}
		}
		/* Copy data to be written into the aligned buffer (in host-endianness) */
		memcpy(&albuff[address & 3], buffer, size * count);
		/* Now we can write albuff in aligned uint32s. */
	}

	/* Convert the whole aligned span back to target byte order for BE targets */
	if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
		buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);

	/* Write start address to A3 */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	/* Write the aligned buffer */
	if (xtensa->probe_lsddr32p != 0) {
		/* Fast path: SDDR32.P stores DDR through A3 with post-increment;
		 * writing DDREXEC re-executes it for each subsequent word. */
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			if (i == 0) {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
				xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
			}
		}
	} else {
		/* Slow path: explicit RSR/S32I per word, using A4 as data scratch */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
			xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		}
	}

	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res == ERROR_OK) {
		/* Suppress DSR error logging while probing whether SDDR32.P works */
		bool prev_suppress = xtensa->suppress_dsr_errors;
		xtensa->suppress_dsr_errors = true;
		res = xtensa_core_status_check(target);
		if (xtensa->probe_lsddr32p == -1)
			xtensa->probe_lsddr32p = 1;
		xtensa->suppress_dsr_errors = prev_suppress;
	}
	if (res != ERROR_OK) {
		if (xtensa->probe_lsddr32p != 0) {
			/* Disable fast memory access instructions and retry before reporting an error */
			LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
			xtensa->probe_lsddr32p = 0;
			res = xtensa_write_memory(target, address, size, count, buffer);
		} else {
			LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
				count * size, address);
		}
	} else {
		/* Invalidate ICACHE, writeback DCACHE if present */
		uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
		uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
		if (issue_ihi || issue_dhwb) {
			uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
			uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
			uint32_t linesize = MIN(ilinesize, dlinesize);
			uint32_t off = 0;
			adr = addrstart_al;

			/* Walk the written span one cache line at a time, reloading A3
			 * whenever the 8-bit instruction offset would overflow. */
			while ((adr + off) < addrend_al) {
				if (off == 0) {
					/* Write start address to A3 */
					xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr);
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				}
				if (issue_ihi)
					xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
				if (issue_dhwb)
					xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
				off += linesize;
				if (off > 1020) {
					/* IHI, DHWB have 8-bit immediate operands (0..1020) */
					adr += off;
					off = 0;
				}
			}

			/* Execute cache WB/INV instructions */
			res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
			xtensa_core_status_check(target);
			if (res != ERROR_OK)
				LOG_TARGET_ERROR(target,
					"Error issuing cache writeback/invaldate instruction(s): %d",
					res);
		}
	}
	if (albuff != buffer)
		free(albuff);

	return res;
}
2001
2002 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2003 {
2004 /* xtensa_write_memory can handle everything. Just pass on to that. */
2005 return xtensa_write_memory(target, address, 1, count, buffer);
2006 }
2007
int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
{
	/* On-target memory checksumming is not implemented for Xtensa.
	 * Returning ERROR_FAIL signals the caller that no checksum was produced;
	 * presumably generic OpenOCD code then falls back to reading the memory
	 * and checksumming on the host — TODO confirm before relying on this. */
	LOG_WARNING("not implemented yet");
	return ERROR_FAIL;
}
2013
int xtensa_poll(struct target *target)
{
	/* Periodic poll: refresh power/reset/core status from the debug module and
	 * update OpenOCD's view of the target state (reset / running / halted),
	 * including working out the halt reason when a stop is first observed. */
	struct xtensa *xtensa = target_to_xtensa(target);
	if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return ERROR_TARGET_NOT_EXAMINED;
	}

	/* Read power status; the mask passed in is the set of sticky "was reset"
	 * flags to clear on read (see the log message below). */
	int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET(xtensa) |
		PWRSTAT_COREWASRESET(xtensa));
	if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
		LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
			xtensa->dbg_mod.power_status.stat,
			PWRSTAT_DEBUGWASRESET(xtensa) | PWRSTAT_COREWASRESET(xtensa),
			xtensa->dbg_mod.power_status.stath);
	if (res != ERROR_OK)
		return res;

	/* A debug-controller reset wipes SMP break configuration; restore it */
	if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
		LOG_TARGET_INFO(target, "Debug controller was reset.");
		res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
		if (res != ERROR_OK)
			return res;
	}
	if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
		LOG_TARGET_INFO(target, "Core was reset.");
	xtensa_dm_power_status_cache(&xtensa->dbg_mod);
	/* Enable JTAG, set reset if needed */
	res = xtensa_wakeup(target);
	if (res != ERROR_OK)
		return res;

	/* Re-read the core status register (DSR) and log transitions */
	uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
	res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
	if (res != ERROR_OK)
		return res;
	if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
		LOG_TARGET_DEBUG(target,
			"DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
			prev_dsr,
			xtensa->dbg_mod.core_status.dsr);
	if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET(xtensa)) {
		/* if RESET state is persitent */
		target->state = TARGET_RESET;
	} else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
		LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
			xtensa->dbg_mod.core_status.dsr,
			xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
		target->state = TARGET_UNKNOWN;
		/* Allow a few polls for the core to come back before declaring it
		 * unexamined (come_online_probes_num is the remaining grace count) */
		if (xtensa->come_online_probes_num == 0)
			target->examined = false;
		else
			xtensa->come_online_probes_num--;
	} else if (xtensa_is_stopped(target)) {
		if (target->state != TARGET_HALTED) {
			/* First poll that observes the halt: latch state and work out why */
			enum target_state oldstate = target->state;
			target->state = TARGET_HALTED;
			/* Examine why the target has been halted */
			target->debug_reason = DBG_REASON_DBGRQ;
			xtensa_fetch_all_regs(target);
			/* When setting debug reason DEBUGCAUSE events have the following
			 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
			/* Watchpoint and breakpoint events at the same time results in special
			 * debug reason: DBG_REASON_WPTANDBKPT. */
			uint32_t halt_cause = xtensa_cause_get(target);
			/* TODO: Add handling of DBG_REASON_EXC_CATCH */
			if (halt_cause & DEBUGCAUSE_IC)
				target->debug_reason = DBG_REASON_SINGLESTEP;
			if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
				if (halt_cause & DEBUGCAUSE_DB)
					target->debug_reason = DBG_REASON_WPTANDBKPT;
				else
					target->debug_reason = DBG_REASON_BREAKPOINT;
			} else if (halt_cause & DEBUGCAUSE_DB) {
				target->debug_reason = DBG_REASON_WATCHPOINT;
			}
			LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
				", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
				xtensa_reg_get(target, XT_REG_IDX_PC),
				target->debug_reason,
				oldstate);
			LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
				halt_cause,
				xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
				xtensa->dbg_mod.core_status.dsr);
			/* Acknowledge the debug-interrupt sources so they don't re-trigger */
			xtensa_dm_core_status_clear(
				&xtensa->dbg_mod,
				OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
				OCDDSR_DEBUGINTTRAX |
				OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
		}
	} else {
		/* Core is executing; preserve TARGET_DEBUG_RUNNING if already set */
		target->debug_reason = DBG_REASON_NOTHALTED;
		if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
			target->state = TARGET_RUNNING;
			target->debug_reason = DBG_REASON_NOTHALTED;
		}
	}
	if (xtensa->trace_active) {
		/* Detect if tracing was active but has stopped. */
		struct xtensa_trace_status trace_status;
		res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
		if (res == ERROR_OK) {
			if (!(trace_status.stat & TRAXSTAT_TRACT)) {
				LOG_INFO("Detected end of trace.");
				if (trace_status.stat & TRAXSTAT_PCMTG)
					LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
				if (trace_status.stat & TRAXSTAT_PTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
				if (trace_status.stat & TRAXSTAT_CTITG)
					LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
				xtensa->trace_active = false;
			}
		}
	}
	return ERROR_OK;
}
2131
static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
{
	/* Patch up to one icache line of instruction bytes at 'address' while
	 * keeping the caches coherent: writeback+invalidate the dcache line(s),
	 * invalidate the icache line(s), write the new bytes, then writeback the
	 * dcache again so instruction fetch sees the change. */
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
	unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
	uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
	uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
	/* Does the patched range stay within a single cache line, or spill into the next? */
	unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
	unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
	int ret;

	/* Only single-instruction patches (at most one icache line) are supported */
	if (size > icache_line_size)
		return ERROR_FAIL;

	if (issue_ihi || issue_dhwbi) {
		/* We're going to use A3 here */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

		/* Write start address to A3 and invalidate */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
		if (issue_dhwbi) {
			xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
			if (!same_dc_line) {
				/* +4 reaches into the following line since an instruction
				 * spans at most 4 bytes beyond its starting line */
				LOG_TARGET_DEBUG(target,
					"DHWBI second dcache line for address "TARGET_ADDR_FMT,
					address + 4);
				xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
			}
		}
		if (issue_ihi) {
			xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
			if (!same_ic_line) {
				LOG_TARGET_DEBUG(target,
					"IHI second icache line for address "TARGET_ADDR_FMT,
					address + 4);
				xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
			}
		}

		/* Execute invalidate instructions */
		ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		xtensa_core_status_check(target);
		if (ret != ERROR_OK) {
			LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
			return ret;
		}
	}

	/* Write new instructions to memory */
	ret = target_write_buffer(target, address, size, buffer);
	if (ret != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
		return ret;
	}

	if (issue_dhwbi) {
		/* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
		LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
		if (!same_dc_line) {
			LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
		}

		/* Execute invalidate instructions */
		ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		xtensa_core_status_check(target);
	}

	/* TODO: Handle L2 cache if present */
	return ret;
}
2208
2209 static int xtensa_sw_breakpoint_add(struct target *target,
2210 struct breakpoint *breakpoint,
2211 struct xtensa_sw_breakpoint *sw_bp)
2212 {
2213 struct xtensa *xtensa = target_to_xtensa(target);
2214 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2215 if (ret != ERROR_OK) {
2216 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2217 return ret;
2218 }
2219
2220 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2221 sw_bp->oocd_bp = breakpoint;
2222
2223 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2224
2225 /* Underlying memory write will convert instruction endianness, don't do that here */
2226 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2227 if (ret != ERROR_OK) {
2228 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2229 return ret;
2230 }
2231
2232 return ERROR_OK;
2233 }
2234
2235 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2236 {
2237 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2238 if (ret != ERROR_OK) {
2239 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2240 return ret;
2241 }
2242 sw_bp->oocd_bp = NULL;
2243 return ERROR_OK;
2244 }
2245
2246 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2247 {
2248 struct xtensa *xtensa = target_to_xtensa(target);
2249 unsigned int slot;
2250
2251 if (breakpoint->type == BKPT_SOFT) {
2252 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2253 if (!xtensa->sw_brps[slot].oocd_bp ||
2254 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2255 break;
2256 }
2257 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2258 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2259 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2260 }
2261 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2262 if (ret != ERROR_OK) {
2263 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2264 return ret;
2265 }
2266 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2267 slot,
2268 breakpoint->address);
2269 return ERROR_OK;
2270 }
2271
2272 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2273 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2274 break;
2275 }
2276 if (slot == xtensa->core_config->debug.ibreaks_num) {
2277 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2278 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2279 }
2280
2281 xtensa->hw_brps[slot] = breakpoint;
2282 /* We will actually write the breakpoints when we resume the target. */
2283 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2284 slot,
2285 breakpoint->address);
2286
2287 return ERROR_OK;
2288 }
2289
2290 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2291 {
2292 struct xtensa *xtensa = target_to_xtensa(target);
2293 unsigned int slot;
2294
2295 if (breakpoint->type == BKPT_SOFT) {
2296 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2297 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2298 break;
2299 }
2300 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2301 LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2302 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2303 }
2304 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2305 if (ret != ERROR_OK) {
2306 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2307 return ret;
2308 }
2309 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2310 return ERROR_OK;
2311 }
2312
2313 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2314 if (xtensa->hw_brps[slot] == breakpoint)
2315 break;
2316 }
2317 if (slot == xtensa->core_config->debug.ibreaks_num) {
2318 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2319 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2320 }
2321 xtensa->hw_brps[slot] = NULL;
2322 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2323 return ERROR_OK;
2324 }
2325
2326 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2327 {
2328 struct xtensa *xtensa = target_to_xtensa(target);
2329 unsigned int slot;
2330 xtensa_reg_val_t dbreakcval;
2331
2332 if (target->state != TARGET_HALTED) {
2333 LOG_TARGET_WARNING(target, "target not halted");
2334 return ERROR_TARGET_NOT_HALTED;
2335 }
2336
2337 if (watchpoint->mask != ~(uint32_t)0) {
2338 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2339 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2340 }
2341
2342 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2343 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2344 break;
2345 }
2346 if (slot == xtensa->core_config->debug.dbreaks_num) {
2347 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2348 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2349 }
2350
2351 /* Figure out value for dbreakc5..0
2352 * It's basically 0x3F with an incremental bit removed from the LSB for each extra length power of 2. */
2353 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2354 !IS_PWR_OF_2(watchpoint->length) ||
2355 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2356 LOG_TARGET_WARNING(
2357 target,
2358 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2359 " not supported by hardware.",
2360 watchpoint->length,
2361 watchpoint->address);
2362 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2363 }
2364 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2365
2366 if (watchpoint->rw == WPT_READ)
2367 dbreakcval |= BIT(30);
2368 if (watchpoint->rw == WPT_WRITE)
2369 dbreakcval |= BIT(31);
2370 if (watchpoint->rw == WPT_ACCESS)
2371 dbreakcval |= BIT(30) | BIT(31);
2372
2373 /* Write DBREAKA[slot] and DBCREAKC[slot] */
2374 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2375 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2376 xtensa->hw_wps[slot] = watchpoint;
2377 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2378 watchpoint->address);
2379 return ERROR_OK;
2380 }
2381
2382 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2383 {
2384 struct xtensa *xtensa = target_to_xtensa(target);
2385 unsigned int slot;
2386
2387 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2388 if (xtensa->hw_wps[slot] == watchpoint)
2389 break;
2390 }
2391 if (slot == xtensa->core_config->debug.dbreaks_num) {
2392 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2393 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2394 }
2395 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2396 xtensa->hw_wps[slot] = NULL;
2397 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2398 watchpoint->address);
2399 return ERROR_OK;
2400 }
2401
2402 static int xtensa_build_reg_cache(struct target *target)
2403 {
2404 struct xtensa *xtensa = target_to_xtensa(target);
2405 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2406 unsigned int last_dbreg_num = 0;
2407
2408 if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
2409 LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2410 xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);
2411
2412 struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2413
2414 if (!reg_cache) {
2415 LOG_ERROR("Failed to alloc reg cache!");
2416 return ERROR_FAIL;
2417 }
2418 reg_cache->name = "Xtensa registers";
2419 reg_cache->next = NULL;
2420 /* Init reglist */
2421 unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2422 struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2423 if (!reg_list) {
2424 LOG_ERROR("Failed to alloc reg list!");
2425 goto fail;
2426 }
2427 xtensa->dbregs_num = 0;
2428 unsigned int didx = 0;
2429 for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2430 struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2431 unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2432 for (unsigned int i = 0; i < listsize; i++, didx++) {
2433 reg_list[didx].exist = rlist[i].exist;
2434 reg_list[didx].name = rlist[i].name;
2435 reg_list[didx].size = 32;
2436 reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2437 if (!reg_list[didx].value) {
2438 LOG_ERROR("Failed to alloc reg list value!");
2439 goto fail;
2440 }
2441 reg_list[didx].dirty = false;
2442 reg_list[didx].valid = false;
2443 reg_list[didx].type = &xtensa_reg_type;
2444 reg_list[didx].arch_info = xtensa;
2445 if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2446 last_dbreg_num = rlist[i].dbreg_num;
2447
2448 if (xtensa_extra_debug_log) {
2449 LOG_TARGET_DEBUG(target,
2450 "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2451 reg_list[didx].name,
2452 whichlist,
2453 reg_list[didx].exist,
2454 didx,
2455 rlist[i].type,
2456 rlist[i].dbreg_num);
2457 }
2458 }
2459 }
2460
2461 xtensa->dbregs_num = last_dbreg_num + 1;
2462 reg_cache->reg_list = reg_list;
2463 reg_cache->num_regs = reg_list_size;
2464
2465 LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2466 xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
2467
2468 /* Construct empty-register list for handling unknown register requests */
2469 xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
2470 if (!xtensa->empty_regs) {
2471 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2472 goto fail;
2473 }
2474 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2475 xtensa->empty_regs[i].name = calloc(8, sizeof(char));
2476 if (!xtensa->empty_regs[i].name) {
2477 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2478 goto fail;
2479 }
2480 sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
2481 xtensa->empty_regs[i].size = 32;
2482 xtensa->empty_regs[i].type = &xtensa_reg_type;
2483 xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2484 if (!xtensa->empty_regs[i].value) {
2485 LOG_ERROR("Failed to alloc empty reg list value!");
2486 goto fail;
2487 }
2488 xtensa->empty_regs[i].arch_info = xtensa;
2489 }
2490
2491 /* Construct contiguous register list from contiguous descriptor list */
2492 if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
2493 xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
2494 if (!xtensa->contiguous_regs_list) {
2495 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2496 goto fail;
2497 }
2498 for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
2499 unsigned int j;
2500 for (j = 0; j < reg_cache->num_regs; j++) {
2501 if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
2502 xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
2503 LOG_TARGET_DEBUG(target,
2504 "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
2505 xtensa->contiguous_regs_list[i]->name,
2506 xtensa->contiguous_regs_desc[i]->dbreg_num);
2507 break;
2508 }
2509 }
2510 if (j == reg_cache->num_regs)
2511 LOG_TARGET_WARNING(target, "contiguous register %s not found",
2512 xtensa->contiguous_regs_desc[i]->name);
2513 }
2514 }
2515
2516 xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2517 if (!xtensa->algo_context_backup) {
2518 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2519 goto fail;
2520 }
2521 for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2522 struct reg *reg = &reg_cache->reg_list[i];
2523 xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2524 if (!xtensa->algo_context_backup[i]) {
2525 LOG_ERROR("Failed to alloc mem for algorithm context!");
2526 goto fail;
2527 }
2528 }
2529 xtensa->core_cache = reg_cache;
2530 if (cache_p)
2531 *cache_p = reg_cache;
2532 return ERROR_OK;
2533
2534 fail:
2535 if (reg_list) {
2536 for (unsigned int i = 0; i < reg_list_size; i++)
2537 free(reg_list[i].value);
2538 free(reg_list);
2539 }
2540 if (xtensa->empty_regs) {
2541 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2542 free((void *)xtensa->empty_regs[i].name);
2543 free(xtensa->empty_regs[i].value);
2544 }
2545 free(xtensa->empty_regs);
2546 }
2547 if (xtensa->algo_context_backup) {
2548 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2549 free(xtensa->algo_context_backup[i]);
2550 free(xtensa->algo_context_backup);
2551 }
2552 free(reg_cache);
2553
2554 return ERROR_FAIL;
2555 }
2556
2557 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
2558 {
2559 struct xtensa *xtensa = target_to_xtensa(target);
2560 int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
2561 /* Process op[] list */
2562 while (opstr && (*opstr == ':')) {
2563 uint8_t ops[32];
2564 unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
2565 if (oplen > 32) {
2566 LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
2567 break;
2568 }
2569 unsigned int i = 0;
2570 while ((i < oplen) && opstr && (*opstr == ':'))
2571 ops[i++] = strtoul(opstr + 1, &opstr, 16);
2572 if (i != oplen) {
2573 LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
2574 break;
2575 }
2576
2577 char insn_buf[128];
2578 sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
2579 for (i = 0; i < oplen; i++)
2580 sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
2581 LOG_TARGET_DEBUG(target, "%s", insn_buf);
2582 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
2583 status = ERROR_OK;
2584 }
2585 return status;
2586 }
2587
/* Handle the gdb qxtreg/Qxtreg custom packets: read or write a TIE
 * (user-defined) register by executing target-supplied TIE instruction
 * sequences, using a previously-registered spill memory area (set via the
 * Qxtspill packet) as a data staging buffer.
 * On success writes the gdb reply into *response_p and returns ERROR_OK;
 * on failure writes the matching xt_qerr string and returns its code.
 */
static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	/* 'Q' prefix means write ("Qxtreg..."), 'q' means read */
	bool iswrite = (packet[0] == 'Q');
	enum xtensa_qerr_e error;

	/* Read/write TIE register. Requires spill location.
	 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
	 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
	 */
	if (!(xtensa->spill_buf)) {
		LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
		error = XT_QERR_FAIL;
		goto xtensa_gdbqc_qxtreg_fail;
	}

	/* Parse "<num>:<len>:" header; both fields are hex */
	char *delim;
	uint32_t regnum = strtoul(packet + 6, &delim, 16);
	if (*delim != ':') {
		LOG_ERROR("Malformed qxtreg packet");
		error = XT_QERR_INVAL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	uint32_t reglen = strtoul(delim + 1, &delim, 16);
	if (*delim != ':') {
		LOG_ERROR("Malformed qxtreg packet");
		error = XT_QERR_INVAL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
	memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
	LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
	/* Hex-encoded reply needs 2 chars per byte plus NUL */
	if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
		LOG_ERROR("TIE register too large");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}

	/* (1) Save spill memory, (1.5) [if write then store value to spill location],
	 * (2) read old a4, (3) write spill address to a4.
	 * NOTE: ensure a4 is restored properly by all error handling logic
	 */
	/* Use byte accesses if the spill area is unaligned, else 32-bit words */
	unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
	int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
		xtensa->spill_bytes / memop_size, xtensa->spill_buf);
	if (status != ERROR_OK) {
		LOG_ERROR("Spill memory save");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	if (iswrite) {
		/* Extract value and store in spill memory */
		unsigned int b = 0;
		char *valbuf = strchr(delim, '=');
		if (!(valbuf && (*valbuf == '='))) {
			LOG_ERROR("Malformed Qxtreg packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdbqc_qxtreg_fail;
		}
		valbuf++;
		/* Decode hex byte pairs until the end of the value string */
		while (*valbuf && *(valbuf + 1)) {
			char bytestr[3] = { 0, 0, 0 };
			strncpy(bytestr, valbuf, 2);
			regbuf[b++] = strtoul(bytestr, NULL, 16);
			valbuf += 2;
		}
		if (b != reglen) {
			LOG_ERROR("Malformed Qxtreg packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdbqc_qxtreg_fail;
		}
		status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
			reglen / memop_size, regbuf);
		if (status != ERROR_OK) {
			LOG_ERROR("TIE value store");
			error = XT_QERR_MEM;
			goto xtensa_gdbqc_qxtreg_fail;
		}
	}
	/* Point a4 at the spill area; the TIE sequences use it as a base address */
	xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, xtensa->spill_loc);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));

	int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);

	/* Restore a4 but not yet spill memory. Execute it all... */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, orig_a4);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
	status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (status != ERROR_OK) {
		LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
		tieop_status = status;
	}
	status = xtensa_core_status_check(target);
	if (status != ERROR_OK) {
		LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
		tieop_status = status;
	}

	if (tieop_status == ERROR_OK) {
		if (iswrite) {
			/* TIE write succeeded; send OK */
			strcpy(*response_p, "OK");
		} else {
			/* TIE read succeeded; copy result from spill memory */
			/* NOTE(review): this passes 'reglen' as the unit count with unit
			 * size 'memop_size' (the write above uses reglen / memop_size) —
			 * verify regbuf cannot be overrun when memop_size == 4 */
			status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
			if (status != ERROR_OK) {
				LOG_TARGET_ERROR(target, "TIE result read");
				tieop_status = status;
			}
			/* Hex-encode the value for the gdb reply */
			unsigned int i;
			for (i = 0; i < reglen; i++)
				sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
			*(*response_p + 2 * i) = '\0';
			LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
		}
	}

	/* Restore spill memory first, then report any previous errors */
	status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
		xtensa->spill_bytes / memop_size, xtensa->spill_buf);
	if (status != ERROR_OK) {
		LOG_ERROR("Spill memory restore");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	if (tieop_status != ERROR_OK) {
		LOG_ERROR("TIE execution");
		error = XT_QERR_FAIL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	return ERROR_OK;

xtensa_gdbqc_qxtreg_fail:
	strcpy(*response_p, xt_qerr[error].chrval);
	return xt_qerr[error].intval;
}
2725
/* Dispatch Xtensa-specific gdb query packets (qxt*/Qxt*).
 * Most packets cross-check host-side configuration (caches, local memories,
 * EXCM level) against the values configured via xtensa-core-XXX.cfg and only
 * warn on mismatch; Qxtspill registers the spill area needed by qxtreg.
 * *response_p is pointed at xtensa->qpkt_resp and filled with the reply.
 * Unknown queries are answered with an empty string (not an error).
 */
int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	enum xtensa_qerr_e error;
	if (!packet || !response_p) {
		LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
		return ERROR_FAIL;
	}

	/* All replies are built in the per-target response buffer */
	*response_p = xtensa->qpkt_resp;
	if (strncmp(packet, "qxtn", 4) == 0) {
		/* Debug-agent name query */
		strcpy(*response_p, "OpenOCD");
		return ERROR_OK;
	} else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
		return ERROR_OK;
	} else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
		/* Confirm host cache params match core .cfg file */
		/* packet[4] selects i-cache ('i') vs d-cache ('d') */
		struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
			&xtensa->core_config->icache : &xtensa->core_config->dcache;
		unsigned int line_size = 0, size = 0, way_count = 0;
		sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
		if ((cachep->line_size != line_size) ||
			(cachep->size != size) ||
			(cachep->way_count != way_count)) {
			LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
				cachep == &xtensa->core_config->icache ? 'I' : 'D');
		}
		strcpy(*response_p, "OK");
		return ERROR_OK;
	} else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
		/* Confirm host IRAM/IROM params match core .cfg file */
		/* packet[5] selects iram ('a') vs irom */
		struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
			&xtensa->core_config->iram : &xtensa->core_config->irom;
		unsigned int base = 0, size = 0, i;
		char *pkt = (char *)&packet[7];
		do {
			/* Each entry: ,<size>,<base> followed by 11 more hex fields
			 * that are parsed and discarded below */
			pkt++;
			size = strtoul(pkt, &pkt, 16);
			pkt++;
			base = strtoul(pkt, &pkt, 16);
			LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
			for (i = 0; i < memp->count; i++) {
				if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
					break;
			}
			if (i == memp->count) {
				LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
					memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
				break;
			}
			/* Skip the remaining 11 fields of this entry */
			for (i = 0; i < 11; i++) {
				pkt++;
				strtoul(pkt, &pkt, 16);
			}
		} while (pkt && (pkt[0] == ','));
		strcpy(*response_p, "OK");
		return ERROR_OK;
	} else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
		/* Confirm host EXCM_LEVEL matches core .cfg file */
		unsigned int excm_level = strtoul(&packet[11], NULL, 0);
		if (!xtensa->core_config->high_irq.enabled ||
			(excm_level != xtensa->core_config->high_irq.excm_level))
			LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
		strcpy(*response_p, "OK");
		return ERROR_OK;
	} else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
		(strncmp(packet, "Qxtl2ca=", 8) == 0) ||
		(strncmp(packet, "Qxtdensity=", 11) == 0)) {
		/* Accepted but currently unused */
		strcpy(*response_p, "OK");
		return ERROR_OK;
	} else if (strncmp(packet, "Qxtspill=", 9) == 0) {
		/* Register spill area: Qxtspill=<addr>:<bytes>, both hex */
		char *delim;
		uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
		if (*delim != ':') {
			LOG_ERROR("Malformed Qxtspill packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdb_query_custom_fail;
		}
		xtensa->spill_loc = spill_loc;
		xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
		/* Replace any previously-registered spill buffer */
		if (xtensa->spill_buf)
			free(xtensa->spill_buf);
		xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
		if (!xtensa->spill_buf) {
			LOG_ERROR("Spill buf alloc");
			error = XT_QERR_MEM;
			goto xtensa_gdb_query_custom_fail;
		}
		LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
		strcpy(*response_p, "OK");
		return ERROR_OK;
	} else if (strncasecmp(packet, "qxtreg", 6) == 0) {
		/* TIE register access; handled separately */
		return xtensa_gdbqc_qxtreg(target, packet, response_p);
	} else if ((strncmp(packet, "qTStatus", 8) == 0) ||
		(strncmp(packet, "qxtftie", 7) == 0) ||
		(strncmp(packet, "qxtstie", 7) == 0)) {
		/* Return empty string to indicate trace, TIE wire debug are unsupported */
		strcpy(*response_p, "");
		return ERROR_OK;
	}

	/* Warn for all other queries, but do not return errors */
	LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
	strcpy(*response_p, "");
	return ERROR_OK;

xtensa_gdb_query_custom_fail:
	strcpy(*response_p, xt_qerr[error].chrval);
	return xt_qerr[error].intval;
}
2836
2837 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2838 const struct xtensa_debug_module_config *dm_cfg)
2839 {
2840 target->arch_info = xtensa;
2841 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2842 xtensa->target = target;
2843 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2844
2845 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
2846 if (!xtensa->core_config) {
2847 LOG_ERROR("Xtensa configuration alloc failed\n");
2848 return ERROR_FAIL;
2849 }
2850
2851 /* Default cache settings are disabled with 1 way */
2852 xtensa->core_config->icache.way_count = 1;
2853 xtensa->core_config->dcache.way_count = 1;
2854
2855 /* chrval: AR3/AR4 register names will change with window mapping.
2856 * intval: tracks whether scratch register was set through gdb P packet.
2857 */
2858 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
2859 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
2860 if (!xtensa->scratch_ars[s].chrval) {
2861 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
2862 free(xtensa->scratch_ars[f].chrval);
2863 free(xtensa->core_config);
2864 LOG_ERROR("Xtensa scratch AR alloc failed\n");
2865 return ERROR_FAIL;
2866 }
2867 xtensa->scratch_ars[s].intval = false;
2868 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
2869 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
2870 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
2871 }
2872
2873 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2874 }
2875
2876 void xtensa_set_permissive_mode(struct target *target, bool state)
2877 {
2878 target_to_xtensa(target)->permissive_mode = state;
2879 }
2880
2881 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2882 {
2883 struct xtensa *xtensa = target_to_xtensa(target);
2884
2885 xtensa->come_online_probes_num = 3;
2886 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
2887 if (!xtensa->hw_brps) {
2888 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2889 return ERROR_FAIL;
2890 }
2891 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
2892 if (!xtensa->hw_wps) {
2893 free(xtensa->hw_brps);
2894 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2895 return ERROR_FAIL;
2896 }
2897 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2898 if (!xtensa->sw_brps) {
2899 free(xtensa->hw_brps);
2900 free(xtensa->hw_wps);
2901 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2902 return ERROR_FAIL;
2903 }
2904
2905 xtensa->spill_loc = 0xffffffff;
2906 xtensa->spill_bytes = 0;
2907 xtensa->spill_buf = NULL;
2908 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
2909
2910 return xtensa_build_reg_cache(target);
2911 }
2912
2913 static void xtensa_free_reg_cache(struct target *target)
2914 {
2915 struct xtensa *xtensa = target_to_xtensa(target);
2916 struct reg_cache *cache = xtensa->core_cache;
2917
2918 if (cache) {
2919 register_unlink_cache(&target->reg_cache, cache);
2920 for (unsigned int i = 0; i < cache->num_regs; i++) {
2921 free(xtensa->algo_context_backup[i]);
2922 free(cache->reg_list[i].value);
2923 }
2924 free(xtensa->algo_context_backup);
2925 free(cache->reg_list);
2926 free(cache);
2927 }
2928 xtensa->core_cache = NULL;
2929 xtensa->algo_context_backup = NULL;
2930
2931 if (xtensa->empty_regs) {
2932 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2933 free((void *)xtensa->empty_regs[i].name);
2934 free(xtensa->empty_regs[i].value);
2935 }
2936 free(xtensa->empty_regs);
2937 }
2938 xtensa->empty_regs = NULL;
2939 if (xtensa->optregs) {
2940 for (unsigned int i = 0; i < xtensa->num_optregs; i++)
2941 free((void *)xtensa->optregs[i].name);
2942 free(xtensa->optregs);
2943 }
2944 xtensa->optregs = NULL;
2945 }
2946
2947 void xtensa_target_deinit(struct target *target)
2948 {
2949 struct xtensa *xtensa = target_to_xtensa(target);
2950
2951 LOG_DEBUG("start");
2952
2953 if (target_was_examined(target)) {
2954 int ret = xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, OCDDCR_ENABLEOCD);
2955 if (ret != ERROR_OK) {
2956 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2957 return;
2958 }
2959 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2960 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2961 if (ret != ERROR_OK) {
2962 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2963 return;
2964 }
2965 xtensa_dm_deinit(&xtensa->dbg_mod);
2966 }
2967 xtensa_free_reg_cache(target);
2968 free(xtensa->hw_brps);
2969 free(xtensa->hw_wps);
2970 free(xtensa->sw_brps);
2971 if (xtensa->spill_buf) {
2972 free(xtensa->spill_buf);
2973 xtensa->spill_buf = NULL;
2974 }
2975 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
2976 free(xtensa->scratch_ars[s].chrval);
2977 free(xtensa->core_config);
2978 }
2979
/* Report the architecture name gdb should use for this target. */
const char *xtensa_get_gdb_arch(struct target *target)
{
	static const char *arch_name = "xtensa";

	return arch_name;
}
2984
/* exe <ascii-encoded hexadecimal instruction bytes> */
/* Decode the hex-encoded instruction byte string, temporarily enable all
 * coprocessors via CPENABLE, execute the instructions on the target, then
 * refetch the register cache and restore the saved EXCCAUSE/CPENABLE.
 * Returns the execution status (ERROR_OK on success).
 */
static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Process ascii-encoded hex byte string */
	const char *parm = CMD_ARGV[0];
	unsigned int parm_len = strlen(parm);
	/* < 64 hex chars keeps the decoded bytes within ops[32] below */
	if ((parm_len >= 64) || (parm_len & 1)) {
		LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
		return ERROR_FAIL;
	}

	uint8_t ops[32];
	memset(ops, 0, 32);
	unsigned int oplen = parm_len / 2;
	/* Decode two hex characters per instruction byte */
	char encoded_byte[3] = { 0, 0, 0 };
	for (unsigned int i = 0; i < oplen; i++) {
		encoded_byte[0] = *parm++;
		encoded_byte[1] = *parm++;
		ops[i] = strtoul(encoded_byte, NULL, 16);
	}

	/* GDB must handle state save/restore.
	 * Flush reg cache in case spill location is in an AR
	 * Update CPENABLE only for this execution; later restore cached copy
	 * Keep a copy of exccause in case executed code triggers an exception
	 */
	int status = xtensa_write_dirty_registers(target);
	if (status != ERROR_OK) {
		LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
		return ERROR_FAIL;
	}
	xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
	xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
	xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
	/* Write 0xffffffff to CPENABLE via DDR and a3 (all coprocessors on),
	 * then restore a3's original value the same way */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
		xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));

	/* Queue instruction list and execute everything */
	LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
	xtensa_queue_exec_ins_wide(xtensa, ops, oplen);	/* Handles endian-swap */
	status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (status != ERROR_OK)
		LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
	status = xtensa_core_status_check(target);
	if (status != ERROR_OK)
		LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);

	/* Reread register cache and restore saved regs after instruction execution */
	if (xtensa_fetch_all_regs(target) != ERROR_OK)
		LOG_TARGET_ERROR(target, "%s: Failed to fetch register cache (post-exec).", target_name(target));
	xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
	xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	return status;
}
3048
3049 COMMAND_HANDLER(xtensa_cmd_exe)
3050 {
3051 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3052 }
3053
3054 /* xtdef <name> */
3055 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3056 {
3057 if (CMD_ARGC != 1)
3058 return ERROR_COMMAND_SYNTAX_ERROR;
3059
3060 const char *core_name = CMD_ARGV[0];
3061 if (strcasecmp(core_name, "LX") == 0) {
3062 xtensa->core_config->core_type = XT_LX;
3063 } else {
3064 LOG_ERROR("xtdef [LX]\n");
3065 return ERROR_COMMAND_SYNTAX_ERROR;
3066 }
3067 return ERROR_OK;
3068 }
3069
3070 COMMAND_HANDLER(xtensa_cmd_xtdef)
3071 {
3072 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3073 target_to_xtensa(get_current_target(CMD_CTX)));
3074 }
3075
/* Validate that an xtopt value lies within [min, max]; logs an error and
 * returns false when it does not.
 * Fix: 'opt' is never modified, so it is now const-qualified — all callers
 * pass string literals.
 */
static inline bool xtensa_cmd_xtopt_legal_val(const char *opt, int val, int min, int max)
{
	if ((val < min) || (val > max)) {
		LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
		return false;
	}
	return true;
}
3084
/* xtopt <name> <value> */
/* Parse one 'xtopt <name> <value>' pair from the core .cfg file and store
 * the validated value into the core configuration. Unknown option names are
 * warned about but ignored (returns ERROR_OK); out-of-range values return
 * ERROR_COMMAND_ARGUMENT_INVALID.
 */
COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
{
	if (CMD_ARGC != 2)
		return ERROR_COMMAND_SYNTAX_ERROR;

	const char *opt_name = CMD_ARGV[0];
	/* base 0: accepts decimal, 0x-hex, and octal forms */
	int opt_val = strtol(CMD_ARGV[1], NULL, 0);
	if (strcasecmp(opt_name, "arnum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->aregs_num = opt_val;
	} else if (strcasecmp(opt_name, "windowed") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->windowed = opt_val;
	} else if (strcasecmp(opt_name, "cpenable") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->coproc = opt_val;
	} else if (strcasecmp(opt_name, "exceptions") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->exceptions = opt_val;
	} else if (strcasecmp(opt_name, "intnum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		/* 0 interrupts means the interrupt option is absent */
		xtensa->core_config->irq.enabled = (opt_val > 0);
		xtensa->core_config->irq.irq_num = opt_val;
	} else if (strcasecmp(opt_name, "hipriints") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->high_irq.enabled = opt_val;
	} else if (strcasecmp(opt_name, "excmlevel") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		/* excmlevel only makes sense with high-priority interrupts, so
		 * 'hipriints' must appear earlier in the .cfg file */
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_ERROR("xtopt excmlevel requires hipriints\n");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->high_irq.excm_level = opt_val;
	} else if (strcasecmp(opt_name, "intlevels") == 0) {
		/* Valid range differs between LX and other core types */
		if (xtensa->core_config->core_type == XT_LX) {
			if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		} else {
			if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_ERROR("xtopt intlevels requires hipriints\n");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->high_irq.level_num = opt_val;
	} else if (strcasecmp(opt_name, "debuglevel") == 0) {
		if (xtensa->core_config->core_type == XT_LX) {
			if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		} else {
			if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->debug.enabled = 1;
		xtensa->core_config->debug.irq_level = opt_val;
	} else if (strcasecmp(opt_name, "ibreaknum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.ibreaks_num = opt_val;
	} else if (strcasecmp(opt_name, "dbreaknum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.dbreaks_num = opt_val;
	} else if (strcasecmp(opt_name, "tracemem") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		/* Trace size 0 means the trace option is absent */
		xtensa->core_config->trace.mem_sz = opt_val;
		xtensa->core_config->trace.enabled = (opt_val > 0);
	} else if (strcasecmp(opt_name, "tracememrev") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->trace.reversed_mem_access = opt_val;
	} else if (strcasecmp(opt_name, "perfcount") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.perfcount_num = opt_val;
	} else {
		/* Deliberately non-fatal: newer .cfg files may carry options this
		 * version does not know about */
		LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
		return ERROR_OK;
	}

	return ERROR_OK;
}
3177
3178 COMMAND_HANDLER(xtensa_cmd_xtopt)
3179 {
3180 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3181 target_to_xtensa(get_current_target(CMD_CTX)));
3182 }
3183
/* xtmem <type> [parameters] */
/* Record one cache or local-memory description from the core .cfg file:
 * caches take <linebytes> <cachebytes> <ways> [writeback]; local memories
 * take <baseaddr> <bytes>. The l2cache/l2addr types are accepted but
 * currently ignored (TODO).
 */
COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
{
	struct xtensa_cache_config *cachep = NULL;
	struct xtensa_local_mem_config *memp = NULL;
	int mem_access = 0;
	bool is_dcache = false;

	if (CMD_ARGC == 0) {
		LOG_ERROR("xtmem <type> [parameters]\n");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	/* First argument selects which cache or memory config to fill;
	 * RAM-type memories are read/write, ROM-type are read-only */
	const char *mem_name = CMD_ARGV[0];
	if (strcasecmp(mem_name, "icache") == 0) {
		cachep = &xtensa->core_config->icache;
	} else if (strcasecmp(mem_name, "dcache") == 0) {
		cachep = &xtensa->core_config->dcache;
		is_dcache = true;
	} else if (strcasecmp(mem_name, "l2cache") == 0) {
		/* TODO: support L2 cache */
	} else if (strcasecmp(mem_name, "l2addr") == 0) {
		/* TODO: support L2 cache */
	} else if (strcasecmp(mem_name, "iram") == 0) {
		memp = &xtensa->core_config->iram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "dram") == 0) {
		memp = &xtensa->core_config->dram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "sram") == 0) {
		memp = &xtensa->core_config->sram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "irom") == 0) {
		memp = &xtensa->core_config->irom;
		mem_access = XT_MEM_ACCESS_READ;
	} else if (strcasecmp(mem_name, "drom") == 0) {
		memp = &xtensa->core_config->drom;
		mem_access = XT_MEM_ACCESS_READ;
	} else if (strcasecmp(mem_name, "srom") == 0) {
		memp = &xtensa->core_config->srom;
		mem_access = XT_MEM_ACCESS_READ;
	} else {
		LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	if (cachep) {
		if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
			LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
		cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
		cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
		/* The optional writeback flag only applies to the data cache */
		cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
			strtoul(CMD_ARGV[4], NULL, 0) : 0;
	} else if (memp) {
		if (CMD_ARGC != 3) {
			LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* NOTE(review): regions[memp->count] is not bounds-checked against
		 * the regions array capacity — confirm the .cfg file cannot supply
		 * more entries than the array holds */
		struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
		memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
		memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
		memcfgp->access = mem_access;
		memp->count++;
	}

	return ERROR_OK;
}
3254
3255 COMMAND_HANDLER(xtensa_cmd_xtmem)
3256 {
3257 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3258 target_to_xtensa(get_current_target(CMD_CTX)));
3259 }
3260
3261 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
3262 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3263 {
3264 if (CMD_ARGC != 4) {
3265 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3266 return ERROR_COMMAND_SYNTAX_ERROR;
3267 }
3268
3269 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3270 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3271 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3272 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3273
3274 if ((nfgseg > 32)) {
3275 LOG_ERROR("<nfgseg> must be within [0..32]\n");
3276 return ERROR_COMMAND_ARGUMENT_INVALID;
3277 } else if (minsegsize & (minsegsize - 1)) {
3278 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3279 return ERROR_COMMAND_ARGUMENT_INVALID;
3280 } else if (lockable > 1) {
3281 LOG_ERROR("<lockable> must be 0 or 1\n");
3282 return ERROR_COMMAND_ARGUMENT_INVALID;
3283 } else if (execonly > 1) {
3284 LOG_ERROR("<execonly> must be 0 or 1\n");
3285 return ERROR_COMMAND_ARGUMENT_INVALID;
3286 }
3287
3288 xtensa->core_config->mpu.enabled = true;
3289 xtensa->core_config->mpu.nfgseg = nfgseg;
3290 xtensa->core_config->mpu.minsegsize = minsegsize;
3291 xtensa->core_config->mpu.lockable = lockable;
3292 xtensa->core_config->mpu.execonly = execonly;
3293 return ERROR_OK;
3294 }
3295
3296 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3297 {
3298 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3299 target_to_xtensa(get_current_target(CMD_CTX)));
3300 }
3301
3302 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
3303 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3304 {
3305 if (CMD_ARGC != 2) {
3306 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3307 return ERROR_COMMAND_SYNTAX_ERROR;
3308 }
3309
3310 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3311 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3312 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3313 LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3314 return ERROR_COMMAND_ARGUMENT_INVALID;
3315 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3316 LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3317 return ERROR_COMMAND_ARGUMENT_INVALID;
3318 }
3319
3320 xtensa->core_config->mmu.enabled = true;
3321 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3322 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3323 return ERROR_OK;
3324 }
3325
3326 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3327 {
3328 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3329 target_to_xtensa(get_current_target(CMD_CTX)));
3330 }
3331
/* xtregs <numregs>
 * xtreg <regname> <regnum>
 *
 * One-argument form ("xtregs <numregs>"): declares the total register count
 * and allocates the config-specific register table.
 * Two-argument form ("xtreg <regname> <regnum>"): defines a single register,
 * either flagging a known core register in xtensa_regs[] as existing or
 * appending a config-specific entry to xtensa->optregs[]. */
COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
{
	if (CMD_ARGC == 1) {
		int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
		if ((numregs <= 0) || (numregs > UINT16_MAX)) {
			LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* If "xtregfmt contiguous <numgregs>" was given earlier, the total
		 * count must be large enough to cover the g-packet register set. */
		if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
			LOG_ERROR("xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
				numregs, xtensa->genpkt_regs_num);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		xtensa->total_regs_num = numregs;
		xtensa->core_regs_num = 0;
		xtensa->num_optregs = 0;
		/* A little more memory than required, but saves a second initialization pass */
		xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
		if (!xtensa->optregs) {
			LOG_ERROR("Failed to allocate xtensa->optregs!");
			return ERROR_FAIL;
		}
		return ERROR_OK;
	} else if (CMD_ARGC != 2) {
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	/* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
	 * if general register (g-packet) requests or contiguous register maps are supported */
	if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
		xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
		if (!xtensa->contiguous_regs_desc) {
			LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
			return ERROR_FAIL;
		}
	}

	const char *regname = CMD_ARGV[0];
	unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
	if (regnum > UINT16_MAX) {
		LOG_ERROR("<regnum> must be a 16-bit number");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	/* Reject definitions beyond the count declared via "xtregs". */
	if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
		if (xtensa->total_regs_num)
			LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
				regname, regnum,
				xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
		else
			LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
				regname, regnum);
		return ERROR_FAIL;
	}

	/* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
	struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
	bool is_extended_reg = true;
	unsigned int ridx;
	for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
		if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
			/* Flag core register as defined */
			rptr = &xtensa_regs[ridx];
			xtensa->core_regs_num++;
			is_extended_reg = false;
			break;
		}
	}

	rptr->exist = true;
	if (is_extended_reg) {
		/* Register ID, debugger-visible register ID */
		rptr->name = strdup(CMD_ARGV[0]);
		rptr->dbreg_num = regnum;
		rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
		xtensa->num_optregs++;

		/* Register type, decoded from bit fields of the encoded regnum */
		if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
			rptr->type = XT_REG_GENERAL;
		} else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
			rptr->type = XT_REG_USER;
		} else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
			rptr->type = XT_REG_FR;
		} else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
			rptr->type = XT_REG_SPECIAL;
		} else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
			/* WARNING: For these registers, regnum points to the
			 * index of the corresponding ARx registers, NOT to
			 * the processor register number! */
			rptr->type = XT_REG_RELGEN;
			rptr->reg_num += XT_REG_IDX_ARFIRST;
			rptr->dbreg_num += XT_REG_IDX_ARFIRST;
		} else if ((regnum & XT_REG_TIE_MASK) != 0) {
			rptr->type = XT_REG_TIE;
		} else {
			rptr->type = XT_REG_OTHER;
		}

		/* Register flags; these registers are marked non-readable,
		 * presumably because reading them has side effects — TODO confirm */
		if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
			(strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
			(strcmp(rptr->name, "intclear") == 0))
			rptr->flags = XT_REGF_NOREAD;
		else
			rptr->flags = 0;

		/* On LX cores, remember the register-cache index of the EPS special
		 * register that corresponds to the configured debug IRQ level. */
		if ((rptr->reg_num == (XT_PS_REG_NUM_BASE + xtensa->core_config->debug.irq_level)) &&
			(xtensa->core_config->core_type == XT_LX) && (rptr->type == XT_REG_SPECIAL)) {
			xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
			LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
		}
	} else if (strcmp(rptr->name, "cpenable") == 0) {
		/* Defining CPENABLE marks the coprocessor option as present */
		xtensa->core_config->coproc = true;
	}

	/* Build out list of contiguous registers in specified order */
	unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
	if (xtensa->contiguous_regs_desc) {
		assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
		xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
	}
	if (xtensa_extra_debug_log)
		LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
			is_extended_reg ? "config-specific" : "core",
			rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
			is_extended_reg ? xtensa->num_optregs : ridx,
			is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
	return ERROR_OK;
}
3464
3465 COMMAND_HANDLER(xtensa_cmd_xtreg)
3466 {
3467 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3468 target_to_xtensa(get_current_target(CMD_CTX)));
3469 }
3470
3471 /* xtregfmt <contiguous|sparse> [numgregs] */
3472 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3473 {
3474 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3475 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3476 return ERROR_OK;
3477 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3478 xtensa->regmap_contiguous = true;
3479 if (CMD_ARGC == 2) {
3480 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3481 if ((numgregs <= 0) ||
3482 ((numgregs > xtensa->total_regs_num) &&
3483 (xtensa->total_regs_num > 0))) {
3484 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3485 numgregs, xtensa->total_regs_num);
3486 return ERROR_COMMAND_SYNTAX_ERROR;
3487 }
3488 xtensa->genpkt_regs_num = numgregs;
3489 }
3490 return ERROR_OK;
3491 }
3492 }
3493 return ERROR_COMMAND_SYNTAX_ERROR;
3494 }
3495
3496 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3497 {
3498 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3499 target_to_xtensa(get_current_target(CMD_CTX)));
3500 }
3501
3502 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
3503 {
3504 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
3505 &xtensa->permissive_mode, "xtensa permissive mode");
3506 }
3507
3508 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3509 {
3510 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3511 target_to_xtensa(get_current_target(CMD_CTX)));
3512 }
3513
3514 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
3515 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3516 {
3517 struct xtensa_perfmon_config config = {
3518 .mask = 0xffff,
3519 .kernelcnt = 0,
3520 .tracelevel = -1 /* use DEBUGLEVEL by default */
3521 };
3522
3523 if (CMD_ARGC < 2 || CMD_ARGC > 6)
3524 return ERROR_COMMAND_SYNTAX_ERROR;
3525
3526 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3527 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3528 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3529 return ERROR_COMMAND_ARGUMENT_INVALID;
3530 }
3531
3532 config.select = strtoul(CMD_ARGV[1], NULL, 0);
3533 if (config.select > XTENSA_MAX_PERF_SELECT) {
3534 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3535 return ERROR_COMMAND_ARGUMENT_INVALID;
3536 }
3537
3538 if (CMD_ARGC >= 3) {
3539 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3540 if (config.mask > XTENSA_MAX_PERF_MASK) {
3541 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3542 return ERROR_COMMAND_ARGUMENT_INVALID;
3543 }
3544 }
3545
3546 if (CMD_ARGC >= 4) {
3547 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3548 if (config.kernelcnt > 1) {
3549 command_print(CMD, "kernelcnt should be 0 or 1");
3550 return ERROR_COMMAND_ARGUMENT_INVALID;
3551 }
3552 }
3553
3554 if (CMD_ARGC >= 5) {
3555 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3556 if (config.tracelevel > 7) {
3557 command_print(CMD, "tracelevel should be <=7");
3558 return ERROR_COMMAND_ARGUMENT_INVALID;
3559 }
3560 }
3561
3562 if (config.tracelevel == -1)
3563 config.tracelevel = xtensa->core_config->debug.irq_level;
3564
3565 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3566 }
3567
3568 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3569 {
3570 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3571 target_to_xtensa(get_current_target(CMD_CTX)));
3572 }
3573
3574 /* perfmon_dump [counter_id] */
3575 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3576 {
3577 if (CMD_ARGC > 1)
3578 return ERROR_COMMAND_SYNTAX_ERROR;
3579
3580 int counter_id = -1;
3581 if (CMD_ARGC == 1) {
3582 counter_id = strtol(CMD_ARGV[0], NULL, 0);
3583 if (counter_id > XTENSA_MAX_PERF_COUNTERS) {
3584 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3585 return ERROR_COMMAND_ARGUMENT_INVALID;
3586 }
3587 }
3588
3589 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3590 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3591 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3592 char result_buf[128] = { 0 };
3593 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3594 struct xtensa_perfmon_result result;
3595 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3596 if (res != ERROR_OK)
3597 return res;
3598 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3599 "%-12" PRIu64 "%s",
3600 result.value,
3601 result.overflow ? " (overflow)" : "");
3602 LOG_INFO("%s", result_buf);
3603 }
3604
3605 return ERROR_OK;
3606 }
3607
3608 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3609 {
3610 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3611 target_to_xtensa(get_current_target(CMD_CTX)));
3612 }
3613
3614 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3615 {
3616 int state = -1;
3617
3618 if (CMD_ARGC < 1) {
3619 const char *st;
3620 state = xtensa->stepping_isr_mode;
3621 if (state == XT_STEPPING_ISR_ON)
3622 st = "OFF";
3623 else if (state == XT_STEPPING_ISR_OFF)
3624 st = "ON";
3625 else
3626 st = "UNKNOWN";
3627 command_print(CMD, "Current ISR step mode: %s", st);
3628 return ERROR_OK;
3629 }
3630 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3631 if (!strcasecmp(CMD_ARGV[0], "off"))
3632 state = XT_STEPPING_ISR_ON;
3633 else if (!strcasecmp(CMD_ARGV[0], "on"))
3634 state = XT_STEPPING_ISR_OFF;
3635
3636 if (state == -1) {
3637 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3638 return ERROR_FAIL;
3639 }
3640 xtensa->stepping_isr_mode = state;
3641 return ERROR_OK;
3642 }
3643
3644 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3645 {
3646 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3647 target_to_xtensa(get_current_target(CMD_CTX)));
3648 }
3649
3650 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3651 {
3652 int res;
3653 uint32_t val = 0;
3654
3655 if (CMD_ARGC >= 1) {
3656 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3657 if (!strcasecmp(CMD_ARGV[0], "none")) {
3658 val = 0;
3659 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3660 val |= OCDDCR_BREAKINEN;
3661 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3662 val |= OCDDCR_BREAKOUTEN;
3663 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3664 val |= OCDDCR_RUNSTALLINEN;
3665 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3666 val |= OCDDCR_DEBUGMODEOUTEN;
3667 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3668 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3669 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3670 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
3671 } else {
3672 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3673 command_print(
3674 CMD,
3675 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3676 return ERROR_OK;
3677 }
3678 }
3679 res = xtensa_smpbreak_set(target, val);
3680 if (res != ERROR_OK)
3681 command_print(CMD, "Failed to set smpbreak config %d", res);
3682 } else {
3683 struct xtensa *xtensa = target_to_xtensa(target);
3684 res = xtensa_smpbreak_read(xtensa, &val);
3685 if (res == ERROR_OK)
3686 command_print(CMD, "Current bits set:%s%s%s%s",
3687 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3688 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3689 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3690 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3691 );
3692 else
3693 command_print(CMD, "Failed to get smpbreak config %d", res);
3694 }
3695 return res;
3696 }
3697
3698 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3699 {
3700 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3701 get_current_target(CMD_CTX));
3702 }
3703
3704 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
3705 {
3706 struct xtensa_trace_status trace_status;
3707 struct xtensa_trace_start_config cfg = {
3708 .stoppc = 0,
3709 .stopmask = XTENSA_STOPMASK_DISABLED,
3710 .after = 0,
3711 .after_is_words = false
3712 };
3713
3714 /* Parse arguments */
3715 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3716 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
3717 char *e;
3718 i++;
3719 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
3720 cfg.stopmask = 0;
3721 if (*e == '/')
3722 cfg.stopmask = strtol(e, NULL, 0);
3723 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
3724 i++;
3725 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
3726 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
3727 cfg.after_is_words = 0;
3728 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
3729 cfg.after_is_words = 1;
3730 } else {
3731 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
3732 return ERROR_FAIL;
3733 }
3734 }
3735
3736 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3737 if (res != ERROR_OK)
3738 return res;
3739 if (trace_status.stat & TRAXSTAT_TRACT) {
3740 LOG_WARNING("Silently stop active tracing!");
3741 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
3742 if (res != ERROR_OK)
3743 return res;
3744 }
3745
3746 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
3747 if (res != ERROR_OK)
3748 return res;
3749
3750 xtensa->trace_active = true;
3751 command_print(CMD, "Trace started.");
3752 return ERROR_OK;
3753 }
3754
3755 COMMAND_HANDLER(xtensa_cmd_tracestart)
3756 {
3757 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
3758 target_to_xtensa(get_current_target(CMD_CTX)));
3759 }
3760
3761 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
3762 {
3763 struct xtensa_trace_status trace_status;
3764
3765 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3766 if (res != ERROR_OK)
3767 return res;
3768
3769 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
3770 command_print(CMD, "No trace is currently active.");
3771 return ERROR_FAIL;
3772 }
3773
3774 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
3775 if (res != ERROR_OK)
3776 return res;
3777
3778 xtensa->trace_active = false;
3779 command_print(CMD, "Trace stop triggered.");
3780 return ERROR_OK;
3781 }
3782
3783 COMMAND_HANDLER(xtensa_cmd_tracestop)
3784 {
3785 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
3786 target_to_xtensa(get_current_target(CMD_CTX)));
3787 }
3788
3789 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
3790 {
3791 struct xtensa_trace_config trace_config;
3792 struct xtensa_trace_status trace_status;
3793 uint32_t memsz, wmem;
3794
3795 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3796 if (res != ERROR_OK)
3797 return res;
3798
3799 if (trace_status.stat & TRAXSTAT_TRACT) {
3800 command_print(CMD, "Tracing is still active. Please stop it first.");
3801 return ERROR_FAIL;
3802 }
3803
3804 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
3805 if (res != ERROR_OK)
3806 return res;
3807
3808 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
3809 command_print(CMD, "No active trace found; nothing to dump.");
3810 return ERROR_FAIL;
3811 }
3812
3813 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
3814 LOG_INFO("Total trace memory: %d words", memsz);
3815 if ((trace_config.addr &
3816 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
3817 /*Memory hasn't overwritten itself yet. */
3818 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
3819 LOG_INFO("...but trace is only %d words", wmem);
3820 if (wmem < memsz)
3821 memsz = wmem;
3822 } else {
3823 if (trace_config.addr & TRAXADDR_TWSAT) {
3824 LOG_INFO("Real trace is many times longer than that (overflow)");
3825 } else {
3826 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
3827 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
3828 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
3829 }
3830 }
3831
3832 uint8_t *tracemem = malloc(memsz * 4);
3833 if (!tracemem) {
3834 command_print(CMD, "Failed to alloc memory for trace data!");
3835 return ERROR_FAIL;
3836 }
3837 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
3838 if (res != ERROR_OK) {
3839 free(tracemem);
3840 return res;
3841 }
3842
3843 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
3844 if (f <= 0) {
3845 free(tracemem);
3846 command_print(CMD, "Unable to open file %s", fname);
3847 return ERROR_FAIL;
3848 }
3849 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
3850 command_print(CMD, "Unable to write to file %s", fname);
3851 else
3852 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
3853 close(f);
3854
3855 bool is_all_zeroes = true;
3856 for (unsigned int i = 0; i < memsz * 4; i++) {
3857 if (tracemem[i] != 0) {
3858 is_all_zeroes = false;
3859 break;
3860 }
3861 }
3862 free(tracemem);
3863 if (is_all_zeroes)
3864 command_print(
3865 CMD,
3866 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
3867
3868 return ERROR_OK;
3869 }
3870
3871 COMMAND_HANDLER(xtensa_cmd_tracedump)
3872 {
3873 if (CMD_ARGC != 1) {
3874 command_print(CMD, "Command takes exactly 1 parameter.Need filename to dump to as output!");
3875 return ERROR_FAIL;
3876 }
3877
3878 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
3879 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
3880 }
3881
/* Sub-commands registered under the "xtensa" command group (see
 * xtensa_command_handlers below).  Config-mode commands describe the core;
 * exec-mode commands drive perfmon and TRAX tracing at runtime. */
static const struct command_registration xtensa_any_command_handlers[] = {
	{
		.name = "xtdef",
		.handler = xtensa_cmd_xtdef,
		.mode = COMMAND_CONFIG,
		.help = "Configure Xtensa core type",
		.usage = "<type>",
	},
	{
		.name = "xtopt",
		.handler = xtensa_cmd_xtopt,
		.mode = COMMAND_CONFIG,
		.help = "Configure Xtensa core option",
		.usage = "<name> <value>",
	},
	{
		.name = "xtmem",
		.handler = xtensa_cmd_xtmem,
		.mode = COMMAND_CONFIG,
		.help = "Configure Xtensa memory/cache option",
		.usage = "<type> [parameters]",
	},
	{
		/* NOTE(review): usage lists 4 parameters but the helper
		 * (xtensa_cmd_xtmmu_do) accepts exactly 2 — confirm intent. */
		.name = "xtmmu",
		.handler = xtensa_cmd_xtmmu,
		.mode = COMMAND_CONFIG,
		.help = "Configure Xtensa MMU option",
		.usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
	},
	{
		.name = "xtmpu",
		.handler = xtensa_cmd_xtmpu,
		.mode = COMMAND_CONFIG,
		.help = "Configure Xtensa MPU option",
		.usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
	},
	{
		.name = "xtreg",
		.handler = xtensa_cmd_xtreg,
		.mode = COMMAND_CONFIG,
		.help = "Configure Xtensa register",
		.usage = "<regname> <regnum>",
	},
	{
		/* Same handler as "xtreg": the 1-argument form sets the count */
		.name = "xtregs",
		.handler = xtensa_cmd_xtreg,
		.mode = COMMAND_CONFIG,
		.help = "Configure number of Xtensa registers",
		.usage = "<numregs>",
	},
	{
		.name = "xtregfmt",
		.handler = xtensa_cmd_xtregfmt,
		.mode = COMMAND_CONFIG,
		.help = "Configure format of Xtensa register map",
		.usage = "<contiguous|sparse> [numgregs]",
	},
	{
		.name = "set_permissive",
		.handler = xtensa_cmd_permissive_mode,
		.mode = COMMAND_ANY,
		.help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
		.usage = "[0|1]",
	},
	{
		.name = "maskisr",
		.handler = xtensa_cmd_mask_interrupts,
		.mode = COMMAND_ANY,
		.help = "mask Xtensa interrupts at step",
		.usage = "['on'|'off']",
	},
	{
		.name = "smpbreak",
		.handler = xtensa_cmd_smpbreak,
		.mode = COMMAND_ANY,
		.help = "Set the way the CPU chains OCD breaks",
		.usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
	},
	{
		.name = "perfmon_enable",
		.handler = xtensa_cmd_perfmon_enable,
		.mode = COMMAND_EXEC,
		.help = "Enable and start performance counter",
		.usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
	},
	{
		.name = "perfmon_dump",
		.handler = xtensa_cmd_perfmon_dump,
		.mode = COMMAND_EXEC,
		.help = "Dump performance counter value. If no argument specified, dumps all counters.",
		.usage = "[counter_id]",
	},
	{
		.name = "tracestart",
		.handler = xtensa_cmd_tracestart,
		.mode = COMMAND_EXEC,
		.help =
			"Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
		.usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
	},
	{
		.name = "tracestop",
		.handler = xtensa_cmd_tracestop,
		.mode = COMMAND_EXEC,
		.help = "Tracing: Stop current trace as started by the tracestart command",
		.usage = "",
	},
	{
		.name = "tracedump",
		.handler = xtensa_cmd_tracedump,
		.mode = COMMAND_EXEC,
		.help = "Tracing: Dump trace memory to a files. One file per core.",
		.usage = "<outfile>",
	},
	{
		.name = "exe",
		.handler = xtensa_cmd_exe,
		.mode = COMMAND_ANY,
		.help = "Xtensa stub execution",
		.usage = "<ascii-encoded hexadecimal instruction bytes>",
	},
	COMMAND_REGISTRATION_DONE
};
4005
/* Top-level "xtensa" command group; all sub-commands are chained from
 * xtensa_any_command_handlers above. */
const struct command_registration xtensa_command_handlers[] = {
	{
		.name = "xtensa",
		.mode = COMMAND_ANY,
		.help = "Xtensa command group",
		.usage = "",
		.chain = xtensa_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)