Documentation: fix warning "unbalanced square brackets"
[openocd.git] / src / target / xtensa / xtensa.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19 #include <target/algorithm.h>
20
21 #include "xtensa_chip.h"
22 #include "xtensa.h"
23
24 /* Swap 4-bit Xtensa opcodes and fields */
25 #define XT_NIBSWAP8(V) \
26 ((((V) & 0x0F) << 4) \
27 | (((V) & 0xF0) >> 4))
28
29 #define XT_NIBSWAP16(V) \
30 ((((V) & 0x000F) << 12) \
31 | (((V) & 0x00F0) << 4) \
32 | (((V) & 0x0F00) >> 4) \
33 | (((V) & 0xF000) >> 12))
34
35 #define XT_NIBSWAP24(V) \
36 ((((V) & 0x00000F) << 20) \
37 | (((V) & 0x0000F0) << 12) \
38 | (((V) & 0x000F00) << 4) \
39 | (((V) & 0x00F000) >> 4) \
40 | (((V) & 0x0F0000) >> 12) \
41 | (((V) & 0xF00000) >> 20))
42
43 /* _XT_INS_FORMAT_*()
44 * Instruction formatting converted from little-endian inputs
45 * and shifted to the MSB-side of DIR for BE systems.
46 */
47 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
48 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
49 | (((T) & 0x0F) << 16) \
50 | (((SR) & 0xFF) << 8)) << 8 \
51 : (OPCODE) \
52 | (((SR) & 0xFF) << 8) \
53 | (((T) & 0x0F) << 4))
54
55 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
56 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
57 | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
58 | (((R) & 0x0F) << 8)) << 8 \
59 : (OPCODE) \
60 | (((ST) & 0xFF) << 4) \
61 | (((R) & 0x0F) << 12))
62
63 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
64 (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
65 | (((T) & 0x0F) << 8) \
66 | (((S) & 0x0F) << 4) \
67 | ((IMM4) & 0x0F)) << 16 \
68 : (OPCODE) \
69 | (((T) & 0x0F) << 4) \
70 | (((S) & 0x0F) << 8) \
71 | (((IMM4) & 0x0F) << 12))
72
73 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
74 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
75 | (((T) & 0x0F) << 16) \
76 | (((S) & 0x0F) << 12) \
77 | (((R) & 0x0F) << 8) \
78 | ((IMM8) & 0xFF)) << 8 \
79 : (OPCODE) \
80 | (((IMM8) & 0xFF) << 16) \
81 | (((R) & 0x0F) << 12) \
82 | (((S) & 0x0F) << 8) \
83 | (((T) & 0x0F) << 4))
84
85 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
86 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
87 | (((T) & 0x0F) << 16) \
88 | (((S) & 0x0F) << 12) \
89 | (((R) & 0x0F) << 8)) << 8 \
90 | ((IMM4) & 0x0F) \
91 : (OPCODE) \
92 | (((IMM4) & 0x0F) << 20) \
93 | (((R) & 0x0F) << 12) \
94 | (((S) & 0x0F) << 8) \
95 | (((T) & 0x0F) << 4))
96
97 /* Xtensa processor instruction opcodes
98 */
99 /* "Return From Debug Operation" to Normal */
100 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
101 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
102 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
103
104 /* Load to DDR register, increase addr register */
105 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
106 /* Store from DDR register, increase addr register */
107 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
108
109 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
110 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
111 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
112 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
113 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
114 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
115
116 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
117 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
118 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
119 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
120 /* Store 8-bit to A(S)+IMM8 from A(T) */
121 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
122
123 /* Cache Instructions */
124 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
125 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
126 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
127 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
128
129 /* Control Instructions */
130 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
131 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
132
133 /* Read Special Register */
134 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
135 /* Write Special Register */
136 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
137 /* Swap Special Register */
138 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
139
140 /* Rotate Window by (-8..7) */
141 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
142
143 /* Read User Register */
144 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
145 /* Write User Register */
146 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
147
148 /* Read Floating-Point Register */
149 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
150 /* Write Floating-Point Register */
151 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
152
153 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
154 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
155 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
156
157 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
158 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
159 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
160
161 /* Read Protection TLB Entry Info */
162 #define XT_INS_PPTLB(X, S, T) _XT_INS_FORMAT_RRR(X, 0x500000, ((S) << 4) | (T), 0xD)
163
164 #define XT_TLB1_ACC_SHIFT 8
165 #define XT_TLB1_ACC_MSK 0xF
166
167 #define XT_WATCHPOINTS_NUM_MAX 2
168
169 /* Special register number macro for DDR, PS, WB, A3, A4 registers.
170 * These get used a lot so making a shortcut is useful.
171 */
172 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
173 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
174 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
175 #define XT_REG_A0 (xtensa_regs[XT_REG_IDX_AR0].reg_num)
176 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
177 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
178
179 #define XT_PS_REG_NUM (0xe6U)
180 #define XT_EPS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
181 #define XT_EPC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
#define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
183 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
184 #define XT_NX_IBREAKC_BASE (0xc0U) /* (IBREAKC0..IBREAKC1) for NX */
185
186 #define XT_SW_BREAKPOINTS_MAX_NUM 32
187 #define XT_HW_IBREAK_MAX_NUM 2
188 #define XT_HW_DBREAK_MAX_NUM 2
189
/* Canonical table of Xtensa core registers that are always present in the
 * register cache, indexed by enum xtensa_reg_id. Configuration-dependent
 * registers are kept separately (xtensa->optregs). Each entry records the
 * GDB-visible name, the hardware register number, the access class
 * (general/special/debug/relative-general) and access-restriction flags.
 */
struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
	/* PC is virtual: it is materialized from EPC[DBGLEVEL] (LX) or a jump (NX) */
	XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
	/* Physical address registers AR0..AR63 (number present depends on config) */
	XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
	XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
	/* Window state and debug/special registers */
	XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ps", XT_PS_REG_NUM, XT_REG_SPECIAL, 0),	/* PS (not mapped through EPS[]) */
	XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
	/* DDR is write-only from the debugger's perspective (XT_REGF_NOREAD) */
	XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
	XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
	XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),

	/* WARNING: For these registers, regnum points to the
	 * index of the corresponding ARx registers, NOT to
	 * the processor register number! */
	XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
	XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
};
293
/**
 * Types of memory used at xtensa target.
 * Order matters: xtensa_target_memory_region_find() iterates these
 * values from 0 up to XTENSA_MEM_REGS_NUM.
 */
enum xtensa_mem_region_type {
	XTENSA_MEM_REG_IROM = 0x0,	/* instruction ROM */
	XTENSA_MEM_REG_IRAM,		/* instruction RAM */
	XTENSA_MEM_REG_DROM,		/* data ROM */
	XTENSA_MEM_REG_DRAM,		/* data RAM */
	XTENSA_MEM_REG_SRAM,		/* unified (system) RAM */
	XTENSA_MEM_REG_SROM,		/* unified (system) ROM */
	XTENSA_MEM_REGS_NUM		/* count sentinel, not a real region type */
};
306
/**
 * Types of access rights for MPU option.
 * The first block in each name is kernel RWX ARs; the second block is user rwx ARs.
 * Enumerator values follow the hardware access-rights encoding, which starts
 * at 0x2 (see use with XT_TLB1_ACC_SHIFT/XT_TLB1_ACC_MSK in xtensa_region_ar_exec()).
 */
enum xtensa_mpu_access_type {
	XTENSA_ACC_00X_000 = 0x2,
	XTENSA_ACC_000_00X,
	XTENSA_ACC_R00_000,
	XTENSA_ACC_R0X_000,
	XTENSA_ACC_RW0_000,
	XTENSA_ACC_RWX_000,
	XTENSA_ACC_0W0_0W0,
	XTENSA_ACC_RW0_RWX,
	XTENSA_ACC_RW0_R00,
	XTENSA_ACC_RWX_R0X,
	XTENSA_ACC_R00_R00,
	XTENSA_ACC_R0X_R0X,
	XTENSA_ACC_RW0_RW0,
	XTENSA_ACC_RWX_RWX
};
327
/* Register definition as union for list allocation.
 * Allows a 32-bit register value to be viewed either as an integer
 * or as a raw 4-byte buffer (e.g. for buf_get_u32/buf_cpy helpers).
 */
union xtensa_reg_val_u {
	xtensa_reg_val_t val;
	uint8_t buf[4];
};
333
/* Mapping of "Exx" error strings to OpenOCD error codes,
 * indexed by the XT_QERR_* enumeration. */
static const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
	{ .chrval = "E00", .intval = ERROR_FAIL },
	{ .chrval = "E01", .intval = ERROR_FAIL },
	{ .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
	{ .chrval = "E03", .intval = ERROR_FAIL },
};
340
/* Set to true (at build time) for extra debug logging; defaults to false. */
static const bool xtensa_extra_debug_log;
343
344 /**
345 * Gets a config for the specific mem type
346 */
347 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
348 struct xtensa *xtensa,
349 enum xtensa_mem_region_type type)
350 {
351 switch (type) {
352 case XTENSA_MEM_REG_IROM:
353 return &xtensa->core_config->irom;
354 case XTENSA_MEM_REG_IRAM:
355 return &xtensa->core_config->iram;
356 case XTENSA_MEM_REG_DROM:
357 return &xtensa->core_config->drom;
358 case XTENSA_MEM_REG_DRAM:
359 return &xtensa->core_config->dram;
360 case XTENSA_MEM_REG_SRAM:
361 return &xtensa->core_config->sram;
362 case XTENSA_MEM_REG_SROM:
363 return &xtensa->core_config->srom;
364 default:
365 return NULL;
366 }
367 }
368
369 /**
370 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
371 * for a given address
372 * Returns NULL if nothing found
373 */
374 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
375 const struct xtensa_local_mem_config *mem,
376 target_addr_t address)
377 {
378 for (unsigned int i = 0; i < mem->count; i++) {
379 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
380 if (address >= region->base && address < (region->base + region->size))
381 return region;
382 }
383 return NULL;
384 }
385
386 /**
387 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
388 * for a given address
389 * Returns NULL if nothing found
390 */
391 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
392 struct xtensa *xtensa,
393 target_addr_t address)
394 {
395 const struct xtensa_local_mem_region_config *result;
396 const struct xtensa_local_mem_config *mcgf;
397 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
398 mcgf = xtensa_get_mem_config(xtensa, mtype);
399 result = xtensa_memory_region_find(mcgf, address);
400 if (result)
401 return result;
402 }
403 return NULL;
404 }
405
406 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
407 const struct xtensa_local_mem_config *mem,
408 target_addr_t address)
409 {
410 if (!cache->size)
411 return false;
412 return xtensa_memory_region_find(mem, address);
413 }
414
415 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
416 {
417 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
418 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
419 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
420 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
421 }
422
423 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
424 {
425 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
426 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
427 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
428 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
429 }
430
431 static int xtensa_core_reg_get(struct reg *reg)
432 {
433 /* We don't need this because we read all registers on halt anyway. */
434 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
435 struct target *target = xtensa->target;
436
437 if (target->state != TARGET_HALTED)
438 return ERROR_TARGET_NOT_HALTED;
439 if (!reg->exist) {
440 if (strncmp(reg->name, "?0x", 3) == 0) {
441 unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
442 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
443 return ERROR_OK;
444 }
445 return ERROR_COMMAND_ARGUMENT_INVALID;
446 }
447 return ERROR_OK;
448 }
449
/* reg_arch_type.set callback: store a new value into the cached register.
 * The value is only written to the cache here (marked dirty/valid); the
 * actual hardware write happens later in xtensa_write_dirty_registers().
 * Writes to non-existent placeholder registers ("?0x<num>") are ignored
 * with a warning so GDB does not error out on unknown registers.
 */
static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
{
	struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
	struct target *target = xtensa->target;

	assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	if (!reg->exist) {
		if (strncmp(reg->name, "?0x", 3) == 0) {
			unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
			LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
			return ERROR_OK;
		}
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	buf_cpy(buf, reg->value, reg->size);

	if (xtensa->core_config->windowed) {
		/* If the user updates a potential scratch register, track for conflicts.
		 * scratch_ars[].intval is consulted later by xtensa_scratch_regs_fixup()
		 * to decide which of Ax/ARx wins when both are dirty. */
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
			if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
				LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
					buf_get_u32(reg->value, 0, 32));
				LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
					xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
					xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
				xtensa->scratch_ars[s].intval = true;
				break;
			}
		}
	}
	/* Defer the hardware update: mark cached value as both current and pending */
	reg->dirty = true;
	reg->valid = true;

	return ERROR_OK;
}
489
/* Register access callbacks installed on every entry of the Xtensa register cache. */
static const struct reg_arch_type xtensa_reg_type = {
	.get = xtensa_core_reg_get,
	.set = xtensa_core_reg_set,
};
494
495 /* Convert a register index that's indexed relative to windowbase, to the real address. */
496 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
497 enum xtensa_reg_id reg_idx,
498 int windowbase)
499 {
500 unsigned int idx;
501 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
502 idx = reg_idx - XT_REG_IDX_AR0;
503 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
504 idx = reg_idx - XT_REG_IDX_A0;
505 } else {
506 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
507 return -1;
508 }
509 /* Each windowbase value represents 4 registers on LX and 8 on NX */
510 int base_inc = (xtensa->core_config->core_type == XT_LX) ? 4 : 8;
511 return ((idx + windowbase * base_inc) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
512 }
513
/* Inverse of xtensa_windowbase_offset_to_canonical(): convert a canonical ARx
 * index back to the windowbase-relative index, by rotating in the opposite
 * direction (negated windowbase). */
static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
	enum xtensa_reg_id reg_idx,
	int windowbase)
{
	return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
}
520
521 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
522 {
523 struct reg *reg_list = xtensa->core_cache->reg_list;
524 reg_list[reg_idx].dirty = true;
525 }
526
/* Queue a single (up to 32-bit) instruction for execution on the target by
 * writing it to DIR0EXEC; the write triggers execution of the instruction. */
static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
{
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DIR0EXEC, ins);
}
531
532 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
533 {
534 const int max_oplen = 64; /* 8 DIRx regs: max width 64B */
535 if ((oplen > 0) && (oplen <= max_oplen)) {
536 uint8_t ops_padded[max_oplen];
537 memcpy(ops_padded, ops, oplen);
538 memset(ops_padded + oplen, 0, max_oplen - oplen);
539 unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
540 for (int32_t i = oplenw - 1; i > 0; i--)
541 xtensa_queue_dbg_reg_write(xtensa,
542 XDMREG_DIR0 + i,
543 target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t)*i]));
544 /* Write DIR0EXEC last */
545 xtensa_queue_dbg_reg_write(xtensa,
546 XDMREG_DIR0EXEC,
547 target_buffer_get_u32(xtensa->target, &ops_padded[0]));
548 }
549 }
550
/* NOTE: Assumes A3 has already been saved and marked dirty; A3 will be clobbered.
 * Checks whether the address range [start, end] has execute permission.
 * Without the MPU option this trivially returns true. With MPU, it probes
 * the access rights at both endpoints via the PPTLB instruction and requires
 * execute permission at each. Returns false as soon as one endpoint lacks it.
 */
static inline bool xtensa_region_ar_exec(struct target *target, target_addr_t start, target_addr_t end)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	if (xtensa->core_config->mpu.enabled) {
		/* For cores with the MPU option, issue PPTLB on start and end addresses.
		 * Parse access rights field, and confirm both have execute permissions.
		 */
		for (int i = 0; i <= 1; i++) {
			uint32_t at, acc;
			uint8_t at_buf[4];
			bool exec_acc;
			target_addr_t addr = i ? end : start;
			/* Sequence: DDR <- addr; A3 <- DDR; A3 <- PPTLB(A3); DDR <- A3; read DDR */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addr);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			xtensa_queue_exec_ins(xtensa, XT_INS_PPTLB(xtensa, XT_REG_A3, XT_REG_A3));
			xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, at_buf);
			int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
			if (res != ERROR_OK)
				LOG_TARGET_ERROR(target, "Error queuing PPTLB: %d", res);
			res = xtensa_core_status_check(target);
			if (res != ERROR_OK)
				LOG_TARGET_ERROR(target, "Error issuing PPTLB: %d", res);
			/* Extract the access-rights nibble from the PPTLB result */
			at = buf_get_u32(at_buf, 0, 32);
			acc = (at >> XT_TLB1_ACC_SHIFT) & XT_TLB1_ACC_MSK;
			/* Any encoding with an X in either kernel or user rights qualifies */
			exec_acc = ((acc == XTENSA_ACC_00X_000) || (acc == XTENSA_ACC_R0X_000) ||
				(acc == XTENSA_ACC_RWX_000) || (acc == XTENSA_ACC_RWX_R0X) ||
				(acc == XTENSA_ACC_R0X_R0X) || (acc == XTENSA_ACC_RWX_RWX));
			LOG_TARGET_DEBUG(target, "PPTLB(" TARGET_ADDR_FMT ") -> 0x%08" PRIx32 " exec_acc %d",
				addr, at, exec_acc);
			if (!exec_acc)
				return false;
		}
	}
	return true;
}
588
589 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
590 {
591 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
592 return dm->pwr_ops->queue_reg_write(dm, reg, data);
593 }
594
/* NOTE: Assumes A3 has already been saved.
 * Read the window state register (PS on LX, WB on NX) into *woe, then clear
 * its window-overflow-enable field (PS.WOE on LX, WB.S on NX) so subsequent
 * AR accesses don't trigger window exceptions. No-op on non-windowed configs.
 * The saved value must later be restored with xtensa_window_state_restore().
 */
static int xtensa_window_state_save(struct target *target, uint32_t *woe)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
	uint32_t woe_dis;
	uint8_t woe_buf[4];

	if (xtensa->core_config->windowed) {
		/* Save PS (LX) or WB (NX) and disable window overflow exceptions prior to AR save */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, woe_sr, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, woe_buf);
		int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to read %s (%d)!",
				(woe_sr == XT_SR_PS) ? "PS" : "WB", res);
			return res;
		}
		/* NOTE(review): the status-check result is ignored here (unlike the
		 * queue-execute result above) — confirm this is intentional. */
		xtensa_core_status_check(target);
		*woe = buf_get_u32(woe_buf, 0, 32);
		/* Clear only the overflow-enable field; keep the rest of PS/WB intact */
		woe_dis = *woe & ~((woe_sr == XT_SR_PS) ? XT_PS_WOE_MSK : XT_WB_S_MSK);
		LOG_TARGET_DEBUG(target, "Clearing %s (0x%08" PRIx32 " -> 0x%08" PRIx32 ")",
			(woe_sr == XT_SR_PS) ? "PS.WOE" : "WB.S", *woe, woe_dis);
		/* Write the modified value back: DDR -> A3 -> PS/WB */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe_dis);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, woe_sr, XT_REG_A3));
	}
	return ERROR_OK;
}
625
/* NOTE: Assumes A3 has already been saved.
 * Counterpart of xtensa_window_state_save(): write the previously saved
 * PS (LX) / WB (NX) value back, re-enabling window overflow exceptions.
 * No-op on non-windowed configs. The queued instructions are executed by a
 * later queue flush; this function itself only enqueues them.
 */
static void xtensa_window_state_restore(struct target *target, uint32_t woe)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
	if (xtensa->core_config->windowed) {
		/* Restore window overflow exception state: DDR -> A3 -> PS/WB */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, woe);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, woe_sr, XT_REG_A3));
		LOG_TARGET_DEBUG(target, "Restored %s (0x%08" PRIx32 ")",
			(woe_sr == XT_SR_PS) ? "PS.WOE" : "WB", woe);
	}
}
640
641 static bool xtensa_reg_is_readable(int flags, int cpenable)
642 {
643 if (flags & XT_REGF_NOREAD)
644 return false;
645 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
646 return false;
647 return true;
648 }
649
/* Resolve a value conflict between a windowed Ax register and its underlying
 * ARx physical register when both cache entries are dirty but differ.
 * i and j index reg_list: i is the Ax entry, j the ARx entry; a_idx/ar_idx
 * select the corresponding scratch-tracking slots (set by xtensa_core_reg_set()
 * when gdb wrote the register).
 * If only the Ax side was gdb-set, the Ax value is copied over ARx; otherwise
 * the ARx value is copied over Ax.
 * Returns true when BOTH sides were gdb-set, i.e. the caller should still warn.
 */
static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
{
	int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;	/* only A3/A4 are scratch candidates */
	if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
		LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
		memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
	} else {
		LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
		memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
	}
	return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
}
662
663 static int xtensa_write_dirty_registers(struct target *target)
664 {
665 struct xtensa *xtensa = target_to_xtensa(target);
666 int res;
667 xtensa_reg_val_t regval, windowbase = 0;
668 bool scratch_reg_dirty = false, delay_cpenable = false;
669 struct reg *reg_list = xtensa->core_cache->reg_list;
670 unsigned int reg_list_size = xtensa->core_cache->num_regs;
671 bool preserve_a3 = false;
672 uint8_t a3_buf[4];
673 xtensa_reg_val_t a3 = 0, woe;
674 unsigned int ms_idx = (xtensa->core_config->core_type == XT_NX) ?
675 xtensa->nx_reg_idx[XT_NX_REG_IDX_MS] : reg_list_size;
676 xtensa_reg_val_t ms = 0;
677 bool restore_ms = false;
678
679 LOG_TARGET_DEBUG(target, "start");
680
681 /* We need to write the dirty registers in the cache list back to the processor.
682 * Start by writing the SFR/user registers. */
683 for (unsigned int i = 0; i < reg_list_size; i++) {
684 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
685 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
686 if (reg_list[i].dirty) {
687 if (rlist[ridx].type == XT_REG_SPECIAL ||
688 rlist[ridx].type == XT_REG_USER ||
689 rlist[ridx].type == XT_REG_FR) {
690 scratch_reg_dirty = true;
691 if (i == XT_REG_IDX_CPENABLE) {
692 delay_cpenable = true;
693 continue;
694 }
695 regval = xtensa_reg_get(target, i);
696 LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
697 reg_list[i].name,
698 rlist[ridx].reg_num,
699 regval);
700 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
701 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
702 if (reg_list[i].exist) {
703 unsigned int reg_num = rlist[ridx].reg_num;
704 if (rlist[ridx].type == XT_REG_USER) {
705 xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
706 } else if (rlist[ridx].type == XT_REG_FR) {
707 xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
708 } else {/*SFR */
709 if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
710 if (xtensa->core_config->core_type == XT_LX) {
711 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
712 reg_num = (XT_EPC_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
713 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
714 } else {
715 /* NX PC set through issuing a jump instruction */
716 xtensa_queue_exec_ins(xtensa, XT_INS_JX(xtensa, XT_REG_A3));
717 }
718 } else if (i == ms_idx) {
719 /* MS must be restored after ARs. This ensures ARs remain in correct
720 * order even for reversed register groups (overflow/underflow).
721 */
722 ms = regval;
723 restore_ms = true;
724 LOG_TARGET_DEBUG(target, "Delaying MS write: 0x%x", ms);
725 } else {
726 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
727 }
728 }
729 }
730 reg_list[i].dirty = false;
731 }
732 }
733 }
734 if (scratch_reg_dirty)
735 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
736 if (delay_cpenable) {
737 regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
738 LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
739 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
740 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
741 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
742 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
743 XT_REG_A3));
744 reg_list[XT_REG_IDX_CPENABLE].dirty = false;
745 }
746
747 preserve_a3 = (xtensa->core_config->windowed) || (xtensa->core_config->core_type == XT_NX);
748 if (preserve_a3) {
749 /* Save (windowed) A3 for scratch use */
750 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
751 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
752 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
753 if (res != ERROR_OK)
754 return res;
755 xtensa_core_status_check(target);
756 a3 = buf_get_u32(a3_buf, 0, 32);
757 }
758
759 if (xtensa->core_config->windowed) {
760 res = xtensa_window_state_save(target, &woe);
761 if (res != ERROR_OK)
762 return res;
763 /* Grab the windowbase, we need it. */
764 uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
765 XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
766 windowbase = xtensa_reg_get(target, wb_idx);
767 if (xtensa->core_config->core_type == XT_NX)
768 windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
769
770 /* Check if there are mismatches between the ARx and corresponding Ax registers.
771 * When the user sets a register on a windowed config, xt-gdb may set the ARx
772 * register directly. Thus we take ARx as priority over Ax if both are dirty
773 * and it's unclear if the user set one over the other explicitly.
774 */
775 for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
776 unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
777 if (reg_list[i].dirty && reg_list[j].dirty) {
778 if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
779 bool show_warning = true;
780 if (i == XT_REG_IDX_A3)
781 show_warning = xtensa_scratch_regs_fixup(xtensa,
782 reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
783 else if (i == XT_REG_IDX_A4)
784 show_warning = xtensa_scratch_regs_fixup(xtensa,
785 reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
786 if (show_warning)
787 LOG_WARNING(
788 "Warning: Both A%d [0x%08" PRIx32
789 "] as well as its underlying physical register "
790 "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
791 i - XT_REG_IDX_A0,
792 buf_get_u32(reg_list[i].value, 0, 32),
793 j - XT_REG_IDX_AR0,
794 buf_get_u32(reg_list[j].value, 0, 32));
795 }
796 }
797 }
798 }
799
800 /* Write A0-A16. */
801 for (unsigned int i = 0; i < 16; i++) {
802 if (reg_list[XT_REG_IDX_A0 + i].dirty) {
803 regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
804 LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
805 xtensa_regs[XT_REG_IDX_A0 + i].name,
806 regval,
807 xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
808 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
809 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
810 reg_list[XT_REG_IDX_A0 + i].dirty = false;
811 if (i == 3) {
812 /* Avoid stomping A3 during restore at end of function */
813 a3 = regval;
814 }
815 }
816 }
817
818 if (xtensa->core_config->windowed) {
819 /* Now write AR registers */
820 for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
821 /* Write the 16 registers we can see */
822 for (unsigned int i = 0; i < 16; i++) {
823 if (i + j < xtensa->core_config->aregs_num) {
824 enum xtensa_reg_id realadr =
825 xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
826 windowbase);
827 /* Write back any dirty un-windowed registers */
828 if (reg_list[realadr].dirty) {
829 regval = xtensa_reg_get(target, realadr);
830 LOG_TARGET_DEBUG(
831 target,
832 "Writing back reg %s value %08" PRIX32 ", num =%i",
833 xtensa_regs[realadr].name,
834 regval,
835 xtensa_regs[realadr].reg_num);
836 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, regval);
837 xtensa_queue_exec_ins(xtensa,
838 XT_INS_RSR(xtensa, XT_SR_DDR,
839 xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
840 reg_list[realadr].dirty = false;
841 if ((i + j) == 3)
842 /* Avoid stomping AR during A3 restore at end of function */
843 a3 = regval;
844 }
845 }
846 }
847
848 /* Now rotate the window so we'll see the next 16 registers. The final rotate
849 * will wraparound, leaving us in the state we were.
850 * Each ROTW rotates 4 registers on LX and 8 on NX */
851 int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
852 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, rotw_arg));
853 }
854
855 xtensa_window_state_restore(target, woe);
856
857 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
858 xtensa->scratch_ars[s].intval = false;
859 }
860
861 if (restore_ms) {
862 uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
863 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, ms);
864 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
865 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, ms_regno, XT_REG_A3));
866 LOG_TARGET_DEBUG(target, "Delayed MS (0x%x) write complete: 0x%x", ms_regno, ms);
867 }
868
869 if (preserve_a3) {
870 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
871 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
872 }
873
874 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
875 xtensa_core_status_check(target);
876
877 return res;
878 }
879
880 static inline bool xtensa_is_stopped(struct target *target)
881 {
882 struct xtensa *xtensa = target_to_xtensa(target);
883 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
884 }
885
886 int xtensa_examine(struct target *target)
887 {
888 struct xtensa *xtensa = target_to_xtensa(target);
889 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
890
891 LOG_TARGET_DEBUG(target, "");
892
893 if (xtensa->core_config->core_type == XT_UNDEF) {
894 LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
895 return ERROR_FAIL;
896 }
897
898 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
899 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
900 xtensa_dm_queue_enable(&xtensa->dbg_mod);
901 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
902 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
903 if (res != ERROR_OK)
904 return res;
905 if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
906 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
907 return ERROR_TARGET_FAILURE;
908 }
909 LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
910 target_set_examined(target);
911 xtensa_smpbreak_write(xtensa, xtensa->smp_break);
912 return ERROR_OK;
913 }
914
915 int xtensa_wakeup(struct target *target)
916 {
917 struct xtensa *xtensa = target_to_xtensa(target);
918 unsigned int cmd = PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) | PWRCTL_COREWAKEUP(xtensa);
919
920 if (xtensa->reset_asserted)
921 cmd |= PWRCTL_CORERESET(xtensa);
922 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd);
923 /* TODO: can we join this with the write above? */
924 xtensa_queue_pwr_reg_write(xtensa, XDMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE(xtensa));
925 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
926 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
927 }
928
929 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
930 {
931 uint32_t dsr_data = 0x00110000;
932 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
933 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
934 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
935
936 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
937 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, set | OCDDCR_ENABLEOCD);
938 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, clear);
939 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DSR, dsr_data);
940 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
941 return xtensa_dm_queue_execute(&xtensa->dbg_mod);
942 }
943
944 int xtensa_smpbreak_set(struct target *target, uint32_t set)
945 {
946 struct xtensa *xtensa = target_to_xtensa(target);
947 int res = ERROR_OK;
948
949 xtensa->smp_break = set;
950 if (target_was_examined(target))
951 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
952 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
953 return res;
954 }
955
956 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
957 {
958 uint8_t dcr_buf[sizeof(uint32_t)];
959
960 xtensa_queue_dbg_reg_read(xtensa, XDMREG_DCRSET, dcr_buf);
961 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
962 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
963 *val = buf_get_u32(dcr_buf, 0, 32);
964
965 return res;
966 }
967
968 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
969 {
970 struct xtensa *xtensa = target_to_xtensa(target);
971 *val = xtensa->smp_break;
972 return ERROR_OK;
973 }
974
975 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
976 {
977 return buf_get_u32(reg->value, 0, 32);
978 }
979
980 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
981 {
982 buf_set_u32(reg->value, 0, 32, value);
983 reg->dirty = true;
984 }
985
986 static int xtensa_imprecise_exception_occurred(struct target *target)
987 {
988 struct xtensa *xtensa = target_to_xtensa(target);
989 for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESR; idx++) {
990 enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
991 if (xtensa->nx_reg_idx[idx]) {
992 xtensa_reg_val_t reg = xtensa_reg_get(target, xtensa->nx_reg_idx[idx]);
993 if (reg & XT_IMPR_EXC_MSK) {
994 LOG_TARGET_DEBUG(target, "Imprecise exception: %s: 0x%x",
995 xtensa->core_cache->reg_list[ridx].name, reg);
996 return true;
997 }
998 }
999 }
1000 return false;
1001 }
1002
1003 static void xtensa_imprecise_exception_clear(struct target *target)
1004 {
1005 struct xtensa *xtensa = target_to_xtensa(target);
1006 for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESRCLR; idx++) {
1007 enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
1008 if (ridx && idx != XT_NX_REG_IDX_MESR) {
1009 xtensa_reg_val_t value = (idx == XT_NX_REG_IDX_MESRCLR) ? XT_MESRCLR_IMPR_EXC_MSK : 0;
1010 xtensa_reg_set(target, ridx, value);
1011 LOG_TARGET_DEBUG(target, "Imprecise exception: clearing %s (0x%x)",
1012 xtensa->core_cache->reg_list[ridx].name, value);
1013 }
1014 }
1015 }
1016
1017 int xtensa_core_status_check(struct target *target)
1018 {
1019 struct xtensa *xtensa = target_to_xtensa(target);
1020 int res, needclear = 0, needimprclear = 0;
1021
1022 xtensa_dm_core_status_read(&xtensa->dbg_mod);
1023 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
1024 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
1025 if (dsr & OCDDSR_EXECBUSY) {
1026 if (!xtensa->suppress_dsr_errors)
1027 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
1028 needclear = 1;
1029 }
1030 if (dsr & OCDDSR_EXECEXCEPTION) {
1031 if (!xtensa->suppress_dsr_errors)
1032 LOG_TARGET_ERROR(target,
1033 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
1034 dsr);
1035 needclear = 1;
1036 }
1037 if (dsr & OCDDSR_EXECOVERRUN) {
1038 if (!xtensa->suppress_dsr_errors)
1039 LOG_TARGET_ERROR(target,
1040 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
1041 dsr);
1042 needclear = 1;
1043 }
1044 if (xtensa->core_config->core_type == XT_NX && (xtensa_imprecise_exception_occurred(target))) {
1045 if (!xtensa->suppress_dsr_errors)
1046 LOG_TARGET_ERROR(target,
1047 "%s: Imprecise exception occurred!", target_name(target));
1048 needclear = 1;
1049 needimprclear = 1;
1050 }
1051 if (needclear) {
1052 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
1053 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
1054 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
1055 LOG_TARGET_ERROR(target, "clearing DSR failed!");
1056 if (xtensa->core_config->core_type == XT_NX && needimprclear)
1057 xtensa_imprecise_exception_clear(target);
1058 return ERROR_FAIL;
1059 }
1060 return ERROR_OK;
1061 }
1062
1063 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
1064 {
1065 struct xtensa *xtensa = target_to_xtensa(target);
1066 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1067 return xtensa_reg_get_value(reg);
1068 }
1069
1070 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
1071 {
1072 struct xtensa *xtensa = target_to_xtensa(target);
1073 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1074 if (xtensa_reg_get_value(reg) == value)
1075 return;
1076 xtensa_reg_set_value(reg, value);
1077 }
1078
1079 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
1080 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
1081 {
1082 struct xtensa *xtensa = target_to_xtensa(target);
1083 uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
1084 XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
1085 uint32_t windowbase = (xtensa->core_config->windowed ?
1086 xtensa_reg_get(target, wb_idx) : 0);
1087 if (xtensa->core_config->core_type == XT_NX)
1088 windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
1089 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
1090 xtensa_reg_set(target, a_idx, value);
1091 xtensa_reg_set(target, ar_idx, value);
1092 }
1093
1094 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
1095 uint32_t xtensa_cause_get(struct target *target)
1096 {
1097 struct xtensa *xtensa = target_to_xtensa(target);
1098 if (xtensa->core_config->core_type == XT_LX) {
1099 /* LX cause in DEBUGCAUSE */
1100 return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
1101 }
1102 if (xtensa->nx_stop_cause & DEBUGCAUSE_VALID)
1103 return xtensa->nx_stop_cause;
1104
1105 /* NX cause determined from DSR.StopCause */
1106 if (xtensa_dm_core_status_read(&xtensa->dbg_mod) != ERROR_OK) {
1107 LOG_TARGET_ERROR(target, "Read DSR error");
1108 } else {
1109 uint32_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
1110 /* NX causes are prioritized; only 1 bit can be set */
1111 switch ((dsr & OCDDSR_STOPCAUSE) >> OCDDSR_STOPCAUSE_SHIFT) {
1112 case OCDDSR_STOPCAUSE_DI:
1113 xtensa->nx_stop_cause = DEBUGCAUSE_DI;
1114 break;
1115 case OCDDSR_STOPCAUSE_SS:
1116 xtensa->nx_stop_cause = DEBUGCAUSE_IC;
1117 break;
1118 case OCDDSR_STOPCAUSE_IB:
1119 xtensa->nx_stop_cause = DEBUGCAUSE_IB;
1120 break;
1121 case OCDDSR_STOPCAUSE_B:
1122 case OCDDSR_STOPCAUSE_B1:
1123 xtensa->nx_stop_cause = DEBUGCAUSE_BI;
1124 break;
1125 case OCDDSR_STOPCAUSE_BN:
1126 xtensa->nx_stop_cause = DEBUGCAUSE_BN;
1127 break;
1128 case OCDDSR_STOPCAUSE_DB0:
1129 case OCDDSR_STOPCAUSE_DB1:
1130 xtensa->nx_stop_cause = DEBUGCAUSE_DB;
1131 break;
1132 default:
1133 LOG_TARGET_ERROR(target, "Unknown stop cause (DSR: 0x%08x)", dsr);
1134 break;
1135 }
1136 if (xtensa->nx_stop_cause)
1137 xtensa->nx_stop_cause |= DEBUGCAUSE_VALID;
1138 }
1139 return xtensa->nx_stop_cause;
1140 }
1141
1142 void xtensa_cause_clear(struct target *target)
1143 {
1144 struct xtensa *xtensa = target_to_xtensa(target);
1145 if (xtensa->core_config->core_type == XT_LX) {
1146 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
1147 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
1148 } else {
1149 /* NX DSR.STOPCAUSE is not writeable; clear cached copy but leave it valid */
1150 xtensa->nx_stop_cause = DEBUGCAUSE_VALID;
1151 }
1152 }
1153
1154 void xtensa_cause_reset(struct target *target)
1155 {
1156 /* Clear DEBUGCAUSE_VALID to trigger re-read (on NX) */
1157 struct xtensa *xtensa = target_to_xtensa(target);
1158 xtensa->nx_stop_cause = 0;
1159 }
1160
1161 int xtensa_assert_reset(struct target *target)
1162 {
1163 struct xtensa *xtensa = target_to_xtensa(target);
1164
1165 LOG_TARGET_DEBUG(target, " begin");
1166 xtensa_queue_pwr_reg_write(xtensa,
1167 XDMREG_PWRCTL,
1168 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
1169 PWRCTL_COREWAKEUP(xtensa) | PWRCTL_CORERESET(xtensa));
1170 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1171 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1172 if (res != ERROR_OK)
1173 return res;
1174
1175 /* registers are now invalid */
1176 xtensa->reset_asserted = true;
1177 register_cache_invalidate(xtensa->core_cache);
1178 target->state = TARGET_RESET;
1179 return ERROR_OK;
1180 }
1181
1182 int xtensa_deassert_reset(struct target *target)
1183 {
1184 struct xtensa *xtensa = target_to_xtensa(target);
1185
1186 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
1187 if (target->reset_halt)
1188 xtensa_queue_dbg_reg_write(xtensa,
1189 XDMREG_DCRSET,
1190 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1191 xtensa_queue_pwr_reg_write(xtensa,
1192 XDMREG_PWRCTL,
1193 PWRCTL_JTAGDEBUGUSE(xtensa) | PWRCTL_DEBUGWAKEUP(xtensa) | PWRCTL_MEMWAKEUP(xtensa) |
1194 PWRCTL_COREWAKEUP(xtensa));
1195 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1196 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1197 if (res != ERROR_OK)
1198 return res;
1199 target->state = TARGET_RUNNING;
1200 xtensa->reset_asserted = false;
1201 return res;
1202 }
1203
/* "Soft reset halt" is implemented here as a plain core reset assertion;
 * delegates entirely to xtensa_assert_reset(). */
int xtensa_soft_reset_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "begin");
	return xtensa_assert_reset(target);
}
1209
/* Read the entire register set from a just-halted core into the register cache.
 *
 * For speed, read operations are queued to the Debug Module and executed in
 * large batches, decoded afterwards from the 'regvals' array:
 *   1. AR registers in groups of 16, rotating the window between groups;
 *   2. CPENABLE (then temporarily set to all-ones so coprocessor registers
 *      become readable);
 *   3. special/user/FP registers, using A3 as scratch.
 * A3 (plus A0 and MS on NX) is saved up front and restored/marked dirty at the
 * end so the cache stays consistent with the hardware.
 * Returns ERROR_OK, or an allocation/queue-execution error code. */
int xtensa_fetch_all_regs(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg *reg_list = xtensa->core_cache->reg_list;
	unsigned int reg_list_size = xtensa->core_cache->num_regs;
	xtensa_reg_val_t cpenable = 0, windowbase = 0, a0 = 0, a3;
	unsigned int ms_idx = reg_list_size;	/* out-of-range sentinel; set to real index only on NX */
	uint32_t ms = 0;
	uint32_t woe;
	uint8_t a0_buf[4], a3_buf[4], ms_buf[4];
	/* Per-register DSR capture is only worth the extra traffic on the first
	 * fetch or when debug logging is on */
	bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);

	union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
	if (!regvals) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
		return ERROR_FAIL;
	}
	union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
	if (!dsrs) {
		LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
		free(regvals);
		return ERROR_FAIL;
	}

	LOG_TARGET_DEBUG(target, "start");

	/* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a3_buf);
	if (xtensa->core_config->core_type == XT_NX) {
		/* Save (windowed) A0 as well--it will be required for reading PC */
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A0));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, a0_buf);

		/* Set MS.DispSt, clear MS.DE prior to accessing ARs. This ensures ARs remain
		 * in correct order even for reversed register groups (overflow/underflow).
		 */
		ms_idx = xtensa->nx_reg_idx[XT_NX_REG_IDX_MS];
		uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, ms_regno, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, ms_buf);
		LOG_TARGET_DEBUG(target, "Overriding MS (0x%x): 0x%x", ms_regno, XT_MS_DISPST_DBG);
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, XT_MS_DISPST_DBG);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, ms_regno, XT_REG_A3));
	}

	int res = xtensa_window_state_save(target, &woe);
	if (res != ERROR_OK)
		goto xtensa_fetch_all_regs_done;

	/* Assume the CPU has just halted. We now want to fill the register cache with all the
	 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
	 * in one go, then sort everything out from the regvals variable. */

	/* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
	for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
		/* Grab the 16 registers we can see */
		for (unsigned int i = 0; i < 16; i++) {
			if (i + j < xtensa->core_config->aregs_num) {
				xtensa_queue_exec_ins(xtensa,
					XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
					regvals[XT_REG_IDX_AR0 + i + j].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR,
						dsrs[XT_REG_IDX_AR0 + i + j].buf);
			}
		}
		if (xtensa->core_config->windowed) {
			/* Now rotate the window so we'll see the next 16 registers. The final rotate
			 * will wraparound, leaving us in the state we were.
			 * Each ROTW rotates 4 registers on LX and 8 on NX */
			int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, rotw_arg));
		}
	}
	xtensa_window_state_restore(target, woe);

	if (xtensa->core_config->coproc) {
		/* As the very first thing after AREGS, go grab CPENABLE */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
	}
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to read ARs (%d)!", res);
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	a3 = buf_get_u32(a3_buf, 0, 32);
	if (xtensa->core_config->core_type == XT_NX) {
		a0 = buf_get_u32(a0_buf, 0, 32);
		ms = buf_get_u32(ms_buf, 0, 32);
	}

	if (xtensa->core_config->coproc) {
		cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);

		/* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));

		/* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
		LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
		xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	}
	/* We're now free to use any of A0-A15 as scratch registers
	 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			bool reg_fetched = true;
			unsigned int reg_num = rlist[ridx].reg_num;
			switch (rlist[ridx].type) {
			case XT_REG_USER:
				xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_FR:
				xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
				break;
			case XT_REG_SPECIAL:
				if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
					if (xtensa->core_config->core_type == XT_LX) {
						/* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
						reg_num = XT_EPC_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
						xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
					} else {
						/* NX PC read through CALL0(0) and reading A0 */
						xtensa_queue_exec_ins(xtensa, XT_INS_CALL0(xtensa, 0));
						xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A0));
						xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
						xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
						reg_fetched = false;
					}
				} else if ((xtensa->core_config->core_type == XT_LX)
					&& (reg_num == xtensa_regs[XT_REG_IDX_PS].reg_num)) {
					/* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
					reg_num = XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level;
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
				} else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
					/* CPENABLE already read/updated; don't re-read */
					reg_fetched = false;
					break;
				} else {
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
				}
				break;
			default:
				reg_fetched = false;
			}
			if (reg_fetched) {
				/* Common tail: move the value staged in A3 out through DDR */
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
				xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, XDMREG_DSR, dsrs[i].buf);
			}
		}
	}
	/* Ok, send the whole mess to the CPU. */
	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to fetch AR regs!");
		goto xtensa_fetch_all_regs_done;
	}
	xtensa_core_status_check(target);

	if (debug_dsrs) {
		/* DSR checking: follows order in which registers are requested. */
		for (unsigned int i = 0; i < reg_list_size; i++) {
			struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
			unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
			if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
				(rlist[ridx].type != XT_REG_DEBUG) &&
				(rlist[ridx].type != XT_REG_RELGEN) &&
				(rlist[ridx].type != XT_REG_TIE) &&
				(rlist[ridx].type != XT_REG_OTHER)) {
				if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
					LOG_ERROR("Exception reading %s!", reg_list[i].name);
					res = ERROR_FAIL;
					goto xtensa_fetch_all_regs_done;
				}
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* We need the windowbase to decode the general addresses. */
		uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
			XT_REG_IDX_WINDOWBASE : xtensa->nx_reg_idx[XT_NX_REG_IDX_WB];
		windowbase = buf_get_u32(regvals[wb_idx].buf, 0, 32);
		if (xtensa->core_config->core_type == XT_NX)
			windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
	}

	/* Decode the result and update the cache. */
	for (unsigned int i = 0; i < reg_list_size; i++) {
		struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
		unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
		if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
			if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
				/* The 64-value general register set is read from (windowbase) on down.
				 * We need to get the real register address by subtracting windowbase and
				 * wrapping around. */
				enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
					windowbase);
				buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
			} else if (rlist[ridx].type == XT_REG_RELGEN) {
				buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
				if (xtensa_extra_debug_log) {
					xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
					LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
				}
			} else {
				xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
				bool is_dirty = (i == XT_REG_IDX_CPENABLE);
				if (xtensa_extra_debug_log)
					LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
				if (rlist[ridx].reg_num == XT_PC_REG_NUM_VIRTUAL &&
					xtensa->core_config->core_type == XT_NX) {
					/* A0 from prior CALL0 points to next instruction; decrement it */
					regval -= 3;
					is_dirty = 1;
				} else if (i == ms_idx) {
					LOG_TARGET_DEBUG(target, "Caching MS: 0x%x", ms);
					regval = ms;
					is_dirty = 1;
				}
				xtensa_reg_set(target, i, regval);
				reg_list[i].dirty = is_dirty;	/* always do this _after_ xtensa_reg_set! */
			}
			reg_list[i].valid = true;
		} else {
			if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
				/* Report read-only registers all-zero but valid */
				reg_list[i].valid = true;
				xtensa_reg_set(target, i, 0);
			} else {
				reg_list[i].valid = false;
			}
		}
	}

	if (xtensa->core_config->windowed) {
		/* We have used A3 as a scratch register.
		 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
		 */
		enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
		xtensa_reg_set(target, ar3_idx, a3);
		xtensa_mark_register_dirty(xtensa, ar3_idx);

		/* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
		enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
		sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
		for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
			xtensa->scratch_ars[s].intval = false;
	}

	/* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
	xtensa_reg_set(target, XT_REG_IDX_A3, a3);
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	if (xtensa->core_config->core_type == XT_NX) {
		xtensa_reg_set(target, XT_REG_IDX_A0, a0);
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A0);
	}

	xtensa->regs_fetched = true;
xtensa_fetch_all_regs_done:
	free(regvals);
	free(dsrs);
	return res;
}
1488
1489 int xtensa_get_gdb_reg_list(struct target *target,
1490 struct reg **reg_list[],
1491 int *reg_list_size,
1492 enum target_register_class reg_class)
1493 {
1494 struct xtensa *xtensa = target_to_xtensa(target);
1495 unsigned int num_regs;
1496
1497 if (reg_class == REG_CLASS_GENERAL) {
1498 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1499 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1500 return ERROR_FAIL;
1501 }
1502 num_regs = xtensa->genpkt_regs_num;
1503 } else {
1504 /* Determine whether to return a contiguous or sparse register map */
1505 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1506 }
1507
1508 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1509
1510 *reg_list = calloc(num_regs, sizeof(struct reg *));
1511 if (!*reg_list)
1512 return ERROR_FAIL;
1513
1514 *reg_list_size = num_regs;
1515 if (xtensa->regmap_contiguous) {
1516 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1517 for (unsigned int i = 0; i < num_regs; i++)
1518 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1519 return ERROR_OK;
1520 }
1521
1522 for (unsigned int i = 0; i < num_regs; i++)
1523 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1524 unsigned int k = 0;
1525 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1526 if (xtensa->core_cache->reg_list[i].exist) {
1527 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1528 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1529 int sparse_idx = rlist[ridx].dbreg_num;
1530 if (i == XT_REG_IDX_PS && xtensa->core_config->core_type == XT_LX) {
1531 if (xtensa->eps_dbglevel_idx == 0) {
1532 LOG_ERROR("eps_dbglevel_idx not set\n");
1533 return ERROR_FAIL;
1534 }
1535 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1536 if (xtensa_extra_debug_log)
1537 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1538 sparse_idx, xtensa->core_config->debug.irq_level,
1539 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1540 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1541 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1542 } else {
1543 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1544 }
1545 if (i == XT_REG_IDX_PC)
1546 /* Make a duplicate copy of PC for external access */
1547 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1548 k++;
1549 }
1550 }
1551
1552 if (k == num_regs)
1553 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1554
1555 return ERROR_OK;
1556 }
1557
1558 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1559 {
1560 struct xtensa *xtensa = target_to_xtensa(target);
1561 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1562 xtensa->core_config->mmu.dtlb_entries_count > 0;
1563 return ERROR_OK;
1564 }
1565
1566 int xtensa_halt(struct target *target)
1567 {
1568 struct xtensa *xtensa = target_to_xtensa(target);
1569
1570 LOG_TARGET_DEBUG(target, "start");
1571 if (target->state == TARGET_HALTED) {
1572 LOG_TARGET_DEBUG(target, "target was already halted");
1573 return ERROR_OK;
1574 }
1575 /* First we have to read dsr and check if the target stopped */
1576 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1577 if (res != ERROR_OK) {
1578 LOG_TARGET_ERROR(target, "Failed to read core status!");
1579 return res;
1580 }
1581 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1582 if (!xtensa_is_stopped(target)) {
1583 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1584 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1585 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1586 if (res != ERROR_OK)
1587 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1588 }
1589
1590 return res;
1591 }
1592
/**
 * Prepare a halted target for resuming: adjust PC (or step over the halting
 * watchpoint/break), re-install the hardware breakpoints gdb expects, and
 * flush the dirty register cache back to the core.  Does NOT actually resume;
 * xtensa_do_resume() issues the RFDO afterwards.
 *
 * @param current            Non-zero: keep the current PC; zero: use 'address' (when non-zero).
 * @param address            New PC when 'current' is zero.
 * @param handle_breakpoints Forwarded to xtensa_do_step() for the step-over cases.
 * @param debug_execution    Only logged here; state selection happens in xtensa_resume().
 * @return ERROR_OK, ERROR_TARGET_NOT_HALTED, or the register write-back error.
 */
int xtensa_prepare_resume(struct target *target,
	int current,
	target_addr_t address,
	int handle_breakpoints,
	int debug_execution)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	uint32_t bpena = 0;	/* accumulates IBREAKENABLE bits, one per armed slot */

	LOG_TARGET_DEBUG(target,
		"current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
		current,
		address,
		handle_breakpoints,
		debug_execution);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
	/* A pending halt request is obsolete once we are resuming. */
	xtensa->halt_request = false;

	if (address && !current) {
		xtensa_reg_set(target, XT_REG_IDX_PC, address);
	} else {
		uint32_t cause = xtensa_cause_get(target);
		LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
			cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
		if (cause & DEBUGCAUSE_DB)
			/* We stopped due to a watchpoint. We can't just resume executing the
			 * instruction again because */
			/* that would trigger the watchpoint again. To fix this, we single-step,
			 * which ignores watchpoints. */
			xtensa_do_step(target, current, address, handle_breakpoints);
		if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
			/* We stopped due to a break instruction. We can't just resume executing the
			 * instruction again because */
			/* that would trigger the break again. To fix this, we single-step, which
			 * ignores break. */
			xtensa_do_step(target, current, address, handle_breakpoints);
	}

	/* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
	 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
	for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
		if (xtensa->hw_brps[slot]) {
			/* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
			xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
			if (xtensa->core_config->core_type == XT_NX)
				/* NX has no IBREAKENABLE; each slot is armed via its IBREAKC register */
				xtensa_reg_set(target, xtensa->nx_reg_idx[XT_NX_REG_IDX_IBREAKC0] + slot, XT_IBREAKC_FB);
			bpena |= BIT(slot);
		}
	}
	if (xtensa->core_config->core_type == XT_LX)
		xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);

	/* Here we write all registers to the targets */
	int res = xtensa_write_dirty_registers(target);
	if (res != ERROR_OK)
		LOG_TARGET_ERROR(target, "Failed to write back register cache.");
	return res;
}
1655
1656 int xtensa_do_resume(struct target *target)
1657 {
1658 struct xtensa *xtensa = target_to_xtensa(target);
1659
1660 LOG_TARGET_DEBUG(target, "start");
1661
1662 xtensa_cause_reset(target);
1663 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1664 int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1665 if (res != ERROR_OK) {
1666 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1667 return res;
1668 }
1669 xtensa_core_status_check(target);
1670 return ERROR_OK;
1671 }
1672
1673 int xtensa_resume(struct target *target,
1674 int current,
1675 target_addr_t address,
1676 int handle_breakpoints,
1677 int debug_execution)
1678 {
1679 LOG_TARGET_DEBUG(target, "start");
1680 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1681 if (res != ERROR_OK) {
1682 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1683 return res;
1684 }
1685 res = xtensa_do_resume(target);
1686 if (res != ERROR_OK) {
1687 LOG_TARGET_ERROR(target, "Failed to resume!");
1688 return res;
1689 }
1690
1691 target->debug_reason = DBG_REASON_NOTHALTED;
1692 if (!debug_execution)
1693 target->state = TARGET_RUNNING;
1694 else
1695 target->state = TARGET_DEBUG_RUNNING;
1696
1697 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1698
1699 return ERROR_OK;
1700 }
1701
1702 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1703 {
1704 struct xtensa *xtensa = target_to_xtensa(target);
1705 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1706 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1707 if (err != ERROR_OK)
1708 return false;
1709
1710 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1711 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1712 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1713 return true;
1714
1715 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1716 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1717 return true;
1718
1719 return false;
1720 }
1721
/**
 * Single-step one instruction on a halted target.
 *
 * On LX cores the step is driven by ICOUNT/ICOUNTLEVEL (ICOUNT = -2 causes a
 * debug exception after one instruction); on NX cores DCR.StepRequest is used.
 * Watchpoints (and, when requested, break causes) are temporarily cleared or
 * disabled so the step does not immediately re-trigger the original halt
 * cause.  Window exception handlers are stepped through transparently when
 * ISRs are masked (see the retry loop below).
 *
 * @param current            Non-zero: step from the current PC; zero: 'address' is used as PC.
 * @param address            New PC when 'current' is zero.
 * @param handle_breakpoints When set and the halt came from a break instruction,
 *                           PC is simply advanced past it (no real step occurs).
 * @return ERROR_OK, ERROR_TARGET_NOT_HALTED, or ERROR_FAIL on timeout/config problems.
 */
int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	const uint32_t icount_val = -2;	/* ICOUNT value to load for 1 step */
	xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];	/* saved watchpoint controls */
	xtensa_reg_val_t icountlvl, cause;
	xtensa_reg_val_t oldps, oldpc, cur_pc;
	bool ps_lowered = false;	/* set when PS.INTLEVEL was lowered and must be restored */

	LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
		current, address, handle_breakpoints);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* LX stepping manipulates EPS[dbglevel]; bail out if its index was never resolved. */
	if (xtensa->eps_dbglevel_idx == 0 && xtensa->core_config->core_type == XT_LX) {
		LOG_TARGET_ERROR(target, "eps_dbglevel_idx not set\n");
		return ERROR_FAIL;
	}

	/* Save old ps (EPS[dbglvl] on LX), pc */
	oldps = xtensa_reg_get(target, (xtensa->core_config->core_type == XT_LX) ?
		xtensa->eps_dbglevel_idx : XT_REG_IDX_PS);
	oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);

	cause = xtensa_cause_get(target);
	LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
		oldps,
		oldpc,
		cause,
		xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
	if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
		/* handle hard-coded SW breakpoints (e.g. syscalls) */
		LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* pretend that we have stepped */
		if (cause & DEBUGCAUSE_BI)
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3);	/* PC = PC+3 */
		else
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2);	/* PC = PC+2 */
		return ERROR_OK;
	}

	/* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
	 * at which the instructions are to be counted while stepping.
	 *
	 * For example, if we need to step by 2 instructions, and an interrupt occurs
	 * in between, the processor will trigger the interrupt and halt after the 2nd
	 * instruction within the interrupt vector and/or handler.
	 *
	 * However, sometimes we don't want the interrupt handlers to be executed at all
	 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
	 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
	 * code from being counted during stepping. Note that C exception handlers must
	 * run at level 0 and hence will be counted and stepped into, should one occur.
	 *
	 * TODO: Certain instructions should never be single-stepped and should instead
	 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
	 * RFI >= DBGLEVEL.
	 */
	if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_TARGET_WARNING(
				target,
				"disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
			return ERROR_FAIL;
		}
		/* Update ICOUNTLEVEL accordingly */
		icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
	} else {
		icountlvl = xtensa->core_config->debug.irq_level;
	}

	if (cause & DEBUGCAUSE_DB) {
		/* We stopped due to a watchpoint. We can't just resume executing the instruction again because
		 * that would trigger the watchpoint again. To fix this, we remove watchpoints,single-step and
		 * re-enable the watchpoint. */
		LOG_TARGET_DEBUG(
			target,
			"Single-stepping to get past instruction that triggered the watchpoint...");
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
		/* Save all DBREAKCx registers and set to 0 to disable watchpoints */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
			dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
		}
	}

	if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
		/* handle normal SW breakpoint */
		xtensa_cause_clear(target);	/* so we don't recurse into the same routine */
	if (xtensa->core_config->core_type == XT_LX && ((oldps & 0xf) >= icountlvl)) {
		/* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
		ps_lowered = true;
		uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
		LOG_TARGET_DEBUG(target,
			"Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			newps,
			oldps);
	}
	/* Retry loop: normally runs once; repeats only to step out of a window
	 * exception handler when ISRs are masked (see the 'continue' below). */
	do {
		if (xtensa->core_config->core_type == XT_LX) {
			xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
			xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
		} else {
			/* NX: request a single step through the Debug Control Register */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRSET, OCDDCR_STEPREQUEST);
		}

		/* Now that ICOUNT (LX) or DCR.StepRequest (NX) is set,
		 * we can resume as if we were going to run
		 */
		res = xtensa_prepare_resume(target, current, address, 0, 0);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
			return res;
		}
		res = xtensa_do_resume(target);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
			return res;
		}

		/* Wait for stepping to complete */
		long long start = timeval_ms();
		while (timeval_ms() < start + 500) {
			/* Do not use target_poll here, it also triggers other things... just manually read the DSR
			 *until stepping is complete. */
			usleep(1000);
			res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
			if (res != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Failed to read core status!");
				return res;
			}
			if (xtensa_is_stopped(target))
				break;
			usleep(1000);
		}
		LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
			xtensa_dm_core_status_get(&xtensa->dbg_mod));
		if (!xtensa_is_stopped(target)) {
			/* Timed out: the core is still running; report it as such and give up. */
			LOG_TARGET_WARNING(
				target,
				"Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
			target->debug_reason = DBG_REASON_NOTHALTED;
			target->state = TARGET_RUNNING;
			return ERROR_FAIL;
		}

		xtensa_fetch_all_regs(target);
		cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);

		LOG_TARGET_DEBUG(target,
			"cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
			xtensa_reg_get(target, XT_REG_IDX_PS),
			cur_pc,
			xtensa_cause_get(target),
			xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));

		/* Do not step into WindowOverflow if ISRs are masked.
		   If we stop in WindowOverflow at breakpoint with masked ISRs and
		   try to do a step it will get us out of that handler */
		if (xtensa->core_config->windowed &&
			xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
			xtensa_pc_in_winexc(target, cur_pc)) {
			/* isrmask = on, need to step out of the window exception handler */
			LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
			oldpc = cur_pc;
			address = oldpc + 3;
			continue;
		}

		if (oldpc == cur_pc)
			LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
		else
			LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
		break;
	} while (true);

	target->debug_reason = DBG_REASON_SINGLESTEP;
	target->state = TARGET_HALTED;
	LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);

	if (cause & DEBUGCAUSE_DB) {
		LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
		/* Restore the DBREAKCx registers */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
	}

	/* Restore int level */
	if (ps_lowered) {
		LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
			xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
			oldps);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
	}

	/* write ICOUNTLEVEL back to zero */
	xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
	/* TODO: can we skip writing dirty registers and re-fetching them? */
	res = xtensa_write_dirty_registers(target);
	xtensa_fetch_all_regs(target);
	return res;
}
1933
1934 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1935 {
1936 int retval = xtensa_do_step(target, current, address, handle_breakpoints);
1937 if (retval != ERROR_OK)
1938 return retval;
1939 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1940
1941 return ERROR_OK;
1942 }
1943
1944 /**
1945 * Returns true if two ranges are overlapping
1946 */
1947 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1948 target_addr_t r1_end,
1949 target_addr_t r2_start,
1950 target_addr_t r2_end)
1951 {
1952 if ((r2_start >= r1_start) && (r2_start < r1_end))
1953 return true; /* r2_start is in r1 region */
1954 if ((r2_end > r1_start) && (r2_end <= r1_end))
1955 return true; /* r2_end is in r1 region */
1956 return false;
1957 }
1958
1959 /**
1960 * Returns a size of overlapped region of two ranges.
1961 */
1962 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1963 target_addr_t r1_end,
1964 target_addr_t r2_start,
1965 target_addr_t r2_end)
1966 {
1967 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1968 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1969 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1970 return ov_end - ov_start;
1971 }
1972 return 0;
1973 }
1974
/**
 * Check if the address gets to memory regions, and its access mode.
 *
 * Walks the whole [address, address + size) range region by region and
 * verifies that every byte is covered by a configured memory region that
 * grants the requested access bits.
 *
 * @param xtensa  Target state.
 * @param address Start of the range to validate.
 * @param size    Length of the range in bytes.
 * @param access  Required access mask (XT_MEM_ACCESS_* bits).
 * @return true when the entire range is accessible as requested.
 */
static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
{
	target_addr_t adr_pos = address;	/* address cursor set to the beginning start */
	target_addr_t adr_end = address + size;	/* region end */
	target_addr_t overlap_size;
	const struct xtensa_local_mem_region_config *cm;	/* current mem region */

	while (adr_pos < adr_end) {
		cm = xtensa_target_memory_region_find(xtensa, adr_pos);
		if (!cm)	/* address is not belong to anything */
			return false;
		if ((cm->access & access) != access)	/* access check */
			return false;
		overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
		/* The region was found at adr_pos, so the overlap cannot be empty;
		 * this also guarantees forward progress of the loop. */
		assert(overlap_size != 0);
		adr_pos += overlap_size;
	}
	return true;
}
1997
/**
 * Read 'count' elements of 'size' bytes from target memory at 'address'.
 *
 * The hardware path reads 32-bit words via DDR (fast LDDR32.P when probed
 * available, otherwise L32I + WSR/RSR per word), so the request is widened
 * to word-aligned bounds into a scratch buffer and the caller's slice is
 * copied out at the end.
 *
 * @return ERROR_OK, ERROR_TARGET_NOT_HALTED, ERROR_FAIL (range not readable),
 *         or ERROR_TARGET_RESOURCE_NOT_AVAILABLE on allocation failure.
 */
int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	/* We are going to read memory in 32-bit increments. This may not be what the calling
	 * function expects, so we may need to allocate a temp buffer and read into that first. */
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
	target_addr_t adr = addrstart_al;
	uint8_t *albuff;
	bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Unless permissive mode is on, refuse reads outside configured regions. */
	if (!xtensa->permissive_mode) {
		if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
				XT_MEM_ACCESS_READ)) {
			LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
			return ERROR_FAIL;
		}
	}

	unsigned int alloc_bytes = ALIGN_UP(addrend_al - addrstart_al, sizeof(uint32_t));
	albuff = calloc(alloc_bytes, 1);
	if (!albuff) {
		LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
			addrend_al - addrstart_al);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* We're going to use A3 here */
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	/* Write start address to A3 */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	/* Now we can safely read data from addrstart_al up to addrend_al into albuff */
	if (xtensa->probe_lsddr32p != 0) {
		/* Fast path: LDDR32.P auto-increments A3; reading DDREXEC re-executes it.
		 * The last word is read via plain DDR so no extra load is triggered. */
		xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
			xtensa_queue_dbg_reg_read(xtensa,
				(adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
				&albuff[i]);
	} else {
		/* Slow path: explicit L32I per word, shuttled through DDR via A4. */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
			xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[i]);
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		}
	}
	int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res == ERROR_OK) {
		/* Suppress DSR noise while probing whether LDDR32.P actually works. */
		bool prev_suppress = xtensa->suppress_dsr_errors;
		xtensa->suppress_dsr_errors = true;
		res = xtensa_core_status_check(target);
		if (xtensa->probe_lsddr32p == -1)
			xtensa->probe_lsddr32p = 1;
		xtensa->suppress_dsr_errors = prev_suppress;
	}
	if (res != ERROR_OK) {
		if (xtensa->probe_lsddr32p != 0) {
			/* Disable fast memory access instructions and retry before reporting an error */
			LOG_TARGET_DEBUG(target, "Disabling LDDR32.P/SDDR32.P");
			xtensa->probe_lsddr32p = 0;
			/* NOTE(review): the recursive call fills albuff starting at offset 0
			 * with exactly the requested bytes, yet the memcpy below still adds
			 * the (address & 3) offset — looks wrong for unaligned retries;
			 * confirm against upstream behavior. */
			res = xtensa_read_memory(target, address, size, count, albuff);
			bswap = false;
		} else {
			LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
				count * size, address);
		}
	}

	if (bswap)
		buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
	/* Hand back only the slice the caller asked for. */
	memcpy(buffer, albuff + (address & 3), (size * count));
	free(albuff);
	return res;
}
2080
/**
 * Read 'count' bytes from target memory at 'address' into 'buffer'.
 * Thin wrapper with an element size of 1 byte.
 */
int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	/* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
	return xtensa_read_memory(target, address, 1, count, buffer);
}
2086
/**
 * Write 'count' elements of 'size' bytes to target memory at 'address'.
 *
 * The hardware path writes 32-bit aligned words (SDDR32.P when available,
 * otherwise S32I per word), so an unaligned request first reads back the
 * partial head/tail words, merges the caller's data into an aligned scratch
 * buffer, and writes that.  On success, caches covering the range are
 * maintained (ICACHE invalidate, DCACHE writeback+invalidate).
 *
 * @return ERROR_OK, ERROR_TARGET_NOT_HALTED, ERROR_COMMAND_SYNTAX_ERROR for
 *         degenerate arguments, ERROR_FAIL (range not writable), or
 *         ERROR_TARGET_RESOURCE_NOT_AVAILABLE on allocation failure.
 */
int xtensa_write_memory(struct target *target,
	target_addr_t address,
	uint32_t size,
	uint32_t count,
	const uint8_t *buffer)
{
	/* This memory write function can get thrown nigh everything into it, from
	 * aligned uint32 writes to unaligned uint8ths. The Xtensa memory doesn't always
	 * accept anything but aligned uint32 writes, though. That is why we convert
	 * everything into that. */
	struct xtensa *xtensa = target_to_xtensa(target);
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
	target_addr_t adr = addrstart_al;
	int res;
	uint8_t *albuff;
	bool fill_head_tail = false;	/* true when partial head/tail words must be read back first */

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Unless permissive mode is on, refuse writes outside configured regions. */
	if (!xtensa->permissive_mode) {
		if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
			LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
			return ERROR_FAIL;
		}
	}

	if (size == 0 || count == 0 || !buffer)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Allocate a temporary buffer to put the aligned bytes in, if needed. */
	if (addrstart_al == address && addrend_al == address + (size * count)) {
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
			/* Need a buffer for byte-swapping */
			albuff = malloc(addrend_al - addrstart_al);
		else
			/* We discard the const here because albuff can also be non-const */
			albuff = (uint8_t *)buffer;
	} else {
		fill_head_tail = true;
		albuff = malloc(addrend_al - addrstart_al);
	}
	if (!albuff) {
		LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
			addrend_al - addrstart_al);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* We're going to use A3 here */
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

	/* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
	if (fill_head_tail) {
		/* See if we need to read the first and/or last word. */
		if (address & 3) {
			/* Read the word containing the unaligned start into albuff[0..3]. */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, &albuff[0]);
		}
		if ((address + (size * count)) & 3) {
			/* Read the word containing the unaligned end into the last 4 bytes. */
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
			if (xtensa->probe_lsddr32p == 1) {
				xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
				xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
			}
			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR,
				&albuff[addrend_al - addrstart_al - 4]);
		}
		/* Grab bytes */
		res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		if (res != ERROR_OK) {
			LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
			if (albuff != buffer)
				free(albuff);
			return res;
		}
		xtensa_core_status_check(target);
		if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
			/* Head/tail words arrived byte-swapped; fix them before merging. */
			bool swapped_w0 = false;
			if (address & 3) {
				buf_bswap32(&albuff[0], &albuff[0], 4);
				swapped_w0 = true;
			}
			if ((address + (size * count)) & 3) {
				if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
					/* Don't double-swap if buffer start/end are within the same word */
				} else {
					buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
						&albuff[addrend_al - addrstart_al - 4], 4);
				}
			}
		}
		/* Copy data to be written into the aligned buffer (in host-endianness) */
		memcpy(&albuff[address & 3], buffer, size * count);
		/* Now we can write albuff in aligned uint32s. */
	}

	if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
		buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);

	/* Write start address to A3 */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrstart_al);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	/* Write the aligned buffer */
	if (xtensa->probe_lsddr32p != 0) {
		/* Fast path: first word executes SDDR32.P explicitly; subsequent writes
		 * to DDREXEC re-execute it with A3 auto-incrementing. */
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			if (i == 0) {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
				xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
			} else {
				xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
			}
		}
	} else {
		/* Slow path: explicit S32I per word, shuttled through DDR via A4. */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
		for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, buf_get_u32(&albuff[i], 0, 32));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
			xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		}
	}

	res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (res == ERROR_OK) {
		/* Suppress DSR noise while probing whether SDDR32.P actually works. */
		bool prev_suppress = xtensa->suppress_dsr_errors;
		xtensa->suppress_dsr_errors = true;
		res = xtensa_core_status_check(target);
		if (xtensa->probe_lsddr32p == -1)
			xtensa->probe_lsddr32p = 1;
		xtensa->suppress_dsr_errors = prev_suppress;
	}
	if (res != ERROR_OK) {
		if (xtensa->probe_lsddr32p != 0) {
			/* Disable fast memory access instructions and retry before reporting an error */
			LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
			xtensa->probe_lsddr32p = 0;
			res = xtensa_write_memory(target, address, size, count, buffer);
		} else {
			LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
				count * size, address);
		}
	} else {
		/* Invalidate ICACHE, writeback DCACHE if present */
		bool issue_ihi = xtensa_is_icacheable(xtensa, address) &&
			xtensa_region_ar_exec(target, addrstart_al, addrend_al);
		bool issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
		LOG_TARGET_DEBUG(target, "Cache OPs: IHI %d, DHWBI %d", issue_ihi, issue_dhwbi);
		if (issue_ihi || issue_dhwbi) {
			/* Step through the range at the smaller of the two line sizes so
			 * both caches see every line. */
			uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
			uint32_t dlinesize = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
			uint32_t linesize = MIN(ilinesize, dlinesize);
			uint32_t off = 0;
			adr = addrstart_al;

			while ((adr + off) < addrend_al) {
				if (off == 0) {
					/* Write start address to A3 */
					xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr);
					xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
				}
				if (issue_ihi)
					xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
				if (issue_dhwbi)
					xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
				off += linesize;
				if (off > 1020) {
					/* IHI, DHWB have 8-bit immediate operands (0..1020) */
					adr += off;
					off = 0;
				}
			}

			/* Execute cache WB/INV instructions */
			res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
			if (res != ERROR_OK)
				LOG_TARGET_ERROR(target,
					"Error queuing cache writeback/invaldate instruction(s): %d",
					res);
			res = xtensa_core_status_check(target);
			if (res != ERROR_OK)
				LOG_TARGET_ERROR(target,
					"Error issuing cache writeback/invaldate instruction(s): %d",
					res);
		}
	}
	if (albuff != buffer)
		free(albuff);

	return res;
}
2291
/**
 * Write 'count' bytes from 'buffer' to target memory at 'address'.
 * Thin wrapper with an element size of 1 byte.
 */
int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	/* xtensa_write_memory can handle everything. Just pass on to that. */
	return xtensa_write_memory(target, address, 1, count, buffer);
}
2297
/**
 * Target-side memory checksum hook — not implemented for Xtensa.
 * Always returns ERROR_FAIL; callers must fall back to other means.
 */
int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
{
	LOG_WARNING("not implemented yet");
	return ERROR_FAIL;
}
2303
2304 int xtensa_poll(struct target *target)
2305 {
2306 struct xtensa *xtensa = target_to_xtensa(target);
2307 if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
2308 target->state = TARGET_UNKNOWN;
2309 return ERROR_TARGET_NOT_EXAMINED;
2310 }
2311
2312 int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET(xtensa) |
2313 PWRSTAT_COREWASRESET(xtensa));
2314 if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
2315 LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2316 xtensa->dbg_mod.power_status.stat,
2317 PWRSTAT_DEBUGWASRESET(xtensa) | PWRSTAT_COREWASRESET(xtensa),
2318 xtensa->dbg_mod.power_status.stath);
2319 if (res != ERROR_OK)
2320 return res;
2321
2322 if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
2323 LOG_TARGET_INFO(target, "Debug controller was reset.");
2324 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
2325 if (res != ERROR_OK)
2326 return res;
2327 }
2328 if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
2329 LOG_TARGET_INFO(target, "Core was reset.");
2330 xtensa_dm_power_status_cache(&xtensa->dbg_mod);
2331 /* Enable JTAG, set reset if needed */
2332 res = xtensa_wakeup(target);
2333 if (res != ERROR_OK)
2334 return res;
2335
2336 uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2337 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
2338 if (res != ERROR_OK)
2339 return res;
2340 if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2341 LOG_TARGET_DEBUG(target,
2342 "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2343 prev_dsr,
2344 xtensa->dbg_mod.core_status.dsr);
2345 if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET(xtensa)) {
2346 /* if RESET state is persitent */
2347 target->state = TARGET_RESET;
2348 } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2349 LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2350 xtensa->dbg_mod.core_status.dsr,
2351 xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
2352 target->state = TARGET_UNKNOWN;
2353 if (xtensa->come_online_probes_num == 0)
2354 target->examined = false;
2355 else
2356 xtensa->come_online_probes_num--;
2357 } else if (xtensa_is_stopped(target)) {
2358 if (target->state != TARGET_HALTED) {
2359 enum target_state oldstate = target->state;
2360 target->state = TARGET_HALTED;
2361 /* Examine why the target has been halted */
2362 target->debug_reason = DBG_REASON_DBGRQ;
2363 xtensa_fetch_all_regs(target);
2364 /* When setting debug reason DEBUGCAUSE events have the following
2365 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2366 /* Watchpoint and breakpoint events at the same time results in special
2367 * debug reason: DBG_REASON_WPTANDBKPT. */
2368 uint32_t halt_cause = xtensa_cause_get(target);
2369 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2370 if (halt_cause & DEBUGCAUSE_IC)
2371 target->debug_reason = DBG_REASON_SINGLESTEP;
2372 if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2373 if (halt_cause & DEBUGCAUSE_DB)
2374 target->debug_reason = DBG_REASON_WPTANDBKPT;
2375 else
2376 target->debug_reason = DBG_REASON_BREAKPOINT;
2377 } else if (halt_cause & DEBUGCAUSE_DB) {
2378 target->debug_reason = DBG_REASON_WATCHPOINT;
2379 }
2380 LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2381 ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2382 xtensa_reg_get(target, XT_REG_IDX_PC),
2383 target->debug_reason,
2384 oldstate);
2385 LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2386 halt_cause,
2387 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
2388 xtensa->dbg_mod.core_status.dsr);
2389 xtensa_dm_core_status_clear(
2390 &xtensa->dbg_mod,
2391 OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
2392 OCDDSR_DEBUGINTTRAX |
2393 OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
2394 if (xtensa->core_config->core_type == XT_NX) {
2395 /* Enable imprecise exceptions while in halted state */
2396 xtensa_reg_val_t ps = xtensa_reg_get(target, XT_REG_IDX_PS);
2397 xtensa_reg_val_t newps = ps & ~(XT_PS_DIEXC_MSK);
2398 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_PS);
2399 LOG_TARGET_DEBUG(target, "Enabling PS.DIEXC: 0x%08x -> 0x%08x", ps, newps);
2400 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, newps);
2401 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2402 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
2403 res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2404 if (res != ERROR_OK) {
2405 LOG_TARGET_ERROR(target, "Failed to write PS.DIEXC (%d)!", res);
2406 return res;
2407 }
2408 xtensa_core_status_check(target);
2409 }
2410 }
2411 } else {
2412 target->debug_reason = DBG_REASON_NOTHALTED;
2413 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2414 target->state = TARGET_RUNNING;
2415 target->debug_reason = DBG_REASON_NOTHALTED;
2416 }
2417 }
2418 if (xtensa->trace_active) {
2419 /* Detect if tracing was active but has stopped. */
2420 struct xtensa_trace_status trace_status;
2421 res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2422 if (res == ERROR_OK) {
2423 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2424 LOG_INFO("Detected end of trace.");
2425 if (trace_status.stat & TRAXSTAT_PCMTG)
2426 LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2427 if (trace_status.stat & TRAXSTAT_PTITG)
2428 LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2429 if (trace_status.stat & TRAXSTAT_CTITG)
2430 LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2431 xtensa->trace_active = false;
2432 }
2433 }
2434 }
2435 return ERROR_OK;
2436 }
2437
/* Overwrite 'size' bytes of instruction memory at 'address' with 'buffer',
 * keeping the instruction and data caches coherent.
 *
 * Sequence (each cache step only when the region is cacheable):
 *  1. DHWBI: write back + invalidate the dcache line(s) covering the range;
 *  2. IHI:   invalidate the icache line(s);
 *  3. write the new instruction bytes to target memory;
 *  4. DHWB:  write back the dcache line(s) so the bytes written in step 3
 *            are visible to instruction fetch.
 * A3 is used as a scratch register for the cache ops and is marked dirty so
 * it is restored later.
 *
 * Returns ERROR_OK on success; ERROR_FAIL if 'size' exceeds one icache line.
 */
static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	/* icache invalidation is needed only if the range is cacheable AND executable */
	unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address) &&
		xtensa_region_ar_exec(target, address, address + size);
	unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
	/* UINT32_MAX line size when an op is not needed makes the
	 * "same line" checks below trivially true */
	uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
	uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
	/* Does [address, address + size) fit entirely within one cache line? */
	unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
	unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
	int ret;

	/* A write spanning more than one icache line is not supported here */
	if (size > icache_line_size)
		return ERROR_FAIL;

	if (issue_ihi || issue_dhwbi) {
		/* We're going to use A3 here */
		xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

		/* Write start address to A3 and invalidate */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		LOG_TARGET_DEBUG(target, "IHI %d, DHWBI %d for address " TARGET_ADDR_FMT,
			issue_ihi, issue_dhwbi, address);
		if (issue_dhwbi) {
			xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
			if (!same_dc_line) {
				/* Range straddles a dcache line boundary: hit the next line
				 * too (offset 4 from A3 falls into the following line) */
				LOG_TARGET_DEBUG(target,
					"DHWBI second dcache line for address "TARGET_ADDR_FMT,
					address + 4);
				xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
			}
		}
		if (issue_ihi) {
			xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
			if (!same_ic_line) {
				/* Same boundary handling for the icache */
				LOG_TARGET_DEBUG(target,
					"IHI second icache line for address "TARGET_ADDR_FMT,
					address + 4);
				xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
			}
		}

		/* Execute invalidate instructions */
		ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		xtensa_core_status_check(target);
		if (ret != ERROR_OK) {
			LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
			return ret;
		}
	}

	/* Write new instructions to memory */
	ret = target_write_buffer(target, address, size, buffer);
	if (ret != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
		return ret;
	}

	if (issue_dhwbi) {
		/* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
		xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
		LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
		if (!same_dc_line) {
			LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
		}

		/* Execute invalidate instructions */
		ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
		xtensa_core_status_check(target);
	}

	/* TODO: Handle L2 cache if present */
	return ret;
}
2516
2517 static int xtensa_sw_breakpoint_add(struct target *target,
2518 struct breakpoint *breakpoint,
2519 struct xtensa_sw_breakpoint *sw_bp)
2520 {
2521 struct xtensa *xtensa = target_to_xtensa(target);
2522 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2523 if (ret != ERROR_OK) {
2524 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2525 return ret;
2526 }
2527
2528 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2529 sw_bp->oocd_bp = breakpoint;
2530
2531 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2532
2533 /* Underlying memory write will convert instruction endianness, don't do that here */
2534 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2535 if (ret != ERROR_OK) {
2536 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2537 return ret;
2538 }
2539
2540 return ERROR_OK;
2541 }
2542
2543 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2544 {
2545 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2546 if (ret != ERROR_OK) {
2547 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2548 return ret;
2549 }
2550 sw_bp->oocd_bp = NULL;
2551 return ERROR_OK;
2552 }
2553
2554 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2555 {
2556 struct xtensa *xtensa = target_to_xtensa(target);
2557 unsigned int slot;
2558
2559 if (breakpoint->type == BKPT_SOFT) {
2560 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2561 if (!xtensa->sw_brps[slot].oocd_bp ||
2562 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2563 break;
2564 }
2565 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2566 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2567 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2568 }
2569 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2570 if (ret != ERROR_OK) {
2571 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2572 return ret;
2573 }
2574 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2575 slot,
2576 breakpoint->address);
2577 return ERROR_OK;
2578 }
2579
2580 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2581 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2582 break;
2583 }
2584 if (slot == xtensa->core_config->debug.ibreaks_num) {
2585 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2586 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2587 }
2588
2589 xtensa->hw_brps[slot] = breakpoint;
2590 /* We will actually write the breakpoints when we resume the target. */
2591 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2592 slot,
2593 breakpoint->address);
2594
2595 return ERROR_OK;
2596 }
2597
2598 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2599 {
2600 struct xtensa *xtensa = target_to_xtensa(target);
2601 unsigned int slot;
2602
2603 if (breakpoint->type == BKPT_SOFT) {
2604 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2605 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2606 break;
2607 }
2608 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2609 LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2610 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2611 }
2612 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2613 if (ret != ERROR_OK) {
2614 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2615 return ret;
2616 }
2617 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2618 return ERROR_OK;
2619 }
2620
2621 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2622 if (xtensa->hw_brps[slot] == breakpoint)
2623 break;
2624 }
2625 if (slot == xtensa->core_config->debug.ibreaks_num) {
2626 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2627 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2628 }
2629 xtensa->hw_brps[slot] = NULL;
2630 if (xtensa->core_config->core_type == XT_NX)
2631 xtensa_reg_set(target, xtensa->nx_reg_idx[XT_NX_REG_IDX_IBREAKC0] + slot, 0);
2632 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2633 return ERROR_OK;
2634 }
2635
2636 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2637 {
2638 struct xtensa *xtensa = target_to_xtensa(target);
2639 unsigned int slot;
2640 xtensa_reg_val_t dbreakcval;
2641
2642 if (target->state != TARGET_HALTED) {
2643 LOG_TARGET_ERROR(target, "not halted");
2644 return ERROR_TARGET_NOT_HALTED;
2645 }
2646
2647 if (watchpoint->mask != WATCHPOINT_IGNORE_DATA_VALUE_MASK) {
2648 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2649 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2650 }
2651
2652 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2653 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2654 break;
2655 }
2656 if (slot == xtensa->core_config->debug.dbreaks_num) {
2657 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2658 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2659 }
2660
2661 /* Figure out value for dbreakc5..0
2662 * It's basically 0x3F with an incremental bit removed from the LSB for each extra length power of 2. */
2663 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2664 !IS_PWR_OF_2(watchpoint->length) ||
2665 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2666 LOG_TARGET_WARNING(
2667 target,
2668 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2669 " not supported by hardware.",
2670 watchpoint->length,
2671 watchpoint->address);
2672 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2673 }
2674 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2675
2676 if (watchpoint->rw == WPT_READ)
2677 dbreakcval |= BIT(30);
2678 if (watchpoint->rw == WPT_WRITE)
2679 dbreakcval |= BIT(31);
2680 if (watchpoint->rw == WPT_ACCESS)
2681 dbreakcval |= BIT(30) | BIT(31);
2682
2683 /* Write DBREAKA[slot] and DBCREAKC[slot] */
2684 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2685 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2686 xtensa->hw_wps[slot] = watchpoint;
2687 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2688 watchpoint->address);
2689 return ERROR_OK;
2690 }
2691
2692 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2693 {
2694 struct xtensa *xtensa = target_to_xtensa(target);
2695 unsigned int slot;
2696
2697 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2698 if (xtensa->hw_wps[slot] == watchpoint)
2699 break;
2700 }
2701 if (slot == xtensa->core_config->debug.dbreaks_num) {
2702 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2703 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2704 }
2705 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2706 xtensa->hw_wps[slot] = NULL;
2707 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2708 watchpoint->address);
2709 return ERROR_OK;
2710 }
2711
/* Prepare and start a flasher/helper algorithm on the target.
 *
 * Backs up the full register context and the debug reason, optionally lowers
 * PS to debug_level - 1 (LX cores), writes the caller's memory and register
 * parameters, optionally switches the PS ring to the requested core mode,
 * then resumes execution at 'entry_point'.  The matching xtensa_wait_algorithm()
 * restores the saved context.
 *
 * Requires the target to be halted and 'arch_info' to point to a
 * struct xtensa_algorithm.  The algorithm must end on a software breakpoint
 * at 'exit_point'.
 */
int xtensa_start_algorithm(struct target *target,
	int num_mem_params, struct mem_param *mem_params,
	int num_reg_params, struct reg_param *reg_params,
	target_addr_t entry_point, target_addr_t exit_point,
	void *arch_info)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct xtensa_algorithm *algorithm_info = arch_info;
	int retval = ERROR_OK;
	bool usr_ps = false;
	uint32_t newps;

	/* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
	 * at the exit point */

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("Target not halted!");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Snapshot every cached register value so wait_algorithm can restore it */
	for (unsigned int i = 0; i < xtensa->core_cache->num_regs; i++) {
		struct reg *reg = &xtensa->core_cache->reg_list[i];
		buf_cpy(reg->value, xtensa->algo_context_backup[i], reg->size);
	}
	/* save debug reason, it will be changed */
	if (!algorithm_info) {
		LOG_ERROR("BUG: arch_info not specified");
		return ERROR_FAIL;
	}
	algorithm_info->ctx_debug_reason = target->debug_reason;
	if (xtensa->core_config->core_type == XT_LX) {
		/* save PS and set to debug_level - 1 */
		algorithm_info->ctx_ps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
		newps = (algorithm_info->ctx_ps & ~0xf) | (xtensa->core_config->debug.irq_level - 1);
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
	}
	/* write mem params */
	for (int i = 0; i < num_mem_params; i++) {
		if (mem_params[i].direction != PARAM_IN) {
			retval = target_write_buffer(target, mem_params[i].address,
				mem_params[i].size,
				mem_params[i].value);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	/* write reg params */
	for (int i = 0; i < num_reg_params; i++) {
		if (reg_params[i].size > 32) {
			LOG_ERROR("BUG: not supported register size (%d)", reg_params[i].size);
			return ERROR_FAIL;
		}
		struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
		if (!reg) {
			LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
			return ERROR_FAIL;
		}
		if (reg->size != reg_params[i].size) {
			LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
			return ERROR_FAIL;
		}
		/* NOTE(review): memcmp() != 0 means the name is NOT "ps", yet this is what
		 * sets usr_ps; that looks inverted relative to the "custom PS value" comment
		 * below, and it also means a caller-supplied "ps" never marks usr_ps.
		 * Confirm intended polarity before changing. */
		if (memcmp(reg_params[i].reg_name, "ps", 3)) {
			usr_ps = true;
		} else if (xtensa->core_config->core_type == XT_LX) {
			/* On LX, a "ps" write is redirected to the EPS register for the
			 * debug level, since that is what is restored on return from debug */
			unsigned int reg_id = xtensa->eps_dbglevel_idx;
			assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
			reg = &xtensa->core_cache->reg_list[reg_id];
		}
		xtensa_reg_set_value(reg, buf_get_u32(reg_params[i].value, 0, reg->size));
		reg->valid = 1;
	}
	/* ignore custom core mode if custom PS value is specified */
	if (!usr_ps && xtensa->core_config->core_type == XT_LX) {
		unsigned int eps_reg_idx = xtensa->eps_dbglevel_idx;
		xtensa_reg_val_t ps = xtensa_reg_get(target, eps_reg_idx);
		enum xtensa_mode core_mode = XT_PS_RING_GET(ps);
		if (algorithm_info->core_mode != XT_MODE_ANY && algorithm_info->core_mode != core_mode) {
			LOG_DEBUG("setting core_mode: 0x%x", algorithm_info->core_mode);
			xtensa_reg_val_t new_ps = (ps & ~XT_PS_RING_MSK) | XT_PS_RING(algorithm_info->core_mode);
			/* save previous core mode */
			/* TODO: core_mode is not restored for now. Can be added to the end of wait_algorithm */
			algorithm_info->core_mode = core_mode;
			xtensa_reg_set(target, eps_reg_idx, new_ps);
			xtensa->core_cache->reg_list[eps_reg_idx].valid = 1;
		}
	}

	return xtensa_resume(target, 0, entry_point, 1, 1);
}
2801
/** Waits for an algorithm started by xtensa_start_algorithm() to finish.
 *
 * Waits up to 'timeout_ms' for the target to halt on the algorithm's exit
 * breakpoint (forcing a halt if it does not), verifies PC reached
 * 'exit_point', copies out the requested register and memory parameters,
 * then restores the register context and debug reason saved by
 * xtensa_start_algorithm().  Returns ERROR_OK, ERROR_TARGET_TIMEOUT,
 * ERROR_FAIL, or the underlying read/write error code.
 */
int xtensa_wait_algorithm(struct target *target,
	int num_mem_params, struct mem_param *mem_params,
	int num_reg_params, struct reg_param *reg_params,
	target_addr_t exit_point, unsigned int timeout_ms,
	void *arch_info)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct xtensa_algorithm *algorithm_info = arch_info;
	int retval = ERROR_OK;
	xtensa_reg_val_t pc;

	/* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
	 * at the exit point */

	retval = target_wait_state(target, TARGET_HALTED, timeout_ms);
	/* If the target fails to halt due to the breakpoint, force a halt */
	if (retval != ERROR_OK || target->state != TARGET_HALTED) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
		retval = target_wait_state(target, TARGET_HALTED, 500);
		if (retval != ERROR_OK)
			return retval;
		LOG_TARGET_ERROR(target, "not halted %d, pc 0x%" PRIx32 ", ps 0x%" PRIx32, retval,
			xtensa_reg_get(target, XT_REG_IDX_PC),
			xtensa_reg_get(target, (xtensa->core_config->core_type == XT_LX) ?
				xtensa->eps_dbglevel_idx : XT_REG_IDX_PS));
		return ERROR_TARGET_TIMEOUT;
	}
	/* The algorithm must have stopped exactly at its exit breakpoint */
	pc = xtensa_reg_get(target, XT_REG_IDX_PC);
	if (exit_point && pc != exit_point) {
		LOG_ERROR("failed algorithm halted at 0x%" PRIx32 ", expected " TARGET_ADDR_FMT, pc, exit_point);
		return ERROR_TARGET_TIMEOUT;
	}
	/* Copy core register values to reg_params[] */
	for (int i = 0; i < num_reg_params; i++) {
		if (reg_params[i].direction != PARAM_OUT) {
			struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
			if (!reg) {
				LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
				return ERROR_FAIL;
			}
			if (reg->size != reg_params[i].size) {
				LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
				return ERROR_FAIL;
			}
			buf_set_u32(reg_params[i].value, 0, 32, xtensa_reg_get_value(reg));
		}
	}
	/* Read memory values to mem_params */
	LOG_DEBUG("Read mem params");
	for (int i = 0; i < num_mem_params; i++) {
		LOG_DEBUG("Check mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
		if (mem_params[i].direction != PARAM_OUT) {
			LOG_DEBUG("Read mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
			retval = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	/* avoid gdb keep_alive warning */
	keep_alive();

	/* Restore the context snapshot taken in xtensa_start_algorithm(); only
	 * registers whose value actually changed are marked dirty for write-back */
	for (int i = xtensa->core_cache->num_regs - 1; i >= 0; i--) {
		struct reg *reg = &xtensa->core_cache->reg_list[i];
		if (i == XT_REG_IDX_PS) {
			continue;	/* restore mapped reg number of PS depends on NDEBUGLEVEL */
		} else if (i == XT_REG_IDX_DEBUGCAUSE) {
			/*FIXME: restoring DEBUGCAUSE causes exception when executing corresponding
			 * instruction in DIR */
			LOG_DEBUG("Skip restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
				xtensa->core_cache->reg_list[i].name,
				buf_get_u32(reg->value, 0, 32),
				buf_get_u32(xtensa->algo_context_backup[i], 0, 32));
			buf_cpy(xtensa->algo_context_backup[i], reg->value, reg->size);
			xtensa->core_cache->reg_list[i].dirty = 0;
			xtensa->core_cache->reg_list[i].valid = 0;
		} else if (memcmp(xtensa->algo_context_backup[i], reg->value, reg->size / 8)) {
			if (reg->size <= 32) {
				LOG_DEBUG("restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
					xtensa->core_cache->reg_list[i].name,
					buf_get_u32(reg->value, 0, reg->size),
					buf_get_u32(xtensa->algo_context_backup[i], 0, reg->size));
			} else if (reg->size <= 64) {
				LOG_DEBUG("restoring register %s: 0x%8.8" PRIx64 " -> 0x%8.8" PRIx64,
					xtensa->core_cache->reg_list[i].name,
					buf_get_u64(reg->value, 0, reg->size),
					buf_get_u64(xtensa->algo_context_backup[i], 0, reg->size));
			} else {
				LOG_DEBUG("restoring register %s %u-bits", xtensa->core_cache->reg_list[i].name, reg->size);
			}
			buf_cpy(xtensa->algo_context_backup[i], reg->value, reg->size);
			xtensa->core_cache->reg_list[i].dirty = 1;
			xtensa->core_cache->reg_list[i].valid = 1;
		}
	}
	target->debug_reason = algorithm_info->ctx_debug_reason;
	/* On LX also restore the PS value saved before the algorithm ran */
	if (xtensa->core_config->core_type == XT_LX)
		xtensa_reg_set(target, xtensa->eps_dbglevel_idx, algorithm_info->ctx_ps);

	retval = xtensa_write_dirty_registers(target);
	if (retval != ERROR_OK)
		LOG_ERROR("Failed to write dirty regs (%d)!", retval);

	return retval;
}
2910
2911 int xtensa_run_algorithm(struct target *target,
2912 int num_mem_params, struct mem_param *mem_params,
2913 int num_reg_params, struct reg_param *reg_params,
2914 target_addr_t entry_point, target_addr_t exit_point,
2915 unsigned int timeout_ms, void *arch_info)
2916 {
2917 int retval = xtensa_start_algorithm(target,
2918 num_mem_params, mem_params,
2919 num_reg_params, reg_params,
2920 entry_point, exit_point,
2921 arch_info);
2922
2923 if (retval == ERROR_OK) {
2924 retval = xtensa_wait_algorithm(target,
2925 num_mem_params, mem_params,
2926 num_reg_params, reg_params,
2927 exit_point, timeout_ms,
2928 arch_info);
2929 }
2930
2931 return retval;
2932 }
2933
/* Build the register cache for an Xtensa target.
 *
 * Populates a reg_cache from the static core-register table (xtensa_regs)
 * plus the target-specific optional-register list (xtensa->optregs), then
 * allocates three auxiliary structures:
 *  - empty_regs:            placeholder entries ("?0xNNNN") returned for
 *                           debugger requests about unknown register numbers;
 *  - contiguous_regs_list:  reg pointers ordered to match the gdb g-packet
 *                           layout (only when the map is contiguous);
 *  - algo_context_backup:   per-register value buffers used by
 *                           xtensa_start/wait_algorithm.
 * On any allocation failure, everything allocated so far is released via the
 * shared 'fail' path and ERROR_FAIL is returned.
 */
static int xtensa_build_reg_cache(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	unsigned int last_dbreg_num = 0;

	if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
		LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
			xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);

	struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));

	if (!reg_cache) {
		LOG_ERROR("Failed to alloc reg cache!");
		return ERROR_FAIL;
	}
	reg_cache->name = "Xtensa registers";
	reg_cache->next = NULL;
	/* Init reglist */
	unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
	struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
	if (!reg_list) {
		LOG_ERROR("Failed to alloc reg list!");
		goto fail;
	}
	xtensa->dbregs_num = 0;
	unsigned int didx = 0;
	/* Pass 0 copies the fixed core-register table, pass 1 the optional regs;
	 * didx indexes the combined destination list across both passes */
	for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
		struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
		unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
		for (unsigned int i = 0; i < listsize; i++, didx++) {
			reg_list[didx].exist = rlist[i].exist;
			reg_list[didx].name = rlist[i].name;
			reg_list[didx].size = 32;
			reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/);	/* make Clang Static Analyzer happy */
			if (!reg_list[didx].value) {
				LOG_ERROR("Failed to alloc reg list value!");
				goto fail;
			}
			reg_list[didx].dirty = false;
			reg_list[didx].valid = false;
			reg_list[didx].type = &xtensa_reg_type;
			reg_list[didx].arch_info = xtensa;
			/* Track the highest debug-register number among existing regs */
			if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
				last_dbreg_num = rlist[i].dbreg_num;

			if (xtensa_extra_debug_log) {
				LOG_TARGET_DEBUG(target,
					"POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
					reg_list[didx].name,
					whichlist,
					reg_list[didx].exist,
					didx,
					rlist[i].type,
					rlist[i].dbreg_num);
			}
		}
	}

	xtensa->dbregs_num = last_dbreg_num + 1;
	reg_cache->reg_list = reg_list;
	reg_cache->num_regs = reg_list_size;

	LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
		xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);

	/* Construct empty-register list for handling unknown register requests */
	xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
	if (!xtensa->empty_regs) {
		LOG_TARGET_ERROR(target, "ERROR: Out of memory");
		goto fail;
	}
	for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
		/* 8 bytes: '?', "0x", 4 hex digits, NUL (the mask keeps i to 4 digits) */
		xtensa->empty_regs[i].name = calloc(8, sizeof(char));
		if (!xtensa->empty_regs[i].name) {
			LOG_TARGET_ERROR(target, "ERROR: Out of memory");
			goto fail;
		}
		sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
		xtensa->empty_regs[i].size = 32;
		xtensa->empty_regs[i].type = &xtensa_reg_type;
		xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/);	/* make Clang Static Analyzer happy */
		if (!xtensa->empty_regs[i].value) {
			LOG_ERROR("Failed to alloc empty reg list value!");
			goto fail;
		}
		xtensa->empty_regs[i].arch_info = xtensa;
	}

	/* Construct contiguous register list from contiguous descriptor list */
	if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
		xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
		if (!xtensa->contiguous_regs_list) {
			LOG_TARGET_ERROR(target, "ERROR: Out of memory");
			goto fail;
		}
		for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
			unsigned int j;
			/* Match each contiguous descriptor to its cache entry by name */
			for (j = 0; j < reg_cache->num_regs; j++) {
				if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
					/*	Register number field is not filled above.
						Here we are assigning the corresponding index from the contiguous reg list.
						These indexes are in the same order with gdb g-packet request/response.
						Some more changes may be required for sparse reg lists.
					*/
					reg_cache->reg_list[j].number = i;
					xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
					LOG_TARGET_DEBUG(target,
						"POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
						xtensa->contiguous_regs_list[i]->name,
						xtensa->contiguous_regs_desc[i]->dbreg_num);
					break;
				}
			}
			if (j == reg_cache->num_regs)
				LOG_TARGET_WARNING(target, "contiguous register %s not found",
					xtensa->contiguous_regs_desc[i]->name);
		}
	}

	/* Per-register backup buffers for xtensa_start/wait_algorithm */
	xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
	if (!xtensa->algo_context_backup) {
		LOG_ERROR("Failed to alloc mem for algorithm context backup!");
		goto fail;
	}
	for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
		struct reg *reg = &reg_cache->reg_list[i];
		xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
		if (!xtensa->algo_context_backup[i]) {
			LOG_ERROR("Failed to alloc mem for algorithm context!");
			goto fail;
		}
	}
	xtensa->core_cache = reg_cache;
	if (cache_p)
		*cache_p = reg_cache;
	return ERROR_OK;

fail:
	/* Shared cleanup: calloc zero-fill guarantees unset pointers are NULL,
	 * so free() on partially-initialized lists is safe */
	if (reg_list) {
		for (unsigned int i = 0; i < reg_list_size; i++)
			free(reg_list[i].value);
		free(reg_list);
	}
	if (xtensa->empty_regs) {
		for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
			free((void *)xtensa->empty_regs[i].name);
			free(xtensa->empty_regs[i].value);
		}
		free(xtensa->empty_regs);
	}
	if (xtensa->algo_context_backup) {
		for (unsigned int i = 0; i < reg_cache->num_regs; i++)
			free(xtensa->algo_context_backup[i]);
		free(xtensa->algo_context_backup);
	}
	free(reg_cache);

	return ERROR_FAIL;
}
3094
3095 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
3096 {
3097 struct xtensa *xtensa = target_to_xtensa(target);
3098 int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
3099 /* Process op[] list */
3100 while (opstr && (*opstr == ':')) {
3101 uint8_t ops[32];
3102 unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
3103 if (oplen > 32) {
3104 LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
3105 break;
3106 }
3107 unsigned int i = 0;
3108 while ((i < oplen) && opstr && (*opstr == ':'))
3109 ops[i++] = strtoul(opstr + 1, &opstr, 16);
3110 if (i != oplen) {
3111 LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
3112 break;
3113 }
3114
3115 char insn_buf[128];
3116 sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
3117 for (i = 0; i < oplen; i++)
3118 sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
3119 LOG_TARGET_DEBUG(target, "%s", insn_buf);
3120 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3121 status = ERROR_OK;
3122 }
3123 return status;
3124 }
3125
3126 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
3127 {
3128 struct xtensa *xtensa = target_to_xtensa(target);
3129 bool iswrite = (packet[0] == 'Q');
3130 enum xtensa_qerr_e error;
3131
3132 /* Read/write TIE register. Requires spill location.
3133 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
3134 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
3135 */
3136 if (!(xtensa->spill_buf)) {
3137 LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
3138 error = XT_QERR_FAIL;
3139 goto xtensa_gdbqc_qxtreg_fail;
3140 }
3141
3142 char *delim;
3143 uint32_t regnum = strtoul(packet + 6, &delim, 16);
3144 if (*delim != ':') {
3145 LOG_ERROR("Malformed qxtreg packet");
3146 error = XT_QERR_INVAL;
3147 goto xtensa_gdbqc_qxtreg_fail;
3148 }
3149 uint32_t reglen = strtoul(delim + 1, &delim, 16);
3150 if (*delim != ':') {
3151 LOG_ERROR("Malformed qxtreg packet");
3152 error = XT_QERR_INVAL;
3153 goto xtensa_gdbqc_qxtreg_fail;
3154 }
3155 uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
3156 memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
3157 LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
3158 if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
3159 LOG_ERROR("TIE register too large");
3160 error = XT_QERR_MEM;
3161 goto xtensa_gdbqc_qxtreg_fail;
3162 }
3163
3164 /* (1) Save spill memory, (1.5) [if write then store value to spill location],
3165 * (2) read old a4, (3) write spill address to a4.
3166 * NOTE: ensure a4 is restored properly by all error handling logic
3167 */
3168 unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
3169 int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
3170 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
3171 if (status != ERROR_OK) {
3172 LOG_ERROR("Spill memory save");
3173 error = XT_QERR_MEM;
3174 goto xtensa_gdbqc_qxtreg_fail;
3175 }
3176 if (iswrite) {
3177 /* Extract value and store in spill memory */
3178 unsigned int b = 0;
3179 char *valbuf = strchr(delim, '=');
3180 if (!(valbuf && (*valbuf == '='))) {
3181 LOG_ERROR("Malformed Qxtreg packet");
3182 error = XT_QERR_INVAL;
3183 goto xtensa_gdbqc_qxtreg_fail;
3184 }
3185 valbuf++;
3186 while (*valbuf && *(valbuf + 1)) {
3187 char bytestr[3] = { 0, 0, 0 };
3188 strncpy(bytestr, valbuf, 2);
3189 regbuf[b++] = strtoul(bytestr, NULL, 16);
3190 valbuf += 2;
3191 }
3192 if (b != reglen) {
3193 LOG_ERROR("Malformed Qxtreg packet");
3194 error = XT_QERR_INVAL;
3195 goto xtensa_gdbqc_qxtreg_fail;
3196 }
3197 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
3198 reglen / memop_size, regbuf);
3199 if (status != ERROR_OK) {
3200 LOG_ERROR("TIE value store");
3201 error = XT_QERR_MEM;
3202 goto xtensa_gdbqc_qxtreg_fail;
3203 }
3204 }
3205 xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
3206 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, xtensa->spill_loc);
3207 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
3208
3209 int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
3210
3211 /* Restore a4 but not yet spill memory. Execute it all... */
3212 xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, orig_a4);
3213 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
3214 status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
3215 if (status != ERROR_OK) {
3216 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
3217 tieop_status = status;
3218 }
3219 status = xtensa_core_status_check(target);
3220 if (status != ERROR_OK) {
3221 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
3222 tieop_status = status;
3223 }
3224
3225 if (tieop_status == ERROR_OK) {
3226 if (iswrite) {
3227 /* TIE write succeeded; send OK */
3228 strcpy(*response_p, "OK");
3229 } else {
3230 /* TIE read succeeded; copy result from spill memory */
3231 status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
3232 if (status != ERROR_OK) {
3233 LOG_TARGET_ERROR(target, "TIE result read");
3234 tieop_status = status;
3235 }
3236 unsigned int i;
3237 for (i = 0; i < reglen; i++)
3238 sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
3239 *(*response_p + 2 * i) = '\0';
3240 LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
3241 }
3242 }
3243
3244 /* Restore spill memory first, then report any previous errors */
3245 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
3246 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
3247 if (status != ERROR_OK) {
3248 LOG_ERROR("Spill memory restore");
3249 error = XT_QERR_MEM;
3250 goto xtensa_gdbqc_qxtreg_fail;
3251 }
3252 if (tieop_status != ERROR_OK) {
3253 LOG_ERROR("TIE execution");
3254 error = XT_QERR_FAIL;
3255 goto xtensa_gdbqc_qxtreg_fail;
3256 }
3257 return ERROR_OK;
3258
3259 xtensa_gdbqc_qxtreg_fail:
3260 strcpy(*response_p, xt_qerr[error].chrval);
3261 return xt_qerr[error].intval;
3262 }
3263
3264 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
3265 {
3266 struct xtensa *xtensa = target_to_xtensa(target);
3267 enum xtensa_qerr_e error;
3268 if (!packet || !response_p) {
3269 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
3270 return ERROR_FAIL;
3271 }
3272
3273 *response_p = xtensa->qpkt_resp;
3274 if (strncmp(packet, "qxtn", 4) == 0) {
3275 strcpy(*response_p, "OpenOCD");
3276 return ERROR_OK;
3277 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
3278 return ERROR_OK;
3279 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
3280 /* Confirm host cache params match core .cfg file */
3281 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
3282 &xtensa->core_config->icache : &xtensa->core_config->dcache;
3283 unsigned int line_size = 0, size = 0, way_count = 0;
3284 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
3285 if ((cachep->line_size != line_size) ||
3286 (cachep->size != size) ||
3287 (cachep->way_count != way_count)) {
3288 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
3289 cachep == &xtensa->core_config->icache ? 'I' : 'D');
3290 }
3291 strcpy(*response_p, "OK");
3292 return ERROR_OK;
3293 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
3294 /* Confirm host IRAM/IROM params match core .cfg file */
3295 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
3296 &xtensa->core_config->iram : &xtensa->core_config->irom;
3297 unsigned int base = 0, size = 0, i;
3298 char *pkt = (char *)&packet[7];
3299 do {
3300 pkt++;
3301 size = strtoul(pkt, &pkt, 16);
3302 pkt++;
3303 base = strtoul(pkt, &pkt, 16);
3304 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
3305 for (i = 0; i < memp->count; i++) {
3306 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
3307 break;
3308 }
3309 if (i == memp->count) {
3310 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
3311 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
3312 break;
3313 }
3314 for (i = 0; i < 11; i++) {
3315 pkt++;
3316 strtoul(pkt, &pkt, 16);
3317 }
3318 } while (pkt && (pkt[0] == ','));
3319 strcpy(*response_p, "OK");
3320 return ERROR_OK;
3321 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
3322 /* Confirm host EXCM_LEVEL matches core .cfg file */
3323 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
3324 if (!xtensa->core_config->high_irq.enabled ||
3325 (excm_level != xtensa->core_config->high_irq.excm_level))
3326 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
3327 strcpy(*response_p, "OK");
3328 return ERROR_OK;
3329 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
3330 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
3331 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
3332 strcpy(*response_p, "OK");
3333 return ERROR_OK;
3334 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
3335 char *delim;
3336 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
3337 if (*delim != ':') {
3338 LOG_ERROR("Malformed Qxtspill packet");
3339 error = XT_QERR_INVAL;
3340 goto xtensa_gdb_query_custom_fail;
3341 }
3342 xtensa->spill_loc = spill_loc;
3343 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
3344 if (xtensa->spill_buf)
3345 free(xtensa->spill_buf);
3346 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
3347 if (!xtensa->spill_buf) {
3348 LOG_ERROR("Spill buf alloc");
3349 error = XT_QERR_MEM;
3350 goto xtensa_gdb_query_custom_fail;
3351 }
3352 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
3353 strcpy(*response_p, "OK");
3354 return ERROR_OK;
3355 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
3356 return xtensa_gdbqc_qxtreg(target, packet, response_p);
3357 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
3358 (strncmp(packet, "qxtftie", 7) == 0) ||
3359 (strncmp(packet, "qxtstie", 7) == 0)) {
3360 /* Return empty string to indicate trace, TIE wire debug are unsupported */
3361 strcpy(*response_p, "");
3362 return ERROR_OK;
3363 }
3364
3365 /* Warn for all other queries, but do not return errors */
3366 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
3367 strcpy(*response_p, "");
3368 return ERROR_OK;
3369
3370 xtensa_gdb_query_custom_fail:
3371 strcpy(*response_p, xt_qerr[error].chrval);
3372 return xt_qerr[error].intval;
3373 }
3374
3375 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
3376 const struct xtensa_debug_module_config *dm_cfg)
3377 {
3378 target->arch_info = xtensa;
3379 xtensa->common_magic = XTENSA_COMMON_MAGIC;
3380 xtensa->target = target;
3381 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
3382
3383 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
3384 if (!xtensa->core_config) {
3385 LOG_ERROR("Xtensa configuration alloc failed\n");
3386 return ERROR_FAIL;
3387 }
3388
3389 /* Default cache settings are disabled with 1 way */
3390 xtensa->core_config->icache.way_count = 1;
3391 xtensa->core_config->dcache.way_count = 1;
3392
3393 /* chrval: AR3/AR4 register names will change with window mapping.
3394 * intval: tracks whether scratch register was set through gdb P packet.
3395 */
3396 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
3397 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
3398 if (!xtensa->scratch_ars[s].chrval) {
3399 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
3400 free(xtensa->scratch_ars[f].chrval);
3401 free(xtensa->core_config);
3402 LOG_ERROR("Xtensa scratch AR alloc failed\n");
3403 return ERROR_FAIL;
3404 }
3405 xtensa->scratch_ars[s].intval = false;
3406 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
3407 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
3408 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
3409 }
3410
3411 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
3412 }
3413
3414 void xtensa_set_permissive_mode(struct target *target, bool state)
3415 {
3416 target_to_xtensa(target)->permissive_mode = state;
3417 }
3418
3419 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
3420 {
3421 struct xtensa *xtensa = target_to_xtensa(target);
3422
3423 xtensa->come_online_probes_num = 3;
3424 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
3425 if (!xtensa->hw_brps) {
3426 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
3427 return ERROR_FAIL;
3428 }
3429 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
3430 if (!xtensa->hw_wps) {
3431 free(xtensa->hw_brps);
3432 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
3433 return ERROR_FAIL;
3434 }
3435 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
3436 if (!xtensa->sw_brps) {
3437 free(xtensa->hw_brps);
3438 free(xtensa->hw_wps);
3439 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
3440 return ERROR_FAIL;
3441 }
3442
3443 xtensa->spill_loc = 0xffffffff;
3444 xtensa->spill_bytes = 0;
3445 xtensa->spill_buf = NULL;
3446 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
3447
3448 return xtensa_build_reg_cache(target);
3449 }
3450
3451 static void xtensa_free_reg_cache(struct target *target)
3452 {
3453 struct xtensa *xtensa = target_to_xtensa(target);
3454 struct reg_cache *cache = xtensa->core_cache;
3455
3456 if (cache) {
3457 register_unlink_cache(&target->reg_cache, cache);
3458 for (unsigned int i = 0; i < cache->num_regs; i++) {
3459 free(xtensa->algo_context_backup[i]);
3460 free(cache->reg_list[i].value);
3461 }
3462 free(xtensa->algo_context_backup);
3463 free(cache->reg_list);
3464 free(cache);
3465 }
3466 xtensa->core_cache = NULL;
3467 xtensa->algo_context_backup = NULL;
3468
3469 if (xtensa->empty_regs) {
3470 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
3471 free((void *)xtensa->empty_regs[i].name);
3472 free(xtensa->empty_regs[i].value);
3473 }
3474 free(xtensa->empty_regs);
3475 }
3476 xtensa->empty_regs = NULL;
3477 if (xtensa->optregs) {
3478 for (unsigned int i = 0; i < xtensa->num_optregs; i++)
3479 free((void *)xtensa->optregs[i].name);
3480 free(xtensa->optregs);
3481 }
3482 xtensa->optregs = NULL;
3483 }
3484
3485 void xtensa_target_deinit(struct target *target)
3486 {
3487 struct xtensa *xtensa = target_to_xtensa(target);
3488
3489 LOG_DEBUG("start");
3490
3491 if (target_was_examined(target)) {
3492 int ret = xtensa_queue_dbg_reg_write(xtensa, XDMREG_DCRCLR, OCDDCR_ENABLEOCD);
3493 if (ret != ERROR_OK) {
3494 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
3495 return;
3496 }
3497 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
3498 ret = xtensa_dm_queue_execute(&xtensa->dbg_mod);
3499 if (ret != ERROR_OK) {
3500 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
3501 return;
3502 }
3503 xtensa_dm_deinit(&xtensa->dbg_mod);
3504 }
3505 xtensa_free_reg_cache(target);
3506 free(xtensa->hw_brps);
3507 free(xtensa->hw_wps);
3508 free(xtensa->sw_brps);
3509 if (xtensa->spill_buf) {
3510 free(xtensa->spill_buf);
3511 xtensa->spill_buf = NULL;
3512 }
3513 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
3514 free(xtensa->scratch_ars[s].chrval);
3515 free(xtensa->core_config);
3516 }
3517
/* Report the GDB architecture name; the same for every Xtensa target. */
const char *xtensa_get_gdb_arch(const struct target *target)
{
	static const char gdb_arch_name[] = "xtensa";
	return gdb_arch_name;
}
3522
3523 /* exe <ascii-encoded hexadecimal instruction bytes> */
/* Execute an arbitrary, hex-encoded instruction sequence (up to 31 bytes)
 * on the halted target. EXCCAUSE, CPENABLE and A3 are saved beforehand and
 * restored afterwards; the register cache is refetched after execution.
 * Returns the execution status from the debug-module queue / core check.
 */
static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Process ascii-encoded hex byte string */
	const char *parm = CMD_ARGV[0];
	unsigned int parm_len = strlen(parm);
	if ((parm_len >= 64) || (parm_len & 1)) {
		command_print(CMD, "Invalid parameter length (%d): must be even, < 64 characters", parm_len);
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	/* Decode two hex characters per opcode byte into ops[] */
	uint8_t ops[32];
	memset(ops, 0, 32);
	unsigned int oplen = parm_len / 2;
	char encoded_byte[3] = { 0, 0, 0 };
	for (unsigned int i = 0; i < oplen; i++) {
		encoded_byte[0] = *parm++;
		encoded_byte[1] = *parm++;
		ops[i] = strtoul(encoded_byte, NULL, 16);
	}

	/* GDB must handle state save/restore.
	 * Flush reg cache in case spill location is in an AR
	 * Update CPENABLE only for this execution; later restore cached copy
	 * Keep a copy of exccause in case executed code triggers an exception
	 */
	int status = xtensa_write_dirty_registers(target);
	if (status != ERROR_OK) {
		command_print(CMD, "%s: Failed to write back register cache.", target_name(target));
		return ERROR_FAIL;
	}
	xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
	xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
	xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
	/* Enable all coprocessors via A3 (DDR -> A3 -> CPENABLE), then restore A3 */
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xffffffff);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
	xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
		xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
	xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, a3);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));

	/* Queue instruction list and execute everything */
	LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
	xtensa_queue_exec_ins_wide(xtensa, ops, oplen);	/* Handles endian-swap */
	status = xtensa_dm_queue_execute(&xtensa->dbg_mod);
	if (status != ERROR_OK) {
		command_print(CMD, "exec: queue error %d", status);
	} else {
		status = xtensa_core_status_check(target);
		if (status != ERROR_OK)
			command_print(CMD, "exec: status error %d", status);
	}

	/* Reread register cache and restore saved regs after instruction execution */
	if (xtensa_fetch_all_regs(target) != ERROR_OK)
		command_print(CMD, "post-exec: register fetch error");
	if (status != ERROR_OK) {
		command_print(CMD, "post-exec: EXCCAUSE 0x%02" PRIx32,
			xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
	}
	xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
	xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
	return status;
}
3592
/* Handler for "xtensa exe": forwards to the helper for the current target. */
COMMAND_HANDLER(xtensa_cmd_exe)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
}
3597
3598 /* xtdef <name> */
3599 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3600 {
3601 if (CMD_ARGC != 1)
3602 return ERROR_COMMAND_SYNTAX_ERROR;
3603
3604 const char *core_name = CMD_ARGV[0];
3605 if (strcasecmp(core_name, "LX") == 0) {
3606 xtensa->core_config->core_type = XT_LX;
3607 } else if (strcasecmp(core_name, "NX") == 0) {
3608 xtensa->core_config->core_type = XT_NX;
3609 } else {
3610 command_print(CMD, "xtdef [LX|NX]\n");
3611 return ERROR_COMMAND_ARGUMENT_INVALID;
3612 }
3613 return ERROR_OK;
3614 }
3615
/* Handler for "xtensa xtdef": operates on the current target. */
COMMAND_HANDLER(xtensa_cmd_xtdef)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
3621
/* Check that xtopt value "val" for option "opt" lies within [min..max].
 * Logs an error and returns false when out of range; true otherwise.
 */
static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
{
	if ((val < min) || (val > max)) {
		/* LOG_ERROR() appends the newline itself; no trailing '\n' */
		LOG_ERROR("xtopt %s (%d) out of range [%d..%d]", opt, val, min, max);
		return false;
	}
	return true;
}
3630
3631 /* xtopt <name> <value> */
/* Helper for "xtensa xtopt <name> <value>": record one core-configuration
 * option in xtensa->core_config. Values are range-checked by
 * xtensa_cmd_xtopt_legal_val(); unknown option names are ignored with a
 * warning (not an error), so newer .cfg files remain loadable.
 */
COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
{
	if (CMD_ARGC != 2)
		return ERROR_COMMAND_SYNTAX_ERROR;

	const char *opt_name = CMD_ARGV[0];
	int opt_val = strtol(CMD_ARGV[1], NULL, 0);
	if (strcasecmp(opt_name, "arnum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->aregs_num = opt_val;
	} else if (strcasecmp(opt_name, "windowed") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->windowed = opt_val;
	} else if (strcasecmp(opt_name, "cpenable") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->coproc = opt_val;
	} else if (strcasecmp(opt_name, "exceptions") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->exceptions = opt_val;
	} else if (strcasecmp(opt_name, "intnum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->irq.enabled = (opt_val > 0);
		xtensa->core_config->irq.irq_num = opt_val;
	} else if (strcasecmp(opt_name, "hipriints") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->high_irq.enabled = opt_val;
	} else if (strcasecmp(opt_name, "excmlevel") == 0) {
		/* Only meaningful when high-priority interrupts are enabled */
		if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		if (!xtensa->core_config->high_irq.enabled) {
			command_print(CMD, "xtopt excmlevel requires hipriints\n");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->high_irq.excm_level = opt_val;
	} else if (strcasecmp(opt_name, "intlevels") == 0) {
		/* Legal range differs between LX and NX core types */
		if (xtensa->core_config->core_type == XT_LX) {
			if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		} else {
			if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		if (!xtensa->core_config->high_irq.enabled) {
			command_print(CMD, "xtopt intlevels requires hipriints\n");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->high_irq.level_num = opt_val;
	} else if (strcasecmp(opt_name, "debuglevel") == 0) {
		/* Legal range differs between LX and NX core types */
		if (xtensa->core_config->core_type == XT_LX) {
			if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		} else {
			if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
				return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->core_config->debug.enabled = 1;
		xtensa->core_config->debug.irq_level = opt_val;
	} else if (strcasecmp(opt_name, "ibreaknum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.ibreaks_num = opt_val;
	} else if (strcasecmp(opt_name, "dbreaknum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.dbreaks_num = opt_val;
	} else if (strcasecmp(opt_name, "tracemem") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->trace.mem_sz = opt_val;
		xtensa->core_config->trace.enabled = (opt_val > 0);
	} else if (strcasecmp(opt_name, "tracememrev") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->trace.reversed_mem_access = opt_val;
	} else if (strcasecmp(opt_name, "perfcount") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
			return ERROR_COMMAND_ARGUMENT_INVALID;
		xtensa->core_config->debug.perfcount_num = opt_val;
	} else {
		LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
		return ERROR_OK;
	}

	return ERROR_OK;
}
3723
/* Handler for "xtensa xtopt": operates on the current target. */
COMMAND_HANDLER(xtensa_cmd_xtopt)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
3729
3730 /* xtmem <type> [parameters] */
/* Helper for "xtensa xtmem <type> [parameters]": record cache geometry
 * (icache/dcache) or a local memory region (iram/irom/dram/drom/sram/srom)
 * in xtensa->core_config. l2cache/l2addr are accepted but not stored yet.
 * Caches take <line size> <size> <way count> [writeback]; memories take
 * <base> <size>.
 */
COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
{
	struct xtensa_cache_config *cachep = NULL;
	struct xtensa_local_mem_config *memp = NULL;
	int mem_access = 0;
	bool is_dcache = false;

	if (CMD_ARGC == 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	const char *mem_name = CMD_ARGV[0];
	if (strcasecmp(mem_name, "icache") == 0) {
		cachep = &xtensa->core_config->icache;
	} else if (strcasecmp(mem_name, "dcache") == 0) {
		cachep = &xtensa->core_config->dcache;
		is_dcache = true;
	} else if (strcasecmp(mem_name, "l2cache") == 0) {
		/* TODO: support L2 cache */
	} else if (strcasecmp(mem_name, "l2addr") == 0) {
		/* TODO: support L2 cache */
	} else if (strcasecmp(mem_name, "iram") == 0) {
		memp = &xtensa->core_config->iram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "dram") == 0) {
		memp = &xtensa->core_config->dram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "sram") == 0) {
		memp = &xtensa->core_config->sram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "irom") == 0) {
		memp = &xtensa->core_config->irom;
		mem_access = XT_MEM_ACCESS_READ;
	} else if (strcasecmp(mem_name, "drom") == 0) {
		memp = &xtensa->core_config->drom;
		mem_access = XT_MEM_ACCESS_READ;
	} else if (strcasecmp(mem_name, "srom") == 0) {
		memp = &xtensa->core_config->srom;
		mem_access = XT_MEM_ACCESS_READ;
	} else {
		command_print(CMD, "xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	if (cachep) {
		if (CMD_ARGC != 4 && CMD_ARGC != 5)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
		cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
		cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
		/* Optional writeback flag is only meaningful for the dcache */
		cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
			strtoul(CMD_ARGV[4], NULL, 0) : 0;
	} else if (memp) {
		if (CMD_ARGC != 3)
			return ERROR_COMMAND_SYNTAX_ERROR;
		/* NOTE(review): memp->count is not bounds-checked against the
		 * regions[] array capacity here — verify .cfg files cannot
		 * overflow it with too many xtmem entries. */
		struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
		memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
		memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
		memcfgp->access = mem_access;
		memp->count++;
	}

	return ERROR_OK;
}
3794
/* Handler for "xtensa xtmem": operates on the current target. */
COMMAND_HANDLER(xtensa_cmd_xtmem)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
3800
3801 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
3802 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3803 {
3804 if (CMD_ARGC != 4)
3805 return ERROR_COMMAND_SYNTAX_ERROR;
3806
3807 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3808 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3809 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3810 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3811
3812 if ((nfgseg > 32)) {
3813 command_print(CMD, "<nfgseg> must be within [0..32]\n");
3814 return ERROR_COMMAND_ARGUMENT_INVALID;
3815 } else if (minsegsize & (minsegsize - 1)) {
3816 command_print(CMD, "<minsegsize> must be a power of 2 >= 32\n");
3817 return ERROR_COMMAND_ARGUMENT_INVALID;
3818 } else if (lockable > 1) {
3819 command_print(CMD, "<lockable> must be 0 or 1\n");
3820 return ERROR_COMMAND_ARGUMENT_INVALID;
3821 } else if (execonly > 1) {
3822 command_print(CMD, "<execonly> must be 0 or 1\n");
3823 return ERROR_COMMAND_ARGUMENT_INVALID;
3824 }
3825
3826 xtensa->core_config->mpu.enabled = true;
3827 xtensa->core_config->mpu.nfgseg = nfgseg;
3828 xtensa->core_config->mpu.minsegsize = minsegsize;
3829 xtensa->core_config->mpu.lockable = lockable;
3830 xtensa->core_config->mpu.execonly = execonly;
3831 return ERROR_OK;
3832 }
3833
/* Handler for "xtensa xtmpu": operates on the current target. */
COMMAND_HANDLER(xtensa_cmd_xtmpu)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
3839
/* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> */
3841 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3842 {
3843 if (CMD_ARGC != 2)
3844 return ERROR_COMMAND_SYNTAX_ERROR;
3845
3846 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3847 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3848 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3849 command_print(CMD, "<nirefillentries> must be 16 or 32\n");
3850 return ERROR_COMMAND_ARGUMENT_INVALID;
3851 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3852 command_print(CMD, "<ndrefillentries> must be 16 or 32\n");
3853 return ERROR_COMMAND_ARGUMENT_INVALID;
3854 }
3855
3856 xtensa->core_config->mmu.enabled = true;
3857 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3858 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3859 return ERROR_OK;
3860 }
3861
/* Handler for "xtensa xtmmu": operates on the current target. */
COMMAND_HANDLER(xtensa_cmd_xtmmu)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
3867
3868 /* xtregs <numregs>
3869 * xtreg <regname> <regnum> */
/* Helper for the two "xtreg" forms:
 *   xtreg <numregs>          — declare the total register count and allocate
 *                              the optional-register descriptor table;
 *   xtreg <regname> <regnum> — define one register, either flagging a core
 *                              register in xtensa_regs[] as existing or
 *                              appending an entry to xtensa->optregs[].
 * The <numregs> form must be issued before any per-register definitions.
 */
COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
{
	if (CMD_ARGC == 1) {
		int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
		if ((numregs <= 0) || (numregs > UINT16_MAX)) {
			command_print(CMD, "xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
			command_print(CMD, "xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
				numregs, xtensa->genpkt_regs_num);
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
		xtensa->total_regs_num = numregs;
		xtensa->core_regs_num = 0;
		xtensa->num_optregs = 0;
		/* A little more memory than required, but saves a second initialization pass */
		xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
		if (!xtensa->optregs) {
			LOG_ERROR("Failed to allocate xtensa->optregs!");
			return ERROR_FAIL;
		}
		return ERROR_OK;
	} else if (CMD_ARGC != 2) {
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	/* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
	 * if general register (g-packet) requests or contiguous register maps are supported */
	if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
		xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
		if (!xtensa->contiguous_regs_desc) {
			LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
			return ERROR_FAIL;
		}
	}

	const char *regname = CMD_ARGV[0];
	unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
	if (regnum > UINT16_MAX) {
		command_print(CMD, "<regnum> must be a 16-bit number");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	/* Reject definitions beyond the count declared via "xtreg <numregs>" */
	if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
		if (xtensa->total_regs_num)
			command_print(CMD, "'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
				regname, regnum,
				xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
		else
			command_print(CMD, "'xtreg %s 0x%04x': Number of registers unspecified",
				regname, regnum);
		return ERROR_FAIL;
	}

	/* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
	struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
	bool is_extended_reg = true;
	unsigned int ridx;
	for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
		if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
			/* Flag core register as defined */
			rptr = &xtensa_regs[ridx];
			xtensa->core_regs_num++;
			is_extended_reg = false;
			break;
		}
	}

	rptr->exist = true;
	if (is_extended_reg) {
		/* Register ID, debugger-visible register ID */
		rptr->name = strdup(CMD_ARGV[0]);
		rptr->dbreg_num = regnum;
		rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
		xtensa->num_optregs++;

		/* Register type: classified by the mask/value bit patterns of regnum */
		if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
			rptr->type = XT_REG_GENERAL;
		} else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
			rptr->type = XT_REG_USER;
		} else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
			rptr->type = XT_REG_FR;
		} else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
			rptr->type = XT_REG_SPECIAL;
		} else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
			/* WARNING: For these registers, regnum points to the
			 * index of the corresponding ARx registers, NOT to
			 * the processor register number! */
			rptr->type = XT_REG_RELGEN;
			rptr->reg_num += XT_REG_IDX_ARFIRST;
			rptr->dbreg_num += XT_REG_IDX_ARFIRST;
		} else if ((regnum & XT_REG_TIE_MASK) != 0) {
			rptr->type = XT_REG_TIE;
		} else {
			rptr->type = XT_REG_OTHER;
		}

		/* Register flags */
		if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
			(strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
			(strcmp(rptr->name, "intclear") == 0))
			rptr->flags = XT_REGF_NOREAD;
		else
			rptr->flags = 0;

		/* On LX, remember the index of the EPS register matching the debug IRQ level */
		if (rptr->reg_num == (XT_EPS_REG_NUM_BASE + xtensa->core_config->debug.irq_level) &&
			xtensa->core_config->core_type == XT_LX && rptr->type == XT_REG_SPECIAL) {
			xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
			LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
		}
		/* On NX, record cache-index shortcuts for registers the driver accesses by name */
		if (xtensa->core_config->core_type == XT_NX) {
			enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_NUM;
			if (strcmp(rptr->name, "ibreakc0") == 0)
				idx = XT_NX_REG_IDX_IBREAKC0;
			else if (strcmp(rptr->name, "wb") == 0)
				idx = XT_NX_REG_IDX_WB;
			else if (strcmp(rptr->name, "ms") == 0)
				idx = XT_NX_REG_IDX_MS;
			else if (strcmp(rptr->name, "ievec") == 0)
				idx = XT_NX_REG_IDX_IEVEC;
			else if (strcmp(rptr->name, "ieextern") == 0)
				idx = XT_NX_REG_IDX_IEEXTERN;
			else if (strcmp(rptr->name, "mesr") == 0)
				idx = XT_NX_REG_IDX_MESR;
			else if (strcmp(rptr->name, "mesrclr") == 0)
				idx = XT_NX_REG_IDX_MESRCLR;
			if (idx < XT_NX_REG_IDX_NUM) {
				if (xtensa->nx_reg_idx[idx] != 0) {
					command_print(CMD, "nx_reg_idx[%d] previously set to %d",
						idx, xtensa->nx_reg_idx[idx]);
					return ERROR_FAIL;
				}
				xtensa->nx_reg_idx[idx] = XT_NUM_REGS + xtensa->num_optregs - 1;
				LOG_DEBUG("NX reg %s: index %d (%d)",
					rptr->name, xtensa->nx_reg_idx[idx], idx);
			}
		}
	} else if (strcmp(rptr->name, "cpenable") == 0) {
		xtensa->core_config->coproc = true;
	}

	/* Build out list of contiguous registers in specified order */
	unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
	if (xtensa->contiguous_regs_desc) {
		assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
		xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
	}
	if (xtensa_extra_debug_log)
		LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
			is_extended_reg ? "config-specific" : "core",
			rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
			is_extended_reg ? xtensa->num_optregs : ridx,
			is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
	return ERROR_OK;
}
4027
COMMAND_HANDLER(xtensa_cmd_xtreg)
{
	/* Thin dispatcher: resolve the current target's struct xtensa and delegate. */
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
4033
4034 /* xtregfmt <contiguous|sparse> [numgregs] */
4035 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
4036 {
4037 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
4038 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
4039 return ERROR_OK;
4040 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
4041 xtensa->regmap_contiguous = true;
4042 if (CMD_ARGC == 2) {
4043 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
4044 if ((numgregs <= 0) ||
4045 ((numgregs > xtensa->total_regs_num) &&
4046 (xtensa->total_regs_num > 0))) {
4047 command_print(CMD, "xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
4048 numgregs, xtensa->total_regs_num);
4049 return ERROR_COMMAND_ARGUMENT_INVALID;
4050 }
4051 xtensa->genpkt_regs_num = numgregs;
4052 }
4053 return ERROR_OK;
4054 }
4055 }
4056 return ERROR_COMMAND_SYNTAX_ERROR;
4057 }
4058
COMMAND_HANDLER(xtensa_cmd_xtregfmt)
{
	/* Thin dispatcher: resolve the current target's struct xtensa and delegate. */
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
4064
COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
{
	/* Parse an optional on/off argument (or report the current setting)
	 * into xtensa->permissive_mode via the generic bool-option parser. */
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
		&xtensa->permissive_mode, "xtensa permissive mode");
}
4070
COMMAND_HANDLER(xtensa_cmd_permissive_mode)
{
	/* Thin dispatcher: resolve the current target's struct xtensa and delegate. */
	return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
4076
/* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
{
	/* Validate the arguments, fill a perfmon config, and program/start the
	 * selected hardware performance counter through the debug module. */
	struct xtensa_perfmon_config config = {
		.mask = 0xffff,
		.kernelcnt = 0,
		.tracelevel = -1	/* use DEBUGLEVEL by default */
	};

	/* NOTE(review): upper bound of 6 admits one more argument than the five
	 * documented/parsed below — confirm whether ">5" was intended. */
	if (CMD_ARGC < 2 || CMD_ARGC > 6)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Mandatory arg 0: which counter to program, 0..XTENSA_MAX_PERF_COUNTERS-1. */
	unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
	if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
		command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	/* Mandatory arg 1: event select code, range-checked against the maximum. */
	config.select = strtoul(CMD_ARGV[1], NULL, 0);
	if (config.select > XTENSA_MAX_PERF_SELECT) {
		command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	/* Optional arg 2: event mask (defaults to 0xffff above). */
	if (CMD_ARGC >= 3) {
		config.mask = strtoul(CMD_ARGV[2], NULL, 0);
		if (config.mask > XTENSA_MAX_PERF_MASK) {
			command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
	}

	/* Optional arg 3: kernelcnt flag, strictly 0 or 1. */
	if (CMD_ARGC >= 4) {
		config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
		if (config.kernelcnt > 1) {
			command_print(CMD, "kernelcnt should be 0 or 1");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
	}

	/* Optional arg 4: tracelevel, 0..7. */
	if (CMD_ARGC >= 5) {
		config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
		if (config.tracelevel > 7) {
			command_print(CMD, "tracelevel should be <=7");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
	}

	/* Unset tracelevel defaults to the core's debug interrupt level. */
	if (config.tracelevel == -1)
		config.tracelevel = xtensa->core_config->debug.irq_level;

	return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
}
4130
COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
{
	/* Thin dispatcher: resolve the current target's struct xtensa and delegate. */
	return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
4136
4137 /* perfmon_dump [counter_id] */
4138 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
4139 {
4140 if (CMD_ARGC > 1)
4141 return ERROR_COMMAND_SYNTAX_ERROR;
4142
4143 int counter_id = -1;
4144 if (CMD_ARGC == 1) {
4145 counter_id = strtol(CMD_ARGV[0], NULL, 0);
4146 if (counter_id > XTENSA_MAX_PERF_COUNTERS) {
4147 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
4148 return ERROR_COMMAND_ARGUMENT_INVALID;
4149 }
4150 }
4151
4152 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
4153 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
4154 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
4155 char result_buf[128] = { 0 };
4156 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
4157 struct xtensa_perfmon_result result;
4158 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
4159 if (res != ERROR_OK)
4160 return res;
4161 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
4162 "%-12" PRIu64 "%s",
4163 result.value,
4164 result.overflow ? " (overflow)" : "");
4165 command_print(CMD, "%s", result_buf);
4166 }
4167
4168 return ERROR_OK;
4169 }
4170
COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
{
	/* Thin dispatcher: resolve the current target's struct xtensa and delegate. */
	return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
4176
4177 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
4178 {
4179 int state = -1;
4180
4181 if (CMD_ARGC < 1) {
4182 const char *st;
4183 state = xtensa->stepping_isr_mode;
4184 if (state == XT_STEPPING_ISR_ON)
4185 st = "OFF";
4186 else if (state == XT_STEPPING_ISR_OFF)
4187 st = "ON";
4188 else
4189 st = "UNKNOWN";
4190 command_print(CMD, "Current ISR step mode: %s", st);
4191 return ERROR_OK;
4192 }
4193
4194 if (xtensa->core_config->core_type == XT_NX) {
4195 command_print(CMD, "ERROR: ISR step mode only supported on Xtensa LX");
4196 return ERROR_FAIL;
4197 }
4198
4199 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
4200 if (!strcasecmp(CMD_ARGV[0], "off"))
4201 state = XT_STEPPING_ISR_ON;
4202 else if (!strcasecmp(CMD_ARGV[0], "on"))
4203 state = XT_STEPPING_ISR_OFF;
4204
4205 if (state == -1) {
4206 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
4207 return ERROR_FAIL;
4208 }
4209 xtensa->stepping_isr_mode = state;
4210 return ERROR_OK;
4211 }
4212
COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
{
	/* Thin dispatcher: resolve the current target's struct xtensa and delegate. */
	return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
4218
4219 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
4220 {
4221 int res;
4222 uint32_t val = 0;
4223
4224 if (CMD_ARGC >= 1) {
4225 for (unsigned int i = 0; i < CMD_ARGC; i++) {
4226 if (!strcasecmp(CMD_ARGV[0], "none")) {
4227 val = 0;
4228 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
4229 val |= OCDDCR_BREAKINEN;
4230 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
4231 val |= OCDDCR_BREAKOUTEN;
4232 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
4233 val |= OCDDCR_RUNSTALLINEN;
4234 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
4235 val |= OCDDCR_DEBUGMODEOUTEN;
4236 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
4237 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
4238 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
4239 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
4240 } else {
4241 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
4242 command_print(
4243 CMD,
4244 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
4245 return ERROR_OK;
4246 }
4247 }
4248 res = xtensa_smpbreak_set(target, val);
4249 if (res != ERROR_OK)
4250 command_print(CMD, "Failed to set smpbreak config %d", res);
4251 } else {
4252 struct xtensa *xtensa = target_to_xtensa(target);
4253 res = xtensa_smpbreak_read(xtensa, &val);
4254 if (res == ERROR_OK)
4255 command_print(CMD, "Current bits set:%s%s%s%s",
4256 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
4257 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
4258 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
4259 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
4260 );
4261 else
4262 command_print(CMD, "Failed to get smpbreak config %d", res);
4263 }
4264 return res;
4265 }
4266
COMMAND_HANDLER(xtensa_cmd_smpbreak)
{
	/* Thin dispatcher: pass the current target (not struct xtensa) to the helper. */
	return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
		get_current_target(CMD_CTX));
}
4272
4273 COMMAND_HELPER(xtensa_cmd_dm_rw_do, struct xtensa *xtensa)
4274 {
4275 if (CMD_ARGC == 1) {
4276 // read: xtensa dm addr
4277 uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
4278 uint32_t val;
4279 int res = xtensa_dm_read(&xtensa->dbg_mod, addr, &val);
4280 if (res == ERROR_OK)
4281 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") -> 0x%08" PRIx32, addr, val);
4282 else
4283 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : read ERROR %" PRId32, addr, res);
4284 return res;
4285 } else if (CMD_ARGC == 2) {
4286 // write: xtensa dm addr value
4287 uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
4288 uint32_t val = strtoul(CMD_ARGV[1], NULL, 0);
4289 int res = xtensa_dm_write(&xtensa->dbg_mod, addr, val);
4290 if (res == ERROR_OK)
4291 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") <- 0x%08" PRIx32, addr, val);
4292 else
4293 command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : write ERROR %" PRId32, addr, res);
4294 return res;
4295 }
4296 return ERROR_COMMAND_SYNTAX_ERROR;
4297 }
4298
COMMAND_HANDLER(xtensa_cmd_dm_rw)
{
	/* Thin dispatcher: resolve the current target's struct xtensa and delegate. */
	return CALL_COMMAND_HANDLER(xtensa_cmd_dm_rw_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
4304
4305 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
4306 {
4307 struct xtensa_trace_status trace_status;
4308 struct xtensa_trace_start_config cfg = {
4309 .stoppc = 0,
4310 .stopmask = XTENSA_STOPMASK_DISABLED,
4311 .after = 0,
4312 .after_is_words = false
4313 };
4314
4315 /* Parse arguments */
4316 for (unsigned int i = 0; i < CMD_ARGC; i++) {
4317 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
4318 char *e;
4319 i++;
4320 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
4321 cfg.stopmask = 0;
4322 if (*e == '/')
4323 cfg.stopmask = strtol(e, NULL, 0);
4324 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
4325 i++;
4326 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
4327 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
4328 cfg.after_is_words = 0;
4329 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
4330 cfg.after_is_words = 1;
4331 } else {
4332 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
4333 return ERROR_FAIL;
4334 }
4335 }
4336
4337 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4338 if (res != ERROR_OK)
4339 return res;
4340 if (trace_status.stat & TRAXSTAT_TRACT) {
4341 LOG_WARNING("Silently stop active tracing!");
4342 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
4343 if (res != ERROR_OK)
4344 return res;
4345 }
4346
4347 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
4348 if (res != ERROR_OK)
4349 return res;
4350
4351 xtensa->trace_active = true;
4352 command_print(CMD, "Trace started.");
4353 return ERROR_OK;
4354 }
4355
COMMAND_HANDLER(xtensa_cmd_tracestart)
{
	/* Thin dispatcher: resolve the current target's struct xtensa and delegate. */
	return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
4361
4362 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
4363 {
4364 struct xtensa_trace_status trace_status;
4365
4366 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4367 if (res != ERROR_OK)
4368 return res;
4369
4370 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
4371 command_print(CMD, "No trace is currently active.");
4372 return ERROR_FAIL;
4373 }
4374
4375 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
4376 if (res != ERROR_OK)
4377 return res;
4378
4379 xtensa->trace_active = false;
4380 command_print(CMD, "Trace stop triggered.");
4381 return ERROR_OK;
4382 }
4383
COMMAND_HANDLER(xtensa_cmd_tracestop)
{
	/* Thin dispatcher: resolve the current target's struct xtensa and delegate. */
	return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
4389
4390 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
4391 {
4392 struct xtensa_trace_config trace_config;
4393 struct xtensa_trace_status trace_status;
4394 uint32_t memsz, wmem;
4395
4396 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4397 if (res != ERROR_OK)
4398 return res;
4399
4400 if (trace_status.stat & TRAXSTAT_TRACT) {
4401 command_print(CMD, "Tracing is still active. Please stop it first.");
4402 return ERROR_FAIL;
4403 }
4404
4405 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
4406 if (res != ERROR_OK)
4407 return res;
4408
4409 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
4410 command_print(CMD, "No active trace found; nothing to dump.");
4411 return ERROR_FAIL;
4412 }
4413
4414 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
4415 command_print(CMD, "Total trace memory: %d words", memsz);
4416 if ((trace_config.addr &
4417 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
4418 /*Memory hasn't overwritten itself yet. */
4419 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
4420 command_print(CMD, "...but trace is only %d words", wmem);
4421 if (wmem < memsz)
4422 memsz = wmem;
4423 } else {
4424 if (trace_config.addr & TRAXADDR_TWSAT) {
4425 command_print(CMD, "Real trace is many times longer than that (overflow)");
4426 } else {
4427 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
4428 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
4429 command_print(CMD, "Real trace is %d words, but the start has been truncated.", trc_sz);
4430 }
4431 }
4432
4433 uint8_t *tracemem = malloc(memsz * 4);
4434 if (!tracemem) {
4435 command_print(CMD, "Failed to alloc memory for trace data!");
4436 return ERROR_FAIL;
4437 }
4438 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
4439 if (res != ERROR_OK) {
4440 free(tracemem);
4441 return res;
4442 }
4443
4444 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4445 if (f <= 0) {
4446 free(tracemem);
4447 command_print(CMD, "Unable to open file %s", fname);
4448 return ERROR_FAIL;
4449 }
4450 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
4451 command_print(CMD, "Unable to write to file %s", fname);
4452 else
4453 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
4454 close(f);
4455
4456 bool is_all_zeroes = true;
4457 for (unsigned int i = 0; i < memsz * 4; i++) {
4458 if (tracemem[i] != 0) {
4459 is_all_zeroes = false;
4460 break;
4461 }
4462 }
4463 free(tracemem);
4464 if (is_all_zeroes)
4465 command_print(
4466 CMD,
4467 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
4468
4469 return ERROR_OK;
4470 }
4471
4472 COMMAND_HANDLER(xtensa_cmd_tracedump)
4473 {
4474 if (CMD_ARGC != 1) {
4475 command_print(CMD, "Command takes exactly 1 parameter.Need filename to dump to as output!");
4476 return ERROR_FAIL;
4477 }
4478
4479 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
4480 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
4481 }
4482
4483 static const struct command_registration xtensa_any_command_handlers[] = {
4484 {
4485 .name = "xtdef",
4486 .handler = xtensa_cmd_xtdef,
4487 .mode = COMMAND_CONFIG,
4488 .help = "Configure Xtensa core type",
4489 .usage = "<type>",
4490 },
4491 {
4492 .name = "xtopt",
4493 .handler = xtensa_cmd_xtopt,
4494 .mode = COMMAND_CONFIG,
4495 .help = "Configure Xtensa core option",
4496 .usage = "<name> <value>",
4497 },
4498 {
4499 .name = "xtmem",
4500 .handler = xtensa_cmd_xtmem,
4501 .mode = COMMAND_CONFIG,
4502 .help = "Configure Xtensa memory/cache option",
4503 .usage = "<type> [parameters]",
4504 },
4505 {
4506 .name = "xtmmu",
4507 .handler = xtensa_cmd_xtmmu,
4508 .mode = COMMAND_CONFIG,
4509 .help = "Configure Xtensa MMU option",
4510 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
4511 },
4512 {
4513 .name = "xtmpu",
4514 .handler = xtensa_cmd_xtmpu,
4515 .mode = COMMAND_CONFIG,
4516 .help = "Configure Xtensa MPU option",
4517 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
4518 },
4519 {
4520 .name = "xtreg",
4521 .handler = xtensa_cmd_xtreg,
4522 .mode = COMMAND_CONFIG,
4523 .help = "Configure Xtensa register",
4524 .usage = "<regname> <regnum>",
4525 },
4526 {
4527 .name = "xtregs",
4528 .handler = xtensa_cmd_xtreg,
4529 .mode = COMMAND_CONFIG,
4530 .help = "Configure number of Xtensa registers",
4531 .usage = "<numregs>",
4532 },
4533 {
4534 .name = "xtregfmt",
4535 .handler = xtensa_cmd_xtregfmt,
4536 .mode = COMMAND_CONFIG,
4537 .help = "Configure format of Xtensa register map",
4538 .usage = "<contiguous|sparse> [numgregs]",
4539 },
4540 {
4541 .name = "set_permissive",
4542 .handler = xtensa_cmd_permissive_mode,
4543 .mode = COMMAND_ANY,
4544 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
4545 .usage = "[0|1]",
4546 },
4547 {
4548 .name = "maskisr",
4549 .handler = xtensa_cmd_mask_interrupts,
4550 .mode = COMMAND_ANY,
4551 .help = "mask Xtensa interrupts at step",
4552 .usage = "['on'|'off']",
4553 },
4554 {
4555 .name = "smpbreak",
4556 .handler = xtensa_cmd_smpbreak,
4557 .mode = COMMAND_ANY,
4558 .help = "Set the way the CPU chains OCD breaks",
4559 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
4560 },
4561 {
4562 .name = "dm",
4563 .handler = xtensa_cmd_dm_rw,
4564 .mode = COMMAND_ANY,
4565 .help = "Xtensa DM read/write",
4566 .usage = "addr [value]"
4567 },
4568 {
4569 .name = "perfmon_enable",
4570 .handler = xtensa_cmd_perfmon_enable,
4571 .mode = COMMAND_EXEC,
4572 .help = "Enable and start performance counter",
4573 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
4574 },
4575 {
4576 .name = "perfmon_dump",
4577 .handler = xtensa_cmd_perfmon_dump,
4578 .mode = COMMAND_EXEC,
4579 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
4580 .usage = "[counter_id]",
4581 },
4582 {
4583 .name = "tracestart",
4584 .handler = xtensa_cmd_tracestart,
4585 .mode = COMMAND_EXEC,
4586 .help =
4587 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
4588 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
4589 },
4590 {
4591 .name = "tracestop",
4592 .handler = xtensa_cmd_tracestop,
4593 .mode = COMMAND_EXEC,
4594 .help = "Tracing: Stop current trace as started by the tracestart command",
4595 .usage = "",
4596 },
4597 {
4598 .name = "tracedump",
4599 .handler = xtensa_cmd_tracedump,
4600 .mode = COMMAND_EXEC,
4601 .help = "Tracing: Dump trace memory to a files. One file per core.",
4602 .usage = "<outfile>",
4603 },
4604 {
4605 .name = "exe",
4606 .handler = xtensa_cmd_exe,
4607 .mode = COMMAND_ANY,
4608 .help = "Xtensa stub execution",
4609 .usage = "<ascii-encoded hexadecimal instruction bytes>",
4610 },
4611 COMMAND_REGISTRATION_DONE
4612 };
4613
/* Top-level "xtensa" command group; all subcommands chain from
 * xtensa_any_command_handlers above. */
const struct command_registration xtensa_command_handlers[] = {
	{
		.name = "xtensa",
		.mode = COMMAND_ANY,
		.help = "Xtensa command group",
		.usage = "",
		.chain = xtensa_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

Linking to existing account procedure

If you already have an account and want to add another login method, you must first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to reach this page again; this time the new sign-in will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)