target: add Espressif ESP32-S2 basic support
[openocd.git] / src / target / xtensa / xtensa.c
1 /***************************************************************************
2 * Generic Xtensa target API for OpenOCD *
3 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
4 * Derived from esp108.c *
5 * Author: Angus Gratton gus@projectgus.com *
6 * Author: Jeroen Domburg <jeroen@espressif.com> *
7 * Author: Alexey Gerenkov <alexey@espressif.com> *
8 * Author: Andrey Gramakov <andrei.gramakov@espressif.com> *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License as published by *
12 * the Free Software Foundation; either version 2 of the License, or *
13 * (at your option) any later version. *
14 * *
15 * This program is distributed in the hope that it will be useful, *
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
18 * GNU General Public License for more details. *
19 * *
20 * You should have received a copy of the GNU General Public License *
21 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
22 ***************************************************************************/
23
24 #ifdef HAVE_CONFIG_H
25 #include "config.h"
26 #endif
27
28 #include <stdlib.h>
29 #include <helper/time_support.h>
30 #include <helper/align.h>
31 #include <target/register.h>
32
33 #include "xtensa.h"
34
35
36 #define _XT_INS_FORMAT_RSR(OPCODE, SR, T) ((OPCODE) \
37 | (((SR) & 0xFF) << 8) \
38 | (((T) & 0x0F) << 4))
39
40 #define _XT_INS_FORMAT_RRR(OPCODE, ST, R) ((OPCODE) \
41 | (((ST) & 0xFF) << 4) \
42 | (((R) & 0x0F) << 12))
43
44 #define _XT_INS_FORMAT_RRRN(OPCODE, S, T, IMM4) ((OPCODE) \
45 | (((T) & 0x0F) << 4) \
46 | (((S) & 0x0F) << 8) \
47 | (((IMM4) & 0x0F) << 12))
48
49 #define _XT_INS_FORMAT_RRI8(OPCODE, R, S, T, IMM8) ((OPCODE) \
50 | (((IMM8) & 0xFF) << 16) \
51 | (((R) & 0x0F) << 12) \
52 | (((S) & 0x0F) << 8) \
53 | (((T) & 0x0F) << 4))
54
55 #define _XT_INS_FORMAT_RRI4(OPCODE, IMM4, R, S, T) ((OPCODE) \
56 | (((IMM4) & 0x0F) << 20) \
57 | (((R) & 0x0F) << 12) \
58 | (((S) & 0x0F) << 8) \
59 | (((T) & 0x0F) << 4))
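/* The macros above assemble Xtensa instruction words (24-bit, or 16-bit for the
 * narrow RRRN form) field by field; the resulting opcodes are executed on the
 * halted core through the debug module (see xtensa_queue_exec_ins() below). */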
60
61 /* Xtensa processor instruction opcodes
62 * "Return From Debug Operation" to Normal */
63 #define XT_INS_RFDO 0xf1e000
64 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
65 #define XT_INS_RFDD 0xf1e010
66
67 /* Load to DDR register, increase addr register */
68 #define XT_INS_LDDR32P(S) (0x0070E0 | ((S) << 8))
69 /* Store from DDR register, increase addr register */
70 #define XT_INS_SDDR32P(S) (0x0070F0 | ((S) << 8))
71
72 /* Load 32-bit Indirect from A(S) + 4 * IMM8 to A(T) */
73 #define XT_INS_L32I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x002002, 0, S, T, IMM8)
74 /* Load 16-bit Unsigned from A(S) + 2 * IMM8 to A(T) */
75 #define XT_INS_L16UI(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x001002, 0, S, T, IMM8)
76 /* Load 8-bit Unsigned from A(S) + IMM8 to A(T) */
77 #define XT_INS_L8UI(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x000002, 0, S, T, IMM8)
78
79 /* Store 32-bit Indirect to A(S) + 4 * IMM8 from A(T) */
80 #define XT_INS_S32I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x006002, 0, S, T, IMM8)
81 /* Store 16-bit to A(S) + 2 * IMM8 from A(T) */
82 #define XT_INS_S16I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x005002, 0, S, T, IMM8)
83 /* Store 8-bit to A(S) + IMM8 from A(T) */
84 #define XT_INS_S8I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x004002, 0, S, T, IMM8)
85
86 /* Read Special Register */
87 #define XT_INS_RSR(SR, T) _XT_INS_FORMAT_RSR(0x030000, SR, T)
88 /* Write Special Register */
89 #define XT_INS_WSR(SR, T) _XT_INS_FORMAT_RSR(0x130000, SR, T)
90 /* Swap Special Register */
91 #define XT_INS_XSR(SR, T) _XT_INS_FORMAT_RSR(0x610000, SR, T)
92
93 /* Rotate Window by (-8..7) */
94 #define XT_INS_ROTW(N) ((0x408000) | (((N) & 15) << 4))
95
96 /* Read User Register */
97 #define XT_INS_RUR(UR, T) _XT_INS_FORMAT_RRR(0xE30000, UR, T)
98 /* Write User Register */
99 #define XT_INS_WUR(UR, T) _XT_INS_FORMAT_RSR(0xF30000, UR, T)
100
101 /* Read Floating-Point Register */
102 #define XT_INS_RFR(FR, T) _XT_INS_FORMAT_RRR(0xFA0000, (((FR) << 4) | 0x4), T)
103 /* Write Floating-Point Register */
104 #define XT_INS_WFR(FR, T) _XT_INS_FORMAT_RRR(0xFA0000, (((FR) << 4) | 0x5), T)
105
106 /* 32-bit break */
107 #define XT_INS_BREAK(IMM1, IMM2) _XT_INS_FORMAT_RRR(0x000000, \
108 (((IMM1) & 0x0F) << 4) | ((IMM2) & 0x0F), 0x4)
109 /* 16-bit break */
110 #define XT_INS_BREAKN(IMM4) _XT_INS_FORMAT_RRRN(0x00000D, IMM4, 0x2, 0xF)
111
112 #define XT_INS_L32E(R, S, T) _XT_INS_FORMAT_RRI4(0x90000, 0, R, S, T)
113 #define XT_INS_S32E(R, S, T) _XT_INS_FORMAT_RRI4(0x490000, 0, R, S, T)
114 #define XT_INS_L32E_S32E_MASK 0xFF000F
115
116 #define XT_INS_RFWO 0x3400
117 #define XT_INS_RFWU 0x3500
118 #define XT_INS_RFWO_RFWU_MASK 0xFFFFFF
119
120 #define XT_WATCHPOINTS_NUM_MAX 2
121
122 /* Special register number macro for the DDR register.
123 * This gets used a lot, so a shortcut to it is
124 * useful.
125 */
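/* DDR ("Debug Data Register") is the data exchange register between the OCD
 * debug module and the core: the core reaches it with RSR/WSR/LDDR32P/SDDR32P,
 * while the host accesses it through NARADR_DDR. */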
126 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_OCD_DDR].reg_num)
127
128 /*Same thing for A3/A4 */
129 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
130 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
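/* A3 (and, where needed, A4) serve as scratch registers for the instruction
 * sequences queued through the debug module below; code that clobbers A3 marks
 * it dirty so that the cached value is written back on resume. */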
131
132 #define XT_PC_REG_NUM_BASE (176)
133 #define XT_SW_BREAKPOINTS_MAX_NUM 32
134
135 const struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
136 { "pc", XT_PC_REG_NUM_BASE /*+XT_DEBUGLEVEL*/, XT_REG_SPECIAL, 0 }, /* actually epc[debuglevel] */
137 { "ar0", 0x00, XT_REG_GENERAL, 0 },
138 { "ar1", 0x01, XT_REG_GENERAL, 0 },
139 { "ar2", 0x02, XT_REG_GENERAL, 0 },
140 { "ar3", 0x03, XT_REG_GENERAL, 0 },
141 { "ar4", 0x04, XT_REG_GENERAL, 0 },
142 { "ar5", 0x05, XT_REG_GENERAL, 0 },
143 { "ar6", 0x06, XT_REG_GENERAL, 0 },
144 { "ar7", 0x07, XT_REG_GENERAL, 0 },
145 { "ar8", 0x08, XT_REG_GENERAL, 0 },
146 { "ar9", 0x09, XT_REG_GENERAL, 0 },
147 { "ar10", 0x0A, XT_REG_GENERAL, 0 },
148 { "ar11", 0x0B, XT_REG_GENERAL, 0 },
149 { "ar12", 0x0C, XT_REG_GENERAL, 0 },
150 { "ar13", 0x0D, XT_REG_GENERAL, 0 },
151 { "ar14", 0x0E, XT_REG_GENERAL, 0 },
152 { "ar15", 0x0F, XT_REG_GENERAL, 0 },
153 { "ar16", 0x10, XT_REG_GENERAL, 0 },
154 { "ar17", 0x11, XT_REG_GENERAL, 0 },
155 { "ar18", 0x12, XT_REG_GENERAL, 0 },
156 { "ar19", 0x13, XT_REG_GENERAL, 0 },
157 { "ar20", 0x14, XT_REG_GENERAL, 0 },
158 { "ar21", 0x15, XT_REG_GENERAL, 0 },
159 { "ar22", 0x16, XT_REG_GENERAL, 0 },
160 { "ar23", 0x17, XT_REG_GENERAL, 0 },
161 { "ar24", 0x18, XT_REG_GENERAL, 0 },
162 { "ar25", 0x19, XT_REG_GENERAL, 0 },
163 { "ar26", 0x1A, XT_REG_GENERAL, 0 },
164 { "ar27", 0x1B, XT_REG_GENERAL, 0 },
165 { "ar28", 0x1C, XT_REG_GENERAL, 0 },
166 { "ar29", 0x1D, XT_REG_GENERAL, 0 },
167 { "ar30", 0x1E, XT_REG_GENERAL, 0 },
168 { "ar31", 0x1F, XT_REG_GENERAL, 0 },
169 { "ar32", 0x20, XT_REG_GENERAL, 0 },
170 { "ar33", 0x21, XT_REG_GENERAL, 0 },
171 { "ar34", 0x22, XT_REG_GENERAL, 0 },
172 { "ar35", 0x23, XT_REG_GENERAL, 0 },
173 { "ar36", 0x24, XT_REG_GENERAL, 0 },
174 { "ar37", 0x25, XT_REG_GENERAL, 0 },
175 { "ar38", 0x26, XT_REG_GENERAL, 0 },
176 { "ar39", 0x27, XT_REG_GENERAL, 0 },
177 { "ar40", 0x28, XT_REG_GENERAL, 0 },
178 { "ar41", 0x29, XT_REG_GENERAL, 0 },
179 { "ar42", 0x2A, XT_REG_GENERAL, 0 },
180 { "ar43", 0x2B, XT_REG_GENERAL, 0 },
181 { "ar44", 0x2C, XT_REG_GENERAL, 0 },
182 { "ar45", 0x2D, XT_REG_GENERAL, 0 },
183 { "ar46", 0x2E, XT_REG_GENERAL, 0 },
184 { "ar47", 0x2F, XT_REG_GENERAL, 0 },
185 { "ar48", 0x30, XT_REG_GENERAL, 0 },
186 { "ar49", 0x31, XT_REG_GENERAL, 0 },
187 { "ar50", 0x32, XT_REG_GENERAL, 0 },
188 { "ar51", 0x33, XT_REG_GENERAL, 0 },
189 { "ar52", 0x34, XT_REG_GENERAL, 0 },
190 { "ar53", 0x35, XT_REG_GENERAL, 0 },
191 { "ar54", 0x36, XT_REG_GENERAL, 0 },
192 { "ar55", 0x37, XT_REG_GENERAL, 0 },
193 { "ar56", 0x38, XT_REG_GENERAL, 0 },
194 { "ar57", 0x39, XT_REG_GENERAL, 0 },
195 { "ar58", 0x3A, XT_REG_GENERAL, 0 },
196 { "ar59", 0x3B, XT_REG_GENERAL, 0 },
197 { "ar60", 0x3C, XT_REG_GENERAL, 0 },
198 { "ar61", 0x3D, XT_REG_GENERAL, 0 },
199 { "ar62", 0x3E, XT_REG_GENERAL, 0 },
200 { "ar63", 0x3F, XT_REG_GENERAL, 0 },
201 { "lbeg", 0x00, XT_REG_SPECIAL, 0 },
202 { "lend", 0x01, XT_REG_SPECIAL, 0 },
203 { "lcount", 0x02, XT_REG_SPECIAL, 0 },
204 { "sar", 0x03, XT_REG_SPECIAL, 0 },
205 { "windowbase", 0x48, XT_REG_SPECIAL, 0 },
206 { "windowstart", 0x49, XT_REG_SPECIAL, 0 },
207 { "configid0", 0xB0, XT_REG_SPECIAL, 0 },
208 { "configid1", 0xD0, XT_REG_SPECIAL, 0 },
209 { "ps", 0xC6, XT_REG_SPECIAL, 0 }, /* actually EPS[debuglevel] */
210 { "threadptr", 0xE7, XT_REG_USER, 0 },
211 { "br", 0x04, XT_REG_SPECIAL, 0 },
212 { "scompare1", 0x0C, XT_REG_SPECIAL, 0 },
213 { "acclo", 0x10, XT_REG_SPECIAL, 0 },
214 { "acchi", 0x11, XT_REG_SPECIAL, 0 },
215 { "m0", 0x20, XT_REG_SPECIAL, 0 },
216 { "m1", 0x21, XT_REG_SPECIAL, 0 },
217 { "m2", 0x22, XT_REG_SPECIAL, 0 },
218 { "m3", 0x23, XT_REG_SPECIAL, 0 },
219 { "f0", 0x00, XT_REG_FR, XT_REGF_COPROC0 },
220 { "f1", 0x01, XT_REG_FR, XT_REGF_COPROC0 },
221 { "f2", 0x02, XT_REG_FR, XT_REGF_COPROC0 },
222 { "f3", 0x03, XT_REG_FR, XT_REGF_COPROC0 },
223 { "f4", 0x04, XT_REG_FR, XT_REGF_COPROC0 },
224 { "f5", 0x05, XT_REG_FR, XT_REGF_COPROC0 },
225 { "f6", 0x06, XT_REG_FR, XT_REGF_COPROC0 },
226 { "f7", 0x07, XT_REG_FR, XT_REGF_COPROC0 },
227 { "f8", 0x08, XT_REG_FR, XT_REGF_COPROC0 },
228 { "f9", 0x09, XT_REG_FR, XT_REGF_COPROC0 },
229 { "f10", 0x0A, XT_REG_FR, XT_REGF_COPROC0 },
230 { "f11", 0x0B, XT_REG_FR, XT_REGF_COPROC0 },
231 { "f12", 0x0C, XT_REG_FR, XT_REGF_COPROC0 },
232 { "f13", 0x0D, XT_REG_FR, XT_REGF_COPROC0 },
233 { "f14", 0x0E, XT_REG_FR, XT_REGF_COPROC0 },
234 { "f15", 0x0F, XT_REG_FR, XT_REGF_COPROC0 },
235 { "fcr", 0xE8, XT_REG_USER, XT_REGF_COPROC0 },
236 { "fsr", 0xE9, XT_REG_USER, XT_REGF_COPROC0 },
237 { "mmid", 0x59, XT_REG_SPECIAL, XT_REGF_NOREAD },
238 { "ibreakenable", 0x60, XT_REG_SPECIAL, 0 },
239 { "memctl", 0x61, XT_REG_SPECIAL, 0 },
240 { "atomctl", 0x63, XT_REG_SPECIAL, 0 },
241 { "ibreaka0", 0x80, XT_REG_SPECIAL, 0 },
242 { "ibreaka1", 0x81, XT_REG_SPECIAL, 0 },
243 { "dbreaka0", 0x90, XT_REG_SPECIAL, 0 },
244 { "dbreaka1", 0x91, XT_REG_SPECIAL, 0 },
245 { "dbreakc0", 0xA0, XT_REG_SPECIAL, 0 },
246 { "dbreakc1", 0xA1, XT_REG_SPECIAL, 0 },
247 { "epc1", 0xB1, XT_REG_SPECIAL, 0 },
248 { "epc2", 0xB2, XT_REG_SPECIAL, 0 },
249 { "epc3", 0xB3, XT_REG_SPECIAL, 0 },
250 { "epc4", 0xB4, XT_REG_SPECIAL, 0 },
251 { "epc5", 0xB5, XT_REG_SPECIAL, 0 },
252 { "epc6", 0xB6, XT_REG_SPECIAL, 0 },
253 { "epc7", 0xB7, XT_REG_SPECIAL, 0 },
254 { "depc", 0xC0, XT_REG_SPECIAL, 0 },
255 { "eps2", 0xC2, XT_REG_SPECIAL, 0 },
256 { "eps3", 0xC3, XT_REG_SPECIAL, 0 },
257 { "eps4", 0xC4, XT_REG_SPECIAL, 0 },
258 { "eps5", 0xC5, XT_REG_SPECIAL, 0 },
259 { "eps6", 0xC6, XT_REG_SPECIAL, 0 },
260 { "eps7", 0xC7, XT_REG_SPECIAL, 0 },
261 { "excsave1", 0xD1, XT_REG_SPECIAL, 0 },
262 { "excsave2", 0xD2, XT_REG_SPECIAL, 0 },
263 { "excsave3", 0xD3, XT_REG_SPECIAL, 0 },
264 { "excsave4", 0xD4, XT_REG_SPECIAL, 0 },
265 { "excsave5", 0xD5, XT_REG_SPECIAL, 0 },
266 { "excsave6", 0xD6, XT_REG_SPECIAL, 0 },
267 { "excsave7", 0xD7, XT_REG_SPECIAL, 0 },
268 { "cpenable", 0xE0, XT_REG_SPECIAL, 0 },
269 { "interrupt", 0xE2, XT_REG_SPECIAL, 0 },
270 { "intset", 0xE2, XT_REG_SPECIAL, XT_REGF_NOREAD },
271 { "intclear", 0xE3, XT_REG_SPECIAL, XT_REGF_NOREAD },
272 { "intenable", 0xE4, XT_REG_SPECIAL, 0 },
273 { "vecbase", 0xE7, XT_REG_SPECIAL, 0 },
274 { "exccause", 0xE8, XT_REG_SPECIAL, 0 },
275 { "debugcause", 0xE9, XT_REG_SPECIAL, 0 },
276 { "ccount", 0xEA, XT_REG_SPECIAL, 0 },
277 { "prid", 0xEB, XT_REG_SPECIAL, 0 },
278 { "icount", 0xEC, XT_REG_SPECIAL, 0 },
279 { "icountlevel", 0xED, XT_REG_SPECIAL, 0 },
280 { "excvaddr", 0xEE, XT_REG_SPECIAL, 0 },
281 { "ccompare0", 0xF0, XT_REG_SPECIAL, 0 },
282 { "ccompare1", 0xF1, XT_REG_SPECIAL, 0 },
283 { "ccompare2", 0xF2, XT_REG_SPECIAL, 0 },
284 { "misc0", 0xF4, XT_REG_SPECIAL, 0 },
285 { "misc1", 0xF5, XT_REG_SPECIAL, 0 },
286 { "misc2", 0xF6, XT_REG_SPECIAL, 0 },
287 { "misc3", 0xF7, XT_REG_SPECIAL, 0 },
288 { "litbase", 0x05, XT_REG_SPECIAL, 0 },
289 { "ptevaddr", 0x53, XT_REG_SPECIAL, 0 },
290 { "rasid", 0x5A, XT_REG_SPECIAL, 0 },
291 { "itlbcfg", 0x5B, XT_REG_SPECIAL, 0 },
292 { "dtlbcfg", 0x5C, XT_REG_SPECIAL, 0 },
293 { "mepc", 0x6A, XT_REG_SPECIAL, 0 },
294 { "meps", 0x6B, XT_REG_SPECIAL, 0 },
295 { "mesave", 0x6C, XT_REG_SPECIAL, 0 },
296 { "mesr", 0x6D, XT_REG_SPECIAL, 0 },
297 { "mecr", 0x6E, XT_REG_SPECIAL, 0 },
298 { "mevaddr", 0x6F, XT_REG_SPECIAL, 0 },
299 { "a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0 }, /* WARNING: For these registers, regnum points to the */
300 { "a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0 }, /* index of the corresponding ARxregisters, NOT to */
301 { "a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0 }, /* the processor register number! */
302 { "a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0 },
303 { "a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0 },
304 { "a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0 },
305 { "a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0 },
306 { "a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0 },
307 { "a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0 },
308 { "a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0 },
309 { "a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0 },
310 { "a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0 },
311 { "a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0 },
312 { "a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0 },
313 { "a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0 },
314 { "a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0 },
315
316 { "pwrctl", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
317 { "pwrstat", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
318 { "eristat", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
319 { "cs_itctrl", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
320 { "cs_claimset", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
321 { "cs_claimclr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
322 { "cs_lockaccess", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
323 { "cs_lockstatus", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
324 { "cs_authstatus", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
325 { "fault_info", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
326 { "trax_id", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
327 { "trax_ctrl", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
328 { "trax_stat", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
329 { "trax_data", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
330 { "trax_addr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
331 { "trax_pctrigger", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
332 { "trax_pcmatch", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
333 { "trax_delay", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
334 { "trax_memstart", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
335 { "trax_memend", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
336 { "pmg", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
337 { "pmoc", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
338 { "pm0", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
339 { "pm1", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
340 { "pmctrl0", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
341 { "pmctrl1", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
342 { "pmstat0", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
343 { "pmstat1", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
344 { "ocd_id", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
345 { "ocd_dcrclr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
346 { "ocd_dcrset", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
347 { "ocd_dsr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
348 { "ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
349 };
350
351
352 /**
353 * Types of memory regions used on an Xtensa target
354 */
355 enum xtensa_mem_region_type {
356 XTENSA_MEM_REG_IROM = 0x0,
357 XTENSA_MEM_REG_IRAM,
358 XTENSA_MEM_REG_DROM,
359 XTENSA_MEM_REG_DRAM,
360 XTENSA_MEM_REG_URAM,
361 XTENSA_MEM_REG_XLMI,
362 XTENSA_MEM_REGS_NUM
363 };
364
365 /**
366 * Gets the config for a specific memory region type
367 */
368 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
369 struct xtensa *xtensa,
370 enum xtensa_mem_region_type type)
371 {
372 switch (type) {
373 case XTENSA_MEM_REG_IROM:
374 return &xtensa->core_config->irom;
375 case XTENSA_MEM_REG_IRAM:
376 return &xtensa->core_config->iram;
377 case XTENSA_MEM_REG_DROM:
378 return &xtensa->core_config->drom;
379 case XTENSA_MEM_REG_DRAM:
380 return &xtensa->core_config->dram;
381 case XTENSA_MEM_REG_URAM:
382 return &xtensa->core_config->uram;
383 case XTENSA_MEM_REG_XLMI:
384 return &xtensa->core_config->xlmi;
385 default:
386 return NULL;
387 }
388 }
389
390 /**
391 * Finds the xtensa_local_mem_region_config inside an xtensa_local_mem_config
392 * that contains the given address.
393 * Returns NULL if nothing is found.
394 */
395 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
396 const struct xtensa_local_mem_config *mem,
397 target_addr_t address)
398 {
399 for (unsigned int i = 0; i < mem->count; i++) {
400 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
401 if (address >= region->base && address < (region->base + region->size))
402 return region;
403 }
404 return NULL;
405 }
406
407 /**
408 * Returns the xtensa_local_mem_region_config from the xtensa target
409 * that contains the given address.
410 * Returns NULL if nothing is found.
411 */
412 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
413 struct xtensa *xtensa,
414 target_addr_t address)
415 {
416 const struct xtensa_local_mem_region_config *result;
417 const struct xtensa_local_mem_config *mcgf;
418 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
419 mcgf = xtensa_get_mem_config(xtensa, mtype);
420 result = xtensa_memory_region_find(mcgf, address);
421 if (result)
422 return result;
423 }
424 return NULL;
425 }
426
427 static int xtensa_core_reg_get(struct reg *reg)
428 {
429 /* Nothing to do here: all registers are read on halt anyway. */
430 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
431 struct target *target = xtensa->target;
432
433 if (target->state != TARGET_HALTED)
434 return ERROR_TARGET_NOT_HALTED;
435 return ERROR_OK;
436 }
437
438 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
439 {
440 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
441 struct target *target = xtensa->target;
442
443 assert(reg->size <= 64 && "only registers up to 64 bits are supported!");
444 if (target->state != TARGET_HALTED)
445 return ERROR_TARGET_NOT_HALTED;
446
447 buf_cpy(buf, reg->value, reg->size);
448 reg->dirty = true;
449 reg->valid = true;
450
451 return ERROR_OK;
452 }
453
454 static const struct reg_arch_type xtensa_reg_type = {
455 .get = xtensa_core_reg_get,
456 .set = xtensa_core_reg_set,
457 };
458
459 const struct reg_arch_type xtensa_user_reg_u32_type = {
460 .get = xtensa_core_reg_get,
461 .set = xtensa_core_reg_set,
462 };
463
464 const struct reg_arch_type xtensa_user_reg_u128_type = {
465 .get = xtensa_core_reg_get,
466 .set = xtensa_core_reg_set,
467 };
468
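/* Narrow ("density") instructions are 2 bytes long; bit 3 of the first opcode
 * nibble (op0) is what distinguishes them from the full-size formats in the
 * check below. */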
469 static inline size_t xtensa_insn_size_get(uint32_t insn)
470 {
471 return insn & BIT(3) ? 2 : XT_ISNS_SZ_MAX;
472 }
473
474 /* Convert a register index that's indexed relative to windowbase, to the real address. */
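/* Note that WINDOWBASE counts in units of 4 registers, hence the `windowbase * 4` term. */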
475 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(enum xtensa_reg_id reg_idx, int windowbase)
476 {
477 unsigned int idx;
478 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_AR63) {
479 idx = reg_idx - XT_REG_IDX_AR0;
480 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
481 idx = reg_idx - XT_REG_IDX_A0;
482 } else {
483 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
484 return -1;
485 }
486 return ((idx + windowbase * 4) & 63) + XT_REG_IDX_AR0;
487 }
488
489 static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(enum xtensa_reg_id reg_idx, int windowbase)
490 {
491 return xtensa_windowbase_offset_to_canonical(reg_idx, -windowbase);
492 }
493
494 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
495 {
496 struct reg *reg_list = xtensa->core_cache->reg_list;
497 reg_list[reg_idx].dirty = true;
498 }
499
500 static int xtensa_queue_dbg_reg_read(struct xtensa *xtensa, unsigned int reg, uint8_t *data)
501 {
502 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
503
504 if (!xtensa->core_config->trace.enabled &&
505 (reg <= NARADR_MEMADDREND || (reg >= NARADR_PMG && reg <= NARADR_PMSTAT7))) {
506 LOG_ERROR("Can not access %u reg when Trace Port option disabled!", reg);
507 return ERROR_FAIL;
508 }
509 return dm->dbg_ops->queue_reg_read(dm, reg, data);
510 }
511
512 static int xtensa_queue_dbg_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
513 {
514 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
515
516 if (!xtensa->core_config->trace.enabled &&
517 (reg <= NARADR_MEMADDREND || (reg >= NARADR_PMG && reg <= NARADR_PMSTAT7))) {
518 LOG_ERROR("Can not access %u reg when Trace Port option disabled!", reg);
519 return ERROR_FAIL;
520 }
521 return dm->dbg_ops->queue_reg_write(dm, reg, data);
522 }
523
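/* Queue an instruction word for execution: writing it to the DIR0EXEC debug
 * register makes the halted core execute it immediately. */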
524 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
525 {
526 xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0EXEC, ins);
527 }
528
529 static bool xtensa_reg_is_readable(enum xtensa_reg_flags flags, xtensa_reg_val_t cpenable)
530 {
531 if (flags & XT_REGF_NOREAD)
532 return false;
533 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
534 return false;
535 return true;
536 }
537
538 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
539 {
540 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
541 return dm->pwr_ops->queue_reg_write(dm, reg, data);
542 }
543
544 static bool xtensa_special_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
545 {
546 /* TODO: array of size XT_NUM_REGS can be used here to map special register ID to
547 * corresponding config option 'enabled' flag */
548 if (reg_idx >= XT_REG_IDX_LBEG && reg_idx <= XT_REG_IDX_LCOUNT)
549 return xtensa->core_config->loop;
550 else if (reg_idx == XT_REG_IDX_BR)
551 return xtensa->core_config->boolean;
552 else if (reg_idx == XT_REG_IDX_LITBASE)
553 return xtensa->core_config->ext_l32r;
554 else if (reg_idx == XT_REG_IDX_SCOMPARE1 || reg_idx == XT_REG_IDX_ATOMCTL)
555 return xtensa->core_config->cond_store;
556 else if (reg_idx >= XT_REG_IDX_ACCLO && reg_idx <= XT_REG_IDX_M3)
557 return xtensa->core_config->mac16;
558 else if (reg_idx == XT_REG_IDX_WINDOWBASE || reg_idx == XT_REG_IDX_WINDOWSTART)
559 return xtensa->core_config->windowed;
560 else if (reg_idx >= XT_REG_IDX_PTEVADDR && reg_idx <= XT_REG_IDX_DTLBCFG)
561 return xtensa->core_config->mmu.enabled;
562 else if (reg_idx == XT_REG_IDX_MMID)
563 return xtensa->core_config->trace.enabled;
564 else if (reg_idx >= XT_REG_IDX_MEPC && reg_idx <= XT_REG_IDX_MEVADDR)
565 return xtensa->core_config->mem_err_check;
566 else if (reg_idx == XT_REG_IDX_CPENABLE)
567 return xtensa->core_config->coproc;
568 else if (reg_idx == XT_REG_IDX_VECBASE)
569 return xtensa->core_config->reloc_vec;
570 else if (reg_idx == XT_REG_IDX_CCOUNT)
571 return xtensa->core_config->tim_irq.enabled;
572 else if (reg_idx >= XT_REG_IDX_CCOMPARE0 && reg_idx <= XT_REG_IDX_CCOMPARE2)
573 return xtensa->core_config->tim_irq.enabled &&
574 (reg_idx - XT_REG_IDX_CCOMPARE0 < xtensa->core_config->tim_irq.comp_num);
575 else if (reg_idx == XT_REG_IDX_PRID)
576 return xtensa->core_config->proc_id;
577 else if (reg_idx >= XT_REG_IDX_MISC0 && reg_idx <= XT_REG_IDX_MISC3)
578 return reg_idx - XT_REG_IDX_MISC0 < xtensa->core_config->miscregs_num;
579 return true;
580 }
581
582 static bool xtensa_user_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
583 {
584 if (reg_idx == XT_REG_IDX_THREADPTR)
585 return xtensa->core_config->threadptr;
586 if (reg_idx == XT_REG_IDX_FCR || reg_idx == XT_REG_IDX_FSR)
587 return xtensa->core_config->fp_coproc;
588 return false;
589 }
590
591 static inline bool xtensa_fp_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
592 {
593 return xtensa->core_config->fp_coproc;
594 }
595
596 static inline bool xtensa_regular_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
597 {
598 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_AR63)
599 return reg_idx - XT_REG_IDX_AR0 < xtensa->core_config->aregs_num;
600 return true;
601 }
602
603 static int xtensa_write_dirty_registers(struct target *target)
604 {
605 struct xtensa *xtensa = target_to_xtensa(target);
606 int res;
607 xtensa_reg_val_t regval, windowbase = 0;
608 bool scratch_reg_dirty = false;
609 struct reg *reg_list = xtensa->core_cache->reg_list;
610
611 LOG_TARGET_DEBUG(target, "start");
612
613 /* We need to write the dirty registers in the cache list back to the processor.
614 * Start by writing the SFR/user registers. */
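/* Write-back sequence for each special/user/FP register: the host deposits the
 * value in DDR, the core copies it to scratch A3 with RSR DDR, then moves it to
 * the target register with WSR/WUR/WFR. A3 is marked dirty afterwards so its
 * cached value is written back in the A0-A15 loop below. */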
615 for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
616 if (reg_list[i].dirty) {
617 if (xtensa_regs[i].type == XT_REG_SPECIAL ||
618 xtensa_regs[i].type == XT_REG_USER ||
619 xtensa_regs[i].type == XT_REG_FR) {
620 scratch_reg_dirty = true;
621 regval = xtensa_reg_get(target, i);
622 LOG_TARGET_DEBUG(target, "Writing back reg %s val %08" PRIX32,
623 xtensa_regs[i].name,
624 regval);
625 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
626 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
627 if (xtensa_regs[i].type == XT_REG_USER) {
628 if (reg_list[i].exist)
629 xtensa_queue_exec_ins(xtensa,
630 XT_INS_WUR(xtensa_regs[i].reg_num,
631 XT_REG_A3));
632 } else if (xtensa_regs[i].type == XT_REG_FR) {
633 if (reg_list[i].exist)
634 xtensa_queue_exec_ins(xtensa,
635 XT_INS_WFR(xtensa_regs[i].reg_num,
636 XT_REG_A3));
637 } else {/*SFR */
638 if (reg_list[i].exist) {
639 unsigned int reg_num = xtensa_regs[i].reg_num;
640 if (reg_num == XT_PC_REG_NUM_BASE)
641 /* reg number of PC for debug interrupt
642 * depends on NDEBUGLEVEL */
643 reg_num += xtensa->core_config->debug.irq_level;
644
645 xtensa_queue_exec_ins(xtensa,
646 XT_INS_WSR(reg_num, XT_REG_A3));
647 }
648 }
649 reg_list[i].dirty = false;
650 }
651 }
652 }
653 if (scratch_reg_dirty)
654 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
655
656 if (xtensa->core_config->user_regs_num > 0 &&
657 xtensa->core_config->queue_write_dirty_user_regs)
658 xtensa->core_config->queue_write_dirty_user_regs(target);
659
660 if (xtensa->core_config->windowed) {
661 /* Grab the windowbase, we need it. */
662 windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
663 /* Check whether an Ax register and the ARx register it currently maps to
664 * are both dirty with different values. */
665 /* Warn the user if this happens, there is not much else we can do... */
666 for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
667 unsigned int j = xtensa_windowbase_offset_to_canonical(i, windowbase);
668 if (reg_list[i].dirty && reg_list[j].dirty) {
669 if (memcmp(reg_list[i].value, reg_list[j].value,
670 sizeof(xtensa_reg_val_t)) != 0)
671 LOG_WARNING(
672 "Warning: Both A%d as well as the physical register it points to (AR%d) are dirty and differs in value. Results are undefined!",
673 i - XT_REG_IDX_A0,
674 j - XT_REG_IDX_AR0);
675 }
676 }
677 }
678
679 /* Write A0-A15 */
680 for (unsigned int i = 0; i < 16; i++) {
681 if (reg_list[XT_REG_IDX_A0 + i].dirty) {
682 regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
683 LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
684 xtensa_regs[XT_REG_IDX_A0 + i].name,
685 regval,
686 xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
687 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
688 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, i));
689 reg_list[XT_REG_IDX_A0 + i].dirty = false;
690 }
691 }
692
693 if (xtensa->core_config->windowed) {
694 /*Now write AR0-AR63. */
695 for (unsigned int j = 0; j < 64; j += 16) {
696 /*Write the 16 registers we can see */
697 for (unsigned int i = 0; i < 16; i++) {
698 if (i + j < xtensa->core_config->aregs_num) {
699 enum xtensa_reg_id realadr =
700 xtensa_windowbase_offset_to_canonical(XT_REG_IDX_AR0 + i + j,
701 windowbase);
702 /*Write back any dirty un-windowed registers */
703 if (reg_list[realadr].dirty) {
704 regval = xtensa_reg_get(target, realadr);
705 LOG_TARGET_DEBUG(
706 target,
707 "Writing back reg %s value %08" PRIX32 ", num =%i",
708 xtensa_regs[realadr].name,
709 regval,
710 xtensa_regs[realadr].reg_num);
711 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
712 xtensa_queue_exec_ins(xtensa,
713 XT_INS_RSR(XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
714 reg_list[realadr].dirty = false;
715 }
716 }
717 }
718 /* Now rotate the window so we'll see the next 16 registers.
719 * The final rotate will wrap around, leaving us in the state
720 * we started in. */
721 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(4));
722 }
723 }
724 res = jtag_execute_queue();
725 xtensa_core_status_check(target);
726
727 return res;
728 }
729
730 int xtensa_queue_write_dirty_user_regs_u32(struct target *target)
731 {
732 struct xtensa *xtensa = target_to_xtensa(target);
733 struct reg *reg_list = xtensa->core_cache->reg_list;
734 xtensa_reg_val_t reg_val;
735 bool scratch_reg_dirty = false;
736
737 LOG_TARGET_DEBUG(target, "start");
738
739 /* We need to write the dirty registers in the cache list back to the processor.
740 * Start by writing the SFR/user registers. */
741 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
742 if (!reg_list[XT_USR_REG_START + i].dirty)
743 continue;
744 scratch_reg_dirty = true;
745 reg_val = xtensa_reg_get(target, XT_USR_REG_START + i);
746 LOG_TARGET_DEBUG(target, "Writing back reg %s val %08" PRIX32,
747 xtensa->core_config->user_regs[i].name,
748 reg_val);
749 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, reg_val);
750 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
751 xtensa_queue_exec_ins(xtensa,
752 XT_INS_WUR(xtensa->core_config->user_regs[i].reg_num,
753 XT_REG_A3));
754 reg_list[XT_USR_REG_START + i].dirty = false;
755 }
756 if (scratch_reg_dirty)
757 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
758
759 return ERROR_OK;
760 }
761
762 static inline bool xtensa_is_stopped(struct target *target)
763 {
764 struct xtensa *xtensa = target_to_xtensa(target);
765 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
766 }
767
768 int xtensa_examine(struct target *target)
769 {
770 struct xtensa *xtensa = target_to_xtensa(target);
771 unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;
772
773 LOG_DEBUG("coreid = %d", target->coreid);
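/* Wake up the debug module, memories and core; the second write additionally
 * claims the debug interface for JTAG use (PWRCTL_JTAGDEBUGUSE). */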
774 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
775 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
776 xtensa_dm_queue_enable(&xtensa->dbg_mod);
777 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
778 int res = jtag_execute_queue();
779 if (res != ERROR_OK)
780 return res;
781 if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
782 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
783 return ERROR_TARGET_FAILURE;
784 }
785 LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
786 if (!target_was_examined(target))
787 target_set_examined(target);
788 xtensa_smpbreak_write(xtensa, xtensa->smp_break);
789 return ERROR_OK;
790 }
791
792 int xtensa_wakeup(struct target *target)
793 {
794 struct xtensa *xtensa = target_to_xtensa(target);
795 unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;
796
797 if (xtensa->reset_asserted)
798 cmd |= PWRCTL_CORERESET;
799 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
800 /* TODO: can we join this with the write above? */
801 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
802 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
803 return jtag_execute_queue();
804 }
805
806 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
807 {
808 uint32_t dsr_data = 0x00110000;
809 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
810 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
811 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
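/* `clear` ends up holding every cross-trigger enable bit that was not requested
 * in `set` (OCDDCR_ENABLEOCD cancels out of the XOR), so the DCRSET write below
 * enables the requested bits while DCRCLR disables the rest. */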
812
813 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
814 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, set | OCDDCR_ENABLEOCD);
815 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, clear);
816 xtensa_queue_dbg_reg_write(xtensa, NARADR_DSR, dsr_data);
817 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
818 return jtag_execute_queue();
819 }
820
821 int xtensa_smpbreak_set(struct target *target, uint32_t set)
822 {
823 struct xtensa *xtensa = target_to_xtensa(target);
824 int res = ERROR_OK;
825
826 xtensa->smp_break = set;
827 if (target_was_examined(target))
828 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
829 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
830 return res;
831 }
832
833 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
834 {
835 uint8_t dcr_buf[sizeof(uint32_t)];
836
837 xtensa_queue_dbg_reg_read(xtensa, NARADR_DCRSET, dcr_buf);
838 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
839 int res = jtag_execute_queue();
840 *val = buf_get_u32(dcr_buf, 0, 32);
841
842 return res;
843 }
844
845 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
846 {
847 struct xtensa *xtensa = target_to_xtensa(target);
848 *val = xtensa->smp_break;
849 return ERROR_OK;
850 }
851
852 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
853 {
854 return buf_get_u32(reg->value, 0, 32);
855 }
856
857 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
858 {
859 buf_set_u32(reg->value, 0, 32, value);
860 reg->dirty = true;
861 }
862
863 int xtensa_core_status_check(struct target *target)
864 {
865 struct xtensa *xtensa = target_to_xtensa(target);
866 int res, needclear = 0;
867
868 xtensa_dm_core_status_read(&xtensa->dbg_mod);
869 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
870 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
871 if (dsr & OCDDSR_EXECBUSY) {
872 if (!xtensa->suppress_dsr_errors)
873 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
874 needclear = 1;
875 }
876 if (dsr & OCDDSR_EXECEXCEPTION) {
877 if (!xtensa->suppress_dsr_errors)
878 LOG_TARGET_ERROR(target,
879 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
880 dsr);
881 needclear = 1;
882 }
883 if (dsr & OCDDSR_EXECOVERRUN) {
884 if (!xtensa->suppress_dsr_errors)
885 LOG_TARGET_ERROR(target,
886 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
887 dsr);
888 needclear = 1;
889 }
890 if (needclear) {
891 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
892 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
893 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
894 LOG_TARGET_ERROR(target, "clearing DSR failed!");
895 return xtensa->suppress_dsr_errors ? ERROR_OK : ERROR_FAIL;
896 }
897 return ERROR_OK;
898 }
899
900 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
901 {
902 struct xtensa *xtensa = target_to_xtensa(target);
903 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
904 assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
905 return xtensa_reg_get_value(reg);
906 }
907
908 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
909 {
910 struct xtensa *xtensa = target_to_xtensa(target);
911 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
912 assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
913 if (xtensa_reg_get_value(reg) == value)
914 return;
915 xtensa_reg_set_value(reg, value);
916 }
917
918 int xtensa_assert_reset(struct target *target)
919 {
920 struct xtensa *xtensa = target_to_xtensa(target);
921
922 LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
923 target->state = TARGET_RESET;
924 xtensa_queue_pwr_reg_write(xtensa,
925 DMREG_PWRCTL,
926 PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP |
927 PWRCTL_CORERESET);
928 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
929 int res = jtag_execute_queue();
930 if (res != ERROR_OK)
931 return res;
932 xtensa->reset_asserted = true;
933 return res;
934 }
935
936 int xtensa_deassert_reset(struct target *target)
937 {
938 struct xtensa *xtensa = target_to_xtensa(target);
939
940 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
941 if (target->reset_halt)
942 xtensa_queue_dbg_reg_write(xtensa,
943 NARADR_DCRSET,
944 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
945 xtensa_queue_pwr_reg_write(xtensa,
946 DMREG_PWRCTL,
947 PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP);
948 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
949 int res = jtag_execute_queue();
950 if (res != ERROR_OK)
951 return res;
952 target->state = TARGET_RUNNING;
953 xtensa->reset_asserted = false;
954 return res;
955 }
956
957 int xtensa_fetch_all_regs(struct target *target)
958 {
959 struct xtensa *xtensa = target_to_xtensa(target);
960 struct reg *reg_list = xtensa->core_cache->reg_list;
961 xtensa_reg_val_t cpenable = 0, windowbase = 0;
962 uint8_t regvals[XT_NUM_REGS][sizeof(xtensa_reg_val_t)];
963 uint8_t dsrs[XT_NUM_REGS][sizeof(xtensa_dsr_t)];
964 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
965
966 LOG_TARGET_DEBUG(target, "start");
967
968 /* Assume the CPU has just halted. We now want to fill the register cache with all the
969 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
970 * in one go, then sort everything out from the regvals variable. */
971
972 /* Start out with AREGS; we can reach those immediately. Grab them 16 at a time. */
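/* For each visible ARx the core moves the value into DDR with WSR, and the host
 * then reads DDR (and optionally the DSR, for error checking) back through the
 * debug module. */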
973 for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
974 /*Grab the 16 registers we can see */
975 for (unsigned int i = 0; i < 16; i++) {
976 if (i + j < xtensa->core_config->aregs_num) {
977 xtensa_queue_exec_ins(xtensa,
978 XT_INS_WSR(XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
979 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[XT_REG_IDX_AR0 + i + j]);
980 if (debug_dsrs)
981 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[XT_REG_IDX_AR0 + i + j]);
982 }
983 }
984 if (xtensa->core_config->windowed) {
985 /* Now rotate the window so we'll see the next 16 registers.
986 * The final rotate will wrap around, leaving us in the state
987 * we started in. */
988 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(4));
989 }
990 }
991 if (xtensa->core_config->coproc) {
992 /* As the very first thing after AREGS, grab the CPENABLE register.
993 * It indicates whether we can also read the FP (and theoretically other
994 * coprocessor) registers, or whether doing so would be a bad idea. */
995 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
996 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(XT_SR_DDR, XT_REG_A3));
997 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[XT_REG_IDX_CPENABLE]);
998 }
999 int res = jtag_execute_queue();
1000 if (res != ERROR_OK) {
1001 LOG_ERROR("Failed to read ARs (%d)!", res);
1002 return res;
1003 }
1004 xtensa_core_status_check(target);
1005
1006 if (xtensa->core_config->coproc)
1007 cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE], 0, 32);
1008 /* We're now free to use any of A0-A15 as scratch registers
1009 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1010 for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
1011 if (xtensa_reg_is_readable(xtensa_regs[i].flags, cpenable) && reg_list[i].exist &&
1012 (xtensa_regs[i].type == XT_REG_SPECIAL ||
1013 xtensa_regs[i].type == XT_REG_USER || xtensa_regs[i].type == XT_REG_FR)) {
1014 if (xtensa_regs[i].type == XT_REG_USER) {
1015 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa_regs[i].reg_num, XT_REG_A3));
1016 } else if (xtensa_regs[i].type == XT_REG_FR) {
1017 xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa_regs[i].reg_num, XT_REG_A3));
1018 } else { /*SFR */
1019 unsigned int reg_num = xtensa_regs[i].reg_num;
1020 if (reg_num == XT_PC_REG_NUM_BASE) {
1021 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1022 reg_num += xtensa->core_config->debug.irq_level;
1023 }
1024 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(reg_num, XT_REG_A3));
1025 }
1026 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(XT_SR_DDR, XT_REG_A3));
1027 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[i]);
1028 if (debug_dsrs)
1029 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[i]);
1030 }
1031 }
1032 /* Ok, send the whole mess to the CPU. */
1033 res = jtag_execute_queue();
1034 if (res != ERROR_OK) {
1035 LOG_ERROR("Failed to fetch AR regs!");
1036 return res;
1037 }
1038 xtensa_core_status_check(target);
1039
1040 if (debug_dsrs) {
1041 /* DSR checking: follows order in which registers are requested. */
1042 for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
1043 if (xtensa_reg_is_readable(xtensa_regs[i].flags, cpenable) && reg_list[i].exist &&
1044 (xtensa_regs[i].type == XT_REG_SPECIAL || xtensa_regs[i].type == XT_REG_USER ||
1045 xtensa_regs[i].type == XT_REG_FR)) {
1046 if (buf_get_u32(dsrs[i], 0, 32) & OCDDSR_EXECEXCEPTION) {
1047 LOG_ERROR("Exception reading %s!", xtensa_regs[i].name);
1048 return ERROR_FAIL;
1049 }
1050 }
1051 }
1052 }
1053
1054 if (xtensa->core_config->user_regs_num > 0 && xtensa->core_config->fetch_user_regs) {
1055 res = xtensa->core_config->fetch_user_regs(target);
1056 if (res != ERROR_OK)
1057 return res;
1058 }
1059
1060 if (xtensa->core_config->windowed) {
1061 /* We need the windowbase to decode the general addresses. */
1062 windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE], 0, 32);
1063 }
1064 /* Decode the result and update the cache. */
1065 for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
1066 if (xtensa_reg_is_readable(xtensa_regs[i].flags, cpenable) && reg_list[i].exist) {
1067 if (xtensa_regs[i].type == XT_REG_GENERAL) {
1068 /* TODO: add support for non-windowed configs */
1069 assert(
1070 xtensa->core_config->windowed &&
1071 "Regs fetch is not supported for non-windowed configs!");
1072 /* The 64-value general register set is read from (windowbase) on down.
1073 * We need to get the real register address by subtracting windowbase and
1074 * wrapping around. */
1075 int realadr = xtensa_canonical_to_windowbase_offset(i, windowbase);
1076 buf_cpy(regvals[realadr], reg_list[i].value, reg_list[i].size);
1077 } else if (xtensa_regs[i].type == XT_REG_RELGEN) {
1078 buf_cpy(regvals[xtensa_regs[i].reg_num], reg_list[i].value, reg_list[i].size);
1079 } else {
1080 buf_cpy(regvals[i], reg_list[i].value, reg_list[i].size);
1081 }
1082 reg_list[i].valid = true;
1083 } else {
1084 reg_list[i].valid = false;
1085 }
1086 }
1087 /* We have used A3 as a scratch register and we will need to write that back. */
1088 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1089 xtensa->regs_fetched = true;
1090
1091 return ERROR_OK;
1092 }
1093
1094 int xtensa_fetch_user_regs_u32(struct target *target)
1095 {
1096 struct xtensa *xtensa = target_to_xtensa(target);
1097 struct reg *reg_list = xtensa->core_cache->reg_list;
1098 xtensa_reg_val_t cpenable = 0;
1099 uint8_t regvals[XT_USER_REGS_NUM_MAX][sizeof(xtensa_reg_val_t)];
1100 uint8_t dsrs[XT_USER_REGS_NUM_MAX][sizeof(xtensa_dsr_t)];
1101 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1102
1103 assert(xtensa->core_config->user_regs_num < XT_USER_REGS_NUM_MAX && "Too many user regs configured!");
1104 if (xtensa->core_config->coproc)
1105 cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
1106
1107 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
1108 if (!xtensa_reg_is_readable(xtensa->core_config->user_regs[i].flags, cpenable))
1109 continue;
1110 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa->core_config->user_regs[i].reg_num, XT_REG_A3));
1111 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(XT_SR_DDR, XT_REG_A3));
1112 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[i]);
1113 if (debug_dsrs)
1114 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[i]);
1115 }
1116 /* Ok, send the whole mess to the CPU. */
1117 int res = jtag_execute_queue();
1118 if (res != ERROR_OK) {
1119 LOG_ERROR("Failed to fetch AR regs!");
1120 return res;
1121 }
1122 xtensa_core_status_check(target);
1123
1124 if (debug_dsrs) {
1125 /* DSR checking: follows order in which registers are requested. */
1126 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
1127 if (!xtensa_reg_is_readable(xtensa->core_config->user_regs[i].flags, cpenable))
1128 continue;
1129 if (buf_get_u32(dsrs[i], 0, 32) & OCDDSR_EXECEXCEPTION) {
1130 LOG_ERROR("Exception reading %s!", xtensa->core_config->user_regs[i].name);
1131 return ERROR_FAIL;
1132 }
1133 }
1134 }
1135
1136 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
1137 if (xtensa_reg_is_readable(xtensa->core_config->user_regs[i].flags, cpenable)) {
1138 buf_cpy(regvals[i], reg_list[XT_USR_REG_START + i].value, reg_list[XT_USR_REG_START + i].size);
1139 reg_list[XT_USR_REG_START + i].valid = true;
1140 } else {
1141 reg_list[XT_USR_REG_START + i].valid = false;
1142 }
1143 }
1144
1145 /* We have used A3 as a scratch register and we will need to write that back. */
1146 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1147 return ERROR_OK;
1148 }
1149
1150 int xtensa_get_gdb_reg_list(struct target *target,
1151 struct reg **reg_list[],
1152 int *reg_list_size,
1153 enum target_register_class reg_class)
1154 {
1155 struct xtensa *xtensa = target_to_xtensa(target);
1156 unsigned int num_regs = xtensa->core_config->gdb_general_regs_num;
1157
1158 if (reg_class == REG_CLASS_ALL)
1159 num_regs = xtensa->regs_num;
1160
1161 LOG_DEBUG("reg_class=%i, num_regs=%d", reg_class, num_regs);
1162
1163 *reg_list = malloc(num_regs * sizeof(struct reg *));
1164 if (!*reg_list)
1165 return ERROR_FAIL;
1166
1167 for (unsigned int k = 0; k < num_regs; k++) {
1168 unsigned int reg_id = xtensa->core_config->gdb_regs_mapping[k];
1169 (*reg_list)[k] = &xtensa->core_cache->reg_list[reg_id];
1170 }
1171
1172 *reg_list_size = num_regs;
1173
1174 return ERROR_OK;
1175 }
1176
1177 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1178 {
1179 struct xtensa *xtensa = target_to_xtensa(target);
1180 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1181 xtensa->core_config->mmu.dtlb_entries_count > 0;
1182 return ERROR_OK;
1183 }
1184
1185 int xtensa_halt(struct target *target)
1186 {
1187 struct xtensa *xtensa = target_to_xtensa(target);
1188
1189 LOG_TARGET_DEBUG(target, "start");
1190 if (target->state == TARGET_HALTED) {
1191 LOG_TARGET_DEBUG(target, "target was already halted");
1192 return ERROR_OK;
1193 }
1194 /* First we have to read dsr and check if the target stopped */
1195 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1196 if (res != ERROR_OK) {
1197 LOG_TARGET_ERROR(target, "Failed to read core status!");
1198 return res;
1199 }
1200 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1201 if (!xtensa_is_stopped(target)) {
1202 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1203 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1204 res = jtag_execute_queue();
1205 if (res != ERROR_OK)
1206 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1207 }
1208
1209 return res;
1210 }
1211
1212 int xtensa_prepare_resume(struct target *target,
1213 int current,
1214 target_addr_t address,
1215 int handle_breakpoints,
1216 int debug_execution)
1217 {
1218 struct xtensa *xtensa = target_to_xtensa(target);
1219 uint32_t bpena = 0;
1220
1221 LOG_TARGET_DEBUG(target,
1222 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1223 current,
1224 address,
1225 handle_breakpoints,
1226 debug_execution);
1227
1228 if (target->state != TARGET_HALTED) {
1229 LOG_TARGET_WARNING(target, "target not halted");
1230 return ERROR_TARGET_NOT_HALTED;
1231 }
1232
1233 if (address && !current) {
1234 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1235 } else {
1236 xtensa_reg_val_t cause = xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
1237 if (cause & DEBUGCAUSE_DB) {
1238 /* We stopped due to a watchpoint. We can't just resume executing
1239 * the instruction again, because that would trigger the watchpoint
1240 * again. To fix this, we single-step, which ignores
1241 * watchpoints. */
1242 xtensa_do_step(target, current, address, handle_breakpoints);
1243 }
1244 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
1245 /* We stopped due to a break instruction. We can't just resume
1246 * executing the instruction again, because that would trigger the
1247 * break again. To fix this, we single-step, which ignores
1248 * the break. */
1249 xtensa_do_step(target, current, address, handle_breakpoints);
1250 }
1251 }
1252
1253 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1254 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1255 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1256 if (xtensa->hw_brps[slot]) {
1257 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1258 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1259 bpena |= BIT(slot);
1260 }
1261 }
1262 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1263
1264 /* Here we write all registers to the targets */
1265 int res = xtensa_write_dirty_registers(target);
1266 if (res != ERROR_OK)
1267 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1268 return res;
1269 }
1270
1271 int xtensa_do_resume(struct target *target)
1272 {
1273 struct xtensa *xtensa = target_to_xtensa(target);
1274
1275 LOG_TARGET_DEBUG(target, "start");
1276
1277 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO);
1278 int res = jtag_execute_queue();
1279 if (res != ERROR_OK) {
1280 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1281 return res;
1282 }
1283 xtensa_core_status_check(target);
1284 return ERROR_OK;
1285 }
1286
1287 int xtensa_resume(struct target *target,
1288 int current,
1289 target_addr_t address,
1290 int handle_breakpoints,
1291 int debug_execution)
1292 {
1293 LOG_TARGET_DEBUG(target, "start");
1294 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1295 if (res != ERROR_OK) {
1296 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1297 return res;
1298 }
1299 res = xtensa_do_resume(target);
1300 if (res != ERROR_OK) {
1301 LOG_TARGET_ERROR(target, "Failed to resume!");
1302 return res;
1303 }
1304
1305 target->debug_reason = DBG_REASON_NOTHALTED;
1306 if (!debug_execution)
1307 target->state = TARGET_RUNNING;
1308 else
1309 target->state = TARGET_DEBUG_RUNNING;
1310
1311 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1312
1313 return ERROR_OK;
1314 }
1315
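/* Check whether PC points into a window overflow/underflow exception handler by
 * inspecting the instruction at PC: those handlers spill/refill registers with
 * S32E/L32E and return with RFWO/RFWU. */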
1316 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1317 {
1318 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1319 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1320 if (err != ERROR_OK)
1321 return false;
1322
1323 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1324 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK;
1325 if (masked == XT_INS_L32E(0, 0, 0) || masked == XT_INS_S32E(0, 0, 0))
1326 return true;
1327
1328 masked = insn & XT_INS_RFWO_RFWU_MASK;
1329 if (masked == XT_INS_RFWO || masked == XT_INS_RFWU)
1330 return true;
1331
1332 return false;
1333 }
1334
1335 int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1336 {
1337 struct xtensa *xtensa = target_to_xtensa(target);
1338 int res;
1339 const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1340 xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
1341 xtensa_reg_val_t icountlvl, cause;
1342 xtensa_reg_val_t oldps, newps, oldpc, cur_pc;
1343
1344 LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1345 current, address, handle_breakpoints);
1346
1347 if (target->state != TARGET_HALTED) {
1348 LOG_TARGET_WARNING(target, "target not halted");
1349 return ERROR_TARGET_NOT_HALTED;
1350 }
1351
1352 if (xtensa->core_config->debug.icount_sz != 32) {
1353 LOG_TARGET_WARNING(target, "stepping with ICOUNT less than 32 bits is not implemented!");
1354 return ERROR_FAIL;
1355 }
1356
1357 /* Save old ps/pc */
1358 oldps = xtensa_reg_get(target, XT_REG_IDX_PS);
1359 oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);
1360
1361 cause = xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
1362 LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1363 oldps,
1364 oldpc,
1365 cause,
1366 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1367 if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1368 /* handle hard-coded SW breakpoints (e.g. syscalls) */
1369 LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1370 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0); /* so we don't recurse into the same routine */
1371 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
1372 /* pretend that we have stepped */
1373 if (cause & DEBUGCAUSE_BI)
1374 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1375 else
1376 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1377 return ERROR_OK;
1378 }
1379
1380 /* Xtensa has an ICOUNTLEVEL register which sets the maximum interrupt level at which the
1381 * instructions are to be counted while stepping.
1382 * For example, if we need to step by 2 instructions and an interrupt occurs in between,
1383 * the processor will execute the interrupt, return, and halt after the 2nd instruction.
1384 * However, sometimes we don't want the interrupt handlers to be executed at all, while
1385 * stepping through the code. In this case (XT_STEPPING_ISR_OFF), PS.INTLEVEL can be raised
1386 * to only allow Debug and NMI interrupts.
1387 */
1388 if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
1389 if (!xtensa->core_config->high_irq.enabled) {
1390 LOG_TARGET_WARNING(
1391 target,
1392 "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1393 return ERROR_FAIL;
1394 }
1395 /* Mask all interrupts below Debug, i.e. PS.INTLEVEL = DEBUGLEVEL - 1 */
1396 xtensa_reg_val_t temp_ps = (oldps & ~0xF) | (xtensa->core_config->debug.irq_level - 1);
1397 xtensa_reg_set(target, XT_REG_IDX_PS, temp_ps);
1398 }
1399 /* Regardless of the ISR masking mode, we need to count instructions at any CINTLEVEL
1400 during the step, so set `icountlvl` to DEBUGLEVEL.
1401 If ISRs are masked, they are disabled in PS (see above), so having `icountlvl` set to
1402 DEBUGLEVEL still allows stepping through any kind of code, e.g. a 'high int level' ISR.
1403 If ISRs are not masked, then with `icountlvl` set to DEBUGLEVEL we can step into any ISR
1404 that may occur (i.e. is enabled in PS).
1405 */
1406 icountlvl = xtensa->core_config->debug.irq_level;
1407
1408 if (cause & DEBUGCAUSE_DB) {
1409 /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1410 * that would trigger the watchpoint again. To fix this, we remove the watchpoints, single-step and
1411 * re-enable the watchpoint. */
1412 LOG_TARGET_DEBUG(
1413 target,
1414 "Single-stepping to get past instruction that triggered the watchpoint...");
1415 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0); /*so we don't recurse into
1416 * the same routine */
1417 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
1418 /*Save all DBREAKCx registers and set to 0 to disable watchpoints */
1419 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1420 dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
1421 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
1422 }
1423 }
1424
1425 if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1426 /* handle normal SW breakpoint */
1427 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0); /*so we don't recurse into
1428 * the same routine */
1429 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
1430 }
1431 do {
1432 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
1433 xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1434
1435 /* Now ICOUNT is set, we can resume as if we were going to run */
1436 res = xtensa_prepare_resume(target, current, address, 0, 0);
1437 if (res != ERROR_OK) {
1438 LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1439 return res;
1440 }
1441 res = xtensa_do_resume(target);
1442 if (res != ERROR_OK) {
1443 LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1444 return res;
1445 }
1446
1447 /* Wait for stepping to complete */
1448 long long start = timeval_ms();
1449 while (timeval_ms() < start + 500) {
1450 /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1451 * until stepping is complete. */
1452 usleep(1000);
1453 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1454 if (res != ERROR_OK) {
1455 LOG_TARGET_ERROR(target, "Failed to read core status!");
1456 return res;
1457 }
1458 if (xtensa_is_stopped(target))
1459 break;
1460 usleep(1000);
1461 }
1462 LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1463 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1464 if (!xtensa_is_stopped(target)) {
1465 LOG_TARGET_WARNING(
1466 target,
1467 "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1468 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1469 target->debug_reason = DBG_REASON_NOTHALTED;
1470 target->state = TARGET_RUNNING;
1471 return ERROR_FAIL;
1472 }
1473 target->debug_reason = DBG_REASON_SINGLESTEP;
1474 target->state = TARGET_HALTED;
1475
1476 xtensa_fetch_all_regs(target);
1477
1478 cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1479
1480 LOG_TARGET_DEBUG(target,
1481 "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1482 xtensa_reg_get(target, XT_REG_IDX_PS),
1483 cur_pc,
1484 xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE),
1485 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1486
1487 /* Do not step into WindowOverflow if ISRs are masked.
1488 If we stop in WindowOverflow at a breakpoint with masked ISRs and
1489 try to do a step, it will get us out of that handler. */
1490 if (xtensa->core_config->windowed &&
1491 xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
1492 xtensa_pc_in_winexc(target, cur_pc)) {
1493 /* isrmask = on, need to step out of the window exception handler */
1494 LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1495 oldpc = cur_pc;
1496 address = oldpc + 3;
1497 continue;
1498 }
1499
1500 if (oldpc == cur_pc)
1501 LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1502 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1503 else
1504 LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1505 break;
1506 } while (true);
1507 LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1508
1509 if (cause & DEBUGCAUSE_DB) {
1510 LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1511 /* Restore the DBREAKCx registers */
1512 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1513 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
1514 }
1515
1516 /* Restore int level */
1517 /* TODO: Theoretically, this can mess up stepping over an instruction that modifies
1518 * ps.intlevel by itself. Look into this. */
1519 if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
1520 newps = xtensa_reg_get(target, XT_REG_IDX_PS);
1521 newps = (newps & ~0xF) | (oldps & 0xf);
1522 xtensa_reg_set(target, XT_REG_IDX_PS, newps);
1523 }
1524
1525 /* write ICOUNTLEVEL back to zero */
1526 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
1527 /* TODO: can we skip writing dirty registers and re-fetching them? */
1528 res = xtensa_write_dirty_registers(target);
1529 xtensa_fetch_all_regs(target);
1530 return res;
1531 }
1532
1533 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1534 {
1535 return xtensa_do_step(target, current, address, handle_breakpoints);
1536 }
1537
1538 /**
1539 * Returns true if two ranges are overlapping
1540 */
1541 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1542 target_addr_t r1_end,
1543 target_addr_t r2_start,
1544 target_addr_t r2_end)
1545 {
1546 if ((r2_start >= r1_start) && (r2_start < r1_end))
1547 return true; /* r2_start is in r1 region */
1548 if ((r2_end > r1_start) && (r2_end <= r1_end))
1549 return true; /* r2_end is in r1 region */
1550 return false;
1551 }
1552
1553 /**
1554 * Returns the size of the overlapping region of two ranges.
1555 */
1556 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1557 target_addr_t r1_end,
1558 target_addr_t r2_start,
1559 target_addr_t r2_end)
1560 {
1561 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1562 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1563 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1564 return ov_end - ov_start;
1565 }
1566 return 0;
1567 }
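/* Illustrative usage (hypothetical values): for r1 = [0x1000, 0x2000) and r2 = [0x1800, 0x2800)
 * xtensa_get_overlap_size() returns 0x800, while for disjoint ranges it returns 0. */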
1568
1569 /**
1570 * Checks that the address range falls into defined memory regions and that the requested access mode is allowed
1571 */
1572 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1573 {
1574 target_addr_t adr_pos = address; /* address cursor, set to the region start */
1575 target_addr_t adr_end = address + size; /* region end */
1576 target_addr_t overlap_size;
1577 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1578
1579 while (adr_pos < adr_end) {
1580 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1581 if (!cm) /* address does not belong to any region */
1582 return false;
1583 if ((cm->access & access) != access) /* access check */
1584 return false;
1585 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1586 assert(overlap_size != 0);
1587 adr_pos += overlap_size;
1588 }
1589 return true;
1590 }
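/* Walk-through example (hypothetical regions): a read that spans two adjacent regions,
 * e.g. [0x3FF80000, 0x3FFA0000) and [0x3FFA0000, 0x3FFC0000), advances adr_pos by the overlap
 * with each region in turn and succeeds only if every region grants the requested access bits. */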
1591
1592 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1593 {
1594 struct xtensa *xtensa = target_to_xtensa(target);
1595 /* We are going to read memory in 32-bit increments. This may not be what the calling
1596 * function expects, so we may need to allocate a temp buffer and read into that first. */
1597 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1598 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
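/* Example of the alignment above (illustrative values): a 6-byte read at 0x3FFB0002 gives
 * addrstart_al = 0x3FFB0000 and addrend_al = 0x3FFB0008; the extra head/tail bytes read into the
 * temporary buffer are discarded by the memcpy() at the end of this function. */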
1599 target_addr_t adr = addrstart_al;
1600 uint8_t *albuff;
1601
1602 if (target->state != TARGET_HALTED) {
1603 LOG_TARGET_WARNING(target, "target not halted");
1604 return ERROR_TARGET_NOT_HALTED;
1605 }
1606
1607 if (!xtensa->permissive_mode) {
1608 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1609 XT_MEM_ACCESS_READ)) {
1610 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1611 return ERROR_FAIL;
1612 }
1613 }
1614
1615 if (addrstart_al == address && addrend_al == address + (size * count)) {
1616 albuff = buffer;
1617 } else {
1618 albuff = malloc(addrend_al - addrstart_al);
1619 if (!albuff) {
1620 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1621 addrend_al - addrstart_al);
1622 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1623 }
1624 }
1625
1626 /* We're going to use A3 here */
1627 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1628 /* Write start address to A3 */
1629 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1630 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
1631 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1632 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1633 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(XT_REG_A3));
1634 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[i]);
1635 }
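/* Note (added for clarity): each LDDR32P above loads the word addressed by A3 into DDR and
 * post-increments A3, so the start address only had to be written to A3 once before the loop. */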
1636 int res = jtag_execute_queue();
1637 if (res == ERROR_OK)
1638 res = xtensa_core_status_check(target);
1639 if (res != ERROR_OK)
1640 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address " TARGET_ADDR_FMT,
1641 count * size, address);
1642
1643 if (albuff != buffer) {
1644 memcpy(buffer, albuff + (address & 3), (size * count));
1645 free(albuff);
1646 }
1647
1648 return res;
1649 }
1650
1651 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1652 {
1653 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1654 return xtensa_read_memory(target, address, 1, count, buffer);
1655 }
1656
1657 int xtensa_write_memory(struct target *target,
1658 target_addr_t address,
1659 uint32_t size,
1660 uint32_t count,
1661 const uint8_t *buffer)
1662 {
1663 /* This memory write function can be handed nearly anything, from
1664 * aligned uint32 writes to unaligned uint8 accesses. Xtensa memory, however, may only
1665 * accept aligned uint32 writes. That is why we convert
1666 * everything into that form. */
1667 struct xtensa *xtensa = target_to_xtensa(target);
1668 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1669 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1670 target_addr_t adr = addrstart_al;
1671 int res;
1672 uint8_t *albuff;
1673
1674 if (target->state != TARGET_HALTED) {
1675 LOG_TARGET_WARNING(target, "target not halted");
1676 return ERROR_TARGET_NOT_HALTED;
1677 }
1678
1679 if (!xtensa->permissive_mode) {
1680 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
1681 LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
1682 return ERROR_FAIL;
1683 }
1684 }
1685
1686 if (size == 0 || count == 0 || !buffer)
1687 return ERROR_COMMAND_SYNTAX_ERROR;
1688
1689 /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
1690 if (addrstart_al == address && addrend_al == address + (size * count)) {
1691 /* We discard the const here because albuff can also be non-const */
1692 albuff = (uint8_t *)buffer;
1693 } else {
1694 albuff = malloc(addrend_al - addrstart_al);
1695 if (!albuff) {
1696 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1697 addrend_al - addrstart_al);
1698 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1699 }
1700 }
1701
1702 /* We're going to use A3 here */
1703 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1704
1705 /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
1706 if (albuff != buffer) {
1707 /* See if we need to read the first and/or last word. */
1708 if (address & 3) {
1709 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1710 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
1711 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(XT_REG_A3));
1712 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[0]);
1713 }
1714 if ((address + (size * count)) & 3) {
1715 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrend_al - 4);
1716 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
1717 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(XT_REG_A3));
1718 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR,
1719 &albuff[addrend_al - addrstart_al - 4]);
1720 }
1721 /* Grab bytes */
1722 res = jtag_execute_queue();
1723 if (res != ERROR_OK) {
1724 LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
1725 if (albuff != buffer)
1726 free(albuff);
1727 return res;
1728 }
1729 xtensa_core_status_check(target);
1730 /* Copy data to be written into the aligned buffer */
1731 memcpy(&albuff[address & 3], buffer, size * count);
1732 /* Now we can write albuff in aligned uint32s. */
1733 }
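/* Worked example (hypothetical addresses): a 2-byte write at 0x60001003 gives
 * addrstart_al = 0x60001000 and addrend_al = 0x60001008; both the head and the tail word are read
 * back above, the payload is copied into albuff[3..4], and both words are rewritten below. */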
1734
1735 /* Write start address to A3 */
1736 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1737 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
1738 /* Write the aligned buffer */
1739 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1740 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, buf_get_u32(&albuff[i], 0, 32));
1741 xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(XT_REG_A3));
1742 }
1743 res = jtag_execute_queue();
1744 if (res == ERROR_OK)
1745 res = xtensa_core_status_check(target);
1746 if (res != ERROR_OK)
1747 LOG_TARGET_WARNING(target, "Failed writing %d bytes at address " TARGET_ADDR_FMT, count * size, address);
1748 if (albuff != buffer)
1749 free(albuff);
1750
1751 return res;
1752 }
1753
1754 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
1755 {
1756 /* xtensa_write_memory can handle everything. Just pass on to that. */
1757 return xtensa_write_memory(target, address, 1, count, buffer);
1758 }
1759
1760 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
1761 {
1762 LOG_WARNING("not implemented yet");
1763 return ERROR_FAIL;
1764 }
1765
1766 int xtensa_poll(struct target *target)
1767 {
1768 struct xtensa *xtensa = target_to_xtensa(target);
1769
1770 int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET | PWRSTAT_COREWASRESET);
1771 if (res != ERROR_OK)
1772 return res;
1773
1774 if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
1775 LOG_TARGET_INFO(target, "Debug controller was reset.");
1776 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
1777 if (res != ERROR_OK)
1778 return res;
1779 }
1780 if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
1781 LOG_TARGET_INFO(target, "Core was reset.");
1782 xtensa_dm_power_status_cache(&xtensa->dbg_mod);
1783 /* Enable JTAG, set reset if needed */
1784 res = xtensa_wakeup(target);
1785 if (res != ERROR_OK)
1786 return res;
1787
1788 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1789 if (res != ERROR_OK)
1790 return res;
1791 if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET) {
1792 /* if RESET state is persistent */
1793 target->state = TARGET_RESET;
1794 } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
1795 LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
1796 xtensa->dbg_mod.core_status.dsr,
1797 xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
1798 target->state = TARGET_UNKNOWN;
1799 if (xtensa->come_online_probes_num == 0)
1800 target->examined = false;
1801 else
1802 xtensa->come_online_probes_num--;
1803 } else if (xtensa_is_stopped(target)) {
1804 if (target->state != TARGET_HALTED) {
1805 enum target_state oldstate = target->state;
1806 target->state = TARGET_HALTED;
1807 /* Examine why the target has been halted */
1808 target->debug_reason = DBG_REASON_DBGRQ;
1809 xtensa_fetch_all_regs(target);
1810 /* When setting debug reason DEBUGCAUSE events have the following
1811 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
1812 /* Watchpoint and breakpoint events at the same time results in special
1813 * debug reason: DBG_REASON_WPTANDBKPT. */
1814 xtensa_reg_val_t halt_cause = xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
1815 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
1816 if (halt_cause & DEBUGCAUSE_IC)
1817 target->debug_reason = DBG_REASON_SINGLESTEP;
1818 if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
1819 if (halt_cause & DEBUGCAUSE_DB)
1820 target->debug_reason = DBG_REASON_WPTANDBKPT;
1821 else
1822 target->debug_reason = DBG_REASON_BREAKPOINT;
1823 } else if (halt_cause & DEBUGCAUSE_DB) {
1824 target->debug_reason = DBG_REASON_WATCHPOINT;
1825 }
1826 LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIX32 ", debug_reason=%08x, oldstate=%08x",
1827 xtensa_reg_get(target, XT_REG_IDX_PC),
1828 target->debug_reason,
1829 oldstate);
1830 LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
1831 halt_cause,
1832 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
1833 xtensa->dbg_mod.core_status.dsr);
1834 LOG_TARGET_INFO(target, "Target halted, PC=0x%08" PRIX32 ", debug_reason=%08x",
1835 xtensa_reg_get(target, XT_REG_IDX_PC), target->debug_reason);
1836 xtensa_dm_core_status_clear(
1837 &xtensa->dbg_mod,
1838 OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
1839 OCDDSR_DEBUGINTTRAX |
1840 OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
1841 }
1842 } else {
1843 target->debug_reason = DBG_REASON_NOTHALTED;
1844 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
1845 target->state = TARGET_RUNNING;
1846 target->debug_reason = DBG_REASON_NOTHALTED;
1847 }
1848 }
1849 if (xtensa->trace_active) {
1850 /* Detect if tracing was active but has stopped. */
1851 struct xtensa_trace_status trace_status;
1852 res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
1853 if (res == ERROR_OK) {
1854 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
1855 LOG_INFO("Detected end of trace.");
1856 if (trace_status.stat & TRAXSTAT_PCMTG)
1857 LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
1858 if (trace_status.stat & TRAXSTAT_PTITG)
1859 LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
1860 if (trace_status.stat & TRAXSTAT_CTITG)
1861 LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
1862 xtensa->trace_active = false;
1863 }
1864 }
1865 }
1866 return ERROR_OK;
1867 }
1868
1869 static int xtensa_sw_breakpoint_add(struct target *target,
1870 struct breakpoint *breakpoint,
1871 struct xtensa_sw_breakpoint *sw_bp)
1872 {
1873 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
1874 if (ret != ERROR_OK) {
1875 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
1876 return ret;
1877 }
1878
1879 sw_bp->insn_sz = xtensa_insn_size_get(buf_get_u32(sw_bp->insn, 0, 24));
1880 sw_bp->oocd_bp = breakpoint;
1881
1882 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(0, 0) : XT_INS_BREAKN(0);
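	/* Added note (for clarity): a 3-byte original instruction is replaced with the 3-byte BREAK
	 * opcode, and a 2-byte (narrow) instruction with BREAK.N, so the following instruction is
	 * never clobbered by the software breakpoint. */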
1883 /* convert to target endianness */
1884 uint8_t break_insn_buff[4];
1885 target_buffer_set_u32(target, break_insn_buff, break_insn);
1886
1887 ret = target_write_buffer(target, breakpoint->address, sw_bp->insn_sz, break_insn_buff);
1888 if (ret != ERROR_OK) {
1889 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
1890 return ret;
1891 }
1892
1893 return ERROR_OK;
1894 }
1895
1896 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
1897 {
1898 int ret = target_write_buffer(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
1899 if (ret != ERROR_OK) {
1900 LOG_TARGET_ERROR(target, "Failed to restore insn (%d)!", ret);
1901 return ret;
1902 }
1903 sw_bp->oocd_bp = NULL;
1904 return ERROR_OK;
1905 }
1906
1907 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
1908 {
1909 struct xtensa *xtensa = target_to_xtensa(target);
1910 unsigned int slot;
1911
1912 if (breakpoint->type == BKPT_SOFT) {
1913 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
1914 if (!xtensa->sw_brps[slot].oocd_bp ||
1915 xtensa->sw_brps[slot].oocd_bp == breakpoint)
1916 break;
1917 }
1918 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
1919 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
1920 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1921 }
1922 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
1923 if (ret != ERROR_OK) {
1924 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
1925 return ret;
1926 }
1927 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
1928 slot,
1929 breakpoint->address);
1930 return ERROR_OK;
1931 }
1932
1933 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1934 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
1935 break;
1936 }
1937 if (slot == xtensa->core_config->debug.ibreaks_num) {
1938 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
1939 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1940 }
1941
1942 xtensa->hw_brps[slot] = breakpoint;
1943 /* We will actually write the breakpoints when we resume the target. */
1944 LOG_TARGET_DEBUG(target, "placed HW breakpoint @ " TARGET_ADDR_FMT,
1945 breakpoint->address);
1946
1947 return ERROR_OK;
1948 }
1949
1950 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
1951 {
1952 struct xtensa *xtensa = target_to_xtensa(target);
1953 unsigned int slot;
1954
1955 if (breakpoint->type == BKPT_SOFT) {
1956 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
1957 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
1958 break;
1959 }
1960 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
1961 LOG_TARGET_WARNING(target, "SW breakpoint not found, slot=%u!", slot);
1962 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1963 }
1964 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
1965 if (ret != ERROR_OK) {
1966 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
1967 return ret;
1968 }
1969 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
1970 return ERROR_OK;
1971 }
1972
1973 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1974 if (xtensa->hw_brps[slot] == breakpoint)
1975 break;
1976 }
1977 if (slot == xtensa->core_config->debug.ibreaks_num) {
1978 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
1979 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1980 }
1981 xtensa->hw_brps[slot] = NULL;
1982 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
1983 return ERROR_OK;
1984 }
1985
1986 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
1987 {
1988 struct xtensa *xtensa = target_to_xtensa(target);
1989 unsigned int slot;
1990 xtensa_reg_val_t dbreakcval;
1991
1992 if (target->state != TARGET_HALTED) {
1993 LOG_TARGET_WARNING(target, "target not halted");
1994 return ERROR_TARGET_NOT_HALTED;
1995 }
1996
1997 if (watchpoint->mask != ~(uint32_t)0) {
1998 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
1999 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2000 }
2001
2002 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2003 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2004 break;
2005 }
2006 if (slot == xtensa->core_config->debug.dbreaks_num) {
2007 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2008 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2009 }
2010
2011 /* Figure out the value for DBREAKC bits 5..0
2012 * It's basically 0x3F with one more bit cleared from the LSB side for each doubling of the length. */
2013 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2014 !IS_PWR_OF_2(watchpoint->length) ||
2015 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2016 LOG_TARGET_WARNING(
2017 target,
2018 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2019 " not supported by hardware.",
2020 watchpoint->length,
2021 watchpoint->address);
2022 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2023 }
2024 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2025
2026 if (watchpoint->rw == WPT_READ)
2027 dbreakcval |= BIT(30);
2028 if (watchpoint->rw == WPT_WRITE)
2029 dbreakcval |= BIT(31);
2030 if (watchpoint->rw == WPT_ACCESS)
2031 dbreakcval |= BIT(30) | BIT(31);
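	/* Worked example (hypothetical watchpoint): a 4-byte write watchpoint at 0x3FFB2000 yields
	 * dbreakcval = ALIGN_DOWN(0x3F, 4) | BIT(31) = 0x8000003C, i.e. mask bits 5..2 set plus the
	 * store-enable bit. */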
2032
2033 /* Write DBREAKA[slot] and DBREAKC[slot] */
2034 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2035 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2036 xtensa->hw_wps[slot] = watchpoint;
2037 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2038 watchpoint->address);
2039 return ERROR_OK;
2040 }
2041
2042 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2043 {
2044 struct xtensa *xtensa = target_to_xtensa(target);
2045 unsigned int slot;
2046
2047 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2048 if (xtensa->hw_wps[slot] == watchpoint)
2049 break;
2050 }
2051 if (slot == xtensa->core_config->debug.dbreaks_num) {
2052 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2053 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2054 }
2055 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2056 xtensa->hw_wps[slot] = NULL;
2057 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2058 watchpoint->address);
2059 return ERROR_OK;
2060 }
2061
2062 static int xtensa_build_reg_cache(struct target *target)
2063 {
2064 struct xtensa *xtensa = target_to_xtensa(target);
2065 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2066 struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2067
2068 if (!reg_cache) {
2069 LOG_ERROR("Failed to alloc reg cache!");
2070 return ERROR_FAIL;
2071 }
2072 reg_cache->name = "Xtensa registers";
2073 reg_cache->next = NULL;
2074 reg_cache->num_regs = XT_NUM_REGS + xtensa->core_config->user_regs_num;
2075 /* Init reglist */
2076 struct reg *reg_list = calloc(reg_cache->num_regs, sizeof(struct reg));
2077 if (!reg_list) {
2078 LOG_ERROR("Failed to alloc reg list!");
2079 goto fail;
2080 }
2081 xtensa->regs_num = 0;
2082
2083 for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
2084 reg_list[i].exist = false;
2085 if (xtensa_regs[i].type == XT_REG_USER) {
2086 if (xtensa_user_reg_exists(xtensa, i))
2087 reg_list[i].exist = true;
2088 else
2089 LOG_DEBUG("User reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
2090 } else if (xtensa_regs[i].type == XT_REG_FR) {
2091 if (xtensa_fp_reg_exists(xtensa, i))
2092 reg_list[i].exist = true;
2093 else
2094 LOG_DEBUG("FP reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
2095 } else if (xtensa_regs[i].type == XT_REG_SPECIAL) {
2096 if (xtensa_special_reg_exists(xtensa, i))
2097 reg_list[i].exist = true;
2098 else
2099 LOG_DEBUG("Special reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
2100 } else {
2101 if (xtensa_regular_reg_exists(xtensa, i))
2102 reg_list[i].exist = true;
2103 else
2104 LOG_DEBUG("Regular reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
2105 }
2106 reg_list[i].name = xtensa_regs[i].name;
2107 reg_list[i].size = 32;
2108 reg_list[i].value = calloc(1, 4 /*XT_REG_LEN*/);/* make Clang Static Analyzer happy */
2109 if (!reg_list[i].value) {
2110 LOG_ERROR("Failed to alloc reg list value!");
2111 goto fail;
2112 }
2113 reg_list[i].dirty = false;
2114 reg_list[i].valid = false;
2115 reg_list[i].type = &xtensa_reg_type;
2116 reg_list[i].arch_info = xtensa;
2117 if (reg_list[i].exist)
2118 xtensa->regs_num++;
2119 }
2120 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
2121 reg_list[XT_USR_REG_START + i].exist = true;
2122 reg_list[XT_USR_REG_START + i].name = xtensa->core_config->user_regs[i].name;
2123 reg_list[XT_USR_REG_START + i].size = xtensa->core_config->user_regs[i].size;
2124 reg_list[XT_USR_REG_START + i].value = calloc(1, reg_list[XT_USR_REG_START + i].size / 8);
2125 if (!reg_list[XT_USR_REG_START + i].value) {
2126 LOG_ERROR("Failed to alloc user reg list value!");
2127 goto fail;
2128 }
2129 reg_list[XT_USR_REG_START + i].dirty = false;
2130 reg_list[XT_USR_REG_START + i].valid = false;
2131 reg_list[XT_USR_REG_START + i].type = xtensa->core_config->user_regs[i].type;
2132 reg_list[XT_USR_REG_START + i].arch_info = xtensa;
2133 xtensa->regs_num++;
2134 }
2135 if (xtensa->core_config->gdb_general_regs_num >= xtensa->regs_num) {
2136 LOG_ERROR("Regs number less than GDB general regs number!");
2137 goto fail;
2138 }
2139
2140 /* assign GDB reg numbers to registers */
2141 for (unsigned int gdb_reg_id = 0; gdb_reg_id < xtensa->regs_num; gdb_reg_id++) {
2142 unsigned int reg_id = xtensa->core_config->gdb_regs_mapping[gdb_reg_id];
2143 if (reg_id >= reg_cache->num_regs) {
2144 LOG_ERROR("Invalid GDB map!");
2145 goto fail;
2146 }
2147 if (!reg_list[reg_id].exist) {
2148 LOG_ERROR("Non-existing reg in GDB map!");
2149 goto fail;
2150 }
2151 reg_list[reg_id].number = gdb_reg_id;
2152 }
2153 reg_cache->reg_list = reg_list;
2154
2155 xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2156 if (!xtensa->algo_context_backup) {
2157 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2158 goto fail;
2159 }
2160 for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2161 struct reg *reg = &reg_cache->reg_list[i];
2162 xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2163 if (!xtensa->algo_context_backup[i]) {
2164 LOG_ERROR("Failed to alloc mem for algorithm context!");
2165 goto fail;
2166 }
2167 }
2168
2169 xtensa->core_cache = reg_cache;
2170 if (cache_p)
2171 *cache_p = reg_cache;
2172 return ERROR_OK;
2173
2174 fail:
2175 if (reg_list) {
2176 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2177 free(reg_list[i].value);
2178 free(reg_list);
2179 }
2180 if (xtensa->algo_context_backup) {
2181 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2182 free(xtensa->algo_context_backup[i]);
2183 free(xtensa->algo_context_backup);
2184 }
2185 free(reg_cache);
2186
2187 return ERROR_FAIL;
2188 }
2189
2190 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2191 const struct xtensa_config *xtensa_config,
2192 const struct xtensa_debug_module_config *dm_cfg)
2193 {
2194 target->arch_info = xtensa;
2195 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2196 xtensa->target = target;
2197 xtensa->core_config = xtensa_config;
2198 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2199
2200 if (!xtensa->core_config->exc.enabled || !xtensa->core_config->irq.enabled ||
2201 !xtensa->core_config->high_irq.enabled || !xtensa->core_config->debug.enabled) {
2202 LOG_ERROR("Xtensa configuration does not support debugging!");
2203 return ERROR_FAIL;
2204 }
2205 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2206 }
2207
2208 void xtensa_set_permissive_mode(struct target *target, bool state)
2209 {
2210 target_to_xtensa(target)->permissive_mode = state;
2211 }
2212
2213 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2214 {
2215 struct xtensa *xtensa = target_to_xtensa(target);
2216
2217 xtensa->come_online_probes_num = 3;
2218 xtensa->hw_brps = calloc(xtensa->core_config->debug.ibreaks_num, sizeof(struct breakpoint *));
2219 if (!xtensa->hw_brps) {
2220 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2221 return ERROR_FAIL;
2222 }
2223 xtensa->hw_wps = calloc(xtensa->core_config->debug.dbreaks_num, sizeof(struct watchpoint *));
2224 if (!xtensa->hw_wps) {
2225 free(xtensa->hw_brps);
2226 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2227 return ERROR_FAIL;
2228 }
2229 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2230 if (!xtensa->sw_brps) {
2231 free(xtensa->hw_brps);
2232 free(xtensa->hw_wps);
2233 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2234 return ERROR_FAIL;
2235 }
2236
2237 return xtensa_build_reg_cache(target);
2238 }
2239
2240 static void xtensa_free_reg_cache(struct target *target)
2241 {
2242 struct xtensa *xtensa = target_to_xtensa(target);
2243 struct reg_cache *cache = xtensa->core_cache;
2244
2245 if (cache) {
2246 register_unlink_cache(&target->reg_cache, cache);
2247 for (unsigned int i = 0; i < cache->num_regs; i++) {
2248 free(xtensa->algo_context_backup[i]);
2249 free(cache->reg_list[i].value);
2250 }
2251 free(xtensa->algo_context_backup);
2252 free(cache->reg_list);
2253 free(cache);
2254 }
2255 xtensa->core_cache = NULL;
2256 xtensa->algo_context_backup = NULL;
2257 }
2258
2259 void xtensa_target_deinit(struct target *target)
2260 {
2261 struct xtensa *xtensa = target_to_xtensa(target);
2262
2263 LOG_DEBUG("start");
2264
2265 if (target_was_examined(target)) {
2266 int ret = xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, OCDDCR_ENABLEOCD);
2267 if (ret != ERROR_OK) {
2268 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2269 return;
2270 }
2271 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2272 ret = jtag_execute_queue();
2273 if (ret != ERROR_OK) {
2274 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2275 return;
2276 }
2277 }
2278 xtensa_free_reg_cache(target);
2279 free(xtensa->hw_brps);
2280 free(xtensa->hw_wps);
2281 free(xtensa->sw_brps);
2282 }
2283
2284 const char *xtensa_get_gdb_arch(struct target *target)
2285 {
2286 return "xtensa";
2287 }
2288
2289 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
2290 {
2291 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
2292 &xtensa->permissive_mode, "xtensa permissive mode");
2293 }
2294
2295 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
2296 {
2297 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
2298 target_to_xtensa(get_current_target(CMD_CTX)));
2299 }
2300
2301 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
2302 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
2303 {
2304 struct xtensa_perfmon_config config = {
2305 .mask = 0xffff,
2306 .kernelcnt = 0,
2307 .tracelevel = -1 /* use DEBUGLEVEL by default */
2308 };
2309
2310 if (CMD_ARGC < 2 || CMD_ARGC > 6)
2311 return ERROR_COMMAND_SYNTAX_ERROR;
2312
2313 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
2314 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
2315 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
2316 return ERROR_COMMAND_ARGUMENT_INVALID;
2317 }
2318
2319 config.select = strtoul(CMD_ARGV[1], NULL, 0);
2320 if (config.select > XTENSA_MAX_PERF_SELECT) {
2321 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
2322 return ERROR_COMMAND_ARGUMENT_INVALID;
2323 }
2324
2325 if (CMD_ARGC >= 3) {
2326 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
2327 if (config.mask > XTENSA_MAX_PERF_MASK) {
2328 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
2329 return ERROR_COMMAND_ARGUMENT_INVALID;
2330 }
2331 }
2332
2333 if (CMD_ARGC >= 4) {
2334 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
2335 if (config.kernelcnt > 1) {
2336 command_print(CMD, "kernelcnt should be 0 or 1");
2337 return ERROR_COMMAND_ARGUMENT_INVALID;
2338 }
2339 }
2340
2341 if (CMD_ARGC >= 5) {
2342 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
2343 if (config.tracelevel > 7) {
2344 command_print(CMD, "tracelevel should be <=7");
2345 return ERROR_COMMAND_ARGUMENT_INVALID;
2346 }
2347 }
2348
2349 if (config.tracelevel == -1)
2350 config.tracelevel = xtensa->core_config->debug.irq_level;
2351
2352 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
2353 }
2354
2355 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
2356 {
2357 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
2358 target_to_xtensa(get_current_target(CMD_CTX)));
2359 }
2360
2361 /* perfmon_dump [counter_id] */
2362 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
2363 {
2364 if (CMD_ARGC > 1)
2365 return ERROR_COMMAND_SYNTAX_ERROR;
2366
2367 int counter_id = -1;
2368 if (CMD_ARGC == 1) {
2369 counter_id = strtol(CMD_ARGV[0], NULL, 0);
2370 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
2371 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
2372 return ERROR_COMMAND_ARGUMENT_INVALID;
2373 }
2374 }
2375
2376 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
2377 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
2378 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
2379 char result_buf[128] = { 0 };
2380 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
2381 struct xtensa_perfmon_result result;
2382 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
2383 if (res != ERROR_OK)
2384 return res;
2385 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
2386 "%-12" PRIu64 "%s",
2387 result.value,
2388 result.overflow ? " (overflow)" : "");
2389 LOG_INFO("%s", result_buf);
2390 }
2391
2392 return ERROR_OK;
2393 }
2394
2395 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
2396 {
2397 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
2398 target_to_xtensa(get_current_target(CMD_CTX)));
2399 }
2400
2401 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
2402 {
2403 int state = -1;
2404
2405 if (CMD_ARGC < 1) {
2406 const char *st;
2407 state = xtensa->stepping_isr_mode;
2408 if (state == XT_STEPPING_ISR_ON)
2409 st = "OFF";
2410 else if (state == XT_STEPPING_ISR_OFF)
2411 st = "ON";
2412 else
2413 st = "UNKNOWN";
2414 command_print(CMD, "Current ISR step mode: %s", st);
2415 return ERROR_OK;
2416 }
2417 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
2418 if (!strcasecmp(CMD_ARGV[0], "off"))
2419 state = XT_STEPPING_ISR_ON;
2420 else if (!strcasecmp(CMD_ARGV[0], "on"))
2421 state = XT_STEPPING_ISR_OFF;
2422
2423 if (state == -1) {
2424 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
2425 return ERROR_FAIL;
2426 }
2427 xtensa->stepping_isr_mode = state;
2428 return ERROR_OK;
2429 }
2430
2431 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
2432 {
2433 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
2434 target_to_xtensa(get_current_target(CMD_CTX)));
2435 }
2436
2437 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
2438 {
2439 int res = ERROR_OK;
2440 uint32_t val = 0;
2441
2442 if (CMD_ARGC >= 1) {
2443 for (unsigned int i = 0; i < CMD_ARGC; i++) {
2444 if (!strcasecmp(CMD_ARGV[i], "none")) {
2445 val = 0;
2446 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
2447 val |= OCDDCR_BREAKINEN;
2448 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
2449 val |= OCDDCR_BREAKOUTEN;
2450 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
2451 val |= OCDDCR_RUNSTALLINEN;
2452 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
2453 val |= OCDDCR_DEBUGMODEOUTEN;
2454 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
2455 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
2456 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
2457 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
2458 } else {
2459 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
2460 command_print(
2461 CMD,
2462 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
2463 return ERROR_OK;
2464 }
2465 }
2466 res = xtensa_smpbreak_set(target, val);
2467 if (res != ERROR_OK)
2468 command_print(CMD, "Failed to set smpbreak config %d", res);
2469 } else {
2470 struct xtensa *xtensa = target_to_xtensa(target);
2471 res = xtensa_smpbreak_read(xtensa, &val);
2472 if (res == ERROR_OK) {
2473 command_print(CMD, "Current bits set:%s%s%s%s",
2474 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
2475 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
2476 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
2477 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
2478 );
2479 } else {
2480 command_print(CMD, "Failed to get smpbreak config %d", res);
2481 }
2482 }
2483 return res;
2484 }
2485
2486 COMMAND_HANDLER(xtensa_cmd_smpbreak)
2487 {
2488 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
2489 get_current_target(CMD_CTX));
2490 }
2491
2492 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
2493 {
2494 struct xtensa_trace_status trace_status;
2495 struct xtensa_trace_start_config cfg = {
2496 .stoppc = 0,
2497 .stopmask = XTENSA_STOPMASK_DISABLED,
2498 .after = 0,
2499 .after_is_words = false
2500 };
2501
2502 /* Parse arguments */
2503 for (unsigned int i = 0; i < CMD_ARGC; i++) {
2504 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
2505 char *e;
2506 i++;
2507 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
2508 cfg.stopmask = 0;
2509 if (*e == '/')
2510 cfg.stopmask = strtol(e, NULL, 0);
2511 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
2512 i++;
2513 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
2514 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
2515 cfg.after_is_words = 0;
2516 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
2517 cfg.after_is_words = 1;
2518 } else {
2519 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
2520 return ERROR_FAIL;
2521 }
2522 }
2523
2524 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2525 if (res != ERROR_OK)
2526 return res;
2527 if (trace_status.stat & TRAXSTAT_TRACT) {
2528 LOG_WARNING("Silently stop active tracing!");
2529 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
2530 if (res != ERROR_OK)
2531 return res;
2532 }
2533
2534 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
2535 if (res != ERROR_OK)
2536 return res;
2537
2538 xtensa->trace_active = true;
2539 command_print(CMD, "Trace started.");
2540 return ERROR_OK;
2541 }
2542
2543 COMMAND_HANDLER(xtensa_cmd_tracestart)
2544 {
2545 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
2546 target_to_xtensa(get_current_target(CMD_CTX)));
2547 }
2548
2549 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
2550 {
2551 struct xtensa_trace_status trace_status;
2552
2553 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2554 if (res != ERROR_OK)
2555 return res;
2556
2557 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2558 command_print(CMD, "No trace is currently active.");
2559 return ERROR_FAIL;
2560 }
2561
2562 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
2563 if (res != ERROR_OK)
2564 return res;
2565
2566 xtensa->trace_active = false;
2567 command_print(CMD, "Trace stop triggered.");
2568 return ERROR_OK;
2569 }
2570
2571 COMMAND_HANDLER(xtensa_cmd_tracestop)
2572 {
2573 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
2574 target_to_xtensa(get_current_target(CMD_CTX)));
2575 }
2576
2577 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
2578 {
2579 struct xtensa_trace_config trace_config;
2580 struct xtensa_trace_status trace_status;
2581 uint32_t memsz, wmem;
2582
2583 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2584 if (res != ERROR_OK)
2585 return res;
2586
2587 if (trace_status.stat & TRAXSTAT_TRACT) {
2588 command_print(CMD, "Tracing is still active. Please stop it first.");
2589 return ERROR_FAIL;
2590 }
2591
2592 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
2593 if (res != ERROR_OK)
2594 return res;
2595
2596 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
2597 command_print(CMD, "No active trace found; nothing to dump.");
2598 return ERROR_FAIL;
2599 }
2600
2601 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
2602 LOG_INFO("Total trace memory: %d words", memsz);
2603 if ((trace_config.addr &
2604 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
2605 /* Trace memory hasn't wrapped around yet. */
2606 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
2607 LOG_INFO("...but trace is only %d words", wmem);
2608 if (wmem < memsz)
2609 memsz = wmem;
2610 } else {
2611 if (trace_config.addr & TRAXADDR_TWSAT) {
2612 LOG_INFO("Real trace is many times longer than that (overflow)");
2613 } else {
2614 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
2615 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
2616 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
2617 }
2618 }
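/* Illustrative reading of the wrap fields (hypothetical values): with memsz = 0x1000 words,
 * a TWRAP count of 2 and TADDR = 0x100, the trace actually produced 2 * 0x1000 + 0x100 words,
 * but only the most recent memsz words are still present in the trace memory. */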
2619
2620 uint8_t *tracemem = malloc(memsz * 4);
2621 if (!tracemem) {
2622 command_print(CMD, "Failed to alloc memory for trace data!");
2623 return ERROR_FAIL;
2624 }
2625 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
2626 if (res != ERROR_OK) {
2627 free(tracemem);
2628 return res;
2629 }
2630
2631 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
2632 if (f <= 0) {
2633 free(tracemem);
2634 command_print(CMD, "Unable to open file %s", fname);
2635 return ERROR_FAIL;
2636 }
2637 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
2638 command_print(CMD, "Unable to write to file %s", fname);
2639 else
2640 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
2641 close(f);
2642
2643 bool is_all_zeroes = true;
2644 for (unsigned int i = 0; i < memsz * 4; i++) {
2645 if (tracemem[i] != 0) {
2646 is_all_zeroes = false;
2647 break;
2648 }
2649 }
2650 free(tracemem);
2651 if (is_all_zeroes)
2652 command_print(
2653 CMD,
2654 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
2655
2656 return ERROR_OK;
2657 }
2658
2659 COMMAND_HANDLER(xtensa_cmd_tracedump)
2660 {
2661 if (CMD_ARGC != 1) {
2662 command_print(CMD, "Command takes exactly 1 parameter. Need a filename to dump to as output!");
2663 return ERROR_FAIL;
2664 }
2665
2666 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
2667 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
2668 }
2669
2670 const struct command_registration xtensa_command_handlers[] = {
2671 {
2672 .name = "set_permissive",
2673 .handler = xtensa_cmd_permissive_mode,
2674 .mode = COMMAND_ANY,
2675 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
2676 .usage = "[0|1]",
2677 },
2678 {
2679 .name = "maskisr",
2680 .handler = xtensa_cmd_mask_interrupts,
2681 .mode = COMMAND_ANY,
2682 .help = "mask Xtensa interrupts during single-stepping",
2683 .usage = "['on'|'off']",
2684 },
2685 {
2686 .name = "smpbreak",
2687 .handler = xtensa_cmd_smpbreak,
2688 .mode = COMMAND_ANY,
2689 .help = "Set the way the CPU chains OCD breaks",
2690 .usage =
2691 "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
2692 },
2693 {
2694 .name = "perfmon_enable",
2695 .handler = xtensa_cmd_perfmon_enable,
2696 .mode = COMMAND_EXEC,
2697 .help = "Enable and start performance counter",
2698 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
2699 },
2700 {
2701 .name = "perfmon_dump",
2702 .handler = xtensa_cmd_perfmon_dump,
2703 .mode = COMMAND_EXEC,
2704 .help =
2705 "Dump performance counter value. If no argument specified, dumps all counters.",
2706 .usage = "[counter_id]",
2707 },
2708 {
2709 .name = "tracestart",
2710 .handler = xtensa_cmd_tracestart,
2711 .mode = COMMAND_EXEC,
2712 .help =
2713 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
2714 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
2715 },
2716 {
2717 .name = "tracestop",
2718 .handler = xtensa_cmd_tracestop,
2719 .mode = COMMAND_EXEC,
2720 .help = "Tracing: Stop current trace as started by the tracestart command",
2721 .usage = "",
2722 },
2723 {
2724 .name = "tracedump",
2725 .handler = xtensa_cmd_tracedump,
2726 .mode = COMMAND_EXEC,
2727 .help = "Tracing: Dump trace memory to a file. One file per core.",
2728 .usage = "<outfile>",
2729 },
2730 COMMAND_REGISTRATION_DONE
2731 };
