drivers/linuxgpiod: Migrate to adapter gpio commands
[openocd.git] / src / target / xtensa / xtensa.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
6 * Derived from esp108.c *
7 * Author: Angus Gratton gus@projectgus.com *
8 * Author: Jeroen Domburg <jeroen@espressif.com> *
9 * Author: Alexey Gerenkov <alexey@espressif.com> *
10 * Author: Andrey Gramakov <andrei.gramakov@espressif.com> *
11 ***************************************************************************/
12
13 #ifdef HAVE_CONFIG_H
14 #include "config.h"
15 #endif
16
17 #include <stdlib.h>
18 #include <helper/time_support.h>
19 #include <helper/align.h>
20 #include <target/register.h>
21
22 #include "xtensa.h"
23
24
/* Instruction-encoding helpers: each builds a raw Xtensa instruction word by
 * OR-ing operand fields into the base OPCODE at the bit positions of the named
 * instruction format (RSR, RRR, RRRN, RRI8, RRI4). Operands are masked to
 * their field widths before shifting. */
#define _XT_INS_FORMAT_RSR(OPCODE, SR, T) ((OPCODE) \
    | (((SR) & 0xFF) << 8) \
    | (((T) & 0x0F) << 4))

#define _XT_INS_FORMAT_RRR(OPCODE, ST, R) ((OPCODE) \
    | (((ST) & 0xFF) << 4) \
    | (((R) & 0x0F) << 12))

#define _XT_INS_FORMAT_RRRN(OPCODE, S, T, IMM4) ((OPCODE) \
    | (((T) & 0x0F) << 4) \
    | (((S) & 0x0F) << 8) \
    | (((IMM4) & 0x0F) << 12))

#define _XT_INS_FORMAT_RRI8(OPCODE, R, S, T, IMM8) ((OPCODE) \
    | (((IMM8) & 0xFF) << 16) \
    | (((R) & 0x0F) << 12) \
    | (((S) & 0x0F) << 8) \
    | (((T) & 0x0F) << 4))

#define _XT_INS_FORMAT_RRI4(OPCODE, IMM4, R, S, T) ((OPCODE) \
    | (((IMM4) & 0x0F) << 20) \
    | (((R) & 0x0F) << 12) \
    | (((S) & 0x0F) << 8) \
    | (((T) & 0x0F) << 4))

/* Xtensa processor instruction opcodes
 * "Return From Debug Operation" to Normal */
#define XT_INS_RFDO 0xf1e000
/* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
#define XT_INS_RFDD 0xf1e010

/* Load to DDR register, increase addr register */
#define XT_INS_LDDR32P(S) (0x0070E0 | ((S) << 8))
/* Store from DDR register, increase addr register */
#define XT_INS_SDDR32P(S) (0x0070F0 | ((S) << 8))

/* Load 32-bit Indirect from A(S) + 4 * IMM8 to A(T) */
#define XT_INS_L32I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x002002, 0, S, T, IMM8)
/* Load 16-bit Unsigned from A(S) + 2 * IMM8 to A(T) */
#define XT_INS_L16UI(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x001002, 0, S, T, IMM8)
/* Load 8-bit Unsigned from A(S) + IMM8 to A(T) */
#define XT_INS_L8UI(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x000002, 0, S, T, IMM8)

/* Store 32-bit Indirect to A(S) + 4 * IMM8 from A(T) */
#define XT_INS_S32I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x006002, 0, S, T, IMM8)
/* Store 16-bit to A(S) + 2 * IMM8 from A(T) */
#define XT_INS_S16I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x005002, 0, S, T, IMM8)
/* Store 8-bit to A(S) + IMM8 from A(T) */
#define XT_INS_S8I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x004002, 0, S, T, IMM8)

/* Read Special Register */
#define XT_INS_RSR(SR, T) _XT_INS_FORMAT_RSR(0x030000, SR, T)
/* Write Special Register */
#define XT_INS_WSR(SR, T) _XT_INS_FORMAT_RSR(0x130000, SR, T)
/* Swap Special Register */
#define XT_INS_XSR(SR, T) _XT_INS_FORMAT_RSR(0x610000, SR, T)

/* Rotate Window by (-8..7) */
#define XT_INS_ROTW(N) ((0x408000) | (((N) & 15) << 4))

/* Read User Register */
#define XT_INS_RUR(UR, T) _XT_INS_FORMAT_RRR(0xE30000, UR, T)
/* Write User Register */
#define XT_INS_WUR(UR, T) _XT_INS_FORMAT_RSR(0xF30000, UR, T)

/* Read Floating-Point Register */
#define XT_INS_RFR(FR, T) _XT_INS_FORMAT_RRR(0xFA0000, (((FR) << 4) | 0x4), T)
/* Write Floating-Point Register */
#define XT_INS_WFR(FR, T) _XT_INS_FORMAT_RRR(0xFA0000, (((FR) << 4) | 0x5), T)

/* 32-bit break */
#define XT_INS_BREAK(IMM1, IMM2) _XT_INS_FORMAT_RRR(0x000000, \
    (((IMM1) & 0x0F) << 4) | ((IMM2) & 0x0F), 0x4)
/* 16-bit break */
#define XT_INS_BREAKN(IMM4) _XT_INS_FORMAT_RRRN(0x00000D, IMM4, 0x2, 0xF)

/* Windowed-ABI stack-frame load/store (used for breakpoint detection below) */
#define XT_INS_L32E(R, S, T) _XT_INS_FORMAT_RRI4(0x90000, 0, R, S, T)
#define XT_INS_S32E(R, S, T) _XT_INS_FORMAT_RRI4(0x490000, 0, R, S, T)
#define XT_INS_L32E_S32E_MASK 0xFF000F

/* "Return From Window Overflow/Underflow" */
#define XT_INS_RFWO 0x3400
#define XT_INS_RFWU 0x3500
#define XT_INS_RFWO_RFWU_MASK 0xFFFFFF

#define XT_WATCHPOINTS_NUM_MAX 2

/* Special register number macro for DDR register.
 * this gets used a lot so making a shortcut to it is
 * useful.
 */
#define XT_SR_DDR (xtensa_regs[XT_REG_IDX_OCD_DDR].reg_num)

/* Same thing for A3/A4 */
#define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
#define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)

/* Base special-register number of the PC (EPC); the debug IRQ level is added
 * at write-back time (see xtensa_write_dirty_registers). */
#define XT_PC_REG_NUM_BASE (176)
#define XT_SW_BREAKPOINTS_MAX_NUM 32
123
/* Canonical Xtensa register table, indexed by enum xtensa_reg_id (XT_REG_IDX_*).
 * Each entry: name, target register number, access class (general AR file,
 * special, user, FP, debug-module, or window-relative A0..A15), and flags.
 * NOTE(review): for XT_REG_RELGEN entries reg_num holds the index of the
 * backing ARx entry, not a processor register number (see inline comment);
 * for XT_REG_DEBUG entries every reg_num is 0x68 — looks like a placeholder,
 * since all are also XT_REGF_NOREAD; confirm against the debug-module code. */
const struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
    { "pc", XT_PC_REG_NUM_BASE /*+XT_DEBUGLEVEL*/, XT_REG_SPECIAL, 0 },	/* actually epc[debuglevel] */
    { "ar0", 0x00, XT_REG_GENERAL, 0 },
    { "ar1", 0x01, XT_REG_GENERAL, 0 },
    { "ar2", 0x02, XT_REG_GENERAL, 0 },
    { "ar3", 0x03, XT_REG_GENERAL, 0 },
    { "ar4", 0x04, XT_REG_GENERAL, 0 },
    { "ar5", 0x05, XT_REG_GENERAL, 0 },
    { "ar6", 0x06, XT_REG_GENERAL, 0 },
    { "ar7", 0x07, XT_REG_GENERAL, 0 },
    { "ar8", 0x08, XT_REG_GENERAL, 0 },
    { "ar9", 0x09, XT_REG_GENERAL, 0 },
    { "ar10", 0x0A, XT_REG_GENERAL, 0 },
    { "ar11", 0x0B, XT_REG_GENERAL, 0 },
    { "ar12", 0x0C, XT_REG_GENERAL, 0 },
    { "ar13", 0x0D, XT_REG_GENERAL, 0 },
    { "ar14", 0x0E, XT_REG_GENERAL, 0 },
    { "ar15", 0x0F, XT_REG_GENERAL, 0 },
    { "ar16", 0x10, XT_REG_GENERAL, 0 },
    { "ar17", 0x11, XT_REG_GENERAL, 0 },
    { "ar18", 0x12, XT_REG_GENERAL, 0 },
    { "ar19", 0x13, XT_REG_GENERAL, 0 },
    { "ar20", 0x14, XT_REG_GENERAL, 0 },
    { "ar21", 0x15, XT_REG_GENERAL, 0 },
    { "ar22", 0x16, XT_REG_GENERAL, 0 },
    { "ar23", 0x17, XT_REG_GENERAL, 0 },
    { "ar24", 0x18, XT_REG_GENERAL, 0 },
    { "ar25", 0x19, XT_REG_GENERAL, 0 },
    { "ar26", 0x1A, XT_REG_GENERAL, 0 },
    { "ar27", 0x1B, XT_REG_GENERAL, 0 },
    { "ar28", 0x1C, XT_REG_GENERAL, 0 },
    { "ar29", 0x1D, XT_REG_GENERAL, 0 },
    { "ar30", 0x1E, XT_REG_GENERAL, 0 },
    { "ar31", 0x1F, XT_REG_GENERAL, 0 },
    { "ar32", 0x20, XT_REG_GENERAL, 0 },
    { "ar33", 0x21, XT_REG_GENERAL, 0 },
    { "ar34", 0x22, XT_REG_GENERAL, 0 },
    { "ar35", 0x23, XT_REG_GENERAL, 0 },
    { "ar36", 0x24, XT_REG_GENERAL, 0 },
    { "ar37", 0x25, XT_REG_GENERAL, 0 },
    { "ar38", 0x26, XT_REG_GENERAL, 0 },
    { "ar39", 0x27, XT_REG_GENERAL, 0 },
    { "ar40", 0x28, XT_REG_GENERAL, 0 },
    { "ar41", 0x29, XT_REG_GENERAL, 0 },
    { "ar42", 0x2A, XT_REG_GENERAL, 0 },
    { "ar43", 0x2B, XT_REG_GENERAL, 0 },
    { "ar44", 0x2C, XT_REG_GENERAL, 0 },
    { "ar45", 0x2D, XT_REG_GENERAL, 0 },
    { "ar46", 0x2E, XT_REG_GENERAL, 0 },
    { "ar47", 0x2F, XT_REG_GENERAL, 0 },
    { "ar48", 0x30, XT_REG_GENERAL, 0 },
    { "ar49", 0x31, XT_REG_GENERAL, 0 },
    { "ar50", 0x32, XT_REG_GENERAL, 0 },
    { "ar51", 0x33, XT_REG_GENERAL, 0 },
    { "ar52", 0x34, XT_REG_GENERAL, 0 },
    { "ar53", 0x35, XT_REG_GENERAL, 0 },
    { "ar54", 0x36, XT_REG_GENERAL, 0 },
    { "ar55", 0x37, XT_REG_GENERAL, 0 },
    { "ar56", 0x38, XT_REG_GENERAL, 0 },
    { "ar57", 0x39, XT_REG_GENERAL, 0 },
    { "ar58", 0x3A, XT_REG_GENERAL, 0 },
    { "ar59", 0x3B, XT_REG_GENERAL, 0 },
    { "ar60", 0x3C, XT_REG_GENERAL, 0 },
    { "ar61", 0x3D, XT_REG_GENERAL, 0 },
    { "ar62", 0x3E, XT_REG_GENERAL, 0 },
    { "ar63", 0x3F, XT_REG_GENERAL, 0 },
    { "lbeg", 0x00, XT_REG_SPECIAL, 0 },
    { "lend", 0x01, XT_REG_SPECIAL, 0 },
    { "lcount", 0x02, XT_REG_SPECIAL, 0 },
    { "sar", 0x03, XT_REG_SPECIAL, 0 },
    { "windowbase", 0x48, XT_REG_SPECIAL, 0 },
    { "windowstart", 0x49, XT_REG_SPECIAL, 0 },
    { "configid0", 0xB0, XT_REG_SPECIAL, 0 },
    { "configid1", 0xD0, XT_REG_SPECIAL, 0 },
    { "ps", 0xC6, XT_REG_SPECIAL, 0 },	/* actually EPS[debuglevel] */
    { "threadptr", 0xE7, XT_REG_USER, 0 },
    { "br", 0x04, XT_REG_SPECIAL, 0 },
    { "scompare1", 0x0C, XT_REG_SPECIAL, 0 },
    { "acclo", 0x10, XT_REG_SPECIAL, 0 },
    { "acchi", 0x11, XT_REG_SPECIAL, 0 },
    { "m0", 0x20, XT_REG_SPECIAL, 0 },
    { "m1", 0x21, XT_REG_SPECIAL, 0 },
    { "m2", 0x22, XT_REG_SPECIAL, 0 },
    { "m3", 0x23, XT_REG_SPECIAL, 0 },
    { "f0", 0x00, XT_REG_FR, XT_REGF_COPROC0 },
    { "f1", 0x01, XT_REG_FR, XT_REGF_COPROC0 },
    { "f2", 0x02, XT_REG_FR, XT_REGF_COPROC0 },
    { "f3", 0x03, XT_REG_FR, XT_REGF_COPROC0 },
    { "f4", 0x04, XT_REG_FR, XT_REGF_COPROC0 },
    { "f5", 0x05, XT_REG_FR, XT_REGF_COPROC0 },
    { "f6", 0x06, XT_REG_FR, XT_REGF_COPROC0 },
    { "f7", 0x07, XT_REG_FR, XT_REGF_COPROC0 },
    { "f8", 0x08, XT_REG_FR, XT_REGF_COPROC0 },
    { "f9", 0x09, XT_REG_FR, XT_REGF_COPROC0 },
    { "f10", 0x0A, XT_REG_FR, XT_REGF_COPROC0 },
    { "f11", 0x0B, XT_REG_FR, XT_REGF_COPROC0 },
    { "f12", 0x0C, XT_REG_FR, XT_REGF_COPROC0 },
    { "f13", 0x0D, XT_REG_FR, XT_REGF_COPROC0 },
    { "f14", 0x0E, XT_REG_FR, XT_REGF_COPROC0 },
    { "f15", 0x0F, XT_REG_FR, XT_REGF_COPROC0 },
    { "fcr", 0xE8, XT_REG_USER, XT_REGF_COPROC0 },
    { "fsr", 0xE9, XT_REG_USER, XT_REGF_COPROC0 },
    { "mmid", 0x59, XT_REG_SPECIAL, XT_REGF_NOREAD },
    { "ibreakenable", 0x60, XT_REG_SPECIAL, 0 },
    { "memctl", 0x61, XT_REG_SPECIAL, 0 },
    { "atomctl", 0x63, XT_REG_SPECIAL, 0 },
    { "ibreaka0", 0x80, XT_REG_SPECIAL, 0 },
    { "ibreaka1", 0x81, XT_REG_SPECIAL, 0 },
    { "dbreaka0", 0x90, XT_REG_SPECIAL, 0 },
    { "dbreaka1", 0x91, XT_REG_SPECIAL, 0 },
    { "dbreakc0", 0xA0, XT_REG_SPECIAL, 0 },
    { "dbreakc1", 0xA1, XT_REG_SPECIAL, 0 },
    { "epc1", 0xB1, XT_REG_SPECIAL, 0 },
    { "epc2", 0xB2, XT_REG_SPECIAL, 0 },
    { "epc3", 0xB3, XT_REG_SPECIAL, 0 },
    { "epc4", 0xB4, XT_REG_SPECIAL, 0 },
    { "epc5", 0xB5, XT_REG_SPECIAL, 0 },
    { "epc6", 0xB6, XT_REG_SPECIAL, 0 },
    { "epc7", 0xB7, XT_REG_SPECIAL, 0 },
    { "depc", 0xC0, XT_REG_SPECIAL, 0 },
    { "eps2", 0xC2, XT_REG_SPECIAL, 0 },
    { "eps3", 0xC3, XT_REG_SPECIAL, 0 },
    { "eps4", 0xC4, XT_REG_SPECIAL, 0 },
    { "eps5", 0xC5, XT_REG_SPECIAL, 0 },
    { "eps6", 0xC6, XT_REG_SPECIAL, 0 },
    { "eps7", 0xC7, XT_REG_SPECIAL, 0 },
    { "excsave1", 0xD1, XT_REG_SPECIAL, 0 },
    { "excsave2", 0xD2, XT_REG_SPECIAL, 0 },
    { "excsave3", 0xD3, XT_REG_SPECIAL, 0 },
    { "excsave4", 0xD4, XT_REG_SPECIAL, 0 },
    { "excsave5", 0xD5, XT_REG_SPECIAL, 0 },
    { "excsave6", 0xD6, XT_REG_SPECIAL, 0 },
    { "excsave7", 0xD7, XT_REG_SPECIAL, 0 },
    { "cpenable", 0xE0, XT_REG_SPECIAL, 0 },
    { "interrupt", 0xE2, XT_REG_SPECIAL, 0 },
    { "intset", 0xE2, XT_REG_SPECIAL, XT_REGF_NOREAD },	/* same SR as "interrupt"; write-only view */
    { "intclear", 0xE3, XT_REG_SPECIAL, XT_REGF_NOREAD },
    { "intenable", 0xE4, XT_REG_SPECIAL, 0 },
    { "vecbase", 0xE7, XT_REG_SPECIAL, 0 },
    { "exccause", 0xE8, XT_REG_SPECIAL, 0 },
    { "debugcause", 0xE9, XT_REG_SPECIAL, 0 },
    { "ccount", 0xEA, XT_REG_SPECIAL, 0 },
    { "prid", 0xEB, XT_REG_SPECIAL, 0 },
    { "icount", 0xEC, XT_REG_SPECIAL, 0 },
    { "icountlevel", 0xED, XT_REG_SPECIAL, 0 },
    { "excvaddr", 0xEE, XT_REG_SPECIAL, 0 },
    { "ccompare0", 0xF0, XT_REG_SPECIAL, 0 },
    { "ccompare1", 0xF1, XT_REG_SPECIAL, 0 },
    { "ccompare2", 0xF2, XT_REG_SPECIAL, 0 },
    { "misc0", 0xF4, XT_REG_SPECIAL, 0 },
    { "misc1", 0xF5, XT_REG_SPECIAL, 0 },
    { "misc2", 0xF6, XT_REG_SPECIAL, 0 },
    { "misc3", 0xF7, XT_REG_SPECIAL, 0 },
    { "litbase", 0x05, XT_REG_SPECIAL, 0 },
    { "ptevaddr", 0x53, XT_REG_SPECIAL, 0 },
    { "rasid", 0x5A, XT_REG_SPECIAL, 0 },
    { "itlbcfg", 0x5B, XT_REG_SPECIAL, 0 },
    { "dtlbcfg", 0x5C, XT_REG_SPECIAL, 0 },
    { "mepc", 0x6A, XT_REG_SPECIAL, 0 },
    { "meps", 0x6B, XT_REG_SPECIAL, 0 },
    { "mesave", 0x6C, XT_REG_SPECIAL, 0 },
    { "mesr", 0x6D, XT_REG_SPECIAL, 0 },
    { "mecr", 0x6E, XT_REG_SPECIAL, 0 },
    { "mevaddr", 0x6F, XT_REG_SPECIAL, 0 },
    { "a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0 },	/* WARNING: For these registers, regnum points to the */
    { "a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0 },	/* index of the corresponding ARx registers, NOT to */
    { "a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0 },	/* the processor register number! */
    { "a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0 },
    { "a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0 },
    { "a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0 },
    { "a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0 },
    { "a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0 },
    { "a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0 },
    { "a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0 },
    { "a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0 },
    { "a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0 },
    { "a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0 },
    { "a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0 },
    { "a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0 },
    { "a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0 },

    /* Debug-module registers; all marked NOREAD here. */
    { "pwrctl", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "pwrstat", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "eristat", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "cs_itctrl", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "cs_claimset", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "cs_claimclr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "cs_lockaccess", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "cs_lockstatus", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "cs_authstatus", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "fault_info", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "trax_id", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "trax_ctrl", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "trax_stat", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "trax_data", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "trax_addr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "trax_pctrigger", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "trax_pcmatch", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "trax_delay", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "trax_memstart", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "trax_memend", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "pmg", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "pmoc", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "pm0", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "pm1", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "pmctrl0", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "pmctrl1", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "pmstat0", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "pmstat1", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "ocd_id", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "ocd_dcrclr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "ocd_dcrset", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "ocd_dsr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
    { "ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
};
339
340
341 /**
342 * Types of memory used at xtensa target
343 */
/**
 * Types of memory used at xtensa target.
 * Order matters: xtensa_target_memory_region_find() iterates 0..XTENSA_MEM_REGS_NUM-1
 * and xtensa_get_mem_config() maps each value to a core_config member.
 */
enum xtensa_mem_region_type {
    XTENSA_MEM_REG_IROM = 0x0,	/* instruction ROM */
    XTENSA_MEM_REG_IRAM,	/* instruction RAM */
    XTENSA_MEM_REG_DROM,	/* data ROM */
    XTENSA_MEM_REG_DRAM,	/* data RAM */
    XTENSA_MEM_REG_URAM,	/* unified RAM */
    XTENSA_MEM_REG_XLMI,	/* XLMI port */
    XTENSA_MEM_REGS_NUM	/* sentinel: number of region types */
};
353
354 /**
355 * Gets a config for the specific mem type
356 */
357 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
358 struct xtensa *xtensa,
359 enum xtensa_mem_region_type type)
360 {
361 switch (type) {
362 case XTENSA_MEM_REG_IROM:
363 return &xtensa->core_config->irom;
364 case XTENSA_MEM_REG_IRAM:
365 return &xtensa->core_config->iram;
366 case XTENSA_MEM_REG_DROM:
367 return &xtensa->core_config->drom;
368 case XTENSA_MEM_REG_DRAM:
369 return &xtensa->core_config->dram;
370 case XTENSA_MEM_REG_URAM:
371 return &xtensa->core_config->uram;
372 case XTENSA_MEM_REG_XLMI:
373 return &xtensa->core_config->xlmi;
374 default:
375 return NULL;
376 }
377 }
378
379 /**
380 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
381 * for a given address
382 * Returns NULL if nothing found
383 */
384 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
385 const struct xtensa_local_mem_config *mem,
386 target_addr_t address)
387 {
388 for (unsigned int i = 0; i < mem->count; i++) {
389 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
390 if (address >= region->base && address < (region->base + region->size))
391 return region;
392 }
393 return NULL;
394 }
395
396 /**
397 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
398 * for a given address
399 * Returns NULL if nothing found
400 */
401 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
402 struct xtensa *xtensa,
403 target_addr_t address)
404 {
405 const struct xtensa_local_mem_region_config *result;
406 const struct xtensa_local_mem_config *mcgf;
407 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
408 mcgf = xtensa_get_mem_config(xtensa, mtype);
409 result = xtensa_memory_region_find(mcgf, address);
410 if (result)
411 return result;
412 }
413 return NULL;
414 }
415
/**
 * Register-cache "get" callback.
 * Effectively a no-op: all registers are read into the cache on halt anyway,
 * so this only validates that the target is actually halted.
 */
static int xtensa_core_reg_get(struct reg *reg)
{
    /* We don't need this because we read all registers on halt anyway. */
    struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
    struct target *target = xtensa->target;

    if (target->state != TARGET_HALTED)
        return ERROR_TARGET_NOT_HALTED;
    return ERROR_OK;
}
426
427 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
428 {
429 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
430 struct target *target = xtensa->target;
431
432 assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
433 if (target->state != TARGET_HALTED)
434 return ERROR_TARGET_NOT_HALTED;
435
436 buf_cpy(buf, reg->value, reg->size);
437 reg->dirty = true;
438 reg->valid = true;
439
440 return ERROR_OK;
441 }
442
/* Register access vtables. All three currently share the same get/set
 * callbacks; the separate u32/u128 user-register types exist so custom
 * targets can reference distinct reg_arch_type instances. */
static const struct reg_arch_type xtensa_reg_type = {
    .get = xtensa_core_reg_get,
    .set = xtensa_core_reg_set,
};

const struct reg_arch_type xtensa_user_reg_u32_type = {
    .get = xtensa_core_reg_get,
    .set = xtensa_core_reg_set,
};

const struct reg_arch_type xtensa_user_reg_u128_type = {
    .get = xtensa_core_reg_get,
    .set = xtensa_core_reg_set,
};
457
458 static inline size_t xtensa_insn_size_get(uint32_t insn)
459 {
460 return insn & BIT(3) ? 2 : XT_ISNS_SZ_MAX;
461 }
462
463 /* Convert a register index that's indexed relative to windowbase, to the real address. */
464 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(enum xtensa_reg_id reg_idx, int windowbase)
465 {
466 unsigned int idx;
467 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_AR63) {
468 idx = reg_idx - XT_REG_IDX_AR0;
469 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
470 idx = reg_idx - XT_REG_IDX_A0;
471 } else {
472 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
473 return -1;
474 }
475 return ((idx + windowbase * 4) & 63) + XT_REG_IDX_AR0;
476 }
477
/* Inverse of xtensa_windowbase_offset_to_canonical(): rotating by -windowbase
 * maps a canonical ARx index back to its window-relative index. */
static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(enum xtensa_reg_id reg_idx, int windowbase)
{
    return xtensa_windowbase_offset_to_canonical(reg_idx, -windowbase);
}
482
/* Flag a cached register as dirty so the next write-back pass pushes it to
 * the core. Does not touch the cached value itself. */
static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
{
    struct reg *reg_list = xtensa->core_cache->reg_list;
    reg_list[reg_idx].dirty = true;
}
488
489 static int xtensa_queue_dbg_reg_read(struct xtensa *xtensa, unsigned int reg, uint8_t *data)
490 {
491 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
492
493 if (!xtensa->core_config->trace.enabled &&
494 (reg <= NARADR_MEMADDREND || (reg >= NARADR_PMG && reg <= NARADR_PMSTAT7))) {
495 LOG_ERROR("Can not access %u reg when Trace Port option disabled!", reg);
496 return ERROR_FAIL;
497 }
498 return dm->dbg_ops->queue_reg_read(dm, reg, data);
499 }
500
501 static int xtensa_queue_dbg_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
502 {
503 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
504
505 if (!xtensa->core_config->trace.enabled &&
506 (reg <= NARADR_MEMADDREND || (reg >= NARADR_PMG && reg <= NARADR_PMSTAT7))) {
507 LOG_ERROR("Can not access %u reg when Trace Port option disabled!", reg);
508 return ERROR_FAIL;
509 }
510 return dm->dbg_ops->queue_reg_write(dm, reg, data);
511 }
512
/* Queue execution of one instruction on the core by writing its encoding to
 * the debug module's DIR0EXEC register. */
static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
{
    xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0EXEC, ins);
}
517
518 static bool xtensa_reg_is_readable(enum xtensa_reg_flags flags, xtensa_reg_val_t cpenable)
519 {
520 if (flags & XT_REGF_NOREAD)
521 return false;
522 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
523 return false;
524 return true;
525 }
526
/* Queue a write to a power/reset-domain debug register via the debug
 * module's power-register ops. */
static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
{
    struct xtensa_debug_module *dm = &xtensa->dbg_mod;
    return dm->pwr_ops->queue_reg_write(dm, reg, data);
}
532
/**
 * Report whether a special register exists on this particular core, based on
 * which configurable ISA options the core was built with. Registers not tied
 * to any option fall through to "exists".
 */
static bool xtensa_special_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
{
    /* TODO: array of size XT_NUM_REGS can be used here to map special register ID to
     * corresponding config option 'enabled' flag */
    if (reg_idx >= XT_REG_IDX_LBEG && reg_idx <= XT_REG_IDX_LCOUNT)
        return xtensa->core_config->loop;	/* zero-overhead loop option */
    else if (reg_idx == XT_REG_IDX_BR)
        return xtensa->core_config->boolean;
    else if (reg_idx == XT_REG_IDX_LITBASE)
        return xtensa->core_config->ext_l32r;
    else if (reg_idx == XT_REG_IDX_SCOMPARE1 || reg_idx == XT_REG_IDX_ATOMCTL)
        return xtensa->core_config->cond_store;	/* conditional store option */
    else if (reg_idx >= XT_REG_IDX_ACCLO && reg_idx <= XT_REG_IDX_M3)
        return xtensa->core_config->mac16;
    else if (reg_idx == XT_REG_IDX_WINDOWBASE || reg_idx == XT_REG_IDX_WINDOWSTART)
        return xtensa->core_config->windowed;
    else if (reg_idx >= XT_REG_IDX_PTEVADDR && reg_idx <= XT_REG_IDX_DTLBCFG)
        return xtensa->core_config->mmu.enabled;
    else if (reg_idx == XT_REG_IDX_MMID)
        return xtensa->core_config->trace.enabled;
    else if (reg_idx >= XT_REG_IDX_MEPC && reg_idx <= XT_REG_IDX_MEVADDR)
        return xtensa->core_config->mem_err_check;
    else if (reg_idx == XT_REG_IDX_CPENABLE)
        return xtensa->core_config->coproc;
    else if (reg_idx == XT_REG_IDX_VECBASE)
        return xtensa->core_config->reloc_vec;	/* relocatable vectors option */
    else if (reg_idx == XT_REG_IDX_CCOUNT)
        return xtensa->core_config->tim_irq.enabled;
    else if (reg_idx >= XT_REG_IDX_CCOMPARE0 && reg_idx <= XT_REG_IDX_CCOMPARE2)
        /* only as many CCOMPAREx as configured timer comparators */
        return xtensa->core_config->tim_irq.enabled &&
            (reg_idx - XT_REG_IDX_CCOMPARE0 < xtensa->core_config->tim_irq.comp_num);
    else if (reg_idx == XT_REG_IDX_PRID)
        return xtensa->core_config->proc_id;
    else if (reg_idx >= XT_REG_IDX_MISC0 && reg_idx <= XT_REG_IDX_MISC3)
        return reg_idx - XT_REG_IDX_MISC0 < xtensa->core_config->miscregs_num;
    return true;
}
570
571 static bool xtensa_user_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
572 {
573 if (reg_idx == XT_REG_IDX_THREADPTR)
574 return xtensa->core_config->threadptr;
575 if (reg_idx == XT_REG_IDX_FCR || reg_idx == XT_REG_IDX_FSR)
576 return xtensa->core_config->fp_coproc;
577 return false;
578 }
579
/* FP registers (f0..f15) exist iff the FP coprocessor option is configured;
 * the individual index does not matter. */
static inline bool xtensa_fp_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
{
    return xtensa->core_config->fp_coproc;
}
584
585 static inline bool xtensa_regular_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
586 {
587 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_AR63)
588 return reg_idx - XT_REG_IDX_AR0 < xtensa->core_config->aregs_num;
589 return true;
590 }
591
/**
 * Push every dirty cached register back to the core via the debug module.
 *
 * Order matters: SFR/user/FP registers are written first (they are staged
 * through scratch register A3, dirtying it), then the window-relative
 * A0..A15, then the full AR file, rotating the window in steps of 4 so each
 * group of 16 ARs becomes visible. The final ROTW wraps WINDOWBASE back to
 * its original value. All writes are queued and executed in one JTAG batch.
 *
 * Returns the jtag_execute_queue() result.
 */
static int xtensa_write_dirty_registers(struct target *target)
{
    struct xtensa *xtensa = target_to_xtensa(target);
    int res;
    xtensa_reg_val_t regval, windowbase = 0;
    bool scratch_reg_dirty = false;
    struct reg *reg_list = xtensa->core_cache->reg_list;

    LOG_TARGET_DEBUG(target, "start");

    /* We need to write the dirty registers in the cache list back to the processor.
     * Start by writing the SFR/user registers. */
    for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
        if (reg_list[i].dirty) {
            if (xtensa_regs[i].type == XT_REG_SPECIAL ||
                xtensa_regs[i].type == XT_REG_USER ||
                xtensa_regs[i].type == XT_REG_FR) {
                scratch_reg_dirty = true;	/* A3 is clobbered as staging register */
                regval = xtensa_reg_get(target, i);
                LOG_TARGET_DEBUG(target, "Writing back reg %s val %08" PRIX32,
                    xtensa_regs[i].name,
                    regval);
                /* Stage value: DDR -> A3, then A3 -> destination register */
                xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
                xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
                if (xtensa_regs[i].type == XT_REG_USER) {
                    if (reg_list[i].exist)
                        xtensa_queue_exec_ins(xtensa,
                            XT_INS_WUR(xtensa_regs[i].reg_num,
                                XT_REG_A3));
                } else if (xtensa_regs[i].type == XT_REG_FR) {
                    if (reg_list[i].exist)
                        xtensa_queue_exec_ins(xtensa,
                            XT_INS_WFR(xtensa_regs[i].reg_num,
                                XT_REG_A3));
                } else {	/* SFR */
                    if (reg_list[i].exist) {
                        unsigned int reg_num = xtensa_regs[i].reg_num;
                        if (reg_num == XT_PC_REG_NUM_BASE)
                            /* reg number of PC for debug interrupt
                             * depends on NDEBUGLEVEL */
                            reg_num += xtensa->core_config->debug.irq_level;

                        xtensa_queue_exec_ins(xtensa,
                            XT_INS_WSR(reg_num, XT_REG_A3));
                    }
                }
                reg_list[i].dirty = false;
            }
        }
    }
    if (scratch_reg_dirty)
        /* A3 now holds the last staged value; restore it in the AR pass below. */
        xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

    /* Target-specific hook for custom user registers, if provided. */
    if (xtensa->core_config->user_regs_num > 0 &&
        xtensa->core_config->queue_write_dirty_user_regs)
        xtensa->core_config->queue_write_dirty_user_regs(target);

    if (xtensa->core_config->windowed) {
        /* Grab the windowbase, we need it. */
        windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
        /* Check if there are problems with both the ARx as well as the corresponding Rx
         * registers set and dirty. */
        /* Warn the user if this happens, not much else we can do... */
        for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
            unsigned int j = xtensa_windowbase_offset_to_canonical(i, windowbase);
            if (reg_list[i].dirty && reg_list[j].dirty) {
                if (memcmp(reg_list[i].value, reg_list[j].value,
                        sizeof(xtensa_reg_val_t)) != 0)
                    LOG_WARNING(
                        "Warning: Both A%d as well as the physical register it points to (AR%d) are dirty and differs in value. Results are undefined!",
                        i - XT_REG_IDX_A0,
                        j - XT_REG_IDX_AR0);
            }
        }
    }

    /* Write A0-A16 */
    for (unsigned int i = 0; i < 16; i++) {
        if (reg_list[XT_REG_IDX_A0 + i].dirty) {
            regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
            LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
                xtensa_regs[XT_REG_IDX_A0 + i].name,
                regval,
                xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
            xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
            xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, i));
            reg_list[XT_REG_IDX_A0 + i].dirty = false;
        }
    }

    if (xtensa->core_config->windowed) {
        /* Now write AR0-AR63. */
        for (unsigned int j = 0; j < 64; j += 16) {
            /* Write the 16 registers we can see */
            for (unsigned int i = 0; i < 16; i++) {
                if (i + j < xtensa->core_config->aregs_num) {
                    enum xtensa_reg_id realadr =
                        xtensa_windowbase_offset_to_canonical(XT_REG_IDX_AR0 + i + j,
                        windowbase);
                    /* Write back any dirty un-windowed registers */
                    if (reg_list[realadr].dirty) {
                        regval = xtensa_reg_get(target, realadr);
                        LOG_TARGET_DEBUG(
                            target,
                            "Writing back reg %s value %08" PRIX32 ", num =%i",
                            xtensa_regs[realadr].name,
                            regval,
                            xtensa_regs[realadr].reg_num);
                        xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
                        xtensa_queue_exec_ins(xtensa,
                            XT_INS_RSR(XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
                        reg_list[realadr].dirty = false;
                    }
                }
            }
            /* Now rotate the window so we'll see the next 16 registers. The final rotate
             * will wraparound, */
            /* leaving us in the state we were. */
            xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(4));
        }
    }
    res = jtag_execute_queue();
    xtensa_core_status_check(target);

    return res;
}
718
/**
 * Default queue_write_dirty_user_regs hook for targets whose custom user
 * registers are plain 32-bit: stage each dirty value through DDR and scratch
 * register A3, then WUR it into place. Marks A3 dirty so the caller's AR
 * write-back pass restores it. Only queues operations; the caller executes
 * the JTAG queue.
 */
int xtensa_queue_write_dirty_user_regs_u32(struct target *target)
{
    struct xtensa *xtensa = target_to_xtensa(target);
    struct reg *reg_list = xtensa->core_cache->reg_list;
    xtensa_reg_val_t reg_val;
    bool scratch_reg_dirty = false;

    LOG_TARGET_DEBUG(target, "start");

    /* We need to write the dirty registers in the cache list back to the processor.
     * Start by writing the SFR/user registers. */
    for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
        if (!reg_list[XT_USR_REG_START + i].dirty)
            continue;
        scratch_reg_dirty = true;
        reg_val = xtensa_reg_get(target, XT_USR_REG_START + i);
        LOG_TARGET_DEBUG(target, "Writing back reg %s val %08" PRIX32,
            xtensa->core_config->user_regs[i].name,
            reg_val);
        /* DDR -> A3 -> user register */
        xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, reg_val);
        xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
        xtensa_queue_exec_ins(xtensa,
            XT_INS_WUR(xtensa->core_config->user_regs[i].reg_num,
                XT_REG_A3));
        reg_list[XT_USR_REG_START + i].dirty = false;
    }
    if (scratch_reg_dirty)
        xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

    return ERROR_OK;
}
750
/* True when the most recently read core status (DSR) has the STOPPED bit set.
 * Relies on a prior xtensa_dm_core_status_read(); does not query hardware. */
static inline bool xtensa_is_stopped(struct target *target)
{
    struct xtensa *xtensa = target_to_xtensa(target);
    return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
}
756
/**
 * OpenOCD examine handler: wake the debug/memory/core power domains, claim
 * the debug module for JTAG use, enable it, and verify the module responds
 * with a recognized OCD_ID. On success marks the target examined and applies
 * the cached SMP-break configuration.
 */
int xtensa_examine(struct target *target)
{
    struct xtensa *xtensa = target_to_xtensa(target);
    unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;

    LOG_DEBUG("coreid = %d", target->coreid);
    /* Two-step write: wake domains first, then additionally claim JTAG debug use. */
    xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
    xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
    xtensa_dm_queue_enable(&xtensa->dbg_mod);
    xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
    int res = jtag_execute_queue();
    if (res != ERROR_OK)
        return res;
    if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
        LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
        return ERROR_TARGET_FAILURE;
    }
    LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
    if (!target_was_examined(target))
        target_set_examined(target);
    xtensa_smpbreak_write(xtensa, xtensa->smp_break);
    return ERROR_OK;
}
780
/**
 * Wake the target's debug/memory/core power domains and claim JTAG debug use,
 * keeping CORERESET asserted if a reset is currently pending.
 */
int xtensa_wakeup(struct target *target)
{
    struct xtensa *xtensa = target_to_xtensa(target);
    unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;

    if (xtensa->reset_asserted)
        cmd |= PWRCTL_CORERESET;
    xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
    /* TODO: can we join this with the write above? */
    xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
    xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
    return jtag_execute_queue();
}
794
/**
 * Program the SMP cross-trigger configuration into the debug module:
 * requested bits (plus ENABLEOCD) go to DCRSET, and every break/run-stall
 * routing bit NOT requested is explicitly cleared via DCRCLR.
 * NOTE(review): the DSR write of 0x00110000 appears to acknowledge/clear
 * status bits — confirm the exact bits against the debug-module register map.
 */
int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
{
    uint32_t dsr_data = 0x00110000;
    /* Complement of the requested routing bits within the maskable set. */
    uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
        (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
        OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);

    LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
    xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, set | OCDDCR_ENABLEOCD);
    xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, clear);
    xtensa_queue_dbg_reg_write(xtensa, NARADR_DSR, dsr_data);
    xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
    return jtag_execute_queue();
}
809
810 int xtensa_smpbreak_set(struct target *target, uint32_t set)
811 {
812 struct xtensa *xtensa = target_to_xtensa(target);
813 int res = ERROR_OK;
814
815 xtensa->smp_break = set;
816 if (target_was_examined(target))
817 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
818 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
819 return res;
820 }
821
/* Read the current Debug Control Register value from the hardware into *val.
 * Returns the JTAG queue execution status. */
int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
{
	uint8_t dcr_buf[sizeof(uint32_t)];

	xtensa_queue_dbg_reg_read(xtensa, NARADR_DCRSET, dcr_buf);
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = jtag_execute_queue();
	/* NOTE(review): *val is decoded even when the queue execution failed —
	 * callers must check the return code before trusting the value. */
	*val = buf_get_u32(dcr_buf, 0, 32);

	return res;
}
833
834 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
835 {
836 struct xtensa *xtensa = target_to_xtensa(target);
837 *val = xtensa->smp_break;
838 return ERROR_OK;
839 }
840
841 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
842 {
843 return buf_get_u32(reg->value, 0, 32);
844 }
845
846 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
847 {
848 buf_set_u32(reg->value, 0, 32, value);
849 reg->dirty = true;
850 }
851
852 int xtensa_core_status_check(struct target *target)
853 {
854 struct xtensa *xtensa = target_to_xtensa(target);
855 int res, needclear = 0;
856
857 xtensa_dm_core_status_read(&xtensa->dbg_mod);
858 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
859 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
860 if (dsr & OCDDSR_EXECBUSY) {
861 if (!xtensa->suppress_dsr_errors)
862 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
863 needclear = 1;
864 }
865 if (dsr & OCDDSR_EXECEXCEPTION) {
866 if (!xtensa->suppress_dsr_errors)
867 LOG_TARGET_ERROR(target,
868 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
869 dsr);
870 needclear = 1;
871 }
872 if (dsr & OCDDSR_EXECOVERRUN) {
873 if (!xtensa->suppress_dsr_errors)
874 LOG_TARGET_ERROR(target,
875 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
876 dsr);
877 needclear = 1;
878 }
879 if (needclear) {
880 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
881 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
882 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
883 LOG_TARGET_ERROR(target, "clearing DSR failed!");
884 return xtensa->suppress_dsr_errors ? ERROR_OK : ERROR_FAIL;
885 }
886 return ERROR_OK;
887 }
888
889 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
890 {
891 struct xtensa *xtensa = target_to_xtensa(target);
892 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
893 assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
894 return xtensa_reg_get_value(reg);
895 }
896
897 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
898 {
899 struct xtensa *xtensa = target_to_xtensa(target);
900 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
901 assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
902 if (xtensa_reg_get_value(reg) == value)
903 return;
904 xtensa_reg_set_value(reg, value);
905 }
906
/* Put the core into reset while keeping the debug module, memories and the
 * JTAG debug interface powered, so debugging can continue across the reset. */
int xtensa_assert_reset(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
	target->state = TARGET_RESET;
	xtensa_queue_pwr_reg_write(xtensa,
		DMREG_PWRCTL,
		PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP |
		PWRCTL_CORERESET);
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = jtag_execute_queue();
	if (res != ERROR_OK)
		return res;
	/* Remembered so xtensa_wakeup() keeps the reset bit set until deassert */
	xtensa->reset_asserted = true;
	return res;
}
924
/* Release the core from reset. When reset_halt is requested, a debug
 * interrupt is armed first so the core halts as soon as it leaves reset. */
int xtensa_deassert_reset(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
	if (target->reset_halt)
		xtensa_queue_dbg_reg_write(xtensa,
			NARADR_DCRSET,
			OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
	/* Same wakeup bits as assert_reset, but without PWRCTL_CORERESET */
	xtensa_queue_pwr_reg_write(xtensa,
		DMREG_PWRCTL,
		PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP);
	xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
	int res = jtag_execute_queue();
	if (res != ERROR_OK)
		return res;
	target->state = TARGET_RUNNING;
	xtensa->reset_asserted = false;
	return res;
}
945
/* Read the complete register set from a just-halted core into the register
 * cache. All hardware reads are pipelined through the JTAG queue in batches
 * (AREGS first, then special/user/FP registers) for speed, then decoded from
 * the raw buffers. On windowed configs the AR file is rotated with ROTW to
 * reach all physical registers, and WINDOWBASE is used to map physical to
 * canonical register numbers. Clobbers A3 (marked dirty for write-back). */
int xtensa_fetch_all_regs(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg *reg_list = xtensa->core_cache->reg_list;
	xtensa_reg_val_t cpenable = 0, windowbase = 0;
	uint8_t regvals[XT_NUM_REGS][sizeof(xtensa_reg_val_t)];
	uint8_t dsrs[XT_NUM_REGS][sizeof(xtensa_dsr_t)];
	/* Per-read DSR capture is expensive; only do it on the first fetch or
	 * when debug logging is enabled. */
	bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);

	LOG_TARGET_DEBUG(target, "start");

	/* Assume the CPU has just halted. We now want to fill the register cache with all the
	 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
	 * in one go, then sort everything out from the regvals variable. */

	/* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
	for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
		/* Grab the 16 registers we can see: WSR copies ARn into DDR where
		 * the debug module can read it out. */
		for (unsigned int i = 0; i < 16; i++) {
			if (i + j < xtensa->core_config->aregs_num) {
				xtensa_queue_exec_ins(xtensa,
					XT_INS_WSR(XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
				xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[XT_REG_IDX_AR0 + i + j]);
				if (debug_dsrs)
					xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[XT_REG_IDX_AR0 + i + j]);
			}
		}
		if (xtensa->core_config->windowed) {
			/* Now rotate the window so we'll see the next 16 registers. The final
			 * rotate will wraparound, leaving us in the state we were. */
			xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(4));
		}
	}
	if (xtensa->core_config->coproc) {
		/* As the very first thing after AREGS, go grab CPENABLE. It indicates
		 * whether we can also read the FP (and theoretically other coprocessor)
		 * registers, or if this is a bad thing to do. */
		xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
		xtensa_queue_exec_ins(xtensa, XT_INS_WSR(XT_SR_DDR, XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[XT_REG_IDX_CPENABLE]);
	}
	int res = jtag_execute_queue();
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to read ARs (%d)!", res);
		return res;
	}
	xtensa_core_status_check(target);

	if (xtensa->core_config->coproc)
		cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE], 0, 32);
	/* We're now free to use any of A0-A15 as scratch registers
	 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
	for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
		if (xtensa_reg_is_readable(xtensa_regs[i].flags, cpenable) && reg_list[i].exist &&
			(xtensa_regs[i].type == XT_REG_SPECIAL ||
				xtensa_regs[i].type == XT_REG_USER || xtensa_regs[i].type == XT_REG_FR)) {
			if (xtensa_regs[i].type == XT_REG_USER) {
				xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa_regs[i].reg_num, XT_REG_A3));
			} else if (xtensa_regs[i].type == XT_REG_FR) {
				xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa_regs[i].reg_num, XT_REG_A3));
			} else {	/* SFR */
				unsigned int reg_num = xtensa_regs[i].reg_num;
				if (reg_num == XT_PC_REG_NUM_BASE) {
					/* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
					reg_num += xtensa->core_config->debug.irq_level;
				}
				xtensa_queue_exec_ins(xtensa, XT_INS_RSR(reg_num, XT_REG_A3));
			}
			xtensa_queue_exec_ins(xtensa, XT_INS_WSR(XT_SR_DDR, XT_REG_A3));
			xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[i]);
			if (debug_dsrs)
				xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[i]);
		}
	}
	/* Ok, send the whole mess to the CPU. */
	res = jtag_execute_queue();
	if (res != ERROR_OK) {
		LOG_ERROR("Failed to fetch AR regs!");
		return res;
	}
	xtensa_core_status_check(target);

	if (debug_dsrs) {
		/* DSR checking: follows order in which registers are requested. */
		for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
			if (xtensa_reg_is_readable(xtensa_regs[i].flags, cpenable) && reg_list[i].exist &&
				(xtensa_regs[i].type == XT_REG_SPECIAL || xtensa_regs[i].type == XT_REG_USER ||
					xtensa_regs[i].type == XT_REG_FR)) {
				if (buf_get_u32(dsrs[i], 0, 32) & OCDDSR_EXECEXCEPTION) {
					LOG_ERROR("Exception reading %s!", xtensa_regs[i].name);
					return ERROR_FAIL;
				}
			}
		}
	}

	/* Config-specific hook for fetching additional user registers */
	if (xtensa->core_config->user_regs_num > 0 && xtensa->core_config->fetch_user_regs) {
		res = xtensa->core_config->fetch_user_regs(target);
		if (res != ERROR_OK)
			return res;
	}

	if (xtensa->core_config->windowed) {
		/* We need the windowbase to decode the general addresses. */
		windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE], 0, 32);
	}
	/* Decode the result and update the cache. */
	for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
		if (xtensa_reg_is_readable(xtensa_regs[i].flags, cpenable) && reg_list[i].exist) {
			if (xtensa_regs[i].type == XT_REG_GENERAL) {
				/* TODO: add support for non-windowed configs */
				assert(
					xtensa->core_config->windowed &&
					"Regs fetch is not supported for non-windowed configs!");
				/* The 64-value general register set is read from (windowbase) on down.
				 * We need to get the real register address by subtracting windowbase and
				 * wrapping around. */
				int realadr = xtensa_canonical_to_windowbase_offset(i, windowbase);
				buf_cpy(regvals[realadr], reg_list[i].value, reg_list[i].size);
			} else if (xtensa_regs[i].type == XT_REG_RELGEN) {
				buf_cpy(regvals[xtensa_regs[i].reg_num], reg_list[i].value, reg_list[i].size);
			} else {
				buf_cpy(regvals[i], reg_list[i].value, reg_list[i].size);
			}
			reg_list[i].valid = true;
		} else {
			/* Unreadable/absent registers are explicitly invalidated */
			reg_list[i].valid = false;
		}
	}
	/* We have used A3 as a scratch register and we will need to write that back. */
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	xtensa->regs_fetched = true;

	return ERROR_OK;
}
1082
1083 int xtensa_fetch_user_regs_u32(struct target *target)
1084 {
1085 struct xtensa *xtensa = target_to_xtensa(target);
1086 struct reg *reg_list = xtensa->core_cache->reg_list;
1087 xtensa_reg_val_t cpenable = 0;
1088 uint8_t regvals[XT_USER_REGS_NUM_MAX][sizeof(xtensa_reg_val_t)];
1089 uint8_t dsrs[XT_USER_REGS_NUM_MAX][sizeof(xtensa_dsr_t)];
1090 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1091
1092 assert(xtensa->core_config->user_regs_num < XT_USER_REGS_NUM_MAX && "Too many user regs configured!");
1093 if (xtensa->core_config->coproc)
1094 cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
1095
1096 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
1097 if (!xtensa_reg_is_readable(xtensa->core_config->user_regs[i].flags, cpenable))
1098 continue;
1099 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa->core_config->user_regs[i].reg_num, XT_REG_A3));
1100 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(XT_SR_DDR, XT_REG_A3));
1101 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[i]);
1102 if (debug_dsrs)
1103 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[i]);
1104 }
1105 /* Ok, send the whole mess to the CPU. */
1106 int res = jtag_execute_queue();
1107 if (res != ERROR_OK) {
1108 LOG_ERROR("Failed to fetch AR regs!");
1109 return res;
1110 }
1111 xtensa_core_status_check(target);
1112
1113 if (debug_dsrs) {
1114 /* DSR checking: follows order in which registers are requested. */
1115 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
1116 if (!xtensa_reg_is_readable(xtensa->core_config->user_regs[i].flags, cpenable))
1117 continue;
1118 if (buf_get_u32(dsrs[i], 0, 32) & OCDDSR_EXECEXCEPTION) {
1119 LOG_ERROR("Exception reading %s!", xtensa->core_config->user_regs[i].name);
1120 return ERROR_FAIL;
1121 }
1122 }
1123 }
1124
1125 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
1126 if (xtensa_reg_is_readable(xtensa->core_config->user_regs[i].flags, cpenable)) {
1127 buf_cpy(regvals[i], reg_list[XT_USR_REG_START + i].value, reg_list[XT_USR_REG_START + i].size);
1128 reg_list[XT_USR_REG_START + i].valid = true;
1129 } else {
1130 reg_list[XT_USR_REG_START + i].valid = false;
1131 }
1132 }
1133
1134 /* We have used A3 as a scratch register and we will need to write that back. */
1135 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1136 return ERROR_OK;
1137 }
1138
1139 int xtensa_get_gdb_reg_list(struct target *target,
1140 struct reg **reg_list[],
1141 int *reg_list_size,
1142 enum target_register_class reg_class)
1143 {
1144 struct xtensa *xtensa = target_to_xtensa(target);
1145 unsigned int num_regs = xtensa->core_config->gdb_general_regs_num;
1146
1147 if (reg_class == REG_CLASS_ALL)
1148 num_regs = xtensa->regs_num;
1149
1150 LOG_DEBUG("reg_class=%i, num_regs=%d", reg_class, num_regs);
1151
1152 *reg_list = malloc(num_regs * sizeof(struct reg *));
1153 if (!*reg_list)
1154 return ERROR_FAIL;
1155
1156 for (unsigned int k = 0; k < num_regs; k++) {
1157 unsigned int reg_id = xtensa->core_config->gdb_regs_mapping[k];
1158 (*reg_list)[k] = &xtensa->core_cache->reg_list[reg_id];
1159 }
1160
1161 *reg_list_size = num_regs;
1162
1163 return ERROR_OK;
1164 }
1165
1166 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1167 {
1168 struct xtensa *xtensa = target_to_xtensa(target);
1169 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1170 xtensa->core_config->mmu.dtlb_entries_count > 0;
1171 return ERROR_OK;
1172 }
1173
/* Request a halt: if the core is not already stopped, raise a debug
 * interrupt via DCR. The actual halted state is observed later by poll. */
int xtensa_halt(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "start");
	if (target->state == TARGET_HALTED) {
		LOG_TARGET_DEBUG(target, "target was already halted");
		return ERROR_OK;
	}
	/* First we have to read dsr and check if the target stopped */
	int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
	if (res != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to read core status!");
		return res;
	}
	LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
	if (!xtensa_is_stopped(target)) {
		xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
		xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
		res = jtag_execute_queue();
		if (res != ERROR_OK)
			LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
	}

	return res;
}
1200
/* Prepare the core for resuming: set the PC (or step over the instruction
 * that caused the halt, so a watchpoint/break does not immediately re-fire),
 * re-arm the hardware breakpoints GDB has installed, and flush the dirty
 * register cache to the target. Does not actually release the core — that
 * is xtensa_do_resume()'s job. */
int xtensa_prepare_resume(struct target *target,
	int current,
	target_addr_t address,
	int handle_breakpoints,
	int debug_execution)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	uint32_t bpena = 0;

	LOG_TARGET_DEBUG(target,
		"current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
		current,
		address,
		handle_breakpoints,
		debug_execution);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (address && !current) {
		xtensa_reg_set(target, XT_REG_IDX_PC, address);
	} else {
		xtensa_reg_val_t cause = xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
		if (cause & DEBUGCAUSE_DB) {
			/* We stopped due to a watchpoint. We can't just resume executing
			 * the instruction again because that would trigger the watchpoint
			 * again. To fix this, we single-step, which ignores watchpoints.
			 * NOTE(review): the return code of this step is not checked —
			 * confirm a failed step here is acceptable to ignore. */
			xtensa_do_step(target, current, address, handle_breakpoints);
		}
		if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
			/* We stopped due to a break instruction. We can't just resume
			 * executing the instruction again because that would trigger the
			 * break again. To fix this, we single-step, which ignores break. */
			xtensa_do_step(target, current, address, handle_breakpoints);
		}
	}

	/* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
	 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
	for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
		if (xtensa->hw_brps[slot]) {
			/* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
			xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
			bpena |= BIT(slot);
		}
	}
	xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);

	/* Here we write all registers to the targets */
	int res = xtensa_write_dirty_registers(target);
	if (res != ERROR_OK)
		LOG_TARGET_ERROR(target, "Failed to write back register cache.");
	return res;
}
1259
/* Release the core from the debug exception by executing RFDO ("Return From
 * Debug Operation"). Assumes xtensa_prepare_resume() has already run. */
int xtensa_do_resume(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_TARGET_DEBUG(target, "start");

	xtensa_queue_exec_ins(xtensa, XT_INS_RFDO);
	int res = jtag_execute_queue();
	if (res != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
		return res;
	}
	/* NOTE(review): the status-check result is not propagated; this function
	 * returns ERROR_OK even if the DSR reported an error — confirm intended. */
	xtensa_core_status_check(target);
	return ERROR_OK;
}
1275
1276 int xtensa_resume(struct target *target,
1277 int current,
1278 target_addr_t address,
1279 int handle_breakpoints,
1280 int debug_execution)
1281 {
1282 LOG_TARGET_DEBUG(target, "start");
1283 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1284 if (res != ERROR_OK) {
1285 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1286 return res;
1287 }
1288 res = xtensa_do_resume(target);
1289 if (res != ERROR_OK) {
1290 LOG_TARGET_ERROR(target, "Failed to resume!");
1291 return res;
1292 }
1293
1294 target->debug_reason = DBG_REASON_NOTHALTED;
1295 if (!debug_execution)
1296 target->state = TARGET_RUNNING;
1297 else
1298 target->state = TARGET_DEBUG_RUNNING;
1299
1300 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1301
1302 return ERROR_OK;
1303 }
1304
1305 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1306 {
1307 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1308 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1309 if (err != ERROR_OK)
1310 return false;
1311
1312 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1313 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK;
1314 if (masked == XT_INS_L32E(0, 0, 0) || masked == XT_INS_S32E(0, 0, 0))
1315 return true;
1316
1317 masked = insn & XT_INS_RFWO_RFWU_MASK;
1318 if (masked == XT_INS_RFWO || masked == XT_INS_RFWU)
1319 return true;
1320
1321 return false;
1322 }
1323
/* Execute one instruction on the halted core using the ICOUNT mechanism:
 * ICOUNT is loaded with -2 and ICOUNTLEVEL with the debug IRQ level, so the
 * core raises a debug interrupt after a single instruction. Handles the
 * special cases of resuming past a watchpoint or a break instruction
 * (temporarily disabling them), optional interrupt masking during the step
 * (XT_STEPPING_ISR_OFF), and stepping out of window exception handlers.
 * Returns ERROR_OK once the core is halted again, or an error/timeout code. */
int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	int res;
	const uint32_t icount_val = -2;	/* ICOUNT value to load for 1 step */
	xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
	xtensa_reg_val_t icountlvl, cause;
	xtensa_reg_val_t oldps, newps, oldpc, cur_pc;

	LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
		current, address, handle_breakpoints);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (xtensa->core_config->debug.icount_sz != 32) {
		LOG_TARGET_WARNING(target, "stepping for ICOUNT less then 32 bits is not implemented!");
		return ERROR_FAIL;
	}

	/* Save old ps/pc */
	oldps = xtensa_reg_get(target, XT_REG_IDX_PS);
	oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);

	cause = xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
	LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
		oldps,
		oldpc,
		cause,
		xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
	if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
		/* handle hard-coded SW breakpoints (e.g. syscalls) */
		LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
		xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);	/* so we don't recurse into the same routine */
		xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
		/* pretend that we have stepped: advance PC past the break instruction
		 * (BREAK is 3 bytes, BREAK.N is 2 bytes) */
		if (cause & DEBUGCAUSE_BI)
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3);	/* PC = PC+3 */
		else
			xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2);	/* PC = PC+2 */
		return ERROR_OK;
	}

	/* Xtensa has an ICOUNTLEVEL register which sets the maximum interrupt level at which the
	 * instructions are to be counted while stepping.
	 * For example, if we need to step by 2 instructions, and an interrupt occurs inbetween,
	 * the processor will execute the interrupt, return, and halt after the 2nd instruction.
	 * However, sometimes we don't want the interrupt handlers to be executed at all, while
	 * stepping through the code. In this case (XT_STEPPING_ISR_OFF), PS.INTLEVEL can be raised
	 * to only allow Debug and NMI interrupts.
	 */
	if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
		if (!xtensa->core_config->high_irq.enabled) {
			LOG_TARGET_WARNING(
				target,
				"disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
			return ERROR_FAIL;
		}
		/* Mask all interrupts below Debug, i.e. PS.INTLEVEL = DEBUGLEVEL - 1 */
		xtensa_reg_val_t temp_ps = (oldps & ~0xF) | (xtensa->core_config->debug.irq_level - 1);
		xtensa_reg_set(target, XT_REG_IDX_PS, temp_ps);
	}
	/* Regardless of ISRs masking mode we need to count instructions at any CINTLEVEL during step.
	   So set `icountlvl` to DEBUGLEVEL.
	   If ISRs are masked they are disabled in PS (see above), so having `icountlvl` set to DEBUGLEVEL
	   will allow to step through any type of the code, e.g. 'high int level' ISR.
	   If ISRs are not masked With `icountlvl` set to DEBUGLEVEL, we can step into any ISR
	   which can happen (enabled in PS).
	 */
	icountlvl = xtensa->core_config->debug.irq_level;

	if (cause & DEBUGCAUSE_DB) {
		/* We stopped due to a watchpoint. We can't just resume executing the instruction again because
		 * that would trigger the watchpoint again. To fix this, we remove watchpoints,single-step and
		 * re-enable the watchpoint. */
		LOG_TARGET_DEBUG(
			target,
			"Single-stepping to get past instruction that triggered the watchpoint...");
		xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);	/* so we don't recurse into the same routine */
		xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
		/* Save all DBREAKCx registers and set to 0 to disable watchpoints */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
			dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
		}
	}

	if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
		/* handle normal SW breakpoint */
		xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);	/* so we don't recurse into the same routine */
		xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
	}
	do {
		xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
		xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);

		/* Now ICOUNT is set, we can resume as if we were going to run */
		res = xtensa_prepare_resume(target, current, address, 0, 0);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
			return res;
		}
		res = xtensa_do_resume(target);
		if (res != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
			return res;
		}

		/* Wait for stepping to complete (up to ~500ms) */
		long long start = timeval_ms();
		while (timeval_ms() < start + 500) {
			/* Do not use target_poll here, it also triggers other things... just manually read the DSR
			 * until stepping is complete. */
			usleep(1000);
			res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
			if (res != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Failed to read core status!");
				return res;
			}
			if (xtensa_is_stopped(target))
				break;
			/* NOTE(review): together with the usleep above this sleeps 2ms per
			 * iteration — possibly one of the two is redundant. */
			usleep(1000);
		}
		LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
			xtensa_dm_core_status_get(&xtensa->dbg_mod));
		if (!xtensa_is_stopped(target)) {
			LOG_TARGET_WARNING(
				target,
				"Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
			target->debug_reason = DBG_REASON_NOTHALTED;
			target->state = TARGET_RUNNING;
			return ERROR_FAIL;
		}
		target->debug_reason = DBG_REASON_SINGLESTEP;
		target->state = TARGET_HALTED;

		xtensa_fetch_all_regs(target);

		cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);

		LOG_TARGET_DEBUG(target,
			"cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
			xtensa_reg_get(target, XT_REG_IDX_PS),
			cur_pc,
			xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE),
			xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));

		/* Do not step into WindowOverflow if ISRs are masked.
		   If we stop in WindowOverflow at breakpoint with masked ISRs and
		   try to do a step it will get us out of that handler */
		if (xtensa->core_config->windowed &&
			xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
			xtensa_pc_in_winexc(target, cur_pc)) {
			/* isrmask = on, need to step out of the window exception handler */
			LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
			oldpc = cur_pc;
			address = oldpc + 3;
			continue;
		}

		if (oldpc == cur_pc)
			LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
				xtensa_dm_core_status_get(&xtensa->dbg_mod));
		else
			LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
		break;
	} while (true);
	LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);

	if (cause & DEBUGCAUSE_DB) {
		LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
		/* Restore the DBREAKCx registers */
		for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
			xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
	}

	/* Restore int level */
	/* TODO: Theoretically, this can mess up stepping over an instruction that modifies
	 * ps.intlevel by itself. TODO: Look into this. */
	if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
		newps = xtensa_reg_get(target, XT_REG_IDX_PS);
		newps = (newps & ~0xF) | (oldps & 0xf);
		xtensa_reg_set(target, XT_REG_IDX_PS, newps);
	}

	/* write ICOUNTLEVEL back to zero */
	xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
	/* TODO: can we skip writing dirty registers and re-fetching them? */
	res = xtensa_write_dirty_registers(target);
	xtensa_fetch_all_regs(target);
	return res;
}
1521
1522 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1523 {
1524 return xtensa_do_step(target, current, address, handle_breakpoints);
1525 }
1526
1527 /**
1528 * Returns true if two ranges are overlapping
1529 */
1530 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1531 target_addr_t r1_end,
1532 target_addr_t r2_start,
1533 target_addr_t r2_end)
1534 {
1535 if ((r2_start >= r1_start) && (r2_start < r1_end))
1536 return true; /* r2_start is in r1 region */
1537 if ((r2_end > r1_start) && (r2_end <= r1_end))
1538 return true; /* r2_end is in r1 region */
1539 return false;
1540 }
1541
1542 /**
1543 * Returns a size of overlapped region of two ranges.
1544 */
1545 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1546 target_addr_t r1_end,
1547 target_addr_t r2_start,
1548 target_addr_t r2_end)
1549 {
1550 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1551 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1552 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1553 return ov_end - ov_start;
1554 }
1555 return 0;
1556 }
1557
1558 /**
1559 * Check if the address gets to memory regions, and it's access mode
1560 */
1561 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1562 {
1563 target_addr_t adr_pos = address; /* address cursor set to the beginning start */
1564 target_addr_t adr_end = address + size; /* region end */
1565 target_addr_t overlap_size;
1566 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1567
1568 while (adr_pos < adr_end) {
1569 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1570 if (!cm) /* address is not belong to anything */
1571 return false;
1572 if ((cm->access & access) != access) /* access check */
1573 return false;
1574 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1575 assert(overlap_size != 0);
1576 adr_pos += overlap_size;
1577 }
1578 return true;
1579 }
1580
/* Read target memory in 32-bit units via the LDDR32P auto-increment debug
 * instruction. Unaligned requests are widened to an aligned range, read into
 * a temporary buffer, and the requested bytes copied out. Unless permissive
 * mode is on, the range must lie in configured readable memory regions.
 * Clobbers A3 (marked dirty for write-back). */
int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	/* We are going to read memory in 32-bit increments. This may not be what the calling
	 * function expects, so we may need to allocate a temp buffer and read into that first. */
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
	target_addr_t adr = addrstart_al;
	uint8_t *albuff;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!xtensa->permissive_mode) {
		if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
				XT_MEM_ACCESS_READ)) {
			LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
			return ERROR_FAIL;
		}
	}

	/* Read directly into the caller's buffer when it is already aligned */
	if (addrstart_al == address && addrend_al == address + (size * count)) {
		albuff = buffer;
	} else {
		albuff = malloc(addrend_al - addrstart_al);
		if (!albuff) {
			LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
				addrend_al - addrstart_al);
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	}

	/* We're going to use A3 here */
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
	/* Write start address to A3 */
	xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
	/* Now we can safely read data from addrstart_al up to addrend_al into albuff:
	 * LDDR32P loads from (A3) into DDR and post-increments A3 by 4 */
	for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
		xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(XT_REG_A3));
		xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[i]);
	}
	int res = jtag_execute_queue();
	if (res == ERROR_OK)
		res = xtensa_core_status_check(target);
	if (res != ERROR_OK)
		LOG_TARGET_WARNING(target, "Failed reading %d bytes at address " TARGET_ADDR_FMT,
			count * size, address);

	/* Copy only the requested window out of the aligned temp buffer */
	if (albuff != buffer) {
		memcpy(buffer, albuff + (address & 3), (size * count));
		free(albuff);
	}

	return res;
}
1639
/* Byte-granular read entry point for the target API.
 * xtensa_read_memory() can also read unaligned stuff. Just pass through to that
 * routine with element size 1, so `count` is the byte count. */
int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	/* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
	return xtensa_read_memory(target, address, 1, count, buffer);
}
1645
int xtensa_write_memory(struct target *target,
	target_addr_t address,
	uint32_t size,
	uint32_t count,
	const uint8_t *buffer)
{
	/* This memory write function can get thrown nigh everything into it, from
	 * aligned uint32 writes to unaligned uint8ths. The Xtensa memory doesn't always
	 * accept anything but aligned uint32 writes, though. That is why we convert
	 * everything into that. */
	struct xtensa *xtensa = target_to_xtensa(target);
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);	/* word-aligned start of the touched region */
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);	/* word-aligned end (exclusive) */
	target_addr_t adr = addrstart_al;
	int res;
	uint8_t *albuff;	/* word-aligned shadow of the caller's data */

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* In non-permissive mode, reject writes outside the configured memory map. */
	if (!xtensa->permissive_mode) {
		if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
			LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
			return ERROR_FAIL;
		}
	}

	if (size == 0 || count == 0 || !buffer)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Allocate a temporary buffer to put the aligned bytes in, if needed. */
	if (addrstart_al == address && addrend_al == address + (size * count)) {
		/* We discard the const here because albuff can also be non-const */
		albuff = (uint8_t *)buffer;
	} else {
		albuff = malloc(addrend_al - addrstart_al);
		if (!albuff) {
			LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
				addrend_al - addrstart_al);
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	}

	/* We're going to use A3 here */
	xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);

	/* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
	if (albuff != buffer) {
		/* See if we need to read the first and/or last word. */
		if (address & 3) {
			/* Unaligned head: preload the word containing the first bytes.
			 * LDDR32P loads via A3 (seeded through DDR) and advances A3. */
			xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
			xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(XT_REG_A3));
			xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[0]);
		}
		if ((address + (size * count)) & 3) {
			/* Unaligned tail: preload the word containing the last bytes. */
			xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrend_al - 4);
			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
			xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(XT_REG_A3));
			xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR,
				&albuff[addrend_al - addrstart_al - 4]);
		}
		/* Grab bytes */
		res = jtag_execute_queue();
		if (res != ERROR_OK) {
			LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
			if (albuff != buffer)
				free(albuff);
			return res;
		}
		/* NOTE(review): return value ignored here; an error on the head/tail
		 * read only surfaces via the status check after the write below. */
		xtensa_core_status_check(target);
		/* Copy data to be written into the aligned buffer */
		memcpy(&albuff[address & 3], buffer, size * count);
		/* Now we can write albuff in aligned uint32s. */
	}

	/* Write start address to A3 */
	xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
	xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
	/* Write the aligned buffer */
	for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
		/* SDDR32P stores the DDR value through A3 and advances A3. */
		xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, buf_get_u32(&albuff[i], 0, 32));
		xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(XT_REG_A3));
	}
	res = jtag_execute_queue();
	if (res == ERROR_OK)
		res = xtensa_core_status_check(target);
	if (res != ERROR_OK)
		LOG_TARGET_WARNING(target, "Failed writing %d bytes at address " TARGET_ADDR_FMT, count * size, address);
	if (albuff != buffer)
		free(albuff);

	return res;
}
1742
/* Byte-granular write entry point for the target API.
 * xtensa_write_memory() handles alignment conversion itself, so simply forward
 * with element size 1 (`count` is the byte count). */
int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	/* xtensa_write_memory can handle everything. Just pass on to that. */
	return xtensa_write_memory(target, address, 1, count, buffer);
}
1748
1749 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
1750 {
1751 LOG_WARNING("not implemented yet");
1752 return ERROR_FAIL;
1753 }
1754
1755 int xtensa_poll(struct target *target)
1756 {
1757 struct xtensa *xtensa = target_to_xtensa(target);
1758
1759 int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET | PWRSTAT_COREWASRESET);
1760 if (res != ERROR_OK)
1761 return res;
1762
1763 if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
1764 LOG_TARGET_INFO(target, "Debug controller was reset.");
1765 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
1766 if (res != ERROR_OK)
1767 return res;
1768 }
1769 if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
1770 LOG_TARGET_INFO(target, "Core was reset.");
1771 xtensa_dm_power_status_cache(&xtensa->dbg_mod);
1772 /* Enable JTAG, set reset if needed */
1773 res = xtensa_wakeup(target);
1774 if (res != ERROR_OK)
1775 return res;
1776
1777 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1778 if (res != ERROR_OK)
1779 return res;
1780 if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET) {
1781 /* if RESET state is persitent */
1782 target->state = TARGET_RESET;
1783 } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
1784 LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
1785 xtensa->dbg_mod.core_status.dsr,
1786 xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
1787 target->state = TARGET_UNKNOWN;
1788 if (xtensa->come_online_probes_num == 0)
1789 target->examined = false;
1790 else
1791 xtensa->come_online_probes_num--;
1792 } else if (xtensa_is_stopped(target)) {
1793 if (target->state != TARGET_HALTED) {
1794 enum target_state oldstate = target->state;
1795 target->state = TARGET_HALTED;
1796 /* Examine why the target has been halted */
1797 target->debug_reason = DBG_REASON_DBGRQ;
1798 xtensa_fetch_all_regs(target);
1799 /* When setting debug reason DEBUGCAUSE events have the following
1800 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
1801 /* Watchpoint and breakpoint events at the same time results in special
1802 * debug reason: DBG_REASON_WPTANDBKPT. */
1803 xtensa_reg_val_t halt_cause = xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
1804 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
1805 if (halt_cause & DEBUGCAUSE_IC)
1806 target->debug_reason = DBG_REASON_SINGLESTEP;
1807 if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
1808 if (halt_cause & DEBUGCAUSE_DB)
1809 target->debug_reason = DBG_REASON_WPTANDBKPT;
1810 else
1811 target->debug_reason = DBG_REASON_BREAKPOINT;
1812 } else if (halt_cause & DEBUGCAUSE_DB) {
1813 target->debug_reason = DBG_REASON_WATCHPOINT;
1814 }
1815 LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIX32 ", debug_reason=%08x, oldstate=%08x",
1816 xtensa_reg_get(target, XT_REG_IDX_PC),
1817 target->debug_reason,
1818 oldstate);
1819 LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
1820 halt_cause,
1821 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
1822 xtensa->dbg_mod.core_status.dsr);
1823 LOG_TARGET_INFO(target, "Target halted, PC=0x%08" PRIX32 ", debug_reason=%08x",
1824 xtensa_reg_get(target, XT_REG_IDX_PC), target->debug_reason);
1825 xtensa_dm_core_status_clear(
1826 &xtensa->dbg_mod,
1827 OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
1828 OCDDSR_DEBUGINTTRAX |
1829 OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
1830 }
1831 } else {
1832 target->debug_reason = DBG_REASON_NOTHALTED;
1833 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
1834 target->state = TARGET_RUNNING;
1835 target->debug_reason = DBG_REASON_NOTHALTED;
1836 }
1837 }
1838 if (xtensa->trace_active) {
1839 /* Detect if tracing was active but has stopped. */
1840 struct xtensa_trace_status trace_status;
1841 res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
1842 if (res == ERROR_OK) {
1843 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
1844 LOG_INFO("Detected end of trace.");
1845 if (trace_status.stat & TRAXSTAT_PCMTG)
1846 LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
1847 if (trace_status.stat & TRAXSTAT_PTITG)
1848 LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
1849 if (trace_status.stat & TRAXSTAT_CTITG)
1850 LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
1851 xtensa->trace_active = false;
1852 }
1853 }
1854 }
1855 return ERROR_OK;
1856 }
1857
1858 static int xtensa_sw_breakpoint_add(struct target *target,
1859 struct breakpoint *breakpoint,
1860 struct xtensa_sw_breakpoint *sw_bp)
1861 {
1862 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
1863 if (ret != ERROR_OK) {
1864 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
1865 return ret;
1866 }
1867
1868 sw_bp->insn_sz = xtensa_insn_size_get(buf_get_u32(sw_bp->insn, 0, 24));
1869 sw_bp->oocd_bp = breakpoint;
1870
1871 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(0, 0) : XT_INS_BREAKN(0);
1872 /* convert to target endianness */
1873 uint8_t break_insn_buff[4];
1874 target_buffer_set_u32(target, break_insn_buff, break_insn);
1875
1876 ret = target_write_buffer(target, breakpoint->address, sw_bp->insn_sz, break_insn_buff);
1877 if (ret != ERROR_OK) {
1878 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
1879 return ret;
1880 }
1881
1882 return ERROR_OK;
1883 }
1884
1885 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
1886 {
1887 int ret = target_write_buffer(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
1888 if (ret != ERROR_OK) {
1889 LOG_TARGET_ERROR(target, "Failed to read insn (%d)!", ret);
1890 return ret;
1891 }
1892 sw_bp->oocd_bp = NULL;
1893 return ERROR_OK;
1894 }
1895
1896 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
1897 {
1898 struct xtensa *xtensa = target_to_xtensa(target);
1899 unsigned int slot;
1900
1901 if (breakpoint->type == BKPT_SOFT) {
1902 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
1903 if (!xtensa->sw_brps[slot].oocd_bp ||
1904 xtensa->sw_brps[slot].oocd_bp == breakpoint)
1905 break;
1906 }
1907 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
1908 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
1909 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1910 }
1911 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
1912 if (ret != ERROR_OK) {
1913 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
1914 return ret;
1915 }
1916 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
1917 slot,
1918 breakpoint->address);
1919 return ERROR_OK;
1920 }
1921
1922 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1923 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
1924 break;
1925 }
1926 if (slot == xtensa->core_config->debug.ibreaks_num) {
1927 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
1928 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1929 }
1930
1931 xtensa->hw_brps[slot] = breakpoint;
1932 /* We will actually write the breakpoints when we resume the target. */
1933 LOG_TARGET_DEBUG(target, "placed HW breakpoint @ " TARGET_ADDR_FMT,
1934 breakpoint->address);
1935
1936 return ERROR_OK;
1937 }
1938
1939 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
1940 {
1941 struct xtensa *xtensa = target_to_xtensa(target);
1942 unsigned int slot;
1943
1944 if (breakpoint->type == BKPT_SOFT) {
1945 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
1946 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
1947 break;
1948 }
1949 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
1950 LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
1951 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1952 }
1953 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
1954 if (ret != ERROR_OK) {
1955 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
1956 return ret;
1957 }
1958 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
1959 return ERROR_OK;
1960 }
1961
1962 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1963 if (xtensa->hw_brps[slot] == breakpoint)
1964 break;
1965 }
1966 if (slot == xtensa->core_config->debug.ibreaks_num) {
1967 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
1968 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1969 }
1970 xtensa->hw_brps[slot] = NULL;
1971 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
1972 return ERROR_OK;
1973 }
1974
1975 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
1976 {
1977 struct xtensa *xtensa = target_to_xtensa(target);
1978 unsigned int slot;
1979 xtensa_reg_val_t dbreakcval;
1980
1981 if (target->state != TARGET_HALTED) {
1982 LOG_TARGET_WARNING(target, "target not halted");
1983 return ERROR_TARGET_NOT_HALTED;
1984 }
1985
1986 if (watchpoint->mask != ~(uint32_t)0) {
1987 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
1988 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1989 }
1990
1991 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1992 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
1993 break;
1994 }
1995 if (slot == xtensa->core_config->debug.dbreaks_num) {
1996 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
1997 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1998 }
1999
2000 /* Figure out value for dbreakc5..0
2001 * It's basically 0x3F with an incremental bit removed from the LSB for each extra length power of 2. */
2002 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2003 !IS_PWR_OF_2(watchpoint->length) ||
2004 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2005 LOG_TARGET_WARNING(
2006 target,
2007 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2008 " not supported by hardware.",
2009 watchpoint->length,
2010 watchpoint->address);
2011 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2012 }
2013 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2014
2015 if (watchpoint->rw == WPT_READ)
2016 dbreakcval |= BIT(30);
2017 if (watchpoint->rw == WPT_WRITE)
2018 dbreakcval |= BIT(31);
2019 if (watchpoint->rw == WPT_ACCESS)
2020 dbreakcval |= BIT(30) | BIT(31);
2021
2022 /* Write DBREAKA[slot] and DBCREAKC[slot] */
2023 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2024 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2025 xtensa->hw_wps[slot] = watchpoint;
2026 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2027 watchpoint->address);
2028 return ERROR_OK;
2029 }
2030
2031 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2032 {
2033 struct xtensa *xtensa = target_to_xtensa(target);
2034 unsigned int slot;
2035
2036 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2037 if (xtensa->hw_wps[slot] == watchpoint)
2038 break;
2039 }
2040 if (slot == xtensa->core_config->debug.dbreaks_num) {
2041 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2042 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2043 }
2044 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2045 xtensa->hw_wps[slot] = NULL;
2046 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2047 watchpoint->address);
2048 return ERROR_OK;
2049 }
2050
/* Build the OpenOCD register cache for this target: one entry per core
 * register (existence gated by the core configuration) plus the configured
 * user registers, then map GDB register numbers onto the cache and allocate
 * per-register backup buffers for algorithm runs.
 * Returns ERROR_OK, or ERROR_FAIL after releasing every partial allocation. */
static int xtensa_build_reg_cache(struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));

	if (!reg_cache) {
		LOG_ERROR("Failed to alloc reg cache!");
		return ERROR_FAIL;
	}
	reg_cache->name = "Xtensa registers";
	reg_cache->next = NULL;
	reg_cache->num_regs = XT_NUM_REGS + xtensa->core_config->user_regs_num;
	/* Init reglist */
	/* calloc zero-fills, so the `fail` path may free() every .value unconditionally. */
	struct reg *reg_list = calloc(reg_cache->num_regs, sizeof(struct reg));
	if (!reg_list) {
		LOG_ERROR("Failed to alloc reg list!");
		goto fail;
	}
	xtensa->regs_num = 0;

	/* Mark each core register present/absent according to its type and the
	 * core configuration; only existing registers count toward regs_num. */
	for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
		reg_list[i].exist = false;
		if (xtensa_regs[i].type == XT_REG_USER) {
			if (xtensa_user_reg_exists(xtensa, i))
				reg_list[i].exist = true;
			else
				LOG_DEBUG("User reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
		} else if (xtensa_regs[i].type == XT_REG_FR) {
			if (xtensa_fp_reg_exists(xtensa, i))
				reg_list[i].exist = true;
			else
				LOG_DEBUG("FP reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
		} else if (xtensa_regs[i].type == XT_REG_SPECIAL) {
			if (xtensa_special_reg_exists(xtensa, i))
				reg_list[i].exist = true;
			else
				LOG_DEBUG("Special reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
		} else {
			if (xtensa_regular_reg_exists(xtensa, i))
				reg_list[i].exist = true;
			else
				LOG_DEBUG("Regular reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
		}
		reg_list[i].name = xtensa_regs[i].name;
		reg_list[i].size = 32;
		reg_list[i].value = calloc(1, 4 /*XT_REG_LEN*/);/* make Clang Static Analyzer happy */
		if (!reg_list[i].value) {
			LOG_ERROR("Failed to alloc reg list value!");
			goto fail;
		}
		reg_list[i].dirty = false;
		reg_list[i].valid = false;
		reg_list[i].type = &xtensa_reg_type;
		reg_list[i].arch_info = xtensa;
		if (reg_list[i].exist)
			xtensa->regs_num++;
	}
	/* User registers from the core config always exist; their size and reg
	 * type come from the configuration entry. */
	for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
		reg_list[XT_USR_REG_START + i].exist = true;
		reg_list[XT_USR_REG_START + i].name = xtensa->core_config->user_regs[i].name;
		reg_list[XT_USR_REG_START + i].size = xtensa->core_config->user_regs[i].size;
		reg_list[XT_USR_REG_START + i].value = calloc(1, reg_list[XT_USR_REG_START + i].size / 8);
		if (!reg_list[XT_USR_REG_START + i].value) {
			LOG_ERROR("Failed to alloc user reg list value!");
			goto fail;
		}
		reg_list[XT_USR_REG_START + i].dirty = false;
		reg_list[XT_USR_REG_START + i].valid = false;
		reg_list[XT_USR_REG_START + i].type = xtensa->core_config->user_regs[i].type;
		reg_list[XT_USR_REG_START + i].arch_info = xtensa;
		xtensa->regs_num++;
	}
	/* GDB's "general" register set must be a strict subset of the existing registers. */
	if (xtensa->core_config->gdb_general_regs_num >= xtensa->regs_num) {
		LOG_ERROR("Regs number less then GDB general regs number!");
		goto fail;
	}

	/* assign GDB reg numbers to registers */
	for (unsigned int gdb_reg_id = 0; gdb_reg_id < xtensa->regs_num; gdb_reg_id++) {
		unsigned int reg_id = xtensa->core_config->gdb_regs_mapping[gdb_reg_id];
		if (reg_id >= reg_cache->num_regs) {
			LOG_ERROR("Invalid GDB map!");
			goto fail;
		}
		if (!reg_list[reg_id].exist) {
			LOG_ERROR("Non-existing reg in GDB map!");
			goto fail;
		}
		reg_list[reg_id].number = gdb_reg_id;
	}
	reg_cache->reg_list = reg_list;

	/* Per-register scratch buffers used to save/restore context around
	 * algorithm execution on the target. */
	xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
	if (!xtensa->algo_context_backup) {
		LOG_ERROR("Failed to alloc mem for algorithm context backup!");
		goto fail;
	}
	for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
		struct reg *reg = &reg_cache->reg_list[i];
		xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
		if (!xtensa->algo_context_backup[i]) {
			LOG_ERROR("Failed to alloc mem for algorithm context!");
			goto fail;
		}
	}

	xtensa->core_cache = reg_cache;
	if (cache_p)
		*cache_p = reg_cache;
	return ERROR_OK;

fail:
	/* All buffers were calloc'd, so unfilled slots are NULL and free() is safe. */
	if (reg_list) {
		for (unsigned int i = 0; i < reg_cache->num_regs; i++)
			free(reg_list[i].value);
		free(reg_list);
	}
	if (xtensa->algo_context_backup) {
		for (unsigned int i = 0; i < reg_cache->num_regs; i++)
			free(xtensa->algo_context_backup[i]);
		free(xtensa->algo_context_backup);
	}
	free(reg_cache);

	return ERROR_FAIL;
}
2178
2179 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2180 const struct xtensa_config *xtensa_config,
2181 const struct xtensa_debug_module_config *dm_cfg)
2182 {
2183 target->arch_info = xtensa;
2184 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2185 xtensa->target = target;
2186 xtensa->core_config = xtensa_config;
2187 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2188
2189 if (!xtensa->core_config->exc.enabled || !xtensa->core_config->irq.enabled ||
2190 !xtensa->core_config->high_irq.enabled || !xtensa->core_config->debug.enabled) {
2191 LOG_ERROR("Xtensa configuration does not support debugging!");
2192 return ERROR_FAIL;
2193 }
2194 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2195 }
2196
2197 void xtensa_set_permissive_mode(struct target *target, bool state)
2198 {
2199 target_to_xtensa(target)->permissive_mode = state;
2200 }
2201
2202 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2203 {
2204 struct xtensa *xtensa = target_to_xtensa(target);
2205
2206 xtensa->come_online_probes_num = 3;
2207 xtensa->hw_brps = calloc(xtensa->core_config->debug.ibreaks_num, sizeof(struct breakpoint *));
2208 if (!xtensa->hw_brps) {
2209 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2210 return ERROR_FAIL;
2211 }
2212 xtensa->hw_wps = calloc(xtensa->core_config->debug.dbreaks_num, sizeof(struct watchpoint *));
2213 if (!xtensa->hw_wps) {
2214 free(xtensa->hw_brps);
2215 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2216 return ERROR_FAIL;
2217 }
2218 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2219 if (!xtensa->sw_brps) {
2220 free(xtensa->hw_brps);
2221 free(xtensa->hw_wps);
2222 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2223 return ERROR_FAIL;
2224 }
2225
2226 return xtensa_build_reg_cache(target);
2227 }
2228
2229 static void xtensa_free_reg_cache(struct target *target)
2230 {
2231 struct xtensa *xtensa = target_to_xtensa(target);
2232 struct reg_cache *cache = xtensa->core_cache;
2233
2234 if (cache) {
2235 register_unlink_cache(&target->reg_cache, cache);
2236 for (unsigned int i = 0; i < cache->num_regs; i++) {
2237 free(xtensa->algo_context_backup[i]);
2238 free(cache->reg_list[i].value);
2239 }
2240 free(xtensa->algo_context_backup);
2241 free(cache->reg_list);
2242 free(cache);
2243 }
2244 xtensa->core_cache = NULL;
2245 xtensa->algo_context_backup = NULL;
2246 }
2247
2248 void xtensa_target_deinit(struct target *target)
2249 {
2250 struct xtensa *xtensa = target_to_xtensa(target);
2251
2252 LOG_DEBUG("start");
2253
2254 if (target_was_examined(target)) {
2255 int ret = xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, OCDDCR_ENABLEOCD);
2256 if (ret != ERROR_OK) {
2257 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2258 return;
2259 }
2260 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2261 ret = jtag_execute_queue();
2262 if (ret != ERROR_OK) {
2263 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2264 return;
2265 }
2266 }
2267 xtensa_free_reg_cache(target);
2268 free(xtensa->hw_brps);
2269 free(xtensa->hw_wps);
2270 free(xtensa->sw_brps);
2271 }
2272
/* GDB architecture name reported for every Xtensa target; the target
 * argument is unused. */
const char *xtensa_get_gdb_arch(struct target *target)
{
	(void)target;
	return "xtensa";
}
2277
/* Parse an on/off command argument into xtensa->permissive_mode, which
 * controls whether memory accesses skip range validation. */
COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
		&xtensa->permissive_mode, "xtensa permissive mode");
}
2283
/* Command wrapper: resolve the current target and delegate to the helper. */
COMMAND_HANDLER(xtensa_cmd_permissive_mode)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
2289
2290 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
2291 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
2292 {
2293 struct xtensa_perfmon_config config = {
2294 .mask = 0xffff,
2295 .kernelcnt = 0,
2296 .tracelevel = -1 /* use DEBUGLEVEL by default */
2297 };
2298
2299 if (CMD_ARGC < 2 || CMD_ARGC > 6)
2300 return ERROR_COMMAND_SYNTAX_ERROR;
2301
2302 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
2303 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
2304 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
2305 return ERROR_COMMAND_ARGUMENT_INVALID;
2306 }
2307
2308 config.select = strtoul(CMD_ARGV[1], NULL, 0);
2309 if (config.select > XTENSA_MAX_PERF_SELECT) {
2310 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
2311 return ERROR_COMMAND_ARGUMENT_INVALID;
2312 }
2313
2314 if (CMD_ARGC >= 3) {
2315 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
2316 if (config.mask > XTENSA_MAX_PERF_MASK) {
2317 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
2318 return ERROR_COMMAND_ARGUMENT_INVALID;
2319 }
2320 }
2321
2322 if (CMD_ARGC >= 4) {
2323 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
2324 if (config.kernelcnt > 1) {
2325 command_print(CMD, "kernelcnt should be 0 or 1");
2326 return ERROR_COMMAND_ARGUMENT_INVALID;
2327 }
2328 }
2329
2330 if (CMD_ARGC >= 5) {
2331 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
2332 if (config.tracelevel > 7) {
2333 command_print(CMD, "tracelevel should be <=7");
2334 return ERROR_COMMAND_ARGUMENT_INVALID;
2335 }
2336 }
2337
2338 if (config.tracelevel == -1)
2339 config.tracelevel = xtensa->core_config->debug.irq_level;
2340
2341 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
2342 }
2343
/* Command wrapper: resolve the current target and delegate to the helper. */
COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
2349
2350 /* perfmon_dump [counter_id] */
2351 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
2352 {
2353 if (CMD_ARGC > 1)
2354 return ERROR_COMMAND_SYNTAX_ERROR;
2355
2356 int counter_id = -1;
2357 if (CMD_ARGC == 1) {
2358 counter_id = strtol(CMD_ARGV[0], NULL, 0);
2359 if (counter_id > XTENSA_MAX_PERF_COUNTERS) {
2360 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
2361 return ERROR_COMMAND_ARGUMENT_INVALID;
2362 }
2363 }
2364
2365 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
2366 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
2367 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
2368 char result_buf[128] = { 0 };
2369 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
2370 struct xtensa_perfmon_result result;
2371 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
2372 if (res != ERROR_OK)
2373 return res;
2374 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
2375 "%-12" PRIu64 "%s",
2376 result.value,
2377 result.overflow ? " (overflow)" : "");
2378 LOG_INFO("%s", result_buf);
2379 }
2380
2381 return ERROR_OK;
2382 }
2383
/* Command wrapper: resolve the current target and delegate to the helper. */
COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
2389
2390 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
2391 {
2392 int state = -1;
2393
2394 if (CMD_ARGC < 1) {
2395 const char *st;
2396 state = xtensa->stepping_isr_mode;
2397 if (state == XT_STEPPING_ISR_ON)
2398 st = "OFF";
2399 else if (state == XT_STEPPING_ISR_OFF)
2400 st = "ON";
2401 else
2402 st = "UNKNOWN";
2403 command_print(CMD, "Current ISR step mode: %s", st);
2404 return ERROR_OK;
2405 }
2406 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
2407 if (!strcasecmp(CMD_ARGV[0], "off"))
2408 state = XT_STEPPING_ISR_ON;
2409 else if (!strcasecmp(CMD_ARGV[0], "on"))
2410 state = XT_STEPPING_ISR_OFF;
2411
2412 if (state == -1) {
2413 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
2414 return ERROR_FAIL;
2415 }
2416 xtensa->stepping_isr_mode = state;
2417 return ERROR_OK;
2418 }
2419
/* Command wrapper: resolve the current target and delegate to the helper. */
COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
		target_to_xtensa(get_current_target(CMD_CTX)));
}
2425
2426 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
2427 {
2428 int res = ERROR_OK;
2429 uint32_t val = 0;
2430
2431 if (CMD_ARGC >= 1) {
2432 for (unsigned int i = 0; i < CMD_ARGC; i++) {
2433 if (!strcasecmp(CMD_ARGV[0], "none")) {
2434 val = 0;
2435 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
2436 val |= OCDDCR_BREAKINEN;
2437 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
2438 val |= OCDDCR_BREAKOUTEN;
2439 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
2440 val |= OCDDCR_RUNSTALLINEN;
2441 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
2442 val |= OCDDCR_DEBUGMODEOUTEN;
2443 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
2444 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
2445 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
2446 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
2447 } else {
2448 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
2449 command_print(
2450 CMD,
2451 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
2452 return ERROR_OK;
2453 }
2454 }
2455 res = xtensa_smpbreak_set(target, val);
2456 if (res != ERROR_OK)
2457 command_print(CMD, "Failed to set smpbreak config %d", res);
2458 } else {
2459 struct xtensa *xtensa = target_to_xtensa(target);
2460 res = xtensa_smpbreak_read(xtensa, &val);
2461 if (res == ERROR_OK) {
2462 command_print(CMD, "Current bits set:%s%s%s%s",
2463 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
2464 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
2465 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
2466 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
2467 );
2468 } else {
2469 command_print(CMD, "Failed to get smpbreak config %d", res);
2470 }
2471 }
2472 return res;
2473 }
2474
/* Command wrapper: pass the current target straight to the helper. */
COMMAND_HANDLER(xtensa_cmd_smpbreak)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
		get_current_target(CMD_CTX));
}
2480
2481 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
2482 {
2483 struct xtensa_trace_status trace_status;
2484 struct xtensa_trace_start_config cfg = {
2485 .stoppc = 0,
2486 .stopmask = XTENSA_STOPMASK_DISABLED,
2487 .after = 0,
2488 .after_is_words = false
2489 };
2490
2491 /* Parse arguments */
2492 for (unsigned int i = 0; i < CMD_ARGC; i++) {
2493 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
2494 char *e;
2495 i++;
2496 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
2497 cfg.stopmask = 0;
2498 if (*e == '/')
2499 cfg.stopmask = strtol(e, NULL, 0);
2500 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
2501 i++;
2502 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
2503 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
2504 cfg.after_is_words = 0;
2505 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
2506 cfg.after_is_words = 1;
2507 } else {
2508 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
2509 return ERROR_FAIL;
2510 }
2511 }
2512
2513 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2514 if (res != ERROR_OK)
2515 return res;
2516 if (trace_status.stat & TRAXSTAT_TRACT) {
2517 LOG_WARNING("Silently stop active tracing!");
2518 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
2519 if (res != ERROR_OK)
2520 return res;
2521 }
2522
2523 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
2524 if (res != ERROR_OK)
2525 return res;
2526
2527 xtensa->trace_active = true;
2528 command_print(CMD, "Trace started.");
2529 return ERROR_OK;
2530 }
2531
2532 COMMAND_HANDLER(xtensa_cmd_tracestart)
2533 {
2534 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
2535 target_to_xtensa(get_current_target(CMD_CTX)));
2536 }
2537
2538 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
2539 {
2540 struct xtensa_trace_status trace_status;
2541
2542 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2543 if (res != ERROR_OK)
2544 return res;
2545
2546 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2547 command_print(CMD, "No trace is currently active.");
2548 return ERROR_FAIL;
2549 }
2550
2551 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
2552 if (res != ERROR_OK)
2553 return res;
2554
2555 xtensa->trace_active = false;
2556 command_print(CMD, "Trace stop triggered.");
2557 return ERROR_OK;
2558 }
2559
2560 COMMAND_HANDLER(xtensa_cmd_tracestop)
2561 {
2562 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
2563 target_to_xtensa(get_current_target(CMD_CTX)));
2564 }
2565
2566 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
2567 {
2568 struct xtensa_trace_config trace_config;
2569 struct xtensa_trace_status trace_status;
2570 uint32_t memsz, wmem;
2571
2572 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2573 if (res != ERROR_OK)
2574 return res;
2575
2576 if (trace_status.stat & TRAXSTAT_TRACT) {
2577 command_print(CMD, "Tracing is still active. Please stop it first.");
2578 return ERROR_FAIL;
2579 }
2580
2581 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
2582 if (res != ERROR_OK)
2583 return res;
2584
2585 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
2586 command_print(CMD, "No active trace found; nothing to dump.");
2587 return ERROR_FAIL;
2588 }
2589
2590 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
2591 LOG_INFO("Total trace memory: %d words", memsz);
2592 if ((trace_config.addr &
2593 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
2594 /*Memory hasn't overwritten itself yet. */
2595 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
2596 LOG_INFO("...but trace is only %d words", wmem);
2597 if (wmem < memsz)
2598 memsz = wmem;
2599 } else {
2600 if (trace_config.addr & TRAXADDR_TWSAT) {
2601 LOG_INFO("Real trace is many times longer than that (overflow)");
2602 } else {
2603 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
2604 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
2605 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
2606 }
2607 }
2608
2609 uint8_t *tracemem = malloc(memsz * 4);
2610 if (!tracemem) {
2611 command_print(CMD, "Failed to alloc memory for trace data!");
2612 return ERROR_FAIL;
2613 }
2614 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
2615 if (res != ERROR_OK) {
2616 free(tracemem);
2617 return res;
2618 }
2619
2620 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
2621 if (f <= 0) {
2622 free(tracemem);
2623 command_print(CMD, "Unable to open file %s", fname);
2624 return ERROR_FAIL;
2625 }
2626 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
2627 command_print(CMD, "Unable to write to file %s", fname);
2628 else
2629 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
2630 close(f);
2631
2632 bool is_all_zeroes = true;
2633 for (unsigned int i = 0; i < memsz * 4; i++) {
2634 if (tracemem[i] != 0) {
2635 is_all_zeroes = false;
2636 break;
2637 }
2638 }
2639 free(tracemem);
2640 if (is_all_zeroes)
2641 command_print(
2642 CMD,
2643 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
2644
2645 return ERROR_OK;
2646 }
2647
2648 COMMAND_HANDLER(xtensa_cmd_tracedump)
2649 {
2650 if (CMD_ARGC != 1) {
2651 command_print(CMD, "Command takes exactly 1 parameter.Need filename to dump to as output!");
2652 return ERROR_FAIL;
2653 }
2654
2655 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
2656 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
2657 }
2658
2659 const struct command_registration xtensa_command_handlers[] = {
2660 {
2661 .name = "set_permissive",
2662 .handler = xtensa_cmd_permissive_mode,
2663 .mode = COMMAND_ANY,
2664 .help = "When set to 1, enable Xtensa permissive mode (less client-side checks)",
2665 .usage = "[0|1]",
2666 },
2667 {
2668 .name = "maskisr",
2669 .handler = xtensa_cmd_mask_interrupts,
2670 .mode = COMMAND_ANY,
2671 .help = "mask Xtensa interrupts at step",
2672 .usage = "['on'|'off']",
2673 },
2674 {
2675 .name = "smpbreak",
2676 .handler = xtensa_cmd_smpbreak,
2677 .mode = COMMAND_ANY,
2678 .help = "Set the way the CPU chains OCD breaks",
2679 .usage =
2680 "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
2681 },
2682 {
2683 .name = "perfmon_enable",
2684 .handler = xtensa_cmd_perfmon_enable,
2685 .mode = COMMAND_EXEC,
2686 .help = "Enable and start performance counter",
2687 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
2688 },
2689 {
2690 .name = "perfmon_dump",
2691 .handler = xtensa_cmd_perfmon_dump,
2692 .mode = COMMAND_EXEC,
2693 .help =
2694 "Dump performance counter value. If no argument specified, dumps all counters.",
2695 .usage = "[counter_id]",
2696 },
2697 {
2698 .name = "tracestart",
2699 .handler = xtensa_cmd_tracestart,
2700 .mode = COMMAND_EXEC,
2701 .help =
2702 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
2703 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
2704 },
2705 {
2706 .name = "tracestop",
2707 .handler = xtensa_cmd_tracestop,
2708 .mode = COMMAND_EXEC,
2709 .help = "Tracing: Stop current trace as started by the tracestart command",
2710 .usage = "",
2711 },
2712 {
2713 .name = "tracedump",
2714 .handler = xtensa_cmd_tracedump,
2715 .mode = COMMAND_EXEC,
2716 .help = "Tracing: Dump trace memory to a files. One file per core.",
2717 .usage = "<outfile>",
2718 },
2719 COMMAND_REGISTRATION_DONE
2720 };

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; it will then allow linking the new method. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)