target/espressif: remove author lines from esp32xx and xtensa files
[openocd.git] / src / target / xtensa / xtensa.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
6 * Derived from esp108.c *
7 * Author: Angus Gratton gus@projectgus.com *
8 ***************************************************************************/
9
10 #ifdef HAVE_CONFIG_H
11 #include "config.h"
12 #endif
13
14 #include <stdlib.h>
15 #include <helper/time_support.h>
16 #include <helper/align.h>
17 #include <target/register.h>
18
19 #include "xtensa.h"
20
21
22 #define _XT_INS_FORMAT_RSR(OPCODE, SR, T) ((OPCODE) \
23 | (((SR) & 0xFF) << 8) \
24 | (((T) & 0x0F) << 4))
25
26 #define _XT_INS_FORMAT_RRR(OPCODE, ST, R) ((OPCODE) \
27 | (((ST) & 0xFF) << 4) \
28 | (((R) & 0x0F) << 12))
29
30 #define _XT_INS_FORMAT_RRRN(OPCODE, S, T, IMM4) ((OPCODE) \
31 | (((T) & 0x0F) << 4) \
32 | (((S) & 0x0F) << 8) \
33 | (((IMM4) & 0x0F) << 12))
34
35 #define _XT_INS_FORMAT_RRI8(OPCODE, R, S, T, IMM8) ((OPCODE) \
36 | (((IMM8) & 0xFF) << 16) \
37 | (((R) & 0x0F) << 12) \
38 | (((S) & 0x0F) << 8) \
39 | (((T) & 0x0F) << 4))
40
41 #define _XT_INS_FORMAT_RRI4(OPCODE, IMM4, R, S, T) ((OPCODE) \
42 | (((IMM4) & 0x0F) << 20) \
43 | (((R) & 0x0F) << 12) \
44 | (((S) & 0x0F) << 8) \
45 | (((T) & 0x0F) << 4))
46
47 /* Xtensa processor instruction opcodes
48 * "Return From Debug Operation" to Normal */
49 #define XT_INS_RFDO 0xf1e000
50 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
51 #define XT_INS_RFDD 0xf1e010
52
53 /* Load to DDR register, increase addr register */
54 #define XT_INS_LDDR32P(S) (0x0070E0 | ((S) << 8))
55 /* Store from DDR register, increase addr register */
56 #define XT_INS_SDDR32P(S) (0x0070F0 | ((S) << 8))
57
58 /* Load 32-bit Indirect from A(S) + 4 * IMM8 to A(T) */
59 #define XT_INS_L32I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x002002, 0, S, T, IMM8)
60 /* Load 16-bit Unsigned from A(S) + 2 * IMM8 to A(T) */
61 #define XT_INS_L16UI(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x001002, 0, S, T, IMM8)
62 /* Load 8-bit Unsigned from A(S) + IMM8 to A(T) */
63 #define XT_INS_L8UI(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x000002, 0, S, T, IMM8)
64
65 /* Store 32-bit Indirect to A(S) + 4 * IMM8 from A(T) */
66 #define XT_INS_S32I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x006002, 0, S, T, IMM8)
67 /* Store 16-bit to A(S) + 2 * IMM8 from A(T) */
68 #define XT_INS_S16I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x005002, 0, S, T, IMM8)
69 /* Store 8-bit to A(S) + IMM8 from A(T) */
70 #define XT_INS_S8I(S, T, IMM8) _XT_INS_FORMAT_RRI8(0x004002, 0, S, T, IMM8)
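/* Worked example (illustration only, not used by the code): _XT_INS_FORMAT_RRI8
 * packs IMM8 into bits 23..16, R into 15..12, S into 11..8 and T into 7..4 on top
 * of the base opcode, so XT_INS_L32I(3, 4, 5) evaluates to
 * 0x002002 | (5 << 16) | (3 << 8) | (4 << 4) == 0x052342,
 * i.e. "l32i a4, a3, 20" (the byte offset is 4 * IMM8, per the comment above). */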
71
72 /* Read Special Register */
73 #define XT_INS_RSR(SR, T) _XT_INS_FORMAT_RSR(0x030000, SR, T)
74 /* Write Special Register */
75 #define XT_INS_WSR(SR, T) _XT_INS_FORMAT_RSR(0x130000, SR, T)
76 /* Swap Special Register */
77 #define XT_INS_XSR(SR, T) _XT_INS_FORMAT_RSR(0x610000, SR, T)
78
79 /* Rotate Window by (-8..7) */
80 #define XT_INS_ROTW(N) ((0x408000) | (((N) & 15) << 4))
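/* Note (for the window rotation used later in this file): ROTW adds its signed
 * immediate to WINDOWBASE, and one WINDOWBASE unit covers four AR registers, so
 * XT_INS_ROTW(4) moves the visible A0..A15 window forward by 16 physical ARs. */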
81
82 /* Read User Register */
83 #define XT_INS_RUR(UR, T) _XT_INS_FORMAT_RRR(0xE30000, UR, T)
84 /* Write User Register */
85 #define XT_INS_WUR(UR, T) _XT_INS_FORMAT_RSR(0xF30000, UR, T)
86
87 /* Read Floating-Point Register */
88 #define XT_INS_RFR(FR, T) _XT_INS_FORMAT_RRR(0xFA0000, (((FR) << 4) | 0x4), T)
89 /* Write Floating-Point Register */
90 #define XT_INS_WFR(FR, T) _XT_INS_FORMAT_RRR(0xFA0000, (((FR) << 4) | 0x5), T)
91
92 /* 32-bit break */
93 #define XT_INS_BREAK(IMM1, IMM2) _XT_INS_FORMAT_RRR(0x000000, \
94 (((IMM1) & 0x0F) << 4) | ((IMM2) & 0x0F), 0x4)
95 /* 16-bit break */
96 #define XT_INS_BREAKN(IMM4) _XT_INS_FORMAT_RRRN(0x00000D, IMM4, 0x2, 0xF)
97
98 #define XT_INS_L32E(R, S, T) _XT_INS_FORMAT_RRI4(0x90000, 0, R, S, T)
99 #define XT_INS_S32E(R, S, T) _XT_INS_FORMAT_RRI4(0x490000, 0, R, S, T)
100 #define XT_INS_L32E_S32E_MASK 0xFF000F
101
102 #define XT_INS_RFWO 0x3400
103 #define XT_INS_RFWU 0x3500
104 #define XT_INS_RFWO_RFWU_MASK 0xFFFFFF
105
106 #define XT_WATCHPOINTS_NUM_MAX 2
107
108 /* Special register number macro for the DDR register.
109 * This gets used a lot, so having a shortcut to it is
110 * useful.
111 */
112 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_OCD_DDR].reg_num)
113
114 /*Same thing for A3/A4 */
115 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
116 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
117
118 #define XT_PC_REG_NUM_BASE (176)
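/* Note: 176 == 0xB0 is the EPC "base"; EPC1..EPC7 are special registers 0xB1..0xB7
 * (see the table below), so the PC seen at a debug interrupt is accessed as special
 * register XT_PC_REG_NUM_BASE + debug.irq_level, as done in the code further down. */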
119 #define XT_SW_BREAKPOINTS_MAX_NUM 32
120
121 const struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
122 { "pc", XT_PC_REG_NUM_BASE /*+XT_DEBUGLEVEL*/, XT_REG_SPECIAL, 0 }, /* actually epc[debuglevel] */
123 { "ar0", 0x00, XT_REG_GENERAL, 0 },
124 { "ar1", 0x01, XT_REG_GENERAL, 0 },
125 { "ar2", 0x02, XT_REG_GENERAL, 0 },
126 { "ar3", 0x03, XT_REG_GENERAL, 0 },
127 { "ar4", 0x04, XT_REG_GENERAL, 0 },
128 { "ar5", 0x05, XT_REG_GENERAL, 0 },
129 { "ar6", 0x06, XT_REG_GENERAL, 0 },
130 { "ar7", 0x07, XT_REG_GENERAL, 0 },
131 { "ar8", 0x08, XT_REG_GENERAL, 0 },
132 { "ar9", 0x09, XT_REG_GENERAL, 0 },
133 { "ar10", 0x0A, XT_REG_GENERAL, 0 },
134 { "ar11", 0x0B, XT_REG_GENERAL, 0 },
135 { "ar12", 0x0C, XT_REG_GENERAL, 0 },
136 { "ar13", 0x0D, XT_REG_GENERAL, 0 },
137 { "ar14", 0x0E, XT_REG_GENERAL, 0 },
138 { "ar15", 0x0F, XT_REG_GENERAL, 0 },
139 { "ar16", 0x10, XT_REG_GENERAL, 0 },
140 { "ar17", 0x11, XT_REG_GENERAL, 0 },
141 { "ar18", 0x12, XT_REG_GENERAL, 0 },
142 { "ar19", 0x13, XT_REG_GENERAL, 0 },
143 { "ar20", 0x14, XT_REG_GENERAL, 0 },
144 { "ar21", 0x15, XT_REG_GENERAL, 0 },
145 { "ar22", 0x16, XT_REG_GENERAL, 0 },
146 { "ar23", 0x17, XT_REG_GENERAL, 0 },
147 { "ar24", 0x18, XT_REG_GENERAL, 0 },
148 { "ar25", 0x19, XT_REG_GENERAL, 0 },
149 { "ar26", 0x1A, XT_REG_GENERAL, 0 },
150 { "ar27", 0x1B, XT_REG_GENERAL, 0 },
151 { "ar28", 0x1C, XT_REG_GENERAL, 0 },
152 { "ar29", 0x1D, XT_REG_GENERAL, 0 },
153 { "ar30", 0x1E, XT_REG_GENERAL, 0 },
154 { "ar31", 0x1F, XT_REG_GENERAL, 0 },
155 { "ar32", 0x20, XT_REG_GENERAL, 0 },
156 { "ar33", 0x21, XT_REG_GENERAL, 0 },
157 { "ar34", 0x22, XT_REG_GENERAL, 0 },
158 { "ar35", 0x23, XT_REG_GENERAL, 0 },
159 { "ar36", 0x24, XT_REG_GENERAL, 0 },
160 { "ar37", 0x25, XT_REG_GENERAL, 0 },
161 { "ar38", 0x26, XT_REG_GENERAL, 0 },
162 { "ar39", 0x27, XT_REG_GENERAL, 0 },
163 { "ar40", 0x28, XT_REG_GENERAL, 0 },
164 { "ar41", 0x29, XT_REG_GENERAL, 0 },
165 { "ar42", 0x2A, XT_REG_GENERAL, 0 },
166 { "ar43", 0x2B, XT_REG_GENERAL, 0 },
167 { "ar44", 0x2C, XT_REG_GENERAL, 0 },
168 { "ar45", 0x2D, XT_REG_GENERAL, 0 },
169 { "ar46", 0x2E, XT_REG_GENERAL, 0 },
170 { "ar47", 0x2F, XT_REG_GENERAL, 0 },
171 { "ar48", 0x30, XT_REG_GENERAL, 0 },
172 { "ar49", 0x31, XT_REG_GENERAL, 0 },
173 { "ar50", 0x32, XT_REG_GENERAL, 0 },
174 { "ar51", 0x33, XT_REG_GENERAL, 0 },
175 { "ar52", 0x34, XT_REG_GENERAL, 0 },
176 { "ar53", 0x35, XT_REG_GENERAL, 0 },
177 { "ar54", 0x36, XT_REG_GENERAL, 0 },
178 { "ar55", 0x37, XT_REG_GENERAL, 0 },
179 { "ar56", 0x38, XT_REG_GENERAL, 0 },
180 { "ar57", 0x39, XT_REG_GENERAL, 0 },
181 { "ar58", 0x3A, XT_REG_GENERAL, 0 },
182 { "ar59", 0x3B, XT_REG_GENERAL, 0 },
183 { "ar60", 0x3C, XT_REG_GENERAL, 0 },
184 { "ar61", 0x3D, XT_REG_GENERAL, 0 },
185 { "ar62", 0x3E, XT_REG_GENERAL, 0 },
186 { "ar63", 0x3F, XT_REG_GENERAL, 0 },
187 { "lbeg", 0x00, XT_REG_SPECIAL, 0 },
188 { "lend", 0x01, XT_REG_SPECIAL, 0 },
189 { "lcount", 0x02, XT_REG_SPECIAL, 0 },
190 { "sar", 0x03, XT_REG_SPECIAL, 0 },
191 { "windowbase", 0x48, XT_REG_SPECIAL, 0 },
192 { "windowstart", 0x49, XT_REG_SPECIAL, 0 },
193 { "configid0", 0xB0, XT_REG_SPECIAL, 0 },
194 { "configid1", 0xD0, XT_REG_SPECIAL, 0 },
195 { "ps", 0xC6, XT_REG_SPECIAL, 0 }, /* actually EPS[debuglevel] */
196 { "threadptr", 0xE7, XT_REG_USER, 0 },
197 { "br", 0x04, XT_REG_SPECIAL, 0 },
198 { "scompare1", 0x0C, XT_REG_SPECIAL, 0 },
199 { "acclo", 0x10, XT_REG_SPECIAL, 0 },
200 { "acchi", 0x11, XT_REG_SPECIAL, 0 },
201 { "m0", 0x20, XT_REG_SPECIAL, 0 },
202 { "m1", 0x21, XT_REG_SPECIAL, 0 },
203 { "m2", 0x22, XT_REG_SPECIAL, 0 },
204 { "m3", 0x23, XT_REG_SPECIAL, 0 },
205 { "f0", 0x00, XT_REG_FR, XT_REGF_COPROC0 },
206 { "f1", 0x01, XT_REG_FR, XT_REGF_COPROC0 },
207 { "f2", 0x02, XT_REG_FR, XT_REGF_COPROC0 },
208 { "f3", 0x03, XT_REG_FR, XT_REGF_COPROC0 },
209 { "f4", 0x04, XT_REG_FR, XT_REGF_COPROC0 },
210 { "f5", 0x05, XT_REG_FR, XT_REGF_COPROC0 },
211 { "f6", 0x06, XT_REG_FR, XT_REGF_COPROC0 },
212 { "f7", 0x07, XT_REG_FR, XT_REGF_COPROC0 },
213 { "f8", 0x08, XT_REG_FR, XT_REGF_COPROC0 },
214 { "f9", 0x09, XT_REG_FR, XT_REGF_COPROC0 },
215 { "f10", 0x0A, XT_REG_FR, XT_REGF_COPROC0 },
216 { "f11", 0x0B, XT_REG_FR, XT_REGF_COPROC0 },
217 { "f12", 0x0C, XT_REG_FR, XT_REGF_COPROC0 },
218 { "f13", 0x0D, XT_REG_FR, XT_REGF_COPROC0 },
219 { "f14", 0x0E, XT_REG_FR, XT_REGF_COPROC0 },
220 { "f15", 0x0F, XT_REG_FR, XT_REGF_COPROC0 },
221 { "fcr", 0xE8, XT_REG_USER, XT_REGF_COPROC0 },
222 { "fsr", 0xE9, XT_REG_USER, XT_REGF_COPROC0 },
223 { "mmid", 0x59, XT_REG_SPECIAL, XT_REGF_NOREAD },
224 { "ibreakenable", 0x60, XT_REG_SPECIAL, 0 },
225 { "memctl", 0x61, XT_REG_SPECIAL, 0 },
226 { "atomctl", 0x63, XT_REG_SPECIAL, 0 },
227 { "ibreaka0", 0x80, XT_REG_SPECIAL, 0 },
228 { "ibreaka1", 0x81, XT_REG_SPECIAL, 0 },
229 { "dbreaka0", 0x90, XT_REG_SPECIAL, 0 },
230 { "dbreaka1", 0x91, XT_REG_SPECIAL, 0 },
231 { "dbreakc0", 0xA0, XT_REG_SPECIAL, 0 },
232 { "dbreakc1", 0xA1, XT_REG_SPECIAL, 0 },
233 { "epc1", 0xB1, XT_REG_SPECIAL, 0 },
234 { "epc2", 0xB2, XT_REG_SPECIAL, 0 },
235 { "epc3", 0xB3, XT_REG_SPECIAL, 0 },
236 { "epc4", 0xB4, XT_REG_SPECIAL, 0 },
237 { "epc5", 0xB5, XT_REG_SPECIAL, 0 },
238 { "epc6", 0xB6, XT_REG_SPECIAL, 0 },
239 { "epc7", 0xB7, XT_REG_SPECIAL, 0 },
240 { "depc", 0xC0, XT_REG_SPECIAL, 0 },
241 { "eps2", 0xC2, XT_REG_SPECIAL, 0 },
242 { "eps3", 0xC3, XT_REG_SPECIAL, 0 },
243 { "eps4", 0xC4, XT_REG_SPECIAL, 0 },
244 { "eps5", 0xC5, XT_REG_SPECIAL, 0 },
245 { "eps6", 0xC6, XT_REG_SPECIAL, 0 },
246 { "eps7", 0xC7, XT_REG_SPECIAL, 0 },
247 { "excsave1", 0xD1, XT_REG_SPECIAL, 0 },
248 { "excsave2", 0xD2, XT_REG_SPECIAL, 0 },
249 { "excsave3", 0xD3, XT_REG_SPECIAL, 0 },
250 { "excsave4", 0xD4, XT_REG_SPECIAL, 0 },
251 { "excsave5", 0xD5, XT_REG_SPECIAL, 0 },
252 { "excsave6", 0xD6, XT_REG_SPECIAL, 0 },
253 { "excsave7", 0xD7, XT_REG_SPECIAL, 0 },
254 { "cpenable", 0xE0, XT_REG_SPECIAL, 0 },
255 { "interrupt", 0xE2, XT_REG_SPECIAL, 0 },
256 { "intset", 0xE2, XT_REG_SPECIAL, XT_REGF_NOREAD },
257 { "intclear", 0xE3, XT_REG_SPECIAL, XT_REGF_NOREAD },
258 { "intenable", 0xE4, XT_REG_SPECIAL, 0 },
259 { "vecbase", 0xE7, XT_REG_SPECIAL, 0 },
260 { "exccause", 0xE8, XT_REG_SPECIAL, 0 },
261 { "debugcause", 0xE9, XT_REG_SPECIAL, 0 },
262 { "ccount", 0xEA, XT_REG_SPECIAL, 0 },
263 { "prid", 0xEB, XT_REG_SPECIAL, 0 },
264 { "icount", 0xEC, XT_REG_SPECIAL, 0 },
265 { "icountlevel", 0xED, XT_REG_SPECIAL, 0 },
266 { "excvaddr", 0xEE, XT_REG_SPECIAL, 0 },
267 { "ccompare0", 0xF0, XT_REG_SPECIAL, 0 },
268 { "ccompare1", 0xF1, XT_REG_SPECIAL, 0 },
269 { "ccompare2", 0xF2, XT_REG_SPECIAL, 0 },
270 { "misc0", 0xF4, XT_REG_SPECIAL, 0 },
271 { "misc1", 0xF5, XT_REG_SPECIAL, 0 },
272 { "misc2", 0xF6, XT_REG_SPECIAL, 0 },
273 { "misc3", 0xF7, XT_REG_SPECIAL, 0 },
274 { "litbase", 0x05, XT_REG_SPECIAL, 0 },
275 { "ptevaddr", 0x53, XT_REG_SPECIAL, 0 },
276 { "rasid", 0x5A, XT_REG_SPECIAL, 0 },
277 { "itlbcfg", 0x5B, XT_REG_SPECIAL, 0 },
278 { "dtlbcfg", 0x5C, XT_REG_SPECIAL, 0 },
279 { "mepc", 0x6A, XT_REG_SPECIAL, 0 },
280 { "meps", 0x6B, XT_REG_SPECIAL, 0 },
281 { "mesave", 0x6C, XT_REG_SPECIAL, 0 },
282 { "mesr", 0x6D, XT_REG_SPECIAL, 0 },
283 { "mecr", 0x6E, XT_REG_SPECIAL, 0 },
284 { "mevaddr", 0x6F, XT_REG_SPECIAL, 0 },
285 { "a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0 }, /* WARNING: For these registers, regnum points to the */
286 { "a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0 }, /* index of the corresponding ARx registers, NOT to */
287 { "a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0 }, /* the processor register number! */
288 { "a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0 },
289 { "a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0 },
290 { "a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0 },
291 { "a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0 },
292 { "a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0 },
293 { "a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0 },
294 { "a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0 },
295 { "a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0 },
296 { "a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0 },
297 { "a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0 },
298 { "a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0 },
299 { "a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0 },
300 { "a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0 },
301
302 { "pwrctl", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
303 { "pwrstat", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
304 { "eristat", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
305 { "cs_itctrl", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
306 { "cs_claimset", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
307 { "cs_claimclr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
308 { "cs_lockaccess", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
309 { "cs_lockstatus", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
310 { "cs_authstatus", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
311 { "fault_info", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
312 { "trax_id", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
313 { "trax_ctrl", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
314 { "trax_stat", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
315 { "trax_data", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
316 { "trax_addr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
317 { "trax_pctrigger", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
318 { "trax_pcmatch", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
319 { "trax_delay", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
320 { "trax_memstart", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
321 { "trax_memend", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
322 { "pmg", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
323 { "pmoc", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
324 { "pm0", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
325 { "pm1", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
326 { "pmctrl0", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
327 { "pmctrl1", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
328 { "pmstat0", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
329 { "pmstat1", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
330 { "ocd_id", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
331 { "ocd_dcrclr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
332 { "ocd_dcrset", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
333 { "ocd_dsr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
334 { "ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD },
335 };
336
337
338 /**
339 * Types of memory used by the Xtensa target
340 */
341 enum xtensa_mem_region_type {
342 XTENSA_MEM_REG_IROM = 0x0,
343 XTENSA_MEM_REG_IRAM,
344 XTENSA_MEM_REG_DROM,
345 XTENSA_MEM_REG_DRAM,
346 XTENSA_MEM_REG_URAM,
347 XTENSA_MEM_REG_XLMI,
348 XTENSA_MEM_REGS_NUM
349 };
350
351 /**
352 * Gets a config for the specific mem type
353 */
354 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
355 struct xtensa *xtensa,
356 enum xtensa_mem_region_type type)
357 {
358 switch (type) {
359 case XTENSA_MEM_REG_IROM:
360 return &xtensa->core_config->irom;
361 case XTENSA_MEM_REG_IRAM:
362 return &xtensa->core_config->iram;
363 case XTENSA_MEM_REG_DROM:
364 return &xtensa->core_config->drom;
365 case XTENSA_MEM_REG_DRAM:
366 return &xtensa->core_config->dram;
367 case XTENSA_MEM_REG_URAM:
368 return &xtensa->core_config->uram;
369 case XTENSA_MEM_REG_XLMI:
370 return &xtensa->core_config->xlmi;
371 default:
372 return NULL;
373 }
374 }
375
376 /**
377 * Extracts the exact xtensa_local_mem_region_config from xtensa_local_mem_config
378 * for a given address.
379 * Returns NULL if nothing is found.
380 */
381 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
382 const struct xtensa_local_mem_config *mem,
383 target_addr_t address)
384 {
385 for (unsigned int i = 0; i < mem->count; i++) {
386 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
387 if (address >= region->base && address < (region->base + region->size))
388 return region;
389 }
390 return NULL;
391 }
392
393 /**
394 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
395 * for a given address.
396 * Returns NULL if nothing is found.
397 */
398 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
399 struct xtensa *xtensa,
400 target_addr_t address)
401 {
402 const struct xtensa_local_mem_region_config *result;
403 const struct xtensa_local_mem_config *mcgf;
404 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
405 mcgf = xtensa_get_mem_config(xtensa, mtype);
406 result = xtensa_memory_region_find(mcgf, address);
407 if (result)
408 return result;
409 }
410 return NULL;
411 }
412
413 static int xtensa_core_reg_get(struct reg *reg)
414 {
415 /*We don't need this because we read all registers on halt anyway. */
416 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
417 struct target *target = xtensa->target;
418
419 if (target->state != TARGET_HALTED)
420 return ERROR_TARGET_NOT_HALTED;
421 return ERROR_OK;
422 }
423
424 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
425 {
426 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
427 struct target *target = xtensa->target;
428
429 assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
430 if (target->state != TARGET_HALTED)
431 return ERROR_TARGET_NOT_HALTED;
432
433 buf_cpy(buf, reg->value, reg->size);
434 reg->dirty = true;
435 reg->valid = true;
436
437 return ERROR_OK;
438 }
439
440 static const struct reg_arch_type xtensa_reg_type = {
441 .get = xtensa_core_reg_get,
442 .set = xtensa_core_reg_set,
443 };
444
445 const struct reg_arch_type xtensa_user_reg_u32_type = {
446 .get = xtensa_core_reg_get,
447 .set = xtensa_core_reg_set,
448 };
449
450 const struct reg_arch_type xtensa_user_reg_u128_type = {
451 .get = xtensa_core_reg_get,
452 .set = xtensa_core_reg_set,
453 };
454
455 static inline size_t xtensa_insn_size_get(uint32_t insn)
456 {
457 return insn & BIT(3) ? 2 : XT_ISNS_SZ_MAX;
458 }
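/* (Illustrative note: Xtensa 16-bit "density" instructions have the high bit of the
 * op0 nibble set, which is what the BIT(3) check above detects on little-endian
 * cores; everything else is assumed to be XT_ISNS_SZ_MAX, i.e. 3 bytes.) */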
459
460 /* Convert a register index that is relative to windowbase to the canonical (absolute) AR register index. */
461 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(enum xtensa_reg_id reg_idx, int windowbase)
462 {
463 unsigned int idx;
464 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_AR63) {
465 idx = reg_idx - XT_REG_IDX_AR0;
466 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
467 idx = reg_idx - XT_REG_IDX_A0;
468 } else {
469 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
470 return -1;
471 }
472 return ((idx + windowbase * 4) & 63) + XT_REG_IDX_AR0;
473 }
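/* Worked example: with WINDOWBASE == 2 the live window starts at AR8, so
 * XT_REG_IDX_A3 maps to ((3 + 2 * 4) & 63) + XT_REG_IDX_AR0, i.e. AR11;
 * the helper below is just the inverse (it applies -windowbase). */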
474
475 static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(enum xtensa_reg_id reg_idx, int windowbase)
476 {
477 return xtensa_windowbase_offset_to_canonical(reg_idx, -windowbase);
478 }
479
480 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
481 {
482 struct reg *reg_list = xtensa->core_cache->reg_list;
483 reg_list[reg_idx].dirty = true;
484 }
485
486 static int xtensa_queue_dbg_reg_read(struct xtensa *xtensa, unsigned int reg, uint8_t *data)
487 {
488 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
489
490 if (!xtensa->core_config->trace.enabled &&
491 (reg <= NARADR_MEMADDREND || (reg >= NARADR_PMG && reg <= NARADR_PMSTAT7))) {
492 LOG_ERROR("Can not access %u reg when Trace Port option disabled!", reg);
493 return ERROR_FAIL;
494 }
495 return dm->dbg_ops->queue_reg_read(dm, reg, data);
496 }
497
498 static int xtensa_queue_dbg_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
499 {
500 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
501
502 if (!xtensa->core_config->trace.enabled &&
503 (reg <= NARADR_MEMADDREND || (reg >= NARADR_PMG && reg <= NARADR_PMSTAT7))) {
504 LOG_ERROR("Can not access %u reg when Trace Port option disabled!", reg);
505 return ERROR_FAIL;
506 }
507 return dm->dbg_ops->queue_reg_write(dm, reg, data);
508 }
509
510 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
511 {
512 xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0EXEC, ins);
513 }
514
515 static bool xtensa_reg_is_readable(enum xtensa_reg_flags flags, xtensa_reg_val_t cpenable)
516 {
517 if (flags & XT_REGF_NOREAD)
518 return false;
519 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
520 return false;
521 return true;
522 }
523
524 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
525 {
526 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
527 return dm->pwr_ops->queue_reg_write(dm, reg, data);
528 }
529
530 static bool xtensa_special_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
531 {
532 /* TODO: array of size XT_NUM_REGS can be used here to map special register ID to
533 * corresponding config option 'enabled' flag */
534 if (reg_idx >= XT_REG_IDX_LBEG && reg_idx <= XT_REG_IDX_LCOUNT)
535 return xtensa->core_config->loop;
536 else if (reg_idx == XT_REG_IDX_BR)
537 return xtensa->core_config->boolean;
538 else if (reg_idx == XT_REG_IDX_LITBASE)
539 return xtensa->core_config->ext_l32r;
540 else if (reg_idx == XT_REG_IDX_SCOMPARE1 || reg_idx == XT_REG_IDX_ATOMCTL)
541 return xtensa->core_config->cond_store;
542 else if (reg_idx >= XT_REG_IDX_ACCLO && reg_idx <= XT_REG_IDX_M3)
543 return xtensa->core_config->mac16;
544 else if (reg_idx == XT_REG_IDX_WINDOWBASE || reg_idx == XT_REG_IDX_WINDOWSTART)
545 return xtensa->core_config->windowed;
546 else if (reg_idx >= XT_REG_IDX_PTEVADDR && reg_idx <= XT_REG_IDX_DTLBCFG)
547 return xtensa->core_config->mmu.enabled;
548 else if (reg_idx == XT_REG_IDX_MMID)
549 return xtensa->core_config->trace.enabled;
550 else if (reg_idx >= XT_REG_IDX_MEPC && reg_idx <= XT_REG_IDX_MEVADDR)
551 return xtensa->core_config->mem_err_check;
552 else if (reg_idx == XT_REG_IDX_CPENABLE)
553 return xtensa->core_config->coproc;
554 else if (reg_idx == XT_REG_IDX_VECBASE)
555 return xtensa->core_config->reloc_vec;
556 else if (reg_idx == XT_REG_IDX_CCOUNT)
557 return xtensa->core_config->tim_irq.enabled;
558 else if (reg_idx >= XT_REG_IDX_CCOMPARE0 && reg_idx <= XT_REG_IDX_CCOMPARE2)
559 return xtensa->core_config->tim_irq.enabled &&
560 (reg_idx - XT_REG_IDX_CCOMPARE0 < xtensa->core_config->tim_irq.comp_num);
561 else if (reg_idx == XT_REG_IDX_PRID)
562 return xtensa->core_config->proc_id;
563 else if (reg_idx >= XT_REG_IDX_MISC0 && reg_idx <= XT_REG_IDX_MISC3)
564 return reg_idx - XT_REG_IDX_MISC0 < xtensa->core_config->miscregs_num;
565 return true;
566 }
567
568 static bool xtensa_user_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
569 {
570 if (reg_idx == XT_REG_IDX_THREADPTR)
571 return xtensa->core_config->threadptr;
572 if (reg_idx == XT_REG_IDX_FCR || reg_idx == XT_REG_IDX_FSR)
573 return xtensa->core_config->fp_coproc;
574 return false;
575 }
576
577 static inline bool xtensa_fp_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
578 {
579 return xtensa->core_config->fp_coproc;
580 }
581
582 static inline bool xtensa_regular_reg_exists(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
583 {
584 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_AR63)
585 return reg_idx - XT_REG_IDX_AR0 < xtensa->core_config->aregs_num;
586 return true;
587 }
588
589 static int xtensa_write_dirty_registers(struct target *target)
590 {
591 struct xtensa *xtensa = target_to_xtensa(target);
592 int res;
593 xtensa_reg_val_t regval, windowbase = 0;
594 bool scratch_reg_dirty = false;
595 struct reg *reg_list = xtensa->core_cache->reg_list;
596
597 LOG_TARGET_DEBUG(target, "start");
598
599 /*We need to write the dirty registers in the cache list back to the processor.
600 *Start by writing the SFR/user registers. */
601 for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
602 if (reg_list[i].dirty) {
603 if (xtensa_regs[i].type == XT_REG_SPECIAL ||
604 xtensa_regs[i].type == XT_REG_USER ||
605 xtensa_regs[i].type == XT_REG_FR) {
606 scratch_reg_dirty = true;
607 regval = xtensa_reg_get(target, i);
608 LOG_TARGET_DEBUG(target, "Writing back reg %s val %08" PRIX32,
609 xtensa_regs[i].name,
610 regval);
611 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
612 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
613 if (xtensa_regs[i].type == XT_REG_USER) {
614 if (reg_list[i].exist)
615 xtensa_queue_exec_ins(xtensa,
616 XT_INS_WUR(xtensa_regs[i].reg_num,
617 XT_REG_A3));
618 } else if (xtensa_regs[i].type == XT_REG_FR) {
619 if (reg_list[i].exist)
620 xtensa_queue_exec_ins(xtensa,
621 XT_INS_WFR(xtensa_regs[i].reg_num,
622 XT_REG_A3));
623 } else {/*SFR */
624 if (reg_list[i].exist) {
625 unsigned int reg_num = xtensa_regs[i].reg_num;
626 if (reg_num == XT_PC_REG_NUM_BASE)
627 /* reg number of PC for debug interrupt
628 * depends on NDEBUGLEVEL */
629 reg_num += xtensa->core_config->debug.irq_level;
630
631 xtensa_queue_exec_ins(xtensa,
632 XT_INS_WSR(reg_num, XT_REG_A3));
633 }
634 }
635 reg_list[i].dirty = false;
636 }
637 }
638 }
639 if (scratch_reg_dirty)
640 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
641
642 if (xtensa->core_config->user_regs_num > 0 &&
643 xtensa->core_config->queue_write_dirty_user_regs)
644 xtensa->core_config->queue_write_dirty_user_regs(target);
645
646 if (xtensa->core_config->windowed) {
647 /*Grab the windowbase, we need it. */
648 windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
649 /* Check whether both an Ax register and the corresponding physical ARx
650 * register are set and dirty. */
651 /* Warn the user if this happens, not much else we can do... */
652 for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
653 unsigned int j = xtensa_windowbase_offset_to_canonical(i, windowbase);
654 if (reg_list[i].dirty && reg_list[j].dirty) {
655 if (memcmp(reg_list[i].value, reg_list[j].value,
656 sizeof(xtensa_reg_val_t)) != 0)
657 LOG_WARNING(
658 "Warning: Both A%d and the physical register it points to (AR%d) are dirty and differ in value. Results are undefined!",
659 i - XT_REG_IDX_A0,
660 j - XT_REG_IDX_AR0);
661 }
662 }
663 }
664
665 /* Write A0-A15 */
666 for (unsigned int i = 0; i < 16; i++) {
667 if (reg_list[XT_REG_IDX_A0 + i].dirty) {
668 regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
669 LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
670 xtensa_regs[XT_REG_IDX_A0 + i].name,
671 regval,
672 xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
673 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
674 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, i));
675 reg_list[XT_REG_IDX_A0 + i].dirty = false;
676 }
677 }
678
679 if (xtensa->core_config->windowed) {
680 /*Now write AR0-AR63. */
681 for (unsigned int j = 0; j < 64; j += 16) {
682 /*Write the 16 registers we can see */
683 for (unsigned int i = 0; i < 16; i++) {
684 if (i + j < xtensa->core_config->aregs_num) {
685 enum xtensa_reg_id realadr =
686 xtensa_windowbase_offset_to_canonical(XT_REG_IDX_AR0 + i + j,
687 windowbase);
688 /*Write back any dirty un-windowed registers */
689 if (reg_list[realadr].dirty) {
690 regval = xtensa_reg_get(target, realadr);
691 LOG_TARGET_DEBUG(
692 target,
693 "Writing back reg %s value %08" PRIX32 ", num =%i",
694 xtensa_regs[realadr].name,
695 regval,
696 xtensa_regs[realadr].reg_num);
697 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
698 xtensa_queue_exec_ins(xtensa,
699 XT_INS_RSR(XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
700 reg_list[realadr].dirty = false;
701 }
702 }
703 }
704 /* Now rotate the window so we'll see the next 16 registers. The final rotate
705 * will wrap around, */
706 /* leaving us in the state we were in. */
707 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(4));
708 }
709 }
710 res = jtag_execute_queue();
711 xtensa_core_status_check(target);
712
713 return res;
714 }
715
716 int xtensa_queue_write_dirty_user_regs_u32(struct target *target)
717 {
718 struct xtensa *xtensa = target_to_xtensa(target);
719 struct reg *reg_list = xtensa->core_cache->reg_list;
720 xtensa_reg_val_t reg_val;
721 bool scratch_reg_dirty = false;
722
723 LOG_TARGET_DEBUG(target, "start");
724
725 /* We need to write the dirty registers in the cache list back to the processor.
726 * Start by writing the SFR/user registers. */
727 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
728 if (!reg_list[XT_USR_REG_START + i].dirty)
729 continue;
730 scratch_reg_dirty = true;
731 reg_val = xtensa_reg_get(target, XT_USR_REG_START + i);
732 LOG_TARGET_DEBUG(target, "Writing back reg %s val %08" PRIX32,
733 xtensa->core_config->user_regs[i].name,
734 reg_val);
735 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, reg_val);
736 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
737 xtensa_queue_exec_ins(xtensa,
738 XT_INS_WUR(xtensa->core_config->user_regs[i].reg_num,
739 XT_REG_A3));
740 reg_list[XT_USR_REG_START + i].dirty = false;
741 }
742 if (scratch_reg_dirty)
743 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
744
745 return ERROR_OK;
746 }
747
748 static inline bool xtensa_is_stopped(struct target *target)
749 {
750 struct xtensa *xtensa = target_to_xtensa(target);
751 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
752 }
753
754 int xtensa_examine(struct target *target)
755 {
756 struct xtensa *xtensa = target_to_xtensa(target);
757 unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;
758
759 LOG_DEBUG("coreid = %d", target->coreid);
760 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
761 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
762 xtensa_dm_queue_enable(&xtensa->dbg_mod);
763 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
764 int res = jtag_execute_queue();
765 if (res != ERROR_OK)
766 return res;
767 if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
768 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
769 return ERROR_TARGET_FAILURE;
770 }
771 LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
772 if (!target_was_examined(target))
773 target_set_examined(target);
774 xtensa_smpbreak_write(xtensa, xtensa->smp_break);
775 return ERROR_OK;
776 }
777
778 int xtensa_wakeup(struct target *target)
779 {
780 struct xtensa *xtensa = target_to_xtensa(target);
781 unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;
782
783 if (xtensa->reset_asserted)
784 cmd |= PWRCTL_CORERESET;
785 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
786 /* TODO: can we join this with the write above? */
787 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
788 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
789 return jtag_execute_queue();
790 }
791
792 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
793 {
794 uint32_t dsr_data = 0x00110000;
795 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
796 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
797 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
798
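	/* Example (illustration): with set == OCDDCR_BREAKINEN the DCRSET write below
	 * asserts BreakIn (plus ENABLEOCD), while `clear` comes out as
	 * OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
	 * OCDDCR_ENABLEOCD cancels in the XOR, so it is always set and never cleared. */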
799 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
800 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, set | OCDDCR_ENABLEOCD);
801 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, clear);
802 xtensa_queue_dbg_reg_write(xtensa, NARADR_DSR, dsr_data);
803 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
804 return jtag_execute_queue();
805 }
806
807 int xtensa_smpbreak_set(struct target *target, uint32_t set)
808 {
809 struct xtensa *xtensa = target_to_xtensa(target);
810 int res = ERROR_OK;
811
812 xtensa->smp_break = set;
813 if (target_was_examined(target))
814 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
815 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
816 return res;
817 }
818
819 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
820 {
821 uint8_t dcr_buf[sizeof(uint32_t)];
822
823 xtensa_queue_dbg_reg_read(xtensa, NARADR_DCRSET, dcr_buf);
824 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
825 int res = jtag_execute_queue();
826 *val = buf_get_u32(dcr_buf, 0, 32);
827
828 return res;
829 }
830
831 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
832 {
833 struct xtensa *xtensa = target_to_xtensa(target);
834 *val = xtensa->smp_break;
835 return ERROR_OK;
836 }
837
838 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
839 {
840 return buf_get_u32(reg->value, 0, 32);
841 }
842
843 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
844 {
845 buf_set_u32(reg->value, 0, 32, value);
846 reg->dirty = true;
847 }
848
849 int xtensa_core_status_check(struct target *target)
850 {
851 struct xtensa *xtensa = target_to_xtensa(target);
852 int res, needclear = 0;
853
854 xtensa_dm_core_status_read(&xtensa->dbg_mod);
855 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
856 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
857 if (dsr & OCDDSR_EXECBUSY) {
858 if (!xtensa->suppress_dsr_errors)
859 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
860 needclear = 1;
861 }
862 if (dsr & OCDDSR_EXECEXCEPTION) {
863 if (!xtensa->suppress_dsr_errors)
864 LOG_TARGET_ERROR(target,
865 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
866 dsr);
867 needclear = 1;
868 }
869 if (dsr & OCDDSR_EXECOVERRUN) {
870 if (!xtensa->suppress_dsr_errors)
871 LOG_TARGET_ERROR(target,
872 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
873 dsr);
874 needclear = 1;
875 }
876 if (needclear) {
877 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
878 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
879 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
880 LOG_TARGET_ERROR(target, "clearing DSR failed!");
881 return xtensa->suppress_dsr_errors ? ERROR_OK : ERROR_FAIL;
882 }
883 return ERROR_OK;
884 }
885
886 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
887 {
888 struct xtensa *xtensa = target_to_xtensa(target);
889 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
890 assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
891 return xtensa_reg_get_value(reg);
892 }
893
894 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
895 {
896 struct xtensa *xtensa = target_to_xtensa(target);
897 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
898 assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
899 if (xtensa_reg_get_value(reg) == value)
900 return;
901 xtensa_reg_set_value(reg, value);
902 }
903
904 int xtensa_assert_reset(struct target *target)
905 {
906 struct xtensa *xtensa = target_to_xtensa(target);
907
908 LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
909 target->state = TARGET_RESET;
910 xtensa_queue_pwr_reg_write(xtensa,
911 DMREG_PWRCTL,
912 PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP |
913 PWRCTL_CORERESET);
914 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
915 int res = jtag_execute_queue();
916 if (res != ERROR_OK)
917 return res;
918 xtensa->reset_asserted = true;
919 return res;
920 }
921
922 int xtensa_deassert_reset(struct target *target)
923 {
924 struct xtensa *xtensa = target_to_xtensa(target);
925
926 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
927 if (target->reset_halt)
928 xtensa_queue_dbg_reg_write(xtensa,
929 NARADR_DCRSET,
930 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
931 xtensa_queue_pwr_reg_write(xtensa,
932 DMREG_PWRCTL,
933 PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP);
934 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
935 int res = jtag_execute_queue();
936 if (res != ERROR_OK)
937 return res;
938 target->state = TARGET_RUNNING;
939 xtensa->reset_asserted = false;
940 return res;
941 }
942
943 int xtensa_fetch_all_regs(struct target *target)
944 {
945 struct xtensa *xtensa = target_to_xtensa(target);
946 struct reg *reg_list = xtensa->core_cache->reg_list;
947 xtensa_reg_val_t cpenable = 0, windowbase = 0;
948 uint8_t regvals[XT_NUM_REGS][sizeof(xtensa_reg_val_t)];
949 uint8_t dsrs[XT_NUM_REGS][sizeof(xtensa_dsr_t)];
950 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
951
952 LOG_TARGET_DEBUG(target, "start");
953
954 /* Assume the CPU has just halted. We now want to fill the register cache with all the
955 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
956 * in one go, then sort everything out from the regvals variable. */
957
958 /* Start out with AREGS; we can reach those immediately. Grab them 16 registers at a time. */
959 for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
960 /*Grab the 16 registers we can see */
961 for (unsigned int i = 0; i < 16; i++) {
962 if (i + j < xtensa->core_config->aregs_num) {
963 xtensa_queue_exec_ins(xtensa,
964 XT_INS_WSR(XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
965 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[XT_REG_IDX_AR0 + i + j]);
966 if (debug_dsrs)
967 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[XT_REG_IDX_AR0 + i + j]);
968 }
969 }
970 if (xtensa->core_config->windowed) {
971 /* Now rotate the window so we'll see the next 16 registers. The final rotate
972 * will wrap around, */
973 /* leaving us in the state we were in. */
974 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(4));
975 }
976 }
977 if (xtensa->core_config->coproc) {
978 /* As the very first thing after AREGS, go grab the CPENABLE register. It indicates
979 * if we can also grab the FP */
980 /* (and theoretically other coprocessor) registers, or if this is a bad thing to do.*/
981 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
982 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(XT_SR_DDR, XT_REG_A3));
983 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[XT_REG_IDX_CPENABLE]);
984 }
985 int res = jtag_execute_queue();
986 if (res != ERROR_OK) {
987 LOG_ERROR("Failed to read ARs (%d)!", res);
988 return res;
989 }
990 xtensa_core_status_check(target);
991
992 if (xtensa->core_config->coproc)
993 cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE], 0, 32);
994 /* We're now free to use any of A0-A15 as scratch registers
995 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
996 for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
997 if (xtensa_reg_is_readable(xtensa_regs[i].flags, cpenable) && reg_list[i].exist &&
998 (xtensa_regs[i].type == XT_REG_SPECIAL ||
999 xtensa_regs[i].type == XT_REG_USER || xtensa_regs[i].type == XT_REG_FR)) {
1000 if (xtensa_regs[i].type == XT_REG_USER) {
1001 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa_regs[i].reg_num, XT_REG_A3));
1002 } else if (xtensa_regs[i].type == XT_REG_FR) {
1003 xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa_regs[i].reg_num, XT_REG_A3));
1004 } else { /*SFR */
1005 unsigned int reg_num = xtensa_regs[i].reg_num;
1006 if (reg_num == XT_PC_REG_NUM_BASE) {
1007 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1008 reg_num += xtensa->core_config->debug.irq_level;
1009 }
1010 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(reg_num, XT_REG_A3));
1011 }
1012 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(XT_SR_DDR, XT_REG_A3));
1013 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[i]);
1014 if (debug_dsrs)
1015 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[i]);
1016 }
1017 }
1018 /* Ok, send the whole mess to the CPU. */
1019 res = jtag_execute_queue();
1020 if (res != ERROR_OK) {
1021 LOG_ERROR("Failed to fetch AR regs!");
1022 return res;
1023 }
1024 xtensa_core_status_check(target);
1025
1026 if (debug_dsrs) {
1027 /* DSR checking: follows order in which registers are requested. */
1028 for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
1029 if (xtensa_reg_is_readable(xtensa_regs[i].flags, cpenable) && reg_list[i].exist &&
1030 (xtensa_regs[i].type == XT_REG_SPECIAL || xtensa_regs[i].type == XT_REG_USER ||
1031 xtensa_regs[i].type == XT_REG_FR)) {
1032 if (buf_get_u32(dsrs[i], 0, 32) & OCDDSR_EXECEXCEPTION) {
1033 LOG_ERROR("Exception reading %s!", xtensa_regs[i].name);
1034 return ERROR_FAIL;
1035 }
1036 }
1037 }
1038 }
1039
1040 if (xtensa->core_config->user_regs_num > 0 && xtensa->core_config->fetch_user_regs) {
1041 res = xtensa->core_config->fetch_user_regs(target);
1042 if (res != ERROR_OK)
1043 return res;
1044 }
1045
1046 if (xtensa->core_config->windowed) {
1047 /* We need the windowbase to decode the general addresses. */
1048 windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE], 0, 32);
1049 }
1050 /* Decode the result and update the cache. */
1051 for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
1052 if (xtensa_reg_is_readable(xtensa_regs[i].flags, cpenable) && reg_list[i].exist) {
1053 if (xtensa_regs[i].type == XT_REG_GENERAL) {
1054 /* TODO: add support for non-windowed configs */
1055 assert(
1056 xtensa->core_config->windowed &&
1057 "Regs fetch is not supported for non-windowed configs!");
1058 /* The 64-value general register set is read from (windowbase) on down.
1059 * We need to get the real register address by subtracting windowbase and
1060 * wrapping around. */
1061 int realadr = xtensa_canonical_to_windowbase_offset(i, windowbase);
1062 buf_cpy(regvals[realadr], reg_list[i].value, reg_list[i].size);
1063 } else if (xtensa_regs[i].type == XT_REG_RELGEN) {
1064 buf_cpy(regvals[xtensa_regs[i].reg_num], reg_list[i].value, reg_list[i].size);
1065 } else {
1066 buf_cpy(regvals[i], reg_list[i].value, reg_list[i].size);
1067 }
1068 reg_list[i].valid = true;
1069 } else {
1070 reg_list[i].valid = false;
1071 }
1072 }
1073 /* We have used A3 as a scratch register and we will need to write that back. */
1074 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1075 xtensa->regs_fetched = true;
1076
1077 return ERROR_OK;
1078 }
1079
1080 int xtensa_fetch_user_regs_u32(struct target *target)
1081 {
1082 struct xtensa *xtensa = target_to_xtensa(target);
1083 struct reg *reg_list = xtensa->core_cache->reg_list;
1084 xtensa_reg_val_t cpenable = 0;
1085 uint8_t regvals[XT_USER_REGS_NUM_MAX][sizeof(xtensa_reg_val_t)];
1086 uint8_t dsrs[XT_USER_REGS_NUM_MAX][sizeof(xtensa_dsr_t)];
1087 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1088
1089 assert(xtensa->core_config->user_regs_num < XT_USER_REGS_NUM_MAX && "Too many user regs configured!");
1090 if (xtensa->core_config->coproc)
1091 cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
1092
1093 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
1094 if (!xtensa_reg_is_readable(xtensa->core_config->user_regs[i].flags, cpenable))
1095 continue;
1096 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa->core_config->user_regs[i].reg_num, XT_REG_A3));
1097 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(XT_SR_DDR, XT_REG_A3));
1098 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[i]);
1099 if (debug_dsrs)
1100 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[i]);
1101 }
1102 /* Ok, send the whole mess to the CPU. */
1103 int res = jtag_execute_queue();
1104 if (res != ERROR_OK) {
1105 LOG_ERROR("Failed to fetch AR regs!");
1106 return res;
1107 }
1108 xtensa_core_status_check(target);
1109
1110 if (debug_dsrs) {
1111 /* DSR checking: follows order in which registers are requested. */
1112 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
1113 if (!xtensa_reg_is_readable(xtensa->core_config->user_regs[i].flags, cpenable))
1114 continue;
1115 if (buf_get_u32(dsrs[i], 0, 32) & OCDDSR_EXECEXCEPTION) {
1116 LOG_ERROR("Exception reading %s!", xtensa->core_config->user_regs[i].name);
1117 return ERROR_FAIL;
1118 }
1119 }
1120 }
1121
1122 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
1123 if (xtensa_reg_is_readable(xtensa->core_config->user_regs[i].flags, cpenable)) {
1124 buf_cpy(regvals[i], reg_list[XT_USR_REG_START + i].value, reg_list[XT_USR_REG_START + i].size);
1125 reg_list[XT_USR_REG_START + i].valid = true;
1126 } else {
1127 reg_list[XT_USR_REG_START + i].valid = false;
1128 }
1129 }
1130
1131 /* We have used A3 as a scratch register and we will need to write that back. */
1132 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1133 return ERROR_OK;
1134 }
1135
1136 int xtensa_get_gdb_reg_list(struct target *target,
1137 struct reg **reg_list[],
1138 int *reg_list_size,
1139 enum target_register_class reg_class)
1140 {
1141 struct xtensa *xtensa = target_to_xtensa(target);
1142 unsigned int num_regs = xtensa->core_config->gdb_general_regs_num;
1143
1144 if (reg_class == REG_CLASS_ALL)
1145 num_regs = xtensa->regs_num;
1146
1147 LOG_DEBUG("reg_class=%i, num_regs=%d", reg_class, num_regs);
1148
1149 *reg_list = malloc(num_regs * sizeof(struct reg *));
1150 if (!*reg_list)
1151 return ERROR_FAIL;
1152
1153 for (unsigned int k = 0; k < num_regs; k++) {
1154 unsigned int reg_id = xtensa->core_config->gdb_regs_mapping[k];
1155 (*reg_list)[k] = &xtensa->core_cache->reg_list[reg_id];
1156 }
1157
1158 *reg_list_size = num_regs;
1159
1160 return ERROR_OK;
1161 }
1162
1163 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1164 {
1165 struct xtensa *xtensa = target_to_xtensa(target);
1166 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1167 xtensa->core_config->mmu.dtlb_entries_count > 0;
1168 return ERROR_OK;
1169 }
1170
1171 int xtensa_halt(struct target *target)
1172 {
1173 struct xtensa *xtensa = target_to_xtensa(target);
1174
1175 LOG_TARGET_DEBUG(target, "start");
1176 if (target->state == TARGET_HALTED) {
1177 LOG_TARGET_DEBUG(target, "target was already halted");
1178 return ERROR_OK;
1179 }
1180 /* First we have to read dsr and check if the target stopped */
1181 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1182 if (res != ERROR_OK) {
1183 LOG_TARGET_ERROR(target, "Failed to read core status!");
1184 return res;
1185 }
1186 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1187 if (!xtensa_is_stopped(target)) {
1188 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1189 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1190 res = jtag_execute_queue();
1191 if (res != ERROR_OK)
1192 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1193 }
1194
1195 return res;
1196 }
1197
1198 int xtensa_prepare_resume(struct target *target,
1199 int current,
1200 target_addr_t address,
1201 int handle_breakpoints,
1202 int debug_execution)
1203 {
1204 struct xtensa *xtensa = target_to_xtensa(target);
1205 uint32_t bpena = 0;
1206
1207 LOG_TARGET_DEBUG(target,
1208 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1209 current,
1210 address,
1211 handle_breakpoints,
1212 debug_execution);
1213
1214 if (target->state != TARGET_HALTED) {
1215 LOG_TARGET_WARNING(target, "target not halted");
1216 return ERROR_TARGET_NOT_HALTED;
1217 }
1218
1219 if (address && !current) {
1220 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1221 } else {
1222 xtensa_reg_val_t cause = xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
1223 if (cause & DEBUGCAUSE_DB) {
1224 /* We stopped due to a watchpoint. We can't just resume executing the
1225 * instruction again because */
1226 /* that would trigger the watchpoint again. To fix this, we single-step,
1227 * which ignores watchpoints. */
1228 xtensa_do_step(target, current, address, handle_breakpoints);
1229 }
1230 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
1231 /* We stopped due to a break instruction. We can't just resume executing the
1232 * instruction again because */
1233 /* that would trigger the break again. To fix this, we single-step, which
1234 * ignores break. */
1235 xtensa_do_step(target, current, address, handle_breakpoints);
1236 }
1237 }
1238
1239 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1240 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1241 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1242 if (xtensa->hw_brps[slot]) {
1243 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1244 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1245 bpena |= BIT(slot);
1246 }
1247 }
1248 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1249
1250 /* Here we write all registers to the targets */
1251 int res = xtensa_write_dirty_registers(target);
1252 if (res != ERROR_OK)
1253 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1254 return res;
1255 }
1256
1257 int xtensa_do_resume(struct target *target)
1258 {
1259 struct xtensa *xtensa = target_to_xtensa(target);
1260
1261 LOG_TARGET_DEBUG(target, "start");
1262
1263 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO);
1264 int res = jtag_execute_queue();
1265 if (res != ERROR_OK) {
1266 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1267 return res;
1268 }
1269 xtensa_core_status_check(target);
1270 return ERROR_OK;
1271 }
1272
1273 int xtensa_resume(struct target *target,
1274 int current,
1275 target_addr_t address,
1276 int handle_breakpoints,
1277 int debug_execution)
1278 {
1279 LOG_TARGET_DEBUG(target, "start");
1280 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1281 if (res != ERROR_OK) {
1282 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1283 return res;
1284 }
1285 res = xtensa_do_resume(target);
1286 if (res != ERROR_OK) {
1287 LOG_TARGET_ERROR(target, "Failed to resume!");
1288 return res;
1289 }
1290
1291 target->debug_reason = DBG_REASON_NOTHALTED;
1292 if (!debug_execution)
1293 target->state = TARGET_RUNNING;
1294 else
1295 target->state = TARGET_DEBUG_RUNNING;
1296
1297 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1298
1299 return ERROR_OK;
1300 }
1301
1302 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1303 {
1304 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1305 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1306 if (err != ERROR_OK)
1307 return false;
1308
1309 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1310 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK;
1311 if (masked == XT_INS_L32E(0, 0, 0) || masked == XT_INS_S32E(0, 0, 0))
1312 return true;
1313
1314 masked = insn & XT_INS_RFWO_RFWU_MASK;
1315 if (masked == XT_INS_RFWO || masked == XT_INS_RFWU)
1316 return true;
1317
1318 return false;
1319 }
1320
1321 int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1322 {
1323 struct xtensa *xtensa = target_to_xtensa(target);
1324 int res;
1325 const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1326 xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
1327 xtensa_reg_val_t icountlvl, cause;
1328 xtensa_reg_val_t oldps, newps, oldpc, cur_pc;
1329
1330 LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1331 current, address, handle_breakpoints);
1332
1333 if (target->state != TARGET_HALTED) {
1334 LOG_TARGET_WARNING(target, "target not halted");
1335 return ERROR_TARGET_NOT_HALTED;
1336 }
1337
1338 if (xtensa->core_config->debug.icount_sz != 32) {
1339 LOG_TARGET_WARNING(target, "stepping for ICOUNT less than 32 bits is not implemented!");
1340 return ERROR_FAIL;
1341 }
1342
1343 /* Save old ps/pc */
1344 oldps = xtensa_reg_get(target, XT_REG_IDX_PS);
1345 oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);
1346
1347 cause = xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
1348 LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1349 oldps,
1350 oldpc,
1351 cause,
1352 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1353 if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1354 /* handle hard-coded SW breakpoints (e.g. syscalls) */
1355 LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1356 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0); /* so we don't recurse into the same routine */
1357 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
1358 /* pretend that we have stepped */
1359 if (cause & DEBUGCAUSE_BI)
1360 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1361 else
1362 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1363 return ERROR_OK;
1364 }
1365
1366 /* Xtensa has an ICOUNTLEVEL register which sets the maximum interrupt level at which the
1367 * instructions are to be counted while stepping.
1368 * For example, if we need to step by 2 instructions and an interrupt occurs in between,
1369 * the processor will execute the interrupt, return, and halt after the 2nd instruction.
1370 * However, sometimes we don't want the interrupt handlers to be executed at all, while
1371 * stepping through the code. In this case (XT_STEPPING_ISR_OFF), PS.INTLEVEL can be raised
1372 * to only allow Debug and NMI interrupts.
1373 */
1374 if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
1375 if (!xtensa->core_config->high_irq.enabled) {
1376 LOG_TARGET_WARNING(
1377 target,
1378 "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1379 return ERROR_FAIL;
1380 }
1381 /* Mask all interrupts below Debug, i.e. PS.INTLEVEL = DEBUGLEVEL - 1 */
1382 xtensa_reg_val_t temp_ps = (oldps & ~0xF) | (xtensa->core_config->debug.irq_level - 1);
1383 xtensa_reg_set(target, XT_REG_IDX_PS, temp_ps);
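		/* E.g. on a config where debug.irq_level == 6 (a common DEBUGLEVEL,
		 * assumed here purely for illustration) this sets PS.INTLEVEL to 5,
		 * leaving only the debug level and NMI able to interrupt the step. */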
1384 }
1385 /* Regardless of the ISR masking mode, we need to count instructions at any CINTLEVEL during a step,
1386 so set `icountlvl` to DEBUGLEVEL.
1387 If ISRs are masked, they are disabled in PS (see above), so with `icountlvl` set to DEBUGLEVEL
1388 we can step through any kind of code, e.g. a 'high int level' ISR.
1389 If ISRs are not masked, then with `icountlvl` set to DEBUGLEVEL we can step into any ISR
1390 that can occur (i.e. is enabled in PS).
1391 */
1392 icountlvl = xtensa->core_config->debug.irq_level;
1393
1394 if (cause & DEBUGCAUSE_DB) {
1395 /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1396 * that would trigger the watchpoint again. To fix this, we remove watchpoints, single-step and
1397 * re-enable the watchpoint. */
1398 LOG_TARGET_DEBUG(
1399 target,
1400 "Single-stepping to get past instruction that triggered the watchpoint...");
1401 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0); /*so we don't recurse into
1402 * the same routine */
1403 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
1404 /*Save all DBREAKCx registers and set to 0 to disable watchpoints */
1405 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1406 dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
1407 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
1408 }
1409 }
1410
1411 if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1412 /* handle normal SW breakpoint */
1413 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0); /*so we don't recurse into
1414 * the same routine */
1415 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
1416 }
1417 do {
1418 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
1419 xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1420
1421 /* Now ICOUNT is set, we can resume as if we were going to run */
1422 res = xtensa_prepare_resume(target, current, address, 0, 0);
1423 if (res != ERROR_OK) {
1424 LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1425 return res;
1426 }
1427 res = xtensa_do_resume(target);
1428 if (res != ERROR_OK) {
1429 LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1430 return res;
1431 }
1432
1433 /* Wait for stepping to complete */
1434 long long start = timeval_ms();
1435 while (timeval_ms() < start + 500) {
1436 /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1437 *until stepping is complete. */
1438 usleep(1000);
1439 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1440 if (res != ERROR_OK) {
1441 LOG_TARGET_ERROR(target, "Failed to read core status!");
1442 return res;
1443 }
1444 if (xtensa_is_stopped(target))
1445 break;
1446 usleep(1000);
1447 }
1448 LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1449 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1450 if (!xtensa_is_stopped(target)) {
1451 LOG_TARGET_WARNING(
1452 target,
1453 "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1454 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1455 target->debug_reason = DBG_REASON_NOTHALTED;
1456 target->state = TARGET_RUNNING;
1457 return ERROR_FAIL;
1458 }
1459 target->debug_reason = DBG_REASON_SINGLESTEP;
1460 target->state = TARGET_HALTED;
1461
1462 xtensa_fetch_all_regs(target);
1463
1464 cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1465
1466 LOG_TARGET_DEBUG(target,
1467 "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1468 xtensa_reg_get(target, XT_REG_IDX_PS),
1469 cur_pc,
1470 xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE),
1471 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1472
1473 /* Do not step into WindowOverflow if ISRs are masked.
1474 If we stop on a breakpoint inside WindowOverflow with masked ISRs and
1475 try to do a step, it will get us out of that handler */
1476 if (xtensa->core_config->windowed &&
1477 xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
1478 xtensa_pc_in_winexc(target, cur_pc)) {
1479 /* isrmask = on, need to step out of the window exception handler */
1480 LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1481 oldpc = cur_pc;
1482 address = oldpc + 3;
1483 continue;
1484 }
1485
1486 if (oldpc == cur_pc)
1487 LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1488 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1489 else
1490 LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1491 break;
1492 } while (true);
1493 LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1494
1495 if (cause & DEBUGCAUSE_DB) {
1496 LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1497 /* Restore the DBREAKCx registers */
1498 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1499 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
1500 }
1501
1502 /* Restore int level */
1503 /* TODO: Theoretically, this can mess up stepping over an instruction that modifies
1504 * ps.intlevel by itself. Look into this. */
1505 if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
1506 newps = xtensa_reg_get(target, XT_REG_IDX_PS);
1507 newps = (newps & ~0xF) | (oldps & 0xf);
1508 xtensa_reg_set(target, XT_REG_IDX_PS, newps);
1509 }
1510
1511 /* write ICOUNTLEVEL back to zero */
1512 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
1513 /* TODO: can we skip writing dirty registers and re-fetching them? */
1514 res = xtensa_write_dirty_registers(target);
1515 xtensa_fetch_all_regs(target);
1516 return res;
1517 }
1518
1519 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1520 {
1521 return xtensa_do_step(target, current, address, handle_breakpoints);
1522 }
1523
1524 /**
1525 * Returns true if two ranges are overlapping
1526 */
1527 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1528 target_addr_t r1_end,
1529 target_addr_t r2_start,
1530 target_addr_t r2_end)
1531 {
1532 if ((r2_start >= r1_start) && (r2_start < r1_end))
1533 return true; /* r2_start is in r1 region */
1534 if ((r2_end > r1_start) && (r2_end <= r1_end))
1535 return true; /* r2_end is in r1 region */
1536 return false;
1537 }
1538
1539 /**
1540 * Returns the size of the overlapping region of two ranges.
1541 */
1542 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1543 target_addr_t r1_end,
1544 target_addr_t r2_start,
1545 target_addr_t r2_end)
1546 {
1547 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1548 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1549 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1550 return ov_end - ov_start;
1551 }
1552 return 0;
1553 }
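/* For illustration: xtensa_get_overlap_size(0x1000, 0x2000, 0x1800, 0x3000) returns 0x800,
 * the length of the common sub-range [0x1800, 0x2000); disjoint ranges return 0. */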
1554
1555 /**
1556 * Check whether the address range falls into configured memory regions and has the required access mode
1557 */
1558 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1559 {
1560 target_addr_t adr_pos = address; /* address cursor, set to the range start */
1561 target_addr_t adr_end = address + size; /* region end */
1562 target_addr_t overlap_size;
1563 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1564
1565 while (adr_pos < adr_end) {
1566 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1567 if (!cm) /* address does not belong to any region */
1568 return false;
1569 if ((cm->access & access) != access) /* access check */
1570 return false;
1571 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1572 assert(overlap_size != 0);
1573 adr_pos += overlap_size;
1574 }
1575 return true;
1576 }
1577
1578 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1579 {
1580 struct xtensa *xtensa = target_to_xtensa(target);
1581 /* We are going to read memory in 32-bit increments. This may not be what the calling
1582 * function expects, so we may need to allocate a temp buffer and read into that first. */
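/* Illustration: reading 3 bytes at address 0x1006 gives addrstart_al = 0x1004 and
 * addrend_al = 0x100C, so 8 bytes are fetched into the temp buffer and the caller's
 * data is copied back from albuff + (0x1006 & 3), i.e. albuff + 2. */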
1583 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1584 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1585 target_addr_t adr = addrstart_al;
1586 uint8_t *albuff;
1587
1588 if (target->state != TARGET_HALTED) {
1589 LOG_TARGET_WARNING(target, "target not halted");
1590 return ERROR_TARGET_NOT_HALTED;
1591 }
1592
1593 if (!xtensa->permissive_mode) {
1594 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1595 XT_MEM_ACCESS_READ)) {
1596 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1597 return ERROR_FAIL;
1598 }
1599 }
1600
1601 if (addrstart_al == address && addrend_al == address + (size * count)) {
1602 albuff = buffer;
1603 } else {
1604 albuff = malloc(addrend_al - addrstart_al);
1605 if (!albuff) {
1606 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1607 addrend_al - addrstart_al);
1608 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1609 }
1610 }
1611
1612 /* We're going to use A3 here */
1613 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1614 /* Write start address to A3 */
1615 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1616 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
1617 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1618 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1619 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(XT_REG_A3));
1620 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[i]);
1621 }
1622 int res = jtag_execute_queue();
1623 if (res == ERROR_OK)
1624 res = xtensa_core_status_check(target);
1625 if (res != ERROR_OK)
1626 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address " TARGET_ADDR_FMT,
1627 count * size, address);
1628
1629 if (albuff != buffer) {
1630 memcpy(buffer, albuff + (address & 3), (size * count));
1631 free(albuff);
1632 }
1633
1634 return res;
1635 }
1636
1637 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1638 {
1639 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1640 return xtensa_read_memory(target, address, 1, count, buffer);
1641 }
1642
1643 int xtensa_write_memory(struct target *target,
1644 target_addr_t address,
1645 uint32_t size,
1646 uint32_t count,
1647 const uint8_t *buffer)
1648 {
1649 /* This memory write function can be handed nearly anything, from aligned
1650 * uint32 writes to unaligned uint8 ones. Xtensa memory, however, may only
1651 * accept aligned uint32 writes. That is why we convert everything into that
1652 * form. */
1653 struct xtensa *xtensa = target_to_xtensa(target);
1654 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1655 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1656 target_addr_t adr = addrstart_al;
1657 int res;
1658 uint8_t *albuff;
1659
1660 if (target->state != TARGET_HALTED) {
1661 LOG_TARGET_WARNING(target, "target not halted");
1662 return ERROR_TARGET_NOT_HALTED;
1663 }
1664
1665 if (!xtensa->permissive_mode) {
1666 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
1667 LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
1668 return ERROR_FAIL;
1669 }
1670 }
1671
1672 if (size == 0 || count == 0 || !buffer)
1673 return ERROR_COMMAND_SYNTAX_ERROR;
1674
1675 /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
1676 if (addrstart_al == address && addrend_al == address + (size * count)) {
1677 /* Cast away const: albuff must be non-const because it may also point to the temp buffer */
1678 albuff = (uint8_t *)buffer;
1679 } else {
1680 albuff = malloc(addrend_al - addrstart_al);
1681 if (!albuff) {
1682 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1683 addrend_al - addrstart_al);
1684 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1685 }
1686 }
1687
1688 /* We're going to use A3 here */
1689 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1690
1691 /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
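/* Illustration: writing 2 bytes at 0x1007 touches the words at 0x1004 and 0x1008, so both the
 * head and the tail word are read back first, the two payload bytes are merged in at offset
 * (address & 3) == 3, and the whole aligned buffer is written out below. */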
1692 if (albuff != buffer) {
1693 /* See if we need to read the first and/or last word. */
1694 if (address & 3) {
1695 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1696 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
1697 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(XT_REG_A3));
1698 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[0]);
1699 }
1700 if ((address + (size * count)) & 3) {
1701 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrend_al - 4);
1702 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
1703 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(XT_REG_A3));
1704 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR,
1705 &albuff[addrend_al - addrstart_al - 4]);
1706 }
1707 /* Grab bytes */
1708 res = jtag_execute_queue();
1709 if (res != ERROR_OK) {
1710 LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
1711 if (albuff != buffer)
1712 free(albuff);
1713 return res;
1714 }
1715 xtensa_core_status_check(target);
1716 /* Copy data to be written into the aligned buffer */
1717 memcpy(&albuff[address & 3], buffer, size * count);
1718 /* Now we can write albuff in aligned uint32s. */
1719 }
1720
1721 /* Write start address to A3 */
1722 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1723 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(XT_SR_DDR, XT_REG_A3));
1724 /* Write the aligned buffer */
1725 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1726 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, buf_get_u32(&albuff[i], 0, 32));
1727 xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(XT_REG_A3));
1728 }
1729 res = jtag_execute_queue();
1730 if (res == ERROR_OK)
1731 res = xtensa_core_status_check(target);
1732 if (res != ERROR_OK)
1733 LOG_TARGET_WARNING(target, "Failed writing %d bytes at address " TARGET_ADDR_FMT, count * size, address);
1734 if (albuff != buffer)
1735 free(albuff);
1736
1737 return res;
1738 }
1739
1740 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
1741 {
1742 /* xtensa_write_memory can handle everything. Just pass on to that. */
1743 return xtensa_write_memory(target, address, 1, count, buffer);
1744 }
1745
1746 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
1747 {
1748 LOG_WARNING("not implemented yet");
1749 return ERROR_FAIL;
1750 }
1751
1752 int xtensa_poll(struct target *target)
1753 {
1754 struct xtensa *xtensa = target_to_xtensa(target);
1755
1756 int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET | PWRSTAT_COREWASRESET);
1757 if (res != ERROR_OK)
1758 return res;
1759
1760 if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
1761 LOG_TARGET_INFO(target, "Debug controller was reset.");
1762 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
1763 if (res != ERROR_OK)
1764 return res;
1765 }
1766 if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
1767 LOG_TARGET_INFO(target, "Core was reset.");
1768 xtensa_dm_power_status_cache(&xtensa->dbg_mod);
1769 /* Enable JTAG, set reset if needed */
1770 res = xtensa_wakeup(target);
1771 if (res != ERROR_OK)
1772 return res;
1773
1774 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1775 if (res != ERROR_OK)
1776 return res;
1777 if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET) {
1778 /* if RESET state is persistent */
1779 target->state = TARGET_RESET;
1780 } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
1781 LOG_TARGET_DEBUG(target, "not powered, dsr=0x%" PRIX32 ", stopped=%ld",
1782 xtensa->dbg_mod.core_status.dsr,
1783 xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
1784 target->state = TARGET_UNKNOWN;
1785 if (xtensa->come_online_probes_num == 0)
1786 target->examined = false;
1787 else
1788 xtensa->come_online_probes_num--;
1789 } else if (xtensa_is_stopped(target)) {
1790 if (target->state != TARGET_HALTED) {
1791 enum target_state oldstate = target->state;
1792 target->state = TARGET_HALTED;
1793 /* Examine why the target has been halted */
1794 target->debug_reason = DBG_REASON_DBGRQ;
1795 xtensa_fetch_all_regs(target);
1796 /* When setting debug reason DEBUGCAUSE events have the following
1797 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
1798 /* Watchpoint and breakpoint events at the same time results in special
1799 * debug reason: DBG_REASON_WPTANDBKPT. */
1800 xtensa_reg_val_t halt_cause = xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
1801 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
1802 if (halt_cause & DEBUGCAUSE_IC)
1803 target->debug_reason = DBG_REASON_SINGLESTEP;
1804 if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
1805 if (halt_cause & DEBUGCAUSE_DB)
1806 target->debug_reason = DBG_REASON_WPTANDBKPT;
1807 else
1808 target->debug_reason = DBG_REASON_BREAKPOINT;
1809 } else if (halt_cause & DEBUGCAUSE_DB) {
1810 target->debug_reason = DBG_REASON_WATCHPOINT;
1811 }
1812 LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIX32 ", debug_reason=%08x, oldstate=%08x",
1813 xtensa_reg_get(target, XT_REG_IDX_PC),
1814 target->debug_reason,
1815 oldstate);
1816 LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
1817 halt_cause,
1818 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
1819 xtensa->dbg_mod.core_status.dsr);
1820 LOG_TARGET_INFO(target, "Target halted, PC=0x%08" PRIX32 ", debug_reason=%08x",
1821 xtensa_reg_get(target, XT_REG_IDX_PC), target->debug_reason);
1822 xtensa_dm_core_status_clear(
1823 &xtensa->dbg_mod,
1824 OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
1825 OCDDSR_DEBUGINTTRAX |
1826 OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
1827 }
1828 } else {
1829 target->debug_reason = DBG_REASON_NOTHALTED;
1830 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
1831 target->state = TARGET_RUNNING;
1832 target->debug_reason = DBG_REASON_NOTHALTED;
1833 }
1834 }
1835 if (xtensa->trace_active) {
1836 /* Detect if tracing was active but has stopped. */
1837 struct xtensa_trace_status trace_status;
1838 res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
1839 if (res == ERROR_OK) {
1840 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
1841 LOG_INFO("Detected end of trace.");
1842 if (trace_status.stat & TRAXSTAT_PCMTG)
1843 LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
1844 if (trace_status.stat & TRAXSTAT_PTITG)
1845 LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
1846 if (trace_status.stat & TRAXSTAT_CTITG)
1847 LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
1848 xtensa->trace_active = false;
1849 }
1850 }
1851 }
1852 return ERROR_OK;
1853 }
1854
1855 static int xtensa_sw_breakpoint_add(struct target *target,
1856 struct breakpoint *breakpoint,
1857 struct xtensa_sw_breakpoint *sw_bp)
1858 {
1859 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
1860 if (ret != ERROR_OK) {
1861 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
1862 return ret;
1863 }
1864
1865 sw_bp->insn_sz = xtensa_insn_size_get(buf_get_u32(sw_bp->insn, 0, 24));
1866 sw_bp->oocd_bp = breakpoint;
1867
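	/* Use a BREAK instruction of the same size as the original: the 3-byte BREAK for a full-size
	 * slot, otherwise the 2-byte narrow BREAK.N, so the following instruction is left untouched. */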
1868 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(0, 0) : XT_INS_BREAKN(0);
1869 /* convert to target endianness */
1870 uint8_t break_insn_buff[4];
1871 target_buffer_set_u32(target, break_insn_buff, break_insn);
1872
1873 ret = target_write_buffer(target, breakpoint->address, sw_bp->insn_sz, break_insn_buff);
1874 if (ret != ERROR_OK) {
1875 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
1876 return ret;
1877 }
1878
1879 return ERROR_OK;
1880 }
1881
1882 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
1883 {
1884 int ret = target_write_buffer(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
1885 if (ret != ERROR_OK) {
1886 LOG_TARGET_ERROR(target, "Failed to restore insn (%d)!", ret);
1887 return ret;
1888 }
1889 sw_bp->oocd_bp = NULL;
1890 return ERROR_OK;
1891 }
1892
1893 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
1894 {
1895 struct xtensa *xtensa = target_to_xtensa(target);
1896 unsigned int slot;
1897
1898 if (breakpoint->type == BKPT_SOFT) {
1899 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
1900 if (!xtensa->sw_brps[slot].oocd_bp ||
1901 xtensa->sw_brps[slot].oocd_bp == breakpoint)
1902 break;
1903 }
1904 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
1905 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
1906 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1907 }
1908 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
1909 if (ret != ERROR_OK) {
1910 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
1911 return ret;
1912 }
1913 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
1914 slot,
1915 breakpoint->address);
1916 return ERROR_OK;
1917 }
1918
1919 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1920 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
1921 break;
1922 }
1923 if (slot == xtensa->core_config->debug.ibreaks_num) {
1924 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
1925 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1926 }
1927
1928 xtensa->hw_brps[slot] = breakpoint;
1929 /* We will actually write the breakpoints when we resume the target. */
1930 LOG_TARGET_DEBUG(target, "placed HW breakpoint @ " TARGET_ADDR_FMT,
1931 breakpoint->address);
1932
1933 return ERROR_OK;
1934 }
1935
1936 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
1937 {
1938 struct xtensa *xtensa = target_to_xtensa(target);
1939 unsigned int slot;
1940
1941 if (breakpoint->type == BKPT_SOFT) {
1942 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
1943 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
1944 break;
1945 }
1946 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
1947 LOG_TARGET_WARNING(target, "SW breakpoint not found among %u slots!", slot);
1948 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1949 }
1950 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
1951 if (ret != ERROR_OK) {
1952 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
1953 return ret;
1954 }
1955 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
1956 return ERROR_OK;
1957 }
1958
1959 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1960 if (xtensa->hw_brps[slot] == breakpoint)
1961 break;
1962 }
1963 if (slot == xtensa->core_config->debug.ibreaks_num) {
1964 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
1965 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1966 }
1967 xtensa->hw_brps[slot] = NULL;
1968 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
1969 return ERROR_OK;
1970 }
1971
1972 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
1973 {
1974 struct xtensa *xtensa = target_to_xtensa(target);
1975 unsigned int slot;
1976 xtensa_reg_val_t dbreakcval;
1977
1978 if (target->state != TARGET_HALTED) {
1979 LOG_TARGET_WARNING(target, "target not halted");
1980 return ERROR_TARGET_NOT_HALTED;
1981 }
1982
1983 if (watchpoint->mask != ~(uint32_t)0) {
1984 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
1985 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1986 }
1987
1988 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1989 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
1990 break;
1991 }
1992 if (slot == xtensa->core_config->debug.dbreaks_num) {
1993 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
1994 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1995 }
1996
1997 /* Figure out the value for DBREAKC bits 5..0.
1998 * It's basically 0x3F with one more bit cleared from the LSB end for each doubling of the length. */
1999 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2000 !IS_PWR_OF_2(watchpoint->length) ||
2001 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2002 LOG_TARGET_WARNING(
2003 target,
2004 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2005 " not supported by hardware.",
2006 watchpoint->length,
2007 watchpoint->address);
2008 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2009 }
2010 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
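	/* E.g. length 1 -> 0x3F, length 4 -> 0x3C, length 64 -> 0x00: each doubling of the watched
	 * length clears one more low bit of the DBREAKC mask. */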
2011
2012 if (watchpoint->rw == WPT_READ)
2013 dbreakcval |= BIT(30);
2014 if (watchpoint->rw == WPT_WRITE)
2015 dbreakcval |= BIT(31);
2016 if (watchpoint->rw == WPT_ACCESS)
2017 dbreakcval |= BIT(30) | BIT(31);
2018
2019 /* Write DBREAKA[slot] and DBREAKC[slot] */
2020 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2021 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2022 xtensa->hw_wps[slot] = watchpoint;
2023 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2024 watchpoint->address);
2025 return ERROR_OK;
2026 }
2027
2028 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2029 {
2030 struct xtensa *xtensa = target_to_xtensa(target);
2031 unsigned int slot;
2032
2033 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2034 if (xtensa->hw_wps[slot] == watchpoint)
2035 break;
2036 }
2037 if (slot == xtensa->core_config->debug.dbreaks_num) {
2038 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2039 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2040 }
2041 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2042 xtensa->hw_wps[slot] = NULL;
2043 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2044 watchpoint->address);
2045 return ERROR_OK;
2046 }
2047
2048 static int xtensa_build_reg_cache(struct target *target)
2049 {
2050 struct xtensa *xtensa = target_to_xtensa(target);
2051 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2052 struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2053
2054 if (!reg_cache) {
2055 LOG_ERROR("Failed to alloc reg cache!");
2056 return ERROR_FAIL;
2057 }
2058 reg_cache->name = "Xtensa registers";
2059 reg_cache->next = NULL;
2060 reg_cache->num_regs = XT_NUM_REGS + xtensa->core_config->user_regs_num;
2061 /* Init reglist */
2062 struct reg *reg_list = calloc(reg_cache->num_regs, sizeof(struct reg));
2063 if (!reg_list) {
2064 LOG_ERROR("Failed to alloc reg list!");
2065 goto fail;
2066 }
2067 xtensa->regs_num = 0;
2068
2069 for (unsigned int i = 0; i < XT_NUM_REGS; i++) {
2070 reg_list[i].exist = false;
2071 if (xtensa_regs[i].type == XT_REG_USER) {
2072 if (xtensa_user_reg_exists(xtensa, i))
2073 reg_list[i].exist = true;
2074 else
2075 LOG_DEBUG("User reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
2076 } else if (xtensa_regs[i].type == XT_REG_FR) {
2077 if (xtensa_fp_reg_exists(xtensa, i))
2078 reg_list[i].exist = true;
2079 else
2080 LOG_DEBUG("FP reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
2081 } else if (xtensa_regs[i].type == XT_REG_SPECIAL) {
2082 if (xtensa_special_reg_exists(xtensa, i))
2083 reg_list[i].exist = true;
2084 else
2085 LOG_DEBUG("Special reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
2086 } else {
2087 if (xtensa_regular_reg_exists(xtensa, i))
2088 reg_list[i].exist = true;
2089 else
2090 LOG_DEBUG("Regular reg '%s' (%d) does not exist", xtensa_regs[i].name, i);
2091 }
2092 reg_list[i].name = xtensa_regs[i].name;
2093 reg_list[i].size = 32;
2094 reg_list[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2095 if (!reg_list[i].value) {
2096 LOG_ERROR("Failed to alloc reg list value!");
2097 goto fail;
2098 }
2099 reg_list[i].dirty = false;
2100 reg_list[i].valid = false;
2101 reg_list[i].type = &xtensa_reg_type;
2102 reg_list[i].arch_info = xtensa;
2103 if (reg_list[i].exist)
2104 xtensa->regs_num++;
2105 }
2106 for (unsigned int i = 0; i < xtensa->core_config->user_regs_num; i++) {
2107 reg_list[XT_USR_REG_START + i].exist = true;
2108 reg_list[XT_USR_REG_START + i].name = xtensa->core_config->user_regs[i].name;
2109 reg_list[XT_USR_REG_START + i].size = xtensa->core_config->user_regs[i].size;
2110 reg_list[XT_USR_REG_START + i].value = calloc(1, reg_list[XT_USR_REG_START + i].size / 8);
2111 if (!reg_list[XT_USR_REG_START + i].value) {
2112 LOG_ERROR("Failed to alloc user reg list value!");
2113 goto fail;
2114 }
2115 reg_list[XT_USR_REG_START + i].dirty = false;
2116 reg_list[XT_USR_REG_START + i].valid = false;
2117 reg_list[XT_USR_REG_START + i].type = xtensa->core_config->user_regs[i].type;
2118 reg_list[XT_USR_REG_START + i].arch_info = xtensa;
2119 xtensa->regs_num++;
2120 }
2121 if (xtensa->core_config->gdb_general_regs_num >= xtensa->regs_num) {
2122 LOG_ERROR("Regs number is less than GDB general regs number!");
2123 goto fail;
2124 }
2125
2126 /* assign GDB reg numbers to registers */
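	/* gdb_regs_mapping[gdb_reg_id] gives the reg cache index of the register GDB expects at that
	 * position; the loop below inverts the mapping by storing gdb_reg_id in reg_list[reg_id].number. */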
2127 for (unsigned int gdb_reg_id = 0; gdb_reg_id < xtensa->regs_num; gdb_reg_id++) {
2128 unsigned int reg_id = xtensa->core_config->gdb_regs_mapping[gdb_reg_id];
2129 if (reg_id >= reg_cache->num_regs) {
2130 LOG_ERROR("Invalid GDB map!");
2131 goto fail;
2132 }
2133 if (!reg_list[reg_id].exist) {
2134 LOG_ERROR("Non-existing reg in GDB map!");
2135 goto fail;
2136 }
2137 reg_list[reg_id].number = gdb_reg_id;
2138 }
2139 reg_cache->reg_list = reg_list;
2140
2141 xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2142 if (!xtensa->algo_context_backup) {
2143 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2144 goto fail;
2145 }
2146 for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2147 struct reg *reg = &reg_cache->reg_list[i];
2148 xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2149 if (!xtensa->algo_context_backup[i]) {
2150 LOG_ERROR("Failed to alloc mem for algorithm context!");
2151 goto fail;
2152 }
2153 }
2154
2155 xtensa->core_cache = reg_cache;
2156 if (cache_p)
2157 *cache_p = reg_cache;
2158 return ERROR_OK;
2159
2160 fail:
2161 if (reg_list) {
2162 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2163 free(reg_list[i].value);
2164 free(reg_list);
2165 }
2166 if (xtensa->algo_context_backup) {
2167 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2168 free(xtensa->algo_context_backup[i]);
2169 free(xtensa->algo_context_backup);
2170 }
2171 free(reg_cache);
2172
2173 return ERROR_FAIL;
2174 }
2175
2176 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2177 const struct xtensa_config *xtensa_config,
2178 const struct xtensa_debug_module_config *dm_cfg)
2179 {
2180 target->arch_info = xtensa;
2181 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2182 xtensa->target = target;
2183 xtensa->core_config = xtensa_config;
2184 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2185
2186 if (!xtensa->core_config->exc.enabled || !xtensa->core_config->irq.enabled ||
2187 !xtensa->core_config->high_irq.enabled || !xtensa->core_config->debug.enabled) {
2188 LOG_ERROR("Xtensa configuration does not support debugging!");
2189 return ERROR_FAIL;
2190 }
2191 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2192 }
2193
2194 void xtensa_set_permissive_mode(struct target *target, bool state)
2195 {
2196 target_to_xtensa(target)->permissive_mode = state;
2197 }
2198
2199 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2200 {
2201 struct xtensa *xtensa = target_to_xtensa(target);
2202
2203 xtensa->come_online_probes_num = 3;
2204 xtensa->hw_brps = calloc(xtensa->core_config->debug.ibreaks_num, sizeof(struct breakpoint *));
2205 if (!xtensa->hw_brps) {
2206 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2207 return ERROR_FAIL;
2208 }
2209 xtensa->hw_wps = calloc(xtensa->core_config->debug.dbreaks_num, sizeof(struct watchpoint *));
2210 if (!xtensa->hw_wps) {
2211 free(xtensa->hw_brps);
2212 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2213 return ERROR_FAIL;
2214 }
2215 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2216 if (!xtensa->sw_brps) {
2217 free(xtensa->hw_brps);
2218 free(xtensa->hw_wps);
2219 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2220 return ERROR_FAIL;
2221 }
2222
2223 return xtensa_build_reg_cache(target);
2224 }
2225
2226 static void xtensa_free_reg_cache(struct target *target)
2227 {
2228 struct xtensa *xtensa = target_to_xtensa(target);
2229 struct reg_cache *cache = xtensa->core_cache;
2230
2231 if (cache) {
2232 register_unlink_cache(&target->reg_cache, cache);
2233 for (unsigned int i = 0; i < cache->num_regs; i++) {
2234 free(xtensa->algo_context_backup[i]);
2235 free(cache->reg_list[i].value);
2236 }
2237 free(xtensa->algo_context_backup);
2238 free(cache->reg_list);
2239 free(cache);
2240 }
2241 xtensa->core_cache = NULL;
2242 xtensa->algo_context_backup = NULL;
2243 }
2244
2245 void xtensa_target_deinit(struct target *target)
2246 {
2247 struct xtensa *xtensa = target_to_xtensa(target);
2248
2249 LOG_DEBUG("start");
2250
2251 if (target_was_examined(target)) {
2252 int ret = xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, OCDDCR_ENABLEOCD);
2253 if (ret != ERROR_OK) {
2254 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2255 return;
2256 }
2257 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2258 ret = jtag_execute_queue();
2259 if (ret != ERROR_OK) {
2260 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2261 return;
2262 }
2263 }
2264 xtensa_free_reg_cache(target);
2265 free(xtensa->hw_brps);
2266 free(xtensa->hw_wps);
2267 free(xtensa->sw_brps);
2268 }
2269
2270 const char *xtensa_get_gdb_arch(struct target *target)
2271 {
2272 return "xtensa";
2273 }
2274
2275 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
2276 {
2277 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
2278 &xtensa->permissive_mode, "xtensa permissive mode");
2279 }
2280
2281 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
2282 {
2283 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
2284 target_to_xtensa(get_current_target(CMD_CTX)));
2285 }
2286
2287 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
2288 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
2289 {
2290 struct xtensa_perfmon_config config = {
2291 .mask = 0xffff,
2292 .kernelcnt = 0,
2293 .tracelevel = -1 /* use DEBUGLEVEL by default */
2294 };
2295
2296 if (CMD_ARGC < 2 || CMD_ARGC > 5)
2297 return ERROR_COMMAND_SYNTAX_ERROR;
2298
2299 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
2300 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
2301 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
2302 return ERROR_COMMAND_ARGUMENT_INVALID;
2303 }
2304
2305 config.select = strtoul(CMD_ARGV[1], NULL, 0);
2306 if (config.select > XTENSA_MAX_PERF_SELECT) {
2307 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
2308 return ERROR_COMMAND_ARGUMENT_INVALID;
2309 }
2310
2311 if (CMD_ARGC >= 3) {
2312 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
2313 if (config.mask > XTENSA_MAX_PERF_MASK) {
2314 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
2315 return ERROR_COMMAND_ARGUMENT_INVALID;
2316 }
2317 }
2318
2319 if (CMD_ARGC >= 4) {
2320 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
2321 if (config.kernelcnt > 1) {
2322 command_print(CMD, "kernelcnt should be 0 or 1");
2323 return ERROR_COMMAND_ARGUMENT_INVALID;
2324 }
2325 }
2326
2327 if (CMD_ARGC >= 5) {
2328 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
2329 if (config.tracelevel > 7) {
2330 command_print(CMD, "tracelevel should be <=7");
2331 return ERROR_COMMAND_ARGUMENT_INVALID;
2332 }
2333 }
2334
2335 if (config.tracelevel == -1)
2336 config.tracelevel = xtensa->core_config->debug.irq_level;
2337
2338 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
2339 }
2340
2341 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
2342 {
2343 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
2344 target_to_xtensa(get_current_target(CMD_CTX)));
2345 }
2346
2347 /* perfmon_dump [counter_id] */
2348 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
2349 {
2350 if (CMD_ARGC > 1)
2351 return ERROR_COMMAND_SYNTAX_ERROR;
2352
2353 int counter_id = -1;
2354 if (CMD_ARGC == 1) {
2355 counter_id = strtol(CMD_ARGV[0], NULL, 0);
2356 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
2357 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
2358 return ERROR_COMMAND_ARGUMENT_INVALID;
2359 }
2360 }
2361
2362 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
2363 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
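	/* counter_id < 0 (no argument given) dumps every counter in [0, XTENSA_MAX_PERF_COUNTERS);
	 * otherwise only the requested counter is dumped. */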
2364 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
2365 char result_buf[128] = { 0 };
2366 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
2367 struct xtensa_perfmon_result result;
2368 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
2369 if (res != ERROR_OK)
2370 return res;
2371 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
2372 "%-12" PRIu64 "%s",
2373 result.value,
2374 result.overflow ? " (overflow)" : "");
2375 LOG_INFO("%s", result_buf);
2376 }
2377
2378 return ERROR_OK;
2379 }
2380
2381 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
2382 {
2383 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
2384 target_to_xtensa(get_current_target(CMD_CTX)));
2385 }
2386
2387 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
2388 {
2389 int state = -1;
2390
2391 if (CMD_ARGC < 1) {
2392 const char *st;
2393 state = xtensa->stepping_isr_mode;
2394 if (state == XT_STEPPING_ISR_ON)
2395 st = "OFF";
2396 else if (state == XT_STEPPING_ISR_OFF)
2397 st = "ON";
2398 else
2399 st = "UNKNOWN";
2400 command_print(CMD, "Current ISR step mode: %s", st);
2401 return ERROR_OK;
2402 }
2403 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
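	/* i.e. 'maskisr on' selects XT_STEPPING_ISR_OFF (ISRs suppressed while stepping) and vice versa */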
2404 if (!strcasecmp(CMD_ARGV[0], "off"))
2405 state = XT_STEPPING_ISR_ON;
2406 else if (!strcasecmp(CMD_ARGV[0], "on"))
2407 state = XT_STEPPING_ISR_OFF;
2408
2409 if (state == -1) {
2410 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
2411 return ERROR_FAIL;
2412 }
2413 xtensa->stepping_isr_mode = state;
2414 return ERROR_OK;
2415 }
2416
2417 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
2418 {
2419 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
2420 target_to_xtensa(get_current_target(CMD_CTX)));
2421 }
2422
2423 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
2424 {
2425 int res = ERROR_OK;
2426 uint32_t val = 0;
2427
2428 if (CMD_ARGC >= 1) {
2429 for (unsigned int i = 0; i < CMD_ARGC; i++) {
2430 if (!strcasecmp(CMD_ARGV[i], "none")) {
2431 val = 0;
2432 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
2433 val |= OCDDCR_BREAKINEN;
2434 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
2435 val |= OCDDCR_BREAKOUTEN;
2436 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
2437 val |= OCDDCR_RUNSTALLINEN;
2438 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
2439 val |= OCDDCR_DEBUGMODEOUTEN;
2440 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
2441 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
2442 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
2443 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
2444 } else {
2445 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
2446 command_print(
2447 CMD,
2448 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
2449 return ERROR_OK;
2450 }
2451 }
2452 res = xtensa_smpbreak_set(target, val);
2453 if (res != ERROR_OK)
2454 command_print(CMD, "Failed to set smpbreak config %d", res);
2455 } else {
2456 struct xtensa *xtensa = target_to_xtensa(target);
2457 res = xtensa_smpbreak_read(xtensa, &val);
2458 if (res == ERROR_OK) {
2459 command_print(CMD, "Current bits set:%s%s%s%s",
2460 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
2461 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
2462 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
2463 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
2464 );
2465 } else {
2466 command_print(CMD, "Failed to get smpbreak config %d", res);
2467 }
2468 }
2469 return res;
2470 }
2471
2472 COMMAND_HANDLER(xtensa_cmd_smpbreak)
2473 {
2474 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
2475 get_current_target(CMD_CTX));
2476 }
2477
2478 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
2479 {
2480 struct xtensa_trace_status trace_status;
2481 struct xtensa_trace_start_config cfg = {
2482 .stoppc = 0,
2483 .stopmask = XTENSA_STOPMASK_DISABLED,
2484 .after = 0,
2485 .after_is_words = false
2486 };
2487
2488 /* Parse arguments */
2489 for (unsigned int i = 0; i < CMD_ARGC; i++) {
2490 if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
2491 char *e;
2492 i++;
2493 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
2494 cfg.stopmask = 0;
2495 if (*e == '/')
2496 cfg.stopmask = strtol(e + 1, NULL, 0);
2497 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
2498 i++;
2499 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
2500 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
2501 cfg.after_is_words = 0;
2502 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
2503 cfg.after_is_words = 1;
2504 } else {
2505 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
2506 return ERROR_FAIL;
2507 }
2508 }
2509
2510 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2511 if (res != ERROR_OK)
2512 return res;
2513 if (trace_status.stat & TRAXSTAT_TRACT) {
2514 LOG_WARNING("Silently stop active tracing!");
2515 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
2516 if (res != ERROR_OK)
2517 return res;
2518 }
2519
2520 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
2521 if (res != ERROR_OK)
2522 return res;
2523
2524 xtensa->trace_active = true;
2525 command_print(CMD, "Trace started.");
2526 return ERROR_OK;
2527 }
2528
2529 COMMAND_HANDLER(xtensa_cmd_tracestart)
2530 {
2531 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
2532 target_to_xtensa(get_current_target(CMD_CTX)));
2533 }
2534
2535 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
2536 {
2537 struct xtensa_trace_status trace_status;
2538
2539 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2540 if (res != ERROR_OK)
2541 return res;
2542
2543 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2544 command_print(CMD, "No trace is currently active.");
2545 return ERROR_FAIL;
2546 }
2547
2548 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
2549 if (res != ERROR_OK)
2550 return res;
2551
2552 xtensa->trace_active = false;
2553 command_print(CMD, "Trace stop triggered.");
2554 return ERROR_OK;
2555 }
2556
2557 COMMAND_HANDLER(xtensa_cmd_tracestop)
2558 {
2559 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
2560 target_to_xtensa(get_current_target(CMD_CTX)));
2561 }
2562
2563 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
2564 {
2565 struct xtensa_trace_config trace_config;
2566 struct xtensa_trace_status trace_status;
2567 uint32_t memsz, wmem;
2568
2569 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2570 if (res != ERROR_OK)
2571 return res;
2572
2573 if (trace_status.stat & TRAXSTAT_TRACT) {
2574 command_print(CMD, "Tracing is still active. Please stop it first.");
2575 return ERROR_FAIL;
2576 }
2577
2578 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
2579 if (res != ERROR_OK)
2580 return res;
2581
2582 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
2583 command_print(CMD, "No active trace found; nothing to dump.");
2584 return ERROR_FAIL;
2585 }
2586
2587 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
2588 LOG_INFO("Total trace memory: %d words", memsz);
2589 if ((trace_config.addr &
2590 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
2591 /* Memory hasn't wrapped around yet. */
2592 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
2593 LOG_INFO("...but trace is only %d words", wmem);
2594 if (wmem < memsz)
2595 memsz = wmem;
2596 } else {
2597 if (trace_config.addr & TRAXADDR_TWSAT) {
2598 LOG_INFO("Real trace is many times longer than that (overflow)");
2599 } else {
2600 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
2601 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
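	/* i.e. (number of wraps) * (trace memory size) + current write offset; e.g. two wraps of a
	 * 0x1000-word buffer plus offset 0x80 would mean the real trace was 0x2080 words long. */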
2602 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
2603 }
2604 }
2605
2606 uint8_t *tracemem = malloc(memsz * 4);
2607 if (!tracemem) {
2608 command_print(CMD, "Failed to alloc memory for trace data!");
2609 return ERROR_FAIL;
2610 }
2611 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
2612 if (res != ERROR_OK) {
2613 free(tracemem);
2614 return res;
2615 }
2616
2617 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
2618 if (f <= 0) {
2619 free(tracemem);
2620 command_print(CMD, "Unable to open file %s", fname);
2621 return ERROR_FAIL;
2622 }
2623 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
2624 command_print(CMD, "Unable to write to file %s", fname);
2625 else
2626 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
2627 close(f);
2628
2629 bool is_all_zeroes = true;
2630 for (unsigned int i = 0; i < memsz * 4; i++) {
2631 if (tracemem[i] != 0) {
2632 is_all_zeroes = false;
2633 break;
2634 }
2635 }
2636 free(tracemem);
2637 if (is_all_zeroes)
2638 command_print(
2639 CMD,
2640 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
2641
2642 return ERROR_OK;
2643 }
2644
2645 COMMAND_HANDLER(xtensa_cmd_tracedump)
2646 {
2647 if (CMD_ARGC != 1) {
2648 command_print(CMD, "Command takes exactly 1 parameter. Need a filename to dump to as output!");
2649 return ERROR_FAIL;
2650 }
2651
2652 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
2653 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
2654 }
2655
2656 const struct command_registration xtensa_command_handlers[] = {
2657 {
2658 .name = "set_permissive",
2659 .handler = xtensa_cmd_permissive_mode,
2660 .mode = COMMAND_ANY,
2661 .help = "When set to 1, enable Xtensa permissive mode (less client-side checks)",
2662 .usage = "[0|1]",
2663 },
2664 {
2665 .name = "maskisr",
2666 .handler = xtensa_cmd_mask_interrupts,
2667 .mode = COMMAND_ANY,
2668 .help = "mask Xtensa interrupts at step",
2669 .usage = "['on'|'off']",
2670 },
2671 {
2672 .name = "smpbreak",
2673 .handler = xtensa_cmd_smpbreak,
2674 .mode = COMMAND_ANY,
2675 .help = "Set the way the CPU chains OCD breaks",
2676 .usage =
2677 "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
2678 },
2679 {
2680 .name = "perfmon_enable",
2681 .handler = xtensa_cmd_perfmon_enable,
2682 .mode = COMMAND_EXEC,
2683 .help = "Enable and start performance counter",
2684 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
2685 },
2686 {
2687 .name = "perfmon_dump",
2688 .handler = xtensa_cmd_perfmon_dump,
2689 .mode = COMMAND_EXEC,
2690 .help =
2691 "Dump performance counter value. If no argument specified, dumps all counters.",
2692 .usage = "[counter_id]",
2693 },
2694 {
2695 .name = "tracestart",
2696 .handler = xtensa_cmd_tracestart,
2697 .mode = COMMAND_EXEC,
2698 .help =
2699 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
2700 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
2701 },
2702 {
2703 .name = "tracestop",
2704 .handler = xtensa_cmd_tracestop,
2705 .mode = COMMAND_EXEC,
2706 .help = "Tracing: Stop current trace as started by the tracestart command",
2707 .usage = "",
2708 },
2709 {
2710 .name = "tracedump",
2711 .handler = xtensa_cmd_tracedump,
2712 .mode = COMMAND_EXEC,
2713 .help = "Tracing: Dump trace memory to a file. One file per core.",
2714 .usage = "<outfile>",
2715 },
2716 COMMAND_REGISTRATION_DONE
2717 };
