src/target/lakemont.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /*
4 * Copyright(c) 2013-2016 Intel Corporation.
5 *
6 * Adrian Burns (adrian.burns@intel.com)
7 * Thomas Faust (thomas.faust@intel.com)
8 * Ivan De Cesaris (ivan.de.cesaris@intel.com)
9 * Julien Carreno (julien.carreno@intel.com)
10 * Jeffrey Maxwell (jeffrey.r.maxwell@intel.com)
11 * Jessica Gomez (jessica.gomez.hernandez@intel.com)
12 *
13 * Contact Information:
14 * Intel Corporation
15 */
16
17 /**
18 * @file
19 * This implements the probemode operations for Lakemont 1 (LMT1).
20 */
21
22 #ifdef HAVE_CONFIG_H
23 #include "config.h"
24 #endif
25
26 #include <helper/log.h>
27
28 #include "target.h"
29 #include "target_type.h"
30 #include "lakemont.h"
31 #include "register.h"
32 #include "breakpoints.h"
33 #include "x86_32_common.h"
34
35 static int irscan(struct target *t, uint8_t *out,
36 uint8_t *in, uint8_t ir_len);
37 static int drscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t len);
38 static int save_context(struct target *target);
39 static int restore_context(struct target *target);
40 static uint32_t get_tapstatus(struct target *t);
41 static int enter_probemode(struct target *t);
42 static int exit_probemode(struct target *t);
43 static int halt_prep(struct target *t);
44 static int do_halt(struct target *t);
45 static int do_resume(struct target *t);
46 static int read_all_core_hw_regs(struct target *t);
47 static int write_all_core_hw_regs(struct target *t);
48 static int read_hw_reg(struct target *t,
49 int reg, uint32_t *regval, uint8_t cache);
50 static int write_hw_reg(struct target *t,
51 int reg, uint32_t regval, uint8_t cache);
52 static struct reg_cache *lakemont_build_reg_cache
53 (struct target *target);
54 static int submit_reg_pir(struct target *t, int num);
55 static int submit_instruction_pir(struct target *t, int num);
56 static int submit_pir(struct target *t, uint64_t op);
57 static int lakemont_get_core_reg(struct reg *reg);
58 static int lakemont_set_core_reg(struct reg *reg, uint8_t *buf);
59
60 static struct scan_blk scan;
61
62 /* registers and opcodes for register access; pm_idx identifies the
63  * registers that are modified by Lakemont probemode-specific operations
64  */
65 static const struct {
66 uint8_t id;
67 const char *name;
68 uint64_t op;
69 uint8_t pm_idx;
70 unsigned bits;
71 enum reg_type type;
72 const char *group;
73 const char *feature;
74 } regs[] = {
75 /* general purpose registers */
76 { EAX, "eax", 0x000000D01D660000, 0, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
77 { ECX, "ecx", 0x000000501D660000, 1, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
78 { EDX, "edx", 0x000000901D660000, 2, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
79 { EBX, "ebx", 0x000000101D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
80 { ESP, "esp", 0x000000E01D660000, NOT_PMREG, 32, REG_TYPE_DATA_PTR, "general", "org.gnu.gdb.i386.core" },
81 { EBP, "ebp", 0x000000601D660000, NOT_PMREG, 32, REG_TYPE_DATA_PTR, "general", "org.gnu.gdb.i386.core" },
82 { ESI, "esi", 0x000000A01D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
83 { EDI, "edi", 0x000000201D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
84
85 /* instruction pointer & flags */
86 { EIP, "eip", 0x000000C01D660000, 3, 32, REG_TYPE_CODE_PTR, "general", "org.gnu.gdb.i386.core" },
87 { EFLAGS, "eflags", 0x000000401D660000, 4, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
88
89 /* segment registers */
90 { CS, "cs", 0x000000281D660000, 5, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
91 { SS, "ss", 0x000000C81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
92 { DS, "ds", 0x000000481D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
93 { ES, "es", 0x000000A81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
94 { FS, "fs", 0x000000881D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
95 { GS, "gs", 0x000000081D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
96
97 /* floating point unit registers - not accessible via JTAG - here to satisfy GDB */
98 { ST0, "st0", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
99 { ST1, "st1", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
100 { ST2, "st2", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
101 { ST3, "st3", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
102 { ST4, "st4", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
103 { ST5, "st5", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
104 { ST6, "st6", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
105 { ST7, "st7", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
106 { FCTRL, "fctrl", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
107 { FSTAT, "fstat", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
108 { FTAG, "ftag", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
109 { FISEG, "fiseg", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
110 { FIOFF, "fioff", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
111 { FOSEG, "foseg", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
112 { FOOFF, "fooff", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
113 { FOP, "fop", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
114
115 /* control registers */
116 { CR0, "cr0", 0x000000001D660000, 6, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
117 { CR2, "cr2", 0x000000BC1D660000, 7, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
118 { CR3, "cr3", 0x000000801D660000, 8, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
119 { CR4, "cr4", 0x0000002C1D660000, 9, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
120
121 /* debug registers */
122 { DR0, "dr0", 0x0000007C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
123 { DR1, "dr1", 0x000000FC1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
124 { DR2, "dr2", 0x000000021D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
125 { DR3, "dr3", 0x000000821D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
126 { DR6, "dr6", 0x000000301D660000, 10, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
127 { DR7, "dr7", 0x000000B01D660000, 11, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
128
129 /* descriptor tables */
130 { IDTB, "idtbase", 0x000000581D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
131 { IDTL, "idtlimit", 0x000000D81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
132 { IDTAR, "idtar", 0x000000981D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
133 { GDTB, "gdtbase", 0x000000B81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
134 { GDTL, "gdtlimit", 0x000000781D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
135 { GDTAR, "gdtar", 0x000000381D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
136 { TR, "tr", 0x000000701D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
137 { LDTR, "ldtr", 0x000000F01D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
138 { LDTB, "ldbase", 0x000000041D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
139 { LDTL, "ldlimit", 0x000000841D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
140 { LDTAR, "ldtar", 0x000000F81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
141
142 /* segment registers */
143 { CSB, "csbase", 0x000000F41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
144 { CSL, "cslimit", 0x0000000C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
145 { CSAR, "csar", 0x000000741D660000, 12, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
146 { DSB, "dsbase", 0x000000941D660000, 13, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
147 { DSL, "dslimit", 0x000000541D660000, 14, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
148 { DSAR, "dsar", 0x000000141D660000, 15, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
149 { ESB, "esbase", 0x0000004C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
150 { ESL, "eslimit", 0x000000CC1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
151 { ESAR, "esar", 0x0000008C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
152 { FSB, "fsbase", 0x000000641D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
153 { FSL, "fslimit", 0x000000E41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
154 { FSAR, "fsar", 0x000000A41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
155 { GSB, "gsbase", 0x000000C41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
156 { GSL, "gslimit", 0x000000241D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
157 { GSAR, "gsar", 0x000000441D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
158 { SSB, "ssbase", 0x000000341D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
159 { SSL, "sslimit", 0x000000B41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
160 { SSAR, "ssar", 0x000000D41D660000, 16, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
161 { TSSB, "tssbase", 0x000000E81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
162 { TSSL, "tsslimit", 0x000000181D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
163 { TSSAR, "tssar", 0x000000681D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
164 /* probemode control register */
165 { PMCR, "pmcr", 0x000000421D660000, 17, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
166 };
167
168 static const struct {
169 uint8_t id;
170 const char *name;
171 uint64_t op;
172 } instructions[] = {
173 /* memory read/write */
174 { MEMRDB32, "MEMRDB32", 0x0909090909090851 },
175 { MEMRDB16, "MEMRDB16", 0x09090909090851E6 },
176 { MEMRDH32, "MEMRDH32", 0x090909090908D166 },
177 { MEMRDH16, "MEMRDH16", 0x090909090908D1E6 },
178 { MEMRDW32, "MEMRDW32", 0x09090909090908D1 },
179 { MEMRDW16, "MEMRDW16", 0x0909090908D1E666 },
180 { MEMWRB32, "MEMWRB32", 0x0909090909090811 },
181 { MEMWRB16, "MEMWRB16", 0x09090909090811E6 },
182 { MEMWRH32, "MEMWRH32", 0x0909090909089166 },
183 { MEMWRH16, "MEMWRH16", 0x09090909090891E6 },
184 { MEMWRW32, "MEMWRW32", 0x0909090909090891 },
185 { MEMWRW16, "MEMWRW16", 0x090909090891E666 },
186 /* IO read/write */
187 { IORDB32, "IORDB32", 0x0909090909090937 },
188 { IORDB16, "IORDB16", 0x09090909090937E6 },
189 { IORDH32, "IORDH32", 0x090909090909B766 },
190 { IORDH16, "IORDH16", 0x090909090909B7E6 },
191 { IORDW32, "IORDW32", 0x09090909090909B7 },
192 { IORDW16, "IORDW16", 0x0909090909B7E666 },
193 { IOWRB32, "IOWRB32", 0x0909090909090977 },
194 { IOWRB16, "IOWRB16", 0x09090909090977E6 },
195 { IOWRH32, "IOWRH32", 0x090909090909F766 },
196 { IOWRH16, "IOWRH16", 0x090909090909F7E6 },
197 { IOWRW32, "IOWRW32", 0x09090909090909F7 },
198 { IOWRW16, "IOWRW16", 0x0909090909F7E666 },
199 /* lakemont1 core shadow ram access opcodes */
200 { SRAMACCESS, "SRAMACCESS", 0x0000000E9D660000 },
201 { SRAM2PDR, "SRAM2PDR", 0x4CF0000000000000 },
202 { PDR2SRAM, "PDR2SRAM", 0x0CF0000000000000 },
203 { WBINVD, "WBINVD", 0x09090909090990F0 },
204 };
205
206 bool check_not_halted(const struct target *t)
207 {
208 bool halted = t->state == TARGET_HALTED;
209 if (!halted)
210 LOG_ERROR("target running, halt it first");
211 return !halted;
212 }
213
214 static int irscan(struct target *t, uint8_t *out,
215 uint8_t *in, uint8_t ir_len)
216 {
217 int retval = ERROR_OK;
218 struct x86_32_common *x86_32 = target_to_x86_32(t);
219 if (!t->tap) {
220 retval = ERROR_FAIL;
221 LOG_ERROR("%s invalid target tap", __func__);
222 return retval;
223 }
224 if (ir_len != t->tap->ir_length) {
225 retval = ERROR_FAIL;
226 if (t->tap->enabled)
227 LOG_ERROR("%s tap enabled but tap irlen=%d",
228 __func__, t->tap->ir_length);
229 else
230 LOG_ERROR("%s tap not enabled and irlen=%d",
231 __func__, t->tap->ir_length);
232 return retval;
233 }
234 struct scan_field *fields = &scan.field;
235 fields->num_bits = ir_len;
236 fields->out_value = out;
237 fields->in_value = in;
238 jtag_add_ir_scan(x86_32->curr_tap, fields, TAP_IDLE);
239 if (x86_32->flush) {
240 retval = jtag_execute_queue();
241 if (retval != ERROR_OK)
242 LOG_ERROR("%s failed to execute queue", __func__);
243 }
244 return retval;
245 }
246
247 static int drscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t len)
248 {
249 int retval = ERROR_OK;
250 uint64_t data = 0;
251 struct x86_32_common *x86_32 = target_to_x86_32(t);
252 if (!t->tap) {
253 retval = ERROR_FAIL;
254 LOG_ERROR("%s invalid target tap", __func__);
255 return retval;
256 }
257 if (len > MAX_SCAN_SIZE || 0 == len) {
258 retval = ERROR_FAIL;
259 LOG_ERROR("%s data len is %d bits, max is %d bits",
260 __func__, len, MAX_SCAN_SIZE);
261 return retval;
262 }
263 struct scan_field *fields = &scan.field;
264 fields->out_value = out;
265 fields->in_value = in;
266 fields->num_bits = len;
267 jtag_add_dr_scan(x86_32->curr_tap, 1, fields, TAP_IDLE);
268 if (x86_32->flush) {
269 retval = jtag_execute_queue();
270 if (retval != ERROR_OK) {
271 LOG_ERROR("%s drscan failed to execute queue", __func__);
272 return retval;
273 }
274 }
275 if (in) {
276 if (len >= 8) {
277 for (int n = (len / 8) - 1 ; n >= 0; n--)
278 data = (data << 8) + *(in+n);
279 } else
280 LOG_DEBUG("dr in 0x%02" PRIx8, *in);
281 } else {
282 LOG_ERROR("%s no drscan data", __func__);
283 retval = ERROR_FAIL;
284 }
285 return retval;
286 }
287
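/*
 * Illustrative sketch, not part of the original driver: scans queued while
 * x86_32->flush is clear are only executed once a later scan runs
 * jtag_execute_queue(), so a batch costs a single queue flush. See
 * read_hw_reg()/write_hw_reg() below for the real uses; the example_*
 * function name is hypothetical and the PDR value read here is only
 * meaningful after the PIR sequence those functions submit first.
 */
#if 0
static int example_batched_pdr_read(struct target *t, uint32_t *val)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	x86_32->flush = 0;	/* queue the IR scan without executing it */
	scan.out[0] = RDWRPDR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	x86_32->flush = 1;	/* this DR scan flushes the whole queued batch */
	if (drscan(t, NULL, scan.out, PDR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	*val = buf_get_u32(scan.out, 0, 32);
	return ERROR_OK;
}
#endif
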
288 static int save_context(struct target *t)
289 {
290 int err;
291 /* read core registers from lakemont sram */
292 err = read_all_core_hw_regs(t);
293 if (err != ERROR_OK) {
294 LOG_ERROR("%s error reading regs", __func__);
295 return err;
296 }
297 return ERROR_OK;
298 }
299
300 static int restore_context(struct target *t)
301 {
302 int err = ERROR_OK;
303 uint32_t i;
304 struct x86_32_common *x86_32 = target_to_x86_32(t);
305
306 /* write core regs into the core PM SRAM from the reg_cache */
307 err = write_all_core_hw_regs(t);
308 if (err != ERROR_OK) {
309 LOG_ERROR("%s error writing regs", __func__);
310 return err;
311 }
312
313 for (i = 0; i < (x86_32->cache->num_regs); i++) {
314 x86_32->cache->reg_list[i].dirty = false;
315 x86_32->cache->reg_list[i].valid = false;
316 }
317 return err;
318 }
319
320 /*
321  * We keep reg_cache in sync with hardware at halt/resume time and avoid
322  * writing to real hardware here: pm_regs reflects the hardware while we
323  * are halted, and reg_cache is synced back to hardware on resume.
324  * TODO - for "reg eip force" to work, get/set is assumed to read from and
325  * write to hardware; there may be other reasons too, since other OpenOCD
326  * targets generally read/write hardware in get/set - watch this!
327  */
328 static int lakemont_get_core_reg(struct reg *reg)
329 {
330 int retval = ERROR_OK;
331 struct lakemont_core_reg *lakemont_reg = reg->arch_info;
332 struct target *t = lakemont_reg->target;
333 if (check_not_halted(t))
334 return ERROR_TARGET_NOT_HALTED;
335 LOG_DEBUG("reg=%s, value=0x%08" PRIx32, reg->name,
336 buf_get_u32(reg->value, 0, 32));
337 return retval;
338 }
339
340 static int lakemont_set_core_reg(struct reg *reg, uint8_t *buf)
341 {
342 struct lakemont_core_reg *lakemont_reg = reg->arch_info;
343 struct target *t = lakemont_reg->target;
344 uint32_t value = buf_get_u32(buf, 0, 32);
345 LOG_DEBUG("reg=%s, newval=0x%08" PRIx32, reg->name, value);
346 if (check_not_halted(t))
347 return ERROR_TARGET_NOT_HALTED;
348 buf_set_u32(reg->value, 0, 32, value);
349 reg->dirty = true;
350 reg->valid = true;
351 return ERROR_OK;
352 }
353
354 static const struct reg_arch_type lakemont_reg_type = {
355 /* these get called when reg_cache does not hold a "valid" value for an
356  * individual register (e.g. "reg eip"), but not for the "reg" block command
357  */
358 .get = lakemont_get_core_reg,
359 .set = lakemont_set_core_reg,
360 };
361
362 struct reg_cache *lakemont_build_reg_cache(struct target *t)
363 {
364 struct x86_32_common *x86_32 = target_to_x86_32(t);
365 int num_regs = ARRAY_SIZE(regs);
366 struct reg_cache **cache_p = register_get_last_cache_p(&t->reg_cache);
367 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
368 struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
369 struct lakemont_core_reg *arch_info = malloc(sizeof(struct lakemont_core_reg) * num_regs);
370 struct reg_feature *feature;
371 int i;
372
373 if (!cache || !reg_list || !arch_info) {
374 free(cache);
375 free(reg_list);
376 free(arch_info);
377 LOG_ERROR("%s out of memory", __func__);
378 return NULL;
379 }
380
381 /* Build the process context cache */
382 cache->name = "lakemont registers";
383 cache->next = NULL;
384 cache->reg_list = reg_list;
385 cache->num_regs = num_regs;
386 (*cache_p) = cache;
387 x86_32->cache = cache;
388
389 for (i = 0; i < num_regs; i++) {
390 arch_info[i].target = t;
391 arch_info[i].x86_32_common = x86_32;
392 arch_info[i].op = regs[i].op;
393 arch_info[i].pm_idx = regs[i].pm_idx;
394 reg_list[i].name = regs[i].name;
395 reg_list[i].size = 32;
396 reg_list[i].value = calloc(1, 4);
397 reg_list[i].dirty = false;
398 reg_list[i].valid = false;
399 reg_list[i].type = &lakemont_reg_type;
400 reg_list[i].arch_info = &arch_info[i];
401
402 reg_list[i].group = regs[i].group;
403 reg_list[i].number = i;
404 reg_list[i].exist = true;
405 reg_list[i].caller_save = true; /* gdb defaults to true */
406
407 feature = calloc(1, sizeof(struct reg_feature));
408 if (feature) {
409 feature->name = regs[i].feature;
410 reg_list[i].feature = feature;
411 } else
412 LOG_ERROR("%s unable to allocate feature list", __func__);
413
414 reg_list[i].reg_data_type = calloc(1, sizeof(struct reg_data_type));
415 if (reg_list[i].reg_data_type)
416 reg_list[i].reg_data_type->type = regs[i].type;
417 else
418 LOG_ERROR("%s unable to allocate reg type list", __func__);
419 }
420 return cache;
421 }
422
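/*
 * Illustrative sketch, not part of the original driver: once the cache
 * above is built, a register can be looked up by its GDB name. This
 * assumes register_get_by_name() from register.h; the example_* helper
 * name is hypothetical.
 */
#if 0
static uint32_t example_cached_eip(struct target *t)
{
	struct reg *r = register_get_by_name(t->reg_cache, "eip", true);
	if (!r || !r->valid)
		return 0;	/* the cached value is only valid while halted */
	return buf_get_u32(r->value, 0, 32);
}
#endif
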
423 static uint32_t get_tapstatus(struct target *t)
424 {
425 scan.out[0] = TAPSTATUS;
426 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
427 return 0;
428 if (drscan(t, NULL, scan.out, TS_SIZE) != ERROR_OK)
429 return 0;
430 return buf_get_u32(scan.out, 0, 32);
431 }
432
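/*
 * Illustrative sketch, not part of the original driver: decode the
 * tapstatus bits this file tests elsewhere (TS_*_BIT masks come from
 * lakemont.h, bit meanings as inferred from their use below). The
 * example_* helper name is hypothetical.
 */
#if 0
static void example_log_tapstatus(struct target *t)
{
	uint32_t ts = get_tapstatus(t);
	LOG_DEBUG("TS=0x%08" PRIx32 " PM=%d EN_PM=%d PRDY=%d PMCR=%d SBP=%d",
		ts,
		(ts & TS_PM_BIT) ? 1 : 0,	/* core is in probemode */
		(ts & TS_EN_PM_BIT) ? 1 : 0,	/* probemode entry still pending */
		(ts & TS_PRDY_BIT) ? 1 : 0,	/* probe-ready indication */
		(ts & TS_PMCR_BIT) ? 1 : 0,	/* PMCR redirect occurred */
		(ts & TS_SBP_BIT) ? 1 : 0);	/* software breakpoints supported */
}
#endif
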
433 static int enter_probemode(struct target *t)
434 {
435 uint32_t tapstatus = 0;
436 int retries = 100;
437
438 tapstatus = get_tapstatus(t);
439 LOG_DEBUG("TS before PM enter = 0x%08" PRIx32, tapstatus);
440 if (tapstatus & TS_PM_BIT) {
441 LOG_DEBUG("core already in probemode");
442 return ERROR_OK;
443 }
444 scan.out[0] = PROBEMODE;
445 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
446 return ERROR_FAIL;
447 scan.out[0] = 1;
448 if (drscan(t, scan.out, scan.in, 1) != ERROR_OK)
449 return ERROR_FAIL;
450
451 while (retries--) {
452 tapstatus = get_tapstatus(t);
453 LOG_DEBUG("TS after PM enter = 0x%08" PRIx32, tapstatus);
454 if ((tapstatus & TS_PM_BIT) && (!(tapstatus & TS_EN_PM_BIT)))
455 return ERROR_OK;
456 }
457
458 LOG_ERROR("%s PM enter error, tapstatus = 0x%08" PRIx32
459 , __func__, tapstatus);
460 return ERROR_FAIL;
461 }
462
463 static int exit_probemode(struct target *t)
464 {
465 uint32_t tapstatus = get_tapstatus(t);
466 LOG_DEBUG("TS before PM exit = 0x%08" PRIx32, tapstatus);
467
468 if (!(tapstatus & TS_PM_BIT)) {
469 LOG_USER("core not in PM");
470 return ERROR_OK;
471 }
472 scan.out[0] = PROBEMODE;
473 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
474 return ERROR_FAIL;
475 scan.out[0] = 0;
476 if (drscan(t, scan.out, scan.in, 1) != ERROR_OK)
477 return ERROR_FAIL;
478 return ERROR_OK;
479 }
480
481 /* do what's needed to properly enter probemode for debug on lakemont */
482 static int halt_prep(struct target *t)
483 {
484 struct x86_32_common *x86_32 = target_to_x86_32(t);
485 if (write_hw_reg(t, DSB, PM_DSB, 0) != ERROR_OK)
486 return ERROR_FAIL;
487 LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSB].name, PM_DSB);
488 if (write_hw_reg(t, DSL, PM_DSL, 0) != ERROR_OK)
489 return ERROR_FAIL;
490 LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSL].name, PM_DSL);
491 if (write_hw_reg(t, DSAR, PM_DSAR, 0) != ERROR_OK)
492 return ERROR_FAIL;
493 LOG_DEBUG("write DSAR 0x%08" PRIx32, PM_DSAR);
494 if (write_hw_reg(t, CSB, PM_DSB, 0) != ERROR_OK)
495 return ERROR_FAIL;
496 LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSB].name, PM_DSB);
497 if (write_hw_reg(t, CSL, PM_DSL, 0) != ERROR_OK)
498 return ERROR_FAIL;
499 LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSL].name, PM_DSL);
500 if (write_hw_reg(t, DR7, PM_DR7, 0) != ERROR_OK)
501 return ERROR_FAIL;
502 LOG_DEBUG("write DR7 0x%08" PRIx32, PM_DR7);
503
504 uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
505 uint32_t csar = buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32);
506 uint32_t ssar = buf_get_u32(x86_32->cache->reg_list[SSAR].value, 0, 32);
507 uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);
508
509 /* clear VM86 and IF bits if they are set */
510 LOG_DEBUG("EFLAGS = 0x%08" PRIx32 ", VM86 = %d, IF = %d", eflags,
511 eflags & EFLAGS_VM86 ? 1 : 0,
512 eflags & EFLAGS_IF ? 1 : 0);
513 if ((eflags & EFLAGS_VM86) || (eflags & EFLAGS_IF)) {
514 x86_32->pm_regs[I(EFLAGS)] = eflags & ~(EFLAGS_VM86 | EFLAGS_IF);
515 if (write_hw_reg(t, EFLAGS, x86_32->pm_regs[I(EFLAGS)], 0) != ERROR_OK)
516 return ERROR_FAIL;
517 LOG_DEBUG("EFLAGS now = 0x%08" PRIx32 ", VM86 = %d, IF = %d",
518 x86_32->pm_regs[I(EFLAGS)],
519 x86_32->pm_regs[I(EFLAGS)] & EFLAGS_VM86 ? 1 : 0,
520 x86_32->pm_regs[I(EFLAGS)] & EFLAGS_IF ? 1 : 0);
521 }
522
523 /* set CPL to 0 for memory access */
524 if (csar & CSAR_DPL) {
525 x86_32->pm_regs[I(CSAR)] = csar & ~CSAR_DPL;
526 if (write_hw_reg(t, CSAR, x86_32->pm_regs[I(CSAR)], 0) != ERROR_OK)
527 return ERROR_FAIL;
528 LOG_DEBUG("write CSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(CSAR)]);
529 }
530 if (ssar & SSAR_DPL) {
531 x86_32->pm_regs[I(SSAR)] = ssar & ~SSAR_DPL;
532 if (write_hw_reg(t, SSAR, x86_32->pm_regs[I(SSAR)], 0) != ERROR_OK)
533 return ERROR_FAIL;
534 LOG_DEBUG("write SSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(SSAR)]);
535 }
536
537 /* if caches are enabled, disable and flush them, depending on the core version */
538 if (!(x86_32->core_type == LMT3_5) && !(cr0 & CR0_CD)) {
539 LOG_DEBUG("caching enabled CR0 = 0x%08" PRIx32, cr0);
540 if (cr0 & CR0_PG) {
541 x86_32->pm_regs[I(CR0)] = cr0 & ~CR0_PG;
542 if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
543 return ERROR_FAIL;
544 LOG_DEBUG("cleared paging CR0_PG = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
545 /* submit wbinvd to flush cache */
546 if (submit_reg_pir(t, WBINVD) != ERROR_OK)
547 return ERROR_FAIL;
548 x86_32->pm_regs[I(CR0)] =
549 x86_32->pm_regs[I(CR0)] | (CR0_CD | CR0_NW | CR0_PG);
550 if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
551 return ERROR_FAIL;
552 LOG_DEBUG("set CD, NW and PG, CR0 = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
553 }
554 }
555 return ERROR_OK;
556 }
557
558 static int do_halt(struct target *t)
559 {
560 /* needs proper handling later in case halting errors out */
561 t->state = TARGET_DEBUG_RUNNING;
562 if (enter_probemode(t) != ERROR_OK)
563 return ERROR_FAIL;
564
565 return lakemont_update_after_probemode_entry(t);
566 }
567
568 /* we need to expose the update to be able to complete the reset at SoC level */
569 int lakemont_update_after_probemode_entry(struct target *t)
570 {
571 if (save_context(t) != ERROR_OK)
572 return ERROR_FAIL;
573 if (halt_prep(t) != ERROR_OK)
574 return ERROR_FAIL;
575 t->state = TARGET_HALTED;
576
577 return target_call_event_callbacks(t, TARGET_EVENT_HALTED);
578 }
579
580 static int do_resume(struct target *t)
581 {
582 /* needs proper handling later */
583 t->state = TARGET_DEBUG_RUNNING;
584 if (restore_context(t) != ERROR_OK)
585 return ERROR_FAIL;
586 if (exit_probemode(t) != ERROR_OK)
587 return ERROR_FAIL;
588 t->state = TARGET_RUNNING;
589
590 t->debug_reason = DBG_REASON_NOTHALTED;
591 LOG_USER("target running");
592
593 return target_call_event_callbacks(t, TARGET_EVENT_RESUMED);
594 }
595
596 static int read_all_core_hw_regs(struct target *t)
597 {
598 int err;
599 uint32_t regval;
600 unsigned i;
601 struct x86_32_common *x86_32 = target_to_x86_32(t);
602 for (i = 0; i < (x86_32->cache->num_regs); i++) {
603 if (regs[i].pm_idx == NOT_AVAIL_REG)
604 continue;
605 err = read_hw_reg(t, regs[i].id, &regval, 1);
606 if (err != ERROR_OK) {
607 LOG_ERROR("%s error saving reg %s",
608 __func__, x86_32->cache->reg_list[i].name);
609 return err;
610 }
611 }
612 LOG_DEBUG("read_all_core_hw_regs read %u registers ok", i);
613 return ERROR_OK;
614 }
615
616 static int write_all_core_hw_regs(struct target *t)
617 {
618 int err;
619 unsigned i;
620 struct x86_32_common *x86_32 = target_to_x86_32(t);
621 for (i = 0; i < (x86_32->cache->num_regs); i++) {
622 if (regs[i].pm_idx == NOT_AVAIL_REG)
623 continue;
624 err = write_hw_reg(t, i, 0, 1);
625 if (err != ERROR_OK) {
626 LOG_ERROR("%s error restoring reg %s",
627 __func__, x86_32->cache->reg_list[i].name);
628 return err;
629 }
630 }
631 LOG_DEBUG("write_all_core_hw_regs wrote %u registers ok", i);
632 return ERROR_OK;
633 }
634
635 /* read reg from lakemont core shadow ram, update reg cache if needed */
636 static int read_hw_reg(struct target *t, int reg, uint32_t *regval, uint8_t cache)
637 {
638 struct x86_32_common *x86_32 = target_to_x86_32(t);
639 struct lakemont_core_reg *arch_info;
640 arch_info = x86_32->cache->reg_list[reg].arch_info;
641 x86_32->flush = 0; /* don't flush scans till we have a batch */
642 if (submit_reg_pir(t, reg) != ERROR_OK)
643 return ERROR_FAIL;
644 if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
645 return ERROR_FAIL;
646 if (submit_instruction_pir(t, SRAM2PDR) != ERROR_OK)
647 return ERROR_FAIL;
648 x86_32->flush = 1;
649 scan.out[0] = RDWRPDR;
650 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
651 return ERROR_FAIL;
652 if (drscan(t, NULL, scan.out, PDR_SIZE) != ERROR_OK)
653 return ERROR_FAIL;
654
655 jtag_add_sleep(DELAY_SUBMITPIR);
656 *regval = buf_get_u32(scan.out, 0, 32);
657 if (cache) {
658 buf_set_u32(x86_32->cache->reg_list[reg].value, 0, 32, *regval);
659 x86_32->cache->reg_list[reg].valid = true;
660 x86_32->cache->reg_list[reg].dirty = false;
661 }
662 LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32,
663 x86_32->cache->reg_list[reg].name,
664 arch_info->op,
665 *regval);
666 return ERROR_OK;
667 }
668
669 /* write lakemont core shadow ram reg, update reg cache if needed */
670 static int write_hw_reg(struct target *t, int reg, uint32_t regval, uint8_t cache)
671 {
672 struct x86_32_common *x86_32 = target_to_x86_32(t);
673 struct lakemont_core_reg *arch_info;
674 arch_info = x86_32->cache->reg_list[reg].arch_info;
675
676 uint8_t reg_buf[4];
677 if (cache)
678 regval = buf_get_u32(x86_32->cache->reg_list[reg].value, 0, 32);
679 buf_set_u32(reg_buf, 0, 32, regval);
680 LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32,
681 x86_32->cache->reg_list[reg].name,
682 arch_info->op,
683 regval);
684
685 x86_32->flush = 0; /* don't flush scans till we have a batch */
686 if (submit_reg_pir(t, reg) != ERROR_OK)
687 return ERROR_FAIL;
688 if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
689 return ERROR_FAIL;
690 scan.out[0] = RDWRPDR;
691 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
692 return ERROR_FAIL;
693 if (drscan(t, reg_buf, scan.out, PDR_SIZE) != ERROR_OK)
694 return ERROR_FAIL;
695 x86_32->flush = 1;
696 if (submit_instruction_pir(t, PDR2SRAM) != ERROR_OK)
697 return ERROR_FAIL;
698
699 /* we are writing from the cache so ensure we reset flags */
700 if (cache) {
701 x86_32->cache->reg_list[reg].dirty = false;
702 x86_32->cache->reg_list[reg].valid = false;
703 }
704 return ERROR_OK;
705 }
706
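/*
 * Illustrative sketch, not part of the original driver: a cache-bypassing
 * read-modify-write built from the two helpers above; with cache == 0 the
 * reg_cache contents are left untouched. The example_* name is
 * hypothetical.
 */
#if 0
static int example_set_dr7_bits(struct target *t, uint32_t set_bits)
{
	uint32_t dr7;
	if (read_hw_reg(t, DR7, &dr7, 0) != ERROR_OK)
		return ERROR_FAIL;
	return write_hw_reg(t, DR7, dr7 | set_bits, 0);
}
#endif
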
707 static bool is_paging_enabled(struct target *t)
708 {
709 struct x86_32_common *x86_32 = target_to_x86_32(t);
710 if (x86_32->pm_regs[I(CR0)] & CR0_PG)
711 return true;
712 else
713 return false;
714 }
715
716 static uint8_t get_num_user_regs(struct target *t)
717 {
718 struct x86_32_common *x86_32 = target_to_x86_32(t);
719 return x86_32->cache->num_regs;
720 }
721 /* value of the CR0.PG (paging enabled) bit influences memory reads/writes */
722 static int disable_paging(struct target *t)
723 {
724 struct x86_32_common *x86_32 = target_to_x86_32(t);
725 x86_32->pm_regs[I(CR0)] = x86_32->pm_regs[I(CR0)] & ~CR0_PG;
726 int err = x86_32->write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0);
727 if (err != ERROR_OK) {
728 LOG_ERROR("%s error disabling paging", __func__);
729 return err;
730 }
731 return err;
732 }
733
734 static int enable_paging(struct target *t)
735 {
736 struct x86_32_common *x86_32 = target_to_x86_32(t);
737 x86_32->pm_regs[I(CR0)] = (x86_32->pm_regs[I(CR0)] | CR0_PG);
738 int err = x86_32->write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0);
739 if (err != ERROR_OK) {
740 LOG_ERROR("%s error enabling paging", __func__);
741 return err;
742 }
743 return err;
744 }
745
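/*
 * Illustrative sketch, not part of the original driver: the kind of
 * bracket x86_32_common can put around a physical-memory access using the
 * paging hooks above. The example_* name is hypothetical.
 */
#if 0
static int example_access_physical(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	bool paging = x86_32->is_paging_enabled(t);
	if (paging && x86_32->disable_paging(t) != ERROR_OK)
		return ERROR_FAIL;
	/* ... with CR0.PG clear, linear addresses equal physical addresses ... */
	if (paging && x86_32->enable_paging(t) != ERROR_OK)
		return ERROR_FAIL;
	return ERROR_OK;
}
#endif
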
746 static bool sw_bpts_supported(struct target *t)
747 {
748 uint32_t tapstatus = get_tapstatus(t);
749 if (tapstatus & TS_SBP_BIT)
750 return true;
751 else
752 return false;
753 }
754
755 static int transaction_status(struct target *t)
756 {
757 uint32_t tapstatus = get_tapstatus(t);
758 if ((TS_EN_PM_BIT | TS_PRDY_BIT) & tapstatus) {
759 LOG_ERROR("%s transaction error tapstatus = 0x%08" PRIx32
760 , __func__, tapstatus);
761 return ERROR_FAIL;
762 } else {
763 return ERROR_OK;
764 }
765 }
766
767 static int submit_instruction(struct target *t, int num)
768 {
769 int err = submit_instruction_pir(t, num);
770 if (err != ERROR_OK) {
771 LOG_ERROR("%s error submitting pir", __func__);
772 return err;
773 }
774 return err;
775 }
776
777 static int submit_reg_pir(struct target *t, int num)
778 {
779 LOG_DEBUG("reg %s op=0x%016" PRIx64, regs[num].name, regs[num].op);
780 int err = submit_pir(t, regs[num].op);
781 if (err != ERROR_OK) {
782 LOG_ERROR("%s error submitting pir", __func__);
783 return err;
784 }
785 return err;
786 }
787
788 static int submit_instruction_pir(struct target *t, int num)
789 {
790 LOG_DEBUG("%s op=0x%016" PRIx64, instructions[num].name,
791 instructions[num].op);
792 int err = submit_pir(t, instructions[num].op);
793 if (err != ERROR_OK) {
794 LOG_ERROR("%s error submitting pir", __func__);
795 return err;
796 }
797 return err;
798 }
799
800 /*
801 * PIR (Probe Mode Instruction Register), SUBMITPIR is an "IR only" TAP
802 * command; there is no corresponding data register
803 */
804 static int submit_pir(struct target *t, uint64_t op)
805 {
806 struct x86_32_common *x86_32 = target_to_x86_32(t);
807
808 uint8_t op_buf[8];
809 buf_set_u64(op_buf, 0, 64, op);
810 int flush = x86_32->flush;
811 x86_32->flush = 0;
812 scan.out[0] = WRPIR;
813 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
814 return ERROR_FAIL;
815 if (drscan(t, op_buf, scan.out, PIR_SIZE) != ERROR_OK)
816 return ERROR_FAIL;
817 scan.out[0] = SUBMITPIR;
818 x86_32->flush = flush;
819 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
820 return ERROR_FAIL;
821 jtag_add_sleep(DELAY_SUBMITPIR);
822 return ERROR_OK;
823 }
824
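/*
 * Illustrative sketch, not part of the original driver: a stand-alone
 * probemode instruction such as WBINVD is a single PIR submission, while
 * register accesses chain a register PIR with SRAMACCESS and
 * SRAM2PDR/PDR2SRAM as read_hw_reg()/write_hw_reg() do. The example_*
 * name is hypothetical.
 */
#if 0
static int example_flush_core_caches(struct target *t)
{
	/* looks up instructions[WBINVD].op and submits it as one PIR */
	return submit_instruction_pir(t, WBINVD);
}
#endif
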
825 int lakemont_init_target(struct command_context *cmd_ctx, struct target *t)
826 {
827 lakemont_build_reg_cache(t);
828 t->state = TARGET_RUNNING;
829 t->debug_reason = DBG_REASON_NOTHALTED;
830 return ERROR_OK;
831 }
832
833 int lakemont_init_arch_info(struct target *t, struct x86_32_common *x86_32)
834 {
835 x86_32->submit_instruction = submit_instruction;
836 x86_32->transaction_status = transaction_status;
837 x86_32->read_hw_reg = read_hw_reg;
838 x86_32->write_hw_reg = write_hw_reg;
839 x86_32->sw_bpts_supported = sw_bpts_supported;
840 x86_32->get_num_user_regs = get_num_user_regs;
841 x86_32->is_paging_enabled = is_paging_enabled;
842 x86_32->disable_paging = disable_paging;
843 x86_32->enable_paging = enable_paging;
844 return ERROR_OK;
845 }
846
847 int lakemont_poll(struct target *t)
848 {
849 /* LMT1 PMCR register currently allows code breakpoints, data breakpoints,
850 * single stepping and shutdowns to be redirected to PM but does not allow
851 * redirecting into PM as a result of SMM enter and SMM exit
852 */
853 uint32_t ts = get_tapstatus(t);
854
855 if (ts == 0xFFFFFFFF && t->state != TARGET_DEBUG_RUNNING) {
856 /* something is wrong here */
857 LOG_ERROR("tapstatus invalid - scan_chain serialization or locked JTAG access issues");
858 /* TODO: Give a hint that unlocking is wrong or maybe a
859 * 'jtag arp_init' helps
860 */
861 t->state = TARGET_DEBUG_RUNNING;
862 return ERROR_OK;
863 }
864
865 if (t->state == TARGET_HALTED && (!(ts & TS_PM_BIT))) {
866 LOG_INFO("target running for unknown reason");
867 t->state = TARGET_RUNNING;
868 }
869
870 if (t->state == TARGET_RUNNING &&
871 t->state != TARGET_DEBUG_RUNNING) {
872
873 if ((ts & TS_PM_BIT) && (ts & TS_PMCR_BIT)) {
874
875 LOG_DEBUG("redirect to PM, tapstatus=0x%08" PRIx32, get_tapstatus(t));
876
877 t->state = TARGET_DEBUG_RUNNING;
878 if (save_context(t) != ERROR_OK)
879 return ERROR_FAIL;
880 if (halt_prep(t) != ERROR_OK)
881 return ERROR_FAIL;
882 t->state = TARGET_HALTED;
883 t->debug_reason = DBG_REASON_UNDEFINED;
884
885 struct x86_32_common *x86_32 = target_to_x86_32(t);
886 uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
887 uint32_t dr6 = buf_get_u32(x86_32->cache->reg_list[DR6].value, 0, 32);
888 uint32_t hwbreakpoint = (uint32_t)-1;
889
890 if (dr6 & DR6_BRKDETECT_0)
891 hwbreakpoint = 0;
892 if (dr6 & DR6_BRKDETECT_1)
893 hwbreakpoint = 1;
894 if (dr6 & DR6_BRKDETECT_2)
895 hwbreakpoint = 2;
896 if (dr6 & DR6_BRKDETECT_3)
897 hwbreakpoint = 3;
898
899 if (hwbreakpoint != (uint32_t)-1) {
900 uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
901 uint32_t type = dr7 & (0x03 << (DR7_RW_SHIFT + hwbreakpoint*DR7_RW_LEN_SIZE));
902 if (type == DR7_BP_EXECUTE) {
903 LOG_USER("hit hardware breakpoint (hwreg=%" PRIu32 ") at 0x%08" PRIx32, hwbreakpoint, eip);
904 } else {
905 uint32_t address = 0;
906 switch (hwbreakpoint) {
907 default:
908 case 0:
909 address = buf_get_u32(x86_32->cache->reg_list[DR0].value, 0, 32);
910 break;
911 case 1:
912 address = buf_get_u32(x86_32->cache->reg_list[DR1].value, 0, 32);
913 break;
914 case 2:
915 address = buf_get_u32(x86_32->cache->reg_list[DR2].value, 0, 32);
916 break;
917 case 3:
918 address = buf_get_u32(x86_32->cache->reg_list[DR3].value, 0, 32);
919 break;
920 }
921 LOG_USER("hit '%s' watchpoint for 0x%08" PRIx32 " (hwreg=%" PRIu32 ") at 0x%08" PRIx32,
922 type == DR7_BP_WRITE ? "write" : "access", address,
923 hwbreakpoint, eip);
924 }
925 t->debug_reason = DBG_REASON_BREAKPOINT;
926 } else {
927 /* Check if the target hit a software breakpoint.
928 * ! Watch out: EIP is currently pointing after the breakpoint opcode
929 */
930 struct breakpoint *bp = NULL;
931 bp = breakpoint_find(t, eip-1);
932 if (bp) {
933 t->debug_reason = DBG_REASON_BREAKPOINT;
934 if (bp->type == BKPT_SOFT) {
935 /* The EIP is now pointing to the next byte after the
936 * breakpoint instruction. This needs to be corrected.
937 */
938 buf_set_u32(x86_32->cache->reg_list[EIP].value, 0, 32, eip-1);
939 x86_32->cache->reg_list[EIP].dirty = true;
940 x86_32->cache->reg_list[EIP].valid = true;
941 LOG_USER("hit software breakpoint at 0x%08" PRIx32, eip-1);
942 } else {
943 /* it's not a hardware breakpoint (checked already in DR6 state)
944 * and it's also not a software breakpoint ...
945 */
946 LOG_USER("hit unknown breakpoint at 0x%08" PRIx32, eip);
947 }
948 } else {
949
950 /* There is also the case that we hit a breakpoint instruction,
951  * which was not set by us. This needs to be handled by the
952 * application that introduced the breakpoint.
953 */
954
955 LOG_USER("unknown break reason at 0x%08" PRIx32, eip);
956 }
957 }
958
959 return target_call_event_callbacks(t, TARGET_EVENT_HALTED);
960 }
961 }
962
963 return ERROR_OK;
964 }
965
966 int lakemont_arch_state(struct target *t)
967 {
968 struct x86_32_common *x86_32 = target_to_x86_32(t);
969
970 LOG_USER("target halted due to %s at 0x%08" PRIx32 " in %s mode",
971 debug_reason_name(t),
972 buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32),
973 (buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32) & CR0_PE) ? "protected" : "real");
974
975 return ERROR_OK;
976 }
977
978 int lakemont_halt(struct target *t)
979 {
980 if (t->state == TARGET_RUNNING) {
981 t->debug_reason = DBG_REASON_DBGRQ;
982 if (do_halt(t) != ERROR_OK)
983 return ERROR_FAIL;
984 return ERROR_OK;
985 } else {
986 LOG_ERROR("%s target not running", __func__);
987 return ERROR_FAIL;
988 }
989 }
990
991 int lakemont_resume(struct target *t, int current, target_addr_t address,
992 int handle_breakpoints, int debug_execution)
993 {
994 struct breakpoint *bp = NULL;
995 struct x86_32_common *x86_32 = target_to_x86_32(t);
996
997 if (check_not_halted(t))
998 return ERROR_TARGET_NOT_HALTED;
999 /* TODO lakemont_enable_breakpoints(t); */
1000 if (t->state == TARGET_HALTED) {
1001
1002 /* resuming from a software breakpoint needs some special handling */
1003 uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
1004 bp = breakpoint_find(t, eip);
1005 if (bp /*&& bp->type == BKPT_SOFT*/) {
1006 /* the step will step over the breakpoint */
1007 if (lakemont_step(t, 0, 0, 1) != ERROR_OK) {
1008 LOG_ERROR("%s stepping over a software breakpoint at 0x%08" PRIx32 " "
1009 "failed to resume the target", __func__, eip);
1010 return ERROR_FAIL;
1011 }
1012 }
1013
1014 /* if breakpoints are enabled, we need to redirect these into probe mode */
1015 struct breakpoint *activeswbp = t->breakpoints;
1016 while (activeswbp && !activeswbp->is_set)
1017 activeswbp = activeswbp->next;
1018 struct watchpoint *activehwbp = t->watchpoints;
1019 while (activehwbp && !activehwbp->is_set)
1020 activehwbp = activehwbp->next;
1021 if (activeswbp || activehwbp)
1022 buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
1023 if (do_resume(t) != ERROR_OK)
1024 return ERROR_FAIL;
1025 } else {
1026 LOG_USER("target not halted");
1027 return ERROR_FAIL;
1028 }
1029 return ERROR_OK;
1030 }
1031
1032 int lakemont_step(struct target *t, int current,
1033 target_addr_t address, int handle_breakpoints)
1034 {
1035 struct x86_32_common *x86_32 = target_to_x86_32(t);
1036 uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
1037 uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
1038 uint32_t pmcr = buf_get_u32(x86_32->cache->reg_list[PMCR].value, 0, 32);
1039 struct breakpoint *bp = NULL;
1040 int retval = ERROR_OK;
1041 uint32_t tapstatus = 0;
1042
1043 if (check_not_halted(t))
1044 return ERROR_TARGET_NOT_HALTED;
1045 bp = breakpoint_find(t, eip);
1046 if (retval == ERROR_OK && bp/*&& bp->type == BKPT_SOFT*/) {
1047 /* TODO: This should only be done for software breakpoints.
1048 * Stepping from hardware breakpoints should be possible with the resume flag
1049 * Needs testing.
1050 */
1051 retval = x86_32_common_remove_breakpoint(t, bp);
1052 }
1053
1054 /* Set EFLAGS[TF] and PMCR[IR], exit pm and wait for PRDY# */
1055 LOG_DEBUG("modifying PMCR = 0x%08" PRIx32 " and EFLAGS = 0x%08" PRIx32, pmcr, eflags);
1056 eflags = eflags | (EFLAGS_TF | EFLAGS_RF);
1057 buf_set_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32, eflags);
1058 buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
1059 LOG_DEBUG("EFLAGS [TF] [RF] bits set=0x%08" PRIx32 ", PMCR=0x%08" PRIx32 ", EIP=0x%08" PRIx32,
1060 eflags, pmcr, eip);
1061
1062 /* Returned value unused. Can this line be removed? */
1063 get_tapstatus(t);
1064
1065 t->debug_reason = DBG_REASON_SINGLESTEP;
1066 t->state = TARGET_DEBUG_RUNNING;
1067 if (restore_context(t) != ERROR_OK)
1068 return ERROR_FAIL;
1069 if (exit_probemode(t) != ERROR_OK)
1070 return ERROR_FAIL;
1071
1072 target_call_event_callbacks(t, TARGET_EVENT_RESUMED);
1073
1074 tapstatus = get_tapstatus(t);
1075 if (tapstatus & (TS_PM_BIT | TS_EN_PM_BIT | TS_PRDY_BIT | TS_PMCR_BIT)) {
1076 /* target has stopped */
1077 if (save_context(t) != ERROR_OK)
1078 return ERROR_FAIL;
1079 if (halt_prep(t) != ERROR_OK)
1080 return ERROR_FAIL;
1081 t->state = TARGET_HALTED;
1082
1083 LOG_USER("step done from EIP 0x%08" PRIx32 " to 0x%08" PRIx32, eip,
1084 buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32));
1085 target_call_event_callbacks(t, TARGET_EVENT_HALTED);
1086 } else {
1087 /* target didn't stop
1088 * I hope the poll() will catch it, but the deleted breakpoint is gone
1089 */
1090 LOG_ERROR("%s target didn't stop after executing a single step", __func__);
1091 t->state = TARGET_RUNNING;
1092 return ERROR_FAIL;
1093 }
1094
1095 /* try to re-apply the breakpoint, even if the step failed
1096 * TODO: When a bp was set, we should try to stop the target - fix the return above
1097 */
1098 if (bp/*&& bp->type == BKPT_SOFT*/) {
1099 /* TODO: This should only be done for software breakpoints.
1100 * Stepping from hardware breakpoints should be possible with the resume flag
1101 * Needs testing.
1102 */
1103 retval = x86_32_common_add_breakpoint(t, bp);
1104 }
1105
1106 return retval;
1107 }
1108
1109 static int lakemont_reset_break(struct target *t)
1110 {
1111 struct x86_32_common *x86_32 = target_to_x86_32(t);
1112 struct jtag_tap *saved_tap = x86_32->curr_tap;
1113 struct scan_field *fields = &scan.field;
1114
1115 int retval = ERROR_OK;
1116
1117 LOG_DEBUG("issuing port 0xcf9 reset");
1118
1119 /* prepare resetbreak setting the proper bits in CLTAPC_CPU_VPREQ */
1120 x86_32->curr_tap = jtag_tap_by_position(1);
1121 if (!x86_32->curr_tap) {
1122 x86_32->curr_tap = saved_tap;
1123 LOG_ERROR("%s could not select quark_x10xx.cltap", __func__);
1124 return ERROR_FAIL;
1125 }
1126
1127 fields->in_value = NULL;
1128 fields->num_bits = 8;
1129
1130 /* select CLTAPC_CPU_VPREQ instruction*/
1131 scan.out[0] = 0x51;
1132 fields->out_value = ((uint8_t *)scan.out);
1133 jtag_add_ir_scan(x86_32->curr_tap, fields, TAP_IDLE);
1134 retval = jtag_execute_queue();
1135 if (retval != ERROR_OK) {
1136 x86_32->curr_tap = saved_tap;
1137 LOG_ERROR("%s irscan failed to execute queue", __func__);
1138 return retval;
1139 }
1140
1141 /* set enable_preq_on_reset & enable_preq_on_reset2 bits*/
1142 scan.out[0] = 0x06;
1143 fields->out_value = ((uint8_t *)scan.out);
1144 jtag_add_dr_scan(x86_32->curr_tap, 1, fields, TAP_IDLE);
1145 retval = jtag_execute_queue();
1146 if (retval != ERROR_OK) {
1147 LOG_ERROR("%s drscan failed to execute queue", __func__);
1148 x86_32->curr_tap = saved_tap;
1149 return retval;
1150 }
1151
1152 /* restore current tap */
1153 x86_32->curr_tap = saved_tap;
1154
1155 return ERROR_OK;
1156 }
1157
1158 /*
1159 * If we ever get an adapter with support for PREQ# and PRDY#, we should
1160 * update this function to add support for using those two signals.
1161 *
1162 * Meanwhile, we're assuming that we only support reset break.
1163 */
1164 int lakemont_reset_assert(struct target *t)
1165 {
1166 struct x86_32_common *x86_32 = target_to_x86_32(t);
1167 /* write 0x6 to I/O port 0xcf9 to cause the reset */
1168 uint8_t cf9_reset_val = 0x6;
1169 int retval;
1170
1171 LOG_DEBUG(" ");
1172
1173 if (t->state != TARGET_HALTED) {
1174 LOG_DEBUG("target must be halted first");
1175 retval = lakemont_halt(t);
1176 if (retval != ERROR_OK) {
1177 LOG_ERROR("could not halt target");
1178 return retval;
1179 }
1180 x86_32->forced_halt_for_reset = true;
1181 }
1182
1183 if (t->reset_halt) {
1184 retval = lakemont_reset_break(t);
1185 if (retval != ERROR_OK)
1186 return retval;
1187 }
1188
1189 retval = x86_32_common_write_io(t, 0xcf9, BYTE, &cf9_reset_val);
1190 if (retval != ERROR_OK) {
1191 LOG_ERROR("could not write to port 0xcf9");
1192 return retval;
1193 }
1194
1195 if (!t->reset_halt && x86_32->forced_halt_for_reset) {
1196 x86_32->forced_halt_for_reset = false;
1197 retval = lakemont_resume(t, true, 0x00, false, true);
1198 if (retval != ERROR_OK)
1199 return retval;
1200 }
1201
1202 /* remove breakpoints and watchpoints */
1203 x86_32_common_reset_breakpoints_watchpoints(t);
1204
1205 return ERROR_OK;
1206 }
1207
1208 int lakemont_reset_deassert(struct target *t)
1209 {
1210 int retval;
1211
1212 LOG_DEBUG(" ");
1213
1214 if (target_was_examined(t)) {
1215 retval = lakemont_poll(t);
1216 if (retval != ERROR_OK)
1217 return retval;
1218 }
1219
1220 if (t->reset_halt) {
1221 /* entered PM after reset, update the state */
1222 retval = lakemont_update_after_probemode_entry(t);
1223 if (retval != ERROR_OK) {
1224 LOG_ERROR("could not update state after probemode entry");
1225 return retval;
1226 }
1227
1228 if (t->state != TARGET_HALTED) {
1229 LOG_WARNING("%s: ran after reset and before halt ...",
1230 target_name(t));
1231 if (target_was_examined(t)) {
1232 retval = target_halt(t);
1233 if (retval != ERROR_OK)
1234 return retval;
1235 } else {
1236 t->state = TARGET_UNKNOWN;
1237 }
1238 }
1239 }
1240
1241 return ERROR_OK;
1242 }
