src/target/x86_32_common.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /*
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * Adrian Burns (adrian.burns@intel.com)
7 * Thomas Faust (thomas.faust@intel.com)
8 * Ivan De Cesaris (ivan.de.cesaris@intel.com)
9 * Julien Carreno (julien.carreno@intel.com)
10 * Jeffrey Maxwell (jeffrey.r.maxwell@intel.com)
11 *
12 * Contact Information:
13 * Intel Corporation
14 */
15
16 /**
17 * @file
18 * This implements generic x86 32 bit memory and breakpoint operations.
19 */
20
21 #ifdef HAVE_CONFIG_H
22 #include "config.h"
23 #endif
24
25 #include <helper/log.h>
26
27 #include "target.h"
28 #include "target_type.h"
29 #include "register.h"
30 #include "breakpoints.h"
31 #include "x86_32_common.h"
32
33 static int set_debug_regs(struct target *t, uint32_t address,
34 uint8_t bp_num, uint8_t bp_type, uint8_t bp_length);
35 static int unset_debug_regs(struct target *t, uint8_t bp_num);
36 static int read_mem(struct target *t, uint32_t size,
37 uint32_t addr, uint8_t *buf);
38 static int write_mem(struct target *t, uint32_t size,
39 uint32_t addr, const uint8_t *buf);
40 static int calcaddr_physfromlin(struct target *t, target_addr_t addr,
41 target_addr_t *physaddr);
42 static int read_phys_mem(struct target *t, uint32_t phys_address,
43 uint32_t size, uint32_t count, uint8_t *buffer);
44 static int write_phys_mem(struct target *t, uint32_t phys_address,
45 uint32_t size, uint32_t count, const uint8_t *buffer);
46 static int set_breakpoint(struct target *target,
47 struct breakpoint *breakpoint);
48 static int unset_breakpoint(struct target *target,
49 struct breakpoint *breakpoint);
50 static int set_watchpoint(struct target *target,
51 struct watchpoint *watchpoint);
52 static int unset_watchpoint(struct target *target,
53 struct watchpoint *watchpoint);
54 static int read_hw_reg_to_cache(struct target *t, int num);
55 static int write_hw_reg_from_cache(struct target *t, int num);
56
57 int x86_32_get_gdb_reg_list(struct target *t,
58 struct reg **reg_list[], int *reg_list_size,
59 enum target_register_class reg_class)
60 {
61
62 struct x86_32_common *x86_32 = target_to_x86_32(t);
63 int i;
64 *reg_list_size = x86_32->cache->num_regs;
65 LOG_DEBUG("num_regs=%d, reg_class=%d", (*reg_list_size), reg_class);
66 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
67 if (!*reg_list) {
68 LOG_ERROR("%s out of memory", __func__);
69 return ERROR_FAIL;
70 }
71 /* this will copy the values from our reg list to gdbs */
72 for (i = 0; i < (*reg_list_size); i++) {
73 (*reg_list)[i] = &x86_32->cache->reg_list[i];
74 LOG_DEBUG("value %s = %08" PRIx32, x86_32->cache->reg_list[i].name,
75 buf_get_u32(x86_32->cache->reg_list[i].value, 0, 32));
76 }
77 return ERROR_OK;
78 }
79
80 int x86_32_common_init_arch_info(struct target *t, struct x86_32_common *x86_32)
81 {
82 t->arch_info = x86_32;
83 x86_32->common_magic = X86_32_COMMON_MAGIC;
84 x86_32->num_hw_bpoints = MAX_DEBUG_REGS;
85 x86_32->hw_break_list = calloc(x86_32->num_hw_bpoints,
86 sizeof(struct x86_32_dbg_reg));
87 if (!x86_32->hw_break_list) {
88 LOG_ERROR("%s out of memory", __func__);
89 return ERROR_FAIL;
90 }
91 x86_32->curr_tap = t->tap;
92 x86_32->fast_data_area = NULL;
93 x86_32->flush = 1;
94 x86_32->read_hw_reg_to_cache = read_hw_reg_to_cache;
95 x86_32->write_hw_reg_from_cache = write_hw_reg_from_cache;
96 return ERROR_OK;
97 }
98
99 int x86_32_common_mmu(struct target *t, int *enabled)
100 {
101 *enabled = true;
102 return ERROR_OK;
103 }
104
105 int x86_32_common_virt2phys(struct target *t, target_addr_t address, target_addr_t *physical)
106 {
107 struct x86_32_common *x86_32 = target_to_x86_32(t);
108
109 /*
110 * We need to ignore 'segmentation' for now, as OpenOCD can't handle
111 * segmented addresses.
112 * In protected mode that is almost OK, as (almost) any known OS is using
113 * flat segmentation. In real mode we use the base of the DS segment,
114 * as we don't know better ...
115 */
116
117 uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);
118 if (!(cr0 & CR0_PG)) {
119 /* target halted in real mode */
120 /* TODO: needs validation !!! */
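/* Illustrative example: with a DS base of 0x00010000 and a linear address of
 * 0x1234, the physical address computed below is 0x00011234. */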
121 uint32_t dsb = buf_get_u32(x86_32->cache->reg_list[DSB].value, 0, 32);
122 *physical = dsb + address;
123
124 } else {
125 /* target halted in protected mode */
126 if (calcaddr_physfromlin(t, address, physical) != ERROR_OK) {
127 LOG_ERROR("%s failed to calculate physical address from " TARGET_ADDR_FMT,
128 __func__, address);
129 return ERROR_FAIL;
130 }
131 }
132 return ERROR_OK;
133 }
134
135 int x86_32_common_read_phys_mem(struct target *t, target_addr_t phys_address,
136 uint32_t size, uint32_t count, uint8_t *buffer)
137 {
138 struct x86_32_common *x86_32 = target_to_x86_32(t);
139 int error;
140
141 error = read_phys_mem(t, phys_address, size, count, buffer);
142 if (error != ERROR_OK)
143 return error;
144
145 /* After reading memory from target, we must replace software breakpoints
146 * with the original instructions again.
147 */
148 struct swbp_mem_patch *iter = x86_32->swbbp_mem_patch_list;
149 while (iter) {
150 if (iter->physaddr >= phys_address && iter->physaddr < phys_address+(size*count)) {
151 uint32_t offset = iter->physaddr - phys_address;
152 buffer[offset] = iter->orig_byte;
153 }
154 iter = iter->next;
155 }
156 return ERROR_OK;
157 }
158
159 static int read_phys_mem(struct target *t, uint32_t phys_address,
160 uint32_t size, uint32_t count, uint8_t *buffer)
161 {
162 int retval = ERROR_OK;
163 bool pg_disabled = false;
164 LOG_DEBUG("addr=0x%08" PRIx32 ", size=%" PRIu32 ", count=0x%" PRIx32 ", buf=%p",
165 phys_address, size, count, buffer);
166 struct x86_32_common *x86_32 = target_to_x86_32(t);
167
168 if (check_not_halted(t))
169 return ERROR_TARGET_NOT_HALTED;
170 if (!count || !buffer || !phys_address) {
171 LOG_ERROR("%s invalid params count=0x%" PRIx32 ", buf=%p, addr=0x%08" PRIx32,
172 __func__, count, buffer, phys_address);
173 return ERROR_COMMAND_ARGUMENT_INVALID;
174 }
175
176 /* to access physical memory, switch off the CR0.PG bit */
177 if (x86_32->is_paging_enabled(t)) {
178 retval = x86_32->disable_paging(t);
179 if (retval != ERROR_OK) {
180 LOG_ERROR("%s could not disable paging", __func__);
181 return retval;
182 }
183 pg_disabled = true;
184 }
185
186 for (uint32_t i = 0; i < count; i++) {
187 switch (size) {
188 case BYTE:
189 retval = read_mem(t, size, phys_address + i, buffer + i);
190 break;
191 case WORD:
192 retval = read_mem(t, size, phys_address + i * 2, buffer + i * 2);
193 break;
194 case DWORD:
195 retval = read_mem(t, size, phys_address + i * 4, buffer + i * 4);
196 break;
197 default:
198 LOG_ERROR("%s invalid read size", __func__);
199 break;
200 }
201 if (retval != ERROR_OK)
202 break;
203 }
204 /* restore CR0.PG bit if needed (regardless of retval) */
205 if (pg_disabled) {
206 int retval2 = x86_32->enable_paging(t);
207 if (retval2 != ERROR_OK) {
208 LOG_ERROR("%s could not enable paging", __func__);
209 return retval2;
210 }
211 }
212 /* TODO: After reading memory from target, we must replace
213 * software breakpoints with the original instructions again.
214 * Solve this with the breakpoint fix
215 */
216 return retval;
217 }
218
219 int x86_32_common_write_phys_mem(struct target *t, target_addr_t phys_address,
220 uint32_t size, uint32_t count, const uint8_t *buffer)
221 {
222 struct x86_32_common *x86_32 = target_to_x86_32(t);
223 int error = ERROR_OK;
224 uint8_t *newbuffer = NULL;
225
226 check_not_halted(t);
227 if (!count || !buffer || !phys_address) {
228 LOG_ERROR("%s invalid params count=0x%" PRIx32 ", buf=%p, addr=" TARGET_ADDR_FMT,
229 __func__, count, buffer, phys_address);
230 return ERROR_COMMAND_ARGUMENT_INVALID;
231 }
232 /* Before writing memory to target, we must update software breakpoints
233 * with the new instructions and patch the memory buffer with the
234 * breakpoint instruction.
235 */
236 newbuffer = malloc(size*count);
237 if (!newbuffer) {
238 LOG_ERROR("%s out of memory", __func__);
239 return ERROR_FAIL;
240 }
241 memcpy(newbuffer, buffer, size*count);
242 struct swbp_mem_patch *iter = x86_32->swbbp_mem_patch_list;
243 while (iter) {
244 if (iter->physaddr >= phys_address && iter->physaddr < phys_address+(size*count)) {
245 uint32_t offset = iter->physaddr - phys_address;
246 newbuffer[offset] = SW_BP_OPCODE;
247
248 /* update the breakpoint */
249 struct breakpoint *pbiter = t->breakpoints;
250 while (pbiter && pbiter->unique_id != iter->swbp_unique_id)
251 pbiter = pbiter->next;
252 if (pbiter)
253 pbiter->orig_instr[0] = buffer[offset];
254 }
255 iter = iter->next;
256 }
257
258 error = write_phys_mem(t, phys_address, size, count, newbuffer);
259 free(newbuffer);
260 return error;
261 }
262
263 static int write_phys_mem(struct target *t, uint32_t phys_address,
264 uint32_t size, uint32_t count, const uint8_t *buffer)
265 {
266 int retval = ERROR_OK;
267 bool pg_disabled = false;
268 struct x86_32_common *x86_32 = target_to_x86_32(t);
269 LOG_DEBUG("addr=0x%08" PRIx32 ", size=%" PRIu32 ", count=0x%" PRIx32 ", buf=%p",
270 phys_address, size, count, buffer);
271
272 check_not_halted(t);
273 if (!count || !buffer || !phys_address) {
274 LOG_ERROR("%s invalid params count=0x%" PRIx32 ", buf=%p, addr=0x%08" PRIx32,
275 __func__, count, buffer, phys_address);
276 return ERROR_COMMAND_ARGUMENT_INVALID;
277 }
278 /* TODO: Before writing memory to target, we must update
279 * software breakpoints with the new instructions and
280 * patch the memory buffer with the breakpoint instruction.
281 * Solve this with the breakpoint fix
282 */
283
284 /* to access physical memory, switch off the CR0.PG bit */
285 if (x86_32->is_paging_enabled(t)) {
286 retval = x86_32->disable_paging(t);
287 if (retval != ERROR_OK) {
288 LOG_ERROR("%s could not disable paging", __func__);
289 return retval;
290 }
291 pg_disabled = true;
292 }
293 for (uint32_t i = 0; i < count; i++) {
294 switch (size) {
295 case BYTE:
296 retval = write_mem(t, size, phys_address + i, buffer + i);
297 break;
298 case WORD:
299 retval = write_mem(t, size, phys_address + i * 2, buffer + i * 2);
300 break;
301 case DWORD:
302 retval = write_mem(t, size, phys_address + i * 4, buffer + i * 4);
303 break;
304 default:
305 LOG_DEBUG("invalid read size");
306 break;
307 }
308 }
309 /* restore CR0.PG bit if needed (regardless of retval) */
310 if (pg_disabled) {
311 retval = x86_32->enable_paging(t);
312 if (retval != ERROR_OK) {
313 LOG_ERROR("%s could not enable paging", __func__);
314 return retval;
315 }
316 }
317 return retval;
318 }
319
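/* Read one byte/word/dword from linear address 'addr' by submitting probe-mode
 * instructions: EAX carries the address and the value is returned in EDX. */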
320 static int read_mem(struct target *t, uint32_t size,
321 uint32_t addr, uint8_t *buf)
322 {
323 struct x86_32_common *x86_32 = target_to_x86_32(t);
324
325 /* if the CS.D bit is 1 this is a 32-bit code segment, otherwise 16-bit */
326 bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
327 int retval = x86_32->write_hw_reg(t, EAX, addr, 0);
328 if (retval != ERROR_OK) {
329 LOG_ERROR("%s error write EAX", __func__);
330 return retval;
331 }
332
333 switch (size) {
334 case BYTE:
335 if (use32)
336 retval = x86_32->submit_instruction(t, MEMRDB32);
337 else
338 retval = x86_32->submit_instruction(t, MEMRDB16);
339 break;
340 case WORD:
341 if (use32)
342 retval = x86_32->submit_instruction(t, MEMRDH32);
343 else
344 retval = x86_32->submit_instruction(t, MEMRDH16);
345 break;
346 case DWORD:
347 if (use32)
348 retval = x86_32->submit_instruction(t, MEMRDW32);
349 else
350 retval = x86_32->submit_instruction(t, MEMRDW16);
351 break;
352 default:
353 LOG_ERROR("%s invalid read mem size", __func__);
354 break;
355 }
356
357 if (retval != ERROR_OK)
358 return retval;
359
360 /* read_hw_reg() will write to 4 bytes (uint32_t)
361 * Watch out, the buffer passed into read_mem() might be 1 or 2 bytes.
362 */
363 uint32_t regval;
364 retval = x86_32->read_hw_reg(t, EDX, &regval, 0);
365
366 if (retval != ERROR_OK) {
367 LOG_ERROR("%s error read EDX", __func__);
368 return retval;
369 }
370 for (uint8_t i = 0; i < size; i++)
371 buf[i] = (regval >> (i*8)) & 0x000000FF;
372
373 retval = x86_32->transaction_status(t);
374 if (retval != ERROR_OK) {
375 LOG_ERROR("%s error on mem read", __func__);
376 return retval;
377 }
378 return retval;
379 }
380
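/* Write one byte/word/dword to linear address 'addr': EAX carries the address,
 * EDX the value (assembled little-endian below), then a MEMWR* probe-mode
 * instruction is submitted. */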
381 static int write_mem(struct target *t, uint32_t size,
382 uint32_t addr, const uint8_t *buf)
383 {
384 uint32_t i = 0;
385 uint32_t buf4bytes = 0;
386 int retval = ERROR_OK;
387 struct x86_32_common *x86_32 = target_to_x86_32(t);
388
389 for (i = 0; i < size; ++i) {
390 buf4bytes = buf4bytes << 8; /* first time we only shift 0s */
391 buf4bytes += buf[(size-1)-i]; /* assemble little-endian: buf[0] ends up in the lowest byte */
392 }
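/* Illustrative example: for size == 2 and buf = {0x34, 0x12} the loop above
 * yields buf4bytes == 0x00001234, i.e. buf[0] becomes the least significant byte. */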
393 /* if the CS.D bit is 1 this is a 32-bit code segment, otherwise 16-bit */
394 bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
395 retval = x86_32->write_hw_reg(t, EAX, addr, 0);
396 if (retval != ERROR_OK) {
397 LOG_ERROR("%s error write EAX", __func__);
398 return retval;
399 }
400
401 /* write_hw_reg() will write to 4 bytes (uint32_t)
402 * Watch out, the buffer passed into write_mem() might be 1 or 2 bytes.
403 */
404 retval = x86_32->write_hw_reg(t, EDX, buf4bytes, 0);
405 if (retval != ERROR_OK) {
406 LOG_ERROR("%s error write EDX", __func__);
407 return retval;
408 }
409 switch (size) {
410 case BYTE:
411 if (use32)
412 retval = x86_32->submit_instruction(t, MEMWRB32);
413 else
414 retval = x86_32->submit_instruction(t, MEMWRB16);
415 break;
416 case WORD:
417 if (use32)
418 retval = x86_32->submit_instruction(t, MEMWRH32);
419 else
420 retval = x86_32->submit_instruction(t, MEMWRH16);
421 break;
422 case DWORD:
423 if (use32)
424 retval = x86_32->submit_instruction(t, MEMWRW32);
425 else
426 retval = x86_32->submit_instruction(t, MEMWRW16);
427 break;
428 default:
429 LOG_ERROR("%s invalid write mem size", __func__);
430 return ERROR_FAIL;
431 }
432
433 if (retval != ERROR_OK)
434 return retval;
435
436 retval = x86_32->transaction_status(t);
437 if (retval != ERROR_OK) {
438 LOG_ERROR("%s error on mem write", __func__);
439 return retval;
440 }
441 return retval;
442 }
443
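/* Translate a linear address to a physical address by walking the page tables
 * on the target, using the cached CR0/CR3/CR4 values to choose between legacy
 * 32-bit paging and PAE paging. */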
444 int calcaddr_physfromlin(struct target *t, target_addr_t addr, target_addr_t *physaddr)
445 {
446 uint8_t entry_buffer[8];
447
448 if (!physaddr || !t)
449 return ERROR_FAIL;
450
451 struct x86_32_common *x86_32 = target_to_x86_32(t);
452
453 /* The 'user-visible' CR0.PG should be set - otherwise the function shouldn't be called
454 * (Don't check CR0.PG on the target, as it might be temporarily disabled at this point)
455 */
456 uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);
457 if (!(cr0 & CR0_PG)) {
458 /* paging is disabled, so the linear address already is the physical address */
459 *physaddr = addr;
460 return ERROR_OK;
461 }
462
463 uint32_t cr4 = buf_get_u32(x86_32->cache->reg_list[CR4].value, 0, 32);
464 bool is_pae = cr4 & 0x00000020; /* PAE - Physical Address Extension */
465
466 uint32_t cr3 = buf_get_u32(x86_32->cache->reg_list[CR3].value, 0, 32);
467 if (is_pae) {
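/* PAE linear address layout: [31:30] PDPT index, [29:21] PD index,
 * [20:12] PT index, [11:0] page offset ([20:0] for a 2 MB page). */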
468 uint32_t pdpt_base = cr3 & 0xFFFFF000; /* lower 12 bits of CR3 must always be 0 */
469 uint32_t pdpt_index = (addr & 0xC0000000) >> 30; /* A[31:30] index to PDPT */
470 uint32_t pdpt_addr = pdpt_base + (8 * pdpt_index);
471 if (x86_32_common_read_phys_mem(t, pdpt_addr, 4, 2, entry_buffer) != ERROR_OK) {
472 LOG_ERROR("%s couldn't read page directory pointer table entry at 0x%08" PRIx32,
473 __func__, pdpt_addr);
474 return ERROR_FAIL;
475 }
476 uint64_t pdpt_entry = target_buffer_get_u64(t, entry_buffer);
477 if (!(pdpt_entry & 0x0000000000000001)) {
478 LOG_ERROR("%s page directory pointer table entry at 0x%08" PRIx32 " is not present",
479 __func__, pdpt_addr);
480 return ERROR_FAIL;
481 }
482
483 uint32_t pd_base = pdpt_entry & 0xFFFFF000; /* A[31:12] is PageTable/Page Base Address */
484 uint32_t pd_index = (addr & 0x3FE00000) >> 21; /* A[29:21] index to PD entry with PAE */
485 uint32_t pd_addr = pd_base + (8 * pd_index);
486 if (x86_32_common_read_phys_mem(t, pd_addr, 4, 2, entry_buffer) != ERROR_OK) {
487 LOG_ERROR("%s couldn't read page directory entry at 0x%08" PRIx32,
488 __func__, pd_addr);
489 return ERROR_FAIL;
490 }
491 uint64_t pd_entry = target_buffer_get_u64(t, entry_buffer);
492 if (!(pd_entry & 0x0000000000000001)) {
493 LOG_ERROR("%s page directory entry at 0x%08" PRIx32 " is not present",
494 __func__, pd_addr);
495 return ERROR_FAIL;
496 }
497
498 /* the PS bit in the PD entry selects between 4 KB and 2 MB pages */
499 if (pd_entry & 0x0000000000000080) {
500
501 uint32_t page_base = (uint32_t)(pd_entry & 0x00000000FFE00000); /* [31:21] */
502 uint32_t offset = addr & 0x001FFFFF; /* [20:0] */
503 *physaddr = page_base + offset;
504 return ERROR_OK;
505
506 } else {
507
508 uint32_t pt_base = (uint32_t)(pd_entry & 0x00000000FFFFF000); /*[31:12]*/
509 uint32_t pt_index = (addr & 0x001FF000) >> 12; /*[20:12]*/
510 uint32_t pt_addr = pt_base + (8 * pt_index);
511 if (x86_32_common_read_phys_mem(t, pt_addr, 4, 2, entry_buffer) != ERROR_OK) {
512 LOG_ERROR("%s couldn't read page table entry at 0x%08" PRIx32, __func__, pt_addr);
513 return ERROR_FAIL;
514 }
515 uint64_t pt_entry = target_buffer_get_u64(t, entry_buffer);
516 if (!(pt_entry & 0x0000000000000001)) {
517 LOG_ERROR("%s page table entry at 0x%08" PRIx32 " is not present", __func__, pt_addr);
518 return ERROR_FAIL;
519 }
520
521 uint32_t page_base = (uint32_t)(pt_entry & 0x00000000FFFFF000); /*[31:12]*/
522 uint32_t offset = addr & 0x00000FFF; /*[11:0]*/
523 *physaddr = page_base + offset;
524 return ERROR_OK;
525 }
526 } else {
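/* Legacy 32-bit paging layout: [31:22] PD index, [21:12] PT index,
 * [11:0] page offset ([21:0] for a 4 MB page). */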
527 uint32_t pd_base = cr3 & 0xFFFFF000; /* lower 12 bits of CR3 must always be 0 */
528 uint32_t pd_index = (addr & 0xFFC00000) >> 22; /* A[31:22] index to PD entry */
529 uint32_t pd_addr = pd_base + (4 * pd_index);
530 if (x86_32_common_read_phys_mem(t, pd_addr, 4, 1, entry_buffer) != ERROR_OK) {
531 LOG_ERROR("%s couldn't read page directory entry at 0x%08" PRIx32, __func__, pd_addr);
532 return ERROR_FAIL;
533 }
534 uint32_t pd_entry = target_buffer_get_u32(t, entry_buffer);
535 if (!(pd_entry & 0x00000001)) {
536 LOG_ERROR("%s page directory entry at 0x%08" PRIx32 " is not present", __func__, pd_addr);
537 return ERROR_FAIL;
538 }
539
540 /* Bit 7 (PS) in the page directory entry selects between 4 KB and 4 MB pages.
541 */
542 if (pd_entry & 0x00000080) {
543 /* 4MB pages */
544 uint32_t page_base = pd_entry & 0xFFC00000;
545 *physaddr = page_base + (addr & 0x003FFFFF);
546
547 } else {
548 /* 4KB pages */
549 uint32_t pt_base = pd_entry & 0xFFFFF000; /* A[31:12] is PageTable/Page Base Address */
550 uint32_t pt_index = (addr & 0x003FF000) >> 12; /* A[21:12] index to page table entry */
551 uint32_t pt_addr = pt_base + (4 * pt_index);
552 if (x86_32_common_read_phys_mem(t, pt_addr, 4, 1, entry_buffer) != ERROR_OK) {
553 LOG_ERROR("%s couldn't read page table entry at 0x%08" PRIx32, __func__, pt_addr);
554 return ERROR_FAIL;
555 }
556 uint32_t pt_entry = target_buffer_get_u32(t, entry_buffer);
557 if (!(pt_entry & 0x00000001)) {
558 LOG_ERROR("%s page table entry at 0x%08" PRIx32 " is not present", __func__, pt_addr);
559 return ERROR_FAIL;
560 }
561 uint32_t page_base = pt_entry & 0xFFFFF000; /* A[31:12] is PageTable/Page Base Address */
562 *physaddr = page_base + (addr & 0x00000FFF); /* A[11:0] offset to 4KB page in linear address */
563 }
564 }
565 return ERROR_OK;
566 }
567
568 int x86_32_common_read_memory(struct target *t, target_addr_t addr,
569 uint32_t size, uint32_t count, uint8_t *buf)
570 {
571 int retval = ERROR_OK;
572 struct x86_32_common *x86_32 = target_to_x86_32(t);
573 LOG_DEBUG("addr=" TARGET_ADDR_FMT ", size=%" PRIu32 ", count=0x%" PRIx32 ", buf=%p",
574 addr, size, count, buf);
575 check_not_halted(t);
576 if (!count || !buf || !addr) {
577 LOG_ERROR("%s invalid params count=0x%" PRIx32 ", buf=%p, addr=" TARGET_ADDR_FMT,
578 __func__, count, buf, addr);
579 return ERROR_COMMAND_ARGUMENT_INVALID;
580 }
581
582 if (x86_32->is_paging_enabled(t)) {
583 /* all memory accesses from debugger must be physical (CR0.PG == 0)
584 * conversion to physical address space needed
585 */
586 retval = x86_32->disable_paging(t);
587 if (retval != ERROR_OK) {
588 LOG_ERROR("%s could not disable paging", __func__);
589 return retval;
590 }
591 target_addr_t physaddr = 0;
592 if (calcaddr_physfromlin(t, addr, &physaddr) != ERROR_OK) {
593 LOG_ERROR("%s failed to calculate physical address from " TARGET_ADDR_FMT,
594 __func__, addr);
595 retval = ERROR_FAIL;
596 }
597 /* TODO: !!! Watch out for page boundaries
598 * for every 4kB, the physical address has to be re-calculated
599 * This should be fixed together with bulk memory reads
600 */
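/* Illustrative example of the limitation: a 16-byte read starting at linear
 * address 0x00000FF8 crosses into the next 4 KB page, whose frame need not be
 * physically contiguous with the first. */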
601
602 if (retval == ERROR_OK
603 && x86_32_common_read_phys_mem(t, physaddr, size, count, buf) != ERROR_OK) {
604 LOG_ERROR("%s failed to read memory from physical address " TARGET_ADDR_FMT,
605 __func__, physaddr);
606 }
607 /* restore PG bit if it was cleared prior (regardless of retval) */
608 retval = x86_32->enable_paging(t);
609 if (retval != ERROR_OK) {
610 LOG_ERROR("%s could not enable paging", __func__);
611 return retval;
612 }
613 } else {
614 /* paging is off - linear address is physical address */
615 if (x86_32_common_read_phys_mem(t, addr, size, count, buf) != ERROR_OK) {
616 LOG_ERROR("%s failed to read memory from address " TARGET_ADDR_FMT,
617 __func__, addr);
618 retval = ERROR_FAIL;
619 }
620 }
621
622 return retval;
623 }
624
625 int x86_32_common_write_memory(struct target *t, target_addr_t addr,
626 uint32_t size, uint32_t count, const uint8_t *buf)
627 {
628 int retval = ERROR_OK;
629 struct x86_32_common *x86_32 = target_to_x86_32(t);
630 LOG_DEBUG("addr=" TARGET_ADDR_FMT ", size=%" PRIu32 ", count=0x%" PRIx32 ", buf=%p",
631 addr, size, count, buf);
632 check_not_halted(t);
633 if (!count || !buf || !addr) {
634 LOG_ERROR("%s invalid params count=0x%" PRIx32 ", buf=%p, addr=" TARGET_ADDR_FMT,
635 __func__, count, buf, addr);
636 return ERROR_COMMAND_ARGUMENT_INVALID;
637 }
638 if (x86_32->is_paging_enabled(t)) {
639 /* all memory accesses from debugger must be physical (CR0.PG == 0)
640 * conversion to physical address space needed
641 */
642 retval = x86_32->disable_paging(t);
643 if (retval != ERROR_OK) {
644 LOG_ERROR("%s could not disable paging", __func__);
645 return retval;
646 }
647 target_addr_t physaddr = 0;
648 if (calcaddr_physfromlin(t, addr, &physaddr) != ERROR_OK) {
649 LOG_ERROR("%s failed to calculate physical address from " TARGET_ADDR_FMT,
650 __func__, addr);
651 retval = ERROR_FAIL;
652 }
653 /* TODO: !!! Watch out for page boundaries
654 * for every 4kB, the physical address has to be re-calculated
655 * This should be fixed together with bulk memory reads
656 */
657 if (retval == ERROR_OK
658 && x86_32_common_write_phys_mem(t, physaddr, size, count, buf) != ERROR_OK) {
659 LOG_ERROR("%s failed to write memory to physical address " TARGET_ADDR_FMT,
660 __func__, physaddr);
661 }
662 /* restore PG bit if it was cleared prior (regardless of retval) */
663 retval = x86_32->enable_paging(t);
664 if (retval != ERROR_OK) {
665 LOG_ERROR("%s could not enable paging", __func__);
666 return retval;
667 }
668 } else {
669
670 /* paging is off - linear address is physical address */
671 if (x86_32_common_write_phys_mem(t, addr, size, count, buf) != ERROR_OK) {
672 LOG_ERROR("%s failed to write memory to address " TARGET_ADDR_FMT,
673 __func__, addr);
674 retval = ERROR_FAIL;
675 }
676 }
677 return retval;
678 }
679
680 int x86_32_common_read_io(struct target *t, uint32_t addr,
681 uint32_t size, uint8_t *buf)
682 {
683 struct x86_32_common *x86_32 = target_to_x86_32(t);
684 /* if the CS.D bit is 1 this is a 32-bit code segment, otherwise 16-bit */
685 bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
686 int retval = ERROR_FAIL;
687 bool pg_disabled = false;
688 LOG_DEBUG("addr=0x%08" PRIx32 ", size=%" PRIu32 ", buf=%p", addr, size, buf);
689 check_not_halted(t);
690 if (!buf || !addr) {
691 LOG_ERROR("%s invalid params buf=%p, addr=%08" PRIx32, __func__, buf, addr);
692 return retval;
693 }
694 retval = x86_32->write_hw_reg(t, EDX, addr, 0);
695 if (retval != ERROR_OK) {
696 LOG_ERROR("%s error EDX write", __func__);
697 return retval;
698 }
699 /* to access physical memory, switch off the CR0.PG bit */
700 if (x86_32->is_paging_enabled(t)) {
701 retval = x86_32->disable_paging(t);
702 if (retval != ERROR_OK) {
703 LOG_ERROR("%s could not disable paging", __func__);
704 return retval;
705 }
706 pg_disabled = true;
707 }
708 switch (size) {
709 case BYTE:
710 if (use32)
711 retval = x86_32->submit_instruction(t, IORDB32);
712 else
713 retval = x86_32->submit_instruction(t, IORDB16);
714 break;
715 case WORD:
716 if (use32)
717 retval = x86_32->submit_instruction(t, IORDH32);
718 else
719 retval = x86_32->submit_instruction(t, IORDH16);
720 break;
721 case DWORD:
722 if (use32)
723 retval = x86_32->submit_instruction(t, IORDW32);
724 else
725 retval = x86_32->submit_instruction(t, IORDW16);
726 break;
727 default:
728 LOG_ERROR("%s invalid read io size", __func__);
729 return ERROR_FAIL;
730 }
731
732 /* restore CR0.PG bit if needed */
733 if (pg_disabled) {
734 int retval2 = x86_32->enable_paging(t);
735 if (retval2 != ERROR_OK) {
736 LOG_ERROR("%s could not enable paging", __func__);
737 return retval2;
738 }
739 }
740
741 if (retval != ERROR_OK)
742 return retval;
743
744 uint32_t regval = 0;
745 retval = x86_32->read_hw_reg(t, EAX, &regval, 0);
746 if (retval != ERROR_OK) {
747 LOG_ERROR("%s error on read EAX", __func__);
748 return retval;
749 }
750 for (uint8_t i = 0; i < size; i++)
751 buf[i] = (regval >> (i*8)) & 0x000000FF;
752 retval = x86_32->transaction_status(t);
753 if (retval != ERROR_OK) {
754 LOG_ERROR("%s error on io read", __func__);
755 return retval;
756 }
757 return retval;
758 }
759
760 int x86_32_common_write_io(struct target *t, uint32_t addr,
761 uint32_t size, const uint8_t *buf)
762 {
763 struct x86_32_common *x86_32 = target_to_x86_32(t);
764 /* if the CS.D bit is 1 this is a 32-bit code segment, otherwise 16-bit */
765 bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
766 LOG_DEBUG("addr=0x%08" PRIx32 ", size=%" PRIu32 ", buf=%p", addr, size, buf);
767 check_not_halted(t);
768 int retval = ERROR_FAIL;
769 bool pg_disabled = false;
770 if (!buf || !addr) {
771 LOG_ERROR("%s invalid params buf=%p, addr=0x%08" PRIx32, __func__, buf, addr);
772 return retval;
773 }
774 /* now do the write */
775 retval = x86_32->write_hw_reg(t, EDX, addr, 0);
776 if (retval != ERROR_OK) {
777 LOG_ERROR("%s error on EDX write", __func__);
778 return retval;
779 }
780 uint32_t regval = 0;
781 for (uint8_t i = 0; i < size; i++)
782 regval += (buf[i] << (i*8));
783 retval = x86_32->write_hw_reg(t, EAX, regval, 0);
784 if (retval != ERROR_OK) {
785 LOG_ERROR("%s error on EAX write", __func__);
786 return retval;
787 }
788 /* to access physical memory, switch off the CR0.PG bit */
789 if (x86_32->is_paging_enabled(t)) {
790 retval = x86_32->disable_paging(t);
791 if (retval != ERROR_OK) {
792 LOG_ERROR("%s could not disable paging", __func__);
793 return retval;
794 }
795 pg_disabled = true;
796 }
797 switch (size) {
798 case BYTE:
799 if (use32)
800 retval = x86_32->submit_instruction(t, IOWRB32);
801 else
802 retval = x86_32->submit_instruction(t, IOWRB16);
803 break;
804 case WORD:
805 if (use32)
806 retval = x86_32->submit_instruction(t, IOWRH32);
807 else
808 retval = x86_32->submit_instruction(t, IOWRH16);
809 break;
810 case DWORD:
811 if (use32)
812 retval = x86_32->submit_instruction(t, IOWRW32);
813 else
814 retval = x86_32->submit_instruction(t, IOWRW16);
815 break;
816 default:
817 LOG_ERROR("%s invalid write io size", __func__);
818 return ERROR_FAIL;
819 }
820
821 /* restore CR0.PG bit if needed */
822 if (pg_disabled) {
823 int retval2 = x86_32->enable_paging(t);
824 if (retval2 != ERROR_OK) {
825 LOG_ERROR("%s could not enable paging", __func__);
826 return retval2;
827 }
828 }
829
830 if (retval != ERROR_OK)
831 return retval;
832
833 retval = x86_32->transaction_status(t);
834 if (retval != ERROR_OK) {
835 LOG_ERROR("%s error on io write", __func__);
836 return retval;
837 }
838 return retval;
839 }
840
841 int x86_32_common_add_watchpoint(struct target *t, struct watchpoint *wp)
842 {
843 check_not_halted(t);
844 /* set_watchpoint() will return ERROR_TARGET_RESOURCE_NOT_AVAILABLE if all
845 * hardware registers are gone
846 */
847 return set_watchpoint(t, wp);
848 }
849
850 int x86_32_common_remove_watchpoint(struct target *t, struct watchpoint *wp)
851 {
852 if (check_not_halted(t))
853 return ERROR_TARGET_NOT_HALTED;
854 if (wp->is_set)
855 unset_watchpoint(t, wp);
856 return ERROR_OK;
857 }
858
859 int x86_32_common_add_breakpoint(struct target *t, struct breakpoint *bp)
860 {
861 LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, bp->type, bp->address);
862 if (check_not_halted(t))
863 return ERROR_TARGET_NOT_HALTED;
864 /* set_breakpoint() will return ERROR_TARGET_RESOURCE_NOT_AVAILABLE if all
865 * hardware registers are gone (for hardware breakpoints)
866 */
867 return set_breakpoint(t, bp);
868 }
869
870 int x86_32_common_remove_breakpoint(struct target *t, struct breakpoint *bp)
871 {
872 LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, bp->type, bp->address);
873 if (check_not_halted(t))
874 return ERROR_TARGET_NOT_HALTED;
875 if (bp->is_set)
876 unset_breakpoint(t, bp);
877
878 return ERROR_OK;
879 }
880
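/* Program one of the four hardware debug register slots: DR0-DR3 hold the
 * linear address, DR7 holds the per-slot global enable, type (execute/write/
 * access) and length fields. Only the register cache is updated here; the
 * values reach the hardware when the target leaves probe mode. */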
881 static int set_debug_regs(struct target *t, uint32_t address,
882 uint8_t bp_num, uint8_t bp_type, uint8_t bp_length)
883 {
884 struct x86_32_common *x86_32 = target_to_x86_32(t);
885 LOG_DEBUG("addr=0x%08" PRIx32 ", bp_num=%" PRIu8 ", bp_type=%" PRIu8 ", pb_length=%" PRIu8,
886 address, bp_num, bp_type, bp_length);
887
888 /* DR7 - set global enable */
889 uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
890
891 if (bp_length != 1 && bp_length != 2 && bp_length != 4)
892 return ERROR_FAIL;
893
894 if (DR7_BP_FREE(dr7, bp_num))
895 DR7_GLOBAL_ENABLE(dr7, bp_num);
896 else {
897 LOG_ERROR("%s dr7 error, already enabled, val=%08" PRIx32, __func__, dr7);
898 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
899 }
900
901 switch (bp_type) {
902 case 0:
903 /* 00 - only on instruction execution */
904 DR7_SET_EXE(dr7, bp_num);
905 DR7_SET_LENGTH(dr7, bp_num, bp_length);
906 break;
907 case 1:
908 /* 01 - only on data writes */
909 DR7_SET_WRITE(dr7, bp_num);
910 DR7_SET_LENGTH(dr7, bp_num, bp_length);
911 break;
912 case 2:
913 /* 10 UNSUPPORTED - an I/O read and I/O write */
914 LOG_ERROR("%s unsupported feature bp_type=%d", __func__, bp_type);
915 return ERROR_FAIL;
916 break;
917 case 3:
918 /* 11 - on data reads or data writes */
919 DR7_SET_ACCESS(dr7, bp_num);
920 DR7_SET_LENGTH(dr7, bp_num, bp_length);
921 break;
922 default:
923 LOG_ERROR("%s invalid request [only 0-3] bp_type=%d", __func__, bp_type);
924 return ERROR_FAIL;
925 }
926
927 /* update regs in the reg cache ready to be written to hardware
928 * when we exit PM
929 */
930 buf_set_u32(x86_32->cache->reg_list[bp_num+DR0].value, 0, 32, address);
931 x86_32->cache->reg_list[bp_num+DR0].dirty = true;
932 x86_32->cache->reg_list[bp_num+DR0].valid = true;
933 buf_set_u32(x86_32->cache->reg_list[DR6].value, 0, 32, PM_DR6);
934 x86_32->cache->reg_list[DR6].dirty = true;
935 x86_32->cache->reg_list[DR6].valid = true;
936 buf_set_u32(x86_32->cache->reg_list[DR7].value, 0, 32, dr7);
937 x86_32->cache->reg_list[DR7].dirty = true;
938 x86_32->cache->reg_list[DR7].valid = true;
939 return ERROR_OK;
940 }
941
942 static int unset_debug_regs(struct target *t, uint8_t bp_num)
943 {
944 struct x86_32_common *x86_32 = target_to_x86_32(t);
945 LOG_DEBUG("bp_num=%" PRIu8, bp_num);
946
947 uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
948
949 if (!(DR7_BP_FREE(dr7, bp_num))) {
950 DR7_GLOBAL_DISABLE(dr7, bp_num);
951 } else {
952 LOG_ERROR("%s dr7 error, not enabled, val=0x%08" PRIx32, __func__, dr7);
953 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
954 }
955 /* this will clear rw and len bits */
956 DR7_RESET_RWLEN_BITS(dr7, bp_num);
957
958 /* update regs in the reg cache ready to be written to hardware
959 * when we exit PM
960 */
961 buf_set_u32(x86_32->cache->reg_list[bp_num+DR0].value, 0, 32, 0);
962 x86_32->cache->reg_list[bp_num+DR0].dirty = true;
963 x86_32->cache->reg_list[bp_num+DR0].valid = true;
964 buf_set_u32(x86_32->cache->reg_list[DR6].value, 0, 32, PM_DR6);
965 x86_32->cache->reg_list[DR6].dirty = true;
966 x86_32->cache->reg_list[DR6].valid = true;
967 buf_set_u32(x86_32->cache->reg_list[DR7].value, 0, 32, dr7);
968 x86_32->cache->reg_list[DR7].dirty = true;
969 x86_32->cache->reg_list[DR7].valid = true;
970 return ERROR_OK;
971 }
972
973 static int set_hwbp(struct target *t, struct breakpoint *bp)
974 {
975 struct x86_32_common *x86_32 = target_to_x86_32(t);
976 struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
977 uint8_t hwbp_num = 0;
978
979 while ((hwbp_num < x86_32->num_hw_bpoints) && debug_reg_list[hwbp_num].used)
980 hwbp_num++;
981 if (hwbp_num >= x86_32->num_hw_bpoints) {
982 LOG_ERROR("%s no free hw breakpoint bpid=0x%" PRIx32, __func__, bp->unique_id);
983 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
984 }
985 if (set_debug_regs(t, bp->address, hwbp_num, DR7_BP_EXECUTE, 1) != ERROR_OK)
986 return ERROR_FAIL;
987 breakpoint_hw_set(bp, hwbp_num);
988 debug_reg_list[hwbp_num].used = 1;
989 debug_reg_list[hwbp_num].bp_value = bp->address;
990 LOG_USER("%s hardware breakpoint %" PRIu32 " set at 0x%08" PRIx32 " (hwreg=%" PRIu8 ")", __func__,
991 bp->unique_id, debug_reg_list[hwbp_num].bp_value, hwbp_num);
992 return ERROR_OK;
993 }
994
995 static int unset_hwbp(struct target *t, struct breakpoint *bp)
996 {
997 struct x86_32_common *x86_32 = target_to_x86_32(t);
998 struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
999 int hwbp_num = bp->number;
1000
1001 if (hwbp_num >= x86_32->num_hw_bpoints) {
1002 LOG_ERROR("%s invalid breakpoint number=%d, bpid=%" PRIu32,
1003 __func__, hwbp_num, bp->unique_id);
1004 return ERROR_OK;
1005 }
1006
1007 if (unset_debug_regs(t, hwbp_num) != ERROR_OK)
1008 return ERROR_FAIL;
1009 debug_reg_list[hwbp_num].used = 0;
1010 debug_reg_list[hwbp_num].bp_value = 0;
1011
1012 LOG_USER("%s hardware breakpoint %" PRIu32 " removed from " TARGET_ADDR_FMT " (hwreg=%d)",
1013 __func__, bp->unique_id, bp->address, hwbp_num);
1014 return ERROR_OK;
1015 }
1016
1017 static int set_swbp(struct target *t, struct breakpoint *bp)
1018 {
1019 struct x86_32_common *x86_32 = target_to_x86_32(t);
1020 LOG_DEBUG("id %" PRIx32, bp->unique_id);
1021 target_addr_t physaddr;
1022 uint8_t opcode = SW_BP_OPCODE;
1023 uint8_t readback;
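/* A software breakpoint saves the original byte and overwrites it with the
 * one-byte trap opcode (SW_BP_OPCODE); the readback below verifies that the
 * location is writable memory. */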
1024
1025 if (calcaddr_physfromlin(t, bp->address, &physaddr) != ERROR_OK)
1026 return ERROR_FAIL;
1027 if (read_phys_mem(t, physaddr, 1, 1, bp->orig_instr))
1028 return ERROR_FAIL;
1029
1030 LOG_DEBUG("set software breakpoint - orig byte=0x%02" PRIx8 "", *bp->orig_instr);
1031
1032 /* just write the instruction trap byte */
1033 if (write_phys_mem(t, physaddr, 1, 1, &opcode))
1034 return ERROR_FAIL;
1035
1036 /* verify that this is not invalid/read-only memory */
1037 if (read_phys_mem(t, physaddr, 1, 1, &readback))
1038 return ERROR_FAIL;
1039
1040 if (readback != SW_BP_OPCODE) {
1041 LOG_ERROR("%s software breakpoint error at " TARGET_ADDR_FMT ", check memory",
1042 __func__, bp->address);
1043 LOG_ERROR("%s readback=0x%02" PRIx8 " orig=0x%02" PRIx8 "",
1044 __func__, readback, *bp->orig_instr);
1045 return ERROR_FAIL;
1046 }
1047 bp->is_set = true;
1048
1049 /* add the memory patch */
1050 struct swbp_mem_patch *new_patch = malloc(sizeof(struct swbp_mem_patch));
1051 if (!new_patch) {
1052 LOG_ERROR("%s out of memory", __func__);
1053 return ERROR_FAIL;
1054 }
1055 new_patch->next = NULL;
1056 new_patch->orig_byte = *bp->orig_instr;
1057 new_patch->physaddr = physaddr;
1058 new_patch->swbp_unique_id = bp->unique_id;
1059
1060 struct swbp_mem_patch *addto = x86_32->swbbp_mem_patch_list;
1061 if (!addto)
1062 x86_32->swbbp_mem_patch_list = new_patch;
1063 else {
1064 while (addto->next)
1065 addto = addto->next;
1066 addto->next = new_patch;
1067 }
1068 LOG_USER("%s software breakpoint %" PRIu32 " set at " TARGET_ADDR_FMT,
1069 __func__, bp->unique_id, bp->address);
1070 return ERROR_OK;
1071 }
1072
1073 static int unset_swbp(struct target *t, struct breakpoint *bp)
1074 {
1075 struct x86_32_common *x86_32 = target_to_x86_32(t);
1076 LOG_DEBUG("id %" PRIx32, bp->unique_id);
1077 target_addr_t physaddr;
1078 uint8_t current_instr;
1079
1080 /* check that user program has not modified breakpoint instruction */
1081 if (calcaddr_physfromlin(t, bp->address, &physaddr) != ERROR_OK)
1082 return ERROR_FAIL;
1083 if (read_phys_mem(t, physaddr, 1, 1, &current_instr))
1084 return ERROR_FAIL;
1085
1086 if (current_instr == SW_BP_OPCODE) {
1087 if (write_phys_mem(t, physaddr, 1, 1, bp->orig_instr))
1088 return ERROR_FAIL;
1089 } else {
1090 LOG_ERROR("%s software breakpoint remove error at " TARGET_ADDR_FMT ", check memory",
1091 __func__, bp->address);
1092 LOG_ERROR("%s current=0x%02" PRIx8 " orig=0x%02" PRIx8 "",
1093 __func__, current_instr, *bp->orig_instr);
1094 return ERROR_FAIL;
1095 }
1096
1097 /* remove from patch */
1098 struct swbp_mem_patch *iter = x86_32->swbbp_mem_patch_list;
1099 if (iter) {
1100 if (iter->swbp_unique_id == bp->unique_id) {
1101 /* it's the first item */
1102 x86_32->swbbp_mem_patch_list = iter->next;
1103 free(iter);
1104 } else {
1105 while (iter->next && iter->next->swbp_unique_id != bp->unique_id)
1106 iter = iter->next;
1107 if (iter->next) {
1108 /* it's the next one */
1109 struct swbp_mem_patch *freeme = iter->next;
1110 iter->next = iter->next->next;
1111 free(freeme);
1112 }
1113 }
1114 }
1115
1116 LOG_USER("%s software breakpoint %" PRIu32 " removed from " TARGET_ADDR_FMT,
1117 __func__, bp->unique_id, bp->address);
1118 return ERROR_OK;
1119 }
1120
1121 static int set_breakpoint(struct target *t, struct breakpoint *bp)
1122 {
1123 int error = ERROR_OK;
1124 struct x86_32_common *x86_32 = target_to_x86_32(t);
1125 LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, bp->type, bp->address);
1126 if (bp->is_set) {
1127 LOG_ERROR("breakpoint already set");
1128 return error;
1129 }
1130 if (bp->type == BKPT_HARD) {
1131 error = set_hwbp(t, bp);
1132 if (error != ERROR_OK) {
1133 LOG_ERROR("%s error setting hardware breakpoint at " TARGET_ADDR_FMT,
1134 __func__, bp->address);
1135 return error;
1136 }
1137 } else {
1138 if (x86_32->sw_bpts_supported(t)) {
1139 error = set_swbp(t, bp);
1140 if (error != ERROR_OK) {
1141 LOG_ERROR("%s error setting software breakpoint at " TARGET_ADDR_FMT,
1142 __func__, bp->address);
1143 return error;
1144 }
1145 } else {
1146 LOG_ERROR("%s core doesn't support SW breakpoints", __func__);
1147 return ERROR_FAIL;
1148 }
1149 }
1150 return error;
1151 }
1152
1153 static int unset_breakpoint(struct target *t, struct breakpoint *bp)
1154 {
1155 LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, bp->type, bp->address);
1156 if (!bp->is_set) {
1157 LOG_WARNING("breakpoint not set");
1158 return ERROR_OK;
1159 }
1160
1161 if (bp->type == BKPT_HARD) {
1162 if (unset_hwbp(t, bp) != ERROR_OK) {
1163 LOG_ERROR("%s error removing hardware breakpoint at " TARGET_ADDR_FMT,
1164 __func__, bp->address);
1165 return ERROR_FAIL;
1166 }
1167 } else {
1168 if (unset_swbp(t, bp) != ERROR_OK) {
1169 LOG_ERROR("%s error removing software breakpoint at " TARGET_ADDR_FMT,
1170 __func__, bp->address);
1171 return ERROR_FAIL;
1172 }
1173 }
1174 bp->is_set = false;
1175 return ERROR_OK;
1176 }
1177
1178 static int set_watchpoint(struct target *t, struct watchpoint *wp)
1179 {
1180 struct x86_32_common *x86_32 = target_to_x86_32(t);
1181 struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
1182 int wp_num = 0;
1183 LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, wp->rw, wp->address);
1184
1185 if (wp->is_set) {
1186 LOG_ERROR("%s watchpoint already set", __func__);
1187 return ERROR_OK;
1188 }
1189
1190 if (wp->rw == WPT_READ) {
1191 LOG_ERROR("%s no support for 'read' watchpoints, use 'access' or 'write'"
1192 , __func__);
1193 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1194 }
1195
1196 while ((wp_num < x86_32->num_hw_bpoints) && debug_reg_list[wp_num].used)
1197 wp_num++;
1198 if (wp_num >= x86_32->num_hw_bpoints) {
1199 LOG_ERROR("%s no debug registers left", __func__);
1200 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1201 }
1202
1203 if (wp->length != 4 && wp->length != 2 && wp->length != 1) {
1204 LOG_ERROR("%s only watchpoints of length 1, 2 or 4 are supported", __func__);
1205 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1206 }
1207
1208 switch (wp->rw) {
1209 case WPT_WRITE:
1210 if (set_debug_regs(t, wp->address, wp_num,
1211 DR7_BP_WRITE, wp->length) != ERROR_OK) {
1212 return ERROR_FAIL;
1213 }
1214 break;
1215 case WPT_ACCESS:
1216 if (set_debug_regs(t, wp->address, wp_num, DR7_BP_READWRITE,
1217 wp->length) != ERROR_OK) {
1218 return ERROR_FAIL;
1219 }
1220 break;
1221 default:
1222 LOG_ERROR("%s only 'access' or 'write' watchpoints are supported", __func__);
1223 break;
1224 }
1225 watchpoint_set(wp, wp_num);
1226 debug_reg_list[wp_num].used = 1;
1227 debug_reg_list[wp_num].bp_value = wp->address;
1228 LOG_USER("'%s' watchpoint %d set at " TARGET_ADDR_FMT " with length %" PRIu32 " (hwreg=%d)",
1229 wp->rw == WPT_READ ? "read" : wp->rw == WPT_WRITE ?
1230 "write" : wp->rw == WPT_ACCESS ? "access" : "?",
1231 wp->unique_id, wp->address, wp->length, wp_num);
1232 return ERROR_OK;
1233 }
1234
1235 static int unset_watchpoint(struct target *t, struct watchpoint *wp)
1236 {
1237 struct x86_32_common *x86_32 = target_to_x86_32(t);
1238 struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
1239 LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, wp->rw, wp->address);
1240 if (!wp->is_set) {
1241 LOG_WARNING("watchpoint not set");
1242 return ERROR_OK;
1243 }
1244
1245 int wp_num = wp->number;
1246 if (wp_num >= x86_32->num_hw_bpoints) {
1247 LOG_DEBUG("Invalid FP Comparator number in watchpoint");
1248 return ERROR_OK;
1249 }
1250 if (unset_debug_regs(t, wp_num) != ERROR_OK)
1251 return ERROR_FAIL;
1252
1253 debug_reg_list[wp_num].used = 0;
1254 debug_reg_list[wp_num].bp_value = 0;
1255 wp->is_set = false;
1256
1257 LOG_USER("'%s' watchpoint %d removed from " TARGET_ADDR_FMT " with length %" PRIu32 " (hwreg=%d)",
1258 wp->rw == WPT_READ ? "read" : wp->rw == WPT_WRITE ?
1259 "write" : wp->rw == WPT_ACCESS ? "access" : "?",
1260 wp->unique_id, wp->address, wp->length, wp_num);
1261
1262 return ERROR_OK;
1263 }
1264
1265 /* After reset, breakpoints and watchpoints in memory are no longer valid and the
1266 * debug registers are cleared.
1267 * We can't afford to remove sw breakpoints using the default methods, as the memory
1268 * doesn't have the same layout yet and an access might crash the target, so we just
1269 * clear the OpenOCD breakpoint structures.
1270 */
1271 void x86_32_common_reset_breakpoints_watchpoints(struct target *t)
1272 {
1273 struct x86_32_common *x86_32 = target_to_x86_32(t);
1274 struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
1275 struct breakpoint *next_b;
1276 struct watchpoint *next_w;
1277
1278 while (t->breakpoints) {
1279 next_b = t->breakpoints->next;
1280 free(t->breakpoints->orig_instr);
1281 free(t->breakpoints);
1282 t->breakpoints = next_b;
1283 }
1284
1285 while (t->watchpoints) {
1286 next_w = t->watchpoints->next;
1287 free(t->watchpoints);
1288 t->watchpoints = next_w;
1289 }
1290
1291 for (int i = 0; i < x86_32->num_hw_bpoints; i++) {
1292 debug_reg_list[i].used = 0;
1293 debug_reg_list[i].bp_value = 0;
1294 }
1295 }
1296
1297 static int read_hw_reg_to_cache(struct target *t, int num)
1298 {
1299 uint32_t reg_value;
1300 struct x86_32_common *x86_32 = target_to_x86_32(t);
1301
1302 if (check_not_halted(t))
1303 return ERROR_TARGET_NOT_HALTED;
1304 if ((num < 0) || (num >= x86_32->get_num_user_regs(t)))
1305 return ERROR_COMMAND_SYNTAX_ERROR;
1306 if (x86_32->read_hw_reg(t, num, &reg_value, 1) != ERROR_OK) {
1307 LOG_ERROR("%s fail for %s", x86_32->cache->reg_list[num].name, __func__);
1308 return ERROR_FAIL;
1309 }
1310 LOG_DEBUG("reg %s value 0x%08" PRIx32,
1311 x86_32->cache->reg_list[num].name, reg_value);
1312 return ERROR_OK;
1313 }
1314
1315 static int write_hw_reg_from_cache(struct target *t, int num)
1316 {
1317 struct x86_32_common *x86_32 = target_to_x86_32(t);
1318 if (check_not_halted(t))
1319 return ERROR_TARGET_NOT_HALTED;
1320 if ((num < 0) || (num >= x86_32->get_num_user_regs(t)))
1321 return ERROR_COMMAND_SYNTAX_ERROR;
1322 if (x86_32->write_hw_reg(t, num, 0, 1) != ERROR_OK) {
1323 LOG_ERROR("%s fail for %s", x86_32->cache->reg_list[num].name, __func__);
1324 return ERROR_FAIL;
1325 }
1326 LOG_DEBUG("reg %s value 0x%08" PRIx32, x86_32->cache->reg_list[num].name,
1327 buf_get_u32(x86_32->cache->reg_list[num].value, 0, 32));
1328 return ERROR_OK;
1329 }
1330
1331 /* x86 32 commands */
1332 static void handle_iod_output(struct command_invocation *cmd,
1333 struct target *target, uint32_t address, unsigned size,
1334 unsigned count, const uint8_t *buffer)
1335 {
1336 const unsigned line_bytecnt = 32;
1337 unsigned line_modulo = line_bytecnt / size;
1338
1339 char output[line_bytecnt * 4 + 1];
1340 unsigned output_len = 0;
1341
1342 const char *value_fmt;
1343 switch (size) {
1344 case 4:
1345 value_fmt = "%8.8x ";
1346 break;
1347 case 2:
1348 value_fmt = "%4.4x ";
1349 break;
1350 case 1:
1351 value_fmt = "%2.2x ";
1352 break;
1353 default:
1354 /* "can't happen", caller checked */
1355 LOG_ERROR("%s invalid memory read size: %u", __func__, size);
1356 return;
1357 }
1358
1359 for (unsigned i = 0; i < count; i++) {
1360 if (i % line_modulo == 0) {
1361 output_len += snprintf(output + output_len,
1362 sizeof(output) - output_len,
1363 "0x%8.8x: ",
1364 (unsigned)(address + (i*size)));
1365 }
1366
1367 uint32_t value = 0;
1368 const uint8_t *value_ptr = buffer + i * size;
1369 switch (size) {
1370 case 4:
1371 value = target_buffer_get_u32(target, value_ptr);
1372 break;
1373 case 2:
1374 value = target_buffer_get_u16(target, value_ptr);
1375 break;
1376 case 1:
1377 value = *value_ptr;
1378 }
1379 output_len += snprintf(output + output_len,
1380 sizeof(output) - output_len,
1381 value_fmt, value);
1382
1383 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
1384 command_print(cmd, "%s", output);
1385 output_len = 0;
1386 }
1387 }
1388 }
1389
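/* Handler for the 'idw'/'idh'/'idb' commands; an illustrative invocation is
 * "x86_32 idb 0x80", which reads and displays one byte from I/O port 0x80. */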
1390 COMMAND_HANDLER(handle_iod_command)
1391 {
1392 if (CMD_ARGC != 1)
1393 return ERROR_COMMAND_SYNTAX_ERROR;
1394
1395 uint32_t address;
1396 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
1397 if (address > 0xffff) {
1398 LOG_ERROR("%s IA-32 I/O space is 2^16, 0x%08" PRIx32 " exceeds max", __func__, address);
1399 return ERROR_COMMAND_SYNTAX_ERROR;
1400 }
1401
1402 unsigned size = 0;
1403 switch (CMD_NAME[2]) {
1404 case 'w':
1405 size = 4;
1406 break;
1407 case 'h':
1408 size = 2;
1409 break;
1410 case 'b':
1411 size = 1;
1412 break;
1413 default:
1414 return ERROR_COMMAND_SYNTAX_ERROR;
1415 }
1416 unsigned count = 1;
1417 uint8_t *buffer = calloc(count, size);
1418 struct target *target = get_current_target(CMD_CTX);
1419 int retval = x86_32_common_read_io(target, address, size, buffer);
1420 if (retval == ERROR_OK)
1421 handle_iod_output(CMD, target, address, size, count, buffer);
1422 free(buffer);
1423 return retval;
1424 }
1425
1426 static int target_fill_io(struct target *target,
1427 uint32_t address,
1428 unsigned data_size,
1429 /* value */
1430 uint32_t b)
1431 {
1432 LOG_DEBUG("address=0x%08" PRIx32 ", data_size=%u, b=0x%08" PRIx32,
1433 address, data_size, b);
1434 uint8_t target_buf[data_size];
1435 switch (data_size) {
1436 case 4:
1437 target_buffer_set_u32(target, target_buf, b);
1438 break;
1439 case 2:
1440 target_buffer_set_u16(target, target_buf, b);
1441 break;
1442 case 1:
1443 target_buf[0] = (b & 0x0ff);
1444 break;
1445 default:
1446 exit(-1);
1447 }
1448 return x86_32_common_write_io(target, address, data_size, target_buf);
1449 }
1450
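/* Handler for the 'iww'/'iwh'/'iwb' commands; an illustrative invocation is
 * "x86_32 iwb 0x80 0x55", which writes the byte 0x55 to I/O port 0x80. */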
1451 COMMAND_HANDLER(handle_iow_command)
1452 {
1453 if (CMD_ARGC != 2)
1454 return ERROR_COMMAND_SYNTAX_ERROR;
1455 uint32_t address;
1456 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
1457 uint32_t value;
1458 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
1459 struct target *target = get_current_target(CMD_CTX);
1460
1461 unsigned wordsize;
1462 switch (CMD_NAME[2]) {
1463 case 'w':
1464 wordsize = 4;
1465 break;
1466 case 'h':
1467 wordsize = 2;
1468 break;
1469 case 'b':
1470 wordsize = 1;
1471 break;
1472 default:
1473 return ERROR_COMMAND_SYNTAX_ERROR;
1474 }
1475 return target_fill_io(target, address, wordsize, value);
1476 }
1477
1478 static const struct command_registration x86_32_exec_command_handlers[] = {
1479 {
1480 .name = "iww",
1481 .mode = COMMAND_EXEC,
1482 .handler = handle_iow_command,
1483 .help = "write I/O port word",
1484 .usage = "port data[word]",
1485 },
1486 {
1487 .name = "iwh",
1488 .mode = COMMAND_EXEC,
1489 .handler = handle_iow_command,
1490 .help = "write I/O port halfword",
1491 .usage = "port data[halfword]",
1492 },
1493 {
1494 .name = "iwb",
1495 .mode = COMMAND_EXEC,
1496 .handler = handle_iow_command,
1497 .help = "write I/O port byte",
1498 .usage = "port data[byte]",
1499 },
1500 {
1501 .name = "idw",
1502 .mode = COMMAND_EXEC,
1503 .handler = handle_iod_command,
1504 .help = "display I/O port word",
1505 .usage = "port",
1506 },
1507 {
1508 .name = "idh",
1509 .mode = COMMAND_EXEC,
1510 .handler = handle_iod_command,
1511 .help = "display I/O port halfword",
1512 .usage = "port",
1513 },
1514 {
1515 .name = "idb",
1516 .mode = COMMAND_EXEC,
1517 .handler = handle_iod_command,
1518 .help = "display I/O port byte",
1519 .usage = "port",
1520 },
1521
1522 COMMAND_REGISTRATION_DONE
1523 };
1524
1525 const struct command_registration x86_32_command_handlers[] = {
1526 {
1527 .name = "x86_32",
1528 .mode = COMMAND_ANY,
1529 .help = "x86_32 target commands",
1530 .usage = "",
1531 .chain = x86_32_exec_command_handlers,
1532 },
1533 COMMAND_REGISTRATION_DONE
1534 };
