jtag: linuxgpiod: drop extra parenthesis
[openocd.git] / src / target / riscv / riscv-013.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /*
4 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
5 * latest draft.
6 */
7
8 #include <assert.h>
9 #include <stdlib.h>
10 #include <time.h>
11
12 #ifdef HAVE_CONFIG_H
13 #include "config.h"
14 #endif
15
16 #include "target/target.h"
17 #include "target/algorithm.h"
18 #include "target/target_type.h"
19 #include <helper/log.h>
20 #include "jtag/jtag.h"
21 #include "target/register.h"
22 #include "target/breakpoints.h"
23 #include "helper/time_support.h"
24 #include "helper/list.h"
25 #include "riscv.h"
26 #include "debug_defines.h"
27 #include "rtos/rtos.h"
28 #include "program.h"
29 #include "asm.h"
30 #include "batch.h"
31
32 static int riscv013_on_step_or_resume(struct target *target, bool step);
33 static int riscv013_step_or_resume_current_hart(struct target *target,
34 bool step, bool use_hasel);
35 static void riscv013_clear_abstract_error(struct target *target);
36
37 /* Implementations of the functions in struct riscv_info. */
38 static int riscv013_get_register(struct target *target,
39 riscv_reg_t *value, int rid);
40 static int riscv013_set_register(struct target *target, int regid, uint64_t value);
41 static int riscv013_select_current_hart(struct target *target);
42 static int riscv013_halt_prep(struct target *target);
43 static int riscv013_halt_go(struct target *target);
44 static int riscv013_resume_go(struct target *target);
45 static int riscv013_step_current_hart(struct target *target);
46 static int riscv013_on_halt(struct target *target);
47 static int riscv013_on_step(struct target *target);
48 static int riscv013_resume_prep(struct target *target);
49 static bool riscv013_is_halted(struct target *target);
50 static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
51 static int riscv013_write_debug_buffer(struct target *target, unsigned index,
52 riscv_insn_t d);
53 static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
54 index);
55 static int riscv013_execute_debug_buffer(struct target *target);
56 static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
57 static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
58 static int riscv013_dmi_write_u64_bits(struct target *target);
59 static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);
60 static int register_read(struct target *target, uint64_t *value, uint32_t number);
61 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
62 static int register_write_direct(struct target *target, unsigned number,
63 uint64_t value);
64 static int read_memory(struct target *target, target_addr_t address,
65 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment);
66 static int write_memory(struct target *target, target_addr_t address,
67 uint32_t size, uint32_t count, const uint8_t *buffer);
68
69 /**
70 * Since almost everything can be accomplish by scanning the dbus register, all
71 * functions here assume dbus is already selected. The exception are functions
72 * called directly by OpenOCD, which can't assume anything about what's
73 * currently in IR. They should set IR to dbus explicitly.
74 */
75
/* Extract the value of the field selected by mask from reg.
 * ((mask) & ~((mask) << 1)) isolates the lowest set bit of mask, so the
 * division is equivalent to shifting right by the field's bit offset. */
#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
/* Return reg with the field selected by mask replaced by val. The
 * multiplication by the lowest set bit of mask shifts val into place. */
#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))

/* Values of dcsr.cause: why the hart entered Debug Mode. */
#define CSR_DCSR_CAUSE_SWBP		1
#define CSR_DCSR_CAUSE_TRIGGER	2
#define CSR_DCSR_CAUSE_DEBUGINT	3
#define CSR_DCSR_CAUSE_STEP		4
#define CSR_DCSR_CAUSE_HALT		5
#define CSR_DCSR_CAUSE_GROUP	6

/* Declare and initialize a local pointer to this target's 0.13-specific info. */
#define RISCV013_INFO(r)			riscv013_info_t *r = get_info(target)
87
88 /*** JTAG registers. ***/
89
/* Values for the op field of an outgoing DMI request. */
typedef enum {
	DMI_OP_NOP = 0,
	DMI_OP_READ = 1,
	DMI_OP_WRITE = 2
} dmi_op_t;
/* Values of the op field in a DMI response. FAILED and BUSY are sticky in
 * the DTM until cleared (via dmireset in dtmcs). */
typedef enum {
	DMI_STATUS_SUCCESS = 0,
	DMI_STATUS_FAILED = 2,
	DMI_STATUS_BUSY = 3
} dmi_status_t;

/* Symbolic names for scratch slots; presumably SLOT_LAST doubles as the
 * slot count — verify against users of slot_t. */
typedef enum slot {
	SLOT0,
	SLOT1,
	SLOT_LAST,
} slot_t;
106
107 /*** Debug Bus registers. ***/
108
109 #define CMDERR_NONE 0
110 #define CMDERR_BUSY 1
111 #define CMDERR_NOT_SUPPORTED 2
112 #define CMDERR_EXCEPTION 3
113 #define CMDERR_HALT_RESUME 4
114 #define CMDERR_OTHER 7
115
116 /*** Info about the core being debugged. ***/
117
/* Description of a hardware trigger (breakpoint/watchpoint) to set up. */
struct trigger {
	uint64_t address;
	uint32_t length;
	uint64_t mask;
	uint64_t value;
	/* Access types the trigger should fire on. */
	bool read, write, execute;
	/* Identifier used to associate the trigger with the breakpoint or
	 * watchpoint it implements. */
	int unique_id;
};

/* Tri-state for facts about the target that are discovered lazily. */
typedef enum {
	YNM_MAYBE,
	YNM_YES,
	YNM_NO
} yes_no_maybe_t;
132
/* Per-Debug-Module state, shared between all harts/targets behind the same DM. */
typedef struct {
	/* Linkage in the global dm_list. */
	struct list_head list;
	/* Absolute JTAG chain position of the TAP this DM sits behind; used as
	 * the lookup key in dm_list (see get_dm()). */
	int abs_chain_position;

	/* The number of harts connected to this DM. */
	int hart_count;
	/* Indicates we already reset this DM, so don't need to do it again. */
	bool was_reset;
	/* Targets that are connected to this DM. */
	struct list_head target_list;
	/* The currently selected hartid on this DM. */
	int current_hartid;
	/* Whether hasel-based multi-hart selection works — presumably set
	 * during examine; verify against users. */
	bool hasel_supported;

	/* The program buffer stores executable code. 0 is an illegal instruction,
	 * so we use 0 to mean the cached value is invalid. */
	uint32_t progbuf_cache[16];
} dm013_info_t;

/* One entry of a DM's target_list: a target served by that DM. */
typedef struct {
	struct list_head list;
	struct target *target;
} target_list_t;
156
/* Per-target (per-hart) state for debug spec 0.13. */
typedef struct {
	/* The index used to address this hart in its DM. */
	unsigned index;
	/* Number of address bits in the dbus register. */
	unsigned abits;
	/* Number of abstract command data registers. */
	unsigned datacount;
	/* Number of words in the Program Buffer. */
	unsigned progbufsize;

	/* We cache the read-only bits of sbcs here. */
	uint32_t sbcs;

	/* Whether the program buffer can be written; discovered lazily. */
	yes_no_maybe_t progbuf_writable;
	/* We only need the address so that we know the alignment of the buffer. */
	riscv_addr_t progbuf_address;

	/* Number of run-test/idle cycles the target requests we do after each dbus
	 * access. */
	unsigned int dtmcs_idle;

	/* This value is incremented every time a dbus access comes back as "busy".
	 * It's used to determine how many run-test/idle cycles to feed the target
	 * in between accesses. */
	unsigned int dmi_busy_delay;

	/* Number of run-test/idle cycles to add between consecutive bus master
	 * reads/writes respectively. */
	unsigned int bus_master_write_delay, bus_master_read_delay;

	/* This value is increased every time we tried to execute two commands
	 * consecutively, and the second one failed because the previous hadn't
	 * completed yet. It's used to add extra run-test/idle cycles after
	 * starting a command, so we don't have to waste time checking for busy to
	 * go low. */
	unsigned int ac_busy_delay;

	/* Whether abstract commands can access FPRs/CSRs; set to false once a
	 * "not supported" cmderr is seen for the corresponding access. */
	bool abstract_read_csr_supported;
	bool abstract_write_csr_supported;
	bool abstract_read_fpr_supported;
	bool abstract_write_fpr_supported;

	/* Whether memory-access abstract commands support post-increment. */
	yes_no_maybe_t has_aampostincrement;

	/* When a function returns some error due to a failure indicated by the
	 * target in cmderr, the caller can look here to see what that error was.
	 * (Compare with errno.) */
	uint8_t cmderr;

	/* Some fields from hartinfo. */
	uint8_t datasize;
	uint8_t dataaccess;
	int16_t dataaddr;

	/* The width of the hartsel field. */
	unsigned hartsellen;

	/* DM that provides access to this target. */
	dm013_info_t *dm;
} riscv013_info_t;
217
218 static LIST_HEAD(dm_list);
219
220 static riscv013_info_t *get_info(const struct target *target)
221 {
222 struct riscv_info *info = target->arch_info;
223 assert(info);
224 assert(info->version_specific);
225 return info->version_specific;
226 }
227
228 /**
229 * Return the DM structure for this target. If there isn't one, find it in the
230 * global list of DMs. If it's not in there, then create one and initialize it
231 * to 0.
232 */
static dm013_info_t *get_dm(struct target *target)
{
	RISCV013_INFO(info);
	/* Fast path: this target already resolved its DM. */
	if (info->dm)
		return info->dm;

	/* DMs are keyed by the TAP's absolute position on the scan chain. */
	int abs_chain_position = target->tap->abs_chain_position;

	dm013_info_t *entry;
	dm013_info_t *dm = NULL;
	list_for_each_entry(entry, &dm_list, list) {
		if (entry->abs_chain_position == abs_chain_position) {
			dm = entry;
			break;
		}
	}

	/* Not seen before: allocate a fresh DM and register it globally.
	 * current_hartid/hart_count start at -1, meaning "unknown". */
	if (!dm) {
		LOG_DEBUG("[%d] Allocating new DM", target->coreid);
		dm = calloc(1, sizeof(dm013_info_t));
		if (!dm)
			return NULL;
		dm->abs_chain_position = abs_chain_position;
		dm->current_hartid = -1;
		dm->hart_count = -1;
		INIT_LIST_HEAD(&dm->target_list);
		list_add(&dm->list, &dm_list);
	}

	info->dm = dm;
	/* Make sure this target appears exactly once in the DM's target list. */
	target_list_t *target_entry;
	list_for_each_entry(target_entry, &dm->target_list, list) {
		if (target_entry->target == target)
			return dm;
	}
	target_entry = calloc(1, sizeof(*target_entry));
	if (!target_entry) {
		/* Undo the cached pointer so a later call retries cleanly. */
		info->dm = NULL;
		return NULL;
	}
	target_entry->target = target;
	list_add(&target_entry->list, &dm->target_list);

	return dm;
}
278
279 static uint32_t set_hartsel(uint32_t initial, uint32_t index)
280 {
281 initial &= ~DM_DMCONTROL_HARTSELLO;
282 initial &= ~DM_DMCONTROL_HARTSELHI;
283
284 uint32_t index_lo = index & ((1 << DM_DMCONTROL_HARTSELLO_LENGTH) - 1);
285 initial |= index_lo << DM_DMCONTROL_HARTSELLO_OFFSET;
286 uint32_t index_hi = index >> DM_DMCONTROL_HARTSELLO_LENGTH;
287 assert(index_hi < 1 << DM_DMCONTROL_HARTSELHI_LENGTH);
288 initial |= index_hi << DM_DMCONTROL_HARTSELHI_OFFSET;
289
290 return initial;
291 }
292
293 static void decode_dmi(char *text, unsigned address, unsigned data)
294 {
295 static const struct {
296 unsigned address;
297 uint64_t mask;
298 const char *name;
299 } description[] = {
300 { DM_DMCONTROL, DM_DMCONTROL_HALTREQ, "haltreq" },
301 { DM_DMCONTROL, DM_DMCONTROL_RESUMEREQ, "resumereq" },
302 { DM_DMCONTROL, DM_DMCONTROL_HARTRESET, "hartreset" },
303 { DM_DMCONTROL, DM_DMCONTROL_HASEL, "hasel" },
304 { DM_DMCONTROL, DM_DMCONTROL_HARTSELHI, "hartselhi" },
305 { DM_DMCONTROL, DM_DMCONTROL_HARTSELLO, "hartsello" },
306 { DM_DMCONTROL, DM_DMCONTROL_NDMRESET, "ndmreset" },
307 { DM_DMCONTROL, DM_DMCONTROL_DMACTIVE, "dmactive" },
308 { DM_DMCONTROL, DM_DMCONTROL_ACKHAVERESET, "ackhavereset" },
309
310 { DM_DMSTATUS, DM_DMSTATUS_IMPEBREAK, "impebreak" },
311 { DM_DMSTATUS, DM_DMSTATUS_ALLHAVERESET, "allhavereset" },
312 { DM_DMSTATUS, DM_DMSTATUS_ANYHAVERESET, "anyhavereset" },
313 { DM_DMSTATUS, DM_DMSTATUS_ALLRESUMEACK, "allresumeack" },
314 { DM_DMSTATUS, DM_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
315 { DM_DMSTATUS, DM_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
316 { DM_DMSTATUS, DM_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
317 { DM_DMSTATUS, DM_DMSTATUS_ALLUNAVAIL, "allunavail" },
318 { DM_DMSTATUS, DM_DMSTATUS_ANYUNAVAIL, "anyunavail" },
319 { DM_DMSTATUS, DM_DMSTATUS_ALLRUNNING, "allrunning" },
320 { DM_DMSTATUS, DM_DMSTATUS_ANYRUNNING, "anyrunning" },
321 { DM_DMSTATUS, DM_DMSTATUS_ALLHALTED, "allhalted" },
322 { DM_DMSTATUS, DM_DMSTATUS_ANYHALTED, "anyhalted" },
323 { DM_DMSTATUS, DM_DMSTATUS_AUTHENTICATED, "authenticated" },
324 { DM_DMSTATUS, DM_DMSTATUS_AUTHBUSY, "authbusy" },
325 { DM_DMSTATUS, DM_DMSTATUS_HASRESETHALTREQ, "hasresethaltreq" },
326 { DM_DMSTATUS, DM_DMSTATUS_CONFSTRPTRVALID, "confstrptrvalid" },
327 { DM_DMSTATUS, DM_DMSTATUS_VERSION, "version" },
328
329 { DM_ABSTRACTCS, DM_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
330 { DM_ABSTRACTCS, DM_ABSTRACTCS_BUSY, "busy" },
331 { DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR, "cmderr" },
332 { DM_ABSTRACTCS, DM_ABSTRACTCS_DATACOUNT, "datacount" },
333
334 { DM_COMMAND, DM_COMMAND_CMDTYPE, "cmdtype" },
335
336 { DM_SBCS, DM_SBCS_SBVERSION, "sbversion" },
337 { DM_SBCS, DM_SBCS_SBBUSYERROR, "sbbusyerror" },
338 { DM_SBCS, DM_SBCS_SBBUSY, "sbbusy" },
339 { DM_SBCS, DM_SBCS_SBREADONADDR, "sbreadonaddr" },
340 { DM_SBCS, DM_SBCS_SBACCESS, "sbaccess" },
341 { DM_SBCS, DM_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
342 { DM_SBCS, DM_SBCS_SBREADONDATA, "sbreadondata" },
343 { DM_SBCS, DM_SBCS_SBERROR, "sberror" },
344 { DM_SBCS, DM_SBCS_SBASIZE, "sbasize" },
345 { DM_SBCS, DM_SBCS_SBACCESS128, "sbaccess128" },
346 { DM_SBCS, DM_SBCS_SBACCESS64, "sbaccess64" },
347 { DM_SBCS, DM_SBCS_SBACCESS32, "sbaccess32" },
348 { DM_SBCS, DM_SBCS_SBACCESS16, "sbaccess16" },
349 { DM_SBCS, DM_SBCS_SBACCESS8, "sbaccess8" },
350 };
351
352 text[0] = 0;
353 for (unsigned i = 0; i < ARRAY_SIZE(description); i++) {
354 if (description[i].address == address) {
355 uint64_t mask = description[i].mask;
356 unsigned value = get_field(data, mask);
357 if (value) {
358 if (i > 0)
359 *(text++) = ' ';
360 if (mask & (mask >> 1)) {
361 /* If the field is more than 1 bit wide. */
362 sprintf(text, "%s=%d", description[i].name, value);
363 } else {
364 strcpy(text, description[i].name);
365 }
366 text += strlen(text);
367 }
368 }
369 }
370 }
371
/* Log one DMI scan at debug level: the request that was shifted in, the
 * response that came back, and the number of idle cycles used; then log a
 * decoded view of any recognized register fields. */
static void dump_field(int idle, const struct scan_field *field)
{
	/* Indexed by the 2-bit op field of the request and response. */
	static const char * const op_string[] = {"-", "r", "w", "?"};
	static const char * const status_string[] = {"+", "?", "F", "b"};

	if (debug_level < LOG_LVL_DEBUG)
		return;

	uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
	unsigned int out_op = get_field(out, DTM_DMI_OP);
	unsigned int out_data = get_field(out, DTM_DMI_DATA);
	unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;

	uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
	unsigned int in_op = get_field(in, DTM_DMI_OP);
	unsigned int in_data = get_field(in, DTM_DMI_DATA);
	unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;

	log_printf_lf(LOG_LVL_DEBUG,
			__FILE__, __LINE__, "scan",
			"%db %s %08x @%02x -> %s %08x @%02x; %di",
			field->num_bits, op_string[out_op], out_data, out_address,
			status_string[in_op], in_data, in_address, idle);

	/* Second line: symbolic field decoding, only if anything decoded. */
	char out_text[500];
	char in_text[500];
	decode_dmi(out_text, out_address, out_data);
	decode_dmi(in_text, in_address, in_data);
	if (in_text[0] || out_text[0]) {
		log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
				out_text, in_text);
	}
}
405
406 /*** Utility functions. ***/
407
408 static void select_dmi(struct target *target)
409 {
410 if (bscan_tunnel_ir_width != 0) {
411 select_dmi_via_bscan(target);
412 return;
413 }
414 jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
415 }
416
/* Scan @p out through the 32-bit dtmcs (dtmcontrol) register and return the
 * value scanned out of it, leaving IR pointed back at dbus/DMI afterwards.
 * NOTE(review): on a failed jtag_execute_queue() this returns the error code
 * through the uint32_t data path, so callers cannot reliably distinguish an
 * error from scan data — confirm callers treat this as best-effort. */
static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
{
	struct scan_field field;
	uint8_t in_value[4];
	uint8_t out_value[4] = { 0 };

	if (bscan_tunnel_ir_width != 0)
		return dtmcontrol_scan_via_bscan(target, out);

	buf_set_u32(out_value, 0, 32, out);

	jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);

	field.num_bits = 32;
	field.out_value = out_value;
	field.in_value = in_value;
	jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);

	/* Always return to dmi. */
	select_dmi(target);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("failed jtag scan: %d", retval);
		return retval;
	}

	uint32_t in = buf_get_u32(field.in_value, 0, 32);
	LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);

	return in;
}
449
450 static void increase_dmi_busy_delay(struct target *target)
451 {
452 riscv013_info_t *info = get_info(target);
453 info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
454 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
455 info->dtmcs_idle, info->dmi_busy_delay,
456 info->ac_busy_delay);
457
458 dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
459 }
460
461 /**
462 * exec: If this is set, assume the scan results in an execution, so more
463 * run-test/idle cycles may be required.
464 */
static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
		uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
		bool exec)
{
	riscv013_info_t *info = get_info(target);
	RISCV_INFO(r);
	/* One DMI scan is op + data + address bits wide. */
	unsigned num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH;
	size_t num_bytes = (num_bits + 7) / 8;
	uint8_t in[num_bytes];
	uint8_t out[num_bytes];
	struct scan_field field = {
		.num_bits = num_bits,
		.out_value = out,
		.in_value = in
	};
	riscv_bscan_tunneled_scan_context_t bscan_ctxt;

	/* For a while after reset the target may be slow; once reset_delays_wait
	 * scans have elapsed, drop the accumulated busy delays back to zero. */
	if (r->reset_delays_wait >= 0) {
		r->reset_delays_wait--;
		if (r->reset_delays_wait < 0) {
			info->dmi_busy_delay = 0;
			info->ac_busy_delay = 0;
		}
	}

	memset(in, 0, num_bytes);
	memset(out, 0, num_bytes);

	/* abits == 0 would mean dtmcs hasn't been examined yet. */
	assert(info->abits != 0);

	buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
	buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
	buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);

	/* I wanted to place this code in a different function, but the way JTAG command
	   queueing works in the jtag handling functions, the scan fields either have to be
	   heap allocated, global/static, or else they need to stay on the stack until
	   the jtag_execute_queue() call. Heap or static fields in this case doesn't seem
	   the best fit. Declaring stack based field values in a subsidiary function call wouldn't
	   work. */
	if (bscan_tunnel_ir_width != 0) {
		riscv_add_bscan_tunneled_scan(target, &field, &bscan_ctxt);
	} else {
		/* Assume dbus is already selected. */
		jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
	}

	/* Feed the target extra run-test/idle cycles; more if this scan kicks
	 * off something (an abstract command) that takes time to execute. */
	int idle_count = info->dmi_busy_delay;
	if (exec)
		idle_count += info->ac_busy_delay;

	if (idle_count)
		jtag_add_runtest(idle_count, TAP_IDLE);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("dmi_scan failed jtag scan");
		if (data_in)
			*data_in = ~0;
		return DMI_STATUS_FAILED;
	}

	if (bscan_tunnel_ir_width != 0) {
		/* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
		buffer_shr(in, num_bytes, 1);
	}

	if (data_in)
		*data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);

	if (address_in)
		*address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
	dump_field(idle_count, &field);
	/* The op field of the response carries the status of the previous access. */
	return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
}
540
541 /**
542 * @param target
543 * @param data_in The data we received from the target.
544 * @param dmi_busy_encountered
545 * If non-NULL, will be updated to reflect whether DMI busy was
546 * encountered while executing this operation or not.
547 * @param dmi_op The operation to perform (read/write/nop).
548 * @param address The address argument to that operation.
549 * @param data_out The data to send to the target.
550 * @param timeout_sec
551 * @param exec When true, this scan will execute something, so extra RTI
552 * cycles may be added.
553 * @param ensure_success
554 * Scan a nop after the requested operation, ensuring the
555 * DMI operation succeeded.
556 */
static int dmi_op_timeout(struct target *target, uint32_t *data_in,
		bool *dmi_busy_encountered, int dmi_op, uint32_t address,
		uint32_t data_out, int timeout_sec, bool exec, bool ensure_success)
{
	select_dmi(target);

	dmi_status_t status;
	uint32_t address_in;

	if (dmi_busy_encountered)
		*dmi_busy_encountered = false;

	/* Human-readable operation name for error messages. */
	const char *op_name;
	switch (dmi_op) {
		case DMI_OP_NOP:
			op_name = "nop";
			break;
		case DMI_OP_READ:
			op_name = "read";
			break;
		case DMI_OP_WRITE:
			op_name = "write";
			break;
		default:
			LOG_ERROR("Invalid DMI operation: %d", dmi_op);
			return ERROR_FAIL;
	}

	keep_alive();

	time_t start = time(NULL);
	/* This first loop performs the request. Note that if for some reason this
	 * stays busy, it is actually due to the previous access. */
	while (1) {
		status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
				exec);
		if (status == DMI_STATUS_BUSY) {
			/* Lengthen the idle delay (and reset the sticky busy state)
			 * before retrying. */
			increase_dmi_busy_delay(target);
			if (dmi_busy_encountered)
				*dmi_busy_encountered = true;
		} else if (status == DMI_STATUS_SUCCESS) {
			break;
		} else {
			LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
			dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
			return ERROR_FAIL;
		}
		if (time(NULL) - start > timeout_sec)
			return ERROR_TIMEOUT_REACHED;
	}

	/* NOTE(review): unreachable — the loop above only exits via break on
	 * DMI_STATUS_SUCCESS or by returning; kept purely as defensiveness. */
	if (status != DMI_STATUS_SUCCESS) {
		LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
		return ERROR_FAIL;
	}

	if (ensure_success) {
		/* This second loop ensures the request succeeded, and gets back data.
		 * Note that NOP can result in a 'busy' result as well, but that would be
		 * noticed on the next DMI access we do. */
		while (1) {
			status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
					false);
			if (status == DMI_STATUS_BUSY) {
				increase_dmi_busy_delay(target);
				if (dmi_busy_encountered)
					*dmi_busy_encountered = true;
			} else if (status == DMI_STATUS_SUCCESS) {
				break;
			} else {
				if (data_in) {
					LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
							op_name, address, *data_in, status);
				} else {
					LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
							status);
				}
				dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
				return ERROR_FAIL;
			}
			if (time(NULL) - start > timeout_sec)
				return ERROR_TIMEOUT_REACHED;
		}
	}

	return ERROR_OK;
}
644
645 static int dmi_op(struct target *target, uint32_t *data_in,
646 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
647 uint32_t data_out, bool exec, bool ensure_success)
648 {
649 int result = dmi_op_timeout(target, data_in, dmi_busy_encountered, dmi_op,
650 address, data_out, riscv_command_timeout_sec, exec, ensure_success);
651 if (result == ERROR_TIMEOUT_REACHED) {
652 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
653 "either really slow or broken. You could increase the "
654 "timeout with riscv set_command_timeout_sec.",
655 riscv_command_timeout_sec);
656 return ERROR_FAIL;
657 }
658 return result;
659 }
660
661 static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
662 {
663 return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false, true);
664 }
665
666 static int dmi_read_exec(struct target *target, uint32_t *value, uint32_t address)
667 {
668 return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true, true);
669 }
670
671 static int dmi_write(struct target *target, uint32_t address, uint32_t value)
672 {
673 return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true);
674 }
675
676 static int dmi_write_exec(struct target *target, uint32_t address,
677 uint32_t value, bool ensure_success)
678 {
679 return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true, ensure_success);
680 }
681
/* Read dmstatus into *dmstatus, checking the Debug Module version and,
 * when @p authenticated is set, that the debugger is authenticated.
 * NOTE(review): an unsupported version is logged but still falls through to
 * return ERROR_OK — confirm that is intentional. */
static int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
		bool authenticated, unsigned timeout_sec)
{
	int result = dmi_op_timeout(target, dmstatus, NULL, DMI_OP_READ,
			DM_DMSTATUS, 0, timeout_sec, false, true);
	if (result != ERROR_OK)
		return result;
	int dmstatus_version = get_field(*dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (1.0), not "
				"%d (dmstatus=0x%x). This error might be caused by a JTAG "
				"signal issue. Try reducing the JTAG clock speed.",
				get_field(*dmstatus, DM_DMSTATUS_VERSION), *dmstatus);
	} else if (authenticated && !get_field(*dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", *dmstatus);
		return ERROR_FAIL;
	}
	return ERROR_OK;
}
703
704 static int dmstatus_read(struct target *target, uint32_t *dmstatus,
705 bool authenticated)
706 {
707 return dmstatus_read_timeout(target, dmstatus, authenticated,
708 riscv_command_timeout_sec);
709 }
710
711 static void increase_ac_busy_delay(struct target *target)
712 {
713 riscv013_info_t *info = get_info(target);
714 info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
715 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
716 info->dtmcs_idle, info->dmi_busy_delay,
717 info->ac_busy_delay);
718 }
719
720 static uint32_t __attribute__((unused)) abstract_register_size(unsigned width)
721 {
722 switch (width) {
723 case 32:
724 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 2);
725 case 64:
726 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 3);
727 case 128:
728 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 4);
729 default:
730 LOG_ERROR("Unsupported register width: %d", width);
731 return 0;
732 }
733 }
734
/* Poll abstractcs until its busy flag clears, leaving the last-read value in
 * *abstractcs. Returns ERROR_OK once idle; ERROR_FAIL on a DMI error or when
 * riscv_command_timeout_sec elapses (logging any pending cmderr first). */
static int wait_for_idle(struct target *target, uint32_t *abstractcs)
{
	RISCV013_INFO(info);
	time_t start = time(NULL);
	while (1) {
		if (dmi_read(target, abstractcs, DM_ABSTRACTCS) != ERROR_OK)
			return ERROR_FAIL;

		if (get_field(*abstractcs, DM_ABSTRACTCS_BUSY) == 0)
			return ERROR_OK;

		if (time(NULL) - start > riscv_command_timeout_sec) {
			info->cmderr = get_field(*abstractcs, DM_ABSTRACTCS_CMDERR);
			if (info->cmderr != CMDERR_NONE) {
				/* Indexed by the 3-bit cmderr field, so all eight
				 * possible values are covered. */
				const char *errors[8] = {
					"none",
					"busy",
					"not supported",
					"exception",
					"halt/resume",
					"reserved",
					"reserved",
					"other" };

				LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
						errors[info->cmderr], *abstractcs);
			}

			LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
					"Increase the timeout with riscv set_command_timeout_sec.",
					riscv_command_timeout_sec,
					*abstractcs);
			return ERROR_FAIL;
		}
	}
}
771
/* Write @p command to the abstract command register and wait for completion.
 * On failure (non-zero cmderr or busy timeout) the error bits are cleared in
 * abstractcs and ERROR_FAIL is returned; info->cmderr records the cause for
 * the caller to inspect. */
static int execute_abstract_command(struct target *target, uint32_t command)
{
	RISCV013_INFO(info);
	if (debug_level >= LOG_LVL_DEBUG) {
		switch (get_field(command, DM_COMMAND_CMDTYPE)) {
			case 0:
				/* Access Register command: decode its fields for the log. */
				LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
						"transfer=%d, write=%d, regno=0x%x",
						command,
						8 << get_field(command, AC_ACCESS_REGISTER_AARSIZE),
						get_field(command, AC_ACCESS_REGISTER_POSTEXEC),
						get_field(command, AC_ACCESS_REGISTER_TRANSFER),
						get_field(command, AC_ACCESS_REGISTER_WRITE),
						get_field(command, AC_ACCESS_REGISTER_REGNO));
				break;
			default:
				LOG_DEBUG("command=0x%x", command);
				break;
		}
	}

	/* Don't confirm the write itself; wait_for_idle() below will surface
	 * any problem via abstractcs. */
	if (dmi_write_exec(target, DM_COMMAND, command, false) != ERROR_OK)
		return ERROR_FAIL;

	uint32_t abstractcs = 0;
	int result = wait_for_idle(target, &abstractcs);

	info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
	if (info->cmderr != 0 || result != ERROR_OK) {
		LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
		/* Clear the error. */
		dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
809
/* Read abstract command argument @p index (of @p size_bits bits) out of the
 * data0.. registers. Returns ~0 for unsupported sizes.
 * NOTE(review): dmi_read() failures are ignored here, so a failed read can
 * silently yield a partial value — confirm callers tolerate that. */
static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
		unsigned size_bits)
{
	riscv_reg_t value = 0;
	uint32_t v;
	/* Arguments are packed into consecutive 32-bit data registers. */
	unsigned offset = index * size_bits / 32;
	switch (size_bits) {
		default:
			LOG_ERROR("Unsupported size: %d bits", size_bits);
			return ~0;
		case 64:
			dmi_read(target, &v, DM_DATA0 + offset + 1);
			value |= ((uint64_t) v) << 32;
			/* falls through */
		case 32:
			dmi_read(target, &v, DM_DATA0 + offset);
			value |= v;
	}
	return value;
}
830
831 static int write_abstract_arg(struct target *target, unsigned index,
832 riscv_reg_t value, unsigned size_bits)
833 {
834 unsigned offset = index * size_bits / 32;
835 switch (size_bits) {
836 default:
837 LOG_ERROR("Unsupported size: %d bits", size_bits);
838 return ERROR_FAIL;
839 case 64:
840 dmi_write(target, DM_DATA0 + offset + 1, value >> 32);
841 /* falls through */
842 case 32:
843 dmi_write(target, DM_DATA0 + offset, value);
844 }
845 return ERROR_OK;
846 }
847
/**
 * Build an Access Register abstract command for register @p number.
 * @par size in bits (32 or 64; anything else logs and asserts)
 * @par flags extra AC_ACCESS_REGISTER_* bits to OR in (TRANSFER, WRITE, ...)
 *
 * GDB register numbers are translated into the abstract-command regno space:
 * 0x1000+ for XPRs, 0x1020+ for FPRs, raw CSR numbers for CSRs, and 0xc000+
 * for target-specific custom registers; anything else asserts.
 */
static uint32_t access_register_command(struct target *target, uint32_t number,
		unsigned size, uint32_t flags)
{
	uint32_t command = set_field(0, DM_COMMAND_CMDTYPE, 0);
	switch (size) {
		case 32:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 2);
			break;
		case 64:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3);
			break;
		default:
			LOG_ERROR("%d-bit register %s not supported.", size,
					gdb_regno_name(number));
			assert(0);
	}

	if (number <= GDB_REGNO_XPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1000 + number - GDB_REGNO_ZERO);
	} else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1020 + number - GDB_REGNO_FPR0);
	} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				number - GDB_REGNO_CSR0);
	} else if (number >= GDB_REGNO_COUNT) {
		/* Custom register. */
		assert(target->reg_cache->reg_list[number].arch_info);
		riscv_reg_info_t *reg_info = target->reg_cache->reg_list[number].arch_info;
		assert(reg_info);
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0xc000 + reg_info->custom_number);
	} else {
		/* No abstract regno mapping exists for this register. */
		assert(0);
	}

	command |= flags;

	return command;
}
892
/* Read register @p number through an abstract Access Register command into
 * *value (when non-NULL). When the target reports "not supported" for an
 * FPR/CSR access, remember that so we don't keep retrying the abstract path. */
static int register_read_abstract(struct target *target, uint64_t *value,
		uint32_t number, unsigned size)
{
	RISCV013_INFO(info);

	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			!info->abstract_read_fpr_supported)
		return ERROR_FAIL;
	if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
			!info->abstract_read_csr_supported)
		return ERROR_FAIL;
	/* The spec doesn't define abstract register numbers for vector registers. */
	if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31)
		return ERROR_FAIL;

	uint32_t command = access_register_command(target, number, size,
			AC_ACCESS_REGISTER_TRANSFER);

	int result = execute_abstract_command(target, command);
	if (result != ERROR_OK) {
		/* Learn from "not supported" so future reads skip this path. */
		if (info->cmderr == CMDERR_NOT_SUPPORTED) {
			if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
				info->abstract_read_fpr_supported = false;
				LOG_INFO("Disabling abstract command reads from FPRs.");
			} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
				info->abstract_read_csr_supported = false;
				LOG_INFO("Disabling abstract command reads from CSRs.");
			}
		}
		return result;
	}

	if (value)
		*value = read_abstract_arg(target, 0, size);

	return ERROR_OK;
}
930
/* Write @p value to register @p number through an abstract Access Register
 * command. When the target reports "not supported" for an FPR/CSR access,
 * remember that so we don't keep retrying the abstract path. */
static int register_write_abstract(struct target *target, uint32_t number,
		uint64_t value, unsigned size)
{
	RISCV013_INFO(info);

	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			!info->abstract_write_fpr_supported)
		return ERROR_FAIL;
	if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
			!info->abstract_write_csr_supported)
		return ERROR_FAIL;

	uint32_t command = access_register_command(target, number, size,
			AC_ACCESS_REGISTER_TRANSFER |
			AC_ACCESS_REGISTER_WRITE);

	/* Stage the value in the data registers before issuing the command. */
	if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
		return ERROR_FAIL;

	int result = execute_abstract_command(target, command);
	if (result != ERROR_OK) {
		/* Learn from "not supported" so future writes skip this path. */
		if (info->cmderr == CMDERR_NOT_SUPPORTED) {
			if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
				info->abstract_write_fpr_supported = false;
				LOG_INFO("Disabling abstract command writes to FPRs.");
			} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
				info->abstract_write_csr_supported = false;
				LOG_INFO("Disabling abstract command writes to CSRs.");
			}
		}
		return result;
	}

	return ERROR_OK;
}
966
967 /*
968 * Sets the AAMSIZE field of a memory access abstract command based on
969 * the width (bits).
970 */
971 static uint32_t abstract_memory_size(unsigned width)
972 {
973 switch (width) {
974 case 8:
975 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 0);
976 case 16:
977 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 1);
978 case 32:
979 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 2);
980 case 64:
981 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 3);
982 case 128:
983 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 4);
984 default:
985 LOG_ERROR("Unsupported memory width: %d", width);
986 return 0;
987 }
988 }
989
990 /*
991 * Creates a memory access abstract command.
992 */
993 static uint32_t access_memory_command(struct target *target, bool virtual,
994 unsigned width, bool postincrement, bool write)
995 {
996 uint32_t command = set_field(0, AC_ACCESS_MEMORY_CMDTYPE, 2);
997 command = set_field(command, AC_ACCESS_MEMORY_AAMVIRTUAL, virtual);
998 command |= abstract_memory_size(width);
999 command = set_field(command, AC_ACCESS_MEMORY_AAMPOSTINCREMENT,
1000 postincrement);
1001 command = set_field(command, AC_ACCESS_MEMORY_WRITE, write);
1002
1003 return command;
1004 }
1005
1006 static int examine_progbuf(struct target *target)
1007 {
1008 riscv013_info_t *info = get_info(target);
1009
1010 if (info->progbuf_writable != YNM_MAYBE)
1011 return ERROR_OK;
1012
1013 /* Figure out if progbuf is writable. */
1014
1015 if (info->progbufsize < 1) {
1016 info->progbuf_writable = YNM_NO;
1017 LOG_INFO("No program buffer present.");
1018 return ERROR_OK;
1019 }
1020
1021 uint64_t s0;
1022 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1023 return ERROR_FAIL;
1024
1025 struct riscv_program program;
1026 riscv_program_init(&program, target);
1027 riscv_program_insert(&program, auipc(S0));
1028 if (riscv_program_exec(&program, target) != ERROR_OK)
1029 return ERROR_FAIL;
1030
1031 if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
1032 return ERROR_FAIL;
1033
1034 riscv_program_init(&program, target);
1035 riscv_program_insert(&program, sw(S0, S0, 0));
1036 int result = riscv_program_exec(&program, target);
1037
1038 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1039 return ERROR_FAIL;
1040
1041 if (result != ERROR_OK) {
1042 /* This program might have failed if the program buffer is not
1043 * writable. */
1044 info->progbuf_writable = YNM_NO;
1045 return ERROR_OK;
1046 }
1047
1048 uint32_t written;
1049 if (dmi_read(target, &written, DM_PROGBUF0) != ERROR_OK)
1050 return ERROR_FAIL;
1051 if (written == (uint32_t) info->progbuf_address) {
1052 LOG_INFO("progbuf is writable at 0x%" PRIx64,
1053 info->progbuf_address);
1054 info->progbuf_writable = YNM_YES;
1055
1056 } else {
1057 LOG_INFO("progbuf is not writeable at 0x%" PRIx64,
1058 info->progbuf_address);
1059 info->progbuf_writable = YNM_NO;
1060 }
1061
1062 return ERROR_OK;
1063 }
1064
1065 static int is_fpu_reg(uint32_t gdb_regno)
1066 {
1067 return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) ||
1068 (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) ||
1069 (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) ||
1070 (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR);
1071 }
1072
1073 static int is_vector_reg(uint32_t gdb_regno)
1074 {
1075 return (gdb_regno >= GDB_REGNO_V0 && gdb_regno <= GDB_REGNO_V31) ||
1076 gdb_regno == GDB_REGNO_VSTART ||
1077 gdb_regno == GDB_REGNO_VXSAT ||
1078 gdb_regno == GDB_REGNO_VXRM ||
1079 gdb_regno == GDB_REGNO_VL ||
1080 gdb_regno == GDB_REGNO_VTYPE ||
1081 gdb_regno == GDB_REGNO_VLENB;
1082 }
1083
1084 static int prep_for_register_access(struct target *target, uint64_t *mstatus,
1085 int regno)
1086 {
1087 if (is_fpu_reg(regno) || is_vector_reg(regno)) {
1088 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
1089 return ERROR_FAIL;
1090 if (is_fpu_reg(regno) && (*mstatus & MSTATUS_FS) == 0) {
1091 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1092 set_field(*mstatus, MSTATUS_FS, 1)) != ERROR_OK)
1093 return ERROR_FAIL;
1094 } else if (is_vector_reg(regno) && (*mstatus & MSTATUS_VS) == 0) {
1095 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1096 set_field(*mstatus, MSTATUS_VS, 1)) != ERROR_OK)
1097 return ERROR_FAIL;
1098 }
1099 } else {
1100 *mstatus = 0;
1101 }
1102 return ERROR_OK;
1103 }
1104
1105 static int cleanup_after_register_access(struct target *target,
1106 uint64_t mstatus, int regno)
1107 {
1108 if ((is_fpu_reg(regno) && (mstatus & MSTATUS_FS) == 0) ||
1109 (is_vector_reg(regno) && (mstatus & MSTATUS_VS) == 0))
1110 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
1111 return ERROR_FAIL;
1112 return ERROR_OK;
1113 }
1114
/* Which address space a piece of scratch memory lives in, from the
 * debugger's point of view. */
typedef enum {
	SPACE_DM_DATA,
	SPACE_DMI_PROGBUF,
	SPACE_DMI_RAM
} memory_space_t;

/* A piece of scratch memory reserved by scratch_reserve(), addressable both
 * by the hart (via loads/stores) and by the debugger (via DMI or the
 * target's memory interface). */
typedef struct {
	/* How can the debugger access this memory? */
	memory_space_t memory_space;
	/* Memory address to access the scratch memory from the hart. */
	riscv_addr_t hart_address;
	/* Memory address to access the scratch memory from the debugger. */
	riscv_addr_t debug_address;
	/* Working area backing a SPACE_DMI_RAM allocation; NULL for the other
	 * memory spaces (scratch_reserve() clears it up front). */
	struct working_area *area;
} scratch_mem_t;
1130
1131 /**
1132 * Find some scratch memory to be used with the given program.
1133 */
1134 static int scratch_reserve(struct target *target,
1135 scratch_mem_t *scratch,
1136 struct riscv_program *program,
1137 unsigned size_bytes)
1138 {
1139 riscv_addr_t alignment = 1;
1140 while (alignment < size_bytes)
1141 alignment *= 2;
1142
1143 scratch->area = NULL;
1144
1145 riscv013_info_t *info = get_info(target);
1146
1147 /* Option 1: See if data# registers can be used as the scratch memory */
1148 if (info->dataaccess == 1) {
1149 /* Sign extend dataaddr. */
1150 scratch->hart_address = info->dataaddr;
1151 if (info->dataaddr & (1<<11))
1152 scratch->hart_address |= 0xfffffffffffff000ULL;
1153 /* Align. */
1154 scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);
1155
1156 if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 >=
1157 info->datasize) {
1158 scratch->memory_space = SPACE_DM_DATA;
1159 scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
1160 return ERROR_OK;
1161 }
1162 }
1163
1164 /* Option 2: See if progbuf can be used as the scratch memory */
1165 if (examine_progbuf(target) != ERROR_OK)
1166 return ERROR_FAIL;
1167
1168 /* Allow for ebreak at the end of the program. */
1169 unsigned program_size = (program->instruction_count + 1) * 4;
1170 scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
1171 ~(alignment - 1);
1172 if ((info->progbuf_writable == YNM_YES) &&
1173 ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 >=
1174 info->progbufsize)) {
1175 scratch->memory_space = SPACE_DMI_PROGBUF;
1176 scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
1177 return ERROR_OK;
1178 }
1179
1180 /* Option 3: User-configured memory area as scratch RAM */
1181 if (target_alloc_working_area(target, size_bytes + alignment - 1,
1182 &scratch->area) == ERROR_OK) {
1183 scratch->hart_address = (scratch->area->address + alignment - 1) &
1184 ~(alignment - 1);
1185 scratch->memory_space = SPACE_DMI_RAM;
1186 scratch->debug_address = scratch->hart_address;
1187 return ERROR_OK;
1188 }
1189
1190 LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
1191 "a work area with 'configure -work-area-phys'.", size_bytes);
1192 return ERROR_FAIL;
1193 }
1194
1195 static int scratch_release(struct target *target,
1196 scratch_mem_t *scratch)
1197 {
1198 return target_free_working_area(target, scratch->area);
1199 }
1200
1201 static int scratch_read64(struct target *target, scratch_mem_t *scratch,
1202 uint64_t *value)
1203 {
1204 uint32_t v;
1205 switch (scratch->memory_space) {
1206 case SPACE_DM_DATA:
1207 if (dmi_read(target, &v, DM_DATA0 + scratch->debug_address) != ERROR_OK)
1208 return ERROR_FAIL;
1209 *value = v;
1210 if (dmi_read(target, &v, DM_DATA1 + scratch->debug_address) != ERROR_OK)
1211 return ERROR_FAIL;
1212 *value |= ((uint64_t) v) << 32;
1213 break;
1214 case SPACE_DMI_PROGBUF:
1215 if (dmi_read(target, &v, DM_PROGBUF0 + scratch->debug_address) != ERROR_OK)
1216 return ERROR_FAIL;
1217 *value = v;
1218 if (dmi_read(target, &v, DM_PROGBUF1 + scratch->debug_address) != ERROR_OK)
1219 return ERROR_FAIL;
1220 *value |= ((uint64_t) v) << 32;
1221 break;
1222 case SPACE_DMI_RAM:
1223 {
1224 uint8_t buffer[8] = {0};
1225 if (read_memory(target, scratch->debug_address, 4, 2, buffer, 4) != ERROR_OK)
1226 return ERROR_FAIL;
1227 *value = buffer[0] |
1228 (((uint64_t) buffer[1]) << 8) |
1229 (((uint64_t) buffer[2]) << 16) |
1230 (((uint64_t) buffer[3]) << 24) |
1231 (((uint64_t) buffer[4]) << 32) |
1232 (((uint64_t) buffer[5]) << 40) |
1233 (((uint64_t) buffer[6]) << 48) |
1234 (((uint64_t) buffer[7]) << 56);
1235 }
1236 break;
1237 }
1238 return ERROR_OK;
1239 }
1240
1241 static int scratch_write64(struct target *target, scratch_mem_t *scratch,
1242 uint64_t value)
1243 {
1244 switch (scratch->memory_space) {
1245 case SPACE_DM_DATA:
1246 dmi_write(target, DM_DATA0 + scratch->debug_address, value);
1247 dmi_write(target, DM_DATA1 + scratch->debug_address, value >> 32);
1248 break;
1249 case SPACE_DMI_PROGBUF:
1250 dmi_write(target, DM_PROGBUF0 + scratch->debug_address, value);
1251 dmi_write(target, DM_PROGBUF1 + scratch->debug_address, value >> 32);
1252 break;
1253 case SPACE_DMI_RAM:
1254 {
1255 uint8_t buffer[8] = {
1256 value,
1257 value >> 8,
1258 value >> 16,
1259 value >> 24,
1260 value >> 32,
1261 value >> 40,
1262 value >> 48,
1263 value >> 56
1264 };
1265 if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1266 return ERROR_FAIL;
1267 }
1268 break;
1269 }
1270 return ERROR_OK;
1271 }
1272
1273 /** Return register size in bits. */
1274 static unsigned register_size(struct target *target, unsigned number)
1275 {
1276 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1277 * when this function is called during examine(). */
1278 if (target->reg_cache)
1279 return target->reg_cache->reg_list[number].size;
1280 else
1281 return riscv_xlen(target);
1282 }
1283
1284 static bool has_sufficient_progbuf(struct target *target, unsigned size)
1285 {
1286 RISCV013_INFO(info);
1287 RISCV_INFO(r);
1288
1289 return info->progbufsize + r->impebreak >= size;
1290 }
1291
1292 /**
1293 * Immediately write the new value to the requested register. This mechanism
1294 * bypasses any caches.
1295 */
1296 static int register_write_direct(struct target *target, unsigned number,
1297 uint64_t value)
1298 {
1299 LOG_DEBUG("{%d} %s <- 0x%" PRIx64, riscv_current_hartid(target),
1300 gdb_regno_name(number), value);
1301
1302 int result = register_write_abstract(target, number, value,
1303 register_size(target, number));
1304 if (result == ERROR_OK || !has_sufficient_progbuf(target, 2) ||
1305 !riscv_is_halted(target))
1306 return result;
1307
1308 struct riscv_program program;
1309 riscv_program_init(&program, target);
1310
1311 uint64_t s0;
1312 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1313 return ERROR_FAIL;
1314
1315 uint64_t mstatus;
1316 if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
1317 return ERROR_FAIL;
1318
1319 scratch_mem_t scratch;
1320 bool use_scratch = false;
1321 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
1322 riscv_supports_extension(target, 'D') &&
1323 riscv_xlen(target) < 64) {
1324 /* There are no instructions to move all the bits from a register, so
1325 * we need to use some scratch RAM. */
1326 use_scratch = true;
1327 riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));
1328
1329 if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
1330 return ERROR_FAIL;
1331
1332 if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
1333 != ERROR_OK) {
1334 scratch_release(target, &scratch);
1335 return ERROR_FAIL;
1336 }
1337
1338 if (scratch_write64(target, &scratch, value) != ERROR_OK) {
1339 scratch_release(target, &scratch);
1340 return ERROR_FAIL;
1341 }
1342
1343 } else if (number == GDB_REGNO_VTYPE) {
1344 riscv_program_insert(&program, csrr(S0, CSR_VL));
1345 riscv_program_insert(&program, vsetvli(ZERO, S0, value));
1346
1347 } else {
1348 if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
1349 return ERROR_FAIL;
1350
1351 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
1352 if (riscv_supports_extension(target, 'D'))
1353 riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
1354 else
1355 riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
1356 } else if (number == GDB_REGNO_VL) {
1357 /* "The XLEN-bit-wide read-only vl CSR can only be updated by the
1358 * vsetvli and vsetvl instructions, and the fault-only-rst vector
1359 * load instruction variants." */
1360 riscv_reg_t vtype;
1361 if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1362 return ERROR_FAIL;
1363 if (riscv_program_insert(&program, vsetvli(ZERO, S0, vtype)) != ERROR_OK)
1364 return ERROR_FAIL;
1365 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
1366 riscv_program_csrw(&program, S0, number);
1367 } else {
1368 LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
1369 return ERROR_FAIL;
1370 }
1371 }
1372
1373 int exec_out = riscv_program_exec(&program, target);
1374 /* Don't message on error. Probably the register doesn't exist. */
1375 if (exec_out == ERROR_OK && target->reg_cache) {
1376 struct reg *reg = &target->reg_cache->reg_list[number];
1377 buf_set_u64(reg->value, 0, reg->size, value);
1378 }
1379
1380 if (use_scratch)
1381 scratch_release(target, &scratch);
1382
1383 if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
1384 return ERROR_FAIL;
1385
1386 /* Restore S0. */
1387 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1388 return ERROR_FAIL;
1389
1390 return exec_out;
1391 }
1392
1393 /** Read register value from the target. Also update the cached value. */
1394 static int register_read(struct target *target, uint64_t *value, uint32_t number)
1395 {
1396 if (number == GDB_REGNO_ZERO) {
1397 *value = 0;
1398 return ERROR_OK;
1399 }
1400 int result = register_read_direct(target, value, number);
1401 if (result != ERROR_OK)
1402 return ERROR_FAIL;
1403 if (target->reg_cache) {
1404 struct reg *reg = &target->reg_cache->reg_list[number];
1405 buf_set_u64(reg->value, 0, reg->size, *value);
1406 }
1407 return ERROR_OK;
1408 }
1409
/** Actually read registers from the target right now. */
static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
{
	/* Fast path: abstract register access. */
	int result = register_read_abstract(target, value, number,
			register_size(target, number));

	/* Fall back to a program-buffer program that stages the value through
	 * S0. Only attempted for non-GPRs when a progbuf with at least 2
	 * slots is available. */
	if (result != ERROR_OK &&
			has_sufficient_progbuf(target, 2) &&
			number > GDB_REGNO_XPR31) {
		struct riscv_program program;
		riscv_program_init(&program, target);

		scratch_mem_t scratch;
		bool use_scratch = false;

		/* Save S0; it is clobbered below and restored at the end. */
		riscv_reg_t s0;
		if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
			return ERROR_FAIL;

		/* Write program to move data into s0. */

		/* For FPU/vector registers, make sure mstatus.FS/VS permit the
		 * access. */
		uint64_t mstatus;
		if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			if (riscv_supports_extension(target, 'D')
					&& riscv_xlen(target) < 64) {
				/* There are no instructions to move all the bits from a
				 * register, so we need to use some scratch RAM. */
				riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
							0));

				if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
					return ERROR_FAIL;
				use_scratch = true;

				/* S0 holds the scratch address for the fsd above. */
				if (register_write_direct(target, GDB_REGNO_S0,
							scratch.hart_address) != ERROR_OK) {
					scratch_release(target, &scratch);
					return ERROR_FAIL;
				}
			} else if (riscv_supports_extension(target, 'D')) {
				riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
			} else {
				riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
			}
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrr(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register: %s", gdb_regno_name(number));
			return ERROR_FAIL;
		}

		/* Execute program. */
		result = riscv_program_exec(&program, target);
		/* Don't message on error. Probably the register doesn't exist. */

		if (use_scratch) {
			result = scratch_read64(target, &scratch, value);
			scratch_release(target, &scratch);
			if (result != ERROR_OK)
				return result;
		} else {
			/* Read S0 */
			if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
		}

		/* Restore mstatus if prep_for_register_access() changed it. */
		if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		/* Restore S0. */
		if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
			return ERROR_FAIL;
	}

	if (result == ERROR_OK) {
		LOG_DEBUG("{%d} %s = 0x%" PRIx64, riscv_current_hartid(target),
				gdb_regno_name(number), *value);
	}

	return result;
}
1494
1495 static int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
1496 {
1497 time_t start = time(NULL);
1498 while (1) {
1499 uint32_t value;
1500 if (dmstatus_read(target, &value, false) != ERROR_OK)
1501 return ERROR_FAIL;
1502 if (dmstatus)
1503 *dmstatus = value;
1504 if (!get_field(value, DM_DMSTATUS_AUTHBUSY))
1505 break;
1506 if (time(NULL) - start > riscv_command_timeout_sec) {
1507 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1508 "Increase the timeout with riscv set_command_timeout_sec.",
1509 riscv_command_timeout_sec,
1510 value);
1511 return ERROR_FAIL;
1512 }
1513 }
1514
1515 return ERROR_OK;
1516 }
1517
1518 /*** OpenOCD target functions. ***/
1519
1520 static void deinit_target(struct target *target)
1521 {
1522 LOG_DEBUG("riscv_deinit_target()");
1523 struct riscv_info *info = target->arch_info;
1524 if (!info)
1525 return;
1526
1527 free(info->version_specific);
1528 /* TODO: free register arch_info */
1529 info->version_specific = NULL;
1530 }
1531
1532 static int set_haltgroup(struct target *target, bool *supported)
1533 {
1534 uint32_t write = set_field(DM_DMCS2_HGWRITE, DM_DMCS2_GROUP, target->smp);
1535 if (dmi_write(target, DM_DMCS2, write) != ERROR_OK)
1536 return ERROR_FAIL;
1537 uint32_t read;
1538 if (dmi_read(target, &read, DM_DMCS2) != ERROR_OK)
1539 return ERROR_FAIL;
1540 *supported = get_field(read, DM_DMCS2_GROUP) == (unsigned)target->smp;
1541 return ERROR_OK;
1542 }
1543
1544 static int discover_vlenb(struct target *target)
1545 {
1546 RISCV_INFO(r);
1547 riscv_reg_t vlenb;
1548
1549 if (register_read(target, &vlenb, GDB_REGNO_VLENB) != ERROR_OK) {
1550 LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
1551 target_name(target));
1552 r->vlenb = 0;
1553 return ERROR_OK;
1554 }
1555 r->vlenb = vlenb;
1556
1557 LOG_INFO("Vector support with vlenb=%d", r->vlenb);
1558
1559 return ERROR_OK;
1560 }
1561
/* Probe the DTM and Debug Module, enumerate harts, and discover per-hart
 * properties (XLEN, misa, vlenb). Called by the OpenOCD core when the target
 * is first examined. */
static int examine(struct target *target)
{
	/* Don't need to select dbus, since the first thing we do is read dtmcontrol. */

	uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
	LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
	LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
	LOG_DEBUG(" idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
	LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
	LOG_DEBUG(" abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
	LOG_DEBUG(" version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
	if (dtmcontrol == 0) {
		LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
		return ERROR_FAIL;
	}
	if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
		LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
				get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
		return ERROR_FAIL;
	}

	riscv013_info_t *info = get_info(target);
	/* TODO: This won't be true if there are multiple DMs. */
	info->index = target->coreid;
	info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
	info->dtmcs_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);

	/* Reset the Debug Module. */
	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;
	if (!dm->was_reset) {
		dmi_write(target, DM_DMCONTROL, 0);
		dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
		dm->was_reset = true;
	}

	/* Write all-ones to hartsel (and set hasel) so the readback below
	 * reveals which selection bits are actually implemented. */
	dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_HARTSELLO |
			DM_DMCONTROL_HARTSELHI | DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_HASEL);
	uint32_t dmcontrol;
	if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
		return ERROR_FAIL;

	if (!get_field(dmcontrol, DM_DMCONTROL_DMACTIVE)) {
		LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
				dmcontrol);
		return ERROR_FAIL;
	}

	dm->hasel_supported = get_field(dmcontrol, DM_DMCONTROL_HASEL);

	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("dmstatus: 0x%08x", dmstatus);
	int dmstatus_version = get_field(dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		/* Error was already printed out in dmstatus_read(). */
		return ERROR_FAIL;
	}

	/* Count how many of the hartsel bits we wrote as ones stuck. */
	uint32_t hartsel =
		(get_field(dmcontrol, DM_DMCONTROL_HARTSELHI) <<
				DM_DMCONTROL_HARTSELLO_LENGTH) |
		get_field(dmcontrol, DM_DMCONTROL_HARTSELLO);
	info->hartsellen = 0;
	while (hartsel & 1) {
		info->hartsellen++;
		hartsel >>= 1;
	}
	LOG_DEBUG("hartsellen=%d", info->hartsellen);

	uint32_t hartinfo;
	if (dmi_read(target, &hartinfo, DM_HARTINFO) != ERROR_OK)
		return ERROR_FAIL;

	/* Properties of the memory-mapped data registers, used later by
	 * scratch_reserve(). */
	info->datasize = get_field(hartinfo, DM_HARTINFO_DATASIZE);
	info->dataaccess = get_field(hartinfo, DM_HARTINFO_DATAACCESS);
	info->dataaddr = get_field(hartinfo, DM_HARTINFO_DATAADDR);

	if (!get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", dmstatus);
		/* If we return ERROR_FAIL here, then in a multicore setup the next
		 * core won't be examined, which means we won't set up the
		 * authentication commands for them, which means the config script
		 * needs to be a lot more complex. */
		return ERROR_OK;
	}

	if (dmi_read(target, &info->sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	/* Check that abstract data registers are accessible. */
	uint32_t abstractcs;
	if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
		return ERROR_FAIL;
	info->datacount = get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT);
	info->progbufsize = get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE);

	LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);

	RISCV_INFO(r);
	r->impebreak = get_field(dmstatus, DM_DMSTATUS_IMPEBREAK);

	if (!has_sufficient_progbuf(target, 2)) {
		LOG_WARNING("We won't be able to execute fence instructions on this "
				"target. Memory may not always appear consistent. "
				"(progbufsize=%d, impebreak=%d)", info->progbufsize,
				r->impebreak);
	}

	if (info->progbufsize < 4 && riscv_enable_virtual) {
		LOG_ERROR("set_enable_virtual is not available on this target. It "
				"requires a program buffer size of at least 4. (progbufsize=%d) "
				"Use `riscv set_enable_virtual off` to continue."
				, info->progbufsize);
	}

	/* Before doing anything else we must first enumerate the harts. */
	if (dm->hart_count < 0) {
		/* Select each hart index in turn until one reports nonexistent. */
		for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
			r->current_hartid = i;
			if (riscv013_select_current_hart(target) != ERROR_OK)
				return ERROR_FAIL;

			uint32_t s;
			if (dmstatus_read(target, &s, true) != ERROR_OK)
				return ERROR_FAIL;
			if (get_field(s, DM_DMSTATUS_ANYNONEXISTENT))
				break;
			dm->hart_count = i + 1;

			/* Acknowledge any pending reset on this hart. */
			if (get_field(s, DM_DMSTATUS_ANYHAVERESET))
				dmi_write(target, DM_DMCONTROL,
						set_hartsel(DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_ACKHAVERESET, i));
		}

		LOG_DEBUG("Detected %d harts.", dm->hart_count);
	}

	r->current_hartid = target->coreid;

	if (dm->hart_count == 0) {
		LOG_ERROR("No harts found!");
		return ERROR_FAIL;
	}

	/* Don't call any riscv_* functions until after we've counted the number of
	 * cores and initialized registers. */

	if (riscv013_select_current_hart(target) != ERROR_OK)
		return ERROR_FAIL;

	/* The hart must be halted to probe XLEN/misa; resume it afterwards if
	 * we halted it here. */
	bool halted = riscv_is_halted(target);
	if (!halted) {
		if (riscv013_halt_go(target) != ERROR_OK) {
			LOG_ERROR("Fatal: Hart %d failed to halt during examine()", r->current_hartid);
			return ERROR_FAIL;
		}
	}

	/* Without knowing anything else we can at least mess with the
	 * program buffer. */
	r->debug_buffer_size = info->progbufsize;

	/* Probe XLEN: a 64-bit abstract read of S0 only succeeds on a hart
	 * with XLEN >= 64. */
	int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
	if (result == ERROR_OK)
		r->xlen = 64;
	else
		r->xlen = 32;

	/* Non-zero return (!= ERROR_OK) means the read failed. */
	if (register_read(target, &r->misa, GDB_REGNO_MISA)) {
		LOG_ERROR("Fatal: Failed to read MISA from hart %d.", r->current_hartid);
		return ERROR_FAIL;
	}

	if (riscv_supports_extension(target, 'V')) {
		if (discover_vlenb(target) != ERROR_OK)
			return ERROR_FAIL;
	}

	/* Now init registers based on what we discovered. */
	if (riscv_init_registers(target) != ERROR_OK)
		return ERROR_FAIL;

	/* Display this as early as possible to help people who are using
	 * really slow simulators. */
	LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, r->current_hartid, r->xlen,
			r->misa);

	if (!halted)
		riscv013_step_or_resume_current_hart(target, false, false);

	target_set_examined(target);

	if (target->smp) {
		bool haltgroup_supported;
		if (set_haltgroup(target, &haltgroup_supported) != ERROR_OK)
			return ERROR_FAIL;
		if (haltgroup_supported)
			LOG_INFO("Core %d made part of halt group %d.", target->coreid,
					target->smp);
		else
			LOG_INFO("Core %d could not be made part of halt group %d.",
					target->coreid, target->smp);
	}

	/* Some regression suites rely on seeing 'Examined RISC-V core' to know
	 * when they can connect with gdb/telnet.
	 * We will need to update those suites if we want to change that text. */
	LOG_INFO("Examined RISC-V core; found %d harts",
			riscv_count_harts(target));
	LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, r->current_hartid, r->xlen,
			r->misa);
	return ERROR_OK;
}
1781
1782 static int riscv013_authdata_read(struct target *target, uint32_t *value, unsigned int index)
1783 {
1784 if (index > 0) {
1785 LOG_ERROR("Spec 0.13 only has a single authdata register.");
1786 return ERROR_FAIL;
1787 }
1788
1789 if (wait_for_authbusy(target, NULL) != ERROR_OK)
1790 return ERROR_FAIL;
1791
1792 return dmi_read(target, value, DM_AUTHDATA);
1793 }
1794
1795 static int riscv013_authdata_write(struct target *target, uint32_t value, unsigned int index)
1796 {
1797 if (index > 0) {
1798 LOG_ERROR("Spec 0.13 only has a single authdata register.");
1799 return ERROR_FAIL;
1800 }
1801
1802 uint32_t before, after;
1803 if (wait_for_authbusy(target, &before) != ERROR_OK)
1804 return ERROR_FAIL;
1805
1806 dmi_write(target, DM_AUTHDATA, value);
1807
1808 if (wait_for_authbusy(target, &after) != ERROR_OK)
1809 return ERROR_FAIL;
1810
1811 if (!get_field(before, DM_DMSTATUS_AUTHENTICATED) &&
1812 get_field(after, DM_DMSTATUS_AUTHENTICATED)) {
1813 LOG_INFO("authdata_write resulted in successful authentication");
1814 int result = ERROR_OK;
1815 dm013_info_t *dm = get_dm(target);
1816 if (!dm)
1817 return ERROR_FAIL;
1818 target_list_t *entry;
1819 list_for_each_entry(entry, &dm->target_list, list) {
1820 if (examine(entry->target) != ERROR_OK)
1821 result = ERROR_FAIL;
1822 }
1823 return result;
1824 }
1825
1826 return ERROR_OK;
1827 }
1828
1829 static int riscv013_hart_count(struct target *target)
1830 {
1831 dm013_info_t *dm = get_dm(target);
1832 assert(dm);
1833 return dm->hart_count;
1834 }
1835
1836 /* Try to find out the widest memory access size depending on the selected memory access methods. */
1837 static unsigned riscv013_data_bits(struct target *target)
1838 {
1839 RISCV013_INFO(info);
1840 RISCV_INFO(r);
1841
1842 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
1843 int method = r->mem_access_methods[i];
1844
1845 if (method == RISCV_MEM_ACCESS_PROGBUF) {
1846 if (has_sufficient_progbuf(target, 3))
1847 return riscv_xlen(target);
1848 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
1849 if (get_field(info->sbcs, DM_SBCS_SBACCESS128))
1850 return 128;
1851 if (get_field(info->sbcs, DM_SBCS_SBACCESS64))
1852 return 64;
1853 if (get_field(info->sbcs, DM_SBCS_SBACCESS32))
1854 return 32;
1855 if (get_field(info->sbcs, DM_SBCS_SBACCESS16))
1856 return 16;
1857 if (get_field(info->sbcs, DM_SBCS_SBACCESS8))
1858 return 8;
1859 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
1860 /* TODO: Once there is a spec for discovering abstract commands, we can
1861 * take those into account as well. For now we assume abstract commands
1862 * support XLEN-wide accesses. */
1863 return riscv_xlen(target);
1864 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
1865 /* No further mem access method to try. */
1866 break;
1867 }
1868 LOG_ERROR("Unable to determine supported data bits on this target. Assuming 32 bits.");
1869 return 32;
1870 }
1871
1872 static COMMAND_HELPER(riscv013_print_info, struct target *target)
1873 {
1874 RISCV013_INFO(info);
1875
1876 /* Abstract description. */
1877 riscv_print_info_line(CMD, "target", "memory.read_while_running8", get_field(info->sbcs, DM_SBCS_SBACCESS8));
1878 riscv_print_info_line(CMD, "target", "memory.write_while_running8", get_field(info->sbcs, DM_SBCS_SBACCESS8));
1879 riscv_print_info_line(CMD, "target", "memory.read_while_running16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
1880 riscv_print_info_line(CMD, "target", "memory.write_while_running16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
1881 riscv_print_info_line(CMD, "target", "memory.read_while_running32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
1882 riscv_print_info_line(CMD, "target", "memory.write_while_running32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
1883 riscv_print_info_line(CMD, "target", "memory.read_while_running64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
1884 riscv_print_info_line(CMD, "target", "memory.write_while_running64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
1885 riscv_print_info_line(CMD, "target", "memory.read_while_running128", get_field(info->sbcs, DM_SBCS_SBACCESS128));
1886 riscv_print_info_line(CMD, "target", "memory.write_while_running128", get_field(info->sbcs, DM_SBCS_SBACCESS128));
1887
1888 /* Lower level description. */
1889 riscv_print_info_line(CMD, "dm", "abits", info->abits);
1890 riscv_print_info_line(CMD, "dm", "progbufsize", info->progbufsize);
1891 riscv_print_info_line(CMD, "dm", "sbversion", get_field(info->sbcs, DM_SBCS_SBVERSION));
1892 riscv_print_info_line(CMD, "dm", "sbasize", get_field(info->sbcs, DM_SBCS_SBASIZE));
1893 riscv_print_info_line(CMD, "dm", "sbaccess128", get_field(info->sbcs, DM_SBCS_SBACCESS128));
1894 riscv_print_info_line(CMD, "dm", "sbaccess64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
1895 riscv_print_info_line(CMD, "dm", "sbaccess32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
1896 riscv_print_info_line(CMD, "dm", "sbaccess16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
1897 riscv_print_info_line(CMD, "dm", "sbaccess8", get_field(info->sbcs, DM_SBCS_SBACCESS8));
1898
1899 uint32_t dmstatus;
1900 if (dmstatus_read(target, &dmstatus, false) == ERROR_OK)
1901 riscv_print_info_line(CMD, "dm", "authenticated", get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED));
1902
1903 return 0;
1904 }
1905
1906 static int prep_for_vector_access(struct target *target, uint64_t *vtype,
1907 uint64_t *vl, unsigned *debug_vl)
1908 {
1909 RISCV_INFO(r);
1910 /* TODO: this continuous save/restore is terrible for performance. */
1911 /* Write vtype and vl. */
1912 unsigned encoded_vsew;
1913 switch (riscv_xlen(target)) {
1914 case 32:
1915 encoded_vsew = 2;
1916 break;
1917 case 64:
1918 encoded_vsew = 3;
1919 break;
1920 default:
1921 LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target));
1922 return ERROR_FAIL;
1923 }
1924
1925 /* Save vtype and vl. */
1926 if (register_read(target, vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1927 return ERROR_FAIL;
1928 if (register_read(target, vl, GDB_REGNO_VL) != ERROR_OK)
1929 return ERROR_FAIL;
1930
1931 if (register_write_direct(target, GDB_REGNO_VTYPE, encoded_vsew << 3) != ERROR_OK)
1932 return ERROR_FAIL;
1933 *debug_vl = DIV_ROUND_UP(r->vlenb * 8, riscv_xlen(target));
1934 if (register_write_direct(target, GDB_REGNO_VL, *debug_vl) != ERROR_OK)
1935 return ERROR_FAIL;
1936
1937 return ERROR_OK;
1938 }
1939
1940 static int cleanup_after_vector_access(struct target *target, uint64_t vtype,
1941 uint64_t vl)
1942 {
1943 /* Restore vtype and vl. */
1944 if (register_write_direct(target, GDB_REGNO_VTYPE, vtype) != ERROR_OK)
1945 return ERROR_FAIL;
1946 if (register_write_direct(target, GDB_REGNO_VL, vl) != ERROR_OK)
1947 return ERROR_FAIL;
1948 return ERROR_OK;
1949 }
1950
1951 static int riscv013_get_register_buf(struct target *target,
1952 uint8_t *value, int regno)
1953 {
1954 assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);
1955
1956 if (riscv_select_current_hart(target) != ERROR_OK)
1957 return ERROR_FAIL;
1958
1959 riscv_reg_t s0;
1960 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1961 return ERROR_FAIL;
1962
1963 uint64_t mstatus;
1964 if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
1965 return ERROR_FAIL;
1966
1967 uint64_t vtype, vl;
1968 unsigned debug_vl;
1969 if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
1970 return ERROR_FAIL;
1971
1972 unsigned vnum = regno - GDB_REGNO_V0;
1973 unsigned xlen = riscv_xlen(target);
1974
1975 struct riscv_program program;
1976 riscv_program_init(&program, target);
1977 riscv_program_insert(&program, vmv_x_s(S0, vnum));
1978 riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
1979
1980 int result = ERROR_OK;
1981 for (unsigned i = 0; i < debug_vl; i++) {
1982 /* Executing the program might result in an exception if there is some
1983 * issue with the vector implementation/instructions we're using. If that
1984 * happens, attempt to restore as usual. We may have clobbered the
1985 * vector register we tried to read already.
1986 * For other failures, we just return error because things are probably
1987 * so messed up that attempting to restore isn't going to help. */
1988 result = riscv_program_exec(&program, target);
1989 if (result == ERROR_OK) {
1990 uint64_t v;
1991 if (register_read_direct(target, &v, GDB_REGNO_S0) != ERROR_OK)
1992 return ERROR_FAIL;
1993 buf_set_u64(value, xlen * i, xlen, v);
1994 } else {
1995 break;
1996 }
1997 }
1998
1999 if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
2000 return ERROR_FAIL;
2001
2002 if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
2003 return ERROR_FAIL;
2004 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
2005 return ERROR_FAIL;
2006
2007 return result;
2008 }
2009
2010 static int riscv013_set_register_buf(struct target *target,
2011 int regno, const uint8_t *value)
2012 {
2013 assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);
2014
2015 if (riscv_select_current_hart(target) != ERROR_OK)
2016 return ERROR_FAIL;
2017
2018 riscv_reg_t s0;
2019 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
2020 return ERROR_FAIL;
2021
2022 uint64_t mstatus;
2023 if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
2024 return ERROR_FAIL;
2025
2026 uint64_t vtype, vl;
2027 unsigned debug_vl;
2028 if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
2029 return ERROR_FAIL;
2030
2031 unsigned vnum = regno - GDB_REGNO_V0;
2032 unsigned xlen = riscv_xlen(target);
2033
2034 struct riscv_program program;
2035 riscv_program_init(&program, target);
2036 riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
2037 int result = ERROR_OK;
2038 for (unsigned i = 0; i < debug_vl; i++) {
2039 if (register_write_direct(target, GDB_REGNO_S0,
2040 buf_get_u64(value, xlen * i, xlen)) != ERROR_OK)
2041 return ERROR_FAIL;
2042 result = riscv_program_exec(&program, target);
2043 if (result != ERROR_OK)
2044 break;
2045 }
2046
2047 if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
2048 return ERROR_FAIL;
2049
2050 if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
2051 return ERROR_FAIL;
2052 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
2053 return ERROR_FAIL;
2054
2055 return result;
2056 }
2057
2058 static uint32_t sb_sbaccess(unsigned int size_bytes)
2059 {
2060 switch (size_bytes) {
2061 case 1:
2062 return set_field(0, DM_SBCS_SBACCESS, 0);
2063 case 2:
2064 return set_field(0, DM_SBCS_SBACCESS, 1);
2065 case 4:
2066 return set_field(0, DM_SBCS_SBACCESS, 2);
2067 case 8:
2068 return set_field(0, DM_SBCS_SBACCESS, 3);
2069 case 16:
2070 return set_field(0, DM_SBCS_SBACCESS, 4);
2071 }
2072 assert(0);
2073 return 0;
2074 }
2075
/* Write `address` to the system bus address registers, widest word first, so
 * the final write is to sbaddress0 (which, with sbreadonaddr set, triggers a
 * bus access -- see read_memory_bus_v1()). Upper address words beyond 64 bits
 * are written as zero. Only the last DMI write is checked for success, and
 * only when ensure_success is set. */
static int sb_write_address(struct target *target, target_addr_t address,
		bool ensure_success)
{
	RISCV013_INFO(info);
	unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
	/* There currently is no support for >64-bit addresses in OpenOCD. */
	if (sbasize > 96)
		dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS3, 0, false, false);
	if (sbasize > 64)
		dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS2, 0, false, false);
	if (sbasize > 32)
		dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS1, address >> 32, false, false);
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS0, address,
			false, ensure_success);
}
2091
2092 static int batch_run(const struct target *target, struct riscv_batch *batch)
2093 {
2094 RISCV013_INFO(info);
2095 RISCV_INFO(r);
2096 if (r->reset_delays_wait >= 0) {
2097 r->reset_delays_wait -= batch->used_scans;
2098 if (r->reset_delays_wait <= 0) {
2099 batch->idle_count = 0;
2100 info->dmi_busy_delay = 0;
2101 info->ac_busy_delay = 0;
2102 }
2103 }
2104 return riscv_batch_run(batch);
2105 }
2106
2107 static int sba_supports_access(struct target *target, unsigned int size_bytes)
2108 {
2109 RISCV013_INFO(info);
2110 switch (size_bytes) {
2111 case 1:
2112 return get_field(info->sbcs, DM_SBCS_SBACCESS8);
2113 case 2:
2114 return get_field(info->sbcs, DM_SBCS_SBACCESS16);
2115 case 4:
2116 return get_field(info->sbcs, DM_SBCS_SBACCESS32);
2117 case 8:
2118 return get_field(info->sbcs, DM_SBCS_SBACCESS64);
2119 case 16:
2120 return get_field(info->sbcs, DM_SBCS_SBACCESS128);
2121 default:
2122 return 0;
2123 }
2124 }
2125
2126 static int sample_memory_bus_v1(struct target *target,
2127 struct riscv_sample_buf *buf,
2128 const riscv_sample_config_t *config,
2129 int64_t until_ms)
2130 {
2131 RISCV013_INFO(info);
2132 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2133 if (sbasize > 64) {
2134 LOG_ERROR("Memory sampling is only implemented for sbasize <= 64.");
2135 return ERROR_NOT_IMPLEMENTED;
2136 }
2137
2138 if (get_field(info->sbcs, DM_SBCS_SBVERSION) != 1) {
2139 LOG_ERROR("Memory sampling is only implemented for SBA version 1.");
2140 return ERROR_NOT_IMPLEMENTED;
2141 }
2142
2143 uint32_t sbcs = 0;
2144 uint32_t sbcs_valid = false;
2145
2146 uint32_t sbaddress0 = 0;
2147 bool sbaddress0_valid = false;
2148 uint32_t sbaddress1 = 0;
2149 bool sbaddress1_valid = false;
2150
2151 /* How often to read each value in a batch. */
2152 const unsigned int repeat = 5;
2153
2154 unsigned int enabled_count = 0;
2155 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2156 if (config->bucket[i].enabled)
2157 enabled_count++;
2158 }
2159
2160 while (timeval_ms() < until_ms) {
2161 /*
2162 * batch_run() adds to the batch, so we can't simply reuse the same
2163 * batch over and over. So we create a new one every time through the
2164 * loop.
2165 */
2166 struct riscv_batch *batch = riscv_batch_alloc(
2167 target, 1 + enabled_count * 5 * repeat,
2168 info->dmi_busy_delay + info->bus_master_read_delay);
2169 if (!batch)
2170 return ERROR_FAIL;
2171
2172 unsigned int result_bytes = 0;
2173 for (unsigned int n = 0; n < repeat; n++) {
2174 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2175 if (config->bucket[i].enabled) {
2176 if (!sba_supports_access(target, config->bucket[i].size_bytes)) {
2177 LOG_ERROR("Hardware does not support SBA access for %d-byte memory sampling.",
2178 config->bucket[i].size_bytes);
2179 return ERROR_NOT_IMPLEMENTED;
2180 }
2181
2182 uint32_t sbcs_write = DM_SBCS_SBREADONADDR;
2183 if (enabled_count == 1)
2184 sbcs_write |= DM_SBCS_SBREADONDATA;
2185 sbcs_write |= sb_sbaccess(config->bucket[i].size_bytes);
2186 if (!sbcs_valid || sbcs_write != sbcs) {
2187 riscv_batch_add_dmi_write(batch, DM_SBCS, sbcs_write);
2188 sbcs = sbcs_write;
2189 sbcs_valid = true;
2190 }
2191
2192 if (sbasize > 32 &&
2193 (!sbaddress1_valid ||
2194 sbaddress1 != config->bucket[i].address >> 32)) {
2195 sbaddress1 = config->bucket[i].address >> 32;
2196 riscv_batch_add_dmi_write(batch, DM_SBADDRESS1, sbaddress1);
2197 sbaddress1_valid = true;
2198 }
2199 if (!sbaddress0_valid ||
2200 sbaddress0 != (config->bucket[i].address & 0xffffffff)) {
2201 sbaddress0 = config->bucket[i].address;
2202 riscv_batch_add_dmi_write(batch, DM_SBADDRESS0, sbaddress0);
2203 sbaddress0_valid = true;
2204 }
2205 if (config->bucket[i].size_bytes > 4)
2206 riscv_batch_add_dmi_read(batch, DM_SBDATA1);
2207 riscv_batch_add_dmi_read(batch, DM_SBDATA0);
2208 result_bytes += 1 + config->bucket[i].size_bytes;
2209 }
2210 }
2211 }
2212
2213 if (buf->used + result_bytes >= buf->size) {
2214 riscv_batch_free(batch);
2215 break;
2216 }
2217
2218 size_t sbcs_key = riscv_batch_add_dmi_read(batch, DM_SBCS);
2219
2220 int result = batch_run(target, batch);
2221 if (result != ERROR_OK)
2222 return result;
2223
2224 uint32_t sbcs_read = riscv_batch_get_dmi_read_data(batch, sbcs_key);
2225 if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2226 /* Discard this batch (too much hassle to try to recover partial
2227 * data) and try again with a larger delay. */
2228 info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
2229 dmi_write(target, DM_SBCS, sbcs_read | DM_SBCS_SBBUSYERROR | DM_SBCS_SBERROR);
2230 riscv_batch_free(batch);
2231 continue;
2232 }
2233 if (get_field(sbcs_read, DM_SBCS_SBERROR)) {
2234 /* The memory we're sampling was unreadable, somehow. Give up. */
2235 dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR | DM_SBCS_SBERROR);
2236 riscv_batch_free(batch);
2237 return ERROR_FAIL;
2238 }
2239
2240 unsigned int read = 0;
2241 for (unsigned int n = 0; n < repeat; n++) {
2242 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2243 if (config->bucket[i].enabled) {
2244 assert(i < RISCV_SAMPLE_BUF_TIMESTAMP_BEFORE);
2245 uint64_t value = 0;
2246 if (config->bucket[i].size_bytes > 4)
2247 value = ((uint64_t)riscv_batch_get_dmi_read_data(batch, read++)) << 32;
2248 value |= riscv_batch_get_dmi_read_data(batch, read++);
2249
2250 buf->buf[buf->used] = i;
2251 buf_set_u64(buf->buf + buf->used + 1, 0, config->bucket[i].size_bytes * 8, value);
2252 buf->used += 1 + config->bucket[i].size_bytes;
2253 }
2254 }
2255 }
2256
2257 riscv_batch_free(batch);
2258 }
2259
2260 return ERROR_OK;
2261 }
2262
2263 static int sample_memory(struct target *target,
2264 struct riscv_sample_buf *buf,
2265 riscv_sample_config_t *config,
2266 int64_t until_ms)
2267 {
2268 if (!config->enabled)
2269 return ERROR_OK;
2270
2271 return sample_memory_bus_v1(target, buf, config, until_ms);
2272 }
2273
2274 static int init_target(struct command_context *cmd_ctx,
2275 struct target *target)
2276 {
2277 LOG_DEBUG("init");
2278 RISCV_INFO(generic_info);
2279
2280 generic_info->get_register = &riscv013_get_register;
2281 generic_info->set_register = &riscv013_set_register;
2282 generic_info->get_register_buf = &riscv013_get_register_buf;
2283 generic_info->set_register_buf = &riscv013_set_register_buf;
2284 generic_info->select_current_hart = &riscv013_select_current_hart;
2285 generic_info->is_halted = &riscv013_is_halted;
2286 generic_info->resume_go = &riscv013_resume_go;
2287 generic_info->step_current_hart = &riscv013_step_current_hart;
2288 generic_info->on_halt = &riscv013_on_halt;
2289 generic_info->resume_prep = &riscv013_resume_prep;
2290 generic_info->halt_prep = &riscv013_halt_prep;
2291 generic_info->halt_go = &riscv013_halt_go;
2292 generic_info->on_step = &riscv013_on_step;
2293 generic_info->halt_reason = &riscv013_halt_reason;
2294 generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
2295 generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
2296 generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
2297 generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
2298 generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
2299 generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
2300 generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
2301 generic_info->authdata_read = &riscv013_authdata_read;
2302 generic_info->authdata_write = &riscv013_authdata_write;
2303 generic_info->dmi_read = &dmi_read;
2304 generic_info->dmi_write = &dmi_write;
2305 generic_info->read_memory = read_memory;
2306 generic_info->hart_count = &riscv013_hart_count;
2307 generic_info->data_bits = &riscv013_data_bits;
2308 generic_info->print_info = &riscv013_print_info;
2309 if (!generic_info->version_specific) {
2310 generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
2311 if (!generic_info->version_specific)
2312 return ERROR_FAIL;
2313 }
2314 generic_info->sample_memory = sample_memory;
2315 riscv013_info_t *info = get_info(target);
2316
2317 info->progbufsize = -1;
2318
2319 info->dmi_busy_delay = 0;
2320 info->bus_master_read_delay = 0;
2321 info->bus_master_write_delay = 0;
2322 info->ac_busy_delay = 0;
2323
2324 /* Assume all these abstract commands are supported until we learn
2325 * otherwise.
2326 * TODO: The spec allows eg. one CSR to be able to be accessed abstractly
2327 * while another one isn't. We don't track that this closely here, but in
2328 * the future we probably should. */
2329 info->abstract_read_csr_supported = true;
2330 info->abstract_write_csr_supported = true;
2331 info->abstract_read_fpr_supported = true;
2332 info->abstract_write_fpr_supported = true;
2333
2334 info->has_aampostincrement = YNM_MAYBE;
2335
2336 return ERROR_OK;
2337 }
2338
2339 static int assert_reset(struct target *target)
2340 {
2341 RISCV_INFO(r);
2342
2343 select_dmi(target);
2344
2345 uint32_t control_base = set_field(0, DM_DMCONTROL_DMACTIVE, 1);
2346
2347 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
2348 /* Run the user-supplied script if there is one. */
2349 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
2350 } else if (target->rtos) {
2351 /* There's only one target, and OpenOCD thinks each hart is a thread.
2352 * We must reset them all. */
2353
2354 /* TODO: Try to use hasel in dmcontrol */
2355
2356 /* Set haltreq for each hart. */
2357 uint32_t control = set_hartsel(control_base, target->coreid);
2358 control = set_field(control, DM_DMCONTROL_HALTREQ,
2359 target->reset_halt ? 1 : 0);
2360 dmi_write(target, DM_DMCONTROL, control);
2361
2362 /* Assert ndmreset */
2363 control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
2364 dmi_write(target, DM_DMCONTROL, control);
2365
2366 } else {
2367 /* Reset just this hart. */
2368 uint32_t control = set_hartsel(control_base, r->current_hartid);
2369 control = set_field(control, DM_DMCONTROL_HALTREQ,
2370 target->reset_halt ? 1 : 0);
2371 control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
2372 dmi_write(target, DM_DMCONTROL, control);
2373 }
2374
2375 target->state = TARGET_RESET;
2376
2377 dm013_info_t *dm = get_dm(target);
2378 if (!dm)
2379 return ERROR_FAIL;
2380
2381 /* The DM might have gotten reset if OpenOCD called us in some reset that
2382 * involves SRST being toggled. So clear our cache which may be out of
2383 * date. */
2384 memset(dm->progbuf_cache, 0, sizeof(dm->progbuf_cache));
2385
2386 return ERROR_OK;
2387 }
2388
/* Take the target out of reset: clear ndmreset (keeping haltreq if
 * reset_halt was requested), then wait for each relevant hart to leave the
 * unavailable state, acking havereset as harts come back. */
static int deassert_reset(struct target *target)
{
	RISCV_INFO(r);
	RISCV013_INFO(info);
	select_dmi(target);

	/* Clear the reset, but make sure haltreq is still set */
	uint32_t control = 0, control_haltreq;
	control = set_field(control, DM_DMCONTROL_DMACTIVE, 1);
	control_haltreq = set_field(control, DM_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
	dmi_write(target, DM_DMCONTROL,
			set_hartsel(control_haltreq, r->current_hartid));

	uint32_t dmstatus;
	/* Remember the busy delay so any increase accrued while polling harts in
	 * reset can be discarded at the end. */
	int dmi_busy_delay = info->dmi_busy_delay;
	time_t start = time(NULL);

	/* With an RTOS, iterate all harts but only act on target->coreid; without
	 * one, the loop body runs once for the current hart (break at the end). */
	for (int i = 0; i < riscv_count_harts(target); ++i) {
		int index = i;
		if (target->rtos) {
			if (index != target->coreid)
				continue;
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control_haltreq, index));
		} else {
			index = r->current_hartid;
		}

		LOG_DEBUG("Waiting for hart %d to come out of reset.", index);
		while (1) {
			int result = dmstatus_read_timeout(target, &dmstatus, true,
					riscv_reset_timeout_sec);
			if (result == ERROR_TIMEOUT_REACHED)
				LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
						"reset in %ds; Increase the timeout with riscv "
						"set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec);
			if (result != ERROR_OK)
				return result;
			/* Certain debug modules, like the one in GD32VF103
			 * MCUs, violate the specification's requirement that
			 * each hart is in "exactly one of four states" and,
			 * during reset, report harts as both unavailable and
			 * halted/running. To work around this, we check for
			 * the absence of the unavailable state rather than
			 * the presence of any other state. */
			if (!get_field(dmstatus, DM_DMSTATUS_ALLUNAVAIL))
				break;
			if (time(NULL) - start > riscv_reset_timeout_sec) {
				LOG_ERROR("Hart %d didn't leave reset in %ds; "
						"dmstatus=0x%x; "
						"Increase the timeout with riscv set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec, dmstatus);
				return ERROR_FAIL;
			}
		}
		/* NOTE(review): state is set to HALTED even when reset_halt was not
		 * requested -- confirm callers rely on a later poll to correct it. */
		target->state = TARGET_HALTED;

		if (get_field(dmstatus, DM_DMSTATUS_ALLHAVERESET)) {
			/* Ack reset and clear DM_DMCONTROL_HALTREQ if previously set */
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control, index) |
					DM_DMCONTROL_ACKHAVERESET);
		}

		if (!target->rtos)
			break;
	}
	/* Restore the pre-reset busy delay captured above. */
	info->dmi_busy_delay = dmi_busy_delay;
	return ERROR_OK;
}
2460
2461 static int execute_fence(struct target *target)
2462 {
2463 /* FIXME: For non-coherent systems we need to flush the caches right
2464 * here, but there's no ISA-defined way of doing that. */
2465 {
2466 struct riscv_program program;
2467 riscv_program_init(&program, target);
2468 riscv_program_fence_i(&program);
2469 riscv_program_fence(&program);
2470 int result = riscv_program_exec(&program, target);
2471 if (result != ERROR_OK)
2472 LOG_DEBUG("Unable to execute pre-fence");
2473 }
2474
2475 return ERROR_OK;
2476 }
2477
2478 static void log_memory_access(target_addr_t address, uint64_t value,
2479 unsigned size_bytes, bool read)
2480 {
2481 if (debug_level < LOG_LVL_DEBUG)
2482 return;
2483
2484 char fmt[80];
2485 sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
2486 address, read ? "read" : "write", size_bytes * 2);
2487 switch (size_bytes) {
2488 case 1:
2489 value &= 0xff;
2490 break;
2491 case 2:
2492 value &= 0xffff;
2493 break;
2494 case 4:
2495 value &= 0xffffffffUL;
2496 break;
2497 case 8:
2498 break;
2499 default:
2500 assert(false);
2501 }
2502 LOG_DEBUG(fmt, value);
2503 }
2504
2505 /* Read the relevant sbdata regs depending on size, and put the results into
2506 * buffer. */
2507 static int read_memory_bus_word(struct target *target, target_addr_t address,
2508 uint32_t size, uint8_t *buffer)
2509 {
2510 uint32_t value;
2511 int result;
2512 static int sbdata[4] = { DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3 };
2513 assert(size <= 16);
2514 for (int i = (size - 1) / 4; i >= 0; i--) {
2515 result = dmi_op(target, &value, NULL, DMI_OP_READ, sbdata[i], 0, false, true);
2516 if (result != ERROR_OK)
2517 return result;
2518 buf_set_u32(buffer + i * 4, 0, 8 * MIN(size, 4), value);
2519 log_memory_access(address + i * 4, value, MIN(size, 4), true);
2520 }
2521 return ERROR_OK;
2522 }
2523
2524 static target_addr_t sb_read_address(struct target *target)
2525 {
2526 RISCV013_INFO(info);
2527 unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2528 target_addr_t address = 0;
2529 uint32_t v;
2530 if (sbasize > 32) {
2531 dmi_read(target, &v, DM_SBADDRESS1);
2532 address |= v;
2533 address <<= 32;
2534 }
2535 dmi_read(target, &v, DM_SBADDRESS0);
2536 address |= v;
2537 return address;
2538 }
2539
2540 static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
2541 {
2542 time_t start = time(NULL);
2543 while (1) {
2544 if (dmi_read(target, sbcs, DM_SBCS) != ERROR_OK)
2545 return ERROR_FAIL;
2546 if (!get_field(*sbcs, DM_SBCS_SBBUSY))
2547 return ERROR_OK;
2548 if (time(NULL) - start > riscv_command_timeout_sec) {
2549 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
2550 "Increase the timeout with riscv set_command_timeout_sec.",
2551 riscv_command_timeout_sec, *sbcs);
2552 return ERROR_FAIL;
2553 }
2554 }
2555 }
2556
2557 static int modify_privilege(struct target *target, uint64_t *mstatus, uint64_t *mstatus_old)
2558 {
2559 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5)) {
2560 /* Read DCSR */
2561 uint64_t dcsr;
2562 if (register_read(target, &dcsr, GDB_REGNO_DCSR) != ERROR_OK)
2563 return ERROR_FAIL;
2564
2565 /* Read and save MSTATUS */
2566 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
2567 return ERROR_FAIL;
2568 *mstatus_old = *mstatus;
2569
2570 /* If we come from m-mode with mprv set, we want to keep mpp */
2571 if (get_field(dcsr, DCSR_PRV) < 3) {
2572 /* MPP = PRIV */
2573 *mstatus = set_field(*mstatus, MSTATUS_MPP, get_field(dcsr, DCSR_PRV));
2574
2575 /* MPRV = 1 */
2576 *mstatus = set_field(*mstatus, MSTATUS_MPRV, 1);
2577
2578 /* Write MSTATUS */
2579 if (*mstatus != *mstatus_old)
2580 if (register_write_direct(target, GDB_REGNO_MSTATUS, *mstatus) != ERROR_OK)
2581 return ERROR_FAIL;
2582 }
2583 }
2584
2585 return ERROR_OK;
2586 }
2587
/* Read memory over the system bus using the legacy (pre-ratification) SBA v0
 * register layout. Supports a single read, or an auto-incrementing,
 * auto-reading block read. Only size == increment is implemented. */
static int read_memory_bus_v0(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (size != increment) {
		LOG_ERROR("sba v0 reads only support size==increment");
		return ERROR_NOT_IMPLEMENTED;
	}

	LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
			TARGET_PRIxADDR, size, count, address);
	uint8_t *t_buffer = buffer;
	riscv_addr_t cur_addr = address;
	riscv_addr_t fin_addr = address + (count * size);
	uint32_t access = 0;

	/* These sbcs fields are defined locally rather than in debug_defines.h;
	 * presumably they only existed in early (v0) spec drafts -- confirm. */
	const int DM_SBCS_SBSINGLEREAD_OFFSET = 20;
	const uint32_t DM_SBCS_SBSINGLEREAD = (0x1U << DM_SBCS_SBSINGLEREAD_OFFSET);

	const int DM_SBCS_SBAUTOREAD_OFFSET = 15;
	const uint32_t DM_SBCS_SBAUTOREAD = (0x1U << DM_SBCS_SBAUTOREAD_OFFSET);

	/* Favor one-off reading if there is an issue: single-word requests use
	 * the simpler sbsingleread flow. */
	if (count == 1) {
		for (uint32_t i = 0; i < count; i++) {
			if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
				return ERROR_FAIL;
			dmi_write(target, DM_SBADDRESS0, cur_addr);
			/* size/2 matching the bit access of the spec 0.13 */
			access = set_field(access, DM_SBCS_SBACCESS, size/2);
			access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
			LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
			dmi_write(target, DM_SBCS, access);
			/* 3) read */
			uint32_t value;
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
			buf_set_u32(t_buffer, 0, 8 * size, value);
			t_buffer += size;
			cur_addr += size;
		}
		return ERROR_OK;
	}

	/* has to be the same size if we want to read a block */
	LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
	if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;
	/* set current address */
	dmi_write(target, DM_SBADDRESS0, cur_addr);
	/* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
	 * size/2 matching the bit access of the spec 0.13 */
	access = set_field(access, DM_SBCS_SBACCESS, size/2);
	access = set_field(access, DM_SBCS_SBAUTOREAD, 1);
	access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
	access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
	LOG_DEBUG("\r\naccess: 0x%08x", access);
	dmi_write(target, DM_SBCS, access);

	while (cur_addr < fin_addr) {
		LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
				PRIx64, size, count, cur_addr);
		/* read */
		uint32_t value;
		if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
			return ERROR_FAIL;
		buf_set_u32(t_buffer, 0, 8 * size, value);
		cur_addr += size;
		t_buffer += size;

		/* if we are reaching last address, we must clear autoread */
		if (cur_addr == fin_addr && count != 1) {
			dmi_write(target, DM_SBCS, 0);
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u32(t_buffer, 0, 8 * size, value);
		}
	}

	/* Final sbcs read; the value is intentionally discarded.
	 * NOTE(review): error bits are not checked here -- confirm deliberate. */
	uint32_t sbcs;
	if (dmi_read(target, &sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	return ERROR_OK;
}
2673
/**
 * Read the requested memory using the system bus interface (SBA version 1).
 *
 * With sbreadonaddr set, writing the address triggers the first bus read;
 * with sbreadondata set (used when count > 1), each read of sbdata0 triggers
 * the next one, so values arrive one DMI read behind the bus access that
 * produced them. On sbbusyerror the loop slows down and restarts from the
 * address the hardware reports; on sberror it gives up.
 * Only increment == size or increment == 0 is supported.
 */
static int read_memory_bus_v1(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (increment != size && increment != 0) {
		LOG_ERROR("sba v1 reads only support increment of size or 0");
		return ERROR_NOT_IMPLEMENTED;
	}

	RISCV013_INFO(info);
	target_addr_t next_address = address;
	target_addr_t end_address = address + count * size;

	/* Outer loop: restarted from a mid-point after an sbbusyerror. */
	while (next_address < end_address) {
		uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
		sbcs_write |= sb_sbaccess(size);
		if (increment == size)
			sbcs_write = set_field(sbcs_write, DM_SBCS_SBAUTOINCREMENT, 1);
		if (count > 1)
			sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, count > 1);
		if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
			return ERROR_FAIL;

		/* This address write will trigger the first read. */
		if (sb_write_address(target, next_address, true) != ERROR_OK)
			return ERROR_FAIL;

		/* Optionally give the bus time to complete the access. */
		if (info->bus_master_read_delay) {
			jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
			if (jtag_execute_queue() != ERROR_OK) {
				LOG_ERROR("Failed to scan idle sequence");
				return ERROR_FAIL;
			}
		}

		/* First value has been read, and is waiting for us to issue a DMI read
		 * to get it. */

		static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
		assert(size <= 16);
		/* next_read tracks the address whose value the NEXT successful DMI
		 * read will return; address - 1 is the "nothing pending" sentinel. */
		target_addr_t next_read = address - 1;
		for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
			for (int j = (size - 1) / 4; j >= 0; j--) {
				uint32_t value;
				unsigned attempt = 0;
				/* Retry the DMI read while the DM reports busy, backing off
				 * each time; give up after 100 attempts. */
				while (1) {
					if (attempt++ > 100) {
						LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
								next_read);
						return ERROR_FAIL;
					}
					keep_alive();
					dmi_status_t status = dmi_scan(target, NULL, &value,
							DMI_OP_READ, sbdata[j], 0, false);
					if (status == DMI_STATUS_BUSY)
						increase_dmi_busy_delay(target);
					else if (status == DMI_STATUS_SUCCESS)
						break;
					else
						return ERROR_FAIL;
				}
				/* The value just scanned out belongs to the PREVIOUS word
				 * (pipelined); store it unless nothing was pending yet. */
				if (next_read != address - 1) {
					buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
					log_memory_access(next_read, value, MIN(size, 4), true);
				}
				next_read = address + i * size + j * 4;
			}
		}

		uint32_t sbcs_read = 0;
		if (count > 1) {
			/* Collect the final pipelined value with a NOP scan. */
			uint32_t value;
			unsigned attempt = 0;
			while (1) {
				if (attempt++ > 100) {
					LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
							next_read);
					return ERROR_FAIL;
				}
				dmi_status_t status = dmi_scan(target, NULL, &value, DMI_OP_NOP, 0, 0, false);
				if (status == DMI_STATUS_BUSY)
					increase_dmi_busy_delay(target);
				else if (status == DMI_STATUS_SUCCESS)
					break;
				else
					return ERROR_FAIL;
			}
			buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
			log_memory_access(next_read, value, MIN(size, 4), true);

			/* "Writes to sbcs while sbbusy is high result in undefined behavior.
			 * A debugger must not write to sbcs until it reads sbbusy as 0." */
			if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
				return ERROR_FAIL;

			sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, 0);
			if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
				return ERROR_FAIL;
		}

		/* Read the last word, after we disabled sbreadondata if necessary. */
		if (!get_field(sbcs_read, DM_SBCS_SBERROR) &&
				!get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
			if (read_memory_bus_word(target, address + (count - 1) * size, size,
					buffer + (count - 1) * size) != ERROR_OK)
				return ERROR_FAIL;

			if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
				return ERROR_FAIL;
		}

		if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
			/* We read while the target was busy. Slow down and try again. */
			if (dmi_write(target, DM_SBCS, sbcs_read | DM_SBCS_SBBUSYERROR) != ERROR_OK)
				return ERROR_FAIL;
			next_address = sb_read_address(target);
			info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
			continue;
		}

		unsigned error = get_field(sbcs_read, DM_SBCS_SBERROR);
		if (error == 0) {
			next_address = end_address;
		} else {
			/* Some error indicating the bus access failed, but not because of
			 * something we did wrong. */
			if (dmi_write(target, DM_SBCS, DM_SBCS_SBERROR) != ERROR_OK)
				return ERROR_FAIL;
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
2810
2811 static void log_mem_access_result(struct target *target, bool success, int method, bool read)
2812 {
2813 RISCV_INFO(r);
2814 bool warn = false;
2815 char msg[60];
2816
2817 /* Compose the message */
2818 snprintf(msg, 60, "%s to %s memory via %s.",
2819 success ? "Succeeded" : "Failed",
2820 read ? "read" : "write",
2821 (method == RISCV_MEM_ACCESS_PROGBUF) ? "program buffer" :
2822 (method == RISCV_MEM_ACCESS_SYSBUS) ? "system bus" : "abstract access");
2823
2824 /* Determine the log message severity. Show warnings only once. */
2825 if (!success) {
2826 if (method == RISCV_MEM_ACCESS_PROGBUF) {
2827 warn = r->mem_access_progbuf_warn;
2828 r->mem_access_progbuf_warn = false;
2829 }
2830 if (method == RISCV_MEM_ACCESS_SYSBUS) {
2831 warn = r->mem_access_sysbus_warn;
2832 r->mem_access_sysbus_warn = false;
2833 }
2834 if (method == RISCV_MEM_ACCESS_ABSTRACT) {
2835 warn = r->mem_access_abstract_warn;
2836 r->mem_access_abstract_warn = false;
2837 }
2838 }
2839
2840 if (warn)
2841 LOG_WARNING("%s", msg);
2842 else
2843 LOG_DEBUG("%s", msg);
2844 }
2845
2846 static bool mem_should_skip_progbuf(struct target *target, target_addr_t address,
2847 uint32_t size, bool read, char **skip_reason)
2848 {
2849 assert(skip_reason);
2850
2851 if (!has_sufficient_progbuf(target, 3)) {
2852 LOG_DEBUG("Skipping mem %s via progbuf - insufficient progbuf size.",
2853 read ? "read" : "write");
2854 *skip_reason = "skipped (insufficient progbuf)";
2855 return true;
2856 }
2857 if (target->state != TARGET_HALTED) {
2858 LOG_DEBUG("Skipping mem %s via progbuf - target not halted.",
2859 read ? "read" : "write");
2860 *skip_reason = "skipped (target not halted)";
2861 return true;
2862 }
2863 if (riscv_xlen(target) < size * 8) {
2864 LOG_DEBUG("Skipping mem %s via progbuf - XLEN (%d) is too short for %d-bit memory access.",
2865 read ? "read" : "write", riscv_xlen(target), size * 8);
2866 *skip_reason = "skipped (XLEN too short)";
2867 return true;
2868 }
2869 if (size > 8) {
2870 LOG_DEBUG("Skipping mem %s via progbuf - unsupported size.",
2871 read ? "read" : "write");
2872 *skip_reason = "skipped (unsupported size)";
2873 return true;
2874 }
2875 if ((sizeof(address) * 8 > riscv_xlen(target)) && (address >> riscv_xlen(target))) {
2876 LOG_DEBUG("Skipping mem %s via progbuf - progbuf only supports %u-bit address.",
2877 read ? "read" : "write", riscv_xlen(target));
2878 *skip_reason = "skipped (too large address)";
2879 return true;
2880 }
2881
2882 return false;
2883 }
2884
2885 static bool mem_should_skip_sysbus(struct target *target, target_addr_t address,
2886 uint32_t size, uint32_t increment, bool read, char **skip_reason)
2887 {
2888 assert(skip_reason);
2889
2890 RISCV013_INFO(info);
2891 if (!sba_supports_access(target, size)) {
2892 LOG_DEBUG("Skipping mem %s via system bus - unsupported size.",
2893 read ? "read" : "write");
2894 *skip_reason = "skipped (unsupported size)";
2895 return true;
2896 }
2897 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2898 if ((sizeof(address) * 8 > sbasize) && (address >> sbasize)) {
2899 LOG_DEBUG("Skipping mem %s via system bus - sba only supports %u-bit address.",
2900 read ? "read" : "write", sbasize);
2901 *skip_reason = "skipped (too large address)";
2902 return true;
2903 }
2904 if (read && increment != size && (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0 || increment != 0)) {
2905 LOG_DEBUG("Skipping mem read via system bus - "
2906 "sba reads only support size==increment or also size==0 for sba v1.");
2907 *skip_reason = "skipped (unsupported increment)";
2908 return true;
2909 }
2910
2911 return false;
2912 }
2913
2914 static bool mem_should_skip_abstract(struct target *target, target_addr_t address,
2915 uint32_t size, uint32_t increment, bool read, char **skip_reason)
2916 {
2917 assert(skip_reason);
2918
2919 if (size > 8) {
2920 /* TODO: Add 128b support if it's ever used. Involves modifying
2921 read/write_abstract_arg() to work on two 64b values. */
2922 LOG_DEBUG("Skipping mem %s via abstract access - unsupported size: %d bits",
2923 read ? "read" : "write", size * 8);
2924 *skip_reason = "skipped (unsupported size)";
2925 return true;
2926 }
2927 if ((sizeof(address) * 8 > riscv_xlen(target)) && (address >> riscv_xlen(target))) {
2928 LOG_DEBUG("Skipping mem %s via abstract access - abstract access only supports %u-bit address.",
2929 read ? "read" : "write", riscv_xlen(target));
2930 *skip_reason = "skipped (too large address)";
2931 return true;
2932 }
2933 if (read && size != increment) {
2934 LOG_ERROR("Skipping mem read via abstract access - "
2935 "abstract command reads only support size==increment.");
2936 *skip_reason = "skipped (unsupported increment)";
2937 return true;
2938 }
2939
2940 return false;
2941 }
2942
/*
 * Performs a memory read using memory access abstract commands. The read sizes
 * supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16 byte
 * aamsize fields in the memory access abstract command.
 *
 * Note: the 'increment' parameter is not referenced in this body; reads
 * always advance by 'size' per word.
 */
static int read_memory_abstract(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	RISCV013_INFO(info);

	int result = ERROR_OK;
	/* Try aampostincrement unless we already know the target lacks it. */
	bool use_aampostincrement = info->has_aampostincrement != YNM_NO;

	LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
			size, address);

	memset(buffer, 0, count * size);

	/* Convert the size (bytes) to width (bits) */
	unsigned width = size << 3;

	/* Create the command (physical address, postincrement, read) */
	uint32_t command = access_memory_command(target, false, width, use_aampostincrement, false);

	/* Execute the reads */
	uint8_t *p = buffer;
	bool updateaddr = true;
	/* Abstract data registers are read in 32-bit granules, so round the
	 * transfer width up to at least 32 bits. */
	unsigned int width32 = (width < 32) ? 32 : width;
	for (uint32_t c = 0; c < count; c++) {
		/* Update the address if it is the first time or aampostincrement is not supported by the target. */
		if (updateaddr) {
			/* Set arg1 to the address: address + c * size */
			result = write_abstract_arg(target, 1, address + c * size, riscv_xlen(target));
			if (result != ERROR_OK) {
				LOG_ERROR("Failed to write arg1 during read_memory_abstract().");
				return result;
			}
		}

		/* Execute the command */
		result = execute_abstract_command(target, command);

		/* The first execution resolves whether the target really implements
		 * aampostincrement (YNM_MAYBE -> YNM_YES or YNM_NO). */
		if (info->has_aampostincrement == YNM_MAYBE) {
			if (result == ERROR_OK) {
				/* Safety: double-check that the address was really auto-incremented */
				riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
				if (new_address == address + size) {
					LOG_DEBUG("aampostincrement is supported on this target.");
					info->has_aampostincrement = YNM_YES;
				} else {
					LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
					info->has_aampostincrement = YNM_NO;
				}
			} else {
				/* Try the same access but with postincrement disabled. */
				command = access_memory_command(target, false, width, false, false);
				result = execute_abstract_command(target, command);
				if (result == ERROR_OK) {
					LOG_DEBUG("aampostincrement is not supported on this target.");
					info->has_aampostincrement = YNM_NO;
				}
			}
		}

		if (result != ERROR_OK)
			return result;

		/* Copy arg0 to buffer (rounded width up to nearest 32) */
		riscv_reg_t value = read_abstract_arg(target, 0, width32);
		buf_set_u64(p, 0, 8 * size, value);

		/* Once postincrement is known to work, stop rewriting arg1. */
		if (info->has_aampostincrement == YNM_YES)
			updateaddr = false;
		p += size;
	}

	return result;
}
3021
3022 /*
3023 * Performs a memory write using memory access abstract commands. The write
3024 * sizes supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16
3025 * byte aamsize fields in the memory access abstract command.
3026 */
3027 static int write_memory_abstract(struct target *target, target_addr_t address,
3028 uint32_t size, uint32_t count, const uint8_t *buffer)
3029 {
3030 RISCV013_INFO(info);
3031 int result = ERROR_OK;
3032 bool use_aampostincrement = info->has_aampostincrement != YNM_NO;
3033
3034 LOG_DEBUG("writing %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
3035 size, address);
3036
3037 /* Convert the size (bytes) to width (bits) */
3038 unsigned width = size << 3;
3039
3040 /* Create the command (physical address, postincrement, write) */
3041 uint32_t command = access_memory_command(target, false, width, use_aampostincrement, true);
3042
3043 /* Execute the writes */
3044 const uint8_t *p = buffer;
3045 bool updateaddr = true;
3046 for (uint32_t c = 0; c < count; c++) {
3047 /* Move data to arg0 */
3048 riscv_reg_t value = buf_get_u64(p, 0, 8 * size);
3049 result = write_abstract_arg(target, 0, value, riscv_xlen(target));
3050 if (result != ERROR_OK) {
3051 LOG_ERROR("Failed to write arg0 during write_memory_abstract().");
3052 return result;
3053 }
3054
3055 /* Update the address if it is the first time or aampostincrement is not supported by the target. */
3056 if (updateaddr) {
3057 /* Set arg1 to the address: address + c * size */
3058 result = write_abstract_arg(target, 1, address + c * size, riscv_xlen(target));
3059 if (result != ERROR_OK) {
3060 LOG_ERROR("Failed to write arg1 during write_memory_abstract().");
3061 return result;
3062 }
3063 }
3064
3065 /* Execute the command */
3066 result = execute_abstract_command(target, command);
3067
3068 if (info->has_aampostincrement == YNM_MAYBE) {
3069 if (result == ERROR_OK) {
3070 /* Safety: double-check that the address was really auto-incremented */
3071 riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
3072 if (new_address == address + size) {
3073 LOG_DEBUG("aampostincrement is supported on this target.");
3074 info->has_aampostincrement = YNM_YES;
3075 } else {
3076 LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
3077 info->has_aampostincrement = YNM_NO;
3078 }
3079 } else {
3080 /* Try the same access but with postincrement disabled. */
3081 command = access_memory_command(target, false, width, false, true);
3082 result = execute_abstract_command(target, command);
3083 if (result == ERROR_OK) {
3084 LOG_DEBUG("aampostincrement is not supported on this target.");
3085 info->has_aampostincrement = YNM_NO;
3086 }
3087 }
3088 }
3089
3090 if (result != ERROR_OK)
3091 return result;
3092
3093 if (info->has_aampostincrement == YNM_YES)
3094 updateaddr = false;
3095 p += size;
3096 }
3097
3098 return result;
3099 }
3100
3101 /**
3102 * Read the requested memory, taking care to execute every read exactly once,
3103 * even if cmderr=busy is encountered.
3104 */
3105 static int read_memory_progbuf_inner(struct target *target, target_addr_t address,
3106 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3107 {
3108 RISCV013_INFO(info);
3109
3110 int result = ERROR_OK;
3111
3112 /* Write address to S0. */
3113 result = register_write_direct(target, GDB_REGNO_S0, address);
3114 if (result != ERROR_OK)
3115 return result;
3116
3117 if (increment == 0 &&
3118 register_write_direct(target, GDB_REGNO_S2, 0) != ERROR_OK)
3119 return ERROR_FAIL;
3120
3121 uint32_t command = access_register_command(target, GDB_REGNO_S1,
3122 riscv_xlen(target),
3123 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
3124 if (execute_abstract_command(target, command) != ERROR_OK)
3125 return ERROR_FAIL;
3126
3127 /* First read has just triggered. Result is in s1. */
3128 if (count == 1) {
3129 uint64_t value;
3130 if (register_read_direct(target, &value, GDB_REGNO_S1) != ERROR_OK)
3131 return ERROR_FAIL;
3132 buf_set_u64(buffer, 0, 8 * size, value);
3133 log_memory_access(address, value, size, true);
3134 return ERROR_OK;
3135 }
3136
3137 if (dmi_write(target, DM_ABSTRACTAUTO,
3138 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
3139 goto error;
3140 /* Read garbage from dmi_data0, which triggers another execution of the
3141 * program. Now dmi_data0 contains the first good result, and s1 the next
3142 * memory value. */
3143 if (dmi_read_exec(target, NULL, DM_DATA0) != ERROR_OK)
3144 goto error;
3145
3146 /* read_addr is the next address that the hart will read from, which is the
3147 * value in s0. */
3148 unsigned index = 2;
3149 while (index < count) {
3150 riscv_addr_t read_addr = address + index * increment;
3151 LOG_DEBUG("i=%d, count=%d, read_addr=0x%" PRIx64, index, count, read_addr);
3152 /* The pipeline looks like this:
3153 * memory -> s1 -> dm_data0 -> debugger
3154 * Right now:
3155 * s0 contains read_addr
3156 * s1 contains mem[read_addr-size]
3157 * dm_data0 contains[read_addr-size*2]
3158 */
3159
3160 struct riscv_batch *batch = riscv_batch_alloc(target, 32,
3161 info->dmi_busy_delay + info->ac_busy_delay);
3162 if (!batch)
3163 return ERROR_FAIL;
3164
3165 unsigned reads = 0;
3166 for (unsigned j = index; j < count; j++) {
3167 if (size > 4)
3168 riscv_batch_add_dmi_read(batch, DM_DATA1);
3169 riscv_batch_add_dmi_read(batch, DM_DATA0);
3170
3171 reads++;
3172 if (riscv_batch_full(batch))
3173 break;
3174 }
3175
3176 batch_run(target, batch);
3177
3178 /* Wait for the target to finish performing the last abstract command,
3179 * and update our copy of cmderr. If we see that DMI is busy here,
3180 * dmi_busy_delay will be incremented. */
3181 uint32_t abstractcs;
3182 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3183 return ERROR_FAIL;
3184 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
3185 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3186 return ERROR_FAIL;
3187 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
3188
3189 unsigned next_index;
3190 unsigned ignore_last = 0;
3191 switch (info->cmderr) {
3192 case CMDERR_NONE:
3193 LOG_DEBUG("successful (partial?) memory read");
3194 next_index = index + reads;
3195 break;
3196 case CMDERR_BUSY:
3197 LOG_DEBUG("memory read resulted in busy response");
3198
3199 increase_ac_busy_delay(target);
3200 riscv013_clear_abstract_error(target);
3201
3202 dmi_write(target, DM_ABSTRACTAUTO, 0);
3203
3204 uint32_t dmi_data0, dmi_data1 = 0;
3205 /* This is definitely a good version of the value that we
3206 * attempted to read when we discovered that the target was
3207 * busy. */
3208 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK) {
3209 riscv_batch_free(batch);
3210 goto error;
3211 }
3212 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK) {
3213 riscv_batch_free(batch);
3214 goto error;
3215 }
3216
3217 /* See how far we got, clobbering dmi_data0. */
3218 if (increment == 0) {
3219 uint64_t counter;
3220 result = register_read_direct(target, &counter, GDB_REGNO_S2);
3221 next_index = counter;
3222 } else {
3223 uint64_t next_read_addr;
3224 result = register_read_direct(target, &next_read_addr,
3225 GDB_REGNO_S0);
3226 next_index = (next_read_addr - address) / increment;
3227 }
3228 if (result != ERROR_OK) {
3229 riscv_batch_free(batch);
3230 goto error;
3231 }
3232
3233 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
3234 buf_set_u64(buffer + (next_index - 2) * size, 0, 8 * size, value64);
3235 log_memory_access(address + (next_index - 2) * size, value64, size, true);
3236
3237 /* Restore the command, and execute it.
3238 * Now DM_DATA0 contains the next value just as it would if no
3239 * error had occurred. */
3240 dmi_write_exec(target, DM_COMMAND, command, true);
3241 next_index++;
3242
3243 dmi_write(target, DM_ABSTRACTAUTO,
3244 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
3245
3246 ignore_last = 1;
3247
3248 break;
3249 default:
3250 LOG_DEBUG("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
3251 riscv013_clear_abstract_error(target);
3252 riscv_batch_free(batch);
3253 result = ERROR_FAIL;
3254 goto error;
3255 }
3256
3257 /* Now read whatever we got out of the batch. */
3258 dmi_status_t status = DMI_STATUS_SUCCESS;
3259 unsigned read = 0;
3260 assert(index >= 2);
3261 for (unsigned j = index - 2; j < index + reads; j++) {
3262 assert(j < count);
3263 LOG_DEBUG("index=%d, reads=%d, next_index=%d, ignore_last=%d, j=%d",
3264 index, reads, next_index, ignore_last, j);
3265 if (j + 3 + ignore_last > next_index)
3266 break;
3267
3268 status = riscv_batch_get_dmi_read_op(batch, read);
3269 uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
3270 read++;
3271 if (status != DMI_STATUS_SUCCESS) {
3272 /* If we're here because of busy count, dmi_busy_delay will
3273 * already have been increased and busy state will have been
3274 * cleared in dmi_read(). */
3275 /* In at least some implementations, we issue a read, and then
3276 * can get busy back when we try to scan out the read result,
3277 * and the actual read value is lost forever. Since this is
3278 * rare in any case, we return error here and rely on our
3279 * caller to reread the entire block. */
3280 LOG_WARNING("Batch memory read encountered DMI error %d. "
3281 "Falling back on slower reads.", status);
3282 riscv_batch_free(batch);
3283 result = ERROR_FAIL;
3284 goto error;
3285 }
3286 if (size > 4) {
3287 status = riscv_batch_get_dmi_read_op(batch, read);
3288 if (status != DMI_STATUS_SUCCESS) {
3289 LOG_WARNING("Batch memory read encountered DMI error %d. "
3290 "Falling back on slower reads.", status);
3291 riscv_batch_free(batch);
3292 result = ERROR_FAIL;
3293 goto error;
3294 }
3295 value <<= 32;
3296 value |= riscv_batch_get_dmi_read_data(batch, read);
3297 read++;
3298 }
3299 riscv_addr_t offset = j * size;
3300 buf_set_u64(buffer + offset, 0, 8 * size, value);
3301 log_memory_access(address + j * increment, value, size, true);
3302 }
3303
3304 index = next_index;
3305
3306 riscv_batch_free(batch);
3307 }
3308
3309 dmi_write(target, DM_ABSTRACTAUTO, 0);
3310
3311 if (count > 1) {
3312 /* Read the penultimate word. */
3313 uint32_t dmi_data0, dmi_data1 = 0;
3314 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK)
3315 return ERROR_FAIL;
3316 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK)
3317 return ERROR_FAIL;
3318 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
3319 buf_set_u64(buffer + size * (count - 2), 0, 8 * size, value64);
3320 log_memory_access(address + size * (count - 2), value64, size, true);
3321 }
3322
3323 /* Read the last word. */
3324 uint64_t value;
3325 result = register_read_direct(target, &value, GDB_REGNO_S1);
3326 if (result != ERROR_OK)
3327 goto error;
3328 buf_set_u64(buffer + size * (count-1), 0, 8 * size, value);
3329 log_memory_access(address + size * (count-1), value, size, true);
3330
3331 return ERROR_OK;
3332
3333 error:
3334 dmi_write(target, DM_ABSTRACTAUTO, 0);
3335
3336 return result;
3337 }
3338
/* Only need to save/restore one GPR to read a single word, and the progbuf
 * program doesn't need to increment.
 *
 * On success the word read from 'address' is stored into 'buffer' (8 * size
 * bits). S0 and (if modified) MSTATUS are restored on all exit paths. */
static int read_memory_progbuf_one(struct target *target, target_addr_t address,
		uint32_t size, uint8_t *buffer)
{
	/* Possibly adjust privilege state for the access; restored at the end.
	 * NOTE(review): exact semantics live in modify_privilege() — confirm. */
	uint64_t mstatus = 0;
	uint64_t mstatus_old = 0;
	if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t s0;
	int result = ERROR_FAIL;

	/* S0 carries the address in and the loaded value out, so save it. */
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		goto restore_mstatus;

	/* Write the program (load, increment) */
	struct riscv_program program;
	riscv_program_init(&program, target);
	/* When MPRV is set, bracket the load with dcsr.mprven set/clear
	 * (presumably so the debug access honors MPRV translation — confirm). */
	if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
		riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
	/* Pick the load instruction matching the access size; the loaded value
	 * replaces the address in S0. */
	switch (size) {
		case 1:
			riscv_program_lbr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
			break;
		case 2:
			riscv_program_lhr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
			break;
		case 4:
			riscv_program_lwr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
			break;
		case 8:
			riscv_program_ldr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
			break;
		default:
			LOG_ERROR("Unsupported size: %d", size);
			goto restore_mstatus;
	}
	if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
		riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);

	if (riscv_program_ebreak(&program) != ERROR_OK)
		goto restore_mstatus;
	if (riscv_program_write(&program) != ERROR_OK)
		goto restore_mstatus;

	/* Write address to S0, and execute buffer. */
	if (write_abstract_arg(target, 0, address, riscv_xlen(target)) != ERROR_OK)
		goto restore_mstatus;
	uint32_t command = access_register_command(target, GDB_REGNO_S0,
			riscv_xlen(target), AC_ACCESS_REGISTER_WRITE |
			AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
	if (execute_abstract_command(target, command) != ERROR_OK)
		goto restore_s0;

	/* The program buffer left the memory value in S0; fetch it. */
	uint64_t value;
	if (register_read(target, &value, GDB_REGNO_S0) != ERROR_OK)
		goto restore_s0;
	buf_set_u64(buffer, 0, 8 * size, value);
	log_memory_access(address, value, size, true);
	result = ERROR_OK;

restore_s0:
	if (riscv_set_register(target, GDB_REGNO_S0, s0) != ERROR_OK)
		result = ERROR_FAIL;

restore_mstatus:
	if (mstatus != mstatus_old)
		/* Relies on register_write_direct() returning 0 (ERROR_OK) on success. */
		if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
			result = ERROR_FAIL;

	return result;
}
3412
/**
 * Read the requested memory, silently handling memory access errors.
 *
 * Sets up the program buffer with a load + address-increment sequence, runs
 * the pipelined bulk read, and on failure falls back to reading each word
 * individually (failed words are zero-filled rather than failing the whole
 * operation). S0/S1 (and S2 when increment == 0) are saved and restored.
 */
static int read_memory_progbuf(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (riscv_xlen(target) < size * 8) {
		LOG_ERROR("XLEN (%d) is too short for %d-bit memory read.",
				riscv_xlen(target), size * 8);
		return ERROR_FAIL;
	}

	int result = ERROR_OK;

	LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
			size, address);

	select_dmi(target);

	memset(buffer, 0, count*size);

	/* NOTE(review): presumably orders prior hart stores before our reads —
	 * see execute_fence() for the actual fence issued. */
	if (execute_fence(target) != ERROR_OK)
		return ERROR_FAIL;

	/* Single-word reads have a cheaper dedicated path. */
	if (count == 1)
		return read_memory_progbuf_one(target, address, size, buffer);

	uint64_t mstatus = 0;
	uint64_t mstatus_old = 0;
	if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
		return ERROR_FAIL;

	/* s0 holds the next address to read from
	 * s1 holds the next data value read
	 * s2 is a counter in case increment is 0
	 */
	uint64_t s0, s1, s2;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;
	if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
		return ERROR_FAIL;
	if (increment == 0 && register_read(target, &s2, GDB_REGNO_S2) != ERROR_OK)
		return ERROR_FAIL;

	/* Write the program (load, increment) */
	struct riscv_program program;
	riscv_program_init(&program, target);
	if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
		riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);

	switch (size) {
		case 1:
			riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		case 2:
			riscv_program_lhr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		case 4:
			riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		case 8:
			riscv_program_ldr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		default:
			LOG_ERROR("Unsupported size: %d", size);
			return ERROR_FAIL;
	}

	if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
		riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
	/* Either count completed reads in S2 (increment == 0), or step the
	 * address in S0 by 'increment'. */
	if (increment == 0)
		riscv_program_addi(&program, GDB_REGNO_S2, GDB_REGNO_S2, 1);
	else
		riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, increment);

	if (riscv_program_ebreak(&program) != ERROR_OK)
		return ERROR_FAIL;
	if (riscv_program_write(&program) != ERROR_OK)
		return ERROR_FAIL;

	result = read_memory_progbuf_inner(target, address, size, count, buffer, increment);

	if (result != ERROR_OK) {
		/* The full read did not succeed, so we will try to read each word individually. */
		/* This will not be fast, but reading outside actual memory is a special case anyway. */
		/* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
		target_addr_t address_i = address;
		uint32_t count_i = 1;
		uint8_t *buffer_i = buffer;

		for (uint32_t i = 0; i < count; i++, address_i += increment, buffer_i += size) {
			/* TODO: This is much slower than it needs to be because we end up
			 * writing the address to read for every word we read. */
			result = read_memory_progbuf_inner(target, address_i, size, count_i, buffer_i, increment);

			/* The read of a single word failed, so we will just return 0 for that instead */
			if (result != ERROR_OK) {
				LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR,
						size, address_i);

				buf_set_u64(buffer_i, 0, 8 * size, 0);
			}
		}
		/* The fallback pass always "succeeds" - failed words are zeroed. */
		result = ERROR_OK;
	}

	riscv_set_register(target, GDB_REGNO_S0, s0);
	riscv_set_register(target, GDB_REGNO_S1, s1);
	if (increment == 0)
		riscv_set_register(target, GDB_REGNO_S2, s2);

	/* Restore MSTATUS */
	if (mstatus != mstatus_old)
		if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
			return ERROR_FAIL;

	return result;
}
3531
3532 static int read_memory(struct target *target, target_addr_t address,
3533 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3534 {
3535 if (count == 0)
3536 return ERROR_OK;
3537
3538 if (size != 1 && size != 2 && size != 4 && size != 8 && size != 16) {
3539 LOG_ERROR("BUG: Unsupported size for memory read: %d", size);
3540 return ERROR_FAIL;
3541 }
3542
3543 int ret = ERROR_FAIL;
3544 RISCV_INFO(r);
3545 RISCV013_INFO(info);
3546
3547 char *progbuf_result = "disabled";
3548 char *sysbus_result = "disabled";
3549 char *abstract_result = "disabled";
3550
3551 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
3552 int method = r->mem_access_methods[i];
3553
3554 if (method == RISCV_MEM_ACCESS_PROGBUF) {
3555 if (mem_should_skip_progbuf(target, address, size, true, &progbuf_result))
3556 continue;
3557
3558 ret = read_memory_progbuf(target, address, size, count, buffer, increment);
3559
3560 if (ret != ERROR_OK)
3561 progbuf_result = "failed";
3562 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
3563 if (mem_should_skip_sysbus(target, address, size, increment, true, &sysbus_result))
3564 continue;
3565
3566 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
3567 ret = read_memory_bus_v0(target, address, size, count, buffer, increment);
3568 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
3569 ret = read_memory_bus_v1(target, address, size, count, buffer, increment);
3570
3571 if (ret != ERROR_OK)
3572 sysbus_result = "failed";
3573 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
3574 if (mem_should_skip_abstract(target, address, size, increment, true, &abstract_result))
3575 continue;
3576
3577 ret = read_memory_abstract(target, address, size, count, buffer, increment);
3578
3579 if (ret != ERROR_OK)
3580 abstract_result = "failed";
3581 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
3582 /* No further mem access method to try. */
3583 break;
3584
3585 log_mem_access_result(target, ret == ERROR_OK, method, true);
3586
3587 if (ret == ERROR_OK)
3588 return ret;
3589 }
3590
3591 LOG_ERROR("Target %s: Failed to read memory (addr=0x%" PRIx64 ")", target_name(target), address);
3592 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result, sysbus_result, abstract_result);
3593 return ret;
3594 }
3595
3596 static int write_memory_bus_v0(struct target *target, target_addr_t address,
3597 uint32_t size, uint32_t count, const uint8_t *buffer)
3598 {
3599 /*1) write sbaddress: for singlewrite and autoincrement, we need to write the address once*/
3600 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
3601 TARGET_PRIxADDR, size, count, address);
3602 dmi_write(target, DM_SBADDRESS0, address);
3603 int64_t value = 0;
3604 int64_t access = 0;
3605 riscv_addr_t offset = 0;
3606 riscv_addr_t t_addr = 0;
3607 const uint8_t *t_buffer = buffer + offset;
3608
3609 /* B.8 Writing Memory, single write check if we write in one go */
3610 if (count == 1) { /* count is in bytes here */
3611 value = buf_get_u64(t_buffer, 0, 8 * size);
3612
3613 access = 0;
3614 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3615 dmi_write(target, DM_SBCS, access);
3616 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3617 LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
3618 dmi_write(target, DM_SBDATA0, value);
3619 return ERROR_OK;
3620 }
3621
3622 /*B.8 Writing Memory, using autoincrement*/
3623
3624 access = 0;
3625 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3626 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
3627 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3628 dmi_write(target, DM_SBCS, access);
3629
3630 /*2)set the value according to the size required and write*/
3631 for (riscv_addr_t i = 0; i < count; ++i) {
3632 offset = size*i;
3633 /* for monitoring only */
3634 t_addr = address + offset;
3635 t_buffer = buffer + offset;
3636
3637 value = buf_get_u64(t_buffer, 0, 8 * size);
3638 LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
3639 PRIx64, (uint32_t)t_addr, (uint32_t)value);
3640 dmi_write(target, DM_SBDATA0, value);
3641 }
3642 /*reset the autoincrement when finished (something weird is happening if this is not done at the end*/
3643 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 0);
3644 dmi_write(target, DM_SBCS, access);
3645
3646 return ERROR_OK;
3647 }
3648
3649 static int write_memory_bus_v1(struct target *target, target_addr_t address,
3650 uint32_t size, uint32_t count, const uint8_t *buffer)
3651 {
3652 RISCV013_INFO(info);
3653 uint32_t sbcs = sb_sbaccess(size);
3654 sbcs = set_field(sbcs, DM_SBCS_SBAUTOINCREMENT, 1);
3655 dmi_write(target, DM_SBCS, sbcs);
3656
3657 target_addr_t next_address = address;
3658 target_addr_t end_address = address + count * size;
3659
3660 int result;
3661
3662 sb_write_address(target, next_address, true);
3663 while (next_address < end_address) {
3664 LOG_DEBUG("transferring burst starting at address 0x%" TARGET_PRIxADDR,
3665 next_address);
3666
3667 struct riscv_batch *batch = riscv_batch_alloc(
3668 target,
3669 32,
3670 info->dmi_busy_delay + info->bus_master_write_delay);
3671 if (!batch)
3672 return ERROR_FAIL;
3673
3674 for (uint32_t i = (next_address - address) / size; i < count; i++) {
3675 const uint8_t *p = buffer + i * size;
3676
3677 if (riscv_batch_available_scans(batch) < (size + 3) / 4)
3678 break;
3679
3680 if (size > 12)
3681 riscv_batch_add_dmi_write(batch, DM_SBDATA3,
3682 ((uint32_t) p[12]) |
3683 (((uint32_t) p[13]) << 8) |
3684 (((uint32_t) p[14]) << 16) |
3685 (((uint32_t) p[15]) << 24));
3686
3687 if (size > 8)
3688 riscv_batch_add_dmi_write(batch, DM_SBDATA2,
3689 ((uint32_t) p[8]) |
3690 (((uint32_t) p[9]) << 8) |
3691 (((uint32_t) p[10]) << 16) |
3692 (((uint32_t) p[11]) << 24));
3693 if (size > 4)
3694 riscv_batch_add_dmi_write(batch, DM_SBDATA1,
3695 ((uint32_t) p[4]) |
3696 (((uint32_t) p[5]) << 8) |
3697 (((uint32_t) p[6]) << 16) |
3698 (((uint32_t) p[7]) << 24));
3699 uint32_t value = p[0];
3700 if (size > 2) {
3701 value |= ((uint32_t) p[2]) << 16;
3702 value |= ((uint32_t) p[3]) << 24;
3703 }
3704 if (size > 1)
3705 value |= ((uint32_t) p[1]) << 8;
3706 riscv_batch_add_dmi_write(batch, DM_SBDATA0, value);
3707
3708 log_memory_access(address + i * size, value, size, false);
3709 next_address += size;
3710 }
3711
3712 /* Execute the batch of writes */
3713 result = batch_run(target, batch);
3714 riscv_batch_free(batch);
3715 if (result != ERROR_OK)
3716 return result;
3717
3718 /* Read sbcs value.
3719 * At the same time, detect if DMI busy has occurred during the batch write. */
3720 bool dmi_busy_encountered;
3721 if (dmi_op(target, &sbcs, &dmi_busy_encountered, DMI_OP_READ,
3722 DM_SBCS, 0, false, true) != ERROR_OK)
3723 return ERROR_FAIL;
3724 if (dmi_busy_encountered)
3725 LOG_DEBUG("DMI busy encountered during system bus write.");
3726
3727 /* Wait until sbbusy goes low */
3728 time_t start = time(NULL);
3729 while (get_field(sbcs, DM_SBCS_SBBUSY)) {
3730 if (time(NULL) - start > riscv_command_timeout_sec) {
3731 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
3732 "Increase the timeout with riscv set_command_timeout_sec.",
3733 riscv_command_timeout_sec, sbcs);
3734 return ERROR_FAIL;
3735 }
3736 if (dmi_read(target, &sbcs, DM_SBCS) != ERROR_OK)
3737 return ERROR_FAIL;
3738 }
3739
3740 if (get_field(sbcs, DM_SBCS_SBBUSYERROR)) {
3741 /* We wrote while the target was busy. */
3742 LOG_DEBUG("Sbbusyerror encountered during system bus write.");
3743 /* Clear the sticky error flag. */
3744 dmi_write(target, DM_SBCS, sbcs | DM_SBCS_SBBUSYERROR);
3745 /* Slow down before trying again. */
3746 info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
3747 }
3748
3749 if (get_field(sbcs, DM_SBCS_SBBUSYERROR) || dmi_busy_encountered) {
3750 /* Recover from the case when the write commands were issued too fast.
3751 * Determine the address from which to resume writing. */
3752 next_address = sb_read_address(target);
3753 if (next_address < address) {
3754 /* This should never happen, probably buggy hardware. */
3755 LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
3756 " - buggy sbautoincrement in hw?", next_address);
3757 /* Fail the whole operation. */
3758 return ERROR_FAIL;
3759 }
3760 /* Try again - resume writing. */
3761 continue;
3762 }
3763
3764 unsigned int sberror = get_field(sbcs, DM_SBCS_SBERROR);
3765 if (sberror != 0) {
3766 /* Sberror indicates the bus access failed, but not because we issued the writes
3767 * too fast. Cannot recover. Sbaddress holds the address where the error occurred
3768 * (unless sbautoincrement in the HW is buggy).
3769 */
3770 target_addr_t sbaddress = sb_read_address(target);
3771 LOG_DEBUG("System bus access failed with sberror=%u (sbaddress=0x%" TARGET_PRIxADDR ")",
3772 sberror, sbaddress);
3773 if (sbaddress < address) {
3774 /* This should never happen, probably buggy hardware.
3775 * Make a note to the user not to trust the sbaddress value. */
3776 LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
3777 " - buggy sbautoincrement in hw?", next_address);
3778 }
3779 /* Clear the sticky error flag */
3780 dmi_write(target, DM_SBCS, DM_SBCS_SBERROR);
3781 /* Fail the whole operation */
3782 return ERROR_FAIL;
3783 }
3784 }
3785
3786 return ERROR_OK;
3787 }
3788
/* Write memory through the program buffer: a store+increment stub is
 * downloaded into the progbuf, then data words are streamed through
 * DM_DATA0/1 with abstractauto enabled so each data write re-runs the stub.
 * Recovers from abstract-command-busy / DMI-busy by re-reading S0 (which
 * tracks the next address) and restarting the burst from there. */
static int write_memory_progbuf(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	RISCV013_INFO(info);

	/* The stub moves a full value through S1, so each element must fit in XLEN. */
	if (riscv_xlen(target) < size * 8) {
		LOG_ERROR("XLEN (%d) is too short for %d-bit memory write.",
				riscv_xlen(target), size * 8);
		return ERROR_FAIL;
	}

	LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count, size, (long)address);

	select_dmi(target);

	/* Optionally switch the effective privilege for the access; restored at the end. */
	uint64_t mstatus = 0;
	uint64_t mstatus_old = 0;
	if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
		return ERROR_FAIL;

	/* s0 holds the next address to write to
	 * s1 holds the next data value to write
	 */

	int result = ERROR_OK;
	/* Save S0/S1 so they can be restored on exit. */
	uint64_t s0, s1;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;
	if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
		return ERROR_FAIL;

	/* Write the program (store, increment) */
	struct riscv_program program;
	riscv_program_init(&program, target);
	/* With riscv_enable_virtual, bracket the store with dcsr.mprven set so
	 * the modified mstatus.MPRV takes effect (needs 5 progbuf slots). */
	if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
		riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);

	/* Pick the store instruction matching the element size. */
	switch (size) {
		case 1:
			riscv_program_sbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		case 2:
			riscv_program_shr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		case 4:
			riscv_program_swr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		case 8:
			riscv_program_sdr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		default:
			LOG_ERROR("write_memory_progbuf(): Unsupported size: %d", size);
			result = ERROR_FAIL;
			goto error;
	}

	if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
		riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
	/* Advance S0 to the next element so autoexec can just repeat the stub. */
	riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);

	result = riscv_program_ebreak(&program);
	if (result != ERROR_OK)
		goto error;
	riscv_program_write(&program);

	riscv_addr_t cur_addr = address;
	riscv_addr_t fin_addr = address + (count * size);
	/* First iteration must prime S0, issue the initial abstract command, and
	 * turn on autoexec; later iterations only batch data-register writes. */
	bool setup_needed = true;
	LOG_DEBUG("writing until final address 0x%016" PRIx64, fin_addr);
	while (cur_addr < fin_addr) {
		LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64,
				cur_addr);

		struct riscv_batch *batch = riscv_batch_alloc(
				target,
				32,
				info->dmi_busy_delay + info->ac_busy_delay);
		if (!batch)
			goto error;

		/* To write another word, we put it in S1 and execute the program. */
		unsigned start = (cur_addr - address) / size;
		for (unsigned i = start; i < count; ++i) {
			unsigned offset = size*i;
			const uint8_t *t_buffer = buffer + offset;

			uint64_t value = buf_get_u64(t_buffer, 0, 8 * size);

			log_memory_access(address + offset, value, size, false);
			cur_addr += size;

			if (setup_needed) {
				result = register_write_direct(target, GDB_REGNO_S0,
						address + offset);
				if (result != ERROR_OK) {
					riscv_batch_free(batch);
					goto error;
				}

				/* Write value. */
				if (size > 4)
					dmi_write(target, DM_DATA1, value >> 32);
				dmi_write(target, DM_DATA0, value);

				/* Write and execute command that moves value into S1 and
				 * executes program buffer. */
				uint32_t command = access_register_command(target,
						GDB_REGNO_S1, riscv_xlen(target),
						AC_ACCESS_REGISTER_POSTEXEC |
						AC_ACCESS_REGISTER_TRANSFER |
						AC_ACCESS_REGISTER_WRITE);
				result = execute_abstract_command(target, command);
				if (result != ERROR_OK) {
					riscv_batch_free(batch);
					goto error;
				}

				/* Turn on autoexec */
				dmi_write(target, DM_ABSTRACTAUTO,
						1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);

				setup_needed = false;
			} else {
				/* With autoexec on, writing DM_DATA0 re-triggers the stub. */
				if (size > 4)
					riscv_batch_add_dmi_write(batch, DM_DATA1, value >> 32);
				riscv_batch_add_dmi_write(batch, DM_DATA0, value);
				if (riscv_batch_full(batch))
					break;
			}
		}

		result = batch_run(target, batch);
		riscv_batch_free(batch);
		if (result != ERROR_OK)
			goto error;

		/* Note that if the scan resulted in a Busy DMI response, it
		 * is this read to abstractcs that will cause the dmi_busy_delay
		 * to be incremented if necessary. */

		uint32_t abstractcs;
		bool dmi_busy_encountered;
		result = dmi_op(target, &abstractcs, &dmi_busy_encountered,
				DMI_OP_READ, DM_ABSTRACTCS, 0, false, true);
		if (result != ERROR_OK)
			goto error;
		/* Wait for any in-flight abstract command to finish. */
		while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
			if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
				return ERROR_FAIL;
		info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
		if (info->cmderr == CMDERR_NONE && !dmi_busy_encountered) {
			LOG_DEBUG("successful (partial?) memory write");
		} else if (info->cmderr == CMDERR_BUSY || dmi_busy_encountered) {
			/* We pushed data faster than the hart could store it. Slow
			 * down, then resume from the address S0 actually reached. */
			if (info->cmderr == CMDERR_BUSY)
				LOG_DEBUG("Memory write resulted in abstract command busy response.");
			else if (dmi_busy_encountered)
				LOG_DEBUG("Memory write resulted in DMI busy response.");
			riscv013_clear_abstract_error(target);
			increase_ac_busy_delay(target);

			dmi_write(target, DM_ABSTRACTAUTO, 0);
			result = register_read_direct(target, &cur_addr, GDB_REGNO_S0);
			if (result != ERROR_OK)
				goto error;
			setup_needed = true;
		} else {
			LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs);
			riscv013_clear_abstract_error(target);
			result = ERROR_FAIL;
			goto error;
		}
	}

error:
	/* Always disable autoexec and restore the clobbered registers. */
	dmi_write(target, DM_ABSTRACTAUTO, 0);

	if (register_write_direct(target, GDB_REGNO_S1, s1) != ERROR_OK)
		return ERROR_FAIL;
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	/* Restore MSTATUS */
	if (mstatus != mstatus_old)
		if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
			return ERROR_FAIL;

	if (execute_fence(target) != ERROR_OK)
		return ERROR_FAIL;

	return result;
}
3980
3981 static int write_memory(struct target *target, target_addr_t address,
3982 uint32_t size, uint32_t count, const uint8_t *buffer)
3983 {
3984 if (size != 1 && size != 2 && size != 4 && size != 8 && size != 16) {
3985 LOG_ERROR("BUG: Unsupported size for memory write: %d", size);
3986 return ERROR_FAIL;
3987 }
3988
3989 int ret = ERROR_FAIL;
3990 RISCV_INFO(r);
3991 RISCV013_INFO(info);
3992
3993 char *progbuf_result = "disabled";
3994 char *sysbus_result = "disabled";
3995 char *abstract_result = "disabled";
3996
3997 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
3998 int method = r->mem_access_methods[i];
3999
4000 if (method == RISCV_MEM_ACCESS_PROGBUF) {
4001 if (mem_should_skip_progbuf(target, address, size, false, &progbuf_result))
4002 continue;
4003
4004 ret = write_memory_progbuf(target, address, size, count, buffer);
4005
4006 if (ret != ERROR_OK)
4007 progbuf_result = "failed";
4008 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
4009 if (mem_should_skip_sysbus(target, address, size, 0, false, &sysbus_result))
4010 continue;
4011
4012 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
4013 ret = write_memory_bus_v0(target, address, size, count, buffer);
4014 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
4015 ret = write_memory_bus_v1(target, address, size, count, buffer);
4016
4017 if (ret != ERROR_OK)
4018 sysbus_result = "failed";
4019 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
4020 if (mem_should_skip_abstract(target, address, size, 0, false, &abstract_result))
4021 continue;
4022
4023 ret = write_memory_abstract(target, address, size, count, buffer);
4024
4025 if (ret != ERROR_OK)
4026 abstract_result = "failed";
4027 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
4028 /* No further mem access method to try. */
4029 break;
4030
4031 log_mem_access_result(target, ret == ERROR_OK, method, false);
4032
4033 if (ret == ERROR_OK)
4034 return ret;
4035 }
4036
4037 LOG_ERROR("Target %s: Failed to write memory (addr=0x%" PRIx64 ")", target_name(target), address);
4038 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result, sysbus_result, abstract_result);
4039 return ret;
4040 }
4041
/* No architecture-specific state to report; always succeeds. */
static int arch_state(struct target *target)
{
	return ERROR_OK;
}
4046
/* Method table registered for RISC-V targets using debug spec 0.13.
 * Only the callbacks implemented in this file are listed; the poll/halt/step
 * entries point at the generic riscv_* front-ends. */
struct target_type riscv013_target = {
	.name = "riscv",

	.init_target = init_target,
	.deinit_target = deinit_target,
	.examine = examine,

	.poll = &riscv_openocd_poll,
	.halt = &riscv_halt,
	.step = &riscv_openocd_step,

	.assert_reset = assert_reset,
	.deassert_reset = deassert_reset,

	.write_memory = write_memory,

	.arch_state = arch_state
};
4065
4066 /*** 0.13-specific implementations of various RISC-V helper functions. ***/
4067 static int riscv013_get_register(struct target *target,
4068 riscv_reg_t *value, int rid)
4069 {
4070 LOG_DEBUG("[%s] reading register %s", target_name(target),
4071 gdb_regno_name(rid));
4072
4073 if (riscv_select_current_hart(target) != ERROR_OK)
4074 return ERROR_FAIL;
4075
4076 int result = ERROR_OK;
4077 if (rid == GDB_REGNO_PC) {
4078 /* TODO: move this into riscv.c. */
4079 result = register_read(target, value, GDB_REGNO_DPC);
4080 LOG_DEBUG("[%d] read PC from DPC: 0x%" PRIx64, target->coreid, *value);
4081 } else if (rid == GDB_REGNO_PRIV) {
4082 uint64_t dcsr;
4083 /* TODO: move this into riscv.c. */
4084 result = register_read(target, &dcsr, GDB_REGNO_DCSR);
4085 *value = set_field(0, VIRT_PRIV_V, get_field(dcsr, CSR_DCSR_V));
4086 *value = set_field(*value, VIRT_PRIV_PRV, get_field(dcsr, CSR_DCSR_PRV));
4087 } else {
4088 result = register_read(target, value, rid);
4089 if (result != ERROR_OK)
4090 *value = -1;
4091 }
4092
4093 return result;
4094 }
4095
4096 static int riscv013_set_register(struct target *target, int rid, uint64_t value)
4097 {
4098 riscv013_select_current_hart(target);
4099 LOG_DEBUG("[%d] writing 0x%" PRIx64 " to register %s",
4100 target->coreid, value, gdb_regno_name(rid));
4101
4102 if (rid <= GDB_REGNO_XPR31) {
4103 return register_write_direct(target, rid, value);
4104 } else if (rid == GDB_REGNO_PC) {
4105 LOG_DEBUG("[%d] writing PC to DPC: 0x%" PRIx64, target->coreid, value);
4106 register_write_direct(target, GDB_REGNO_DPC, value);
4107 uint64_t actual_value;
4108 register_read_direct(target, &actual_value, GDB_REGNO_DPC);
4109 LOG_DEBUG("[%d] actual DPC written: 0x%016" PRIx64, target->coreid, actual_value);
4110 if (value != actual_value) {
4111 LOG_ERROR("Written PC (0x%" PRIx64 ") does not match read back "
4112 "value (0x%" PRIx64 ")", value, actual_value);
4113 return ERROR_FAIL;
4114 }
4115 } else if (rid == GDB_REGNO_PRIV) {
4116 uint64_t dcsr;
4117 register_read(target, &dcsr, GDB_REGNO_DCSR);
4118 dcsr = set_field(dcsr, CSR_DCSR_PRV, get_field(value, VIRT_PRIV_PRV));
4119 dcsr = set_field(dcsr, CSR_DCSR_V, get_field(value, VIRT_PRIV_V));
4120 return register_write_direct(target, GDB_REGNO_DCSR, dcsr);
4121 } else {
4122 return register_write_direct(target, rid, value);
4123 }
4124
4125 return ERROR_OK;
4126 }
4127
/* Point dmcontrol.hartsel at r->current_hartid. The last selection is
 * cached in the DM structure so a redundant DMI read/write pair is skipped
 * when the hart is already selected. */
static int riscv013_select_current_hart(struct target *target)
{
	RISCV_INFO(r);

	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;
	/* Already selected: nothing to do. */
	if (r->current_hartid == dm->current_hartid)
		return ERROR_OK;

	uint32_t dmcontrol;
	/* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
	if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
		return ERROR_FAIL;
	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
	int result = dmi_write(target, DM_DMCONTROL, dmcontrol);
	/* NOTE(review): the cache is updated even when the dmi_write above
	 * failed — confirm this is intended. */
	dm->current_hartid = r->current_hartid;
	return result;
}
4147
4148 /* Select all harts that were prepped and that are selectable, clearing the
4149 * prepped flag on the harts that actually were selected. */
4150 static int select_prepped_harts(struct target *target, bool *use_hasel)
4151 {
4152 dm013_info_t *dm = get_dm(target);
4153 if (!dm)
4154 return ERROR_FAIL;
4155 if (!dm->hasel_supported) {
4156 RISCV_INFO(r);
4157 r->prepped = false;
4158 *use_hasel = false;
4159 return ERROR_OK;
4160 }
4161
4162 assert(dm->hart_count);
4163 unsigned hawindow_count = (dm->hart_count + 31) / 32;
4164 uint32_t hawindow[hawindow_count];
4165
4166 memset(hawindow, 0, sizeof(uint32_t) * hawindow_count);
4167
4168 target_list_t *entry;
4169 unsigned total_selected = 0;
4170 list_for_each_entry(entry, &dm->target_list, list) {
4171 struct target *t = entry->target;
4172 struct riscv_info *r = riscv_info(t);
4173 riscv013_info_t *info = get_info(t);
4174 unsigned index = info->index;
4175 LOG_DEBUG("index=%d, coreid=%d, prepped=%d", index, t->coreid, r->prepped);
4176 r->selected = r->prepped;
4177 if (r->prepped) {
4178 hawindow[index / 32] |= 1 << (index % 32);
4179 r->prepped = false;
4180 total_selected++;
4181 }
4182 index++;
4183 }
4184
4185 /* Don't use hasel if we only need to talk to one hart. */
4186 if (total_selected <= 1) {
4187 *use_hasel = false;
4188 return ERROR_OK;
4189 }
4190
4191 for (unsigned i = 0; i < hawindow_count; i++) {
4192 if (dmi_write(target, DM_HAWINDOWSEL, i) != ERROR_OK)
4193 return ERROR_FAIL;
4194 if (dmi_write(target, DM_HAWINDOW, hawindow[i]) != ERROR_OK)
4195 return ERROR_FAIL;
4196 }
4197
4198 *use_hasel = true;
4199 return ERROR_OK;
4200 }
4201
/* No per-hart preparation is needed before halting on this debug spec. */
static int riscv013_halt_prep(struct target *target)
{
	return ERROR_OK;
}
4206
/* Halt the current hart (and, with hasel, every prepped hart at once) by
 * asserting dmcontrol.haltreq, then poll until the halt is observed. */
static int riscv013_halt_go(struct target *target)
{
	bool use_hasel = false;
	if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
		return ERROR_FAIL;

	RISCV_INFO(r);
	LOG_DEBUG("halting hart %d", r->current_hartid);

	/* Issue the halt command, and then wait for the current hart to halt. */
	uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_HALTREQ;
	if (use_hasel)
		dmcontrol |= DM_DMCONTROL_HASEL;
	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
	dmi_write(target, DM_DMCONTROL, dmcontrol);
	/* Bounded poll: up to 256 status reads before giving up. */
	for (size_t i = 0; i < 256; ++i)
		if (riscv_is_halted(target))
			break;

	if (!riscv_is_halted(target)) {
		/* Dump debug-module state to help diagnose the failed halt. */
		uint32_t dmstatus;
		if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
			return ERROR_FAIL;
		if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
			return ERROR_FAIL;

		LOG_ERROR("unable to halt hart %d", r->current_hartid);
		LOG_ERROR(" dmcontrol=0x%08x", dmcontrol);
		LOG_ERROR(" dmstatus =0x%08x", dmstatus);
		return ERROR_FAIL;
	}

	/* Deassert the halt request now that the hart(s) stopped. */
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HALTREQ, 0);
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	if (use_hasel) {
		/* Several harts halted together: mark each one halted. */
		target_list_t *entry;
		dm013_info_t *dm = get_dm(target);
		if (!dm)
			return ERROR_FAIL;
		list_for_each_entry(entry, &dm->target_list, list) {
			struct target *t = entry->target;
			t->state = TARGET_HALTED;
			if (t->debug_reason == DBG_REASON_NOTHALTED)
				t->debug_reason = DBG_REASON_DBGRQ;
		}
	}
	/* The "else" case is handled in halt_go(). */

	return ERROR_OK;
}
4258
4259 static int riscv013_resume_go(struct target *target)
4260 {
4261 bool use_hasel = false;
4262 if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
4263 return ERROR_FAIL;
4264
4265 return riscv013_step_or_resume_current_hart(target, false, use_hasel);
4266 }
4267
/* Single-step the current hart; hasel is never needed for a single hart. */
static int riscv013_step_current_hart(struct target *target)
{
	return riscv013_step_or_resume_current_hart(target, true, false);
}
4272
/* Prepare to resume: run the common dcsr setup with step disabled. */
static int riscv013_resume_prep(struct target *target)
{
	return riscv013_on_step_or_resume(target, false);
}
4277
/* Prepare to single-step: run the common dcsr setup with step enabled. */
static int riscv013_on_step(struct target *target)
{
	return riscv013_on_step_or_resume(target, true);
}
4282
/* No additional work is required after a halt on this debug spec. */
static int riscv013_on_halt(struct target *target)
{
	return ERROR_OK;
}
4287
/* Return true when dmstatus reports all selected harts halted. Also logs
 * unavailable/nonexistent harts, and acknowledges an unexpected reset
 * (optionally re-requesting a halt if we believed the hart was halted). */
static bool riscv013_is_halted(struct target *target)
{
	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return false;
	if (get_field(dmstatus, DM_DMSTATUS_ANYUNAVAIL))
		LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target));
	if (get_field(dmstatus, DM_DMSTATUS_ANYNONEXISTENT))
		LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target));
	if (get_field(dmstatus, DM_DMSTATUS_ANYHAVERESET)) {
		int hartid = riscv_current_hartid(target);
		LOG_INFO("Hart %d unexpectedly reset!", hartid);
		/* TODO: Can we make this more obvious to eg. a gdb user? */
		uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_ACKHAVERESET;
		dmcontrol = set_hartsel(dmcontrol, hartid);
		/* If we had been halted when we reset, request another halt. If we
		 * ended up running out of reset, then the user will (hopefully) get a
		 * message that a reset happened, that the target is running, and then
		 * that it is halted again once the request goes through.
		 */
		if (target->state == TARGET_HALTED)
			dmcontrol |= DM_DMCONTROL_HALTREQ;
		dmi_write(target, DM_DMCONTROL, dmcontrol);
	}
	return get_field(dmstatus, DM_DMSTATUS_ALLHALTED);
}
4315
/* Map dcsr.cause on the current hart to the generic riscv_halt_reason enum. */
static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
{
	riscv_reg_t dcsr;
	int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
	if (result != ERROR_OK)
		return RISCV_HALT_UNKNOWN;

	LOG_DEBUG("dcsr.cause: 0x%" PRIx64, get_field(dcsr, CSR_DCSR_CAUSE));

	switch (get_field(dcsr, CSR_DCSR_CAUSE)) {
	case CSR_DCSR_CAUSE_SWBP:
		return RISCV_HALT_BREAKPOINT;
	case CSR_DCSR_CAUSE_TRIGGER:
		/* We could get here before triggers are enumerated if a trigger was
		 * already set when we connected. Force enumeration now, which has the
		 * side effect of clearing any triggers we did not set. */
		riscv_enumerate_triggers(target);
		LOG_DEBUG("{%d} halted because of trigger", target->coreid);
		return RISCV_HALT_TRIGGER;
	case CSR_DCSR_CAUSE_STEP:
		return RISCV_HALT_SINGLESTEP;
	case CSR_DCSR_CAUSE_DEBUGINT:
	case CSR_DCSR_CAUSE_HALT:
		return RISCV_HALT_INTERRUPT;
	case CSR_DCSR_CAUSE_GROUP:
		return RISCV_HALT_GROUP;
	}

	/* Any cause value not handled above is unexpected. */
	LOG_ERROR("Unknown DCSR cause field: 0x%" PRIx64, get_field(dcsr, CSR_DCSR_CAUSE));
	LOG_ERROR(" dcsr=0x%016lx", (long)dcsr);
	return RISCV_HALT_UNKNOWN;
}
4348
4349 int riscv013_write_debug_buffer(struct target *target, unsigned index, riscv_insn_t data)
4350 {
4351 dm013_info_t *dm = get_dm(target);
4352 if (!dm)
4353 return ERROR_FAIL;
4354 if (dm->progbuf_cache[index] != data) {
4355 if (dmi_write(target, DM_PROGBUF0 + index, data) != ERROR_OK)
4356 return ERROR_FAIL;
4357 dm->progbuf_cache[index] = data;
4358 } else {
4359 LOG_DEBUG("cache hit for 0x%" PRIx32 " @%d", data, index);
4360 }
4361 return ERROR_OK;
4362 }
4363
4364 riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned index)
4365 {
4366 uint32_t value;
4367 dmi_read(target, &value, DM_PROGBUF0 + index);
4368 return value;
4369 }
4370
4371 int riscv013_execute_debug_buffer(struct target *target)
4372 {
4373 uint32_t run_program = 0;
4374 run_program = set_field(run_program, AC_ACCESS_REGISTER_AARSIZE, 2);
4375 run_program = set_field(run_program, AC_ACCESS_REGISTER_POSTEXEC, 1);
4376 run_program = set_field(run_program, AC_ACCESS_REGISTER_TRANSFER, 0);
4377 run_program = set_field(run_program, AC_ACCESS_REGISTER_REGNO, 0x1000);
4378
4379 return execute_abstract_command(target, run_program);
4380 }
4381
/* Encode a DMI write request into buf: op=WRITE, data=d, address=a.
 * The address field width comes from this target's info->abits. */
void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
{
	RISCV013_INFO(info);
	buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_WRITE);
	buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, d);
	buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
}
4389
/* Encode a DMI read request into buf: op=READ, data=0, address=a. */
void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a)
{
	RISCV013_INFO(info);
	buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_READ);
	buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
	buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
}
4397
/* Encode a DMI no-op into buf: op=NOP, data=0, address=0. */
void riscv013_fill_dmi_nop_u64(struct target *target, char *buf)
{
	RISCV013_INFO(info);
	buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_NOP);
	buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
	buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, 0);
}
4405
/* Total bit length of one DMI scan on this target: address + data + op. */
int riscv013_dmi_write_u64_bits(struct target *target)
{
	RISCV013_INFO(info);
	return info->abits + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH;
}
4411
/* Run execute_fence() only when the program buffer is big enough (at least
 * 3 slots); otherwise succeed without doing anything. */
static int maybe_execute_fence_i(struct target *target)
{
	if (has_sufficient_progbuf(target, 3))
		return execute_fence(target);
	return ERROR_OK;
}
4418
/* Helper Functions. */

/* Common preparation for step and resume: run fence via the progbuf when
 * possible, then program dcsr — the single-step bit plus the configured
 * ebreak-enters-debug-mode bits for M/S/U modes. */
static int riscv013_on_step_or_resume(struct target *target, bool step)
{
	if (maybe_execute_fence_i(target) != ERROR_OK)
		return ERROR_FAIL;

	/* We want to twiddle some bits in the debug CSR so debugging works. */
	riscv_reg_t dcsr;
	int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
	if (result != ERROR_OK)
		return result;
	dcsr = set_field(dcsr, CSR_DCSR_STEP, step);
	dcsr = set_field(dcsr, CSR_DCSR_EBREAKM, riscv_ebreakm);
	dcsr = set_field(dcsr, CSR_DCSR_EBREAKS, riscv_ebreaks);
	dcsr = set_field(dcsr, CSR_DCSR_EBREAKU, riscv_ebreaku);
	return riscv_set_register(target, GDB_REGNO_DCSR, dcsr);
}
4436
/* Issue resumereq for the current hart (optionally with hasel) and wait for
 * allresumeack — and, when stepping, for the hart to be halted again. On
 * success the request bits are cleared before returning. */
static int riscv013_step_or_resume_current_hart(struct target *target,
		bool step, bool use_hasel)
{
	RISCV_INFO(r);
	LOG_DEBUG("resuming hart %d (for step?=%d)", r->current_hartid, step);
	if (!riscv_is_halted(target)) {
		LOG_ERROR("Hart %d is not halted!", r->current_hartid);
		return ERROR_FAIL;
	}

	/* Issue the resume command, and then wait for the current hart to resume. */
	uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_RESUMEREQ;
	if (use_hasel)
		dmcontrol |= DM_DMCONTROL_HASEL;
	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	/* Prepare the value used to clear the request bits once acknowledged. */
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HASEL, 0);
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_RESUMEREQ, 0);

	/* Bounded poll (up to 256 tries, 10 us apart) for the ack. */
	uint32_t dmstatus;
	for (size_t i = 0; i < 256; ++i) {
		usleep(10);
		if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
			return ERROR_FAIL;
		if (get_field(dmstatus, DM_DMSTATUS_ALLRESUMEACK) == 0)
			continue;
		/* When stepping, also wait until the hart has halted again. */
		if (step && get_field(dmstatus, DM_DMSTATUS_ALLHALTED) == 0)
			continue;

		dmi_write(target, DM_DMCONTROL, dmcontrol);
		return ERROR_OK;
	}

	/* Timed out: still clear the request bits, then report the failure. */
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	LOG_ERROR("unable to resume hart %d", r->current_hartid);
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return ERROR_FAIL;
	LOG_ERROR(" dmstatus =0x%08x", dmstatus);

	if (step) {
		/* A failed step leaves the hart running; force it back to halt. */
		LOG_ERROR(" was stepping, halting");
		riscv_halt(target);
		return ERROR_OK;
	}

	return ERROR_FAIL;
}
4486
/* Wait (bounded by riscv_command_timeout_sec) for any abstract command to
 * finish, then clear the sticky abstractcs.cmderr field. */
void riscv013_clear_abstract_error(struct target *target)
{
	/* Wait for busy to go away. */
	time_t start = time(NULL);
	uint32_t abstractcs;
	dmi_read(target, &abstractcs, DM_ABSTRACTCS);
	while (get_field(abstractcs, DM_ABSTRACTCS_BUSY)) {
		dmi_read(target, &abstractcs, DM_ABSTRACTCS);

		if (time(NULL) - start > riscv_command_timeout_sec) {
			LOG_ERROR("abstractcs.busy is not going low after %d seconds "
					"(abstractcs=0x%x). The target is either really slow or "
					"broken. You could increase the timeout with riscv "
					"set_command_timeout_sec.",
					riscv_command_timeout_sec, abstractcs);
			/* Proceed to clear the error anyway. */
			break;
		}
	}
	/* Clear the error status. */
	dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
}

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account. Then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will allow linking the new login method. Thank you.

SSH host key fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)