Upstream a whole host of RISC-V changes.
[openocd.git] / src / target / riscv / riscv-013.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /*
4 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
5 * latest draft.
6 */
7
8 #include <assert.h>
9 #include <stdlib.h>
10 #include <time.h>
11
12 #ifdef HAVE_CONFIG_H
13 #include "config.h"
14 #endif
15
16 #include "target/target.h"
17 #include "target/algorithm.h"
18 #include "target/target_type.h"
19 #include <helper/log.h>
20 #include "jtag/jtag.h"
21 #include "target/register.h"
22 #include "target/breakpoints.h"
23 #include "helper/time_support.h"
24 #include "helper/list.h"
25 #include "riscv.h"
26 #include "debug_defines.h"
27 #include "rtos/rtos.h"
28 #include "program.h"
29 #include "asm.h"
30 #include "batch.h"
31
32 static int riscv013_on_step_or_resume(struct target *target, bool step);
33 static int riscv013_step_or_resume_current_hart(struct target *target,
34 bool step, bool use_hasel);
35 static void riscv013_clear_abstract_error(struct target *target);
36
37 /* Implementations of the functions in riscv_info_t. */
38 static int riscv013_get_register(struct target *target,
39 riscv_reg_t *value, int rid);
40 static int riscv013_set_register(struct target *target, int regid, uint64_t value);
41 static int riscv013_select_current_hart(struct target *target);
42 static int riscv013_halt_prep(struct target *target);
43 static int riscv013_halt_go(struct target *target);
44 static int riscv013_resume_go(struct target *target);
45 static int riscv013_step_current_hart(struct target *target);
46 static int riscv013_on_halt(struct target *target);
47 static int riscv013_on_step(struct target *target);
48 static int riscv013_resume_prep(struct target *target);
49 static bool riscv013_is_halted(struct target *target);
50 static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
51 static int riscv013_write_debug_buffer(struct target *target, unsigned index,
52 riscv_insn_t d);
53 static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
54 index);
55 static int riscv013_execute_debug_buffer(struct target *target);
56 static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
57 static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
58 static int riscv013_dmi_write_u64_bits(struct target *target);
59 static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);
60 static int register_read(struct target *target, uint64_t *value, uint32_t number);
61 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
62 static int register_write_direct(struct target *target, unsigned number,
63 uint64_t value);
64 static int read_memory(struct target *target, target_addr_t address,
65 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment);
66 static int write_memory(struct target *target, target_addr_t address,
67 uint32_t size, uint32_t count, const uint8_t *buffer);
68 static int riscv013_test_sba_config_reg(struct target *target, target_addr_t legal_address,
69 uint32_t num_words, target_addr_t illegal_address, bool run_sbbusyerror_test);
70 void write_memory_sba_simple(struct target *target, target_addr_t addr, uint32_t *write_data,
71 uint32_t write_size, uint32_t sbcs);
72 void read_memory_sba_simple(struct target *target, target_addr_t addr,
73 uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs);
74
75 /**
76 * Since almost everything can be accomplish by scanning the dbus register, all
77 * functions here assume dbus is already selected. The exception are functions
78 * called directly by OpenOCD, which can't assume anything about what's
79 * currently in IR. They should set IR to dbus explicitly.
80 */
81
82 #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
83 #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
84
85 #define CSR_DCSR_CAUSE_SWBP 1
86 #define CSR_DCSR_CAUSE_TRIGGER 2
87 #define CSR_DCSR_CAUSE_DEBUGINT 3
88 #define CSR_DCSR_CAUSE_STEP 4
89 #define CSR_DCSR_CAUSE_HALT 5
90 #define CSR_DCSR_CAUSE_GROUP 6
91
92 #define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
93
94 /*** JTAG registers. ***/
95
96 typedef enum {
97 DMI_OP_NOP = 0,
98 DMI_OP_READ = 1,
99 DMI_OP_WRITE = 2
100 } dmi_op_t;
101 typedef enum {
102 DMI_STATUS_SUCCESS = 0,
103 DMI_STATUS_FAILED = 2,
104 DMI_STATUS_BUSY = 3
105 } dmi_status_t;
106
107 typedef enum slot {
108 SLOT0,
109 SLOT1,
110 SLOT_LAST,
111 } slot_t;
112
113 /*** Debug Bus registers. ***/
114
115 #define CMDERR_NONE 0
116 #define CMDERR_BUSY 1
117 #define CMDERR_NOT_SUPPORTED 2
118 #define CMDERR_EXCEPTION 3
119 #define CMDERR_HALT_RESUME 4
120 #define CMDERR_OTHER 7
121
122 /*** Info about the core being debugged. ***/
123
124 struct trigger {
125 uint64_t address;
126 uint32_t length;
127 uint64_t mask;
128 uint64_t value;
129 bool read, write, execute;
130 int unique_id;
131 };
132
133 typedef enum {
134 YNM_MAYBE,
135 YNM_YES,
136 YNM_NO
137 } yes_no_maybe_t;
138
139 typedef struct {
140 struct list_head list;
141 int abs_chain_position;
142
143 /* The number of harts connected to this DM. */
144 int hart_count;
145 /* Indicates we already reset this DM, so don't need to do it again. */
146 bool was_reset;
147 /* Targets that are connected to this DM. */
148 struct list_head target_list;
149 /* The currently selected hartid on this DM. */
150 int current_hartid;
151 bool hasel_supported;
152
153 /* The program buffer stores executable code. 0 is an illegal instruction,
154 * so we use 0 to mean the cached value is invalid. */
155 uint32_t progbuf_cache[16];
156 } dm013_info_t;
157
158 typedef struct {
159 struct list_head list;
160 struct target *target;
161 } target_list_t;
162
163 typedef struct {
164 /* The indexed used to address this hart in its DM. */
165 unsigned index;
166 /* Number of address bits in the dbus register. */
167 unsigned abits;
168 /* Number of abstract command data registers. */
169 unsigned datacount;
170 /* Number of words in the Program Buffer. */
171 unsigned progbufsize;
172
173 /* We cache the read-only bits of sbcs here. */
174 uint32_t sbcs;
175
176 yes_no_maybe_t progbuf_writable;
177 /* We only need the address so that we know the alignment of the buffer. */
178 riscv_addr_t progbuf_address;
179
180 /* Number of run-test/idle cycles the target requests we do after each dbus
181 * access. */
182 unsigned int dtmcs_idle;
183
184 /* This value is incremented every time a dbus access comes back as "busy".
185 * It's used to determine how many run-test/idle cycles to feed the target
186 * in between accesses. */
187 unsigned int dmi_busy_delay;
188
189 /* Number of run-test/idle cycles to add between consecutive bus master
190 * reads/writes respectively. */
191 unsigned int bus_master_write_delay, bus_master_read_delay;
192
193 /* This value is increased every time we tried to execute two commands
194 * consecutively, and the second one failed because the previous hadn't
195 * completed yet. It's used to add extra run-test/idle cycles after
196 * starting a command, so we don't have to waste time checking for busy to
197 * go low. */
198 unsigned int ac_busy_delay;
199
200 bool abstract_read_csr_supported;
201 bool abstract_write_csr_supported;
202 bool abstract_read_fpr_supported;
203 bool abstract_write_fpr_supported;
204
205 yes_no_maybe_t has_aampostincrement;
206
207 /* When a function returns some error due to a failure indicated by the
208 * target in cmderr, the caller can look here to see what that error was.
209 * (Compare with errno.) */
210 uint8_t cmderr;
211
212 /* Some fields from hartinfo. */
213 uint8_t datasize;
214 uint8_t dataaccess;
215 int16_t dataaddr;
216
217 /* The width of the hartsel field. */
218 unsigned hartsellen;
219
220 /* DM that provides access to this target. */
221 dm013_info_t *dm;
222 } riscv013_info_t;
223
224 LIST_HEAD(dm_list);
225
226 static riscv013_info_t *get_info(const struct target *target)
227 {
228 riscv_info_t *info = (riscv_info_t *) target->arch_info;
229 assert(info);
230 assert(info->version_specific);
231 return (riscv013_info_t *) info->version_specific;
232 }
233
/**
 * Return the DM structure for this target. If there isn't one, find it in the
 * global list of DMs. If it's not in there, then create one and initialize it
 * to 0.
 */
dm013_info_t *get_dm(struct target *target)
{
	RISCV013_INFO(info);
	/* Fast path: this target already resolved its DM. */
	if (info->dm)
		return info->dm;

	/* DMs are keyed by the TAP's absolute position in the scan chain, so
	 * multiple targets behind the same TAP share a single DM entry. */
	int abs_chain_position = target->tap->abs_chain_position;

	dm013_info_t *entry;
	dm013_info_t *dm = NULL;
	list_for_each_entry(entry, &dm_list, list) {
		if (entry->abs_chain_position == abs_chain_position) {
			dm = entry;
			break;
		}
	}

	if (!dm) {
		LOG_DEBUG("[%d] Allocating new DM", target->coreid);
		dm = calloc(1, sizeof(dm013_info_t));
		if (!dm)
			return NULL;
		dm->abs_chain_position = abs_chain_position;
		/* -1 marks "not yet known / nothing selected". */
		dm->current_hartid = -1;
		dm->hart_count = -1;
		INIT_LIST_HEAD(&dm->target_list);
		list_add(&dm->list, &dm_list);
	}

	info->dm = dm;
	/* Ensure this target appears on the DM's target list exactly once. */
	target_list_t *target_entry;
	list_for_each_entry(target_entry, &dm->target_list, list) {
		if (target_entry->target == target)
			return dm;
	}
	target_entry = calloc(1, sizeof(*target_entry));
	if (!target_entry) {
		/* Don't cache a DM we failed to register with. */
		info->dm = NULL;
		return NULL;
	}
	target_entry->target = target;
	list_add(&target_entry->list, &dm->target_list);

	return dm;
}
284
285 static uint32_t set_hartsel(uint32_t initial, uint32_t index)
286 {
287 initial &= ~DM_DMCONTROL_HARTSELLO;
288 initial &= ~DM_DMCONTROL_HARTSELHI;
289
290 uint32_t index_lo = index & ((1 << DM_DMCONTROL_HARTSELLO_LENGTH) - 1);
291 initial |= index_lo << DM_DMCONTROL_HARTSELLO_OFFSET;
292 uint32_t index_hi = index >> DM_DMCONTROL_HARTSELLO_LENGTH;
293 assert(index_hi < 1 << DM_DMCONTROL_HARTSELHI_LENGTH);
294 initial |= index_hi << DM_DMCONTROL_HARTSELHI_OFFSET;
295
296 return initial;
297 }
298
299 static void decode_dmi(char *text, unsigned address, unsigned data)
300 {
301 static const struct {
302 unsigned address;
303 uint64_t mask;
304 const char *name;
305 } description[] = {
306 { DM_DMCONTROL, DM_DMCONTROL_HALTREQ, "haltreq" },
307 { DM_DMCONTROL, DM_DMCONTROL_RESUMEREQ, "resumereq" },
308 { DM_DMCONTROL, DM_DMCONTROL_HARTRESET, "hartreset" },
309 { DM_DMCONTROL, DM_DMCONTROL_HASEL, "hasel" },
310 { DM_DMCONTROL, DM_DMCONTROL_HARTSELHI, "hartselhi" },
311 { DM_DMCONTROL, DM_DMCONTROL_HARTSELLO, "hartsello" },
312 { DM_DMCONTROL, DM_DMCONTROL_NDMRESET, "ndmreset" },
313 { DM_DMCONTROL, DM_DMCONTROL_DMACTIVE, "dmactive" },
314 { DM_DMCONTROL, DM_DMCONTROL_ACKHAVERESET, "ackhavereset" },
315
316 { DM_DMSTATUS, DM_DMSTATUS_IMPEBREAK, "impebreak" },
317 { DM_DMSTATUS, DM_DMSTATUS_ALLHAVERESET, "allhavereset" },
318 { DM_DMSTATUS, DM_DMSTATUS_ANYHAVERESET, "anyhavereset" },
319 { DM_DMSTATUS, DM_DMSTATUS_ALLRESUMEACK, "allresumeack" },
320 { DM_DMSTATUS, DM_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
321 { DM_DMSTATUS, DM_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
322 { DM_DMSTATUS, DM_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
323 { DM_DMSTATUS, DM_DMSTATUS_ALLUNAVAIL, "allunavail" },
324 { DM_DMSTATUS, DM_DMSTATUS_ANYUNAVAIL, "anyunavail" },
325 { DM_DMSTATUS, DM_DMSTATUS_ALLRUNNING, "allrunning" },
326 { DM_DMSTATUS, DM_DMSTATUS_ANYRUNNING, "anyrunning" },
327 { DM_DMSTATUS, DM_DMSTATUS_ALLHALTED, "allhalted" },
328 { DM_DMSTATUS, DM_DMSTATUS_ANYHALTED, "anyhalted" },
329 { DM_DMSTATUS, DM_DMSTATUS_AUTHENTICATED, "authenticated" },
330 { DM_DMSTATUS, DM_DMSTATUS_AUTHBUSY, "authbusy" },
331 { DM_DMSTATUS, DM_DMSTATUS_HASRESETHALTREQ, "hasresethaltreq" },
332 { DM_DMSTATUS, DM_DMSTATUS_CONFSTRPTRVALID, "confstrptrvalid" },
333 { DM_DMSTATUS, DM_DMSTATUS_VERSION, "version" },
334
335 { DM_ABSTRACTCS, DM_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
336 { DM_ABSTRACTCS, DM_ABSTRACTCS_BUSY, "busy" },
337 { DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR, "cmderr" },
338 { DM_ABSTRACTCS, DM_ABSTRACTCS_DATACOUNT, "datacount" },
339
340 { DM_COMMAND, DM_COMMAND_CMDTYPE, "cmdtype" },
341
342 { DM_SBCS, DM_SBCS_SBVERSION, "sbversion" },
343 { DM_SBCS, DM_SBCS_SBBUSYERROR, "sbbusyerror" },
344 { DM_SBCS, DM_SBCS_SBBUSY, "sbbusy" },
345 { DM_SBCS, DM_SBCS_SBREADONADDR, "sbreadonaddr" },
346 { DM_SBCS, DM_SBCS_SBACCESS, "sbaccess" },
347 { DM_SBCS, DM_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
348 { DM_SBCS, DM_SBCS_SBREADONDATA, "sbreadondata" },
349 { DM_SBCS, DM_SBCS_SBERROR, "sberror" },
350 { DM_SBCS, DM_SBCS_SBASIZE, "sbasize" },
351 { DM_SBCS, DM_SBCS_SBACCESS128, "sbaccess128" },
352 { DM_SBCS, DM_SBCS_SBACCESS64, "sbaccess64" },
353 { DM_SBCS, DM_SBCS_SBACCESS32, "sbaccess32" },
354 { DM_SBCS, DM_SBCS_SBACCESS16, "sbaccess16" },
355 { DM_SBCS, DM_SBCS_SBACCESS8, "sbaccess8" },
356 };
357
358 text[0] = 0;
359 for (unsigned i = 0; i < ARRAY_SIZE(description); i++) {
360 if (description[i].address == address) {
361 uint64_t mask = description[i].mask;
362 unsigned value = get_field(data, mask);
363 if (value) {
364 if (i > 0)
365 *(text++) = ' ';
366 if (mask & (mask >> 1)) {
367 /* If the field is more than 1 bit wide. */
368 sprintf(text, "%s=%d", description[i].name, value);
369 } else {
370 strcpy(text, description[i].name);
371 }
372 text += strlen(text);
373 }
374 }
375 }
376 }
377
/* Log a completed DMI scan at debug level: the request we shifted out and
 * the response we shifted in, plus a symbolic decode of any known fields. */
static void dump_field(int idle, const struct scan_field *field)
{
	/* Index 0..3 by the 2-bit op/status value. */
	static const char * const op_string[] = {"-", "r", "w", "?"};
	static const char * const status_string[] = {"+", "?", "F", "b"};

	if (debug_level < LOG_LVL_DEBUG)
		return;

	/* Outgoing word: op in the low bits, then data, then address. */
	uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
	unsigned int out_op = get_field(out, DTM_DMI_OP);
	unsigned int out_data = get_field(out, DTM_DMI_DATA);
	unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;

	/* Incoming word uses the same layout; the op position carries status. */
	uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
	unsigned int in_op = get_field(in, DTM_DMI_OP);
	unsigned int in_data = get_field(in, DTM_DMI_DATA);
	unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;

	log_printf_lf(LOG_LVL_DEBUG,
			__FILE__, __LINE__, "scan",
			"%db %s %08x @%02x -> %s %08x @%02x; %di",
			field->num_bits, op_string[out_op], out_data, out_address,
			status_string[in_op], in_data, in_address, idle);

	/* Second log line: named bit-fields, if either side decodes to any. */
	char out_text[500];
	char in_text[500];
	decode_dmi(out_text, out_address, out_data);
	decode_dmi(in_text, in_address, in_data);
	if (in_text[0] || out_text[0]) {
		log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
				out_text, in_text);
	}
}
411
412 /*** Utility functions. ***/
413
414 static void select_dmi(struct target *target)
415 {
416 if (bscan_tunnel_ir_width != 0) {
417 select_dmi_via_bscan(target);
418 return;
419 }
420 jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
421 }
422
/* Scan a 32-bit value through the DTM control/status (dtmcs) register and
 * return the value shifted back. Always leaves the IR selecting dmi. */
static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
{
	struct scan_field field;
	uint8_t in_value[4];
	uint8_t out_value[4] = { 0 };

	/* BSCAN-tunneled targets have their own scan path. */
	if (bscan_tunnel_ir_width != 0)
		return dtmcontrol_scan_via_bscan(target, out);

	buf_set_u32(out_value, 0, 32, out);

	jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);

	field.num_bits = 32;
	field.out_value = out_value;
	field.in_value = in_value;
	jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);

	/* Always return to dmi. */
	select_dmi(target);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		/* NOTE(review): the error code is returned where a register value
		 * is expected, so callers cannot distinguish the two — confirm
		 * callers treat this as best-effort. */
		LOG_ERROR("failed jtag scan: %d", retval);
		return retval;
	}

	uint32_t in = buf_get_u32(field.in_value, 0, 32);
	LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);

	return in;
}
455
/* Called after a DMI access came back "busy": grow the run-test/idle delay
 * used between DMI accesses (~10% growth, at least +1 cycle) and clear the
 * DTM's sticky busy state so the next access starts clean. */
static void increase_dmi_busy_delay(struct target *target)
{
	riscv013_info_t *info = get_info(target);
	info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
	LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
			info->dtmcs_idle, info->dmi_busy_delay,
			info->ac_busy_delay);

	/* Writing dmireset to dtmcs clears the sticky error state. */
	dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
}
466
/**
 * Perform a single raw DMI scan: shift out {address_out, data_out, op} and
 * capture {address_in, data_in, status} from the previous operation.
 *
 * exec: If this is set, assume the scan results in an execution, so more
 * run-test/idle cycles may be required.
 *
 * Returns the 2-bit DMI status field (see dmi_status_t), or
 * DMI_STATUS_FAILED if the JTAG queue itself failed.
 */
static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
		uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
		bool exec)
{
	riscv013_info_t *info = get_info(target);
	RISCV_INFO(r);
	/* DMI register layout: op (LSBs), then data, then address (MSBs). */
	unsigned num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH;
	size_t num_bytes = (num_bits + 7) / 8;
	uint8_t in[num_bytes];
	uint8_t out[num_bytes];
	struct scan_field field = {
		.num_bits = num_bits,
		.out_value = out,
		.in_value = in
	};
	riscv_bscan_tunneled_scan_context_t bscan_ctxt;

	/* After a reset, count down a few scans before trusting (and zeroing)
	 * the learned busy delays, since they may reflect pre-reset behavior. */
	if (r->reset_delays_wait >= 0) {
		r->reset_delays_wait--;
		if (r->reset_delays_wait < 0) {
			info->dmi_busy_delay = 0;
			info->ac_busy_delay = 0;
		}
	}

	memset(in, 0, num_bytes);
	memset(out, 0, num_bytes);

	/* abits==0 would mean the DTM was never examined. */
	assert(info->abits != 0);

	buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
	buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
	buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);

	/* I wanted to place this code in a different function, but the way JTAG command
	   queueing works in the jtag handling functions, the scan fields either have to be
	   heap allocated, global/static, or else they need to stay on the stack until
	   the jtag_execute_queue() call. Heap or static fields in this case doesn't seem
	   the best fit. Declaring stack based field values in a subsidiary function call wouldn't
	   work. */
	if (bscan_tunnel_ir_width != 0) {
		riscv_add_bscan_tunneled_scan(target, &field, &bscan_ctxt);
	} else {
		/* Assume dbus is already selected. */
		jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
	}

	/* Insert extra run-test/idle cycles learned from past "busy" replies. */
	int idle_count = info->dmi_busy_delay;
	if (exec)
		idle_count += info->ac_busy_delay;

	if (idle_count)
		jtag_add_runtest(idle_count, TAP_IDLE);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("dmi_scan failed jtag scan");
		if (data_in)
			*data_in = ~0;
		return DMI_STATUS_FAILED;
	}

	if (bscan_tunnel_ir_width != 0) {
		/* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
		buffer_shr(in, num_bytes, 1);
	}

	if (data_in)
		*data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);

	if (address_in)
		*address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
	dump_field(idle_count, &field);
	return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
}
546
/**
 * Perform a DMI operation, retrying (with growing idle delays) while the
 * DTM reports busy, until it succeeds or `timeout_sec` elapses.
 *
 * @param target
 * @param data_in The data we received from the target.
 * @param dmi_busy_encountered
 *			If non-NULL, will be updated to reflect whether DMI busy was
 *			encountered while executing this operation or not.
 * @param dmi_op The operation to perform (read/write/nop).
 * @param address The address argument to that operation.
 * @param data_out The data to send to the target.
 * @param timeout_sec
 * @param exec When true, this scan will execute something, so extra RTI
 *			cycles may be added.
 * @param ensure_success
 *			Scan a nop after the requested operation, ensuring the
 *			DMI operation succeeded.
 */
static int dmi_op_timeout(struct target *target, uint32_t *data_in,
		bool *dmi_busy_encountered, int dmi_op, uint32_t address,
		uint32_t data_out, int timeout_sec, bool exec, bool ensure_success)
{
	select_dmi(target);

	dmi_status_t status;
	uint32_t address_in;

	if (dmi_busy_encountered)
		*dmi_busy_encountered = false;

	/* Resolve the op name up front for error messages. */
	const char *op_name;
	switch (dmi_op) {
		case DMI_OP_NOP:
			op_name = "nop";
			break;
		case DMI_OP_READ:
			op_name = "read";
			break;
		case DMI_OP_WRITE:
			op_name = "write";
			break;
		default:
			LOG_ERROR("Invalid DMI operation: %d", dmi_op);
			return ERROR_FAIL;
	}

	/* Let the host side (e.g. gdb) know we're still alive during long retries. */
	keep_alive();

	time_t start = time(NULL);
	/* This first loop performs the request. Note that if for some reason this
	 * stays busy, it is actually due to the previous access. */
	while (1) {
		status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
				exec);
		if (status == DMI_STATUS_BUSY) {
			/* Back off and retry; the DTM was still working on the
			 * previous access. */
			increase_dmi_busy_delay(target);
			if (dmi_busy_encountered)
				*dmi_busy_encountered = true;
		} else if (status == DMI_STATUS_SUCCESS) {
			break;
		} else {
			LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
			return ERROR_FAIL;
		}
		if (time(NULL) - start > timeout_sec)
			return ERROR_TIMEOUT_REACHED;
	}

	/* NOTE(review): unreachable — the loop above only exits via break on
	 * success or by returning; kept as a belt-and-braces check. */
	if (status != DMI_STATUS_SUCCESS) {
		LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
		return ERROR_FAIL;
	}

	if (ensure_success) {
		/* This second loop ensures the request succeeded, and gets back data.
		 * Note that NOP can result in a 'busy' result as well, but that would be
		 * noticed on the next DMI access we do. */
		while (1) {
			status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
					false);
			if (status == DMI_STATUS_BUSY) {
				increase_dmi_busy_delay(target);
				if (dmi_busy_encountered)
					*dmi_busy_encountered = true;
			} else if (status == DMI_STATUS_SUCCESS) {
				break;
			} else {
				if (data_in) {
					LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
							op_name, address, *data_in, status);
				} else {
					LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
							status);
				}
				return ERROR_FAIL;
			}
			if (time(NULL) - start > timeout_sec)
				return ERROR_TIMEOUT_REACHED;
		}
	}

	return ERROR_OK;
}
648
649 static int dmi_op(struct target *target, uint32_t *data_in,
650 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
651 uint32_t data_out, bool exec, bool ensure_success)
652 {
653 int result = dmi_op_timeout(target, data_in, dmi_busy_encountered, dmi_op,
654 address, data_out, riscv_command_timeout_sec, exec, ensure_success);
655 if (result == ERROR_TIMEOUT_REACHED) {
656 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
657 "either really slow or broken. You could increase the "
658 "timeout with riscv set_command_timeout_sec.",
659 riscv_command_timeout_sec);
660 return ERROR_FAIL;
661 }
662 return result;
663 }
664
/* Read a DM register over DMI; a trailing nop confirms success. */
static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false, true);
}
669
/* Like dmi_read(), but the read triggers execution on the target, so extra
 * run-test/idle cycles may be inserted. */
static int dmi_read_exec(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true, true);
}
674
/* Write a DM register over DMI; a trailing nop confirms success. */
static int dmi_write(struct target *target, uint32_t address, uint32_t value)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true);
}
679
/* Write a DM register that triggers execution (e.g. DM_COMMAND). The caller
 * chooses whether to spend a confirming nop via `ensure_success`. */
static int dmi_write_exec(struct target *target, uint32_t address,
		uint32_t value, bool ensure_success)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true, ensure_success);
}
685
/* Read dmstatus, sanity-checking the debug-spec version field and (optionally)
 * that the debugger is authenticated. Authentication failure is fatal. */
int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
		bool authenticated, unsigned timeout_sec)
{
	int result = dmi_op_timeout(target, dmstatus, NULL, DMI_OP_READ,
			DM_DMSTATUS, 0, timeout_sec, false, true);
	if (result != ERROR_OK)
		return result;
	int dmstatus_version = get_field(*dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		/* NOTE(review): this branch logs an error but still falls through
		 * to return ERROR_OK — confirm whether continuing on an unsupported
		 * version is intentional (best-effort) or a missing ERROR_FAIL. */
		LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (1.0), not "
				"%d (dmstatus=0x%x). This error might be caused by a JTAG "
				"signal issue. Try reducing the JTAG clock speed.",
				get_field(*dmstatus, DM_DMSTATUS_VERSION), *dmstatus);
	} else if (authenticated && !get_field(*dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", *dmstatus);
		return ERROR_FAIL;
	}
	return ERROR_OK;
}
707
/* dmstatus_read_timeout() with the global command timeout. */
int dmstatus_read(struct target *target, uint32_t *dmstatus,
		bool authenticated)
{
	return dmstatus_read_timeout(target, dmstatus, authenticated,
			riscv_command_timeout_sec);
}
714
/* Called when an abstract command was started before the previous one
 * finished: grow the extra idle delay used after starting abstract commands
 * (~10% growth, at least +1 cycle). Compare increase_dmi_busy_delay(). */
static void increase_ac_busy_delay(struct target *target)
{
	riscv013_info_t *info = get_info(target);
	info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
	LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
			info->dtmcs_idle, info->dmi_busy_delay,
			info->ac_busy_delay);
}
723
724 uint32_t abstract_register_size(unsigned width)
725 {
726 switch (width) {
727 case 32:
728 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 2);
729 case 64:
730 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 3);
731 case 128:
732 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 4);
733 default:
734 LOG_ERROR("Unsupported register width: %d", width);
735 return 0;
736 }
737 }
738
/* Poll abstractcs until the busy bit clears, or the command timeout expires.
 * On return (either way) *abstractcs holds the last value read. */
static int wait_for_idle(struct target *target, uint32_t *abstractcs)
{
	RISCV013_INFO(info);
	time_t start = time(NULL);
	while (1) {
		if (dmi_read(target, abstractcs, DM_ABSTRACTCS) != ERROR_OK)
			return ERROR_FAIL;

		if (get_field(*abstractcs, DM_ABSTRACTCS_BUSY) == 0)
			return ERROR_OK;

		if (time(NULL) - start > riscv_command_timeout_sec) {
			/* Cache cmderr for the caller (compare errno). */
			info->cmderr = get_field(*abstractcs, DM_ABSTRACTCS_CMDERR);
			if (info->cmderr != CMDERR_NONE) {
				/* Indexed by the 3-bit cmderr encoding. */
				const char *errors[8] = {
						"none",
						"busy",
						"not supported",
						"exception",
						"halt/resume",
						"reserved",
						"reserved",
						"other" };

				LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
						errors[info->cmderr], *abstractcs);
			}

			LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
					"Increase the timeout with riscv set_command_timeout_sec.",
					riscv_command_timeout_sec,
					*abstractcs);
			return ERROR_FAIL;
		}
	}
}
775
/* Write `command` to DM_COMMAND, wait for it to finish, and check cmderr.
 * On failure the sticky cmderr is cleared in the DM but preserved in
 * info->cmderr for the caller to inspect. */
static int execute_abstract_command(struct target *target, uint32_t command)
{
	RISCV013_INFO(info);
	if (debug_level >= LOG_LVL_DEBUG) {
		switch (get_field(command, DM_COMMAND_CMDTYPE)) {
			case 0:
				/* cmdtype 0 = Access Register; decode its sub-fields. */
				LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
						"transfer=%d, write=%d, regno=0x%x",
						command,
						8 << get_field(command, AC_ACCESS_REGISTER_AARSIZE),
						get_field(command, AC_ACCESS_REGISTER_POSTEXEC),
						get_field(command, AC_ACCESS_REGISTER_TRANSFER),
						get_field(command, AC_ACCESS_REGISTER_WRITE),
						get_field(command, AC_ACCESS_REGISTER_REGNO));
				break;
			default:
				LOG_DEBUG("command=0x%x", command);
				break;
		}
	}

	/* ensure_success=false: wait_for_idle() below detects any failure. */
	if (dmi_write_exec(target, DM_COMMAND, command, false) != ERROR_OK)
		return ERROR_FAIL;

	uint32_t abstractcs = 0;
	int result = wait_for_idle(target, &abstractcs);

	info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
	if (info->cmderr != 0 || result != ERROR_OK) {
		LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
		/* Clear the error. */
		dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
813
/* Read abstract-command argument `index` (of `size_bits` bits) from the
 * data0.. registers. Returns ~0 for unsupported sizes.
 * NOTE(review): dmi_read() results are not checked here, and the interface
 * has no error channel — on a DMI failure the returned value may be
 * partially undefined. Confirm callers tolerate this. */
static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
		unsigned size_bits)
{
	riscv_reg_t value = 0;
	uint32_t v;
	/* Arguments are packed into data0.. as 32-bit words. */
	unsigned offset = index * size_bits / 32;
	switch (size_bits) {
		default:
			LOG_ERROR("Unsupported size: %d bits", size_bits);
			return ~0;
		case 64:
			dmi_read(target, &v, DM_DATA0 + offset + 1);
			value |= ((uint64_t) v) << 32;
			/* falls through */
		case 32:
			dmi_read(target, &v, DM_DATA0 + offset);
			value |= v;
	}
	return value;
}
834
835 static int write_abstract_arg(struct target *target, unsigned index,
836 riscv_reg_t value, unsigned size_bits)
837 {
838 unsigned offset = index * size_bits / 32;
839 switch (size_bits) {
840 default:
841 LOG_ERROR("Unsupported size: %d bits", size_bits);
842 return ERROR_FAIL;
843 case 64:
844 dmi_write(target, DM_DATA0 + offset + 1, value >> 32);
845 /* falls through */
846 case 32:
847 dmi_write(target, DM_DATA0 + offset, value);
848 }
849 return ERROR_OK;
850 }
851
/**
 * Build an Access Register abstract command for GDB register `number`.
 *
 * @par size in bits
 *
 * The regno field is mapped per the debug spec: 0x1000+ for GPRs,
 * 0x1020+ for FPRs, raw CSR number for CSRs, 0xc000+ for custom registers.
 * Unsupported sizes/register classes hit assert(0).
 */
static uint32_t access_register_command(struct target *target, uint32_t number,
		unsigned size, uint32_t flags)
{
	uint32_t command = set_field(0, DM_COMMAND_CMDTYPE, 0);
	switch (size) {
		case 32:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 2);
			break;
		case 64:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3);
			break;
		default:
			LOG_ERROR("%d-bit register %s not supported.", size,
					gdb_regno_name(number));
			assert(0);
	}

	if (number <= GDB_REGNO_XPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1000 + number - GDB_REGNO_ZERO);
	} else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1020 + number - GDB_REGNO_FPR0);
	} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				number - GDB_REGNO_CSR0);
	} else if (number >= GDB_REGNO_COUNT) {
		/* Custom register. */
		assert(target->reg_cache->reg_list[number].arch_info);
		riscv_reg_info_t *reg_info = target->reg_cache->reg_list[number].arch_info;
		assert(reg_info);
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0xc000 + reg_info->custom_number);
	} else {
		assert(0);
	}

	/* Caller-supplied transfer/write/postexec bits. */
	command |= flags;

	return command;
}
896
/* Read register `number` via an abstract command. On "not supported" from
 * the target, remembers that FPR/CSR abstract reads don't work so future
 * calls fail fast (the caller then falls back to another access method). */
static int register_read_abstract(struct target *target, uint64_t *value,
		uint32_t number, unsigned size)
{
	RISCV013_INFO(info);

	/* Fail fast if we already learned this register class is unsupported. */
	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			!info->abstract_read_fpr_supported)
		return ERROR_FAIL;
	if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
			!info->abstract_read_csr_supported)
		return ERROR_FAIL;
	/* The spec doesn't define abstract register numbers for vector registers. */
	if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31)
		return ERROR_FAIL;

	uint32_t command = access_register_command(target, number, size,
			AC_ACCESS_REGISTER_TRANSFER);

	int result = execute_abstract_command(target, command);
	if (result != ERROR_OK) {
		if (info->cmderr == CMDERR_NOT_SUPPORTED) {
			if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
				info->abstract_read_fpr_supported = false;
				LOG_INFO("Disabling abstract command reads from FPRs.");
			} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
				info->abstract_read_csr_supported = false;
				LOG_INFO("Disabling abstract command reads from CSRs.");
			}
		}
		return result;
	}

	if (value)
		*value = read_abstract_arg(target, 0, size);

	return ERROR_OK;
}
934
/* Write register `number` via an abstract command; mirror image of
 * register_read_abstract(), including the learned-unsupported fast path. */
static int register_write_abstract(struct target *target, uint32_t number,
		uint64_t value, unsigned size)
{
	RISCV013_INFO(info);

	/* Fail fast if we already learned this register class is unsupported. */
	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			!info->abstract_write_fpr_supported)
		return ERROR_FAIL;
	if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
			!info->abstract_write_csr_supported)
		return ERROR_FAIL;

	uint32_t command = access_register_command(target, number, size,
			AC_ACCESS_REGISTER_TRANSFER |
			AC_ACCESS_REGISTER_WRITE);

	/* Stage the value in data0.. before issuing the command. */
	if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
		return ERROR_FAIL;

	int result = execute_abstract_command(target, command);
	if (result != ERROR_OK) {
		if (info->cmderr == CMDERR_NOT_SUPPORTED) {
			if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
				info->abstract_write_fpr_supported = false;
				LOG_INFO("Disabling abstract command writes to FPRs.");
			} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
				info->abstract_write_csr_supported = false;
				LOG_INFO("Disabling abstract command writes to CSRs.");
			}
		}
		return result;
	}

	return ERROR_OK;
}
970
971 /*
972 * Sets the AAMSIZE field of a memory access abstract command based on
973 * the width (bits).
974 */
975 static uint32_t abstract_memory_size(unsigned width)
976 {
977 switch (width) {
978 case 8:
979 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 0);
980 case 16:
981 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 1);
982 case 32:
983 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 2);
984 case 64:
985 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 3);
986 case 128:
987 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 4);
988 default:
989 LOG_ERROR("Unsupported memory width: %d", width);
990 return 0;
991 }
992 }
993
994 /*
995 * Creates a memory access abstract command.
996 */
997 static uint32_t access_memory_command(struct target *target, bool virtual,
998 unsigned width, bool postincrement, bool write)
999 {
1000 uint32_t command = set_field(0, AC_ACCESS_MEMORY_CMDTYPE, 2);
1001 command = set_field(command, AC_ACCESS_MEMORY_AAMVIRTUAL, virtual);
1002 command |= abstract_memory_size(width);
1003 command = set_field(command, AC_ACCESS_MEMORY_AAMPOSTINCREMENT,
1004 postincrement);
1005 command = set_field(command, AC_ACCESS_MEMORY_WRITE, write);
1006
1007 return command;
1008 }
1009
1010 static int examine_progbuf(struct target *target)
1011 {
1012 riscv013_info_t *info = get_info(target);
1013
1014 if (info->progbuf_writable != YNM_MAYBE)
1015 return ERROR_OK;
1016
1017 /* Figure out if progbuf is writable. */
1018
1019 if (info->progbufsize < 1) {
1020 info->progbuf_writable = YNM_NO;
1021 LOG_INFO("No program buffer present.");
1022 return ERROR_OK;
1023 }
1024
1025 uint64_t s0;
1026 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1027 return ERROR_FAIL;
1028
1029 struct riscv_program program;
1030 riscv_program_init(&program, target);
1031 riscv_program_insert(&program, auipc(S0));
1032 if (riscv_program_exec(&program, target) != ERROR_OK)
1033 return ERROR_FAIL;
1034
1035 if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
1036 return ERROR_FAIL;
1037
1038 riscv_program_init(&program, target);
1039 riscv_program_insert(&program, sw(S0, S0, 0));
1040 int result = riscv_program_exec(&program, target);
1041
1042 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1043 return ERROR_FAIL;
1044
1045 if (result != ERROR_OK) {
1046 /* This program might have failed if the program buffer is not
1047 * writable. */
1048 info->progbuf_writable = YNM_NO;
1049 return ERROR_OK;
1050 }
1051
1052 uint32_t written;
1053 if (dmi_read(target, &written, DM_PROGBUF0) != ERROR_OK)
1054 return ERROR_FAIL;
1055 if (written == (uint32_t) info->progbuf_address) {
1056 LOG_INFO("progbuf is writable at 0x%" PRIx64,
1057 info->progbuf_address);
1058 info->progbuf_writable = YNM_YES;
1059
1060 } else {
1061 LOG_INFO("progbuf is not writeable at 0x%" PRIx64,
1062 info->progbuf_address);
1063 info->progbuf_writable = YNM_NO;
1064 }
1065
1066 return ERROR_OK;
1067 }
1068
1069 static int is_fpu_reg(uint32_t gdb_regno)
1070 {
1071 return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) ||
1072 (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) ||
1073 (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) ||
1074 (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR);
1075 }
1076
1077 static int is_vector_reg(uint32_t gdb_regno)
1078 {
1079 return (gdb_regno >= GDB_REGNO_V0 && gdb_regno <= GDB_REGNO_V31) ||
1080 gdb_regno == GDB_REGNO_VSTART ||
1081 gdb_regno == GDB_REGNO_VXSAT ||
1082 gdb_regno == GDB_REGNO_VXRM ||
1083 gdb_regno == GDB_REGNO_VL ||
1084 gdb_regno == GDB_REGNO_VTYPE ||
1085 gdb_regno == GDB_REGNO_VLENB;
1086 }
1087
1088 static int prep_for_register_access(struct target *target, uint64_t *mstatus,
1089 int regno)
1090 {
1091 if (is_fpu_reg(regno) || is_vector_reg(regno)) {
1092 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
1093 return ERROR_FAIL;
1094 if (is_fpu_reg(regno) && (*mstatus & MSTATUS_FS) == 0) {
1095 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1096 set_field(*mstatus, MSTATUS_FS, 1)) != ERROR_OK)
1097 return ERROR_FAIL;
1098 } else if (is_vector_reg(regno) && (*mstatus & MSTATUS_VS) == 0) {
1099 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1100 set_field(*mstatus, MSTATUS_VS, 1)) != ERROR_OK)
1101 return ERROR_FAIL;
1102 }
1103 } else {
1104 *mstatus = 0;
1105 }
1106 return ERROR_OK;
1107 }
1108
1109 static int cleanup_after_register_access(struct target *target,
1110 uint64_t mstatus, int regno)
1111 {
1112 if ((is_fpu_reg(regno) && (mstatus & MSTATUS_FS) == 0) ||
1113 (is_vector_reg(regno) && (mstatus & MSTATUS_VS) == 0))
1114 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
1115 return ERROR_FAIL;
1116 return ERROR_OK;
1117 }
1118
/* Where scratch memory lives, from the debugger's point of view. */
typedef enum {
	SPACE_DM_DATA,		/* Abstract data registers (DM_DATA0...). */
	SPACE_DMI_PROGBUF,	/* Program buffer words (DM_PROGBUF0...). */
	SPACE_DMI_RAM		/* Target RAM reached through a working area. */
} memory_space_t;

/* Description of scratch memory reserved by scratch_reserve(). */
typedef struct {
	/* How can the debugger access this memory? */
	memory_space_t memory_space;
	/* Memory address to access the scratch memory from the hart. */
	riscv_addr_t hart_address;
	/* Memory address to access the scratch memory from the debugger. */
	riscv_addr_t debug_address;
	/* Working area handle when memory_space == SPACE_DMI_RAM; NULL otherwise. */
	struct working_area *area;
} scratch_mem_t;
1134
/**
 * Find some scratch memory to be used with the given program.
 *
 * Tries, in order: the DM data registers (when memory-mapped into the hart's
 * address space), spare space in the program buffer, and finally a
 * user-configured working area. Fills in *scratch on success; the caller must
 * release it with scratch_release().
 */
static int scratch_reserve(struct target *target,
		scratch_mem_t *scratch,
		struct riscv_program *program,
		unsigned size_bytes)
{
	/* Round alignment up to the next power of two >= size_bytes. */
	riscv_addr_t alignment = 1;
	while (alignment < size_bytes)
		alignment *= 2;

	/* No working area allocated yet; scratch_release() relies on this. */
	scratch->area = NULL;

	riscv013_info_t *info = get_info(target);

	/* Option 1: See if data# registers can be used as the scratch memory */
	if (info->dataaccess == 1) {
		/* Sign extend dataaddr. */
		scratch->hart_address = info->dataaddr;
		if (info->dataaddr & (1<<11))
			scratch->hart_address |= 0xfffffffffffff000ULL;
		/* Align. */
		scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);

		/* NOTE(review): this comparison direction looks suspicious -- it
		 * accepts this option when the rounded-up word count is >= the
		 * available datasize, i.e. when the request appears NOT to fit.
		 * Confirm the intended capacity check before changing it. */
		if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 >=
				info->datasize) {
			scratch->memory_space = SPACE_DM_DATA;
			scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
			return ERROR_OK;
		}
	}

	/* Option 2: See if progbuf can be used as the scratch memory */
	if (examine_progbuf(target) != ERROR_OK)
		return ERROR_FAIL;

	/* Allow for ebreak at the end of the program. */
	unsigned program_size = (program->instruction_count + 1) * 4;
	scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
		~(alignment - 1);
	/* NOTE(review): same suspicious comparison direction as option 1. */
	if ((info->progbuf_writable == YNM_YES) &&
			((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 >=
					info->progbufsize)) {
		scratch->memory_space = SPACE_DMI_PROGBUF;
		scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
		return ERROR_OK;
	}

	/* Option 3: User-configured memory area as scratch RAM */
	if (target_alloc_working_area(target, size_bytes + alignment - 1,
				&scratch->area) == ERROR_OK) {
		/* Over-allocate, then align inside the working area. */
		scratch->hart_address = (scratch->area->address + alignment - 1) &
			~(alignment - 1);
		scratch->memory_space = SPACE_DMI_RAM;
		scratch->debug_address = scratch->hart_address;
		return ERROR_OK;
	}

	LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
			"a work area with 'configure -work-area-phys'.", size_bytes);
	return ERROR_FAIL;
}
1198
1199 static int scratch_release(struct target *target,
1200 scratch_mem_t *scratch)
1201 {
1202 if (scratch->area)
1203 return target_free_working_area(target, scratch->area);
1204
1205 return ERROR_OK;
1206 }
1207
1208 static int scratch_read64(struct target *target, scratch_mem_t *scratch,
1209 uint64_t *value)
1210 {
1211 uint32_t v;
1212 switch (scratch->memory_space) {
1213 case SPACE_DM_DATA:
1214 if (dmi_read(target, &v, DM_DATA0 + scratch->debug_address) != ERROR_OK)
1215 return ERROR_FAIL;
1216 *value = v;
1217 if (dmi_read(target, &v, DM_DATA1 + scratch->debug_address) != ERROR_OK)
1218 return ERROR_FAIL;
1219 *value |= ((uint64_t) v) << 32;
1220 break;
1221 case SPACE_DMI_PROGBUF:
1222 if (dmi_read(target, &v, DM_PROGBUF0 + scratch->debug_address) != ERROR_OK)
1223 return ERROR_FAIL;
1224 *value = v;
1225 if (dmi_read(target, &v, DM_PROGBUF1 + scratch->debug_address) != ERROR_OK)
1226 return ERROR_FAIL;
1227 *value |= ((uint64_t) v) << 32;
1228 break;
1229 case SPACE_DMI_RAM:
1230 {
1231 uint8_t buffer[8] = {0};
1232 if (read_memory(target, scratch->debug_address, 4, 2, buffer, 4) != ERROR_OK)
1233 return ERROR_FAIL;
1234 *value = buffer[0] |
1235 (((uint64_t) buffer[1]) << 8) |
1236 (((uint64_t) buffer[2]) << 16) |
1237 (((uint64_t) buffer[3]) << 24) |
1238 (((uint64_t) buffer[4]) << 32) |
1239 (((uint64_t) buffer[5]) << 40) |
1240 (((uint64_t) buffer[6]) << 48) |
1241 (((uint64_t) buffer[7]) << 56);
1242 }
1243 break;
1244 }
1245 return ERROR_OK;
1246 }
1247
1248 static int scratch_write64(struct target *target, scratch_mem_t *scratch,
1249 uint64_t value)
1250 {
1251 switch (scratch->memory_space) {
1252 case SPACE_DM_DATA:
1253 dmi_write(target, DM_DATA0 + scratch->debug_address, value);
1254 dmi_write(target, DM_DATA1 + scratch->debug_address, value >> 32);
1255 break;
1256 case SPACE_DMI_PROGBUF:
1257 dmi_write(target, DM_PROGBUF0 + scratch->debug_address, value);
1258 dmi_write(target, DM_PROGBUF1 + scratch->debug_address, value >> 32);
1259 break;
1260 case SPACE_DMI_RAM:
1261 {
1262 uint8_t buffer[8] = {
1263 value,
1264 value >> 8,
1265 value >> 16,
1266 value >> 24,
1267 value >> 32,
1268 value >> 40,
1269 value >> 48,
1270 value >> 56
1271 };
1272 if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1273 return ERROR_FAIL;
1274 }
1275 break;
1276 }
1277 return ERROR_OK;
1278 }
1279
1280 /** Return register size in bits. */
1281 static unsigned register_size(struct target *target, unsigned number)
1282 {
1283 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1284 * when this function is called during examine(). */
1285 if (target->reg_cache)
1286 return target->reg_cache->reg_list[number].size;
1287 else
1288 return riscv_xlen(target);
1289 }
1290
1291 static bool has_sufficient_progbuf(struct target *target, unsigned size)
1292 {
1293 RISCV013_INFO(info);
1294 RISCV_INFO(r);
1295
1296 return info->progbufsize + r->impebreak >= size;
1297 }
1298
/**
 * Immediately write the new value to the requested register. This mechanism
 * bypasses any caches.
 */
static int register_write_direct(struct target *target, unsigned number,
		uint64_t value)
{
	LOG_DEBUG("{%d} %s <- 0x%" PRIx64, riscv_current_hartid(target),
			gdb_regno_name(number), value);

	/* First try an abstract "access register" command. Fall through to the
	 * program-buffer path only if that fails, a usable progbuf exists, and
	 * the hart is halted. */
	int result = register_write_abstract(target, number, value,
			register_size(target, number));
	if (result == ERROR_OK || !has_sufficient_progbuf(target, 2) ||
			!riscv_is_halted(target))
		return result;

	struct riscv_program program;
	riscv_program_init(&program, target);

	/* S0 is clobbered below as a staging register; save it for restore. */
	uint64_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Enable mstatus.FS/VS if an FPU/vector register is being accessed. */
	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
		return ERROR_FAIL;

	scratch_mem_t scratch;
	bool use_scratch = false;
	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			riscv_supports_extension(target, 'D') &&
			riscv_xlen(target) < 64) {
		/* There are no instructions to move all the bits from a register, so
		 * we need to use some scratch RAM. */
		use_scratch = true;
		riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));

		if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
			return ERROR_FAIL;

		/* Point S0 at the scratch memory the fld will load from. */
		if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
				!= ERROR_OK) {
			scratch_release(target, &scratch);
			return ERROR_FAIL;
		}

		if (scratch_write64(target, &scratch, value) != ERROR_OK) {
			scratch_release(target, &scratch);
			return ERROR_FAIL;
		}

	} else if (number == GDB_REGNO_VTYPE) {
		/* vtype can only be written through vsetvli; read vl into S0 first
		 * so the vsetvli preserves the current vl. */
		riscv_program_insert(&program, csrr(S0, CSR_VL));
		riscv_program_insert(&program, vsetvli(ZERO, S0, value));

	} else {
		/* Stage the value in S0, then move it to its destination. */
		if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
			return ERROR_FAIL;

		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			if (riscv_supports_extension(target, 'D'))
				riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
			else
				riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
		} else if (number == GDB_REGNO_VL) {
			/* "The XLEN-bit-wide read-only vl CSR can only be updated by the
			 * vsetvli and vsetvl instructions, and the fault-only-rst vector
			 * load instruction variants." */
			riscv_reg_t vtype;
			if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK)
				return ERROR_FAIL;
			if (riscv_program_insert(&program, vsetvli(ZERO, S0, vtype)) != ERROR_OK)
				return ERROR_FAIL;
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrw(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
			return ERROR_FAIL;
		}
	}

	int exec_out = riscv_program_exec(&program, target);
	/* Don't message on error. Probably the register doesn't exist. */
	if (exec_out == ERROR_OK && target->reg_cache) {
		/* Keep the cached register value in sync with what we just wrote. */
		struct reg *reg = &target->reg_cache->reg_list[number];
		buf_set_u64(reg->value, 0, reg->size, value);
	}

	if (use_scratch)
		scratch_release(target, &scratch);

	/* Undo any mstatus change made by prep_for_register_access(). */
	if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
		return ERROR_FAIL;

	/* Restore S0. */
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return exec_out;
}
1399
1400 /** Read register value from the target. Also update the cached value. */
1401 static int register_read(struct target *target, uint64_t *value, uint32_t number)
1402 {
1403 if (number == GDB_REGNO_ZERO) {
1404 *value = 0;
1405 return ERROR_OK;
1406 }
1407 int result = register_read_direct(target, value, number);
1408 if (result != ERROR_OK)
1409 return ERROR_FAIL;
1410 if (target->reg_cache) {
1411 struct reg *reg = &target->reg_cache->reg_list[number];
1412 buf_set_u64(reg->value, 0, reg->size, *value);
1413 }
1414 return ERROR_OK;
1415 }
1416
/** Actually read registers from the target right now. */
static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
{
	/* First try an abstract "access register" command. */
	int result = register_read_abstract(target, value, number,
			register_size(target, number));

	/* Fall back to the program buffer for non-GPR registers when the
	 * abstract command failed and enough progbuf space is available. */
	if (result != ERROR_OK &&
			has_sufficient_progbuf(target, 2) &&
			number > GDB_REGNO_XPR31) {
		struct riscv_program program;
		riscv_program_init(&program, target);

		scratch_mem_t scratch;
		bool use_scratch = false;

		/* S0 is clobbered as a staging register; save it for restore. */
		riscv_reg_t s0;
		if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
			return ERROR_FAIL;

		/* Write program to move data into s0. */

		/* Enable mstatus.FS/VS if an FPU/vector register is accessed. */
		uint64_t mstatus;
		if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			if (riscv_supports_extension(target, 'D')
					&& riscv_xlen(target) < 64) {
				/* There are no instructions to move all the bits from a
				 * register, so we need to use some scratch RAM. */
				riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
						0));

				if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
					return ERROR_FAIL;
				use_scratch = true;

				/* Point S0 at the scratch memory the fsd will store to. */
				if (register_write_direct(target, GDB_REGNO_S0,
							scratch.hart_address) != ERROR_OK) {
					scratch_release(target, &scratch);
					return ERROR_FAIL;
				}
			} else if (riscv_supports_extension(target, 'D')) {
				riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
			} else {
				riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
			}
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrr(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register: %s", gdb_regno_name(number));
			return ERROR_FAIL;
		}

		/* Execute program. */
		result = riscv_program_exec(&program, target);
		/* Don't message on error. Probably the register doesn't exist. */

		if (use_scratch) {
			/* The 64-bit FPR was stored to scratch RAM; fetch it from there. */
			result = scratch_read64(target, &scratch, value);
			scratch_release(target, &scratch);
			if (result != ERROR_OK)
				return result;
		} else {
			/* Read S0 */
			if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
		}

		/* Undo any mstatus change made by prep_for_register_access(). */
		if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		/* Restore S0. */
		if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
			return ERROR_FAIL;
	}

	if (result == ERROR_OK) {
		LOG_DEBUG("{%d} %s = 0x%" PRIx64, riscv_current_hartid(target),
				gdb_regno_name(number), *value);
	}

	return result;
}
1501
1502 static int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
1503 {
1504 time_t start = time(NULL);
1505 while (1) {
1506 uint32_t value;
1507 if (dmstatus_read(target, &value, false) != ERROR_OK)
1508 return ERROR_FAIL;
1509 if (dmstatus)
1510 *dmstatus = value;
1511 if (!get_field(value, DM_DMSTATUS_AUTHBUSY))
1512 break;
1513 if (time(NULL) - start > riscv_command_timeout_sec) {
1514 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1515 "Increase the timeout with riscv set_command_timeout_sec.",
1516 riscv_command_timeout_sec,
1517 value);
1518 return ERROR_FAIL;
1519 }
1520 }
1521
1522 return ERROR_OK;
1523 }
1524
1525 /*** OpenOCD target functions. ***/
1526
1527 static void deinit_target(struct target *target)
1528 {
1529 LOG_DEBUG("riscv_deinit_target()");
1530 riscv_info_t *info = (riscv_info_t *) target->arch_info;
1531 free(info->version_specific);
1532 /* TODO: free register arch_info */
1533 info->version_specific = NULL;
1534 }
1535
1536 static int set_haltgroup(struct target *target, bool *supported)
1537 {
1538 uint32_t write = set_field(DM_DMCS2_HGWRITE, DM_DMCS2_GROUP, target->smp);
1539 if (dmi_write(target, DM_DMCS2, write) != ERROR_OK)
1540 return ERROR_FAIL;
1541 uint32_t read;
1542 if (dmi_read(target, &read, DM_DMCS2) != ERROR_OK)
1543 return ERROR_FAIL;
1544 *supported = get_field(read, DM_DMCS2_GROUP) == (unsigned)target->smp;
1545 return ERROR_OK;
1546 }
1547
1548 static int discover_vlenb(struct target *target)
1549 {
1550 RISCV_INFO(r);
1551 riscv_reg_t vlenb;
1552
1553 if (register_read(target, &vlenb, GDB_REGNO_VLENB) != ERROR_OK) {
1554 LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
1555 target_name(target));
1556 r->vlenb = 0;
1557 return ERROR_OK;
1558 }
1559 r->vlenb = vlenb;
1560
1561 LOG_INFO("Vector support with vlenb=%d", r->vlenb);
1562
1563 return ERROR_OK;
1564 }
1565
/*
 * Examine the target: probe the DTM, reset and configure the Debug Module,
 * enumerate harts, and discover XLEN/misa (and vlenb when the V extension is
 * present) for this target's hart. Halts the hart temporarily if needed.
 */
static int examine(struct target *target)
{
	/* Don't need to select dbus, since the first thing we do is read dtmcontrol. */

	uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
	LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
	LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
	LOG_DEBUG(" idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
	LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
	LOG_DEBUG(" abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
	LOG_DEBUG(" version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
	if (dtmcontrol == 0) {
		LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
		return ERROR_FAIL;
	}
	/* Only DTM version 1 (debug spec 0.13) is supported by this driver. */
	if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
		LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
				get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
		return ERROR_FAIL;
	}

	riscv013_info_t *info = get_info(target);
	/* TODO: This won't be true if there are multiple DMs. */
	info->index = target->coreid;
	info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
	info->dtmcs_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);

	/* Reset the Debug Module. */
	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;
	if (!dm->was_reset) {
		/* Toggle dmactive low then high; done once per DM, not per hart. */
		dmi_write(target, DM_DMCONTROL, 0);
		dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
		dm->was_reset = true;
	}

	/* Write all-ones to the hartsel fields (and set hasel) to discover which
	 * bits are implemented: only implemented bits read back as 1. */
	dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_HARTSELLO |
			DM_DMCONTROL_HARTSELHI | DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_HASEL);
	uint32_t dmcontrol;
	if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
		return ERROR_FAIL;

	if (!get_field(dmcontrol, DM_DMCONTROL_DMACTIVE)) {
		LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
				dmcontrol);
		return ERROR_FAIL;
	}

	dm->hasel_supported = get_field(dmcontrol, DM_DMCONTROL_HASEL);

	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("dmstatus: 0x%08x", dmstatus);
	int dmstatus_version = get_field(dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		/* Error was already printed out in dmstatus_read(). */
		return ERROR_FAIL;
	}

	/* Count the hartsel bits that read back as 1 to compute HARTSELLEN. */
	uint32_t hartsel =
		(get_field(dmcontrol, DM_DMCONTROL_HARTSELHI) <<
		DM_DMCONTROL_HARTSELLO_LENGTH) |
		get_field(dmcontrol, DM_DMCONTROL_HARTSELLO);
	info->hartsellen = 0;
	while (hartsel & 1) {
		info->hartsellen++;
		hartsel >>= 1;
	}
	LOG_DEBUG("hartsellen=%d", info->hartsellen);

	uint32_t hartinfo;
	if (dmi_read(target, &hartinfo, DM_HARTINFO) != ERROR_OK)
		return ERROR_FAIL;

	/* Cache the data-register layout advertised by hartinfo; used later by
	 * scratch_reserve(). */
	info->datasize = get_field(hartinfo, DM_HARTINFO_DATASIZE);
	info->dataaccess = get_field(hartinfo, DM_HARTINFO_DATAACCESS);
	info->dataaddr = get_field(hartinfo, DM_HARTINFO_DATAADDR);

	if (!get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", dmstatus);
		/* If we return ERROR_FAIL here, then in a multicore setup the next
		 * core won't be examined, which means we won't set up the
		 * authentication commands for them, which means the config script
		 * needs to be a lot more complex. */
		return ERROR_OK;
	}

	if (dmi_read(target, &info->sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	/* Check that abstract data registers are accessible. */
	uint32_t abstractcs;
	if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
		return ERROR_FAIL;
	info->datacount = get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT);
	info->progbufsize = get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE);

	LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);

	RISCV_INFO(r);
	r->impebreak = get_field(dmstatus, DM_DMSTATUS_IMPEBREAK);

	if (!has_sufficient_progbuf(target, 2)) {
		LOG_WARNING("We won't be able to execute fence instructions on this "
				"target. Memory may not always appear consistent. "
				"(progbufsize=%d, impebreak=%d)", info->progbufsize,
				r->impebreak);
	}

	if (info->progbufsize < 4 && riscv_enable_virtual) {
		LOG_ERROR("set_enable_virtual is not available on this target. It "
				"requires a program buffer size of at least 4. (progbufsize=%d) "
				"Use `riscv set_enable_virtual off` to continue."
				, info->progbufsize);
	}

	/* Before doing anything else we must first enumerate the harts. */
	if (dm->hart_count < 0) {
		for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
			r->current_hartid = i;
			if (riscv013_select_current_hart(target) != ERROR_OK)
				return ERROR_FAIL;

			uint32_t s;
			if (dmstatus_read(target, &s, true) != ERROR_OK)
				return ERROR_FAIL;
			/* Stop at the first hart index that doesn't exist. */
			if (get_field(s, DM_DMSTATUS_ANYNONEXISTENT))
				break;
			dm->hart_count = i + 1;

			/* Acknowledge any pending havereset so it reads clear later. */
			if (get_field(s, DM_DMSTATUS_ANYHAVERESET))
				dmi_write(target, DM_DMCONTROL,
						set_hartsel(DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_ACKHAVERESET, i));
		}

		LOG_DEBUG("Detected %d harts.", dm->hart_count);
	}

	r->current_hartid = target->coreid;

	if (dm->hart_count == 0) {
		LOG_ERROR("No harts found!");
		return ERROR_FAIL;
	}

	/* Don't call any riscv_* functions until after we've counted the number of
	 * cores and initialized registers. */

	if (riscv013_select_current_hart(target) != ERROR_OK)
		return ERROR_FAIL;

	/* The hart must be halted to probe XLEN/misa; remember whether we need
	 * to resume it afterwards. */
	bool halted = riscv_is_halted(target);
	if (!halted) {
		if (riscv013_halt_go(target) != ERROR_OK) {
			LOG_ERROR("Fatal: Hart %d failed to halt during examine()", r->current_hartid);
			return ERROR_FAIL;
		}
	}

	/* Without knowing anything else we can at least mess with the
	 * program buffer. */
	r->debug_buffer_size = info->progbufsize;

	/* Probe XLEN: a 64-bit abstract read of S0 only succeeds on RV64. */
	int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
	if (result == ERROR_OK)
		r->xlen = 64;
	else
		r->xlen = 32;

	if (register_read(target, &r->misa, GDB_REGNO_MISA)) {
		LOG_ERROR("Fatal: Failed to read MISA from hart %d.", r->current_hartid);
		return ERROR_FAIL;
	}

	if (riscv_supports_extension(target, 'V')) {
		if (discover_vlenb(target) != ERROR_OK)
			return ERROR_FAIL;
	}

	/* Now init registers based on what we discovered. */
	if (riscv_init_registers(target) != ERROR_OK)
		return ERROR_FAIL;

	/* Display this as early as possible to help people who are using
	 * really slow simulators. */
	LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, r->current_hartid, r->xlen,
			r->misa);

	/* Resume only if the hart was running when examine() started. */
	if (!halted)
		riscv013_step_or_resume_current_hart(target, false, false);

	target_set_examined(target);

	if (target->smp) {
		bool haltgroup_supported;
		if (set_haltgroup(target, &haltgroup_supported) != ERROR_OK)
			return ERROR_FAIL;
		if (haltgroup_supported)
			LOG_INFO("Core %d made part of halt group %d.", target->coreid,
					target->smp);
		else
			LOG_INFO("Core %d could not be made part of halt group %d.",
					target->coreid, target->smp);
	}

	/* Some regression suites rely on seeing 'Examined RISC-V core' to know
	 * when they can connect with gdb/telnet.
	 * We will need to update those suites if we want to change that text. */
	LOG_INFO("Examined RISC-V core; found %d harts",
			riscv_count_harts(target));
	LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, r->current_hartid, r->xlen,
			r->misa);
	return ERROR_OK;
}
1785
1786 static int riscv013_authdata_read(struct target *target, uint32_t *value, unsigned int index)
1787 {
1788 if (index > 0) {
1789 LOG_ERROR("Spec 0.13 only has a single authdata register.");
1790 return ERROR_FAIL;
1791 }
1792
1793 if (wait_for_authbusy(target, NULL) != ERROR_OK)
1794 return ERROR_FAIL;
1795
1796 return dmi_read(target, value, DM_AUTHDATA);
1797 }
1798
1799 static int riscv013_authdata_write(struct target *target, uint32_t value, unsigned int index)
1800 {
1801 if (index > 0) {
1802 LOG_ERROR("Spec 0.13 only has a single authdata register.");
1803 return ERROR_FAIL;
1804 }
1805
1806 uint32_t before, after;
1807 if (wait_for_authbusy(target, &before) != ERROR_OK)
1808 return ERROR_FAIL;
1809
1810 dmi_write(target, DM_AUTHDATA, value);
1811
1812 if (wait_for_authbusy(target, &after) != ERROR_OK)
1813 return ERROR_FAIL;
1814
1815 if (!get_field(before, DM_DMSTATUS_AUTHENTICATED) &&
1816 get_field(after, DM_DMSTATUS_AUTHENTICATED)) {
1817 LOG_INFO("authdata_write resulted in successful authentication");
1818 int result = ERROR_OK;
1819 dm013_info_t *dm = get_dm(target);
1820 if (!dm)
1821 return ERROR_FAIL;
1822 target_list_t *entry;
1823 list_for_each_entry(entry, &dm->target_list, list) {
1824 if (examine(entry->target) != ERROR_OK)
1825 result = ERROR_FAIL;
1826 }
1827 return result;
1828 }
1829
1830 return ERROR_OK;
1831 }
1832
1833 static int riscv013_hart_count(struct target *target)
1834 {
1835 dm013_info_t *dm = get_dm(target);
1836 assert(dm);
1837 return dm->hart_count;
1838 }
1839
1840 /* Try to find out the widest memory access size depending on the selected memory access methods. */
1841 static unsigned riscv013_data_bits(struct target *target)
1842 {
1843 RISCV013_INFO(info);
1844 RISCV_INFO(r);
1845
1846 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
1847 int method = r->mem_access_methods[i];
1848
1849 if (method == RISCV_MEM_ACCESS_PROGBUF) {
1850 if (has_sufficient_progbuf(target, 3))
1851 return riscv_xlen(target);
1852 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
1853 if (get_field(info->sbcs, DM_SBCS_SBACCESS128))
1854 return 128;
1855 if (get_field(info->sbcs, DM_SBCS_SBACCESS64))
1856 return 64;
1857 if (get_field(info->sbcs, DM_SBCS_SBACCESS32))
1858 return 32;
1859 if (get_field(info->sbcs, DM_SBCS_SBACCESS16))
1860 return 16;
1861 if (get_field(info->sbcs, DM_SBCS_SBACCESS8))
1862 return 8;
1863 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
1864 /* TODO: Once there is a spec for discovering abstract commands, we can
1865 * take those into account as well. For now we assume abstract commands
1866 * support XLEN-wide accesses. */
1867 return riscv_xlen(target);
1868 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
1869 /* No further mem access method to try. */
1870 break;
1871 }
1872 LOG_ERROR("Unable to determine supported data bits on this target. Assuming 32 bits.");
1873 return 32;
1874 }
1875
/* Print DM capabilities (mostly system-bus access widths from sbcs) for the
 * `riscv info` command. Always returns 0. */
COMMAND_HELPER(riscv013_print_info, struct target *target)
{
	RISCV013_INFO(info);

	/* Abstract description. */
	riscv_print_info_line(CMD, "target", "memory.read_while_running8", get_field(info->sbcs, DM_SBCS_SBACCESS8));
	riscv_print_info_line(CMD, "target", "memory.write_while_running8", get_field(info->sbcs, DM_SBCS_SBACCESS8));
	riscv_print_info_line(CMD, "target", "memory.read_while_running16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "target", "memory.write_while_running16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "target", "memory.read_while_running32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "target", "memory.write_while_running32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "target", "memory.read_while_running64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "target", "memory.write_while_running64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "target", "memory.read_while_running128", get_field(info->sbcs, DM_SBCS_SBACCESS128));
	riscv_print_info_line(CMD, "target", "memory.write_while_running128", get_field(info->sbcs, DM_SBCS_SBACCESS128));

	/* Lower level description. */
	riscv_print_info_line(CMD, "dm", "abits", info->abits);
	riscv_print_info_line(CMD, "dm", "progbufsize", info->progbufsize);
	riscv_print_info_line(CMD, "dm", "sbversion", get_field(info->sbcs, DM_SBCS_SBVERSION));
	riscv_print_info_line(CMD, "dm", "sbasize", get_field(info->sbcs, DM_SBCS_SBASIZE));
	riscv_print_info_line(CMD, "dm", "sbaccess128", get_field(info->sbcs, DM_SBCS_SBACCESS128));
	riscv_print_info_line(CMD, "dm", "sbaccess64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "dm", "sbaccess32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "dm", "sbaccess16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "dm", "sbaccess8", get_field(info->sbcs, DM_SBCS_SBACCESS8));

	/* Authentication status is best-effort: skipped if dmstatus is unreadable. */
	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) == ERROR_OK)
		riscv_print_info_line(CMD, "dm", "authenticated", get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED));

	return 0;
}
1909
1910 static int prep_for_vector_access(struct target *target, uint64_t *vtype,
1911 uint64_t *vl, unsigned *debug_vl)
1912 {
1913 RISCV_INFO(r);
1914 /* TODO: this continuous save/restore is terrible for performance. */
1915 /* Write vtype and vl. */
1916 unsigned encoded_vsew;
1917 switch (riscv_xlen(target)) {
1918 case 32:
1919 encoded_vsew = 2;
1920 break;
1921 case 64:
1922 encoded_vsew = 3;
1923 break;
1924 default:
1925 LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target));
1926 return ERROR_FAIL;
1927 }
1928
1929 /* Save vtype and vl. */
1930 if (register_read(target, vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1931 return ERROR_FAIL;
1932 if (register_read(target, vl, GDB_REGNO_VL) != ERROR_OK)
1933 return ERROR_FAIL;
1934
1935 if (register_write_direct(target, GDB_REGNO_VTYPE, encoded_vsew << 3) != ERROR_OK)
1936 return ERROR_FAIL;
1937 *debug_vl = DIV_ROUND_UP(r->vlenb * 8, riscv_xlen(target));
1938 if (register_write_direct(target, GDB_REGNO_VL, *debug_vl) != ERROR_OK)
1939 return ERROR_FAIL;
1940
1941 return ERROR_OK;
1942 }
1943
1944 static int cleanup_after_vector_access(struct target *target, uint64_t vtype,
1945 uint64_t vl)
1946 {
1947 /* Restore vtype and vl. */
1948 if (register_write_direct(target, GDB_REGNO_VTYPE, vtype) != ERROR_OK)
1949 return ERROR_FAIL;
1950 if (register_write_direct(target, GDB_REGNO_VL, vl) != ERROR_OK)
1951 return ERROR_FAIL;
1952 return ERROR_OK;
1953 }
1954
/* Read a vector register (V0..V31) into `value`, least-significant chunk
 * first. The register is extracted one XLEN-sized element at a time by
 * round-tripping it through S0. S0, mstatus and vtype/vl are saved and
 * restored around the access. */
static int riscv013_get_register_buf(struct target *target,
		uint8_t *value, int regno)
{
	/* Only vector registers go through this path. */
	assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);

	if (riscv_select_current_hart(target) != ERROR_OK)
		return ERROR_FAIL;

	/* S0 is used as the staging register; save it for later restore. */
	riscv_reg_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;

	/* Program vtype/vl so one vector element is exactly XLEN bits wide;
	 * debug_vl is the number of such elements per register. */
	uint64_t vtype, vl;
	unsigned debug_vl;
	if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
		return ERROR_FAIL;

	unsigned vnum = regno - GDB_REGNO_V0;
	unsigned xlen = riscv_xlen(target);

	/* vmv.x.s copies element 0 of the vector register into S0, then
	 * vslide1down.vx shifts the register down one element, re-inserting S0
	 * at the top — so after debug_vl executions the register contents have
	 * rotated back to their original state. */
	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, vmv_x_s(S0, vnum));
	riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));

	int result = ERROR_OK;
	for (unsigned i = 0; i < debug_vl; i++) {
		/* Executing the program might result in an exception if there is some
		 * issue with the vector implementation/instructions we're using. If that
		 * happens, attempt to restore as usual. We may have clobbered the
		 * vector register we tried to read already.
		 * For other failures, we just return error because things are probably
		 * so messed up that attempting to restore isn't going to help. */
		result = riscv_program_exec(&program, target);
		if (result == ERROR_OK) {
			uint64_t v;
			if (register_read_direct(target, &v, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u64(value, xlen * i, xlen, v);
		} else {
			break;
		}
	}

	/* Restore everything we touched, even after a partial failure. */
	if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
		return ERROR_FAIL;

	if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return result;
}
2013
/* Write a vector register (V0..V31) from `value`, least-significant chunk
 * first, by feeding it through S0 one XLEN-sized element at a time. S0,
 * mstatus and vtype/vl are saved and restored around the access. */
static int riscv013_set_register_buf(struct target *target,
		int regno, const uint8_t *value)
{
	/* Only vector registers go through this path. */
	assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);

	if (riscv_select_current_hart(target) != ERROR_OK)
		return ERROR_FAIL;

	/* S0 is used as the staging register; save it for later restore. */
	riscv_reg_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;

	/* Program vtype/vl so one vector element is exactly XLEN bits wide;
	 * debug_vl is the number of such elements per register. */
	uint64_t vtype, vl;
	unsigned debug_vl;
	if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
		return ERROR_FAIL;

	unsigned vnum = regno - GDB_REGNO_V0;
	unsigned xlen = riscv_xlen(target);

	/* Each execution of vslide1down.vx shifts the register down one element
	 * and inserts S0 at the top, so after debug_vl iterations the whole
	 * register has been replaced with the new contents. */
	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
	int result = ERROR_OK;
	for (unsigned i = 0; i < debug_vl; i++) {
		if (register_write_direct(target, GDB_REGNO_S0,
					buf_get_u64(value, xlen * i, xlen)) != ERROR_OK)
			return ERROR_FAIL;
		result = riscv_program_exec(&program, target);
		if (result != ERROR_OK)
			break;
	}

	/* Restore everything we touched, even after a partial failure. */
	if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
		return ERROR_FAIL;

	if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return result;
}
2061
2062 static uint32_t sb_sbaccess(unsigned int size_bytes)
2063 {
2064 switch (size_bytes) {
2065 case 1:
2066 return set_field(0, DM_SBCS_SBACCESS, 0);
2067 case 2:
2068 return set_field(0, DM_SBCS_SBACCESS, 1);
2069 case 4:
2070 return set_field(0, DM_SBCS_SBACCESS, 2);
2071 case 8:
2072 return set_field(0, DM_SBCS_SBACCESS, 3);
2073 case 16:
2074 return set_field(0, DM_SBCS_SBACCESS, 4);
2075 }
2076 assert(0);
2077 return 0;
2078 }
2079
/* Write `address` into the system bus address registers. The upper words
 * (sbaddress2/3) are written as zero since OpenOCD only handles 64-bit
 * addresses. sbaddress0 is written last: with sbreadonaddr set, that write
 * is what triggers the bus access (see read_memory_bus_v1()).
 * `ensure_success` is passed through to dmi_op for the final write. */
static int sb_write_address(struct target *target, target_addr_t address,
		bool ensure_success)
{
	RISCV013_INFO(info);
	unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
	/* There currently is no support for >64-bit addresses in OpenOCD. */
	if (sbasize > 96)
		dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS3, 0, false, false);
	if (sbasize > 64)
		dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS2, 0, false, false);
	if (sbasize > 32)
		dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS1, address >> 32, false, false);
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS0, address,
			false, ensure_success);
}
2095
2096 static int batch_run(const struct target *target, struct riscv_batch *batch)
2097 {
2098 RISCV013_INFO(info);
2099 RISCV_INFO(r);
2100 if (r->reset_delays_wait >= 0) {
2101 r->reset_delays_wait -= batch->used_scans;
2102 if (r->reset_delays_wait <= 0) {
2103 batch->idle_count = 0;
2104 info->dmi_busy_delay = 0;
2105 info->ac_busy_delay = 0;
2106 }
2107 }
2108 return riscv_batch_run(batch);
2109 }
2110
2111 static int sba_supports_access(struct target *target, unsigned int size_bytes)
2112 {
2113 RISCV013_INFO(info);
2114 switch (size_bytes) {
2115 case 1:
2116 return get_field(info->sbcs, DM_SBCS_SBACCESS8);
2117 case 2:
2118 return get_field(info->sbcs, DM_SBCS_SBACCESS16);
2119 case 4:
2120 return get_field(info->sbcs, DM_SBCS_SBACCESS32);
2121 case 8:
2122 return get_field(info->sbcs, DM_SBCS_SBACCESS64);
2123 case 16:
2124 return get_field(info->sbcs, DM_SBCS_SBACCESS128);
2125 default:
2126 return 0;
2127 }
2128 }
2129
2130 static int sample_memory_bus_v1(struct target *target,
2131 struct riscv_sample_buf *buf,
2132 const riscv_sample_config_t *config,
2133 int64_t until_ms)
2134 {
2135 RISCV013_INFO(info);
2136 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2137 if (sbasize > 64) {
2138 LOG_ERROR("Memory sampling is only implemented for sbasize <= 64.");
2139 return ERROR_NOT_IMPLEMENTED;
2140 }
2141
2142 if (get_field(info->sbcs, DM_SBCS_SBVERSION) != 1) {
2143 LOG_ERROR("Memory sampling is only implemented for SBA version 1.");
2144 return ERROR_NOT_IMPLEMENTED;
2145 }
2146
2147 uint32_t sbcs = 0;
2148 uint32_t sbcs_valid = false;
2149
2150 uint32_t sbaddress0 = 0;
2151 bool sbaddress0_valid = false;
2152 uint32_t sbaddress1 = 0;
2153 bool sbaddress1_valid = false;
2154
2155 /* How often to read each value in a batch. */
2156 const unsigned int repeat = 5;
2157
2158 unsigned int enabled_count = 0;
2159 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2160 if (config->bucket[i].enabled)
2161 enabled_count++;
2162 }
2163
2164 while (timeval_ms() < until_ms) {
2165 /*
2166 * batch_run() adds to the batch, so we can't simply reuse the same
2167 * batch over and over. So we create a new one every time through the
2168 * loop.
2169 */
2170 struct riscv_batch *batch = riscv_batch_alloc(
2171 target, 1 + enabled_count * 5 * repeat,
2172 info->dmi_busy_delay + info->bus_master_read_delay);
2173 if (!batch)
2174 return ERROR_FAIL;
2175
2176 unsigned int result_bytes = 0;
2177 for (unsigned int n = 0; n < repeat; n++) {
2178 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2179 if (config->bucket[i].enabled) {
2180 if (!sba_supports_access(target, config->bucket[i].size_bytes)) {
2181 LOG_ERROR("Hardware does not support SBA access for %d-byte memory sampling.",
2182 config->bucket[i].size_bytes);
2183 return ERROR_NOT_IMPLEMENTED;
2184 }
2185
2186 uint32_t sbcs_write = DM_SBCS_SBREADONADDR;
2187 if (enabled_count == 1)
2188 sbcs_write |= DM_SBCS_SBREADONDATA;
2189 sbcs_write |= sb_sbaccess(config->bucket[i].size_bytes);
2190 if (!sbcs_valid || sbcs_write != sbcs) {
2191 riscv_batch_add_dmi_write(batch, DM_SBCS, sbcs_write);
2192 sbcs = sbcs_write;
2193 sbcs_valid = true;
2194 }
2195
2196 if (sbasize > 32 &&
2197 (!sbaddress1_valid ||
2198 sbaddress1 != config->bucket[i].address >> 32)) {
2199 sbaddress1 = config->bucket[i].address >> 32;
2200 riscv_batch_add_dmi_write(batch, DM_SBADDRESS1, sbaddress1);
2201 sbaddress1_valid = true;
2202 }
2203 if (!sbaddress0_valid ||
2204 sbaddress0 != (config->bucket[i].address & 0xffffffff)) {
2205 sbaddress0 = config->bucket[i].address;
2206 riscv_batch_add_dmi_write(batch, DM_SBADDRESS0, sbaddress0);
2207 sbaddress0_valid = true;
2208 }
2209 if (config->bucket[i].size_bytes > 4)
2210 riscv_batch_add_dmi_read(batch, DM_SBDATA1);
2211 riscv_batch_add_dmi_read(batch, DM_SBDATA0);
2212 result_bytes += 1 + config->bucket[i].size_bytes;
2213 }
2214 }
2215 }
2216
2217 if (buf->used + result_bytes >= buf->size) {
2218 riscv_batch_free(batch);
2219 break;
2220 }
2221
2222 size_t sbcs_key = riscv_batch_add_dmi_read(batch, DM_SBCS);
2223
2224 int result = batch_run(target, batch);
2225 if (result != ERROR_OK)
2226 return result;
2227
2228 uint32_t sbcs_read = riscv_batch_get_dmi_read_data(batch, sbcs_key);
2229 if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2230 /* Discard this batch (too much hassle to try to recover partial
2231 * data) and try again with a larger delay. */
2232 info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
2233 dmi_write(target, DM_SBCS, sbcs_read | DM_SBCS_SBBUSYERROR | DM_SBCS_SBERROR);
2234 riscv_batch_free(batch);
2235 continue;
2236 }
2237 if (get_field(sbcs_read, DM_SBCS_SBERROR)) {
2238 /* The memory we're sampling was unreadable, somehow. Give up. */
2239 dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR | DM_SBCS_SBERROR);
2240 riscv_batch_free(batch);
2241 return ERROR_FAIL;
2242 }
2243
2244 unsigned int read = 0;
2245 for (unsigned int n = 0; n < repeat; n++) {
2246 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2247 if (config->bucket[i].enabled) {
2248 assert(i < RISCV_SAMPLE_BUF_TIMESTAMP_BEFORE);
2249 uint64_t value = 0;
2250 if (config->bucket[i].size_bytes > 4)
2251 value = ((uint64_t)riscv_batch_get_dmi_read_data(batch, read++)) << 32;
2252 value |= riscv_batch_get_dmi_read_data(batch, read++);
2253
2254 buf->buf[buf->used] = i;
2255 buf_set_u64(buf->buf + buf->used + 1, 0, config->bucket[i].size_bytes * 8, value);
2256 buf->used += 1 + config->bucket[i].size_bytes;
2257 }
2258 }
2259 }
2260
2261 riscv_batch_free(batch);
2262 }
2263
2264 return ERROR_OK;
2265 }
2266
2267 static int sample_memory(struct target *target,
2268 struct riscv_sample_buf *buf,
2269 riscv_sample_config_t *config,
2270 int64_t until_ms)
2271 {
2272 if (!config->enabled)
2273 return ERROR_OK;
2274
2275 return sample_memory_bus_v1(target, buf, config, until_ms);
2276 }
2277
/* target_type init_target callback: install the debug-spec-0.13
 * implementations into the generic riscv_info_t method table and allocate
 * and initialize the version-specific state. */
static int init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	LOG_DEBUG("init");
	RISCV_INFO(generic_info);

	/* Hook up the 0.13-specific implementations of the generic operations. */
	generic_info->get_register = &riscv013_get_register;
	generic_info->set_register = &riscv013_set_register;
	generic_info->get_register_buf = &riscv013_get_register_buf;
	generic_info->set_register_buf = &riscv013_set_register_buf;
	generic_info->select_current_hart = &riscv013_select_current_hart;
	generic_info->is_halted = &riscv013_is_halted;
	generic_info->resume_go = &riscv013_resume_go;
	generic_info->step_current_hart = &riscv013_step_current_hart;
	generic_info->on_halt = &riscv013_on_halt;
	generic_info->resume_prep = &riscv013_resume_prep;
	generic_info->halt_prep = &riscv013_halt_prep;
	generic_info->halt_go = &riscv013_halt_go;
	generic_info->on_step = &riscv013_on_step;
	generic_info->halt_reason = &riscv013_halt_reason;
	generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
	generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
	generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
	generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
	generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
	generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
	generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
	generic_info->authdata_read = &riscv013_authdata_read;
	generic_info->authdata_write = &riscv013_authdata_write;
	generic_info->dmi_read = &dmi_read;
	generic_info->dmi_write = &dmi_write;
	generic_info->read_memory = read_memory;
	generic_info->test_sba_config_reg = &riscv013_test_sba_config_reg;
	generic_info->hart_count = &riscv013_hart_count;
	generic_info->data_bits = &riscv013_data_bits;
	generic_info->print_info = &riscv013_print_info;
	generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
	if (!generic_info->version_specific)
		return ERROR_FAIL;
	generic_info->sample_memory = sample_memory;
	riscv013_info_t *info = get_info(target);

	/* Sentinel: the real size is discovered later (presumably during
	 * examine — not visible in this file chunk). */
	info->progbufsize = -1;

	/* Learned busy delays start at zero and grow as busy responses occur. */
	info->dmi_busy_delay = 0;
	info->bus_master_read_delay = 0;
	info->bus_master_write_delay = 0;
	info->ac_busy_delay = 0;

	/* Assume all these abstract commands are supported until we learn
	 * otherwise.
	 * TODO: The spec allows eg. one CSR to be able to be accessed abstractly
	 * while another one isn't. We don't track that this closely here, but in
	 * the future we probably should. */
	info->abstract_read_csr_supported = true;
	info->abstract_write_csr_supported = true;
	info->abstract_read_fpr_supported = true;
	info->abstract_write_fpr_supported = true;

	info->has_aampostincrement = YNM_MAYBE;

	return ERROR_OK;
}
2341
/* target_type assert_reset callback: put the hart(s) into reset via
 * dmcontrol.ndmreset, optionally requesting a halt-on-reset. */
static int assert_reset(struct target *target)
{
	RISCV_INFO(r);

	select_dmi(target);

	/* Every dmcontrol write keeps dmactive set so the DM stays active. */
	uint32_t control_base = set_field(0, DM_DMCONTROL_DMACTIVE, 1);

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* Run the user-supplied script if there is one. */
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	} else if (target->rtos) {
		/* There's only one target, and OpenOCD thinks each hart is a thread.
		 * We must reset them all. */

		/* TODO: Try to use hasel in dmcontrol */

		/* Set haltreq for each hart. */
		uint32_t control = control_base;

		control = set_hartsel(control_base, target->coreid);
		control = set_field(control, DM_DMCONTROL_HALTREQ,
				target->reset_halt ? 1 : 0);
		dmi_write(target, DM_DMCONTROL, control);

		/* Assert ndmreset */
		control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DM_DMCONTROL, control);

	} else {
		/* Reset just this hart: haltreq and ndmreset in a single write. */
		uint32_t control = set_hartsel(control_base, r->current_hartid);
		control = set_field(control, DM_DMCONTROL_HALTREQ,
				target->reset_halt ? 1 : 0);
		control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DM_DMCONTROL, control);
	}

	target->state = TARGET_RESET;

	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;

	/* The DM might have gotten reset if OpenOCD called us in some reset that
	 * involves SRST being toggled. So clear our cache which may be out of
	 * date. */
	memset(dm->progbuf_cache, 0, sizeof(dm->progbuf_cache));

	return ERROR_OK;
}
2393
/* target_type deassert_reset callback: release ndmreset and wait for the
 * relevant hart(s) to leave the "unavailable" state, then acknowledge
 * havereset. */
static int deassert_reset(struct target *target)
{
	RISCV_INFO(r);
	RISCV013_INFO(info);
	select_dmi(target);

	/* Clear the reset, but make sure haltreq is still set */
	uint32_t control = 0;
	control = set_field(control, DM_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
	control = set_field(control, DM_DMCONTROL_DMACTIVE, 1);
	dmi_write(target, DM_DMCONTROL,
			set_hartsel(control, r->current_hartid));

	uint32_t dmstatus;
	/* Snapshot the busy delay; it is restored at the end so any increases
	 * accumulated while polling through reset don't stick (presumably
	 * because busy responses during reset aren't representative). */
	int dmi_busy_delay = info->dmi_busy_delay;
	time_t start = time(NULL);

	for (int i = 0; i < riscv_count_harts(target); ++i) {
		int index = i;
		if (target->rtos) {
			/* RTOS mode: only wait for this target's own hart. */
			if (index != target->coreid)
				continue;
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control, index));
		} else {
			index = r->current_hartid;
		}

		LOG_DEBUG("Waiting for hart %d to come out of reset.", index);
		while (1) {
			int result = dmstatus_read_timeout(target, &dmstatus, true,
					riscv_reset_timeout_sec);
			if (result == ERROR_TIMEOUT_REACHED)
				LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
						"reset in %ds; Increase the timeout with riscv "
						"set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec);
			if (result != ERROR_OK)
				return result;
			/* Certain debug modules, like the one in GD32VF103
			 * MCUs, violate the specification's requirement that
			 * each hart is in "exactly one of four states" and,
			 * during reset, report harts as both unavailable and
			 * halted/running. To work around this, we check for
			 * the absence of the unavailable state rather than
			 * the presence of any other state. */
			if (!get_field(dmstatus, DM_DMSTATUS_ALLUNAVAIL))
				break;
			if (time(NULL) - start > riscv_reset_timeout_sec) {
				LOG_ERROR("Hart %d didn't leave reset in %ds; "
						"dmstatus=0x%x; "
						"Increase the timeout with riscv set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec, dmstatus);
				return ERROR_FAIL;
			}
		}
		target->state = TARGET_HALTED;

		if (get_field(dmstatus, DM_DMSTATUS_ALLHAVERESET)) {
			/* Ack reset. */
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control, index) |
					DM_DMCONTROL_ACKHAVERESET);
		}

		if (!target->rtos)
			break;
	}
	info->dmi_busy_delay = dmi_busy_delay;
	return ERROR_OK;
}
2465
2466 static int execute_fence(struct target *target)
2467 {
2468 /* FIXME: For non-coherent systems we need to flush the caches right
2469 * here, but there's no ISA-defined way of doing that. */
2470 {
2471 struct riscv_program program;
2472 riscv_program_init(&program, target);
2473 riscv_program_fence_i(&program);
2474 riscv_program_fence(&program);
2475 int result = riscv_program_exec(&program, target);
2476 if (result != ERROR_OK)
2477 LOG_DEBUG("Unable to execute pre-fence");
2478 }
2479
2480 return ERROR_OK;
2481 }
2482
2483 static void log_memory_access(target_addr_t address, uint64_t value,
2484 unsigned size_bytes, bool read)
2485 {
2486 if (debug_level < LOG_LVL_DEBUG)
2487 return;
2488
2489 char fmt[80];
2490 sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
2491 address, read ? "read" : "write", size_bytes * 2);
2492 switch (size_bytes) {
2493 case 1:
2494 value &= 0xff;
2495 break;
2496 case 2:
2497 value &= 0xffff;
2498 break;
2499 case 4:
2500 value &= 0xffffffffUL;
2501 break;
2502 case 8:
2503 break;
2504 default:
2505 assert(false);
2506 }
2507 LOG_DEBUG(fmt, value);
2508 }
2509
2510 /* Read the relevant sbdata regs depending on size, and put the results into
2511 * buffer. */
2512 static int read_memory_bus_word(struct target *target, target_addr_t address,
2513 uint32_t size, uint8_t *buffer)
2514 {
2515 uint32_t value;
2516 int result;
2517 static int sbdata[4] = { DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3 };
2518 assert(size <= 16);
2519 for (int i = (size - 1) / 4; i >= 0; i--) {
2520 result = dmi_op(target, &value, NULL, DMI_OP_READ, sbdata[i], 0, false, true);
2521 if (result != ERROR_OK)
2522 return result;
2523 buf_set_u32(buffer + i * 4, 0, 8 * MIN(size, 4), value);
2524 log_memory_access(address + i * 4, value, MIN(size, 4), true);
2525 }
2526 return ERROR_OK;
2527 }
2528
2529 static target_addr_t sb_read_address(struct target *target)
2530 {
2531 RISCV013_INFO(info);
2532 unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2533 target_addr_t address = 0;
2534 uint32_t v;
2535 if (sbasize > 32) {
2536 dmi_read(target, &v, DM_SBADDRESS1);
2537 address |= v;
2538 address <<= 32;
2539 }
2540 dmi_read(target, &v, DM_SBADDRESS0);
2541 address |= v;
2542 return address;
2543 }
2544
2545 static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
2546 {
2547 time_t start = time(NULL);
2548 while (1) {
2549 if (dmi_read(target, sbcs, DM_SBCS) != ERROR_OK)
2550 return ERROR_FAIL;
2551 if (!get_field(*sbcs, DM_SBCS_SBBUSY))
2552 return ERROR_OK;
2553 if (time(NULL) - start > riscv_command_timeout_sec) {
2554 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
2555 "Increase the timeout with riscv set_command_timeout_sec.",
2556 riscv_command_timeout_sec, *sbcs);
2557 return ERROR_FAIL;
2558 }
2559 }
2560 }
2561
2562 static int modify_privilege(struct target *target, uint64_t *mstatus, uint64_t *mstatus_old)
2563 {
2564 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5)) {
2565 /* Read DCSR */
2566 uint64_t dcsr;
2567 if (register_read(target, &dcsr, GDB_REGNO_DCSR) != ERROR_OK)
2568 return ERROR_FAIL;
2569
2570 /* Read and save MSTATUS */
2571 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
2572 return ERROR_FAIL;
2573 *mstatus_old = *mstatus;
2574
2575 /* If we come from m-mode with mprv set, we want to keep mpp */
2576 if (get_field(dcsr, DCSR_PRV) < 3) {
2577 /* MPP = PRIV */
2578 *mstatus = set_field(*mstatus, MSTATUS_MPP, get_field(dcsr, DCSR_PRV));
2579
2580 /* MPRV = 1 */
2581 *mstatus = set_field(*mstatus, MSTATUS_MPRV, 1);
2582
2583 /* Write MSTATUS */
2584 if (*mstatus != *mstatus_old)
2585 if (register_write_direct(target, GDB_REGNO_MSTATUS, *mstatus) != ERROR_OK)
2586 return ERROR_FAIL;
2587 }
2588 }
2589
2590 return ERROR_OK;
2591 }
2592
/* Read memory through the legacy (pre-ratification, "v0") system bus
 * interface. Only supports increment == size. Single reads use
 * sbsingleread; multi-word reads additionally use sbautoread and
 * sbautoincrement so each sbdata0 read fetches the next word. */
static int read_memory_bus_v0(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (size != increment) {
		LOG_ERROR("sba v0 reads only support size==increment");
		return ERROR_NOT_IMPLEMENTED;
	}

	LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
			TARGET_PRIxADDR, size, count, address);
	uint8_t *t_buffer = buffer;
	riscv_addr_t cur_addr = address;
	riscv_addr_t fin_addr = address + (count * size);
	uint32_t access = 0;

	/* v0-only sbcs fields; these were dropped from the ratified register
	 * layout, so they are defined locally rather than in debug_defines. */
	const int DM_SBCS_SBSINGLEREAD_OFFSET = 20;
	const uint32_t DM_SBCS_SBSINGLEREAD = (0x1U << DM_SBCS_SBSINGLEREAD_OFFSET);

	const int DM_SBCS_SBAUTOREAD_OFFSET = 15;
	const uint32_t DM_SBCS_SBAUTOREAD = (0x1U << DM_SBCS_SBAUTOREAD_OFFSET);

	/* Favor plain one-off reads for single words; only block reads use the
	 * autoread/autoincrement machinery below. */
	if (count == 1) {
		for (uint32_t i = 0; i < count; i++) {
			if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
				return ERROR_FAIL;
			dmi_write(target, DM_SBADDRESS0, cur_addr);
			/* size/2 matching the bit access of the spec 0.13 */
			access = set_field(access, DM_SBCS_SBACCESS, size/2);
			access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
			LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
			dmi_write(target, DM_SBCS, access);
			/* 3) read */
			uint32_t value;
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
			buf_set_u32(t_buffer, 0, 8 * size, value);
			t_buffer += size;
			cur_addr += size;
		}
		return ERROR_OK;
	}

	/* has to be the same size if we want to read a block */
	LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
	if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;
	/* set current address */
	dmi_write(target, DM_SBADDRESS0, cur_addr);
	/* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
	 * size/2 matching the bit access of the spec 0.13 */
	access = set_field(access, DM_SBCS_SBACCESS, size/2);
	access = set_field(access, DM_SBCS_SBAUTOREAD, 1);
	access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
	access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
	LOG_DEBUG("\r\naccess: 0x%08x", access);
	dmi_write(target, DM_SBCS, access);

	while (cur_addr < fin_addr) {
		LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
				PRIx64, size, count, cur_addr);
		/* read; with autoread set this also triggers the next bus access */
		uint32_t value;
		if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
			return ERROR_FAIL;
		buf_set_u32(t_buffer, 0, 8 * size, value);
		cur_addr += size;
		t_buffer += size;

		/* if we are reaching last address, we must clear autoread */
		if (cur_addr == fin_addr && count != 1) {
			dmi_write(target, DM_SBCS, 0);
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u32(t_buffer, 0, 8 * size, value);
		}
	}

	/* Final sbcs read; the result is not checked here. */
	uint32_t sbcs;
	if (dmi_read(target, &sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	return ERROR_OK;
}
2678
2679 /**
2680 * Read the requested memory using the system bus interface.
2681 */
2682 static int read_memory_bus_v1(struct target *target, target_addr_t address,
2683 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2684 {
2685 if (increment != size && increment != 0) {
2686 LOG_ERROR("sba v1 reads only support increment of size or 0");
2687 return ERROR_NOT_IMPLEMENTED;
2688 }
2689
2690 RISCV013_INFO(info);
2691 target_addr_t next_address = address;
2692 target_addr_t end_address = address + count * size;
2693
2694 while (next_address < end_address) {
2695 uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
2696 sbcs_write |= sb_sbaccess(size);
2697 if (increment == size)
2698 sbcs_write = set_field(sbcs_write, DM_SBCS_SBAUTOINCREMENT, 1);
2699 if (count > 1)
2700 sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, count > 1);
2701 if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
2702 return ERROR_FAIL;
2703
2704 /* This address write will trigger the first read. */
2705 if (sb_write_address(target, next_address, true) != ERROR_OK)
2706 return ERROR_FAIL;
2707
2708 if (info->bus_master_read_delay) {
2709 jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
2710 if (jtag_execute_queue() != ERROR_OK) {
2711 LOG_ERROR("Failed to scan idle sequence");
2712 return ERROR_FAIL;
2713 }
2714 }
2715
2716 /* First value has been read, and is waiting for us to issue a DMI read
2717 * to get it. */
2718
2719 static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
2720 assert(size <= 16);
2721 target_addr_t next_read = address - 1;
2722 for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
2723 for (int j = (size - 1) / 4; j >= 0; j--) {
2724 uint32_t value;
2725 unsigned attempt = 0;
2726 while (1) {
2727 if (attempt++ > 100) {
2728 LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
2729 next_read);
2730 return ERROR_FAIL;
2731 }
2732 keep_alive();
2733 dmi_status_t status = dmi_scan(target, NULL, &value,
2734 DMI_OP_READ, sbdata[j], 0, false);
2735 if (status == DMI_STATUS_BUSY)
2736 increase_dmi_busy_delay(target);
2737 else if (status == DMI_STATUS_SUCCESS)
2738 break;
2739 else
2740 return ERROR_FAIL;
2741 }
2742 if (next_read != address - 1) {
2743 buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
2744 log_memory_access(next_read, value, MIN(size, 4), true);
2745 }
2746 next_read = address + i * size + j * 4;
2747 }
2748 }
2749
2750 uint32_t sbcs_read = 0;
2751 if (count > 1) {
2752 uint32_t value;
2753 unsigned attempt = 0;
2754 while (1) {
2755 if (attempt++ > 100) {
2756 LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
2757 next_read);
2758 return ERROR_FAIL;
2759 }
2760 dmi_status_t status = dmi_scan(target, NULL, &value, DMI_OP_NOP, 0, 0, false);
2761 if (status == DMI_STATUS_BUSY)
2762 increase_dmi_busy_delay(target);
2763 else if (status == DMI_STATUS_SUCCESS)
2764 break;
2765 else
2766 return ERROR_FAIL;
2767 }
2768 buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
2769 log_memory_access(next_read, value, MIN(size, 4), true);
2770
2771 /* "Writes to sbcs while sbbusy is high result in undefined behavior.
2772 * A debugger must not write to sbcs until it reads sbbusy as 0." */
2773 if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
2774 return ERROR_FAIL;
2775
2776 sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, 0);
2777 if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
2778 return ERROR_FAIL;
2779 }
2780
2781 /* Read the last word, after we disabled sbreadondata if necessary. */
2782 if (!get_field(sbcs_read, DM_SBCS_SBERROR) &&
2783 !get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2784 if (read_memory_bus_word(target, address + (count - 1) * size, size,
2785 buffer + (count - 1) * size) != ERROR_OK)
2786 return ERROR_FAIL;
2787
2788 if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
2789 return ERROR_FAIL;
2790 }
2791
2792 if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2793 /* We read while the target was busy. Slow down and try again. */
2794 if (dmi_write(target, DM_SBCS, sbcs_read | DM_SBCS_SBBUSYERROR) != ERROR_OK)
2795 return ERROR_FAIL;
2796 next_address = sb_read_address(target);
2797 info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
2798 continue;
2799 }
2800
2801 unsigned error = get_field(sbcs_read, DM_SBCS_SBERROR);
2802 if (error == 0) {
2803 next_address = end_address;
2804 } else {
2805 /* Some error indicating the bus access failed, but not because of
2806 * something we did wrong. */
2807 if (dmi_write(target, DM_SBCS, DM_SBCS_SBERROR) != ERROR_OK)
2808 return ERROR_FAIL;
2809 return ERROR_FAIL;
2810 }
2811 }
2812
2813 return ERROR_OK;
2814 }
2815
2816 static void log_mem_access_result(struct target *target, bool success, int method, bool read)
2817 {
2818 RISCV_INFO(r);
2819 bool warn = false;
2820 char msg[60];
2821
2822 /* Compose the message */
2823 snprintf(msg, 60, "%s to %s memory via %s.",
2824 success ? "Succeeded" : "Failed",
2825 read ? "read" : "write",
2826 (method == RISCV_MEM_ACCESS_PROGBUF) ? "program buffer" :
2827 (method == RISCV_MEM_ACCESS_SYSBUS) ? "system bus" : "abstract access");
2828
2829 /* Determine the log message severity. Show warnings only once. */
2830 if (!success) {
2831 if (method == RISCV_MEM_ACCESS_PROGBUF) {
2832 warn = r->mem_access_progbuf_warn;
2833 r->mem_access_progbuf_warn = false;
2834 }
2835 if (method == RISCV_MEM_ACCESS_SYSBUS) {
2836 warn = r->mem_access_sysbus_warn;
2837 r->mem_access_sysbus_warn = false;
2838 }
2839 if (method == RISCV_MEM_ACCESS_ABSTRACT) {
2840 warn = r->mem_access_abstract_warn;
2841 r->mem_access_abstract_warn = false;
2842 }
2843 }
2844
2845 if (warn)
2846 LOG_WARNING("%s", msg);
2847 else
2848 LOG_DEBUG("%s", msg);
2849 }
2850
2851 static bool mem_should_skip_progbuf(struct target *target, target_addr_t address,
2852 uint32_t size, bool read, char **skip_reason)
2853 {
2854 assert(skip_reason);
2855
2856 if (!has_sufficient_progbuf(target, 3)) {
2857 LOG_DEBUG("Skipping mem %s via progbuf - insufficient progbuf size.",
2858 read ? "read" : "write");
2859 *skip_reason = "skipped (insufficient progbuf)";
2860 return true;
2861 }
2862 if (target->state != TARGET_HALTED) {
2863 LOG_DEBUG("Skipping mem %s via progbuf - target not halted.",
2864 read ? "read" : "write");
2865 *skip_reason = "skipped (target not halted)";
2866 return true;
2867 }
2868 if (riscv_xlen(target) < size * 8) {
2869 LOG_DEBUG("Skipping mem %s via progbuf - XLEN (%d) is too short for %d-bit memory access.",
2870 read ? "read" : "write", riscv_xlen(target), size * 8);
2871 *skip_reason = "skipped (XLEN too short)";
2872 return true;
2873 }
2874 if (size > 8) {
2875 LOG_DEBUG("Skipping mem %s via progbuf - unsupported size.",
2876 read ? "read" : "write");
2877 *skip_reason = "skipped (unsupported size)";
2878 return true;
2879 }
2880 if ((sizeof(address) * 8 > riscv_xlen(target)) && (address >> riscv_xlen(target))) {
2881 LOG_DEBUG("Skipping mem %s via progbuf - progbuf only supports %u-bit address.",
2882 read ? "read" : "write", riscv_xlen(target));
2883 *skip_reason = "skipped (too large address)";
2884 return true;
2885 }
2886
2887 return false;
2888 }
2889
2890 static bool mem_should_skip_sysbus(struct target *target, target_addr_t address,
2891 uint32_t size, uint32_t increment, bool read, char **skip_reason)
2892 {
2893 assert(skip_reason);
2894
2895 RISCV013_INFO(info);
2896 if (!sba_supports_access(target, size)) {
2897 LOG_DEBUG("Skipping mem %s via system bus - unsupported size.",
2898 read ? "read" : "write");
2899 *skip_reason = "skipped (unsupported size)";
2900 return true;
2901 }
2902 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2903 if ((sizeof(address) * 8 > sbasize) && (address >> sbasize)) {
2904 LOG_DEBUG("Skipping mem %s via system bus - sba only supports %u-bit address.",
2905 read ? "read" : "write", sbasize);
2906 *skip_reason = "skipped (too large address)";
2907 return true;
2908 }
2909 if (read && increment != size && (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0 || increment != 0)) {
2910 LOG_DEBUG("Skipping mem read via system bus - "
2911 "sba reads only support size==increment or also size==0 for sba v1.");
2912 *skip_reason = "skipped (unsupported increment)";
2913 return true;
2914 }
2915
2916 return false;
2917 }
2918
2919 static bool mem_should_skip_abstract(struct target *target, target_addr_t address,
2920 uint32_t size, uint32_t increment, bool read, char **skip_reason)
2921 {
2922 assert(skip_reason);
2923
2924 if (size > 8) {
2925 /* TODO: Add 128b support if it's ever used. Involves modifying
2926 read/write_abstract_arg() to work on two 64b values. */
2927 LOG_DEBUG("Skipping mem %s via abstract access - unsupported size: %d bits",
2928 read ? "read" : "write", size * 8);
2929 *skip_reason = "skipped (unsupported size)";
2930 return true;
2931 }