riscv: drop deprecated command 'riscv test_sba_config_reg'
[openocd.git] / src / target / riscv / riscv-013.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /*
4 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
5 * latest draft.
6 */
7
8 #include <assert.h>
9 #include <stdlib.h>
10 #include <time.h>
11
12 #ifdef HAVE_CONFIG_H
13 #include "config.h"
14 #endif
15
16 #include "target/target.h"
17 #include "target/algorithm.h"
18 #include "target/target_type.h"
19 #include <helper/log.h>
20 #include "jtag/jtag.h"
21 #include "target/register.h"
22 #include "target/breakpoints.h"
23 #include "helper/time_support.h"
24 #include "helper/list.h"
25 #include "riscv.h"
26 #include "debug_defines.h"
27 #include "rtos/rtos.h"
28 #include "program.h"
29 #include "asm.h"
30 #include "batch.h"
31
32 static int riscv013_on_step_or_resume(struct target *target, bool step);
33 static int riscv013_step_or_resume_current_hart(struct target *target,
34 bool step, bool use_hasel);
35 static void riscv013_clear_abstract_error(struct target *target);
36
37 /* Implementations of the functions in struct riscv_info. */
38 static int riscv013_get_register(struct target *target,
39 riscv_reg_t *value, int rid);
40 static int riscv013_set_register(struct target *target, int regid, uint64_t value);
41 static int riscv013_select_current_hart(struct target *target);
42 static int riscv013_halt_prep(struct target *target);
43 static int riscv013_halt_go(struct target *target);
44 static int riscv013_resume_go(struct target *target);
45 static int riscv013_step_current_hart(struct target *target);
46 static int riscv013_on_halt(struct target *target);
47 static int riscv013_on_step(struct target *target);
48 static int riscv013_resume_prep(struct target *target);
49 static bool riscv013_is_halted(struct target *target);
50 static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
51 static int riscv013_write_debug_buffer(struct target *target, unsigned index,
52 riscv_insn_t d);
53 static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
54 index);
55 static int riscv013_execute_debug_buffer(struct target *target);
56 static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
57 static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
58 static int riscv013_dmi_write_u64_bits(struct target *target);
59 static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);
60 static int register_read(struct target *target, uint64_t *value, uint32_t number);
61 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
62 static int register_write_direct(struct target *target, unsigned number,
63 uint64_t value);
64 static int read_memory(struct target *target, target_addr_t address,
65 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment);
66 static int write_memory(struct target *target, target_addr_t address,
67 uint32_t size, uint32_t count, const uint8_t *buffer);
68
69 /**
70 * Since almost everything can be accomplish by scanning the dbus register, all
71 * functions here assume dbus is already selected. The exception are functions
72 * called directly by OpenOCD, which can't assume anything about what's
73 * currently in IR. They should set IR to dbus explicitly.
74 */
75
/* Extract the bits selected by @c mask from @c reg, shifted down to bit 0.
 * Dividing by the mask's lowest set bit performs the right shift. */
#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
/* Insert @c val into the field selected by @c mask, preserving other bits. */
#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))

/* dcsr.cause values: why the hart entered Debug Mode. */
#define CSR_DCSR_CAUSE_SWBP 1
#define CSR_DCSR_CAUSE_TRIGGER 2
#define CSR_DCSR_CAUSE_DEBUGINT 3
#define CSR_DCSR_CAUSE_STEP 4
#define CSR_DCSR_CAUSE_HALT 5
#define CSR_DCSR_CAUSE_GROUP 6

/* Declare and initialize a local riscv013_info_t pointer for this target. */
#define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
87
88 /*** JTAG registers. ***/
89
/* Value of the "op" field in a DMI scan request. */
typedef enum {
	DMI_OP_NOP = 0,
	DMI_OP_READ = 1,
	DMI_OP_WRITE = 2
} dmi_op_t;
/* Value of the "op" field in a DMI scan response (1 is reserved). */
typedef enum {
	DMI_STATUS_SUCCESS = 0,
	DMI_STATUS_FAILED = 2,
	DMI_STATUS_BUSY = 3
} dmi_status_t;

/* Indices for scratch slots used when staging values for the target. */
typedef enum slot {
	SLOT0,
	SLOT1,
	SLOT_LAST,
} slot_t;

/*** Debug Bus registers. ***/

/* abstractcs.cmderr values; see the matching error strings in wait_for_idle(). */
#define CMDERR_NONE 0
#define CMDERR_BUSY 1
#define CMDERR_NOT_SUPPORTED 2
#define CMDERR_EXCEPTION 3
#define CMDERR_HALT_RESUME 4
#define CMDERR_OTHER 7

/*** Info about the core being debugged. ***/

/* Description of a hardware breakpoint/watchpoint to be mapped onto one of
 * the target's hardware triggers. */
struct trigger {
	uint64_t address;
	uint32_t length;
	uint64_t mask;
	uint64_t value;
	bool read, write, execute;
	int unique_id;
};

/* Tri-state for capabilities that are probed lazily at runtime. */
typedef enum {
	YNM_MAYBE,
	YNM_YES,
	YNM_NO
} yes_no_maybe_t;
132
/* Per-Debug-Module state, shared by every target that sits behind the same DM. */
typedef struct {
	/* Node in the global dm_list. */
	struct list_head list;
	/* Absolute position of the DM's TAP in the scan chain; used as the key
	 * when looking a DM up in dm_list. */
	int abs_chain_position;

	/* The number of harts connected to this DM. */
	int hart_count;
	/* Indicates we already reset this DM, so don't need to do it again. */
	bool was_reset;
	/* Targets that are connected to this DM. */
	struct list_head target_list;
	/* The currently selected hartid on this DM. */
	int current_hartid;
	bool hasel_supported;

	/* The program buffer stores executable code. 0 is an illegal instruction,
	 * so we use 0 to mean the cached value is invalid. */
	uint32_t progbuf_cache[16];
} dm013_info_t;

/* Node linking one target into its DM's target_list. */
typedef struct {
	struct list_head list;
	struct target *target;
} target_list_t;

/* Per-target state specific to debug spec version 0.13. */
typedef struct {
	/* The indexed used to address this hart in its DM. */
	unsigned index;
	/* Number of address bits in the dbus register. */
	unsigned abits;
	/* Number of abstract command data registers. */
	unsigned datacount;
	/* Number of words in the Program Buffer. */
	unsigned progbufsize;

	/* We cache the read-only bits of sbcs here. */
	uint32_t sbcs;

	yes_no_maybe_t progbuf_writable;
	/* We only need the address so that we know the alignment of the buffer. */
	riscv_addr_t progbuf_address;

	/* Number of run-test/idle cycles the target requests we do after each dbus
	 * access. */
	unsigned int dtmcs_idle;

	/* This value is incremented every time a dbus access comes back as "busy".
	 * It's used to determine how many run-test/idle cycles to feed the target
	 * in between accesses. */
	unsigned int dmi_busy_delay;

	/* Number of run-test/idle cycles to add between consecutive bus master
	 * reads/writes respectively. */
	unsigned int bus_master_write_delay, bus_master_read_delay;

	/* This value is increased every time we tried to execute two commands
	 * consecutively, and the second one failed because the previous hadn't
	 * completed yet. It's used to add extra run-test/idle cycles after
	 * starting a command, so we don't have to waste time checking for busy to
	 * go low. */
	unsigned int ac_busy_delay;

	/* Which register classes abstract commands can access; cleared when the
	 * DM reports "not supported" so we fall back to other access methods. */
	bool abstract_read_csr_supported;
	bool abstract_write_csr_supported;
	bool abstract_read_fpr_supported;
	bool abstract_write_fpr_supported;

	yes_no_maybe_t has_aampostincrement;

	/* When a function returns some error due to a failure indicated by the
	 * target in cmderr, the caller can look here to see what that error was.
	 * (Compare with errno.) */
	uint8_t cmderr;

	/* Some fields from hartinfo. */
	uint8_t datasize;
	uint8_t dataaccess;
	int16_t dataaddr;

	/* The width of the hartsel field. */
	unsigned hartsellen;

	/* DM that provides access to this target. */
	dm013_info_t *dm;
} riscv013_info_t;
217
218 static LIST_HEAD(dm_list);
219
220 static riscv013_info_t *get_info(const struct target *target)
221 {
222 struct riscv_info *info = target->arch_info;
223 assert(info);
224 assert(info->version_specific);
225 return info->version_specific;
226 }
227
228 /**
229 * Return the DM structure for this target. If there isn't one, find it in the
230 * global list of DMs. If it's not in there, then create one and initialize it
231 * to 0.
232 */
static dm013_info_t *get_dm(struct target *target)
{
	RISCV013_INFO(info);
	/* Fast path: this target already resolved its DM. */
	if (info->dm)
		return info->dm;

	/* DMs are shared by targets behind the same TAP; the TAP's absolute
	 * position in the scan chain is the lookup key into dm_list. */
	int abs_chain_position = target->tap->abs_chain_position;

	dm013_info_t *entry;
	dm013_info_t *dm = NULL;
	list_for_each_entry(entry, &dm_list, list) {
		if (entry->abs_chain_position == abs_chain_position) {
			dm = entry;
			break;
		}
	}

	if (!dm) {
		/* First target seen on this TAP: create and register a DM record. */
		LOG_DEBUG("[%d] Allocating new DM", target->coreid);
		dm = calloc(1, sizeof(dm013_info_t));
		if (!dm)
			return NULL;
		dm->abs_chain_position = abs_chain_position;
		/* -1 means "not yet known/selected". */
		dm->current_hartid = -1;
		dm->hart_count = -1;
		INIT_LIST_HEAD(&dm->target_list);
		list_add(&dm->list, &dm_list);
	}

	info->dm = dm;
	/* Ensure this target appears exactly once on the DM's target list. */
	target_list_t *target_entry;
	list_for_each_entry(target_entry, &dm->target_list, list) {
		if (target_entry->target == target)
			return dm;
	}
	target_entry = calloc(1, sizeof(*target_entry));
	if (!target_entry) {
		/* Undo the cached pointer so a later call can retry cleanly. */
		info->dm = NULL;
		return NULL;
	}
	target_entry->target = target;
	list_add(&target_entry->list, &dm->target_list);

	return dm;
}
278
279 static uint32_t set_hartsel(uint32_t initial, uint32_t index)
280 {
281 initial &= ~DM_DMCONTROL_HARTSELLO;
282 initial &= ~DM_DMCONTROL_HARTSELHI;
283
284 uint32_t index_lo = index & ((1 << DM_DMCONTROL_HARTSELLO_LENGTH) - 1);
285 initial |= index_lo << DM_DMCONTROL_HARTSELLO_OFFSET;
286 uint32_t index_hi = index >> DM_DMCONTROL_HARTSELLO_LENGTH;
287 assert(index_hi < 1 << DM_DMCONTROL_HARTSELHI_LENGTH);
288 initial |= index_hi << DM_DMCONTROL_HARTSELHI_OFFSET;
289
290 return initial;
291 }
292
293 static void decode_dmi(char *text, unsigned address, unsigned data)
294 {
295 static const struct {
296 unsigned address;
297 uint64_t mask;
298 const char *name;
299 } description[] = {
300 { DM_DMCONTROL, DM_DMCONTROL_HALTREQ, "haltreq" },
301 { DM_DMCONTROL, DM_DMCONTROL_RESUMEREQ, "resumereq" },
302 { DM_DMCONTROL, DM_DMCONTROL_HARTRESET, "hartreset" },
303 { DM_DMCONTROL, DM_DMCONTROL_HASEL, "hasel" },
304 { DM_DMCONTROL, DM_DMCONTROL_HARTSELHI, "hartselhi" },
305 { DM_DMCONTROL, DM_DMCONTROL_HARTSELLO, "hartsello" },
306 { DM_DMCONTROL, DM_DMCONTROL_NDMRESET, "ndmreset" },
307 { DM_DMCONTROL, DM_DMCONTROL_DMACTIVE, "dmactive" },
308 { DM_DMCONTROL, DM_DMCONTROL_ACKHAVERESET, "ackhavereset" },
309
310 { DM_DMSTATUS, DM_DMSTATUS_IMPEBREAK, "impebreak" },
311 { DM_DMSTATUS, DM_DMSTATUS_ALLHAVERESET, "allhavereset" },
312 { DM_DMSTATUS, DM_DMSTATUS_ANYHAVERESET, "anyhavereset" },
313 { DM_DMSTATUS, DM_DMSTATUS_ALLRESUMEACK, "allresumeack" },
314 { DM_DMSTATUS, DM_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
315 { DM_DMSTATUS, DM_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
316 { DM_DMSTATUS, DM_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
317 { DM_DMSTATUS, DM_DMSTATUS_ALLUNAVAIL, "allunavail" },
318 { DM_DMSTATUS, DM_DMSTATUS_ANYUNAVAIL, "anyunavail" },
319 { DM_DMSTATUS, DM_DMSTATUS_ALLRUNNING, "allrunning" },
320 { DM_DMSTATUS, DM_DMSTATUS_ANYRUNNING, "anyrunning" },
321 { DM_DMSTATUS, DM_DMSTATUS_ALLHALTED, "allhalted" },
322 { DM_DMSTATUS, DM_DMSTATUS_ANYHALTED, "anyhalted" },
323 { DM_DMSTATUS, DM_DMSTATUS_AUTHENTICATED, "authenticated" },
324 { DM_DMSTATUS, DM_DMSTATUS_AUTHBUSY, "authbusy" },
325 { DM_DMSTATUS, DM_DMSTATUS_HASRESETHALTREQ, "hasresethaltreq" },
326 { DM_DMSTATUS, DM_DMSTATUS_CONFSTRPTRVALID, "confstrptrvalid" },
327 { DM_DMSTATUS, DM_DMSTATUS_VERSION, "version" },
328
329 { DM_ABSTRACTCS, DM_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
330 { DM_ABSTRACTCS, DM_ABSTRACTCS_BUSY, "busy" },
331 { DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR, "cmderr" },
332 { DM_ABSTRACTCS, DM_ABSTRACTCS_DATACOUNT, "datacount" },
333
334 { DM_COMMAND, DM_COMMAND_CMDTYPE, "cmdtype" },
335
336 { DM_SBCS, DM_SBCS_SBVERSION, "sbversion" },
337 { DM_SBCS, DM_SBCS_SBBUSYERROR, "sbbusyerror" },
338 { DM_SBCS, DM_SBCS_SBBUSY, "sbbusy" },
339 { DM_SBCS, DM_SBCS_SBREADONADDR, "sbreadonaddr" },
340 { DM_SBCS, DM_SBCS_SBACCESS, "sbaccess" },
341 { DM_SBCS, DM_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
342 { DM_SBCS, DM_SBCS_SBREADONDATA, "sbreadondata" },
343 { DM_SBCS, DM_SBCS_SBERROR, "sberror" },
344 { DM_SBCS, DM_SBCS_SBASIZE, "sbasize" },
345 { DM_SBCS, DM_SBCS_SBACCESS128, "sbaccess128" },
346 { DM_SBCS, DM_SBCS_SBACCESS64, "sbaccess64" },
347 { DM_SBCS, DM_SBCS_SBACCESS32, "sbaccess32" },
348 { DM_SBCS, DM_SBCS_SBACCESS16, "sbaccess16" },
349 { DM_SBCS, DM_SBCS_SBACCESS8, "sbaccess8" },
350 };
351
352 text[0] = 0;
353 for (unsigned i = 0; i < ARRAY_SIZE(description); i++) {
354 if (description[i].address == address) {
355 uint64_t mask = description[i].mask;
356 unsigned value = get_field(data, mask);
357 if (value) {
358 if (i > 0)
359 *(text++) = ' ';
360 if (mask & (mask >> 1)) {
361 /* If the field is more than 1 bit wide. */
362 sprintf(text, "%s=%d", description[i].name, value);
363 } else {
364 strcpy(text, description[i].name);
365 }
366 text += strlen(text);
367 }
368 }
369 }
370 }
371
372 static void dump_field(int idle, const struct scan_field *field)
373 {
374 static const char * const op_string[] = {"-", "r", "w", "?"};
375 static const char * const status_string[] = {"+", "?", "F", "b"};
376
377 if (debug_level < LOG_LVL_DEBUG)
378 return;
379
380 uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
381 unsigned int out_op = get_field(out, DTM_DMI_OP);
382 unsigned int out_data = get_field(out, DTM_DMI_DATA);
383 unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;
384
385 uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
386 unsigned int in_op = get_field(in, DTM_DMI_OP);
387 unsigned int in_data = get_field(in, DTM_DMI_DATA);
388 unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;
389
390 log_printf_lf(LOG_LVL_DEBUG,
391 __FILE__, __LINE__, "scan",
392 "%db %s %08x @%02x -> %s %08x @%02x; %di",
393 field->num_bits, op_string[out_op], out_data, out_address,
394 status_string[in_op], in_data, in_address, idle);
395
396 char out_text[500];
397 char in_text[500];
398 decode_dmi(out_text, out_address, out_data);
399 decode_dmi(in_text, in_address, in_data);
400 if (in_text[0] || out_text[0]) {
401 log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
402 out_text, in_text);
403 }
404 }
405
406 /*** Utility functions. ***/
407
408 static void select_dmi(struct target *target)
409 {
410 if (bscan_tunnel_ir_width != 0) {
411 select_dmi_via_bscan(target);
412 return;
413 }
414 jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
415 }
416
417 static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
418 {
419 struct scan_field field;
420 uint8_t in_value[4];
421 uint8_t out_value[4] = { 0 };
422
423 if (bscan_tunnel_ir_width != 0)
424 return dtmcontrol_scan_via_bscan(target, out);
425
426 buf_set_u32(out_value, 0, 32, out);
427
428 jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);
429
430 field.num_bits = 32;
431 field.out_value = out_value;
432 field.in_value = in_value;
433 jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
434
435 /* Always return to dmi. */
436 select_dmi(target);
437
438 int retval = jtag_execute_queue();
439 if (retval != ERROR_OK) {
440 LOG_ERROR("failed jtag scan: %d", retval);
441 return retval;
442 }
443
444 uint32_t in = buf_get_u32(field.in_value, 0, 32);
445 LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);
446
447 return in;
448 }
449
450 static void increase_dmi_busy_delay(struct target *target)
451 {
452 riscv013_info_t *info = get_info(target);
453 info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
454 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
455 info->dtmcs_idle, info->dmi_busy_delay,
456 info->ac_busy_delay);
457
458 dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
459 }
460
461 /**
462 * exec: If this is set, assume the scan results in an execution, so more
463 * run-test/idle cycles may be required.
464 */
static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
		uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
		bool exec)
{
	riscv013_info_t *info = get_info(target);
	RISCV_INFO(r);
	/* A DMI scan is op + data + address bits wide; abits comes from dtmcs. */
	unsigned num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH;
	size_t num_bytes = (num_bits + 7) / 8;
	uint8_t in[num_bytes];
	uint8_t out[num_bytes];
	struct scan_field field = {
		.num_bits = num_bits,
		.out_value = out,
		.in_value = in
	};
	riscv_bscan_tunneled_scan_context_t bscan_ctxt;

	/* After a reset, forget the learned busy delays for the first few scans
	 * (counted down by reset_delays_wait). */
	if (r->reset_delays_wait >= 0) {
		r->reset_delays_wait--;
		if (r->reset_delays_wait < 0) {
			info->dmi_busy_delay = 0;
			info->ac_busy_delay = 0;
		}
	}

	memset(in, 0, num_bytes);
	memset(out, 0, num_bytes);

	/* abits == 0 would mean the DTM was never examined. */
	assert(info->abits != 0);

	buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
	buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
	buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);

	/* I wanted to place this code in a different function, but the way JTAG command
	   queueing works in the jtag handling functions, the scan fields either have to be
	   heap allocated, global/static, or else they need to stay on the stack until
	   the jtag_execute_queue() call. Heap or static fields in this case doesn't seem
	   the best fit. Declaring stack based field values in a subsidiary function call wouldn't
	   work. */
	if (bscan_tunnel_ir_width != 0) {
		riscv_add_bscan_tunneled_scan(target, &field, &bscan_ctxt);
	} else {
		/* Assume dbus is already selected. */
		jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
	}

	/* Idle cycles give the DM time to finish; more are needed when the scan
	 * kicks off execution (abstract command / progbuf). */
	int idle_count = info->dmi_busy_delay;
	if (exec)
		idle_count += info->ac_busy_delay;

	if (idle_count)
		jtag_add_runtest(idle_count, TAP_IDLE);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("dmi_scan failed jtag scan");
		if (data_in)
			*data_in = ~0;
		return DMI_STATUS_FAILED;
	}

	if (bscan_tunnel_ir_width != 0) {
		/* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
		buffer_shr(in, num_bytes, 1);
	}

	/* The response carries the result of the PREVIOUS operation. */
	if (data_in)
		*data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);

	if (address_in)
		*address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
	dump_field(idle_count, &field);
	return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
}
540
541 /**
542 * @param target
543 * @param data_in The data we received from the target.
544 * @param dmi_busy_encountered
545 * If non-NULL, will be updated to reflect whether DMI busy was
546 * encountered while executing this operation or not.
547 * @param dmi_op The operation to perform (read/write/nop).
548 * @param address The address argument to that operation.
549 * @param data_out The data to send to the target.
550 * @param timeout_sec
551 * @param exec When true, this scan will execute something, so extra RTI
552 * cycles may be added.
553 * @param ensure_success
554 * Scan a nop after the requested operation, ensuring the
555 * DMI operation succeeded.
556 */
557 static int dmi_op_timeout(struct target *target, uint32_t *data_in,
558 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
559 uint32_t data_out, int timeout_sec, bool exec, bool ensure_success)
560 {
561 select_dmi(target);
562
563 dmi_status_t status;
564 uint32_t address_in;
565
566 if (dmi_busy_encountered)
567 *dmi_busy_encountered = false;
568
569 const char *op_name;
570 switch (dmi_op) {
571 case DMI_OP_NOP:
572 op_name = "nop";
573 break;
574 case DMI_OP_READ:
575 op_name = "read";
576 break;
577 case DMI_OP_WRITE:
578 op_name = "write";
579 break;
580 default:
581 LOG_ERROR("Invalid DMI operation: %d", dmi_op);
582 return ERROR_FAIL;
583 }
584
585 keep_alive();
586
587 time_t start = time(NULL);
588 /* This first loop performs the request. Note that if for some reason this
589 * stays busy, it is actually due to the previous access. */
590 while (1) {
591 status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
592 exec);
593 if (status == DMI_STATUS_BUSY) {
594 increase_dmi_busy_delay(target);
595 if (dmi_busy_encountered)
596 *dmi_busy_encountered = true;
597 } else if (status == DMI_STATUS_SUCCESS) {
598 break;
599 } else {
600 LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
601 return ERROR_FAIL;
602 }
603 if (time(NULL) - start > timeout_sec)
604 return ERROR_TIMEOUT_REACHED;
605 }
606
607 if (status != DMI_STATUS_SUCCESS) {
608 LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
609 return ERROR_FAIL;
610 }
611
612 if (ensure_success) {
613 /* This second loop ensures the request succeeded, and gets back data.
614 * Note that NOP can result in a 'busy' result as well, but that would be
615 * noticed on the next DMI access we do. */
616 while (1) {
617 status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
618 false);
619 if (status == DMI_STATUS_BUSY) {
620 increase_dmi_busy_delay(target);
621 if (dmi_busy_encountered)
622 *dmi_busy_encountered = true;
623 } else if (status == DMI_STATUS_SUCCESS) {
624 break;
625 } else {
626 if (data_in) {
627 LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
628 op_name, address, *data_in, status);
629 } else {
630 LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
631 status);
632 }
633 return ERROR_FAIL;
634 }
635 if (time(NULL) - start > timeout_sec)
636 return ERROR_TIMEOUT_REACHED;
637 }
638 }
639
640 return ERROR_OK;
641 }
642
643 static int dmi_op(struct target *target, uint32_t *data_in,
644 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
645 uint32_t data_out, bool exec, bool ensure_success)
646 {
647 int result = dmi_op_timeout(target, data_in, dmi_busy_encountered, dmi_op,
648 address, data_out, riscv_command_timeout_sec, exec, ensure_success);
649 if (result == ERROR_TIMEOUT_REACHED) {
650 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
651 "either really slow or broken. You could increase the "
652 "timeout with riscv set_command_timeout_sec.",
653 riscv_command_timeout_sec);
654 return ERROR_FAIL;
655 }
656 return result;
657 }
658
/* Read DMI register @c address into *value, confirming success with a nop. */
static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false, true);
}

/* Like dmi_read(), but hints that the read triggers execution on the target,
 * so extra run-test/idle cycles are added. */
static int dmi_read_exec(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true, true);
}

/* Write @c value to DMI register @c address, confirming success with a nop. */
static int dmi_write(struct target *target, uint32_t address, uint32_t value)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true);
}

/* Write that may start execution; the confirming nop scan is optional. */
static int dmi_write_exec(struct target *target, uint32_t address,
		uint32_t value, bool ensure_success)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true, ensure_success);
}
679
/* Read dmstatus into *dmstatus with the given timeout and sanity-check it.
 * NOTE(review): an unsupported DM version is only logged, not treated as a
 * failure here - the function still returns ERROR_OK in that case; only an
 * authentication failure (when @c authenticated is set) returns ERROR_FAIL. */
static int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
		bool authenticated, unsigned timeout_sec)
{
	int result = dmi_op_timeout(target, dmstatus, NULL, DMI_OP_READ,
			DM_DMSTATUS, 0, timeout_sec, false, true);
	if (result != ERROR_OK)
		return result;
	int dmstatus_version = get_field(*dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (1.0), not "
				"%d (dmstatus=0x%x). This error might be caused by a JTAG "
				"signal issue. Try reducing the JTAG clock speed.",
				get_field(*dmstatus, DM_DMSTATUS_VERSION), *dmstatus);
	} else if (authenticated && !get_field(*dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", *dmstatus);
		return ERROR_FAIL;
	}
	return ERROR_OK;
}
701
/* dmstatus_read_timeout() with the global command timeout. */
static int dmstatus_read(struct target *target, uint32_t *dmstatus,
		bool authenticated)
{
	return dmstatus_read_timeout(target, dmstatus, authenticated,
			riscv_command_timeout_sec);
}
708
709 static void increase_ac_busy_delay(struct target *target)
710 {
711 riscv013_info_t *info = get_info(target);
712 info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
713 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
714 info->dtmcs_idle, info->dmi_busy_delay,
715 info->ac_busy_delay);
716 }
717
718 static uint32_t __attribute__((unused)) abstract_register_size(unsigned width)
719 {
720 switch (width) {
721 case 32:
722 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 2);
723 case 64:
724 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 3);
725 case 128:
726 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 4);
727 default:
728 LOG_ERROR("Unsupported register width: %d", width);
729 return 0;
730 }
731 }
732
733 static int wait_for_idle(struct target *target, uint32_t *abstractcs)
734 {
735 RISCV013_INFO(info);
736 time_t start = time(NULL);
737 while (1) {
738 if (dmi_read(target, abstractcs, DM_ABSTRACTCS) != ERROR_OK)
739 return ERROR_FAIL;
740
741 if (get_field(*abstractcs, DM_ABSTRACTCS_BUSY) == 0)
742 return ERROR_OK;
743
744 if (time(NULL) - start > riscv_command_timeout_sec) {
745 info->cmderr = get_field(*abstractcs, DM_ABSTRACTCS_CMDERR);
746 if (info->cmderr != CMDERR_NONE) {
747 const char *errors[8] = {
748 "none",
749 "busy",
750 "not supported",
751 "exception",
752 "halt/resume",
753 "reserved",
754 "reserved",
755 "other" };
756
757 LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
758 errors[info->cmderr], *abstractcs);
759 }
760
761 LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
762 "Increase the timeout with riscv set_command_timeout_sec.",
763 riscv_command_timeout_sec,
764 *abstractcs);
765 return ERROR_FAIL;
766 }
767 }
768 }
769
/* Write @c command to DM_COMMAND, wait for it to complete, and check for
 * errors. On failure, the cmderr value is saved in info->cmderr and the
 * sticky error is cleared in the DM so the next command can run. */
static int execute_abstract_command(struct target *target, uint32_t command)
{
	RISCV013_INFO(info);
	if (debug_level >= LOG_LVL_DEBUG) {
		switch (get_field(command, DM_COMMAND_CMDTYPE)) {
			case 0:
				/* Access-register command: decode its fields for the log. */
				LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
						"transfer=%d, write=%d, regno=0x%x",
						command,
						8 << get_field(command, AC_ACCESS_REGISTER_AARSIZE),
						get_field(command, AC_ACCESS_REGISTER_POSTEXEC),
						get_field(command, AC_ACCESS_REGISTER_TRANSFER),
						get_field(command, AC_ACCESS_REGISTER_WRITE),
						get_field(command, AC_ACCESS_REGISTER_REGNO));
				break;
			default:
				LOG_DEBUG("command=0x%x", command);
				break;
		}
	}

	/* No confirming nop here (ensure_success=false): wait_for_idle() below
	 * polls abstractcs anyway. */
	if (dmi_write_exec(target, DM_COMMAND, command, false) != ERROR_OK)
		return ERROR_FAIL;

	uint32_t abstractcs = 0;
	int result = wait_for_idle(target, &abstractcs);

	info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
	if (info->cmderr != 0 || result != ERROR_OK) {
		LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
		/* Clear the error. */
		dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
807
808 static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
809 unsigned size_bits)
810 {
811 riscv_reg_t value = 0;
812 uint32_t v;
813 unsigned offset = index * size_bits / 32;
814 switch (size_bits) {
815 default:
816 LOG_ERROR("Unsupported size: %d bits", size_bits);
817 return ~0;
818 case 64:
819 dmi_read(target, &v, DM_DATA0 + offset + 1);
820 value |= ((uint64_t) v) << 32;
821 /* falls through */
822 case 32:
823 dmi_read(target, &v, DM_DATA0 + offset);
824 value |= v;
825 }
826 return value;
827 }
828
829 static int write_abstract_arg(struct target *target, unsigned index,
830 riscv_reg_t value, unsigned size_bits)
831 {
832 unsigned offset = index * size_bits / 32;
833 switch (size_bits) {
834 default:
835 LOG_ERROR("Unsupported size: %d bits", size_bits);
836 return ERROR_FAIL;
837 case 64:
838 dmi_write(target, DM_DATA0 + offset + 1, value >> 32);
839 /* falls through */
840 case 32:
841 dmi_write(target, DM_DATA0 + offset, value);
842 }
843 return ERROR_OK;
844 }
845
846 /**
847 * @par size in bits
848 */
849 static uint32_t access_register_command(struct target *target, uint32_t number,
850 unsigned size, uint32_t flags)
851 {
852 uint32_t command = set_field(0, DM_COMMAND_CMDTYPE, 0);
853 switch (size) {
854 case 32:
855 command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 2);
856 break;
857 case 64:
858 command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3);
859 break;
860 default:
861 LOG_ERROR("%d-bit register %s not supported.", size,
862 gdb_regno_name(number));
863 assert(0);
864 }
865
866 if (number <= GDB_REGNO_XPR31) {
867 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
868 0x1000 + number - GDB_REGNO_ZERO);
869 } else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
870 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
871 0x1020 + number - GDB_REGNO_FPR0);
872 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
873 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
874 number - GDB_REGNO_CSR0);
875 } else if (number >= GDB_REGNO_COUNT) {
876 /* Custom register. */
877 assert(target->reg_cache->reg_list[number].arch_info);
878 riscv_reg_info_t *reg_info = target->reg_cache->reg_list[number].arch_info;
879 assert(reg_info);
880 command = set_field(command, AC_ACCESS_REGISTER_REGNO,
881 0xc000 + reg_info->custom_number);
882 } else {
883 assert(0);
884 }
885
886 command |= flags;
887
888 return command;
889 }
890
891 static int register_read_abstract(struct target *target, uint64_t *value,
892 uint32_t number, unsigned size)
893 {
894 RISCV013_INFO(info);
895
896 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
897 !info->abstract_read_fpr_supported)
898 return ERROR_FAIL;
899 if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
900 !info->abstract_read_csr_supported)
901 return ERROR_FAIL;
902 /* The spec doesn't define abstract register numbers for vector registers. */
903 if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31)
904 return ERROR_FAIL;
905
906 uint32_t command = access_register_command(target, number, size,
907 AC_ACCESS_REGISTER_TRANSFER);
908
909 int result = execute_abstract_command(target, command);
910 if (result != ERROR_OK) {
911 if (info->cmderr == CMDERR_NOT_SUPPORTED) {
912 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
913 info->abstract_read_fpr_supported = false;
914 LOG_INFO("Disabling abstract command reads from FPRs.");
915 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
916 info->abstract_read_csr_supported = false;
917 LOG_INFO("Disabling abstract command reads from CSRs.");
918 }
919 }
920 return result;
921 }
922
923 if (value)
924 *value = read_abstract_arg(target, 0, size);
925
926 return ERROR_OK;
927 }
928
929 static int register_write_abstract(struct target *target, uint32_t number,
930 uint64_t value, unsigned size)
931 {
932 RISCV013_INFO(info);
933
934 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
935 !info->abstract_write_fpr_supported)
936 return ERROR_FAIL;
937 if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
938 !info->abstract_write_csr_supported)
939 return ERROR_FAIL;
940
941 uint32_t command = access_register_command(target, number, size,
942 AC_ACCESS_REGISTER_TRANSFER |
943 AC_ACCESS_REGISTER_WRITE);
944
945 if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
946 return ERROR_FAIL;
947
948 int result = execute_abstract_command(target, command);
949 if (result != ERROR_OK) {
950 if (info->cmderr == CMDERR_NOT_SUPPORTED) {
951 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
952 info->abstract_write_fpr_supported = false;
953 LOG_INFO("Disabling abstract command writes to FPRs.");
954 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
955 info->abstract_write_csr_supported = false;
956 LOG_INFO("Disabling abstract command writes to CSRs.");
957 }
958 }
959 return result;
960 }
961
962 return ERROR_OK;
963 }
964
965 /*
966 * Sets the AAMSIZE field of a memory access abstract command based on
967 * the width (bits).
968 */
969 static uint32_t abstract_memory_size(unsigned width)
970 {
971 switch (width) {
972 case 8:
973 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 0);
974 case 16:
975 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 1);
976 case 32:
977 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 2);
978 case 64:
979 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 3);
980 case 128:
981 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 4);
982 default:
983 LOG_ERROR("Unsupported memory width: %d", width);
984 return 0;
985 }
986 }
987
988 /*
989 * Creates a memory access abstract command.
990 */
991 static uint32_t access_memory_command(struct target *target, bool virtual,
992 unsigned width, bool postincrement, bool write)
993 {
994 uint32_t command = set_field(0, AC_ACCESS_MEMORY_CMDTYPE, 2);
995 command = set_field(command, AC_ACCESS_MEMORY_AAMVIRTUAL, virtual);
996 command |= abstract_memory_size(width);
997 command = set_field(command, AC_ACCESS_MEMORY_AAMPOSTINCREMENT,
998 postincrement);
999 command = set_field(command, AC_ACCESS_MEMORY_WRITE, write);
1000
1001 return command;
1002 }
1003
/* Probe whether the program buffer can double as hart-writable scratch RAM.
 * Caches the verdict in info->progbuf_writable and the buffer's hart-visible
 * address in info->progbuf_address. Returns ERROR_OK even when the buffer
 * turns out not to be writable; ERROR_FAIL only on communication errors. */
static int examine_progbuf(struct target *target)
{
	riscv013_info_t *info = get_info(target);

	/* Probe only once; reuse the cached answer on later calls. */
	if (info->progbuf_writable != YNM_MAYBE)
		return ERROR_OK;

	/* Figure out if progbuf is writable. */

	if (info->progbufsize < 1) {
		info->progbuf_writable = YNM_NO;
		LOG_INFO("No program buffer present.");
		return ERROR_OK;
	}

	/* S0 is clobbered below; save it so it can be restored. */
	uint64_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Execute auipc from inside the progbuf so S0 captures its address. */
	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, auipc(S0));
	if (riscv_program_exec(&program, target) != ERROR_OK)
		return ERROR_FAIL;

	if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Now try to store S0 into the program buffer itself. */
	riscv_program_init(&program, target);
	riscv_program_insert(&program, sw(S0, S0, 0));
	int result = riscv_program_exec(&program, target);

	/* Restore S0 before inspecting the result. */
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	if (result != ERROR_OK) {
		/* This program might have failed if the program buffer is not
		 * writable. */
		info->progbuf_writable = YNM_NO;
		return ERROR_OK;
	}

	/* Check whether the store actually landed in progbuf word 0. */
	uint32_t written;
	if (dmi_read(target, &written, DM_PROGBUF0) != ERROR_OK)
		return ERROR_FAIL;
	if (written == (uint32_t) info->progbuf_address) {
		LOG_INFO("progbuf is writable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_YES;

	} else {
		LOG_INFO("progbuf is not writeable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_NO;
	}

	return ERROR_OK;
}
1062
1063 static int is_fpu_reg(uint32_t gdb_regno)
1064 {
1065 return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) ||
1066 (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) ||
1067 (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) ||
1068 (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR);
1069 }
1070
1071 static int is_vector_reg(uint32_t gdb_regno)
1072 {
1073 return (gdb_regno >= GDB_REGNO_V0 && gdb_regno <= GDB_REGNO_V31) ||
1074 gdb_regno == GDB_REGNO_VSTART ||
1075 gdb_regno == GDB_REGNO_VXSAT ||
1076 gdb_regno == GDB_REGNO_VXRM ||
1077 gdb_regno == GDB_REGNO_VL ||
1078 gdb_regno == GDB_REGNO_VTYPE ||
1079 gdb_regno == GDB_REGNO_VLENB;
1080 }
1081
1082 static int prep_for_register_access(struct target *target, uint64_t *mstatus,
1083 int regno)
1084 {
1085 if (is_fpu_reg(regno) || is_vector_reg(regno)) {
1086 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
1087 return ERROR_FAIL;
1088 if (is_fpu_reg(regno) && (*mstatus & MSTATUS_FS) == 0) {
1089 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1090 set_field(*mstatus, MSTATUS_FS, 1)) != ERROR_OK)
1091 return ERROR_FAIL;
1092 } else if (is_vector_reg(regno) && (*mstatus & MSTATUS_VS) == 0) {
1093 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1094 set_field(*mstatus, MSTATUS_VS, 1)) != ERROR_OK)
1095 return ERROR_FAIL;
1096 }
1097 } else {
1098 *mstatus = 0;
1099 }
1100 return ERROR_OK;
1101 }
1102
1103 static int cleanup_after_register_access(struct target *target,
1104 uint64_t mstatus, int regno)
1105 {
1106 if ((is_fpu_reg(regno) && (mstatus & MSTATUS_FS) == 0) ||
1107 (is_vector_reg(regno) && (mstatus & MSTATUS_VS) == 0))
1108 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
1109 return ERROR_FAIL;
1110 return ERROR_OK;
1111 }
1112
/* Which address space the debugger uses to reach a piece of scratch memory. */
typedef enum {
	/* Abstract data registers (data0..dataN) in the Debug Module. */
	SPACE_DM_DATA,
	/* Program buffer words, accessed over DMI. */
	SPACE_DMI_PROGBUF,
	/* Regular RAM, accessed through the target's memory interface. */
	SPACE_DMI_RAM
} memory_space_t;
1118
/* A piece of scratch memory reachable by both the hart and the debugger. */
typedef struct {
	/* How can the debugger access this memory? */
	memory_space_t memory_space;
	/* Memory address to access the scratch memory from the hart. */
	riscv_addr_t hart_address;
	/* Memory address to access the scratch memory from the debugger. */
	riscv_addr_t debug_address;
	/* Working area backing SPACE_DMI_RAM scratch; NULL for the other spaces
	 * (scratch_reserve() clears it up front). */
	struct working_area *area;
} scratch_mem_t;
1128
1129 /**
1130 * Find some scratch memory to be used with the given program.
1131 */
1132 static int scratch_reserve(struct target *target,
1133 scratch_mem_t *scratch,
1134 struct riscv_program *program,
1135 unsigned size_bytes)
1136 {
1137 riscv_addr_t alignment = 1;
1138 while (alignment < size_bytes)
1139 alignment *= 2;
1140
1141 scratch->area = NULL;
1142
1143 riscv013_info_t *info = get_info(target);
1144
1145 /* Option 1: See if data# registers can be used as the scratch memory */
1146 if (info->dataaccess == 1) {
1147 /* Sign extend dataaddr. */
1148 scratch->hart_address = info->dataaddr;
1149 if (info->dataaddr & (1<<11))
1150 scratch->hart_address |= 0xfffffffffffff000ULL;
1151 /* Align. */
1152 scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);
1153
1154 if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 >=
1155 info->datasize) {
1156 scratch->memory_space = SPACE_DM_DATA;
1157 scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
1158 return ERROR_OK;
1159 }
1160 }
1161
1162 /* Option 2: See if progbuf can be used as the scratch memory */
1163 if (examine_progbuf(target) != ERROR_OK)
1164 return ERROR_FAIL;
1165
1166 /* Allow for ebreak at the end of the program. */
1167 unsigned program_size = (program->instruction_count + 1) * 4;
1168 scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
1169 ~(alignment - 1);
1170 if ((info->progbuf_writable == YNM_YES) &&
1171 ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 >=
1172 info->progbufsize)) {
1173 scratch->memory_space = SPACE_DMI_PROGBUF;
1174 scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
1175 return ERROR_OK;
1176 }
1177
1178 /* Option 3: User-configured memory area as scratch RAM */
1179 if (target_alloc_working_area(target, size_bytes + alignment - 1,
1180 &scratch->area) == ERROR_OK) {
1181 scratch->hart_address = (scratch->area->address + alignment - 1) &
1182 ~(alignment - 1);
1183 scratch->memory_space = SPACE_DMI_RAM;
1184 scratch->debug_address = scratch->hart_address;
1185 return ERROR_OK;
1186 }
1187
1188 LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
1189 "a work area with 'configure -work-area-phys'.", size_bytes);
1190 return ERROR_FAIL;
1191 }
1192
/* Release whatever scratch_reserve() handed out. Only option 3 allocates a
 * working area; NOTE(review): assumes target_free_working_area() tolerates
 * scratch->area == NULL (set by scratch_reserve() for options 1/2) — confirm. */
static int scratch_release(struct target *target,
		scratch_mem_t *scratch)
{
	return target_free_working_area(target, scratch->area);
}
1198
1199 static int scratch_read64(struct target *target, scratch_mem_t *scratch,
1200 uint64_t *value)
1201 {
1202 uint32_t v;
1203 switch (scratch->memory_space) {
1204 case SPACE_DM_DATA:
1205 if (dmi_read(target, &v, DM_DATA0 + scratch->debug_address) != ERROR_OK)
1206 return ERROR_FAIL;
1207 *value = v;
1208 if (dmi_read(target, &v, DM_DATA1 + scratch->debug_address) != ERROR_OK)
1209 return ERROR_FAIL;
1210 *value |= ((uint64_t) v) << 32;
1211 break;
1212 case SPACE_DMI_PROGBUF:
1213 if (dmi_read(target, &v, DM_PROGBUF0 + scratch->debug_address) != ERROR_OK)
1214 return ERROR_FAIL;
1215 *value = v;
1216 if (dmi_read(target, &v, DM_PROGBUF1 + scratch->debug_address) != ERROR_OK)
1217 return ERROR_FAIL;
1218 *value |= ((uint64_t) v) << 32;
1219 break;
1220 case SPACE_DMI_RAM:
1221 {
1222 uint8_t buffer[8] = {0};
1223 if (read_memory(target, scratch->debug_address, 4, 2, buffer, 4) != ERROR_OK)
1224 return ERROR_FAIL;
1225 *value = buffer[0] |
1226 (((uint64_t) buffer[1]) << 8) |
1227 (((uint64_t) buffer[2]) << 16) |
1228 (((uint64_t) buffer[3]) << 24) |
1229 (((uint64_t) buffer[4]) << 32) |
1230 (((uint64_t) buffer[5]) << 40) |
1231 (((uint64_t) buffer[6]) << 48) |
1232 (((uint64_t) buffer[7]) << 56);
1233 }
1234 break;
1235 }
1236 return ERROR_OK;
1237 }
1238
1239 static int scratch_write64(struct target *target, scratch_mem_t *scratch,
1240 uint64_t value)
1241 {
1242 switch (scratch->memory_space) {
1243 case SPACE_DM_DATA:
1244 dmi_write(target, DM_DATA0 + scratch->debug_address, value);
1245 dmi_write(target, DM_DATA1 + scratch->debug_address, value >> 32);
1246 break;
1247 case SPACE_DMI_PROGBUF:
1248 dmi_write(target, DM_PROGBUF0 + scratch->debug_address, value);
1249 dmi_write(target, DM_PROGBUF1 + scratch->debug_address, value >> 32);
1250 break;
1251 case SPACE_DMI_RAM:
1252 {
1253 uint8_t buffer[8] = {
1254 value,
1255 value >> 8,
1256 value >> 16,
1257 value >> 24,
1258 value >> 32,
1259 value >> 40,
1260 value >> 48,
1261 value >> 56
1262 };
1263 if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1264 return ERROR_FAIL;
1265 }
1266 break;
1267 }
1268 return ERROR_OK;
1269 }
1270
1271 /** Return register size in bits. */
1272 static unsigned register_size(struct target *target, unsigned number)
1273 {
1274 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1275 * when this function is called during examine(). */
1276 if (target->reg_cache)
1277 return target->reg_cache->reg_list[number].size;
1278 else
1279 return riscv_xlen(target);
1280 }
1281
1282 static bool has_sufficient_progbuf(struct target *target, unsigned size)
1283 {
1284 RISCV013_INFO(info);
1285 RISCV_INFO(r);
1286
1287 return info->progbufsize + r->impebreak >= size;
1288 }
1289
/**
 * Immediately write the new value to the requested register. This mechanism
 * bypasses any caches.
 *
 * Tries an abstract-command write first. If that fails and the hart is
 * halted with at least 2 usable progbuf slots, falls back to executing a
 * small program that moves the value from S0 into the register. On success
 * the cached register value (if any) is updated as well.
 */
static int register_write_direct(struct target *target, unsigned number,
		uint64_t value)
{
	LOG_DEBUG("{%d} %s <- 0x%" PRIx64, riscv_current_hartid(target),
			gdb_regno_name(number), value);

	int result = register_write_abstract(target, number, value,
			register_size(target, number));
	/* Done if the abstract write worked, or if the progbuf fallback is
	 * unavailable (too small, or hart not halted). */
	if (result == ERROR_OK || !has_sufficient_progbuf(target, 2) ||
			!riscv_is_halted(target))
		return result;

	struct riscv_program program;
	riscv_program_init(&program, target);

	/* S0 is used as a staging register; save it for the final restore. */
	uint64_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Enable mstatus.FS/VS if an FPU/vector register is being accessed. */
	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
		return ERROR_FAIL;

	scratch_mem_t scratch;
	bool use_scratch = false;
	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			riscv_supports_extension(target, 'D') &&
			riscv_xlen(target) < 64) {
		/* There are no instructions to move all the bits from a register, so
		 * we need to use some scratch RAM. */
		use_scratch = true;
		riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));

		if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
			return ERROR_FAIL;

		/* Point S0 at the scratch area and stage the 64-bit value there. */
		if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
				!= ERROR_OK) {
			scratch_release(target, &scratch);
			return ERROR_FAIL;
		}

		if (scratch_write64(target, &scratch, value) != ERROR_OK) {
			scratch_release(target, &scratch);
			return ERROR_FAIL;
		}

	} else if (number == GDB_REGNO_VTYPE) {
		/* vtype is set through vsetvli; read vl into S0 first so it is
		 * preserved by the vsetvli. */
		riscv_program_insert(&program, csrr(S0, CSR_VL));
		riscv_program_insert(&program, vsetvli(ZERO, S0, value));

	} else {
		/* Stage the value in S0, then move it into the target register. */
		if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
			return ERROR_FAIL;

		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			if (riscv_supports_extension(target, 'D'))
				riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
			else
				riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
		} else if (number == GDB_REGNO_VL) {
			/* "The XLEN-bit-wide read-only vl CSR can only be updated by the
			 * vsetvli and vsetvl instructions, and the fault-only-rst vector
			 * load instruction variants." */
			riscv_reg_t vtype;
			if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK)
				return ERROR_FAIL;
			if (riscv_program_insert(&program, vsetvli(ZERO, S0, vtype)) != ERROR_OK)
				return ERROR_FAIL;
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrw(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
			return ERROR_FAIL;
		}
	}

	int exec_out = riscv_program_exec(&program, target);
	/* Don't message on error. Probably the register doesn't exist. */
	if (exec_out == ERROR_OK && target->reg_cache) {
		/* Keep the cached copy in sync with what we just wrote. */
		struct reg *reg = &target->reg_cache->reg_list[number];
		buf_set_u64(reg->value, 0, reg->size, value);
	}

	if (use_scratch)
		scratch_release(target, &scratch);

	/* Put mstatus back the way prep_for_register_access() found it. */
	if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
		return ERROR_FAIL;

	/* Restore S0. */
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return exec_out;
}
1390
1391 /** Read register value from the target. Also update the cached value. */
1392 static int register_read(struct target *target, uint64_t *value, uint32_t number)
1393 {
1394 if (number == GDB_REGNO_ZERO) {
1395 *value = 0;
1396 return ERROR_OK;
1397 }
1398 int result = register_read_direct(target, value, number);
1399 if (result != ERROR_OK)
1400 return ERROR_FAIL;
1401 if (target->reg_cache) {
1402 struct reg *reg = &target->reg_cache->reg_list[number];
1403 buf_set_u64(reg->value, 0, reg->size, *value);
1404 }
1405 return ERROR_OK;
1406 }
1407
/** Actually read registers from the target right now.
 *
 * Tries an abstract-command read first; if that fails and the progbuf
 * fallback is available, executes a small program that moves the register
 * into S0 (or into scratch RAM for 64-bit FPRs on RV32) and reads it back.
 * GPRs (number <= GDB_REGNO_XPR31) never take the fallback path. */
static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
{
	int result = register_read_abstract(target, value, number,
			register_size(target, number));

	if (result != ERROR_OK &&
			has_sufficient_progbuf(target, 2) &&
			number > GDB_REGNO_XPR31) {
		struct riscv_program program;
		riscv_program_init(&program, target);

		scratch_mem_t scratch;
		bool use_scratch = false;

		/* S0 is used as a staging register; save it for the final restore. */
		riscv_reg_t s0;
		if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
			return ERROR_FAIL;

		/* Write program to move data into s0. */

		/* Enable mstatus.FS/VS if an FPU/vector register is being accessed. */
		uint64_t mstatus;
		if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			if (riscv_supports_extension(target, 'D')
					&& riscv_xlen(target) < 64) {
				/* There are no instructions to move all the bits from a
				 * register, so we need to use some scratch RAM. */
				riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
							0));

				if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
					return ERROR_FAIL;
				use_scratch = true;

				/* Point S0 at the scratch area the fsd will store into. */
				if (register_write_direct(target, GDB_REGNO_S0,
							scratch.hart_address) != ERROR_OK) {
					scratch_release(target, &scratch);
					return ERROR_FAIL;
				}
			} else if (riscv_supports_extension(target, 'D')) {
				riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
			} else {
				riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
			}
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrr(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register: %s", gdb_regno_name(number));
			return ERROR_FAIL;
		}

		/* Execute program. */
		result = riscv_program_exec(&program, target);
		/* Don't message on error. Probably the register doesn't exist. */

		if (use_scratch) {
			result = scratch_read64(target, &scratch, value);
			scratch_release(target, &scratch);
			if (result != ERROR_OK)
				return result;
		} else {
			/* Read S0 */
			if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
		}

		/* Put mstatus back the way prep_for_register_access() found it. */
		if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		/* Restore S0. */
		if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
			return ERROR_FAIL;
	}

	if (result == ERROR_OK) {
		LOG_DEBUG("{%d} %s = 0x%" PRIx64, riscv_current_hartid(target),
				gdb_regno_name(number), *value);
	}

	return result;
}
1492
1493 static int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
1494 {
1495 time_t start = time(NULL);
1496 while (1) {
1497 uint32_t value;
1498 if (dmstatus_read(target, &value, false) != ERROR_OK)
1499 return ERROR_FAIL;
1500 if (dmstatus)
1501 *dmstatus = value;
1502 if (!get_field(value, DM_DMSTATUS_AUTHBUSY))
1503 break;
1504 if (time(NULL) - start > riscv_command_timeout_sec) {
1505 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1506 "Increase the timeout with riscv set_command_timeout_sec.",
1507 riscv_command_timeout_sec,
1508 value);
1509 return ERROR_FAIL;
1510 }
1511 }
1512
1513 return ERROR_OK;
1514 }
1515
1516 /*** OpenOCD target functions. ***/
1517
1518 static void deinit_target(struct target *target)
1519 {
1520 LOG_DEBUG("riscv_deinit_target()");
1521 struct riscv_info *info = target->arch_info;
1522 if (!info)
1523 return;
1524
1525 free(info->version_specific);
1526 /* TODO: free register arch_info */
1527 info->version_specific = NULL;
1528 }
1529
1530 static int set_haltgroup(struct target *target, bool *supported)
1531 {
1532 uint32_t write = set_field(DM_DMCS2_HGWRITE, DM_DMCS2_GROUP, target->smp);
1533 if (dmi_write(target, DM_DMCS2, write) != ERROR_OK)
1534 return ERROR_FAIL;
1535 uint32_t read;
1536 if (dmi_read(target, &read, DM_DMCS2) != ERROR_OK)
1537 return ERROR_FAIL;
1538 *supported = get_field(read, DM_DMCS2_GROUP) == (unsigned)target->smp;
1539 return ERROR_OK;
1540 }
1541
1542 static int discover_vlenb(struct target *target)
1543 {
1544 RISCV_INFO(r);
1545 riscv_reg_t vlenb;
1546
1547 if (register_read(target, &vlenb, GDB_REGNO_VLENB) != ERROR_OK) {
1548 LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
1549 target_name(target));
1550 r->vlenb = 0;
1551 return ERROR_OK;
1552 }
1553 r->vlenb = vlenb;
1554
1555 LOG_INFO("Vector support with vlenb=%d", r->vlenb);
1556
1557 return ERROR_OK;
1558 }
1559
/* Examine the target: probe the DTM, activate and query the Debug Module,
 * enumerate harts, determine XLEN and misa, and initialize the register
 * cache. Called at connect time and again after authentication succeeds. */
static int examine(struct target *target)
{
	/* Don't need to select dbus, since the first thing we do is read dtmcontrol. */

	uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
	LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
	LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
	LOG_DEBUG(" idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
	LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
	LOG_DEBUG(" abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
	LOG_DEBUG(" version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
	if (dtmcontrol == 0) {
		LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
		return ERROR_FAIL;
	}
	/* Only DTM version 1 (debug spec 0.13) is handled by this driver. */
	if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
		LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
				get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
		return ERROR_FAIL;
	}

	riscv013_info_t *info = get_info(target);
	/* TODO: This won't be true if there are multiple DMs. */
	info->index = target->coreid;
	info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
	info->dtmcs_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);

	/* Reset the Debug Module. */
	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;
	if (!dm->was_reset) {
		dmi_write(target, DM_DMCONTROL, 0);
		dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
		dm->was_reset = true;
	}

	/* Write all-ones to the hartsel bits; which ones stick is used below
	 * to measure HARTSELLEN, and hasel to detect multi-hart selection. */
	dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_HARTSELLO |
			DM_DMCONTROL_HARTSELHI | DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_HASEL);
	uint32_t dmcontrol;
	if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
		return ERROR_FAIL;

	if (!get_field(dmcontrol, DM_DMCONTROL_DMACTIVE)) {
		LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
				dmcontrol);
		return ERROR_FAIL;
	}

	/* hasel reads back as 1 only if the DM implements it. */
	dm->hasel_supported = get_field(dmcontrol, DM_DMCONTROL_HASEL);

	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("dmstatus: 0x%08x", dmstatus);
	int dmstatus_version = get_field(dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		/* Error was already printed out in dmstatus_read(). */
		return ERROR_FAIL;
	}

	/* Count the hartsel bits that stuck at 1 to get HARTSELLEN. */
	uint32_t hartsel =
		(get_field(dmcontrol, DM_DMCONTROL_HARTSELHI) <<
		 DM_DMCONTROL_HARTSELLO_LENGTH) |
		get_field(dmcontrol, DM_DMCONTROL_HARTSELLO);
	info->hartsellen = 0;
	while (hartsel & 1) {
		info->hartsellen++;
		hartsel >>= 1;
	}
	LOG_DEBUG("hartsellen=%d", info->hartsellen);

	uint32_t hartinfo;
	if (dmi_read(target, &hartinfo, DM_HARTINFO) != ERROR_OK)
		return ERROR_FAIL;

	/* Cache the data-register layout advertised by hartinfo; used later
	 * by scratch_reserve(). */
	info->datasize = get_field(hartinfo, DM_HARTINFO_DATASIZE);
	info->dataaccess = get_field(hartinfo, DM_HARTINFO_DATAACCESS);
	info->dataaddr = get_field(hartinfo, DM_HARTINFO_DATAADDR);

	if (!get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", dmstatus);
		/* If we return ERROR_FAIL here, then in a multicore setup the next
		 * core won't be examined, which means we won't set up the
		 * authentication commands for them, which means the config script
		 * needs to be a lot more complex. */
		return ERROR_OK;
	}

	if (dmi_read(target, &info->sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	/* Check that abstract data registers are accessible. */
	uint32_t abstractcs;
	if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
		return ERROR_FAIL;
	info->datacount = get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT);
	info->progbufsize = get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE);

	LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);

	RISCV_INFO(r);
	r->impebreak = get_field(dmstatus, DM_DMSTATUS_IMPEBREAK);

	if (!has_sufficient_progbuf(target, 2)) {
		LOG_WARNING("We won't be able to execute fence instructions on this "
				"target. Memory may not always appear consistent. "
				"(progbufsize=%d, impebreak=%d)", info->progbufsize,
				r->impebreak);
	}

	if (info->progbufsize < 4 && riscv_enable_virtual) {
		LOG_ERROR("set_enable_virtual is not available on this target. It "
				"requires a program buffer size of at least 4. (progbufsize=%d) "
				"Use `riscv set_enable_virtual off` to continue."
				, info->progbufsize);
	}

	/* Before doing anything else we must first enumerate the harts. */
	if (dm->hart_count < 0) {
		for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
			r->current_hartid = i;
			if (riscv013_select_current_hart(target) != ERROR_OK)
				return ERROR_FAIL;

			uint32_t s;
			if (dmstatus_read(target, &s, true) != ERROR_OK)
				return ERROR_FAIL;
			/* Selecting a nonexistent hart ends the enumeration. */
			if (get_field(s, DM_DMSTATUS_ANYNONEXISTENT))
				break;
			dm->hart_count = i + 1;

			/* Acknowledge any pending reset on the hart just probed. */
			if (get_field(s, DM_DMSTATUS_ANYHAVERESET))
				dmi_write(target, DM_DMCONTROL,
						set_hartsel(DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_ACKHAVERESET, i));
		}

		LOG_DEBUG("Detected %d harts.", dm->hart_count);
	}

	r->current_hartid = target->coreid;

	if (dm->hart_count == 0) {
		LOG_ERROR("No harts found!");
		return ERROR_FAIL;
	}

	/* Don't call any riscv_* functions until after we've counted the number of
	 * cores and initialized registers. */

	if (riscv013_select_current_hart(target) != ERROR_OK)
		return ERROR_FAIL;

	/* The hart must be halted to probe XLEN/misa; resumed again below. */
	bool halted = riscv_is_halted(target);
	if (!halted) {
		if (riscv013_halt_go(target) != ERROR_OK) {
			LOG_ERROR("Fatal: Hart %d failed to halt during examine()", r->current_hartid);
			return ERROR_FAIL;
		}
	}

	/* Without knowing anything else we can at least mess with the
	 * program buffer. */
	r->debug_buffer_size = info->progbufsize;

	/* Probe XLEN: a 64-bit abstract read of S0 only succeeds on RV64. */
	int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
	if (result == ERROR_OK)
		r->xlen = 64;
	else
		r->xlen = 32;

	if (register_read(target, &r->misa, GDB_REGNO_MISA)) {
		LOG_ERROR("Fatal: Failed to read MISA from hart %d.", r->current_hartid);
		return ERROR_FAIL;
	}

	if (riscv_supports_extension(target, 'V')) {
		if (discover_vlenb(target) != ERROR_OK)
			return ERROR_FAIL;
	}

	/* Now init registers based on what we discovered. */
	if (riscv_init_registers(target) != ERROR_OK)
		return ERROR_FAIL;

	/* Display this as early as possible to help people who are using
	 * really slow simulators. */
	LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, r->current_hartid, r->xlen,
			r->misa);

	if (!halted)
		riscv013_step_or_resume_current_hart(target, false, false);

	target_set_examined(target);

	if (target->smp) {
		bool haltgroup_supported;
		if (set_haltgroup(target, &haltgroup_supported) != ERROR_OK)
			return ERROR_FAIL;
		if (haltgroup_supported)
			LOG_INFO("Core %d made part of halt group %d.", target->coreid,
					target->smp);
		else
			LOG_INFO("Core %d could not be made part of halt group %d.",
					target->coreid, target->smp);
	}

	/* Some regression suites rely on seeing 'Examined RISC-V core' to know
	 * when they can connect with gdb/telnet.
	 * We will need to update those suites if we want to change that text. */
	LOG_INFO("Examined RISC-V core; found %d harts",
			riscv_count_harts(target));
	LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, r->current_hartid, r->xlen,
			r->misa);
	return ERROR_OK;
}
1779
1780 static int riscv013_authdata_read(struct target *target, uint32_t *value, unsigned int index)
1781 {
1782 if (index > 0) {
1783 LOG_ERROR("Spec 0.13 only has a single authdata register.");
1784 return ERROR_FAIL;
1785 }
1786
1787 if (wait_for_authbusy(target, NULL) != ERROR_OK)
1788 return ERROR_FAIL;
1789
1790 return dmi_read(target, value, DM_AUTHDATA);
1791 }
1792
/* Write the single authdata register defined by spec 0.13. If the write
 * transitions the DM from unauthenticated to authenticated, re-examine
 * every target on this DM (their earlier examine() bailed out early). */
static int riscv013_authdata_write(struct target *target, uint32_t value, unsigned int index)
{
	if (index > 0) {
		LOG_ERROR("Spec 0.13 only has a single authdata register.");
		return ERROR_FAIL;
	}

	/* Sample dmstatus before and after so we can detect the
	 * unauthenticated -> authenticated transition. */
	uint32_t before, after;
	if (wait_for_authbusy(target, &before) != ERROR_OK)
		return ERROR_FAIL;

	dmi_write(target, DM_AUTHDATA, value);

	if (wait_for_authbusy(target, &after) != ERROR_OK)
		return ERROR_FAIL;

	if (!get_field(before, DM_DMSTATUS_AUTHENTICATED) &&
			get_field(after, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_INFO("authdata_write resulted in successful authentication");
		int result = ERROR_OK;
		dm013_info_t *dm = get_dm(target);
		if (!dm)
			return ERROR_FAIL;
		/* Now that the DM is accessible, examine every target behind it. */
		target_list_t *entry;
		list_for_each_entry(entry, &dm->target_list, list) {
			if (examine(entry->target) != ERROR_OK)
				result = ERROR_FAIL;
		}
		return result;
	}

	return ERROR_OK;
}
1826
1827 static int riscv013_hart_count(struct target *target)
1828 {
1829 dm013_info_t *dm = get_dm(target);
1830 assert(dm);
1831 return dm->hart_count;
1832 }
1833
1834 /* Try to find out the widest memory access size depending on the selected memory access methods. */
1835 static unsigned riscv013_data_bits(struct target *target)
1836 {
1837 RISCV013_INFO(info);
1838 RISCV_INFO(r);
1839
1840 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
1841 int method = r->mem_access_methods[i];
1842
1843 if (method == RISCV_MEM_ACCESS_PROGBUF) {
1844 if (has_sufficient_progbuf(target, 3))
1845 return riscv_xlen(target);
1846 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
1847 if (get_field(info->sbcs, DM_SBCS_SBACCESS128))
1848 return 128;
1849 if (get_field(info->sbcs, DM_SBCS_SBACCESS64))
1850 return 64;
1851 if (get_field(info->sbcs, DM_SBCS_SBACCESS32))
1852 return 32;
1853 if (get_field(info->sbcs, DM_SBCS_SBACCESS16))
1854 return 16;
1855 if (get_field(info->sbcs, DM_SBCS_SBACCESS8))
1856 return 8;
1857 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
1858 /* TODO: Once there is a spec for discovering abstract commands, we can
1859 * take those into account as well. For now we assume abstract commands
1860 * support XLEN-wide accesses. */
1861 return riscv_xlen(target);
1862 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
1863 /* No further mem access method to try. */
1864 break;
1865 }
1866 LOG_ERROR("Unable to determine supported data bits on this target. Assuming 32 bits.");
1867 return 32;
1868 }
1869
/* Print one info line per capability for the 'riscv info' command, derived
 * mostly from the cached sbcs value. */
static COMMAND_HELPER(riscv013_print_info, struct target *target)
{
	RISCV013_INFO(info);

	/* Abstract description. */
	riscv_print_info_line(CMD, "target", "memory.read_while_running8", get_field(info->sbcs, DM_SBCS_SBACCESS8));
	riscv_print_info_line(CMD, "target", "memory.write_while_running8", get_field(info->sbcs, DM_SBCS_SBACCESS8));
	riscv_print_info_line(CMD, "target", "memory.read_while_running16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "target", "memory.write_while_running16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "target", "memory.read_while_running32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "target", "memory.write_while_running32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "target", "memory.read_while_running64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "target", "memory.write_while_running64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "target", "memory.read_while_running128", get_field(info->sbcs, DM_SBCS_SBACCESS128));
	riscv_print_info_line(CMD, "target", "memory.write_while_running128", get_field(info->sbcs, DM_SBCS_SBACCESS128));

	/* Lower level description. */
	riscv_print_info_line(CMD, "dm", "abits", info->abits);
	riscv_print_info_line(CMD, "dm", "progbufsize", info->progbufsize);
	riscv_print_info_line(CMD, "dm", "sbversion", get_field(info->sbcs, DM_SBCS_SBVERSION));
	riscv_print_info_line(CMD, "dm", "sbasize", get_field(info->sbcs, DM_SBCS_SBASIZE));
	riscv_print_info_line(CMD, "dm", "sbaccess128", get_field(info->sbcs, DM_SBCS_SBACCESS128));
	riscv_print_info_line(CMD, "dm", "sbaccess64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "dm", "sbaccess32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "dm", "sbaccess16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "dm", "sbaccess8", get_field(info->sbcs, DM_SBCS_SBACCESS8));

	/* Authentication state requires a fresh dmstatus read; skip the line on
	 * failure rather than aborting the command. */
	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) == ERROR_OK)
		riscv_print_info_line(CMD, "dm", "authenticated", get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED));

	return 0;
}
1903
1904 static int prep_for_vector_access(struct target *target, uint64_t *vtype,
1905 uint64_t *vl, unsigned *debug_vl)
1906 {
1907 RISCV_INFO(r);
1908 /* TODO: this continuous save/restore is terrible for performance. */
1909 /* Write vtype and vl. */
1910 unsigned encoded_vsew;
1911 switch (riscv_xlen(target)) {
1912 case 32:
1913 encoded_vsew = 2;
1914 break;
1915 case 64:
1916 encoded_vsew = 3;
1917 break;
1918 default:
1919 LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target));
1920 return ERROR_FAIL;
1921 }
1922
1923 /* Save vtype and vl. */
1924 if (register_read(target, vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1925 return ERROR_FAIL;
1926 if (register_read(target, vl, GDB_REGNO_VL) != ERROR_OK)
1927 return ERROR_FAIL;
1928
1929 if (register_write_direct(target, GDB_REGNO_VTYPE, encoded_vsew << 3) != ERROR_OK)
1930 return ERROR_FAIL;
1931 *debug_vl = DIV_ROUND_UP(r->vlenb * 8, riscv_xlen(target));
1932 if (register_write_direct(target, GDB_REGNO_VL, *debug_vl) != ERROR_OK)
1933 return ERROR_FAIL;
1934
1935 return ERROR_OK;
1936 }
1937
1938 static int cleanup_after_vector_access(struct target *target, uint64_t vtype,
1939 uint64_t vl)
1940 {
1941 /* Restore vtype and vl. */
1942 if (register_write_direct(target, GDB_REGNO_VTYPE, vtype) != ERROR_OK)
1943 return ERROR_FAIL;
1944 if (register_write_direct(target, GDB_REGNO_VL, vl) != ERROR_OK)
1945 return ERROR_FAIL;
1946 return ERROR_OK;
1947 }
1948
/* Read vector register V0..V31 (`regno`) into `value`.
 *
 * The register is read one XLEN-sized chunk at a time: a small program
 * copies element 0 of the vector register into S0 (vmv.x.s), then rotates
 * the register down by one element (vslide1down.vx), and the debugger reads
 * S0 after each execution. After `debug_vl` iterations the register has
 * rotated back to its original contents.
 *
 * Clobbers and restores S0, mstatus (via prep_for_register_access), and
 * vtype/vl. Returns ERROR_OK on success. */
static int riscv013_get_register_buf(struct target *target,
		uint8_t *value, int regno)
{
	assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);

	if (riscv_select_current_hart(target) != ERROR_OK)
		return ERROR_FAIL;

	/* S0 is used as a staging register; save it for restore at the end. */
	riscv_reg_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;

	/* Configure SEW == XLEN and save the previous vtype/vl. */
	uint64_t vtype, vl;
	unsigned debug_vl;
	if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
		return ERROR_FAIL;

	unsigned vnum = regno - GDB_REGNO_V0;
	unsigned xlen = riscv_xlen(target);

	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, vmv_x_s(S0, vnum));
	riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));

	int result = ERROR_OK;
	for (unsigned i = 0; i < debug_vl; i++) {
		/* Executing the program might result in an exception if there is some
		 * issue with the vector implementation/instructions we're using. If that
		 * happens, attempt to restore as usual. We may have clobbered the
		 * vector register we tried to read already.
		 * For other failures, we just return error because things are probably
		 * so messed up that attempting to restore isn't going to help. */
		result = riscv_program_exec(&program, target);
		if (result == ERROR_OK) {
			uint64_t v;
			if (register_read_direct(target, &v, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u64(value, xlen * i, xlen, v);
		} else {
			break;
		}
	}

	/* Restore vtype/vl, mstatus, and S0 (also on program-exec failure). */
	if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
		return ERROR_FAIL;

	if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return result;
}
2007
2008 static int riscv013_set_register_buf(struct target *target,
2009 int regno, const uint8_t *value)
2010 {
2011 assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);
2012
2013 if (riscv_select_current_hart(target) != ERROR_OK)
2014 return ERROR_FAIL;
2015
2016 riscv_reg_t s0;
2017 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
2018 return ERROR_FAIL;
2019
2020 uint64_t mstatus;
2021 if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
2022 return ERROR_FAIL;
2023
2024 uint64_t vtype, vl;
2025 unsigned debug_vl;
2026 if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
2027 return ERROR_FAIL;
2028
2029 unsigned vnum = regno - GDB_REGNO_V0;
2030 unsigned xlen = riscv_xlen(target);
2031
2032 struct riscv_program program;
2033 riscv_program_init(&program, target);
2034 riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
2035 int result = ERROR_OK;
2036 for (unsigned i = 0; i < debug_vl; i++) {
2037 if (register_write_direct(target, GDB_REGNO_S0,
2038 buf_get_u64(value, xlen * i, xlen)) != ERROR_OK)
2039 return ERROR_FAIL;
2040 result = riscv_program_exec(&program, target);
2041 if (result != ERROR_OK)
2042 break;
2043 }
2044
2045 if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
2046 return ERROR_FAIL;
2047
2048 if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
2049 return ERROR_FAIL;
2050 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
2051 return ERROR_FAIL;
2052
2053 return result;
2054 }
2055
2056 static uint32_t sb_sbaccess(unsigned int size_bytes)
2057 {
2058 switch (size_bytes) {
2059 case 1:
2060 return set_field(0, DM_SBCS_SBACCESS, 0);
2061 case 2:
2062 return set_field(0, DM_SBCS_SBACCESS, 1);
2063 case 4:
2064 return set_field(0, DM_SBCS_SBACCESS, 2);
2065 case 8:
2066 return set_field(0, DM_SBCS_SBACCESS, 3);
2067 case 16:
2068 return set_field(0, DM_SBCS_SBACCESS, 4);
2069 }
2070 assert(0);
2071 return 0;
2072 }
2073
2074 static int sb_write_address(struct target *target, target_addr_t address,
2075 bool ensure_success)
2076 {
2077 RISCV013_INFO(info);
2078 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2079 /* There currently is no support for >64-bit addresses in OpenOCD. */
2080 if (sbasize > 96)
2081 dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS3, 0, false, false);
2082 if (sbasize > 64)
2083 dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS2, 0, false, false);
2084 if (sbasize > 32)
2085 dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS1, address >> 32, false, false);
2086 return dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS0, address,
2087 false, ensure_success);
2088 }
2089
2090 static int batch_run(const struct target *target, struct riscv_batch *batch)
2091 {
2092 RISCV013_INFO(info);
2093 RISCV_INFO(r);
2094 if (r->reset_delays_wait >= 0) {
2095 r->reset_delays_wait -= batch->used_scans;
2096 if (r->reset_delays_wait <= 0) {
2097 batch->idle_count = 0;
2098 info->dmi_busy_delay = 0;
2099 info->ac_busy_delay = 0;
2100 }
2101 }
2102 return riscv_batch_run(batch);
2103 }
2104
2105 static int sba_supports_access(struct target *target, unsigned int size_bytes)
2106 {
2107 RISCV013_INFO(info);
2108 switch (size_bytes) {
2109 case 1:
2110 return get_field(info->sbcs, DM_SBCS_SBACCESS8);
2111 case 2:
2112 return get_field(info->sbcs, DM_SBCS_SBACCESS16);
2113 case 4:
2114 return get_field(info->sbcs, DM_SBCS_SBACCESS32);
2115 case 8:
2116 return get_field(info->sbcs, DM_SBCS_SBACCESS64);
2117 case 16:
2118 return get_field(info->sbcs, DM_SBCS_SBACCESS128);
2119 default:
2120 return 0;
2121 }
2122 }
2123
2124 static int sample_memory_bus_v1(struct target *target,
2125 struct riscv_sample_buf *buf,
2126 const riscv_sample_config_t *config,
2127 int64_t until_ms)
2128 {
2129 RISCV013_INFO(info);
2130 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2131 if (sbasize > 64) {
2132 LOG_ERROR("Memory sampling is only implemented for sbasize <= 64.");
2133 return ERROR_NOT_IMPLEMENTED;
2134 }
2135
2136 if (get_field(info->sbcs, DM_SBCS_SBVERSION) != 1) {
2137 LOG_ERROR("Memory sampling is only implemented for SBA version 1.");
2138 return ERROR_NOT_IMPLEMENTED;
2139 }
2140
2141 uint32_t sbcs = 0;
2142 uint32_t sbcs_valid = false;
2143
2144 uint32_t sbaddress0 = 0;
2145 bool sbaddress0_valid = false;
2146 uint32_t sbaddress1 = 0;
2147 bool sbaddress1_valid = false;
2148
2149 /* How often to read each value in a batch. */
2150 const unsigned int repeat = 5;
2151
2152 unsigned int enabled_count = 0;
2153 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2154 if (config->bucket[i].enabled)
2155 enabled_count++;
2156 }
2157
2158 while (timeval_ms() < until_ms) {
2159 /*
2160 * batch_run() adds to the batch, so we can't simply reuse the same
2161 * batch over and over. So we create a new one every time through the
2162 * loop.
2163 */
2164 struct riscv_batch *batch = riscv_batch_alloc(
2165 target, 1 + enabled_count * 5 * repeat,
2166 info->dmi_busy_delay + info->bus_master_read_delay);
2167 if (!batch)
2168 return ERROR_FAIL;
2169
2170 unsigned int result_bytes = 0;
2171 for (unsigned int n = 0; n < repeat; n++) {
2172 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2173 if (config->bucket[i].enabled) {
2174 if (!sba_supports_access(target, config->bucket[i].size_bytes)) {
2175 LOG_ERROR("Hardware does not support SBA access for %d-byte memory sampling.",
2176 config->bucket[i].size_bytes);
2177 return ERROR_NOT_IMPLEMENTED;
2178 }
2179
2180 uint32_t sbcs_write = DM_SBCS_SBREADONADDR;
2181 if (enabled_count == 1)
2182 sbcs_write |= DM_SBCS_SBREADONDATA;
2183 sbcs_write |= sb_sbaccess(config->bucket[i].size_bytes);
2184 if (!sbcs_valid || sbcs_write != sbcs) {
2185 riscv_batch_add_dmi_write(batch, DM_SBCS, sbcs_write);
2186 sbcs = sbcs_write;
2187 sbcs_valid = true;
2188 }
2189
2190 if (sbasize > 32 &&
2191 (!sbaddress1_valid ||
2192 sbaddress1 != config->bucket[i].address >> 32)) {
2193 sbaddress1 = config->bucket[i].address >> 32;
2194 riscv_batch_add_dmi_write(batch, DM_SBADDRESS1, sbaddress1);
2195 sbaddress1_valid = true;
2196 }
2197 if (!sbaddress0_valid ||
2198 sbaddress0 != (config->bucket[i].address & 0xffffffff)) {
2199 sbaddress0 = config->bucket[i].address;
2200 riscv_batch_add_dmi_write(batch, DM_SBADDRESS0, sbaddress0);
2201 sbaddress0_valid = true;
2202 }
2203 if (config->bucket[i].size_bytes > 4)
2204 riscv_batch_add_dmi_read(batch, DM_SBDATA1);
2205 riscv_batch_add_dmi_read(batch, DM_SBDATA0);
2206 result_bytes += 1 + config->bucket[i].size_bytes;
2207 }
2208 }
2209 }
2210
2211 if (buf->used + result_bytes >= buf->size) {
2212 riscv_batch_free(batch);
2213 break;
2214 }
2215
2216 size_t sbcs_key = riscv_batch_add_dmi_read(batch, DM_SBCS);
2217
2218 int result = batch_run(target, batch);
2219 if (result != ERROR_OK)
2220 return result;
2221
2222 uint32_t sbcs_read = riscv_batch_get_dmi_read_data(batch, sbcs_key);
2223 if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2224 /* Discard this batch (too much hassle to try to recover partial
2225 * data) and try again with a larger delay. */
2226 info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
2227 dmi_write(target, DM_SBCS, sbcs_read | DM_SBCS_SBBUSYERROR | DM_SBCS_SBERROR);
2228 riscv_batch_free(batch);
2229 continue;
2230 }
2231 if (get_field(sbcs_read, DM_SBCS_SBERROR)) {
2232 /* The memory we're sampling was unreadable, somehow. Give up. */
2233 dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR | DM_SBCS_SBERROR);
2234 riscv_batch_free(batch);
2235 return ERROR_FAIL;
2236 }
2237
2238 unsigned int read = 0;
2239 for (unsigned int n = 0; n < repeat; n++) {
2240 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2241 if (config->bucket[i].enabled) {
2242 assert(i < RISCV_SAMPLE_BUF_TIMESTAMP_BEFORE);
2243 uint64_t value = 0;
2244 if (config->bucket[i].size_bytes > 4)
2245 value = ((uint64_t)riscv_batch_get_dmi_read_data(batch, read++)) << 32;
2246 value |= riscv_batch_get_dmi_read_data(batch, read++);
2247
2248 buf->buf[buf->used] = i;
2249 buf_set_u64(buf->buf + buf->used + 1, 0, config->bucket[i].size_bytes * 8, value);
2250 buf->used += 1 + config->bucket[i].size_bytes;
2251 }
2252 }
2253 }
2254
2255 riscv_batch_free(batch);
2256 }
2257
2258 return ERROR_OK;
2259 }
2260
2261 static int sample_memory(struct target *target,
2262 struct riscv_sample_buf *buf,
2263 riscv_sample_config_t *config,
2264 int64_t until_ms)
2265 {
2266 if (!config->enabled)
2267 return ERROR_OK;
2268
2269 return sample_memory_bus_v1(target, buf, config, until_ms);
2270 }
2271
2272 static int init_target(struct command_context *cmd_ctx,
2273 struct target *target)
2274 {
2275 LOG_DEBUG("init");
2276 RISCV_INFO(generic_info);
2277
2278 generic_info->get_register = &riscv013_get_register;
2279 generic_info->set_register = &riscv013_set_register;
2280 generic_info->get_register_buf = &riscv013_get_register_buf;
2281 generic_info->set_register_buf = &riscv013_set_register_buf;
2282 generic_info->select_current_hart = &riscv013_select_current_hart;
2283 generic_info->is_halted = &riscv013_is_halted;
2284 generic_info->resume_go = &riscv013_resume_go;
2285 generic_info->step_current_hart = &riscv013_step_current_hart;
2286 generic_info->on_halt = &riscv013_on_halt;
2287 generic_info->resume_prep = &riscv013_resume_prep;
2288 generic_info->halt_prep = &riscv013_halt_prep;
2289 generic_info->halt_go = &riscv013_halt_go;
2290 generic_info->on_step = &riscv013_on_step;
2291 generic_info->halt_reason = &riscv013_halt_reason;
2292 generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
2293 generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
2294 generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
2295 generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
2296 generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
2297 generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
2298 generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
2299 generic_info->authdata_read = &riscv013_authdata_read;
2300 generic_info->authdata_write = &riscv013_authdata_write;
2301 generic_info->dmi_read = &dmi_read;
2302 generic_info->dmi_write = &dmi_write;
2303 generic_info->read_memory = read_memory;
2304 generic_info->hart_count = &riscv013_hart_count;
2305 generic_info->data_bits = &riscv013_data_bits;
2306 generic_info->print_info = &riscv013_print_info;
2307 if (!generic_info->version_specific) {
2308 generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
2309 if (!generic_info->version_specific)
2310 return ERROR_FAIL;
2311 }
2312 generic_info->sample_memory = sample_memory;
2313 riscv013_info_t *info = get_info(target);
2314
2315 info->progbufsize = -1;
2316
2317 info->dmi_busy_delay = 0;
2318 info->bus_master_read_delay = 0;
2319 info->bus_master_write_delay = 0;
2320 info->ac_busy_delay = 0;
2321
2322 /* Assume all these abstract commands are supported until we learn
2323 * otherwise.
2324 * TODO: The spec allows eg. one CSR to be able to be accessed abstractly
2325 * while another one isn't. We don't track that this closely here, but in
2326 * the future we probably should. */
2327 info->abstract_read_csr_supported = true;
2328 info->abstract_write_csr_supported = true;
2329 info->abstract_read_fpr_supported = true;
2330 info->abstract_write_fpr_supported = true;
2331
2332 info->has_aampostincrement = YNM_MAYBE;
2333
2334 return ERROR_OK;
2335 }
2336
/* Assert reset on the target. Depending on configuration this runs a
 * user-supplied reset-assert script, resets all harts via ndmreset (RTOS /
 * multi-hart case), or resets just the current hart. haltreq is set first
 * when reset_halt was requested, so harts halt as they leave reset. */
static int assert_reset(struct target *target)
{
	RISCV_INFO(r);

	select_dmi(target);

	/* Keep dmactive set in every dmcontrol write so the DM stays active. */
	uint32_t control_base = set_field(0, DM_DMCONTROL_DMACTIVE, 1);

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* Run the user-supplied script if there is one. */
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	} else if (target->rtos) {
		/* There's only one target, and OpenOCD thinks each hart is a thread.
		 * We must reset them all. */

		/* TODO: Try to use hasel in dmcontrol */

		/* Set haltreq for each hart. */
		uint32_t control = set_hartsel(control_base, target->coreid);
		control = set_field(control, DM_DMCONTROL_HALTREQ,
				target->reset_halt ? 1 : 0);
		dmi_write(target, DM_DMCONTROL, control);

		/* Assert ndmreset */
		control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DM_DMCONTROL, control);

	} else {
		/* Reset just this hart: haltreq and ndmreset in a single write. */
		uint32_t control = set_hartsel(control_base, r->current_hartid);
		control = set_field(control, DM_DMCONTROL_HALTREQ,
				target->reset_halt ? 1 : 0);
		control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DM_DMCONTROL, control);
	}

	target->state = TARGET_RESET;

	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;

	/* The DM might have gotten reset if OpenOCD called us in some reset that
	 * involves SRST being toggled. So clear our cache which may be out of
	 * date. */
	memset(dm->progbuf_cache, 0, sizeof(dm->progbuf_cache));

	return ERROR_OK;
}
2386
/* Deassert reset and wait for the relevant hart(s) to come out of it.
 * Clears ndmreset (by writing dmcontrol without it) while preserving
 * haltreq if a halt-on-reset was requested, then polls dmstatus until the
 * hart is no longer reported unavailable, and finally acknowledges
 * havereset. */
static int deassert_reset(struct target *target)
{
	RISCV_INFO(r);
	RISCV013_INFO(info);
	select_dmi(target);

	/* Clear the reset, but make sure haltreq is still set */
	uint32_t control = 0, control_haltreq;
	control = set_field(control, DM_DMCONTROL_DMACTIVE, 1);
	control_haltreq = set_field(control, DM_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
	dmi_write(target, DM_DMCONTROL,
			set_hartsel(control_haltreq, r->current_hartid));

	uint32_t dmstatus;
	/* Save the DMI busy delay; any increases made while polling during
	 * reset are discarded at the end of this function. */
	int dmi_busy_delay = info->dmi_busy_delay;
	time_t start = time(NULL);

	for (int i = 0; i < riscv_count_harts(target); ++i) {
		int index = i;
		if (target->rtos) {
			/* In RTOS mode only wait for this target's own hart. */
			if (index != target->coreid)
				continue;
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control_haltreq, index));
		} else {
			index = r->current_hartid;
		}

		LOG_DEBUG("Waiting for hart %d to come out of reset.", index);
		while (1) {
			int result = dmstatus_read_timeout(target, &dmstatus, true,
					riscv_reset_timeout_sec);
			if (result == ERROR_TIMEOUT_REACHED)
				LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
						"reset in %ds; Increase the timeout with riscv "
						"set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec);
			if (result != ERROR_OK)
				return result;
			/* Certain debug modules, like the one in GD32VF103
			 * MCUs, violate the specification's requirement that
			 * each hart is in "exactly one of four states" and,
			 * during reset, report harts as both unavailable and
			 * halted/running. To work around this, we check for
			 * the absence of the unavailable state rather than
			 * the presence of any other state. */
			if (!get_field(dmstatus, DM_DMSTATUS_ALLUNAVAIL))
				break;
			if (time(NULL) - start > riscv_reset_timeout_sec) {
				LOG_ERROR("Hart %d didn't leave reset in %ds; "
						"dmstatus=0x%x; "
						"Increase the timeout with riscv set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec, dmstatus);
				return ERROR_FAIL;
			}
		}
		target->state = TARGET_HALTED;

		if (get_field(dmstatus, DM_DMSTATUS_ALLHAVERESET)) {
			/* Ack reset and clear DM_DMCONTROL_HALTREQ if previously set */
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control, index) |
					DM_DMCONTROL_ACKHAVERESET);
		}

		if (!target->rtos)
			break;
	}
	/* Drop any dmi_busy_delay increases accumulated while waiting. */
	info->dmi_busy_delay = dmi_busy_delay;
	return ERROR_OK;
}
2458
2459 static int execute_fence(struct target *target)
2460 {
2461 /* FIXME: For non-coherent systems we need to flush the caches right
2462 * here, but there's no ISA-defined way of doing that. */
2463 {
2464 struct riscv_program program;
2465 riscv_program_init(&program, target);
2466 riscv_program_fence_i(&program);
2467 riscv_program_fence(&program);
2468 int result = riscv_program_exec(&program, target);
2469 if (result != ERROR_OK)
2470 LOG_DEBUG("Unable to execute pre-fence");
2471 }
2472
2473 return ERROR_OK;
2474 }
2475
2476 static void log_memory_access(target_addr_t address, uint64_t value,
2477 unsigned size_bytes, bool read)
2478 {
2479 if (debug_level < LOG_LVL_DEBUG)
2480 return;
2481
2482 char fmt[80];
2483 sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
2484 address, read ? "read" : "write", size_bytes * 2);
2485 switch (size_bytes) {
2486 case 1:
2487 value &= 0xff;
2488 break;
2489 case 2:
2490 value &= 0xffff;
2491 break;
2492 case 4:
2493 value &= 0xffffffffUL;
2494 break;
2495 case 8:
2496 break;
2497 default:
2498 assert(false);
2499 }
2500 LOG_DEBUG(fmt, value);
2501 }
2502
2503 /* Read the relevant sbdata regs depending on size, and put the results into
2504 * buffer. */
2505 static int read_memory_bus_word(struct target *target, target_addr_t address,
2506 uint32_t size, uint8_t *buffer)
2507 {
2508 uint32_t value;
2509 int result;
2510 static int sbdata[4] = { DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3 };
2511 assert(size <= 16);
2512 for (int i = (size - 1) / 4; i >= 0; i--) {
2513 result = dmi_op(target, &value, NULL, DMI_OP_READ, sbdata[i], 0, false, true);
2514 if (result != ERROR_OK)
2515 return result;
2516 buf_set_u32(buffer + i * 4, 0, 8 * MIN(size, 4), value);
2517 log_memory_access(address + i * 4, value, MIN(size, 4), true);
2518 }
2519 return ERROR_OK;
2520 }
2521
2522 static target_addr_t sb_read_address(struct target *target)
2523 {
2524 RISCV013_INFO(info);
2525 unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2526 target_addr_t address = 0;
2527 uint32_t v;
2528 if (sbasize > 32) {
2529 dmi_read(target, &v, DM_SBADDRESS1);
2530 address |= v;
2531 address <<= 32;
2532 }
2533 dmi_read(target, &v, DM_SBADDRESS0);
2534 address |= v;
2535 return address;
2536 }
2537
2538 static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
2539 {
2540 time_t start = time(NULL);
2541 while (1) {
2542 if (dmi_read(target, sbcs, DM_SBCS) != ERROR_OK)
2543 return ERROR_FAIL;
2544 if (!get_field(*sbcs, DM_SBCS_SBBUSY))
2545 return ERROR_OK;
2546 if (time(NULL) - start > riscv_command_timeout_sec) {
2547 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
2548 "Increase the timeout with riscv set_command_timeout_sec.",
2549 riscv_command_timeout_sec, *sbcs);
2550 return ERROR_FAIL;
2551 }
2552 }
2553 }
2554
2555 static int modify_privilege(struct target *target, uint64_t *mstatus, uint64_t *mstatus_old)
2556 {
2557 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5)) {
2558 /* Read DCSR */
2559 uint64_t dcsr;
2560 if (register_read(target, &dcsr, GDB_REGNO_DCSR) != ERROR_OK)
2561 return ERROR_FAIL;
2562
2563 /* Read and save MSTATUS */
2564 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
2565 return ERROR_FAIL;
2566 *mstatus_old = *mstatus;
2567
2568 /* If we come from m-mode with mprv set, we want to keep mpp */
2569 if (get_field(dcsr, DCSR_PRV) < 3) {
2570 /* MPP = PRIV */
2571 *mstatus = set_field(*mstatus, MSTATUS_MPP, get_field(dcsr, DCSR_PRV));
2572
2573 /* MPRV = 1 */
2574 *mstatus = set_field(*mstatus, MSTATUS_MPRV, 1);
2575
2576 /* Write MSTATUS */
2577 if (*mstatus != *mstatus_old)
2578 if (register_write_direct(target, GDB_REGNO_MSTATUS, *mstatus) != ERROR_OK)
2579 return ERROR_FAIL;
2580 }
2581 }
2582
2583 return ERROR_OK;
2584 }
2585
/* Read memory over the system bus using the pre-ratification (v0) SBA
 * layout. Uses the v0-only sbsingleread/sbautoread bits, which do not exist
 * in the ratified 0.13 register map and are therefore defined locally
 * below. Single reads are done one at a time; larger blocks use
 * auto-increment + auto-read. */
static int read_memory_bus_v0(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (size != increment) {
		LOG_ERROR("sba v0 reads only support size==increment");
		return ERROR_NOT_IMPLEMENTED;
	}

	LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
			TARGET_PRIxADDR, size, count, address);
	uint8_t *t_buffer = buffer;
	riscv_addr_t cur_addr = address;
	riscv_addr_t fin_addr = address + (count * size);
	uint32_t access = 0;

	/* v0-specific sbcs bits, absent from the ratified debug_defines.h. */
	const int DM_SBCS_SBSINGLEREAD_OFFSET = 20;
	const uint32_t DM_SBCS_SBSINGLEREAD = (0x1U << DM_SBCS_SBSINGLEREAD_OFFSET);

	const int DM_SBCS_SBAUTOREAD_OFFSET = 15;
	const uint32_t DM_SBCS_SBAUTOREAD = (0x1U << DM_SBCS_SBAUTOREAD_OFFSET);

	/* Favor one-off reads: the single-read path is simpler and more robust
	 * if there is an issue. */
	if (count == 1) {
		for (uint32_t i = 0; i < count; i++) {
			if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
				return ERROR_FAIL;
			dmi_write(target, DM_SBADDRESS0, cur_addr);
			/* size/2 matching the bit access of the spec 0.13 */
			access = set_field(access, DM_SBCS_SBACCESS, size/2);
			access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
			LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
			dmi_write(target, DM_SBCS, access);
			/* 3) read */
			uint32_t value;
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
			buf_set_u32(t_buffer, 0, 8 * size, value);
			t_buffer += size;
			cur_addr += size;
		}
		return ERROR_OK;
	}

	/* has to be the same size if we want to read a block */
	LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
	if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;
	/* set current address */
	dmi_write(target, DM_SBADDRESS0, cur_addr);
	/* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
	 * size/2 matching the bit access of the spec 0.13 */
	access = set_field(access, DM_SBCS_SBACCESS, size/2);
	access = set_field(access, DM_SBCS_SBAUTOREAD, 1);
	access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
	access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
	LOG_DEBUG("\r\naccess: 0x%08x", access);
	dmi_write(target, DM_SBCS, access);

	while (cur_addr < fin_addr) {
		LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
				PRIx64, size, count, cur_addr);
		/* read */
		uint32_t value;
		if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
			return ERROR_FAIL;
		buf_set_u32(t_buffer, 0, 8 * size, value);
		cur_addr += size;
		t_buffer += size;

		/* if we are reaching last address, we must clear autoread */
		if (cur_addr == fin_addr && count != 1) {
			dmi_write(target, DM_SBCS, 0);
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u32(t_buffer, 0, 8 * size, value);
		}
	}

	/* Final sbcs read; the result is intentionally unused. */
	uint32_t sbcs;
	if (dmi_read(target, &sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	return ERROR_OK;
}
2671
2672 /**
2673 * Read the requested memory using the system bus interface.
2674 */
2675 static int read_memory_bus_v1(struct target *target, target_addr_t address,
2676 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2677 {
2678 if (increment != size && increment != 0) {
2679 LOG_ERROR("sba v1 reads only support increment of size or 0");
2680 return ERROR_NOT_IMPLEMENTED;
2681 }
2682
2683 RISCV013_INFO(info);
2684 target_addr_t next_address = address;
2685 target_addr_t end_address = address + count * size;
2686
2687 while (next_address < end_address) {
2688 uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
2689 sbcs_write |= sb_sbaccess(size);
2690 if (increment == size)
2691 sbcs_write = set_field(sbcs_write, DM_SBCS_SBAUTOINCREMENT, 1);
2692 if (count > 1)
2693 sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, count > 1);
2694 if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
2695 return ERROR_FAIL;
2696
2697 /* This address write will trigger the first read. */
2698 if (sb_write_address(target, next_address, true) != ERROR_OK)
2699 return ERROR_FAIL;
2700
2701 if (info->bus_master_read_delay) {
2702 jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
2703 if (jtag_execute_queue() != ERROR_OK) {
2704 LOG_ERROR("Failed to scan idle sequence");
2705 return ERROR_FAIL;
2706 }
2707 }
2708
2709 /* First value has been read, and is waiting for us to issue a DMI read
2710 * to get it. */
2711
2712 static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
2713 assert(size <= 16);
2714 target_addr_t next_read = address - 1;
2715 for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
2716 for (int j = (size - 1) / 4; j >= 0; j--) {
2717 uint32_t value;
2718 unsigned attempt = 0;
2719 while (1) {
2720 if (attempt++ > 100) {
2721 LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
2722 next_read);
2723 return ERROR_FAIL;
2724 }
2725 keep_alive();
2726 dmi_status_t status = dmi_scan(target, NULL, &value,
2727 DMI_OP_READ, sbdata[j], 0, false);
2728 if (status == DMI_STATUS_BUSY)
2729 increase_dmi_busy_delay(target);
2730 else if (status == DMI_STATUS_SUCCESS)
2731 break;
2732 else
2733 return ERROR_FAIL;
2734 }
2735 if (next_read != address - 1) {
2736 buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
2737 log_memory_access(next_read, value, MIN(size, 4), true);
2738 }
2739 next_read = address + i * size + j * 4;
2740 }
2741 }
2742
2743 uint32_t sbcs_read = 0;
2744 if (count > 1) {
2745 uint32_t value;
2746 unsigned attempt = 0;
2747 while (1) {
2748 if (attempt++ > 100) {
2749 LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
2750 next_read);
2751 return ERROR_FAIL;
2752 }
2753 dmi_status_t status = dmi_scan(target, NULL, &value, DMI_OP_NOP, 0, 0, false);
2754 if (status == DMI_STATUS_BUSY)
2755 increase_dmi_busy_delay(target);
2756 else if (status == DMI_STATUS_SUCCESS)
2757 break;
2758 else
2759 return ERROR_FAIL;
2760 }
2761 buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
2762 log_memory_access(next_read, value, MIN(size, 4), true);
2763
2764 /* "Writes to sbcs while sbbusy is high result in undefined behavior.
2765 * A debugger must not write to sbcs until it reads sbbusy as 0." */
2766 if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
2767 return ERROR_FAIL;
2768
2769 sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, 0);
2770 if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
2771 return ERROR_FAIL;
2772 }
2773
2774 /* Read the last word, after we disabled sbreadondata if necessary. */
2775 if (!get_field(sbcs_read, DM_SBCS_SBERROR) &&
2776 !get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2777 if (read_memory_bus_word(target, address + (count - 1) * size, size,
2778 buffer + (count - 1) * size) != ERROR_OK)
2779 return ERROR_FAIL;
2780
2781 if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
2782 return ERROR_FAIL;
2783 }
2784
2785 if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2786 /* We read while the target was busy. Slow down and try again. */
2787 if (dmi_write(target, DM_SBCS, sbcs_read | DM_SBCS_SBBUSYERROR) != ERROR_OK)
2788 return ERROR_FAIL;
2789 next_address = sb_read_address(target);
2790 info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
2791 continue;
2792 }
2793
2794 unsigned error = get_field(sbcs_read, DM_SBCS_SBERROR);
2795 if (error == 0) {
2796 next_address = end_address;
2797 } else {
2798 /* Some error indicating the bus access failed, but not because of
2799 * something we did wrong. */
2800 if (dmi_write(target, DM_SBCS, DM_SBCS_SBERROR) != ERROR_OK)
2801 return ERROR_FAIL;
2802 return ERROR_FAIL;
2803 }
2804 }
2805
2806 return ERROR_OK;
2807 }
2808
2809 static void log_mem_access_result(struct target *target, bool success, int method, bool read)
2810 {
2811 RISCV_INFO(r);
2812 bool warn = false;
2813 char msg[60];
2814
2815 /* Compose the message */
2816 snprintf(msg, 60, "%s to %s memory via %s.",
2817 success ? "Succeeded" : "Failed",
2818 read ? "read" : "write",
2819 (method == RISCV_MEM_ACCESS_PROGBUF) ? "program buffer" :
2820 (method == RISCV_MEM_ACCESS_SYSBUS) ? "system bus" : "abstract access");
2821
2822 /* Determine the log message severity. Show warnings only once. */
2823 if (!success) {
2824 if (method == RISCV_MEM_ACCESS_PROGBUF) {
2825 warn = r->mem_access_progbuf_warn;
2826 r->mem_access_progbuf_warn = false;
2827 }
2828 if (method == RISCV_MEM_ACCESS_SYSBUS) {
2829 warn = r->mem_access_sysbus_warn;
2830 r->mem_access_sysbus_warn = false;
2831 }
2832 if (method == RISCV_MEM_ACCESS_ABSTRACT) {
2833 warn = r->mem_access_abstract_warn;
2834 r->mem_access_abstract_warn = false;
2835 }
2836 }
2837
2838 if (warn)
2839 LOG_WARNING("%s", msg);
2840 else
2841 LOG_DEBUG("%s", msg);
2842 }
2843
2844 static bool mem_should_skip_progbuf(struct target *target, target_addr_t address,
2845 uint32_t size, bool read, char **skip_reason)
2846 {
2847 assert(skip_reason);
2848
2849 if (!has_sufficient_progbuf(target, 3)) {
2850 LOG_DEBUG("Skipping mem %s via progbuf - insufficient progbuf size.",
2851 read ? "read" : "write");
2852 *skip_reason = "skipped (insufficient progbuf)";
2853 return true;
2854 }
2855 if (target->state != TARGET_HALTED) {
2856 LOG_DEBUG("Skipping mem %s via progbuf - target not halted.",
2857 read ? "read" : "write");
2858 *skip_reason = "skipped (target not halted)";
2859 return true;
2860 }
2861 if (riscv_xlen(target) < size * 8) {
2862 LOG_DEBUG("Skipping mem %s via progbuf - XLEN (%d) is too short for %d-bit memory access.",
2863 read ? "read" : "write", riscv_xlen(target), size * 8);
2864 *skip_reason = "skipped (XLEN too short)";
2865 return true;
2866 }
2867 if (size > 8) {
2868 LOG_DEBUG("Skipping mem %s via progbuf - unsupported size.",
2869 read ? "read" : "write");
2870 *skip_reason = "skipped (unsupported size)";
2871 return true;
2872 }
2873 if ((sizeof(address) * 8 > riscv_xlen(target)) && (address >> riscv_xlen(target))) {
2874 LOG_DEBUG("Skipping mem %s via progbuf - progbuf only supports %u-bit address.",
2875 read ? "read" : "write", riscv_xlen(target));
2876 *skip_reason = "skipped (too large address)";
2877 return true;
2878 }
2879
2880 return false;
2881 }
2882
2883 static bool mem_should_skip_sysbus(struct target *target, target_addr_t address,
2884 uint32_t size, uint32_t increment, bool read, char **skip_reason)
2885 {
2886 assert(skip_reason);
2887
2888 RISCV013_INFO(info);
2889 if (!sba_supports_access(target, size)) {
2890 LOG_DEBUG("Skipping mem %s via system bus - unsupported size.",
2891 read ? "read" : "write");
2892 *skip_reason = "skipped (unsupported size)";
2893 return true;
2894 }
2895 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2896 if ((sizeof(address) * 8 > sbasize) && (address >> sbasize)) {
2897 LOG_DEBUG("Skipping mem %s via system bus - sba only supports %u-bit address.",
2898 read ? "read" : "write", sbasize);
2899 *skip_reason = "skipped (too large address)";
2900 return true;
2901 }
2902 if (read && increment != size && (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0 || increment != 0)) {
2903 LOG_DEBUG("Skipping mem read via system bus - "
2904 "sba reads only support size==increment or also size==0 for sba v1.");
2905 *skip_reason = "skipped (unsupported increment)";
2906 return true;
2907 }
2908
2909 return false;
2910 }
2911
2912 static bool mem_should_skip_abstract(struct target *target, target_addr_t address,
2913 uint32_t size, uint32_t increment, bool read, char **skip_reason)
2914 {
2915 assert(skip_reason);
2916
2917 if (size > 8) {
2918 /* TODO: Add 128b support if it's ever used. Involves modifying
2919 read/write_abstract_arg() to work on two 64b values. */
2920 LOG_DEBUG("Skipping mem %s via abstract access - unsupported size: %d bits",
2921 read ? "read" : "write", size * 8);
2922 *skip_reason = "skipped (unsupported size)";
2923 return true;
2924 }
2925 if ((sizeof(address) * 8 > riscv_xlen(target)) && (address >> riscv_xlen(target))) {
2926 LOG_DEBUG("Skipping mem %s via abstract access - abstract access only supports %u-bit address.",
2927 read ? "read" : "write", riscv_xlen(target));
2928 *skip_reason = "skipped (too large address)";
2929 return true;
2930 }
2931 if (read && size != increment) {
2932 LOG_ERROR("Skipping mem read via abstract access - "
2933 "abstract command reads only support size==increment.");
2934 *skip_reason = "skipped (unsupported increment)";
2935 return true;
2936 }
2937
2938 return false;
2939 }
2940
/*
 * Performs a memory read using memory access abstract commands. The read sizes
 * supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16 byte
 * aamsize fields in the memory access abstract command.
 *
 * The 'increment' parameter is unused here; each access advances by 'size'.
 * Callers are expected to have rejected reads with increment != size via
 * mem_should_skip_abstract().
 *
 * If the target's support for aampostincrement is still unknown (YNM_MAYBE),
 * the first access doubles as a probe and the result is cached in
 * info->has_aampostincrement.
 */
static int read_memory_abstract(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	RISCV013_INFO(info);

	int result = ERROR_OK;
	/* Request postincrement unless we already know the target lacks it. */
	bool use_aampostincrement = info->has_aampostincrement != YNM_NO;

	LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
			  size, address);

	/* Pre-clear so any word we fail to read is returned as zero. */
	memset(buffer, 0, count * size);

	/* Convert the size (bytes) to width (bits) */
	unsigned width = size << 3;

	/* Create the command (physical address, postincrement, read) */
	uint32_t command = access_memory_command(target, false, width, use_aampostincrement, false);

	/* Execute the reads */
	uint8_t *p = buffer;
	bool updateaddr = true;
	/* Abstract args are read in 32-bit units; round narrow widths up. */
	unsigned int width32 = (width < 32) ? 32 : width;
	for (uint32_t c = 0; c < count; c++) {
		/* Update the address if it is the first time or aampostincrement is not supported by the target. */
		if (updateaddr) {
			/* Set arg1 to the address: address + c * size */
			result = write_abstract_arg(target, 1, address + c * size, riscv_xlen(target));
			if (result != ERROR_OK) {
				LOG_ERROR("Failed to write arg1 during read_memory_abstract().");
				return result;
			}
		}

		/* Execute the command */
		result = execute_abstract_command(target, command);

		/* Probe for aampostincrement support on the first access, and
		 * remember the answer so later calls skip this block. */
		if (info->has_aampostincrement == YNM_MAYBE) {
			if (result == ERROR_OK) {
				/* Safety: double-check that the address was really auto-incremented */
				riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
				if (new_address == address + size) {
					LOG_DEBUG("aampostincrement is supported on this target.");
					info->has_aampostincrement = YNM_YES;
				} else {
					LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
					info->has_aampostincrement = YNM_NO;
				}
			} else {
				/* Try the same access but with postincrement disabled. */
				command = access_memory_command(target, false, width, false, false);
				result = execute_abstract_command(target, command);
				if (result == ERROR_OK) {
					LOG_DEBUG("aampostincrement is not supported on this target.");
					info->has_aampostincrement = YNM_NO;
				}
			}
		}

		if (result != ERROR_OK)
			return result;

		/* Copy arg0 to buffer (rounded width up to nearest 32) */
		riscv_reg_t value = read_abstract_arg(target, 0, width32);
		buf_set_u64(p, 0, 8 * size, value);

		/* Once postincrement is confirmed, stop rewriting arg1. */
		if (info->has_aampostincrement == YNM_YES)
			updateaddr = false;
		p += size;
	}

	return result;
}
3019
3020 /*
3021 * Performs a memory write using memory access abstract commands. The write
3022 * sizes supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16
3023 * byte aamsize fields in the memory access abstract command.
3024 */
3025 static int write_memory_abstract(struct target *target, target_addr_t address,
3026 uint32_t size, uint32_t count, const uint8_t *buffer)
3027 {
3028 RISCV013_INFO(info);
3029 int result = ERROR_OK;
3030 bool use_aampostincrement = info->has_aampostincrement != YNM_NO;
3031
3032 LOG_DEBUG("writing %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
3033 size, address);
3034
3035 /* Convert the size (bytes) to width (bits) */
3036 unsigned width = size << 3;
3037
3038 /* Create the command (physical address, postincrement, write) */
3039 uint32_t command = access_memory_command(target, false, width, use_aampostincrement, true);
3040
3041 /* Execute the writes */
3042 const uint8_t *p = buffer;
3043 bool updateaddr = true;
3044 for (uint32_t c = 0; c < count; c++) {
3045 /* Move data to arg0 */
3046 riscv_reg_t value = buf_get_u64(p, 0, 8 * size);
3047 result = write_abstract_arg(target, 0, value, riscv_xlen(target));
3048 if (result != ERROR_OK) {
3049 LOG_ERROR("Failed to write arg0 during write_memory_abstract().");
3050 return result;
3051 }
3052
3053 /* Update the address if it is the first time or aampostincrement is not supported by the target. */
3054 if (updateaddr) {
3055 /* Set arg1 to the address: address + c * size */
3056 result = write_abstract_arg(target, 1, address + c * size, riscv_xlen(target));
3057 if (result != ERROR_OK) {
3058 LOG_ERROR("Failed to write arg1 during write_memory_abstract().");
3059 return result;
3060 }
3061 }
3062
3063 /* Execute the command */
3064 result = execute_abstract_command(target, command);
3065
3066 if (info->has_aampostincrement == YNM_MAYBE) {
3067 if (result == ERROR_OK) {
3068 /* Safety: double-check that the address was really auto-incremented */
3069 riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
3070 if (new_address == address + size) {
3071 LOG_DEBUG("aampostincrement is supported on this target.");
3072 info->has_aampostincrement = YNM_YES;
3073 } else {
3074 LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
3075 info->has_aampostincrement = YNM_NO;
3076 }
3077 } else {
3078 /* Try the same access but with postincrement disabled. */
3079 command = access_memory_command(target, false, width, false, true);
3080 result = execute_abstract_command(target, command);
3081 if (result == ERROR_OK) {
3082 LOG_DEBUG("aampostincrement is not supported on this target.");
3083 info->has_aampostincrement = YNM_NO;
3084 }
3085 }
3086 }
3087
3088 if (result != ERROR_OK)
3089 return result;
3090
3091 if (info->has_aampostincrement == YNM_YES)
3092 updateaddr = false;
3093 p += size;
3094 }
3095
3096 return result;
3097 }
3098
3099 /**
3100 * Read the requested memory, taking care to execute every read exactly once,
3101 * even if cmderr=busy is encountered.
3102 */
3103 static int read_memory_progbuf_inner(struct target *target, target_addr_t address,
3104 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3105 {
3106 RISCV013_INFO(info);
3107
3108 int result = ERROR_OK;
3109
3110 /* Write address to S0. */
3111 result = register_write_direct(target, GDB_REGNO_S0, address);
3112 if (result != ERROR_OK)
3113 return result;
3114
3115 if (increment == 0 &&
3116 register_write_direct(target, GDB_REGNO_S2, 0) != ERROR_OK)
3117 return ERROR_FAIL;
3118
3119 uint32_t command = access_register_command(target, GDB_REGNO_S1,
3120 riscv_xlen(target),
3121 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
3122 if (execute_abstract_command(target, command) != ERROR_OK)
3123 return ERROR_FAIL;
3124
3125 /* First read has just triggered. Result is in s1. */
3126 if (count == 1) {
3127 uint64_t value;
3128 if (register_read_direct(target, &value, GDB_REGNO_S1) != ERROR_OK)
3129 return ERROR_FAIL;
3130 buf_set_u64(buffer, 0, 8 * size, value);
3131 log_memory_access(address, value, size, true);
3132 return ERROR_OK;
3133 }
3134
3135 if (dmi_write(target, DM_ABSTRACTAUTO,
3136 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
3137 goto error;
3138 /* Read garbage from dmi_data0, which triggers another execution of the
3139 * program. Now dmi_data0 contains the first good result, and s1 the next
3140 * memory value. */
3141 if (dmi_read_exec(target, NULL, DM_DATA0) != ERROR_OK)
3142 goto error;
3143
3144 /* read_addr is the next address that the hart will read from, which is the
3145 * value in s0. */
3146 unsigned index = 2;
3147 while (index < count) {
3148 riscv_addr_t read_addr = address + index * increment;
3149 LOG_DEBUG("i=%d, count=%d, read_addr=0x%" PRIx64, index, count, read_addr);
3150 /* The pipeline looks like this:
3151 * memory -> s1 -> dm_data0 -> debugger
3152 * Right now:
3153 * s0 contains read_addr
3154 * s1 contains mem[read_addr-size]
3155 * dm_data0 contains[read_addr-size*2]
3156 */
3157
3158 struct riscv_batch *batch = riscv_batch_alloc(target, 32,
3159 info->dmi_busy_delay + info->ac_busy_delay);
3160 if (!batch)
3161 return ERROR_FAIL;
3162
3163 unsigned reads = 0;
3164 for (unsigned j = index; j < count; j++) {
3165 if (size > 4)
3166 riscv_batch_add_dmi_read(batch, DM_DATA1);
3167 riscv_batch_add_dmi_read(batch, DM_DATA0);
3168
3169 reads++;
3170 if (riscv_batch_full(batch))
3171 break;
3172 }
3173
3174 batch_run(target, batch);
3175
3176 /* Wait for the target to finish performing the last abstract command,
3177 * and update our copy of cmderr. If we see that DMI is busy here,
3178 * dmi_busy_delay will be incremented. */
3179 uint32_t abstractcs;
3180 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3181 return ERROR_FAIL;
3182 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
3183 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3184 return ERROR_FAIL;
3185 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
3186
3187 unsigned next_index;
3188 unsigned ignore_last = 0;
3189 switch (info->cmderr) {
3190 case CMDERR_NONE:
3191 LOG_DEBUG("successful (partial?) memory read");
3192 next_index = index + reads;
3193 break;
3194 case CMDERR_BUSY:
3195 LOG_DEBUG("memory read resulted in busy response");
3196
3197 increase_ac_busy_delay(target);
3198 riscv013_clear_abstract_error(target);
3199
3200 dmi_write(target, DM_ABSTRACTAUTO, 0);
3201
3202 uint32_t dmi_data0, dmi_data1 = 0;
3203 /* This is definitely a good version of the value that we
3204 * attempted to read when we discovered that the target was
3205 * busy. */
3206 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK) {
3207 riscv_batch_free(batch);
3208 goto error;
3209 }
3210 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK) {
3211 riscv_batch_free(batch);
3212 goto error;
3213 }
3214
3215 /* See how far we got, clobbering dmi_data0. */
3216 if (increment == 0) {
3217 uint64_t counter;
3218 result = register_read_direct(target, &counter, GDB_REGNO_S2);
3219 next_index = counter;
3220 } else {
3221 uint64_t next_read_addr;
3222 result = register_read_direct(target, &next_read_addr,
3223 GDB_REGNO_S0);
3224 next_index = (next_read_addr - address) / increment;
3225 }
3226 if (result != ERROR_OK) {
3227 riscv_batch_free(batch);
3228 goto error;
3229 }
3230
3231 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
3232 buf_set_u64(buffer + (next_index - 2) * size, 0, 8 * size, value64);
3233 log_memory_access(address + (next_index - 2) * size, value64, size, true);
3234
3235 /* Restore the command, and execute it.
3236 * Now DM_DATA0 contains the next value just as it would if no
3237 * error had occurred. */
3238 dmi_write_exec(target, DM_COMMAND, command, true);
3239 next_index++;
3240
3241 dmi_write(target, DM_ABSTRACTAUTO,
3242 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
3243
3244 ignore_last = 1;
3245
3246 break;
3247 default:
3248 LOG_DEBUG("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
3249 riscv013_clear_abstract_error(target);
3250 riscv_batch_free(batch);
3251 result = ERROR_FAIL;
3252 goto error;
3253 }
3254
3255 /* Now read whatever we got out of the batch. */
3256 dmi_status_t status = DMI_STATUS_SUCCESS;
3257 unsigned read = 0;
3258 assert(index >= 2);
3259 for (unsigned j = index - 2; j < index + reads; j++) {
3260 assert(j < count);
3261 LOG_DEBUG("index=%d, reads=%d, next_index=%d, ignore_last=%d, j=%d",
3262 index, reads, next_index, ignore_last, j);
3263 if (j + 3 + ignore_last > next_index)
3264 break;
3265
3266 status = riscv_batch_get_dmi_read_op(batch, read);
3267 uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
3268 read++;
3269 if (status != DMI_STATUS_SUCCESS) {
3270 /* If we're here because of busy count, dmi_busy_delay will
3271 * already have been increased and busy state will have been
3272 * cleared in dmi_read(). */
3273 /* In at least some implementations, we issue a read, and then
3274 * can get busy back when we try to scan out the read result,
3275 * and the actual read value is lost forever. Since this is
3276 * rare in any case, we return error here and rely on our
3277 * caller to reread the entire block. */
3278 LOG_WARNING("Batch memory read encountered DMI error %d. "
3279 "Falling back on slower reads.", status);
3280 riscv_batch_free(batch);
3281 result = ERROR_FAIL;
3282 goto error;
3283 }
3284 if (size > 4) {
3285 status = riscv_batch_get_dmi_read_op(batch, read);
3286 if (status != DMI_STATUS_SUCCESS) {
3287 LOG_WARNING("Batch memory read encountered DMI error %d. "
3288 "Falling back on slower reads.", status);
3289 riscv_batch_free(batch);
3290 result = ERROR_FAIL;
3291 goto error;
3292 }
3293 value <<= 32;
3294 value |= riscv_batch_get_dmi_read_data(batch, read);
3295 read++;
3296 }
3297 riscv_addr_t offset = j * size;
3298 buf_set_u64(buffer + offset, 0, 8 * size, value);
3299 log_memory_access(address + j * increment, value, size, true);
3300 }
3301
3302 index = next_index;
3303
3304 riscv_batch_free(batch);
3305 }
3306
3307 dmi_write(target, DM_ABSTRACTAUTO, 0);
3308
3309 if (count > 1) {
3310 /* Read the penultimate word. */
3311 uint32_t dmi_data0, dmi_data1 = 0;
3312 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK)
3313 return ERROR_FAIL;
3314 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK)
3315 return ERROR_FAIL;
3316 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
3317 buf_set_u64(buffer + size * (count - 2), 0, 8 * size, value64);
3318 log_memory_access(address + size * (count - 2), value64, size, true);
3319 }
3320
3321 /* Read the last word. */
3322 uint64_t value;
3323 result = register_read_direct(target, &value, GDB_REGNO_S1);
3324 if (result != ERROR_OK)
3325 goto error;
3326 buf_set_u64(buffer + size * (count-1), 0, 8 * size, value);
3327 log_memory_access(address + size * (count-1), value, size, true);
3328
3329 return ERROR_OK;
3330
3331 error:
3332 dmi_write(target, DM_ABSTRACTAUTO, 0);
3333
3334 return result;
3335 }
3336
3337 /* Only need to save/restore one GPR to read a single word, and the progbuf
3338 * program doesn't need to increment. */
3339 static int read_memory_progbuf_one(struct target *target, target_addr_t address,
3340 uint32_t size, uint8_t *buffer)
3341 {
3342 uint64_t mstatus = 0;
3343 uint64_t mstatus_old = 0;
3344 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3345 return ERROR_FAIL;
3346
3347 uint64_t s0;
3348 int result = ERROR_FAIL;
3349
3350 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3351 goto restore_mstatus;
3352
3353 /* Write the program (load, increment) */
3354 struct riscv_program program;
3355 riscv_program_init(&program, target);
3356 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3357 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3358 switch (size) {
3359 case 1:
3360 riscv_program_lbr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3361 break;
3362 case 2:
3363 riscv_program_lhr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3364 break;
3365 case 4:
3366 riscv_program_lwr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3367 break;
3368 case 8:
3369 riscv_program_ldr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3370 break;
3371 default:
3372 LOG_ERROR("Unsupported size: %d", size);
3373 goto restore_mstatus;
3374 }
3375 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3376 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3377
3378 if (riscv_program_ebreak(&program) != ERROR_OK)
3379 goto restore_mstatus;
3380 if (riscv_program_write(&program) != ERROR_OK)
3381 goto restore_mstatus;
3382
3383 /* Write address to S0, and execute buffer. */
3384 if (write_abstract_arg(target, 0, address, riscv_xlen(target)) != ERROR_OK)
3385 goto restore_mstatus;
3386 uint32_t command = access_register_command(target, GDB_REGNO_S0,
3387 riscv_xlen(target), AC_ACCESS_REGISTER_WRITE |
3388 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
3389 if (execute_abstract_command(target, command) != ERROR_OK)
3390 goto restore_s0;
3391
3392 uint64_t value;
3393 if (register_read(target, &value, GDB_REGNO_S0) != ERROR_OK)
3394 goto restore_s0;
3395 buf_set_u64(buffer, 0, 8 * size, value);
3396 log_memory_access(address, value, size, true);
3397 result = ERROR_OK;
3398
3399 restore_s0:
3400 if (riscv_set_register(target, GDB_REGNO_S0, s0) != ERROR_OK)
3401 result = ERROR_FAIL;
3402
3403 restore_mstatus:
3404 if (mstatus != mstatus_old)
3405 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3406 result = ERROR_FAIL;
3407
3408 return result;
3409 }
3410
/**
 * Read the requested memory, silently handling memory access errors.
 *
 * Saves s0/s1 (and s2 when increment == 0), writes a load+increment program
 * to the program buffer, then delegates the pipelined read to
 * read_memory_progbuf_inner().  If the bulk read fails, falls back to
 * reading each word individually and zero-fills words that still fail, so
 * read-ahead by tools (e.g. Eclipse Memory View) doesn't abort the whole
 * transfer.  Saved registers and MSTATUS are restored before returning.
 */
static int read_memory_progbuf(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	/* A single progbuf load can move at most XLEN bits. */
	if (riscv_xlen(target) < size * 8) {
		LOG_ERROR("XLEN (%d) is too short for %d-bit memory read.",
				riscv_xlen(target), size * 8);
		return ERROR_FAIL;
	}

	int result = ERROR_OK;

	LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
			size, address);

	select_dmi(target);

	/* Pre-clear so any word we fail to read is returned as zero. */
	memset(buffer, 0, count*size);

	/* Order the read after any outstanding stores on the hart. */
	if (execute_fence(target) != ERROR_OK)
		return ERROR_FAIL;

	/* Single-word reads have a cheaper path (only saves/restores s0). */
	if (count == 1)
		return read_memory_progbuf_one(target, address, size, buffer);

	uint64_t mstatus = 0;
	uint64_t mstatus_old = 0;
	if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
		return ERROR_FAIL;

	/* s0 holds the next address to read from
	 * s1 holds the next data value read
	 * s2 is a counter in case increment is 0
	 */
	uint64_t s0, s1, s2;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;
	if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
		return ERROR_FAIL;
	if (increment == 0 && register_read(target, &s2, GDB_REGNO_S2) != ERROR_OK)
		return ERROR_FAIL;

	/* Write the program (load, increment) */
	struct riscv_program program;
	riscv_program_init(&program, target);
	if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
		riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);

	switch (size) {
		case 1:
			riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		case 2:
			riscv_program_lhr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		case 4:
			riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		case 8:
			riscv_program_ldr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
			break;
		default:
			LOG_ERROR("Unsupported size: %d", size);
			return ERROR_FAIL;
	}

	if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
		riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
	/* Fixed address (increment == 0) counts executions in s2 instead of
	 * advancing s0. */
	if (increment == 0)
		riscv_program_addi(&program, GDB_REGNO_S2, GDB_REGNO_S2, 1);
	else
		riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, increment);

	if (riscv_program_ebreak(&program) != ERROR_OK)
		return ERROR_FAIL;
	if (riscv_program_write(&program) != ERROR_OK)
		return ERROR_FAIL;

	result = read_memory_progbuf_inner(target, address, size, count, buffer, increment);

	if (result != ERROR_OK) {
		/* The full read did not succeed, so we will try to read each word individually. */
		/* This will not be fast, but reading outside actual memory is a special case anyway. */
		/* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
		target_addr_t address_i = address;
		uint32_t count_i = 1;
		uint8_t *buffer_i = buffer;

		for (uint32_t i = 0; i < count; i++, address_i += increment, buffer_i += size) {
			/* TODO: This is much slower than it needs to be because we end up
			 * writing the address to read for every word we read. */
			result = read_memory_progbuf_inner(target, address_i, size, count_i, buffer_i, increment);

			/* The read of a single word failed, so we will just return 0 for that instead */
			if (result != ERROR_OK) {
				LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR,
						size, address_i);

				buf_set_u64(buffer_i, 0, 8 * size, 0);
			}
		}
		result = ERROR_OK;
	}

	/* Restore the registers the progbuf program clobbered. */
	riscv_set_register(target, GDB_REGNO_S0, s0);
	riscv_set_register(target, GDB_REGNO_S1, s1);
	if (increment == 0)
		riscv_set_register(target, GDB_REGNO_S2, s2);

	/* Restore MSTATUS */
	if (mstatus != mstatus_old)
		if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
			return ERROR_FAIL;

	return result;
}
3529
3530 static int read_memory(struct target *target, target_addr_t address,
3531 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3532 {
3533 if (count == 0)
3534 return ERROR_OK;
3535
3536 if (size != 1 && size != 2 && size != 4 && size != 8 && size != 16) {
3537 LOG_ERROR("BUG: Unsupported size for memory read: %d", size);
3538 return ERROR_FAIL;
3539 }
3540
3541 int ret = ERROR_FAIL;
3542 RISCV_INFO(r);
3543 RISCV013_INFO(info);
3544
3545 char *progbuf_result = "disabled";
3546 char *sysbus_result = "disabled";
3547 char *abstract_result = "disabled";
3548
3549 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
3550 int method = r->mem_access_methods[i];
3551
3552 if (method == RISCV_MEM_ACCESS_PROGBUF) {
3553 if (mem_should_skip_progbuf(target, address, size, true, &progbuf_result))
3554 continue;
3555
3556 ret = read_memory_progbuf(target, address, size, count, buffer, increment);
3557
3558 if (ret != ERROR_OK)
3559 progbuf_result = "failed";
3560 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
3561 if (mem_should_skip_sysbus(target, address, size, increment, true, &sysbus_result))
3562 continue;
3563
3564 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
3565 ret = read_memory_bus_v0(target, address, size, count, buffer, increment);
3566 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
3567 ret = read_memory_bus_v1(target, address, size, count, buffer, increment);
3568
3569 if (ret != ERROR_OK)
3570 sysbus_result = "failed";
3571 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
3572 if (mem_should_skip_abstract(target, address, size, increment, true, &abstract_result))
3573 continue;
3574
3575 ret = read_memory_abstract(target, address, size, count, buffer, increment);
3576
3577 if (ret != ERROR_OK)
3578 abstract_result = "failed";
3579 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
3580 /* No further mem access method to try. */
3581 break;
3582
3583 log_mem_access_result(target, ret == ERROR_OK, method, true);
3584
3585 if (ret == ERROR_OK)
3586 return ret;
3587 }
3588
3589 LOG_ERROR("Target %s: Failed to read memory (addr=0x%" PRIx64 ")", target_name(target), address);
3590 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result, sysbus_result, abstract_result);
3591 return ret;
3592 }
3593
3594 static int write_memory_bus_v0(struct target *target, target_addr_t address,
3595 uint32_t size, uint32_t count, const uint8_t *buffer)
3596 {
3597 /*1) write sbaddress: for singlewrite and autoincrement, we need to write the address once*/
3598 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
3599 TARGET_PRIxADDR, size, count, address);
3600 dmi_write(target, DM_SBADDRESS0, address);
3601 int64_t value = 0;
3602 int64_t access = 0;
3603 riscv_addr_t offset = 0;
3604 riscv_addr_t t_addr = 0;
3605 const uint8_t *t_buffer = buffer + offset;
3606
3607 /* B.8 Writing Memory, single write check if we write in one go */
3608 if (count == 1) { /* count is in bytes here */
3609 value = buf_get_u64(t_buffer, 0, 8 * size);
3610
3611 access = 0;
3612 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3613 dmi_write(target, DM_SBCS, access);
3614 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3615 LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
3616 dmi_write(target, DM_SBDATA0, value);
3617 return ERROR_OK;
3618 }
3619
3620 /*B.8 Writing Memory, using autoincrement*/
3621
3622 access = 0;
3623 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3624 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
3625 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3626 dmi_write(target, DM_SBCS, access);
3627
3628 /*2)set the value according to the size required and write*/
3629 for (riscv_addr_t i = 0; i < count; ++i) {
3630 offset = size*i;
3631 /* for monitoring only */
3632 t_addr = address + offset;
3633 t_buffer = buffer + offset;
3634
3635 value = buf_get_u64(t_buffer, 0, 8 * size);
3636 LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
3637 PRIx64, (uint32_t)t_addr, (uint32_t)value);
3638 dmi_write(target, DM_SBDATA0, value);
3639 }
3640 /*reset the autoincrement when finished (something weird is happening if this is not done at the end*/
3641 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 0);
3642 dmi_write(target, DM_SBCS, access);
3643
3644 return ERROR_OK;
3645 }
3646
3647 static int write_memory_bus_v1(struct target *target, target_addr_t address,
3648 uint32_t size, uint32_t count, const uint8_t *buffer)
3649 {
3650 RISCV013_INFO(info);
3651 uint32_t sbcs = sb_sbaccess(size);
3652 sbcs = set_field(sbcs, DM_SBCS_SBAUTOINCREMENT, 1);
3653 dmi_write(target, DM_SBCS, sbcs);
3654
3655 target_addr_t next_address = address;
3656 target_addr_t end_address = address + count * size;
3657
3658 int result;
3659
3660 sb_write_address(target, next_address, true);
3661 while (next_address < end_address) {
3662 LOG_DEBUG("transferring burst starting at address 0x%" TARGET_PRIxADDR,
3663 next_address);
3664
3665 struct riscv_batch *batch = riscv_batch_alloc(
3666 target,
3667 32,
3668 info->dmi_busy_delay + info->bus_master_write_delay);
3669 if (!batch)
3670 return ERROR_FAIL;
3671
3672 for (uint32_t i = (next_address - address) / size; i < count; i++) {
3673 const uint8_t *p = buffer + i * size;
3674
3675 if (riscv_batch_available_scans(batch) < (size + 3) / 4)
3676 break;
3677
3678 if (size > 12)
3679 riscv_batch_add_dmi_write(batch, DM_SBDATA3,
3680 ((uint32_t) p[12]) |
3681 (((uint32_t) p[13]) << 8) |
3682 (((uint32_t) p[14]) << 16) |
3683 (((uint32_t) p[15]) << 24));
3684
3685 if (size > 8)
3686 riscv_batch_add_dmi_write(batch, DM_SBDATA2,
3687 ((uint32_t) p[8]) |
3688 (((uint32_t) p[9]) << 8) |
3689 (((uint32_t) p[10]) << 16) |
3690 (((uint32_t) p[11]) << 24));
3691 if (size > 4)
3692 riscv_batch_add_dmi_write(batch, DM_SBDATA1,
3693 ((uint32_t) p[4]) |
3694 (((uint32_t) p[5]) << 8) |
3695 (((uint32_t) p[6]) << 16) |
3696 (((uint32_t) p[7]) << 24));
3697 uint32_t value = p[0];
3698 if (size > 2) {
3699 value |= ((uint32_t) p[2]) << 16;
3700 value |= ((uint32_t) p[3]) << 24;
3701 }
3702 if (size > 1)
3703 value |= ((uint32_t) p[1]) << 8;
3704 riscv_batch_add_dmi_write(batch, DM_SBDATA0, value);
3705
3706 log_memory_access(address + i * size, value, size, false);
3707 next_address += size;
3708 }
3709
3710 /* Execute the batch of writes */
3711 result = batch_run(target, batch);
3712 riscv_batch_free(batch);
3713 if (result != ERROR_OK)
3714 return result;
3715
3716 /* Read sbcs value.
3717 * At the same time, detect if DMI busy has occurred during the batch write. */
3718 bool dmi_busy_encountered;
3719 if (dmi_op(target, &sbcs, &dmi_busy_encountered, DMI_OP_READ,
3720 DM_SBCS, 0, false, true) != ERROR_OK)
3721 return ERROR_FAIL;
3722 if (dmi_busy_encountered)
3723 LOG_DEBUG("DMI busy encountered during system bus write.");
3724
3725 /* Wait until sbbusy goes low */
3726 time_t start = time(NULL);
3727 while (get_field(sbcs, DM_SBCS_SBBUSY)) {
3728 if (time(NULL) - start > riscv_command_timeout_sec) {
3729 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
3730 "Increase the timeout with riscv set_command_timeout_sec.",
3731 riscv_command_timeout_sec, sbcs);
3732 return ERROR_FAIL;
3733 }
3734 if (dmi_read(target, &sbcs, DM_SBCS) != ERROR_OK)
3735 return ERROR_FAIL;
3736 }
3737
3738 if (get_field(sbcs, DM_SBCS_SBBUSYERROR)) {
3739 /* We wrote while the target was busy. */
3740 LOG_DEBUG("Sbbusyerror encountered during system bus write.");
3741 /* Clear the sticky error flag. */
3742 dmi_write(target, DM_SBCS, sbcs | DM_SBCS_SBBUSYERROR);
3743 /* Slow down before trying again. */
3744 info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
3745 }
3746
3747 if (get_field(sbcs, DM_SBCS_SBBUSYERROR) || dmi_busy_encountered) {
3748 /* Recover from the case when the write commands were issued too fast.
3749 * Determine the address from which to resume writing. */
3750 next_address = sb_read_address(target);
3751 if (next_address < address) {
3752 /* This should never happen, probably buggy hardware. */
3753 LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
3754 " - buggy sbautoincrement in hw?", next_address);
3755 /* Fail the whole operation. */
3756 return ERROR_FAIL;
3757 }
3758 /* Try again - resume writing. */
3759 continue;
3760 }
3761
3762 unsigned int sberror = get_field(sbcs, DM_SBCS_SBERROR);
3763 if (sberror != 0) {
3764 /* Sberror indicates the bus access failed, but not because we issued the writes
3765 * too fast. Cannot recover. Sbaddress holds the address where the error occurred
3766 * (unless sbautoincrement in the HW is buggy).
3767 */
3768 target_addr_t sbaddress = sb_read_address(target);
3769 LOG_DEBUG("System bus access failed with sberror=%u (sbaddress=0x%" TARGET_PRIxADDR ")",
3770 sberror, sbaddress);
3771 if (sbaddress < address) {
3772 /* This should never happen, probably buggy hardware.
3773 * Make a note to the user not to trust the sbaddress value. */
3774 LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
3775 " - buggy sbautoincrement in hw?", next_address);
3776 }
3777 /* Clear the sticky error flag */
3778 dmi_write(target, DM_SBCS, DM_SBCS_SBERROR);
3779 /* Fail the whole operation */
3780 return ERROR_FAIL;
3781 }
3782 }
3783
3784 return ERROR_OK;
3785 }
3786
3787 static int write_memory_progbuf(struct target *target, target_addr_t address,
3788 uint32_t size, uint32_t count, const uint8_t *buffer)
3789 {
3790 RISCV013_INFO(info);
3791
3792 if (riscv_xlen(target) < size * 8) {
3793 LOG_ERROR("XLEN (%d) is too short for %d-bit memory write.",
3794 riscv_xlen(target), size * 8);
3795 return ERROR_FAIL;
3796 }
3797
3798 LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count, size, (long)address);
3799
3800 select_dmi(target);
3801
3802 uint64_t mstatus = 0;
3803 uint64_t mstatus_old = 0;
3804 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3805 return ERROR_FAIL;
3806
3807 /* s0 holds the next address to write to
3808 * s1 holds the next data value to write
3809 */
3810
3811 int result = ERROR_OK;
3812 uint64_t s0, s1;
3813 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3814 return ERROR_FAIL;
3815 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
3816 return ERROR_FAIL;
3817
3818 /* Write the program (store, increment) */
3819 struct riscv_program program;
3820 riscv_program_init(&program, target);
3821 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3822 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3823
3824 switch (size) {
3825 case 1:
3826 riscv_program_sbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3827 break;
3828 case 2:
3829 riscv_program_shr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3830 break;
3831 case 4:
3832 riscv_program_swr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3833 break;
3834 case 8:
3835 riscv_program_sdr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3836 break;
3837 default:
3838 LOG_ERROR("write_memory_progbuf(): Unsupported size: %d", size);
3839 result = ERROR_FAIL;
3840 goto error;
3841 }
3842
3843 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3844 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3845 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
3846
3847 result = riscv_program_ebreak(&program);
3848 if (result != ERROR_OK)
3849 goto error;
3850 riscv_program_write(&program);
3851
3852 riscv_addr_t cur_addr = address;
3853 riscv_addr_t fin_addr = address + (count * size);
3854 bool setup_needed = true;
3855 LOG_DEBUG("writing until final address 0x%016" PRIx64, fin_addr);
3856 while (cur_addr < fin_addr) {
3857 LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64,
3858 cur_addr);
3859
3860 struct riscv_batch *batch = riscv_batch_alloc(
3861 target,
3862 32,
3863 info->dmi_busy_delay + info->ac_busy_delay);
3864 if (!batch)
3865 goto error;
3866
3867 /* To write another word, we put it in S1 and execute the program. */
3868 unsigned start = (cur_addr - address) / size;
3869 for (unsigned i = start; i < count; ++i) {
3870 unsigned offset = size*i;
3871 const uint8_t *t_buffer = buffer + offset;
3872
3873 uint64_t value = buf_get_u64(t_buffer, 0, 8 * size);
3874
3875 log_memory_access(address + offset, value, size, false);
3876 cur_addr += size;
3877
3878 if (setup_needed) {
3879 result = register_write_direct(target, GDB_REGNO_S0,
3880 address + offset);
3881 if (result != ERROR_OK) {
3882 riscv_batch_free(batch);
3883 goto error;
3884 }
3885
3886 /* Write value. */
3887 if (size > 4)
3888 dmi_write(target, DM_DATA1, value >> 32);
3889 dmi_write(target, DM_DATA0, value);
3890
3891 /* Write and execute command that moves value into S1 and
3892 * executes program buffer. */
3893 uint32_t command = access_register_command(target,
3894 GDB_REGNO_S1, riscv_xlen(target),
3895 AC_ACCESS_REGISTER_POSTEXEC |
3896 AC_ACCESS_REGISTER_TRANSFER |
3897 AC_ACCESS_REGISTER_WRITE);
3898 result = execute_abstract_command(target, command);
3899 if (result != ERROR_OK) {
3900 riscv_batch_free(batch);
3901 goto error;
3902 }
3903
3904 /* Turn on autoexec */
3905 dmi_write(target, DM_ABSTRACTAUTO,
3906 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
3907
3908 setup_needed = false;
3909 } else {
3910 if (size > 4)
3911 riscv_batch_add_dmi_write(batch, DM_DATA1, value >> 32);
3912 riscv_batch_add_dmi_write(batch, DM_DATA0, value);
3913 if (riscv_batch_full(batch))
3914 break;
3915 }
3916 }
3917
3918 result = batch_run(target, batch);
3919 riscv_batch_free(batch);
3920 if (result != ERROR_OK)
3921 goto error;
3922
3923 /* Note that if the scan resulted in a Busy DMI response, it
3924 * is this read to abstractcs that will cause the dmi_busy_delay
3925 * to be incremented if necessary. */
3926
3927 uint32_t abstractcs;
3928 bool dmi_busy_encountered;
3929 result = dmi_op(target, &abstractcs, &dmi_busy_encountered,
3930 DMI_OP_READ, DM_ABSTRACTCS, 0, false, true);
3931 if (result != ERROR_OK)
3932 goto error;
3933 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
3934 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3935 return ERROR_FAIL;
3936 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
3937 if (info->cmderr == CMDERR_NONE && !dmi_busy_encountered) {
3938 LOG_DEBUG("successful (partial?) memory write");
3939 } else if (info->cmderr == CMDERR_BUSY || dmi_busy_encountered) {
3940 if (info->cmderr == CMDERR_BUSY)
3941 LOG_DEBUG("Memory write resulted in abstract command busy response.");
3942 else if (dmi_busy_encountered)
3943 LOG_DEBUG("Memory write resulted in DMI busy response.");
3944 riscv013_clear_abstract_error(target);
3945 increase_ac_busy_delay(target);
3946
3947 dmi_write(target, DM_ABSTRACTAUTO, 0);
3948 result = register_read_direct(target, &cur_addr, GDB_REGNO_S0);
3949 if (result != ERROR_OK)
3950 goto error;
3951 setup_needed = true;
3952 } else {
3953 LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs);
3954 riscv013_clear_abstract_error(target);
3955 result = ERROR_FAIL;
3956 goto error;
3957 }
3958 }
3959
3960 error:
3961 dmi_write(target, DM_ABSTRACTAUTO, 0);
3962
3963 if (register_write_direct(target, GDB_REGNO_S1, s1) != ERROR_OK)
3964 return ERROR_FAIL;
3965 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
3966 return ERROR_FAIL;
3967
3968 /* Restore MSTATUS */
3969 if (mstatus != mstatus_old)
3970 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3971 return ERROR_FAIL;
3972
3973 if (execute_fence(target) != ERROR_OK)
3974 return ERROR_FAIL;
3975
3976 return result;
3977 }
3978
3979 static int write_memory(struct target *target, target_addr_t address,
3980 uint32_t size, uint32_t count, const uint8_t *buffer)
3981 {
3982 if (size != 1 && size != 2 && size != 4 && size != 8 && size != 16) {
3983 LOG_ERROR("BUG: Unsupported size for memory write: %d", size);
3984 return ERROR_FAIL;
3985 }
3986
3987 int ret = ERROR_FAIL;
3988 RISCV_INFO(r);
3989 RISCV013_INFO(info);
3990
3991 char *progbuf_result = "disabled";
3992 char *sysbus_result = "disabled";
3993 char *abstract_result = "disabled";
3994
3995 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
3996 int method = r->mem_access_methods[i];
3997
3998 if (method == RISCV_MEM_ACCESS_PROGBUF) {
3999 if (mem_should_skip_progbuf(target, address, size, false, &progbuf_result))
4000 continue;
4001
4002 ret = write_memory_progbuf(target, address, size, count, buffer);
4003
4004 if (ret != ERROR_OK)
4005 progbuf_result = "failed";
4006 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
4007 if (mem_should_skip_sysbus(target, address, size, 0, false, &sysbus_result))
4008 continue;
4009
4010 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
4011 ret = write_memory_bus_v0(target, address, size, count, buffer);
4012 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
4013 ret = write_memory_bus_v1(target, address, size, count, buffer);
4014
4015 if (ret != ERROR_OK)
4016 sysbus_result = "failed";
4017 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
4018 if (mem_should_skip_abstract(target, address, size, 0, false, &abstract_result))
4019 continue;
4020
4021 ret = write_memory_abstract(target, address, size, count, buffer);
4022
4023 if (ret != ERROR_OK)
4024 abstract_result = "failed";
4025 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
4026 /* No further mem access method to try. */
4027 break;
4028
4029 log_mem_access_result(target, ret == ERROR_OK, method, false);
4030
4031 if (ret == ERROR_OK)
4032 return ret;
4033 }
4034
4035 LOG_ERROR("Target %s: Failed to write memory (addr=0x%" PRIx64 ")", target_name(target), address);
4036 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result, sysbus_result, abstract_result);
4037 return ret;
4038 }
4039
static int arch_state(struct target *target)
{
	/* No architecture-specific state to report for this target. */
	return ERROR_OK;
}
4044
/* Target operations for the RISC-V debug spec 0.13 implementation.
 * NOTE(review): callbacks not listed here appear to be provided by the
 * generic riscv layer — confirm against riscv.c. */
struct target_type riscv013_target = {
	.name = "riscv",

	.init_target = init_target,
	.deinit_target = deinit_target,
	.examine = examine,

	/* Polling, halt and step route through the generic riscv helpers. */
	.poll = &riscv_openocd_poll,
	.halt = &riscv_halt,
	.step = &riscv_openocd_step,

	.assert_reset = assert_reset,
	.deassert_reset = deassert_reset,

	.write_memory = write_memory,

	.arch_state = arch_state
};
4063
4064 /*** 0.13-specific implementations of various RISC-V helper functions. ***/
4065 static int riscv013_get_register(struct target *target,
4066 riscv_reg_t *value, int rid)
4067 {
4068 LOG_DEBUG("[%s] reading register %s", target_name(target),
4069 gdb_regno_name(rid));
4070
4071 if (riscv_select_current_hart(target) != ERROR_OK)
4072 return ERROR_FAIL;
4073
4074 int result = ERROR_OK;
4075 if (rid == GDB_REGNO_PC) {
4076 /* TODO: move this into riscv.c. */
4077 result = register_read(target, value, GDB_REGNO_DPC);
4078 LOG_DEBUG("[%d] read PC from DPC: 0x%" PRIx64, target->coreid, *value);
4079 } else if (rid == GDB_REGNO_PRIV) {
4080 uint64_t dcsr;
4081 /* TODO: move this into riscv.c. */
4082 result = register_read(target, &dcsr, GDB_REGNO_DCSR);
4083 *value = set_field(0, VIRT_PRIV_V, get_field(dcsr, CSR_DCSR_V));
4084 *value = set_field(*value, VIRT_PRIV_PRV, get_field(dcsr, CSR_DCSR_PRV));
4085 } else {
4086 result = register_read(target, value, rid);
4087 if (result != ERROR_OK)
4088 *value = -1;
4089 }
4090
4091 return result;
4092 }
4093
4094 static int riscv013_set_register(struct target *target, int rid, uint64_t value)
4095 {
4096 riscv013_select_current_hart(target);
4097 LOG_DEBUG("[%d] writing 0x%" PRIx64 " to register %s",
4098 target->coreid, value, gdb_regno_name(rid));
4099
4100 if (rid <= GDB_REGNO_XPR31) {
4101 return register_write_direct(target, rid, value);
4102 } else if (rid == GDB_REGNO_PC) {
4103 LOG_DEBUG("[%d] writing PC to DPC: 0x%" PRIx64, target->coreid, value);
4104 register_write_direct(target, GDB_REGNO_DPC, value);
4105 uint64_t actual_value;
4106 register_read_direct(target, &actual_value, GDB_REGNO_DPC);
4107 LOG_DEBUG("[%d] actual DPC written: 0x%016" PRIx64, target->coreid, actual_value);
4108 if (value != actual_value) {
4109 LOG_ERROR("Written PC (0x%" PRIx64 ") does not match read back "
4110 "value (0x%" PRIx64 ")", value, actual_value);
4111 return ERROR_FAIL;
4112 }
4113 } else if (rid == GDB_REGNO_PRIV) {
4114 uint64_t dcsr;
4115 register_read(target, &dcsr, GDB_REGNO_DCSR);
4116 dcsr = set_field(dcsr, CSR_DCSR_PRV, get_field(value, VIRT_PRIV_PRV));
4117 dcsr = set_field(dcsr, CSR_DCSR_V, get_field(value, VIRT_PRIV_V));
4118 return register_write_direct(target, GDB_REGNO_DCSR, dcsr);
4119 } else {
4120 return register_write_direct(target, rid, value);
4121 }
4122
4123 return ERROR_OK;
4124 }
4125
4126 static int riscv013_select_current_hart(struct target *target)
4127 {
4128 RISCV_INFO(r);
4129
4130 dm013_info_t *dm = get_dm(target);
4131 if (!dm)
4132 return ERROR_FAIL;
4133 if (r->current_hartid == dm->current_hartid)
4134 return ERROR_OK;
4135
4136 uint32_t dmcontrol;
4137 /* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
4138 if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
4139 return ERROR_FAIL;
4140 dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
4141 int result = dmi_write(target, DM_DMCONTROL, dmcontrol);
4142 dm->current_hartid = r->current_hartid;
4143 return result;
4144 }
4145
4146 /* Select all harts that were prepped and that are selectable, clearing the
4147 * prepped flag on the harts that actually were selected. */
4148 static int select_prepped_harts(struct target *target, bool *use_hasel)
4149 {
4150 dm013_info_t *dm = get_dm(target);
4151 if (!dm)
4152 return ERROR_FAIL;
4153 if (!dm->hasel_supported) {
4154 RISCV_INFO(r);
4155 r->prepped = false;
4156 *use_hasel = false;
4157 return ERROR_OK;
4158 }
4159
4160 assert(dm->hart_count);
4161 unsigned hawindow_count = (dm->hart_count + 31) / 32;
4162 uint32_t hawindow[hawindow_count];
4163
4164 memset(hawindow, 0, sizeof(uint32_t) * hawindow_count);
4165
4166 target_list_t *entry;
4167 unsigned total_selected = 0;
4168 list_for_each_entry(entry, &dm->target_list, list) {
4169 struct target *t = entry->target;
4170 struct riscv_info *r = riscv_info(t);
4171 riscv013_info_t *info = get_info(t);
4172 unsigned index = info->index;
4173 LOG_DEBUG("index=%d, coreid=%d, prepped=%d", index, t->coreid, r->prepped);
4174 r->selected = r->prepped;
4175 if (r->prepped) {
4176 hawindow[index / 32] |= 1 << (index % 32);
4177 r->prepped = false;
4178 total_selected++;
4179 }
4180 index++;
4181 }
4182
4183 /* Don't use hasel if we only need to talk to one hart. */
4184 if (total_selected <= 1) {
4185 *use_hasel = false;
4186 return ERROR_OK;
4187 }
4188
4189 for (unsigned i = 0; i < hawindow_count; i++) {
4190 if (dmi_write(target, DM_HAWINDOWSEL, i) != ERROR_OK)
4191 return ERROR_FAIL;
4192 if (dmi_write(target, DM_HAWINDOW, hawindow[i]) != ERROR_OK)
4193 return ERROR_FAIL;
4194 }
4195
4196 *use_hasel = true;
4197 return ERROR_OK;
4198 }
4199
static int riscv013_halt_prep(struct target *target)
{
	/* Nothing to do here; hart selection happens in riscv013_halt_go()
	 * via select_prepped_harts(). */
	return ERROR_OK;
}
4204
/* Halt the prepped hart(s): set dmcontrol.haltreq, poll until halted,
 * then clear the request.  With hasel, all harts on this DM are marked
 * halted at once. */
static int riscv013_halt_go(struct target *target)
{
	bool use_hasel = false;
	if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
		return ERROR_FAIL;

	RISCV_INFO(r);
	LOG_DEBUG("halting hart %d", r->current_hartid);

	/* Issue the halt command, and then wait for the current hart to halt. */
	uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_HALTREQ;
	if (use_hasel)
		dmcontrol |= DM_DMCONTROL_HASEL;
	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
	dmi_write(target, DM_DMCONTROL, dmcontrol);
	/* Bounded poll rather than waiting forever. */
	for (size_t i = 0; i < 256; ++i)
		if (riscv_is_halted(target))
			break;

	if (!riscv_is_halted(target)) {
		/* Gather diagnostics before reporting the failure. */
		uint32_t dmstatus;
		if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
			return ERROR_FAIL;
		if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
			return ERROR_FAIL;

		LOG_ERROR("unable to halt hart %d", r->current_hartid);
		LOG_ERROR(" dmcontrol=0x%08x", dmcontrol);
		LOG_ERROR(" dmstatus =0x%08x", dmstatus);
		return ERROR_FAIL;
	}

	/* Deassert the halt request now that the hart has halted. */
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HALTREQ, 0);
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	if (use_hasel) {
		/* Every hart behind this DM was halted by the broadcast. */
		target_list_t *entry;
		dm013_info_t *dm = get_dm(target);
		if (!dm)
			return ERROR_FAIL;
		list_for_each_entry(entry, &dm->target_list, list) {
			struct target *t = entry->target;
			t->state = TARGET_HALTED;
			if (t->debug_reason == DBG_REASON_NOTHALTED)
				t->debug_reason = DBG_REASON_DBGRQ;
		}
	}
	/* The "else" case is handled in halt_go(). */

	return ERROR_OK;
}
4256
4257 static int riscv013_resume_go(struct target *target)
4258 {
4259 bool use_hasel = false;
4260 if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
4261 return ERROR_FAIL;
4262
4263 return riscv013_step_or_resume_current_hart(target, false, use_hasel);
4264 }
4265
static int riscv013_step_current_hart(struct target *target)
{
	/* Single-step the current hart: step=true, use_hasel=false. */
	return riscv013_step_or_resume_current_hart(target, true, false);
}
4270
static int riscv013_resume_prep(struct target *target)
{
	/* Configure DCSR for a resume (clears dcsr.step, sets ebreak bits). */
	return riscv013_on_step_or_resume(target, false);
}
4275
static int riscv013_on_step(struct target *target)
{
	/* Configure DCSR for a single step (sets dcsr.step and ebreak bits). */
	return riscv013_on_step_or_resume(target, true);
}
4280
static int riscv013_on_halt(struct target *target)
{
	/* No 0.13-specific work is needed when a hart halts. */
	return ERROR_OK;
}
4285
/* Return true iff all selected harts report halted (dmstatus.allhalted).
 * Reading dmstatus also surfaces unavailable/nonexistent harts and
 * acknowledges unexpected resets as a side effect. */
static bool riscv013_is_halted(struct target *target)
{
	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return false;
	if (get_field(dmstatus, DM_DMSTATUS_ANYUNAVAIL))
		LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target));
	if (get_field(dmstatus, DM_DMSTATUS_ANYNONEXISTENT))
		LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target));
	if (get_field(dmstatus, DM_DMSTATUS_ANYHAVERESET)) {
		int hartid = riscv_current_hartid(target);
		LOG_INFO("Hart %d unexpectedly reset!", hartid);
		/* TODO: Can we make this more obvious to eg. a gdb user? */
		uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_ACKHAVERESET;
		dmcontrol = set_hartsel(dmcontrol, hartid);
		/* If we had been halted when we reset, request another halt. If we
		 * ended up running out of reset, then the user will (hopefully) get a
		 * message that a reset happened, that the target is running, and then
		 * that it is halted again once the request goes through.
		 */
		if (target->state == TARGET_HALTED)
			dmcontrol |= DM_DMCONTROL_HALTREQ;
		dmi_write(target, DM_DMCONTROL, dmcontrol);
	}
	return get_field(dmstatus, DM_DMSTATUS_ALLHALTED);
}
4313
/* Map dcsr.cause of the current hart onto the generic riscv halt reason. */
static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
{
	riscv_reg_t dcsr;
	int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
	if (result != ERROR_OK)
		return RISCV_HALT_UNKNOWN;

	LOG_DEBUG("dcsr.cause: 0x%" PRIx64, get_field(dcsr, CSR_DCSR_CAUSE));

	switch (get_field(dcsr, CSR_DCSR_CAUSE)) {
	case CSR_DCSR_CAUSE_SWBP:
		return RISCV_HALT_BREAKPOINT;
	case CSR_DCSR_CAUSE_TRIGGER:
		/* We could get here before triggers are enumerated if a trigger was
		 * already set when we connected. Force enumeration now, which has the
		 * side effect of clearing any triggers we did not set. */
		riscv_enumerate_triggers(target);
		LOG_DEBUG("{%d} halted because of trigger", target->coreid);
		return RISCV_HALT_TRIGGER;
	case CSR_DCSR_CAUSE_STEP:
		return RISCV_HALT_SINGLESTEP;
	case CSR_DCSR_CAUSE_DEBUGINT:
	case CSR_DCSR_CAUSE_HALT:
		return RISCV_HALT_INTERRUPT;
	case CSR_DCSR_CAUSE_GROUP:
		return RISCV_HALT_GROUP;
	}

	/* Unrecognized cause value: report it and give up. */
	LOG_ERROR("Unknown DCSR cause field: 0x%" PRIx64, get_field(dcsr, CSR_DCSR_CAUSE));
	LOG_ERROR(" dcsr=0x%016lx", (long)dcsr);
	return RISCV_HALT_UNKNOWN;
}
4346
4347 int riscv013_write_debug_buffer(struct target *target, unsigned index, riscv_insn_t data)
4348 {
4349 dm013_info_t *dm = get_dm(target);
4350 if (!dm)
4351 return ERROR_FAIL;
4352 if (dm->progbuf_cache[index] != data) {
4353 if (dmi_write(target, DM_PROGBUF0 + index, data) != ERROR_OK)
4354 return ERROR_FAIL;
4355 dm->progbuf_cache[index] = data;
4356 } else {
4357 LOG_DEBUG("cache hit for 0x%" PRIx32 " @%d", data, index);
4358 }
4359 return ERROR_OK;
4360 }
4361
4362 riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned index)
4363 {
4364 uint32_t value;
4365 dmi_read(target, &value, DM_PROGBUF0 + index);
4366 return value;
4367 }
4368
4369 int riscv013_execute_debug_buffer(struct target *target)
4370 {
4371 uint32_t run_program = 0;
4372 run_program = set_field(run_program, AC_ACCESS_REGISTER_AARSIZE, 2);
4373 run_program = set_field(run_program, AC_ACCESS_REGISTER_POSTEXEC, 1);
4374 run_program = set_field(run_program, AC_ACCESS_REGISTER_TRANSFER, 0);
4375 run_program = set_field(run_program, AC_ACCESS_REGISTER_REGNO, 0x1000);
4376
4377 return execute_abstract_command(target, run_program);
4378 }
4379
4380 void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
4381 {
4382 RISCV013_INFO(info);
4383 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_WRITE);
4384 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, d);
4385 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
4386 }
4387
4388 void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a)
4389 {
4390 RISCV013_INFO(info);
4391 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_READ);
4392 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
4393 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
4394 }
4395
4396 void riscv013_fill_dmi_nop_u64(struct target *target, char *buf)
4397 {
4398 RISCV013_INFO(info);
4399 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_NOP);
4400 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
4401 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, 0);
4402 }
4403
int riscv013_dmi_write_u64_bits(struct target *target)
{
	RISCV013_INFO(info);
	/* Total width of one DMI scan: address + data + op fields. */
	return info->abits + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH;
}
4409
4410 static int maybe_execute_fence_i(struct target *target)
4411 {
4412 if (has_sufficient_progbuf(target, 3))
4413 return execute_fence(target);
4414 return ERROR_OK;
4415 }
4416
/* Helper Functions. */
/* Shared pre-step/pre-resume setup: fence (so the hart sees any code the
 * debugger wrote), then program DCSR's step and ebreak routing bits. */
static int riscv013_on_step_or_resume(struct target *target, bool step)
{
	if (maybe_execute_fence_i(target) != ERROR_OK)
		return ERROR_FAIL;

	/* We want to twiddle some bits in the debug CSR so debugging works. */
	riscv_reg_t dcsr;
	int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
	if (result != ERROR_OK)
		return result;
	/* Set single-step as requested and route M/S/U-mode ebreaks to the
	 * debugger according to the riscv_ebreak* flags. */
	dcsr = set_field(dcsr, CSR_DCSR_STEP, step);
	dcsr = set_field(dcsr, CSR_DCSR_EBREAKM, riscv_ebreakm);
	dcsr = set_field(dcsr, CSR_DCSR_EBREAKS, riscv_ebreaks);
	dcsr = set_field(dcsr, CSR_DCSR_EBREAKU, riscv_ebreaku);
	return riscv_set_register(target, GDB_REGNO_DCSR, dcsr);
}
4434
/* Set dmcontrol.resumereq and poll dmstatus until the hart acknowledges
 * the resume (and, when stepping, halts again).  The request bits are
 * deasserted whether or not the poll succeeds. */
static int riscv013_step_or_resume_current_hart(struct target *target,
		bool step, bool use_hasel)
{
	RISCV_INFO(r);
	LOG_DEBUG("resuming hart %d (for step?=%d)", r->current_hartid, step);
	if (!riscv_is_halted(target)) {
		LOG_ERROR("Hart %d is not halted!", r->current_hartid);
		return ERROR_FAIL;
	}

	/* Issue the resume command, and then wait for the current hart to resume. */
	uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_RESUMEREQ;
	if (use_hasel)
		dmcontrol |= DM_DMCONTROL_HASEL;
	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	/* Precompute the value that deasserts resumereq (and hasel) so it can
	 * be written as soon as the resume is acknowledged. */
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HASEL, 0);
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_RESUMEREQ, 0);

	uint32_t dmstatus;
	for (size_t i = 0; i < 256; ++i) {
		usleep(10);
		if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
			return ERROR_FAIL;
		/* Wait for the resume acknowledgement... */
		if (get_field(dmstatus, DM_DMSTATUS_ALLRESUMEACK) == 0)
			continue;
		/* ...and, when stepping, for the hart to be halted again. */
		if (step && get_field(dmstatus, DM_DMSTATUS_ALLHALTED) == 0)
			continue;

		dmi_write(target, DM_DMCONTROL, dmcontrol);
		return ERROR_OK;
	}

	/* Timed out: deassert the request anyway and report diagnostics. */
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	LOG_ERROR("unable to resume hart %d", r->current_hartid);
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return ERROR_FAIL;
	LOG_ERROR(" dmstatus =0x%08x", dmstatus);

	if (step) {
		/* A step that never completed leaves the hart running; halt it
		 * so the debugger regains control. */
		LOG_ERROR(" was stepping, halting");
		riscv_halt(target);
		return ERROR_OK;
	}

	return ERROR_FAIL;
}
4484
/* Clear the sticky cmderr field in abstractcs, first waiting (bounded by
 * riscv_command_timeout_sec) for any in-progress abstract command to
 * finish. */
void riscv013_clear_abstract_error(struct target *target)
{
	/* Wait for busy to go away. */
	time_t start = time(NULL);
	uint32_t abstractcs;
	dmi_read(target, &abstractcs, DM_ABSTRACTCS);
	while (get_field(abstractcs, DM_ABSTRACTCS_BUSY)) {
		dmi_read(target, &abstractcs, DM_ABSTRACTCS);

		if (time(NULL) - start > riscv_command_timeout_sec) {
			LOG_ERROR("abstractcs.busy is not going low after %d seconds "
					"(abstractcs=0x%x). The target is either really slow or "
					"broken. You could increase the timeout with riscv "
					"set_command_timeout_sec.",
					riscv_command_timeout_sec, abstractcs);
			break;
		}
	}
	/* Clear the error status. */
	dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
}

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)