fdebdd413d9ff0b00960c9a8af210042476e69b2
[openocd.git] / src / target / riscv / riscv-013.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /*
4 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
5 * latest draft.
6 */
7
8 #include <assert.h>
9 #include <stdlib.h>
10 #include <time.h>
11
12 #ifdef HAVE_CONFIG_H
13 #include "config.h"
14 #endif
15
16 #include "target/target.h"
17 #include "target/algorithm.h"
18 #include "target/target_type.h"
19 #include <helper/log.h>
20 #include "jtag/jtag.h"
21 #include "target/register.h"
22 #include "target/breakpoints.h"
23 #include "helper/time_support.h"
24 #include "helper/list.h"
25 #include "riscv.h"
26 #include "debug_defines.h"
27 #include "rtos/rtos.h"
28 #include "program.h"
29 #include "asm.h"
30 #include "batch.h"
31
32 #define DM_DATA1 (DM_DATA0 + 1)
33 #define DM_PROGBUF1 (DM_PROGBUF0 + 1)
34
35 static int riscv013_on_step_or_resume(struct target *target, bool step);
36 static int riscv013_step_or_resume_current_hart(struct target *target,
37 bool step, bool use_hasel);
38 static void riscv013_clear_abstract_error(struct target *target);
39
40 /* Implementations of the functions in riscv_info_t. */
41 static int riscv013_get_register(struct target *target,
42 riscv_reg_t *value, int hid, int rid);
43 static int riscv013_set_register(struct target *target, int hartid, int regid, uint64_t value);
44 static int riscv013_select_current_hart(struct target *target);
45 static int riscv013_halt_prep(struct target *target);
46 static int riscv013_halt_go(struct target *target);
47 static int riscv013_resume_go(struct target *target);
48 static int riscv013_step_current_hart(struct target *target);
49 static int riscv013_on_halt(struct target *target);
50 static int riscv013_on_step(struct target *target);
51 static int riscv013_resume_prep(struct target *target);
52 static bool riscv013_is_halted(struct target *target);
53 static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
54 static int riscv013_write_debug_buffer(struct target *target, unsigned index,
55 riscv_insn_t d);
56 static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
57 index);
58 static int riscv013_execute_debug_buffer(struct target *target);
59 static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
60 static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
61 static int riscv013_dmi_write_u64_bits(struct target *target);
62 static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);
63 static int register_read(struct target *target, uint64_t *value, uint32_t number);
64 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
65 static int register_write_direct(struct target *target, unsigned number,
66 uint64_t value);
67 static int read_memory(struct target *target, target_addr_t address,
68 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment);
69 static int write_memory(struct target *target, target_addr_t address,
70 uint32_t size, uint32_t count, const uint8_t *buffer);
71 static int riscv013_test_sba_config_reg(struct target *target, target_addr_t legal_address,
72 uint32_t num_words, target_addr_t illegal_address, bool run_sbbusyerror_test);
73 void write_memory_sba_simple(struct target *target, target_addr_t addr, uint32_t *write_data,
74 uint32_t write_size, uint32_t sbcs);
75 void read_memory_sba_simple(struct target *target, target_addr_t addr,
76 uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs);
77 static int riscv013_test_compliance(struct target *target);
78
79 /**
80 * Since almost everything can be accomplish by scanning the dbus register, all
81 * functions here assume dbus is already selected. The exception are functions
82 * called directly by OpenOCD, which can't assume anything about what's
83 * currently in IR. They should set IR to dbus explicitly.
84 */
85
86 #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
87 #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
88
89 #define CSR_DCSR_CAUSE_SWBP 1
90 #define CSR_DCSR_CAUSE_TRIGGER 2
91 #define CSR_DCSR_CAUSE_DEBUGINT 3
92 #define CSR_DCSR_CAUSE_STEP 4
93 #define CSR_DCSR_CAUSE_HALT 5
94 #define CSR_DCSR_CAUSE_GROUP 6
95
96 #define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
97
98 /*** JTAG registers. ***/
99
/* Operation encodings for the `op` field written into a DMI scan. */
typedef enum {
	DMI_OP_NOP = 0,
	DMI_OP_READ = 1,
	DMI_OP_WRITE = 2
} dmi_op_t;
/* Status encodings returned in the `op` field of a completed DMI scan. */
typedef enum {
	DMI_STATUS_SUCCESS = 0,
	DMI_STATUS_FAILED = 2,
	/* The previous DMI access had not completed; retry after more idle cycles. */
	DMI_STATUS_BUSY = 3
} dmi_status_t;
110
/* Identifiers for scratch slots; consumers are elsewhere in this file
 * (not visible in this chunk), so the exact slot semantics are defined there. */
typedef enum slot {
	SLOT0,
	SLOT1,
	SLOT_LAST,
} slot_t;
116
117 /*** Debug Bus registers. ***/
118
119 #define CMDERR_NONE 0
120 #define CMDERR_BUSY 1
121 #define CMDERR_NOT_SUPPORTED 2
122 #define CMDERR_EXCEPTION 3
123 #define CMDERR_HALT_RESUME 4
124 #define CMDERR_OTHER 7
125
126 /*** Info about the core being debugged. ***/
127
/* Description of a hardware trigger (watchpoint/breakpoint condition).
 * Presumably mirrors OpenOCD breakpoint/watchpoint parameters — the code
 * that installs these is outside this chunk. */
struct trigger {
	uint64_t address;
	uint32_t length;
	uint64_t mask;
	uint64_t value;
	/* Which accesses should fire the trigger. */
	bool read, write, execute;
	int unique_id;
};
136
/* Tri-state for capabilities that are discovered lazily: MAYBE until we
 * have probed, then YES or NO. */
typedef enum {
	YNM_MAYBE,
	YNM_YES,
	YNM_NO
} yes_no_maybe_t;
142
/* Per-Debug-Module state, shared by every target (hart) that is reached
 * through the same DM. Instances live on the global `dm_list` and are
 * looked up by the TAP's absolute chain position (see get_dm()). */
typedef struct {
	/* Node in the global dm_list. */
	struct list_head list;
	/* TAP chain position that identifies this DM. */
	int abs_chain_position;

	/* The number of harts connected to this DM. */
	int hart_count;
	/* Indicates we already reset this DM, so don't need to do it again. */
	bool was_reset;
	/* Targets that are connected to this DM. */
	struct list_head target_list;
	/* The currently selected hartid on this DM. */
	int current_hartid;
	bool hasel_supported;

	/* The program buffer stores executable code. 0 is an illegal instruction,
	 * so we use 0 to mean the cached value is invalid. */
	uint32_t progbuf_cache[16];
} dm013_info_t;
161
/* List node that links one target into its DM's target_list. */
typedef struct {
	struct list_head list;
	struct target *target;
} target_list_t;
166
/* Per-target (per-hart) state for the 0.13 debug implementation. Reached
 * from the generic riscv_info_t via version_specific (see get_info()). */
typedef struct {
	/* The index used to address this hart in its DM. */
	unsigned index;
	/* Number of address bits in the dbus register. */
	unsigned abits;
	/* Number of abstract command data registers. */
	unsigned datacount;
	/* Number of words in the Program Buffer. */
	unsigned progbufsize;

	/* We cache the read-only bits of sbcs here. */
	uint32_t sbcs;

	yes_no_maybe_t progbuf_writable;
	/* We only need the address so that we know the alignment of the buffer. */
	riscv_addr_t progbuf_address;

	/* Number of run-test/idle cycles the target requests we do after each dbus
	 * access. */
	unsigned int dtmcs_idle;

	/* This value is incremented every time a dbus access comes back as "busy".
	 * It's used to determine how many run-test/idle cycles to feed the target
	 * in between accesses. */
	unsigned int dmi_busy_delay;

	/* Number of run-test/idle cycles to add between consecutive bus master
	 * reads/writes respectively. */
	unsigned int bus_master_write_delay, bus_master_read_delay;

	/* This value is increased every time we tried to execute two commands
	 * consecutively, and the second one failed because the previous hadn't
	 * completed yet. It's used to add extra run-test/idle cycles after
	 * starting a command, so we don't have to waste time checking for busy to
	 * go low. */
	unsigned int ac_busy_delay;

	/* Capabilities discovered at runtime; cleared when an abstract access
	 * fails with "not supported" so we don't retry a hopeless method. */
	bool abstract_read_csr_supported;
	bool abstract_write_csr_supported;
	bool abstract_read_fpr_supported;
	bool abstract_write_fpr_supported;

	/* When a function returns some error due to a failure indicated by the
	 * target in cmderr, the caller can look here to see what that error was.
	 * (Compare with errno.) */
	uint8_t cmderr;

	/* Some fields from hartinfo. */
	uint8_t datasize;
	uint8_t dataaccess;
	int16_t dataaddr;

	/* The width of the hartsel field. */
	unsigned hartsellen;

	/* DM that provides access to this target. */
	dm013_info_t *dm;
} riscv013_info_t;
225
226 LIST_HEAD(dm_list);
227
228 static riscv013_info_t *get_info(const struct target *target)
229 {
230 riscv_info_t *info = (riscv_info_t *) target->arch_info;
231 return (riscv013_info_t *) info->version_specific;
232 }
233
/**
 * Return the DM structure for this target. If there isn't one, find it in the
 * global list of DMs. If it's not in there, then create one and initialize it
 * to 0.
 *
 * Also ensures this target is registered on the DM's target_list, so the DM
 * knows every target that reaches it. Returns NULL on allocation failure.
 */
dm013_info_t *get_dm(struct target *target)
{
	RISCV013_INFO(info);
	/* Fast path: we already resolved the DM for this target. */
	if (info->dm)
		return info->dm;

	/* DMs are keyed by the TAP's absolute position in the scan chain. */
	int abs_chain_position = target->tap->abs_chain_position;

	dm013_info_t *entry;
	dm013_info_t *dm = NULL;
	list_for_each_entry(entry, &dm_list, list) {
		if (entry->abs_chain_position == abs_chain_position) {
			dm = entry;
			break;
		}
	}

	if (!dm) {
		/* First target on this TAP: create a fresh DM record.
		 * -1 marks hartid/hart_count as not-yet-discovered. */
		LOG_DEBUG("[%d] Allocating new DM", target->coreid);
		dm = calloc(1, sizeof(dm013_info_t));
		if (!dm)
			return NULL;
		dm->abs_chain_position = abs_chain_position;
		dm->current_hartid = -1;
		dm->hart_count = -1;
		INIT_LIST_HEAD(&dm->target_list);
		list_add(&dm->list, &dm_list);
	}

	info->dm = dm;
	/* If this target is already on the DM's list, we're done. */
	target_list_t *target_entry;
	list_for_each_entry(target_entry, &dm->target_list, list) {
		if (target_entry->target == target)
			return dm;
	}
	target_entry = calloc(1, sizeof(*target_entry));
	if (!target_entry) {
		/* Undo the cached pointer so a later retry re-runs this path. */
		info->dm = NULL;
		return NULL;
	}
	target_entry->target = target;
	list_add(&target_entry->list, &dm->target_list);

	return dm;
}
284
285 static uint32_t set_hartsel(uint32_t initial, uint32_t index)
286 {
287 initial &= ~DM_DMCONTROL_HARTSELLO;
288 initial &= ~DM_DMCONTROL_HARTSELHI;
289
290 uint32_t index_lo = index & ((1 << DM_DMCONTROL_HARTSELLO_LENGTH) - 1);
291 initial |= index_lo << DM_DMCONTROL_HARTSELLO_OFFSET;
292 uint32_t index_hi = index >> DM_DMCONTROL_HARTSELLO_LENGTH;
293 assert(index_hi < 1 << DM_DMCONTROL_HARTSELHI_LENGTH);
294 initial |= index_hi << DM_DMCONTROL_HARTSELHI_OFFSET;
295
296 return initial;
297 }
298
299 static void decode_dmi(char *text, unsigned address, unsigned data)
300 {
301 static const struct {
302 unsigned address;
303 uint64_t mask;
304 const char *name;
305 } description[] = {
306 { DM_DMCONTROL, DM_DMCONTROL_HALTREQ, "haltreq" },
307 { DM_DMCONTROL, DM_DMCONTROL_RESUMEREQ, "resumereq" },
308 { DM_DMCONTROL, DM_DMCONTROL_HARTRESET, "hartreset" },
309 { DM_DMCONTROL, DM_DMCONTROL_HASEL, "hasel" },
310 { DM_DMCONTROL, DM_DMCONTROL_HARTSELHI, "hartselhi" },
311 { DM_DMCONTROL, DM_DMCONTROL_HARTSELLO, "hartsello" },
312 { DM_DMCONTROL, DM_DMCONTROL_NDMRESET, "ndmreset" },
313 { DM_DMCONTROL, DM_DMCONTROL_DMACTIVE, "dmactive" },
314 { DM_DMCONTROL, DM_DMCONTROL_ACKHAVERESET, "ackhavereset" },
315
316 { DM_DMSTATUS, DM_DMSTATUS_IMPEBREAK, "impebreak" },
317 { DM_DMSTATUS, DM_DMSTATUS_ALLHAVERESET, "allhavereset" },
318 { DM_DMSTATUS, DM_DMSTATUS_ANYHAVERESET, "anyhavereset" },
319 { DM_DMSTATUS, DM_DMSTATUS_ALLRESUMEACK, "allresumeack" },
320 { DM_DMSTATUS, DM_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
321 { DM_DMSTATUS, DM_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
322 { DM_DMSTATUS, DM_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
323 { DM_DMSTATUS, DM_DMSTATUS_ALLUNAVAIL, "allunavail" },
324 { DM_DMSTATUS, DM_DMSTATUS_ANYUNAVAIL, "anyunavail" },
325 { DM_DMSTATUS, DM_DMSTATUS_ALLRUNNING, "allrunning" },
326 { DM_DMSTATUS, DM_DMSTATUS_ANYRUNNING, "anyrunning" },
327 { DM_DMSTATUS, DM_DMSTATUS_ALLHALTED, "allhalted" },
328 { DM_DMSTATUS, DM_DMSTATUS_ANYHALTED, "anyhalted" },
329 { DM_DMSTATUS, DM_DMSTATUS_AUTHENTICATED, "authenticated" },
330 { DM_DMSTATUS, DM_DMSTATUS_AUTHBUSY, "authbusy" },
331 { DM_DMSTATUS, DM_DMSTATUS_HASRESETHALTREQ, "hasresethaltreq" },
332 { DM_DMSTATUS, DM_DMSTATUS_CONFSTRPTRVALID, "confstrptrvalid" },
333 { DM_DMSTATUS, DM_DMSTATUS_VERSION, "version" },
334
335 { DM_ABSTRACTCS, DM_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
336 { DM_ABSTRACTCS, DM_ABSTRACTCS_BUSY, "busy" },
337 { DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR, "cmderr" },
338 { DM_ABSTRACTCS, DM_ABSTRACTCS_DATACOUNT, "datacount" },
339
340 { DM_COMMAND, DM_COMMAND_CMDTYPE, "cmdtype" },
341
342 { DM_SBCS, DM_SBCS_SBVERSION, "sbversion" },
343 { DM_SBCS, DM_SBCS_SBBUSYERROR, "sbbusyerror" },
344 { DM_SBCS, DM_SBCS_SBBUSY, "sbbusy" },
345 { DM_SBCS, DM_SBCS_SBREADONADDR, "sbreadonaddr" },
346 { DM_SBCS, DM_SBCS_SBACCESS, "sbaccess" },
347 { DM_SBCS, DM_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
348 { DM_SBCS, DM_SBCS_SBREADONDATA, "sbreadondata" },
349 { DM_SBCS, DM_SBCS_SBERROR, "sberror" },
350 { DM_SBCS, DM_SBCS_SBASIZE, "sbasize" },
351 { DM_SBCS, DM_SBCS_SBACCESS128, "sbaccess128" },
352 { DM_SBCS, DM_SBCS_SBACCESS64, "sbaccess64" },
353 { DM_SBCS, DM_SBCS_SBACCESS32, "sbaccess32" },
354 { DM_SBCS, DM_SBCS_SBACCESS16, "sbaccess16" },
355 { DM_SBCS, DM_SBCS_SBACCESS8, "sbaccess8" },
356 };
357
358 text[0] = 0;
359 for (unsigned i = 0; i < ARRAY_SIZE(description); i++) {
360 if (description[i].address == address) {
361 uint64_t mask = description[i].mask;
362 unsigned value = get_field(data, mask);
363 if (value) {
364 if (i > 0)
365 *(text++) = ' ';
366 if (mask & (mask >> 1)) {
367 /* If the field is more than 1 bit wide. */
368 sprintf(text, "%s=%d", description[i].name, value);
369 } else {
370 strcpy(text, description[i].name);
371 }
372 text += strlen(text);
373 }
374 }
375 }
376 }
377
/* Log one DMI scan (out and in halves) at debug level, plus a symbolic
 * decode of any known register fields. @p idle is the number of
 * run-test/idle cycles that followed the scan; shown for context only. */
static void dump_field(int idle, const struct scan_field *field)
{
	/* Index 0..3 maps the 2-bit op/status values to short glyphs. */
	static const char * const op_string[] = {"-", "r", "w", "?"};
	static const char * const status_string[] = {"+", "?", "F", "b"};

	/* Skip all the buffer unpacking when debug logging is off. */
	if (debug_level < LOG_LVL_DEBUG)
		return;

	uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
	unsigned int out_op = get_field(out, DTM_DMI_OP);
	unsigned int out_data = get_field(out, DTM_DMI_DATA);
	unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;

	uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
	unsigned int in_op = get_field(in, DTM_DMI_OP);
	unsigned int in_data = get_field(in, DTM_DMI_DATA);
	unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;

	log_printf_lf(LOG_LVL_DEBUG,
			__FILE__, __LINE__, "scan",
			"%db %s %08x @%02x -> %s %08x @%02x; %di",
			field->num_bits, op_string[out_op], out_data, out_address,
			status_string[in_op], in_data, in_address, idle);

	/* Second line: symbolic names of any recognized register fields. */
	char out_text[500];
	char in_text[500];
	decode_dmi(out_text, out_address, out_data);
	decode_dmi(in_text, in_address, in_data);
	if (in_text[0] || out_text[0]) {
		log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
				out_text, in_text);
	}
}
411
412 /*** Utility functions. ***/
413
414 static void select_dmi(struct target *target)
415 {
416 if (bscan_tunnel_ir_width != 0) {
417 select_dmi_via_bscan(target);
418 return;
419 }
420 jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
421 }
422
/* Scan 32 bits of @p out through DTMCS and return what was scanned out of
 * the target. Leaves IR pointing back at DMI afterwards.
 * NOTE(review): on a failed queue execution this returns the (signed)
 * jtag error code through the uint32_t return value — callers cannot
 * reliably distinguish it from scan data; confirm callers tolerate this. */
static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
{
	struct scan_field field;
	uint8_t in_value[4];
	uint8_t out_value[4] = { 0 };

	/* Tunneled configurations take a completely different path. */
	if (bscan_tunnel_ir_width != 0)
		return dtmcontrol_scan_via_bscan(target, out);

	buf_set_u32(out_value, 0, 32, out);

	jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);

	field.num_bits = 32;
	field.out_value = out_value;
	field.in_value = in_value;
	jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);

	/* Always return to dmi. */
	select_dmi(target);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("failed jtag scan: %d", retval);
		return retval;
	}

	uint32_t in = buf_get_u32(field.in_value, 0, 32);
	LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);

	return in;
}
455
456 static void increase_dmi_busy_delay(struct target *target)
457 {
458 riscv013_info_t *info = get_info(target);
459 info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
460 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
461 info->dtmcs_idle, info->dmi_busy_delay,
462 info->ac_busy_delay);
463
464 dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
465 }
466
/**
 * Perform one raw DMI scan: shift out {op, address_out, data_out}, shift in
 * the previous access's result, and return its 2-bit status field.
 *
 * exec: If this is set, assume the scan results in an execution, so more
 * run-test/idle cycles may be required.
 *
 * @param address_in If non-NULL, receives the address field shifted in.
 * @param data_in    If non-NULL, receives the data field shifted in
 *                   (set to ~0 on JTAG failure).
 */
static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
		uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
		bool exec)
{
	riscv013_info_t *info = get_info(target);
	RISCV_INFO(r);
	/* DMI register layout: op | data | address, so total width depends on
	 * the discovered address-bit count. */
	unsigned num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH;
	size_t num_bytes = (num_bits + 7) / 8;
	uint8_t in[num_bytes];
	uint8_t out[num_bytes];
	struct scan_field field = {
		.num_bits = num_bits,
		.out_value = out,
		.in_value = in
	};
	riscv_bscan_tunneled_scan_context_t bscan_ctxt;

	/* After a reset we count down a few accesses before trusting (and
	 * zeroing) the learned busy delays again. */
	if (r->reset_delays_wait >= 0) {
		r->reset_delays_wait--;
		if (r->reset_delays_wait < 0) {
			info->dmi_busy_delay = 0;
			info->ac_busy_delay = 0;
		}
	}

	memset(in, 0, num_bytes);
	memset(out, 0, num_bytes);

	/* abits == 0 means examine() hasn't run; scanning now would be garbage. */
	assert(info->abits != 0);

	buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
	buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
	buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);

	/* I wanted to place this code in a different function, but the way JTAG command
	queueing works in the jtag handling functions, the scan fields either have to be
	heap allocated, global/static, or else they need to stay on the stack until
	the jtag_execute_queue() call. Heap or static fields in this case doesn't seem
	the best fit. Declaring stack based field values in a subsidiary function call wouldn't
	work. */
	if (bscan_tunnel_ir_width != 0) {
		riscv_add_bscan_tunneled_scan(target, &field, &bscan_ctxt);
	} else {
		/* Assume dbus is already selected. */
		jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
	}

	/* Feed the target idle cycles proportional to how often it has
	 * reported busy in the past. */
	int idle_count = info->dmi_busy_delay;
	if (exec)
		idle_count += info->ac_busy_delay;

	if (idle_count)
		jtag_add_runtest(idle_count, TAP_IDLE);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("dmi_scan failed jtag scan");
		if (data_in)
			*data_in = ~0;
		return DMI_STATUS_FAILED;
	}

	if (bscan_tunnel_ir_width != 0) {
		/* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
		buffer_shr(in, num_bytes, 1);
	}

	if (data_in)
		*data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);

	if (address_in)
		*address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
	dump_field(idle_count, &field);
	return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
}
546
547 /**
548 * @param target
549 * @param data_in The data we received from the target.
550 * @param dmi_busy_encountered
551 * If non-NULL, will be updated to reflect whether DMI busy was
552 * encountered while executing this operation or not.
553 * @param dmi_op The operation to perform (read/write/nop).
554 * @param address The address argument to that operation.
555 * @param data_out The data to send to the target.
556 * @param timeout_sec
557 * @param exec When true, this scan will execute something, so extra RTI
558 * cycles may be added.
559 * @param ensure_success
560 * Scan a nop after the requested operation, ensuring the
561 * DMI operation succeeded.
562 */
563 static int dmi_op_timeout(struct target *target, uint32_t *data_in,
564 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
565 uint32_t data_out, int timeout_sec, bool exec, bool ensure_success)
566 {
567 select_dmi(target);
568
569 dmi_status_t status;
570 uint32_t address_in;
571
572 if (dmi_busy_encountered)
573 *dmi_busy_encountered = false;
574
575 const char *op_name;
576 switch (dmi_op) {
577 case DMI_OP_NOP:
578 op_name = "nop";
579 break;
580 case DMI_OP_READ:
581 op_name = "read";
582 break;
583 case DMI_OP_WRITE:
584 op_name = "write";
585 break;
586 default:
587 LOG_ERROR("Invalid DMI operation: %d", dmi_op);
588 return ERROR_FAIL;
589 }
590
591 time_t start = time(NULL);
592 /* This first loop performs the request. Note that if for some reason this
593 * stays busy, it is actually due to the previous access. */
594 while (1) {
595 status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
596 exec);
597 if (status == DMI_STATUS_BUSY) {
598 increase_dmi_busy_delay(target);
599 if (dmi_busy_encountered)
600 *dmi_busy_encountered = true;
601 } else if (status == DMI_STATUS_SUCCESS) {
602 break;
603 } else {
604 LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
605 return ERROR_FAIL;
606 }
607 if (time(NULL) - start > timeout_sec)
608 return ERROR_TIMEOUT_REACHED;
609 }
610
611 if (status != DMI_STATUS_SUCCESS) {
612 LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
613 return ERROR_FAIL;
614 }
615
616 if (ensure_success) {
617 /* This second loop ensures the request succeeded, and gets back data.
618 * Note that NOP can result in a 'busy' result as well, but that would be
619 * noticed on the next DMI access we do. */
620 while (1) {
621 status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
622 false);
623 if (status == DMI_STATUS_BUSY) {
624 increase_dmi_busy_delay(target);
625 if (dmi_busy_encountered)
626 *dmi_busy_encountered = true;
627 } else if (status == DMI_STATUS_SUCCESS) {
628 break;
629 } else {
630 if (data_in) {
631 LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
632 op_name, address, *data_in, status);
633 } else {
634 LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
635 status);
636 }
637 return ERROR_FAIL;
638 }
639 if (time(NULL) - start > timeout_sec)
640 return ERROR_TIMEOUT_REACHED;
641 }
642 }
643
644 return ERROR_OK;
645 }
646
647 static int dmi_op(struct target *target, uint32_t *data_in,
648 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
649 uint32_t data_out, bool exec, bool ensure_success)
650 {
651 int result = dmi_op_timeout(target, data_in, dmi_busy_encountered, dmi_op,
652 address, data_out, riscv_command_timeout_sec, exec, ensure_success);
653 if (result == ERROR_TIMEOUT_REACHED) {
654 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
655 "either really slow or broken. You could increase the "
656 "timeout with riscv set_command_timeout_sec.",
657 riscv_command_timeout_sec);
658 return ERROR_FAIL;
659 }
660 return result;
661 }
662
/* Read DMI register @p address into @p value, with a confirming nop scan. */
static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false, true);
}
667
/* Like dmi_read(), but the read triggers execution on the target, so extra
 * run-test/idle cycles may be inserted. */
static int dmi_read_exec(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true, true);
}
672
/* Write @p value to DMI register @p address, with a confirming nop scan. */
static int dmi_write(struct target *target, uint32_t address, uint32_t value)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true);
}
677
/* Write @p value to DMI register @p address where the write triggers
 * execution; optionally confirm success with a trailing nop scan. */
static int dmi_write_exec(struct target *target, uint32_t address,
		uint32_t value, bool ensure_success)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true, ensure_success);
}
683
/* Read dmstatus with a caller-supplied timeout, sanity-checking the debug
 * module version and (optionally) that we are authenticated.
 * NOTE(review): an unsupported version is only logged here, not returned as
 * an error — the function still returns ERROR_OK in that case. Confirm this
 * leniency is intentional before relying on the return value. */
int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
		bool authenticated, unsigned timeout_sec)
{
	int result = dmi_op_timeout(target, dmstatus, NULL, DMI_OP_READ,
			DM_DMSTATUS, 0, timeout_sec, false, true);
	if (result != ERROR_OK)
		return result;
	int dmstatus_version = get_field(*dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (0.14), not "
				"%d (dmstatus=0x%x). This error might be caused by a JTAG "
				"signal issue. Try reducing the JTAG clock speed.",
				get_field(*dmstatus, DM_DMSTATUS_VERSION), *dmstatus);
	} else if (authenticated && !get_field(*dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", *dmstatus);
		return ERROR_FAIL;
	}
	return ERROR_OK;
}
705
/* Read dmstatus using the global command timeout. */
int dmstatus_read(struct target *target, uint32_t *dmstatus,
		bool authenticated)
{
	return dmstatus_read_timeout(target, dmstatus, authenticated,
			riscv_command_timeout_sec);
}
712
713 static void increase_ac_busy_delay(struct target *target)
714 {
715 riscv013_info_t *info = get_info(target);
716 info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
717 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
718 info->dtmcs_idle, info->dmi_busy_delay,
719 info->ac_busy_delay);
720 }
721
722 uint32_t abstract_register_size(unsigned width)
723 {
724 switch (width) {
725 case 32:
726 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 2);
727 case 64:
728 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 3);
729 case 128:
730 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 4);
731 default:
732 LOG_ERROR("Unsupported register width: %d", width);
733 return 0;
734 }
735 }
736
/* Poll abstractcs until its busy bit clears, leaving the final abstractcs
 * value in @p abstractcs. On timeout, records cmderr in info->cmderr, logs
 * a description, and returns ERROR_FAIL. */
static int wait_for_idle(struct target *target, uint32_t *abstractcs)
{
	RISCV013_INFO(info);
	time_t start = time(NULL);
	while (1) {
		if (dmi_read(target, abstractcs, DM_ABSTRACTCS) != ERROR_OK)
			return ERROR_FAIL;

		if (get_field(*abstractcs, DM_ABSTRACTCS_BUSY) == 0)
			return ERROR_OK;

		if (time(NULL) - start > riscv_command_timeout_sec) {
			info->cmderr = get_field(*abstractcs, DM_ABSTRACTCS_CMDERR);
			if (info->cmderr != CMDERR_NONE) {
				/* Indexed by the 3-bit cmderr field. */
				const char *errors[8] = {
					"none",
					"busy",
					"not supported",
					"exception",
					"halt/resume",
					"reserved",
					"reserved",
					"other" };

				LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
						errors[info->cmderr], *abstractcs);
			}

			LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
					"Increase the timeout with riscv set_command_timeout_sec.",
					riscv_command_timeout_sec,
					*abstractcs);
			return ERROR_FAIL;
		}
	}
}
773
/* Write @p command to the command register, wait for it to finish, and
 * record any cmderr in info->cmderr (cleared in the DM on failure so the
 * next command can run). Returns ERROR_OK only if the command completed
 * without error. */
static int execute_abstract_command(struct target *target, uint32_t command)
{
	RISCV013_INFO(info);
	if (debug_level >= LOG_LVL_DEBUG) {
		switch (get_field(command, DM_COMMAND_CMDTYPE)) {
			case 0:
				/* cmdtype 0 = access register; decode its fields for the log. */
				LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
						"transfer=%d, write=%d, regno=0x%x",
						command,
						8 << get_field(command, AC_ACCESS_REGISTER_AARSIZE),
						get_field(command, AC_ACCESS_REGISTER_POSTEXEC),
						get_field(command, AC_ACCESS_REGISTER_TRANSFER),
						get_field(command, AC_ACCESS_REGISTER_WRITE),
						get_field(command, AC_ACCESS_REGISTER_REGNO));
				break;
			default:
				LOG_DEBUG("command=0x%x", command);
				break;
		}
	}

	/* Don't confirm the write here; wait_for_idle() below will surface any
	 * busy/error condition. */
	if (dmi_write_exec(target, DM_COMMAND, command, false) != ERROR_OK)
		return ERROR_FAIL;

	uint32_t abstractcs = 0;
	int result = wait_for_idle(target, &abstractcs);

	info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
	if (info->cmderr != 0 || result != ERROR_OK) {
		LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
		/* Clear the error. */
		dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
811
/* Read abstract-command argument @p index (of @p size_bits bits) out of the
 * data registers. Returns ~0 for an unsupported size.
 * NOTE(review): the dmi_read() return values are not checked here, so a DMI
 * failure yields a partially-assembled value; dmi_read() does log errors. */
static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
		unsigned size_bits)
{
	riscv_reg_t value = 0;
	uint32_t v;
	/* Arguments are packed into data0..dataN in 32-bit words. */
	unsigned offset = index * size_bits / 32;
	switch (size_bits) {
		default:
			LOG_ERROR("Unsupported size: %d bits", size_bits);
			return ~0;
		case 64:
			dmi_read(target, &v, DM_DATA0 + offset + 1);
			value |= ((uint64_t) v) << 32;
		/* falls through */
		case 32:
			dmi_read(target, &v, DM_DATA0 + offset);
			value |= v;
	}
	return value;
}
832
833 static int write_abstract_arg(struct target *target, unsigned index,
834 riscv_reg_t value, unsigned size_bits)
835 {
836 unsigned offset = index * size_bits / 32;
837 switch (size_bits) {
838 default:
839 LOG_ERROR("Unsupported size: %d bits", size_bits);
840 return ERROR_FAIL;
841 case 64:
842 dmi_write(target, DM_DATA0 + offset + 1, value >> 32);
843 /* falls through */
844 case 32:
845 dmi_write(target, DM_DATA0 + offset, value);
846 }
847 return ERROR_OK;
848 }
849
/**
 * Build an access-register abstract command for GDB register @p number.
 *
 * @par size in bits
 * @param flags Extra AC_ACCESS_REGISTER_* bits (transfer/write/postexec)
 *              OR'd into the command.
 *
 * Register-number mapping follows the debug spec's abstract regno space:
 * 0x1000+ for GPRs, 0x1020+ for FPRs, raw CSR numbers for CSRs, and
 * 0xc000+ for target-custom registers. Asserts on anything else.
 */
static uint32_t access_register_command(struct target *target, uint32_t number,
		unsigned size, uint32_t flags)
{
	uint32_t command = set_field(0, DM_COMMAND_CMDTYPE, 0);
	switch (size) {
		case 32:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 2);
			break;
		case 64:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3);
			break;
		default:
			LOG_ERROR("%d-bit register %s not supported.", size,
					gdb_regno_name(number));
			assert(0);
	}

	if (number <= GDB_REGNO_XPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1000 + number - GDB_REGNO_ZERO);
	} else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1020 + number - GDB_REGNO_FPR0);
	} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				number - GDB_REGNO_CSR0);
	} else if (number >= GDB_REGNO_COUNT) {
		/* Custom register. */
		assert(target->reg_cache->reg_list[number].arch_info);
		riscv_reg_info_t *reg_info = target->reg_cache->reg_list[number].arch_info;
		assert(reg_info);
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0xc000 + reg_info->custom_number);
	} else {
		assert(0);
	}

	command |= flags;

	return command;
}
894
/* Read register @p number via an abstract command into @p value (may be
 * NULL to just probe). Fails fast if a previous attempt proved this
 * register class unsupported; on a fresh "not supported" cmderr, remembers
 * that so future calls skip the abstract path. */
static int register_read_abstract(struct target *target, uint64_t *value,
		uint32_t number, unsigned size)
{
	RISCV013_INFO(info);

	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			!info->abstract_read_fpr_supported)
		return ERROR_FAIL;
	if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
			!info->abstract_read_csr_supported)
		return ERROR_FAIL;
	/* The spec doesn't define abstract register numbers for vector registers. */
	if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31)
		return ERROR_FAIL;

	uint32_t command = access_register_command(target, number, size,
			AC_ACCESS_REGISTER_TRANSFER);

	int result = execute_abstract_command(target, command);
	if (result != ERROR_OK) {
		if (info->cmderr == CMDERR_NOT_SUPPORTED) {
			/* Remember the failure class so we don't retry hopelessly. */
			if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
				info->abstract_read_fpr_supported = false;
				LOG_INFO("Disabling abstract command reads from FPRs.");
			} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
				info->abstract_read_csr_supported = false;
				LOG_INFO("Disabling abstract command reads from CSRs.");
			}
		}
		return result;
	}

	if (value)
		*value = read_abstract_arg(target, 0, size);

	return ERROR_OK;
}
932
/* Write @p value to register @p number via an abstract command. Mirrors
 * register_read_abstract(): skips register classes already known to be
 * unsupported, and records a fresh "not supported" cmderr so future calls
 * skip the abstract path. */
static int register_write_abstract(struct target *target, uint32_t number,
		uint64_t value, unsigned size)
{
	RISCV013_INFO(info);

	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			!info->abstract_write_fpr_supported)
		return ERROR_FAIL;
	if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
			!info->abstract_write_csr_supported)
		return ERROR_FAIL;

	uint32_t command = access_register_command(target, number, size,
			AC_ACCESS_REGISTER_TRANSFER |
			AC_ACCESS_REGISTER_WRITE);

	/* Stage the value in the data registers before issuing the command. */
	if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
		return ERROR_FAIL;

	int result = execute_abstract_command(target, command);
	if (result != ERROR_OK) {
		if (info->cmderr == CMDERR_NOT_SUPPORTED) {
			if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
				info->abstract_write_fpr_supported = false;
				LOG_INFO("Disabling abstract command writes to FPRs.");
			} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
				info->abstract_write_csr_supported = false;
				LOG_INFO("Disabling abstract command writes to CSRs.");
			}
		}
		return result;
	}

	return ERROR_OK;
}
968
969 /*
970 * Sets the AAMSIZE field of a memory access abstract command based on
971 * the width (bits).
972 */
973 static uint32_t abstract_memory_size(unsigned width)
974 {
975 switch (width) {
976 case 8:
977 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 0);
978 case 16:
979 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 1);
980 case 32:
981 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 2);
982 case 64:
983 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 3);
984 case 128:
985 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 4);
986 default:
987 LOG_ERROR("Unsupported memory width: %d", width);
988 return 0;
989 }
990 }
991
992 /*
993 * Creates a memory access abstract command.
994 */
995 static uint32_t access_memory_command(struct target *target, bool virtual,
996 unsigned width, bool postincrement, bool write)
997 {
998 uint32_t command = set_field(0, AC_ACCESS_MEMORY_CMDTYPE, 2);
999 command = set_field(command, AC_ACCESS_MEMORY_AAMVIRTUAL, virtual);
1000 command |= abstract_memory_size(width);
1001 command = set_field(command, AC_ACCESS_MEMORY_AAMPOSTINCREMENT,
1002 postincrement);
1003 command = set_field(command, AC_ACCESS_MEMORY_WRITE, write);
1004
1005 return command;
1006 }
1007
/* Probe whether the program buffer can be written through DMI while a
 * program is resident, caching the answer in info->progbuf_writable. Also
 * discovers the address at which the executing hart sees the program
 * buffer (info->progbuf_address). The probe runs at most once; later calls
 * return immediately with the cached result. */
static int examine_progbuf(struct target *target)
{
	riscv013_info_t *info = get_info(target);

	/* Already probed (YNM_YES or YNM_NO): nothing to do. */
	if (info->progbuf_writable != YNM_MAYBE)
		return ERROR_OK;

	/* Figure out if progbuf is writable. */

	if (info->progbufsize < 1) {
		info->progbuf_writable = YNM_NO;
		LOG_INFO("No program buffer present.");
		return ERROR_OK;
	}

	/* Save S0; the probe programs below clobber it. */
	uint64_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* auipc with a zero immediate puts the executing PC into S0, telling us
	 * where the hart sees the program buffer. */
	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, auipc(S0));
	if (riscv_program_exec(&program, target) != ERROR_OK)
		return ERROR_FAIL;

	if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Try storing S0 (which holds the progbuf address) into the progbuf
	 * itself, at word 0. */
	riscv_program_init(&program, target);
	riscv_program_insert(&program, sw(S0, S0, 0));
	int result = riscv_program_exec(&program, target);

	/* Restore S0 before acting on the probe's outcome. */
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	if (result != ERROR_OK) {
		/* This program might have failed if the program buffer is not
		 * writable. */
		info->progbuf_writable = YNM_NO;
		return ERROR_OK;
	}

	/* Read back word 0: if the store took effect, it now holds the low
	 * 32 bits of the progbuf address. */
	uint32_t written;
	if (dmi_read(target, &written, DM_PROGBUF0) != ERROR_OK)
		return ERROR_FAIL;
	if (written == (uint32_t) info->progbuf_address) {
		LOG_INFO("progbuf is writable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_YES;

	} else {
		LOG_INFO("progbuf is not writeable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_NO;
	}

	return ERROR_OK;
}
1066
1067 static int is_fpu_reg(uint32_t gdb_regno)
1068 {
1069 return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) ||
1070 (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) ||
1071 (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) ||
1072 (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR);
1073 }
1074
1075 static int is_vector_reg(uint32_t gdb_regno)
1076 {
1077 return (gdb_regno >= GDB_REGNO_V0 && gdb_regno <= GDB_REGNO_V31) ||
1078 gdb_regno == GDB_REGNO_VSTART ||
1079 gdb_regno == GDB_REGNO_VXSAT ||
1080 gdb_regno == GDB_REGNO_VXRM ||
1081 gdb_regno == GDB_REGNO_VL ||
1082 gdb_regno == GDB_REGNO_VTYPE ||
1083 gdb_regno == GDB_REGNO_VLENB;
1084 }
1085
1086 static int prep_for_register_access(struct target *target, uint64_t *mstatus,
1087 int regno)
1088 {
1089 if (is_fpu_reg(regno) || is_vector_reg(regno)) {
1090 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
1091 return ERROR_FAIL;
1092 if (is_fpu_reg(regno) && (*mstatus & MSTATUS_FS) == 0) {
1093 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1094 set_field(*mstatus, MSTATUS_FS, 1)) != ERROR_OK)
1095 return ERROR_FAIL;
1096 } else if (is_vector_reg(regno) && (*mstatus & MSTATUS_VS) == 0) {
1097 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1098 set_field(*mstatus, MSTATUS_VS, 1)) != ERROR_OK)
1099 return ERROR_FAIL;
1100 }
1101 } else {
1102 *mstatus = 0;
1103 }
1104 return ERROR_OK;
1105 }
1106
1107 static int cleanup_after_register_access(struct target *target,
1108 uint64_t mstatus, int regno)
1109 {
1110 if ((is_fpu_reg(regno) && (mstatus & MSTATUS_FS) == 0) ||
1111 (is_vector_reg(regno) && (mstatus & MSTATUS_VS) == 0))
1112 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
1113 return ERROR_FAIL;
1114 return ERROR_OK;
1115 }
1116
/* Which mechanism the debugger uses to reach a piece of scratch memory. */
typedef enum {
	SPACE_DM_DATA,		/* Debug Module data registers (DM_DATA0...). */
	SPACE_DMI_PROGBUF,	/* Program buffer words, written over DMI. */
	SPACE_DMI_RAM		/* Ordinary target RAM (a configured work area). */
} memory_space_t;

/* A reserved chunk of scratch memory; see scratch_reserve()/scratch_release(). */
typedef struct {
	/* How can the debugger access this memory? */
	memory_space_t memory_space;
	/* Memory address to access the scratch memory from the hart. */
	riscv_addr_t hart_address;
	/* Memory address to access the scratch memory from the debugger. */
	riscv_addr_t debug_address;
	/* Non-NULL only when the scratch came from a target working area. */
	struct working_area *area;
} scratch_mem_t;
1132
1133 /**
1134 * Find some scratch memory to be used with the given program.
1135 */
1136 static int scratch_reserve(struct target *target,
1137 scratch_mem_t *scratch,
1138 struct riscv_program *program,
1139 unsigned size_bytes)
1140 {
1141 riscv_addr_t alignment = 1;
1142 while (alignment < size_bytes)
1143 alignment *= 2;
1144
1145 scratch->area = NULL;
1146
1147 riscv013_info_t *info = get_info(target);
1148
1149 /* Option 1: See if data# registers can be used as the scratch memory */
1150 if (info->dataaccess == 1) {
1151 /* Sign extend dataaddr. */
1152 scratch->hart_address = info->dataaddr;
1153 if (info->dataaddr & (1<<11))
1154 scratch->hart_address |= 0xfffffffffffff000ULL;
1155 /* Align. */
1156 scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);
1157
1158 if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 >=
1159 info->datasize) {
1160 scratch->memory_space = SPACE_DM_DATA;
1161 scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
1162 return ERROR_OK;
1163 }
1164 }
1165
1166 /* Option 2: See if progbuf can be used as the scratch memory */
1167 if (examine_progbuf(target) != ERROR_OK)
1168 return ERROR_FAIL;
1169
1170 /* Allow for ebreak at the end of the program. */
1171 unsigned program_size = (program->instruction_count + 1) * 4;
1172 scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
1173 ~(alignment - 1);
1174 if ((info->progbuf_writable == YNM_YES) &&
1175 ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 >=
1176 info->progbufsize)) {
1177 scratch->memory_space = SPACE_DMI_PROGBUF;
1178 scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
1179 return ERROR_OK;
1180 }
1181
1182 /* Option 3: User-configured memory area as scratch RAM */
1183 if (target_alloc_working_area(target, size_bytes + alignment - 1,
1184 &scratch->area) == ERROR_OK) {
1185 scratch->hart_address = (scratch->area->address + alignment - 1) &
1186 ~(alignment - 1);
1187 scratch->memory_space = SPACE_DMI_RAM;
1188 scratch->debug_address = scratch->hart_address;
1189 return ERROR_OK;
1190 }
1191
1192 LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
1193 "a work area with 'configure -work-area-phys'.", size_bytes);
1194 return ERROR_FAIL;
1195 }
1196
1197 static int scratch_release(struct target *target,
1198 scratch_mem_t *scratch)
1199 {
1200 if (scratch->area)
1201 return target_free_working_area(target, scratch->area);
1202
1203 return ERROR_OK;
1204 }
1205
1206 static int scratch_read64(struct target *target, scratch_mem_t *scratch,
1207 uint64_t *value)
1208 {
1209 uint32_t v;
1210 switch (scratch->memory_space) {
1211 case SPACE_DM_DATA:
1212 if (dmi_read(target, &v, DM_DATA0 + scratch->debug_address) != ERROR_OK)
1213 return ERROR_FAIL;
1214 *value = v;
1215 if (dmi_read(target, &v, DM_DATA1 + scratch->debug_address) != ERROR_OK)
1216 return ERROR_FAIL;
1217 *value |= ((uint64_t) v) << 32;
1218 break;
1219 case SPACE_DMI_PROGBUF:
1220 if (dmi_read(target, &v, DM_PROGBUF0 + scratch->debug_address) != ERROR_OK)
1221 return ERROR_FAIL;
1222 *value = v;
1223 if (dmi_read(target, &v, DM_PROGBUF1 + scratch->debug_address) != ERROR_OK)
1224 return ERROR_FAIL;
1225 *value |= ((uint64_t) v) << 32;
1226 break;
1227 case SPACE_DMI_RAM:
1228 {
1229 uint8_t buffer[8] = {0};
1230 if (read_memory(target, scratch->debug_address, 4, 2, buffer, 4) != ERROR_OK)
1231 return ERROR_FAIL;
1232 *value = buffer[0] |
1233 (((uint64_t) buffer[1]) << 8) |
1234 (((uint64_t) buffer[2]) << 16) |
1235 (((uint64_t) buffer[3]) << 24) |
1236 (((uint64_t) buffer[4]) << 32) |
1237 (((uint64_t) buffer[5]) << 40) |
1238 (((uint64_t) buffer[6]) << 48) |
1239 (((uint64_t) buffer[7]) << 56);
1240 }
1241 break;
1242 }
1243 return ERROR_OK;
1244 }
1245
1246 static int scratch_write64(struct target *target, scratch_mem_t *scratch,
1247 uint64_t value)
1248 {
1249 switch (scratch->memory_space) {
1250 case SPACE_DM_DATA:
1251 dmi_write(target, DM_DATA0 + scratch->debug_address, value);
1252 dmi_write(target, DM_DATA1 + scratch->debug_address, value >> 32);
1253 break;
1254 case SPACE_DMI_PROGBUF:
1255 dmi_write(target, DM_PROGBUF0 + scratch->debug_address, value);
1256 dmi_write(target, DM_PROGBUF1 + scratch->debug_address, value >> 32);
1257 break;
1258 case SPACE_DMI_RAM:
1259 {
1260 uint8_t buffer[8] = {
1261 value,
1262 value >> 8,
1263 value >> 16,
1264 value >> 24,
1265 value >> 32,
1266 value >> 40,
1267 value >> 48,
1268 value >> 56
1269 };
1270 if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1271 return ERROR_FAIL;
1272 }
1273 break;
1274 }
1275 return ERROR_OK;
1276 }
1277
1278 /** Return register size in bits. */
1279 static unsigned register_size(struct target *target, unsigned number)
1280 {
1281 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1282 * when this function is called during examine(). */
1283 if (target->reg_cache)
1284 return target->reg_cache->reg_list[number].size;
1285 else
1286 return riscv_xlen(target);
1287 }
1288
1289 static bool has_sufficient_progbuf(struct target *target, unsigned size)
1290 {
1291 RISCV013_INFO(info);
1292 RISCV_INFO(r);
1293
1294 return info->progbufsize + r->impebreak >= size;
1295 }
1296
1297 /**
1298 * Immediately write the new value to the requested register. This mechanism
1299 * bypasses any caches.
1300 */
1301 static int register_write_direct(struct target *target, unsigned number,
1302 uint64_t value)
1303 {
1304 LOG_DEBUG("{%d} %s <- 0x%" PRIx64, riscv_current_hartid(target),
1305 gdb_regno_name(number), value);
1306
1307 int result = register_write_abstract(target, number, value,
1308 register_size(target, number));
1309 if (result == ERROR_OK || !has_sufficient_progbuf(target, 2) ||
1310 !riscv_is_halted(target))
1311 return result;
1312
1313 struct riscv_program program;
1314 riscv_program_init(&program, target);
1315
1316 uint64_t s0;
1317 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1318 return ERROR_FAIL;
1319
1320 uint64_t mstatus;
1321 if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
1322 return ERROR_FAIL;
1323
1324 scratch_mem_t scratch;
1325 bool use_scratch = false;
1326 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
1327 riscv_supports_extension(target, riscv_current_hartid(target), 'D') &&
1328 riscv_xlen(target) < 64) {
1329 /* There are no instructions to move all the bits from a register, so
1330 * we need to use some scratch RAM. */
1331 use_scratch = true;
1332 riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));
1333
1334 if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
1335 return ERROR_FAIL;
1336
1337 if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
1338 != ERROR_OK) {
1339 scratch_release(target, &scratch);
1340 return ERROR_FAIL;
1341 }
1342
1343 if (scratch_write64(target, &scratch, value) != ERROR_OK) {
1344 scratch_release(target, &scratch);
1345 return ERROR_FAIL;
1346 }
1347
1348 } else if (number == GDB_REGNO_VTYPE) {
1349 riscv_program_insert(&program, csrr(S0, CSR_VL));
1350 riscv_program_insert(&program, vsetvli(ZERO, S0, value));
1351
1352 } else {
1353 if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
1354 return ERROR_FAIL;
1355
1356 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
1357 if (riscv_supports_extension(target, riscv_current_hartid(target), 'D'))
1358 riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
1359 else
1360 riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
1361 } else if (number == GDB_REGNO_VL) {
1362 /* "The XLEN-bit-wide read-only vl CSR can only be updated by the
1363 * vsetvli and vsetvl instructions, and the fault-only-rst vector
1364 * load instruction variants." */
1365 riscv_reg_t vtype;
1366 if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1367 return ERROR_FAIL;
1368 if (riscv_program_insert(&program, vsetvli(ZERO, S0, vtype)) != ERROR_OK)
1369 return ERROR_FAIL;
1370 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
1371 riscv_program_csrw(&program, S0, number);
1372 } else {
1373 LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
1374 return ERROR_FAIL;
1375 }
1376 }
1377
1378 int exec_out = riscv_program_exec(&program, target);
1379 /* Don't message on error. Probably the register doesn't exist. */
1380 if (exec_out == ERROR_OK && target->reg_cache) {
1381 struct reg *reg = &target->reg_cache->reg_list[number];
1382 buf_set_u64(reg->value, 0, reg->size, value);
1383 }
1384
1385 if (use_scratch)
1386 scratch_release(target, &scratch);
1387
1388 if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
1389 return ERROR_FAIL;
1390
1391 /* Restore S0. */
1392 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1393 return ERROR_FAIL;
1394
1395 return exec_out;
1396 }
1397
1398 /** Return the cached value, or read from the target if necessary. */
1399 static int register_read(struct target *target, uint64_t *value, uint32_t number)
1400 {
1401 if (number == GDB_REGNO_ZERO) {
1402 *value = 0;
1403 return ERROR_OK;
1404 }
1405 int result = register_read_direct(target, value, number);
1406 if (result != ERROR_OK)
1407 return ERROR_FAIL;
1408 if (target->reg_cache) {
1409 struct reg *reg = &target->reg_cache->reg_list[number];
1410 buf_set_u64(reg->value, 0, reg->size, *value);
1411 }
1412 return ERROR_OK;
1413 }
1414
/** Actually read registers from the target right now. */
static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
{
	/* First try an abstract register access command. */
	int result = register_read_abstract(target, value, number,
			register_size(target, number));

	/* Fall back to executing a program when the abstract access failed, a
	 * program buffer is available, and the register is not a GPR (the
	 * program path below stages the value through S0). */
	if (result != ERROR_OK &&
			has_sufficient_progbuf(target, 2) &&
			number > GDB_REGNO_XPR31) {
		struct riscv_program program;
		riscv_program_init(&program, target);

		scratch_mem_t scratch;
		bool use_scratch = false;

		/* S0 is used as a staging register; save it for restoration. */
		riscv_reg_t s0;
		if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
			return ERROR_FAIL;

		/* Write program to move data into s0. */

		/* For FPU/vector registers, make sure mstatus.FS/VS allow access. */
		uint64_t mstatus;
		if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			if (riscv_supports_extension(target, riscv_current_hartid(target), 'D')
					&& riscv_xlen(target) < 64) {
				/* There are no instructions to move all the bits from a
				 * register, so we need to use some scratch RAM. */
				riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
							0));

				if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
					return ERROR_FAIL;
				use_scratch = true;

				/* Point S0 at the scratch area for the fsd above. */
				if (register_write_direct(target, GDB_REGNO_S0,
							scratch.hart_address) != ERROR_OK) {
					scratch_release(target, &scratch);
					return ERROR_FAIL;
				}
			} else if (riscv_supports_extension(target,
						riscv_current_hartid(target), 'D')) {
				riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
			} else {
				riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
			}
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrr(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register: %s", gdb_regno_name(number));
			return ERROR_FAIL;
		}

		/* Execute program. */
		result = riscv_program_exec(&program, target);
		/* Don't message on error. Probably the register doesn't exist. */

		if (use_scratch) {
			/* The program stored the value to scratch RAM; fetch it. */
			result = scratch_read64(target, &scratch, value);
			scratch_release(target, &scratch);
			if (result != ERROR_OK)
				return result;
		} else {
			/* Read S0 */
			if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
		}

		/* Undo any mstatus.FS/VS change made by prep_for_register_access(). */
		if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		/* Restore S0. */
		if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
			return ERROR_FAIL;
	}

	if (result == ERROR_OK) {
		LOG_DEBUG("{%d} %s = 0x%" PRIx64, riscv_current_hartid(target),
				gdb_regno_name(number), *value);
	}

	return result;
}
1500
1501 int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
1502 {
1503 time_t start = time(NULL);
1504 while (1) {
1505 uint32_t value;
1506 if (dmstatus_read(target, &value, false) != ERROR_OK)
1507 return ERROR_FAIL;
1508 if (dmstatus)
1509 *dmstatus = value;
1510 if (!get_field(value, DM_DMSTATUS_AUTHBUSY))
1511 break;
1512 if (time(NULL) - start > riscv_command_timeout_sec) {
1513 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1514 "Increase the timeout with riscv set_command_timeout_sec.",
1515 riscv_command_timeout_sec,
1516 value);
1517 return ERROR_FAIL;
1518 }
1519 }
1520
1521 return ERROR_OK;
1522 }
1523
1524 /*** OpenOCD target functions. ***/
1525
1526 static void deinit_target(struct target *target)
1527 {
1528 LOG_DEBUG("riscv_deinit_target()");
1529 riscv_info_t *info = (riscv_info_t *) target->arch_info;
1530 free(info->version_specific);
1531 /* TODO: free register arch_info */
1532 info->version_specific = NULL;
1533 }
1534
1535 static int set_haltgroup(struct target *target, bool *supported)
1536 {
1537 uint32_t write = set_field(DM_DMCS2_HGWRITE, DM_DMCS2_GROUP, target->smp);
1538 if (dmi_write(target, DM_DMCS2, write) != ERROR_OK)
1539 return ERROR_FAIL;
1540 uint32_t read;
1541 if (dmi_read(target, &read, DM_DMCS2) != ERROR_OK)
1542 return ERROR_FAIL;
1543 *supported = get_field(read, DM_DMCS2_GROUP) == (unsigned)target->smp;
1544 return ERROR_OK;
1545 }
1546
/* Read vlenb (vector register length in bytes) for the given hart and
 * record it in r->vlenb[hartid]. A failed read is deliberately non-fatal:
 * vlenb is recorded as 0 (disabling vector register access) and a warning
 * is logged, so examine() can still complete. */
static int discover_vlenb(struct target *target, int hartid)
{
	RISCV_INFO(r);
	riscv_reg_t vlenb;

	if (register_read(target, &vlenb, GDB_REGNO_VLENB) != ERROR_OK) {
		LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
				target_name(target));
		r->vlenb[hartid] = 0;
		return ERROR_OK;
	}
	r->vlenb[hartid] = vlenb;

	LOG_INFO("hart %d: Vector support with vlenb=%d", hartid, r->vlenb[hartid]);

	return ERROR_OK;
}
1564
/* Examine the target: probe the DTM, reset and activate the Debug Module,
 * enumerate the harts behind it, determine each hart's XLEN and misa, and
 * initialize the register cache. Returns ERROR_OK even when the DM is
 * unauthenticated, so that sibling cores still get examined. */
static int examine(struct target *target)
{
	/* Don't need to select dbus, since the first thing we do is read dtmcontrol. */

	uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
	LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
	LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
	LOG_DEBUG(" idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
	LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
	LOG_DEBUG(" abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
	LOG_DEBUG(" version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
	if (dtmcontrol == 0) {
		LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
		return ERROR_FAIL;
	}
	if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
		LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
				get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
		return ERROR_FAIL;
	}

	riscv013_info_t *info = get_info(target);
	/* TODO: This won't be true if there are multiple DMs. */
	info->index = target->coreid;
	info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
	info->dtmcs_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);

	/* Reset the Debug Module. */
	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;
	if (!dm->was_reset) {
		dmi_write(target, DM_DMCONTROL, 0);
		dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
		dm->was_reset = true;
	}

	/* Write all-ones to hartsel (and hasel) to discover which bits are
	 * actually implemented. */
	dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_HARTSELLO |
			DM_DMCONTROL_HARTSELHI | DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_HASEL);
	uint32_t dmcontrol;
	if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
		return ERROR_FAIL;

	if (!get_field(dmcontrol, DM_DMCONTROL_DMACTIVE)) {
		LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
				dmcontrol);
		return ERROR_FAIL;
	}

	dm->hasel_supported = get_field(dmcontrol, DM_DMCONTROL_HASEL);

	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("dmstatus: 0x%08x", dmstatus);
	int dmstatus_version = get_field(dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		/* Error was already printed out in dmstatus_read(). */
		return ERROR_FAIL;
	}

	/* Count how many hartsel bits read back as 1 to get hartsellen. */
	uint32_t hartsel =
		(get_field(dmcontrol, DM_DMCONTROL_HARTSELHI) <<
		DM_DMCONTROL_HARTSELLO_LENGTH) |
		get_field(dmcontrol, DM_DMCONTROL_HARTSELLO);
	info->hartsellen = 0;
	while (hartsel & 1) {
		info->hartsellen++;
		hartsel >>= 1;
	}
	LOG_DEBUG("hartsellen=%d", info->hartsellen);

	uint32_t hartinfo;
	if (dmi_read(target, &hartinfo, DM_HARTINFO) != ERROR_OK)
		return ERROR_FAIL;

	info->datasize = get_field(hartinfo, DM_HARTINFO_DATASIZE);
	info->dataaccess = get_field(hartinfo, DM_HARTINFO_DATAACCESS);
	info->dataaddr = get_field(hartinfo, DM_HARTINFO_DATAADDR);

	if (!get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", dmstatus);
		/* If we return ERROR_FAIL here, then in a multicore setup the next
		 * core won't be examined, which means we won't set up the
		 * authentication commands for them, which means the config script
		 * needs to be a lot more complex. */
		return ERROR_OK;
	}

	if (dmi_read(target, &info->sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	/* Check that abstract data registers are accessible. */
	uint32_t abstractcs;
	if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
		return ERROR_FAIL;
	info->datacount = get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT);
	info->progbufsize = get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE);

	LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);

	RISCV_INFO(r);
	r->impebreak = get_field(dmstatus, DM_DMSTATUS_IMPEBREAK);

	if (!has_sufficient_progbuf(target, 2)) {
		LOG_WARNING("We won't be able to execute fence instructions on this "
				"target. Memory may not always appear consistent. "
				"(progbufsize=%d, impebreak=%d)", info->progbufsize,
				r->impebreak);
	}

	if (info->progbufsize < 4 && riscv_enable_virtual) {
		LOG_ERROR("set_enable_virtual is not available on this target. It "
				"requires a program buffer size of at least 4. (progbufsize=%d) "
				"Use `riscv set_enable_virtual off` to continue."
				, info->progbufsize);
	}

	/* Before doing anything else we must first enumerate the harts. */
	if (dm->hart_count < 0) {
		/* Select each hart id in turn until one reads as nonexistent. */
		for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
			r->current_hartid = i;
			if (riscv013_select_current_hart(target) != ERROR_OK)
				return ERROR_FAIL;

			uint32_t s;
			if (dmstatus_read(target, &s, true) != ERROR_OK)
				return ERROR_FAIL;
			if (get_field(s, DM_DMSTATUS_ANYNONEXISTENT))
				break;
			dm->hart_count = i + 1;

			/* Acknowledge any pending havereset for this hart. */
			if (get_field(s, DM_DMSTATUS_ANYHAVERESET))
				dmi_write(target, DM_DMCONTROL,
						set_hartsel(DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_ACKHAVERESET, i));
		}

		LOG_DEBUG("Detected %d harts.", dm->hart_count);
	}

	if (dm->hart_count == 0) {
		LOG_ERROR("No harts found!");
		return ERROR_FAIL;
	}

	/* Don't call any riscv_* functions until after we've counted the number of
	 * cores and initialized registers. */
	for (int i = 0; i < dm->hart_count; ++i) {
		if (!riscv_rtos_enabled(target) && i != target->coreid)
			continue;

		r->current_hartid = i;
		if (riscv013_select_current_hart(target) != ERROR_OK)
			return ERROR_FAIL;

		/* The hart must be halted to probe it; halt it here and resume it
		 * again at the bottom of the loop if it was running. */
		bool halted = riscv_is_halted(target);
		if (!halted) {
			if (riscv013_halt_go(target) != ERROR_OK) {
				LOG_ERROR("Fatal: Hart %d failed to halt during examine()", i);
				return ERROR_FAIL;
			}
		}

		/* Without knowing anything else we can at least mess with the
		 * program buffer. */
		r->debug_buffer_size[i] = info->progbufsize;

		/* Probe XLEN: if a 64-bit abstract read of S0 works, XLEN is 64. */
		int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
		if (result == ERROR_OK)
			r->xlen[i] = 64;
		else
			r->xlen[i] = 32;

		if (register_read(target, &r->misa[i], GDB_REGNO_MISA)) {
			LOG_ERROR("Fatal: Failed to read MISA from hart %d.", i);
			return ERROR_FAIL;
		}

		if (riscv_supports_extension(target, i, 'V')) {
			if (discover_vlenb(target, i) != ERROR_OK)
				return ERROR_FAIL;
		}

		/* Now init registers based on what we discovered. */
		if (riscv_init_registers(target) != ERROR_OK)
			return ERROR_FAIL;

		/* Display this as early as possible to help people who are using
		 * really slow simulators. */
		LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
				r->misa[i]);

		if (!halted)
			riscv013_step_or_resume_current_hart(target, false, false);
	}

	target_set_examined(target);

	if (target->smp) {
		bool haltgroup_supported;
		if (set_haltgroup(target, &haltgroup_supported) != ERROR_OK)
			return ERROR_FAIL;
		if (haltgroup_supported)
			LOG_INFO("Core %d made part of halt group %d.", target->coreid,
					target->smp);
		else
			LOG_INFO("Core %d could not be made part of halt group %d.",
					target->coreid, target->smp);
	}

	/* Some regression suites rely on seeing 'Examined RISC-V core' to know
	 * when they can connect with gdb/telnet.
	 * We will need to update those suites if we want to change that text. */
	LOG_INFO("Examined RISC-V core; found %d harts",
			riscv_count_harts(target));
	for (int i = 0; i < riscv_count_harts(target); ++i) {
		if (riscv_hart_enabled(target, i)) {
			LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
					r->misa[i]);
		} else {
			LOG_INFO(" hart %d: currently disabled", i);
		}
	}
	return ERROR_OK;
}
1793
1794 int riscv013_authdata_read(struct target *target, uint32_t *value)
1795 {
1796 if (wait_for_authbusy(target, NULL) != ERROR_OK)
1797 return ERROR_FAIL;
1798
1799 return dmi_read(target, value, DM_AUTHDATA);
1800 }
1801
/* Write the Debug Module's authdata register as part of the authentication
 * handshake. If this write flips the DM from unauthenticated to
 * authenticated, re-examine every target behind the DM — examine() returns
 * early (without full setup) while the DM is unauthenticated. */
int riscv013_authdata_write(struct target *target, uint32_t value)
{
	/* Capture dmstatus before and after so the authentication transition
	 * can be detected. */
	uint32_t before, after;
	if (wait_for_authbusy(target, &before) != ERROR_OK)
		return ERROR_FAIL;

	dmi_write(target, DM_AUTHDATA, value);

	if (wait_for_authbusy(target, &after) != ERROR_OK)
		return ERROR_FAIL;

	if (!get_field(before, DM_DMSTATUS_AUTHENTICATED) &&
			get_field(after, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_INFO("authdata_write resulted in successful authentication");
		int result = ERROR_OK;
		dm013_info_t *dm = get_dm(target);
		if (!dm)
			return ERROR_FAIL;
		/* Re-examine every target on this DM; remember any failure but
		 * keep going so each core gets a chance to be set up. */
		target_list_t *entry;
		list_for_each_entry(entry, &dm->target_list, list) {
			if (examine(entry->target) != ERROR_OK)
				result = ERROR_FAIL;
		}
		return result;
	}

	return ERROR_OK;
}
1830
1831 static int riscv013_hart_count(struct target *target)
1832 {
1833 dm013_info_t *dm = get_dm(target);
1834 assert(dm);
1835 return dm->hart_count;
1836 }
1837
1838 static unsigned riscv013_data_bits(struct target *target)
1839 {
1840 RISCV013_INFO(info);
1841 /* TODO: Once there is a spec for discovering abstract commands, we can
1842 * take those into account as well. For now we assume abstract commands
1843 * support XLEN-wide accesses. */
1844 if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
1845 return riscv_xlen(target);
1846
1847 if (get_field(info->sbcs, DM_SBCS_SBACCESS128))
1848 return 128;
1849 if (get_field(info->sbcs, DM_SBCS_SBACCESS64))
1850 return 64;
1851 if (get_field(info->sbcs, DM_SBCS_SBACCESS32))
1852 return 32;
1853 if (get_field(info->sbcs, DM_SBCS_SBACCESS16))
1854 return 16;
1855 if (get_field(info->sbcs, DM_SBCS_SBACCESS8))
1856 return 8;
1857
1858 return riscv_xlen(target);
1859 }
1860
/* Configure vtype/vl so vector registers can be moved through S0 one
 * XLEN-wide element at a time. The previous vtype and vl are returned via
 * *vtype/*vl so cleanup_after_vector_access() can restore them; *debug_vl
 * receives the number of XLEN-sized chunks in one vector register. */
static int prep_for_vector_access(struct target *target, uint64_t *vtype,
		uint64_t *vl, unsigned *debug_vl)
{
	RISCV_INFO(r);
	/* TODO: this continuous save/restore is terrible for performance. */
	/* Write vtype and vl. */
	unsigned encoded_vsew;
	switch (riscv_xlen(target)) {
		case 32:
			encoded_vsew = 2;	/* SEW=32 */
			break;
		case 64:
			encoded_vsew = 3;	/* SEW=64 */
			break;
		default:
			LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target));
			return ERROR_FAIL;
	}

	/* Save vtype and vl. */
	if (register_read(target, vtype, GDB_REGNO_VTYPE) != ERROR_OK)
		return ERROR_FAIL;
	if (register_read(target, vl, GDB_REGNO_VL) != ERROR_OK)
		return ERROR_FAIL;

	/* Select element width = XLEN via the vsew field (shifted into place). */
	if (register_write_direct(target, GDB_REGNO_VTYPE, encoded_vsew << 3) != ERROR_OK)
		return ERROR_FAIL;
	/* vlenb bytes * 8 bits, divided into XLEN-bit chunks (rounded up). */
	*debug_vl = DIV_ROUND_UP(r->vlenb[r->current_hartid] * 8,
			riscv_xlen(target));
	if (register_write_direct(target, GDB_REGNO_VL, *debug_vl) != ERROR_OK)
		return ERROR_FAIL;

	return ERROR_OK;
}
1895
1896 static int cleanup_after_vector_access(struct target *target, uint64_t vtype,
1897 uint64_t vl)
1898 {
1899 /* Restore vtype and vl. */
1900 if (register_write_direct(target, GDB_REGNO_VTYPE, vtype) != ERROR_OK)
1901 return ERROR_FAIL;
1902 if (register_write_direct(target, GDB_REGNO_VL, vl) != ERROR_OK)
1903 return ERROR_FAIL;
1904 return ERROR_OK;
1905 }
1906
/* Read vector register (regno - GDB_REGNO_V0) into `value`, one XLEN-sized
 * element at a time: each loop iteration moves element 0 into S0 (vmv_x_s)
 * and slides the register down one element, appending the just-read value
 * (vslide1down.vx) — so after debug_vl iterations the register should hold
 * its original contents again. S0, mstatus.FS/VS, and vtype/vl are all
 * saved and restored around the access. */
static int riscv013_get_register_buf(struct target *target,
		uint8_t *value, int regno)
{
	assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);

	/* S0 is used as the element staging register; save it. */
	riscv_reg_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Make sure mstatus.VS allows vector access. */
	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t vtype, vl;
	unsigned debug_vl;
	if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
		return ERROR_FAIL;

	unsigned vnum = regno - GDB_REGNO_V0;
	unsigned xlen = riscv_xlen(target);

	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, vmv_x_s(S0, vnum));
	riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));

	int result = ERROR_OK;
	for (unsigned i = 0; i < debug_vl; i++) {
		/* Executing the program might result in an exception if there is some
		 * issue with the vector implementation/instructions we're using. If that
		 * happens, attempt to restore as usual. We may have clobbered the
		 * vector register we tried to read already.
		 * For other failures, we just return error because things are probably
		 * so messed up that attempting to restore isn't going to help. */
		result = riscv_program_exec(&program, target);
		if (result == ERROR_OK) {
			/* Element i landed in S0; copy it into the output buffer. */
			uint64_t v;
			if (register_read_direct(target, &v, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u64(value, xlen * i, xlen, v);
		} else {
			break;
		}
	}

	if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
		return ERROR_FAIL;

	if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return result;
}
1962
/* Write vector register regno (GDB_REGNO_V0..V31) from the caller's buffer.
 *
 * Each iteration loads the next xlen-bit chunk into S0 and executes
 * vslide1down.vx, which shifts the register down one element and inserts S0
 * at the top; after debug_vl iterations the whole register has been filled.
 * S0, mstatus and vtype/vl are saved and restored around the access. */
static int riscv013_set_register_buf(struct target *target,
		int regno, const uint8_t *value)
{
	assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);

	/* Save S0; it is used as the staging register for each element. */
	riscv_reg_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Presumably adjusts mstatus so vector state is accessible; restored by
	 * cleanup_after_register_access() below — TODO confirm. */
	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;

	/* Save vtype/vl and configure SEW=xlen, vl=debug_vl for the transfer. */
	uint64_t vtype, vl;
	unsigned debug_vl;
	if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
		return ERROR_FAIL;

	unsigned vnum = regno - GDB_REGNO_V0;
	unsigned xlen = riscv_xlen(target);

	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
	int result = ERROR_OK;
	for (unsigned i = 0; i < debug_vl; i++) {
		/* Stage the next chunk in S0, then slide it into the register. */
		if (register_write_direct(target, GDB_REGNO_S0,
					buf_get_u64(value, xlen * i, xlen)) != ERROR_OK)
			return ERROR_FAIL;
		result = riscv_program_exec(&program, target);
		if (result != ERROR_OK)
			break;
	}

	/* Restore everything we touched, even after a partial failure. */
	if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
		return ERROR_FAIL;

	if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return result;
}
2007
/* Target-type init hook: wire the debug-0.13 implementations into the
 * generic riscv_info_t function table and allocate the version-specific
 * riscv013_info_t state. Returns ERROR_FAIL on allocation failure. */
static int init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	LOG_DEBUG("init");
	riscv_info_t *generic_info = (riscv_info_t *) target->arch_info;

	generic_info->get_register = &riscv013_get_register;
	generic_info->set_register = &riscv013_set_register;
	generic_info->get_register_buf = &riscv013_get_register_buf;
	generic_info->set_register_buf = &riscv013_set_register_buf;
	generic_info->select_current_hart = &riscv013_select_current_hart;
	generic_info->is_halted = &riscv013_is_halted;
	generic_info->resume_go = &riscv013_resume_go;
	generic_info->step_current_hart = &riscv013_step_current_hart;
	generic_info->on_halt = &riscv013_on_halt;
	generic_info->resume_prep = &riscv013_resume_prep;
	generic_info->halt_prep = &riscv013_halt_prep;
	generic_info->halt_go = &riscv013_halt_go;
	generic_info->on_step = &riscv013_on_step;
	generic_info->halt_reason = &riscv013_halt_reason;
	generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
	generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
	generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
	generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
	generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
	generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
	generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
	generic_info->authdata_read = &riscv013_authdata_read;
	generic_info->authdata_write = &riscv013_authdata_write;
	generic_info->dmi_read = &dmi_read;
	generic_info->dmi_write = &dmi_write;
	generic_info->read_memory = read_memory;
	generic_info->test_sba_config_reg = &riscv013_test_sba_config_reg;
	generic_info->test_compliance = &riscv013_test_compliance;
	generic_info->hart_count = &riscv013_hart_count;
	generic_info->data_bits = &riscv013_data_bits;
	/* calloc zero-initializes all the riscv013_info_t fields not set
	 * explicitly below. */
	generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
	if (!generic_info->version_specific)
		return ERROR_FAIL;
	riscv013_info_t *info = get_info(target);

	/* -1 means "not yet discovered" — presumably filled in during examine;
	 * TODO confirm. */
	info->progbufsize = -1;

	/* Start with no extra idle delays; these grow as busy responses are
	 * observed. */
	info->dmi_busy_delay = 0;
	info->bus_master_read_delay = 0;
	info->bus_master_write_delay = 0;
	info->ac_busy_delay = 0;

	/* Assume all these abstract commands are supported until we learn
	 * otherwise.
	 * TODO: The spec allows eg. one CSR to be able to be accessed abstractly
	 * while another one isn't. We don't track that this closely here, but in
	 * the future we probably should. */
	info->abstract_read_csr_supported = true;
	info->abstract_write_csr_supported = true;
	info->abstract_read_fpr_supported = true;
	info->abstract_write_fpr_supported = true;

	return ERROR_OK;
}
2068
/* Assert reset: request halt-on-reset (per target->reset_halt), then assert
 * ndmreset. With an RTOS (one target whose harts appear as threads) haltreq
 * is set on every enabled hart before ndmreset; otherwise only the current
 * hart is selected. */
static int assert_reset(struct target *target)
{
	RISCV_INFO(r);

	select_dmi(target);

	/* dmactive stays 1 in every write below; clearing it would reset the
	 * debug module itself. */
	uint32_t control_base = set_field(0, DM_DMCONTROL_DMACTIVE, 1);

	if (target->rtos) {
		/* There's only one target, and OpenOCD thinks each hart is a thread.
		 * We must reset them all. */

		/* TODO: Try to use hasel in dmcontrol */

		/* Set haltreq for each hart. */
		uint32_t control = control_base;
		for (int i = 0; i < riscv_count_harts(target); ++i) {
			if (!riscv_hart_enabled(target, i))
				continue;

			control = set_hartsel(control_base, i);
			control = set_field(control, DM_DMCONTROL_HALTREQ,
					target->reset_halt ? 1 : 0);
			dmi_write(target, DM_DMCONTROL, control);
		}
		/* Assert ndmreset */
		control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DM_DMCONTROL, control);

	} else {
		/* Reset just this hart. */
		uint32_t control = set_hartsel(control_base, r->current_hartid);
		control = set_field(control, DM_DMCONTROL_HALTREQ,
				target->reset_halt ? 1 : 0);
		control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DM_DMCONTROL, control);
	}

	target->state = TARGET_RESET;

	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;

	/* The DM might have gotten reset if OpenOCD called us in some reset that
	 * involves SRST being toggled. So clear our cache which may be out of
	 * date. */
	memset(dm->progbuf_cache, 0, sizeof(dm->progbuf_cache));

	return ERROR_OK;
}
2120
2121 static int deassert_reset(struct target *target)
2122 {
2123 RISCV_INFO(r);
2124 RISCV013_INFO(info);
2125 select_dmi(target);
2126
2127 /* Clear the reset, but make sure haltreq is still set */
2128 uint32_t control = 0;
2129 control = set_field(control, DM_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
2130 control = set_field(control, DM_DMCONTROL_DMACTIVE, 1);
2131 dmi_write(target, DM_DMCONTROL,
2132 set_hartsel(control, r->current_hartid));
2133
2134 uint32_t dmstatus;
2135 int dmi_busy_delay = info->dmi_busy_delay;
2136 time_t start = time(NULL);
2137
2138 for (int i = 0; i < riscv_count_harts(target); ++i) {
2139 int index = i;
2140 if (target->rtos) {
2141 if (!riscv_hart_enabled(target, index))
2142 continue;
2143 dmi_write(target, DM_DMCONTROL,
2144 set_hartsel(control, index));
2145 } else {
2146 index = r->current_hartid;
2147 }
2148
2149 char *operation;
2150 uint32_t expected_field;
2151 if (target->reset_halt) {
2152 operation = "halt";
2153 expected_field = DM_DMSTATUS_ALLHALTED;
2154 } else {
2155 operation = "run";
2156 expected_field = DM_DMSTATUS_ALLRUNNING;
2157 }
2158 LOG_DEBUG("Waiting for hart %d to %s out of reset.", index, operation);
2159 while (1) {
2160 int result = dmstatus_read_timeout(target, &dmstatus, true,
2161 riscv_reset_timeout_sec);
2162 if (result == ERROR_TIMEOUT_REACHED)
2163 LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
2164 "reset in %ds; Increase the timeout with riscv "
2165 "set_reset_timeout_sec.",
2166 index, riscv_reset_timeout_sec);
2167 if (result != ERROR_OK)
2168 return result;
2169 if (get_field(dmstatus, expected_field))
2170 break;
2171 if (time(NULL) - start > riscv_reset_timeout_sec) {
2172 LOG_ERROR("Hart %d didn't %s coming out of reset in %ds; "
2173 "dmstatus=0x%x; "
2174 "Increase the timeout with riscv set_reset_timeout_sec.",
2175 index, operation, riscv_reset_timeout_sec, dmstatus);
2176 return ERROR_FAIL;
2177 }
2178 }
2179 target->state = TARGET_HALTED;
2180
2181 if (get_field(dmstatus, DM_DMSTATUS_ALLHAVERESET)) {
2182 /* Ack reset. */
2183 dmi_write(target, DM_DMCONTROL,
2184 set_hartsel(control, index) |
2185 DM_DMCONTROL_ACKHAVERESET);
2186 }
2187
2188 if (!target->rtos)
2189 break;
2190 }
2191 info->dmi_busy_delay = dmi_busy_delay;
2192 return ERROR_OK;
2193 }
2194
2195 static int execute_fence(struct target *target)
2196 {
2197 int old_hartid = riscv_current_hartid(target);
2198
2199 /* FIXME: For non-coherent systems we need to flush the caches right
2200 * here, but there's no ISA-defined way of doing that. */
2201 {
2202 struct riscv_program program;
2203 riscv_program_init(&program, target);
2204 riscv_program_fence_i(&program);
2205 riscv_program_fence(&program);
2206 int result = riscv_program_exec(&program, target);
2207 if (result != ERROR_OK)
2208 LOG_DEBUG("Unable to execute pre-fence");
2209 }
2210
2211 for (int i = 0; i < riscv_count_harts(target); ++i) {
2212 if (!riscv_hart_enabled(target, i))
2213 continue;
2214
2215 if (i == old_hartid)
2216 /* Fence already executed for this hart */
2217 continue;
2218
2219 riscv_set_current_hartid(target, i);
2220
2221 struct riscv_program program;
2222 riscv_program_init(&program, target);
2223 riscv_program_fence_i(&program);
2224 riscv_program_fence(&program);
2225 int result = riscv_program_exec(&program, target);
2226 if (result != ERROR_OK)
2227 LOG_DEBUG("Unable to execute fence on hart %d", i);
2228 }
2229
2230 riscv_set_current_hartid(target, old_hartid);
2231
2232 return ERROR_OK;
2233 }
2234
2235 static void log_memory_access(target_addr_t address, uint64_t value,
2236 unsigned size_bytes, bool read)
2237 {
2238 if (debug_level < LOG_LVL_DEBUG)
2239 return;
2240
2241 char fmt[80];
2242 sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
2243 address, read ? "read" : "write", size_bytes * 2);
2244 switch (size_bytes) {
2245 case 1:
2246 value &= 0xff;
2247 break;
2248 case 2:
2249 value &= 0xffff;
2250 break;
2251 case 4:
2252 value &= 0xffffffffUL;
2253 break;
2254 case 8:
2255 break;
2256 default:
2257 assert(false);
2258 }
2259 LOG_DEBUG(fmt, value);
2260 }
2261
2262 /* Read the relevant sbdata regs depending on size, and put the results into
2263 * buffer. */
2264 static int read_memory_bus_word(struct target *target, target_addr_t address,
2265 uint32_t size, uint8_t *buffer)
2266 {
2267 uint32_t value;
2268 int result;
2269 static int sbdata[4] = { DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3 };
2270 assert(size <= 16);
2271 for (int i = (size - 1) / 4; i >= 0; i--) {
2272 result = dmi_op(target, &value, NULL, DMI_OP_READ, sbdata[i], 0, false, true);
2273 if (result != ERROR_OK)
2274 return result;
2275 buf_set_u32(buffer + i * 4, 0, 8 * MIN(size, 4), value);
2276 log_memory_access(address + i * 4, value, MIN(size, 4), true);
2277 }
2278 return ERROR_OK;
2279 }
2280
2281 static uint32_t sb_sbaccess(unsigned size_bytes)
2282 {
2283 switch (size_bytes) {
2284 case 1:
2285 return set_field(0, DM_SBCS_SBACCESS, 0);
2286 case 2:
2287 return set_field(0, DM_SBCS_SBACCESS, 1);
2288 case 4:
2289 return set_field(0, DM_SBCS_SBACCESS, 2);
2290 case 8:
2291 return set_field(0, DM_SBCS_SBACCESS, 3);
2292 case 16:
2293 return set_field(0, DM_SBCS_SBACCESS, 4);
2294 }
2295 assert(0);
2296 return 0; /* Make mingw happy. */
2297 }
2298
/* Return the current system bus address (sbaddress1:sbaddress0). Used e.g.
 * to find out how far an autoincrementing transfer got before a busy error.
 * Note: dmi_read() results are not checked, so a failed read leaves that
 * half of the address as zero. */
static target_addr_t sb_read_address(struct target *target)
{
	RISCV013_INFO(info);
	unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
	target_addr_t address = 0;
	uint32_t v;
	/* Only fetch the high word when the bus address is wider than 32 bits. */
	if (sbasize > 32) {
		dmi_read(target, &v, DM_SBADDRESS1);
		address |= v;
		address <<= 32;
	}
	dmi_read(target, &v, DM_SBADDRESS0);
	address |= v;
	return address;
}
2314
/* Set the system bus address registers to the given address, zeroing any
 * address words above bit 64 that the hardware implements. Returns the
 * result of the final sbaddress0 write (which, with sbreadonaddr set,
 * triggers a bus read). */
static int sb_write_address(struct target *target, target_addr_t address)
{
	RISCV013_INFO(info);
	unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
	/* There currently is no support for >64-bit addresses in OpenOCD. */
	if (sbasize > 96)
		dmi_write(target, DM_SBADDRESS3, 0);
	if (sbasize > 64)
		dmi_write(target, DM_SBADDRESS2, 0);
	if (sbasize > 32)
		dmi_write(target, DM_SBADDRESS1, address >> 32);
	return dmi_write(target, DM_SBADDRESS0, address);
}
2328
2329 static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
2330 {
2331 time_t start = time(NULL);
2332 while (1) {
2333 if (dmi_read(target, sbcs, DM_SBCS) != ERROR_OK)
2334 return ERROR_FAIL;
2335 if (!get_field(*sbcs, DM_SBCS_SBBUSY))
2336 return ERROR_OK;
2337 if (time(NULL) - start > riscv_command_timeout_sec) {
2338 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
2339 "Increase the timeout with riscv set_command_timeout_sec.",
2340 riscv_command_timeout_sec, *sbcs);
2341 return ERROR_FAIL;
2342 }
2343 }
2344 }
2345
2346 static int modify_privilege(struct target *target, uint64_t *mstatus, uint64_t *mstatus_old)
2347 {
2348 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5)) {
2349 /* Read DCSR */
2350 uint64_t dcsr;
2351 if (register_read(target, &dcsr, GDB_REGNO_DCSR) != ERROR_OK)
2352 return ERROR_FAIL;
2353
2354 /* Read and save MSTATUS */
2355 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
2356 return ERROR_FAIL;
2357 *mstatus_old = *mstatus;
2358
2359 /* If we come from m-mode with mprv set, we want to keep mpp */
2360 if (get_field(dcsr, DCSR_PRV) < 3) {
2361 /* MPP = PRIV */
2362 *mstatus = set_field(*mstatus, MSTATUS_MPP, get_field(dcsr, DCSR_PRV));
2363
2364 /* MPRV = 1 */
2365 *mstatus = set_field(*mstatus, MSTATUS_MPRV, 1);
2366
2367 /* Write MSTATUS */
2368 if (*mstatus != *mstatus_old)
2369 if (register_write_direct(target, GDB_REGNO_MSTATUS, *mstatus) != ERROR_OK)
2370 return ERROR_FAIL;
2371 }
2372 }
2373
2374 return ERROR_OK;
2375 }
2376
/* Read memory over the system bus using the pre-0.13 (v0) sbcs layout.
 * Only size == increment is supported. A single word uses a one-shot
 * sbsingleread access; larger blocks use sbautoread + sbautoincrement so
 * that each sbdata0 read triggers the next bus access. */
static int read_memory_bus_v0(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (size != increment) {
		LOG_ERROR("sba v0 reads only support size==increment");
		return ERROR_NOT_IMPLEMENTED;
	}

	LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
			TARGET_PRIxADDR, size, count, address);
	uint8_t *t_buffer = buffer;
	riscv_addr_t cur_addr = address;
	riscv_addr_t fin_addr = address + (count * size);
	uint32_t access = 0;

	/* Bit positions that only exist in the legacy (v0) sbcs layout. */
	const int DM_SBCS_SBSINGLEREAD_OFFSET = 20;
	const uint32_t DM_SBCS_SBSINGLEREAD = (0x1U << DM_SBCS_SBSINGLEREAD_OFFSET);

	const int DM_SBCS_SBAUTOREAD_OFFSET = 15;
	const uint32_t DM_SBCS_SBAUTOREAD = (0x1U << DM_SBCS_SBAUTOREAD_OFFSET);

	/* Favor one-off reads when there is only a single word to transfer. */
	if (count == 1) {
		for (uint32_t i = 0; i < count; i++) {
			if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
				return ERROR_FAIL;
			dmi_write(target, DM_SBADDRESS0, cur_addr);
			/* size/2 matching the bit access of the spec 0.13 */
			access = set_field(access, DM_SBCS_SBACCESS, size/2);
			access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
			LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
			dmi_write(target, DM_SBCS, access);
			/* 3) read */
			uint32_t value;
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
			buf_set_u32(t_buffer, 0, 8 * size, value);
			t_buffer += size;
			cur_addr += size;
		}
		return ERROR_OK;
	}

	/* has to be the same size if we want to read a block */
	LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
	if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;
	/* set current address */
	dmi_write(target, DM_SBADDRESS0, cur_addr);
	/* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
	 * size/2 matching the bit access of the spec 0.13 */
	access = set_field(access, DM_SBCS_SBACCESS, size/2);
	access = set_field(access, DM_SBCS_SBAUTOREAD, 1);
	access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
	access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
	LOG_DEBUG("\r\naccess: 0x%08x", access);
	dmi_write(target, DM_SBCS, access);

	/* With sbautoread + sbautoincrement set, each sbdata0 read kicks off
	 * the next bus read, so keep draining sbdata0 until fin_addr. */
	while (cur_addr < fin_addr) {
		LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
				PRIx64, size, count, cur_addr);
		/* read */
		uint32_t value;
		if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
			return ERROR_FAIL;
		buf_set_u32(t_buffer, 0, 8 * size, value);
		cur_addr += size;
		t_buffer += size;

		/* if we are reaching last address, we must clear autoread */
		if (cur_addr == fin_addr && count != 1) {
			dmi_write(target, DM_SBCS, 0);
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u32(t_buffer, 0, 8 * size, value);
		}
	}

	return ERROR_OK;
}
2458
2459 /**
2460 * Read the requested memory using the system bus interface.
2461 */
2462 static int read_memory_bus_v1(struct target *target, target_addr_t address,
2463 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2464 {
2465 if (increment != size && increment != 0) {
2466 LOG_ERROR("sba v1 reads only support increment of size or 0");
2467 return ERROR_NOT_IMPLEMENTED;
2468 }
2469
2470 RISCV013_INFO(info);
2471 target_addr_t next_address = address;
2472 target_addr_t end_address = address + count * size;
2473
2474 while (next_address < end_address) {
2475 uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
2476 sbcs_write |= sb_sbaccess(size);
2477 if (increment == size)
2478 sbcs_write = set_field(sbcs_write, DM_SBCS_SBAUTOINCREMENT, 1);
2479 if (count > 1)
2480 sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, count > 1);
2481 if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
2482 return ERROR_FAIL;
2483
2484 /* This address write will trigger the first read. */
2485 if (sb_write_address(target, next_address) != ERROR_OK)
2486 return ERROR_FAIL;
2487
2488 if (info->bus_master_read_delay) {
2489 jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
2490 if (jtag_execute_queue() != ERROR_OK) {
2491 LOG_ERROR("Failed to scan idle sequence");
2492 return ERROR_FAIL;
2493 }
2494 }
2495
2496 /* First value has been read, and is waiting for us to issue a DMI read
2497 * to get it. */
2498
2499 static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
2500 assert(size <= 16);
2501 target_addr_t next_read = address - 1;
2502 for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
2503 for (int j = (size - 1) / 4; j >= 0; j--) {
2504 uint32_t value;
2505 unsigned attempt = 0;
2506 while (1) {
2507 if (attempt++ > 100) {
2508 LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
2509 next_read);
2510 return ERROR_FAIL;
2511 }
2512 dmi_status_t status = dmi_scan(target, NULL, &value,
2513 DMI_OP_READ, sbdata[j], 0, false);
2514 if (status == DMI_STATUS_BUSY)
2515 increase_dmi_busy_delay(target);
2516 else if (status == DMI_STATUS_SUCCESS)
2517 break;
2518 else
2519 return ERROR_FAIL;
2520 }
2521 if (next_read != address - 1) {
2522 buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
2523 log_memory_access(next_read, value, MIN(size, 4), true);
2524 }
2525 next_read = address + i * size + j * 4;
2526 }
2527 }
2528
2529 uint32_t sbcs_read = 0;
2530 if (count > 1) {
2531 uint32_t value;
2532 unsigned attempt = 0;
2533 while (1) {
2534 if (attempt++ > 100) {
2535 LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
2536 next_read);
2537 return ERROR_FAIL;
2538 }
2539 dmi_status_t status = dmi_scan(target, NULL, &value, DMI_OP_NOP, 0, 0, false);
2540 if (status == DMI_STATUS_BUSY)
2541 increase_dmi_busy_delay(target);
2542 else if (status == DMI_STATUS_SUCCESS)
2543 break;
2544 else
2545 return ERROR_FAIL;
2546 }
2547 buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
2548 log_memory_access(next_read, value, MIN(size, 4), true);
2549
2550 /* "Writes to sbcs while sbbusy is high result in undefined behavior.
2551 * A debugger must not write to sbcs until it reads sbbusy as 0." */
2552 if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
2553 return ERROR_FAIL;
2554
2555 sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, 0);
2556 if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
2557 return ERROR_FAIL;
2558 }
2559
2560 /* Read the last word, after we disabled sbreadondata if necessary. */
2561 if (!get_field(sbcs_read, DM_SBCS_SBERROR) &&
2562 !get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2563 if (read_memory_bus_word(target, address + (count - 1) * size, size,
2564 buffer + (count - 1) * size) != ERROR_OK)
2565 return ERROR_FAIL;
2566
2567 if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
2568 return ERROR_FAIL;
2569 }
2570
2571 if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2572 /* We read while the target was busy. Slow down and try again. */
2573 if (dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR) != ERROR_OK)
2574 return ERROR_FAIL;
2575 next_address = sb_read_address(target);
2576 info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
2577 continue;
2578 }
2579
2580 unsigned error = get_field(sbcs_read, DM_SBCS_SBERROR);
2581 if (error == 0) {
2582 next_address = end_address;
2583 } else {
2584 /* Some error indicating the bus access failed, but not because of
2585 * something we did wrong. */
2586 if (dmi_write(target, DM_SBCS, DM_SBCS_SBERROR) != ERROR_OK)
2587 return ERROR_FAIL;
2588 return ERROR_FAIL;
2589 }
2590 }
2591
2592 return ERROR_OK;
2593 }
2594
2595 static int batch_run(const struct target *target, struct riscv_batch *batch)
2596 {
2597 RISCV013_INFO(info);
2598 RISCV_INFO(r);
2599 if (r->reset_delays_wait >= 0) {
2600 r->reset_delays_wait -= batch->used_scans;
2601 if (r->reset_delays_wait <= 0) {
2602 batch->idle_count = 0;
2603 info->dmi_busy_delay = 0;
2604 info->ac_busy_delay = 0;
2605 }
2606 }
2607 return riscv_batch_run(batch);
2608 }
2609
2610 /*
2611 * Performs a memory read using memory access abstract commands. The read sizes
2612 * supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16 byte
2613 * aamsize fields in the memory access abstract command.
2614 */
2615 static int read_memory_abstract(struct target *target, target_addr_t address,
2616 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2617 {
2618 if (size != increment) {
2619 LOG_ERROR("abstract command reads only support size==increment");
2620 return ERROR_NOT_IMPLEMENTED;
2621 }
2622
2623 int result = ERROR_OK;
2624
2625 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
2626 size, address);
2627
2628 memset(buffer, 0, count * size);
2629
2630 /* Convert the size (bytes) to width (bits) */
2631 unsigned width = size << 3;
2632 if (width > 64) {
2633 /* TODO: Add 128b support if it's ever used. Involves modifying
2634 read/write_abstract_arg() to work on two 64b values. */
2635 LOG_ERROR("Unsupported size: %d bits", size);
2636 return ERROR_FAIL;
2637 }
2638
2639 /* Create the command (physical address, postincrement, read) */
2640 uint32_t command = access_memory_command(target, false, width, true, false);
2641
2642 /* Execute the reads */
2643 uint8_t *p = buffer;
2644 bool updateaddr = true;
2645 unsigned width32 = (width + 31) / 32 * 32;
2646 for (uint32_t c = 0; c < count; c++) {
2647 /* Only update the address initially and let postincrement update it */
2648 if (updateaddr) {
2649 /* Set arg1 to the address: address + c * size */
2650 result = write_abstract_arg(target, 1, address, riscv_xlen(target));
2651 if (result != ERROR_OK) {
2652 LOG_ERROR("Failed to write arg1 during read_memory_abstract().");
2653 return result;
2654 }
2655 }
2656
2657 /* Execute the command */
2658 result = execute_abstract_command(target, command);
2659 if (result != ERROR_OK) {
2660 LOG_ERROR("Failed to execute command read_memory_abstract().");
2661 return result;
2662 }
2663
2664 /* Copy arg0 to buffer (rounded width up to nearest 32) */
2665 riscv_reg_t value = read_abstract_arg(target, 0, width32);
2666 buf_set_u64(p, 0, 8 * size, value);
2667
2668 updateaddr = false;
2669 p += size;
2670 }
2671
2672 return result;
2673 }
2674
2675 /*
2676 * Performs a memory write using memory access abstract commands. The write
2677 * sizes supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16
2678 * byte aamsize fields in the memory access abstract command.
2679 */
2680 static int write_memory_abstract(struct target *target, target_addr_t address,
2681 uint32_t size, uint32_t count, const uint8_t *buffer)
2682 {
2683 int result = ERROR_OK;
2684
2685 LOG_DEBUG("writing %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
2686 size, address);
2687
2688 /* Convert the size (bytes) to width (bits) */
2689 unsigned width = size << 3;
2690 if (width > 64) {
2691 /* TODO: Add 128b support if it's ever used. Involves modifying
2692 read/write_abstract_arg() to work on two 64b values. */
2693 LOG_ERROR("Unsupported size: %d bits", width);
2694 return ERROR_FAIL;
2695 }
2696
2697 /* Create the command (physical address, postincrement, write) */
2698 uint32_t command = access_memory_command(target, false, width, true, true);
2699
2700 /* Execute the writes */
2701 const uint8_t *p = buffer;
2702 bool updateaddr = true;
2703 for (uint32_t c = 0; c < count; c++) {
2704 /* Move data to arg0 */
2705 riscv_reg_t value = buf_get_u64(p, 0, 8 * size);
2706 result = write_abstract_arg(target, 0, value, riscv_xlen(target));
2707 if (result != ERROR_OK) {
2708 LOG_ERROR("Failed to write arg0 during write_memory_abstract().");
2709 return result;
2710 }
2711
2712 /* Only update the address initially and let postincrement update it */
2713 if (updateaddr) {
2714 /* Set arg1 to the address: address + c * size */
2715 result = write_abstract_arg(target, 1, address, riscv_xlen(target));
2716 if (result != ERROR_OK) {
2717 LOG_ERROR("Failed to write arg1 during write_memory_abstract().");
2718 return result;
2719 }
2720 }
2721
2722 /* Execute the command */
2723 result = execute_abstract_command(target, command);
2724 if (result != ERROR_OK) {
2725 LOG_ERROR("Failed to execute command write_memory_abstract().");
2726 return result;
2727 }
2728
2729 updateaddr = false;
2730 p += size;
2731 }
2732
2733 return result;
2734 }
2735
2736 /**
2737 * Read the requested memory, taking care to execute every read exactly once,
2738 * even if cmderr=busy is encountered.
2739 */
2740 static int read_memory_progbuf_inner(struct target *target, target_addr_t address,
2741 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2742 {
2743 RISCV013_INFO(info);
2744
2745 int result = ERROR_OK;
2746
2747 /* Write address to S0. */
2748 result = register_write_direct(target, GDB_REGNO_S0, address);
2749 if (result != ERROR_OK)
2750 return result;
2751
2752 if (increment == 0 &&
2753 register_write_direct(target, GDB_REGNO_S2, 0) != ERROR_OK)
2754 return ERROR_FAIL;
2755
2756 uint32_t command = access_register_command(target, GDB_REGNO_S1,
2757 riscv_xlen(target),
2758 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
2759 if (execute_abstract_command(target, command) != ERROR_OK)
2760 return ERROR_FAIL;
2761
2762 /* First read has just triggered. Result is in s1. */
2763 if (count == 1) {
2764 uint64_t value;
2765 if (register_read_direct(target, &value, GDB_REGNO_S1) != ERROR_OK)
2766 return ERROR_FAIL;
2767 buf_set_u64(buffer, 0, 8 * size, value);
2768 log_memory_access(address, value, size, true);
2769 return ERROR_OK;
2770 }
2771
2772 if (dmi_write(target, DM_ABSTRACTAUTO,
2773 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
2774 goto error;
2775 /* Read garbage from dmi_data0, which triggers another execution of the
2776 * program. Now dmi_data0 contains the first good result, and s1 the next
2777 * memory value. */
2778 if (dmi_read_exec(target, NULL, DM_DATA0) != ERROR_OK)
2779 goto error;
2780
2781 /* read_addr is the next address that the hart will read from, which is the
2782 * value in s0. */
2783 unsigned index = 2;
2784 while (index < count) {
2785 riscv_addr_t read_addr = address + index * increment;
2786 LOG_DEBUG("i=%d, count=%d, read_addr=0x%" PRIx64, index, count, read_addr);
2787 /* The pipeline looks like this:
2788 * memory -> s1 -> dm_data0 -> debugger
2789 * Right now:
2790 * s0 contains read_addr
2791 * s1 contains mem[read_addr-size]
2792 * dm_data0 contains[read_addr-size*2]
2793 */
2794
2795 struct riscv_batch *batch = riscv_batch_alloc(target, 32,
2796 info->dmi_busy_delay + info->ac_busy_delay);
2797 if (!batch)
2798 return ERROR_FAIL;
2799
2800 unsigned reads = 0;
2801 for (unsigned j = index; j < count; j++) {
2802 if (size > 4)
2803 riscv_batch_add_dmi_read(batch, DM_DATA1);
2804 riscv_batch_add_dmi_read(batch, DM_DATA0);
2805
2806 reads++;
2807 if (riscv_batch_full(batch))
2808 break;
2809 }
2810
2811 batch_run(target, batch);
2812
2813 /* Wait for the target to finish performing the last abstract command,
2814 * and update our copy of cmderr. If we see that DMI is busy here,
2815 * dmi_busy_delay will be incremented. */
2816 uint32_t abstractcs;
2817 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
2818 return ERROR_FAIL;
2819 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
2820 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
2821 return ERROR_FAIL;
2822 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
2823
2824 unsigned next_index;
2825 unsigned ignore_last = 0;
2826 switch (info->cmderr) {
2827 case CMDERR_NONE:
2828 LOG_DEBUG("successful (partial?) memory read");
2829 next_index = index + reads;
2830 break;
2831 case CMDERR_BUSY:
2832 LOG_DEBUG("memory read resulted in busy response");
2833
2834 increase_ac_busy_delay(target);
2835 riscv013_clear_abstract_error(target);
2836
2837 dmi_write(target, DM_ABSTRACTAUTO, 0);
2838
2839 uint32_t dmi_data0, dmi_data1 = 0;
2840 /* This is definitely a good version of the value that we
2841 * attempted to read when we discovered that the target was
2842 * busy. */
2843 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK) {
2844 riscv_batch_free(batch);
2845 goto error;
2846 }
2847 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK) {
2848 riscv_batch_free(batch);
2849 goto error;
2850 }
2851
2852 /* See how far we got, clobbering dmi_data0. */
2853 if (increment == 0) {
2854 uint64_t counter;
2855 result = register_read_direct(target, &counter, GDB_REGNO_S2);
2856 next_index = counter;
2857 } else {
2858 uint64_t next_read_addr;
2859 result = register_read_direct(target, &next_read_addr,
2860 GDB_REGNO_S0);
2861 next_index = (next_read_addr - address) / increment;
2862 }
2863 if (result != ERROR_OK) {
2864 riscv_batch_free(batch);
2865 goto error;
2866 }
2867
2868 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
2869 buf_set_u64(buffer + (next_index - 2) * size, 0, 8 * size, value64);
2870 log_memory_access(address + (next_index - 2) * size, value64, size, true);
2871
2872 /* Restore the command, and execute it.
2873 * Now DM_DATA0 contains the next value just as it would if no
2874 * error had occurred. */
2875 dmi_write_exec(target, DM_COMMAND, command, true);
2876 next_index++;
2877
2878 dmi_write(target, DM_ABSTRACTAUTO,
2879 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
2880
2881 ignore_last = 1;
2882
2883 break;
2884 default:
2885 LOG_DEBUG("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
2886 riscv013_clear_abstract_error(target);
2887 riscv_batch_free(batch);
2888 result = ERROR_FAIL;
2889 goto error;
2890 }
2891
2892 /* Now read whatever we got out of the batch. */
2893 dmi_status_t status = DMI_STATUS_SUCCESS;
2894 unsigned read = 0;
2895 assert(index >= 2);
2896 for (unsigned j = index - 2; j < index + reads; j++) {
2897 assert(j < count);
2898 LOG_DEBUG("index=%d, reads=%d, next_index=%d, ignore_last=%d, j=%d",
2899 index, reads, next_index, ignore_last, j);
2900 if (j + 3 + ignore_last > next_index)
2901 break;
2902
2903 status = riscv_batch_get_dmi_read_op(batch, read);
2904 uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
2905 read++;
2906 if (status != DMI_STATUS_SUCCESS) {
2907 /* If we're here because of busy count, dmi_busy_delay will
2908 * already have been increased and busy state will have been
2909 * cleared in dmi_read(). */
2910 /* In at least some implementations, we issue a read, and then
2911 * can get busy back when we try to scan out the read result,
2912 * and the actual read value is lost forever. Since this is
2913 * rare in any case, we return error here and rely on our
2914 * caller to reread the entire block. */
2915 LOG_WARNING("Batch memory read encountered DMI error %d. "
2916 "Falling back on slower reads.", status);
2917 riscv_batch_free(batch);
2918 result = ERROR_FAIL;
2919 goto error;
2920 }
2921 if (size > 4) {
2922 status = riscv_batch_get_dmi_read_op(batch, read);
2923 if (status != DMI_STATUS_SUCCESS) {
2924 LOG_WARNING("Batch memory read encountered DMI error %d. "
2925 "Falling back on slower reads.", status);
2926 riscv_batch_free(batch);
2927 result = ERROR_FAIL;
2928 goto error;
2929 }
2930 value <<= 32;
2931 value |= riscv_batch_get_dmi_read_data(batch, read);
2932 read++;
2933 }
2934 riscv_addr_t offset = j * size;
2935 buf_set_u64(buffer + offset, 0, 8 * size, value);
2936 log_memory_access(address + j * increment, value, size, true);
2937 }
2938
2939 index = next_index;
2940
2941 riscv_batch_free(batch);
2942 }
2943
2944 dmi_write(target, DM_ABSTRACTAUTO, 0);
2945
2946 if (count > 1) {
2947 /* Read the penultimate word. */
2948 uint32_t dmi_data0, dmi_data1 = 0;
2949 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK)
2950 return ERROR_FAIL;
2951 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK)
2952 return ERROR_FAIL;
2953 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
2954 buf_set_u64(buffer + size * (count - 2), 0, 8 * size, value64);
2955 log_memory_access(address + size * (count - 2), value64, size, true);
2956 }
2957
2958 /* Read the last word. */
2959 uint64_t value;
2960 result = register_read_direct(target, &value, GDB_REGNO_S1);
2961 if (result != ERROR_OK)
2962 goto error;
2963 buf_set_u64(buffer + size * (count-1), 0, 8 * size, value);
2964 log_memory_access(address + size * (count-1), value, size, true);
2965
2966 return ERROR_OK;
2967
2968 error:
2969 dmi_write(target, DM_ABSTRACTAUTO, 0);
2970
2971 return result;
2972 }
2973
2974 /* Only need to save/restore one GPR to read a single word, and the progbuf
2975 * program doesn't need to increment. */
2976 static int read_memory_progbuf_one(struct target *target, target_addr_t address,
2977 uint32_t size, uint8_t *buffer)
2978 {
2979 uint64_t mstatus = 0;
2980 uint64_t mstatus_old = 0;
2981 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
2982 return ERROR_FAIL;
2983
2984 uint64_t s0;
2985
2986 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
2987 return ERROR_FAIL;
2988
2989 /* Write the program (load, increment) */
2990 struct riscv_program program;
2991 riscv_program_init(&program, target);
2992 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
2993 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
2994 switch (size) {
2995 case 1:
2996 riscv_program_lbr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
2997 break;
2998 case 2:
2999 riscv_program_lhr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3000 break;
3001 case 4:
3002 riscv_program_lwr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3003 break;
3004 case 8:
3005 riscv_program_ldr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3006 break;
3007 default:
3008 LOG_ERROR("Unsupported size: %d", size);
3009 return ERROR_FAIL;
3010 }
3011 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3012 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3013
3014 if (riscv_program_ebreak(&program) != ERROR_OK)
3015 return ERROR_FAIL;
3016 if (riscv_program_write(&program) != ERROR_OK)
3017 return ERROR_FAIL;
3018
3019 /* Write address to S0, and execute buffer. */
3020 if (write_abstract_arg(target, 0, address, riscv_xlen(target)) != ERROR_OK)
3021 return ERROR_FAIL;
3022 uint32_t command = access_register_command(target, GDB_REGNO_S0,
3023 riscv_xlen(target), AC_ACCESS_REGISTER_WRITE |
3024 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
3025 if (execute_abstract_command(target, command) != ERROR_OK)
3026 return ERROR_FAIL;
3027
3028 uint64_t value;
3029 if (register_read(target, &value, GDB_REGNO_S0) != ERROR_OK)
3030 return ERROR_FAIL;
3031 buf_set_u64(buffer, 0, 8 * size, value);
3032 log_memory_access(address, value, size, true);
3033
3034 if (riscv_set_register(target, GDB_REGNO_S0, s0) != ERROR_OK)
3035 return ERROR_FAIL;
3036
3037 /* Restore MSTATUS */
3038 if (mstatus != mstatus_old)
3039 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3040 return ERROR_FAIL;
3041
3042 return ERROR_OK;
3043 }
3044
3045 /**
3046 * Read the requested memory, silently handling memory access errors.
3047 */
3048 static int read_memory_progbuf(struct target *target, target_addr_t address,
3049 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3050 {
3051 if (riscv_xlen(target) < size * 8) {
3052 LOG_ERROR("XLEN (%d) is too short for %d-bit memory read.",
3053 riscv_xlen(target), size * 8);
3054 return ERROR_FAIL;
3055 }
3056
3057 int result = ERROR_OK;
3058
3059 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
3060 size, address);
3061
3062 select_dmi(target);
3063
3064 memset(buffer, 0, count*size);
3065
3066 if (execute_fence(target) != ERROR_OK)
3067 return ERROR_FAIL;
3068
3069 if (count == 1)
3070 return read_memory_progbuf_one(target, address, size, buffer);
3071
3072 uint64_t mstatus = 0;
3073 uint64_t mstatus_old = 0;
3074 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3075 return ERROR_FAIL;
3076
3077 /* s0 holds the next address to write to
3078 * s1 holds the next data value to write
3079 * s2 is a counter in case increment is 0
3080 */
3081 uint64_t s0, s1, s2;
3082 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3083 return ERROR_FAIL;
3084 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
3085 return ERROR_FAIL;
3086 if (increment == 0 && register_read(target, &s2, GDB_REGNO_S1) != ERROR_OK)
3087 return ERROR_FAIL;
3088
3089 /* Write the program (load, increment) */
3090 struct riscv_program program;
3091 riscv_program_init(&program, target);
3092 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3093 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3094
3095 switch (size) {
3096 case 1:
3097 riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3098 break;
3099 case 2:
3100 riscv_program_lhr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3101 break;
3102 case 4:
3103 riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3104 break;
3105 case 8:
3106 riscv_program_ldr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3107 break;
3108 default:
3109 LOG_ERROR("Unsupported size: %d", size);
3110 return ERROR_FAIL;
3111 }
3112
3113 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3114 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3115 if (increment == 0)
3116 riscv_program_addi(&program, GDB_REGNO_S2, GDB_REGNO_S2, 1);
3117 else
3118 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, increment);
3119
3120 if (riscv_program_ebreak(&program) != ERROR_OK)
3121 return ERROR_FAIL;
3122 if (riscv_program_write(&program) != ERROR_OK)
3123 return ERROR_FAIL;
3124
3125 result = read_memory_progbuf_inner(target, address, size, count, buffer, increment);
3126
3127 if (result != ERROR_OK) {
3128 /* The full read did not succeed, so we will try to read each word individually. */
3129 /* This will not be fast, but reading outside actual memory is a special case anyway. */
3130 /* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
3131 target_addr_t address_i = address;
3132 uint32_t count_i = 1;
3133 uint8_t *buffer_i = buffer;
3134
3135 for (uint32_t i = 0; i < count; i++, address_i += increment, buffer_i += size) {
3136 keep_alive();
3137 /* TODO: This is much slower than it needs to be because we end up
3138 * writing the address to read for every word we read. */
3139 result = read_memory_progbuf_inner(target, address_i, size, count_i, buffer_i, increment);
3140
3141 /* The read of a single word failed, so we will just return 0 for that instead */
3142 if (result != ERROR_OK) {
3143 LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR,
3144 size, address_i);
3145
3146 buf_set_u64(buffer_i, 0, 8 * size, 0);
3147 }
3148 }
3149 result = ERROR_OK;
3150 }
3151
3152 riscv_set_register(target, GDB_REGNO_S0, s0);
3153 riscv_set_register(target, GDB_REGNO_S1, s1);
3154 if (increment == 0)
3155 riscv_set_register(target, GDB_REGNO_S2, s2);
3156
3157 /* Restore MSTATUS */
3158 if (mstatus != mstatus_old)
3159 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3160 return ERROR_FAIL;
3161
3162 return result;
3163 }
3164
3165 static int read_memory(struct target *target, target_addr_t address,
3166 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3167 {
3168 if (count == 0)
3169 return ERROR_OK;
3170
3171 RISCV013_INFO(info);
3172 if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
3173 return read_memory_progbuf(target, address, size, count, buffer,
3174 increment);
3175
3176 if ((get_field(info->sbcs, DM_SBCS_SBACCESS8) && size == 1) ||
3177 (get_field(info->sbcs, DM_SBCS_SBACCESS16) && size == 2) ||
3178 (get_field(info->sbcs, DM_SBCS_SBACCESS32) && size == 4) ||
3179 (get_field(info->sbcs, DM_SBCS_SBACCESS64) && size == 8) ||
3180 (get_field(info->sbcs, DM_SBCS_SBACCESS128) && size == 16)) {
3181 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
3182 return read_memory_bus_v0(target, address, size, count, buffer,
3183 increment);
3184 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
3185 return read_memory_bus_v1(target, address, size, count, buffer,
3186 increment);
3187 }
3188
3189 if (has_sufficient_progbuf(target, 3))
3190 return read_memory_progbuf(target, address, size, count, buffer,
3191 increment);
3192
3193 return read_memory_abstract(target, address, size, count, buffer,
3194 increment);
3195 }
3196
3197 static int write_memory_bus_v0(struct target *target, target_addr_t address,
3198 uint32_t size, uint32_t count, const uint8_t *buffer)
3199 {
3200 /*1) write sbaddress: for singlewrite and autoincrement, we need to write the address once*/
3201 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
3202 TARGET_PRIxADDR, size, count, address);
3203 dmi_write(target, DM_SBADDRESS0, address);
3204 int64_t value = 0;
3205 int64_t access = 0;
3206 riscv_addr_t offset = 0;
3207 riscv_addr_t t_addr = 0;
3208 const uint8_t *t_buffer = buffer + offset;
3209
3210 /* B.8 Writing Memory, single write check if we write in one go */
3211 if (count == 1) { /* count is in bytes here */
3212 value = buf_get_u64(t_buffer, 0, 8 * size);
3213
3214 access = 0;
3215 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3216 dmi_write(target, DM_SBCS, access);
3217 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3218 LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
3219 dmi_write(target, DM_SBDATA0, value);
3220 return ERROR_OK;
3221 }
3222
3223 /*B.8 Writing Memory, using autoincrement*/
3224
3225 access = 0;
3226 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3227 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
3228 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3229 dmi_write(target, DM_SBCS, access);
3230
3231 /*2)set the value according to the size required and write*/
3232 for (riscv_addr_t i = 0; i < count; ++i) {
3233 offset = size*i;
3234 /* for monitoring only */
3235 t_addr = address + offset;
3236 t_buffer = buffer + offset;
3237
3238 value = buf_get_u64(t_buffer, 0, 8 * size);
3239 LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
3240 PRIx64, (uint32_t)t_addr, (uint32_t)value);
3241 dmi_write(target, DM_SBDATA0, value);
3242 }
3243 /*reset the autoincrement when finished (something weird is happening if this is not done at the end*/
3244 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 0);
3245 dmi_write(target, DM_SBCS, access);
3246
3247 return ERROR_OK;
3248 }
3249
3250 static int write_memory_bus_v1(struct target *target, target_addr_t address,
3251 uint32_t size, uint32_t count, const uint8_t *buffer)
3252 {
3253 RISCV013_INFO(info);
3254 uint32_t sbcs = sb_sbaccess(size);
3255 sbcs = set_field(sbcs, DM_SBCS_SBAUTOINCREMENT, 1);
3256 dmi_write(target, DM_SBCS, sbcs);
3257
3258 target_addr_t next_address = address;
3259 target_addr_t end_address = address + count * size;
3260
3261 int result;
3262
3263 sb_write_address(target, next_address);
3264 while (next_address < end_address) {
3265 LOG_DEBUG("transferring burst starting at address 0x%" TARGET_PRIxADDR,
3266 next_address);
3267
3268 struct riscv_batch *batch = riscv_batch_alloc(
3269 target,
3270 32,
3271 info->dmi_busy_delay + info->bus_master_write_delay);
3272 if (!batch)
3273 return ERROR_FAIL;
3274
3275 for (uint32_t i = (next_address - address) / size; i < count; i++) {
3276 const uint8_t *p = buffer + i * size;
3277
3278 if (riscv_batch_available_scans(batch) < (size + 3) / 4)
3279 break;
3280
3281 if (size > 12)
3282 riscv_batch_add_dmi_write(batch, DM_SBDATA3,
3283 ((uint32_t) p[12]) |
3284 (((uint32_t) p[13]) << 8) |
3285 (((uint32_t) p[14]) << 16) |
3286 (((uint32_t) p[15]) << 24));
3287
3288 if (size > 8)
3289 riscv_batch_add_dmi_write(batch, DM_SBDATA2,
3290 ((uint32_t) p[8]) |
3291 (((uint32_t) p[9]) << 8) |
3292 (((uint32_t) p[10]) << 16) |
3293 (((uint32_t) p[11]) << 24));
3294 if (size > 4)
3295 riscv_batch_add_dmi_write(batch, DM_SBDATA1,
3296 ((uint32_t) p[4]) |
3297 (((uint32_t) p[5]) << 8) |
3298 (((uint32_t) p[6]) << 16) |
3299 (((uint32_t) p[7]) << 24));
3300 uint32_t value = p[0];
3301 if (size > 2) {
3302 value |= ((uint32_t) p[2]) << 16;
3303 value |= ((uint32_t) p[3]) << 24;
3304 }
3305 if (size > 1)
3306 value |= ((uint32_t) p[1]) << 8;
3307 riscv_batch_add_dmi_write(batch, DM_SBDATA0, value);
3308
3309 log_memory_access(address + i * size, value, size, false);
3310 next_address += size;
3311 }
3312
3313 result = batch_run(target, batch);
3314 riscv_batch_free(batch);
3315 if (result != ERROR_OK)
3316 return result;
3317
3318 bool dmi_busy_encountered;
3319 if (dmi_op(target, &sbcs, &dmi_busy_encountered, DMI_OP_READ,
3320 DM_SBCS, 0, false, false) != ERROR_OK)
3321 return ERROR_FAIL;
3322
3323 time_t start = time(NULL);
3324 bool dmi_busy = dmi_busy_encountered;
3325 while (get_field(sbcs, DM_SBCS_SBBUSY) || dmi_busy) {
3326 if (time(NULL) - start > riscv_command_timeout_sec) {
3327 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
3328 "Increase the timeout with riscv set_command_timeout_sec.",
3329 riscv_command_timeout_sec, sbcs);
3330 return ERROR_FAIL;
3331 }
3332
3333 if (dmi_op(target, &sbcs, &dmi_busy, DMI_OP_READ,
3334 DM_SBCS, 0, false, true) != ERROR_OK)
3335 return ERROR_FAIL;
3336 }
3337
3338 if (get_field(sbcs, DM_SBCS_SBBUSYERROR)) {
3339 /* We wrote while the target was busy. Slow down and try again. */
3340 dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR);
3341 info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
3342 }
3343
3344 if (get_field(sbcs, DM_SBCS_SBBUSYERROR) || dmi_busy_encountered) {
3345 next_address = sb_read_address(target);
3346 if (next_address < address) {
3347 /* This should never happen, probably buggy hardware. */
3348 LOG_DEBUG("unexpected system bus address 0x%" TARGET_PRIxADDR,
3349 next_address);
3350 return ERROR_FAIL;
3351 }
3352
3353 continue;
3354 }
3355
3356 unsigned error = get_field(sbcs, DM_SBCS_SBERROR);
3357 if (error != 0) {
3358 /* Some error indicating the bus access failed, but not because of
3359 * something we did wrong. */
3360 dmi_write(target, DM_SBCS, DM_SBCS_SBERROR);
3361 return ERROR_FAIL;
3362 }
3363 }
3364