b666f524622d110ede58d82469b2dd1b95c63fe2
[openocd.git] / src / target / riscv / riscv-013.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /*
4 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
5 * latest draft.
6 */
7
8 #include <assert.h>
9 #include <stdlib.h>
10 #include <time.h>
11
12 #ifdef HAVE_CONFIG_H
13 #include "config.h"
14 #endif
15
16 #include "target/target.h"
17 #include "target/algorithm.h"
18 #include "target/target_type.h"
19 #include <helper/log.h>
20 #include "jtag/jtag.h"
21 #include "target/register.h"
22 #include "target/breakpoints.h"
23 #include "helper/time_support.h"
24 #include "helper/list.h"
25 #include "riscv.h"
26 #include "debug_defines.h"
27 #include "rtos/rtos.h"
28 #include "program.h"
29 #include "asm.h"
30 #include "batch.h"
31
32 static int riscv013_on_step_or_resume(struct target *target, bool step);
33 static int riscv013_step_or_resume_current_hart(struct target *target,
34 bool step, bool use_hasel);
35 static void riscv013_clear_abstract_error(struct target *target);
36
37 /* Implementations of the functions in struct riscv_info. */
38 static int riscv013_get_register(struct target *target,
39 riscv_reg_t *value, int rid);
40 static int riscv013_set_register(struct target *target, int regid, uint64_t value);
41 static int riscv013_select_current_hart(struct target *target);
42 static int riscv013_halt_prep(struct target *target);
43 static int riscv013_halt_go(struct target *target);
44 static int riscv013_resume_go(struct target *target);
45 static int riscv013_step_current_hart(struct target *target);
46 static int riscv013_on_halt(struct target *target);
47 static int riscv013_on_step(struct target *target);
48 static int riscv013_resume_prep(struct target *target);
49 static bool riscv013_is_halted(struct target *target);
50 static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
51 static int riscv013_write_debug_buffer(struct target *target, unsigned index,
52 riscv_insn_t d);
53 static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
54 index);
55 static int riscv013_execute_debug_buffer(struct target *target);
56 static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
57 static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
58 static int riscv013_dmi_write_u64_bits(struct target *target);
59 static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);
60 static int register_read(struct target *target, uint64_t *value, uint32_t number);
61 static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
62 static int register_write_direct(struct target *target, unsigned number,
63 uint64_t value);
64 static int read_memory(struct target *target, target_addr_t address,
65 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment);
66 static int write_memory(struct target *target, target_addr_t address,
67 uint32_t size, uint32_t count, const uint8_t *buffer);
68 static int riscv013_test_sba_config_reg(struct target *target, target_addr_t legal_address,
69 uint32_t num_words, target_addr_t illegal_address, bool run_sbbusyerror_test);
70 void write_memory_sba_simple(struct target *target, target_addr_t addr, uint32_t *write_data,
71 uint32_t write_size, uint32_t sbcs);
72 void read_memory_sba_simple(struct target *target, target_addr_t addr,
73 uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs);
74
75 /**
76 * Since almost everything can be accomplish by scanning the dbus register, all
77 * functions here assume dbus is already selected. The exception are functions
78 * called directly by OpenOCD, which can't assume anything about what's
79 * currently in IR. They should set IR to dbus explicitly.
80 */
81
/* Extract / insert a bit field identified by `mask`.
 * ((mask) & ~((mask) << 1)) isolates the lowest set bit of the mask, so
 * dividing by it shifts the field down to bit 0, and multiplying by it
 * shifts `val` up into position — no separate shift-amount constant needed.
 * NOTE: arguments are evaluated multiple times; avoid side effects. */
#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
84
85 #define CSR_DCSR_CAUSE_SWBP 1
86 #define CSR_DCSR_CAUSE_TRIGGER 2
87 #define CSR_DCSR_CAUSE_DEBUGINT 3
88 #define CSR_DCSR_CAUSE_STEP 4
89 #define CSR_DCSR_CAUSE_HALT 5
90 #define CSR_DCSR_CAUSE_GROUP 6
91
92 #define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
93
94 /*** JTAG registers. ***/
95
/* Operation encodings for the `op` field written into the DMI JTAG register. */
typedef enum {
	DMI_OP_NOP = 0,
	DMI_OP_READ = 1,
	DMI_OP_WRITE = 2
} dmi_op_t;
/* Status encodings returned in the `op` field after a DMI scan.
 * (Value 1 is reserved, hence the gap.) */
typedef enum {
	DMI_STATUS_SUCCESS = 0,
	DMI_STATUS_FAILED = 2,
	DMI_STATUS_BUSY = 3
} dmi_status_t;
106
/* Logical scratch slots; presumably used to address debug scratch/data
 * locations elsewhere in this file — TODO(review) confirm usage sites. */
typedef enum slot {
	SLOT0,
	SLOT1,
	SLOT_LAST,
} slot_t;
112
113 /*** Debug Bus registers. ***/
114
115 #define CMDERR_NONE 0
116 #define CMDERR_BUSY 1
117 #define CMDERR_NOT_SUPPORTED 2
118 #define CMDERR_EXCEPTION 3
119 #define CMDERR_HALT_RESUME 4
120 #define CMDERR_OTHER 7
121
122 /*** Info about the core being debugged. ***/
123
124 struct trigger {
125 uint64_t address;
126 uint32_t length;
127 uint64_t mask;
128 uint64_t value;
129 bool read, write, execute;
130 int unique_id;
131 };
132
133 typedef enum {
134 YNM_MAYBE,
135 YNM_YES,
136 YNM_NO
137 } yes_no_maybe_t;
138
139 typedef struct {
140 struct list_head list;
141 int abs_chain_position;
142
143 /* The number of harts connected to this DM. */
144 int hart_count;
145 /* Indicates we already reset this DM, so don't need to do it again. */
146 bool was_reset;
147 /* Targets that are connected to this DM. */
148 struct list_head target_list;
149 /* The currently selected hartid on this DM. */
150 int current_hartid;
151 bool hasel_supported;
152
153 /* The program buffer stores executable code. 0 is an illegal instruction,
154 * so we use 0 to mean the cached value is invalid. */
155 uint32_t progbuf_cache[16];
156 } dm013_info_t;
157
158 typedef struct {
159 struct list_head list;
160 struct target *target;
161 } target_list_t;
162
163 typedef struct {
164 /* The indexed used to address this hart in its DM. */
165 unsigned index;
166 /* Number of address bits in the dbus register. */
167 unsigned abits;
168 /* Number of abstract command data registers. */
169 unsigned datacount;
170 /* Number of words in the Program Buffer. */
171 unsigned progbufsize;
172
173 /* We cache the read-only bits of sbcs here. */
174 uint32_t sbcs;
175
176 yes_no_maybe_t progbuf_writable;
177 /* We only need the address so that we know the alignment of the buffer. */
178 riscv_addr_t progbuf_address;
179
180 /* Number of run-test/idle cycles the target requests we do after each dbus
181 * access. */
182 unsigned int dtmcs_idle;
183
184 /* This value is incremented every time a dbus access comes back as "busy".
185 * It's used to determine how many run-test/idle cycles to feed the target
186 * in between accesses. */
187 unsigned int dmi_busy_delay;
188
189 /* Number of run-test/idle cycles to add between consecutive bus master
190 * reads/writes respectively. */
191 unsigned int bus_master_write_delay, bus_master_read_delay;
192
193 /* This value is increased every time we tried to execute two commands
194 * consecutively, and the second one failed because the previous hadn't
195 * completed yet. It's used to add extra run-test/idle cycles after
196 * starting a command, so we don't have to waste time checking for busy to
197 * go low. */
198 unsigned int ac_busy_delay;
199
200 bool abstract_read_csr_supported;
201 bool abstract_write_csr_supported;
202 bool abstract_read_fpr_supported;
203 bool abstract_write_fpr_supported;
204
205 yes_no_maybe_t has_aampostincrement;
206
207 /* When a function returns some error due to a failure indicated by the
208 * target in cmderr, the caller can look here to see what that error was.
209 * (Compare with errno.) */
210 uint8_t cmderr;
211
212 /* Some fields from hartinfo. */
213 uint8_t datasize;
214 uint8_t dataaccess;
215 int16_t dataaddr;
216
217 /* The width of the hartsel field. */
218 unsigned hartsellen;
219
220 /* DM that provides access to this target. */
221 dm013_info_t *dm;
222 } riscv013_info_t;
223
224 LIST_HEAD(dm_list);
225
226 static riscv013_info_t *get_info(const struct target *target)
227 {
228 struct riscv_info *info = target->arch_info;
229 assert(info);
230 assert(info->version_specific);
231 return info->version_specific;
232 }
233
234 /**
235 * Return the DM structure for this target. If there isn't one, find it in the
236 * global list of DMs. If it's not in there, then create one and initialize it
237 * to 0.
238 */
dm013_info_t *get_dm(struct target *target)
{
	RISCV013_INFO(info);
	/* Fast path: this target already resolved its DM. */
	if (info->dm)
		return info->dm;

	/* DMs are keyed by the TAP's absolute position in the scan chain, so
	 * all targets behind the same TAP share a single DM structure. */
	int abs_chain_position = target->tap->abs_chain_position;

	dm013_info_t *entry;
	dm013_info_t *dm = NULL;
	list_for_each_entry(entry, &dm_list, list) {
		if (entry->abs_chain_position == abs_chain_position) {
			dm = entry;
			break;
		}
	}

	if (!dm) {
		/* No DM known for this TAP yet: allocate one with sentinel
		 * hart values (-1 = "not yet discovered"). */
		LOG_DEBUG("[%d] Allocating new DM", target->coreid);
		dm = calloc(1, sizeof(dm013_info_t));
		if (!dm)
			return NULL;
		dm->abs_chain_position = abs_chain_position;
		dm->current_hartid = -1;
		dm->hart_count = -1;
		INIT_LIST_HEAD(&dm->target_list);
		list_add(&dm->list, &dm_list);
	}

	info->dm = dm;
	/* Register this target with the DM, unless it is already on the
	 * DM's target list. */
	target_list_t *target_entry;
	list_for_each_entry(target_entry, &dm->target_list, list) {
		if (target_entry->target == target)
			return dm;
	}
	target_entry = calloc(1, sizeof(*target_entry));
	if (!target_entry) {
		/* Undo the cached pointer so a later call retries cleanly. */
		info->dm = NULL;
		return NULL;
	}
	target_entry->target = target;
	list_add(&target_entry->list, &dm->target_list);

	return dm;
}
284
285 static uint32_t set_hartsel(uint32_t initial, uint32_t index)
286 {
287 initial &= ~DM_DMCONTROL_HARTSELLO;
288 initial &= ~DM_DMCONTROL_HARTSELHI;
289
290 uint32_t index_lo = index & ((1 << DM_DMCONTROL_HARTSELLO_LENGTH) - 1);
291 initial |= index_lo << DM_DMCONTROL_HARTSELLO_OFFSET;
292 uint32_t index_hi = index >> DM_DMCONTROL_HARTSELLO_LENGTH;
293 assert(index_hi < 1 << DM_DMCONTROL_HARTSELHI_LENGTH);
294 initial |= index_hi << DM_DMCONTROL_HARTSELHI_OFFSET;
295
296 return initial;
297 }
298
299 static void decode_dmi(char *text, unsigned address, unsigned data)
300 {
301 static const struct {
302 unsigned address;
303 uint64_t mask;
304 const char *name;
305 } description[] = {
306 { DM_DMCONTROL, DM_DMCONTROL_HALTREQ, "haltreq" },
307 { DM_DMCONTROL, DM_DMCONTROL_RESUMEREQ, "resumereq" },
308 { DM_DMCONTROL, DM_DMCONTROL_HARTRESET, "hartreset" },
309 { DM_DMCONTROL, DM_DMCONTROL_HASEL, "hasel" },
310 { DM_DMCONTROL, DM_DMCONTROL_HARTSELHI, "hartselhi" },
311 { DM_DMCONTROL, DM_DMCONTROL_HARTSELLO, "hartsello" },
312 { DM_DMCONTROL, DM_DMCONTROL_NDMRESET, "ndmreset" },
313 { DM_DMCONTROL, DM_DMCONTROL_DMACTIVE, "dmactive" },
314 { DM_DMCONTROL, DM_DMCONTROL_ACKHAVERESET, "ackhavereset" },
315
316 { DM_DMSTATUS, DM_DMSTATUS_IMPEBREAK, "impebreak" },
317 { DM_DMSTATUS, DM_DMSTATUS_ALLHAVERESET, "allhavereset" },
318 { DM_DMSTATUS, DM_DMSTATUS_ANYHAVERESET, "anyhavereset" },
319 { DM_DMSTATUS, DM_DMSTATUS_ALLRESUMEACK, "allresumeack" },
320 { DM_DMSTATUS, DM_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
321 { DM_DMSTATUS, DM_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
322 { DM_DMSTATUS, DM_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
323 { DM_DMSTATUS, DM_DMSTATUS_ALLUNAVAIL, "allunavail" },
324 { DM_DMSTATUS, DM_DMSTATUS_ANYUNAVAIL, "anyunavail" },
325 { DM_DMSTATUS, DM_DMSTATUS_ALLRUNNING, "allrunning" },
326 { DM_DMSTATUS, DM_DMSTATUS_ANYRUNNING, "anyrunning" },
327 { DM_DMSTATUS, DM_DMSTATUS_ALLHALTED, "allhalted" },
328 { DM_DMSTATUS, DM_DMSTATUS_ANYHALTED, "anyhalted" },
329 { DM_DMSTATUS, DM_DMSTATUS_AUTHENTICATED, "authenticated" },
330 { DM_DMSTATUS, DM_DMSTATUS_AUTHBUSY, "authbusy" },
331 { DM_DMSTATUS, DM_DMSTATUS_HASRESETHALTREQ, "hasresethaltreq" },
332 { DM_DMSTATUS, DM_DMSTATUS_CONFSTRPTRVALID, "confstrptrvalid" },
333 { DM_DMSTATUS, DM_DMSTATUS_VERSION, "version" },
334
335 { DM_ABSTRACTCS, DM_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
336 { DM_ABSTRACTCS, DM_ABSTRACTCS_BUSY, "busy" },
337 { DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR, "cmderr" },
338 { DM_ABSTRACTCS, DM_ABSTRACTCS_DATACOUNT, "datacount" },
339
340 { DM_COMMAND, DM_COMMAND_CMDTYPE, "cmdtype" },
341
342 { DM_SBCS, DM_SBCS_SBVERSION, "sbversion" },
343 { DM_SBCS, DM_SBCS_SBBUSYERROR, "sbbusyerror" },
344 { DM_SBCS, DM_SBCS_SBBUSY, "sbbusy" },
345 { DM_SBCS, DM_SBCS_SBREADONADDR, "sbreadonaddr" },
346 { DM_SBCS, DM_SBCS_SBACCESS, "sbaccess" },
347 { DM_SBCS, DM_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
348 { DM_SBCS, DM_SBCS_SBREADONDATA, "sbreadondata" },
349 { DM_SBCS, DM_SBCS_SBERROR, "sberror" },
350 { DM_SBCS, DM_SBCS_SBASIZE, "sbasize" },
351 { DM_SBCS, DM_SBCS_SBACCESS128, "sbaccess128" },
352 { DM_SBCS, DM_SBCS_SBACCESS64, "sbaccess64" },
353 { DM_SBCS, DM_SBCS_SBACCESS32, "sbaccess32" },
354 { DM_SBCS, DM_SBCS_SBACCESS16, "sbaccess16" },
355 { DM_SBCS, DM_SBCS_SBACCESS8, "sbaccess8" },
356 };
357
358 text[0] = 0;
359 for (unsigned i = 0; i < ARRAY_SIZE(description); i++) {
360 if (description[i].address == address) {
361 uint64_t mask = description[i].mask;
362 unsigned value = get_field(data, mask);
363 if (value) {
364 if (i > 0)
365 *(text++) = ' ';
366 if (mask & (mask >> 1)) {
367 /* If the field is more than 1 bit wide. */
368 sprintf(text, "%s=%d", description[i].name, value);
369 } else {
370 strcpy(text, description[i].name);
371 }
372 text += strlen(text);
373 }
374 }
375 }
376 }
377
/* Log one DMI scan (request and response) at debug level: raw op/data/address
 * in both directions, the number of run-test/idle cycles used, and — when the
 * register is recognised — a symbolic field decode on a second line. */
static void dump_field(int idle, const struct scan_field *field)
{
	static const char * const op_string[] = {"-", "r", "w", "?"};
	static const char * const status_string[] = {"+", "?", "F", "b"};

	/* Skip all the buffer decoding work unless it will actually be logged. */
	if (debug_level < LOG_LVL_DEBUG)
		return;

	uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
	unsigned int out_op = get_field(out, DTM_DMI_OP);
	unsigned int out_data = get_field(out, DTM_DMI_DATA);
	unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;

	uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
	unsigned int in_op = get_field(in, DTM_DMI_OP);
	unsigned int in_data = get_field(in, DTM_DMI_DATA);
	unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;

	log_printf_lf(LOG_LVL_DEBUG,
			__FILE__, __LINE__, "scan",
			"%db %s %08x @%02x -> %s %08x @%02x; %di",
			field->num_bits, op_string[out_op], out_data, out_address,
			status_string[in_op], in_data, in_address, idle);

	/* 500 bytes is assumed sufficient for the worst-case decode of one
	 * register's fields — TODO(review) confirm against decode_dmi(). */
	char out_text[500];
	char in_text[500];
	decode_dmi(out_text, out_address, out_data);
	decode_dmi(in_text, in_address, in_data);
	if (in_text[0] || out_text[0]) {
		log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
				out_text, in_text);
	}
}
411
412 /*** Utility functions. ***/
413
414 static void select_dmi(struct target *target)
415 {
416 if (bscan_tunnel_ir_width != 0) {
417 select_dmi_via_bscan(target);
418 return;
419 }
420 jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
421 }
422
/* Scan 32 bits through the DTMCS (dtmcontrol) register, returning the value
 * read back. Leaves DMI selected in IR afterwards. */
static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
{
	struct scan_field field;
	uint8_t in_value[4];
	uint8_t out_value[4] = { 0 };

	if (bscan_tunnel_ir_width != 0)
		return dtmcontrol_scan_via_bscan(target, out);

	buf_set_u32(out_value, 0, 32, out);

	jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);

	field.num_bits = 32;
	field.out_value = out_value;
	field.in_value = in_value;
	jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);

	/* Always return to dmi. */
	select_dmi(target);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("failed jtag scan: %d", retval);
		/* NOTE(review): on failure the JTAG error code is returned as if
		 * it were a DTMCS value; callers cannot tell the two apart.
		 * Confirm this is acceptable at all call sites. */
		return retval;
	}

	uint32_t in = buf_get_u32(field.in_value, 0, 32);
	LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);

	return in;
}
455
/* Called when a DMI access came back busy: grow the inter-access idle delay
 * by ~10% (at least 1 cycle), then clear the sticky busy condition by
 * writing dmireset in DTMCS. */
static void increase_dmi_busy_delay(struct target *target)
{
	riscv013_info_t *info = get_info(target);
	info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
	LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
			info->dtmcs_idle, info->dmi_busy_delay,
			info->ac_busy_delay);

	dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
}
466
467 /**
468 * exec: If this is set, assume the scan results in an execution, so more
469 * run-test/idle cycles may be required.
470 */
static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
		uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
		bool exec)
{
	riscv013_info_t *info = get_info(target);
	RISCV_INFO(r);
	/* DMI register layout: op | data | address (widths from the DTM). */
	unsigned num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH;
	size_t num_bytes = (num_bits + 7) / 8;
	uint8_t in[num_bytes];
	uint8_t out[num_bytes];
	struct scan_field field = {
		.num_bits = num_bits,
		.out_value = out,
		.in_value = in
	};
	riscv_bscan_tunneled_scan_context_t bscan_ctxt;

	/* While the post-reset countdown runs, keep the delays at zero; once it
	 * expires the adaptively-learned delays are reset. */
	if (r->reset_delays_wait >= 0) {
		r->reset_delays_wait--;
		if (r->reset_delays_wait < 0) {
			info->dmi_busy_delay = 0;
			info->ac_busy_delay = 0;
		}
	}

	memset(in, 0, num_bytes);
	memset(out, 0, num_bytes);

	/* abits must have been discovered during examine. */
	assert(info->abits != 0);

	buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
	buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
	buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);

	/* I wanted to place this code in a different function, but the way JTAG command
	   queueing works in the jtag handling functions, the scan fields either have to be
	   heap allocated, global/static, or else they need to stay on the stack until
	   the jtag_execute_queue() call. Heap or static fields in this case doesn't seem
	   the best fit. Declaring stack based field values in a subsidiary function call wouldn't
	   work. */
	if (bscan_tunnel_ir_width != 0) {
		riscv_add_bscan_tunneled_scan(target, &field, &bscan_ctxt);
	} else {
		/* Assume dbus is already selected. */
		jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
	}

	/* Feed run-test/idle cycles in proportion to how often the target has
	 * reported busy; execution-triggering scans get extra headroom. */
	int idle_count = info->dmi_busy_delay;
	if (exec)
		idle_count += info->ac_busy_delay;

	if (idle_count)
		jtag_add_runtest(idle_count, TAP_IDLE);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("dmi_scan failed jtag scan");
		if (data_in)
			*data_in = ~0;
		return DMI_STATUS_FAILED;
	}

	if (bscan_tunnel_ir_width != 0) {
		/* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
		buffer_shr(in, num_bytes, 1);
	}

	if (data_in)
		*data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);

	if (address_in)
		*address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
	dump_field(idle_count, &field);
	/* The op field of the response carries the dmi_status_t. */
	return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
}
546
547 /**
548 * @param target
549 * @param data_in The data we received from the target.
550 * @param dmi_busy_encountered
551 * If non-NULL, will be updated to reflect whether DMI busy was
552 * encountered while executing this operation or not.
553 * @param dmi_op The operation to perform (read/write/nop).
554 * @param address The address argument to that operation.
555 * @param data_out The data to send to the target.
556 * @param timeout_sec
557 * @param exec When true, this scan will execute something, so extra RTI
558 * cycles may be added.
559 * @param ensure_success
560 * Scan a nop after the requested operation, ensuring the
561 * DMI operation succeeded.
562 */
563 static int dmi_op_timeout(struct target *target, uint32_t *data_in,
564 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
565 uint32_t data_out, int timeout_sec, bool exec, bool ensure_success)
566 {
567 select_dmi(target);
568
569 dmi_status_t status;
570 uint32_t address_in;
571
572 if (dmi_busy_encountered)
573 *dmi_busy_encountered = false;
574
575 const char *op_name;
576 switch (dmi_op) {
577 case DMI_OP_NOP:
578 op_name = "nop";
579 break;
580 case DMI_OP_READ:
581 op_name = "read";
582 break;
583 case DMI_OP_WRITE:
584 op_name = "write";
585 break;
586 default:
587 LOG_ERROR("Invalid DMI operation: %d", dmi_op);
588 return ERROR_FAIL;
589 }
590
591 keep_alive();
592
593 time_t start = time(NULL);
594 /* This first loop performs the request. Note that if for some reason this
595 * stays busy, it is actually due to the previous access. */
596 while (1) {
597 status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
598 exec);
599 if (status == DMI_STATUS_BUSY) {
600 increase_dmi_busy_delay(target);
601 if (dmi_busy_encountered)
602 *dmi_busy_encountered = true;
603 } else if (status == DMI_STATUS_SUCCESS) {
604 break;
605 } else {
606 LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
607 return ERROR_FAIL;
608 }
609 if (time(NULL) - start > timeout_sec)
610 return ERROR_TIMEOUT_REACHED;
611 }
612
613 if (status != DMI_STATUS_SUCCESS) {
614 LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
615 return ERROR_FAIL;
616 }
617
618 if (ensure_success) {
619 /* This second loop ensures the request succeeded, and gets back data.
620 * Note that NOP can result in a 'busy' result as well, but that would be
621 * noticed on the next DMI access we do. */
622 while (1) {
623 status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
624 false);
625 if (status == DMI_STATUS_BUSY) {
626 increase_dmi_busy_delay(target);
627 if (dmi_busy_encountered)
628 *dmi_busy_encountered = true;
629 } else if (status == DMI_STATUS_SUCCESS) {
630 break;
631 } else {
632 if (data_in) {
633 LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
634 op_name, address, *data_in, status);
635 } else {
636 LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
637 status);
638 }
639 return ERROR_FAIL;
640 }
641 if (time(NULL) - start > timeout_sec)
642 return ERROR_TIMEOUT_REACHED;
643 }
644 }
645
646 return ERROR_OK;
647 }
648
649 static int dmi_op(struct target *target, uint32_t *data_in,
650 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
651 uint32_t data_out, bool exec, bool ensure_success)
652 {
653 int result = dmi_op_timeout(target, data_in, dmi_busy_encountered, dmi_op,
654 address, data_out, riscv_command_timeout_sec, exec, ensure_success);
655 if (result == ERROR_TIMEOUT_REACHED) {
656 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
657 "either really slow or broken. You could increase the "
658 "timeout with riscv set_command_timeout_sec.",
659 riscv_command_timeout_sec);
660 return ERROR_FAIL;
661 }
662 return result;
663 }
664
/* Read a DMI register, with completion confirmed by a follow-up NOP. */
static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false, true);
}

/* As dmi_read(), but marks the scan as triggering execution so extra
 * run-test/idle cycles are added. */
static int dmi_read_exec(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true, true);
}

/* Write a DMI register, with completion confirmed by a follow-up NOP. */
static int dmi_write(struct target *target, uint32_t address, uint32_t value)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true);
}

/* Write a DMI register that triggers execution; `ensure_success` selects
 * whether a confirming NOP scan is performed. */
static int dmi_write_exec(struct target *target, uint32_t address,
		uint32_t value, bool ensure_success)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true, ensure_success);
}
685
/* Read dmstatus with a caller-supplied timeout, sanity-checking the debug
 * module version and (optionally) that we are authenticated. */
int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
		bool authenticated, unsigned timeout_sec)
{
	int result = dmi_op_timeout(target, dmstatus, NULL, DMI_OP_READ,
			DM_DMSTATUS, 0, timeout_sec, false, true);
	if (result != ERROR_OK)
		return result;
	int dmstatus_version = get_field(*dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		/* NOTE(review): an unsupported version is logged but the function
		 * still returns ERROR_OK below — confirm that is intentional. */
		LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (1.0), not "
				"%d (dmstatus=0x%x). This error might be caused by a JTAG "
				"signal issue. Try reducing the JTAG clock speed.",
				get_field(*dmstatus, DM_DMSTATUS_VERSION), *dmstatus);
	} else if (authenticated && !get_field(*dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", *dmstatus);
		return ERROR_FAIL;
	}
	return ERROR_OK;
}
707
/* dmstatus_read_timeout() with the global command timeout. */
int dmstatus_read(struct target *target, uint32_t *dmstatus,
		bool authenticated)
{
	return dmstatus_read_timeout(target, dmstatus, authenticated,
			riscv_command_timeout_sec);
}
714
/* Called when an abstract command reported busy: grow the post-command idle
 * delay by ~10% (at least 1 cycle). */
static void increase_ac_busy_delay(struct target *target)
{
	riscv013_info_t *info = get_info(target);
	info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
	LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
			info->dtmcs_idle, info->dmi_busy_delay,
			info->ac_busy_delay);
}
723
724 uint32_t abstract_register_size(unsigned width)
725 {
726 switch (width) {
727 case 32:
728 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 2);
729 case 64:
730 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 3);
731 case 128:
732 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 4);
733 default:
734 LOG_ERROR("Unsupported register width: %d", width);
735 return 0;
736 }
737 }
738
/* Poll abstractcs until its busy bit clears, storing the last value read in
 * `*abstractcs`. Returns ERROR_FAIL on DMI failure or when the global
 * command timeout expires (also decoding any cmderr for the log). */
static int wait_for_idle(struct target *target, uint32_t *abstractcs)
{
	RISCV013_INFO(info);
	time_t start = time(NULL);
	while (1) {
		if (dmi_read(target, abstractcs, DM_ABSTRACTCS) != ERROR_OK)
			return ERROR_FAIL;

		if (get_field(*abstractcs, DM_ABSTRACTCS_BUSY) == 0)
			return ERROR_OK;

		if (time(NULL) - start > riscv_command_timeout_sec) {
			/* Record cmderr so callers can inspect it (compare errno). */
			info->cmderr = get_field(*abstractcs, DM_ABSTRACTCS_CMDERR);
			if (info->cmderr != CMDERR_NONE) {
				/* Indexed by the 3-bit cmderr field (CMDERR_* values). */
				const char *errors[8] = {
					"none",
					"busy",
					"not supported",
					"exception",
					"halt/resume",
					"reserved",
					"reserved",
					"other" };

				LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
						errors[info->cmderr], *abstractcs);
			}

			LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
					"Increase the timeout with riscv set_command_timeout_sec.",
					riscv_command_timeout_sec,
					*abstractcs);
			return ERROR_FAIL;
		}
	}
}
775
/* Write `command` to the command register, wait for it to finish, and check
 * cmderr. On failure the error is recorded in info->cmderr and cleared in
 * the DM before ERROR_FAIL is returned. */
static int execute_abstract_command(struct target *target, uint32_t command)
{
	RISCV013_INFO(info);
	if (debug_level >= LOG_LVL_DEBUG) {
		switch (get_field(command, DM_COMMAND_CMDTYPE)) {
			case 0:
				/* cmdtype 0 = Access Register; decode its fields. */
				LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
						"transfer=%d, write=%d, regno=0x%x",
						command,
						8 << get_field(command, AC_ACCESS_REGISTER_AARSIZE),
						get_field(command, AC_ACCESS_REGISTER_POSTEXEC),
						get_field(command, AC_ACCESS_REGISTER_TRANSFER),
						get_field(command, AC_ACCESS_REGISTER_WRITE),
						get_field(command, AC_ACCESS_REGISTER_REGNO));
				break;
			default:
				LOG_DEBUG("command=0x%x", command);
				break;
		}
	}

	/* No confirming NOP here; wait_for_idle() polls completion instead. */
	if (dmi_write_exec(target, DM_COMMAND, command, false) != ERROR_OK)
		return ERROR_FAIL;

	uint32_t abstractcs = 0;
	int result = wait_for_idle(target, &abstractcs);

	info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
	if (info->cmderr != 0 || result != ERROR_OK) {
		LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
		/* Clear the error. */
		dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
813
814 static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
815 unsigned size_bits)
816 {
817 riscv_reg_t value = 0;
818 uint32_t v;
819 unsigned offset = index * size_bits / 32;
820 switch (size_bits) {
821 default:
822 LOG_ERROR("Unsupported size: %d bits", size_bits);
823 return ~0;
824 case 64:
825 dmi_read(target, &v, DM_DATA0 + offset + 1);
826 value |= ((uint64_t) v) << 32;
827 /* falls through */
828 case 32:
829 dmi_read(target, &v, DM_DATA0 + offset);
830 value |= v;
831 }
832 return value;
833 }
834
835 static int write_abstract_arg(struct target *target, unsigned index,
836 riscv_reg_t value, unsigned size_bits)
837 {
838 unsigned offset = index * size_bits / 32;
839 switch (size_bits) {
840 default:
841 LOG_ERROR("Unsupported size: %d bits", size_bits);
842 return ERROR_FAIL;
843 case 64:
844 dmi_write(target, DM_DATA0 + offset + 1, value >> 32);
845 /* falls through */
846 case 32:
847 dmi_write(target, DM_DATA0 + offset, value);
848 }
849 return ERROR_OK;
850 }
851
852 /**
853 * @par size in bits
854 */
static uint32_t access_register_command(struct target *target, uint32_t number,
		unsigned size, uint32_t flags)
{
	/* cmdtype 0 = Access Register. */
	uint32_t command = set_field(0, DM_COMMAND_CMDTYPE, 0);
	switch (size) {
		case 32:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 2);
			break;
		case 64:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3);
			break;
		default:
			LOG_ERROR("%d-bit register %s not supported.", size,
					gdb_regno_name(number));
			assert(0);
	}

	/* Map the GDB register number onto the abstract-command regno space:
	 * GPRs at 0x1000, FPRs at 0x1020, CSRs at their CSR number, and
	 * target-custom registers at 0xc000 + custom_number. */
	if (number <= GDB_REGNO_XPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1000 + number - GDB_REGNO_ZERO);
	} else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1020 + number - GDB_REGNO_FPR0);
	} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				number - GDB_REGNO_CSR0);
	} else if (number >= GDB_REGNO_COUNT) {
		/* Custom register. */
		assert(target->reg_cache->reg_list[number].arch_info);
		riscv_reg_info_t *reg_info = target->reg_cache->reg_list[number].arch_info;
		assert(reg_info);
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0xc000 + reg_info->custom_number);
	} else {
		assert(0);
	}

	/* Caller-supplied transfer/write/postexec bits. */
	command |= flags;

	return command;
}
896
/* Read register `number` (size bits) via an abstract command into `*value`
 * (which may be NULL to just probe support). On a "not supported" cmderr,
 * remembers that FPR/CSR abstract reads don't work so future calls fail
 * fast. */
static int register_read_abstract(struct target *target, uint64_t *value,
		uint32_t number, unsigned size)
{
	RISCV013_INFO(info);

	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			!info->abstract_read_fpr_supported)
		return ERROR_FAIL;
	if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
			!info->abstract_read_csr_supported)
		return ERROR_FAIL;
	/* The spec doesn't define abstract register numbers for vector registers. */
	if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31)
		return ERROR_FAIL;

	uint32_t command = access_register_command(target, number, size,
			AC_ACCESS_REGISTER_TRANSFER);

	int result = execute_abstract_command(target, command);
	if (result != ERROR_OK) {
		if (info->cmderr == CMDERR_NOT_SUPPORTED) {
			if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
				info->abstract_read_fpr_supported = false;
				LOG_INFO("Disabling abstract command reads from FPRs.");
			} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
				info->abstract_read_csr_supported = false;
				LOG_INFO("Disabling abstract command reads from CSRs.");
			}
		}
		return result;
	}

	if (value)
		*value = read_abstract_arg(target, 0, size);

	return ERROR_OK;
}
934
/* Write `value` to register `number` (size bits) via an abstract command.
 * On a "not supported" cmderr, remembers that FPR/CSR abstract writes don't
 * work so future calls fail fast. */
static int register_write_abstract(struct target *target, uint32_t number,
		uint64_t value, unsigned size)
{
	RISCV013_INFO(info);

	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			!info->abstract_write_fpr_supported)
		return ERROR_FAIL;
	if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
			!info->abstract_write_csr_supported)
		return ERROR_FAIL;

	uint32_t command = access_register_command(target, number, size,
			AC_ACCESS_REGISTER_TRANSFER |
			AC_ACCESS_REGISTER_WRITE);

	/* Stage the value in the data registers before issuing the command. */
	if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
		return ERROR_FAIL;

	int result = execute_abstract_command(target, command);
	if (result != ERROR_OK) {
		if (info->cmderr == CMDERR_NOT_SUPPORTED) {
			if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
				info->abstract_write_fpr_supported = false;
				LOG_INFO("Disabling abstract command writes to FPRs.");
			} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
				info->abstract_write_csr_supported = false;
				LOG_INFO("Disabling abstract command writes to CSRs.");
			}
		}
		return result;
	}

	return ERROR_OK;
}
970
971 /*
972 * Sets the AAMSIZE field of a memory access abstract command based on
973 * the width (bits).
974 */
975 static uint32_t abstract_memory_size(unsigned width)
976 {
977 switch (width) {
978 case 8:
979 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 0);
980 case 16:
981 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 1);
982 case 32:
983 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 2);
984 case 64:
985 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 3);
986 case 128:
987 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 4);
988 default:
989 LOG_ERROR("Unsupported memory width: %d", width);
990 return 0;
991 }
992 }
993
994 /*
995 * Creates a memory access abstract command.
996 */
997 static uint32_t access_memory_command(struct target *target, bool virtual,
998 unsigned width, bool postincrement, bool write)
999 {
1000 uint32_t command = set_field(0, AC_ACCESS_MEMORY_CMDTYPE, 2);
1001 command = set_field(command, AC_ACCESS_MEMORY_AAMVIRTUAL, virtual);
1002 command |= abstract_memory_size(width);
1003 command = set_field(command, AC_ACCESS_MEMORY_AAMPOSTINCREMENT,
1004 postincrement);
1005 command = set_field(command, AC_ACCESS_MEMORY_WRITE, write);
1006
1007 return command;
1008 }
1009
/* Determine (once) whether the program buffer is writable as plain memory
 * from the hart's point of view, and at what address it appears.  The
 * result is cached in info->progbuf_writable / info->progbuf_address; on
 * later calls this returns immediately. */
static int examine_progbuf(struct target *target)
{
	riscv013_info_t *info = get_info(target);

	if (info->progbuf_writable != YNM_MAYBE)
		return ERROR_OK;

	/* Figure out if progbuf is writable. */

	if (info->progbufsize < 1) {
		info->progbuf_writable = YNM_NO;
		LOG_INFO("No program buffer present.");
		return ERROR_OK;
	}

	/* S0 is clobbered by the probe programs below; save it first. */
	uint64_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* auipc leaves the progbuf's execution address (plus 0) in S0. */
	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, auipc(S0));
	if (riscv_program_exec(&program, target) != ERROR_OK)
		return ERROR_FAIL;

	if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Try to store S0 back into the progbuf itself; if the store takes
	 * effect we can see it through DM_PROGBUF0. */
	riscv_program_init(&program, target);
	riscv_program_insert(&program, sw(S0, S0, 0));
	int result = riscv_program_exec(&program, target);

	/* Restore S0 before interpreting the probe's result. */
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	if (result != ERROR_OK) {
		/* This program might have failed if the program buffer is not
		 * writable. */
		info->progbuf_writable = YNM_NO;
		return ERROR_OK;
	}

	/* If the store worked, PROGBUF0 now holds the low 32 bits of the
	 * progbuf address we wrote through S0. */
	uint32_t written;
	if (dmi_read(target, &written, DM_PROGBUF0) != ERROR_OK)
		return ERROR_FAIL;
	if (written == (uint32_t) info->progbuf_address) {
		LOG_INFO("progbuf is writable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_YES;

	} else {
		LOG_INFO("progbuf is not writeable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_NO;
	}

	return ERROR_OK;
}
1068
1069 static int is_fpu_reg(uint32_t gdb_regno)
1070 {
1071 return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) ||
1072 (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) ||
1073 (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) ||
1074 (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR);
1075 }
1076
1077 static int is_vector_reg(uint32_t gdb_regno)
1078 {
1079 return (gdb_regno >= GDB_REGNO_V0 && gdb_regno <= GDB_REGNO_V31) ||
1080 gdb_regno == GDB_REGNO_VSTART ||
1081 gdb_regno == GDB_REGNO_VXSAT ||
1082 gdb_regno == GDB_REGNO_VXRM ||
1083 gdb_regno == GDB_REGNO_VL ||
1084 gdb_regno == GDB_REGNO_VTYPE ||
1085 gdb_regno == GDB_REGNO_VLENB;
1086 }
1087
/* Before touching an FPU or vector register, make sure the relevant unit is
 * enabled in mstatus (FS/VS non-zero), saving the original mstatus into
 * *mstatus so cleanup_after_register_access() can restore it.  For any other
 * register *mstatus is set to 0 and nothing is written to the target. */
static int prep_for_register_access(struct target *target, uint64_t *mstatus,
		int regno)
{
	if (is_fpu_reg(regno) || is_vector_reg(regno)) {
		if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
			return ERROR_FAIL;
		/* Only write mstatus back when the needed unit is currently off. */
		if (is_fpu_reg(regno) && (*mstatus & MSTATUS_FS) == 0) {
			if (register_write_direct(target, GDB_REGNO_MSTATUS,
						set_field(*mstatus, MSTATUS_FS, 1)) != ERROR_OK)
				return ERROR_FAIL;
		} else if (is_vector_reg(regno) && (*mstatus & MSTATUS_VS) == 0) {
			if (register_write_direct(target, GDB_REGNO_MSTATUS,
						set_field(*mstatus, MSTATUS_VS, 1)) != ERROR_OK)
				return ERROR_FAIL;
		}
	} else {
		*mstatus = 0;
	}
	return ERROR_OK;
}
1108
1109 static int cleanup_after_register_access(struct target *target,
1110 uint64_t mstatus, int regno)
1111 {
1112 if ((is_fpu_reg(regno) && (mstatus & MSTATUS_FS) == 0) ||
1113 (is_vector_reg(regno) && (mstatus & MSTATUS_VS) == 0))
1114 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
1115 return ERROR_FAIL;
1116 return ERROR_OK;
1117 }
1118
/* Where a scratch buffer physically lives, as seen by the debugger. */
typedef enum {
	SPACE_DM_DATA,
	SPACE_DMI_PROGBUF,
	SPACE_DMI_RAM
} memory_space_t;

/* A small scratch buffer that is visible both to the hart (at hart_address)
 * and to the debugger (at debug_address, whose meaning depends on
 * memory_space: a data/progbuf word index, or a RAM address). */
typedef struct {
	/* How can the debugger access this memory? */
	memory_space_t memory_space;
	/* Memory address to access the scratch memory from the hart. */
	riscv_addr_t hart_address;
	/* Memory address to access the scratch memory from the debugger. */
	riscv_addr_t debug_address;
	/* Non-NULL only when the scratch came from the target's working area. */
	struct working_area *area;
} scratch_mem_t;
1134
1135 /**
1136 * Find some scratch memory to be used with the given program.
1137 */
1138 static int scratch_reserve(struct target *target,
1139 scratch_mem_t *scratch,
1140 struct riscv_program *program,
1141 unsigned size_bytes)
1142 {
1143 riscv_addr_t alignment = 1;
1144 while (alignment < size_bytes)
1145 alignment *= 2;
1146
1147 scratch->area = NULL;
1148
1149 riscv013_info_t *info = get_info(target);
1150
1151 /* Option 1: See if data# registers can be used as the scratch memory */
1152 if (info->dataaccess == 1) {
1153 /* Sign extend dataaddr. */
1154 scratch->hart_address = info->dataaddr;
1155 if (info->dataaddr & (1<<11))
1156 scratch->hart_address |= 0xfffffffffffff000ULL;
1157 /* Align. */
1158 scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);
1159
1160 if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 >=
1161 info->datasize) {
1162 scratch->memory_space = SPACE_DM_DATA;
1163 scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
1164 return ERROR_OK;
1165 }
1166 }
1167
1168 /* Option 2: See if progbuf can be used as the scratch memory */
1169 if (examine_progbuf(target) != ERROR_OK)
1170 return ERROR_FAIL;
1171
1172 /* Allow for ebreak at the end of the program. */
1173 unsigned program_size = (program->instruction_count + 1) * 4;
1174 scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
1175 ~(alignment - 1);
1176 if ((info->progbuf_writable == YNM_YES) &&
1177 ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 >=
1178 info->progbufsize)) {
1179 scratch->memory_space = SPACE_DMI_PROGBUF;
1180 scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
1181 return ERROR_OK;
1182 }
1183
1184 /* Option 3: User-configured memory area as scratch RAM */
1185 if (target_alloc_working_area(target, size_bytes + alignment - 1,
1186 &scratch->area) == ERROR_OK) {
1187 scratch->hart_address = (scratch->area->address + alignment - 1) &
1188 ~(alignment - 1);
1189 scratch->memory_space = SPACE_DMI_RAM;
1190 scratch->debug_address = scratch->hart_address;
1191 return ERROR_OK;
1192 }
1193
1194 LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
1195 "a work area with 'configure -work-area-phys'.", size_bytes);
1196 return ERROR_FAIL;
1197 }
1198
1199 static int scratch_release(struct target *target,
1200 scratch_mem_t *scratch)
1201 {
1202 return target_free_working_area(target, scratch->area);
1203 }
1204
1205 static int scratch_read64(struct target *target, scratch_mem_t *scratch,
1206 uint64_t *value)
1207 {
1208 uint32_t v;
1209 switch (scratch->memory_space) {
1210 case SPACE_DM_DATA:
1211 if (dmi_read(target, &v, DM_DATA0 + scratch->debug_address) != ERROR_OK)
1212 return ERROR_FAIL;
1213 *value = v;
1214 if (dmi_read(target, &v, DM_DATA1 + scratch->debug_address) != ERROR_OK)
1215 return ERROR_FAIL;
1216 *value |= ((uint64_t) v) << 32;
1217 break;
1218 case SPACE_DMI_PROGBUF:
1219 if (dmi_read(target, &v, DM_PROGBUF0 + scratch->debug_address) != ERROR_OK)
1220 return ERROR_FAIL;
1221 *value = v;
1222 if (dmi_read(target, &v, DM_PROGBUF1 + scratch->debug_address) != ERROR_OK)
1223 return ERROR_FAIL;
1224 *value |= ((uint64_t) v) << 32;
1225 break;
1226 case SPACE_DMI_RAM:
1227 {
1228 uint8_t buffer[8] = {0};
1229 if (read_memory(target, scratch->debug_address, 4, 2, buffer, 4) != ERROR_OK)
1230 return ERROR_FAIL;
1231 *value = buffer[0] |
1232 (((uint64_t) buffer[1]) << 8) |
1233 (((uint64_t) buffer[2]) << 16) |
1234 (((uint64_t) buffer[3]) << 24) |
1235 (((uint64_t) buffer[4]) << 32) |
1236 (((uint64_t) buffer[5]) << 40) |
1237 (((uint64_t) buffer[6]) << 48) |
1238 (((uint64_t) buffer[7]) << 56);
1239 }
1240 break;
1241 }
1242 return ERROR_OK;
1243 }
1244
1245 static int scratch_write64(struct target *target, scratch_mem_t *scratch,
1246 uint64_t value)
1247 {
1248 switch (scratch->memory_space) {
1249 case SPACE_DM_DATA:
1250 dmi_write(target, DM_DATA0 + scratch->debug_address, value);
1251 dmi_write(target, DM_DATA1 + scratch->debug_address, value >> 32);
1252 break;
1253 case SPACE_DMI_PROGBUF:
1254 dmi_write(target, DM_PROGBUF0 + scratch->debug_address, value);
1255 dmi_write(target, DM_PROGBUF1 + scratch->debug_address, value >> 32);
1256 break;
1257 case SPACE_DMI_RAM:
1258 {
1259 uint8_t buffer[8] = {
1260 value,
1261 value >> 8,
1262 value >> 16,
1263 value >> 24,
1264 value >> 32,
1265 value >> 40,
1266 value >> 48,
1267 value >> 56
1268 };
1269 if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1270 return ERROR_FAIL;
1271 }
1272 break;
1273 }
1274 return ERROR_OK;
1275 }
1276
1277 /** Return register size in bits. */
1278 static unsigned register_size(struct target *target, unsigned number)
1279 {
1280 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1281 * when this function is called during examine(). */
1282 if (target->reg_cache)
1283 return target->reg_cache->reg_list[number].size;
1284 else
1285 return riscv_xlen(target);
1286 }
1287
1288 static bool has_sufficient_progbuf(struct target *target, unsigned size)
1289 {
1290 RISCV013_INFO(info);
1291 RISCV_INFO(r);
1292
1293 return info->progbufsize + r->impebreak >= size;
1294 }
1295
1296 /**
1297 * Immediately write the new value to the requested register. This mechanism
1298 * bypasses any caches.
1299 */
1300 static int register_write_direct(struct target *target, unsigned number,
1301 uint64_t value)
1302 {
1303 LOG_DEBUG("{%d} %s <- 0x%" PRIx64, riscv_current_hartid(target),
1304 gdb_regno_name(number), value);
1305
1306 int result = register_write_abstract(target, number, value,
1307 register_size(target, number));
1308 if (result == ERROR_OK || !has_sufficient_progbuf(target, 2) ||
1309 !riscv_is_halted(target))
1310 return result;
1311
1312 struct riscv_program program;
1313 riscv_program_init(&program, target);
1314
1315 uint64_t s0;
1316 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1317 return ERROR_FAIL;
1318
1319 uint64_t mstatus;
1320 if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
1321 return ERROR_FAIL;
1322
1323 scratch_mem_t scratch;
1324 bool use_scratch = false;
1325 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
1326 riscv_supports_extension(target, 'D') &&
1327 riscv_xlen(target) < 64) {
1328 /* There are no instructions to move all the bits from a register, so
1329 * we need to use some scratch RAM. */
1330 use_scratch = true;
1331 riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));
1332
1333 if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
1334 return ERROR_FAIL;
1335
1336 if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
1337 != ERROR_OK) {
1338 scratch_release(target, &scratch);
1339 return ERROR_FAIL;
1340 }
1341
1342 if (scratch_write64(target, &scratch, value) != ERROR_OK) {
1343 scratch_release(target, &scratch);
1344 return ERROR_FAIL;
1345 }
1346
1347 } else if (number == GDB_REGNO_VTYPE) {
1348 riscv_program_insert(&program, csrr(S0, CSR_VL));
1349 riscv_program_insert(&program, vsetvli(ZERO, S0, value));
1350
1351 } else {
1352 if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
1353 return ERROR_FAIL;
1354
1355 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
1356 if (riscv_supports_extension(target, 'D'))
1357 riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
1358 else
1359 riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
1360 } else if (number == GDB_REGNO_VL) {
1361 /* "The XLEN-bit-wide read-only vl CSR can only be updated by the
1362 * vsetvli and vsetvl instructions, and the fault-only-rst vector
1363 * load instruction variants." */
1364 riscv_reg_t vtype;
1365 if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1366 return ERROR_FAIL;
1367 if (riscv_program_insert(&program, vsetvli(ZERO, S0, vtype)) != ERROR_OK)
1368 return ERROR_FAIL;
1369 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
1370 riscv_program_csrw(&program, S0, number);
1371 } else {
1372 LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
1373 return ERROR_FAIL;
1374 }
1375 }
1376
1377 int exec_out = riscv_program_exec(&program, target);
1378 /* Don't message on error. Probably the register doesn't exist. */
1379 if (exec_out == ERROR_OK && target->reg_cache) {
1380 struct reg *reg = &target->reg_cache->reg_list[number];
1381 buf_set_u64(reg->value, 0, reg->size, value);
1382 }
1383
1384 if (use_scratch)
1385 scratch_release(target, &scratch);
1386
1387 if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
1388 return ERROR_FAIL;
1389
1390 /* Restore S0. */
1391 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1392 return ERROR_FAIL;
1393
1394 return exec_out;
1395 }
1396
1397 /** Read register value from the target. Also update the cached value. */
1398 static int register_read(struct target *target, uint64_t *value, uint32_t number)
1399 {
1400 if (number == GDB_REGNO_ZERO) {
1401 *value = 0;
1402 return ERROR_OK;
1403 }
1404 int result = register_read_direct(target, value, number);
1405 if (result != ERROR_OK)
1406 return ERROR_FAIL;
1407 if (target->reg_cache) {
1408 struct reg *reg = &target->reg_cache->reg_list[number];
1409 buf_set_u64(reg->value, 0, reg->size, *value);
1410 }
1411 return ERROR_OK;
1412 }
1413
/** Actually read registers from the target right now.
 *
 * Tries an abstract command first; for non-GPR registers, falls back to a
 * small program that moves the value into S0 (or, for a 64-bit FPR on a
 * 32-bit hart, stores it to scratch memory).  S0 and, where needed,
 * mstatus are saved and restored around the program. */
static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
{
	int result = register_read_abstract(target, value, number,
			register_size(target, number));

	/* Program-buffer fallback; GPRs have no fallback path here. */
	if (result != ERROR_OK &&
			has_sufficient_progbuf(target, 2) &&
			number > GDB_REGNO_XPR31) {
		struct riscv_program program;
		riscv_program_init(&program, target);

		scratch_mem_t scratch;
		bool use_scratch = false;

		/* S0 is used as the staging register; save it. */
		riscv_reg_t s0;
		if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
			return ERROR_FAIL;

		/* Write program to move data into s0. */

		/* Enable FS/VS in mstatus if needed; restored below. */
		uint64_t mstatus;
		if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			if (riscv_supports_extension(target, 'D')
					&& riscv_xlen(target) < 64) {
				/* There are no instructions to move all the bits from a
				 * register, so we need to use some scratch RAM. */
				riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
							0));

				if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
					return ERROR_FAIL;
				use_scratch = true;

				/* Point S0 at the scratch area the fsd will store to. */
				if (register_write_direct(target, GDB_REGNO_S0,
							scratch.hart_address) != ERROR_OK) {
					scratch_release(target, &scratch);
					return ERROR_FAIL;
				}
			} else if (riscv_supports_extension(target, 'D')) {
				riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
			} else {
				riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
			}
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrr(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register: %s", gdb_regno_name(number));
			return ERROR_FAIL;
		}

		/* Execute program. */
		result = riscv_program_exec(&program, target);
		/* Don't message on error. Probably the register doesn't exist. */

		if (use_scratch) {
			result = scratch_read64(target, &scratch, value);
			scratch_release(target, &scratch);
			if (result != ERROR_OK)
				return result;
		} else {
			/* Read S0 */
			if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
		}

		if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		/* Restore S0. */
		if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
			return ERROR_FAIL;
	}

	if (result == ERROR_OK) {
		LOG_DEBUG("{%d} %s = 0x%" PRIx64, riscv_current_hartid(target),
				gdb_regno_name(number), *value);
	}

	return result;
}
1498
1499 static int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
1500 {
1501 time_t start = time(NULL);
1502 while (1) {
1503 uint32_t value;
1504 if (dmstatus_read(target, &value, false) != ERROR_OK)
1505 return ERROR_FAIL;
1506 if (dmstatus)
1507 *dmstatus = value;
1508 if (!get_field(value, DM_DMSTATUS_AUTHBUSY))
1509 break;
1510 if (time(NULL) - start > riscv_command_timeout_sec) {
1511 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1512 "Increase the timeout with riscv set_command_timeout_sec.",
1513 riscv_command_timeout_sec,
1514 value);
1515 return ERROR_FAIL;
1516 }
1517 }
1518
1519 return ERROR_OK;
1520 }
1521
1522 /*** OpenOCD target functions. ***/
1523
1524 static void deinit_target(struct target *target)
1525 {
1526 LOG_DEBUG("riscv_deinit_target()");
1527 struct riscv_info *info = target->arch_info;
1528 if (!info)
1529 return;
1530
1531 free(info->version_specific);
1532 /* TODO: free register arch_info */
1533 info->version_specific = NULL;
1534 }
1535
1536 static int set_haltgroup(struct target *target, bool *supported)
1537 {
1538 uint32_t write = set_field(DM_DMCS2_HGWRITE, DM_DMCS2_GROUP, target->smp);
1539 if (dmi_write(target, DM_DMCS2, write) != ERROR_OK)
1540 return ERROR_FAIL;
1541 uint32_t read;
1542 if (dmi_read(target, &read, DM_DMCS2) != ERROR_OK)
1543 return ERROR_FAIL;
1544 *supported = get_field(read, DM_DMCS2_GROUP) == (unsigned)target->smp;
1545 return ERROR_OK;
1546 }
1547
1548 static int discover_vlenb(struct target *target)
1549 {
1550 RISCV_INFO(r);
1551 riscv_reg_t vlenb;
1552
1553 if (register_read(target, &vlenb, GDB_REGNO_VLENB) != ERROR_OK) {
1554 LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
1555 target_name(target));
1556 r->vlenb = 0;
1557 return ERROR_OK;
1558 }
1559 r->vlenb = vlenb;
1560
1561 LOG_INFO("Vector support with vlenb=%d", r->vlenb);
1562
1563 return ERROR_OK;
1564 }
1565
/* Examine the target: verify the DTM, reset and activate the Debug Module,
 * discover its capabilities (hartsellen, data/progbuf sizes), enumerate the
 * harts, determine XLEN and misa for this hart, and initialize the register
 * cache.  Called once per target at startup and after authentication. */
static int examine(struct target *target)
{
	/* Don't need to select dbus, since the first thing we do is read dtmcontrol. */

	uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
	LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
	LOG_DEBUG("  dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
	LOG_DEBUG("  idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
	LOG_DEBUG("  dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
	LOG_DEBUG("  abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
	LOG_DEBUG("  version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
	if (dtmcontrol == 0) {
		LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
		return ERROR_FAIL;
	}
	/* This driver only speaks DTM version 1 (debug spec 0.13). */
	if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
		LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
				get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
		return ERROR_FAIL;
	}

	riscv013_info_t *info = get_info(target);
	/* TODO: This won't be true if there are multiple DMs. */
	info->index = target->coreid;
	info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
	info->dtmcs_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);

	/* Reset the Debug Module. */
	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;
	/* Only the first target sharing this DM performs the reset. */
	if (!dm->was_reset) {
		dmi_write(target, DM_DMCONTROL, 0);
		dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
		dm->was_reset = true;
	}

	/* Write all-ones to the hart-select fields: the read-back tells us
	 * how many HARTSEL bits are implemented, and whether HASEL sticks. */
	dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_HARTSELLO |
			DM_DMCONTROL_HARTSELHI | DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_HASEL);
	uint32_t dmcontrol;
	if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
		return ERROR_FAIL;

	if (!get_field(dmcontrol, DM_DMCONTROL_DMACTIVE)) {
		LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
				dmcontrol);
		return ERROR_FAIL;
	}

	dm->hasel_supported = get_field(dmcontrol, DM_DMCONTROL_HASEL);

	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("dmstatus:  0x%08x", dmstatus);
	int dmstatus_version = get_field(dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		/* Error was already printed out in dmstatus_read(). */
		return ERROR_FAIL;
	}

	/* Count how many of the all-ones hartsel bits read back as set. */
	uint32_t hartsel =
		(get_field(dmcontrol, DM_DMCONTROL_HARTSELHI) <<
		 DM_DMCONTROL_HARTSELLO_LENGTH) |
		get_field(dmcontrol, DM_DMCONTROL_HARTSELLO);
	info->hartsellen = 0;
	while (hartsel & 1) {
		info->hartsellen++;
		hartsel >>= 1;
	}
	LOG_DEBUG("hartsellen=%d", info->hartsellen);

	uint32_t hartinfo;
	if (dmi_read(target, &hartinfo, DM_HARTINFO) != ERROR_OK)
		return ERROR_FAIL;

	/* Cache the data-register shadow-memory description for scratch use. */
	info->datasize = get_field(hartinfo, DM_HARTINFO_DATASIZE);
	info->dataaccess = get_field(hartinfo, DM_HARTINFO_DATAACCESS);
	info->dataaddr = get_field(hartinfo, DM_HARTINFO_DATAADDR);

	if (!get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", dmstatus);
		/* If we return ERROR_FAIL here, then in a multicore setup the next
		 * core won't be examined, which means we won't set up the
		 * authentication commands for them, which means the config script
		 * needs to be a lot more complex. */
		return ERROR_OK;
	}

	if (dmi_read(target, &info->sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	/* Check that abstract data registers are accessible. */
	uint32_t abstractcs;
	if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
		return ERROR_FAIL;
	info->datacount = get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT);
	info->progbufsize = get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE);

	LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);

	RISCV_INFO(r);
	r->impebreak = get_field(dmstatus, DM_DMSTATUS_IMPEBREAK);

	if (!has_sufficient_progbuf(target, 2)) {
		LOG_WARNING("We won't be able to execute fence instructions on this "
				"target. Memory may not always appear consistent. "
				"(progbufsize=%d, impebreak=%d)", info->progbufsize,
				r->impebreak);
	}

	if (info->progbufsize < 4 && riscv_enable_virtual) {
		LOG_ERROR("set_enable_virtual is not available on this target. It "
				"requires a program buffer size of at least 4. (progbufsize=%d) "
				"Use `riscv set_enable_virtual off` to continue."
				, info->progbufsize);
	}

	/* Before doing anything else we must first enumerate the harts. */
	if (dm->hart_count < 0) {
		/* Select each hart index in turn until one reads back as
		 * nonexistent (or we hit the hartsellen/RISCV_MAX_HARTS limit). */
		for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
			r->current_hartid = i;
			if (riscv013_select_current_hart(target) != ERROR_OK)
				return ERROR_FAIL;

			uint32_t s;
			if (dmstatus_read(target, &s, true) != ERROR_OK)
				return ERROR_FAIL;
			if (get_field(s, DM_DMSTATUS_ANYNONEXISTENT))
				break;
			dm->hart_count = i + 1;

			/* Acknowledge any pending reset so havereset doesn't linger. */
			if (get_field(s, DM_DMSTATUS_ANYHAVERESET))
				dmi_write(target, DM_DMCONTROL,
						set_hartsel(DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_ACKHAVERESET, i));
		}

		LOG_DEBUG("Detected %d harts.", dm->hart_count);
	}

	r->current_hartid = target->coreid;

	if (dm->hart_count == 0) {
		LOG_ERROR("No harts found!");
		return ERROR_FAIL;
	}

	/* Don't call any riscv_* functions until after we've counted the number of
	 * cores and initialized registers. */

	if (riscv013_select_current_hart(target) != ERROR_OK)
		return ERROR_FAIL;

	/* The hart must be halted to probe XLEN/misa; remember whether we did
	 * the halting so we can resume it afterwards. */
	bool halted = riscv_is_halted(target);
	if (!halted) {
		if (riscv013_halt_go(target) != ERROR_OK) {
			LOG_ERROR("Fatal: Hart %d failed to halt during examine()", r->current_hartid);
			return ERROR_FAIL;
		}
	}

	/* Without knowing anything else we can at least mess with the
	 * program buffer. */
	r->debug_buffer_size = info->progbufsize;

	/* Probe XLEN: a 64-bit abstract read of S0 only works on RV64+. */
	int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
	if (result == ERROR_OK)
		r->xlen = 64;
	else
		r->xlen = 32;

	if (register_read(target, &r->misa, GDB_REGNO_MISA)) {
		LOG_ERROR("Fatal: Failed to read MISA from hart %d.", r->current_hartid);
		return ERROR_FAIL;
	}

	if (riscv_supports_extension(target, 'V')) {
		if (discover_vlenb(target) != ERROR_OK)
			return ERROR_FAIL;
	}

	/* Now init registers based on what we discovered. */
	if (riscv_init_registers(target) != ERROR_OK)
		return ERROR_FAIL;

	/* Display this as early as possible to help people who are using
	 * really slow simulators. */
	LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, r->current_hartid, r->xlen,
			r->misa);

	/* Resume the hart if it was running when we got here. */
	if (!halted)
		riscv013_step_or_resume_current_hart(target, false, false);

	target_set_examined(target);

	if (target->smp) {
		bool haltgroup_supported;
		if (set_haltgroup(target, &haltgroup_supported) != ERROR_OK)
			return ERROR_FAIL;
		if (haltgroup_supported)
			LOG_INFO("Core %d made part of halt group %d.", target->coreid,
					target->smp);
		else
			LOG_INFO("Core %d could not be made part of halt group %d.",
					target->coreid, target->smp);
	}

	/* Some regression suites rely on seeing 'Examined RISC-V core' to know
	 * when they can connect with gdb/telnet.
	 * We will need to update those suites if we want to change that text. */
	LOG_INFO("Examined RISC-V core; found %d harts",
			riscv_count_harts(target));
	LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, r->current_hartid, r->xlen,
			r->misa);
	return ERROR_OK;
}
1785
1786 static int riscv013_authdata_read(struct target *target, uint32_t *value, unsigned int index)
1787 {
1788 if (index > 0) {
1789 LOG_ERROR("Spec 0.13 only has a single authdata register.");
1790 return ERROR_FAIL;
1791 }
1792
1793 if (wait_for_authbusy(target, NULL) != ERROR_OK)
1794 return ERROR_FAIL;
1795
1796 return dmi_read(target, value, DM_AUTHDATA);
1797 }
1798
/* Write the single 0.13 authdata register.  If this write flips the DM from
 * unauthenticated to authenticated, re-run examine() on every target sharing
 * this DM, since their earlier examination was cut short by the auth check. */
static int riscv013_authdata_write(struct target *target, uint32_t value, unsigned int index)
{
	if (index > 0) {
		LOG_ERROR("Spec 0.13 only has a single authdata register.");
		return ERROR_FAIL;
	}

	/* Sample AUTHENTICATED before and after the write to detect success. */
	uint32_t before, after;
	if (wait_for_authbusy(target, &before) != ERROR_OK)
		return ERROR_FAIL;

	dmi_write(target, DM_AUTHDATA, value);

	if (wait_for_authbusy(target, &after) != ERROR_OK)
		return ERROR_FAIL;

	if (!get_field(before, DM_DMSTATUS_AUTHENTICATED) &&
			get_field(after, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_INFO("authdata_write resulted in successful authentication");
		int result = ERROR_OK;
		dm013_info_t *dm = get_dm(target);
		if (!dm)
			return ERROR_FAIL;
		/* Examine every target on this DM; report failure if any fails,
		 * but keep going so the rest still get examined. */
		target_list_t *entry;
		list_for_each_entry(entry, &dm->target_list, list) {
			if (examine(entry->target) != ERROR_OK)
				result = ERROR_FAIL;
		}
		return result;
	}

	return ERROR_OK;
}
1832
1833 static int riscv013_hart_count(struct target *target)
1834 {
1835 dm013_info_t *dm = get_dm(target);
1836 assert(dm);
1837 return dm->hart_count;
1838 }
1839
1840 /* Try to find out the widest memory access size depending on the selected memory access methods. */
1841 static unsigned riscv013_data_bits(struct target *target)
1842 {
1843 RISCV013_INFO(info);
1844 RISCV_INFO(r);
1845
1846 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
1847 int method = r->mem_access_methods[i];
1848
1849 if (method == RISCV_MEM_ACCESS_PROGBUF) {
1850 if (has_sufficient_progbuf(target, 3))
1851 return riscv_xlen(target);
1852 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
1853 if (get_field(info->sbcs, DM_SBCS_SBACCESS128))
1854 return 128;
1855 if (get_field(info->sbcs, DM_SBCS_SBACCESS64))
1856 return 64;
1857 if (get_field(info->sbcs, DM_SBCS_SBACCESS32))
1858 return 32;
1859 if (get_field(info->sbcs, DM_SBCS_SBACCESS16))
1860 return 16;
1861 if (get_field(info->sbcs, DM_SBCS_SBACCESS8))
1862 return 8;
1863 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
1864 /* TODO: Once there is a spec for discovering abstract commands, we can
1865 * take those into account as well. For now we assume abstract commands
1866 * support XLEN-wide accesses. */
1867 return riscv_xlen(target);
1868 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
1869 /* No further mem access method to try. */
1870 break;
1871 }
1872 LOG_ERROR("Unable to determine supported data bits on this target. Assuming 32 bits.");
1873 return 32;
1874 }
1875
/* Print the 0.13-specific `riscv info` lines: per-width system-bus
 * read/write capabilities (abstract description) followed by raw DM
 * parameters.  Output text and ordering are relied on by tooling; keep
 * the strings stable. */
COMMAND_HELPER(riscv013_print_info, struct target *target)
{
	RISCV013_INFO(info);

	/* Abstract description. */
	riscv_print_info_line(CMD, "target", "memory.read_while_running8", get_field(info->sbcs, DM_SBCS_SBACCESS8));
	riscv_print_info_line(CMD, "target", "memory.write_while_running8", get_field(info->sbcs, DM_SBCS_SBACCESS8));
	riscv_print_info_line(CMD, "target", "memory.read_while_running16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "target", "memory.write_while_running16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "target", "memory.read_while_running32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "target", "memory.write_while_running32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "target", "memory.read_while_running64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "target", "memory.write_while_running64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "target", "memory.read_while_running128", get_field(info->sbcs, DM_SBCS_SBACCESS128));
	riscv_print_info_line(CMD, "target", "memory.write_while_running128", get_field(info->sbcs, DM_SBCS_SBACCESS128));

	/* Lower level description. */
	riscv_print_info_line(CMD, "dm", "abits", info->abits);
	riscv_print_info_line(CMD, "dm", "progbufsize", info->progbufsize);
	riscv_print_info_line(CMD, "dm", "sbversion", get_field(info->sbcs, DM_SBCS_SBVERSION));
	riscv_print_info_line(CMD, "dm", "sbasize", get_field(info->sbcs, DM_SBCS_SBASIZE));
	riscv_print_info_line(CMD, "dm", "sbaccess128", get_field(info->sbcs, DM_SBCS_SBACCESS128));
	riscv_print_info_line(CMD, "dm", "sbaccess64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "dm", "sbaccess32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "dm", "sbaccess16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "dm", "sbaccess8", get_field(info->sbcs, DM_SBCS_SBACCESS8));

	/* Best effort: only print the auth state if dmstatus is readable. */
	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) == ERROR_OK)
		riscv_print_info_line(CMD, "dm", "authenticated", get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED));

	return 0;
}
1909
1910 static int prep_for_vector_access(struct target *target, uint64_t *vtype,
1911 uint64_t *vl, unsigned *debug_vl)
1912 {
1913 RISCV_INFO(r);
1914 /* TODO: this continuous save/restore is terrible for performance. */
1915 /* Write vtype and vl. */
1916 unsigned encoded_vsew;
1917 switch (riscv_xlen(target)) {
1918 case 32:
1919 encoded_vsew = 2;
1920 break;
1921 case 64:
1922 encoded_vsew = 3;
1923 break;
1924 default:
1925 LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target));
1926 return ERROR_FAIL;
1927 }
1928
1929 /* Save vtype and vl. */
1930 if (register_read(target, vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1931 return ERROR_FAIL;
1932 if (register_read(target, vl, GDB_REGNO_VL) != ERROR_OK)
1933 return ERROR_FAIL;
1934
1935 if (register_write_direct(target, GDB_REGNO_VTYPE, encoded_vsew << 3) != ERROR_OK)
1936 return ERROR_FAIL;
1937 *debug_vl = DIV_ROUND_UP(r->vlenb * 8, riscv_xlen(target));
1938 if (register_write_direct(target, GDB_REGNO_VL, *debug_vl) != ERROR_OK)
1939 return ERROR_FAIL;
1940
1941 return ERROR_OK;
1942 }
1943
1944 static int cleanup_after_vector_access(struct target *target, uint64_t vtype,
1945 uint64_t vl)
1946 {
1947 /* Restore vtype and vl. */
1948 if (register_write_direct(target, GDB_REGNO_VTYPE, vtype) != ERROR_OK)
1949 return ERROR_FAIL;
1950 if (register_write_direct(target, GDB_REGNO_VL, vl) != ERROR_OK)
1951 return ERROR_FAIL;
1952 return ERROR_OK;
1953 }
1954
1955 static int riscv013_get_register_buf(struct target *target,
1956 uint8_t *value, int regno)
1957 {
1958 assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);
1959
1960 if (riscv_select_current_hart(target) != ERROR_OK)
1961 return ERROR_FAIL;
1962
1963 riscv_reg_t s0;
1964 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1965 return ERROR_FAIL;
1966
1967 uint64_t mstatus;
1968 if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
1969 return ERROR_FAIL;
1970
1971 uint64_t vtype, vl;
1972 unsigned debug_vl;
1973 if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
1974 return ERROR_FAIL;
1975
1976 unsigned vnum = regno - GDB_REGNO_V0;
1977 unsigned xlen = riscv_xlen(target);
1978
1979 struct riscv_program program;
1980 riscv_program_init(&program, target);
1981 riscv_program_insert(&program, vmv_x_s(S0, vnum));
1982 riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
1983
1984 int result = ERROR_OK;
1985 for (unsigned i = 0; i < debug_vl; i++) {
1986 /* Executing the program might result in an exception if there is some
1987 * issue with the vector implementation/instructions we're using. If that
1988 * happens, attempt to restore as usual. We may have clobbered the
1989 * vector register we tried to read already.
1990 * For other failures, we just return error because things are probably
1991 * so messed up that attempting to restore isn't going to help. */
1992 result = riscv_program_exec(&program, target);
1993 if (result == ERROR_OK) {
1994 uint64_t v;
1995 if (register_read_direct(target, &v, GDB_REGNO_S0) != ERROR_OK)
1996 return ERROR_FAIL;
1997 buf_set_u64(value, xlen * i, xlen, v);
1998 } else {
1999 break;
2000 }
2001 }
2002
2003 if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
2004 return ERROR_FAIL;
2005
2006 if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
2007 return ERROR_FAIL;
2008 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
2009 return ERROR_FAIL;
2010
2011 return result;
2012 }
2013
2014 static int riscv013_set_register_buf(struct target *target,
2015 int regno, const uint8_t *value)
2016 {
2017 assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);
2018
2019 if (riscv_select_current_hart(target) != ERROR_OK)
2020 return ERROR_FAIL;
2021
2022 riscv_reg_t s0;
2023 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
2024 return ERROR_FAIL;
2025
2026 uint64_t mstatus;
2027 if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
2028 return ERROR_FAIL;
2029
2030 uint64_t vtype, vl;
2031 unsigned debug_vl;
2032 if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
2033 return ERROR_FAIL;
2034
2035 unsigned vnum = regno - GDB_REGNO_V0;
2036 unsigned xlen = riscv_xlen(target);
2037
2038 struct riscv_program program;
2039 riscv_program_init(&program, target);
2040 riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
2041 int result = ERROR_OK;
2042 for (unsigned i = 0; i < debug_vl; i++) {
2043 if (register_write_direct(target, GDB_REGNO_S0,
2044 buf_get_u64(value, xlen * i, xlen)) != ERROR_OK)
2045 return ERROR_FAIL;
2046 result = riscv_program_exec(&program, target);
2047 if (result != ERROR_OK)
2048 break;
2049 }
2050
2051 if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
2052 return ERROR_FAIL;
2053
2054 if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
2055 return ERROR_FAIL;
2056 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
2057 return ERROR_FAIL;
2058
2059 return result;
2060 }
2061
2062 static uint32_t sb_sbaccess(unsigned int size_bytes)
2063 {
2064 switch (size_bytes) {
2065 case 1:
2066 return set_field(0, DM_SBCS_SBACCESS, 0);
2067 case 2:
2068 return set_field(0, DM_SBCS_SBACCESS, 1);
2069 case 4:
2070 return set_field(0, DM_SBCS_SBACCESS, 2);
2071 case 8:
2072 return set_field(0, DM_SBCS_SBACCESS, 3);
2073 case 16:
2074 return set_field(0, DM_SBCS_SBACCESS, 4);
2075 }
2076 assert(0);
2077 return 0;
2078 }
2079
2080 static int sb_write_address(struct target *target, target_addr_t address,
2081 bool ensure_success)
2082 {
2083 RISCV013_INFO(info);
2084 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2085 /* There currently is no support for >64-bit addresses in OpenOCD. */
2086 if (sbasize > 96)
2087 dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS3, 0, false, false);
2088 if (sbasize > 64)
2089 dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS2, 0, false, false);
2090 if (sbasize > 32)
2091 dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS1, address >> 32, false, false);
2092 return dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS0, address,
2093 false, ensure_success);
2094 }
2095
2096 static int batch_run(const struct target *target, struct riscv_batch *batch)
2097 {
2098 RISCV013_INFO(info);
2099 RISCV_INFO(r);
2100 if (r->reset_delays_wait >= 0) {
2101 r->reset_delays_wait -= batch->used_scans;
2102 if (r->reset_delays_wait <= 0) {
2103 batch->idle_count = 0;
2104 info->dmi_busy_delay = 0;
2105 info->ac_busy_delay = 0;
2106 }
2107 }
2108 return riscv_batch_run(batch);
2109 }
2110
2111 static int sba_supports_access(struct target *target, unsigned int size_bytes)
2112 {
2113 RISCV013_INFO(info);
2114 switch (size_bytes) {
2115 case 1:
2116 return get_field(info->sbcs, DM_SBCS_SBACCESS8);
2117 case 2:
2118 return get_field(info->sbcs, DM_SBCS_SBACCESS16);
2119 case 4:
2120 return get_field(info->sbcs, DM_SBCS_SBACCESS32);
2121 case 8:
2122 return get_field(info->sbcs, DM_SBCS_SBACCESS64);
2123 case 16:
2124 return get_field(info->sbcs, DM_SBCS_SBACCESS128);
2125 default:
2126 return 0;
2127 }
2128 }
2129
2130 static int sample_memory_bus_v1(struct target *target,
2131 struct riscv_sample_buf *buf,
2132 const riscv_sample_config_t *config,
2133 int64_t until_ms)
2134 {
2135 RISCV013_INFO(info);
2136 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2137 if (sbasize > 64) {
2138 LOG_ERROR("Memory sampling is only implemented for sbasize <= 64.");
2139 return ERROR_NOT_IMPLEMENTED;
2140 }
2141
2142 if (get_field(info->sbcs, DM_SBCS_SBVERSION) != 1) {
2143 LOG_ERROR("Memory sampling is only implemented for SBA version 1.");
2144 return ERROR_NOT_IMPLEMENTED;
2145 }
2146
2147 uint32_t sbcs = 0;
2148 uint32_t sbcs_valid = false;
2149
2150 uint32_t sbaddress0 = 0;
2151 bool sbaddress0_valid = false;
2152 uint32_t sbaddress1 = 0;
2153 bool sbaddress1_valid = false;
2154
2155 /* How often to read each value in a batch. */
2156 const unsigned int repeat = 5;
2157
2158 unsigned int enabled_count = 0;
2159 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2160 if (config->bucket[i].enabled)
2161 enabled_count++;
2162 }
2163
2164 while (timeval_ms() < until_ms) {
2165 /*
2166 * batch_run() adds to the batch, so we can't simply reuse the same
2167 * batch over and over. So we create a new one every time through the
2168 * loop.
2169 */
2170 struct riscv_batch *batch = riscv_batch_alloc(
2171 target, 1 + enabled_count * 5 * repeat,
2172 info->dmi_busy_delay + info->bus_master_read_delay);
2173 if (!batch)
2174 return ERROR_FAIL;
2175
2176 unsigned int result_bytes = 0;
2177 for (unsigned int n = 0; n < repeat; n++) {
2178 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2179 if (config->bucket[i].enabled) {
2180 if (!sba_supports_access(target, config->bucket[i].size_bytes)) {
2181 LOG_ERROR("Hardware does not support SBA access for %d-byte memory sampling.",
2182 config->bucket[i].size_bytes);
2183 return ERROR_NOT_IMPLEMENTED;
2184 }
2185
2186 uint32_t sbcs_write = DM_SBCS_SBREADONADDR;
2187 if (enabled_count == 1)
2188 sbcs_write |= DM_SBCS_SBREADONDATA;
2189 sbcs_write |= sb_sbaccess(config->bucket[i].size_bytes);
2190 if (!sbcs_valid || sbcs_write != sbcs) {
2191 riscv_batch_add_dmi_write(batch, DM_SBCS, sbcs_write);
2192 sbcs = sbcs_write;
2193 sbcs_valid = true;
2194 }
2195
2196 if (sbasize > 32 &&
2197 (!sbaddress1_valid ||
2198 sbaddress1 != config->bucket[i].address >> 32)) {
2199 sbaddress1 = config->bucket[i].address >> 32;
2200 riscv_batch_add_dmi_write(batch, DM_SBADDRESS1, sbaddress1);
2201 sbaddress1_valid = true;
2202 }
2203 if (!sbaddress0_valid ||
2204 sbaddress0 != (config->bucket[i].address & 0xffffffff)) {
2205 sbaddress0 = config->bucket[i].address;
2206 riscv_batch_add_dmi_write(batch, DM_SBADDRESS0, sbaddress0);
2207 sbaddress0_valid = true;
2208 }
2209 if (config->bucket[i].size_bytes > 4)
2210 riscv_batch_add_dmi_read(batch, DM_SBDATA1);
2211 riscv_batch_add_dmi_read(batch, DM_SBDATA0);
2212 result_bytes += 1 + config->bucket[i].size_bytes;
2213 }
2214 }
2215 }
2216
2217 if (buf->used + result_bytes >= buf->size) {
2218 riscv_batch_free(batch);
2219 break;
2220 }
2221
2222 size_t sbcs_key = riscv_batch_add_dmi_read(batch, DM_SBCS);
2223
2224 int result = batch_run(target, batch);
2225 if (result != ERROR_OK)
2226 return result;
2227
2228 uint32_t sbcs_read = riscv_batch_get_dmi_read_data(batch, sbcs_key);
2229 if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2230 /* Discard this batch (too much hassle to try to recover partial
2231 * data) and try again with a larger delay. */
2232 info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
2233 dmi_write(target, DM_SBCS, sbcs_read | DM_SBCS_SBBUSYERROR | DM_SBCS_SBERROR);
2234 riscv_batch_free(batch);
2235 continue;
2236 }
2237 if (get_field(sbcs_read, DM_SBCS_SBERROR)) {
2238 /* The memory we're sampling was unreadable, somehow. Give up. */
2239 dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR | DM_SBCS_SBERROR);
2240 riscv_batch_free(batch);
2241 return ERROR_FAIL;
2242 }
2243
2244 unsigned int read = 0;
2245 for (unsigned int n = 0; n < repeat; n++) {
2246 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2247 if (config->bucket[i].enabled) {
2248 assert(i < RISCV_SAMPLE_BUF_TIMESTAMP_BEFORE);
2249 uint64_t value = 0;
2250 if (config->bucket[i].size_bytes > 4)
2251 value = ((uint64_t)riscv_batch_get_dmi_read_data(batch, read++)) << 32;
2252 value |= riscv_batch_get_dmi_read_data(batch, read++);
2253
2254 buf->buf[buf->used] = i;
2255 buf_set_u64(buf->buf + buf->used + 1, 0, config->bucket[i].size_bytes * 8, value);
2256 buf->used += 1 + config->bucket[i].size_bytes;
2257 }
2258 }
2259 }
2260
2261 riscv_batch_free(batch);
2262 }
2263
2264 return ERROR_OK;
2265 }
2266
2267 static int sample_memory(struct target *target,
2268 struct riscv_sample_buf *buf,
2269 riscv_sample_config_t *config,
2270 int64_t until_ms)
2271 {
2272 if (!config->enabled)
2273 return ERROR_OK;
2274
2275 return sample_memory_bus_v1(target, buf, config, until_ms);
2276 }
2277
2278 static int init_target(struct command_context *cmd_ctx,
2279 struct target *target)
2280 {
2281 LOG_DEBUG("init");
2282 RISCV_INFO(generic_info);
2283
2284 generic_info->get_register = &riscv013_get_register;
2285 generic_info->set_register = &riscv013_set_register;
2286 generic_info->get_register_buf = &riscv013_get_register_buf;
2287 generic_info->set_register_buf = &riscv013_set_register_buf;
2288 generic_info->select_current_hart = &riscv013_select_current_hart;
2289 generic_info->is_halted = &riscv013_is_halted;
2290 generic_info->resume_go = &riscv013_resume_go;
2291 generic_info->step_current_hart = &riscv013_step_current_hart;
2292 generic_info->on_halt = &riscv013_on_halt;
2293 generic_info->resume_prep = &riscv013_resume_prep;
2294 generic_info->halt_prep = &riscv013_halt_prep;
2295 generic_info->halt_go = &riscv013_halt_go;
2296 generic_info->on_step = &riscv013_on_step;
2297 generic_info->halt_reason = &riscv013_halt_reason;
2298 generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
2299 generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
2300 generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
2301 generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
2302 generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
2303 generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
2304 generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
2305 generic_info->authdata_read = &riscv013_authdata_read;
2306 generic_info->authdata_write = &riscv013_authdata_write;
2307 generic_info->dmi_read = &dmi_read;
2308 generic_info->dmi_write = &dmi_write;
2309 generic_info->read_memory = read_memory;
2310 generic_info->test_sba_config_reg = &riscv013_test_sba_config_reg;
2311 generic_info->hart_count = &riscv013_hart_count;
2312 generic_info->data_bits = &riscv013_data_bits;
2313 generic_info->print_info = &riscv013_print_info;
2314 if (!generic_info->version_specific) {
2315 generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
2316 if (!generic_info->version_specific)
2317 return ERROR_FAIL;
2318 }
2319 generic_info->sample_memory = sample_memory;
2320 riscv013_info_t *info = get_info(target);
2321
2322 info->progbufsize = -1;
2323
2324 info->dmi_busy_delay = 0;
2325 info->bus_master_read_delay = 0;
2326 info->bus_master_write_delay = 0;
2327 info->ac_busy_delay = 0;
2328
2329 /* Assume all these abstract commands are supported until we learn
2330 * otherwise.
2331 * TODO: The spec allows eg. one CSR to be able to be accessed abstractly
2332 * while another one isn't. We don't track that this closely here, but in
2333 * the future we probably should. */
2334 info->abstract_read_csr_supported = true;
2335 info->abstract_write_csr_supported = true;
2336 info->abstract_read_fpr_supported = true;
2337 info->abstract_write_fpr_supported = true;
2338
2339 info->has_aampostincrement = YNM_MAYBE;
2340
2341 return ERROR_OK;
2342 }
2343
2344 static int assert_reset(struct target *target)
2345 {
2346 RISCV_INFO(r);
2347
2348 select_dmi(target);
2349
2350 uint32_t control_base = set_field(0, DM_DMCONTROL_DMACTIVE, 1);
2351
2352 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
2353 /* Run the user-supplied script if there is one. */
2354 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
2355 } else if (target->rtos) {
2356 /* There's only one target, and OpenOCD thinks each hart is a thread.
2357 * We must reset them all. */
2358
2359 /* TODO: Try to use hasel in dmcontrol */
2360
2361 /* Set haltreq for each hart. */
2362 uint32_t control = control_base;
2363
2364 control = set_hartsel(control_base, target->coreid);
2365 control = set_field(control, DM_DMCONTROL_HALTREQ,
2366 target->reset_halt ? 1 : 0);
2367 dmi_write(target, DM_DMCONTROL, control);
2368
2369 /* Assert ndmreset */
2370 control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
2371 dmi_write(target, DM_DMCONTROL, control);
2372
2373 } else {
2374 /* Reset just this hart. */
2375 uint32_t control = set_hartsel(control_base, r->current_hartid);
2376 control = set_field(control, DM_DMCONTROL_HALTREQ,
2377 target->reset_halt ? 1 : 0);
2378 control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
2379 dmi_write(target, DM_DMCONTROL, control);
2380 }
2381
2382 target->state = TARGET_RESET;
2383
2384 dm013_info_t *dm = get_dm(target);
2385 if (!dm)
2386 return ERROR_FAIL;
2387
2388 /* The DM might have gotten reset if OpenOCD called us in some reset that
2389 * involves SRST being toggled. So clear our cache which may be out of
2390 * date. */
2391 memset(dm->progbuf_cache, 0, sizeof(dm->progbuf_cache));
2392
2393 return ERROR_OK;
2394 }
2395
/* Deassert reset and wait for the hart(s) to come back. Keeps haltreq set if
 * target->reset_halt is requested, polls dmstatus until the hart is no longer
 * unavailable, then acknowledges havereset. */
static int deassert_reset(struct target *target)
{
	RISCV_INFO(r);
	RISCV013_INFO(info);
	select_dmi(target);

	/* Clear the reset, but make sure haltreq is still set */
	uint32_t control = 0, control_haltreq;
	control = set_field(control, DM_DMCONTROL_DMACTIVE, 1);
	control_haltreq = set_field(control, DM_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
	/* Writing dmcontrol without NDMRESET set releases the reset. */
	dmi_write(target, DM_DMCONTROL,
			set_hartsel(control_haltreq, r->current_hartid));

	uint32_t dmstatus;
	/* Save the delay so it can be restored after the post-reset polling,
	 * which may have inflated it. */
	int dmi_busy_delay = info->dmi_busy_delay;
	time_t start = time(NULL);

	/* With an RTOS, iterate until we reach this target's coreid; without one,
	 * only the current hart is handled and we break out after one pass. */
	for (int i = 0; i < riscv_count_harts(target); ++i) {
		int index = i;
		if (target->rtos) {
			if (index != target->coreid)
				continue;
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control_haltreq, index));
		} else {
			index = r->current_hartid;
		}

		LOG_DEBUG("Waiting for hart %d to come out of reset.", index);
		while (1) {
			int result = dmstatus_read_timeout(target, &dmstatus, true,
					riscv_reset_timeout_sec);
			if (result == ERROR_TIMEOUT_REACHED)
				LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
						"reset in %ds; Increase the timeout with riscv "
						"set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec);
			if (result != ERROR_OK)
				return result;
			/* Certain debug modules, like the one in GD32VF103
			 * MCUs, violate the specification's requirement that
			 * each hart is in "exactly one of four states" and,
			 * during reset, report harts as both unavailable and
			 * halted/running. To work around this, we check for
			 * the absence of the unavailable state rather than
			 * the presence of any other state. */
			if (!get_field(dmstatus, DM_DMSTATUS_ALLUNAVAIL))
				break;
			if (time(NULL) - start > riscv_reset_timeout_sec) {
				LOG_ERROR("Hart %d didn't leave reset in %ds; "
						"dmstatus=0x%x; "
						"Increase the timeout with riscv set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec, dmstatus);
				return ERROR_FAIL;
			}
		}
		target->state = TARGET_HALTED;

		if (get_field(dmstatus, DM_DMSTATUS_ALLHAVERESET)) {
			/* Ack reset and clear DM_DMCONTROL_HALTREQ if previously set */
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control, index) |
					DM_DMCONTROL_ACKHAVERESET);
		}

		if (!target->rtos)
			break;
	}
	/* Restore the pre-reset DMI busy delay. */
	info->dmi_busy_delay = dmi_busy_delay;
	return ERROR_OK;
}
2467
2468 static int execute_fence(struct target *target)
2469 {
2470 /* FIXME: For non-coherent systems we need to flush the caches right
2471 * here, but there's no ISA-defined way of doing that. */
2472 {
2473 struct riscv_program program;
2474 riscv_program_init(&program, target);
2475 riscv_program_fence_i(&program);
2476 riscv_program_fence(&program);
2477 int result = riscv_program_exec(&program, target);
2478 if (result != ERROR_OK)
2479 LOG_DEBUG("Unable to execute pre-fence");
2480 }
2481
2482 return ERROR_OK;
2483 }
2484
2485 static void log_memory_access(target_addr_t address, uint64_t value,
2486 unsigned size_bytes, bool read)
2487 {
2488 if (debug_level < LOG_LVL_DEBUG)
2489 return;
2490
2491 char fmt[80];
2492 sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
2493 address, read ? "read" : "write", size_bytes * 2);
2494 switch (size_bytes) {
2495 case 1:
2496 value &= 0xff;
2497 break;
2498 case 2:
2499 value &= 0xffff;
2500 break;
2501 case 4:
2502 value &= 0xffffffffUL;
2503 break;
2504 case 8:
2505 break;
2506 default:
2507 assert(false);
2508 }
2509 LOG_DEBUG(fmt, value);
2510 }
2511
2512 /* Read the relevant sbdata regs depending on size, and put the results into
2513 * buffer. */
2514 static int read_memory_bus_word(struct target *target, target_addr_t address,
2515 uint32_t size, uint8_t *buffer)
2516 {
2517 uint32_t value;
2518 int result;
2519 static int sbdata[4] = { DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3 };
2520 assert(size <= 16);
2521 for (int i = (size - 1) / 4; i >= 0; i--) {
2522 result = dmi_op(target, &value, NULL, DMI_OP_READ, sbdata[i], 0, false, true);
2523 if (result != ERROR_OK)
2524 return result;
2525 buf_set_u32(buffer + i * 4, 0, 8 * MIN(size, 4), value);
2526 log_memory_access(address + i * 4, value, MIN(size, 4), true);
2527 }
2528 return ERROR_OK;
2529 }
2530
2531 static target_addr_t sb_read_address(struct target *target)
2532 {
2533 RISCV013_INFO(info);
2534 unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2535 target_addr_t address = 0;
2536 uint32_t v;
2537 if (sbasize > 32) {
2538 dmi_read(target, &v, DM_SBADDRESS1);
2539 address |= v;
2540 address <<= 32;
2541 }
2542 dmi_read(target, &v, DM_SBADDRESS0);
2543 address |= v;
2544 return address;
2545 }
2546
2547 static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
2548 {
2549 time_t start = time(NULL);
2550 while (1) {
2551 if (dmi_read(target, sbcs, DM_SBCS) != ERROR_OK)
2552 return ERROR_FAIL;
2553 if (!get_field(*sbcs, DM_SBCS_SBBUSY))
2554 return ERROR_OK;
2555 if (time(NULL) - start > riscv_command_timeout_sec) {
2556 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
2557 "Increase the timeout with riscv set_command_timeout_sec.",
2558 riscv_command_timeout_sec, *sbcs);
2559 return ERROR_FAIL;
2560 }
2561 }
2562 }
2563
2564 static int modify_privilege(struct target *target, uint64_t *mstatus, uint64_t *mstatus_old)
2565 {
2566 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5)) {
2567 /* Read DCSR */
2568 uint64_t dcsr;
2569 if (register_read(target, &dcsr, GDB_REGNO_DCSR) != ERROR_OK)
2570 return ERROR_FAIL;
2571
2572 /* Read and save MSTATUS */
2573 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
2574 return ERROR_FAIL;
2575 *mstatus_old = *mstatus;
2576
2577 /* If we come from m-mode with mprv set, we want to keep mpp */
2578 if (get_field(dcsr, DCSR_PRV) < 3) {
2579 /* MPP = PRIV */
2580 *mstatus = set_field(*mstatus, MSTATUS_MPP, get_field(dcsr, DCSR_PRV));
2581
2582 /* MPRV = 1 */
2583 *mstatus = set_field(*mstatus, MSTATUS_MPRV, 1);
2584
2585 /* Write MSTATUS */
2586 if (*mstatus != *mstatus_old)
2587 if (register_write_direct(target, GDB_REGNO_MSTATUS, *mstatus) != ERROR_OK)
2588 return ERROR_FAIL;
2589 }
2590 }
2591
2592 return ERROR_OK;
2593 }
2594
/* Read memory over the system bus using the (pre-ratification) SBA version 0
 * register layout. Single reads use a one-shot access; larger counts use
 * auto-increment/auto-read mode. Only supports increment == size. */
static int read_memory_bus_v0(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (size != increment) {
		LOG_ERROR("sba v0 reads only support size==increment");
		return ERROR_NOT_IMPLEMENTED;
	}

	LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
			TARGET_PRIxADDR, size, count, address);
	uint8_t *t_buffer = buffer;
	riscv_addr_t cur_addr = address;
	riscv_addr_t fin_addr = address + (count * size);
	uint32_t access = 0;

	/* NOTE(review): these sbcs bit positions are not part of the ratified
	 * 0.13 sbcs layout — presumably a v0/pre-ratification variant; confirm
	 * against the targeted debug module. */
	const int DM_SBCS_SBSINGLEREAD_OFFSET = 20;
	const uint32_t DM_SBCS_SBSINGLEREAD = (0x1U << DM_SBCS_SBSINGLEREAD_OFFSET);

	const int DM_SBCS_SBAUTOREAD_OFFSET = 15;
	const uint32_t DM_SBCS_SBAUTOREAD = (0x1U << DM_SBCS_SBAUTOREAD_OFFSET);

	/* ww favorise one off reading if there is an issue */
	if (count == 1) {
		for (uint32_t i = 0; i < count; i++) {
			if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
				return ERROR_FAIL;
			dmi_write(target, DM_SBADDRESS0, cur_addr);
			/* size/2 matching the bit access of the spec 0.13 */
			access = set_field(access, DM_SBCS_SBACCESS, size/2);
			access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
			LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
			dmi_write(target, DM_SBCS, access);
			/* 3) read */
			uint32_t value;
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
			buf_set_u32(t_buffer, 0, 8 * size, value);
			t_buffer += size;
			cur_addr += size;
		}
		return ERROR_OK;
	}

	/* has to be the same size if we want to read a block */
	LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
	if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;
	/* set current address */
	dmi_write(target, DM_SBADDRESS0, cur_addr);
	/* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
	 * size/2 matching the bit access of the spec 0.13 */
	access = set_field(access, DM_SBCS_SBACCESS, size/2);
	access = set_field(access, DM_SBCS_SBAUTOREAD, 1);
	access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
	access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
	LOG_DEBUG("\r\naccess: 0x%08x", access);
	dmi_write(target, DM_SBCS, access);

	/* Each sbdata0 read below auto-triggers the next bus read and
	 * auto-increments the address. */
	while (cur_addr < fin_addr) {
		LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
				PRIx64, size, count, cur_addr);
		/* read */
		uint32_t value;
		if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
			return ERROR_FAIL;
		buf_set_u32(t_buffer, 0, 8 * size, value);
		cur_addr += size;
		t_buffer += size;

		/* if we are reaching last address, we must clear autoread */
		if (cur_addr == fin_addr && count != 1) {
			dmi_write(target, DM_SBCS, 0);
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u32(t_buffer, 0, 8 * size, value);
		}
	}

	/* Final sbcs read; note the value is fetched but not checked here. */
	uint32_t sbcs;
	if (dmi_read(target, &sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	return ERROR_OK;
}
2680
/**
 * Read the requested memory using the system bus interface.
 *
 * Uses sbreadonaddr/sbreadondata so each DMI data read triggers the next bus
 * access. On sbbusyerror the loop slows down (larger delay) and resumes from
 * the address the hardware reports; on sberror it gives up.
 */
static int read_memory_bus_v1(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (increment != size && increment != 0) {
		LOG_ERROR("sba v1 reads only support increment of size or 0");
		return ERROR_NOT_IMPLEMENTED;
	}

	RISCV013_INFO(info);
	target_addr_t next_address = address;
	target_addr_t end_address = address + count * size;

	/* Outer loop: restarts after a recoverable sbbusyerror. */
	while (next_address < end_address) {
		uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
		sbcs_write |= sb_sbaccess(size);
		if (increment == size)
			sbcs_write = set_field(sbcs_write, DM_SBCS_SBAUTOINCREMENT, 1);
		if (count > 1)
			sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, count > 1);
		if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
			return ERROR_FAIL;

		/* This address write will trigger the first read. */
		if (sb_write_address(target, next_address, true) != ERROR_OK)
			return ERROR_FAIL;

		if (info->bus_master_read_delay) {
			jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
			if (jtag_execute_queue() != ERROR_OK) {
				LOG_ERROR("Failed to scan idle sequence");
				return ERROR_FAIL;
			}
		}

		/* First value has been read, and is waiting for us to issue a DMI read
		 * to get it. */

		static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
		assert(size <= 16);
		/* next_read == address - 1 is a sentinel meaning "nothing buffered
		 * yet": results arrive one DMI read behind the request. */
		target_addr_t next_read = address - 1;
		for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
			for (int j = (size - 1) / 4; j >= 0; j--) {
				uint32_t value;
				unsigned attempt = 0;
				while (1) {
					if (attempt++ > 100) {
						LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
								next_read);
						return ERROR_FAIL;
					}
					keep_alive();
					dmi_status_t status = dmi_scan(target, NULL, &value,
							DMI_OP_READ, sbdata[j], 0, false);
					if (status == DMI_STATUS_BUSY)
						increase_dmi_busy_delay(target);
					else if (status == DMI_STATUS_SUCCESS)
						break;
					else
						return ERROR_FAIL;
				}
				if (next_read != address - 1) {
					buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
					log_memory_access(next_read, value, MIN(size, 4), true);
				}
				next_read = address + i * size + j * 4;
			}
		}

		uint32_t sbcs_read = 0;
		if (count > 1) {
			/* Collect the last pipelined value with a NOP scan (a DMI read
			 * would trigger yet another bus access). */
			uint32_t value;
			unsigned attempt = 0;
			while (1) {
				if (attempt++ > 100) {
					LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
							next_read);
					return ERROR_FAIL;
				}
				dmi_status_t status = dmi_scan(target, NULL, &value, DMI_OP_NOP, 0, 0, false);
				if (status == DMI_STATUS_BUSY)
					increase_dmi_busy_delay(target);
				else if (status == DMI_STATUS_SUCCESS)
					break;
				else
					return ERROR_FAIL;
			}
			buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
			log_memory_access(next_read, value, MIN(size, 4), true);

			/* "Writes to sbcs while sbbusy is high result in undefined behavior.
			 * A debugger must not write to sbcs until it reads sbbusy as 0." */
			if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
				return ERROR_FAIL;

			sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, 0);
			if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
				return ERROR_FAIL;
		}

		/* Read the last word, after we disabled sbreadondata if necessary. */
		if (!get_field(sbcs_read, DM_SBCS_SBERROR) &&
				!get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
			if (read_memory_bus_word(target, address + (count - 1) * size, size,
					buffer + (count - 1) * size) != ERROR_OK)
				return ERROR_FAIL;

			if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
				return ERROR_FAIL;
		}

		if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
			/* We read while the target was busy. Slow down and try again. */
			if (dmi_write(target, DM_SBCS, sbcs_read | DM_SBCS_SBBUSYERROR) != ERROR_OK)
				return ERROR_FAIL;
			next_address = sb_read_address(target);
			info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
			continue;
		}

		unsigned error = get_field(sbcs_read, DM_SBCS_SBERROR);
		if (error == 0) {
			next_address = end_address;
		} else {
			/* Some error indicating the bus access failed, but not because of
			 * something we did wrong. */
			if (dmi_write(target, DM_SBCS, DM_SBCS_SBERROR) != ERROR_OK)
				return ERROR_FAIL;
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
2817
2818 static void log_mem_access_result(struct target *target, bool success, int method, bool read)
2819 {
2820 RISCV_INFO(r);
2821 bool warn = false;
2822 char msg[60];
2823
2824 /* Compose the message */
2825 snprintf(msg, 60, "%s to %s memory via %s.",
2826 success ? "Succeeded" : "Failed",
2827 read ? "read" : "write",
2828 (method == RISCV_MEM_ACCESS_PROGBUF) ? "program buffer" :
2829 (method == RISCV_MEM_ACCESS_SYSBUS) ? "system bus" : "abstract access");
2830
2831 /* Determine the log message severity. Show warnings only once. */
2832 if (!success) {
2833 if (method == RISCV_MEM_ACCESS_PROGBUF) {
2834 warn = r->mem_access_progbuf_warn;
2835 r->mem_access_progbuf_warn = false;
2836 }
2837 if (method == RISCV_MEM_ACCESS_SYSBUS) {
2838 warn = r->mem_access_sysbus_warn;
2839 r->mem_access_sysbus_warn = false;
2840 }
2841 if (method == RISCV_MEM_ACCESS_ABSTRACT) {
2842 warn = r->mem_access_abstract_warn;
2843 r->mem_access_abstract_warn = false;
2844 }
2845 }
2846
2847 if (warn)
2848 LOG_WARNING("%s", msg);
2849 else
2850 LOG_DEBUG("%s", msg);
2851 }
2852
2853 static bool mem_should_skip_progbuf(struct target *target, target_addr_t address,
2854 uint32_t size, bool read, char **skip_reason)
2855 {
2856 assert(skip_reason);
2857
2858 if (!has_sufficient_progbuf(target, 3)) {
2859 LOG_DEBUG("Skipping mem %s via progbuf - insufficient progbuf size.",
2860 read ? "read" : "write");
2861 *skip_reason = "skipped (insufficient progbuf)";
2862 return true;
2863 }
2864 if (target->state != TARGET_HALTED) {
2865 LOG_DEBUG("Skipping mem %s via progbuf - target not halted.",
2866 read ? "read" : "write");
2867 *skip_reason = "skipped (target not halted)";
2868 return true;
2869 }
2870 if (riscv_xlen(target) < size * 8) {
2871 LOG_DEBUG("Skipping mem %s via progbuf - XLEN (%d) is too short for %d-bit memory access.",
2872 read ? "read" : "write", riscv_xlen(target), size * 8);
2873 *skip_reason = "skipped (XLEN too short)";
2874 return true;
2875 }
2876 if (size > 8) {
2877 LOG_DEBUG("Skipping mem %s via progbuf - unsupported size.",
2878 read ? "read" : "write");
2879 *skip_reason = "skipped (unsupported size)";
2880 return true;
2881 }
2882 if ((sizeof(address) * 8 > riscv_xlen(target)) && (address >> riscv_xlen(target))) {
2883 LOG_DEBUG("Skipping mem %s via progbuf - progbuf only supports %u-bit address.",
2884 read ? "read" : "write", riscv_xlen(target));
2885 *skip_reason = "skipped (too large address)";
2886 return true;
2887 }
2888
2889 return false;
2890 }
2891
2892 static bool mem_should_skip_sysbus(struct target *target, target_addr_t address,
2893 uint32_t size, uint32_t increment, bool read, char **skip_reason)
2894 {
2895 assert(skip_reason);
2896
2897 RISCV013_INFO(info);
2898 if (!sba_supports_access(target, size)) {
2899 LOG_DEBUG("Skipping mem %s via system bus - unsupported size.",
2900 read ? "read" : "write");
2901 *skip_reason = "skipped (unsupported size)";
2902 return true;
2903 }
2904 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2905 if ((sizeof(address) * 8 > sbasize) && (address >> sbasize)) {
2906 LOG_DEBUG("Skipping mem %s via system bus - sba only supports %u-bit address.",
2907 read ? "read" : "write", sbasize);
2908 *skip_reason = "skipped (too large address)";
2909 return true;
2910 }
2911 if (read && increment != size && (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0 || increment != 0)) {
2912 LOG_DEBUG("Skipping mem read via system bus - "
2913 "sba reads only support size==increment or also size==0 for sba v1.");
2914 *skip_reason = "skipped (unsupported increment)";
2915 return true;
2916 }
2917
2918 return false;
2919 }
2920
2921 static bool mem_should_skip_abstract(struct target *target, target_addr_t address,
2922 uint32_t size, uint32_t increment, bool read, char **skip_reason)
2923 {
2924 assert(skip_reason);
2925
2926 if (size > 8) {
2927 /* TODO: Add 128b support if it's ever used. Involves modifying
2928 read/write_abstract_arg() to work on two 64b values. */
2929 LOG_DEBUG("Skipping mem %s via abstract access - unsupported size: %d bits",
2930 read ? "read" : "write", size * 8);
2931 *skip_reason = "skipped (unsupported size)";
2932 return true;
2933 }
2934 if ((sizeof(address) * 8 > riscv_xlen(target)) && (address >> riscv_xlen(target))) {
2935 LOG_DEBUG("Skipping mem %s via abstract access - abstract access only supports %u-bit address.",
2936 read ? "read" : "write", riscv_xlen(target));
2937 *skip_reason = "skipped (too large address)";
2938 return true;
2939 }
2940 if (read && size != increment) {
2941 LOG_ERROR("Skipping mem read via abstract access - "
2942 "abstract command reads only support size==increment.");
2943 *skip_reason = "skipped (unsupported increment)";
2944 return true;
2945 }
2946
2947 return false;
2948 }
2949
2950 /*
2951 * Performs a memory read using memory access abstract commands. The read sizes
2952 * supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16 byte
2953 * aamsize fields in the memory access abstract command.
2954 */
2955 static int read_memory_abstract(struct target *target, target_addr_t address,
2956 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2957 {
2958 RISCV013_INFO(info);
2959
2960 int result = ERROR_OK;
2961 bool use_aampostincrement = info->has_aampostincrement != YNM_NO;
2962
2963 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
2964 size, address);
2965
2966 memset(buffer, 0, count * size);
2967
2968 /* Convert the size (bytes) to width (bits) */
2969 unsigned width = size << 3;
2970
2971 /* Create the command (physical address, postincrement, read) */
2972 uint32_t command = access_memory_command(target, false, width, use_aampostincrement, false);
2973
2974 /* Execute the reads */
2975 uint8_t *p = buffer;
2976 bool updateaddr = true;
2977 unsigned int width32 = (width < 32) ? 32 : width;
2978 for (uint32_t c = 0; c < count; c++) {
2979 /* Update the address if it is the first time or aampostincrement is not supported by the target. */
2980 if (updateaddr) {
2981 /* Set arg1 to the address: address + c * size */
2982 result = write_abstract_arg(target, 1, address + c * size, riscv_xlen(target));
2983 if (result != ERROR_OK) {
2984 LOG_ERROR("Failed to write arg1 during read_memory_abstract().");
2985 return result;
2986 }
2987 }
2988
2989 /* Execute the command */
2990 result = execute_abstract_command(target, command);
2991
2992 if (info->has_aampostincrement == YNM_MAYBE) {
2993 if (result == ERROR_OK) {
2994 /* Safety: double-check that the address was really auto-incremented */
2995 riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
2996 if (new_address == address + size) {
2997 LOG_DEBUG("aampostincrement is supported on this target.");
2998 info->has_aampostincrement = YNM_YES;
2999 } else {
3000 LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
3001 info->has_aampostincrement = YNM_NO;
3002 }
3003 } else {
3004 /* Try the same access but with postincrement disabled. */
3005 command = access_memory_command(target, false, width, false, false);
3006 result = execute_abstract_command(target, command);
3007 if (result == ERROR_OK) {
3008 LOG_DEBUG("aampostincrement is not supported on this target.");
3009 info->has_aampostincrement = YNM_NO;
3010 }
3011 }
3012 }
3013
3014 if (result != ERROR_OK)
3015 return result;
3016
3017 /* Copy arg0 to buffer (rounded width up to nearest 32) */
3018 riscv_reg_t value = read_abstract_arg(target, 0, width32);
3019 buf_set_u64(p, 0, 8 * size, value);
3020
3021 if (info->has_aampostincrement == YNM_YES)
3022 updateaddr = false;
3023 p += size;
3024 }
3025
3026 return result;
3027 }
3028
3029 /*
3030 * Performs a memory write using memory access abstract commands. The write
3031 * sizes supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16
3032 * byte aamsize fields in the memory access abstract command.
3033 */
3034 static int write_memory_abstract(struct target *target, target_addr_t address,
3035 uint32_t size, uint32_t count, const uint8_t *buffer)
3036 {
3037 RISCV013_INFO(info);
3038 int result = ERROR_OK;
3039 bool use_aampostincrement = info->has_aampostincrement != YNM_NO;
3040
3041 LOG_DEBUG("writing %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
3042 size, address);
3043
3044 /* Convert the size (bytes) to width (bits) */
3045 unsigned width = size << 3;
3046
3047 /* Create the command (physical address, postincrement, write) */
3048 uint32_t command = access_memory_command(target, false, width, use_aampostincrement, true);
3049
3050 /* Execute the writes */
3051 const uint8_t *p = buffer;
3052 bool updateaddr = true;
3053 for (uint32_t c = 0; c < count; c++) {
3054 /* Move data to arg0 */
3055 riscv_reg_t value = buf_get_u64(p, 0, 8 * size);
3056 result = write_abstract_arg(target, 0, value, riscv_xlen(target));
3057 if (result != ERROR_OK) {
3058 LOG_ERROR("Failed to write arg0 during write_memory_abstract().");
3059 return result;
3060 }
3061
3062 /* Update the address if it is the first time or aampostincrement is not supported by the target. */
3063 if (updateaddr) {
3064 /* Set arg1 to the address: address + c * size */
3065 result = write_abstract_arg(target, 1, address + c * size, riscv_xlen(target));
3066 if (result != ERROR_OK) {
3067 LOG_ERROR("Failed to write arg1 during write_memory_abstract().");
3068 return result;
3069 }
3070 }
3071
3072 /* Execute the command */
3073 result = execute_abstract_command(target, command);
3074
3075 if (info->has_aampostincrement == YNM_MAYBE) {
3076 if (result == ERROR_OK) {
3077 /* Safety: double-check that the address was really auto-incremented */
3078 riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
3079 if (new_address == address + size) {
3080 LOG_DEBUG("aampostincrement is supported on this target.");
3081 info->has_aampostincrement = YNM_YES;
3082 } else {
3083 LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
3084 info->has_aampostincrement = YNM_NO;
3085 }
3086 } else {
3087 /* Try the same access but with postincrement disabled. */
3088 command = access_memory_command(target, false, width, false, true);
3089 result = execute_abstract_command(target, command);
3090 if (result == ERROR_OK) {
3091 LOG_DEBUG("aampostincrement is not supported on this target.");
3092 info->has_aampostincrement = YNM_NO;
3093 }
3094 }
3095 }
3096
3097 if (result != ERROR_OK)
3098 return result;
3099
3100 if (info->has_aampostincrement == YNM_YES)
3101 updateaddr = false;
3102 p += size;
3103 }
3104
3105 return result;
3106 }
3107
3108 /**
3109 * Read the requested memory, taking care to execute every read exactly once,
3110 * even if cmderr=busy is encountered.
3111 */
3112 static int read_memory_progbuf_inner(struct target *target, target_addr_t address,
3113 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3114 {
3115 RISCV013_INFO(info);
3116
3117 int result = ERROR_OK;
3118
3119 /* Write address to S0. */
3120 result = register_write_direct(target, GDB_REGNO_S0, address);
3121 if (result != ERROR_OK)
3122 return result;
3123
3124 if (increment == 0 &&
3125 register_write_direct(target, GDB_REGNO_S2, 0) != ERROR_OK)
3126 return ERROR_FAIL;
3127
3128 uint32_t command = access_register_command(target, GDB_REGNO_S1,
3129 riscv_xlen(target),
3130 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
3131 if (execute_abstract_command(target, command) != ERROR_OK)
3132 return ERROR_FAIL;
3133
3134 /* First read has just triggered. Result is in s1. */
3135 if (count == 1) {
3136 uint64_t value;
3137 if (register_read_direct(target, &value, GDB_REGNO_S1) != ERROR_OK)
3138 return ERROR_FAIL;
3139 buf_set_u64(buffer, 0, 8 * size, value);
3140 log_memory_access(address, value, size, true);
3141 return ERROR_OK;
3142 }
3143
3144 if (dmi_write(target, DM_ABSTRACTAUTO,
3145 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
3146 goto error;
3147 /* Read garbage from dmi_data0, which triggers another execution of the
3148 * program. Now dmi_data0 contains the first good result, and s1 the next
3149 * memory value. */
3150 if (dmi_read_exec(target, NULL, DM_DATA0) != ERROR_OK)
3151 goto error;
3152
3153 /* read_addr is the next address that the hart will read from, which is the
3154 * value in s0. */
3155 unsigned index = 2;
3156 while (index < count) {
3157 riscv_addr_t read_addr = address + index * increment;
3158 LOG_DEBUG("i=%d, count=%d, read_addr=0x%" PRIx64, index, count, read_addr);
3159 /* The pipeline looks like this:
3160 * memory -> s1 -> dm_data0 -> debugger
3161 * Right now:
3162 * s0 contains read_addr
3163 * s1 contains mem[read_addr-size]
3164 * dm_data0 contains[read_addr-size*2]
3165 */
3166
3167 struct riscv_batch *batch = riscv_batch_alloc(target, 32,
3168 info->dmi_busy_delay + info->ac_busy_delay);
3169 if (!batch)
3170 return ERROR_FAIL;
3171
3172 unsigned reads = 0;
3173 for (unsigned j = index; j < count; j++) {
3174 if (size > 4)
3175 riscv_batch_add_dmi_read(batch, DM_DATA1);
3176 riscv_batch_add_dmi_read(batch, DM_DATA0);
3177
3178 reads++;
3179 if (riscv_batch_full(batch))
3180 break;
3181 }
3182
3183 batch_run(target, batch);
3184
3185 /* Wait for the target to finish performing the last abstract command,
3186 * and update our copy of cmderr. If we see that DMI is busy here,
3187 * dmi_busy_delay will be incremented. */
3188 uint32_t abstractcs;
3189 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3190 return ERROR_FAIL;
3191 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
3192 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3193 return ERROR_FAIL;
3194 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
3195
3196 unsigned next_index;
3197 unsigned ignore_last = 0;
3198 switch (info->cmderr) {
3199 case CMDERR_NONE:
3200 LOG_DEBUG("successful (partial?) memory read");
3201 next_index = index + reads;
3202 break;
3203 case CMDERR_BUSY:
3204 LOG_DEBUG("memory read resulted in busy response");
3205
3206 increase_ac_busy_delay(target);
3207 riscv013_clear_abstract_error(target);
3208
3209 dmi_write(target, DM_ABSTRACTAUTO, 0);
3210
3211 uint32_t dmi_data0, dmi_data1 = 0;
3212 /* This is definitely a good version of the value that we
3213 * attempted to read when we discovered that the target was
3214 * busy. */
3215 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK) {
3216 riscv_batch_free(batch);
3217 goto error;
3218 }
3219 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK) {
3220 riscv_batch_free(batch);
3221 goto error;
3222 }
3223
3224 /* See how far we got, clobbering dmi_data0. */
3225 if (increment == 0) {
3226 uint64_t counter;
3227 result = register_read_direct(target, &counter, GDB_REGNO_S2);
3228 next_index = counter;
3229 } else {
3230 uint64_t next_read_addr;
3231 result = register_read_direct(target, &next_read_addr,
3232 GDB_REGNO_S0);
3233 next_index = (next_read_addr - address) / increment;
3234 }
3235 if (result != ERROR_OK) {
3236 riscv_batch_free(batch);
3237 goto error;
3238 }
3239
3240 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
3241 buf_set_u64(buffer + (next_index - 2) * size, 0, 8 * size, value64);
3242 log_memory_access(address + (next_index - 2) * size, value64, size, true);
3243
3244 /* Restore the command, and execute it.
3245 * Now DM_DATA0 contains the next value just as it would if no
3246 * error had occurred. */
3247 dmi_write_exec(target, DM_COMMAND, command, true);
3248 next_index++;
3249
3250 dmi_write(target, DM_ABSTRACTAUTO,
3251 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
3252
3253 ignore_last = 1;
3254
3255 break;
3256 default:
3257 LOG_DEBUG("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
3258 riscv013_clear_abstract_error(target);
3259 riscv_batch_free(batch);
3260 result = ERROR_FAIL;
3261 goto error;
3262 }
3263
3264 /* Now read whatever we got out of the batch. */
3265 dmi_status_t status = DMI_STATUS_SUCCESS;
3266 unsigned read = 0;
3267 assert(index >= 2);
3268 for (unsigned j = index - 2; j < index + reads; j++) {
3269 assert(j < count);
3270 LOG_DEBUG("index=%d, reads=%d, next_index=%d, ignore_last=%d, j=%d",
3271 index, reads, next_index, ignore_last, j);
3272 if (j + 3 + ignore_last > next_index)
3273 break;
3274
3275 status = riscv_batch_get_dmi_read_op(batch, read);
3276 uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
3277 read++;
3278 if (status != DMI_STATUS_SUCCESS) {
3279 /* If we're here because of busy count, dmi_busy_delay will
3280 * already have been increased and busy state will have been
3281 * cleared in dmi_read(). */
3282 /* In at least some implementations, we issue a read, and then
3283 * can get busy back when we try to scan out the read result,
3284 * and the actual read value is lost forever. Since this is
3285 * rare in any case, we return error here and rely on our
3286 * caller to reread the entire block. */
3287 LOG_WARNING("Batch memory read encountered DMI error %d. "
3288 "Falling back on slower reads.", status);
3289 riscv_batch_free(batch);
3290 result = ERROR_FAIL;
3291 goto error;
3292 }
3293 if (size > 4) {
3294 status = riscv_batch_get_dmi_read_op(batch, read);
3295 if (status != DMI_STATUS_SUCCESS) {
3296 LOG_WARNING("Batch memory read encountered DMI error %d. "
3297 "Falling back on slower reads.", status);
3298 riscv_batch_free(batch);
3299 result = ERROR_FAIL;
3300 goto error;
3301 }
3302 value <<= 32;
3303 value |= riscv_batch_get_dmi_read_data(batch, read);
3304 read++;
3305 }
3306 riscv_addr_t offset = j * size;
3307 buf_set_u64(buffer + offset, 0, 8 * size, value);
3308 log_memory_access(address + j * increment, value, size, true);
3309 }
3310
3311 index = next_index;
3312
3313 riscv_batch_free(batch);
3314 }
3315
3316 dmi_write(target, DM_ABSTRACTAUTO, 0);
3317
3318 if (count > 1) {
3319 /* Read the penultimate word. */
3320 uint32_t dmi_data0, dmi_data1 = 0;
3321 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK)
3322 return ERROR_FAIL;
3323 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK)
3324 return ERROR_FAIL;
3325 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
3326 buf_set_u64(buffer + size * (count - 2), 0, 8 * size, value64);
3327 log_memory_access(address + size * (count - 2), value64, size, true);
3328 }
3329
3330 /* Read the last word. */
3331 uint64_t value;
3332 result = register_read_direct(target, &value, GDB_REGNO_S1);
3333 if (result != ERROR_OK)
3334 goto error;
3335 buf_set_u64(buffer + size * (count-1), 0, 8 * size, value);
3336 log_memory_access(address + size * (count-1), value, size, true);
3337
3338 return ERROR_OK;
3339
3340 error:
3341 dmi_write(target, DM_ABSTRACTAUTO, 0);
3342
3343 return result;
3344 }
3345
3346 /* Only need to save/restore one GPR to read a single word, and the progbuf
3347 * program doesn't need to increment. */
3348 static int read_memory_progbuf_one(struct target *target, target_addr_t address,
3349 uint32_t size, uint8_t *buffer)
3350 {
3351 uint64_t mstatus = 0;
3352 uint64_t mstatus_old = 0;
3353 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3354 return ERROR_FAIL;
3355
3356 uint64_t s0;
3357 int result = ERROR_FAIL;
3358
3359 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3360 goto restore_mstatus;
3361
3362 /* Write the program (load, increment) */
3363 struct riscv_program program;
3364 riscv_program_init(&program, target);
3365 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3366 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3367 switch (size) {
3368 case 1:
3369 riscv_program_lbr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3370 break;
3371 case 2:
3372 riscv_program_lhr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3373 break;
3374 case 4:
3375 riscv_program_lwr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3376 break;
3377 case 8:
3378 riscv_program_ldr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
3379 break;
3380 default:
3381 LOG_ERROR("Unsupported size: %d", size);
3382 goto restore_mstatus;
3383 }
3384 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3385 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3386
3387 if (riscv_program_ebreak(&program) != ERROR_OK)
3388 goto restore_mstatus;
3389 if (riscv_program_write(&program) != ERROR_OK)
3390 goto restore_mstatus;
3391
3392 /* Write address to S0, and execute buffer. */
3393 if (write_abstract_arg(target, 0, address, riscv_xlen(target)) != ERROR_OK)
3394 goto restore_mstatus;
3395 uint32_t command = access_register_command(target, GDB_REGNO_S0,
3396 riscv_xlen(target), AC_ACCESS_REGISTER_WRITE |
3397 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
3398 if (execute_abstract_command(target, command) != ERROR_OK)
3399 goto restore_s0;
3400
3401 uint64_t value;
3402 if (register_read(target, &value, GDB_REGNO_S0) != ERROR_OK)
3403 goto restore_s0;
3404 buf_set_u64(buffer, 0, 8 * size, value);
3405 log_memory_access(address, value, size, true);
3406 result = ERROR_OK;
3407
3408 restore_s0:
3409 if (riscv_set_register(target, GDB_REGNO_S0, s0) != ERROR_OK)
3410 result = ERROR_FAIL;
3411
3412 restore_mstatus:
3413 if (mstatus != mstatus_old)
3414 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3415 result = ERROR_FAIL;
3416
3417 return result;
3418 }
3419
3420 /**
3421 * Read the requested memory, silently handling memory access errors.
3422 */
3423 static int read_memory_progbuf(struct target *target, target_addr_t address,
3424 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3425 {
3426 if (riscv_xlen(target) < size * 8) {
3427 LOG_ERROR("XLEN (%d) is too short for %d-bit memory read.",
3428 riscv_xlen(target), size * 8);
3429 return ERROR_FAIL;
3430 }
3431
3432 int result = ERROR_OK;
3433
3434 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
3435 size, address);
3436
3437 select_dmi(target);
3438
3439 memset(buffer, 0, count*size);
3440
3441 if (execute_fence(target) != ERROR_OK)
3442 return ERROR_FAIL;
3443
3444 if (count == 1)
3445 return read_memory_progbuf_one(target, address, size, buffer);
3446
3447 uint64_t mstatus = 0;
3448 uint64_t mstatus_old = 0;
3449 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3450 return ERROR_FAIL;
3451
3452 /* s0 holds the next address to read from
3453 * s1 holds the next data value read
3454 * s2 is a counter in case increment is 0
3455 */
3456 uint64_t s0, s1, s2;
3457 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3458 return ERROR_FAIL;
3459 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
3460 return ERROR_FAIL;
3461 if (increment == 0 && register_read(target, &s2, GDB_REGNO_S2) != ERROR_OK)
3462 return ERROR_FAIL;
3463
3464 /* Write the program (load, increment) */
3465 struct riscv_program program;
3466 riscv_program_init(&program, target);
3467 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3468 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3469
3470 switch (size) {
3471 case 1:
3472 riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3473 break;
3474 case 2:
3475 riscv_program_lhr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3476 break;
3477 case 4:
3478 riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3479 break;
3480 case 8:
3481 riscv_program_ldr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3482 break;
3483 default:
3484 LOG_ERROR("Unsupported size: %d", size);
3485 return ERROR_FAIL;
3486 }
3487
3488 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3489 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3490 if (increment == 0)
3491 riscv_program_addi(&program, GDB_REGNO_S2, GDB_REGNO_S2, 1);
3492 else
3493 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, increment);
3494
3495 if (riscv_program_ebreak(&program) != ERROR_OK)
3496 return ERROR_FAIL;
3497 if (riscv_program_write(&program) != ERROR_OK)
3498 return ERROR_FAIL;
3499
3500 result = read_memory_progbuf_inner(target, address, size, count, buffer, increment);
3501
3502 if (result != ERROR_OK) {
3503 /* The full read did not succeed, so we will try to read each word individually. */
3504 /* This will not be fast, but reading outside actual memory is a special case anyway. */
3505 /* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
3506 target_addr_t address_i = address;
3507 uint32_t count_i = 1;
3508 uint8_t *buffer_i = buffer;
3509
3510 for (uint32_t i = 0; i < count; i++, address_i += increment, buffer_i += size) {
3511 /* TODO: This is much slower than it needs to be because we end up
3512 * writing the address to read for every word we read. */
3513 result = read_memory_progbuf_inner(target, address_i, size, count_i, buffer_i, increment);
3514
3515 /* The read of a single word failed, so we will just return 0 for that instead */
3516 if (result != ERROR_OK) {
3517 LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR,
3518 size, address_i);
3519
3520 buf_set_u64(buffer_i, 0, 8 * size, 0);
3521 }
3522 }
3523 result = ERROR_OK;
3524 }
3525
3526 riscv_set_register(target, GDB_REGNO_S0, s0);
3527 riscv_set_register(target, GDB_REGNO_S1, s1);
3528 if (increment == 0)
3529 riscv_set_register(target, GDB_REGNO_S2, s2);
3530
3531 /* Restore MSTATUS */
3532 if (mstatus != mstatus_old)
3533 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3534 return ERROR_FAIL;
3535
3536 return result;
3537 }
3538
3539 static int read_memory(struct target *target, target_addr_t address,
3540 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3541 {
3542 if (count == 0)
3543 return ERROR_OK;
3544
3545 if (size != 1 && size != 2 && size != 4 && size != 8 && size != 16) {
3546 LOG_ERROR("BUG: Unsupported size for memory read: %d", size);
3547 return ERROR_FAIL;
3548 }
3549
3550 int ret = ERROR_FAIL;
3551 RISCV_INFO(r);
3552 RISCV013_INFO(info);
3553
3554 char *progbuf_result = "disabled";
3555 char *sysbus_result = "disabled";
3556 char *abstract_result = "disabled";
3557
3558 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
3559 int method = r->mem_access_methods[i];
3560
3561 if (method == RISCV_MEM_ACCESS_PROGBUF) {
3562 if (mem_should_skip_progbuf(target, address, size, true, &progbuf_result))
3563 continue;
3564
3565 ret = read_memory_progbuf(target, address, size, count, buffer, increment);
3566
3567 if (ret != ERROR_OK)
3568 progbuf_result = "failed";
3569 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
3570 if (mem_should_skip_sysbus(target, address, size, increment, true, &sysbus_result))
3571 continue;
3572
3573 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
3574 ret = read_memory_bus_v0(target, address, size, count, buffer, increment);
3575 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
3576 ret = read_memory_bus_v1(target, address, size, count, buffer, increment);
3577
3578 if (ret != ERROR_OK)
3579 sysbus_result = "failed";
3580 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
3581 if (mem_should_skip_abstract(target, address, size, increment, true, &abstract_result))
3582 continue;
3583
3584 ret = read_memory_abstract(target, address, size, count, buffer, increment);
3585
3586 if (ret != ERROR_OK)
3587 abstract_result = "failed";
3588 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
3589 /* No further mem access method to try. */
3590 break;
3591
3592 log_mem_access_result(target, ret == ERROR_OK, method, true);
3593
3594 if (ret == ERROR_OK)
3595 return ret;
3596 }
3597
3598 LOG_ERROR("Target %s: Failed to read memory (addr=0x%" PRIx64 ")", target_name(target), address);
3599 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result, sysbus_result, abstract_result);
3600 return ret;
3601 }
3602
3603 static int write_memory_bus_v0(struct target *target, target_addr_t address,
3604 uint32_t size, uint32_t count, const uint8_t *buffer)
3605 {
3606 /*1) write sbaddress: for singlewrite and autoincrement, we need to write the address once*/
3607 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
3608 TARGET_PRIxADDR, size, count, address);
3609 dmi_write(target, DM_SBADDRESS0, address);
3610 int64_t value = 0;
3611 int64_t access = 0;
3612 riscv_addr_t offset = 0;
3613 riscv_addr_t t_addr = 0;
3614 const uint8_t *t_buffer = buffer + offset;
3615
3616 /* B.8 Writing Memory, single write check if we write in one go */
3617 if (count == 1) { /* count is in bytes here */
3618 value = buf_get_u64(t_buffer, 0, 8 * size);
3619
3620 access = 0;
3621 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3622 dmi_write(target, DM_SBCS, access);
3623 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3624 LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
3625 dmi_write(target, DM_SBDATA0, value);
3626 return ERROR_OK;
3627 }
3628
3629 /*B.8 Writing Memory, using autoincrement*/
3630
3631 access = 0;
3632 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3633 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
3634 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3635 dmi_write(target, DM_SBCS, access);
3636
3637 /*2)set the value according to the size required and write*/
3638 for (riscv_addr_t i = 0; i < count; ++i) {
3639 offset = size*i;
3640 /* for monitoring only */
3641 t_addr = address + offset;
3642 t_buffer = buffer + offset;
3643
3644 value = buf_get_u64(t_buffer, 0, 8 * size);
3645 LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
3646 PRIx64, (uint32_t)t_addr, (uint32_t)value);
3647 dmi_write(target, DM_SBDATA0, value);
3648 }
3649 /*reset the autoincrement when finished (something weird is happening if this is not done at the end*/
3650 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 0);
3651 dmi_write(target, DM_SBCS, access);
3652
3653 return ERROR_OK;
3654 }
3655
/*
 * Write memory through the system bus (sbcs version 1).
 *
 * Strategy: enable sbautoincrement, write the start address once, then push
 * data words through DM_SBDATA* in large DMI batches.  After each batch,
 * check sbcs for busy/error conditions; on a recoverable "too fast" error
 * (sbbusyerror or a DMI busy response) re-read sbaddress to find where the
 * hardware actually got to and resume from there.  A non-zero sberror is
 * fatal.
 *
 * address/size/count/buffer follow the usual target memory-write contract;
 * returns ERROR_OK or an OpenOCD error code.
 */
static int write_memory_bus_v1(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	RISCV013_INFO(info);
	uint32_t sbcs = sb_sbaccess(size);
	sbcs = set_field(sbcs, DM_SBCS_SBAUTOINCREMENT, 1);
	dmi_write(target, DM_SBCS, sbcs);

	target_addr_t next_address = address;
	target_addr_t end_address = address + count * size;

	int result;

	sb_write_address(target, next_address, true);
	while (next_address < end_address) {
		LOG_DEBUG("transferring burst starting at address 0x%" TARGET_PRIxADDR,
				next_address);

		struct riscv_batch *batch = riscv_batch_alloc(
				target,
				32,
				info->dmi_busy_delay + info->bus_master_write_delay);
		if (!batch)
			return ERROR_FAIL;

		/* Resume at the element the previous burst stopped at. */
		for (uint32_t i = (next_address - address) / size; i < count; i++) {
			const uint8_t *p = buffer + i * size;

			/* Each element needs one DMI scan per 32-bit sbdata register. */
			if (riscv_batch_available_scans(batch) < (size + 3) / 4)
				break;

			/* Queue the high sbdata registers first; writing SBDATA0
			 * last is what triggers the actual bus access. */
			if (size > 12)
				riscv_batch_add_dmi_write(batch, DM_SBDATA3,
						((uint32_t) p[12]) |
						(((uint32_t) p[13]) << 8) |
						(((uint32_t) p[14]) << 16) |
						(((uint32_t) p[15]) << 24));

			if (size > 8)
				riscv_batch_add_dmi_write(batch, DM_SBDATA2,
						((uint32_t) p[8]) |
						(((uint32_t) p[9]) << 8) |
						(((uint32_t) p[10]) << 16) |
						(((uint32_t) p[11]) << 24));
			if (size > 4)
				riscv_batch_add_dmi_write(batch, DM_SBDATA1,
						((uint32_t) p[4]) |
						(((uint32_t) p[5]) << 8) |
						(((uint32_t) p[6]) << 16) |
						(((uint32_t) p[7]) << 24));
			uint32_t value = p[0];
			if (size > 2) {
				value |= ((uint32_t) p[2]) << 16;
				value |= ((uint32_t) p[3]) << 24;
			}
			if (size > 1)
				value |= ((uint32_t) p[1]) << 8;
			riscv_batch_add_dmi_write(batch, DM_SBDATA0, value);

			/* Note: only the low 32 bits end up in the trace for
			 * size > 4. */
			log_memory_access(address + i * size, value, size, false);
			next_address += size;
		}

		/* Execute the batch of writes */
		result = batch_run(target, batch);
		riscv_batch_free(batch);
		if (result != ERROR_OK)
			return result;

		/* Read sbcs value.
		 * At the same time, detect if DMI busy has occurred during the batch write. */
		bool dmi_busy_encountered;
		if (dmi_op(target, &sbcs, &dmi_busy_encountered, DMI_OP_READ,
				DM_SBCS, 0, false, true) != ERROR_OK)
			return ERROR_FAIL;
		if (dmi_busy_encountered)
			LOG_DEBUG("DMI busy encountered during system bus write.");

		/* Wait until sbbusy goes low */
		time_t start = time(NULL);
		while (get_field(sbcs, DM_SBCS_SBBUSY)) {
			if (time(NULL) - start > riscv_command_timeout_sec) {
				LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
						"Increase the timeout with riscv set_command_timeout_sec.",
						riscv_command_timeout_sec, sbcs);
				return ERROR_FAIL;
			}
			if (dmi_read(target, &sbcs, DM_SBCS) != ERROR_OK)
				return ERROR_FAIL;
		}

		if (get_field(sbcs, DM_SBCS_SBBUSYERROR)) {
			/* We wrote while the target was busy. */
			LOG_DEBUG("Sbbusyerror encountered during system bus write.");
			/* Clear the sticky error flag. */
			dmi_write(target, DM_SBCS, sbcs | DM_SBCS_SBBUSYERROR);
			/* Slow down before trying again. */
			info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
		}

		if (get_field(sbcs, DM_SBCS_SBBUSYERROR) || dmi_busy_encountered) {
			/* Recover from the case when the write commands were issued too fast.
			 * Determine the address from which to resume writing. */
			next_address = sb_read_address(target);
			if (next_address < address) {
				/* This should never happen, probably buggy hardware. */
				LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
						" - buggy sbautoincrement in hw?", next_address);
				/* Fail the whole operation. */
				return ERROR_FAIL;
			}
			/* Try again - resume writing. */
			continue;
		}

		unsigned int sberror = get_field(sbcs, DM_SBCS_SBERROR);
		if (sberror != 0) {
			/* Sberror indicates the bus access failed, but not because we issued the writes
			 * too fast. Cannot recover. Sbaddress holds the address where the error occurred
			 * (unless sbautoincrement in the HW is buggy).
			 */
			target_addr_t sbaddress = sb_read_address(target);
			LOG_DEBUG("System bus access failed with sberror=%u (sbaddress=0x%" TARGET_PRIxADDR ")",
					sberror, sbaddress);
			if (sbaddress < address) {
				/* This should never happen, probably buggy hardware.
				 * Make a note to the user not to trust the sbaddress value. */
				LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
						" - buggy sbautoincrement in hw?", next_address);
			}
			/* Clear the sticky error flag */
			dmi_write(target, DM_SBCS, DM_SBCS_SBERROR);
			/* Fail the whole operation */
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
3795
3796 static int write_memory_progbuf(struct target *target, target_addr_t address,
3797 uint32_t size, uint32_t count, const uint8_t *buffer)
3798 {
3799 RISCV013_INFO(info);
3800
3801 if (riscv_xlen(target) < size * 8) {
3802 LOG_ERROR("XLEN (%d) is too short for %d-bit memory write.",
3803 riscv_xlen(target), size * 8);
3804 return ERROR_FAIL;
3805 }
3806
3807 LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count, size, (long)address);
3808
3809 select_dmi(target);
3810
3811 uint64_t mstatus = 0;
3812 uint64_t mstatus_old = 0;
3813 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3814 return ERROR_FAIL;
3815
3816 /* s0 holds the next address to write to
3817 * s1 holds the next data value to write
3818 */
3819
3820 int result = ERROR_OK;
3821 uint64_t s0, s1;
3822 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3823 return ERROR_FAIL;
3824 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
3825 return ERROR_FAIL;
3826
3827 /* Write the program (store, increment) */
3828 struct riscv_program program;
3829 riscv_program_init(&program, target);
3830 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3831 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3832
3833 switch (size) {
3834 case 1:
3835 riscv_program_sbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3836 break;
3837 case 2:
3838 riscv_program_shr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3839 break;
3840 case 4:
3841 riscv_program_swr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3842 break;
3843 case 8:
3844 riscv_program_sdr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3845 break;
3846 default:
3847 LOG_ERROR("write_memory_progbuf(): Unsupported size: %d", size);
3848 result = ERROR_FAIL;
3849 goto error;
3850 }
3851
3852 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3853 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3854 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
3855
3856 result = riscv_program_ebreak(&program);
3857 if (result != ERROR_OK)
3858 goto error;
3859 riscv_program_write(&program);
3860
3861 riscv_addr_t cur_addr = address;
3862 riscv_addr_t fin_addr = address + (count * size);
3863 bool setup_needed = true;
3864 LOG_DEBUG("writing until final address 0x%016" PRIx64, fin_addr);
3865 while (cur_addr < fin_addr) {
3866 LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64,
3867 cur_addr);
3868
3869 struct riscv_batch *batch = riscv_batch_alloc(
3870 target,
3871 32,
3872 info->dmi_busy_delay + info->ac_busy_delay);
3873 if (!batch)
3874 goto error;
3875
3876 /* To write another word, we put it in S1 and execute the program. */
3877 unsigned start = (cur_addr - address) / size;
3878 for (unsigned i = start; i < count; ++i) {
3879 unsigned offset = size*i;
3880 const uint8_t *t_buffer = buffer + offset;
3881
3882 uint64_t value = buf_get_u64(t_buffer, 0, 8 * size);
3883
3884 log_memory_access(address + offset, value, size, false);
3885 cur_addr += size;
3886
3887 if (setup_needed) {
3888 result = register_write_direct(target, GDB_REGNO_S0,
3889 address + offset);
3890 if (result != ERROR_OK) {
3891 riscv_batch_free(batch);
3892 goto error;
3893 }
3894
3895 /* Write value. */
3896 if (size > 4)
3897 dmi_write(target, DM_DATA1, value >> 32);
3898 dmi_write(target, DM_DATA0, value);
3899
3900 /* Write and execute command that moves value into S1 and
3901 * executes program buffer. */
3902 uint32_t command = access_register_command(target,
3903 GDB_REGNO_S1, riscv_xlen(target),
3904 AC_ACCESS_REGISTER_POSTEXEC |
3905 AC_ACCESS_REGISTER_TRANSFER |
3906 AC_ACCESS_REGISTER_WRITE);
3907 result = execute_abstract_command(target, command);
3908 if (result != ERROR_OK) {
3909 riscv_batch_free(batch);
3910 goto error;
3911 }
3912
3913 /* Turn on autoexec */
3914 dmi_write(target, DM_ABSTRACTAUTO,
3915 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
3916
3917 setup_needed = false;
3918 } else {
3919 if (size > 4)
3920 riscv_batch_add_dmi_write(batch, DM_DATA1, value >> 32);
3921 riscv_batch_add_dmi_write(batch, DM_DATA0, value);
3922 if (riscv_batch_full(batch))
3923 break;
3924 }
3925 }
3926
3927 result = batch_run(target, batch);
3928 riscv_batch_free(batch);
3929 if (result != ERROR_OK)
3930 goto error;
3931
3932 /* Note that if the scan resulted in a Busy DMI response, it
3933 * is this read to abstractcs that will cause the dmi_busy_delay
3934 * to be incremented if necessary. */
3935
3936 uint32_t abstractcs;
3937 bool dmi_busy_encountered;
3938 result = dmi_op(target, &abstractcs, &dmi_busy_encountered,
3939 DMI_OP_READ, DM_ABSTRACTCS, 0, false, true);
3940 if (result != ERROR_OK)
3941 goto error;
3942 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
3943 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3944 return ERROR_FAIL;
3945 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
3946 if (info->cmderr == CMDERR_NONE && !dmi_busy_encountered) {
3947 LOG_DEBUG("successful (partial?) memory write");
3948 } else if (info->cmderr == CMDERR_BUSY || dmi_busy_encountered) {
3949 if (info->cmderr == CMDERR_BUSY)
3950 LOG_DEBUG("Memory write resulted in abstract command busy response.");
3951 else if (dmi_busy_encountered)
3952 LOG_DEBUG("Memory write resulted in DMI busy response.");
3953 riscv013_clear_abstract_error(target);
3954 increase_ac_busy_delay(target);
3955
3956 dmi_write(target, DM_ABSTRACTAUTO, 0);
3957 result = register_read_direct(target, &cur_addr, GDB_REGNO_S0);
3958 if (result != ERROR_OK)
3959 goto error;
3960 setup_needed = true;
3961 } else {
3962 LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs);
3963 riscv013_clear_abstract_error(target);
3964 result = ERROR_FAIL;
3965 goto error;
3966 }
3967 }
3968
3969 error:
3970 dmi_write(target, DM_ABSTRACTAUTO, 0);
3971
3972 if (register_write_direct(target, GDB_REGNO_S1, s1) != ERROR_OK)
3973 return ERROR_FAIL;
3974 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
3975 return ERROR_FAIL;
3976
3977 /* Restore MSTATUS */
3978 if (mstatus != mstatus_old)
3979 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3980 return ERROR_FAIL;
3981
3982 if (execute_fence(target) != ERROR_OK)
3983 return ERROR_FAIL;
3984
3985 return result;
3986 }
3987
3988 static int write_memory(struct target *target, target_addr_t address,
3989 uint32_t size, uint32_t count, const uint8_t *buffer)
3990 {
3991 if (size != 1 && size != 2 && size != 4 && size != 8 && size != 16) {
3992 LOG_ERROR("BUG: Unsupported size for memory write: %d", size);
3993 return ERROR_FAIL;
3994 }
3995
3996 int ret = ERROR_FAIL;
3997 RISCV_INFO(r);
3998 RISCV013_INFO(info);
3999
4000 char *progbuf_result = "disabled";
4001 char *sysbus_result = "disabled";
4002 char *abstract_result = "disabled";
4003
4004 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
4005 int method = r->mem_access_methods[i];
4006
4007 if (method == RISCV_MEM_ACCESS_PROGBUF) {
4008 if (mem_should_skip_progbuf(target, address, size, false, &progbuf_result))
4009 continue;
4010
4011 ret = write_memory_progbuf(target, address, size, count, buffer);
4012
4013 if (ret != ERROR_OK)
4014 progbuf_result = "failed";
4015 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
4016 if (mem_should_skip_sysbus(target, address, size, 0, false, &sysbus_result))
4017 continue;
4018
4019 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
4020 ret = write_memory_bus_v0(target, address, size, count, buffer);
4021 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
4022 ret = write_memory_bus_v1(target, address, size, count, buffer);
4023
4024 if (ret != ERROR_OK)
4025 sysbus_result = "failed";
4026 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
4027 if (mem_should_skip_abstract(target, address, size, 0, false, &abstract_result))
4028 continue;
4029
4030 ret = write_memory_abstract(target, address, size, count, buffer);
4031
4032 if (ret != ERROR_OK)
4033 abstract_result = "failed";
4034 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
4035 /* No further mem access method to try. */
4036 break;
4037
4038 log_mem_access_result(target, ret == ERROR_OK, method, false);
4039
4040 if (ret == ERROR_OK)
4041 return ret;
4042 }
4043
4044 LOG_ERROR("Target %s: Failed to write memory (addr=0x%" PRIx64 ")", target_name(target), address);
4045 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result, sysbus_result, abstract_result);
4046 return ret;
4047 }
4048
/* Nothing architecture-specific to report for the target_type.arch_state
 * callback; always succeeds. */
static int arch_state(struct target *target)
{
	return ERROR_OK;
}
4053
/* OpenOCD target driver vtable for RISC-V debug spec 0.13.  Most operations
 * (registers, breakpoints, memory reads, ...) are routed through the generic
 * "riscv" target; only the entry points below are 0.13-specific. */
struct target_type riscv013_target = {
	.name = "riscv",

	.init_target = init_target,
	.deinit_target = deinit_target,
	.examine = examine,

	/* Generic RISC-V layer handles poll/halt/step. */
	.poll = &riscv_openocd_poll,
	.halt = &riscv_halt,
	.step = &riscv_openocd_step,

	.assert_reset = assert_reset,
	.deassert_reset = deassert_reset,

	.write_memory = write_memory,

	.arch_state = arch_state
};
4072
4073 /*** 0.13-specific implementations of various RISC-V helper functions. ***/
4074 static int riscv013_get_register(struct target *target,
4075 riscv_reg_t *value, int rid)
4076 {
4077 LOG_DEBUG("[%s] reading register %s", target_name(target),
4078 gdb_regno_name(rid));
4079
4080 if (riscv_select_current_hart(target) != ERROR_OK)
4081 return ERROR_FAIL;
4082
4083 int result = ERROR_OK;
4084 if (rid == GDB_REGNO_PC) {
4085 /* TODO: move this into riscv.c. */
4086 result = register_read(target, value, GDB_REGNO_DPC);
4087 LOG_DEBUG("[%d] read PC from DPC: 0x%" PRIx64, target->coreid, *value);
4088 } else if (rid == GDB_REGNO_PRIV) {
4089 uint64_t dcsr;
4090 /* TODO: move this into riscv.c. */
4091 result = register_read(target, &dcsr, GDB_REGNO_DCSR);
4092 *value = set_field(0, VIRT_PRIV_V, get_field(dcsr, CSR_DCSR_V));
4093 *value = set_field(*value, VIRT_PRIV_PRV, get_field(dcsr, CSR_DCSR_PRV));
4094 } else {
4095 result = register_read(target, value, rid);
4096 if (result != ERROR_OK)
4097 *value = -1;
4098 }
4099
4100 return result;
4101 }
4102
4103 static int riscv013_set_register(struct target *target, int rid, uint64_t value)
4104 {
4105 riscv013_select_current_hart(target);
4106 LOG_DEBUG("[%d] writing 0x%" PRIx64 " to register %s",
4107 target->coreid, value, gdb_regno_name(rid));
4108
4109 if (rid <= GDB_REGNO_XPR31) {
4110 return register_write_direct(target, rid, value);
4111 } else if (rid == GDB_REGNO_PC) {
4112 LOG_DEBUG("[%d] writing PC to DPC: 0x%" PRIx64, target->coreid, value);
4113 register_write_direct(target, GDB_REGNO_DPC, value);
4114 uint64_t actual_value;
4115 register_read_direct(target, &actual_value, GDB_REGNO_DPC);
4116 LOG_DEBUG("[%d] actual DPC written: 0x%016" PRIx64, target->coreid, actual_value);
4117 if (value != actual_value) {
4118 LOG_ERROR("Written PC (0x%" PRIx64 ") does not match read back "
4119 "value (0x%" PRIx64 ")", value, actual_value);
4120 return ERROR_FAIL;
4121 }
4122 } else if (rid == GDB_REGNO_PRIV) {
4123 uint64_t dcsr;
4124 register_read(target, &dcsr, GDB_REGNO_DCSR);
4125 dcsr = set_field(dcsr, CSR_DCSR_PRV, get_field(value, VIRT_PRIV_PRV));
4126 dcsr = set_field(dcsr, CSR_DCSR_V, get_field(value, VIRT_PRIV_V));
4127 return register_write_direct(target, GDB_REGNO_DCSR, dcsr);
4128 } else {
4129 return register_write_direct(target, rid, value);
4130 }
4131
4132 return ERROR_OK;
4133 }
4134
4135 static int riscv013_select_current_hart(struct target *target)
4136 {
4137 RISCV_INFO(r);
4138
4139 dm013_info_t *dm = get_dm(target);
4140 if (!dm)
4141 return ERROR_FAIL;
4142 if (r->current_hartid == dm->current_hartid)
4143 return ERROR_OK;
4144
4145 uint32_t dmcontrol;
4146 /* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
4147 if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
4148 return ERROR_FAIL;
4149 dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
4150 int result = dmi_write(target, DM_DMCONTROL, dmcontrol);
4151 dm->current_hartid = r->current_hartid;
4152 return result;
4153 }
4154
4155 /* Select all harts that were prepped and that are selectable, clearing the
4156 * prepped flag on the harts that actually were selected. */
4157 static int select_prepped_harts(struct target *target, bool *use_hasel)
4158 {
4159 dm013_info_t *dm = get_dm(target);
4160 if (!dm)
4161 return ERROR_FAIL;
4162 if (!dm->hasel_supported) {
4163 RISCV_INFO(r);
4164 r->prepped = false;
4165 *use_hasel = false;
4166 return ERROR_OK;
4167 }
4168
4169 assert(dm->hart_count);
4170 unsigned hawindow_count = (dm->hart_count + 31) / 32;
4171 uint32_t hawindow[hawindow_count];
4172
4173 memset(hawindow, 0, sizeof(uint32_t) * hawindow_count);
4174
4175 target_list_t *entry;
4176 unsigned total_selected = 0;
4177 list_for_each_entry(entry, &dm->target_list, list) {
4178 struct target *t = entry->target;
4179 struct riscv_info *r = riscv_info(t);
4180 riscv013_info_t *info = get_info(t);
4181 unsigned index = info->index;
4182 LOG_DEBUG("index=%d, coreid=%d, prepped=%d", index, t->coreid, r->prepped);
4183 r->selected = r->prepped;
4184 if (r->prepped) {
4185 hawindow[index / 32] |= 1 << (index % 32);
4186 r->prepped = false;
4187 total_selected++;
4188 }
4189 index++;
4190 }
4191
4192 /* Don't use hasel if we only need to talk to one hart. */
4193 if (total_selected <= 1) {
4194 *use_hasel = false;
4195 return ERROR_OK;
4196 }
4197
4198 for (unsigned i = 0; i < hawindow_count; i++) {
4199 if (dmi_write(target, DM_HAWINDOWSEL, i) != ERROR_OK)
4200 return ERROR_FAIL;
4201 if (dmi_write(target, DM_HAWINDOW, hawindow[i]) != ERROR_OK)
4202 return ERROR_FAIL;
4203 }
4204
4205 *use_hasel = true;
4206 return ERROR_OK;
4207 }
4208
/* Nothing to prepare before halting; the actual halt request is issued in
 * riscv013_halt_go(). */
static int riscv013_halt_prep(struct target *target)
{
	return ERROR_OK;
}
4213
/*
 * Halt the prepped hart(s): set dmcontrol.haltreq (with hasel if several
 * harts were selected), poll until halted, then deassert haltreq.  When
 * hasel was used, every target on this DM is marked halted here; the
 * single-hart case is updated by the generic halt_go() instead.
 */
static int riscv013_halt_go(struct target *target)
{
	bool use_hasel = false;
	if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
		return ERROR_FAIL;

	RISCV_INFO(r);
	LOG_DEBUG("halting hart %d", r->current_hartid);

	/* Issue the halt command, and then wait for the current hart to halt. */
	uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_HALTREQ;
	if (use_hasel)
		dmcontrol |= DM_DMCONTROL_HASEL;
	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
	dmi_write(target, DM_DMCONTROL, dmcontrol);
	/* Bounded poll rather than waiting forever. */
	for (size_t i = 0; i < 256; ++i)
		if (riscv_is_halted(target))
			break;

	if (!riscv_is_halted(target)) {
		/* Dump dmstatus/dmcontrol to help diagnose why the hart
		 * did not halt. */
		uint32_t dmstatus;
		if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
			return ERROR_FAIL;
		if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
			return ERROR_FAIL;

		LOG_ERROR("unable to halt hart %d", r->current_hartid);
		LOG_ERROR(" dmcontrol=0x%08x", dmcontrol);
		LOG_ERROR(" dmstatus =0x%08x", dmstatus);
		return ERROR_FAIL;
	}

	/* Deassert the halt request now that the hart is halted. */
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HALTREQ, 0);
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	if (use_hasel) {
		target_list_t *entry;
		dm013_info_t *dm = get_dm(target);
		if (!dm)
			return ERROR_FAIL;
		list_for_each_entry(entry, &dm->target_list, list) {
			struct target *t = entry->target;
			t->state = TARGET_HALTED;
			if (t->debug_reason == DBG_REASON_NOTHALTED)
				t->debug_reason = DBG_REASON_DBGRQ;
		}
	}
	/* The "else" case is handled in halt_go(). */

	return ERROR_OK;
}
4265
4266 static int riscv013_resume_go(struct target *target)
4267 {
4268 bool use_hasel = false;
4269 if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
4270 return ERROR_FAIL;
4271
4272 return riscv013_step_or_resume_current_hart(target, false, use_hasel);
4273 }
4274
/* Single-step the current hart (step=true, no hasel needed for one hart). */
static int riscv013_step_current_hart(struct target *target)
{
	return riscv013_step_or_resume_current_hart(target, true, false);
}
4279
/* Prepare the current hart for resuming (shared pre-resume/step setup). */
static int riscv013_resume_prep(struct target *target)
{
	return riscv013_on_step_or_resume(target, false);
}
4284
/* Prepare the current hart for single-stepping (shared pre-resume/step setup). */
static int riscv013_on_step(struct target *target)
{
	return riscv013_on_step_or_resume(target, true);
}
4289
/* No 0.13-specific work is needed when a hart halts. */
static int riscv013_on_halt(struct target *target)
{
	return ERROR_OK;
}
4294
/*
 * Return true if the selected hart(s) are halted, based on dmstatus.
 * As a side effect, logs unavailable/nonexistent harts and, if a hart
 * unexpectedly reset, acknowledges the reset (and re-requests a halt if
 * we believed the hart was halted before the reset).
 */
static bool riscv013_is_halted(struct target *target)
{
	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return false;
	if (get_field(dmstatus, DM_DMSTATUS_ANYUNAVAIL))
		LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target));
	if (get_field(dmstatus, DM_DMSTATUS_ANYNONEXISTENT))
		LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target));
	if (get_field(dmstatus, DM_DMSTATUS_ANYHAVERESET)) {
		int hartid = riscv_current_hartid(target);
		LOG_INFO("Hart %d unexpectedly reset!", hartid);
		/* TODO: Can we make this more obvious to eg. a gdb user? */
		uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_ACKHAVERESET;
		dmcontrol = set_hartsel(dmcontrol, hartid);
		/* If we had been halted when we reset, request another halt. If we
		 * ended up running out of reset, then the user will (hopefully) get a
		 * message that a reset happened, that the target is running, and then
		 * that it is halted again once the request goes through.
		 */
		if (target->state == TARGET_HALTED)
			dmcontrol |= DM_DMCONTROL_HALTREQ;
		dmi_write(target, DM_DMCONTROL, dmcontrol);
	}
	return get_field(dmstatus, DM_DMSTATUS_ALLHALTED);
}
4322
4323 static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
4324 {
4325 riscv_reg_t dcsr;
4326 int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
4327 if (result != ERROR_OK)
4328 return RISCV_HALT_UNKNOWN;
4329
4330 LOG_DEBUG("dcsr.cause: 0x%" PRIx64, get_field(dcsr, CSR_DCSR_CAUSE));
4331
4332 switch (get_field(dcsr, CSR_DCSR_CAUSE)) {
4333 case CSR_DCSR_CAUSE_SWBP:
4334 return RISCV_HALT_BREAKPOINT;
4335 case CSR_DCSR_CAUSE_TRIGGER:
4336 /* We could get here before triggers are enumerated if a trigger was
4337 * already set when we connected. Force enumeration now, which has the
4338 * side effect of clearing any triggers we did not set. */
4339 riscv_enumerate_triggers(target);
4340 LOG_DEBUG("{%d} halted because of trigger", target->coreid);
4341 return RISCV_HALT_TRIGGER;
4342 case CSR_DCSR_CAUSE_STEP:
4343 return RISCV_HALT_SINGLESTEP;
4344 case CSR_DCSR_CAUSE_DEBUGINT:
4345 case CSR_DCSR_CAUSE_HALT:
4346 return RISCV_HALT_INTERRUPT;
4347 case CSR_DCSR_CAUSE_GROUP:
4348 return RISCV_HALT_GROUP;
4349 }
4350
4351 LOG_ERROR("Unknown DCSR cause field: 0x%" PRIx64, get_field(dcsr, CSR_DCSR_CAUSE));
4352 LOG_ERROR(" dcsr=0x%016lx", (long)dcsr);
4353 return RISCV_HALT_UNKNOWN;
4354 }
4355
4356 int riscv013_write_debug_buffer(struct target *target, unsigned index, riscv_insn_t data)
4357 {
4358 dm013_info_t *dm = get_dm(target);
4359 if (!dm)
4360 return ERROR_FAIL;
4361 if (dm->progbuf_cache[index] != data) {
4362 if (dmi_write(target, DM_PROGBUF0 + index, data) != ERROR_OK)
4363 return ERROR_FAIL;
4364 dm->progbuf_cache[index] = data;
4365 } else {
4366 LOG_DEBUG("cache hit for 0x%" PRIx32 " @%d", data, index);
4367 }
4368 return ERROR_OK;
4369 }
4370
4371 riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned index)
4372 {
4373 uint32_t value;
4374 dmi_read(target, &value, DM_PROGBUF0 + index);
4375 return value;
4376 }
4377
4378 int riscv013_execute_debug_buffer(struct target *target)
4379 {
4380 uint32_t run_program = 0;
4381 run_program = set_field(run_program, AC_ACCESS_REGISTER_AARSIZE, 2);
4382 run_program = set_field(run_program, AC_ACCESS_REGISTER_POSTEXEC, 1);
4383 run_program = set_field(run_program, AC_ACCESS_REGISTER_TRANSFER, 0);
4384 run_program = set_field(run_program, AC_ACCESS_REGISTER_REGNO, 0x1000);
4385
4386 return execute_abstract_command(target, run_program);
4387 }
4388
/* Fill a raw DMI scan buffer with a write operation: op=WRITE, data=d,
 * address=a (address width comes from the probed abits). */
void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
{
	RISCV013_INFO(info);
	buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_WRITE);
	buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, d);
	buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
}
4396
/* Fill a raw DMI scan buffer with a read operation: op=READ, data=0,
 * address=a (address width comes from the probed abits). */
void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a)
{
	RISCV013_INFO(info);
	buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_READ);
	buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
	buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
}
4404
/* Fill a raw DMI scan buffer with a no-op: op=NOP, data=0, address=0.
 * Used to shift out the result of a previous scan without side effects. */
void riscv013_fill_dmi_nop_u64(struct target *target, char *buf)
{
	RISCV013_INFO(info);
	buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_NOP);
	buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
	buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, 0);
}
4412
4413 /* Helper function for riscv013_test_sba_config_reg */
4414 static int get_max_sbaccess(struct target *target)
4415 {
4416 RISCV013_INFO(info);
4417
4418 uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
4419 uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
4420 uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
4421 uint32_t sbaccess16 = get_field(info->sbcs, DM_SBCS_SBACCESS16);
4422 uint32_t sbaccess8 = get_field(info->sbcs, DM_SBCS_SBACCESS8);
4423
4424 if (sbaccess128)
4425 return 4;
4426 else if (sbaccess64)
4427 return 3;
4428 else if (sbaccess32)
4429 return 2;
4430 else if (sbaccess16)
4431 return 1;
4432 else if (sbaccess8)
4433 return 0;
4434 else
4435 return -1;
4436 }
4437
4438 static uint32_t get_num_sbdata_regs(struct target *target)
4439 {
4440 RISCV013_INFO(info);
4441
4442 uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
4443 uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
4444 uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
4445
4446 if (sbaccess128)
4447 return 4;
4448 else if (sbaccess64)
4449 return 2;
4450 else if (sbaccess32)
4451 return 1;
4452 else
4453 return 0;
4454 }
4455
4456 static int riscv013_test_sba_config_reg(struct target *target,
4457 target_addr_t legal_address, uint32_t num_words,
4458 target_addr_t illegal_address, bool run_sbbusyerror_test)
4459 {
4460 LOG_INFO("Testing System Bus Access as defined by RISC-V Debug Spec v0.13");
4461
4462 uint32_t tests_failed = 0;
4463
4464 uint32_t rd_val;
4465 uint32_t sbcs_orig;
4466 dmi_read(target, &sbcs_orig, DM_SBCS);
4467
4468 uint32_t sbcs = sbcs_orig;
4469 bool test_passed;
4470
4471 int max_sbaccess = get_max_sbaccess(target);
4472
4473 if (max_sbaccess == -1) {
4474 LOG_ERROR("System Bus Access not supported in this config.");
4475 return ERROR_FAIL;
4476 }
4477
4478 if (get_field(sbcs, DM_SBCS_SBVERSION) != 1) {
4479 LOG_ERROR("System Bus Access unsupported SBVERSION (%d). Only version 1 is supported.",
4480 get_field(sbcs, DM_SBCS_SBVERSION));
4481 return ERROR_FAIL;
4482 }
4483
4484 uint32_t num_sbdata_regs = get_num_sbdata_regs(target);
4485 assert(num_sbdata_regs);
4486
4487 uint32_t rd_buf[num_sbdata_regs];
4488
4489 /* Test 1: Simple write/read test */
4490 test_passed = true;
4491 sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 0);
4492 dmi_write(target, DM_SBCS, sbcs);
4493
4494 uint32_t test_patterns[4] = {0xdeadbeef, 0xfeedbabe, 0x12345678, 0x08675309};
4495 for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
4496 sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
4497 dmi_write(target, DM_SBCS, sbcs);
4498
4499 uint32_t compare_mask = (sbaccess == 0) ? 0xff : (sbaccess == 1) ? 0xffff : 0xffffffff;
4500
4501 for (uint32_t i = 0; i < num_words; i++) {
4502 uint32_t addr = legal_address + (i << sbaccess);
4503 uint32_t wr_data[num_sbdata_regs];
4504 for (uint32_t j = 0; j < num_sbdata_regs; j++)
4505 wr_data[j] = test_patterns[j] + i;
4506 write_memory_sba_simple(target, addr, wr_data, num_sbdata_regs, sbcs);
4507 }
4508
4509 for (uint32_t i = 0; i < num_words; i++) {
4510 uint32_t addr = legal_address + (i << sbaccess);
4511 read_memory_sba_simple(target, addr, rd_buf, num_sbdata_regs, sbcs);
4512 for (uint32_t j = 0; j < num_sbdata_regs; j++) {
4513 if (((test_patterns[j]+i)&compare_mask) != (rd_buf[j]&compare_mask)) {
4514 LOG_ERROR("System Bus Access Test 1: Error reading non-autoincremented address %x,"
4515 "expected val = %x, read val = %x", addr, test_patterns[j]+i, rd_buf[j]);
4516 test_passed = false;
4517 tests_failed++;
4518 }
4519 }
4520 }
4521 }
4522 if (test_passed)
4523 LOG_INFO("System Bus Access Test 1: Simple write/read test PASSED.");
4524
4525 /* Test 2: Address autoincrement test */
4526 target_addr_t curr_addr;
4527 target_addr_t prev_addr;
4528 test_passed = true;
4529 sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 1);
4530 dmi_write(target, DM_SBCS, sbcs);
4531
4532 for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
4533 sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
4534 dmi_write(target, DM_SBCS, sbcs);
4535
4536 dmi_write(target, DM_SBADDRESS0, legal_address);
4537 read_sbcs_nonbusy(target, &sbcs);
4538 curr_addr = legal_address;
4539 for (uint32_t i = 0; i < num_words; i++) {
4540 prev_addr = curr_addr;
4541 read_sbcs_nonbusy(target, &sbcs);
4542 curr_addr = sb_read_address(target);
4543 if ((curr_addr - prev_addr != (uint32_t)(1 << sbaccess)) && (i != 0)) {
4544 LOG_ERROR("System Bus Access Test 2: Error with address auto-increment, sbaccess = %x.", sbaccess);
4545 test_passed = false;
4546 tests_failed++;
4547 }
4548 dmi_write(target, DM_SBDATA0, i);
4549 }
4550
4551 read_sbcs_nonbusy(target, &sbcs);
4552
4553 dmi_write(target, DM_SBADDRESS0, legal_address);
4554
4555 uint32_t val;
4556 sbcs = set_field(sbcs, DM_SBCS_SBREADONDATA, 1);
4557 dmi_write(target, DM_SBCS, sbcs);
4558 dmi_read(target, &val, DM_SBDATA0); /* Dummy read to trigger first system bus read */
4559 curr_addr = legal_address;
4560 for (uint32_t i = 0; i < num_words; i++) {
4561 prev_addr = curr_addr;
4562 read_sbcs_nonbusy(target, &sbcs);
4563 curr_addr = sb_read_address(target);
4564 if ((curr_addr - prev_addr != (uint32_t)(1 << sbaccess)) && (i != 0)) {
4565 LOG_ERROR("System Bus Access Test 2: Error with address auto-increment, sbaccess = %x", sbaccess);
4566 test_passed = false;
4567 tests_failed++;
4568 }
4569 dmi_read(target, &val, DM_SBDATA0);
4570 read_sbcs_nonbusy(target, &sbcs);
4571 if (i != val) {
4572 LOG_ERROR("System Bus Access Test 2: Error reading auto-incremented address,"
4573 "expected val = %x, read val = %x.", i, val);
4574 test_passed = false;
4575 tests_failed++;
4576 }
4577 }
4578 }
4579 if (test_passed)
4580 LOG_INFO("System Bus Access Test 2: Address auto-increment test PASSED.");
4581
4582 /* Test 3: Read from illegal address */
4583 read_memory_sba_simple(target, illegal_address, rd_buf, 1, sbcs_orig);
4584
4585 dmi_read(target, &rd_val, DM_SBCS);
4586 if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
4587 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
4588 dmi_write(target, DM_SBCS, sbcs);
4589 dmi_read(target, &rd_val, DM_SBCS);
4590 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4591 LOG_INFO("System Bus Access Test 3: Illegal address read test PASSED.");
4592 else
4593 LOG_ERROR("System Bus Access Test 3: Illegal address read test FAILED, unable to clear to 0.");
4594 } else {
4595 LOG_ERROR("System Bus Access Test 3: Illegal address read test FAILED, unable to set error code.");
4596 }
4597
4598 /* Test 4: Write to illegal address */
4599 write_memory_sba_simple(target, illegal_address, test_patterns, 1, sbcs_orig);
4600
4601 dmi_read(target, &rd_val, DM_SBCS);
4602 if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
4603 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
4604 dmi_write(target, DM_SBCS, sbcs);
4605 dmi_read(target, &rd_val, DM_SBCS);
4606 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4607 LOG_INFO("System Bus Access Test 4: Illegal address write test PASSED.");
4608 else {
4609 LOG_ERROR("System Bus Access Test 4: Illegal address write test FAILED, unable to clear to 0.");
4610 tests_failed++;
4611 }
4612 } else {
4613 LOG_ERROR("System Bus Access Test 4: Illegal address write test FAILED, unable to set error code.");
4614 tests_failed++;
4615 }
4616
4617 /* Test 5: Write with unsupported sbaccess size */
4618 uint32_t sbaccess128 = get_field(sbcs_orig, DM_SBCS_SBACCESS128);
4619
4620 if (sbaccess128) {
4621 LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED, all sbaccess sizes supported.");
4622 } else {
4623 sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 4);
4624
4625 write_memory_sba_simple(target, legal_address, test_patterns, 1, sbcs);
4626
4627 dmi_read(target, &rd_val, DM_SBCS);
4628 if (get_field(rd_val, DM_SBCS_SBERROR) == 4) {
4629 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 4);
4630 dmi_write(target, DM_SBCS, sbcs);
4631 dmi_read(target, &rd_val, DM_SBCS);
4632 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4633 LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED.");
4634 else {
4635 LOG_ERROR("System Bus Access Test 5: SBCS sbaccess error test FAILED, unable to clear to 0.");
4636 tests_failed++;
4637 }
4638 } else {
4639 LOG_ERROR("System Bus Access Test 5: SBCS sbaccess error test FAILED, unable to set error code.");
4640 tests_failed++;
4641 }
4642 }
4643
4644 /* Test 6: Write to misaligned address */
4645 sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 1);
4646
4647 write_memory_sba_simple(target, legal_address+1, test_patterns, 1, sbcs);
4648
4649 dmi_read(target, &rd_val, DM_SBCS);
4650 if (get_field(rd_val, DM_SBCS_SBERROR) == 3) {
4651 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 3);
4652 dmi_write(target, DM_SBCS, sbcs);
4653 dmi_read(target, &rd_val, DM_SBCS);
4654 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4655 LOG_INFO("System Bus Access Test 6: SBCS address alignment error test PASSED");
4656 else {
4657 LOG_ERROR("System Bus Access Test 6: SBCS address alignment error test FAILED, unable to clear to 0.");
4658 tests_failed++;
4659 }
4660 } else {
4661 LOG_ERROR("System Bus Access Test 6: SBCS address alignment error test FAILED, unable to set error code.");
4662 tests_failed++;
4663 }
4664
4665 /* Test 7: Set sbbusyerror, only run this case in simulation as it is likely
4666 * impossible to hit otherwise */
4667 if (run_sbbusyerror_test) {
4668 sbcs = set_field(sbcs_orig, DM_SBCS_SBREADONADDR, 1);
4669 dmi_write(target, DM_SBCS, sbcs);
4670
4671 for (int i = 0; i < 16; i++)
4672 dmi_write(target, DM_SBDATA0, 0xdeadbeef);
4673
4674 for (int i = 0; i < 16; i++)
4675 dmi_write(target, DM_SBADDRESS0, legal_address);
4676
4677 dmi_read(target, &rd_val, DM_SBCS);
4678 if (get_field(rd_val, DM_SBCS_SBBUSYERROR)) {
4679 sbcs = set_field(sbcs_orig, DM_SBCS_SBBUSYERROR, 1);
4680 dmi_write(target, DM_SBCS, sbcs);
4681 dmi_read(target, &rd_val, DM_SBCS);
4682 if (get_field(rd_val, DM_SBCS_SBBUSYERROR) == 0)
4683 LOG_INFO("System Bus Access Test 7: SBCS sbbusyerror test PASSED.");
4684 else {
4685 LOG_ERROR("System Bus Access Test 7: SBCS sbbusyerror test FAILED, unable to clear to 0.");
4686 tests_failed++;
4687 }
4688 } else {
4689 LOG_ERROR("System Bus Access Test 7: SBCS sbbusyerror test FAILED, unable to set error code.");
4690 tests_failed++;
4691 }
4692 }
4693
4694 if (tests_failed == 0) {
4695 LOG_INFO("ALL TESTS PASSED");
4696 return ERROR_OK;
4697 } else {
4698 LOG_ERROR("%d TESTS FAILED", tests_failed);
4699 return ERROR_FAIL;
4700 }
4701
4702 }
4703
4704 void write_memory_sba_simple(struct target *target, target_addr_t addr,
4705 uint32_t *write_data, uint32_t write_size, uint32_t sbcs)
4706 {
4707 RISCV013_INFO(info);
4708
4709 uint32_t rd_sbcs;
4710 uint32_t masked_addr;
4711
4712 uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
4713
4714 read_sbcs_nonbusy(target, &rd_sbcs);
4715
4716 uint32_t sbcs_no_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 0);
4717 dmi_write(target, DM_SBCS, sbcs_no_readonaddr);
4718
4719 for (uint32_t i = 0; i < sba_size/32; i++) {
4720 masked_addr = (addr >> 32*i) & 0xffffffff;
4721
4722 if (i != 3)
4723 dmi_write(target, DM_SBADDRESS0+i, masked_addr);
4724 else
4725 dmi_write(target, DM_SBADDRESS3, masked_addr);
4726 }
4727
4728 /* Write SBDATA registers starting with highest address, since write to
4729 * SBDATA0 triggers write */
4730 for (int i = write_size-1; i >= 0; i--)
4731 dmi_write(target, DM_SBDATA0+i, write_data[i]);
4732 }
4733
4734 void read_memory_sba_simple(struct target *target, target_addr_t addr,
4735 uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs)
4736 {
4737 RISCV013_INFO(info);
4738
4739 uint32_t rd_sbcs;
4740 uint32_t masked_addr;
4741
4742 uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
4743
4744 read_sbcs_nonbusy(target, &rd_sbcs);
4745
4746 uint32_t sbcs_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 1);
4747 dmi_write(target, DM_SBCS, sbcs_readonaddr);
4748
4749 /* Write addresses starting with highest address register */
4750 for (int i = sba_size/32-1; i >= 0; i--) {
4751 masked_addr = (addr >> 32*i) & 0xffffffff;
4752
4753 if (i != 3)
4754 dmi_write(target, DM_SBADDRESS0+i, masked_addr);
4755 else
4756 dmi_write(target, DM_SBADDRESS3, masked_addr);
4757 }
4758
4759 read_sbcs_nonbusy(target, &rd_sbcs);
4760
4761 for (uint32_t i = 0; i < read_size; i++)
4762 dmi_read(target, &(rd_buf[i]), DM_SBDATA0+i);
4763 }
4764
4765 int riscv013_dmi_write_u64_bits(struct target *target)
4766 {
4767 RISCV013_INFO(info);
4768 return info->abits + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH;
4769 }
4770
4771 static int maybe_execute_fence_i(struct target *target)
4772 {
4773 if (has_sufficient_progbuf(target, 3))
4774 return execute_fence(target);
4775 return ERROR_OK;
4776 }
4777
4778 /* Helper Functions. */
4779 static int riscv013_on_step_or_resume(struct target *target, bool step)
4780 {
4781 if (maybe_execute_fence_i(target) != ERROR_OK)
4782 return ERROR_FAIL;
4783
4784 /* We want to twiddle some bits in the debug CSR so debugging works. */
4785 riscv_reg_t dcsr;
4786 int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
4787 if (result != ERROR_OK)
4788 return result;
4789 dcsr = set_field(dcsr, CSR_DCSR_STEP, step);
4790 dcsr = set_field(dcsr, CSR_DCSR_EBREAKM, riscv_ebreakm);
4791 dcsr = set_field(dcsr, CSR_DCSR_EBREAKS, riscv_ebreaks);
4792 dcsr = set_field(dcsr, CSR_DCSR_EBREAKU, riscv_ebreaku);
4793 return riscv_set_register(target, GDB_REGNO_DCSR, dcsr);
4794 }
4795
/* Resume the currently selected hart (single-stepping when `step` is true,
 * i.e. dcsr.step was set beforehand by riscv013_on_step_or_resume).
 * Writes dmcontrol.resumereq (optionally with hasel, so the whole selected
 * hart group resumes together), then polls dmstatus for allresumeack.
 * Returns ERROR_OK once the hart acknowledges the resume, ERROR_FAIL if it
 * never does (except when stepping, where the hart is re-halted and ERROR_OK
 * is returned so the debug session stays usable). */
static int riscv013_step_or_resume_current_hart(struct target *target,
		bool step, bool use_hasel)
{
	RISCV_INFO(r);
	LOG_DEBUG("resuming hart %d (for step?=%d)", r->current_hartid, step);
	/* Only a halted hart can be resumed. */
	if (!riscv_is_halted(target)) {
		LOG_ERROR("Hart %d is not halted!", r->current_hartid);
		return ERROR_FAIL;
	}

	/* Issue the resume command, and then wait for the current hart to resume. */
	uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_RESUMEREQ;
	if (use_hasel)
		dmcontrol |= DM_DMCONTROL_HASEL;
	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	/* Precompute the follow-up dmcontrol value (resumereq and hasel
	 * cleared); it is written below only after the poll loop finishes. */
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HASEL, 0);
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_RESUMEREQ, 0);

	uint32_t dmstatus;
	for (size_t i = 0; i < 256; ++i) {
		usleep(10);
		if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
			return ERROR_FAIL;
		/* Keep polling until every selected hart has acked the resume. */
		if (get_field(dmstatus, DM_DMSTATUS_ALLRESUMEACK) == 0)
			continue;
		/* When stepping, the hart should already be halted again after
		 * executing the single instruction; wait for that too. */
		if (step && get_field(dmstatus, DM_DMSTATUS_ALLHALTED) == 0)
			continue;

		/* Success: drop resumereq/hasel now that the hart has resumed. */
		dmi_write(target, DM_DMCONTROL, dmcontrol);
		return ERROR_OK;
	}

	/* Timed out; still clear resumereq/hasel before reporting. */
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	LOG_ERROR("unable to resume hart %d", r->current_hartid);
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return ERROR_FAIL;
	LOG_ERROR(" dmstatus =0x%08x", dmstatus);

	if (step) {
		/* Best effort: get the hart back into a known halted state. */
		LOG_ERROR("  was stepping, halting");
		riscv_halt(target);
		return ERROR_OK;
	}

	return ERROR_FAIL;
}
4845
4846 void riscv013_clear_abstract_error(struct target *target)
4847 {
4848 /* Wait for busy to go away. */
4849 time_t start = time(NULL);
4850 uint32_t abstractcs;
4851 dmi_read(target, &abstractcs, DM_ABSTRACTCS);
4852 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY)) {
4853 dmi_read(target, &abstractcs, DM_ABSTRACTCS);
4854
4855 if (time(NULL) - start > riscv_command_timeout_sec) {
4856 LOG_ERROR("abstractcs.busy is not going low after %d seconds "
4857 "(abstractcs=0x%x). The target is either really slow or "
4858 "broken. You could increase the timeout with riscv "
4859 "set_command_timeout_sec.",
4860 riscv_command_timeout_sec, abstractcs);
4861 break;
4862 }
4863 }
4864 /* Clear the error status. */
4865 dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
4866 }

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)