target/riscv: fix 'reset run' after 'reset halt'
[openocd.git] / src / target / riscv / riscv-013.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /*
4 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
5 * latest draft.
6 */
7
8 #include <assert.h>
9 #include <stdlib.h>
10 #include <time.h>
11
12 #ifdef HAVE_CONFIG_H
13 #include "config.h"
14 #endif
15
16 #include "target/target.h"
17 #include "target/algorithm.h"
18 #include "target/target_type.h"
19 #include <helper/log.h>
20 #include "jtag/jtag.h"
21 #include "target/register.h"
22 #include "target/breakpoints.h"
23 #include "helper/time_support.h"
24 #include "helper/list.h"
25 #include "riscv.h"
26 #include "debug_defines.h"
27 #include "rtos/rtos.h"
28 #include "program.h"
29 #include "asm.h"
30 #include "batch.h"
31
/* Forward declarations for halt/resume helpers used before their definitions. */
static int riscv013_on_step_or_resume(struct target *target, bool step);
static int riscv013_step_or_resume_current_hart(struct target *target,
		bool step, bool use_hasel);
static void riscv013_clear_abstract_error(struct target *target);

/* Implementations of the functions in riscv_info_t. */
static int riscv013_get_register(struct target *target,
		riscv_reg_t *value, int rid);
static int riscv013_set_register(struct target *target, int regid, uint64_t value);
static int riscv013_select_current_hart(struct target *target);
static int riscv013_halt_prep(struct target *target);
static int riscv013_halt_go(struct target *target);
static int riscv013_resume_go(struct target *target);
static int riscv013_step_current_hart(struct target *target);
static int riscv013_on_halt(struct target *target);
static int riscv013_on_step(struct target *target);
static int riscv013_resume_prep(struct target *target);
static bool riscv013_is_halted(struct target *target);
static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
static int riscv013_write_debug_buffer(struct target *target, unsigned index,
		riscv_insn_t d);
static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
		index);
static int riscv013_execute_debug_buffer(struct target *target);
static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
static int riscv013_dmi_write_u64_bits(struct target *target);
static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);

/* Register and memory access primitives shared across this file. */
static int register_read(struct target *target, uint64_t *value, uint32_t number);
static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
static int register_write_direct(struct target *target, unsigned number,
		uint64_t value);
static int read_memory(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment);
static int write_memory(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer);

/* System-bus-access self-test helpers (exported for the test command). */
static int riscv013_test_sba_config_reg(struct target *target, target_addr_t legal_address,
		uint32_t num_words, target_addr_t illegal_address, bool run_sbbusyerror_test);
void write_memory_sba_simple(struct target *target, target_addr_t addr, uint32_t *write_data,
		uint32_t write_size, uint32_t sbcs);
void read_memory_sba_simple(struct target *target, target_addr_t addr,
		uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs);
75 /**
76 * Since almost everything can be accomplish by scanning the dbus register, all
77 * functions here assume dbus is already selected. The exception are functions
78 * called directly by OpenOCD, which can't assume anything about what's
79 * currently in IR. They should set IR to dbus explicitly.
80 */
81
/* Extract/insert a bit field given its mask. The divisor/multiplier
 * `(mask) & ~((mask) << 1)` isolates the mask's lowest set bit, i.e. the
 * field's LSB weight, so no separate shift amount is needed. */
#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))

/* dcsr.cause encodings (why the hart entered Debug Mode). */
#define CSR_DCSR_CAUSE_SWBP		1
#define CSR_DCSR_CAUSE_TRIGGER	2
#define CSR_DCSR_CAUSE_DEBUGINT	3
#define CSR_DCSR_CAUSE_STEP		4
#define CSR_DCSR_CAUSE_HALT		5
#define CSR_DCSR_CAUSE_GROUP	6

/* Declare and initialize a local `r` pointing at this target's 0.13 info. */
#define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
93
/*** JTAG registers. ***/

/* DMI operation codes, as scanned into the DTM's dmi register. */
typedef enum {
	DMI_OP_NOP = 0,
	DMI_OP_READ = 1,
	DMI_OP_WRITE = 2
} dmi_op_t;
/* DMI completion status, as scanned back out of the dmi register.
 * Note there is no value 1; BUSY means the previous access is still
 * in progress and must be retried after a dmireset. */
typedef enum {
	DMI_STATUS_SUCCESS = 0,
	DMI_STATUS_FAILED = 2,
	DMI_STATUS_BUSY = 3
} dmi_status_t;

/* Scratch-slot identifiers used by the debug-buffer helpers. */
typedef enum slot {
	SLOT0,
	SLOT1,
	SLOT_LAST,
} slot_t;
112
/*** Debug Bus registers. ***/

/* abstractcs.cmderr values (values 5 and 6 are reserved). */
#define CMDERR_NONE				0
#define CMDERR_BUSY				1
#define CMDERR_NOT_SUPPORTED	2
#define CMDERR_EXCEPTION		3
#define CMDERR_HALT_RESUME		4
#define CMDERR_OTHER			7
121
/*** Info about the core being debugged. ***/

/* Description of a hardware watchpoint/breakpoint trigger. */
struct trigger {
	uint64_t address;
	uint32_t length;
	uint64_t mask;
	uint64_t value;
	/* Which accesses fire this trigger. */
	bool read, write, execute;
	/* ID used to match this trigger with the OpenOCD breakpoint/watchpoint
	 * that created it. */
	int unique_id;
};

/* Tri-state for capabilities we discover lazily at runtime. */
typedef enum {
	YNM_MAYBE,
	YNM_YES,
	YNM_NO
} yes_no_maybe_t;
138
/* Per-Debug-Module state. Several targets (harts) may share one DM; this
 * structure is the single point of truth for DM-wide state such as which
 * hart is currently selected. Entries live on the global dm_list. */
typedef struct {
	struct list_head list;
	/* Absolute JTAG chain position of the TAP this DM sits behind; used as
	 * the key when looking a DM up in dm_list. */
	int abs_chain_position;

	/* The number of harts connected to this DM. */
	int hart_count;
	/* Indicates we already reset this DM, so don't need to do it again. */
	bool was_reset;
	/* Targets that are connected to this DM. */
	struct list_head target_list;
	/* The currently selected hartid on this DM. */
	int current_hartid;
	bool hasel_supported;

	/* The program buffer stores executable code. 0 is an illegal instruction,
	 * so we use 0 to mean the cached value is invalid. */
	uint32_t progbuf_cache[16];
} dm013_info_t;

/* Node linking a target into its DM's target_list. */
typedef struct {
	struct list_head list;
	struct target *target;
} target_list_t;
162
/* Per-target (per-hart) state for debug spec 0.13. */
typedef struct {
	/* The indexed used to address this hart in its DM. */
	unsigned index;
	/* Number of address bits in the dbus register. */
	unsigned abits;
	/* Number of abstract command data registers. */
	unsigned datacount;
	/* Number of words in the Program Buffer. */
	unsigned progbufsize;

	/* We cache the read-only bits of sbcs here. */
	uint32_t sbcs;

	/* Whether the program buffer accepts writes; discovered lazily by
	 * examine_progbuf(). */
	yes_no_maybe_t progbuf_writable;
	/* We only need the address so that we know the alignment of the buffer. */
	riscv_addr_t progbuf_address;

	/* Number of run-test/idle cycles the target requests we do after each dbus
	 * access. */
	unsigned int dtmcs_idle;

	/* This value is incremented every time a dbus access comes back as "busy".
	 * It's used to determine how many run-test/idle cycles to feed the target
	 * in between accesses. */
	unsigned int dmi_busy_delay;

	/* Number of run-test/idle cycles to add between consecutive bus master
	 * reads/writes respectively. */
	unsigned int bus_master_write_delay, bus_master_read_delay;

	/* This value is increased every time we tried to execute two commands
	 * consecutively, and the second one failed because the previous hadn't
	 * completed yet. It's used to add extra run-test/idle cycles after
	 * starting a command, so we don't have to waste time checking for busy to
	 * go low. */
	unsigned int ac_busy_delay;

	/* Which register classes abstract commands can access; cleared when a
	 * transfer comes back CMDERR_NOT_SUPPORTED so we fall back to progbuf. */
	bool abstract_read_csr_supported;
	bool abstract_write_csr_supported;
	bool abstract_read_fpr_supported;
	bool abstract_write_fpr_supported;

	yes_no_maybe_t has_aampostincrement;

	/* When a function returns some error due to a failure indicated by the
	 * target in cmderr, the caller can look here to see what that error was.
	 * (Compare with errno.) */
	uint8_t cmderr;

	/* Some fields from hartinfo. */
	uint8_t datasize;
	uint8_t dataaccess;
	int16_t dataaddr;

	/* The width of the hartsel field. */
	unsigned hartsellen;

	/* DM that provides access to this target. */
	dm013_info_t *dm;
} riscv013_info_t;

/* Global registry of every DM discovered so far (see get_dm()). */
LIST_HEAD(dm_list);
223
224 LIST_HEAD(dm_list);
225
226 static riscv013_info_t *get_info(const struct target *target)
227 {
228 riscv_info_t *info = (riscv_info_t *) target->arch_info;
229 assert(info);
230 assert(info->version_specific);
231 return (riscv013_info_t *) info->version_specific;
232 }
233
/**
 * Return the DM structure for this target. If there isn't one, find it in the
 * global list of DMs. If it's not in there, then create one and initialize it
 * to 0.
 *
 * Also ensures the target is registered on the DM's target_list. Returns
 * NULL on allocation failure.
 */
dm013_info_t *get_dm(struct target *target)
{
	RISCV013_INFO(info);
	/* Fast path: we already resolved the DM for this target. */
	if (info->dm)
		return info->dm;

	/* DMs are keyed by the absolute chain position of the TAP they sit
	 * behind, so targets on the same TAP share one DM. */
	int abs_chain_position = target->tap->abs_chain_position;

	dm013_info_t *entry;
	dm013_info_t *dm = NULL;
	list_for_each_entry(entry, &dm_list, list) {
		if (entry->abs_chain_position == abs_chain_position) {
			dm = entry;
			break;
		}
	}

	if (!dm) {
		/* First target on this TAP: create a fresh DM record with
		 * "unknown" sentinels for hart selection and count. */
		LOG_DEBUG("[%d] Allocating new DM", target->coreid);
		dm = calloc(1, sizeof(dm013_info_t));
		if (!dm)
			return NULL;
		dm->abs_chain_position = abs_chain_position;
		dm->current_hartid = -1;
		dm->hart_count = -1;
		INIT_LIST_HEAD(&dm->target_list);
		list_add(&dm->list, &dm_list);
	}

	info->dm = dm;
	/* Register this target on the DM, unless it's already there. */
	target_list_t *target_entry;
	list_for_each_entry(target_entry, &dm->target_list, list) {
		if (target_entry->target == target)
			return dm;
	}
	target_entry = calloc(1, sizeof(*target_entry));
	if (!target_entry) {
		/* Undo the cached pointer so a later call retries cleanly. */
		info->dm = NULL;
		return NULL;
	}
	target_entry->target = target;
	list_add(&target_entry->list, &dm->target_list);

	return dm;
}
284
285 static uint32_t set_hartsel(uint32_t initial, uint32_t index)
286 {
287 initial &= ~DM_DMCONTROL_HARTSELLO;
288 initial &= ~DM_DMCONTROL_HARTSELHI;
289
290 uint32_t index_lo = index & ((1 << DM_DMCONTROL_HARTSELLO_LENGTH) - 1);
291 initial |= index_lo << DM_DMCONTROL_HARTSELLO_OFFSET;
292 uint32_t index_hi = index >> DM_DMCONTROL_HARTSELLO_LENGTH;
293 assert(index_hi < 1 << DM_DMCONTROL_HARTSELHI_LENGTH);
294 initial |= index_hi << DM_DMCONTROL_HARTSELHI_OFFSET;
295
296 return initial;
297 }
298
299 static void decode_dmi(char *text, unsigned address, unsigned data)
300 {
301 static const struct {
302 unsigned address;
303 uint64_t mask;
304 const char *name;
305 } description[] = {
306 { DM_DMCONTROL, DM_DMCONTROL_HALTREQ, "haltreq" },
307 { DM_DMCONTROL, DM_DMCONTROL_RESUMEREQ, "resumereq" },
308 { DM_DMCONTROL, DM_DMCONTROL_HARTRESET, "hartreset" },
309 { DM_DMCONTROL, DM_DMCONTROL_HASEL, "hasel" },
310 { DM_DMCONTROL, DM_DMCONTROL_HARTSELHI, "hartselhi" },
311 { DM_DMCONTROL, DM_DMCONTROL_HARTSELLO, "hartsello" },
312 { DM_DMCONTROL, DM_DMCONTROL_NDMRESET, "ndmreset" },
313 { DM_DMCONTROL, DM_DMCONTROL_DMACTIVE, "dmactive" },
314 { DM_DMCONTROL, DM_DMCONTROL_ACKHAVERESET, "ackhavereset" },
315
316 { DM_DMSTATUS, DM_DMSTATUS_IMPEBREAK, "impebreak" },
317 { DM_DMSTATUS, DM_DMSTATUS_ALLHAVERESET, "allhavereset" },
318 { DM_DMSTATUS, DM_DMSTATUS_ANYHAVERESET, "anyhavereset" },
319 { DM_DMSTATUS, DM_DMSTATUS_ALLRESUMEACK, "allresumeack" },
320 { DM_DMSTATUS, DM_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
321 { DM_DMSTATUS, DM_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
322 { DM_DMSTATUS, DM_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
323 { DM_DMSTATUS, DM_DMSTATUS_ALLUNAVAIL, "allunavail" },
324 { DM_DMSTATUS, DM_DMSTATUS_ANYUNAVAIL, "anyunavail" },
325 { DM_DMSTATUS, DM_DMSTATUS_ALLRUNNING, "allrunning" },
326 { DM_DMSTATUS, DM_DMSTATUS_ANYRUNNING, "anyrunning" },
327 { DM_DMSTATUS, DM_DMSTATUS_ALLHALTED, "allhalted" },
328 { DM_DMSTATUS, DM_DMSTATUS_ANYHALTED, "anyhalted" },
329 { DM_DMSTATUS, DM_DMSTATUS_AUTHENTICATED, "authenticated" },
330 { DM_DMSTATUS, DM_DMSTATUS_AUTHBUSY, "authbusy" },
331 { DM_DMSTATUS, DM_DMSTATUS_HASRESETHALTREQ, "hasresethaltreq" },
332 { DM_DMSTATUS, DM_DMSTATUS_CONFSTRPTRVALID, "confstrptrvalid" },
333 { DM_DMSTATUS, DM_DMSTATUS_VERSION, "version" },
334
335 { DM_ABSTRACTCS, DM_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
336 { DM_ABSTRACTCS, DM_ABSTRACTCS_BUSY, "busy" },
337 { DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR, "cmderr" },
338 { DM_ABSTRACTCS, DM_ABSTRACTCS_DATACOUNT, "datacount" },
339
340 { DM_COMMAND, DM_COMMAND_CMDTYPE, "cmdtype" },
341
342 { DM_SBCS, DM_SBCS_SBVERSION, "sbversion" },
343 { DM_SBCS, DM_SBCS_SBBUSYERROR, "sbbusyerror" },
344 { DM_SBCS, DM_SBCS_SBBUSY, "sbbusy" },
345 { DM_SBCS, DM_SBCS_SBREADONADDR, "sbreadonaddr" },
346 { DM_SBCS, DM_SBCS_SBACCESS, "sbaccess" },
347 { DM_SBCS, DM_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
348 { DM_SBCS, DM_SBCS_SBREADONDATA, "sbreadondata" },
349 { DM_SBCS, DM_SBCS_SBERROR, "sberror" },
350 { DM_SBCS, DM_SBCS_SBASIZE, "sbasize" },
351 { DM_SBCS, DM_SBCS_SBACCESS128, "sbaccess128" },
352 { DM_SBCS, DM_SBCS_SBACCESS64, "sbaccess64" },
353 { DM_SBCS, DM_SBCS_SBACCESS32, "sbaccess32" },
354 { DM_SBCS, DM_SBCS_SBACCESS16, "sbaccess16" },
355 { DM_SBCS, DM_SBCS_SBACCESS8, "sbaccess8" },
356 };
357
358 text[0] = 0;
359 for (unsigned i = 0; i < ARRAY_SIZE(description); i++) {
360 if (description[i].address == address) {
361 uint64_t mask = description[i].mask;
362 unsigned value = get_field(data, mask);
363 if (value) {
364 if (i > 0)
365 *(text++) = ' ';
366 if (mask & (mask >> 1)) {
367 /* If the field is more than 1 bit wide. */
368 sprintf(text, "%s=%d", description[i].name, value);
369 } else {
370 strcpy(text, description[i].name);
371 }
372 text += strlen(text);
373 }
374 }
375 }
376 }
377
/* Log one DMI scan (raw op/data/address in both directions, plus the idle
 * cycle count) at DEBUG level, followed by a decoded field-by-field view
 * when the address matches a known register. No-op below LOG_LVL_DEBUG. */
static void dump_field(int idle, const struct scan_field *field)
{
	/* Index 0-3 by the 2-bit op/status code. */
	static const char * const op_string[] = {"-", "r", "w", "?"};
	static const char * const status_string[] = {"+", "?", "F", "b"};

	if (debug_level < LOG_LVL_DEBUG)
		return;

	/* Outgoing scan: what we asked the DTM to do. */
	uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
	unsigned int out_op = get_field(out, DTM_DMI_OP);
	unsigned int out_data = get_field(out, DTM_DMI_DATA);
	unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;

	/* Incoming scan: the result of the *previous* operation. */
	uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
	unsigned int in_op = get_field(in, DTM_DMI_OP);
	unsigned int in_data = get_field(in, DTM_DMI_DATA);
	unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;

	log_printf_lf(LOG_LVL_DEBUG,
			__FILE__, __LINE__, "scan",
			"%db %s %08x @%02x -> %s %08x @%02x; %di",
			field->num_bits, op_string[out_op], out_data, out_address,
			status_string[in_op], in_data, in_address, idle);

	/* Decode named register fields for readability; skip the second log
	 * line entirely when neither direction decoded to anything. */
	char out_text[500];
	char in_text[500];
	decode_dmi(out_text, out_address, out_data);
	decode_dmi(in_text, in_address, in_data);
	if (in_text[0] || out_text[0]) {
		log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
				out_text, in_text);
	}
}
411
/*** Utility functions. ***/

/* Put the DMI register in the JTAG IR, either directly or via the BSCAN
 * tunnel when one is configured. Most functions in this file assume this
 * has already been done. */
static void select_dmi(struct target *target)
{
	if (bscan_tunnel_ir_width != 0) {
		select_dmi_via_bscan(target);
		return;
	}
	jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
}
422
/* Scan @out into the DTM's dtmcs register and return the value scanned out.
 * Leaves IR selecting DMI afterwards, per this file's convention.
 * NOTE(review): on a failed JTAG queue execution this returns the (small)
 * error code through the uint32_t return value, which a caller cannot
 * distinguish from real scan data — confirm callers tolerate this. */
static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
{
	struct scan_field field;
	uint8_t in_value[4];
	uint8_t out_value[4] = { 0 };

	/* Tunneled access handles IR selection itself. */
	if (bscan_tunnel_ir_width != 0)
		return dtmcontrol_scan_via_bscan(target, out);

	buf_set_u32(out_value, 0, 32, out);

	jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);

	field.num_bits = 32;
	field.out_value = out_value;
	field.in_value = in_value;
	jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);

	/* Always return to dmi. */
	select_dmi(target);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("failed jtag scan: %d", retval);
		return retval;
	}

	uint32_t in = buf_get_u32(field.in_value, 0, 32);
	LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);

	return in;
}
455
/* Called when a DMI access came back "busy": grow the inter-access idle
 * delay by ~10% (at least 1), then issue a dmireset via dtmcs to clear the
 * sticky busy condition so the next access can proceed. */
static void increase_dmi_busy_delay(struct target *target)
{
	riscv013_info_t *info = get_info(target);
	info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
	LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
			info->dtmcs_idle, info->dmi_busy_delay,
			info->ac_busy_delay);

	dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
}
466
/**
 * Perform a single raw DMI scan and return its completion status.
 *
 * exec: If this is set, assume the scan results in an execution, so more
 * run-test/idle cycles may be required.
 *
 * @param address_in if non-NULL, receives the address field scanned out.
 * @param data_in if non-NULL, receives the data field scanned out (which
 *        reflects the *previous* operation's result).
 * @return the DMI op/status field of the scanned-out value, or
 *         DMI_STATUS_FAILED if the JTAG queue itself failed.
 */
static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
		uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
		bool exec)
{
	riscv013_info_t *info = get_info(target);
	RISCV_INFO(r);
	unsigned num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH;
	size_t num_bytes = (num_bits + 7) / 8;
	uint8_t in[num_bytes];
	uint8_t out[num_bytes];
	struct scan_field field = {
		.num_bits = num_bits,
		.out_value = out,
		.in_value = in
	};
	riscv_bscan_tunneled_scan_context_t bscan_ctxt;

	/* After a reset, count down reset_delays_wait scans before zeroing the
	 * learned busy delays (they may no longer apply to the reset target). */
	if (r->reset_delays_wait >= 0) {
		r->reset_delays_wait--;
		if (r->reset_delays_wait < 0) {
			info->dmi_busy_delay = 0;
			info->ac_busy_delay = 0;
		}
	}

	memset(in, 0, num_bytes);
	memset(out, 0, num_bytes);

	assert(info->abits != 0);

	/* Assemble the op/data/address fields of the outgoing scan. */
	buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
	buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
	buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);

	/* I wanted to place this code in a different function, but the way JTAG command
	   queueing works in the jtag handling functions, the scan fields either have to be
	   heap allocated, global/static, or else they need to stay on the stack until
	   the jtag_execute_queue() call. Heap or static fields in this case doesn't seem
	   the best fit. Declaring stack based field values in a subsidiary function call wouldn't
	   work. */
	if (bscan_tunnel_ir_width != 0) {
		riscv_add_bscan_tunneled_scan(target, &field, &bscan_ctxt);
	} else {
		/* Assume dbus is already selected. */
		jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
	}

	/* Feed the learned number of idle cycles so the operation can complete
	 * before the next scan; execution-triggering scans need extra cycles. */
	int idle_count = info->dmi_busy_delay;
	if (exec)
		idle_count += info->ac_busy_delay;

	if (idle_count)
		jtag_add_runtest(idle_count, TAP_IDLE);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("dmi_scan failed jtag scan");
		if (data_in)
			*data_in = ~0;
		return DMI_STATUS_FAILED;
	}

	if (bscan_tunnel_ir_width != 0) {
		/* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
		buffer_shr(in, num_bytes, 1);
	}

	if (data_in)
		*data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);

	if (address_in)
		*address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
	dump_field(idle_count, &field);
	return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
}
546
547 /**
548 * @param target
549 * @param data_in The data we received from the target.
550 * @param dmi_busy_encountered
551 * If non-NULL, will be updated to reflect whether DMI busy was
552 * encountered while executing this operation or not.
553 * @param dmi_op The operation to perform (read/write/nop).
554 * @param address The address argument to that operation.
555 * @param data_out The data to send to the target.
556 * @param timeout_sec
557 * @param exec When true, this scan will execute something, so extra RTI
558 * cycles may be added.
559 * @param ensure_success
560 * Scan a nop after the requested operation, ensuring the
561 * DMI operation succeeded.
562 */
563 static int dmi_op_timeout(struct target *target, uint32_t *data_in,
564 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
565 uint32_t data_out, int timeout_sec, bool exec, bool ensure_success)
566 {
567 select_dmi(target);
568
569 dmi_status_t status;
570 uint32_t address_in;
571
572 if (dmi_busy_encountered)
573 *dmi_busy_encountered = false;
574
575 const char *op_name;
576 switch (dmi_op) {
577 case DMI_OP_NOP:
578 op_name = "nop";
579 break;
580 case DMI_OP_READ:
581 op_name = "read";
582 break;
583 case DMI_OP_WRITE:
584 op_name = "write";
585 break;
586 default:
587 LOG_ERROR("Invalid DMI operation: %d", dmi_op);
588 return ERROR_FAIL;
589 }
590
591 keep_alive();
592
593 time_t start = time(NULL);
594 /* This first loop performs the request. Note that if for some reason this
595 * stays busy, it is actually due to the previous access. */
596 while (1) {
597 status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
598 exec);
599 if (status == DMI_STATUS_BUSY) {
600 increase_dmi_busy_delay(target);
601 if (dmi_busy_encountered)
602 *dmi_busy_encountered = true;
603 } else if (status == DMI_STATUS_SUCCESS) {
604 break;
605 } else {
606 LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
607 return ERROR_FAIL;
608 }
609 if (time(NULL) - start > timeout_sec)
610 return ERROR_TIMEOUT_REACHED;
611 }
612
613 if (status != DMI_STATUS_SUCCESS) {
614 LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
615 return ERROR_FAIL;
616 }
617
618 if (ensure_success) {
619 /* This second loop ensures the request succeeded, and gets back data.
620 * Note that NOP can result in a 'busy' result as well, but that would be
621 * noticed on the next DMI access we do. */
622 while (1) {
623 status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
624 false);
625 if (status == DMI_STATUS_BUSY) {
626 increase_dmi_busy_delay(target);
627 if (dmi_busy_encountered)
628 *dmi_busy_encountered = true;
629 } else if (status == DMI_STATUS_SUCCESS) {
630 break;
631 } else {
632 if (data_in) {
633 LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
634 op_name, address, *data_in, status);
635 } else {
636 LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
637 status);
638 }
639 return ERROR_FAIL;
640 }
641 if (time(NULL) - start > timeout_sec)
642 return ERROR_TIMEOUT_REACHED;
643 }
644 }
645
646 return ERROR_OK;
647 }
648
649 static int dmi_op(struct target *target, uint32_t *data_in,
650 bool *dmi_busy_encountered, int dmi_op, uint32_t address,
651 uint32_t data_out, bool exec, bool ensure_success)
652 {
653 int result = dmi_op_timeout(target, data_in, dmi_busy_encountered, dmi_op,
654 address, data_out, riscv_command_timeout_sec, exec, ensure_success);
655 if (result == ERROR_TIMEOUT_REACHED) {
656 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
657 "either really slow or broken. You could increase the "
658 "timeout with riscv set_command_timeout_sec.",
659 riscv_command_timeout_sec);
660 return ERROR_FAIL;
661 }
662 return result;
663 }
664
/* Read a DMI register, with a verifying NOP scan. */
static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false, true);
}

/* Like dmi_read(), but the read triggers execution on the target, so extra
 * run-test/idle cycles are inserted. */
static int dmi_read_exec(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true, true);
}

/* Write a DMI register, with a verifying NOP scan. */
static int dmi_write(struct target *target, uint32_t address, uint32_t value)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true);
}

/* Like dmi_write(), but the write triggers execution; verification via a
 * trailing NOP is optional (@ensure_success). */
static int dmi_write_exec(struct target *target, uint32_t address,
		uint32_t value, bool ensure_success)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true, ensure_success);
}
685
686 int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
687 bool authenticated, unsigned timeout_sec)
688 {
689 int result = dmi_op_timeout(target, dmstatus, NULL, DMI_OP_READ,
690 DM_DMSTATUS, 0, timeout_sec, false, true);
691 if (result != ERROR_OK)
692 return result;
693 int dmstatus_version = get_field(*dmstatus, DM_DMSTATUS_VERSION);
694 if (dmstatus_version != 2 && dmstatus_version != 3) {
695 LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (1.0), not "
696 "%d (dmstatus=0x%x). This error might be caused by a JTAG "
697 "signal issue. Try reducing the JTAG clock speed.",
698 get_field(*dmstatus, DM_DMSTATUS_VERSION), *dmstatus);
699 } else if (authenticated && !get_field(*dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
700 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
701 "(dmstatus=0x%x). Use `riscv authdata_read` and "
702 "`riscv authdata_write` commands to authenticate.", *dmstatus);
703 return ERROR_FAIL;
704 }
705 return ERROR_OK;
706 }
707
/* Read dmstatus using the global command timeout. See
 * dmstatus_read_timeout() for the checks performed. */
int dmstatus_read(struct target *target, uint32_t *dmstatus,
		bool authenticated)
{
	return dmstatus_read_timeout(target, dmstatus, authenticated,
			riscv_command_timeout_sec);
}
714
715 static void increase_ac_busy_delay(struct target *target)
716 {
717 riscv013_info_t *info = get_info(target);
718 info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
719 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
720 info->dtmcs_idle, info->dmi_busy_delay,
721 info->ac_busy_delay);
722 }
723
724 uint32_t abstract_register_size(unsigned width)
725 {
726 switch (width) {
727 case 32:
728 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 2);
729 case 64:
730 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 3);
731 case 128:
732 return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 4);
733 default:
734 LOG_ERROR("Unsupported register width: %d", width);
735 return 0;
736 }
737 }
738
/* Poll abstractcs until its busy bit clears, storing the final abstractcs
 * value in @abstractcs. On timeout, decode and log any cmderr and return
 * ERROR_FAIL; DMI read failures also return ERROR_FAIL. */
static int wait_for_idle(struct target *target, uint32_t *abstractcs)
{
	RISCV013_INFO(info);
	time_t start = time(NULL);
	while (1) {
		if (dmi_read(target, abstractcs, DM_ABSTRACTCS) != ERROR_OK)
			return ERROR_FAIL;

		if (get_field(*abstractcs, DM_ABSTRACTCS_BUSY) == 0)
			return ERROR_OK;

		if (time(NULL) - start > riscv_command_timeout_sec) {
			/* Record the command error so callers can inspect it. */
			info->cmderr = get_field(*abstractcs, DM_ABSTRACTCS_CMDERR);
			if (info->cmderr != CMDERR_NONE) {
				/* Indexed by the 3-bit cmderr value (5 and 6 are reserved). */
				const char *errors[8] = {
					"none",
					"busy",
					"not supported",
					"exception",
					"halt/resume",
					"reserved",
					"reserved",
					"other" };

				LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
						errors[info->cmderr], *abstractcs);
			}

			LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
					"Increase the timeout with riscv set_command_timeout_sec.",
					riscv_command_timeout_sec,
					*abstractcs);
			return ERROR_FAIL;
		}
	}
}
775
/* Write @command to the command register, wait for it to complete, and
 * check for errors. On failure the cmderr is latched in info->cmderr for
 * the caller, the sticky error bit is cleared in abstractcs, and
 * ERROR_FAIL is returned. */
static int execute_abstract_command(struct target *target, uint32_t command)
{
	RISCV013_INFO(info);
	if (debug_level >= LOG_LVL_DEBUG) {
		switch (get_field(command, DM_COMMAND_CMDTYPE)) {
			case 0:
				/* Access Register command: decode its fields for the log. */
				LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
						"transfer=%d, write=%d, regno=0x%x",
						command,
						8 << get_field(command, AC_ACCESS_REGISTER_AARSIZE),
						get_field(command, AC_ACCESS_REGISTER_POSTEXEC),
						get_field(command, AC_ACCESS_REGISTER_TRANSFER),
						get_field(command, AC_ACCESS_REGISTER_WRITE),
						get_field(command, AC_ACCESS_REGISTER_REGNO));
				break;
			default:
				LOG_DEBUG("command=0x%x", command);
				break;
		}
	}

	/* Writing DM_COMMAND starts execution; don't verify with a NOP here
	 * since we poll abstractcs for completion anyway. */
	if (dmi_write_exec(target, DM_COMMAND, command, false) != ERROR_OK)
		return ERROR_FAIL;

	uint32_t abstractcs = 0;
	int result = wait_for_idle(target, &abstractcs);

	info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
	if (info->cmderr != 0 || result != ERROR_OK) {
		LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
		/* Clear the error. */
		dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
813
814 static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
815 unsigned size_bits)
816 {
817 riscv_reg_t value = 0;
818 uint32_t v;
819 unsigned offset = index * size_bits / 32;
820 switch (size_bits) {
821 default:
822 LOG_ERROR("Unsupported size: %d bits", size_bits);
823 return ~0;
824 case 64:
825 dmi_read(target, &v, DM_DATA0 + offset + 1);
826 value |= ((uint64_t) v) << 32;
827 /* falls through */
828 case 32:
829 dmi_read(target, &v, DM_DATA0 + offset);
830 value |= v;
831 }
832 return value;
833 }
834
835 static int write_abstract_arg(struct target *target, unsigned index,
836 riscv_reg_t value, unsigned size_bits)
837 {
838 unsigned offset = index * size_bits / 32;
839 switch (size_bits) {
840 default:
841 LOG_ERROR("Unsupported size: %d bits", size_bits);
842 return ERROR_FAIL;
843 case 64:
844 dmi_write(target, DM_DATA0 + offset + 1, value >> 32);
845 /* falls through */
846 case 32:
847 dmi_write(target, DM_DATA0 + offset, value);
848 }
849 return ERROR_OK;
850 }
851
/**
 * Build an Access Register abstract command for register @number.
 *
 * @par size in bits
 *
 * @param flags additional AC_ACCESS_REGISTER_* bits (transfer/write/postexec)
 *        OR'd into the command.
 * @return the assembled command word. Asserts on unsupported sizes or
 *         register numbers with no abstract regno mapping.
 */
static uint32_t access_register_command(struct target *target, uint32_t number,
		unsigned size, uint32_t flags)
{
	uint32_t command = set_field(0, DM_COMMAND_CMDTYPE, 0);
	switch (size) {
		case 32:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 2);
			break;
		case 64:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3);
			break;
		default:
			LOG_ERROR("%d-bit register %s not supported.", size,
					gdb_regno_name(number));
			assert(0);
	}

	/* Map the GDB register number onto the debug-spec abstract regno space:
	 * 0x1000+ for GPRs, 0x1020+ for FPRs, raw number for CSRs, 0xc000+ for
	 * target-custom registers. */
	if (number <= GDB_REGNO_XPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1000 + number - GDB_REGNO_ZERO);
	} else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1020 + number - GDB_REGNO_FPR0);
	} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				number - GDB_REGNO_CSR0);
	} else if (number >= GDB_REGNO_COUNT) {
		/* Custom register. */
		assert(target->reg_cache->reg_list[number].arch_info);
		riscv_reg_info_t *reg_info = target->reg_cache->reg_list[number].arch_info;
		assert(reg_info);
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0xc000 + reg_info->custom_number);
	} else {
		assert(0);
	}

	command |= flags;

	return command;
}
896
/* Read register @number via an abstract command into @value (if non-NULL).
 * Fails fast if this register class is already known to be unsupported by
 * abstract commands; on a CMDERR_NOT_SUPPORTED response, records that fact
 * so future calls skip the attempt (callers then fall back to progbuf). */
static int register_read_abstract(struct target *target, uint64_t *value,
		uint32_t number, unsigned size)
{
	RISCV013_INFO(info);

	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			!info->abstract_read_fpr_supported)
		return ERROR_FAIL;
	if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
			!info->abstract_read_csr_supported)
		return ERROR_FAIL;
	/* The spec doesn't define abstract register numbers for vector registers. */
	if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31)
		return ERROR_FAIL;

	uint32_t command = access_register_command(target, number, size,
			AC_ACCESS_REGISTER_TRANSFER);

	int result = execute_abstract_command(target, command);
	if (result != ERROR_OK) {
		if (info->cmderr == CMDERR_NOT_SUPPORTED) {
			/* Remember the capability gap so we don't retry every time. */
			if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
				info->abstract_read_fpr_supported = false;
				LOG_INFO("Disabling abstract command reads from FPRs.");
			} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
				info->abstract_read_csr_supported = false;
				LOG_INFO("Disabling abstract command reads from CSRs.");
			}
		}
		return result;
	}

	if (value)
		*value = read_abstract_arg(target, 0, size);

	return ERROR_OK;
}
934
/* Write @value to register @number via an abstract command. Mirrors
 * register_read_abstract(): fails fast for register classes already known
 * unsupported, and records CMDERR_NOT_SUPPORTED so later calls skip the
 * attempt and fall back to progbuf. */
static int register_write_abstract(struct target *target, uint32_t number,
		uint64_t value, unsigned size)
{
	RISCV013_INFO(info);

	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			!info->abstract_write_fpr_supported)
		return ERROR_FAIL;
	if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
			!info->abstract_write_csr_supported)
		return ERROR_FAIL;

	uint32_t command = access_register_command(target, number, size,
			AC_ACCESS_REGISTER_TRANSFER |
			AC_ACCESS_REGISTER_WRITE);

	/* Stage the value in the data registers before issuing the command. */
	if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
		return ERROR_FAIL;

	int result = execute_abstract_command(target, command);
	if (result != ERROR_OK) {
		if (info->cmderr == CMDERR_NOT_SUPPORTED) {
			/* Remember the capability gap so we don't retry every time. */
			if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
				info->abstract_write_fpr_supported = false;
				LOG_INFO("Disabling abstract command writes to FPRs.");
			} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
				info->abstract_write_csr_supported = false;
				LOG_INFO("Disabling abstract command writes to CSRs.");
			}
		}
		return result;
	}

	return ERROR_OK;
}
970
971 /*
972 * Sets the AAMSIZE field of a memory access abstract command based on
973 * the width (bits).
974 */
975 static uint32_t abstract_memory_size(unsigned width)
976 {
977 switch (width) {
978 case 8:
979 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 0);
980 case 16:
981 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 1);
982 case 32:
983 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 2);
984 case 64:
985 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 3);
986 case 128:
987 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 4);
988 default:
989 LOG_ERROR("Unsupported memory width: %d", width);
990 return 0;
991 }
992 }
993
994 /*
995 * Creates a memory access abstract command.
996 */
997 static uint32_t access_memory_command(struct target *target, bool virtual,
998 unsigned width, bool postincrement, bool write)
999 {
1000 uint32_t command = set_field(0, AC_ACCESS_MEMORY_CMDTYPE, 2);
1001 command = set_field(command, AC_ACCESS_MEMORY_AAMVIRTUAL, virtual);
1002 command |= abstract_memory_size(width);
1003 command = set_field(command, AC_ACCESS_MEMORY_AAMPOSTINCREMENT,
1004 postincrement);
1005 command = set_field(command, AC_ACCESS_MEMORY_WRITE, write);
1006
1007 return command;
1008 }
1009
/* Probe whether the program buffer is writable by the hart itself, caching
 * the answer in info->progbuf_writable (so the probe runs at most once).
 * A hart-writable progbuf can double as scratch memory for scratch_reserve().
 * Also records the hart-visible progbuf address in info->progbuf_address. */
static int examine_progbuf(struct target *target)
{
	riscv013_info_t *info = get_info(target);

	/* Already probed; reuse the cached result. */
	if (info->progbuf_writable != YNM_MAYBE)
		return ERROR_OK;

	/* Figure out if progbuf is writable. */

	if (info->progbufsize < 1) {
		info->progbuf_writable = YNM_NO;
		LOG_INFO("No program buffer present.");
		return ERROR_OK;
	}

	/* s0 is clobbered as a staging register; save it for restore below. */
	uint64_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* auipc with zero immediate puts the address of the executing
	 * instruction into s0 — i.e. the progbuf address as the hart sees it. */
	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, auipc(S0));
	if (riscv_program_exec(&program, target) != ERROR_OK)
		return ERROR_FAIL;

	/* NOTE(review): on this and the previous error return, s0 is not
	 * restored — confirm whether that matters to callers. */
	if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Try to store s0 (the progbuf address itself) into progbuf word 0. */
	riscv_program_init(&program, target);
	riscv_program_insert(&program, sw(S0, S0, 0));
	int result = riscv_program_exec(&program, target);

	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	if (result != ERROR_OK) {
		/* This program might have failed if the program buffer is not
		 * writable. */
		info->progbuf_writable = YNM_NO;
		return ERROR_OK;
	}

	/* Read progbuf word 0 back over DMI; if the store landed, the hart can
	 * write the progbuf. */
	uint32_t written;
	if (dmi_read(target, &written, DM_PROGBUF0) != ERROR_OK)
		return ERROR_FAIL;
	if (written == (uint32_t) info->progbuf_address) {
		LOG_INFO("progbuf is writable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_YES;

	} else {
		LOG_INFO("progbuf is not writeable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_NO;
	}

	return ERROR_OK;
}
1068
1069 static int is_fpu_reg(uint32_t gdb_regno)
1070 {
1071 return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) ||
1072 (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) ||
1073 (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) ||
1074 (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR);
1075 }
1076
1077 static int is_vector_reg(uint32_t gdb_regno)
1078 {
1079 return (gdb_regno >= GDB_REGNO_V0 && gdb_regno <= GDB_REGNO_V31) ||
1080 gdb_regno == GDB_REGNO_VSTART ||
1081 gdb_regno == GDB_REGNO_VXSAT ||
1082 gdb_regno == GDB_REGNO_VXRM ||
1083 gdb_regno == GDB_REGNO_VL ||
1084 gdb_regno == GDB_REGNO_VTYPE ||
1085 gdb_regno == GDB_REGNO_VLENB;
1086 }
1087
1088 static int prep_for_register_access(struct target *target, uint64_t *mstatus,
1089 int regno)
1090 {
1091 if (is_fpu_reg(regno) || is_vector_reg(regno)) {
1092 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
1093 return ERROR_FAIL;
1094 if (is_fpu_reg(regno) && (*mstatus & MSTATUS_FS) == 0) {
1095 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1096 set_field(*mstatus, MSTATUS_FS, 1)) != ERROR_OK)
1097 return ERROR_FAIL;
1098 } else if (is_vector_reg(regno) && (*mstatus & MSTATUS_VS) == 0) {
1099 if (register_write_direct(target, GDB_REGNO_MSTATUS,
1100 set_field(*mstatus, MSTATUS_VS, 1)) != ERROR_OK)
1101 return ERROR_FAIL;
1102 }
1103 } else {
1104 *mstatus = 0;
1105 }
1106 return ERROR_OK;
1107 }
1108
1109 static int cleanup_after_register_access(struct target *target,
1110 uint64_t mstatus, int regno)
1111 {
1112 if ((is_fpu_reg(regno) && (mstatus & MSTATUS_FS) == 0) ||
1113 (is_vector_reg(regno) && (mstatus & MSTATUS_VS) == 0))
1114 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
1115 return ERROR_FAIL;
1116 return ERROR_OK;
1117 }
1118
/* Which debugger-accessible address space a scratch buffer lives in. */
typedef enum {
	/* Debug Module data0..dataN abstract command registers. */
	SPACE_DM_DATA,
	/* Debug Module program buffer words. */
	SPACE_DMI_PROGBUF,
	/* Ordinary system RAM (an OpenOCD work area). */
	SPACE_DMI_RAM
} memory_space_t;

/* Describes a piece of scratch memory found by scratch_reserve(). */
typedef struct {
	/* How can the debugger access this memory? */
	memory_space_t memory_space;
	/* Memory address to access the scratch memory from the hart. */
	riscv_addr_t hart_address;
	/* Memory address to access the scratch memory from the debugger.
	 * For DM data/progbuf spaces this is a word offset, not a byte address. */
	riscv_addr_t debug_address;
	/* Work-area handle; only set when memory_space == SPACE_DMI_RAM. */
	struct working_area *area;
} scratch_mem_t;
1134
1135 /**
1136 * Find some scratch memory to be used with the given program.
1137 */
1138 static int scratch_reserve(struct target *target,
1139 scratch_mem_t *scratch,
1140 struct riscv_program *program,
1141 unsigned size_bytes)
1142 {
1143 riscv_addr_t alignment = 1;
1144 while (alignment < size_bytes)
1145 alignment *= 2;
1146
1147 scratch->area = NULL;
1148
1149 riscv013_info_t *info = get_info(target);
1150
1151 /* Option 1: See if data# registers can be used as the scratch memory */
1152 if (info->dataaccess == 1) {
1153 /* Sign extend dataaddr. */
1154 scratch->hart_address = info->dataaddr;
1155 if (info->dataaddr & (1<<11))
1156 scratch->hart_address |= 0xfffffffffffff000ULL;
1157 /* Align. */
1158 scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);
1159
1160 if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 >=
1161 info->datasize) {
1162 scratch->memory_space = SPACE_DM_DATA;
1163 scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
1164 return ERROR_OK;
1165 }
1166 }
1167
1168 /* Option 2: See if progbuf can be used as the scratch memory */
1169 if (examine_progbuf(target) != ERROR_OK)
1170 return ERROR_FAIL;
1171
1172 /* Allow for ebreak at the end of the program. */
1173 unsigned program_size = (program->instruction_count + 1) * 4;
1174 scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
1175 ~(alignment - 1);
1176 if ((info->progbuf_writable == YNM_YES) &&
1177 ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 >=
1178 info->progbufsize)) {
1179 scratch->memory_space = SPACE_DMI_PROGBUF;
1180 scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
1181 return ERROR_OK;
1182 }
1183
1184 /* Option 3: User-configured memory area as scratch RAM */
1185 if (target_alloc_working_area(target, size_bytes + alignment - 1,
1186 &scratch->area) == ERROR_OK) {
1187 scratch->hart_address = (scratch->area->address + alignment - 1) &
1188 ~(alignment - 1);
1189 scratch->memory_space = SPACE_DMI_RAM;
1190 scratch->debug_address = scratch->hart_address;
1191 return ERROR_OK;
1192 }
1193
1194 LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
1195 "a work area with 'configure -work-area-phys'.", size_bytes);
1196 return ERROR_FAIL;
1197 }
1198
/* Release the work area reserved by scratch_reserve(), if any.
 * scratch->area is only non-NULL when Option 3 (work-area RAM) was chosen;
 * NOTE(review): this assumes target_free_working_area() tolerates a NULL
 * area for the DM-data/progbuf cases — confirm against its implementation. */
static int scratch_release(struct target *target,
		scratch_mem_t *scratch)
{
	return target_free_working_area(target, scratch->area);
}
1204
1205 static int scratch_read64(struct target *target, scratch_mem_t *scratch,
1206 uint64_t *value)
1207 {
1208 uint32_t v;
1209 switch (scratch->memory_space) {
1210 case SPACE_DM_DATA:
1211 if (dmi_read(target, &v, DM_DATA0 + scratch->debug_address) != ERROR_OK)
1212 return ERROR_FAIL;
1213 *value = v;
1214 if (dmi_read(target, &v, DM_DATA1 + scratch->debug_address) != ERROR_OK)
1215 return ERROR_FAIL;
1216 *value |= ((uint64_t) v) << 32;
1217 break;
1218 case SPACE_DMI_PROGBUF:
1219 if (dmi_read(target, &v, DM_PROGBUF0 + scratch->debug_address) != ERROR_OK)
1220 return ERROR_FAIL;
1221 *value = v;
1222 if (dmi_read(target, &v, DM_PROGBUF1 + scratch->debug_address) != ERROR_OK)
1223 return ERROR_FAIL;
1224 *value |= ((uint64_t) v) << 32;
1225 break;
1226 case SPACE_DMI_RAM:
1227 {
1228 uint8_t buffer[8] = {0};
1229 if (read_memory(target, scratch->debug_address, 4, 2, buffer, 4) != ERROR_OK)
1230 return ERROR_FAIL;
1231 *value = buffer[0] |
1232 (((uint64_t) buffer[1]) << 8) |
1233 (((uint64_t) buffer[2]) << 16) |
1234 (((uint64_t) buffer[3]) << 24) |
1235 (((uint64_t) buffer[4]) << 32) |
1236 (((uint64_t) buffer[5]) << 40) |
1237 (((uint64_t) buffer[6]) << 48) |
1238 (((uint64_t) buffer[7]) << 56);
1239 }
1240 break;
1241 }
1242 return ERROR_OK;
1243 }
1244
1245 static int scratch_write64(struct target *target, scratch_mem_t *scratch,
1246 uint64_t value)
1247 {
1248 switch (scratch->memory_space) {
1249 case SPACE_DM_DATA:
1250 dmi_write(target, DM_DATA0 + scratch->debug_address, value);
1251 dmi_write(target, DM_DATA1 + scratch->debug_address, value >> 32);
1252 break;
1253 case SPACE_DMI_PROGBUF:
1254 dmi_write(target, DM_PROGBUF0 + scratch->debug_address, value);
1255 dmi_write(target, DM_PROGBUF1 + scratch->debug_address, value >> 32);
1256 break;
1257 case SPACE_DMI_RAM:
1258 {
1259 uint8_t buffer[8] = {
1260 value,
1261 value >> 8,
1262 value >> 16,
1263 value >> 24,
1264 value >> 32,
1265 value >> 40,
1266 value >> 48,
1267 value >> 56
1268 };
1269 if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
1270 return ERROR_FAIL;
1271 }
1272 break;
1273 }
1274 return ERROR_OK;
1275 }
1276
1277 /** Return register size in bits. */
1278 static unsigned register_size(struct target *target, unsigned number)
1279 {
1280 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1281 * when this function is called during examine(). */
1282 if (target->reg_cache)
1283 return target->reg_cache->reg_list[number].size;
1284 else
1285 return riscv_xlen(target);
1286 }
1287
1288 static bool has_sufficient_progbuf(struct target *target, unsigned size)
1289 {
1290 RISCV013_INFO(info);
1291 RISCV_INFO(r);
1292
1293 return info->progbufsize + r->impebreak >= size;
1294 }
1295
1296 /**
1297 * Immediately write the new value to the requested register. This mechanism
1298 * bypasses any caches.
1299 */
1300 static int register_write_direct(struct target *target, unsigned number,
1301 uint64_t value)
1302 {
1303 LOG_DEBUG("{%d} %s <- 0x%" PRIx64, riscv_current_hartid(target),
1304 gdb_regno_name(number), value);
1305
1306 int result = register_write_abstract(target, number, value,
1307 register_size(target, number));
1308 if (result == ERROR_OK || !has_sufficient_progbuf(target, 2) ||
1309 !riscv_is_halted(target))
1310 return result;
1311
1312 struct riscv_program program;
1313 riscv_program_init(&program, target);
1314
1315 uint64_t s0;
1316 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
1317 return ERROR_FAIL;
1318
1319 uint64_t mstatus;
1320 if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
1321 return ERROR_FAIL;
1322
1323 scratch_mem_t scratch;
1324 bool use_scratch = false;
1325 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
1326 riscv_supports_extension(target, 'D') &&
1327 riscv_xlen(target) < 64) {
1328 /* There are no instructions to move all the bits from a register, so
1329 * we need to use some scratch RAM. */
1330 use_scratch = true;
1331 riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));
1332
1333 if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
1334 return ERROR_FAIL;
1335
1336 if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
1337 != ERROR_OK) {
1338 scratch_release(target, &scratch);
1339 return ERROR_FAIL;
1340 }
1341
1342 if (scratch_write64(target, &scratch, value) != ERROR_OK) {
1343 scratch_release(target, &scratch);
1344 return ERROR_FAIL;
1345 }
1346
1347 } else if (number == GDB_REGNO_VTYPE) {
1348 riscv_program_insert(&program, csrr(S0, CSR_VL));
1349 riscv_program_insert(&program, vsetvli(ZERO, S0, value));
1350
1351 } else {
1352 if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
1353 return ERROR_FAIL;
1354
1355 if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
1356 if (riscv_supports_extension(target, 'D'))
1357 riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
1358 else
1359 riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
1360 } else if (number == GDB_REGNO_VL) {
1361 /* "The XLEN-bit-wide read-only vl CSR can only be updated by the
1362 * vsetvli and vsetvl instructions, and the fault-only-rst vector
1363 * load instruction variants." */
1364 riscv_reg_t vtype;
1365 if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1366 return ERROR_FAIL;
1367 if (riscv_program_insert(&program, vsetvli(ZERO, S0, vtype)) != ERROR_OK)
1368 return ERROR_FAIL;
1369 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
1370 riscv_program_csrw(&program, S0, number);
1371 } else {
1372 LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
1373 return ERROR_FAIL;
1374 }
1375 }
1376
1377 int exec_out = riscv_program_exec(&program, target);
1378 /* Don't message on error. Probably the register doesn't exist. */
1379 if (exec_out == ERROR_OK && target->reg_cache) {
1380 struct reg *reg = &target->reg_cache->reg_list[number];
1381 buf_set_u64(reg->value, 0, reg->size, value);
1382 }
1383
1384 if (use_scratch)
1385 scratch_release(target, &scratch);
1386
1387 if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
1388 return ERROR_FAIL;
1389
1390 /* Restore S0. */
1391 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
1392 return ERROR_FAIL;
1393
1394 return exec_out;
1395 }
1396
1397 /** Read register value from the target. Also update the cached value. */
1398 static int register_read(struct target *target, uint64_t *value, uint32_t number)
1399 {
1400 if (number == GDB_REGNO_ZERO) {
1401 *value = 0;
1402 return ERROR_OK;
1403 }
1404 int result = register_read_direct(target, value, number);
1405 if (result != ERROR_OK)
1406 return ERROR_FAIL;
1407 if (target->reg_cache) {
1408 struct reg *reg = &target->reg_cache->reg_list[number];
1409 buf_set_u64(reg->value, 0, reg->size, *value);
1410 }
1411 return ERROR_OK;
1412 }
1413
/** Actually read registers from the target right now.
 *
 * First tries an abstract Access Register command; if that fails, falls
 * back to executing instructions from the program buffer (only for
 * non-GPRs, and only when at least 2 progbuf slots are available). */
static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
{
	int result = register_read_abstract(target, value, number,
			register_size(target, number));

	if (result != ERROR_OK &&
			has_sufficient_progbuf(target, 2) &&
			number > GDB_REGNO_XPR31) {
		struct riscv_program program;
		riscv_program_init(&program, target);

		scratch_mem_t scratch;
		bool use_scratch = false;

		/* s0 is clobbered as a staging register; save it for restore below. */
		riscv_reg_t s0;
		if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
			return ERROR_FAIL;

		/* Write program to move data into s0. */

		/* Enable mstatus.FS/VS if an FPU/vector register is involved. */
		uint64_t mstatus;
		if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			if (riscv_supports_extension(target, 'D')
					&& riscv_xlen(target) < 64) {
				/* There are no instructions to move all the bits from a
				 * register, so we need to use some scratch RAM. */
				riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
							0));

				if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
					return ERROR_FAIL;
				use_scratch = true;

				/* Point s0 at the scratch buffer for the fsd above. */
				if (register_write_direct(target, GDB_REGNO_S0,
							scratch.hart_address) != ERROR_OK) {
					scratch_release(target, &scratch);
					return ERROR_FAIL;
				}
			} else if (riscv_supports_extension(target, 'D')) {
				riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
			} else {
				riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
			}
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrr(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register: %s", gdb_regno_name(number));
			return ERROR_FAIL;
		}

		/* Execute program. */
		result = riscv_program_exec(&program, target);
		/* Don't message on error. Probably the register doesn't exist. */

		if (use_scratch) {
			result = scratch_read64(target, &scratch, value);
			scratch_release(target, &scratch);
			if (result != ERROR_OK)
				return result;
		} else {
			/* Read S0 */
			if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
		}

		if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		/* Restore S0. */
		if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
			return ERROR_FAIL;
	}

	if (result == ERROR_OK) {
		LOG_DEBUG("{%d} %s = 0x%" PRIx64, riscv_current_hartid(target),
				gdb_regno_name(number), *value);
	}

	return result;
}
1498
1499 static int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
1500 {
1501 time_t start = time(NULL);
1502 while (1) {
1503 uint32_t value;
1504 if (dmstatus_read(target, &value, false) != ERROR_OK)
1505 return ERROR_FAIL;
1506 if (dmstatus)
1507 *dmstatus = value;
1508 if (!get_field(value, DM_DMSTATUS_AUTHBUSY))
1509 break;
1510 if (time(NULL) - start > riscv_command_timeout_sec) {
1511 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1512 "Increase the timeout with riscv set_command_timeout_sec.",
1513 riscv_command_timeout_sec,
1514 value);
1515 return ERROR_FAIL;
1516 }
1517 }
1518
1519 return ERROR_OK;
1520 }
1521
1522 /*** OpenOCD target functions. ***/
1523
1524 static void deinit_target(struct target *target)
1525 {
1526 LOG_DEBUG("riscv_deinit_target()");
1527 riscv_info_t *info = (riscv_info_t *) target->arch_info;
1528 free(info->version_specific);
1529 /* TODO: free register arch_info */
1530 info->version_specific = NULL;
1531 }
1532
1533 static int set_haltgroup(struct target *target, bool *supported)
1534 {
1535 uint32_t write = set_field(DM_DMCS2_HGWRITE, DM_DMCS2_GROUP, target->smp);
1536 if (dmi_write(target, DM_DMCS2, write) != ERROR_OK)
1537 return ERROR_FAIL;
1538 uint32_t read;
1539 if (dmi_read(target, &read, DM_DMCS2) != ERROR_OK)
1540 return ERROR_FAIL;
1541 *supported = get_field(read, DM_DMCS2_GROUP) == (unsigned)target->smp;
1542 return ERROR_OK;
1543 }
1544
1545 static int discover_vlenb(struct target *target)
1546 {
1547 RISCV_INFO(r);
1548 riscv_reg_t vlenb;
1549
1550 if (register_read(target, &vlenb, GDB_REGNO_VLENB) != ERROR_OK) {
1551 LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
1552 target_name(target));
1553 r->vlenb = 0;
1554 return ERROR_OK;
1555 }
1556 r->vlenb = vlenb;
1557
1558 LOG_INFO("Vector support with vlenb=%d", r->vlenb);
1559
1560 return ERROR_OK;
1561 }
1562
/* Probe the DTM and Debug Module, enumerate the harts behind this DM, and
 * discover per-hart properties (XLEN, misa, vlenb).  The hart is halted
 * temporarily if necessary and resumed before returning.  Called by the
 * OpenOCD core as the target's examine handler. */
static int examine(struct target *target)
{
	/* Don't need to select dbus, since the first thing we do is read dtmcontrol. */

	uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
	LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
	LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
	LOG_DEBUG(" idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
	LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
	LOG_DEBUG(" abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
	LOG_DEBUG(" version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
	if (dtmcontrol == 0) {
		LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
		return ERROR_FAIL;
	}
	if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
		LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
				get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
		return ERROR_FAIL;
	}

	riscv013_info_t *info = get_info(target);
	/* TODO: This won't be true if there are multiple DMs. */
	info->index = target->coreid;
	info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
	info->dtmcs_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);

	/* Reset the Debug Module. */
	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;
	if (!dm->was_reset) {
		dmi_write(target, DM_DMCONTROL, 0);
		dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
		dm->was_reset = true;
	}

	/* Write all-ones to hartsel (and set hasel) so the read-back below
	 * reveals how many hartsel bits and whether multi-hart selection the
	 * DM implements. */
	dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_HARTSELLO |
			DM_DMCONTROL_HARTSELHI | DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_HASEL);
	uint32_t dmcontrol;
	if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
		return ERROR_FAIL;

	if (!get_field(dmcontrol, DM_DMCONTROL_DMACTIVE)) {
		LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
				dmcontrol);
		return ERROR_FAIL;
	}

	dm->hasel_supported = get_field(dmcontrol, DM_DMCONTROL_HASEL);

	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("dmstatus: 0x%08x", dmstatus);
	int dmstatus_version = get_field(dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		/* Error was already printed out in dmstatus_read(). */
		return ERROR_FAIL;
	}

	/* Count the hartsel bits that stuck at 1 to get hartsellen. */
	uint32_t hartsel =
		(get_field(dmcontrol, DM_DMCONTROL_HARTSELHI) <<
				DM_DMCONTROL_HARTSELLO_LENGTH) |
		get_field(dmcontrol, DM_DMCONTROL_HARTSELLO);
	info->hartsellen = 0;
	while (hartsel & 1) {
		info->hartsellen++;
		hartsel >>= 1;
	}
	LOG_DEBUG("hartsellen=%d", info->hartsellen);

	uint32_t hartinfo;
	if (dmi_read(target, &hartinfo, DM_HARTINFO) != ERROR_OK)
		return ERROR_FAIL;

	/* Record how the data registers can be used as hart-visible memory. */
	info->datasize = get_field(hartinfo, DM_HARTINFO_DATASIZE);
	info->dataaccess = get_field(hartinfo, DM_HARTINFO_DATAACCESS);
	info->dataaddr = get_field(hartinfo, DM_HARTINFO_DATAADDR);

	if (!get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", dmstatus);
		/* If we return ERROR_FAIL here, then in a multicore setup the next
		 * core won't be examined, which means we won't set up the
		 * authentication commands for them, which means the config script
		 * needs to be a lot more complex. */
		return ERROR_OK;
	}

	if (dmi_read(target, &info->sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	/* Check that abstract data registers are accessible. */
	uint32_t abstractcs;
	if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
		return ERROR_FAIL;
	info->datacount = get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT);
	info->progbufsize = get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE);

	LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);

	RISCV_INFO(r);
	r->impebreak = get_field(dmstatus, DM_DMSTATUS_IMPEBREAK);

	if (!has_sufficient_progbuf(target, 2)) {
		LOG_WARNING("We won't be able to execute fence instructions on this "
				"target. Memory may not always appear consistent. "
				"(progbufsize=%d, impebreak=%d)", info->progbufsize,
				r->impebreak);
	}

	if (info->progbufsize < 4 && riscv_enable_virtual) {
		LOG_ERROR("set_enable_virtual is not available on this target. It "
				"requires a program buffer size of at least 4. (progbufsize=%d) "
				"Use `riscv set_enable_virtual off` to continue."
				, info->progbufsize);
	}

	/* Before doing anything else we must first enumerate the harts. */
	if (dm->hart_count < 0) {
		for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
			r->current_hartid = i;
			if (riscv013_select_current_hart(target) != ERROR_OK)
				return ERROR_FAIL;

			uint32_t s;
			if (dmstatus_read(target, &s, true) != ERROR_OK)
				return ERROR_FAIL;
			if (get_field(s, DM_DMSTATUS_ANYNONEXISTENT))
				break;
			dm->hart_count = i + 1;

			/* Acknowledge any pending havereset so later reset detection
			 * starts from a clean slate. */
			if (get_field(s, DM_DMSTATUS_ANYHAVERESET))
				dmi_write(target, DM_DMCONTROL,
						set_hartsel(DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_ACKHAVERESET, i));
		}

		LOG_DEBUG("Detected %d harts.", dm->hart_count);
	}

	r->current_hartid = target->coreid;

	if (dm->hart_count == 0) {
		LOG_ERROR("No harts found!");
		return ERROR_FAIL;
	}

	/* Don't call any riscv_* functions until after we've counted the number of
	 * cores and initialized registers. */

	if (riscv013_select_current_hart(target) != ERROR_OK)
		return ERROR_FAIL;

	/* Halt the hart (if it isn't already) so its registers can be probed;
	 * it is resumed below when we were the ones who halted it. */
	bool halted = riscv_is_halted(target);
	if (!halted) {
		if (riscv013_halt_go(target) != ERROR_OK) {
			LOG_ERROR("Fatal: Hart %d failed to halt during examine()", r->current_hartid);
			return ERROR_FAIL;
		}
	}

	/* Without knowing anything else we can at least mess with the
	 * program buffer. */
	r->debug_buffer_size = info->progbufsize;

	/* Probe XLEN: a 64-bit abstract read of s0 only succeeds on RV64. */
	int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
	if (result == ERROR_OK)
		r->xlen = 64;
	else
		r->xlen = 32;

	if (register_read(target, &r->misa, GDB_REGNO_MISA)) {
		LOG_ERROR("Fatal: Failed to read MISA from hart %d.", r->current_hartid);
		return ERROR_FAIL;
	}

	if (riscv_supports_extension(target, 'V')) {
		if (discover_vlenb(target) != ERROR_OK)
			return ERROR_FAIL;
	}

	/* Now init registers based on what we discovered. */
	if (riscv_init_registers(target) != ERROR_OK)
		return ERROR_FAIL;

	/* Display this as early as possible to help people who are using
	 * really slow simulators. */
	LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, r->current_hartid, r->xlen,
			r->misa);

	if (!halted)
		riscv013_step_or_resume_current_hart(target, false, false);

	target_set_examined(target);

	if (target->smp) {
		bool haltgroup_supported;
		if (set_haltgroup(target, &haltgroup_supported) != ERROR_OK)
			return ERROR_FAIL;
		if (haltgroup_supported)
			LOG_INFO("Core %d made part of halt group %d.", target->coreid,
					target->smp);
		else
			LOG_INFO("Core %d could not be made part of halt group %d.",
					target->coreid, target->smp);
	}

	/* Some regression suites rely on seeing 'Examined RISC-V core' to know
	 * when they can connect with gdb/telnet.
	 * We will need to update those suites if we want to change that text. */
	LOG_INFO("Examined RISC-V core; found %d harts",
			riscv_count_harts(target));
	LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, r->current_hartid, r->xlen,
			r->misa);
	return ERROR_OK;
}
1782
1783 static int riscv013_authdata_read(struct target *target, uint32_t *value, unsigned int index)
1784 {
1785 if (index > 0) {
1786 LOG_ERROR("Spec 0.13 only has a single authdata register.");
1787 return ERROR_FAIL;
1788 }
1789
1790 if (wait_for_authbusy(target, NULL) != ERROR_OK)
1791 return ERROR_FAIL;
1792
1793 return dmi_read(target, value, DM_AUTHDATA);
1794 }
1795
/* Write the single authdata register defined by debug spec 0.13.  If the
 * write flips the DM from unauthenticated to authenticated, re-examine
 * every target behind this DM, since examine() bails out early while the
 * DM is locked. */
static int riscv013_authdata_write(struct target *target, uint32_t value, unsigned int index)
{
	if (index > 0) {
		LOG_ERROR("Spec 0.13 only has a single authdata register.");
		return ERROR_FAIL;
	}

	/* Sample the authenticated bit before and after the write so we can
	 * detect a successful unlock. */
	uint32_t before, after;
	if (wait_for_authbusy(target, &before) != ERROR_OK)
		return ERROR_FAIL;

	dmi_write(target, DM_AUTHDATA, value);

	if (wait_for_authbusy(target, &after) != ERROR_OK)
		return ERROR_FAIL;

	if (!get_field(before, DM_DMSTATUS_AUTHENTICATED) &&
			get_field(after, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_INFO("authdata_write resulted in successful authentication");
		int result = ERROR_OK;
		dm013_info_t *dm = get_dm(target);
		if (!dm)
			return ERROR_FAIL;
		/* Re-examine all targets on this DM now that it is unlocked. */
		target_list_t *entry;
		list_for_each_entry(entry, &dm->target_list, list) {
			if (examine(entry->target) != ERROR_OK)
				result = ERROR_FAIL;
		}
		return result;
	}

	return ERROR_OK;
}
1829
1830 static int riscv013_hart_count(struct target *target)
1831 {
1832 dm013_info_t *dm = get_dm(target);
1833 assert(dm);
1834 return dm->hart_count;
1835 }
1836
1837 /* Try to find out the widest memory access size depending on the selected memory access methods. */
1838 static unsigned riscv013_data_bits(struct target *target)
1839 {
1840 RISCV013_INFO(info);
1841 RISCV_INFO(r);
1842
1843 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
1844 int method = r->mem_access_methods[i];
1845
1846 if (method == RISCV_MEM_ACCESS_PROGBUF) {
1847 if (has_sufficient_progbuf(target, 3))
1848 return riscv_xlen(target);
1849 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
1850 if (get_field(info->sbcs, DM_SBCS_SBACCESS128))
1851 return 128;
1852 if (get_field(info->sbcs, DM_SBCS_SBACCESS64))
1853 return 64;
1854 if (get_field(info->sbcs, DM_SBCS_SBACCESS32))
1855 return 32;
1856 if (get_field(info->sbcs, DM_SBCS_SBACCESS16))
1857 return 16;
1858 if (get_field(info->sbcs, DM_SBCS_SBACCESS8))
1859 return 8;
1860 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
1861 /* TODO: Once there is a spec for discovering abstract commands, we can
1862 * take those into account as well. For now we assume abstract commands
1863 * support XLEN-wide accesses. */
1864 return riscv_xlen(target);
1865 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
1866 /* No further mem access method to try. */
1867 break;
1868 }
1869 LOG_ERROR("Unable to determine supported data bits on this target. Assuming 32 bits.");
1870 return 32;
1871 }
1872
/* Implement the `riscv info` command for 0.13 targets: print DM
 * capabilities in "category key value" form via riscv_print_info_line(). */
COMMAND_HELPER(riscv013_print_info, struct target *target)
{
	RISCV013_INFO(info);

	/* Abstract description. */
	/* read/write-while-running capability mirrors the system bus access
	 * widths advertised in sbcs. */
	riscv_print_info_line(CMD, "target", "memory.read_while_running8", get_field(info->sbcs, DM_SBCS_SBACCESS8));
	riscv_print_info_line(CMD, "target", "memory.write_while_running8", get_field(info->sbcs, DM_SBCS_SBACCESS8));
	riscv_print_info_line(CMD, "target", "memory.read_while_running16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "target", "memory.write_while_running16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "target", "memory.read_while_running32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "target", "memory.write_while_running32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "target", "memory.read_while_running64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "target", "memory.write_while_running64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "target", "memory.read_while_running128", get_field(info->sbcs, DM_SBCS_SBACCESS128));
	riscv_print_info_line(CMD, "target", "memory.write_while_running128", get_field(info->sbcs, DM_SBCS_SBACCESS128));

	/* Lower level description. */
	riscv_print_info_line(CMD, "dm", "abits", info->abits);
	riscv_print_info_line(CMD, "dm", "progbufsize", info->progbufsize);
	riscv_print_info_line(CMD, "dm", "sbversion", get_field(info->sbcs, DM_SBCS_SBVERSION));
	riscv_print_info_line(CMD, "dm", "sbasize", get_field(info->sbcs, DM_SBCS_SBASIZE));
	riscv_print_info_line(CMD, "dm", "sbaccess128", get_field(info->sbcs, DM_SBCS_SBACCESS128));
	riscv_print_info_line(CMD, "dm", "sbaccess64", get_field(info->sbcs, DM_SBCS_SBACCESS64));
	riscv_print_info_line(CMD, "dm", "sbaccess32", get_field(info->sbcs, DM_SBCS_SBACCESS32));
	riscv_print_info_line(CMD, "dm", "sbaccess16", get_field(info->sbcs, DM_SBCS_SBACCESS16));
	riscv_print_info_line(CMD, "dm", "sbaccess8", get_field(info->sbcs, DM_SBCS_SBACCESS8));

	/* Authentication state is only printed when dmstatus is readable. */
	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) == ERROR_OK)
		riscv_print_info_line(CMD, "dm", "authenticated", get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED));

	return 0;
}
1906
1907 static int prep_for_vector_access(struct target *target, uint64_t *vtype,
1908 uint64_t *vl, unsigned *debug_vl)
1909 {
1910 RISCV_INFO(r);
1911 /* TODO: this continuous save/restore is terrible for performance. */
1912 /* Write vtype and vl. */
1913 unsigned encoded_vsew;
1914 switch (riscv_xlen(target)) {
1915 case 32:
1916 encoded_vsew = 2;
1917 break;
1918 case 64:
1919 encoded_vsew = 3;
1920 break;
1921 default:
1922 LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target));
1923 return ERROR_FAIL;
1924 }
1925
1926 /* Save vtype and vl. */
1927 if (register_read(target, vtype, GDB_REGNO_VTYPE) != ERROR_OK)
1928 return ERROR_FAIL;
1929 if (register_read(target, vl, GDB_REGNO_VL) != ERROR_OK)
1930 return ERROR_FAIL;
1931
1932 if (register_write_direct(target, GDB_REGNO_VTYPE, encoded_vsew << 3) != ERROR_OK)
1933 return ERROR_FAIL;
1934 *debug_vl = DIV_ROUND_UP(r->vlenb * 8, riscv_xlen(target));
1935 if (register_write_direct(target, GDB_REGNO_VL, *debug_vl) != ERROR_OK)
1936 return ERROR_FAIL;
1937
1938 return ERROR_OK;
1939 }
1940
1941 static int cleanup_after_vector_access(struct target *target, uint64_t vtype,
1942 uint64_t vl)
1943 {
1944 /* Restore vtype and vl. */
1945 if (register_write_direct(target, GDB_REGNO_VTYPE, vtype) != ERROR_OK)
1946 return ERROR_FAIL;
1947 if (register_write_direct(target, GDB_REGNO_VL, vl) != ERROR_OK)
1948 return ERROR_FAIL;
1949 return ERROR_OK;
1950 }
1951
/* Read vector register V(regno - GDB_REGNO_V0) into `value`, one XLEN-wide
 * slice per iteration: vmv_x_s copies element 0 into S0, then vslide1down.vx
 * shifts the register so the next element becomes element 0.  After debug_vl
 * iterations the register has been cycled through completely (NOTE(review):
 * contents are presumably restored by the full rotation on success — confirm
 * against the vector spec).  S0, mstatus, vtype and vl are saved and restored
 * around the access. */
static int riscv013_get_register_buf(struct target *target,
		uint8_t *value, int regno)
{
	assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);

	if (riscv_select_current_hart(target) != ERROR_OK)
		return ERROR_FAIL;

	/* S0 is used as the staging register for vector elements; save it. */
	riscv_reg_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t vtype, vl;
	unsigned debug_vl;
	if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
		return ERROR_FAIL;

	unsigned vnum = regno - GDB_REGNO_V0;
	unsigned xlen = riscv_xlen(target);

	/* Two-instruction program, executed once per XLEN-wide slice. */
	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, vmv_x_s(S0, vnum));
	riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));

	int result = ERROR_OK;
	for (unsigned i = 0; i < debug_vl; i++) {
		/* Executing the program might result in an exception if there is some
		 * issue with the vector implementation/instructions we're using. If that
		 * happens, attempt to restore as usual. We may have clobbered the
		 * vector register we tried to read already.
		 * For other failures, we just return error because things are probably
		 * so messed up that attempting to restore isn't going to help. */
		result = riscv_program_exec(&program, target);
		if (result == ERROR_OK) {
			uint64_t v;
			if (register_read_direct(target, &v, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u64(value, xlen * i, xlen, v);
		} else {
			break;
		}
	}

	/* Restore everything we touched, even if the loop bailed out early. */
	if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
		return ERROR_FAIL;

	if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return result;
}
2010
/* Write vector register V(regno - GDB_REGNO_V0) from `value`, one XLEN-wide
 * slice per iteration: each slice is loaded into S0, then vslide1down.vx
 * shifts it into the vector register from the top.  Mirror image of
 * riscv013_get_register_buf().  S0, mstatus, vtype and vl are saved and
 * restored around the access. */
static int riscv013_set_register_buf(struct target *target,
		int regno, const uint8_t *value)
{
	assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);

	if (riscv_select_current_hart(target) != ERROR_OK)
		return ERROR_FAIL;

	/* S0 is used as the staging register for vector elements; save it. */
	riscv_reg_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t vtype, vl;
	unsigned debug_vl;
	if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
		return ERROR_FAIL;

	unsigned vnum = regno - GDB_REGNO_V0;
	unsigned xlen = riscv_xlen(target);

	/* One-instruction program, executed once per XLEN-wide slice. */
	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));
	int result = ERROR_OK;
	for (unsigned i = 0; i < debug_vl; i++) {
		if (register_write_direct(target, GDB_REGNO_S0,
					buf_get_u64(value, xlen * i, xlen)) != ERROR_OK)
			return ERROR_FAIL;
		result = riscv_program_exec(&program, target);
		if (result != ERROR_OK)
			break;
	}

	/* Restore everything we touched, even if the loop bailed out early. */
	if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
		return ERROR_FAIL;

	if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return result;
}
2058
2059 static uint32_t sb_sbaccess(unsigned int size_bytes)
2060 {
2061 switch (size_bytes) {
2062 case 1:
2063 return set_field(0, DM_SBCS_SBACCESS, 0);
2064 case 2:
2065 return set_field(0, DM_SBCS_SBACCESS, 1);
2066 case 4:
2067 return set_field(0, DM_SBCS_SBACCESS, 2);
2068 case 8:
2069 return set_field(0, DM_SBCS_SBACCESS, 3);
2070 case 16:
2071 return set_field(0, DM_SBCS_SBACCESS, 4);
2072 }
2073 assert(0);
2074 return 0;
2075 }
2076
/* Write `address` to the system bus address registers, upper words first.
 * sbaddress0 is written last because that write may kick off a bus access
 * when sbreadonaddr is set.  Only the final write is optionally verified
 * (ensure_success); the upper-word writes are fire-and-forget. */
static int sb_write_address(struct target *target, target_addr_t address,
		bool ensure_success)
{
	RISCV013_INFO(info);
	unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
	/* There currently is no support for >64-bit addresses in OpenOCD. */
	if (sbasize > 96)
		dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS3, 0, false, false);
	if (sbasize > 64)
		dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS2, 0, false, false);
	if (sbasize > 32)
		dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS1, address >> 32, false, false);
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, DM_SBADDRESS0, address,
			false, ensure_success);
}
2092
/* Run a DMI batch, maintaining the post-reset delay countdown.  While
 * r->reset_delays_wait is non-negative it is decremented by the number of
 * scans in this batch; once it reaches zero the learned busy delays are
 * zeroed.  (NOTE(review): presumably this forces the delays to be re-learned
 * after a reset — confirm against where reset_delays_wait is set.) */
static int batch_run(const struct target *target, struct riscv_batch *batch)
{
	RISCV013_INFO(info);
	RISCV_INFO(r);
	if (r->reset_delays_wait >= 0) {
		r->reset_delays_wait -= batch->used_scans;
		if (r->reset_delays_wait <= 0) {
			batch->idle_count = 0;
			info->dmi_busy_delay = 0;
			info->ac_busy_delay = 0;
		}
	}
	return riscv_batch_run(batch);
}
2107
2108 static int sba_supports_access(struct target *target, unsigned int size_bytes)
2109 {
2110 RISCV013_INFO(info);
2111 switch (size_bytes) {
2112 case 1:
2113 return get_field(info->sbcs, DM_SBCS_SBACCESS8);
2114 case 2:
2115 return get_field(info->sbcs, DM_SBCS_SBACCESS16);
2116 case 4:
2117 return get_field(info->sbcs, DM_SBCS_SBACCESS32);
2118 case 8:
2119 return get_field(info->sbcs, DM_SBCS_SBACCESS64);
2120 case 16:
2121 return get_field(info->sbcs, DM_SBCS_SBACCESS128);
2122 default:
2123 return 0;
2124 }
2125 }
2126
2127 static int sample_memory_bus_v1(struct target *target,
2128 struct riscv_sample_buf *buf,
2129 const riscv_sample_config_t *config,
2130 int64_t until_ms)
2131 {
2132 RISCV013_INFO(info);
2133 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2134 if (sbasize > 64) {
2135 LOG_ERROR("Memory sampling is only implemented for sbasize <= 64.");
2136 return ERROR_NOT_IMPLEMENTED;
2137 }
2138
2139 if (get_field(info->sbcs, DM_SBCS_SBVERSION) != 1) {
2140 LOG_ERROR("Memory sampling is only implemented for SBA version 1.");
2141 return ERROR_NOT_IMPLEMENTED;
2142 }
2143
2144 uint32_t sbcs = 0;
2145 uint32_t sbcs_valid = false;
2146
2147 uint32_t sbaddress0 = 0;
2148 bool sbaddress0_valid = false;
2149 uint32_t sbaddress1 = 0;
2150 bool sbaddress1_valid = false;
2151
2152 /* How often to read each value in a batch. */
2153 const unsigned int repeat = 5;
2154
2155 unsigned int enabled_count = 0;
2156 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2157 if (config->bucket[i].enabled)
2158 enabled_count++;
2159 }
2160
2161 while (timeval_ms() < until_ms) {
2162 /*
2163 * batch_run() adds to the batch, so we can't simply reuse the same
2164 * batch over and over. So we create a new one every time through the
2165 * loop.
2166 */
2167 struct riscv_batch *batch = riscv_batch_alloc(
2168 target, 1 + enabled_count * 5 * repeat,
2169 info->dmi_busy_delay + info->bus_master_read_delay);
2170 if (!batch)
2171 return ERROR_FAIL;
2172
2173 unsigned int result_bytes = 0;
2174 for (unsigned int n = 0; n < repeat; n++) {
2175 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2176 if (config->bucket[i].enabled) {
2177 if (!sba_supports_access(target, config->bucket[i].size_bytes)) {
2178 LOG_ERROR("Hardware does not support SBA access for %d-byte memory sampling.",
2179 config->bucket[i].size_bytes);
2180 return ERROR_NOT_IMPLEMENTED;
2181 }
2182
2183 uint32_t sbcs_write = DM_SBCS_SBREADONADDR;
2184 if (enabled_count == 1)
2185 sbcs_write |= DM_SBCS_SBREADONDATA;
2186 sbcs_write |= sb_sbaccess(config->bucket[i].size_bytes);
2187 if (!sbcs_valid || sbcs_write != sbcs) {
2188 riscv_batch_add_dmi_write(batch, DM_SBCS, sbcs_write);
2189 sbcs = sbcs_write;
2190 sbcs_valid = true;
2191 }
2192
2193 if (sbasize > 32 &&
2194 (!sbaddress1_valid ||
2195 sbaddress1 != config->bucket[i].address >> 32)) {
2196 sbaddress1 = config->bucket[i].address >> 32;
2197 riscv_batch_add_dmi_write(batch, DM_SBADDRESS1, sbaddress1);
2198 sbaddress1_valid = true;
2199 }
2200 if (!sbaddress0_valid ||
2201 sbaddress0 != (config->bucket[i].address & 0xffffffff)) {
2202 sbaddress0 = config->bucket[i].address;
2203 riscv_batch_add_dmi_write(batch, DM_SBADDRESS0, sbaddress0);
2204 sbaddress0_valid = true;
2205 }
2206 if (config->bucket[i].size_bytes > 4)
2207 riscv_batch_add_dmi_read(batch, DM_SBDATA1);
2208 riscv_batch_add_dmi_read(batch, DM_SBDATA0);
2209 result_bytes += 1 + config->bucket[i].size_bytes;
2210 }
2211 }
2212 }
2213
2214 if (buf->used + result_bytes >= buf->size) {
2215 riscv_batch_free(batch);
2216 break;
2217 }
2218
2219 size_t sbcs_key = riscv_batch_add_dmi_read(batch, DM_SBCS);
2220
2221 int result = batch_run(target, batch);
2222 if (result != ERROR_OK)
2223 return result;
2224
2225 uint32_t sbcs_read = riscv_batch_get_dmi_read_data(batch, sbcs_key);
2226 if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
2227 /* Discard this batch (too much hassle to try to recover partial
2228 * data) and try again with a larger delay. */
2229 info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
2230 dmi_write(target, DM_SBCS, sbcs_read | DM_SBCS_SBBUSYERROR | DM_SBCS_SBERROR);
2231 riscv_batch_free(batch);
2232 continue;
2233 }
2234 if (get_field(sbcs_read, DM_SBCS_SBERROR)) {
2235 /* The memory we're sampling was unreadable, somehow. Give up. */
2236 dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR | DM_SBCS_SBERROR);
2237 riscv_batch_free(batch);
2238 return ERROR_FAIL;
2239 }
2240
2241 unsigned int read = 0;
2242 for (unsigned int n = 0; n < repeat; n++) {
2243 for (unsigned int i = 0; i < ARRAY_SIZE(config->bucket); i++) {
2244 if (config->bucket[i].enabled) {
2245 assert(i < RISCV_SAMPLE_BUF_TIMESTAMP_BEFORE);
2246 uint64_t value = 0;
2247 if (config->bucket[i].size_bytes > 4)
2248 value = ((uint64_t)riscv_batch_get_dmi_read_data(batch, read++)) << 32;
2249 value |= riscv_batch_get_dmi_read_data(batch, read++);
2250
2251 buf->buf[buf->used] = i;
2252 buf_set_u64(buf->buf + buf->used + 1, 0, config->bucket[i].size_bytes * 8, value);
2253 buf->used += 1 + config->bucket[i].size_bytes;
2254 }
2255 }
2256 }
2257
2258 riscv_batch_free(batch);
2259 }
2260
2261 return ERROR_OK;
2262 }
2263
2264 static int sample_memory(struct target *target,
2265 struct riscv_sample_buf *buf,
2266 riscv_sample_config_t *config,
2267 int64_t until_ms)
2268 {
2269 if (!config->enabled)
2270 return ERROR_OK;
2271
2272 return sample_memory_bus_v1(target, buf, config, until_ms);
2273 }
2274
/* target_type.init_target() for debug spec 0.13: wire up the
 * version-specific function pointers in riscv_info_t and allocate and
 * default-initialize the riscv013_info_t state. */
static int init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	LOG_DEBUG("init");
	RISCV_INFO(generic_info);

	generic_info->get_register = &riscv013_get_register;
	generic_info->set_register = &riscv013_set_register;
	generic_info->get_register_buf = &riscv013_get_register_buf;
	generic_info->set_register_buf = &riscv013_set_register_buf;
	generic_info->select_current_hart = &riscv013_select_current_hart;
	generic_info->is_halted = &riscv013_is_halted;
	generic_info->resume_go = &riscv013_resume_go;
	generic_info->step_current_hart = &riscv013_step_current_hart;
	generic_info->on_halt = &riscv013_on_halt;
	generic_info->resume_prep = &riscv013_resume_prep;
	generic_info->halt_prep = &riscv013_halt_prep;
	generic_info->halt_go = &riscv013_halt_go;
	generic_info->on_step = &riscv013_on_step;
	generic_info->halt_reason = &riscv013_halt_reason;
	generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
	generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
	generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
	generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
	generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
	generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
	generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
	generic_info->authdata_read = &riscv013_authdata_read;
	generic_info->authdata_write = &riscv013_authdata_write;
	generic_info->dmi_read = &dmi_read;
	generic_info->dmi_write = &dmi_write;
	generic_info->read_memory = read_memory;
	generic_info->test_sba_config_reg = &riscv013_test_sba_config_reg;
	generic_info->hart_count = &riscv013_hart_count;
	generic_info->data_bits = &riscv013_data_bits;
	generic_info->print_info = &riscv013_print_info;
	/* Allocate the 0.13-specific state, zero-initialized, unless a previous
	 * init already created it. */
	if (!generic_info->version_specific) {
		generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
		if (!generic_info->version_specific)
			return ERROR_FAIL;
	}
	generic_info->sample_memory = sample_memory;
	riscv013_info_t *info = get_info(target);

	/* -1 presumably means "not yet discovered"; examine() fills it in —
	 * TODO confirm. */
	info->progbufsize = -1;

	/* Busy delays start at zero and are learned as the DM reports busy. */
	info->dmi_busy_delay = 0;
	info->bus_master_read_delay = 0;
	info->bus_master_write_delay = 0;
	info->ac_busy_delay = 0;

	/* Assume all these abstract commands are supported until we learn
	 * otherwise.
	 * TODO: The spec allows eg. one CSR to be able to be accessed abstractly
	 * while another one isn't. We don't track that this closely here, but in
	 * the future we probably should. */
	info->abstract_read_csr_supported = true;
	info->abstract_write_csr_supported = true;
	info->abstract_read_fpr_supported = true;
	info->abstract_write_fpr_supported = true;

	info->has_aampostincrement = YNM_MAYBE;

	return ERROR_OK;
}
2340
/* target_type.assert_reset(): put the target into reset via ndmreset (or a
 * user-supplied reset-assert script), setting haltreq first when
 * `reset halt` was requested so the hart halts as it leaves reset. */
static int assert_reset(struct target *target)
{
	RISCV_INFO(r);

	select_dmi(target);

	/* Every dmcontrol write below keeps dmactive set so the DM stays up. */
	uint32_t control_base = set_field(0, DM_DMCONTROL_DMACTIVE, 1);

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* Run the user-supplied script if there is one. */
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	} else if (target->rtos) {
		/* There's only one target, and OpenOCD thinks each hart is a thread.
		 * We must reset them all. */

		/* TODO: Try to use hasel in dmcontrol */

		/* Set haltreq for each hart. */
		uint32_t control = control_base;

		control = set_hartsel(control_base, target->coreid);
		control = set_field(control, DM_DMCONTROL_HALTREQ,
				target->reset_halt ? 1 : 0);
		dmi_write(target, DM_DMCONTROL, control);

		/* Assert ndmreset */
		control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DM_DMCONTROL, control);

	} else {
		/* Reset just this hart: haltreq and ndmreset in a single write. */
		uint32_t control = set_hartsel(control_base, r->current_hartid);
		control = set_field(control, DM_DMCONTROL_HALTREQ,
				target->reset_halt ? 1 : 0);
		control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DM_DMCONTROL, control);
	}

	target->state = TARGET_RESET;

	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;

	/* The DM might have gotten reset if OpenOCD called us in some reset that
	 * involves SRST being toggled. So clear our cache which may be out of
	 * date. */
	memset(dm->progbuf_cache, 0, sizeof(dm->progbuf_cache));

	return ERROR_OK;
}
2392
/* target_type.deassert_reset(): release ndmreset, wait for each relevant
 * hart to leave reset, then acknowledge havereset.  The ack write uses
 * `control` (no HALTREQ bit), which also clears any haltreq left over from a
 * previous `reset halt` — required so a later `reset run` actually runs. */
static int deassert_reset(struct target *target)
{
	RISCV_INFO(r);
	RISCV013_INFO(info);
	select_dmi(target);

	/* Clear the reset, but make sure haltreq is still set */
	uint32_t control = 0, control_haltreq;
	control = set_field(control, DM_DMCONTROL_DMACTIVE, 1);
	control_haltreq = set_field(control, DM_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
	dmi_write(target, DM_DMCONTROL,
			set_hartsel(control_haltreq, r->current_hartid));

	uint32_t dmstatus;
	/* Polling below may grow dmi_busy_delay; remember the pre-reset value so
	 * it can be restored at the end. */
	int dmi_busy_delay = info->dmi_busy_delay;
	time_t start = time(NULL);

	for (int i = 0; i < riscv_count_harts(target); ++i) {
		int index = i;
		if (target->rtos) {
			/* In RTOS mode only this target's hart is handled here. */
			if (index != target->coreid)
				continue;
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control_haltreq, index));
		} else {
			index = r->current_hartid;
		}

		LOG_DEBUG("Waiting for hart %d to come out of reset.", index);
		while (1) {
			int result = dmstatus_read_timeout(target, &dmstatus, true,
					riscv_reset_timeout_sec);
			if (result == ERROR_TIMEOUT_REACHED)
				LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
						"reset in %ds; Increase the timeout with riscv "
						"set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec);
			if (result != ERROR_OK)
				return result;
			/* Certain debug modules, like the one in GD32VF103
			 * MCUs, violate the specification's requirement that
			 * each hart is in "exactly one of four states" and,
			 * during reset, report harts as both unavailable and
			 * halted/running. To work around this, we check for
			 * the absence of the unavailable state rather than
			 * the presence of any other state. */
			if (!get_field(dmstatus, DM_DMSTATUS_ALLUNAVAIL))
				break;
			if (time(NULL) - start > riscv_reset_timeout_sec) {
				LOG_ERROR("Hart %d didn't leave reset in %ds; "
						"dmstatus=0x%x; "
						"Increase the timeout with riscv set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec, dmstatus);
				return ERROR_FAIL;
			}
		}
		target->state = TARGET_HALTED;

		if (get_field(dmstatus, DM_DMSTATUS_ALLHAVERESET)) {
			/* Ack reset and clear DM_DMCONTROL_HALTREQ if previously set */
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control, index) |
					DM_DMCONTROL_ACKHAVERESET);
		}

		if (!target->rtos)
			break;
	}
	info->dmi_busy_delay = dmi_busy_delay;
	return ERROR_OK;
}
2464
2465 static int execute_fence(struct target *target)
2466 {
2467 /* FIXME: For non-coherent systems we need to flush the caches right
2468 * here, but there's no ISA-defined way of doing that. */
2469 {
2470 struct riscv_program program;
2471 riscv_program_init(&program, target);
2472 riscv_program_fence_i(&program);
2473 riscv_program_fence(&program);
2474 int result = riscv_program_exec(&program, target);
2475 if (result != ERROR_OK)
2476 LOG_DEBUG("Unable to execute pre-fence");
2477 }
2478
2479 return ERROR_OK;
2480 }
2481
2482 static void log_memory_access(target_addr_t address, uint64_t value,
2483 unsigned size_bytes, bool read)
2484 {
2485 if (debug_level < LOG_LVL_DEBUG)
2486 return;
2487
2488 char fmt[80];
2489 sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
2490 address, read ? "read" : "write", size_bytes * 2);
2491 switch (size_bytes) {
2492 case 1:
2493 value &= 0xff;
2494 break;
2495 case 2:
2496 value &= 0xffff;
2497 break;
2498 case 4:
2499 value &= 0xffffffffUL;
2500 break;
2501 case 8:
2502 break;
2503 default:
2504 assert(false);
2505 }
2506 LOG_DEBUG(fmt, value);
2507 }
2508
2509 /* Read the relevant sbdata regs depending on size, and put the results into
2510 * buffer. */
2511 static int read_memory_bus_word(struct target *target, target_addr_t address,
2512 uint32_t size, uint8_t *buffer)
2513 {
2514 uint32_t value;
2515 int result;
2516 static int sbdata[4] = { DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3 };
2517 assert(size <= 16);
2518 for (int i = (size - 1) / 4; i >= 0; i--) {
2519 result = dmi_op(target, &value, NULL, DMI_OP_READ, sbdata[i], 0, false, true);
2520 if (result != ERROR_OK)
2521 return result;
2522 buf_set_u32(buffer + i * 4, 0, 8 * MIN(size, 4), value);
2523 log_memory_access(address + i * 4, value, MIN(size, 4), true);
2524 }
2525 return ERROR_OK;
2526 }
2527
2528 static target_addr_t sb_read_address(struct target *target)
2529 {
2530 RISCV013_INFO(info);
2531 unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2532 target_addr_t address = 0;
2533 uint32_t v;
2534 if (sbasize > 32) {
2535 dmi_read(target, &v, DM_SBADDRESS1);
2536 address |= v;
2537 address <<= 32;
2538 }
2539 dmi_read(target, &v, DM_SBADDRESS0);
2540 address |= v;
2541 return address;
2542 }
2543
2544 static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
2545 {
2546 time_t start = time(NULL);
2547 while (1) {
2548 if (dmi_read(target, sbcs, DM_SBCS) != ERROR_OK)
2549 return ERROR_FAIL;
2550 if (!get_field(*sbcs, DM_SBCS_SBBUSY))
2551 return ERROR_OK;
2552 if (time(NULL) - start > riscv_command_timeout_sec) {
2553 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
2554 "Increase the timeout with riscv set_command_timeout_sec.",
2555 riscv_command_timeout_sec, *sbcs);
2556 return ERROR_FAIL;
2557 }
2558 }
2559 }
2560
2561 static int modify_privilege(struct target *target, uint64_t *mstatus, uint64_t *mstatus_old)
2562 {
2563 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5)) {
2564 /* Read DCSR */
2565 uint64_t dcsr;
2566 if (register_read(target, &dcsr, GDB_REGNO_DCSR) != ERROR_OK)
2567 return ERROR_FAIL;
2568
2569 /* Read and save MSTATUS */
2570 if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
2571 return ERROR_FAIL;
2572 *mstatus_old = *mstatus;
2573
2574 /* If we come from m-mode with mprv set, we want to keep mpp */
2575 if (get_field(dcsr, DCSR_PRV) < 3) {
2576 /* MPP = PRIV */
2577 *mstatus = set_field(*mstatus, MSTATUS_MPP, get_field(dcsr, DCSR_PRV));
2578
2579 /* MPRV = 1 */
2580 *mstatus = set_field(*mstatus, MSTATUS_MPRV, 1);
2581
2582 /* Write MSTATUS */
2583 if (*mstatus != *mstatus_old)
2584 if (register_write_direct(target, GDB_REGNO_MSTATUS, *mstatus) != ERROR_OK)
2585 return ERROR_FAIL;
2586 }
2587 }
2588
2589 return ERROR_OK;
2590 }
2591
/* Read memory over the legacy (spec 0.11-era) v0 system bus interface.
 * Single reads use sbsingleread; block reads additionally use sbautoread and
 * sbautoincrement so each sbdata0 read fetches the next word.  Only
 * size == increment is supported. */
static int read_memory_bus_v0(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (size != increment) {
		LOG_ERROR("sba v0 reads only support size==increment");
		return ERROR_NOT_IMPLEMENTED;
	}

	LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
			TARGET_PRIxADDR, size, count, address);
	uint8_t *t_buffer = buffer;
	riscv_addr_t cur_addr = address;
	riscv_addr_t fin_addr = address + (count * size);
	uint32_t access = 0;

	/* v0-only sbcs fields, not present in the generated debug_defines.h. */
	const int DM_SBCS_SBSINGLEREAD_OFFSET = 20;
	const uint32_t DM_SBCS_SBSINGLEREAD = (0x1U << DM_SBCS_SBSINGLEREAD_OFFSET);

	const int DM_SBCS_SBAUTOREAD_OFFSET = 15;
	const uint32_t DM_SBCS_SBAUTOREAD = (0x1U << DM_SBCS_SBAUTOREAD_OFFSET);

	/* Prefer a plain one-off read for a single word; simpler and avoids the
	 * autoread machinery entirely. */
	if (count == 1) {
		for (uint32_t i = 0; i < count; i++) {
			if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
				return ERROR_FAIL;
			dmi_write(target, DM_SBADDRESS0, cur_addr);
			/* size/2 matching the bit access of the spec 0.13 */
			access = set_field(access, DM_SBCS_SBACCESS, size/2);
			access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
			LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
			dmi_write(target, DM_SBCS, access);
			/* 3) read */
			uint32_t value;
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
			buf_set_u32(t_buffer, 0, 8 * size, value);
			t_buffer += size;
			cur_addr += size;
		}
		return ERROR_OK;
	}

	/* has to be the same size if we want to read a block */
	LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
	if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;
	/* set current address */
	dmi_write(target, DM_SBADDRESS0, cur_addr);
	/* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
	 * size/2 matching the bit access of the spec 0.13 */
	access = set_field(access, DM_SBCS_SBACCESS, size/2);
	access = set_field(access, DM_SBCS_SBAUTOREAD, 1);
	access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
	access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
	LOG_DEBUG("\r\naccess: 0x%08x", access);
	dmi_write(target, DM_SBCS, access);

	while (cur_addr < fin_addr) {
		LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
				PRIx64, size, count, cur_addr);
		/* Each sbdata0 read returns the current word and triggers the next
		 * autoincremented fetch. */
		uint32_t value;
		if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
			return ERROR_FAIL;
		buf_set_u32(t_buffer, 0, 8 * size, value);
		cur_addr += size;
		t_buffer += size;

		/* if we are reaching last address, we must clear autoread */
		if (cur_addr == fin_addr && count != 1) {
			dmi_write(target, DM_SBCS, 0);
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u32(t_buffer, 0, 8 * size, value);
		}
	}

	/* NOTE(review): final sbcs read result is discarded — presumably just a
	 * flush/sanity read; error bits are not checked here. */
	uint32_t sbcs;
	if (dmi_read(target, &sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;

	return ERROR_OK;
}
2677
/**
 * Read the requested memory using the system bus interface (SBA version 1).
 *
 * Uses sbreadonaddr to trigger the first read, sbautoincrement (when
 * increment == size) plus sbreadondata to stream subsequent words, and plain
 * DMI scans with busy backoff for the data reads.  On sbbusyerror the
 * transfer restarts from the address the DM reports; on sberror the read
 * fails.  Only increment == size or increment == 0 is supported.
 */
static int read_memory_bus_v1(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (increment != size && increment != 0) {
		LOG_ERROR("sba v1 reads only support increment of size or 0");
		return ERROR_NOT_IMPLEMENTED;
	}

	RISCV013_INFO(info);
	target_addr_t next_address = address;
	target_addr_t end_address = address + count * size;

	while (next_address < end_address) {
		uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
		sbcs_write |= sb_sbaccess(size);
		if (increment == size)
			sbcs_write = set_field(sbcs_write, DM_SBCS_SBAUTOINCREMENT, 1);
		if (count > 1)
			sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, count > 1);
		if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
			return ERROR_FAIL;

		/* This address write will trigger the first read. */
		if (sb_write_address(target, next_address, true) != ERROR_OK)
			return ERROR_FAIL;

		/* Give a slow bus time to complete the access before we poll. */
		if (info->bus_master_read_delay) {
			jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
			if (jtag_execute_queue() != ERROR_OK) {
				LOG_ERROR("Failed to scan idle sequence");
				return ERROR_FAIL;
			}
		}

		/* First value has been read, and is waiting for us to issue a DMI read
		 * to get it. */

		static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
		assert(size <= 16);
		/* next_read tracks the address of the last word whose DMI read has
		 * been issued but not yet stored; address - 1 is the "none yet"
		 * sentinel because results lag one scan behind. */
		target_addr_t next_read = address - 1;
		for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
			for (int j = (size - 1) / 4; j >= 0; j--) {
				uint32_t value;
				unsigned attempt = 0;
				while (1) {
					if (attempt++ > 100) {
						LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
								next_read);
						return ERROR_FAIL;
					}
					keep_alive();
					dmi_status_t status = dmi_scan(target, NULL, &value,
							DMI_OP_READ, sbdata[j], 0, false);
					if (status == DMI_STATUS_BUSY)
						increase_dmi_busy_delay(target);
					else if (status == DMI_STATUS_SUCCESS)
						break;
					else
						return ERROR_FAIL;
				}
				if (next_read != address - 1) {
					buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
					log_memory_access(next_read, value, MIN(size, 4), true);
				}
				next_read = address + i * size + j * 4;
			}
		}

		uint32_t sbcs_read = 0;
		if (count > 1) {
			/* Collect the result of the final in-flight scan with a NOP. */
			uint32_t value;
			unsigned attempt = 0;
			while (1) {
				if (attempt++ > 100) {
					LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
							next_read);
					return ERROR_FAIL;
				}
				dmi_status_t status = dmi_scan(target, NULL, &value, DMI_OP_NOP, 0, 0, false);
				if (status == DMI_STATUS_BUSY)
					increase_dmi_busy_delay(target);
				else if (status == DMI_STATUS_SUCCESS)
					break;
				else
					return ERROR_FAIL;
			}
			buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
			log_memory_access(next_read, value, MIN(size, 4), true);

			/* "Writes to sbcs while sbbusy is high result in undefined behavior.
			 * A debugger must not write to sbcs until it reads sbbusy as 0." */
			if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
				return ERROR_FAIL;

			sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, 0);
			if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
				return ERROR_FAIL;
		}

		/* Read the last word, after we disabled sbreadondata if necessary. */
		if (!get_field(sbcs_read, DM_SBCS_SBERROR) &&
				!get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
			if (read_memory_bus_word(target, address + (count - 1) * size, size,
					buffer + (count - 1) * size) != ERROR_OK)
				return ERROR_FAIL;

			if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
				return ERROR_FAIL;
		}

		if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
			/* We read while the target was busy. Slow down and try again. */
			if (dmi_write(target, DM_SBCS, sbcs_read | DM_SBCS_SBBUSYERROR) != ERROR_OK)
				return ERROR_FAIL;
			next_address = sb_read_address(target);
			info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
			continue;
		}

		unsigned error = get_field(sbcs_read, DM_SBCS_SBERROR);
		if (error == 0) {
			next_address = end_address;
		} else {
			/* Some error indicating the bus access failed, but not because of
			 * something we did wrong. */
			if (dmi_write(target, DM_SBCS, DM_SBCS_SBERROR) != ERROR_OK)
				return ERROR_FAIL;
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
2814
2815 static void log_mem_access_result(struct target *target, bool success, int method, bool read)
2816 {
2817 RISCV_INFO(r);
2818 bool warn = false;
2819 char msg[60];
2820
2821 /* Compose the message */
2822 snprintf(msg, 60, "%s to %s memory via %s.",
2823 success ? "Succeeded" : "Failed",
2824 read ? "read" : "write",
2825 (method == RISCV_MEM_ACCESS_PROGBUF) ? "program buffer" :
2826 (method == RISCV_MEM_ACCESS_SYSBUS) ? "system bus" : "abstract access");
2827
2828 /* Determine the log message severity. Show warnings only once. */
2829 if (!success) {
2830 if (method == RISCV_MEM_ACCESS_PROGBUF) {
2831 warn = r->mem_access_progbuf_warn;
2832 r->mem_access_progbuf_warn = false;
2833 }
2834 if (method == RISCV_MEM_ACCESS_SYSBUS) {
2835 warn = r->mem_access_sysbus_warn;
2836 r->mem_access_sysbus_warn = false;
2837 }
2838 if (method == RISCV_MEM_ACCESS_ABSTRACT) {
2839 warn = r->mem_access_abstract_warn;
2840 r->mem_access_abstract_warn = false;
2841 }
2842 }
2843
2844 if (warn)
2845 LOG_WARNING("%s", msg);
2846 else
2847 LOG_DEBUG("%s", msg);
2848 }
2849
2850 static bool mem_should_skip_progbuf(struct target *target, target_addr_t address,
2851 uint32_t size, bool read, char **skip_reason)
2852 {
2853 assert(skip_reason);
2854
2855 if (!has_sufficient_progbuf(target, 3)) {
2856 LOG_DEBUG("Skipping mem %s via progbuf - insufficient progbuf size.",
2857 read ? "read" : "write");
2858 *skip_reason = "skipped (insufficient progbuf)";
2859 return true;
2860 }
2861 if (target->state != TARGET_HALTED) {
2862 LOG_DEBUG("Skipping mem %s via progbuf - target not halted.",
2863 read ? "read" : "write");
2864 *skip_reason = "skipped (target not halted)";
2865 return true;
2866 }
2867 if (riscv_xlen(target) < size * 8) {
2868 LOG_DEBUG("Skipping mem %s via progbuf - XLEN (%d) is too short for %d-bit memory access.",
2869 read ? "read" : "write", riscv_xlen(target), size * 8);
2870 *skip_reason = "skipped (XLEN too short)";
2871 return true;
2872 }
2873 if (size > 8) {
2874 LOG_DEBUG("Skipping mem %s via progbuf - unsupported size.",
2875 read ? "read" : "write");
2876 *skip_reason = "skipped (unsupported size)";
2877 return true;
2878 }
2879 if ((sizeof(address) * 8 > riscv_xlen(target)) && (address >> riscv_xlen(target))) {
2880 LOG_DEBUG("Skipping mem %s via progbuf - progbuf only supports %u-bit address.",
2881 read ? "read" : "write", riscv_xlen(target));
2882 *skip_reason = "skipped (too large address)";
2883 return true;
2884 }
2885
2886 return false;
2887 }
2888
2889 static bool mem_should_skip_sysbus(struct target *target, target_addr_t address,
2890 uint32_t size, uint32_t increment, bool read, char **skip_reason)
2891 {
2892 assert(skip_reason);
2893
2894 RISCV013_INFO(info);
2895 if (!sba_supports_access(target, size)) {
2896 LOG_DEBUG("Skipping mem %s via system bus - unsupported size.",
2897 read ? "read" : "write");
2898 *skip_reason = "skipped (unsupported size)";
2899 return true;
2900 }
2901 unsigned int sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
2902 if ((sizeof(address) * 8 > sbasize) && (address >> sbasize)) {
2903 LOG_DEBUG("Skipping mem %s via system bus - sba only supports %u-bit address.",
2904 read ? "read" : "write", sbasize);
2905 *skip_reason = "skipped (too large address)";
2906 return true;
2907 }
2908 if (read && increment != size && (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0 || increment != 0)) {
2909 LOG_DEBUG("Skipping mem read via system bus - "
2910 "sba reads only support size==increment or also size==0 for sba v1.");
2911 *skip_reason = "skipped (unsupported increment)";
2912 return true;
2913 }
2914
2915 return false;
2916 }
2917
2918 static bool mem_should_skip_abstract(struct target *target, target_addr_t address,
2919 uint32_t size, uint32_t increment, bool read, char **skip_reason)
2920 {
2921 assert(skip_reason);
2922
2923 if (size > 8) {
2924 /* TODO: Add 128b support if it's ever used. Involves modifying
2925 read/write_abstract_arg() to work on two 64b values. */
2926 LOG_DEBUG("Skipping mem %s via abstract access - unsupported size: %d bits",
2927 read ? "read" : "write", size * 8);
2928 *skip_reason = "skipped (unsupported size)";
2929 return true;
2930 }
2931 if ((sizeof(address) * 8 > riscv_xlen(target)) && (address >> riscv_xlen(target))) {
2932 LOG_DEBUG("Skipping mem %s via abstract access - abstract access only supports %u-bit address.",
2933 read ? "read" : "write", riscv_xlen(target));
2934 *skip_reason = "skipped (too large address)";
2935 return true;
2936 }
2937 if (read && size != increment) {
2938 LOG_ERROR("Skipping mem read via abstract access - "
2939 "abstract command reads only support size==increment.");
2940 *skip_reason = "skipped (unsupported increment)";
2941 return true;
2942 }
2943
2944 return false;
2945 }
2946
2947 /*
2948 * Performs a memory read using memory access abstract commands. The read sizes
2949 * supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16 byte
2950 * aamsize fields in the memory access abstract command.
2951 */
2952 static int read_memory_abstract(struct target *target, target_addr_t address,
2953 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
2954 {
2955 RISCV013_INFO(info);
2956
2957 int result = ERROR_OK;
2958 bool use_aampostincrement = info->has_aampostincrement != YNM_NO;
2959
2960 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
2961 size, address);
2962
2963 memset(buffer, 0, count * size);
2964
2965 /* Convert the size (bytes) to width (bits) */
2966 unsigned width = size << 3;
2967
2968 /* Create the command (physical address, postincrement, read) */
2969 uint32_t command = access_memory_command(target, false, width, use_aampostincrement, false);
2970
2971 /* Execute the reads */
2972 uint8_t *p = buffer;
2973 bool updateaddr = true;
2974 unsigned int width32 = (width < 32) ? 32 : width;
2975 for (uint32_t c = 0; c < count; c++) {
2976 /* Update the address if it is the first time or aampostincrement is not supported by the target. */
2977 if (updateaddr) {
2978 /* Set arg1 to the address: address + c * size */
2979 result = write_abstract_arg(target, 1, address + c * size, riscv_xlen(target));
2980 if (result != ERROR_OK) {
2981 LOG_ERROR("Failed to write arg1 during read_memory_abstract().");
2982 return result;
2983 }
2984 }
2985
2986 /* Execute the command */
2987 result = execute_abstract_command(target, command);
2988
2989 if (info->has_aampostincrement == YNM_MAYBE) {
2990 if (result == ERROR_OK) {
2991 /* Safety: double-check that the address was really auto-incremented */
2992 riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
2993 if (new_address == address + size) {
2994 LOG_DEBUG("aampostincrement is supported on this target.");
2995 info->has_aampostincrement = YNM_YES;
2996 } else {
2997 LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
2998 info->has_aampostincrement = YNM_NO;
2999 }
3000 } else {
3001 /* Try the same access but with postincrement disabled. */
3002 command = access_memory_command(target, false, width, false, false);
3003 result = execute_abstract_command(target, command);
3004 if (result == ERROR_OK) {
3005 LOG_DEBUG("aampostincrement is not supported on this target.");
3006 info->has_aampostincrement = YNM_NO;
3007 }
3008 }
3009 }
3010
3011 if (result != ERROR_OK)
3012 return result;
3013
3014 /* Copy arg0 to buffer (rounded width up to nearest 32) */
3015 riscv_reg_t value = read_abstract_arg(target, 0, width32);
3016 buf_set_u64(p, 0, 8 * size, value);
3017
3018 if (info->has_aampostincrement == YNM_YES)
3019 updateaddr = false;
3020 p += size;
3021 }
3022
3023 return result;
3024 }
3025
3026 /*
3027 * Performs a memory write using memory access abstract commands. The write
3028 * sizes supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16
3029 * byte aamsize fields in the memory access abstract command.
3030 */
3031 static int write_memory_abstract(struct target *target, target_addr_t address,
3032 uint32_t size, uint32_t count, const uint8_t *buffer)
3033 {
3034 RISCV013_INFO(info);
3035 int result = ERROR_OK;
3036 bool use_aampostincrement = info->has_aampostincrement != YNM_NO;
3037
3038 LOG_DEBUG("writing %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
3039 size, address);
3040
3041 /* Convert the size (bytes) to width (bits) */
3042 unsigned width = size << 3;
3043
3044 /* Create the command (physical address, postincrement, write) */
3045 uint32_t command = access_memory_command(target, false, width, use_aampostincrement, true);
3046
3047 /* Execute the writes */
3048 const uint8_t *p = buffer;
3049 bool updateaddr = true;
3050 for (uint32_t c = 0; c < count; c++) {
3051 /* Move data to arg0 */
3052 riscv_reg_t value = buf_get_u64(p, 0, 8 * size);
3053 result = write_abstract_arg(target, 0, value, riscv_xlen(target));
3054 if (result != ERROR_OK) {
3055 LOG_ERROR("Failed to write arg0 during write_memory_abstract().");
3056 return result;
3057 }
3058
3059 /* Update the address if it is the first time or aampostincrement is not supported by the target. */
3060 if (updateaddr) {
3061 /* Set arg1 to the address: address + c * size */
3062 result = write_abstract_arg(target, 1, address + c * size, riscv_xlen(target));
3063 if (result != ERROR_OK) {
3064 LOG_ERROR("Failed to write arg1 during write_memory_abstract().");
3065 return result;
3066 }
3067 }
3068
3069 /* Execute the command */
3070 result = execute_abstract_command(target, command);
3071
3072 if (info->has_aampostincrement == YNM_MAYBE) {
3073 if (result == ERROR_OK) {
3074 /* Safety: double-check that the address was really auto-incremented */
3075 riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
3076 if (new_address == address + size) {
3077 LOG_DEBUG("aampostincrement is supported on this target.");
3078 info->has_aampostincrement = YNM_YES;
3079 } else {
3080 LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
3081 info->has_aampostincrement = YNM_NO;
3082 }
3083 } else {
3084 /* Try the same access but with postincrement disabled. */
3085 command = access_memory_command(target, false, width, false, true);
3086 result = execute_abstract_command(target, command);
3087 if (result == ERROR_OK) {
3088 LOG_DEBUG("aampostincrement is not supported on this target.");
3089 info->has_aampostincrement = YNM_NO;
3090 }
3091 }
3092 }
3093
3094 if (result != ERROR_OK)
3095 return result;
3096
3097 if (info->has_aampostincrement == YNM_YES)
3098 updateaddr = false;
3099 p += size;
3100 }
3101
3102 return result;
3103 }
3104
3105 /**
3106 * Read the requested memory, taking care to execute every read exactly once,
3107 * even if cmderr=busy is encountered.
3108 */
3109 static int read_memory_progbuf_inner(struct target *target, target_addr_t address,
3110 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3111 {
3112 RISCV013_INFO(info);
3113
3114 int result = ERROR_OK;
3115
3116 /* Write address to S0. */
3117 result = register_write_direct(target, GDB_REGNO_S0, address);
3118 if (result != ERROR_OK)
3119 return result;
3120
3121 if (increment == 0 &&
3122 register_write_direct(target, GDB_REGNO_S2, 0) != ERROR_OK)
3123 return ERROR_FAIL;
3124
3125 uint32_t command = access_register_command(target, GDB_REGNO_S1,
3126 riscv_xlen(target),
3127 AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
3128 if (execute_abstract_command(target, command) != ERROR_OK)
3129 return ERROR_FAIL;
3130
3131 /* First read has just triggered. Result is in s1. */
3132 if (count == 1) {
3133 uint64_t value;
3134 if (register_read_direct(target, &value, GDB_REGNO_S1) != ERROR_OK)
3135 return ERROR_FAIL;
3136 buf_set_u64(buffer, 0, 8 * size, value);
3137 log_memory_access(address, value, size, true);
3138 return ERROR_OK;
3139 }
3140
3141 if (dmi_write(target, DM_ABSTRACTAUTO,
3142 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
3143 goto error;
3144 /* Read garbage from dmi_data0, which triggers another execution of the
3145 * program. Now dmi_data0 contains the first good result, and s1 the next
3146 * memory value. */
3147 if (dmi_read_exec(target, NULL, DM_DATA0) != ERROR_OK)
3148 goto error;
3149
3150 /* read_addr is the next address that the hart will read from, which is the
3151 * value in s0. */
3152 unsigned index = 2;
3153 while (index < count) {
3154 riscv_addr_t read_addr = address + index * increment;
3155 LOG_DEBUG("i=%d, count=%d, read_addr=0x%" PRIx64, index, count, read_addr);
3156 /* The pipeline looks like this:
3157 * memory -> s1 -> dm_data0 -> debugger
3158 * Right now:
3159 * s0 contains read_addr
3160 * s1 contains mem[read_addr-size]
3161 * dm_data0 contains[read_addr-size*2]
3162 */
3163
3164 struct riscv_batch *batch = riscv_batch_alloc(target, 32,
3165 info->dmi_busy_delay + info->ac_busy_delay);
3166 if (!batch)
3167 return ERROR_FAIL;
3168
3169 unsigned reads = 0;
3170 for (unsigned j = index; j < count; j++) {
3171 if (size > 4)
3172 riscv_batch_add_dmi_read(batch, DM_DATA1);
3173 riscv_batch_add_dmi_read(batch, DM_DATA0);
3174
3175 reads++;
3176 if (riscv_batch_full(batch))
3177 break;
3178 }
3179
3180 batch_run(target, batch);
3181
3182 /* Wait for the target to finish performing the last abstract command,
3183 * and update our copy of cmderr. If we see that DMI is busy here,
3184 * dmi_busy_delay will be incremented. */
3185 uint32_t abstractcs;
3186 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3187 return ERROR_FAIL;
3188 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
3189 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3190 return ERROR_FAIL;
3191 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
3192
3193 unsigned next_index;
3194 unsigned ignore_last = 0;
3195 switch (info->cmderr) {
3196 case CMDERR_NONE:
3197 LOG_DEBUG("successful (partial?) memory read");
3198 next_index = index + reads;
3199 break;
3200 case CMDERR_BUSY:
3201 LOG_DEBUG("memory read resulted in busy response");
3202
3203 increase_ac_busy_delay(target);
3204 riscv013_clear_abstract_error(target);
3205
3206 dmi_write(target, DM_ABSTRACTAUTO, 0);
3207
3208 uint32_t dmi_data0, dmi_data1 = 0;
3209 /* This is definitely a good version of the value that we
3210 * attempted to read when we discovered that the target was
3211 * busy. */
3212 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK) {
3213 riscv_batch_free(batch);
3214 goto error;
3215 }
3216 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK) {
3217 riscv_batch_free(batch);
3218 goto error;
3219 }
3220
3221 /* See how far we got, clobbering dmi_data0. */
3222 if (increment == 0) {
3223 uint64_t counter;
3224 result = register_read_direct(target, &counter, GDB_REGNO_S2);
3225 next_index = counter;
3226 } else {
3227 uint64_t next_read_addr;
3228 result = register_read_direct(target, &next_read_addr,
3229 GDB_REGNO_S0);
3230 next_index = (next_read_addr - address) / increment;
3231 }
3232 if (result != ERROR_OK) {
3233 riscv_batch_free(batch);
3234 goto error;
3235 }
3236
3237 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
3238 buf_set_u64(buffer + (next_index - 2) * size, 0, 8 * size, value64);
3239 log_memory_access(address + (next_index - 2) * size, value64, size, true);
3240
3241 /* Restore the command, and execute it.
3242 * Now DM_DATA0 contains the next value just as it would if no
3243 * error had occurred. */
3244 dmi_write_exec(target, DM_COMMAND, command, true);
3245 next_index++;
3246
3247 dmi_write(target, DM_ABSTRACTAUTO,
3248 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
3249
3250 ignore_last = 1;
3251
3252 break;
3253 default:
3254 LOG_DEBUG("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
3255 riscv013_clear_abstract_error(target);
3256 riscv_batch_free(batch);
3257 result = ERROR_FAIL;
3258 goto error;
3259 }
3260
3261 /* Now read whatever we got out of the batch. */
3262 dmi_status_t status = DMI_STATUS_SUCCESS;
3263 unsigned read = 0;
3264 assert(index >= 2);
3265 for (unsigned j = index - 2; j < index + reads; j++) {
3266 assert(j < count);
3267 LOG_DEBUG("index=%d, reads=%d, next_index=%d, ignore_last=%d, j=%d",
3268 index, reads, next_index, ignore_last, j);
3269 if (j + 3 + ignore_last > next_index)
3270 break;
3271
3272 status = riscv_batch_get_dmi_read_op(batch, read);
3273 uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
3274 read++;
3275 if (status != DMI_STATUS_SUCCESS) {
3276 /* If we're here because of busy count, dmi_busy_delay will
3277 * already have been increased and busy state will have been
3278 * cleared in dmi_read(). */
3279 /* In at least some implementations, we issue a read, and then
3280 * can get busy back when we try to scan out the read result,
3281 * and the actual read value is lost forever. Since this is
3282 * rare in any case, we return error here and rely on our
3283 * caller to reread the entire block. */
3284 LOG_WARNING("Batch memory read encountered DMI error %d. "
3285 "Falling back on slower reads.", status);
3286 riscv_batch_free(batch);
3287 result = ERROR_FAIL;
3288 goto error;
3289 }
3290 if (size > 4) {
3291 status = riscv_batch_get_dmi_read_op(batch, read);
3292 if (status != DMI_STATUS_SUCCESS) {
3293 LOG_WARNING("Batch memory read encountered DMI error %d. "
3294 "Falling back on slower reads.", status);
3295 riscv_batch_free(batch);
3296 result = ERROR_FAIL;
3297 goto error;
3298 }
3299 value <<= 32;
3300 value |= riscv_batch_get_dmi_read_data(batch, read);
3301 read++;
3302 }
3303 riscv_addr_t offset = j * size;
3304 buf_set_u64(buffer + offset, 0, 8 * size, value);
3305 log_memory_access(address + j * increment, value, size, true);
3306 }
3307
3308 index = next_index;
3309
3310 riscv_batch_free(batch);
3311 }
3312
3313 dmi_write(target, DM_ABSTRACTAUTO, 0);
3314
3315 if (count > 1) {
3316 /* Read the penultimate word. */
3317 uint32_t dmi_data0, dmi_data1 = 0;
3318 if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK)
3319 return ERROR_FAIL;
3320 if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK)
3321 return ERROR_FAIL;
3322 uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
3323 buf_set_u64(buffer + size * (count - 2), 0, 8 * size, value64);
3324 log_memory_access(address + size * (count - 2), value64, size, true);
3325 }
3326
3327 /* Read the last word. */
3328 uint64_t value;
3329 result = register_read_direct(target, &value, GDB_REGNO_S1);
3330 if (result != ERROR_OK)
3331 goto error;
3332 buf_set_u64(buffer + size * (count-1), 0, 8 * size, value);
3333 log_memory_access(address + size * (count-1), value, size, true);
3334
3335 return ERROR_OK;
3336
3337 error:
3338 dmi_write(target, DM_ABSTRACTAUTO, 0);
3339
3340 return result;
3341 }
3342
/* Only need to save/restore one GPR to read a single word, and the progbuf
 * program doesn't need to increment.
 *
 * On success the word at `address` is written into `buffer` (size bytes).
 * s0 is saved/restored around the access; MSTATUS is restored if
 * modify_privilege() changed it. */
static int read_memory_progbuf_one(struct target *target, target_addr_t address,
		uint32_t size, uint8_t *buffer)
{
	uint64_t mstatus = 0;
	uint64_t mstatus_old = 0;
	if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t s0;
	/* Pessimistic default; only set to ERROR_OK after the value is read. */
	int result = ERROR_FAIL;

	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		goto restore_mstatus;

	/* Write the program (load, increment) */
	struct riscv_program program;
	riscv_program_init(&program, target);
	/* Bracket the load with dcsr.mprven set/clear when virtualized accesses
	 * are requested and MPRV is active. */
	if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
		riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
	switch (size) {
		case 1:
			riscv_program_lbr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
			break;
		case 2:
			riscv_program_lhr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
			break;
		case 4:
			riscv_program_lwr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
			break;
		case 8:
			riscv_program_ldr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
			break;
		default:
			LOG_ERROR("Unsupported size: %d", size);
			goto restore_mstatus;
	}
	if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
		riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);

	if (riscv_program_ebreak(&program) != ERROR_OK)
		goto restore_mstatus;
	if (riscv_program_write(&program) != ERROR_OK)
		goto restore_mstatus;

	/* Write address to S0, and execute buffer. */
	if (write_abstract_arg(target, 0, address, riscv_xlen(target)) != ERROR_OK)
		goto restore_mstatus;
	uint32_t command = access_register_command(target, GDB_REGNO_S0,
			riscv_xlen(target), AC_ACCESS_REGISTER_WRITE |
			AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
	if (execute_abstract_command(target, command) != ERROR_OK)
		goto restore_s0;

	/* The load overwrote s0 with the memory value; fetch it. */
	uint64_t value;
	if (register_read(target, &value, GDB_REGNO_S0) != ERROR_OK)
		goto restore_s0;
	buf_set_u64(buffer, 0, 8 * size, value);
	log_memory_access(address, value, size, true);
	result = ERROR_OK;

restore_s0:
	if (riscv_set_register(target, GDB_REGNO_S0, s0) != ERROR_OK)
		result = ERROR_FAIL;

restore_mstatus:
	if (mstatus != mstatus_old)
		if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
			result = ERROR_FAIL;

	return result;
}
3416
3417 /**
3418 * Read the requested memory, silently handling memory access errors.
3419 */
3420 static int read_memory_progbuf(struct target *target, target_addr_t address,
3421 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3422 {
3423 if (riscv_xlen(target) < size * 8) {
3424 LOG_ERROR("XLEN (%d) is too short for %d-bit memory read.",
3425 riscv_xlen(target), size * 8);
3426 return ERROR_FAIL;
3427 }
3428
3429 int result = ERROR_OK;
3430
3431 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
3432 size, address);
3433
3434 select_dmi(target);
3435
3436 memset(buffer, 0, count*size);
3437
3438 if (execute_fence(target) != ERROR_OK)
3439 return ERROR_FAIL;
3440
3441 if (count == 1)
3442 return read_memory_progbuf_one(target, address, size, buffer);
3443
3444 uint64_t mstatus = 0;
3445 uint64_t mstatus_old = 0;
3446 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3447 return ERROR_FAIL;
3448
3449 /* s0 holds the next address to read from
3450 * s1 holds the next data value read
3451 * s2 is a counter in case increment is 0
3452 */
3453 uint64_t s0, s1, s2;
3454 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3455 return ERROR_FAIL;
3456 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
3457 return ERROR_FAIL;
3458 if (increment == 0 && register_read(target, &s2, GDB_REGNO_S2) != ERROR_OK)
3459 return ERROR_FAIL;
3460
3461 /* Write the program (load, increment) */
3462 struct riscv_program program;
3463 riscv_program_init(&program, target);
3464 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3465 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3466
3467 switch (size) {
3468 case 1:
3469 riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3470 break;
3471 case 2:
3472 riscv_program_lhr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3473 break;
3474 case 4:
3475 riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3476 break;
3477 case 8:
3478 riscv_program_ldr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3479 break;
3480 default:
3481 LOG_ERROR("Unsupported size: %d", size);
3482 return ERROR_FAIL;
3483 }
3484
3485 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3486 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3487 if (increment == 0)
3488 riscv_program_addi(&program, GDB_REGNO_S2, GDB_REGNO_S2, 1);
3489 else
3490 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, increment);
3491
3492 if (riscv_program_ebreak(&program) != ERROR_OK)
3493 return ERROR_FAIL;
3494 if (riscv_program_write(&program) != ERROR_OK)
3495 return ERROR_FAIL;
3496
3497 result = read_memory_progbuf_inner(target, address, size, count, buffer, increment);
3498
3499 if (result != ERROR_OK) {
3500 /* The full read did not succeed, so we will try to read each word individually. */
3501 /* This will not be fast, but reading outside actual memory is a special case anyway. */
3502 /* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
3503 target_addr_t address_i = address;
3504 uint32_t count_i = 1;
3505 uint8_t *buffer_i = buffer;
3506
3507 for (uint32_t i = 0; i < count; i++, address_i += increment, buffer_i += size) {
3508 /* TODO: This is much slower than it needs to be because we end up
3509 * writing the address to read for every word we read. */
3510 result = read_memory_progbuf_inner(target, address_i, size, count_i, buffer_i, increment);
3511
3512 /* The read of a single word failed, so we will just return 0 for that instead */
3513 if (result != ERROR_OK) {
3514 LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR,
3515 size, address_i);
3516
3517 buf_set_u64(buffer_i, 0, 8 * size, 0);
3518 }
3519 }
3520 result = ERROR_OK;
3521 }
3522
3523 riscv_set_register(target, GDB_REGNO_S0, s0);
3524 riscv_set_register(target, GDB_REGNO_S1, s1);
3525 if (increment == 0)
3526 riscv_set_register(target, GDB_REGNO_S2, s2);
3527
3528 /* Restore MSTATUS */
3529 if (mstatus != mstatus_old)
3530 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3531 return ERROR_FAIL;
3532
3533 return result;
3534 }
3535
3536 static int read_memory(struct target *target, target_addr_t address,
3537 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
3538 {
3539 if (count == 0)
3540 return ERROR_OK;
3541
3542 if (size != 1 && size != 2 && size != 4 && size != 8 && size != 16) {
3543 LOG_ERROR("BUG: Unsupported size for memory read: %d", size);
3544 return ERROR_FAIL;
3545 }
3546
3547 int ret = ERROR_FAIL;
3548 RISCV_INFO(r);
3549 RISCV013_INFO(info);
3550
3551 char *progbuf_result = "disabled";
3552 char *sysbus_result = "disabled";
3553 char *abstract_result = "disabled";
3554
3555 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
3556 int method = r->mem_access_methods[i];
3557
3558 if (method == RISCV_MEM_ACCESS_PROGBUF) {
3559 if (mem_should_skip_progbuf(target, address, size, true, &progbuf_result))
3560 continue;
3561
3562 ret = read_memory_progbuf(target, address, size, count, buffer, increment);
3563
3564 if (ret != ERROR_OK)
3565 progbuf_result = "failed";
3566 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
3567 if (mem_should_skip_sysbus(target, address, size, increment, true, &sysbus_result))
3568 continue;
3569
3570 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
3571 ret = read_memory_bus_v0(target, address, size, count, buffer, increment);
3572 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
3573 ret = read_memory_bus_v1(target, address, size, count, buffer, increment);
3574
3575 if (ret != ERROR_OK)
3576 sysbus_result = "failed";
3577 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
3578 if (mem_should_skip_abstract(target, address, size, increment, true, &abstract_result))
3579 continue;
3580
3581 ret = read_memory_abstract(target, address, size, count, buffer, increment);
3582
3583 if (ret != ERROR_OK)
3584 abstract_result = "failed";
3585 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
3586 /* No further mem access method to try. */
3587 break;
3588
3589 log_mem_access_result(target, ret == ERROR_OK, method, true);
3590
3591 if (ret == ERROR_OK)
3592 return ret;
3593 }
3594
3595 LOG_ERROR("Target %s: Failed to read memory (addr=0x%" PRIx64 ")", target_name(target), address);
3596 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result, sysbus_result, abstract_result);
3597 return ret;
3598 }
3599
3600 static int write_memory_bus_v0(struct target *target, target_addr_t address,
3601 uint32_t size, uint32_t count, const uint8_t *buffer)
3602 {
3603 /*1) write sbaddress: for singlewrite and autoincrement, we need to write the address once*/
3604 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
3605 TARGET_PRIxADDR, size, count, address);
3606 dmi_write(target, DM_SBADDRESS0, address);
3607 int64_t value = 0;
3608 int64_t access = 0;
3609 riscv_addr_t offset = 0;
3610 riscv_addr_t t_addr = 0;
3611 const uint8_t *t_buffer = buffer + offset;
3612
3613 /* B.8 Writing Memory, single write check if we write in one go */
3614 if (count == 1) { /* count is in bytes here */
3615 value = buf_get_u64(t_buffer, 0, 8 * size);
3616
3617 access = 0;
3618 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3619 dmi_write(target, DM_SBCS, access);
3620 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3621 LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
3622 dmi_write(target, DM_SBDATA0, value);
3623 return ERROR_OK;
3624 }
3625
3626 /*B.8 Writing Memory, using autoincrement*/
3627
3628 access = 0;
3629 access = set_field(access, DM_SBCS_SBACCESS, size/2);
3630 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
3631 LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
3632 dmi_write(target, DM_SBCS, access);
3633
3634 /*2)set the value according to the size required and write*/
3635 for (riscv_addr_t i = 0; i < count; ++i) {
3636 offset = size*i;
3637 /* for monitoring only */
3638 t_addr = address + offset;
3639 t_buffer = buffer + offset;
3640
3641 value = buf_get_u64(t_buffer, 0, 8 * size);
3642 LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
3643 PRIx64, (uint32_t)t_addr, (uint32_t)value);
3644 dmi_write(target, DM_SBDATA0, value);
3645 }
3646 /*reset the autoincrement when finished (something weird is happening if this is not done at the end*/
3647 access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 0);
3648 dmi_write(target, DM_SBCS, access);
3649
3650 return ERROR_OK;
3651 }
3652
/*
 * Write memory through the System Bus (SBA, sbversion=1).
 *
 * Programs sbcs for <size>-byte accesses with address autoincrement, then
 * streams data words through sbdata3..0 in batches.  Writing sbdata0
 * triggers the actual bus write and advances sbaddress in hardware, so only
 * the first address of each burst has to be written explicitly.
 *
 * Recovers automatically from sbbusyerror / DMI-busy (writes issued faster
 * than the bus can take them) by re-reading sbaddress and resuming from
 * there; any other sberror fails the whole operation.
 */
static int write_memory_bus_v1(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	RISCV013_INFO(info);
	uint32_t sbcs = sb_sbaccess(size);
	sbcs = set_field(sbcs, DM_SBCS_SBAUTOINCREMENT, 1);
	dmi_write(target, DM_SBCS, sbcs);

	target_addr_t next_address = address;
	target_addr_t end_address = address + count * size;

	int result;

	sb_write_address(target, next_address, true);
	while (next_address < end_address) {
		LOG_DEBUG("transferring burst starting at address 0x%" TARGET_PRIxADDR,
				next_address);

		struct riscv_batch *batch = riscv_batch_alloc(
				target,
				32,
				info->dmi_busy_delay + info->bus_master_write_delay);
		if (!batch)
			return ERROR_FAIL;

		/* Queue up one DMI write per sbdata register per word.  sbdata0
		 * must be written last: it is the write that fires the bus access. */
		for (uint32_t i = (next_address - address) / size; i < count; i++) {
			const uint8_t *p = buffer + i * size;

			if (riscv_batch_available_scans(batch) < (size + 3) / 4)
				break;

			if (size > 12)
				riscv_batch_add_dmi_write(batch, DM_SBDATA3,
						((uint32_t) p[12]) |
						(((uint32_t) p[13]) << 8) |
						(((uint32_t) p[14]) << 16) |
						(((uint32_t) p[15]) << 24));

			if (size > 8)
				riscv_batch_add_dmi_write(batch, DM_SBDATA2,
						((uint32_t) p[8]) |
						(((uint32_t) p[9]) << 8) |
						(((uint32_t) p[10]) << 16) |
						(((uint32_t) p[11]) << 24));
			if (size > 4)
				riscv_batch_add_dmi_write(batch, DM_SBDATA1,
						((uint32_t) p[4]) |
						(((uint32_t) p[5]) << 8) |
						(((uint32_t) p[6]) << 16) |
						(((uint32_t) p[7]) << 24));
			uint32_t value = p[0];
			if (size > 2) {
				value |= ((uint32_t) p[2]) << 16;
				value |= ((uint32_t) p[3]) << 24;
			}
			if (size > 1)
				value |= ((uint32_t) p[1]) << 8;
			riscv_batch_add_dmi_write(batch, DM_SBDATA0, value);

			log_memory_access(address + i * size, value, size, false);
			next_address += size;
		}

		/* Execute the batch of writes */
		result = batch_run(target, batch);
		riscv_batch_free(batch);
		if (result != ERROR_OK)
			return result;

		/* Read sbcs value.
		 * At the same time, detect if DMI busy has occurred during the batch write. */
		bool dmi_busy_encountered;
		if (dmi_op(target, &sbcs, &dmi_busy_encountered, DMI_OP_READ,
				DM_SBCS, 0, false, true) != ERROR_OK)
			return ERROR_FAIL;
		if (dmi_busy_encountered)
			LOG_DEBUG("DMI busy encountered during system bus write.");

		/* Wait until sbbusy goes low */
		time_t start = time(NULL);
		while (get_field(sbcs, DM_SBCS_SBBUSY)) {
			if (time(NULL) - start > riscv_command_timeout_sec) {
				LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
						"Increase the timeout with riscv set_command_timeout_sec.",
						riscv_command_timeout_sec, sbcs);
				return ERROR_FAIL;
			}
			if (dmi_read(target, &sbcs, DM_SBCS) != ERROR_OK)
				return ERROR_FAIL;
		}

		if (get_field(sbcs, DM_SBCS_SBBUSYERROR)) {
			/* We wrote while the target was busy. */
			LOG_DEBUG("Sbbusyerror encountered during system bus write.");
			/* Clear the sticky error flag. */
			dmi_write(target, DM_SBCS, sbcs | DM_SBCS_SBBUSYERROR);
			/* Slow down before trying again. */
			info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
		}

		if (get_field(sbcs, DM_SBCS_SBBUSYERROR) || dmi_busy_encountered) {
			/* Recover from the case when the write commands were issued too fast.
			 * Determine the address from which to resume writing. */
			next_address = sb_read_address(target);
			if (next_address < address) {
				/* This should never happen, probably buggy hardware. */
				LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
						" - buggy sbautoincrement in hw?", next_address);
				/* Fail the whole operation. */
				return ERROR_FAIL;
			}
			/* Try again - resume writing. */
			continue;
		}

		unsigned int sberror = get_field(sbcs, DM_SBCS_SBERROR);
		if (sberror != 0) {
			/* Sberror indicates the bus access failed, but not because we issued the writes
			 * too fast. Cannot recover. Sbaddress holds the address where the error occurred
			 * (unless sbautoincrement in the HW is buggy).
			 */
			target_addr_t sbaddress = sb_read_address(target);
			LOG_DEBUG("System bus access failed with sberror=%u (sbaddress=0x%" TARGET_PRIxADDR ")",
					sberror, sbaddress);
			if (sbaddress < address) {
				/* This should never happen, probably buggy hardware.
				 * Make a note to the user not to trust the sbaddress value. */
				LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
						" - buggy sbautoincrement in hw?", next_address);
			}
			/* Clear the sticky error flag */
			dmi_write(target, DM_SBCS, DM_SBCS_SBERROR);
			/* Fail the whole operation */
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
3792
3793 static int write_memory_progbuf(struct target *target, target_addr_t address,
3794 uint32_t size, uint32_t count, const uint8_t *buffer)
3795 {
3796 RISCV013_INFO(info);
3797
3798 if (riscv_xlen(target) < size * 8) {
3799 LOG_ERROR("XLEN (%d) is too short for %d-bit memory write.",
3800 riscv_xlen(target), size * 8);
3801 return ERROR_FAIL;
3802 }
3803
3804 LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count, size, (long)address);
3805
3806 select_dmi(target);
3807
3808 uint64_t mstatus = 0;
3809 uint64_t mstatus_old = 0;
3810 if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
3811 return ERROR_FAIL;
3812
3813 /* s0 holds the next address to write to
3814 * s1 holds the next data value to write
3815 */
3816
3817 int result = ERROR_OK;
3818 uint64_t s0, s1;
3819 if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
3820 return ERROR_FAIL;
3821 if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
3822 return ERROR_FAIL;
3823
3824 /* Write the program (store, increment) */
3825 struct riscv_program program;
3826 riscv_program_init(&program, target);
3827 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3828 riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3829
3830 switch (size) {
3831 case 1:
3832 riscv_program_sbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3833 break;
3834 case 2:
3835 riscv_program_shr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3836 break;
3837 case 4:
3838 riscv_program_swr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3839 break;
3840 case 8:
3841 riscv_program_sdr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
3842 break;
3843 default:
3844 LOG_ERROR("write_memory_progbuf(): Unsupported size: %d", size);
3845 result = ERROR_FAIL;
3846 goto error;
3847 }
3848
3849 if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
3850 riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
3851 riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
3852
3853 result = riscv_program_ebreak(&program);
3854 if (result != ERROR_OK)
3855 goto error;
3856 riscv_program_write(&program);
3857
3858 riscv_addr_t cur_addr = address;
3859 riscv_addr_t fin_addr = address + (count * size);
3860 bool setup_needed = true;
3861 LOG_DEBUG("writing until final address 0x%016" PRIx64, fin_addr);
3862 while (cur_addr < fin_addr) {
3863 LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64,
3864 cur_addr);
3865
3866 struct riscv_batch *batch = riscv_batch_alloc(
3867 target,
3868 32,
3869 info->dmi_busy_delay + info->ac_busy_delay);
3870 if (!batch)
3871 goto error;
3872
3873 /* To write another word, we put it in S1 and execute the program. */
3874 unsigned start = (cur_addr - address) / size;
3875 for (unsigned i = start; i < count; ++i) {
3876 unsigned offset = size*i;
3877 const uint8_t *t_buffer = buffer + offset;
3878
3879 uint64_t value = buf_get_u64(t_buffer, 0, 8 * size);
3880
3881 log_memory_access(address + offset, value, size, false);
3882 cur_addr += size;
3883
3884 if (setup_needed) {
3885 result = register_write_direct(target, GDB_REGNO_S0,
3886 address + offset);
3887 if (result != ERROR_OK) {
3888 riscv_batch_free(batch);
3889 goto error;
3890 }
3891
3892 /* Write value. */
3893 if (size > 4)
3894 dmi_write(target, DM_DATA1, value >> 32);
3895 dmi_write(target, DM_DATA0, value);
3896
3897 /* Write and execute command that moves value into S1 and
3898 * executes program buffer. */
3899 uint32_t command = access_register_command(target,
3900 GDB_REGNO_S1, riscv_xlen(target),
3901 AC_ACCESS_REGISTER_POSTEXEC |
3902 AC_ACCESS_REGISTER_TRANSFER |
3903 AC_ACCESS_REGISTER_WRITE);
3904 result = execute_abstract_command(target, command);
3905 if (result != ERROR_OK) {
3906 riscv_batch_free(batch);
3907 goto error;
3908 }
3909
3910 /* Turn on autoexec */
3911 dmi_write(target, DM_ABSTRACTAUTO,
3912 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
3913
3914 setup_needed = false;
3915 } else {
3916 if (size > 4)
3917 riscv_batch_add_dmi_write(batch, DM_DATA1, value >> 32);
3918 riscv_batch_add_dmi_write(batch, DM_DATA0, value);
3919 if (riscv_batch_full(batch))
3920 break;
3921 }
3922 }
3923
3924 result = batch_run(target, batch);
3925 riscv_batch_free(batch);
3926 if (result != ERROR_OK)
3927 goto error;
3928
3929 /* Note that if the scan resulted in a Busy DMI response, it
3930 * is this read to abstractcs that will cause the dmi_busy_delay
3931 * to be incremented if necessary. */
3932
3933 uint32_t abstractcs;
3934 bool dmi_busy_encountered;
3935 result = dmi_op(target, &abstractcs, &dmi_busy_encountered,
3936 DMI_OP_READ, DM_ABSTRACTCS, 0, false, true);
3937 if (result != ERROR_OK)
3938 goto error;
3939 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
3940 if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
3941 return ERROR_FAIL;
3942 info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
3943 if (info->cmderr == CMDERR_NONE && !dmi_busy_encountered) {
3944 LOG_DEBUG("successful (partial?) memory write");
3945 } else if (info->cmderr == CMDERR_BUSY || dmi_busy_encountered) {
3946 if (info->cmderr == CMDERR_BUSY)
3947 LOG_DEBUG("Memory write resulted in abstract command busy response.");
3948 else if (dmi_busy_encountered)
3949 LOG_DEBUG("Memory write resulted in DMI busy response.");
3950 riscv013_clear_abstract_error(target);
3951 increase_ac_busy_delay(target);
3952
3953 dmi_write(target, DM_ABSTRACTAUTO, 0);
3954 result = register_read_direct(target, &cur_addr, GDB_REGNO_S0);
3955 if (result != ERROR_OK)
3956 goto error;
3957 setup_needed = true;
3958 } else {
3959 LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs);
3960 riscv013_clear_abstract_error(target);
3961 result = ERROR_FAIL;
3962 goto error;
3963 }
3964 }
3965
3966 error:
3967 dmi_write(target, DM_ABSTRACTAUTO, 0);
3968
3969 if (register_write_direct(target, GDB_REGNO_S1, s1) != ERROR_OK)
3970 return ERROR_FAIL;
3971 if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
3972 return ERROR_FAIL;
3973
3974 /* Restore MSTATUS */
3975 if (mstatus != mstatus_old)
3976 if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
3977 return ERROR_FAIL;
3978
3979 if (execute_fence(target) != ERROR_OK)
3980 return ERROR_FAIL;
3981
3982 return result;
3983 }
3984
3985 static int write_memory(struct target *target, target_addr_t address,
3986 uint32_t size, uint32_t count, const uint8_t *buffer)
3987 {
3988 if (size != 1 && size != 2 && size != 4 && size != 8 && size != 16) {
3989 LOG_ERROR("BUG: Unsupported size for memory write: %d", size);
3990 return ERROR_FAIL;
3991 }
3992
3993 int ret = ERROR_FAIL;
3994 RISCV_INFO(r);
3995 RISCV013_INFO(info);
3996
3997 char *progbuf_result = "disabled";
3998 char *sysbus_result = "disabled";
3999 char *abstract_result = "disabled";
4000
4001 for (unsigned int i = 0; i < RISCV_NUM_MEM_ACCESS_METHODS; i++) {
4002 int method = r->mem_access_methods[i];
4003
4004 if (method == RISCV_MEM_ACCESS_PROGBUF) {
4005 if (mem_should_skip_progbuf(target, address, size, false, &progbuf_result))
4006 continue;
4007
4008 ret = write_memory_progbuf(target, address, size, count, buffer);
4009
4010 if (ret != ERROR_OK)
4011 progbuf_result = "failed";
4012 } else if (method == RISCV_MEM_ACCESS_SYSBUS) {
4013 if (mem_should_skip_sysbus(target, address, size, 0, false, &sysbus_result))
4014 continue;
4015
4016 if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
4017 ret = write_memory_bus_v0(target, address, size, count, buffer);
4018 else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
4019 ret = write_memory_bus_v1(target, address, size, count, buffer);
4020
4021 if (ret != ERROR_OK)
4022 sysbus_result = "failed";
4023 } else if (method == RISCV_MEM_ACCESS_ABSTRACT) {
4024 if (mem_should_skip_abstract(target, address, size, 0, false, &abstract_result))
4025 continue;
4026
4027 ret = write_memory_abstract(target, address, size, count, buffer);
4028
4029 if (ret != ERROR_OK)
4030 abstract_result = "failed";
4031 } else if (method == RISCV_MEM_ACCESS_UNSPECIFIED)
4032 /* No further mem access method to try. */
4033 break;
4034
4035 log_mem_access_result(target, ret == ERROR_OK, method, false);
4036
4037 if (ret == ERROR_OK)
4038 return ret;
4039 }
4040
4041 LOG_ERROR("Target %s: Failed to write memory (addr=0x%" PRIx64 ")", target_name(target), address);
4042 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result, sysbus_result, abstract_result);
4043 return ret;
4044 }
4045
/* No architecture-specific state to report for this target type. */
static int arch_state(struct target *target)
{
	return ERROR_OK;
}
4050
/* Target-type operations specific to RISC-V debug spec 0.13.  Only a subset
 * of handlers is filled in here; NOTE(review): presumably the remaining
 * handlers (resume, read_memory, ...) are provided by the generic riscv
 * target wrapper -- confirm against riscv.c. */
struct target_type riscv013_target = {
	.name = "riscv",

	.init_target = init_target,
	.deinit_target = deinit_target,
	.examine = examine,

	/* Generic riscv entry points handle poll/halt/step. */
	.poll = &riscv_openocd_poll,
	.halt = &riscv_halt,
	.step = &riscv_openocd_step,

	.assert_reset = assert_reset,
	.deassert_reset = deassert_reset,

	.write_memory = write_memory,

	.arch_state = arch_state
};
4069
4070 /*** 0.13-specific implementations of various RISC-V helper functions. ***/
4071 static int riscv013_get_register(struct target *target,
4072 riscv_reg_t *value, int rid)
4073 {
4074 LOG_DEBUG("[%s] reading register %s", target_name(target),
4075 gdb_regno_name(rid));
4076
4077 if (riscv_select_current_hart(target) != ERROR_OK)
4078 return ERROR_FAIL;
4079
4080 int result = ERROR_OK;
4081 if (rid == GDB_REGNO_PC) {
4082 /* TODO: move this into riscv.c. */
4083 result = register_read(target, value, GDB_REGNO_DPC);
4084 LOG_DEBUG("[%d] read PC from DPC: 0x%" PRIx64, target->coreid, *value);
4085 } else if (rid == GDB_REGNO_PRIV) {
4086 uint64_t dcsr;
4087 /* TODO: move this into riscv.c. */
4088 result = register_read(target, &dcsr, GDB_REGNO_DCSR);
4089 *value = set_field(0, VIRT_PRIV_V, get_field(dcsr, CSR_DCSR_V));
4090 *value = set_field(*value, VIRT_PRIV_PRV, get_field(dcsr, CSR_DCSR_PRV));
4091 } else {
4092 result = register_read(target, value, rid);
4093 if (result != ERROR_OK)
4094 *value = -1;
4095 }
4096
4097 return result;
4098 }
4099
4100 static int riscv013_set_register(struct target *target, int rid, uint64_t value)
4101 {
4102 riscv013_select_current_hart(target);
4103 LOG_DEBUG("[%d] writing 0x%" PRIx64 " to register %s",
4104 target->coreid, value, gdb_regno_name(rid));
4105
4106 if (rid <= GDB_REGNO_XPR31) {
4107 return register_write_direct(target, rid, value);
4108 } else if (rid == GDB_REGNO_PC) {
4109 LOG_DEBUG("[%d] writing PC to DPC: 0x%" PRIx64, target->coreid, value);
4110 register_write_direct(target, GDB_REGNO_DPC, value);
4111 uint64_t actual_value;
4112 register_read_direct(target, &actual_value, GDB_REGNO_DPC);
4113 LOG_DEBUG("[%d] actual DPC written: 0x%016" PRIx64, target->coreid, actual_value);
4114 if (value != actual_value) {
4115 LOG_ERROR("Written PC (0x%" PRIx64 ") does not match read back "
4116 "value (0x%" PRIx64 ")", value, actual_value);
4117 return ERROR_FAIL;
4118 }
4119 } else if (rid == GDB_REGNO_PRIV) {
4120 uint64_t dcsr;
4121 register_read(target, &dcsr, GDB_REGNO_DCSR);
4122 dcsr = set_field(dcsr, CSR_DCSR_PRV, get_field(value, VIRT_PRIV_PRV));
4123 dcsr = set_field(dcsr, CSR_DCSR_V, get_field(value, VIRT_PRIV_V));
4124 return register_write_direct(target, GDB_REGNO_DCSR, dcsr);
4125 } else {
4126 return register_write_direct(target, rid, value);
4127 }
4128
4129 return ERROR_OK;
4130 }
4131
4132 static int riscv013_select_current_hart(struct target *target)
4133 {
4134 RISCV_INFO(r);
4135
4136 dm013_info_t *dm = get_dm(target);
4137 if (!dm)
4138 return ERROR_FAIL;
4139 if (r->current_hartid == dm->current_hartid)
4140 return ERROR_OK;
4141
4142 uint32_t dmcontrol;
4143 /* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
4144 if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
4145 return ERROR_FAIL;
4146 dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
4147 int result = dmi_write(target, DM_DMCONTROL, dmcontrol);
4148 dm->current_hartid = r->current_hartid;
4149 return result;
4150 }
4151
4152 /* Select all harts that were prepped and that are selectable, clearing the
4153 * prepped flag on the harts that actually were selected. */
4154 static int select_prepped_harts(struct target *target, bool *use_hasel)
4155 {
4156 dm013_info_t *dm = get_dm(target);
4157 if (!dm)
4158 return ERROR_FAIL;
4159 if (!dm->hasel_supported) {
4160 RISCV_INFO(r);
4161 r->prepped = false;
4162 *use_hasel = false;
4163 return ERROR_OK;
4164 }
4165
4166 assert(dm->hart_count);
4167 unsigned hawindow_count = (dm->hart_count + 31) / 32;
4168 uint32_t hawindow[hawindow_count];
4169
4170 memset(hawindow, 0, sizeof(uint32_t) * hawindow_count);
4171
4172 target_list_t *entry;
4173 unsigned total_selected = 0;
4174 list_for_each_entry(entry, &dm->target_list, list) {
4175 struct target *t = entry->target;
4176 riscv_info_t *r = riscv_info(t);
4177 riscv013_info_t *info = get_info(t);
4178 unsigned index = info->index;
4179 LOG_DEBUG("index=%d, coreid=%d, prepped=%d", index, t->coreid, r->prepped);
4180 r->selected = r->prepped;
4181 if (r->prepped) {
4182 hawindow[index / 32] |= 1 << (index % 32);
4183 r->prepped = false;
4184 total_selected++;
4185 }
4186 index++;
4187 }
4188
4189 /* Don't use hasel if we only need to talk to one hart. */
4190 if (total_selected <= 1) {
4191 *use_hasel = false;
4192 return ERROR_OK;
4193 }
4194
4195 for (unsigned i = 0; i < hawindow_count; i++) {
4196 if (dmi_write(target, DM_HAWINDOWSEL, i) != ERROR_OK)
4197 return ERROR_FAIL;
4198 if (dmi_write(target, DM_HAWINDOW, hawindow[i]) != ERROR_OK)
4199 return ERROR_FAIL;
4200 }
4201
4202 *use_hasel = true;
4203 return ERROR_OK;
4204 }
4205
/* Nothing to prepare before halting: riscv013_halt_go() does the hart
 * selection and issues the halt request itself. */
static int riscv013_halt_prep(struct target *target)
{
	return ERROR_OK;
}
4210
/* Halt the current hart (or, with hasel, all prepped harts): assert
 * dmcontrol.haltreq, poll until halted, then deassert haltreq.  When hasel
 * was used, every target on this DM is marked halted here. */
static int riscv013_halt_go(struct target *target)
{
	bool use_hasel = false;
	if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
		return ERROR_FAIL;

	RISCV_INFO(r);
	LOG_DEBUG("halting hart %d", r->current_hartid);

	/* Issue the halt command, and then wait for the current hart to halt. */
	uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_HALTREQ;
	if (use_hasel)
		dmcontrol |= DM_DMCONTROL_HASEL;
	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
	dmi_write(target, DM_DMCONTROL, dmcontrol);
	/* Bounded poll; each riscv_is_halted() call is a DMI round-trip. */
	for (size_t i = 0; i < 256; ++i)
		if (riscv_is_halted(target))
			break;

	if (!riscv_is_halted(target)) {
		/* Dump dmstatus/dmcontrol to help diagnose why the halt request
		 * was not honored. */
		uint32_t dmstatus;
		if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
			return ERROR_FAIL;
		if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
			return ERROR_FAIL;

		LOG_ERROR("unable to halt hart %d", r->current_hartid);
		LOG_ERROR("  dmcontrol=0x%08x", dmcontrol);
		LOG_ERROR("  dmstatus =0x%08x", dmstatus);
		return ERROR_FAIL;
	}

	/* Clear the sticky halt request so the hart can be resumed later. */
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HALTREQ, 0);
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	if (use_hasel) {
		target_list_t *entry;
		dm013_info_t *dm = get_dm(target);
		if (!dm)
			return ERROR_FAIL;
		list_for_each_entry(entry, &dm->target_list, list) {
			struct target *t = entry->target;
			t->state = TARGET_HALTED;
			if (t->debug_reason == DBG_REASON_NOTHALTED)
				t->debug_reason = DBG_REASON_DBGRQ;
		}
	}
	/* The "else" case is handled in halt_go(). */

	return ERROR_OK;
}
4262
4263 static int riscv013_resume_go(struct target *target)
4264 {
4265 bool use_hasel = false;
4266 if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
4267 return ERROR_FAIL;
4268
4269 return riscv013_step_or_resume_current_hart(target, false, use_hasel);
4270 }
4271
/* Single-step the current hart; hart-array selection is never used for a
 * step. */
static int riscv013_step_current_hart(struct target *target)
{
	const bool use_hasel = false;
	return riscv013_step_or_resume_current_hart(target, true, use_hasel);
}
4276
/* Resume preparation shares the common step/resume path (step = false). */
static int riscv013_resume_prep(struct target *target)
{
	const bool stepping = false;
	return riscv013_on_step_or_resume(target, stepping);
}
4281
/* Step preparation shares the common step/resume path (step = true). */
static int riscv013_on_step(struct target *target)
{
	const bool stepping = true;
	return riscv013_on_step_or_resume(target, stepping);
}
4286
/* No per-halt bookkeeping is needed for debug spec 0.13. */
static int riscv013_on_halt(struct target *target)
{
	return ERROR_OK;
}
4291
/* Return true if the current hart is halted, based on dmstatus.allhalted.
 *
 * Side effects: logs unavailable/nonexistent harts, and if the hart has
 * unexpectedly reset, acknowledges the reset via dmcontrol.ackhavereset --
 * re-requesting a halt if we believed the hart was halted before the
 * reset. */
static bool riscv013_is_halted(struct target *target)
{
	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return false;
	if (get_field(dmstatus, DM_DMSTATUS_ANYUNAVAIL))
		LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target));
	if (get_field(dmstatus, DM_DMSTATUS_ANYNONEXISTENT))
		LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target));
	if (get_field(dmstatus, DM_DMSTATUS_ANYHAVERESET)) {
		int hartid = riscv_current_hartid(target);
		LOG_INFO("Hart %d unexpectedly reset!", hartid);
		/* TODO: Can we make this more obvious to eg. a gdb user? */
		uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_ACKHAVERESET;
		dmcontrol = set_hartsel(dmcontrol, hartid);
		/* If we had been halted when we reset, request another halt. If we
		 * ended up running out of reset, then the user will (hopefully) get a
		 * message that a reset happened, that the target is running, and then
		 * that it is halted again once the request goes through.
		 */
		if (target->state == TARGET_HALTED)
			dmcontrol |= DM_DMCONTROL_HALTREQ;
		dmi_write(target, DM_DMCONTROL, dmcontrol);
	}
	return get_field(dmstatus, DM_DMSTATUS_ALLHALTED);
}
4319
/* Translate dcsr.cause of the current hart into a riscv_halt_reason.
 * Returns RISCV_HALT_UNKNOWN if DCSR cannot be read or the cause value is
 * not recognized. */
static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
{
	riscv_reg_t dcsr;
	int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
	if (result != ERROR_OK)
		return RISCV_HALT_UNKNOWN;

	LOG_DEBUG("dcsr.cause: 0x%" PRIx64, get_field(dcsr, CSR_DCSR_CAUSE));

	switch (get_field(dcsr, CSR_DCSR_CAUSE)) {
	case CSR_DCSR_CAUSE_SWBP:
		return RISCV_HALT_BREAKPOINT;
	case CSR_DCSR_CAUSE_TRIGGER:
		/* We could get here before triggers are enumerated if a trigger was
		 * already set when we connected. Force enumeration now, which has the
		 * side effect of clearing any triggers we did not set. */
		riscv_enumerate_triggers(target);
		LOG_DEBUG("{%d} halted because of trigger", target->coreid);
		return RISCV_HALT_TRIGGER;
	case CSR_DCSR_CAUSE_STEP:
		return RISCV_HALT_SINGLESTEP;
	case CSR_DCSR_CAUSE_DEBUGINT:
	case CSR_DCSR_CAUSE_HALT:
		return RISCV_HALT_INTERRUPT;
	case CSR_DCSR_CAUSE_GROUP:
		return RISCV_HALT_GROUP;
	}

	/* Unrecognized cause value: fall through to the error report. */
	LOG_ERROR("Unknown DCSR cause field: 0x%" PRIx64, get_field(dcsr, CSR_DCSR_CAUSE));
	LOG_ERROR("  dcsr=0x%016lx", (long)dcsr);
	return RISCV_HALT_UNKNOWN;
}
4352
4353 int riscv013_write_debug_buffer(struct target *target, unsigned index, riscv_insn_t data)
4354 {
4355 dm013_info_t *dm = get_dm(target);
4356 if (!dm)
4357 return ERROR_FAIL;
4358 if (dm->progbuf_cache[index] != data) {
4359 if (dmi_write(target, DM_PROGBUF0 + index, data) != ERROR_OK)
4360 return ERROR_FAIL;
4361 dm->progbuf_cache[index] = data;
4362 } else {
4363 LOG_DEBUG("cache hit for 0x%" PRIx32 " @%d", data, index);
4364 }
4365 return ERROR_OK;
4366 }
4367
4368 riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned index)
4369 {
4370 uint32_t value;
4371 dmi_read(target, &value, DM_PROGBUF0 + index);
4372 return value;
4373 }
4374
4375 int riscv013_execute_debug_buffer(struct target *target)
4376 {
4377 uint32_t run_program = 0;
4378 run_program = set_field(run_program, AC_ACCESS_REGISTER_AARSIZE, 2);
4379 run_program = set_field(run_program, AC_ACCESS_REGISTER_POSTEXEC, 1);
4380 run_program = set_field(run_program, AC_ACCESS_REGISTER_TRANSFER, 0);
4381 run_program = set_field(run_program, AC_ACCESS_REGISTER_REGNO, 0x1000);
4382
4383 return execute_abstract_command(target, run_program);
4384 }
4385
4386 void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
4387 {
4388 RISCV013_INFO(info);
4389 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_WRITE);
4390 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, d);
4391 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
4392 }
4393
4394 void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a)
4395 {
4396 RISCV013_INFO(info);
4397 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_READ);
4398 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
4399 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
4400 }
4401
4402 void riscv013_fill_dmi_nop_u64(struct target *target, char *buf)
4403 {
4404 RISCV013_INFO(info);
4405 buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_NOP);
4406 buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
4407 buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, 0);
4408 }
4409
4410 /* Helper function for riscv013_test_sba_config_reg */
4411 static int get_max_sbaccess(struct target *target)
4412 {
4413 RISCV013_INFO(info);
4414
4415 uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
4416 uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
4417 uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
4418 uint32_t sbaccess16 = get_field(info->sbcs, DM_SBCS_SBACCESS16);
4419 uint32_t sbaccess8 = get_field(info->sbcs, DM_SBCS_SBACCESS8);
4420
4421 if (sbaccess128)
4422 return 4;
4423 else if (sbaccess64)
4424 return 3;
4425 else if (sbaccess32)
4426 return 2;
4427 else if (sbaccess16)
4428 return 1;
4429 else if (sbaccess8)
4430 return 0;
4431 else
4432 return -1;
4433 }
4434
4435 static uint32_t get_num_sbdata_regs(struct target *target)
4436 {
4437 RISCV013_INFO(info);
4438
4439 uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
4440 uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
4441 uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
4442
4443 if (sbaccess128)
4444 return 4;
4445 else if (sbaccess64)
4446 return 2;
4447 else if (sbaccess32)
4448 return 1;
4449 else
4450 return 0;
4451 }
4452
4453 static int riscv013_test_sba_config_reg(struct target *target,
4454 target_addr_t legal_address, uint32_t num_words,
4455 target_addr_t illegal_address, bool run_sbbusyerror_test)
4456 {
4457 LOG_INFO("Testing System Bus Access as defined by RISC-V Debug Spec v0.13");
4458
4459 uint32_t tests_failed = 0;
4460
4461 uint32_t rd_val;
4462 uint32_t sbcs_orig;
4463 dmi_read(target, &sbcs_orig, DM_SBCS);
4464
4465 uint32_t sbcs = sbcs_orig;
4466 bool test_passed;
4467
4468 int max_sbaccess = get_max_sbaccess(target);
4469
4470 if (max_sbaccess == -1) {
4471 LOG_ERROR("System Bus Access not supported in this config.");
4472 return ERROR_FAIL;
4473 }
4474
4475 if (get_field(sbcs, DM_SBCS_SBVERSION) != 1) {
4476 LOG_ERROR("System Bus Access unsupported SBVERSION (%d). Only version 1 is supported.",
4477 get_field(sbcs, DM_SBCS_SBVERSION));
4478 return ERROR_FAIL;
4479 }
4480
4481 uint32_t num_sbdata_regs = get_num_sbdata_regs(target);
4482 assert(num_sbdata_regs);
4483
4484 uint32_t rd_buf[num_sbdata_regs];
4485
4486 /* Test 1: Simple write/read test */
4487 test_passed = true;
4488 sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 0);
4489 dmi_write(target, DM_SBCS, sbcs);
4490
4491 uint32_t test_patterns[4] = {0xdeadbeef, 0xfeedbabe, 0x12345678, 0x08675309};
4492 for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
4493 sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
4494 dmi_write(target, DM_SBCS, sbcs);
4495
4496 uint32_t compare_mask = (sbaccess == 0) ? 0xff : (sbaccess == 1) ? 0xffff : 0xffffffff;
4497
4498 for (uint32_t i = 0; i < num_words; i++) {
4499 uint32_t addr = legal_address + (i << sbaccess);
4500 uint32_t wr_data[num_sbdata_regs];
4501 for (uint32_t j = 0; j < num_sbdata_regs; j++)
4502 wr_data[j] = test_patterns[j] + i;
4503 write_memory_sba_simple(target, addr, wr_data, num_sbdata_regs, sbcs);
4504 }
4505
4506 for (uint32_t i = 0; i < num_words; i++) {
4507 uint32_t addr = legal_address + (i << sbaccess);
4508 read_memory_sba_simple(target, addr, rd_buf, num_sbdata_regs, sbcs);
4509 for (uint32_t j = 0; j < num_sbdata_regs; j++) {
4510 if (((test_patterns[j]+i)&compare_mask) != (rd_buf[j]&compare_mask)) {
4511 LOG_ERROR("System Bus Access Test 1: Error reading non-autoincremented address %x,"
4512 "expected val = %x, read val = %x", addr, test_patterns[j]+i, rd_buf[j]);
4513 test_passed = false;
4514 tests_failed++;
4515 }
4516 }
4517 }
4518 }
4519 if (test_passed)
4520 LOG_INFO("System Bus Access Test 1: Simple write/read test PASSED.");
4521
4522 /* Test 2: Address autoincrement test */
4523 target_addr_t curr_addr;
4524 target_addr_t prev_addr;
4525 test_passed = true;
4526 sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 1);
4527 dmi_write(target, DM_SBCS, sbcs);
4528
4529 for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
4530 sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
4531 dmi_write(target, DM_SBCS, sbcs);
4532
4533 dmi_write(target, DM_SBADDRESS0, legal_address);
4534 read_sbcs_nonbusy(target, &sbcs);
4535 curr_addr = legal_address;
4536 for (uint32_t i = 0; i < num_words; i++) {
4537 prev_addr = curr_addr;
4538 read_sbcs_nonbusy(target, &sbcs);
4539 curr_addr = sb_read_address(target);
4540 if ((curr_addr - prev_addr != (uint32_t)(1 << sbaccess)) && (i != 0)) {
4541 LOG_ERROR("System Bus Access Test 2: Error with address auto-increment, sbaccess = %x.", sbaccess);
4542 test_passed = false;
4543 tests_failed++;
4544 }
4545 dmi_write(target, DM_SBDATA0, i);
4546 }
4547
4548 read_sbcs_nonbusy(target, &sbcs);
4549
4550 dmi_write(target, DM_SBADDRESS0, legal_address);
4551
4552 uint32_t val;
4553 sbcs = set_field(sbcs, DM_SBCS_SBREADONDATA, 1);
4554 dmi_write(target, DM_SBCS, sbcs);
4555 dmi_read(target, &val, DM_SBDATA0); /* Dummy read to trigger first system bus read */
4556 curr_addr = legal_address;
4557 for (uint32_t i = 0; i < num_words; i++) {
4558 prev_addr = curr_addr;
4559 read_sbcs_nonbusy(target, &sbcs);
4560 curr_addr = sb_read_address(target);
4561 if ((curr_addr - prev_addr != (uint32_t)(1 << sbaccess)) && (i != 0)) {
4562 LOG_ERROR("System Bus Access Test 2: Error with address auto-increment, sbaccess = %x", sbaccess);
4563 test_passed = false;
4564 tests_failed++;
4565 }
4566 dmi_read(target, &val, DM_SBDATA0);
4567 read_sbcs_nonbusy(target, &sbcs);
4568 if (i != val) {
4569 LOG_ERROR("System Bus Access Test 2: Error reading auto-incremented address,"
4570 "expected val = %x, read val = %x.", i, val);
4571 test_passed = false;
4572 tests_failed++;
4573 }
4574 }
4575 }
4576 if (test_passed)
4577 LOG_INFO("System Bus Access Test 2: Address auto-increment test PASSED.");
4578
4579 /* Test 3: Read from illegal address */
4580 read_memory_sba_simple(target, illegal_address, rd_buf, 1, sbcs_orig);
4581
4582 dmi_read(target, &rd_val, DM_SBCS);
4583 if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
4584 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
4585 dmi_write(target, DM_SBCS, sbcs);
4586 dmi_read(target, &rd_val, DM_SBCS);
4587 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4588 LOG_INFO("System Bus Access Test 3: Illegal address read test PASSED.");
4589 else
4590 LOG_ERROR("System Bus Access Test 3: Illegal address read test FAILED, unable to clear to 0.");
4591 } else {
4592 LOG_ERROR("System Bus Access Test 3: Illegal address read test FAILED, unable to set error code.");
4593 }
4594
4595 /* Test 4: Write to illegal address */
4596 write_memory_sba_simple(target, illegal_address, test_patterns, 1, sbcs_orig);
4597
4598 dmi_read(target, &rd_val, DM_SBCS);
4599 if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
4600 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
4601 dmi_write(target, DM_SBCS, sbcs);
4602 dmi_read(target, &rd_val, DM_SBCS);
4603 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4604 LOG_INFO("System Bus Access Test 4: Illegal address write test PASSED.");
4605 else {
4606 LOG_ERROR("System Bus Access Test 4: Illegal address write test FAILED, unable to clear to 0.");
4607 tests_failed++;
4608 }
4609 } else {
4610 LOG_ERROR("System Bus Access Test 4: Illegal address write test FAILED, unable to set error code.");
4611 tests_failed++;
4612 }
4613
4614 /* Test 5: Write with unsupported sbaccess size */
4615 uint32_t sbaccess128 = get_field(sbcs_orig, DM_SBCS_SBACCESS128);
4616
4617 if (sbaccess128) {
4618 LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED, all sbaccess sizes supported.");
4619 } else {
4620 sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 4);
4621
4622 write_memory_sba_simple(target, legal_address, test_patterns, 1, sbcs);
4623
4624 dmi_read(target, &rd_val, DM_SBCS);
4625 if (get_field(rd_val, DM_SBCS_SBERROR) == 4) {
4626 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 4);
4627 dmi_write(target, DM_SBCS, sbcs);
4628 dmi_read(target, &rd_val, DM_SBCS);
4629 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4630 LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED.");
4631 else {
4632 LOG_ERROR("System Bus Access Test 5: SBCS sbaccess error test FAILED, unable to clear to 0.");
4633 tests_failed++;
4634 }
4635 } else {
4636 LOG_ERROR("System Bus Access Test 5: SBCS sbaccess error test FAILED, unable to set error code.");
4637 tests_failed++;
4638 }
4639 }
4640
4641 /* Test 6: Write to misaligned address */
4642 sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 1);
4643
4644 write_memory_sba_simple(target, legal_address+1, test_patterns, 1, sbcs);
4645
4646 dmi_read(target, &rd_val, DM_SBCS);
4647 if (get_field(rd_val, DM_SBCS_SBERROR) == 3) {
4648 sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 3);
4649 dmi_write(target, DM_SBCS, sbcs);
4650 dmi_read(target, &rd_val, DM_SBCS);
4651 if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
4652 LOG_INFO("System Bus Access Test 6: SBCS address alignment error test PASSED");
4653 else {
4654 LOG_ERROR("System Bus Access Test 6: SBCS address alignment error test FAILED, unable to clear to 0.");
4655 tests_failed++;
4656 }
4657 } else {
4658 LOG_ERROR("System Bus Access Test 6: SBCS address alignment error test FAILED, unable to set error code.");
4659 tests_failed++;
4660 }
4661
4662 /* Test 7: Set sbbusyerror, only run this case in simulation as it is likely
4663 * impossible to hit otherwise */
4664 if (run_sbbusyerror_test) {
4665 sbcs = set_field(sbcs_orig, DM_SBCS_SBREADONADDR, 1);
4666 dmi_write(target, DM_SBCS, sbcs);
4667
4668 for (int i = 0; i < 16; i++)
4669 dmi_write(target, DM_SBDATA0, 0xdeadbeef);
4670
4671 for (int i = 0; i < 16; i++)
4672 dmi_write(target, DM_SBADDRESS0, legal_address);
4673
4674 dmi_read(target, &rd_val, DM_SBCS);
4675 if (get_field(rd_val, DM_SBCS_SBBUSYERROR)) {
4676 sbcs = set_field(sbcs_orig, DM_SBCS_SBBUSYERROR, 1);
4677 dmi_write(target, DM_SBCS, sbcs);
4678 dmi_read(target, &rd_val, DM_SBCS);
4679 if (get_field(rd_val, DM_SBCS_SBBUSYERROR) == 0)
4680 LOG_INFO("System Bus Access Test 7: SBCS sbbusyerror test PASSED.");
4681 else {
4682 LOG_ERROR("System Bus Access Test 7: SBCS sbbusyerror test FAILED, unable to clear to 0.");
4683 tests_failed++;
4684 }
4685 } else {
4686 LOG_ERROR("System Bus Access Test 7: SBCS sbbusyerror test FAILED, unable to set error code.");
4687 tests_failed++;
4688 }
4689 }
4690
4691 if (tests_failed == 0) {
4692 LOG_INFO("ALL TESTS PASSED");
4693 return ERROR_OK;
4694 } else {
4695 LOG_ERROR("%d TESTS FAILED", tests_failed);
4696 return ERROR_FAIL;
4697 }
4698
4699 }
4700
4701 void write_memory_sba_simple(struct target *target, target_addr_t addr,
4702 uint32_t *write_data, uint32_t write_size, uint32_t sbcs)
4703 {
4704 RISCV013_INFO(info);
4705
4706 uint32_t rd_sbcs;
4707 uint32_t masked_addr;
4708
4709 uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
4710
4711 read_sbcs_nonbusy(target, &rd_sbcs);
4712
4713 uint32_t sbcs_no_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 0);
4714 dmi_write(target, DM_SBCS, sbcs_no_readonaddr);
4715
4716 for (uint32_t i = 0; i < sba_size/32; i++) {
4717 masked_addr = (addr >> 32*i) & 0xffffffff;
4718
4719 if (i != 3)
4720 dmi_write(target, DM_SBADDRESS0+i, masked_addr);
4721 else
4722 dmi_write(target, DM_SBADDRESS3, masked_addr);
4723 }
4724
4725 /* Write SBDATA registers starting with highest address, since write to
4726 * SBDATA0 triggers write */
4727 for (int i = write_size-1; i >= 0; i--)
4728 dmi_write(target, DM_SBDATA0+i, write_data[i]);
4729 }
4730
4731 void read_memory_sba_simple(struct target *target, target_addr_t addr,
4732 uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs)
4733 {
4734 RISCV013_INFO(info);
4735
4736 uint32_t rd_sbcs;
4737 uint32_t masked_addr;
4738
4739 uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
4740
4741 read_sbcs_nonbusy(target, &rd_sbcs);
4742
4743 uint32_t sbcs_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 1);
4744 dmi_write(target, DM_SBCS, sbcs_readonaddr);
4745
4746 /* Write addresses starting with highest address register */
4747 for (int i = sba_size/32-1; i >= 0; i--) {
4748 masked_addr = (addr >> 32*i) & 0xffffffff;
4749
4750 if (i != 3)
4751 dmi_write(target, DM_SBADDRESS0+i, masked_addr);
4752 else
4753 dmi_write(target, DM_SBADDRESS3, masked_addr);
4754 }
4755
4756 read_sbcs_nonbusy(target, &rd_sbcs);
4757
4758 for (uint32_t i = 0; i < read_size; i++)
4759 dmi_read(target, &(rd_buf[i]), DM_SBDATA0+i);
4760 }
4761
4762 int riscv013_dmi_write_u64_bits(struct target *target)
4763 {
4764 RISCV013_INFO(info);
4765 return info->abits + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH;
4766 }
4767
4768 static int maybe_execute_fence_i(struct target *target)
4769 {
4770 if (has_sufficient_progbuf(target, 3))
4771 return execute_fence(target);
4772 return ERROR_OK;
4773 }
4774
4775 /* Helper Functions. */
4776 static int riscv013_on_step_or_resume(struct target *target, bool step)
4777 {
4778 if (maybe_execute_fence_i(target) != ERROR_OK)
4779 return ERROR_FAIL;
4780
4781 /* We want to twiddle some bits in the debug CSR so debugging works. */
4782 riscv_reg_t dcsr;
4783 int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
4784 if (result != ERROR_OK)
4785 return result;
4786 dcsr = set_field(dcsr, CSR_DCSR_STEP, step);
4787 dcsr = set_field(dcsr, CSR_DCSR_EBREAKM, riscv_ebreakm);
4788 dcsr = set_field(dcsr, CSR_DCSR_EBREAKS, riscv_ebreaks);
4789 dcsr = set_field(dcsr, CSR_DCSR_EBREAKU, riscv_ebreaku);
4790 return riscv_set_register(target, GDB_REGNO_DCSR, dcsr);
4791 }
4792
/* Resume (or single-step, when @step) the currently selected hart.  Selects
 * the hart in dmcontrol (optionally with hasel), sets resumereq, then polls
 * dmstatus until allresumeack is set; for a step the hart halts again right
 * away, so allhalted is also required before declaring success.  On every
 * exit path dmcontrol is rewritten with resumereq (and hasel) deasserted so
 * no stale resume request lingers in the DM. */
static int riscv013_step_or_resume_current_hart(struct target *target,
		bool step, bool use_hasel)
{
	RISCV_INFO(r);
	LOG_DEBUG("resuming hart %d (for step?=%d)", r->current_hartid, step);
	if (!riscv_is_halted(target)) {
		LOG_ERROR("Hart %d is not halted!", r->current_hartid);
		return ERROR_FAIL;
	}

	/* Issue the resume command, and then wait for the current hart to resume. */
	uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_RESUMEREQ;
	if (use_hasel)
		dmcontrol |= DM_DMCONTROL_HASEL;
	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	/* Value to write back once the hart has (or has failed to) resume:
	 * same hart selection, but with hasel and resumereq deasserted. */
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HASEL, 0);
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_RESUMEREQ, 0);

	uint32_t dmstatus;
	/* 256 polls with a short sleep between each; dmstatus_read also reports
	 * unexpected halt/reset state when its second argument is true. */
	for (size_t i = 0; i < 256; ++i) {
		usleep(10);
		if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
			return ERROR_FAIL;
		if (get_field(dmstatus, DM_DMSTATUS_ALLRESUMEACK) == 0)
			continue;
		if (step && get_field(dmstatus, DM_DMSTATUS_ALLHALTED) == 0)
			continue;

		/* Hart acknowledged the resume; deassert resumereq. */
		dmi_write(target, DM_DMCONTROL, dmcontrol);
		return ERROR_OK;
	}

	/* Timed out waiting for resumeack; still clear resumereq. */
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	LOG_ERROR("unable to resume hart %d", r->current_hartid);
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return ERROR_FAIL;
	LOG_ERROR(" dmstatus =0x%08x", dmstatus);

	if (step) {
		LOG_ERROR(" was stepping, halting");
		riscv_halt(target);
		return ERROR_OK;
	}

	return ERROR_FAIL;
}
4842
4843 void riscv013_clear_abstract_error(struct target *target)
4844 {
4845 /* Wait for busy to go away. */
4846 time_t start = time(NULL);
4847 uint32_t abstractcs;
4848 dmi_read(target, &abstractcs, DM_ABSTRACTCS);
4849 while (get_field(abstractcs, DM_ABSTRACTCS_BUSY)) {
4850 dmi_read(target, &abstractcs, DM_ABSTRACTCS);
4851
4852 if (time(NULL) - start > riscv_command_timeout_sec) {
4853 LOG_ERROR("abstractcs.busy is not going low after %d seconds "
4854 "(abstractcs=0x%x). The target is either really slow or "
4855 "broken. You could increase the timeout with riscv "
4856 "set_command_timeout_sec.",
4857 riscv_command_timeout_sec, abstractcs);
4858 break;
4859 }
4860 }
4861 /* Clear the error status. */
4862 dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
4863 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)