1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
16 #include "target/target.h"
17 #include "target/algorithm.h"
18 #include "target/target_type.h"
19 #include <helper/log.h>
20 #include "jtag/jtag.h"
21 #include "target/register.h"
22 #include "target/breakpoints.h"
23 #include "helper/time_support.h"
24 #include "helper/list.h"
26 #include "debug_defines.h"
27 #include "rtos/rtos.h"
32 static int riscv013_on_step_or_resume(struct target
*target
, bool step
);
33 static int riscv013_step_or_resume_current_hart(struct target
*target
,
34 bool step
, bool use_hasel
);
35 static void riscv013_clear_abstract_error(struct target
*target
);
37 /* Implementations of the functions in struct riscv_info. */
38 static int riscv013_get_register(struct target
*target
,
39 riscv_reg_t
*value
, int rid
);
40 static int riscv013_set_register(struct target
*target
, int regid
, uint64_t value
);
41 static int riscv013_select_current_hart(struct target
*target
);
42 static int riscv013_halt_prep(struct target
*target
);
43 static int riscv013_halt_go(struct target
*target
);
44 static int riscv013_resume_go(struct target
*target
);
45 static int riscv013_step_current_hart(struct target
*target
);
46 static int riscv013_on_halt(struct target
*target
);
47 static int riscv013_on_step(struct target
*target
);
48 static int riscv013_resume_prep(struct target
*target
);
49 static bool riscv013_is_halted(struct target
*target
);
50 static enum riscv_halt_reason
riscv013_halt_reason(struct target
*target
);
51 static int riscv013_write_debug_buffer(struct target
*target
, unsigned index
,
53 static riscv_insn_t
riscv013_read_debug_buffer(struct target
*target
, unsigned
55 static int riscv013_execute_debug_buffer(struct target
*target
);
56 static void riscv013_fill_dmi_write_u64(struct target
*target
, char *buf
, int a
, uint64_t d
);
57 static void riscv013_fill_dmi_read_u64(struct target
*target
, char *buf
, int a
);
58 static int riscv013_dmi_write_u64_bits(struct target
*target
);
59 static void riscv013_fill_dmi_nop_u64(struct target
*target
, char *buf
);
60 static int register_read(struct target
*target
, uint64_t *value
, uint32_t number
);
61 static int register_read_direct(struct target
*target
, uint64_t *value
, uint32_t number
);
62 static int register_write_direct(struct target
*target
, unsigned number
,
64 static int read_memory(struct target
*target
, target_addr_t address
,
65 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t increment
);
66 static int write_memory(struct target
*target
, target_addr_t address
,
67 uint32_t size
, uint32_t count
, const uint8_t *buffer
);
70 * Since almost everything can be accomplish by scanning the dbus register, all
71 * functions here assume dbus is already selected. The exception are functions
72 * called directly by OpenOCD, which can't assume anything about what's
73 * currently in IR. They should set IR to dbus explicitly.
76 #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
77 #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
79 #define CSR_DCSR_CAUSE_SWBP 1
80 #define CSR_DCSR_CAUSE_TRIGGER 2
81 #define CSR_DCSR_CAUSE_DEBUGINT 3
82 #define CSR_DCSR_CAUSE_STEP 4
83 #define CSR_DCSR_CAUSE_HALT 5
84 #define CSR_DCSR_CAUSE_GROUP 6
86 #define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
88 /*** JTAG registers. ***/
96 DMI_STATUS_SUCCESS
= 0,
97 DMI_STATUS_FAILED
= 2,
107 /*** Debug Bus registers. ***/
109 #define CMDERR_NONE 0
110 #define CMDERR_BUSY 1
111 #define CMDERR_NOT_SUPPORTED 2
112 #define CMDERR_EXCEPTION 3
113 #define CMDERR_HALT_RESUME 4
114 #define CMDERR_OTHER 7
116 /*** Info about the core being debugged. ***/
123 bool read
, write
, execute
;
134 struct list_head list
;
135 int abs_chain_position
;
137 /* The number of harts connected to this DM. */
139 /* Indicates we already reset this DM, so don't need to do it again. */
141 /* Targets that are connected to this DM. */
142 struct list_head target_list
;
143 /* The currently selected hartid on this DM. */
145 bool hasel_supported
;
147 /* The program buffer stores executable code. 0 is an illegal instruction,
148 * so we use 0 to mean the cached value is invalid. */
149 uint32_t progbuf_cache
[16];
153 struct list_head list
;
154 struct target
*target
;
158 /* The indexed used to address this hart in its DM. */
160 /* Number of address bits in the dbus register. */
162 /* Number of abstract command data registers. */
164 /* Number of words in the Program Buffer. */
165 unsigned progbufsize
;
167 /* We cache the read-only bits of sbcs here. */
170 yes_no_maybe_t progbuf_writable
;
171 /* We only need the address so that we know the alignment of the buffer. */
172 riscv_addr_t progbuf_address
;
174 /* Number of run-test/idle cycles the target requests we do after each dbus
176 unsigned int dtmcs_idle
;
178 /* This value is incremented every time a dbus access comes back as "busy".
179 * It's used to determine how many run-test/idle cycles to feed the target
180 * in between accesses. */
181 unsigned int dmi_busy_delay
;
183 /* Number of run-test/idle cycles to add between consecutive bus master
184 * reads/writes respectively. */
185 unsigned int bus_master_write_delay
, bus_master_read_delay
;
187 /* This value is increased every time we tried to execute two commands
188 * consecutively, and the second one failed because the previous hadn't
189 * completed yet. It's used to add extra run-test/idle cycles after
190 * starting a command, so we don't have to waste time checking for busy to
192 unsigned int ac_busy_delay
;
194 bool abstract_read_csr_supported
;
195 bool abstract_write_csr_supported
;
196 bool abstract_read_fpr_supported
;
197 bool abstract_write_fpr_supported
;
199 yes_no_maybe_t has_aampostincrement
;
201 /* When a function returns some error due to a failure indicated by the
202 * target in cmderr, the caller can look here to see what that error was.
203 * (Compare with errno.) */
206 /* Some fields from hartinfo. */
211 /* The width of the hartsel field. */
214 /* DM that provides access to this target. */
218 static LIST_HEAD(dm_list
);
220 static riscv013_info_t
*get_info(const struct target
*target
)
222 struct riscv_info
*info
= target
->arch_info
;
224 assert(info
->version_specific
);
225 return info
->version_specific
;
229 * Return the DM structure for this target. If there isn't one, find it in the
230 * global list of DMs. If it's not in there, then create one and initialize it
233 static dm013_info_t
*get_dm(struct target
*target
)
239 int abs_chain_position
= target
->tap
->abs_chain_position
;
242 dm013_info_t
*dm
= NULL
;
243 list_for_each_entry(entry
, &dm_list
, list
) {
244 if (entry
->abs_chain_position
== abs_chain_position
) {
251 LOG_DEBUG("[%d] Allocating new DM", target
->coreid
);
252 dm
= calloc(1, sizeof(dm013_info_t
));
255 dm
->abs_chain_position
= abs_chain_position
;
256 dm
->current_hartid
= -1;
258 INIT_LIST_HEAD(&dm
->target_list
);
259 list_add(&dm
->list
, &dm_list
);
263 target_list_t
*target_entry
;
264 list_for_each_entry(target_entry
, &dm
->target_list
, list
) {
265 if (target_entry
->target
== target
)
268 target_entry
= calloc(1, sizeof(*target_entry
));
273 target_entry
->target
= target
;
274 list_add(&target_entry
->list
, &dm
->target_list
);
279 static uint32_t set_hartsel(uint32_t initial
, uint32_t index
)
281 initial
&= ~DM_DMCONTROL_HARTSELLO
;
282 initial
&= ~DM_DMCONTROL_HARTSELHI
;
284 uint32_t index_lo
= index
& ((1 << DM_DMCONTROL_HARTSELLO_LENGTH
) - 1);
285 initial
|= index_lo
<< DM_DMCONTROL_HARTSELLO_OFFSET
;
286 uint32_t index_hi
= index
>> DM_DMCONTROL_HARTSELLO_LENGTH
;
287 assert(index_hi
< 1 << DM_DMCONTROL_HARTSELHI_LENGTH
);
288 initial
|= index_hi
<< DM_DMCONTROL_HARTSELHI_OFFSET
;
293 static void decode_dmi(char *text
, unsigned address
, unsigned data
)
295 static const struct {
300 { DM_DMCONTROL
, DM_DMCONTROL_HALTREQ
, "haltreq" },
301 { DM_DMCONTROL
, DM_DMCONTROL_RESUMEREQ
, "resumereq" },
302 { DM_DMCONTROL
, DM_DMCONTROL_HARTRESET
, "hartreset" },
303 { DM_DMCONTROL
, DM_DMCONTROL_HASEL
, "hasel" },
304 { DM_DMCONTROL
, DM_DMCONTROL_HARTSELHI
, "hartselhi" },
305 { DM_DMCONTROL
, DM_DMCONTROL_HARTSELLO
, "hartsello" },
306 { DM_DMCONTROL
, DM_DMCONTROL_NDMRESET
, "ndmreset" },
307 { DM_DMCONTROL
, DM_DMCONTROL_DMACTIVE
, "dmactive" },
308 { DM_DMCONTROL
, DM_DMCONTROL_ACKHAVERESET
, "ackhavereset" },
310 { DM_DMSTATUS
, DM_DMSTATUS_IMPEBREAK
, "impebreak" },
311 { DM_DMSTATUS
, DM_DMSTATUS_ALLHAVERESET
, "allhavereset" },
312 { DM_DMSTATUS
, DM_DMSTATUS_ANYHAVERESET
, "anyhavereset" },
313 { DM_DMSTATUS
, DM_DMSTATUS_ALLRESUMEACK
, "allresumeack" },
314 { DM_DMSTATUS
, DM_DMSTATUS_ANYRESUMEACK
, "anyresumeack" },
315 { DM_DMSTATUS
, DM_DMSTATUS_ALLNONEXISTENT
, "allnonexistent" },
316 { DM_DMSTATUS
, DM_DMSTATUS_ANYNONEXISTENT
, "anynonexistent" },
317 { DM_DMSTATUS
, DM_DMSTATUS_ALLUNAVAIL
, "allunavail" },
318 { DM_DMSTATUS
, DM_DMSTATUS_ANYUNAVAIL
, "anyunavail" },
319 { DM_DMSTATUS
, DM_DMSTATUS_ALLRUNNING
, "allrunning" },
320 { DM_DMSTATUS
, DM_DMSTATUS_ANYRUNNING
, "anyrunning" },
321 { DM_DMSTATUS
, DM_DMSTATUS_ALLHALTED
, "allhalted" },
322 { DM_DMSTATUS
, DM_DMSTATUS_ANYHALTED
, "anyhalted" },
323 { DM_DMSTATUS
, DM_DMSTATUS_AUTHENTICATED
, "authenticated" },
324 { DM_DMSTATUS
, DM_DMSTATUS_AUTHBUSY
, "authbusy" },
325 { DM_DMSTATUS
, DM_DMSTATUS_HASRESETHALTREQ
, "hasresethaltreq" },
326 { DM_DMSTATUS
, DM_DMSTATUS_CONFSTRPTRVALID
, "confstrptrvalid" },
327 { DM_DMSTATUS
, DM_DMSTATUS_VERSION
, "version" },
329 { DM_ABSTRACTCS
, DM_ABSTRACTCS_PROGBUFSIZE
, "progbufsize" },
330 { DM_ABSTRACTCS
, DM_ABSTRACTCS_BUSY
, "busy" },
331 { DM_ABSTRACTCS
, DM_ABSTRACTCS_CMDERR
, "cmderr" },
332 { DM_ABSTRACTCS
, DM_ABSTRACTCS_DATACOUNT
, "datacount" },
334 { DM_COMMAND
, DM_COMMAND_CMDTYPE
, "cmdtype" },
336 { DM_SBCS
, DM_SBCS_SBVERSION
, "sbversion" },
337 { DM_SBCS
, DM_SBCS_SBBUSYERROR
, "sbbusyerror" },
338 { DM_SBCS
, DM_SBCS_SBBUSY
, "sbbusy" },
339 { DM_SBCS
, DM_SBCS_SBREADONADDR
, "sbreadonaddr" },
340 { DM_SBCS
, DM_SBCS_SBACCESS
, "sbaccess" },
341 { DM_SBCS
, DM_SBCS_SBAUTOINCREMENT
, "sbautoincrement" },
342 { DM_SBCS
, DM_SBCS_SBREADONDATA
, "sbreadondata" },
343 { DM_SBCS
, DM_SBCS_SBERROR
, "sberror" },
344 { DM_SBCS
, DM_SBCS_SBASIZE
, "sbasize" },
345 { DM_SBCS
, DM_SBCS_SBACCESS128
, "sbaccess128" },
346 { DM_SBCS
, DM_SBCS_SBACCESS64
, "sbaccess64" },
347 { DM_SBCS
, DM_SBCS_SBACCESS32
, "sbaccess32" },
348 { DM_SBCS
, DM_SBCS_SBACCESS16
, "sbaccess16" },
349 { DM_SBCS
, DM_SBCS_SBACCESS8
, "sbaccess8" },
353 for (unsigned i
= 0; i
< ARRAY_SIZE(description
); i
++) {
354 if (description
[i
].address
== address
) {
355 uint64_t mask
= description
[i
].mask
;
356 unsigned value
= get_field(data
, mask
);
360 if (mask
& (mask
>> 1)) {
361 /* If the field is more than 1 bit wide. */
362 sprintf(text
, "%s=%d", description
[i
].name
, value
);
364 strcpy(text
, description
[i
].name
);
366 text
+= strlen(text
);
372 static void dump_field(int idle
, const struct scan_field
*field
)
374 static const char * const op_string
[] = {"-", "r", "w", "?"};
375 static const char * const status_string
[] = {"+", "?", "F", "b"};
377 if (debug_level
< LOG_LVL_DEBUG
)
380 uint64_t out
= buf_get_u64(field
->out_value
, 0, field
->num_bits
);
381 unsigned int out_op
= get_field(out
, DTM_DMI_OP
);
382 unsigned int out_data
= get_field(out
, DTM_DMI_DATA
);
383 unsigned int out_address
= out
>> DTM_DMI_ADDRESS_OFFSET
;
385 uint64_t in
= buf_get_u64(field
->in_value
, 0, field
->num_bits
);
386 unsigned int in_op
= get_field(in
, DTM_DMI_OP
);
387 unsigned int in_data
= get_field(in
, DTM_DMI_DATA
);
388 unsigned int in_address
= in
>> DTM_DMI_ADDRESS_OFFSET
;
390 log_printf_lf(LOG_LVL_DEBUG
,
391 __FILE__
, __LINE__
, "scan",
392 "%db %s %08x @%02x -> %s %08x @%02x; %di",
393 field
->num_bits
, op_string
[out_op
], out_data
, out_address
,
394 status_string
[in_op
], in_data
, in_address
, idle
);
398 decode_dmi(out_text
, out_address
, out_data
);
399 decode_dmi(in_text
, in_address
, in_data
);
400 if (in_text
[0] || out_text
[0]) {
401 log_printf_lf(LOG_LVL_DEBUG
, __FILE__
, __LINE__
, "scan", "%s -> %s",
406 /*** Utility functions. ***/
408 static void select_dmi(struct target
*target
)
410 if (bscan_tunnel_ir_width
!= 0) {
411 select_dmi_via_bscan(target
);
414 jtag_add_ir_scan(target
->tap
, &select_dbus
, TAP_IDLE
);
417 static uint32_t dtmcontrol_scan(struct target
*target
, uint32_t out
)
419 struct scan_field field
;
421 uint8_t out_value
[4] = { 0 };
423 if (bscan_tunnel_ir_width
!= 0)
424 return dtmcontrol_scan_via_bscan(target
, out
);
426 buf_set_u32(out_value
, 0, 32, out
);
428 jtag_add_ir_scan(target
->tap
, &select_dtmcontrol
, TAP_IDLE
);
431 field
.out_value
= out_value
;
432 field
.in_value
= in_value
;
433 jtag_add_dr_scan(target
->tap
, 1, &field
, TAP_IDLE
);
435 /* Always return to dmi. */
438 int retval
= jtag_execute_queue();
439 if (retval
!= ERROR_OK
) {
440 LOG_ERROR("failed jtag scan: %d", retval
);
444 uint32_t in
= buf_get_u32(field
.in_value
, 0, 32);
445 LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out
, in
);
450 static void increase_dmi_busy_delay(struct target
*target
)
452 riscv013_info_t
*info
= get_info(target
);
453 info
->dmi_busy_delay
+= info
->dmi_busy_delay
/ 10 + 1;
454 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
455 info
->dtmcs_idle
, info
->dmi_busy_delay
,
456 info
->ac_busy_delay
);
458 dtmcontrol_scan(target
, DTM_DTMCS_DMIRESET
);
462 * exec: If this is set, assume the scan results in an execution, so more
463 * run-test/idle cycles may be required.
465 static dmi_status_t
dmi_scan(struct target
*target
, uint32_t *address_in
,
466 uint32_t *data_in
, dmi_op_t op
, uint32_t address_out
, uint32_t data_out
,
469 riscv013_info_t
*info
= get_info(target
);
471 unsigned num_bits
= info
->abits
+ DTM_DMI_OP_LENGTH
+ DTM_DMI_DATA_LENGTH
;
472 size_t num_bytes
= (num_bits
+ 7) / 8;
473 uint8_t in
[num_bytes
];
474 uint8_t out
[num_bytes
];
475 struct scan_field field
= {
476 .num_bits
= num_bits
,
480 riscv_bscan_tunneled_scan_context_t bscan_ctxt
;
482 if (r
->reset_delays_wait
>= 0) {
483 r
->reset_delays_wait
--;
484 if (r
->reset_delays_wait
< 0) {
485 info
->dmi_busy_delay
= 0;
486 info
->ac_busy_delay
= 0;
490 memset(in
, 0, num_bytes
);
491 memset(out
, 0, num_bytes
);
493 assert(info
->abits
!= 0);
495 buf_set_u32(out
, DTM_DMI_OP_OFFSET
, DTM_DMI_OP_LENGTH
, op
);
496 buf_set_u32(out
, DTM_DMI_DATA_OFFSET
, DTM_DMI_DATA_LENGTH
, data_out
);
497 buf_set_u32(out
, DTM_DMI_ADDRESS_OFFSET
, info
->abits
, address_out
);
499 /* I wanted to place this code in a different function, but the way JTAG command
500 queueing works in the jtag handling functions, the scan fields either have to be
501 heap allocated, global/static, or else they need to stay on the stack until
502 the jtag_execute_queue() call. Heap or static fields in this case doesn't seem
503 the best fit. Declaring stack based field values in a subsidiary function call wouldn't
505 if (bscan_tunnel_ir_width
!= 0) {
506 riscv_add_bscan_tunneled_scan(target
, &field
, &bscan_ctxt
);
508 /* Assume dbus is already selected. */
509 jtag_add_dr_scan(target
->tap
, 1, &field
, TAP_IDLE
);
512 int idle_count
= info
->dmi_busy_delay
;
514 idle_count
+= info
->ac_busy_delay
;
517 jtag_add_runtest(idle_count
, TAP_IDLE
);
519 int retval
= jtag_execute_queue();
520 if (retval
!= ERROR_OK
) {
521 LOG_ERROR("dmi_scan failed jtag scan");
524 return DMI_STATUS_FAILED
;
527 if (bscan_tunnel_ir_width
!= 0) {
528 /* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
529 buffer_shr(in
, num_bytes
, 1);
533 *data_in
= buf_get_u32(in
, DTM_DMI_DATA_OFFSET
, DTM_DMI_DATA_LENGTH
);
536 *address_in
= buf_get_u32(in
, DTM_DMI_ADDRESS_OFFSET
, info
->abits
);
537 dump_field(idle_count
, &field
);
538 return buf_get_u32(in
, DTM_DMI_OP_OFFSET
, DTM_DMI_OP_LENGTH
);
543 * @param data_in The data we received from the target.
544 * @param dmi_busy_encountered
545 * If non-NULL, will be updated to reflect whether DMI busy was
546 * encountered while executing this operation or not.
547 * @param dmi_op The operation to perform (read/write/nop).
548 * @param address The address argument to that operation.
549 * @param data_out The data to send to the target.
551 * @param exec When true, this scan will execute something, so extra RTI
552 * cycles may be added.
553 * @param ensure_success
554 * Scan a nop after the requested operation, ensuring the
555 * DMI operation succeeded.
557 static int dmi_op_timeout(struct target
*target
, uint32_t *data_in
,
558 bool *dmi_busy_encountered
, int dmi_op
, uint32_t address
,
559 uint32_t data_out
, int timeout_sec
, bool exec
, bool ensure_success
)
566 if (dmi_busy_encountered
)
567 *dmi_busy_encountered
= false;
581 LOG_ERROR("Invalid DMI operation: %d", dmi_op
);
587 time_t start
= time(NULL
);
588 /* This first loop performs the request. Note that if for some reason this
589 * stays busy, it is actually due to the previous access. */
591 status
= dmi_scan(target
, NULL
, NULL
, dmi_op
, address
, data_out
,
593 if (status
== DMI_STATUS_BUSY
) {
594 increase_dmi_busy_delay(target
);
595 if (dmi_busy_encountered
)
596 *dmi_busy_encountered
= true;
597 } else if (status
== DMI_STATUS_SUCCESS
) {
600 LOG_ERROR("failed %s at 0x%x, status=%d", op_name
, address
, status
);
603 if (time(NULL
) - start
> timeout_sec
)
604 return ERROR_TIMEOUT_REACHED
;
607 if (status
!= DMI_STATUS_SUCCESS
) {
608 LOG_ERROR("Failed %s at 0x%x; status=%d", op_name
, address
, status
);
612 if (ensure_success
) {
613 /* This second loop ensures the request succeeded, and gets back data.
614 * Note that NOP can result in a 'busy' result as well, but that would be
615 * noticed on the next DMI access we do. */
617 status
= dmi_scan(target
, &address_in
, data_in
, DMI_OP_NOP
, address
, 0,
619 if (status
== DMI_STATUS_BUSY
) {
620 increase_dmi_busy_delay(target
);
621 if (dmi_busy_encountered
)
622 *dmi_busy_encountered
= true;
623 } else if (status
== DMI_STATUS_SUCCESS
) {
627 LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
628 op_name
, address
, *data_in
, status
);
630 LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name
, address
,
635 if (time(NULL
) - start
> timeout_sec
)
636 return ERROR_TIMEOUT_REACHED
;
643 static int dmi_op(struct target
*target
, uint32_t *data_in
,
644 bool *dmi_busy_encountered
, int dmi_op
, uint32_t address
,
645 uint32_t data_out
, bool exec
, bool ensure_success
)
647 int result
= dmi_op_timeout(target
, data_in
, dmi_busy_encountered
, dmi_op
,
648 address
, data_out
, riscv_command_timeout_sec
, exec
, ensure_success
);
649 if (result
== ERROR_TIMEOUT_REACHED
) {
650 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
651 "either really slow or broken. You could increase the "
652 "timeout with riscv set_command_timeout_sec.",
653 riscv_command_timeout_sec
);
659 static int dmi_read(struct target
*target
, uint32_t *value
, uint32_t address
)
661 return dmi_op(target
, value
, NULL
, DMI_OP_READ
, address
, 0, false, true);
664 static int dmi_read_exec(struct target
*target
, uint32_t *value
, uint32_t address
)
666 return dmi_op(target
, value
, NULL
, DMI_OP_READ
, address
, 0, true, true);
669 static int dmi_write(struct target
*target
, uint32_t address
, uint32_t value
)
671 return dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, address
, value
, false, true);
674 static int dmi_write_exec(struct target
*target
, uint32_t address
,
675 uint32_t value
, bool ensure_success
)
677 return dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, address
, value
, true, ensure_success
);
680 static int dmstatus_read_timeout(struct target
*target
, uint32_t *dmstatus
,
681 bool authenticated
, unsigned timeout_sec
)
683 int result
= dmi_op_timeout(target
, dmstatus
, NULL
, DMI_OP_READ
,
684 DM_DMSTATUS
, 0, timeout_sec
, false, true);
685 if (result
!= ERROR_OK
)
687 int dmstatus_version
= get_field(*dmstatus
, DM_DMSTATUS_VERSION
);
688 if (dmstatus_version
!= 2 && dmstatus_version
!= 3) {
689 LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (1.0), not "
690 "%d (dmstatus=0x%x). This error might be caused by a JTAG "
691 "signal issue. Try reducing the JTAG clock speed.",
692 get_field(*dmstatus
, DM_DMSTATUS_VERSION
), *dmstatus
);
693 } else if (authenticated
&& !get_field(*dmstatus
, DM_DMSTATUS_AUTHENTICATED
)) {
694 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
695 "(dmstatus=0x%x). Use `riscv authdata_read` and "
696 "`riscv authdata_write` commands to authenticate.", *dmstatus
);
702 static int dmstatus_read(struct target
*target
, uint32_t *dmstatus
,
705 return dmstatus_read_timeout(target
, dmstatus
, authenticated
,
706 riscv_command_timeout_sec
);
709 static void increase_ac_busy_delay(struct target
*target
)
711 riscv013_info_t
*info
= get_info(target
);
712 info
->ac_busy_delay
+= info
->ac_busy_delay
/ 10 + 1;
713 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
714 info
->dtmcs_idle
, info
->dmi_busy_delay
,
715 info
->ac_busy_delay
);
718 static uint32_t __attribute__((unused
)) abstract_register_size(unsigned width
)
722 return set_field(0, AC_ACCESS_REGISTER_AARSIZE
, 2);
724 return set_field(0, AC_ACCESS_REGISTER_AARSIZE
, 3);
726 return set_field(0, AC_ACCESS_REGISTER_AARSIZE
, 4);
728 LOG_ERROR("Unsupported register width: %d", width
);
733 static int wait_for_idle(struct target
*target
, uint32_t *abstractcs
)
736 time_t start
= time(NULL
);
738 if (dmi_read(target
, abstractcs
, DM_ABSTRACTCS
) != ERROR_OK
)
741 if (get_field(*abstractcs
, DM_ABSTRACTCS_BUSY
) == 0)
744 if (time(NULL
) - start
> riscv_command_timeout_sec
) {
745 info
->cmderr
= get_field(*abstractcs
, DM_ABSTRACTCS_CMDERR
);
746 if (info
->cmderr
!= CMDERR_NONE
) {
747 const char *errors
[8] = {
757 LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
758 errors
[info
->cmderr
], *abstractcs
);
761 LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
762 "Increase the timeout with riscv set_command_timeout_sec.",
763 riscv_command_timeout_sec
,
770 static int execute_abstract_command(struct target
*target
, uint32_t command
)
773 if (debug_level
>= LOG_LVL_DEBUG
) {
774 switch (get_field(command
, DM_COMMAND_CMDTYPE
)) {
776 LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
777 "transfer=%d, write=%d, regno=0x%x",
779 8 << get_field(command
, AC_ACCESS_REGISTER_AARSIZE
),
780 get_field(command
, AC_ACCESS_REGISTER_POSTEXEC
),
781 get_field(command
, AC_ACCESS_REGISTER_TRANSFER
),
782 get_field(command
, AC_ACCESS_REGISTER_WRITE
),
783 get_field(command
, AC_ACCESS_REGISTER_REGNO
));
786 LOG_DEBUG("command=0x%x", command
);
791 if (dmi_write_exec(target
, DM_COMMAND
, command
, false) != ERROR_OK
)
794 uint32_t abstractcs
= 0;
795 int result
= wait_for_idle(target
, &abstractcs
);
797 info
->cmderr
= get_field(abstractcs
, DM_ABSTRACTCS_CMDERR
);
798 if (info
->cmderr
!= 0 || result
!= ERROR_OK
) {
799 LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command
, abstractcs
);
800 /* Clear the error. */
801 dmi_write(target
, DM_ABSTRACTCS
, DM_ABSTRACTCS_CMDERR
);
808 static riscv_reg_t
read_abstract_arg(struct target
*target
, unsigned index
,
811 riscv_reg_t value
= 0;
813 unsigned offset
= index
* size_bits
/ 32;
816 LOG_ERROR("Unsupported size: %d bits", size_bits
);
819 dmi_read(target
, &v
, DM_DATA0
+ offset
+ 1);
820 value
|= ((uint64_t) v
) << 32;
823 dmi_read(target
, &v
, DM_DATA0
+ offset
);
829 static int write_abstract_arg(struct target
*target
, unsigned index
,
830 riscv_reg_t value
, unsigned size_bits
)
832 unsigned offset
= index
* size_bits
/ 32;
835 LOG_ERROR("Unsupported size: %d bits", size_bits
);
838 dmi_write(target
, DM_DATA0
+ offset
+ 1, value
>> 32);
841 dmi_write(target
, DM_DATA0
+ offset
, value
);
849 static uint32_t access_register_command(struct target
*target
, uint32_t number
,
850 unsigned size
, uint32_t flags
)
852 uint32_t command
= set_field(0, DM_COMMAND_CMDTYPE
, 0);
855 command
= set_field(command
, AC_ACCESS_REGISTER_AARSIZE
, 2);
858 command
= set_field(command
, AC_ACCESS_REGISTER_AARSIZE
, 3);
861 LOG_ERROR("%d-bit register %s not supported.", size
,
862 gdb_regno_name(number
));
866 if (number
<= GDB_REGNO_XPR31
) {
867 command
= set_field(command
, AC_ACCESS_REGISTER_REGNO
,
868 0x1000 + number
- GDB_REGNO_ZERO
);
869 } else if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
) {
870 command
= set_field(command
, AC_ACCESS_REGISTER_REGNO
,
871 0x1020 + number
- GDB_REGNO_FPR0
);
872 } else if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
) {
873 command
= set_field(command
, AC_ACCESS_REGISTER_REGNO
,
874 number
- GDB_REGNO_CSR0
);
875 } else if (number
>= GDB_REGNO_COUNT
) {
876 /* Custom register. */
877 assert(target
->reg_cache
->reg_list
[number
].arch_info
);
878 riscv_reg_info_t
*reg_info
= target
->reg_cache
->reg_list
[number
].arch_info
;
880 command
= set_field(command
, AC_ACCESS_REGISTER_REGNO
,
881 0xc000 + reg_info
->custom_number
);
891 static int register_read_abstract(struct target
*target
, uint64_t *value
,
892 uint32_t number
, unsigned size
)
896 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
&&
897 !info
->abstract_read_fpr_supported
)
899 if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
&&
900 !info
->abstract_read_csr_supported
)
902 /* The spec doesn't define abstract register numbers for vector registers. */
903 if (number
>= GDB_REGNO_V0
&& number
<= GDB_REGNO_V31
)
906 uint32_t command
= access_register_command(target
, number
, size
,
907 AC_ACCESS_REGISTER_TRANSFER
);
909 int result
= execute_abstract_command(target
, command
);
910 if (result
!= ERROR_OK
) {
911 if (info
->cmderr
== CMDERR_NOT_SUPPORTED
) {
912 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
) {
913 info
->abstract_read_fpr_supported
= false;
914 LOG_INFO("Disabling abstract command reads from FPRs.");
915 } else if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
) {
916 info
->abstract_read_csr_supported
= false;
917 LOG_INFO("Disabling abstract command reads from CSRs.");
924 *value
= read_abstract_arg(target
, 0, size
);
929 static int register_write_abstract(struct target
*target
, uint32_t number
,
930 uint64_t value
, unsigned size
)
934 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
&&
935 !info
->abstract_write_fpr_supported
)
937 if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
&&
938 !info
->abstract_write_csr_supported
)
941 uint32_t command
= access_register_command(target
, number
, size
,
942 AC_ACCESS_REGISTER_TRANSFER
|
943 AC_ACCESS_REGISTER_WRITE
);
945 if (write_abstract_arg(target
, 0, value
, size
) != ERROR_OK
)
948 int result
= execute_abstract_command(target
, command
);
949 if (result
!= ERROR_OK
) {
950 if (info
->cmderr
== CMDERR_NOT_SUPPORTED
) {
951 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
) {
952 info
->abstract_write_fpr_supported
= false;
953 LOG_INFO("Disabling abstract command writes to FPRs.");
954 } else if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
) {
955 info
->abstract_write_csr_supported
= false;
956 LOG_INFO("Disabling abstract command writes to CSRs.");
966 * Sets the AAMSIZE field of a memory access abstract command based on
969 static uint32_t abstract_memory_size(unsigned width
)
973 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE
, 0);
975 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE
, 1);
977 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE
, 2);
979 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE
, 3);
981 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE
, 4);
983 LOG_ERROR("Unsupported memory width: %d", width
);
989 * Creates a memory access abstract command.
991 static uint32_t access_memory_command(struct target
*target
, bool virtual,
992 unsigned width
, bool postincrement
, bool write
)
994 uint32_t command
= set_field(0, AC_ACCESS_MEMORY_CMDTYPE
, 2);
995 command
= set_field(command
, AC_ACCESS_MEMORY_AAMVIRTUAL
, virtual);
996 command
|= abstract_memory_size(width
);
997 command
= set_field(command
, AC_ACCESS_MEMORY_AAMPOSTINCREMENT
,
999 command
= set_field(command
, AC_ACCESS_MEMORY_WRITE
, write
);
1004 static int examine_progbuf(struct target
*target
)
1006 riscv013_info_t
*info
= get_info(target
);
1008 if (info
->progbuf_writable
!= YNM_MAYBE
)
1011 /* Figure out if progbuf is writable. */
1013 if (info
->progbufsize
< 1) {
1014 info
->progbuf_writable
= YNM_NO
;
1015 LOG_INFO("No program buffer present.");
1020 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
1023 struct riscv_program program
;
1024 riscv_program_init(&program
, target
);
1025 riscv_program_insert(&program
, auipc(S0
));
1026 if (riscv_program_exec(&program
, target
) != ERROR_OK
)
1029 if (register_read_direct(target
, &info
->progbuf_address
, GDB_REGNO_S0
) != ERROR_OK
)
1032 riscv_program_init(&program
, target
);
1033 riscv_program_insert(&program
, sw(S0
, S0
, 0));
1034 int result
= riscv_program_exec(&program
, target
);
1036 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
1039 if (result
!= ERROR_OK
) {
1040 /* This program might have failed if the program buffer is not
1042 info
->progbuf_writable
= YNM_NO
;
1047 if (dmi_read(target
, &written
, DM_PROGBUF0
) != ERROR_OK
)
1049 if (written
== (uint32_t) info
->progbuf_address
) {
1050 LOG_INFO("progbuf is writable at 0x%" PRIx64
,
1051 info
->progbuf_address
);
1052 info
->progbuf_writable
= YNM_YES
;
1055 LOG_INFO("progbuf is not writeable at 0x%" PRIx64
,
1056 info
->progbuf_address
);
1057 info
->progbuf_writable
= YNM_NO
;
1063 static int is_fpu_reg(uint32_t gdb_regno
)
1065 return (gdb_regno
>= GDB_REGNO_FPR0
&& gdb_regno
<= GDB_REGNO_FPR31
) ||
1066 (gdb_regno
== GDB_REGNO_CSR0
+ CSR_FFLAGS
) ||
1067 (gdb_regno
== GDB_REGNO_CSR0
+ CSR_FRM
) ||
1068 (gdb_regno
== GDB_REGNO_CSR0
+ CSR_FCSR
);
1071 static int is_vector_reg(uint32_t gdb_regno
)
1073 return (gdb_regno
>= GDB_REGNO_V0
&& gdb_regno
<= GDB_REGNO_V31
) ||
1074 gdb_regno
== GDB_REGNO_VSTART
||
1075 gdb_regno
== GDB_REGNO_VXSAT
||
1076 gdb_regno
== GDB_REGNO_VXRM
||
1077 gdb_regno
== GDB_REGNO_VL
||
1078 gdb_regno
== GDB_REGNO_VTYPE
||
1079 gdb_regno
== GDB_REGNO_VLENB
;
/* Prepare to access register `regno`: for FPU or vector registers, read the
 * current mstatus into *mstatus and, if the relevant unit is disabled
 * (MSTATUS_FS / MSTATUS_VS clear), temporarily enable it with
 * register_write_direct() so the access can proceed. The saved *mstatus is
 * later restored by cleanup_after_register_access().
 * NOTE(review): the error-return statements and the function tail fall
 * outside this view. */
1082 static int prep_for_register_access(struct target
*target
, uint64_t *mstatus
,
/* Only FPU/vector registers need any mstatus handling. */
1085 if (is_fpu_reg(regno
) || is_vector_reg(regno
)) {
/* Capture the current mstatus so the caller can restore it afterwards. */
1086 if (register_read(target
, mstatus
, GDB_REGNO_MSTATUS
) != ERROR_OK
)
/* FPU register while the FPU is off: set mstatus.FS to enable it. */
1088 if (is_fpu_reg(regno
) && (*mstatus
& MSTATUS_FS
) == 0) {
1089 if (register_write_direct(target
, GDB_REGNO_MSTATUS
,
1090 set_field(*mstatus
, MSTATUS_FS
, 1)) != ERROR_OK
)
/* Vector register while the vector unit is off: set mstatus.VS. */
1092 } else if (is_vector_reg(regno
) && (*mstatus
& MSTATUS_VS
) == 0) {
1093 if (register_write_direct(target
, GDB_REGNO_MSTATUS
,
1094 set_field(*mstatus
, MSTATUS_VS
, 1)) != ERROR_OK
)
/* Undo prep_for_register_access(): if the saved `mstatus` shows that the FPU
 * (MSTATUS_FS) or vector unit (MSTATUS_VS) was originally disabled for an
 * FPU/vector register access, write the saved mstatus value back via
 * register_write_direct().
 * NOTE(review): the return statements after the restore call fall outside
 * this view. */
1103 static int cleanup_after_register_access(struct target
*target
,
1104 uint64_t mstatus
, int regno
)
/* Restore only when the access required temporarily enabling the unit. */
1106 if ((is_fpu_reg(regno
) && (mstatus
& MSTATUS_FS
) == 0) ||
1107 (is_vector_reg(regno
) && (mstatus
& MSTATUS_VS
) == 0))
1108 if (register_write_direct(target
, GDB_REGNO_MSTATUS
, mstatus
) != ERROR_OK
)
1120 /* How can the debugger access this memory? */
1121 memory_space_t memory_space
;
1122 /* Memory address to access the scratch memory from the hart. */
1123 riscv_addr_t hart_address
;
1124 /* Memory address to access the scratch memory from the debugger. */
1125 riscv_addr_t debug_address
;
1126 struct working_area
*area
;
1130 * Find some scratch memory to be used with the given program.
1132 static int scratch_reserve(struct target
*target
,
1133 scratch_mem_t
*scratch
,
1134 struct riscv_program
*program
,
1135 unsigned size_bytes
)
1137 riscv_addr_t alignment
= 1;
1138 while (alignment
< size_bytes
)
1141 scratch
->area
= NULL
;
1143 riscv013_info_t
*info
= get_info(target
);
1145 /* Option 1: See if data# registers can be used as the scratch memory */
1146 if (info
->dataaccess
== 1) {
1147 /* Sign extend dataaddr. */
1148 scratch
->hart_address
= info
->dataaddr
;
1149 if (info
->dataaddr
& (1<<11))
1150 scratch
->hart_address
|= 0xfffffffffffff000ULL
;
1152 scratch
->hart_address
= (scratch
->hart_address
+ alignment
- 1) & ~(alignment
- 1);
1154 if ((size_bytes
+ scratch
->hart_address
- info
->dataaddr
+ 3) / 4 >=
1156 scratch
->memory_space
= SPACE_DM_DATA
;
1157 scratch
->debug_address
= (scratch
->hart_address
- info
->dataaddr
) / 4;
1162 /* Option 2: See if progbuf can be used as the scratch memory */
1163 if (examine_progbuf(target
) != ERROR_OK
)
1166 /* Allow for ebreak at the end of the program. */
1167 unsigned program_size
= (program
->instruction_count
+ 1) * 4;
1168 scratch
->hart_address
= (info
->progbuf_address
+ program_size
+ alignment
- 1) &
1170 if ((info
->progbuf_writable
== YNM_YES
) &&
1171 ((size_bytes
+ scratch
->hart_address
- info
->progbuf_address
+ 3) / 4 >=
1172 info
->progbufsize
)) {
1173 scratch
->memory_space
= SPACE_DMI_PROGBUF
;
1174 scratch
->debug_address
= (scratch
->hart_address
- info
->progbuf_address
) / 4;
1178 /* Option 3: User-configured memory area as scratch RAM */
1179 if (target_alloc_working_area(target
, size_bytes
+ alignment
- 1,
1180 &scratch
->area
) == ERROR_OK
) {
1181 scratch
->hart_address
= (scratch
->area
->address
+ alignment
- 1) &
1183 scratch
->memory_space
= SPACE_DMI_RAM
;
1184 scratch
->debug_address
= scratch
->hart_address
;
1188 LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
1189 "a work area with 'configure -work-area-phys'.", size_bytes
);
1193 static int scratch_release(struct target
*target
,
1194 scratch_mem_t
*scratch
)
1196 return target_free_working_area(target
, scratch
->area
);
1199 static int scratch_read64(struct target
*target
, scratch_mem_t
*scratch
,
1203 switch (scratch
->memory_space
) {
1205 if (dmi_read(target
, &v
, DM_DATA0
+ scratch
->debug_address
) != ERROR_OK
)
1208 if (dmi_read(target
, &v
, DM_DATA1
+ scratch
->debug_address
) != ERROR_OK
)
1210 *value
|= ((uint64_t) v
) << 32;
1212 case SPACE_DMI_PROGBUF
:
1213 if (dmi_read(target
, &v
, DM_PROGBUF0
+ scratch
->debug_address
) != ERROR_OK
)
1216 if (dmi_read(target
, &v
, DM_PROGBUF1
+ scratch
->debug_address
) != ERROR_OK
)
1218 *value
|= ((uint64_t) v
) << 32;
1222 uint8_t buffer
[8] = {0};
1223 if (read_memory(target
, scratch
->debug_address
, 4, 2, buffer
, 4) != ERROR_OK
)
1225 *value
= buffer
[0] |
1226 (((uint64_t) buffer
[1]) << 8) |
1227 (((uint64_t) buffer
[2]) << 16) |
1228 (((uint64_t) buffer
[3]) << 24) |
1229 (((uint64_t) buffer
[4]) << 32) |
1230 (((uint64_t) buffer
[5]) << 40) |
1231 (((uint64_t) buffer
[6]) << 48) |
1232 (((uint64_t) buffer
[7]) << 56);
1239 static int scratch_write64(struct target
*target
, scratch_mem_t
*scratch
,
1242 switch (scratch
->memory_space
) {
1244 dmi_write(target
, DM_DATA0
+ scratch
->debug_address
, value
);
1245 dmi_write(target
, DM_DATA1
+ scratch
->debug_address
, value
>> 32);
1247 case SPACE_DMI_PROGBUF
:
1248 dmi_write(target
, DM_PROGBUF0
+ scratch
->debug_address
, value
);
1249 dmi_write(target
, DM_PROGBUF1
+ scratch
->debug_address
, value
>> 32);
1253 uint8_t buffer
[8] = {
1263 if (write_memory(target
, scratch
->debug_address
, 4, 2, buffer
) != ERROR_OK
)
1271 /** Return register size in bits. */
1272 static unsigned register_size(struct target
*target
, unsigned number
)
1274 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1275 * when this function is called during examine(). */
1276 if (target
->reg_cache
)
1277 return target
->reg_cache
->reg_list
[number
].size
;
1279 return riscv_xlen(target
);
1282 static bool has_sufficient_progbuf(struct target
*target
, unsigned size
)
1284 RISCV013_INFO(info
);
1287 return info
->progbufsize
+ r
->impebreak
>= size
;
1291 * Immediately write the new value to the requested register. This mechanism
1292 * bypasses any caches.
1294 static int register_write_direct(struct target
*target
, unsigned number
,
1297 LOG_DEBUG("{%d} %s <- 0x%" PRIx64
, riscv_current_hartid(target
),
1298 gdb_regno_name(number
), value
);
1300 int result
= register_write_abstract(target
, number
, value
,
1301 register_size(target
, number
));
1302 if (result
== ERROR_OK
|| !has_sufficient_progbuf(target
, 2) ||
1303 !riscv_is_halted(target
))
1306 struct riscv_program program
;
1307 riscv_program_init(&program
, target
);
1310 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
1314 if (prep_for_register_access(target
, &mstatus
, number
) != ERROR_OK
)
1317 scratch_mem_t scratch
;
1318 bool use_scratch
= false;
1319 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
&&
1320 riscv_supports_extension(target
, 'D') &&
1321 riscv_xlen(target
) < 64) {
1322 /* There are no instructions to move all the bits from a register, so
1323 * we need to use some scratch RAM. */
1325 riscv_program_insert(&program
, fld(number
- GDB_REGNO_FPR0
, S0
, 0));
1327 if (scratch_reserve(target
, &scratch
, &program
, 8) != ERROR_OK
)
1330 if (register_write_direct(target
, GDB_REGNO_S0
, scratch
.hart_address
)
1332 scratch_release(target
, &scratch
);
1336 if (scratch_write64(target
, &scratch
, value
) != ERROR_OK
) {
1337 scratch_release(target
, &scratch
);
1341 } else if (number
== GDB_REGNO_VTYPE
) {
1342 riscv_program_insert(&program
, csrr(S0
, CSR_VL
));
1343 riscv_program_insert(&program
, vsetvli(ZERO
, S0
, value
));
1346 if (register_write_direct(target
, GDB_REGNO_S0
, value
) != ERROR_OK
)
1349 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
) {
1350 if (riscv_supports_extension(target
, 'D'))
1351 riscv_program_insert(&program
, fmv_d_x(number
- GDB_REGNO_FPR0
, S0
));
1353 riscv_program_insert(&program
, fmv_w_x(number
- GDB_REGNO_FPR0
, S0
));
1354 } else if (number
== GDB_REGNO_VL
) {
1355 /* "The XLEN-bit-wide read-only vl CSR can only be updated by the
1356 * vsetvli and vsetvl instructions, and the fault-only-rst vector
1357 * load instruction variants." */
1359 if (register_read(target
, &vtype
, GDB_REGNO_VTYPE
) != ERROR_OK
)
1361 if (riscv_program_insert(&program
, vsetvli(ZERO
, S0
, vtype
)) != ERROR_OK
)
1363 } else if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
) {
1364 riscv_program_csrw(&program
, S0
, number
);
1366 LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number
);
1371 int exec_out
= riscv_program_exec(&program
, target
);
1372 /* Don't message on error. Probably the register doesn't exist. */
1373 if (exec_out
== ERROR_OK
&& target
->reg_cache
) {
1374 struct reg
*reg
= &target
->reg_cache
->reg_list
[number
];
1375 buf_set_u64(reg
->value
, 0, reg
->size
, value
);
1379 scratch_release(target
, &scratch
);
1381 if (cleanup_after_register_access(target
, mstatus
, number
) != ERROR_OK
)
1385 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
1391 /** Read register value from the target. Also update the cached value. */
1392 static int register_read(struct target
*target
, uint64_t *value
, uint32_t number
)
1394 if (number
== GDB_REGNO_ZERO
) {
1398 int result
= register_read_direct(target
, value
, number
);
1399 if (result
!= ERROR_OK
)
1401 if (target
->reg_cache
) {
1402 struct reg
*reg
= &target
->reg_cache
->reg_list
[number
];
1403 buf_set_u64(reg
->value
, 0, reg
->size
, *value
);
1408 /** Actually read registers from the target right now. */
1409 static int register_read_direct(struct target
*target
, uint64_t *value
, uint32_t number
)
1411 int result
= register_read_abstract(target
, value
, number
,
1412 register_size(target
, number
));
1414 if (result
!= ERROR_OK
&&
1415 has_sufficient_progbuf(target
, 2) &&
1416 number
> GDB_REGNO_XPR31
) {
1417 struct riscv_program program
;
1418 riscv_program_init(&program
, target
);
1420 scratch_mem_t scratch
;
1421 bool use_scratch
= false;
1424 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
1427 /* Write program to move data into s0. */
1430 if (prep_for_register_access(target
, &mstatus
, number
) != ERROR_OK
)
1433 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
) {
1434 if (riscv_supports_extension(target
, 'D')
1435 && riscv_xlen(target
) < 64) {
1436 /* There are no instructions to move all the bits from a
1437 * register, so we need to use some scratch RAM. */
1438 riscv_program_insert(&program
, fsd(number
- GDB_REGNO_FPR0
, S0
,
1441 if (scratch_reserve(target
, &scratch
, &program
, 8) != ERROR_OK
)
1445 if (register_write_direct(target
, GDB_REGNO_S0
,
1446 scratch
.hart_address
) != ERROR_OK
) {
1447 scratch_release(target
, &scratch
);
1450 } else if (riscv_supports_extension(target
, 'D')) {
1451 riscv_program_insert(&program
, fmv_x_d(S0
, number
- GDB_REGNO_FPR0
));
1453 riscv_program_insert(&program
, fmv_x_w(S0
, number
- GDB_REGNO_FPR0
));
1455 } else if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
) {
1456 riscv_program_csrr(&program
, S0
, number
);
1458 LOG_ERROR("Unsupported register: %s", gdb_regno_name(number
));
1462 /* Execute program. */
1463 result
= riscv_program_exec(&program
, target
);
1464 /* Don't message on error. Probably the register doesn't exist. */
1467 result
= scratch_read64(target
, &scratch
, value
);
1468 scratch_release(target
, &scratch
);
1469 if (result
!= ERROR_OK
)
1473 if (register_read_direct(target
, value
, GDB_REGNO_S0
) != ERROR_OK
)
1477 if (cleanup_after_register_access(target
, mstatus
, number
) != ERROR_OK
)
1481 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
1485 if (result
== ERROR_OK
) {
1486 LOG_DEBUG("{%d} %s = 0x%" PRIx64
, riscv_current_hartid(target
),
1487 gdb_regno_name(number
), *value
);
1493 static int wait_for_authbusy(struct target
*target
, uint32_t *dmstatus
)
1495 time_t start
= time(NULL
);
1498 if (dmstatus_read(target
, &value
, false) != ERROR_OK
)
1502 if (!get_field(value
, DM_DMSTATUS_AUTHBUSY
))
1504 if (time(NULL
) - start
> riscv_command_timeout_sec
) {
1505 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1506 "Increase the timeout with riscv set_command_timeout_sec.",
1507 riscv_command_timeout_sec
,
1516 /*** OpenOCD target functions. ***/
1518 static void deinit_target(struct target
*target
)
1520 LOG_DEBUG("riscv_deinit_target()");
1521 struct riscv_info
*info
= target
->arch_info
;
1525 free(info
->version_specific
);
1526 /* TODO: free register arch_info */
1527 info
->version_specific
= NULL
;
1530 static int set_haltgroup(struct target
*target
, bool *supported
)
1532 uint32_t write
= set_field(DM_DMCS2_HGWRITE
, DM_DMCS2_GROUP
, target
->smp
);
1533 if (dmi_write(target
, DM_DMCS2
, write
) != ERROR_OK
)
1536 if (dmi_read(target
, &read
, DM_DMCS2
) != ERROR_OK
)
1538 *supported
= get_field(read
, DM_DMCS2_GROUP
) == (unsigned)target
->smp
;
1542 static int discover_vlenb(struct target
*target
)
1547 if (register_read(target
, &vlenb
, GDB_REGNO_VLENB
) != ERROR_OK
) {
1548 LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
1549 target_name(target
));
1555 LOG_INFO("Vector support with vlenb=%d", r
->vlenb
);
1560 static int examine(struct target
*target
)
1562 /* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
1564 uint32_t dtmcontrol
= dtmcontrol_scan(target
, 0);
1565 LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol
);
1566 LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol
, DTM_DTMCS_DMIRESET
));
1567 LOG_DEBUG(" idle=%d", get_field(dtmcontrol
, DTM_DTMCS_IDLE
));
1568 LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol
, DTM_DTMCS_DMISTAT
));
1569 LOG_DEBUG(" abits=%d", get_field(dtmcontrol
, DTM_DTMCS_ABITS
));
1570 LOG_DEBUG(" version=%d", get_field(dtmcontrol
, DTM_DTMCS_VERSION
));
1571 if (dtmcontrol
== 0) {
1572 LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
1575 if (get_field(dtmcontrol
, DTM_DTMCS_VERSION
) != 1) {
1576 LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
1577 get_field(dtmcontrol
, DTM_DTMCS_VERSION
), dtmcontrol
);
1581 riscv013_info_t
*info
= get_info(target
);
1582 /* TODO: This won't be true if there are multiple DMs. */
1583 info
->index
= target
->coreid
;
1584 info
->abits
= get_field(dtmcontrol
, DTM_DTMCS_ABITS
);
1585 info
->dtmcs_idle
= get_field(dtmcontrol
, DTM_DTMCS_IDLE
);
1587 /* Reset the Debug Module. */
1588 dm013_info_t
*dm
= get_dm(target
);
1591 if (!dm
->was_reset
) {
1592 dmi_write(target
, DM_DMCONTROL
, 0);
1593 dmi_write(target
, DM_DMCONTROL
, DM_DMCONTROL_DMACTIVE
);
1594 dm
->was_reset
= true;
1597 dmi_write(target
, DM_DMCONTROL
, DM_DMCONTROL_HARTSELLO
|
1598 DM_DMCONTROL_HARTSELHI
| DM_DMCONTROL_DMACTIVE
|
1599 DM_DMCONTROL_HASEL
);
1601 if (dmi_read(target
, &dmcontrol
, DM_DMCONTROL
) != ERROR_OK
)
1604 if (!get_field(dmcontrol
, DM_DMCONTROL_DMACTIVE
)) {
1605 LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
1610 dm
->hasel_supported
= get_field(dmcontrol
, DM_DMCONTROL_HASEL
);
1613 if (dmstatus_read(target
, &dmstatus
, false) != ERROR_OK
)
1615 LOG_DEBUG("dmstatus: 0x%08x", dmstatus
);
1616 int dmstatus_version
= get_field(dmstatus
, DM_DMSTATUS_VERSION
);
1617 if (dmstatus_version
!= 2 && dmstatus_version
!= 3) {
1618 /* Error was already printed out in dmstatus_read(). */
1623 (get_field(dmcontrol
, DM_DMCONTROL_HARTSELHI
) <<
1624 DM_DMCONTROL_HARTSELLO_LENGTH
) |
1625 get_field(dmcontrol
, DM_DMCONTROL_HARTSELLO
);
1626 info
->hartsellen
= 0;
1627 while (hartsel
& 1) {
1631 LOG_DEBUG("hartsellen=%d", info
->hartsellen
);
1634 if (dmi_read(target
, &hartinfo
, DM_HARTINFO
) != ERROR_OK
)
1637 info
->datasize
= get_field(hartinfo
, DM_HARTINFO_DATASIZE
);
1638 info
->dataaccess
= get_field(hartinfo
, DM_HARTINFO_DATAACCESS
);
1639 info
->dataaddr
= get_field(hartinfo
, DM_HARTINFO_DATAADDR
);
1641 if (!get_field(dmstatus
, DM_DMSTATUS_AUTHENTICATED
)) {
1642 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
1643 "(dmstatus=0x%x). Use `riscv authdata_read` and "
1644 "`riscv authdata_write` commands to authenticate.", dmstatus
);
1645 /* If we return ERROR_FAIL here, then in a multicore setup the next
1646 * core won't be examined, which means we won't set up the
1647 * authentication commands for them, which means the config script
1648 * needs to be a lot more complex. */
1652 if (dmi_read(target
, &info
->sbcs
, DM_SBCS
) != ERROR_OK
)
1655 /* Check that abstract data registers are accessible. */
1656 uint32_t abstractcs
;
1657 if (dmi_read(target
, &abstractcs
, DM_ABSTRACTCS
) != ERROR_OK
)
1659 info
->datacount
= get_field(abstractcs
, DM_ABSTRACTCS_DATACOUNT
);
1660 info
->progbufsize
= get_field(abstractcs
, DM_ABSTRACTCS_PROGBUFSIZE
);
1662 LOG_INFO("datacount=%d progbufsize=%d", info
->datacount
, info
->progbufsize
);
1665 r
->impebreak
= get_field(dmstatus
, DM_DMSTATUS_IMPEBREAK
);
1667 if (!has_sufficient_progbuf(target
, 2)) {
1668 LOG_WARNING("We won't be able to execute fence instructions on this "
1669 "target. Memory may not always appear consistent. "
1670 "(progbufsize=%d, impebreak=%d)", info
->progbufsize
,
1674 if (info
->progbufsize
< 4 && riscv_enable_virtual
) {
1675 LOG_ERROR("set_enable_virtual is not available on this target. It "
1676 "requires a program buffer size of at least 4. (progbufsize=%d) "
1677 "Use `riscv set_enable_virtual off` to continue."
1678 , info
->progbufsize
);
1681 /* Before doing anything else we must first enumerate the harts. */
1682 if (dm
->hart_count
< 0) {
1683 for (int i
= 0; i
< MIN(RISCV_MAX_HARTS
, 1 << info
->hartsellen
); ++i
) {
1684 r
->current_hartid
= i
;
1685 if (riscv013_select_current_hart(target
) != ERROR_OK
)
1689 if (dmstatus_read(target
, &s
, true) != ERROR_OK
)
1691 if (get_field(s
, DM_DMSTATUS_ANYNONEXISTENT
))
1693 dm
->hart_count
= i
+ 1;
1695 if (get_field(s
, DM_DMSTATUS_ANYHAVERESET
))
1696 dmi_write(target
, DM_DMCONTROL
,
1697 set_hartsel(DM_DMCONTROL_DMACTIVE
| DM_DMCONTROL_ACKHAVERESET
, i
));
1700 LOG_DEBUG("Detected %d harts.", dm
->hart_count
);
1703 r
->current_hartid
= target
->coreid
;
1705 if (dm
->hart_count
== 0) {
1706 LOG_ERROR("No harts found!");
1710 /* Don't call any riscv_* functions until after we've counted the number of
1711 * cores and initialized registers. */
1713 if (riscv013_select_current_hart(target
) != ERROR_OK
)
1716 bool halted
= riscv_is_halted(target
);
1718 if (riscv013_halt_go(target
) != ERROR_OK
) {
1719 LOG_ERROR("Fatal: Hart %d failed to halt during examine()", r
->current_hartid
);
1724 /* Without knowing anything else we can at least mess with the
1725 * program buffer. */
1726 r
->debug_buffer_size
= info
->progbufsize
;
1728 int result
= register_read_abstract(target
, NULL
, GDB_REGNO_S0
, 64);
1729 if (result
== ERROR_OK
)
1734 if (register_read(target
, &r
->misa
, GDB_REGNO_MISA
)) {
1735 LOG_ERROR("Fatal: Failed to read MISA from hart %d.", r
->current_hartid
);
1739 if (riscv_supports_extension(target
, 'V')) {
1740 if (discover_vlenb(target
) != ERROR_OK
)
1744 /* Now init registers based on what we discovered. */
1745 if (riscv_init_registers(target
) != ERROR_OK
)
1748 /* Display this as early as possible to help people who are using
1749 * really slow simulators. */
1750 LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64
, r
->current_hartid
, r
->xlen
,
1754 riscv013_step_or_resume_current_hart(target
, false, false);
1756 target_set_examined(target
);
1759 bool haltgroup_supported
;
1760 if (set_haltgroup(target
, &haltgroup_supported
) != ERROR_OK
)
1762 if (haltgroup_supported
)
1763 LOG_INFO("Core %d made part of halt group %d.", target
->coreid
,
1766 LOG_INFO("Core %d could not be made part of halt group %d.",
1767 target
->coreid
, target
->smp
);
1770 /* Some regression suites rely on seeing 'Examined RISC-V core' to know
1771 * when they can connect with gdb/telnet.
1772 * We will need to update those suites if we want to change that text. */
1773 LOG_INFO("Examined RISC-V core; found %d harts",
1774 riscv_count_harts(target
));
1775 LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64
, r
->current_hartid
, r
->xlen
,
1780 static int riscv013_authdata_read(struct target
*target
, uint32_t *value
, unsigned int index
)
1783 LOG_ERROR("Spec 0.13 only has a single authdata register.");
1787 if (wait_for_authbusy(target
, NULL
) != ERROR_OK
)
1790 return dmi_read(target
, value
, DM_AUTHDATA
);
1793 static int riscv013_authdata_write(struct target
*target
, uint32_t value
, unsigned int index
)
1796 LOG_ERROR("Spec 0.13 only has a single authdata register.");
1800 uint32_t before
, after
;
1801 if (wait_for_authbusy(target
, &before
) != ERROR_OK
)
1804 dmi_write(target
, DM_AUTHDATA
, value
);
1806 if (wait_for_authbusy(target
, &after
) != ERROR_OK
)
1809 if (!get_field(before
, DM_DMSTATUS_AUTHENTICATED
) &&
1810 get_field(after
, DM_DMSTATUS_AUTHENTICATED
)) {
1811 LOG_INFO("authdata_write resulted in successful authentication");
1812 int result
= ERROR_OK
;
1813 dm013_info_t
*dm
= get_dm(target
);
1816 target_list_t
*entry
;
1817 list_for_each_entry(entry
, &dm
->target_list
, list
) {
1818 if (examine(entry
->target
) != ERROR_OK
)
1819 result
= ERROR_FAIL
;
1827 static int riscv013_hart_count(struct target
*target
)
1829 dm013_info_t
*dm
= get_dm(target
);
1831 return dm
->hart_count
;
1834 /* Try to find out the widest memory access size depending on the selected memory access methods. */
1835 static unsigned riscv013_data_bits(struct target
*target
)
1837 RISCV013_INFO(info
);
1840 for (unsigned int i
= 0; i
< RISCV_NUM_MEM_ACCESS_METHODS
; i
++) {
1841 int method
= r
->mem_access_methods
[i
];
1843 if (method
== RISCV_MEM_ACCESS_PROGBUF
) {
1844 if (has_sufficient_progbuf(target
, 3))
1845 return riscv_xlen(target
);
1846 } else if (method
== RISCV_MEM_ACCESS_SYSBUS
) {
1847 if (get_field(info
->sbcs
, DM_SBCS_SBACCESS128
))
1849 if (get_field(info
->sbcs
, DM_SBCS_SBACCESS64
))
1851 if (get_field(info
->sbcs
, DM_SBCS_SBACCESS32
))
1853 if (get_field(info
->sbcs
, DM_SBCS_SBACCESS16
))
1855 if (get_field(info
->sbcs
, DM_SBCS_SBACCESS8
))
1857 } else if (method
== RISCV_MEM_ACCESS_ABSTRACT
) {
1858 /* TODO: Once there is a spec for discovering abstract commands, we can
1859 * take those into account as well. For now we assume abstract commands
1860 * support XLEN-wide accesses. */
1861 return riscv_xlen(target
);
1862 } else if (method
== RISCV_MEM_ACCESS_UNSPECIFIED
)
1863 /* No further mem access method to try. */
1866 LOG_ERROR("Unable to determine supported data bits on this target. Assuming 32 bits.");
1870 static COMMAND_HELPER(riscv013_print_info
, struct target
*target
)
1872 RISCV013_INFO(info
);
1874 /* Abstract description. */
1875 riscv_print_info_line(CMD
, "target", "memory.read_while_running8", get_field(info
->sbcs
, DM_SBCS_SBACCESS8
));
1876 riscv_print_info_line(CMD
, "target", "memory.write_while_running8", get_field(info
->sbcs
, DM_SBCS_SBACCESS8
));
1877 riscv_print_info_line(CMD
, "target", "memory.read_while_running16", get_field(info
->sbcs
, DM_SBCS_SBACCESS16
));
1878 riscv_print_info_line(CMD
, "target", "memory.write_while_running16", get_field(info
->sbcs
, DM_SBCS_SBACCESS16
));
1879 riscv_print_info_line(CMD
, "target", "memory.read_while_running32", get_field(info
->sbcs
, DM_SBCS_SBACCESS32
));
1880 riscv_print_info_line(CMD
, "target", "memory.write_while_running32", get_field(info
->sbcs
, DM_SBCS_SBACCESS32
));
1881 riscv_print_info_line(CMD
, "target", "memory.read_while_running64", get_field(info
->sbcs
, DM_SBCS_SBACCESS64
));
1882 riscv_print_info_line(CMD
, "target", "memory.write_while_running64", get_field(info
->sbcs
, DM_SBCS_SBACCESS64
));
1883 riscv_print_info_line(CMD
, "target", "memory.read_while_running128", get_field(info
->sbcs
, DM_SBCS_SBACCESS128
));
1884 riscv_print_info_line(CMD
, "target", "memory.write_while_running128", get_field(info
->sbcs
, DM_SBCS_SBACCESS128
));
1886 /* Lower level description. */
1887 riscv_print_info_line(CMD
, "dm", "abits", info
->abits
);
1888 riscv_print_info_line(CMD
, "dm", "progbufsize", info
->progbufsize
);
1889 riscv_print_info_line(CMD
, "dm", "sbversion", get_field(info
->sbcs
, DM_SBCS_SBVERSION
));
1890 riscv_print_info_line(CMD
, "dm", "sbasize", get_field(info
->sbcs
, DM_SBCS_SBASIZE
));
1891 riscv_print_info_line(CMD
, "dm", "sbaccess128", get_field(info
->sbcs
, DM_SBCS_SBACCESS128
));
1892 riscv_print_info_line(CMD
, "dm", "sbaccess64", get_field(info
->sbcs
, DM_SBCS_SBACCESS64
));
1893 riscv_print_info_line(CMD
, "dm", "sbaccess32", get_field(info
->sbcs
, DM_SBCS_SBACCESS32
));
1894 riscv_print_info_line(CMD
, "dm", "sbaccess16", get_field(info
->sbcs
, DM_SBCS_SBACCESS16
));
1895 riscv_print_info_line(CMD
, "dm", "sbaccess8", get_field(info
->sbcs
, DM_SBCS_SBACCESS8
));
1898 if (dmstatus_read(target
, &dmstatus
, false) == ERROR_OK
)
1899 riscv_print_info_line(CMD
, "dm", "authenticated", get_field(dmstatus
, DM_DMSTATUS_AUTHENTICATED
));
1904 static int prep_for_vector_access(struct target
*target
, uint64_t *vtype
,
1905 uint64_t *vl
, unsigned *debug_vl
)
1908 /* TODO: this continuous save/restore is terrible for performance. */
1909 /* Write vtype and vl. */
1910 unsigned encoded_vsew
;
1911 switch (riscv_xlen(target
)) {
1919 LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target
));
1923 /* Save vtype and vl. */
1924 if (register_read(target
, vtype
, GDB_REGNO_VTYPE
) != ERROR_OK
)
1926 if (register_read(target
, vl
, GDB_REGNO_VL
) != ERROR_OK
)
1929 if (register_write_direct(target
, GDB_REGNO_VTYPE
, encoded_vsew
<< 3) != ERROR_OK
)
1931 *debug_vl
= DIV_ROUND_UP(r
->vlenb
* 8, riscv_xlen(target
));
1932 if (register_write_direct(target
, GDB_REGNO_VL
, *debug_vl
) != ERROR_OK
)
1938 static int cleanup_after_vector_access(struct target
*target
, uint64_t vtype
,
1941 /* Restore vtype and vl. */
1942 if (register_write_direct(target
, GDB_REGNO_VTYPE
, vtype
) != ERROR_OK
)
1944 if (register_write_direct(target
, GDB_REGNO_VL
, vl
) != ERROR_OK
)
1949 static int riscv013_get_register_buf(struct target
*target
,
1950 uint8_t *value
, int regno
)
1952 assert(regno
>= GDB_REGNO_V0
&& regno
<= GDB_REGNO_V31
);
1954 if (riscv_select_current_hart(target
) != ERROR_OK
)
1958 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
1962 if (prep_for_register_access(target
, &mstatus
, regno
) != ERROR_OK
)
1967 if (prep_for_vector_access(target
, &vtype
, &vl
, &debug_vl
) != ERROR_OK
)
1970 unsigned vnum
= regno
- GDB_REGNO_V0
;
1971 unsigned xlen
= riscv_xlen(target
);
1973 struct riscv_program program
;
1974 riscv_program_init(&program
, target
);
1975 riscv_program_insert(&program
, vmv_x_s(S0
, vnum
));
1976 riscv_program_insert(&program
, vslide1down_vx(vnum
, vnum
, S0
, true));
1978 int result
= ERROR_OK
;
1979 for (unsigned i
= 0; i
< debug_vl
; i
++) {
1980 /* Executing the program might result in an exception if there is some
1981 * issue with the vector implementation/instructions we're using. If that
1982 * happens, attempt to restore as usual. We may have clobbered the
1983 * vector register we tried to read already.
1984 * For other failures, we just return error because things are probably
1985 * so messed up that attempting to restore isn't going to help. */
1986 result
= riscv_program_exec(&program
, target
);
1987 if (result
== ERROR_OK
) {
1989 if (register_read_direct(target
, &v
, GDB_REGNO_S0
) != ERROR_OK
)
1991 buf_set_u64(value
, xlen
* i
, xlen
, v
);
1997 if (cleanup_after_vector_access(target
, vtype
, vl
) != ERROR_OK
)
2000 if (cleanup_after_register_access(target
, mstatus
, regno
) != ERROR_OK
)
2002 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
2008 static int riscv013_set_register_buf(struct target
*target
,
2009 int regno
, const uint8_t *value
)
2011 assert(regno
>= GDB_REGNO_V0
&& regno
<= GDB_REGNO_V31
);
2013 if (riscv_select_current_hart(target
) != ERROR_OK
)
2017 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
2021 if (prep_for_register_access(target
, &mstatus
, regno
) != ERROR_OK
)
2026 if (prep_for_vector_access(target
, &vtype
, &vl
, &debug_vl
) != ERROR_OK
)
2029 unsigned vnum
= regno
- GDB_REGNO_V0
;
2030 unsigned xlen
= riscv_xlen(target
);
2032 struct riscv_program program
;
2033 riscv_program_init(&program
, target
);
2034 riscv_program_insert(&program
, vslide1down_vx(vnum
, vnum
, S0
, true));
2035 int result
= ERROR_OK
;
2036 for (unsigned i
= 0; i
< debug_vl
; i
++) {
2037 if (register_write_direct(target
, GDB_REGNO_S0
,
2038 buf_get_u64(value
, xlen
* i
, xlen
)) != ERROR_OK
)
2040 result
= riscv_program_exec(&program
, target
);
2041 if (result
!= ERROR_OK
)
2045 if (cleanup_after_vector_access(target
, vtype
, vl
) != ERROR_OK
)
2048 if (cleanup_after_register_access(target
, mstatus
, regno
) != ERROR_OK
)
2050 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
2056 static uint32_t sb_sbaccess(unsigned int size_bytes
)
2058 switch (size_bytes
) {
2060 return set_field(0, DM_SBCS_SBACCESS
, 0);
2062 return set_field(0, DM_SBCS_SBACCESS
, 1);
2064 return set_field(0, DM_SBCS_SBACCESS
, 2);
2066 return set_field(0, DM_SBCS_SBACCESS
, 3);
2068 return set_field(0, DM_SBCS_SBACCESS
, 4);
2074 static int sb_write_address(struct target
*target
, target_addr_t address
,
2075 bool ensure_success
)
2077 RISCV013_INFO(info
);
2078 unsigned int sbasize
= get_field(info
->sbcs
, DM_SBCS_SBASIZE
);
2079 /* There currently is no support for >64-bit addresses in OpenOCD. */
2081 dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, DM_SBADDRESS3
, 0, false, false);
2083 dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, DM_SBADDRESS2
, 0, false, false);
2085 dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, DM_SBADDRESS1
, address
>> 32, false, false);
2086 return dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, DM_SBADDRESS0
, address
,
2087 false, ensure_success
);
2090 static int batch_run(const struct target
*target
, struct riscv_batch
*batch
)
2092 RISCV013_INFO(info
);
2094 if (r
->reset_delays_wait
>= 0) {
2095 r
->reset_delays_wait
-= batch
->used_scans
;
2096 if (r
->reset_delays_wait
<= 0) {
2097 batch
->idle_count
= 0;
2098 info
->dmi_busy_delay
= 0;
2099 info
->ac_busy_delay
= 0;
2102 return riscv_batch_run(batch
);
2105 static int sba_supports_access(struct target
*target
, unsigned int size_bytes
)
2107 RISCV013_INFO(info
);
2108 switch (size_bytes
) {
2110 return get_field(info
->sbcs
, DM_SBCS_SBACCESS8
);
2112 return get_field(info
->sbcs
, DM_SBCS_SBACCESS16
);
2114 return get_field(info
->sbcs
, DM_SBCS_SBACCESS32
);
2116 return get_field(info
->sbcs
, DM_SBCS_SBACCESS64
);
2118 return get_field(info
->sbcs
, DM_SBCS_SBACCESS128
);
2124 static int sample_memory_bus_v1(struct target
*target
,
2125 struct riscv_sample_buf
*buf
,
2126 const riscv_sample_config_t
*config
,
2129 RISCV013_INFO(info
);
2130 unsigned int sbasize
= get_field(info
->sbcs
, DM_SBCS_SBASIZE
);
2132 LOG_ERROR("Memory sampling is only implemented for sbasize <= 64.");
2133 return ERROR_NOT_IMPLEMENTED
;
2136 if (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) != 1) {
2137 LOG_ERROR("Memory sampling is only implemented for SBA version 1.");
2138 return ERROR_NOT_IMPLEMENTED
;
2142 uint32_t sbcs_valid
= false;
2144 uint32_t sbaddress0
= 0;
2145 bool sbaddress0_valid
= false;
2146 uint32_t sbaddress1
= 0;
2147 bool sbaddress1_valid
= false;
2149 /* How often to read each value in a batch. */
2150 const unsigned int repeat
= 5;
2152 unsigned int enabled_count
= 0;
2153 for (unsigned int i
= 0; i
< ARRAY_SIZE(config
->bucket
); i
++) {
2154 if (config
->bucket
[i
].enabled
)
2158 while (timeval_ms() < until_ms
) {
2160 * batch_run() adds to the batch, so we can't simply reuse the same
2161 * batch over and over. So we create a new one every time through the
2164 struct riscv_batch
*batch
= riscv_batch_alloc(
2165 target
, 1 + enabled_count
* 5 * repeat
,
2166 info
->dmi_busy_delay
+ info
->bus_master_read_delay
);
2170 unsigned int result_bytes
= 0;
2171 for (unsigned int n
= 0; n
< repeat
; n
++) {
2172 for (unsigned int i
= 0; i
< ARRAY_SIZE(config
->bucket
); i
++) {
2173 if (config
->bucket
[i
].enabled
) {
2174 if (!sba_supports_access(target
, config
->bucket
[i
].size_bytes
)) {
2175 LOG_ERROR("Hardware does not support SBA access for %d-byte memory sampling.",
2176 config
->bucket
[i
].size_bytes
);
2177 return ERROR_NOT_IMPLEMENTED
;
2180 uint32_t sbcs_write
= DM_SBCS_SBREADONADDR
;
2181 if (enabled_count
== 1)
2182 sbcs_write
|= DM_SBCS_SBREADONDATA
;
2183 sbcs_write
|= sb_sbaccess(config
->bucket
[i
].size_bytes
);
2184 if (!sbcs_valid
|| sbcs_write
!= sbcs
) {
2185 riscv_batch_add_dmi_write(batch
, DM_SBCS
, sbcs_write
);
2191 (!sbaddress1_valid
||
2192 sbaddress1
!= config
->bucket
[i
].address
>> 32)) {
2193 sbaddress1
= config
->bucket
[i
].address
>> 32;
2194 riscv_batch_add_dmi_write(batch
, DM_SBADDRESS1
, sbaddress1
);
2195 sbaddress1_valid
= true;
2197 if (!sbaddress0_valid
||
2198 sbaddress0
!= (config
->bucket
[i
].address
& 0xffffffff)) {
2199 sbaddress0
= config
->bucket
[i
].address
;
2200 riscv_batch_add_dmi_write(batch
, DM_SBADDRESS0
, sbaddress0
);
2201 sbaddress0_valid
= true;
2203 if (config
->bucket
[i
].size_bytes
> 4)
2204 riscv_batch_add_dmi_read(batch
, DM_SBDATA1
);
2205 riscv_batch_add_dmi_read(batch
, DM_SBDATA0
);
2206 result_bytes
+= 1 + config
->bucket
[i
].size_bytes
;
2211 if (buf
->used
+ result_bytes
>= buf
->size
) {
2212 riscv_batch_free(batch
);
2216 size_t sbcs_key
= riscv_batch_add_dmi_read(batch
, DM_SBCS
);
2218 int result
= batch_run(target
, batch
);
2219 if (result
!= ERROR_OK
)
2222 uint32_t sbcs_read
= riscv_batch_get_dmi_read_data(batch
, sbcs_key
);
2223 if (get_field(sbcs_read
, DM_SBCS_SBBUSYERROR
)) {
2224 /* Discard this batch (too much hassle to try to recover partial
2225 * data) and try again with a larger delay. */
2226 info
->bus_master_read_delay
+= info
->bus_master_read_delay
/ 10 + 1;
2227 dmi_write(target
, DM_SBCS
, sbcs_read
| DM_SBCS_SBBUSYERROR
| DM_SBCS_SBERROR
);
2228 riscv_batch_free(batch
);
2231 if (get_field(sbcs_read
, DM_SBCS_SBERROR
)) {
2232 /* The memory we're sampling was unreadable, somehow. Give up. */
2233 dmi_write(target
, DM_SBCS
, DM_SBCS_SBBUSYERROR
| DM_SBCS_SBERROR
);
2234 riscv_batch_free(batch
);
2238 unsigned int read
= 0;
2239 for (unsigned int n
= 0; n
< repeat
; n
++) {
2240 for (unsigned int i
= 0; i
< ARRAY_SIZE(config
->bucket
); i
++) {
2241 if (config
->bucket
[i
].enabled
) {
2242 assert(i
< RISCV_SAMPLE_BUF_TIMESTAMP_BEFORE
);
2244 if (config
->bucket
[i
].size_bytes
> 4)
2245 value
= ((uint64_t)riscv_batch_get_dmi_read_data(batch
, read
++)) << 32;
2246 value
|= riscv_batch_get_dmi_read_data(batch
, read
++);
2248 buf
->buf
[buf
->used
] = i
;
2249 buf_set_u64(buf
->buf
+ buf
->used
+ 1, 0, config
->bucket
[i
].size_bytes
* 8, value
);
2250 buf
->used
+= 1 + config
->bucket
[i
].size_bytes
;
2255 riscv_batch_free(batch
);
/* Entry point for memory sampling: returns early when sampling is disabled
 * in the config, otherwise delegates to the system-bus v1 sampler.
 * NOTE(review): this extraction elides some original lines (the last
 * parameter, likely `int64_t until_ms`, and the early-return value) —
 * confirm against the complete file. */
2261 static int sample_memory(struct target
*target
,
2262 struct riscv_sample_buf
*buf
,
2263 riscv_sample_config_t
*config
,
/* Nothing to do when sampling is not enabled. */
2266 if (!config
->enabled
)
2269 return sample_memory_bus_v1(target
, buf
, config
, until_ms
);
/* Install the 0.13-specific implementation into the generic riscv_info
 * function-pointer table and initialize per-target 0.13 state.
 * NOTE(review): some original lines are elided in this extraction (e.g. the
 * out-of-memory return after the failed calloc) — confirm against the
 * complete file. */
2272 static int init_target(struct command_context
*cmd_ctx
,
2273 struct target
*target
)
2276 RISCV_INFO(generic_info
);
/* Wire every generic riscv_info hook to its riscv013_* implementation. */
2278 generic_info
->get_register
= &riscv013_get_register
;
2279 generic_info
->set_register
= &riscv013_set_register
;
2280 generic_info
->get_register_buf
= &riscv013_get_register_buf
;
2281 generic_info
->set_register_buf
= &riscv013_set_register_buf
;
2282 generic_info
->select_current_hart
= &riscv013_select_current_hart
;
2283 generic_info
->is_halted
= &riscv013_is_halted
;
2284 generic_info
->resume_go
= &riscv013_resume_go
;
2285 generic_info
->step_current_hart
= &riscv013_step_current_hart
;
2286 generic_info
->on_halt
= &riscv013_on_halt
;
2287 generic_info
->resume_prep
= &riscv013_resume_prep
;
2288 generic_info
->halt_prep
= &riscv013_halt_prep
;
2289 generic_info
->halt_go
= &riscv013_halt_go
;
2290 generic_info
->on_step
= &riscv013_on_step
;
2291 generic_info
->halt_reason
= &riscv013_halt_reason
;
2292 generic_info
->read_debug_buffer
= &riscv013_read_debug_buffer
;
2293 generic_info
->write_debug_buffer
= &riscv013_write_debug_buffer
;
2294 generic_info
->execute_debug_buffer
= &riscv013_execute_debug_buffer
;
2295 generic_info
->fill_dmi_write_u64
= &riscv013_fill_dmi_write_u64
;
2296 generic_info
->fill_dmi_read_u64
= &riscv013_fill_dmi_read_u64
;
2297 generic_info
->fill_dmi_nop_u64
= &riscv013_fill_dmi_nop_u64
;
2298 generic_info
->dmi_write_u64_bits
= &riscv013_dmi_write_u64_bits
;
2299 generic_info
->authdata_read
= &riscv013_authdata_read
;
2300 generic_info
->authdata_write
= &riscv013_authdata_write
;
2301 generic_info
->dmi_read
= &dmi_read
;
2302 generic_info
->dmi_write
= &dmi_write
;
2303 generic_info
->read_memory
= read_memory
;
2304 generic_info
->hart_count
= &riscv013_hart_count
;
2305 generic_info
->data_bits
= &riscv013_data_bits
;
2306 generic_info
->print_info
= &riscv013_print_info
;
/* Allocate version-specific state only on first initialization. */
2307 if (!generic_info
->version_specific
) {
2308 generic_info
->version_specific
= calloc(1, sizeof(riscv013_info_t
));
2309 if (!generic_info
->version_specific
)
2312 generic_info
->sample_memory
= sample_memory
;
2313 riscv013_info_t
*info
= get_info(target
);
/* progbufsize is unknown (-1) until the DM is examined. */
2315 info
->progbufsize
= -1;
/* All adaptive DMI/bus/abstract-command delays start at zero and grow as
 * busy responses are observed. */
2317 info
->dmi_busy_delay
= 0;
2318 info
->bus_master_read_delay
= 0;
2319 info
->bus_master_write_delay
= 0;
2320 info
->ac_busy_delay
= 0;
2322 /* Assume all these abstract commands are supported until we learn
2324 * TODO: The spec allows eg. one CSR to be able to be accessed abstractly
2325 * while another one isn't. We don't track that this closely here, but in
2326 * the future we probably should. */
2327 info
->abstract_read_csr_supported
= true;
2328 info
->abstract_write_csr_supported
= true;
2329 info
->abstract_read_fpr_supported
= true;
2330 info
->abstract_write_fpr_supported
= true;
/* aampostincrement support is probed lazily on first memory access. */
2332 info
->has_aampostincrement
= YNM_MAYBE
;
/* Assert reset on the target: either run a user-supplied RESET_ASSERT
 * event script, or drive dmcontrol.ndmreset (with haltreq when reset_halt
 * is requested) for all harts (RTOS mode) or just the current hart.
 * NOTE(review): several original lines are elided here (declarations,
 * closing braces, final return) — confirm against the complete file. */
2337 static int assert_reset(struct target
*target
)
2343 uint32_t control_base
= set_field(0, DM_DMCONTROL_DMACTIVE
, 1);
2345 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
)) {
2346 /* Run the user-supplied script if there is one. */
2347 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
2348 } else if (target
->rtos
) {
2349 /* There's only one target, and OpenOCD thinks each hart is a thread.
2350 * We must reset them all. */
2352 /* TODO: Try to use hasel in dmcontrol */
2354 /* Set haltreq for each hart. */
2355 uint32_t control
= set_hartsel(control_base
, target
->coreid
)
;
2356 control
= set_field(control
, DM_DMCONTROL_HALTREQ
,
2357 target
->reset_halt
? 1 : 0);
2358 dmi_write(target
, DM_DMCONTROL
, control
);
2360 /* Assert ndmreset */
2361 control
= set_field(control
, DM_DMCONTROL_NDMRESET
, 1);
2362 dmi_write(target
, DM_DMCONTROL
, control
);
2365 /* Reset just this hart. */
2366 uint32_t control
= set_hartsel(control_base
, r
->current_hartid
);
2367 control
= set_field(control
, DM_DMCONTROL_HALTREQ
,
2368 target
->reset_halt
? 1 : 0);
2369 control
= set_field(control
, DM_DMCONTROL_NDMRESET
, 1);
2370 dmi_write(target
, DM_DMCONTROL
, control
);
2373 target
->state
= TARGET_RESET
;
2375 dm013_info_t
*dm
= get_dm(target
);
2379 /* The DM might have gotten reset if OpenOCD called us in some reset that
2380 * involves SRST being toggled. So clear our cache which may be out of
2382 memset(dm
->progbuf_cache
, 0, sizeof(dm
->progbuf_cache
));
/* Deassert reset: clear ndmreset (keeping haltreq if reset_halt was
 * requested), then poll dmstatus for each hart until it leaves the
 * "unavailable" state or the reset timeout expires, acking havereset at
 * the end.  NOTE(review): this extraction elides some original lines
 * (loop braces, `continue`/`break` statements, the final return) —
 * confirm against the complete file. */
2387 static int deassert_reset(struct target
*target
)
2390 RISCV013_INFO(info
);
2393 /* Clear the reset, but make sure haltreq is still set */
2394 uint32_t control
= 0, control_haltreq
;
2395 control
= set_field(control
, DM_DMCONTROL_DMACTIVE
, 1);
2396 control_haltreq
= set_field(control
, DM_DMCONTROL_HALTREQ
, target
->reset_halt
? 1 : 0);
2397 dmi_write(target
, DM_DMCONTROL
,
2398 set_hartsel(control_haltreq
, r
->current_hartid
));
/* Save the busy delay so it can be restored after the polling loop,
 * which may grow it artificially while harts are still in reset. */
2401 int dmi_busy_delay
= info
->dmi_busy_delay
;
2402 time_t start
= time(NULL
);
2404 for (int i
= 0; i
< riscv_count_harts(target
); ++i
) {
2407 if (index
!= target
->coreid
)
2409 dmi_write(target
, DM_DMCONTROL
,
2410 set_hartsel(control_haltreq
, index
));
2412 index
= r
->current_hartid
;
2415 LOG_DEBUG("Waiting for hart %d to come out of reset.", index
);
2417 int result
= dmstatus_read_timeout(target
, &dmstatus
, true,
2418 riscv_reset_timeout_sec
);
2419 if (result
== ERROR_TIMEOUT_REACHED
)
2420 LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
2421 "reset in %ds; Increase the timeout with riscv "
2422 "set_reset_timeout_sec.",
2423 index
, riscv_reset_timeout_sec
);
2424 if (result
!= ERROR_OK
)
2426 /* Certain debug modules, like the one in GD32VF103
2427 * MCUs, violate the specification's requirement that
2428 * each hart is in "exactly one of four states" and,
2429 * during reset, report harts as both unavailable and
2430 * halted/running. To work around this, we check for
2431 * the absence of the unavailable state rather than
2432 * the presence of any other state. */
2433 if (!get_field(dmstatus
, DM_DMSTATUS_ALLUNAVAIL
))
2435 if (time(NULL
) - start
> riscv_reset_timeout_sec
) {
2436 LOG_ERROR("Hart %d didn't leave reset in %ds; "
2438 "Increase the timeout with riscv set_reset_timeout_sec.",
2439 index
, riscv_reset_timeout_sec
, dmstatus
);
2443 target
->state
= TARGET_HALTED
;
2445 if (get_field(dmstatus
, DM_DMSTATUS_ALLHAVERESET
)) {
2446 /* Ack reset and clear DM_DMCONTROL_HALTREQ if previously set */
2447 dmi_write(target
, DM_DMCONTROL
,
2448 set_hartsel(control
, index
) |
2449 DM_DMCONTROL_ACKHAVERESET
);
/* Restore the busy delay that was saved before polling. */
2455 info
->dmi_busy_delay
= dmi_busy_delay
;
/* Execute `fence.i` followed by `fence` on the hart via the program
 * buffer, so that previously-written instructions/data become visible.
 * A failure is only logged at debug level (best-effort).
 * NOTE(review): the trailing lines (likely the return) are elided in
 * this extraction — confirm against the complete file. */
2459 static int execute_fence(struct target
*target
)
2461 /* FIXME: For non-coherent systems we need to flush the caches right
2462 * here, but there's no ISA-defined way of doing that. */
2464 struct riscv_program program
;
2465 riscv_program_init(&program
, target
);
2466 riscv_program_fence_i(&program
);
2467 riscv_program_fence(&program
);
2468 int result
= riscv_program_exec(&program
, target
);
2469 if (result
!= ERROR_OK
)
2470 LOG_DEBUG("Unable to execute pre-fence");
/* Emit a debug-level log line for a single memory access, formatting the
 * value to the access width (fmt is built at runtime so the hex field is
 * exactly size_bytes*2 digits wide).  No-op unless debug logging is on.
 * NOTE(review): the fmt buffer declaration and the switch cases are
 * elided in this extraction — confirm against the complete file. */
2476 static void log_memory_access(target_addr_t address
, uint64_t value
,
2477 unsigned size_bytes
, bool read
)
2479 if (debug_level
< LOG_LVL_DEBUG
)
2483 sprintf(fmt
, "M[0x%" TARGET_PRIxADDR
"] %ss 0x%%0%d" PRIx64
,
2484 address
, read
? "read" : "write", size_bytes
* 2);
2485 switch (size_bytes
) {
/* Mask to 32 bits for 4-byte (and narrower) accesses. */
2493 value
&= 0xffffffffUL
;
2500 LOG_DEBUG(fmt
, value
);
2503 /* Read the relevant sbdata regs depending on size, and put the results into
/* buffer (most-significant sbdata register first, so sbdata0 — which may
 * trigger the next autoread — is read last). */
2505 static int read_memory_bus_word(struct target
*target
, target_addr_t address
,
2506 uint32_t size
, uint8_t *buffer
)
2510 static int sbdata
[4] = { DM_SBDATA0
, DM_SBDATA1
, DM_SBDATA2
, DM_SBDATA3
};
/* Walk from the highest sbdata register needed for this size down to 0. */
2512 for (int i
= (size
- 1) / 4; i
>= 0; i
--) {
2513 result
= dmi_op(target
, &value
, NULL
, DMI_OP_READ
, sbdata
[i
], 0, false, true);
2514 if (result
!= ERROR_OK
)
2516 buf_set_u32(buffer
+ i
* 4, 0, 8 * MIN(size
, 4), value
);
2517 log_memory_access(address
+ i
* 4, value
, MIN(size
, 4), true);
/* Read back the current system-bus address from sbaddress0 (and
 * sbaddress1 when sbasize indicates more than 32 address bits).
 * NOTE(review): the lines that combine the reads into `address` and the
 * return are elided in this extraction — confirm against the full file. */
2522 static target_addr_t
sb_read_address(struct target
*target
)
2524 RISCV013_INFO(info
);
2525 unsigned sbasize
= get_field(info
->sbcs
, DM_SBCS_SBASIZE
);
2526 target_addr_t address
= 0;
2529 dmi_read(target
, &v
, DM_SBADDRESS1
);
2533 dmi_read(target
, &v
, DM_SBADDRESS0
);
/* Poll sbcs until sbbusy is clear, leaving the last-read sbcs value in
 * *sbcs.  Fails with an error log after riscv_command_timeout_sec.
 * NOTE(review): the loop braces and return statements are elided in this
 * extraction — confirm against the complete file. */
2538 static int read_sbcs_nonbusy(struct target
*target
, uint32_t *sbcs
)
2540 time_t start
= time(NULL
);
2542 if (dmi_read(target
, sbcs
, DM_SBCS
) != ERROR_OK
)
2544 if (!get_field(*sbcs
, DM_SBCS_SBBUSY
))
2546 if (time(NULL
) - start
> riscv_command_timeout_sec
) {
2547 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
2548 "Increase the timeout with riscv set_command_timeout_sec.",
2549 riscv_command_timeout_sec
, *sbcs
);
/* When `riscv expose virtual` mode is enabled and the progbuf is large
 * enough: save MSTATUS into *mstatus_old, then set MPRV (and MPP from
 * dcsr.prv when we halted below M-mode) so subsequent progbuf memory
 * accesses use the hart's pre-halt privilege/translation.  Writes the
 * modified MSTATUS back only if it actually changed. */
2555 static int modify_privilege(struct target
*target
, uint64_t *mstatus
, uint64_t *mstatus_old
)
2557 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5)) {
2560 if (register_read(target
, &dcsr
, GDB_REGNO_DCSR
) != ERROR_OK
)
2563 /* Read and save MSTATUS */
2564 if (register_read(target
, mstatus
, GDB_REGNO_MSTATUS
) != ERROR_OK
)
2566 *mstatus_old
= *mstatus
;
2568 /* If we come from m-mode with mprv set, we want to keep mpp */
2569 if (get_field(dcsr
, DCSR_PRV
) < 3) {
2571 *mstatus
= set_field(*mstatus
, MSTATUS_MPP
, get_field(dcsr
, DCSR_PRV
));
2574 *mstatus
= set_field(*mstatus
, MSTATUS_MPRV
, 1);
2577 if (*mstatus
!= *mstatus_old
)
2578 if (register_write_direct(target
, GDB_REGNO_MSTATUS
, *mstatus
) != ERROR_OK
)
/* Read memory over the legacy (spec-0.13 draft, sbcs version 0) system
 * bus interface.  Two strategies: one-shot singleread per word when
 * count==1, otherwise a block read using sbautoread+sbautoincrement.
 * Only size==increment is supported.  NOTE(review): this extraction
 * elides numerous original lines (the count==1 branch selection, buffer
 * advancing, returns, closing braces) — confirm against the full file. */
2586 static int read_memory_bus_v0(struct target
*target
, target_addr_t address
,
2587 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t increment
)
2589 if (size
!= increment
) {
2590 LOG_ERROR("sba v0 reads only support size==increment");
2591 return ERROR_NOT_IMPLEMENTED
;
2594 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
2595 TARGET_PRIxADDR
, size
, count
, address
);
2596 uint8_t *t_buffer
= buffer
;
2597 riscv_addr_t cur_addr
= address
;
2598 riscv_addr_t fin_addr
= address
+ (count
* size
);
2599 uint32_t access
= 0;
/* These sbcs fields only exist in the v0 (pre-ratification) layout, so
 * they are defined locally rather than in debug_defines.h. */
2601 const int DM_SBCS_SBSINGLEREAD_OFFSET
= 20;
2602 const uint32_t DM_SBCS_SBSINGLEREAD
= (0x1U
<< DM_SBCS_SBSINGLEREAD_OFFSET
);
2604 const int DM_SBCS_SBAUTOREAD_OFFSET
= 15;
2605 const uint32_t DM_SBCS_SBAUTOREAD
= (0x1U
<< DM_SBCS_SBAUTOREAD_OFFSET
);
2607 /* ww favorise one off reading if there is an issue */
/* One-off path: program address + singleread for every word. */
2609 for (uint32_t i
= 0; i
< count
; i
++) {
2610 if (dmi_read(target
, &access
, DM_SBCS
) != ERROR_OK
)
2612 dmi_write(target
, DM_SBADDRESS0
, cur_addr
);
2613 /* size/2 matching the bit access of the spec 0.13 */
2614 access
= set_field(access
, DM_SBCS_SBACCESS
, size
/2);
2615 access
= set_field(access
, DM_SBCS_SBSINGLEREAD
, 1);
2616 LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access
);
2617 dmi_write(target
, DM_SBCS
, access
);
2620 if (dmi_read(target
, &value
, DM_SBDATA0
) != ERROR_OK
)
2622 LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value
);
2623 buf_set_u32(t_buffer
, 0, 8 * size
, value
);
2630 /* has to be the same size if we want to read a block */
2631 LOG_DEBUG("reading block until final address 0x%" PRIx64
, fin_addr
);
2632 if (dmi_read(target
, &access
, DM_SBCS
) != ERROR_OK
)
2634 /* set current address */
2635 dmi_write(target
, DM_SBADDRESS0
, cur_addr
);
2636 /* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
2637 * size/2 matching the bit access of the spec 0.13 */
2638 access
= set_field(access
, DM_SBCS_SBACCESS
, size
/2);
2639 access
= set_field(access
, DM_SBCS_SBAUTOREAD
, 1);
2640 access
= set_field(access
, DM_SBCS_SBSINGLEREAD
, 1);
2641 access
= set_field(access
, DM_SBCS_SBAUTOINCREMENT
, 1);
2642 LOG_DEBUG("\r\naccess: 0x%08x", access
);
2643 dmi_write(target
, DM_SBCS
, access
);
/* Each sbdata0 read below auto-triggers the next bus read. */
2645 while (cur_addr
< fin_addr
) {
2646 LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
2647 PRIx64
, size
, count
, cur_addr
);
2650 if (dmi_read(target
, &value
, DM_SBDATA0
) != ERROR_OK
)
2652 buf_set_u32(t_buffer
, 0, 8 * size
, value
);
2656 /* if we are reaching last address, we must clear autoread */
2657 if (cur_addr
== fin_addr
&& count
!= 1) {
2658 dmi_write(target
, DM_SBCS
, 0);
2659 if (dmi_read(target
, &value
, DM_SBDATA0
) != ERROR_OK
)
2661 buf_set_u32(t_buffer
, 0, 8 * size
, value
);
2666 if (dmi_read(target
, &sbcs
, DM_SBCS
) != ERROR_OK
)
2673 * Read the requested memory using the system bus interface.
/* sbcs version 1 (ratified 0.13) block read: sbreadonaddr triggers the
 * first read, sbreadondata pipelines the rest; on sbbusyerror the delay
 * is grown and the loop restarts from the address the DM reports.
 * NOTE(review): this extraction elides several original lines (inner
 * retry-loop braces, error-path returns/continues) — confirm against
 * the complete file. */
2675 static int read_memory_bus_v1(struct target
*target
, target_addr_t address
,
2676 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t increment
)
2678 if (increment
!= size
&& increment
!= 0) {
2679 LOG_ERROR("sba v1 reads only support increment of size or 0");
2680 return ERROR_NOT_IMPLEMENTED
;
2683 RISCV013_INFO(info
);
2684 target_addr_t next_address
= address
;
2685 target_addr_t end_address
= address
+ count
* size
;
2687 while (next_address
< end_address
) {
2688 uint32_t sbcs_write
= set_field(0, DM_SBCS_SBREADONADDR
, 1);
2689 sbcs_write
|= sb_sbaccess(size
);
2690 if (increment
== size
)
2691 sbcs_write
= set_field(sbcs_write
, DM_SBCS_SBAUTOINCREMENT
, 1);
2693 sbcs_write
= set_field(sbcs_write
, DM_SBCS_SBREADONDATA
, count
> 1);
2694 if (dmi_write(target
, DM_SBCS
, sbcs_write
) != ERROR_OK
)
2697 /* This address write will trigger the first read. */
2698 if (sb_write_address(target
, next_address
, true) != ERROR_OK
)
/* Give the bus master time to complete before we start draining data. */
2701 if (info
->bus_master_read_delay
) {
2702 jtag_add_runtest(info
->bus_master_read_delay
, TAP_IDLE
);
2703 if (jtag_execute_queue() != ERROR_OK
) {
2704 LOG_ERROR("Failed to scan idle sequence");
2709 /* First value has been read, and is waiting for us to issue a DMI read
2712 static int sbdata
[4] = {DM_SBDATA0
, DM_SBDATA1
, DM_SBDATA2
, DM_SBDATA3
};
/* next_read == address-1 is the "no value pending yet" sentinel. */
2714 target_addr_t next_read
= address
- 1;
2715 for (uint32_t i
= (next_address
- address
) / size
; i
< count
- 1; i
++) {
2716 for (int j
= (size
- 1) / 4; j
>= 0; j
--) {
2718 unsigned attempt
= 0;
2720 if (attempt
++ > 100) {
2721 LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT
,
2726 dmi_status_t status
= dmi_scan(target
, NULL
, &value
,
2727 DMI_OP_READ
, sbdata
[j
], 0, false);
2728 if (status
== DMI_STATUS_BUSY
)
2729 increase_dmi_busy_delay(target
);
2730 else if (status
== DMI_STATUS_SUCCESS
)
/* The scan result is the value from the PREVIOUS read request. */
2735 if (next_read
!= address
- 1) {
2736 buf_set_u32(buffer
+ next_read
- address
, 0, 8 * MIN(size
, 4), value
);
2737 log_memory_access(next_read
, value
, MIN(size
, 4), true);
2739 next_read
= address
+ i
* size
+ j
* 4;
2743 uint32_t sbcs_read
= 0;
2746 unsigned attempt
= 0;
2748 if (attempt
++ > 100) {
2749 LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT
,
/* NOP scan flushes out the data from the last queued read above. */
2753 dmi_status_t status
= dmi_scan(target
, NULL
, &value
, DMI_OP_NOP
, 0, 0, false);
2754 if (status
== DMI_STATUS_BUSY
)
2755 increase_dmi_busy_delay(target
);
2756 else if (status
== DMI_STATUS_SUCCESS
)
2761 buf_set_u32(buffer
+ next_read
- address
, 0, 8 * MIN(size
, 4), value
);
2762 log_memory_access(next_read
, value
, MIN(size
, 4), true);
2764 /* "Writes to sbcs while sbbusy is high result in undefined behavior.
2765 * A debugger must not write to sbcs until it reads sbbusy as 0." */
2766 if (read_sbcs_nonbusy(target
, &sbcs_read
) != ERROR_OK
)
2769 sbcs_write
= set_field(sbcs_write
, DM_SBCS_SBREADONDATA
, 0);
2770 if (dmi_write(target
, DM_SBCS
, sbcs_write
) != ERROR_OK
)
2774 /* Read the last word, after we disabled sbreadondata if necessary. */
2775 if (!get_field(sbcs_read
, DM_SBCS_SBERROR
) &&
2776 !get_field(sbcs_read
, DM_SBCS_SBBUSYERROR
)) {
2777 if (read_memory_bus_word(target
, address
+ (count
- 1) * size
, size
,
2778 buffer
+ (count
- 1) * size
) != ERROR_OK
)
2781 if (read_sbcs_nonbusy(target
, &sbcs_read
) != ERROR_OK
)
2785 if (get_field(sbcs_read
, DM_SBCS_SBBUSYERROR
)) {
2786 /* We read while the target was busy. Slow down and try again. */
2787 if (dmi_write(target
, DM_SBCS
, sbcs_read
| DM_SBCS_SBBUSYERROR
) != ERROR_OK
)
2789 next_address
= sb_read_address(target
);
2790 info
->bus_master_read_delay
+= info
->bus_master_read_delay
/ 10 + 1;
2794 unsigned error
= get_field(sbcs_read
, DM_SBCS_SBERROR
);
2796 next_address
= end_address
;
2798 /* Some error indicating the bus access failed, but not because of
2799 * something we did wrong. */
2800 if (dmi_write(target
, DM_SBCS
, DM_SBCS_SBERROR
) != ERROR_OK
)
/* Log the outcome of a memory access attempt via one of the three access
 * methods.  The first failure per method is logged as a warning (the
 * per-method *_warn flag is then cleared); subsequent messages go to
 * debug level.  NOTE(review): the msg buffer declaration and the final
 * warn/else condition are elided in this extraction. */
2809 static void log_mem_access_result(struct target
*target
, bool success
, int method
, bool read
)
2815 /* Compose the message */
2816 snprintf(msg
, 60, "%s to %s memory via %s.",
2817 success
? "Succeeded" : "Failed",
2818 read
? "read" : "write",
2819 (method
== RISCV_MEM_ACCESS_PROGBUF
) ? "program buffer" :
2820 (method
== RISCV_MEM_ACCESS_SYSBUS
) ? "system bus" : "abstract access");
2822 /* Determine the log message severity. Show warnings only once. */
2824 if (method
== RISCV_MEM_ACCESS_PROGBUF
) {
2825 warn
= r
->mem_access_progbuf_warn
;
2826 r
->mem_access_progbuf_warn
= false;
2828 if (method
== RISCV_MEM_ACCESS_SYSBUS
) {
2829 warn
= r
->mem_access_sysbus_warn
;
2830 r
->mem_access_sysbus_warn
= false;
2832 if (method
== RISCV_MEM_ACCESS_ABSTRACT
) {
2833 warn
= r
->mem_access_abstract_warn
;
2834 r
->mem_access_abstract_warn
= false;
2839 LOG_WARNING("%s", msg
);
2841 LOG_DEBUG("%s", msg
);
/* Decide whether a program-buffer memory access must be skipped:
 * insufficient progbuf, target not halted, access wider than XLEN,
 * unsupported size, or address wider than XLEN.  On skip, *skip_reason
 * points at a static reason string.  NOTE(review): the `return true;`
 * statements after each reason and the final `return false;` are elided
 * in this extraction. */
2844 static bool mem_should_skip_progbuf(struct target
*target
, target_addr_t address
,
2845 uint32_t size
, bool read
, char **skip_reason
)
2847 assert(skip_reason
);
2849 if (!has_sufficient_progbuf(target
, 3)) {
2850 LOG_DEBUG("Skipping mem %s via progbuf - insufficient progbuf size.",
2851 read
? "read" : "write");
2852 *skip_reason
= "skipped (insufficient progbuf)";
2855 if (target
->state
!= TARGET_HALTED
) {
2856 LOG_DEBUG("Skipping mem %s via progbuf - target not halted.",
2857 read
? "read" : "write");
2858 *skip_reason
= "skipped (target not halted)";
2861 if (riscv_xlen(target
) < size
* 8) {
2862 LOG_DEBUG("Skipping mem %s via progbuf - XLEN (%d) is too short for %d-bit memory access.",
2863 read
? "read" : "write", riscv_xlen(target
), size
* 8);
2864 *skip_reason
= "skipped (XLEN too short)";
2868 LOG_DEBUG("Skipping mem %s via progbuf - unsupported size.",
2869 read
? "read" : "write");
2870 *skip_reason
= "skipped (unsupported size)";
2873 if ((sizeof(address
) * 8 > riscv_xlen(target
)) && (address
>> riscv_xlen(target
))) {
2874 LOG_DEBUG("Skipping mem %s via progbuf - progbuf only supports %u-bit address.",
2875 read
? "read" : "write", riscv_xlen(target
));
2876 *skip_reason
= "skipped (too large address)";
/* Decide whether a system-bus memory access must be skipped: size not
 * supported by sbcs.sbaccess*, address wider than sbasize, or an
 * increment the sba version cannot do (v0 needs increment==size; v1 also
 * allows increment==0).  NOTE(review): the `return true;` statements and
 * final `return false;` are elided in this extraction. */
2883 static bool mem_should_skip_sysbus(struct target
*target
, target_addr_t address
,
2884 uint32_t size
, uint32_t increment
, bool read
, char **skip_reason
)
2886 assert(skip_reason
);
2888 RISCV013_INFO(info
);
2889 if (!sba_supports_access(target
, size
)) {
2890 LOG_DEBUG("Skipping mem %s via system bus - unsupported size.",
2891 read
? "read" : "write");
2892 *skip_reason
= "skipped (unsupported size)";
2895 unsigned int sbasize
= get_field(info
->sbcs
, DM_SBCS_SBASIZE
);
2896 if ((sizeof(address
) * 8 > sbasize
) && (address
>> sbasize
)) {
2897 LOG_DEBUG("Skipping mem %s via system bus - sba only supports %u-bit address.",
2898 read
? "read" : "write", sbasize
);
2899 *skip_reason
= "skipped (too large address)";
2902 if (read
&& increment
!= size
&& (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) == 0 || increment
!= 0)) {
2903 LOG_DEBUG("Skipping mem read via system bus - "
2904 "sba reads only support size==increment or also size==0 for sba v1.");
2905 *skip_reason
= "skipped (unsupported increment)";
/* Decide whether an abstract-command memory access must be skipped:
 * size above the supported widths, address wider than XLEN, or a read
 * with increment != size.  NOTE(review): the size-check condition before
 * the first LOG_DEBUG and the `return true;`/`return false;` statements
 * are elided in this extraction. */
2912 static bool mem_should_skip_abstract(struct target
*target
, target_addr_t address
,
2913 uint32_t size
, uint32_t increment
, bool read
, char **skip_reason
)
2915 assert(skip_reason
);
2918 /* TODO: Add 128b support if it's ever used. Involves modifying
2919 read/write_abstract_arg() to work on two 64b values. */
2920 LOG_DEBUG("Skipping mem %s via abstract access - unsupported size: %d bits",
2921 read
? "read" : "write", size
* 8);
2922 *skip_reason
= "skipped (unsupported size)";
2925 if ((sizeof(address
) * 8 > riscv_xlen(target
)) && (address
>> riscv_xlen(target
))) {
2926 LOG_DEBUG("Skipping mem %s via abstract access - abstract access only supports %u-bit address.",
2927 read
? "read" : "write", riscv_xlen(target
));
2928 *skip_reason
= "skipped (too large address)";
2931 if (read
&& size
!= increment
) {
2932 LOG_ERROR("Skipping mem read via abstract access - "
2933 "abstract command reads only support size==increment.");
2934 *skip_reason
= "skipped (unsupported increment)";
2942 * Performs a memory read using memory access abstract commands. The read sizes
2943 * supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16 byte
2944 * aamsize fields in the memory access abstract command.
/* On the first access with has_aampostincrement == YNM_MAYBE the target's
 * auto-increment support is probed and cached in info.  NOTE(review):
 * several original lines are elided in this extraction (the updateaddr
 * condition, pointer/address advance at loop end, error break, final
 * return) — confirm against the complete file. */
2946 static int read_memory_abstract(struct target
*target
, target_addr_t address
,
2947 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t increment
)
2949 RISCV013_INFO(info
);
2951 int result
= ERROR_OK
;
2952 bool use_aampostincrement
= info
->has_aampostincrement
!= YNM_NO
;
2954 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR
, count
,
2957 memset(buffer
, 0, count
* size
);
2959 /* Convert the size (bytes) to width (bits) */
2960 unsigned width
= size
<< 3;
2962 /* Create the command (physical address, postincrement, read) */
2963 uint32_t command
= access_memory_command(target
, false, width
, use_aampostincrement
, false);
2965 /* Execute the reads */
2966 uint8_t *p
= buffer
;
2967 bool updateaddr
= true;
2968 unsigned int width32
= (width
< 32) ? 32 : width
;
2969 for (uint32_t c
= 0; c
< count
; c
++) {
2970 /* Update the address if it is the first time or aampostincrement is not supported by the target. */
2972 /* Set arg1 to the address: address + c * size */
2973 result
= write_abstract_arg(target
, 1, address
+ c
* size
, riscv_xlen(target
));
2974 if (result
!= ERROR_OK
) {
2975 LOG_ERROR("Failed to write arg1 during read_memory_abstract().");
2980 /* Execute the command */
2981 result
= execute_abstract_command(target
, command
);
/* Probe whether aampostincrement actually works on this target. */
2983 if (info
->has_aampostincrement
== YNM_MAYBE
) {
2984 if (result
== ERROR_OK
) {
2985 /* Safety: double-check that the address was really auto-incremented */
2986 riscv_reg_t new_address
= read_abstract_arg(target
, 1, riscv_xlen(target
));
2987 if (new_address
== address
+ size
) {
2988 LOG_DEBUG("aampostincrement is supported on this target.");
2989 info
->has_aampostincrement
= YNM_YES
;
2991 LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
2992 info
->has_aampostincrement
= YNM_NO
;
2995 /* Try the same access but with postincrement disabled. */
2996 command
= access_memory_command(target
, false, width
, false, false);
2997 result
= execute_abstract_command(target
, command
);
2998 if (result
== ERROR_OK
) {
2999 LOG_DEBUG("aampostincrement is not supported on this target.");
3000 info
->has_aampostincrement
= YNM_NO
;
3005 if (result
!= ERROR_OK
)
3008 /* Copy arg0 to buffer (rounded width up to nearest 32) */
3009 riscv_reg_t value
= read_abstract_arg(target
, 0, width32
);
3010 buf_set_u64(p
, 0, 8 * size
, value
);
3012 if (info
->has_aampostincrement
== YNM_YES
)
3021 * Performs a memory write using memory access abstract commands. The write
3022 * sizes supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16
3023 * byte aamsize fields in the memory access abstract command.
/* Mirror of read_memory_abstract() for writes, including the one-time
 * aampostincrement probe.  NOTE(review): several original lines are
 * elided in this extraction (updateaddr condition, pointer advance,
 * error break, final return) — confirm against the complete file. */
3025 static int write_memory_abstract(struct target
*target
, target_addr_t address
,
3026 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
3028 RISCV013_INFO(info
);
3029 int result
= ERROR_OK
;
3030 bool use_aampostincrement
= info
->has_aampostincrement
!= YNM_NO
;
3032 LOG_DEBUG("writing %d words of %d bytes from 0x%" TARGET_PRIxADDR
, count
,
3035 /* Convert the size (bytes) to width (bits) */
3036 unsigned width
= size
<< 3;
3038 /* Create the command (physical address, postincrement, write) */
3039 uint32_t command
= access_memory_command(target
, false, width
, use_aampostincrement
, true);
3041 /* Execute the writes */
3042 const uint8_t *p
= buffer
;
3043 bool updateaddr
= true;
3044 for (uint32_t c
= 0; c
< count
; c
++) {
3045 /* Move data to arg0 */
3046 riscv_reg_t value
= buf_get_u64(p
, 0, 8 * size
);
3047 result
= write_abstract_arg(target
, 0, value
, riscv_xlen(target
));
3048 if (result
!= ERROR_OK
) {
3049 LOG_ERROR("Failed to write arg0 during write_memory_abstract().");
3053 /* Update the address if it is the first time or aampostincrement is not supported by the target. */
3055 /* Set arg1 to the address: address + c * size */
3056 result
= write_abstract_arg(target
, 1, address
+ c
* size
, riscv_xlen(target
));
3057 if (result
!= ERROR_OK
) {
3058 LOG_ERROR("Failed to write arg1 during write_memory_abstract().");
3063 /* Execute the command */
3064 result
= execute_abstract_command(target
, command
);
/* Probe whether aampostincrement actually works on this target. */
3066 if (info
->has_aampostincrement
== YNM_MAYBE
) {
3067 if (result
== ERROR_OK
) {
3068 /* Safety: double-check that the address was really auto-incremented */
3069 riscv_reg_t new_address
= read_abstract_arg(target
, 1, riscv_xlen(target
));
3070 if (new_address
== address
+ size
) {
3071 LOG_DEBUG("aampostincrement is supported on this target.");
3072 info
->has_aampostincrement
= YNM_YES
;
3074 LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
3075 info
->has_aampostincrement
= YNM_NO
;
3078 /* Try the same access but with postincrement disabled. */
3079 command
= access_memory_command(target
, false, width
, false, true);
3080 result
= execute_abstract_command(target
, command
);
3081 if (result
== ERROR_OK
) {
3082 LOG_DEBUG("aampostincrement is not supported on this target.");
3083 info
->has_aampostincrement
= YNM_NO
;
3088 if (result
!= ERROR_OK
)
3091 if (info
->has_aampostincrement
== YNM_YES
)
3100 * Read the requested memory, taking care to execute every read exactly once,
3101 * even if cmderr=busy is encountered.
/* Pipelined program-buffer read: the progbuf program (set up by the
 * caller) loads mem[s0] into s1 and advances s0; abstractauto re-runs it
 * on every DM_DATA0 read, so values stream memory -> s1 -> data0 ->
 * debugger.  On cmderr=busy the loop recovers the last good word from
 * data0/data1, works out how far the hart got, and resumes from there.
 * NOTE(review): many original lines are elided in this extraction
 * (progbuf setup, batch `reads` accounting, loop index management,
 * cleanup/return paths) — confirm against the complete file. */
3103 static int read_memory_progbuf_inner(struct target
*target
, target_addr_t address
,
3104 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t increment
)
3106 RISCV013_INFO(info
);
3108 int result
= ERROR_OK
;
3110 /* Write address to S0. */
3111 result
= register_write_direct(target
, GDB_REGNO_S0
, address
);
3112 if (result
!= ERROR_OK
)
/* With increment==0, s2 counts completed reads instead of the address. */
3115 if (increment
== 0 &&
3116 register_write_direct(target
, GDB_REGNO_S2
, 0) != ERROR_OK
)
3119 uint32_t command
= access_register_command(target
, GDB_REGNO_S1
,
3121 AC_ACCESS_REGISTER_TRANSFER
| AC_ACCESS_REGISTER_POSTEXEC
);
3122 if (execute_abstract_command(target
, command
) != ERROR_OK
)
3125 /* First read has just triggered. Result is in s1. */
3128 if (register_read_direct(target
, &value
, GDB_REGNO_S1
) != ERROR_OK
)
3130 buf_set_u64(buffer
, 0, 8 * size
, value
);
3131 log_memory_access(address
, value
, size
, true);
/* From here on, every read of DM_DATA0 re-executes the command. */
3135 if (dmi_write(target
, DM_ABSTRACTAUTO
,
3136 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET
) != ERROR_OK
)
3138 /* Read garbage from dmi_data0, which triggers another execution of the
3139 * program. Now dmi_data0 contains the first good result, and s1 the next
3141 if (dmi_read_exec(target
, NULL
, DM_DATA0
) != ERROR_OK
)
3144 /* read_addr is the next address that the hart will read from, which is the
3147 while (index
< count
) {
3148 riscv_addr_t read_addr
= address
+ index
* increment
;
3149 LOG_DEBUG("i=%d, count=%d, read_addr=0x%" PRIx64
, index
, count
, read_addr
);
3150 /* The pipeline looks like this:
3151 * memory -> s1 -> dm_data0 -> debugger
3153 * s0 contains read_addr
3154 * s1 contains mem[read_addr-size]
3155 * dm_data0 contains[read_addr-size*2]
3158 struct riscv_batch
*batch
= riscv_batch_alloc(target
, 32,
3159 info
->dmi_busy_delay
+ info
->ac_busy_delay
);
3164 for (unsigned j
= index
; j
< count
; j
++) {
3166 riscv_batch_add_dmi_read(batch
, DM_DATA1
);
3167 riscv_batch_add_dmi_read(batch
, DM_DATA0
);
3170 if (riscv_batch_full(batch
))
3174 batch_run(target
, batch
);
3176 /* Wait for the target to finish performing the last abstract command,
3177 * and update our copy of cmderr. If we see that DMI is busy here,
3178 * dmi_busy_delay will be incremented. */
3179 uint32_t abstractcs
;
3180 if (dmi_read(target
, &abstractcs
, DM_ABSTRACTCS
) != ERROR_OK
)
3182 while (get_field(abstractcs
, DM_ABSTRACTCS_BUSY
))
3183 if (dmi_read(target
, &abstractcs
, DM_ABSTRACTCS
) != ERROR_OK
)
3185 info
->cmderr
= get_field(abstractcs
, DM_ABSTRACTCS_CMDERR
);
3187 unsigned next_index
;
3188 unsigned ignore_last
= 0;
3189 switch (info
->cmderr
) {
3191 LOG_DEBUG("successful (partial?) memory read");
3192 next_index
= index
+ reads
;
3195 LOG_DEBUG("memory read resulted in busy response");
3197 increase_ac_busy_delay(target
);
3198 riscv013_clear_abstract_error(target
);
3200 dmi_write(target
, DM_ABSTRACTAUTO
, 0);
3202 uint32_t dmi_data0
, dmi_data1
= 0;
3203 /* This is definitely a good version of the value that we
3204 * attempted to read when we discovered that the target was
3206 if (dmi_read(target
, &dmi_data0
, DM_DATA0
) != ERROR_OK
) {
3207 riscv_batch_free(batch
);
3210 if (size
> 4 && dmi_read(target
, &dmi_data1
, DM_DATA1
) != ERROR_OK
) {
3211 riscv_batch_free(batch
);
3215 /* See how far we got, clobbering dmi_data0. */
3216 if (increment
== 0) {
3218 result
= register_read_direct(target
, &counter
, GDB_REGNO_S2
);
3219 next_index
= counter
;
3221 uint64_t next_read_addr
;
3222 result
= register_read_direct(target
, &next_read_addr
,
3224 next_index
= (next_read_addr
- address
) / increment
;
3226 if (result
!= ERROR_OK
) {
3227 riscv_batch_free(batch
);
3231 uint64_t value64
= (((uint64_t)dmi_data1
) << 32) | dmi_data0
;
3232 buf_set_u64(buffer
+ (next_index
- 2) * size
, 0, 8 * size
, value64
);
3233 log_memory_access(address
+ (next_index
- 2) * size
, value64
, size
, true);
3235 /* Restore the command, and execute it.
3236 * Now DM_DATA0 contains the next value just as it would if no
3237 * error had occurred. */
3238 dmi_write_exec(target
, DM_COMMAND
, command
, true);
3241 dmi_write(target
, DM_ABSTRACTAUTO
,
3242 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET
);
3248 LOG_DEBUG("error when reading memory, abstractcs=0x%08lx", (long)abstractcs
);
3249 riscv013_clear_abstract_error(target
);
3250 riscv_batch_free(batch
);
3251 result
= ERROR_FAIL
;
3255 /* Now read whatever we got out of the batch. */
3256 dmi_status_t status
= DMI_STATUS_SUCCESS
;
3259 for (unsigned j
= index
- 2; j
< index
+ reads
; j
++) {
3261 LOG_DEBUG("index=%d, reads=%d, next_index=%d, ignore_last=%d, j=%d",
3262 index
, reads
, next_index
, ignore_last
, j
);
3263 if (j
+ 3 + ignore_last
> next_index
)
3266 status
= riscv_batch_get_dmi_read_op(batch
, read
);
3267 uint64_t value
= riscv_batch_get_dmi_read_data(batch
, read
);
3269 if (status
!= DMI_STATUS_SUCCESS
) {
3270 /* If we're here because of busy count, dmi_busy_delay will
3271 * already have been increased and busy state will have been
3272 * cleared in dmi_read(). */
3273 /* In at least some implementations, we issue a read, and then
3274 * can get busy back when we try to scan out the read result,
3275 * and the actual read value is lost forever. Since this is
3276 * rare in any case, we return error here and rely on our
3277 * caller to reread the entire block. */
3278 LOG_WARNING("Batch memory read encountered DMI error %d. "
3279 "Falling back on slower reads.", status
);
3280 riscv_batch_free(batch
);
3281 result
= ERROR_FAIL
;
3285 status
= riscv_batch_get_dmi_read_op(batch
, read
);
3286 if (status
!= DMI_STATUS_SUCCESS
) {
3287 LOG_WARNING("Batch memory read encountered DMI error %d. "
3288 "Falling back on slower reads.", status
);
3289 riscv_batch_free(batch
);
3290 result
= ERROR_FAIL
;
3294 value
|= riscv_batch_get_dmi_read_data(batch
, read
);
3297 riscv_addr_t offset
= j
* size
;
3298 buf_set_u64(buffer
+ offset
, 0, 8 * size
, value
);
3299 log_memory_access(address
+ j
* increment
, value
, size
, true);
3304 riscv_batch_free(batch
);
/* Streaming done: turn off abstractauto before the final reads. */
3307 dmi_write(target
, DM_ABSTRACTAUTO
, 0);
3310 /* Read the penultimate word. */
3311 uint32_t dmi_data0
, dmi_data1
= 0;
3312 if (dmi_read(target
, &dmi_data0
, DM_DATA0
) != ERROR_OK
)
3314 if (size
> 4 && dmi_read(target
, &dmi_data1
, DM_DATA1
) != ERROR_OK
)
3316 uint64_t value64
= (((uint64_t)dmi_data1
) << 32) | dmi_data0
;
3317 buf_set_u64(buffer
+ size
* (count
- 2), 0, 8 * size
, value64
);
3318 log_memory_access(address
+ size
* (count
- 2), value64
, size
, true);
3321 /* Read the last word. */
3323 result
= register_read_direct(target
, &value
, GDB_REGNO_S1
);
3324 if (result
!= ERROR_OK
)
3326 buf_set_u64(buffer
+ size
* (count
-1), 0, 8 * size
, value
);
3327 log_memory_access(address
+ size
* (count
-1), value
, size
, true);
3332 dmi_write(target
, DM_ABSTRACTAUTO
, 0);
3337 /* Only need to save/restore one GPR to read a single word, and the progbuf
3338 * program doesn't need to increment. */
3339 static int read_memory_progbuf_one(struct target
*target
, target_addr_t address
,
3340 uint32_t size
, uint8_t *buffer
)
3342 uint64_t mstatus
= 0;
3343 uint64_t mstatus_old
= 0;
3344 if (modify_privilege(target
, &mstatus
, &mstatus_old
) != ERROR_OK
)
3348 int result
= ERROR_FAIL
;
3350 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
3351 goto restore_mstatus
;
3353 /* Write the program (load, increment) */
3354 struct riscv_program program
;
3355 riscv_program_init(&program
, target
);
3356 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3357 riscv_program_csrrsi(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3360 riscv_program_lbr(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, 0);
3363 riscv_program_lhr(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, 0);
3366 riscv_program_lwr(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, 0);
3369 riscv_program_ldr(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, 0);
3372 LOG_ERROR("Unsupported size: %d", size
);
3373 goto restore_mstatus
;
3375 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3376 riscv_program_csrrci(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3378 if (riscv_program_ebreak(&program
) != ERROR_OK
)
3379 goto restore_mstatus
;
3380 if (riscv_program_write(&program
) != ERROR_OK
)
3381 goto restore_mstatus
;
3383 /* Write address to S0, and execute buffer. */
3384 if (write_abstract_arg(target
, 0, address
, riscv_xlen(target
)) != ERROR_OK
)
3385 goto restore_mstatus
;
3386 uint32_t command
= access_register_command(target
, GDB_REGNO_S0
,
3387 riscv_xlen(target
), AC_ACCESS_REGISTER_WRITE
|
3388 AC_ACCESS_REGISTER_TRANSFER
| AC_ACCESS_REGISTER_POSTEXEC
);
3389 if (execute_abstract_command(target
, command
) != ERROR_OK
)
3393 if (register_read(target
, &value
, GDB_REGNO_S0
) != ERROR_OK
)
3395 buf_set_u64(buffer
, 0, 8 * size
, value
);
3396 log_memory_access(address
, value
, size
, true);
3400 if (riscv_set_register(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
3401 result
= ERROR_FAIL
;
3404 if (mstatus
!= mstatus_old
)
3405 if (register_write_direct(target
, GDB_REGNO_MSTATUS
, mstatus_old
))
3406 result
= ERROR_FAIL
;
3412 * Read the requested memory, silently handling memory access errors.
3414 static int read_memory_progbuf(struct target
*target
, target_addr_t address
,
3415 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t increment
)
3417 if (riscv_xlen(target
) < size
* 8) {
3418 LOG_ERROR("XLEN (%d) is too short for %d-bit memory read.",
3419 riscv_xlen(target
), size
* 8);
3423 int result
= ERROR_OK
;
3425 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR
, count
,
3430 memset(buffer
, 0, count
*size
);
3432 if (execute_fence(target
) != ERROR_OK
)
3436 return read_memory_progbuf_one(target
, address
, size
, buffer
);
3438 uint64_t mstatus
= 0;
3439 uint64_t mstatus_old
= 0;
3440 if (modify_privilege(target
, &mstatus
, &mstatus_old
) != ERROR_OK
)
3443 /* s0 holds the next address to read from
3444 * s1 holds the next data value read
3445 * s2 is a counter in case increment is 0
3447 uint64_t s0
, s1
, s2
;
3448 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
3450 if (register_read(target
, &s1
, GDB_REGNO_S1
) != ERROR_OK
)
3452 if (increment
== 0 && register_read(target
, &s2
, GDB_REGNO_S2
) != ERROR_OK
)
3455 /* Write the program (load, increment) */
3456 struct riscv_program program
;
3457 riscv_program_init(&program
, target
);
3458 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3459 riscv_program_csrrsi(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3463 riscv_program_lbr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3466 riscv_program_lhr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3469 riscv_program_lwr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3472 riscv_program_ldr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3475 LOG_ERROR("Unsupported size: %d", size
);
3479 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3480 riscv_program_csrrci(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3482 riscv_program_addi(&program
, GDB_REGNO_S2
, GDB_REGNO_S2
, 1);
3484 riscv_program_addi(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, increment
);
3486 if (riscv_program_ebreak(&program
) != ERROR_OK
)
3488 if (riscv_program_write(&program
) != ERROR_OK
)
3491 result
= read_memory_progbuf_inner(target
, address
, size
, count
, buffer
, increment
);
3493 if (result
!= ERROR_OK
) {
3494 /* The full read did not succeed, so we will try to read each word individually. */
3495 /* This will not be fast, but reading outside actual memory is a special case anyway. */
3496 /* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
3497 target_addr_t address_i
= address
;
3498 uint32_t count_i
= 1;
3499 uint8_t *buffer_i
= buffer
;
3501 for (uint32_t i
= 0; i
< count
; i
++, address_i
+= increment
, buffer_i
+= size
) {
3502 /* TODO: This is much slower than it needs to be because we end up
3503 * writing the address to read for every word we read. */
3504 result
= read_memory_progbuf_inner(target
, address_i
, size
, count_i
, buffer_i
, increment
);
3506 /* The read of a single word failed, so we will just return 0 for that instead */
3507 if (result
!= ERROR_OK
) {
3508 LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR
,
3511 buf_set_u64(buffer_i
, 0, 8 * size
, 0);
3517 riscv_set_register(target
, GDB_REGNO_S0
, s0
);
3518 riscv_set_register(target
, GDB_REGNO_S1
, s1
);
3520 riscv_set_register(target
, GDB_REGNO_S2
, s2
);
3522 /* Restore MSTATUS */
3523 if (mstatus
!= mstatus_old
)
3524 if (register_write_direct(target
, GDB_REGNO_MSTATUS
, mstatus_old
))
3530 static int read_memory(struct target
*target
, target_addr_t address
,
3531 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t increment
)
3536 if (size
!= 1 && size
!= 2 && size
!= 4 && size
!= 8 && size
!= 16) {
3537 LOG_ERROR("BUG: Unsupported size for memory read: %d", size
);
3541 int ret
= ERROR_FAIL
;
3543 RISCV013_INFO(info
);
3545 char *progbuf_result
= "disabled";
3546 char *sysbus_result
= "disabled";
3547 char *abstract_result
= "disabled";
3549 for (unsigned int i
= 0; i
< RISCV_NUM_MEM_ACCESS_METHODS
; i
++) {
3550 int method
= r
->mem_access_methods
[i
];
3552 if (method
== RISCV_MEM_ACCESS_PROGBUF
) {
3553 if (mem_should_skip_progbuf(target
, address
, size
, true, &progbuf_result
))
3556 ret
= read_memory_progbuf(target
, address
, size
, count
, buffer
, increment
);
3558 if (ret
!= ERROR_OK
)
3559 progbuf_result
= "failed";
3560 } else if (method
== RISCV_MEM_ACCESS_SYSBUS
) {
3561 if (mem_should_skip_sysbus(target
, address
, size
, increment
, true, &sysbus_result
))
3564 if (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) == 0)
3565 ret
= read_memory_bus_v0(target
, address
, size
, count
, buffer
, increment
);
3566 else if (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) == 1)
3567 ret
= read_memory_bus_v1(target
, address
, size
, count
, buffer
, increment
);
3569 if (ret
!= ERROR_OK
)
3570 sysbus_result
= "failed";
3571 } else if (method
== RISCV_MEM_ACCESS_ABSTRACT
) {
3572 if (mem_should_skip_abstract(target
, address
, size
, increment
, true, &abstract_result
))
3575 ret
= read_memory_abstract(target
, address
, size
, count
, buffer
, increment
);
3577 if (ret
!= ERROR_OK
)
3578 abstract_result
= "failed";
3579 } else if (method
== RISCV_MEM_ACCESS_UNSPECIFIED
)
3580 /* No further mem access method to try. */
3583 log_mem_access_result(target
, ret
== ERROR_OK
, method
, true);
3585 if (ret
== ERROR_OK
)
3589 LOG_ERROR("Target %s: Failed to read memory (addr=0x%" PRIx64
")", target_name(target
), address
);
3590 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result
, sysbus_result
, abstract_result
);
3594 static int write_memory_bus_v0(struct target
*target
, target_addr_t address
,
3595 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
3597 /*1) write sbaddress: for singlewrite and autoincrement, we need to write the address once*/
3598 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
3599 TARGET_PRIxADDR
, size
, count
, address
);
3600 dmi_write(target
, DM_SBADDRESS0
, address
);
3603 riscv_addr_t offset
= 0;
3604 riscv_addr_t t_addr
= 0;
3605 const uint8_t *t_buffer
= buffer
+ offset
;
3607 /* B.8 Writing Memory, single write check if we write in one go */
3608 if (count
== 1) { /* count is in bytes here */
3609 value
= buf_get_u64(t_buffer
, 0, 8 * size
);
3612 access
= set_field(access
, DM_SBCS_SBACCESS
, size
/2);
3613 dmi_write(target
, DM_SBCS
, access
);
3614 LOG_DEBUG("\r\naccess: 0x%08" PRIx64
, access
);
3615 LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64
, value
);
3616 dmi_write(target
, DM_SBDATA0
, value
);
3620 /*B.8 Writing Memory, using autoincrement*/
3623 access
= set_field(access
, DM_SBCS_SBACCESS
, size
/2);
3624 access
= set_field(access
, DM_SBCS_SBAUTOINCREMENT
, 1);
3625 LOG_DEBUG("\r\naccess: 0x%08" PRIx64
, access
);
3626 dmi_write(target
, DM_SBCS
, access
);
3628 /*2)set the value according to the size required and write*/
3629 for (riscv_addr_t i
= 0; i
< count
; ++i
) {
3631 /* for monitoring only */
3632 t_addr
= address
+ offset
;
3633 t_buffer
= buffer
+ offset
;
3635 value
= buf_get_u64(t_buffer
, 0, 8 * size
);
3636 LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
3637 PRIx64
, (uint32_t)t_addr
, (uint32_t)value
);
3638 dmi_write(target
, DM_SBDATA0
, value
);
3640 /*reset the autoincrement when finished (something weird is happening if this is not done at the end*/
3641 access
= set_field(access
, DM_SBCS_SBAUTOINCREMENT
, 0);
3642 dmi_write(target
, DM_SBCS
, access
);
3647 static int write_memory_bus_v1(struct target
*target
, target_addr_t address
,
3648 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
3650 RISCV013_INFO(info
);
3651 uint32_t sbcs
= sb_sbaccess(size
);
3652 sbcs
= set_field(sbcs
, DM_SBCS_SBAUTOINCREMENT
, 1);
3653 dmi_write(target
, DM_SBCS
, sbcs
);
3655 target_addr_t next_address
= address
;
3656 target_addr_t end_address
= address
+ count
* size
;
3660 sb_write_address(target
, next_address
, true);
3661 while (next_address
< end_address
) {
3662 LOG_DEBUG("transferring burst starting at address 0x%" TARGET_PRIxADDR
,
3665 struct riscv_batch
*batch
= riscv_batch_alloc(
3668 info
->dmi_busy_delay
+ info
->bus_master_write_delay
);
3672 for (uint32_t i
= (next_address
- address
) / size
; i
< count
; i
++) {
3673 const uint8_t *p
= buffer
+ i
* size
;
3675 if (riscv_batch_available_scans(batch
) < (size
+ 3) / 4)
3679 riscv_batch_add_dmi_write(batch
, DM_SBDATA3
,
3680 ((uint32_t) p
[12]) |
3681 (((uint32_t) p
[13]) << 8) |
3682 (((uint32_t) p
[14]) << 16) |
3683 (((uint32_t) p
[15]) << 24));
3686 riscv_batch_add_dmi_write(batch
, DM_SBDATA2
,
3688 (((uint32_t) p
[9]) << 8) |
3689 (((uint32_t) p
[10]) << 16) |
3690 (((uint32_t) p
[11]) << 24));
3692 riscv_batch_add_dmi_write(batch
, DM_SBDATA1
,
3694 (((uint32_t) p
[5]) << 8) |
3695 (((uint32_t) p
[6]) << 16) |
3696 (((uint32_t) p
[7]) << 24));
3697 uint32_t value
= p
[0];
3699 value
|= ((uint32_t) p
[2]) << 16;
3700 value
|= ((uint32_t) p
[3]) << 24;
3703 value
|= ((uint32_t) p
[1]) << 8;
3704 riscv_batch_add_dmi_write(batch
, DM_SBDATA0
, value
);
3706 log_memory_access(address
+ i
* size
, value
, size
, false);
3707 next_address
+= size
;
3710 /* Execute the batch of writes */
3711 result
= batch_run(target
, batch
);
3712 riscv_batch_free(batch
);
3713 if (result
!= ERROR_OK
)
3717 * At the same time, detect if DMI busy has occurred during the batch write. */
3718 bool dmi_busy_encountered
;
3719 if (dmi_op(target
, &sbcs
, &dmi_busy_encountered
, DMI_OP_READ
,
3720 DM_SBCS
, 0, false, true) != ERROR_OK
)
3722 if (dmi_busy_encountered
)
3723 LOG_DEBUG("DMI busy encountered during system bus write.");
3725 /* Wait until sbbusy goes low */
3726 time_t start
= time(NULL
);
3727 while (get_field(sbcs
, DM_SBCS_SBBUSY
)) {
3728 if (time(NULL
) - start
> riscv_command_timeout_sec
) {
3729 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
3730 "Increase the timeout with riscv set_command_timeout_sec.",
3731 riscv_command_timeout_sec
, sbcs
);
3734 if (dmi_read(target
, &sbcs
, DM_SBCS
) != ERROR_OK
)
3738 if (get_field(sbcs
, DM_SBCS_SBBUSYERROR
)) {
3739 /* We wrote while the target was busy. */
3740 LOG_DEBUG("Sbbusyerror encountered during system bus write.");
3741 /* Clear the sticky error flag. */
3742 dmi_write(target
, DM_SBCS
, sbcs
| DM_SBCS_SBBUSYERROR
);
3743 /* Slow down before trying again. */
3744 info
->bus_master_write_delay
+= info
->bus_master_write_delay
/ 10 + 1;
3747 if (get_field(sbcs
, DM_SBCS_SBBUSYERROR
) || dmi_busy_encountered
) {
3748 /* Recover from the case when the write commands were issued too fast.
3749 * Determine the address from which to resume writing. */
3750 next_address
= sb_read_address(target
);
3751 if (next_address
< address
) {
3752 /* This should never happen, probably buggy hardware. */
3753 LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
3754 " - buggy sbautoincrement in hw?", next_address
);
3755 /* Fail the whole operation. */
3758 /* Try again - resume writing. */
3762 unsigned int sberror
= get_field(sbcs
, DM_SBCS_SBERROR
);
3764 /* Sberror indicates the bus access failed, but not because we issued the writes
3765 * too fast. Cannot recover. Sbaddress holds the address where the error occurred
3766 * (unless sbautoincrement in the HW is buggy).
3768 target_addr_t sbaddress
= sb_read_address(target
);
3769 LOG_DEBUG("System bus access failed with sberror=%u (sbaddress=0x%" TARGET_PRIxADDR
")",
3770 sberror
, sbaddress
);
3771 if (sbaddress
< address
) {
3772 /* This should never happen, probably buggy hardware.
3773 * Make a note to the user not to trust the sbaddress value. */
3774 LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
3775 " - buggy sbautoincrement in hw?", next_address
);
3777 /* Clear the sticky error flag */
3778 dmi_write(target
, DM_SBCS
, DM_SBCS_SBERROR
);
3779 /* Fail the whole operation */
3787 static int write_memory_progbuf(struct target
*target
, target_addr_t address
,
3788 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
3790 RISCV013_INFO(info
);
3792 if (riscv_xlen(target
) < size
* 8) {
3793 LOG_ERROR("XLEN (%d) is too short for %d-bit memory write.",
3794 riscv_xlen(target
), size
* 8);
3798 LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count
, size
, (long)address
);
3802 uint64_t mstatus
= 0;
3803 uint64_t mstatus_old
= 0;
3804 if (modify_privilege(target
, &mstatus
, &mstatus_old
) != ERROR_OK
)
3807 /* s0 holds the next address to write to
3808 * s1 holds the next data value to write
3811 int result
= ERROR_OK
;
3813 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
3815 if (register_read(target
, &s1
, GDB_REGNO_S1
) != ERROR_OK
)
3818 /* Write the program (store, increment) */
3819 struct riscv_program program
;
3820 riscv_program_init(&program
, target
);
3821 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3822 riscv_program_csrrsi(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3826 riscv_program_sbr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3829 riscv_program_shr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3832 riscv_program_swr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3835 riscv_program_sdr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3838 LOG_ERROR("write_memory_progbuf(): Unsupported size: %d", size
);
3839 result
= ERROR_FAIL
;
3843 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3844 riscv_program_csrrci(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3845 riscv_program_addi(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, size
);
3847 result
= riscv_program_ebreak(&program
);
3848 if (result
!= ERROR_OK
)
3850 riscv_program_write(&program
);
3852 riscv_addr_t cur_addr
= address
;
3853 riscv_addr_t fin_addr
= address
+ (count
* size
);
3854 bool setup_needed
= true;
3855 LOG_DEBUG("writing until final address 0x%016" PRIx64
, fin_addr
);
3856 while (cur_addr
< fin_addr
) {
3857 LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64
,
3860 struct riscv_batch
*batch
= riscv_batch_alloc(
3863 info
->dmi_busy_delay
+ info
->ac_busy_delay
);
3867 /* To write another word, we put it in S1 and execute the program. */
3868 unsigned start
= (cur_addr
- address
) / size
;
3869 for (unsigned i
= start
; i
< count
; ++i
) {
3870 unsigned offset
= size
*i
;
3871 const uint8_t *t_buffer
= buffer
+ offset
;
3873 uint64_t value
= buf_get_u64(t_buffer
, 0, 8 * size
);
3875 log_memory_access(address
+ offset
, value
, size
, false);
3879 result
= register_write_direct(target
, GDB_REGNO_S0
,
3881 if (result
!= ERROR_OK
) {
3882 riscv_batch_free(batch
);
3888 dmi_write(target
, DM_DATA1
, value
>> 32);
3889 dmi_write(target
, DM_DATA0
, value
);
3891 /* Write and execute command that moves value into S1 and
3892 * executes program buffer. */
3893 uint32_t command
= access_register_command(target
,
3894 GDB_REGNO_S1
, riscv_xlen(target
),
3895 AC_ACCESS_REGISTER_POSTEXEC
|
3896 AC_ACCESS_REGISTER_TRANSFER
|
3897 AC_ACCESS_REGISTER_WRITE
);
3898 result
= execute_abstract_command(target
, command
);
3899 if (result
!= ERROR_OK
) {
3900 riscv_batch_free(batch
);
3904 /* Turn on autoexec */
3905 dmi_write(target
, DM_ABSTRACTAUTO
,
3906 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET
);
3908 setup_needed
= false;
3911 riscv_batch_add_dmi_write(batch
, DM_DATA1
, value
>> 32);
3912 riscv_batch_add_dmi_write(batch
, DM_DATA0
, value
);
3913 if (riscv_batch_full(batch
))
3918 result
= batch_run(target
, batch
);
3919 riscv_batch_free(batch
);
3920 if (result
!= ERROR_OK
)
3923 /* Note that if the scan resulted in a Busy DMI response, it
3924 * is this read to abstractcs that will cause the dmi_busy_delay
3925 * to be incremented if necessary. */
3927 uint32_t abstractcs
;
3928 bool dmi_busy_encountered
;
3929 result
= dmi_op(target
, &abstractcs
, &dmi_busy_encountered
,
3930 DMI_OP_READ
, DM_ABSTRACTCS
, 0, false, true);
3931 if (result
!= ERROR_OK
)
3933 while (get_field(abstractcs
, DM_ABSTRACTCS_BUSY
))
3934 if (dmi_read(target
, &abstractcs
, DM_ABSTRACTCS
) != ERROR_OK
)
3936 info
->cmderr
= get_field(abstractcs
, DM_ABSTRACTCS_CMDERR
);
3937 if (info
->cmderr
== CMDERR_NONE
&& !dmi_busy_encountered
) {
3938 LOG_DEBUG("successful (partial?) memory write");
3939 } else if (info
->cmderr
== CMDERR_BUSY
|| dmi_busy_encountered
) {
3940 if (info
->cmderr
== CMDERR_BUSY
)
3941 LOG_DEBUG("Memory write resulted in abstract command busy response.");
3942 else if (dmi_busy_encountered
)
3943 LOG_DEBUG("Memory write resulted in DMI busy response.");
3944 riscv013_clear_abstract_error(target
);
3945 increase_ac_busy_delay(target
);
3947 dmi_write(target
, DM_ABSTRACTAUTO
, 0);
3948 result
= register_read_direct(target
, &cur_addr
, GDB_REGNO_S0
);
3949 if (result
!= ERROR_OK
)
3951 setup_needed
= true;
3953 LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs
);
3954 riscv013_clear_abstract_error(target
);
3955 result
= ERROR_FAIL
;
3961 dmi_write(target
, DM_ABSTRACTAUTO
, 0);
3963 if (register_write_direct(target
, GDB_REGNO_S1
, s1
) != ERROR_OK
)
3965 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
3968 /* Restore MSTATUS */
3969 if (mstatus
!= mstatus_old
)
3970 if (register_write_direct(target
, GDB_REGNO_MSTATUS
, mstatus_old
))
3973 if (execute_fence(target
) != ERROR_OK
)
3979 static int write_memory(struct target
*target
, target_addr_t address
,
3980 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
3982 if (size
!= 1 && size
!= 2 && size
!= 4 && size
!= 8 && size
!= 16) {
3983 LOG_ERROR("BUG: Unsupported size for memory write: %d", size
);
3987 int ret
= ERROR_FAIL
;
3989 RISCV013_INFO(info
);
3991 char *progbuf_result
= "disabled";
3992 char *sysbus_result
= "disabled";
3993 char *abstract_result
= "disabled";
3995 for (unsigned int i
= 0; i
< RISCV_NUM_MEM_ACCESS_METHODS
; i
++) {
3996 int method
= r
->mem_access_methods
[i
];
3998 if (method
== RISCV_MEM_ACCESS_PROGBUF
) {
3999 if (mem_should_skip_progbuf(target
, address
, size
, false, &progbuf_result
))
4002 ret
= write_memory_progbuf(target
, address
, size
, count
, buffer
);
4004 if (ret
!= ERROR_OK
)
4005 progbuf_result
= "failed";
4006 } else if (method
== RISCV_MEM_ACCESS_SYSBUS
) {
4007 if (mem_should_skip_sysbus(target
, address
, size
, 0, false, &sysbus_result
))
4010 if (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) == 0)
4011 ret
= write_memory_bus_v0(target
, address
, size
, count
, buffer
);
4012 else if (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) == 1)
4013 ret
= write_memory_bus_v1(target
, address
, size
, count
, buffer
);
4015 if (ret
!= ERROR_OK
)
4016 sysbus_result
= "failed";
4017 } else if (method
== RISCV_MEM_ACCESS_ABSTRACT
) {
4018 if (mem_should_skip_abstract(target
, address
, size
, 0, false, &abstract_result
))
4021 ret
= write_memory_abstract(target
, address
, size
, count
, buffer
);
4023 if (ret
!= ERROR_OK
)
4024 abstract_result
= "failed";
4025 } else if (method
== RISCV_MEM_ACCESS_UNSPECIFIED
)
4026 /* No further mem access method to try. */
4029 log_mem_access_result(target
, ret
== ERROR_OK
, method
, false);
4031 if (ret
== ERROR_OK
)
4035 LOG_ERROR("Target %s: Failed to write memory (addr=0x%" PRIx64
")", target_name(target
), address
);
4036 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result
, sysbus_result
, abstract_result
);
4040 static int arch_state(struct target
*target
)
4045 struct target_type riscv013_target
= {
4048 .init_target
= init_target
,
4049 .deinit_target
= deinit_target
,
4052 .poll
= &riscv_openocd_poll
,
4053 .halt
= &riscv_halt
,
4054 .step
= &riscv_openocd_step
,
4056 .assert_reset
= assert_reset
,
4057 .deassert_reset
= deassert_reset
,
4059 .write_memory
= write_memory
,
4061 .arch_state
= arch_state
4064 /*** 0.13-specific implementations of various RISC-V helper functions. ***/
4065 static int riscv013_get_register(struct target
*target
,
4066 riscv_reg_t
*value
, int rid
)
4068 LOG_DEBUG("[%s] reading register %s", target_name(target
),
4069 gdb_regno_name(rid
));
4071 if (riscv_select_current_hart(target
) != ERROR_OK
)
4074 int result
= ERROR_OK
;
4075 if (rid
== GDB_REGNO_PC
) {
4076 /* TODO: move this into riscv.c. */
4077 result
= register_read(target
, value
, GDB_REGNO_DPC
);
4078 LOG_DEBUG("[%d] read PC from DPC: 0x%" PRIx64
, target
->coreid
, *value
);
4079 } else if (rid
== GDB_REGNO_PRIV
) {
4081 /* TODO: move this into riscv.c. */
4082 result
= register_read(target
, &dcsr
, GDB_REGNO_DCSR
);
4083 *value
= set_field(0, VIRT_PRIV_V
, get_field(dcsr
, CSR_DCSR_V
));
4084 *value
= set_field(*value
, VIRT_PRIV_PRV
, get_field(dcsr
, CSR_DCSR_PRV
));
4086 result
= register_read(target
, value
, rid
);
4087 if (result
!= ERROR_OK
)
4094 static int riscv013_set_register(struct target
*target
, int rid
, uint64_t value
)
4096 riscv013_select_current_hart(target
);
4097 LOG_DEBUG("[%d] writing 0x%" PRIx64
" to register %s",
4098 target
->coreid
, value
, gdb_regno_name(rid
));
4100 if (rid
<= GDB_REGNO_XPR31
) {
4101 return register_write_direct(target
, rid
, value
);
4102 } else if (rid
== GDB_REGNO_PC
) {
4103 LOG_DEBUG("[%d] writing PC to DPC: 0x%" PRIx64
, target
->coreid
, value
);
4104 register_write_direct(target
, GDB_REGNO_DPC
, value
);
4105 uint64_t actual_value
;
4106 register_read_direct(target
, &actual_value
, GDB_REGNO_DPC
);
4107 LOG_DEBUG("[%d] actual DPC written: 0x%016" PRIx64
, target
->coreid
, actual_value
);
4108 if (value
!= actual_value
) {
4109 LOG_ERROR("Written PC (0x%" PRIx64
") does not match read back "
4110 "value (0x%" PRIx64
")", value
, actual_value
);
4113 } else if (rid
== GDB_REGNO_PRIV
) {
4115 register_read(target
, &dcsr
, GDB_REGNO_DCSR
);
4116 dcsr
= set_field(dcsr
, CSR_DCSR_PRV
, get_field(value
, VIRT_PRIV_PRV
));
4117 dcsr
= set_field(dcsr
, CSR_DCSR_V
, get_field(value
, VIRT_PRIV_V
));
4118 return register_write_direct(target
, GDB_REGNO_DCSR
, dcsr
);
4120 return register_write_direct(target
, rid
, value
);
4126 static int riscv013_select_current_hart(struct target
*target
)
4130 dm013_info_t
*dm
= get_dm(target
);
4133 if (r
->current_hartid
== dm
->current_hartid
)
4137 /* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
4138 if (dmi_read(target
, &dmcontrol
, DM_DMCONTROL
) != ERROR_OK
)
4140 dmcontrol
= set_hartsel(dmcontrol
, r
->current_hartid
);
4141 int result
= dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4142 dm
->current_hartid
= r
->current_hartid
;
4146 /* Select all harts that were prepped and that are selectable, clearing the
4147 * prepped flag on the harts that actually were selected. */
4148 static int select_prepped_harts(struct target
*target
, bool *use_hasel
)
4150 dm013_info_t
*dm
= get_dm(target
);
4153 if (!dm
->hasel_supported
) {
4160 assert(dm
->hart_count
);
4161 unsigned hawindow_count
= (dm
->hart_count
+ 31) / 32;
4162 uint32_t hawindow
[hawindow_count
];
4164 memset(hawindow
, 0, sizeof(uint32_t) * hawindow_count
);
4166 target_list_t
*entry
;
4167 unsigned total_selected
= 0;
4168 list_for_each_entry(entry
, &dm
->target_list
, list
) {
4169 struct target
*t
= entry
->target
;
4170 struct riscv_info
*r
= riscv_info(t
);
4171 riscv013_info_t
*info
= get_info(t
);
4172 unsigned index
= info
->index
;
4173 LOG_DEBUG("index=%d, coreid=%d, prepped=%d", index
, t
->coreid
, r
->prepped
);
4174 r
->selected
= r
->prepped
;
4176 hawindow
[index
/ 32] |= 1 << (index
% 32);
4183 /* Don't use hasel if we only need to talk to one hart. */
4184 if (total_selected
<= 1) {
4189 for (unsigned i
= 0; i
< hawindow_count
; i
++) {
4190 if (dmi_write(target
, DM_HAWINDOWSEL
, i
) != ERROR_OK
)
4192 if (dmi_write(target
, DM_HAWINDOW
, hawindow
[i
]) != ERROR_OK
)
4200 static int riscv013_halt_prep(struct target
*target
)
4205 static int riscv013_halt_go(struct target
*target
)
4207 bool use_hasel
= false;
4208 if (select_prepped_harts(target
, &use_hasel
) != ERROR_OK
)
4212 LOG_DEBUG("halting hart %d", r
->current_hartid
);
4214 /* Issue the halt command, and then wait for the current hart to halt. */
4215 uint32_t dmcontrol
= DM_DMCONTROL_DMACTIVE
| DM_DMCONTROL_HALTREQ
;
4217 dmcontrol
|= DM_DMCONTROL_HASEL
;
4218 dmcontrol
= set_hartsel(dmcontrol
, r
->current_hartid
);
4219 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4220 for (size_t i
= 0; i
< 256; ++i
)
4221 if (riscv_is_halted(target
))
4224 if (!riscv_is_halted(target
)) {
4226 if (dmstatus_read(target
, &dmstatus
, true) != ERROR_OK
)
4228 if (dmi_read(target
, &dmcontrol
, DM_DMCONTROL
) != ERROR_OK
)
4231 LOG_ERROR("unable to halt hart %d", r
->current_hartid
);
4232 LOG_ERROR(" dmcontrol=0x%08x", dmcontrol
);
4233 LOG_ERROR(" dmstatus =0x%08x", dmstatus
);
4237 dmcontrol
= set_field(dmcontrol
, DM_DMCONTROL_HALTREQ
, 0);
4238 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4241 target_list_t
*entry
;
4242 dm013_info_t
*dm
= get_dm(target
);
4245 list_for_each_entry(entry
, &dm
->target_list
, list
) {
4246 struct target
*t
= entry
->target
;
4247 t
->state
= TARGET_HALTED
;
4248 if (t
->debug_reason
== DBG_REASON_NOTHALTED
)
4249 t
->debug_reason
= DBG_REASON_DBGRQ
;
4252 /* The "else" case is handled in halt_go(). */
4257 static int riscv013_resume_go(struct target
*target
)
4259 bool use_hasel
= false;
4260 if (select_prepped_harts(target
, &use_hasel
) != ERROR_OK
)
4263 return riscv013_step_or_resume_current_hart(target
, false, use_hasel
);
4266 static int riscv013_step_current_hart(struct target
*target
)
4268 return riscv013_step_or_resume_current_hart(target
, true, false);
4271 static int riscv013_resume_prep(struct target
*target
)
4273 return riscv013_on_step_or_resume(target
, false);
4276 static int riscv013_on_step(struct target
*target
)
4278 return riscv013_on_step_or_resume(target
, true);
4281 static int riscv013_on_halt(struct target
*target
)
4286 static bool riscv013_is_halted(struct target
*target
)
4289 if (dmstatus_read(target
, &dmstatus
, true) != ERROR_OK
)
4291 if (get_field(dmstatus
, DM_DMSTATUS_ANYUNAVAIL
))
4292 LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target
));
4293 if (get_field(dmstatus
, DM_DMSTATUS_ANYNONEXISTENT
))
4294 LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target
));
4295 if (get_field(dmstatus
, DM_DMSTATUS_ANYHAVERESET
)) {
4296 int hartid
= riscv_current_hartid(target
);
4297 LOG_INFO("Hart %d unexpectedly reset!", hartid
);
4298 /* TODO: Can we make this more obvious to eg. a gdb user? */
4299 uint32_t dmcontrol
= DM_DMCONTROL_DMACTIVE
|
4300 DM_DMCONTROL_ACKHAVERESET
;
4301 dmcontrol
= set_hartsel(dmcontrol
, hartid
);
4302 /* If we had been halted when we reset, request another halt. If we
4303 * ended up running out of reset, then the user will (hopefully) get a
4304 * message that a reset happened, that the target is running, and then
4305 * that it is halted again once the request goes through.
4307 if (target
->state
== TARGET_HALTED
)
4308 dmcontrol
|= DM_DMCONTROL_HALTREQ
;
4309 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4311 return get_field(dmstatus
, DM_DMSTATUS_ALLHALTED
);
4314 static enum riscv_halt_reason
riscv013_halt_reason(struct target
*target
)
4317 int result
= register_read(target
, &dcsr
, GDB_REGNO_DCSR
);
4318 if (result
!= ERROR_OK
)
4319 return RISCV_HALT_UNKNOWN
;
4321 LOG_DEBUG("dcsr.cause: 0x%" PRIx64
, get_field(dcsr
, CSR_DCSR_CAUSE
));
4323 switch (get_field(dcsr
, CSR_DCSR_CAUSE
)) {
4324 case CSR_DCSR_CAUSE_SWBP
:
4325 return RISCV_HALT_BREAKPOINT
;
4326 case CSR_DCSR_CAUSE_TRIGGER
:
4327 /* We could get here before triggers are enumerated if a trigger was
4328 * already set when we connected. Force enumeration now, which has the
4329 * side effect of clearing any triggers we did not set. */
4330 riscv_enumerate_triggers(target
);
4331 LOG_DEBUG("{%d} halted because of trigger", target
->coreid
);
4332 return RISCV_HALT_TRIGGER
;
4333 case CSR_DCSR_CAUSE_STEP
:
4334 return RISCV_HALT_SINGLESTEP
;
4335 case CSR_DCSR_CAUSE_DEBUGINT
:
4336 case CSR_DCSR_CAUSE_HALT
:
4337 return RISCV_HALT_INTERRUPT
;
4338 case CSR_DCSR_CAUSE_GROUP
:
4339 return RISCV_HALT_GROUP
;
4342 LOG_ERROR("Unknown DCSR cause field: 0x%" PRIx64
, get_field(dcsr
, CSR_DCSR_CAUSE
));
4343 LOG_ERROR(" dcsr=0x%016lx", (long)dcsr
);
4344 return RISCV_HALT_UNKNOWN
;
4347 int riscv013_write_debug_buffer(struct target
*target
, unsigned index
, riscv_insn_t data
)
4349 dm013_info_t
*dm
= get_dm(target
);
4352 if (dm
->progbuf_cache
[index
] != data
) {
4353 if (dmi_write(target
, DM_PROGBUF0
+ index
, data
) != ERROR_OK
)
4355 dm
->progbuf_cache
[index
] = data
;
4357 LOG_DEBUG("cache hit for 0x%" PRIx32
" @%d", data
, index
);
4362 riscv_insn_t
riscv013_read_debug_buffer(struct target
*target
, unsigned index
)
4365 dmi_read(target
, &value
, DM_PROGBUF0
+ index
);
4369 int riscv013_execute_debug_buffer(struct target
*target
)
4371 uint32_t run_program
= 0;
4372 run_program
= set_field(run_program
, AC_ACCESS_REGISTER_AARSIZE
, 2);
4373 run_program
= set_field(run_program
, AC_ACCESS_REGISTER_POSTEXEC
, 1);
4374 run_program
= set_field(run_program
, AC_ACCESS_REGISTER_TRANSFER
, 0);
4375 run_program
= set_field(run_program
, AC_ACCESS_REGISTER_REGNO
, 0x1000);
4377 return execute_abstract_command(target
, run_program
);
4380 void riscv013_fill_dmi_write_u64(struct target
*target
, char *buf
, int a
, uint64_t d
)
4382 RISCV013_INFO(info
);
4383 buf_set_u64((unsigned char *)buf
, DTM_DMI_OP_OFFSET
, DTM_DMI_OP_LENGTH
, DMI_OP_WRITE
);
4384 buf_set_u64((unsigned char *)buf
, DTM_DMI_DATA_OFFSET
, DTM_DMI_DATA_LENGTH
, d
);
4385 buf_set_u64((unsigned char *)buf
, DTM_DMI_ADDRESS_OFFSET
, info
->abits
, a
);
4388 void riscv013_fill_dmi_read_u64(struct target
*target
, char *buf
, int a
)
4390 RISCV013_INFO(info
);
4391 buf_set_u64((unsigned char *)buf
, DTM_DMI_OP_OFFSET
, DTM_DMI_OP_LENGTH
, DMI_OP_READ
);
4392 buf_set_u64((unsigned char *)buf
, DTM_DMI_DATA_OFFSET
, DTM_DMI_DATA_LENGTH
, 0);
4393 buf_set_u64((unsigned char *)buf
, DTM_DMI_ADDRESS_OFFSET
, info
->abits
, a
);
4396 void riscv013_fill_dmi_nop_u64(struct target
*target
, char *buf
)
4398 RISCV013_INFO(info
);
4399 buf_set_u64((unsigned char *)buf
, DTM_DMI_OP_OFFSET
, DTM_DMI_OP_LENGTH
, DMI_OP_NOP
);
4400 buf_set_u64((unsigned char *)buf
, DTM_DMI_DATA_OFFSET
, DTM_DMI_DATA_LENGTH
, 0);
4401 buf_set_u64((unsigned char *)buf
, DTM_DMI_ADDRESS_OFFSET
, info
->abits
, 0);
4404 int riscv013_dmi_write_u64_bits(struct target
*target
)
4406 RISCV013_INFO(info
);
4407 return info
->abits
+ DTM_DMI_DATA_LENGTH
+ DTM_DMI_OP_LENGTH
;
4410 static int maybe_execute_fence_i(struct target
*target
)
4412 if (has_sufficient_progbuf(target
, 3))
4413 return execute_fence(target
);
4417 /* Helper Functions. */
4418 static int riscv013_on_step_or_resume(struct target
*target
, bool step
)
4420 if (maybe_execute_fence_i(target
) != ERROR_OK
)
4423 /* We want to twiddle some bits in the debug CSR so debugging works. */
4425 int result
= register_read(target
, &dcsr
, GDB_REGNO_DCSR
);
4426 if (result
!= ERROR_OK
)
4428 dcsr
= set_field(dcsr
, CSR_DCSR_STEP
, step
);
4429 dcsr
= set_field(dcsr
, CSR_DCSR_EBREAKM
, riscv_ebreakm
);
4430 dcsr
= set_field(dcsr
, CSR_DCSR_EBREAKS
, riscv_ebreaks
);
4431 dcsr
= set_field(dcsr
, CSR_DCSR_EBREAKU
, riscv_ebreaku
);
4432 return riscv_set_register(target
, GDB_REGNO_DCSR
, dcsr
);
4435 static int riscv013_step_or_resume_current_hart(struct target
*target
,
4436 bool step
, bool use_hasel
)
4439 LOG_DEBUG("resuming hart %d (for step?=%d)", r
->current_hartid
, step
);
4440 if (!riscv_is_halted(target
)) {
4441 LOG_ERROR("Hart %d is not halted!", r
->current_hartid
);
4445 /* Issue the resume command, and then wait for the current hart to resume. */
4446 uint32_t dmcontrol
= DM_DMCONTROL_DMACTIVE
| DM_DMCONTROL_RESUMEREQ
;
4448 dmcontrol
|= DM_DMCONTROL_HASEL
;
4449 dmcontrol
= set_hartsel(dmcontrol
, r
->current_hartid
);
4450 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4452 dmcontrol
= set_field(dmcontrol
, DM_DMCONTROL_HASEL
, 0);
4453 dmcontrol
= set_field(dmcontrol
, DM_DMCONTROL_RESUMEREQ
, 0);
4456 for (size_t i
= 0; i
< 256; ++i
) {
4458 if (dmstatus_read(target
, &dmstatus
, true) != ERROR_OK
)
4460 if (get_field(dmstatus
, DM_DMSTATUS_ALLRESUMEACK
) == 0)
4462 if (step
&& get_field(dmstatus
, DM_DMSTATUS_ALLHALTED
) == 0)
4465 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4469 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4471 LOG_ERROR("unable to resume hart %d", r
->current_hartid
);
4472 if (dmstatus_read(target
, &dmstatus
, true) != ERROR_OK
)
4474 LOG_ERROR(" dmstatus =0x%08x", dmstatus
);
4477 LOG_ERROR(" was stepping, halting");
4485 void riscv013_clear_abstract_error(struct target
*target
)
4487 /* Wait for busy to go away. */
4488 time_t start
= time(NULL
);
4489 uint32_t abstractcs
;
4490 dmi_read(target
, &abstractcs
, DM_ABSTRACTCS
);
4491 while (get_field(abstractcs
, DM_ABSTRACTCS_BUSY
)) {
4492 dmi_read(target
, &abstractcs
, DM_ABSTRACTCS
);
4494 if (time(NULL
) - start
> riscv_command_timeout_sec
) {
4495 LOG_ERROR("abstractcs.busy is not going low after %d seconds "
4496 "(abstractcs=0x%x). The target is either really slow or "
4497 "broken. You could increase the timeout with riscv "
4498 "set_command_timeout_sec.",
4499 riscv_command_timeout_sec
, abstractcs
);
4503 /* Clear the error status. */
4504 dmi_write(target
, DM_ABSTRACTCS
, DM_ABSTRACTCS_CMDERR
);