1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
16 #include "target/target.h"
17 #include "target/algorithm.h"
18 #include "target/target_type.h"
19 #include <helper/log.h>
20 #include "jtag/jtag.h"
21 #include "target/register.h"
22 #include "target/breakpoints.h"
23 #include "helper/time_support.h"
24 #include "helper/list.h"
26 #include "debug_defines.h"
27 #include "rtos/rtos.h"
32 static int riscv013_on_step_or_resume(struct target
*target
, bool step
);
33 static int riscv013_step_or_resume_current_hart(struct target
*target
,
34 bool step
, bool use_hasel
);
35 static void riscv013_clear_abstract_error(struct target
*target
);
37 /* Implementations of the functions in struct riscv_info. */
38 static int riscv013_get_register(struct target
*target
,
39 riscv_reg_t
*value
, int rid
);
40 static int riscv013_set_register(struct target
*target
, int regid
, uint64_t value
);
41 static int riscv013_select_current_hart(struct target
*target
);
42 static int riscv013_halt_prep(struct target
*target
);
43 static int riscv013_halt_go(struct target
*target
);
44 static int riscv013_resume_go(struct target
*target
);
45 static int riscv013_step_current_hart(struct target
*target
);
46 static int riscv013_on_halt(struct target
*target
);
47 static int riscv013_on_step(struct target
*target
);
48 static int riscv013_resume_prep(struct target
*target
);
49 static bool riscv013_is_halted(struct target
*target
);
50 static enum riscv_halt_reason
riscv013_halt_reason(struct target
*target
);
51 static int riscv013_write_debug_buffer(struct target
*target
, unsigned index
,
53 static riscv_insn_t
riscv013_read_debug_buffer(struct target
*target
, unsigned
55 static int riscv013_execute_debug_buffer(struct target
*target
);
56 static void riscv013_fill_dmi_write_u64(struct target
*target
, char *buf
, int a
, uint64_t d
);
57 static void riscv013_fill_dmi_read_u64(struct target
*target
, char *buf
, int a
);
58 static int riscv013_dmi_write_u64_bits(struct target
*target
);
59 static void riscv013_fill_dmi_nop_u64(struct target
*target
, char *buf
);
60 static int register_read(struct target
*target
, uint64_t *value
, uint32_t number
);
61 static int register_read_direct(struct target
*target
, uint64_t *value
, uint32_t number
);
62 static int register_write_direct(struct target
*target
, unsigned number
,
64 static int read_memory(struct target
*target
, target_addr_t address
,
65 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t increment
);
66 static int write_memory(struct target
*target
, target_addr_t address
,
67 uint32_t size
, uint32_t count
, const uint8_t *buffer
);
70 * Since almost everything can be accomplish by scanning the dbus register, all
71 * functions here assume dbus is already selected. The exception are functions
72 * called directly by OpenOCD, which can't assume anything about what's
73 * currently in IR. They should set IR to dbus explicitly.
76 #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
77 #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
79 #define CSR_DCSR_CAUSE_SWBP 1
80 #define CSR_DCSR_CAUSE_TRIGGER 2
81 #define CSR_DCSR_CAUSE_DEBUGINT 3
82 #define CSR_DCSR_CAUSE_STEP 4
83 #define CSR_DCSR_CAUSE_HALT 5
84 #define CSR_DCSR_CAUSE_GROUP 6
86 #define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
88 /*** JTAG registers. ***/
96 DMI_STATUS_SUCCESS
= 0,
97 DMI_STATUS_FAILED
= 2,
107 /*** Debug Bus registers. ***/
109 #define CMDERR_NONE 0
110 #define CMDERR_BUSY 1
111 #define CMDERR_NOT_SUPPORTED 2
112 #define CMDERR_EXCEPTION 3
113 #define CMDERR_HALT_RESUME 4
114 #define CMDERR_OTHER 7
116 /*** Info about the core being debugged. ***/
123 bool read
, write
, execute
;
134 struct list_head list
;
135 int abs_chain_position
;
137 /* The number of harts connected to this DM. */
139 /* Indicates we already reset this DM, so don't need to do it again. */
141 /* Targets that are connected to this DM. */
142 struct list_head target_list
;
143 /* The currently selected hartid on this DM. */
145 bool hasel_supported
;
147 /* The program buffer stores executable code. 0 is an illegal instruction,
148 * so we use 0 to mean the cached value is invalid. */
149 uint32_t progbuf_cache
[16];
153 struct list_head list
;
154 struct target
*target
;
158 /* The indexed used to address this hart in its DM. */
160 /* Number of address bits in the dbus register. */
162 /* Number of abstract command data registers. */
164 /* Number of words in the Program Buffer. */
165 unsigned progbufsize
;
167 /* We cache the read-only bits of sbcs here. */
170 yes_no_maybe_t progbuf_writable
;
171 /* We only need the address so that we know the alignment of the buffer. */
172 riscv_addr_t progbuf_address
;
174 /* Number of run-test/idle cycles the target requests we do after each dbus
176 unsigned int dtmcs_idle
;
178 /* This value is incremented every time a dbus access comes back as "busy".
179 * It's used to determine how many run-test/idle cycles to feed the target
180 * in between accesses. */
181 unsigned int dmi_busy_delay
;
183 /* Number of run-test/idle cycles to add between consecutive bus master
184 * reads/writes respectively. */
185 unsigned int bus_master_write_delay
, bus_master_read_delay
;
187 /* This value is increased every time we tried to execute two commands
188 * consecutively, and the second one failed because the previous hadn't
189 * completed yet. It's used to add extra run-test/idle cycles after
190 * starting a command, so we don't have to waste time checking for busy to
192 unsigned int ac_busy_delay
;
194 bool abstract_read_csr_supported
;
195 bool abstract_write_csr_supported
;
196 bool abstract_read_fpr_supported
;
197 bool abstract_write_fpr_supported
;
199 yes_no_maybe_t has_aampostincrement
;
201 /* When a function returns some error due to a failure indicated by the
202 * target in cmderr, the caller can look here to see what that error was.
203 * (Compare with errno.) */
206 /* Some fields from hartinfo. */
211 /* The width of the hartsel field. */
214 /* DM that provides access to this target. */
218 static LIST_HEAD(dm_list
);
220 static riscv013_info_t
*get_info(const struct target
*target
)
222 struct riscv_info
*info
= target
->arch_info
;
224 assert(info
->version_specific
);
225 return info
->version_specific
;
229 * Return the DM structure for this target. If there isn't one, find it in the
230 * global list of DMs. If it's not in there, then create one and initialize it
233 static dm013_info_t
*get_dm(struct target
*target
)
239 int abs_chain_position
= target
->tap
->abs_chain_position
;
242 dm013_info_t
*dm
= NULL
;
243 list_for_each_entry(entry
, &dm_list
, list
) {
244 if (entry
->abs_chain_position
== abs_chain_position
) {
251 LOG_DEBUG("[%d] Allocating new DM", target
->coreid
);
252 dm
= calloc(1, sizeof(dm013_info_t
));
255 dm
->abs_chain_position
= abs_chain_position
;
256 dm
->current_hartid
= -1;
258 INIT_LIST_HEAD(&dm
->target_list
);
259 list_add(&dm
->list
, &dm_list
);
263 target_list_t
*target_entry
;
264 list_for_each_entry(target_entry
, &dm
->target_list
, list
) {
265 if (target_entry
->target
== target
)
268 target_entry
= calloc(1, sizeof(*target_entry
));
273 target_entry
->target
= target
;
274 list_add(&target_entry
->list
, &dm
->target_list
);
279 static uint32_t set_hartsel(uint32_t initial
, uint32_t index
)
281 initial
&= ~DM_DMCONTROL_HARTSELLO
;
282 initial
&= ~DM_DMCONTROL_HARTSELHI
;
284 uint32_t index_lo
= index
& ((1 << DM_DMCONTROL_HARTSELLO_LENGTH
) - 1);
285 initial
|= index_lo
<< DM_DMCONTROL_HARTSELLO_OFFSET
;
286 uint32_t index_hi
= index
>> DM_DMCONTROL_HARTSELLO_LENGTH
;
287 assert(index_hi
< 1 << DM_DMCONTROL_HARTSELHI_LENGTH
);
288 initial
|= index_hi
<< DM_DMCONTROL_HARTSELHI_OFFSET
;
293 static void decode_dmi(char *text
, unsigned address
, unsigned data
)
295 static const struct {
300 { DM_DMCONTROL
, DM_DMCONTROL_HALTREQ
, "haltreq" },
301 { DM_DMCONTROL
, DM_DMCONTROL_RESUMEREQ
, "resumereq" },
302 { DM_DMCONTROL
, DM_DMCONTROL_HARTRESET
, "hartreset" },
303 { DM_DMCONTROL
, DM_DMCONTROL_HASEL
, "hasel" },
304 { DM_DMCONTROL
, DM_DMCONTROL_HARTSELHI
, "hartselhi" },
305 { DM_DMCONTROL
, DM_DMCONTROL_HARTSELLO
, "hartsello" },
306 { DM_DMCONTROL
, DM_DMCONTROL_NDMRESET
, "ndmreset" },
307 { DM_DMCONTROL
, DM_DMCONTROL_DMACTIVE
, "dmactive" },
308 { DM_DMCONTROL
, DM_DMCONTROL_ACKHAVERESET
, "ackhavereset" },
310 { DM_DMSTATUS
, DM_DMSTATUS_IMPEBREAK
, "impebreak" },
311 { DM_DMSTATUS
, DM_DMSTATUS_ALLHAVERESET
, "allhavereset" },
312 { DM_DMSTATUS
, DM_DMSTATUS_ANYHAVERESET
, "anyhavereset" },
313 { DM_DMSTATUS
, DM_DMSTATUS_ALLRESUMEACK
, "allresumeack" },
314 { DM_DMSTATUS
, DM_DMSTATUS_ANYRESUMEACK
, "anyresumeack" },
315 { DM_DMSTATUS
, DM_DMSTATUS_ALLNONEXISTENT
, "allnonexistent" },
316 { DM_DMSTATUS
, DM_DMSTATUS_ANYNONEXISTENT
, "anynonexistent" },
317 { DM_DMSTATUS
, DM_DMSTATUS_ALLUNAVAIL
, "allunavail" },
318 { DM_DMSTATUS
, DM_DMSTATUS_ANYUNAVAIL
, "anyunavail" },
319 { DM_DMSTATUS
, DM_DMSTATUS_ALLRUNNING
, "allrunning" },
320 { DM_DMSTATUS
, DM_DMSTATUS_ANYRUNNING
, "anyrunning" },
321 { DM_DMSTATUS
, DM_DMSTATUS_ALLHALTED
, "allhalted" },
322 { DM_DMSTATUS
, DM_DMSTATUS_ANYHALTED
, "anyhalted" },
323 { DM_DMSTATUS
, DM_DMSTATUS_AUTHENTICATED
, "authenticated" },
324 { DM_DMSTATUS
, DM_DMSTATUS_AUTHBUSY
, "authbusy" },
325 { DM_DMSTATUS
, DM_DMSTATUS_HASRESETHALTREQ
, "hasresethaltreq" },
326 { DM_DMSTATUS
, DM_DMSTATUS_CONFSTRPTRVALID
, "confstrptrvalid" },
327 { DM_DMSTATUS
, DM_DMSTATUS_VERSION
, "version" },
329 { DM_ABSTRACTCS
, DM_ABSTRACTCS_PROGBUFSIZE
, "progbufsize" },
330 { DM_ABSTRACTCS
, DM_ABSTRACTCS_BUSY
, "busy" },
331 { DM_ABSTRACTCS
, DM_ABSTRACTCS_CMDERR
, "cmderr" },
332 { DM_ABSTRACTCS
, DM_ABSTRACTCS_DATACOUNT
, "datacount" },
334 { DM_COMMAND
, DM_COMMAND_CMDTYPE
, "cmdtype" },
336 { DM_SBCS
, DM_SBCS_SBVERSION
, "sbversion" },
337 { DM_SBCS
, DM_SBCS_SBBUSYERROR
, "sbbusyerror" },
338 { DM_SBCS
, DM_SBCS_SBBUSY
, "sbbusy" },
339 { DM_SBCS
, DM_SBCS_SBREADONADDR
, "sbreadonaddr" },
340 { DM_SBCS
, DM_SBCS_SBACCESS
, "sbaccess" },
341 { DM_SBCS
, DM_SBCS_SBAUTOINCREMENT
, "sbautoincrement" },
342 { DM_SBCS
, DM_SBCS_SBREADONDATA
, "sbreadondata" },
343 { DM_SBCS
, DM_SBCS_SBERROR
, "sberror" },
344 { DM_SBCS
, DM_SBCS_SBASIZE
, "sbasize" },
345 { DM_SBCS
, DM_SBCS_SBACCESS128
, "sbaccess128" },
346 { DM_SBCS
, DM_SBCS_SBACCESS64
, "sbaccess64" },
347 { DM_SBCS
, DM_SBCS_SBACCESS32
, "sbaccess32" },
348 { DM_SBCS
, DM_SBCS_SBACCESS16
, "sbaccess16" },
349 { DM_SBCS
, DM_SBCS_SBACCESS8
, "sbaccess8" },
353 for (unsigned i
= 0; i
< ARRAY_SIZE(description
); i
++) {
354 if (description
[i
].address
== address
) {
355 uint64_t mask
= description
[i
].mask
;
356 unsigned value
= get_field(data
, mask
);
360 if (mask
& (mask
>> 1)) {
361 /* If the field is more than 1 bit wide. */
362 sprintf(text
, "%s=%d", description
[i
].name
, value
);
364 strcpy(text
, description
[i
].name
);
366 text
+= strlen(text
);
372 static void dump_field(int idle
, const struct scan_field
*field
)
374 static const char * const op_string
[] = {"-", "r", "w", "?"};
375 static const char * const status_string
[] = {"+", "?", "F", "b"};
377 if (debug_level
< LOG_LVL_DEBUG
)
380 uint64_t out
= buf_get_u64(field
->out_value
, 0, field
->num_bits
);
381 unsigned int out_op
= get_field(out
, DTM_DMI_OP
);
382 unsigned int out_data
= get_field(out
, DTM_DMI_DATA
);
383 unsigned int out_address
= out
>> DTM_DMI_ADDRESS_OFFSET
;
385 uint64_t in
= buf_get_u64(field
->in_value
, 0, field
->num_bits
);
386 unsigned int in_op
= get_field(in
, DTM_DMI_OP
);
387 unsigned int in_data
= get_field(in
, DTM_DMI_DATA
);
388 unsigned int in_address
= in
>> DTM_DMI_ADDRESS_OFFSET
;
390 log_printf_lf(LOG_LVL_DEBUG
,
391 __FILE__
, __LINE__
, "scan",
392 "%db %s %08x @%02x -> %s %08x @%02x; %di",
393 field
->num_bits
, op_string
[out_op
], out_data
, out_address
,
394 status_string
[in_op
], in_data
, in_address
, idle
);
398 decode_dmi(out_text
, out_address
, out_data
);
399 decode_dmi(in_text
, in_address
, in_data
);
400 if (in_text
[0] || out_text
[0]) {
401 log_printf_lf(LOG_LVL_DEBUG
, __FILE__
, __LINE__
, "scan", "%s -> %s",
406 /*** Utility functions. ***/
408 static void select_dmi(struct target
*target
)
410 if (bscan_tunnel_ir_width
!= 0) {
411 select_dmi_via_bscan(target
);
414 jtag_add_ir_scan(target
->tap
, &select_dbus
, TAP_IDLE
);
417 static uint32_t dtmcontrol_scan(struct target
*target
, uint32_t out
)
419 struct scan_field field
;
421 uint8_t out_value
[4] = { 0 };
423 if (bscan_tunnel_ir_width
!= 0)
424 return dtmcontrol_scan_via_bscan(target
, out
);
426 buf_set_u32(out_value
, 0, 32, out
);
428 jtag_add_ir_scan(target
->tap
, &select_dtmcontrol
, TAP_IDLE
);
431 field
.out_value
= out_value
;
432 field
.in_value
= in_value
;
433 jtag_add_dr_scan(target
->tap
, 1, &field
, TAP_IDLE
);
435 /* Always return to dmi. */
438 int retval
= jtag_execute_queue();
439 if (retval
!= ERROR_OK
) {
440 LOG_ERROR("failed jtag scan: %d", retval
);
444 uint32_t in
= buf_get_u32(field
.in_value
, 0, 32);
445 LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out
, in
);
450 static void increase_dmi_busy_delay(struct target
*target
)
452 riscv013_info_t
*info
= get_info(target
);
453 info
->dmi_busy_delay
+= info
->dmi_busy_delay
/ 10 + 1;
454 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
455 info
->dtmcs_idle
, info
->dmi_busy_delay
,
456 info
->ac_busy_delay
);
458 dtmcontrol_scan(target
, DTM_DTMCS_DMIRESET
);
462 * exec: If this is set, assume the scan results in an execution, so more
463 * run-test/idle cycles may be required.
465 static dmi_status_t
dmi_scan(struct target
*target
, uint32_t *address_in
,
466 uint32_t *data_in
, dmi_op_t op
, uint32_t address_out
, uint32_t data_out
,
469 riscv013_info_t
*info
= get_info(target
);
471 unsigned num_bits
= info
->abits
+ DTM_DMI_OP_LENGTH
+ DTM_DMI_DATA_LENGTH
;
472 size_t num_bytes
= (num_bits
+ 7) / 8;
473 uint8_t in
[num_bytes
];
474 uint8_t out
[num_bytes
];
475 struct scan_field field
= {
476 .num_bits
= num_bits
,
480 riscv_bscan_tunneled_scan_context_t bscan_ctxt
;
482 if (r
->reset_delays_wait
>= 0) {
483 r
->reset_delays_wait
--;
484 if (r
->reset_delays_wait
< 0) {
485 info
->dmi_busy_delay
= 0;
486 info
->ac_busy_delay
= 0;
490 memset(in
, 0, num_bytes
);
491 memset(out
, 0, num_bytes
);
493 assert(info
->abits
!= 0);
495 buf_set_u32(out
, DTM_DMI_OP_OFFSET
, DTM_DMI_OP_LENGTH
, op
);
496 buf_set_u32(out
, DTM_DMI_DATA_OFFSET
, DTM_DMI_DATA_LENGTH
, data_out
);
497 buf_set_u32(out
, DTM_DMI_ADDRESS_OFFSET
, info
->abits
, address_out
);
499 /* I wanted to place this code in a different function, but the way JTAG command
500 queueing works in the jtag handling functions, the scan fields either have to be
501 heap allocated, global/static, or else they need to stay on the stack until
502 the jtag_execute_queue() call. Heap or static fields in this case doesn't seem
503 the best fit. Declaring stack based field values in a subsidiary function call wouldn't
505 if (bscan_tunnel_ir_width
!= 0) {
506 riscv_add_bscan_tunneled_scan(target
, &field
, &bscan_ctxt
);
508 /* Assume dbus is already selected. */
509 jtag_add_dr_scan(target
->tap
, 1, &field
, TAP_IDLE
);
512 int idle_count
= info
->dmi_busy_delay
;
514 idle_count
+= info
->ac_busy_delay
;
517 jtag_add_runtest(idle_count
, TAP_IDLE
);
519 int retval
= jtag_execute_queue();
520 if (retval
!= ERROR_OK
) {
521 LOG_ERROR("dmi_scan failed jtag scan");
524 return DMI_STATUS_FAILED
;
527 if (bscan_tunnel_ir_width
!= 0) {
528 /* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
529 buffer_shr(in
, num_bytes
, 1);
533 *data_in
= buf_get_u32(in
, DTM_DMI_DATA_OFFSET
, DTM_DMI_DATA_LENGTH
);
536 *address_in
= buf_get_u32(in
, DTM_DMI_ADDRESS_OFFSET
, info
->abits
);
537 dump_field(idle_count
, &field
);
538 return buf_get_u32(in
, DTM_DMI_OP_OFFSET
, DTM_DMI_OP_LENGTH
);
543 * @param data_in The data we received from the target.
544 * @param dmi_busy_encountered
545 * If non-NULL, will be updated to reflect whether DMI busy was
546 * encountered while executing this operation or not.
547 * @param dmi_op The operation to perform (read/write/nop).
548 * @param address The address argument to that operation.
549 * @param data_out The data to send to the target.
551 * @param exec When true, this scan will execute something, so extra RTI
552 * cycles may be added.
553 * @param ensure_success
554 * Scan a nop after the requested operation, ensuring the
555 * DMI operation succeeded.
557 static int dmi_op_timeout(struct target
*target
, uint32_t *data_in
,
558 bool *dmi_busy_encountered
, int dmi_op
, uint32_t address
,
559 uint32_t data_out
, int timeout_sec
, bool exec
, bool ensure_success
)
566 if (dmi_busy_encountered
)
567 *dmi_busy_encountered
= false;
581 LOG_ERROR("Invalid DMI operation: %d", dmi_op
);
587 time_t start
= time(NULL
);
588 /* This first loop performs the request. Note that if for some reason this
589 * stays busy, it is actually due to the previous access. */
591 status
= dmi_scan(target
, NULL
, NULL
, dmi_op
, address
, data_out
,
593 if (status
== DMI_STATUS_BUSY
) {
594 increase_dmi_busy_delay(target
);
595 if (dmi_busy_encountered
)
596 *dmi_busy_encountered
= true;
597 } else if (status
== DMI_STATUS_SUCCESS
) {
600 LOG_ERROR("failed %s at 0x%x, status=%d", op_name
, address
, status
);
601 dtmcontrol_scan(target
, DTM_DTMCS_DMIRESET
);
604 if (time(NULL
) - start
> timeout_sec
)
605 return ERROR_TIMEOUT_REACHED
;
608 if (status
!= DMI_STATUS_SUCCESS
) {
609 LOG_ERROR("Failed %s at 0x%x; status=%d", op_name
, address
, status
);
613 if (ensure_success
) {
614 /* This second loop ensures the request succeeded, and gets back data.
615 * Note that NOP can result in a 'busy' result as well, but that would be
616 * noticed on the next DMI access we do. */
618 status
= dmi_scan(target
, &address_in
, data_in
, DMI_OP_NOP
, address
, 0,
620 if (status
== DMI_STATUS_BUSY
) {
621 increase_dmi_busy_delay(target
);
622 if (dmi_busy_encountered
)
623 *dmi_busy_encountered
= true;
624 } else if (status
== DMI_STATUS_SUCCESS
) {
628 LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
629 op_name
, address
, *data_in
, status
);
631 LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name
, address
,
634 dtmcontrol_scan(target
, DTM_DTMCS_DMIRESET
);
637 if (time(NULL
) - start
> timeout_sec
)
638 return ERROR_TIMEOUT_REACHED
;
645 static int dmi_op(struct target
*target
, uint32_t *data_in
,
646 bool *dmi_busy_encountered
, int dmi_op
, uint32_t address
,
647 uint32_t data_out
, bool exec
, bool ensure_success
)
649 int result
= dmi_op_timeout(target
, data_in
, dmi_busy_encountered
, dmi_op
,
650 address
, data_out
, riscv_command_timeout_sec
, exec
, ensure_success
);
651 if (result
== ERROR_TIMEOUT_REACHED
) {
652 LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
653 "either really slow or broken. You could increase the "
654 "timeout with riscv set_command_timeout_sec.",
655 riscv_command_timeout_sec
);
661 static int dmi_read(struct target
*target
, uint32_t *value
, uint32_t address
)
663 return dmi_op(target
, value
, NULL
, DMI_OP_READ
, address
, 0, false, true);
666 static int dmi_read_exec(struct target
*target
, uint32_t *value
, uint32_t address
)
668 return dmi_op(target
, value
, NULL
, DMI_OP_READ
, address
, 0, true, true);
671 static int dmi_write(struct target
*target
, uint32_t address
, uint32_t value
)
673 return dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, address
, value
, false, true);
676 static int dmi_write_exec(struct target
*target
, uint32_t address
,
677 uint32_t value
, bool ensure_success
)
679 return dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, address
, value
, true, ensure_success
);
682 static int dmstatus_read_timeout(struct target
*target
, uint32_t *dmstatus
,
683 bool authenticated
, unsigned timeout_sec
)
685 int result
= dmi_op_timeout(target
, dmstatus
, NULL
, DMI_OP_READ
,
686 DM_DMSTATUS
, 0, timeout_sec
, false, true);
687 if (result
!= ERROR_OK
)
689 int dmstatus_version
= get_field(*dmstatus
, DM_DMSTATUS_VERSION
);
690 if (dmstatus_version
!= 2 && dmstatus_version
!= 3) {
691 LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (1.0), not "
692 "%d (dmstatus=0x%x). This error might be caused by a JTAG "
693 "signal issue. Try reducing the JTAG clock speed.",
694 get_field(*dmstatus
, DM_DMSTATUS_VERSION
), *dmstatus
);
695 } else if (authenticated
&& !get_field(*dmstatus
, DM_DMSTATUS_AUTHENTICATED
)) {
696 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
697 "(dmstatus=0x%x). Use `riscv authdata_read` and "
698 "`riscv authdata_write` commands to authenticate.", *dmstatus
);
704 static int dmstatus_read(struct target
*target
, uint32_t *dmstatus
,
707 return dmstatus_read_timeout(target
, dmstatus
, authenticated
,
708 riscv_command_timeout_sec
);
711 static void increase_ac_busy_delay(struct target
*target
)
713 riscv013_info_t
*info
= get_info(target
);
714 info
->ac_busy_delay
+= info
->ac_busy_delay
/ 10 + 1;
715 LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
716 info
->dtmcs_idle
, info
->dmi_busy_delay
,
717 info
->ac_busy_delay
);
720 static uint32_t __attribute__((unused
)) abstract_register_size(unsigned width
)
724 return set_field(0, AC_ACCESS_REGISTER_AARSIZE
, 2);
726 return set_field(0, AC_ACCESS_REGISTER_AARSIZE
, 3);
728 return set_field(0, AC_ACCESS_REGISTER_AARSIZE
, 4);
730 LOG_ERROR("Unsupported register width: %d", width
);
735 static int wait_for_idle(struct target
*target
, uint32_t *abstractcs
)
738 time_t start
= time(NULL
);
740 if (dmi_read(target
, abstractcs
, DM_ABSTRACTCS
) != ERROR_OK
)
743 if (get_field(*abstractcs
, DM_ABSTRACTCS_BUSY
) == 0)
746 if (time(NULL
) - start
> riscv_command_timeout_sec
) {
747 info
->cmderr
= get_field(*abstractcs
, DM_ABSTRACTCS_CMDERR
);
748 if (info
->cmderr
!= CMDERR_NONE
) {
749 const char *errors
[8] = {
759 LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
760 errors
[info
->cmderr
], *abstractcs
);
763 LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
764 "Increase the timeout with riscv set_command_timeout_sec.",
765 riscv_command_timeout_sec
,
772 static int execute_abstract_command(struct target
*target
, uint32_t command
)
775 if (debug_level
>= LOG_LVL_DEBUG
) {
776 switch (get_field(command
, DM_COMMAND_CMDTYPE
)) {
778 LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
779 "transfer=%d, write=%d, regno=0x%x",
781 8 << get_field(command
, AC_ACCESS_REGISTER_AARSIZE
),
782 get_field(command
, AC_ACCESS_REGISTER_POSTEXEC
),
783 get_field(command
, AC_ACCESS_REGISTER_TRANSFER
),
784 get_field(command
, AC_ACCESS_REGISTER_WRITE
),
785 get_field(command
, AC_ACCESS_REGISTER_REGNO
));
788 LOG_DEBUG("command=0x%x", command
);
793 if (dmi_write_exec(target
, DM_COMMAND
, command
, false) != ERROR_OK
)
796 uint32_t abstractcs
= 0;
797 int result
= wait_for_idle(target
, &abstractcs
);
799 info
->cmderr
= get_field(abstractcs
, DM_ABSTRACTCS_CMDERR
);
800 if (info
->cmderr
!= 0 || result
!= ERROR_OK
) {
801 LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command
, abstractcs
);
802 /* Clear the error. */
803 dmi_write(target
, DM_ABSTRACTCS
, DM_ABSTRACTCS_CMDERR
);
810 static riscv_reg_t
read_abstract_arg(struct target
*target
, unsigned index
,
813 riscv_reg_t value
= 0;
815 unsigned offset
= index
* size_bits
/ 32;
818 LOG_ERROR("Unsupported size: %d bits", size_bits
);
821 dmi_read(target
, &v
, DM_DATA0
+ offset
+ 1);
822 value
|= ((uint64_t) v
) << 32;
825 dmi_read(target
, &v
, DM_DATA0
+ offset
);
831 static int write_abstract_arg(struct target
*target
, unsigned index
,
832 riscv_reg_t value
, unsigned size_bits
)
834 unsigned offset
= index
* size_bits
/ 32;
837 LOG_ERROR("Unsupported size: %d bits", size_bits
);
840 dmi_write(target
, DM_DATA0
+ offset
+ 1, value
>> 32);
843 dmi_write(target
, DM_DATA0
+ offset
, value
);
851 static uint32_t access_register_command(struct target
*target
, uint32_t number
,
852 unsigned size
, uint32_t flags
)
854 uint32_t command
= set_field(0, DM_COMMAND_CMDTYPE
, 0);
857 command
= set_field(command
, AC_ACCESS_REGISTER_AARSIZE
, 2);
860 command
= set_field(command
, AC_ACCESS_REGISTER_AARSIZE
, 3);
863 LOG_ERROR("%d-bit register %s not supported.", size
,
864 gdb_regno_name(number
));
868 if (number
<= GDB_REGNO_XPR31
) {
869 command
= set_field(command
, AC_ACCESS_REGISTER_REGNO
,
870 0x1000 + number
- GDB_REGNO_ZERO
);
871 } else if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
) {
872 command
= set_field(command
, AC_ACCESS_REGISTER_REGNO
,
873 0x1020 + number
- GDB_REGNO_FPR0
);
874 } else if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
) {
875 command
= set_field(command
, AC_ACCESS_REGISTER_REGNO
,
876 number
- GDB_REGNO_CSR0
);
877 } else if (number
>= GDB_REGNO_COUNT
) {
878 /* Custom register. */
879 assert(target
->reg_cache
->reg_list
[number
].arch_info
);
880 riscv_reg_info_t
*reg_info
= target
->reg_cache
->reg_list
[number
].arch_info
;
882 command
= set_field(command
, AC_ACCESS_REGISTER_REGNO
,
883 0xc000 + reg_info
->custom_number
);
893 static int register_read_abstract(struct target
*target
, uint64_t *value
,
894 uint32_t number
, unsigned size
)
898 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
&&
899 !info
->abstract_read_fpr_supported
)
901 if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
&&
902 !info
->abstract_read_csr_supported
)
904 /* The spec doesn't define abstract register numbers for vector registers. */
905 if (number
>= GDB_REGNO_V0
&& number
<= GDB_REGNO_V31
)
908 uint32_t command
= access_register_command(target
, number
, size
,
909 AC_ACCESS_REGISTER_TRANSFER
);
911 int result
= execute_abstract_command(target
, command
);
912 if (result
!= ERROR_OK
) {
913 if (info
->cmderr
== CMDERR_NOT_SUPPORTED
) {
914 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
) {
915 info
->abstract_read_fpr_supported
= false;
916 LOG_INFO("Disabling abstract command reads from FPRs.");
917 } else if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
) {
918 info
->abstract_read_csr_supported
= false;
919 LOG_INFO("Disabling abstract command reads from CSRs.");
926 *value
= read_abstract_arg(target
, 0, size
);
931 static int register_write_abstract(struct target
*target
, uint32_t number
,
932 uint64_t value
, unsigned size
)
936 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
&&
937 !info
->abstract_write_fpr_supported
)
939 if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
&&
940 !info
->abstract_write_csr_supported
)
943 uint32_t command
= access_register_command(target
, number
, size
,
944 AC_ACCESS_REGISTER_TRANSFER
|
945 AC_ACCESS_REGISTER_WRITE
);
947 if (write_abstract_arg(target
, 0, value
, size
) != ERROR_OK
)
950 int result
= execute_abstract_command(target
, command
);
951 if (result
!= ERROR_OK
) {
952 if (info
->cmderr
== CMDERR_NOT_SUPPORTED
) {
953 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
) {
954 info
->abstract_write_fpr_supported
= false;
955 LOG_INFO("Disabling abstract command writes to FPRs.");
956 } else if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
) {
957 info
->abstract_write_csr_supported
= false;
958 LOG_INFO("Disabling abstract command writes to CSRs.");
968 * Sets the AAMSIZE field of a memory access abstract command based on
971 static uint32_t abstract_memory_size(unsigned width
)
975 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE
, 0);
977 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE
, 1);
979 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE
, 2);
981 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE
, 3);
983 return set_field(0, AC_ACCESS_MEMORY_AAMSIZE
, 4);
985 LOG_ERROR("Unsupported memory width: %d", width
);
991 * Creates a memory access abstract command.
993 static uint32_t access_memory_command(struct target
*target
, bool virtual,
994 unsigned width
, bool postincrement
, bool write
)
996 uint32_t command
= set_field(0, AC_ACCESS_MEMORY_CMDTYPE
, 2);
997 command
= set_field(command
, AC_ACCESS_MEMORY_AAMVIRTUAL
, virtual);
998 command
|= abstract_memory_size(width
);
999 command
= set_field(command
, AC_ACCESS_MEMORY_AAMPOSTINCREMENT
,
1001 command
= set_field(command
, AC_ACCESS_MEMORY_WRITE
, write
);
1006 static int examine_progbuf(struct target
*target
)
1008 riscv013_info_t
*info
= get_info(target
);
1010 if (info
->progbuf_writable
!= YNM_MAYBE
)
1013 /* Figure out if progbuf is writable. */
1015 if (info
->progbufsize
< 1) {
1016 info
->progbuf_writable
= YNM_NO
;
1017 LOG_INFO("No program buffer present.");
1022 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
1025 struct riscv_program program
;
1026 riscv_program_init(&program
, target
);
1027 riscv_program_insert(&program
, auipc(S0
));
1028 if (riscv_program_exec(&program
, target
) != ERROR_OK
)
1031 if (register_read_direct(target
, &info
->progbuf_address
, GDB_REGNO_S0
) != ERROR_OK
)
1034 riscv_program_init(&program
, target
);
1035 riscv_program_insert(&program
, sw(S0
, S0
, 0));
1036 int result
= riscv_program_exec(&program
, target
);
1038 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
1041 if (result
!= ERROR_OK
) {
1042 /* This program might have failed if the program buffer is not
1044 info
->progbuf_writable
= YNM_NO
;
1049 if (dmi_read(target
, &written
, DM_PROGBUF0
) != ERROR_OK
)
1051 if (written
== (uint32_t) info
->progbuf_address
) {
1052 LOG_INFO("progbuf is writable at 0x%" PRIx64
,
1053 info
->progbuf_address
);
1054 info
->progbuf_writable
= YNM_YES
;
1057 LOG_INFO("progbuf is not writeable at 0x%" PRIx64
,
1058 info
->progbuf_address
);
1059 info
->progbuf_writable
= YNM_NO
;
1065 static int is_fpu_reg(uint32_t gdb_regno
)
1067 return (gdb_regno
>= GDB_REGNO_FPR0
&& gdb_regno
<= GDB_REGNO_FPR31
) ||
1068 (gdb_regno
== GDB_REGNO_CSR0
+ CSR_FFLAGS
) ||
1069 (gdb_regno
== GDB_REGNO_CSR0
+ CSR_FRM
) ||
1070 (gdb_regno
== GDB_REGNO_CSR0
+ CSR_FCSR
);
1073 static int is_vector_reg(uint32_t gdb_regno
)
1075 return (gdb_regno
>= GDB_REGNO_V0
&& gdb_regno
<= GDB_REGNO_V31
) ||
1076 gdb_regno
== GDB_REGNO_VSTART
||
1077 gdb_regno
== GDB_REGNO_VXSAT
||
1078 gdb_regno
== GDB_REGNO_VXRM
||
1079 gdb_regno
== GDB_REGNO_VL
||
1080 gdb_regno
== GDB_REGNO_VTYPE
||
1081 gdb_regno
== GDB_REGNO_VLENB
;
1084 static int prep_for_register_access(struct target
*target
, uint64_t *mstatus
,
1087 if (is_fpu_reg(regno
) || is_vector_reg(regno
)) {
1088 if (register_read(target
, mstatus
, GDB_REGNO_MSTATUS
) != ERROR_OK
)
1090 if (is_fpu_reg(regno
) && (*mstatus
& MSTATUS_FS
) == 0) {
1091 if (register_write_direct(target
, GDB_REGNO_MSTATUS
,
1092 set_field(*mstatus
, MSTATUS_FS
, 1)) != ERROR_OK
)
1094 } else if (is_vector_reg(regno
) && (*mstatus
& MSTATUS_VS
) == 0) {
1095 if (register_write_direct(target
, GDB_REGNO_MSTATUS
,
1096 set_field(*mstatus
, MSTATUS_VS
, 1)) != ERROR_OK
)
1105 static int cleanup_after_register_access(struct target
*target
,
1106 uint64_t mstatus
, int regno
)
1108 if ((is_fpu_reg(regno
) && (mstatus
& MSTATUS_FS
) == 0) ||
1109 (is_vector_reg(regno
) && (mstatus
& MSTATUS_VS
) == 0))
1110 if (register_write_direct(target
, GDB_REGNO_MSTATUS
, mstatus
) != ERROR_OK
)
1122 /* How can the debugger access this memory? */
1123 memory_space_t memory_space
;
1124 /* Memory address to access the scratch memory from the hart. */
1125 riscv_addr_t hart_address
;
1126 /* Memory address to access the scratch memory from the debugger. */
1127 riscv_addr_t debug_address
;
1128 struct working_area
*area
;
1132 * Find some scratch memory to be used with the given program.
1134 static int scratch_reserve(struct target
*target
,
1135 scratch_mem_t
*scratch
,
1136 struct riscv_program
*program
,
1137 unsigned size_bytes
)
1139 riscv_addr_t alignment
= 1;
1140 while (alignment
< size_bytes
)
1143 scratch
->area
= NULL
;
1145 riscv013_info_t
*info
= get_info(target
);
1147 /* Option 1: See if data# registers can be used as the scratch memory */
1148 if (info
->dataaccess
== 1) {
1149 /* Sign extend dataaddr. */
1150 scratch
->hart_address
= info
->dataaddr
;
1151 if (info
->dataaddr
& (1<<11))
1152 scratch
->hart_address
|= 0xfffffffffffff000ULL
;
1154 scratch
->hart_address
= (scratch
->hart_address
+ alignment
- 1) & ~(alignment
- 1);
1156 if ((size_bytes
+ scratch
->hart_address
- info
->dataaddr
+ 3) / 4 >=
1158 scratch
->memory_space
= SPACE_DM_DATA
;
1159 scratch
->debug_address
= (scratch
->hart_address
- info
->dataaddr
) / 4;
1164 /* Option 2: See if progbuf can be used as the scratch memory */
1165 if (examine_progbuf(target
) != ERROR_OK
)
1168 /* Allow for ebreak at the end of the program. */
1169 unsigned program_size
= (program
->instruction_count
+ 1) * 4;
1170 scratch
->hart_address
= (info
->progbuf_address
+ program_size
+ alignment
- 1) &
1172 if ((info
->progbuf_writable
== YNM_YES
) &&
1173 ((size_bytes
+ scratch
->hart_address
- info
->progbuf_address
+ 3) / 4 >=
1174 info
->progbufsize
)) {
1175 scratch
->memory_space
= SPACE_DMI_PROGBUF
;
1176 scratch
->debug_address
= (scratch
->hart_address
- info
->progbuf_address
) / 4;
1180 /* Option 3: User-configured memory area as scratch RAM */
1181 if (target_alloc_working_area(target
, size_bytes
+ alignment
- 1,
1182 &scratch
->area
) == ERROR_OK
) {
1183 scratch
->hart_address
= (scratch
->area
->address
+ alignment
- 1) &
1185 scratch
->memory_space
= SPACE_DMI_RAM
;
1186 scratch
->debug_address
= scratch
->hart_address
;
1190 LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
1191 "a work area with 'configure -work-area-phys'.", size_bytes
);
1195 static int scratch_release(struct target
*target
,
1196 scratch_mem_t
*scratch
)
1198 return target_free_working_area(target
, scratch
->area
);
1201 static int scratch_read64(struct target
*target
, scratch_mem_t
*scratch
,
1205 switch (scratch
->memory_space
) {
1207 if (dmi_read(target
, &v
, DM_DATA0
+ scratch
->debug_address
) != ERROR_OK
)
1210 if (dmi_read(target
, &v
, DM_DATA1
+ scratch
->debug_address
) != ERROR_OK
)
1212 *value
|= ((uint64_t) v
) << 32;
1214 case SPACE_DMI_PROGBUF
:
1215 if (dmi_read(target
, &v
, DM_PROGBUF0
+ scratch
->debug_address
) != ERROR_OK
)
1218 if (dmi_read(target
, &v
, DM_PROGBUF1
+ scratch
->debug_address
) != ERROR_OK
)
1220 *value
|= ((uint64_t) v
) << 32;
1224 uint8_t buffer
[8] = {0};
1225 if (read_memory(target
, scratch
->debug_address
, 4, 2, buffer
, 4) != ERROR_OK
)
1227 *value
= buffer
[0] |
1228 (((uint64_t) buffer
[1]) << 8) |
1229 (((uint64_t) buffer
[2]) << 16) |
1230 (((uint64_t) buffer
[3]) << 24) |
1231 (((uint64_t) buffer
[4]) << 32) |
1232 (((uint64_t) buffer
[5]) << 40) |
1233 (((uint64_t) buffer
[6]) << 48) |
1234 (((uint64_t) buffer
[7]) << 56);
1241 static int scratch_write64(struct target
*target
, scratch_mem_t
*scratch
,
1244 switch (scratch
->memory_space
) {
1246 dmi_write(target
, DM_DATA0
+ scratch
->debug_address
, value
);
1247 dmi_write(target
, DM_DATA1
+ scratch
->debug_address
, value
>> 32);
1249 case SPACE_DMI_PROGBUF
:
1250 dmi_write(target
, DM_PROGBUF0
+ scratch
->debug_address
, value
);
1251 dmi_write(target
, DM_PROGBUF1
+ scratch
->debug_address
, value
>> 32);
1255 uint8_t buffer
[8] = {
1265 if (write_memory(target
, scratch
->debug_address
, 4, 2, buffer
) != ERROR_OK
)
1273 /** Return register size in bits. */
1274 static unsigned register_size(struct target
*target
, unsigned number
)
1276 /* If reg_cache hasn't been initialized yet, make a guess. We need this for
1277 * when this function is called during examine(). */
1278 if (target
->reg_cache
)
1279 return target
->reg_cache
->reg_list
[number
].size
;
1281 return riscv_xlen(target
);
1284 static bool has_sufficient_progbuf(struct target
*target
, unsigned size
)
1286 RISCV013_INFO(info
);
1289 return info
->progbufsize
+ r
->impebreak
>= size
;
1293 * Immediately write the new value to the requested register. This mechanism
1294 * bypasses any caches.
1296 static int register_write_direct(struct target
*target
, unsigned number
,
1299 LOG_DEBUG("{%d} %s <- 0x%" PRIx64
, riscv_current_hartid(target
),
1300 gdb_regno_name(number
), value
);
1302 int result
= register_write_abstract(target
, number
, value
,
1303 register_size(target
, number
));
1304 if (result
== ERROR_OK
|| !has_sufficient_progbuf(target
, 2) ||
1305 !riscv_is_halted(target
))
1308 struct riscv_program program
;
1309 riscv_program_init(&program
, target
);
1312 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
1316 if (prep_for_register_access(target
, &mstatus
, number
) != ERROR_OK
)
1319 scratch_mem_t scratch
;
1320 bool use_scratch
= false;
1321 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
&&
1322 riscv_supports_extension(target
, 'D') &&
1323 riscv_xlen(target
) < 64) {
1324 /* There are no instructions to move all the bits from a register, so
1325 * we need to use some scratch RAM. */
1327 riscv_program_insert(&program
, fld(number
- GDB_REGNO_FPR0
, S0
, 0));
1329 if (scratch_reserve(target
, &scratch
, &program
, 8) != ERROR_OK
)
1332 if (register_write_direct(target
, GDB_REGNO_S0
, scratch
.hart_address
)
1334 scratch_release(target
, &scratch
);
1338 if (scratch_write64(target
, &scratch
, value
) != ERROR_OK
) {
1339 scratch_release(target
, &scratch
);
1343 } else if (number
== GDB_REGNO_VTYPE
) {
1344 riscv_program_insert(&program
, csrr(S0
, CSR_VL
));
1345 riscv_program_insert(&program
, vsetvli(ZERO
, S0
, value
));
1348 if (register_write_direct(target
, GDB_REGNO_S0
, value
) != ERROR_OK
)
1351 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
) {
1352 if (riscv_supports_extension(target
, 'D'))
1353 riscv_program_insert(&program
, fmv_d_x(number
- GDB_REGNO_FPR0
, S0
));
1355 riscv_program_insert(&program
, fmv_w_x(number
- GDB_REGNO_FPR0
, S0
));
1356 } else if (number
== GDB_REGNO_VL
) {
1357 /* "The XLEN-bit-wide read-only vl CSR can only be updated by the
1358 * vsetvli and vsetvl instructions, and the fault-only-rst vector
1359 * load instruction variants." */
1361 if (register_read(target
, &vtype
, GDB_REGNO_VTYPE
) != ERROR_OK
)
1363 if (riscv_program_insert(&program
, vsetvli(ZERO
, S0
, vtype
)) != ERROR_OK
)
1365 } else if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
) {
1366 riscv_program_csrw(&program
, S0
, number
);
1368 LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number
);
1373 int exec_out
= riscv_program_exec(&program
, target
);
1374 /* Don't message on error. Probably the register doesn't exist. */
1375 if (exec_out
== ERROR_OK
&& target
->reg_cache
) {
1376 struct reg
*reg
= &target
->reg_cache
->reg_list
[number
];
1377 buf_set_u64(reg
->value
, 0, reg
->size
, value
);
1381 scratch_release(target
, &scratch
);
1383 if (cleanup_after_register_access(target
, mstatus
, number
) != ERROR_OK
)
1387 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
1393 /** Read register value from the target. Also update the cached value. */
1394 static int register_read(struct target
*target
, uint64_t *value
, uint32_t number
)
1396 if (number
== GDB_REGNO_ZERO
) {
1400 int result
= register_read_direct(target
, value
, number
);
1401 if (result
!= ERROR_OK
)
1403 if (target
->reg_cache
) {
1404 struct reg
*reg
= &target
->reg_cache
->reg_list
[number
];
1405 buf_set_u64(reg
->value
, 0, reg
->size
, *value
);
1410 /** Actually read registers from the target right now. */
1411 static int register_read_direct(struct target
*target
, uint64_t *value
, uint32_t number
)
1413 int result
= register_read_abstract(target
, value
, number
,
1414 register_size(target
, number
));
1416 if (result
!= ERROR_OK
&&
1417 has_sufficient_progbuf(target
, 2) &&
1418 number
> GDB_REGNO_XPR31
) {
1419 struct riscv_program program
;
1420 riscv_program_init(&program
, target
);
1422 scratch_mem_t scratch
;
1423 bool use_scratch
= false;
1426 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
1429 /* Write program to move data into s0. */
1432 if (prep_for_register_access(target
, &mstatus
, number
) != ERROR_OK
)
1435 if (number
>= GDB_REGNO_FPR0
&& number
<= GDB_REGNO_FPR31
) {
1436 if (riscv_supports_extension(target
, 'D')
1437 && riscv_xlen(target
) < 64) {
1438 /* There are no instructions to move all the bits from a
1439 * register, so we need to use some scratch RAM. */
1440 riscv_program_insert(&program
, fsd(number
- GDB_REGNO_FPR0
, S0
,
1443 if (scratch_reserve(target
, &scratch
, &program
, 8) != ERROR_OK
)
1447 if (register_write_direct(target
, GDB_REGNO_S0
,
1448 scratch
.hart_address
) != ERROR_OK
) {
1449 scratch_release(target
, &scratch
);
1452 } else if (riscv_supports_extension(target
, 'D')) {
1453 riscv_program_insert(&program
, fmv_x_d(S0
, number
- GDB_REGNO_FPR0
));
1455 riscv_program_insert(&program
, fmv_x_w(S0
, number
- GDB_REGNO_FPR0
));
1457 } else if (number
>= GDB_REGNO_CSR0
&& number
<= GDB_REGNO_CSR4095
) {
1458 riscv_program_csrr(&program
, S0
, number
);
1460 LOG_ERROR("Unsupported register: %s", gdb_regno_name(number
));
1464 /* Execute program. */
1465 result
= riscv_program_exec(&program
, target
);
1466 /* Don't message on error. Probably the register doesn't exist. */
1469 result
= scratch_read64(target
, &scratch
, value
);
1470 scratch_release(target
, &scratch
);
1471 if (result
!= ERROR_OK
)
1475 if (register_read_direct(target
, value
, GDB_REGNO_S0
) != ERROR_OK
)
1479 if (cleanup_after_register_access(target
, mstatus
, number
) != ERROR_OK
)
1483 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
1487 if (result
== ERROR_OK
) {
1488 LOG_DEBUG("{%d} %s = 0x%" PRIx64
, riscv_current_hartid(target
),
1489 gdb_regno_name(number
), *value
);
1495 static int wait_for_authbusy(struct target
*target
, uint32_t *dmstatus
)
1497 time_t start
= time(NULL
);
1500 if (dmstatus_read(target
, &value
, false) != ERROR_OK
)
1504 if (!get_field(value
, DM_DMSTATUS_AUTHBUSY
))
1506 if (time(NULL
) - start
> riscv_command_timeout_sec
) {
1507 LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
1508 "Increase the timeout with riscv set_command_timeout_sec.",
1509 riscv_command_timeout_sec
,
1518 /*** OpenOCD target functions. ***/
1520 static void deinit_target(struct target
*target
)
1522 LOG_DEBUG("riscv_deinit_target()");
1523 struct riscv_info
*info
= target
->arch_info
;
1527 free(info
->version_specific
);
1528 /* TODO: free register arch_info */
1529 info
->version_specific
= NULL
;
1532 static int set_haltgroup(struct target
*target
, bool *supported
)
1534 uint32_t write
= set_field(DM_DMCS2_HGWRITE
, DM_DMCS2_GROUP
, target
->smp
);
1535 if (dmi_write(target
, DM_DMCS2
, write
) != ERROR_OK
)
1538 if (dmi_read(target
, &read
, DM_DMCS2
) != ERROR_OK
)
1540 *supported
= get_field(read
, DM_DMCS2_GROUP
) == (unsigned)target
->smp
;
1544 static int discover_vlenb(struct target
*target
)
1549 if (register_read(target
, &vlenb
, GDB_REGNO_VLENB
) != ERROR_OK
) {
1550 LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
1551 target_name(target
));
1557 LOG_INFO("Vector support with vlenb=%d", r
->vlenb
);
1562 static int examine(struct target
*target
)
1564 /* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
1566 uint32_t dtmcontrol
= dtmcontrol_scan(target
, 0);
1567 LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol
);
1568 LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol
, DTM_DTMCS_DMIRESET
));
1569 LOG_DEBUG(" idle=%d", get_field(dtmcontrol
, DTM_DTMCS_IDLE
));
1570 LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol
, DTM_DTMCS_DMISTAT
));
1571 LOG_DEBUG(" abits=%d", get_field(dtmcontrol
, DTM_DTMCS_ABITS
));
1572 LOG_DEBUG(" version=%d", get_field(dtmcontrol
, DTM_DTMCS_VERSION
));
1573 if (dtmcontrol
== 0) {
1574 LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
1577 if (get_field(dtmcontrol
, DTM_DTMCS_VERSION
) != 1) {
1578 LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
1579 get_field(dtmcontrol
, DTM_DTMCS_VERSION
), dtmcontrol
);
1583 riscv013_info_t
*info
= get_info(target
);
1584 /* TODO: This won't be true if there are multiple DMs. */
1585 info
->index
= target
->coreid
;
1586 info
->abits
= get_field(dtmcontrol
, DTM_DTMCS_ABITS
);
1587 info
->dtmcs_idle
= get_field(dtmcontrol
, DTM_DTMCS_IDLE
);
1589 /* Reset the Debug Module. */
1590 dm013_info_t
*dm
= get_dm(target
);
1593 if (!dm
->was_reset
) {
1594 dmi_write(target
, DM_DMCONTROL
, 0);
1595 dmi_write(target
, DM_DMCONTROL
, DM_DMCONTROL_DMACTIVE
);
1596 dm
->was_reset
= true;
1599 dmi_write(target
, DM_DMCONTROL
, DM_DMCONTROL_HARTSELLO
|
1600 DM_DMCONTROL_HARTSELHI
| DM_DMCONTROL_DMACTIVE
|
1601 DM_DMCONTROL_HASEL
);
1603 if (dmi_read(target
, &dmcontrol
, DM_DMCONTROL
) != ERROR_OK
)
1606 if (!get_field(dmcontrol
, DM_DMCONTROL_DMACTIVE
)) {
1607 LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
1612 dm
->hasel_supported
= get_field(dmcontrol
, DM_DMCONTROL_HASEL
);
1615 if (dmstatus_read(target
, &dmstatus
, false) != ERROR_OK
)
1617 LOG_DEBUG("dmstatus: 0x%08x", dmstatus
);
1618 int dmstatus_version
= get_field(dmstatus
, DM_DMSTATUS_VERSION
);
1619 if (dmstatus_version
!= 2 && dmstatus_version
!= 3) {
1620 /* Error was already printed out in dmstatus_read(). */
1625 (get_field(dmcontrol
, DM_DMCONTROL_HARTSELHI
) <<
1626 DM_DMCONTROL_HARTSELLO_LENGTH
) |
1627 get_field(dmcontrol
, DM_DMCONTROL_HARTSELLO
);
1628 info
->hartsellen
= 0;
1629 while (hartsel
& 1) {
1633 LOG_DEBUG("hartsellen=%d", info
->hartsellen
);
1636 if (dmi_read(target
, &hartinfo
, DM_HARTINFO
) != ERROR_OK
)
1639 info
->datasize
= get_field(hartinfo
, DM_HARTINFO_DATASIZE
);
1640 info
->dataaccess
= get_field(hartinfo
, DM_HARTINFO_DATAACCESS
);
1641 info
->dataaddr
= get_field(hartinfo
, DM_HARTINFO_DATAADDR
);
1643 if (!get_field(dmstatus
, DM_DMSTATUS_AUTHENTICATED
)) {
1644 LOG_ERROR("Debugger is not authenticated to target Debug Module. "
1645 "(dmstatus=0x%x). Use `riscv authdata_read` and "
1646 "`riscv authdata_write` commands to authenticate.", dmstatus
);
1647 /* If we return ERROR_FAIL here, then in a multicore setup the next
1648 * core won't be examined, which means we won't set up the
1649 * authentication commands for them, which means the config script
1650 * needs to be a lot more complex. */
1654 if (dmi_read(target
, &info
->sbcs
, DM_SBCS
) != ERROR_OK
)
1657 /* Check that abstract data registers are accessible. */
1658 uint32_t abstractcs
;
1659 if (dmi_read(target
, &abstractcs
, DM_ABSTRACTCS
) != ERROR_OK
)
1661 info
->datacount
= get_field(abstractcs
, DM_ABSTRACTCS_DATACOUNT
);
1662 info
->progbufsize
= get_field(abstractcs
, DM_ABSTRACTCS_PROGBUFSIZE
);
1664 LOG_INFO("datacount=%d progbufsize=%d", info
->datacount
, info
->progbufsize
);
1667 r
->impebreak
= get_field(dmstatus
, DM_DMSTATUS_IMPEBREAK
);
1669 if (!has_sufficient_progbuf(target
, 2)) {
1670 LOG_WARNING("We won't be able to execute fence instructions on this "
1671 "target. Memory may not always appear consistent. "
1672 "(progbufsize=%d, impebreak=%d)", info
->progbufsize
,
1676 if (info
->progbufsize
< 4 && riscv_enable_virtual
) {
1677 LOG_ERROR("set_enable_virtual is not available on this target. It "
1678 "requires a program buffer size of at least 4. (progbufsize=%d) "
1679 "Use `riscv set_enable_virtual off` to continue."
1680 , info
->progbufsize
);
1683 /* Before doing anything else we must first enumerate the harts. */
1684 if (dm
->hart_count
< 0) {
1685 for (int i
= 0; i
< MIN(RISCV_MAX_HARTS
, 1 << info
->hartsellen
); ++i
) {
1686 r
->current_hartid
= i
;
1687 if (riscv013_select_current_hart(target
) != ERROR_OK
)
1691 if (dmstatus_read(target
, &s
, true) != ERROR_OK
)
1693 if (get_field(s
, DM_DMSTATUS_ANYNONEXISTENT
))
1695 dm
->hart_count
= i
+ 1;
1697 if (get_field(s
, DM_DMSTATUS_ANYHAVERESET
))
1698 dmi_write(target
, DM_DMCONTROL
,
1699 set_hartsel(DM_DMCONTROL_DMACTIVE
| DM_DMCONTROL_ACKHAVERESET
, i
));
1702 LOG_DEBUG("Detected %d harts.", dm
->hart_count
);
1705 r
->current_hartid
= target
->coreid
;
1707 if (dm
->hart_count
== 0) {
1708 LOG_ERROR("No harts found!");
1712 /* Don't call any riscv_* functions until after we've counted the number of
1713 * cores and initialized registers. */
1715 if (riscv013_select_current_hart(target
) != ERROR_OK
)
1718 bool halted
= riscv_is_halted(target
);
1720 if (riscv013_halt_go(target
) != ERROR_OK
) {
1721 LOG_ERROR("Fatal: Hart %d failed to halt during examine()", r
->current_hartid
);
1726 /* Without knowing anything else we can at least mess with the
1727 * program buffer. */
1728 r
->debug_buffer_size
= info
->progbufsize
;
1730 int result
= register_read_abstract(target
, NULL
, GDB_REGNO_S0
, 64);
1731 if (result
== ERROR_OK
)
1736 if (register_read(target
, &r
->misa
, GDB_REGNO_MISA
)) {
1737 LOG_ERROR("Fatal: Failed to read MISA from hart %d.", r
->current_hartid
);
1741 if (riscv_supports_extension(target
, 'V')) {
1742 if (discover_vlenb(target
) != ERROR_OK
)
1746 /* Now init registers based on what we discovered. */
1747 if (riscv_init_registers(target
) != ERROR_OK
)
1750 /* Display this as early as possible to help people who are using
1751 * really slow simulators. */
1752 LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64
, r
->current_hartid
, r
->xlen
,
1756 riscv013_step_or_resume_current_hart(target
, false, false);
1758 target_set_examined(target
);
1761 bool haltgroup_supported
;
1762 if (set_haltgroup(target
, &haltgroup_supported
) != ERROR_OK
)
1764 if (haltgroup_supported
)
1765 LOG_INFO("Core %d made part of halt group %d.", target
->coreid
,
1768 LOG_INFO("Core %d could not be made part of halt group %d.",
1769 target
->coreid
, target
->smp
);
1772 /* Some regression suites rely on seeing 'Examined RISC-V core' to know
1773 * when they can connect with gdb/telnet.
1774 * We will need to update those suites if we want to change that text. */
1775 LOG_INFO("Examined RISC-V core; found %d harts",
1776 riscv_count_harts(target
));
1777 LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64
, r
->current_hartid
, r
->xlen
,
1782 static int riscv013_authdata_read(struct target
*target
, uint32_t *value
, unsigned int index
)
1785 LOG_ERROR("Spec 0.13 only has a single authdata register.");
1789 if (wait_for_authbusy(target
, NULL
) != ERROR_OK
)
1792 return dmi_read(target
, value
, DM_AUTHDATA
);
1795 static int riscv013_authdata_write(struct target
*target
, uint32_t value
, unsigned int index
)
1798 LOG_ERROR("Spec 0.13 only has a single authdata register.");
1802 uint32_t before
, after
;
1803 if (wait_for_authbusy(target
, &before
) != ERROR_OK
)
1806 dmi_write(target
, DM_AUTHDATA
, value
);
1808 if (wait_for_authbusy(target
, &after
) != ERROR_OK
)
1811 if (!get_field(before
, DM_DMSTATUS_AUTHENTICATED
) &&
1812 get_field(after
, DM_DMSTATUS_AUTHENTICATED
)) {
1813 LOG_INFO("authdata_write resulted in successful authentication");
1814 int result
= ERROR_OK
;
1815 dm013_info_t
*dm
= get_dm(target
);
1818 target_list_t
*entry
;
1819 list_for_each_entry(entry
, &dm
->target_list
, list
) {
1820 if (examine(entry
->target
) != ERROR_OK
)
1821 result
= ERROR_FAIL
;
1829 static int riscv013_hart_count(struct target
*target
)
1831 dm013_info_t
*dm
= get_dm(target
);
1833 return dm
->hart_count
;
1836 /* Try to find out the widest memory access size depending on the selected memory access methods. */
1837 static unsigned riscv013_data_bits(struct target
*target
)
1839 RISCV013_INFO(info
);
1842 for (unsigned int i
= 0; i
< RISCV_NUM_MEM_ACCESS_METHODS
; i
++) {
1843 int method
= r
->mem_access_methods
[i
];
1845 if (method
== RISCV_MEM_ACCESS_PROGBUF
) {
1846 if (has_sufficient_progbuf(target
, 3))
1847 return riscv_xlen(target
);
1848 } else if (method
== RISCV_MEM_ACCESS_SYSBUS
) {
1849 if (get_field(info
->sbcs
, DM_SBCS_SBACCESS128
))
1851 if (get_field(info
->sbcs
, DM_SBCS_SBACCESS64
))
1853 if (get_field(info
->sbcs
, DM_SBCS_SBACCESS32
))
1855 if (get_field(info
->sbcs
, DM_SBCS_SBACCESS16
))
1857 if (get_field(info
->sbcs
, DM_SBCS_SBACCESS8
))
1859 } else if (method
== RISCV_MEM_ACCESS_ABSTRACT
) {
1860 /* TODO: Once there is a spec for discovering abstract commands, we can
1861 * take those into account as well. For now we assume abstract commands
1862 * support XLEN-wide accesses. */
1863 return riscv_xlen(target
);
1864 } else if (method
== RISCV_MEM_ACCESS_UNSPECIFIED
)
1865 /* No further mem access method to try. */
1868 LOG_ERROR("Unable to determine supported data bits on this target. Assuming 32 bits.");
1872 static COMMAND_HELPER(riscv013_print_info
, struct target
*target
)
1874 RISCV013_INFO(info
);
1876 /* Abstract description. */
1877 riscv_print_info_line(CMD
, "target", "memory.read_while_running8", get_field(info
->sbcs
, DM_SBCS_SBACCESS8
));
1878 riscv_print_info_line(CMD
, "target", "memory.write_while_running8", get_field(info
->sbcs
, DM_SBCS_SBACCESS8
));
1879 riscv_print_info_line(CMD
, "target", "memory.read_while_running16", get_field(info
->sbcs
, DM_SBCS_SBACCESS16
));
1880 riscv_print_info_line(CMD
, "target", "memory.write_while_running16", get_field(info
->sbcs
, DM_SBCS_SBACCESS16
));
1881 riscv_print_info_line(CMD
, "target", "memory.read_while_running32", get_field(info
->sbcs
, DM_SBCS_SBACCESS32
));
1882 riscv_print_info_line(CMD
, "target", "memory.write_while_running32", get_field(info
->sbcs
, DM_SBCS_SBACCESS32
));
1883 riscv_print_info_line(CMD
, "target", "memory.read_while_running64", get_field(info
->sbcs
, DM_SBCS_SBACCESS64
));
1884 riscv_print_info_line(CMD
, "target", "memory.write_while_running64", get_field(info
->sbcs
, DM_SBCS_SBACCESS64
));
1885 riscv_print_info_line(CMD
, "target", "memory.read_while_running128", get_field(info
->sbcs
, DM_SBCS_SBACCESS128
));
1886 riscv_print_info_line(CMD
, "target", "memory.write_while_running128", get_field(info
->sbcs
, DM_SBCS_SBACCESS128
));
1888 /* Lower level description. */
1889 riscv_print_info_line(CMD
, "dm", "abits", info
->abits
);
1890 riscv_print_info_line(CMD
, "dm", "progbufsize", info
->progbufsize
);
1891 riscv_print_info_line(CMD
, "dm", "sbversion", get_field(info
->sbcs
, DM_SBCS_SBVERSION
));
1892 riscv_print_info_line(CMD
, "dm", "sbasize", get_field(info
->sbcs
, DM_SBCS_SBASIZE
));
1893 riscv_print_info_line(CMD
, "dm", "sbaccess128", get_field(info
->sbcs
, DM_SBCS_SBACCESS128
));
1894 riscv_print_info_line(CMD
, "dm", "sbaccess64", get_field(info
->sbcs
, DM_SBCS_SBACCESS64
));
1895 riscv_print_info_line(CMD
, "dm", "sbaccess32", get_field(info
->sbcs
, DM_SBCS_SBACCESS32
));
1896 riscv_print_info_line(CMD
, "dm", "sbaccess16", get_field(info
->sbcs
, DM_SBCS_SBACCESS16
));
1897 riscv_print_info_line(CMD
, "dm", "sbaccess8", get_field(info
->sbcs
, DM_SBCS_SBACCESS8
));
1900 if (dmstatus_read(target
, &dmstatus
, false) == ERROR_OK
)
1901 riscv_print_info_line(CMD
, "dm", "authenticated", get_field(dmstatus
, DM_DMSTATUS_AUTHENTICATED
));
1906 static int prep_for_vector_access(struct target
*target
, uint64_t *vtype
,
1907 uint64_t *vl
, unsigned *debug_vl
)
1910 /* TODO: this continuous save/restore is terrible for performance. */
1911 /* Write vtype and vl. */
1912 unsigned encoded_vsew
;
1913 switch (riscv_xlen(target
)) {
1921 LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target
));
1925 /* Save vtype and vl. */
1926 if (register_read(target
, vtype
, GDB_REGNO_VTYPE
) != ERROR_OK
)
1928 if (register_read(target
, vl
, GDB_REGNO_VL
) != ERROR_OK
)
1931 if (register_write_direct(target
, GDB_REGNO_VTYPE
, encoded_vsew
<< 3) != ERROR_OK
)
1933 *debug_vl
= DIV_ROUND_UP(r
->vlenb
* 8, riscv_xlen(target
));
1934 if (register_write_direct(target
, GDB_REGNO_VL
, *debug_vl
) != ERROR_OK
)
1940 static int cleanup_after_vector_access(struct target
*target
, uint64_t vtype
,
1943 /* Restore vtype and vl. */
1944 if (register_write_direct(target
, GDB_REGNO_VTYPE
, vtype
) != ERROR_OK
)
1946 if (register_write_direct(target
, GDB_REGNO_VL
, vl
) != ERROR_OK
)
1951 static int riscv013_get_register_buf(struct target
*target
,
1952 uint8_t *value
, int regno
)
1954 assert(regno
>= GDB_REGNO_V0
&& regno
<= GDB_REGNO_V31
);
1956 if (riscv_select_current_hart(target
) != ERROR_OK
)
1960 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
1964 if (prep_for_register_access(target
, &mstatus
, regno
) != ERROR_OK
)
1969 if (prep_for_vector_access(target
, &vtype
, &vl
, &debug_vl
) != ERROR_OK
)
1972 unsigned vnum
= regno
- GDB_REGNO_V0
;
1973 unsigned xlen
= riscv_xlen(target
);
1975 struct riscv_program program
;
1976 riscv_program_init(&program
, target
);
1977 riscv_program_insert(&program
, vmv_x_s(S0
, vnum
));
1978 riscv_program_insert(&program
, vslide1down_vx(vnum
, vnum
, S0
, true));
1980 int result
= ERROR_OK
;
1981 for (unsigned i
= 0; i
< debug_vl
; i
++) {
1982 /* Executing the program might result in an exception if there is some
1983 * issue with the vector implementation/instructions we're using. If that
1984 * happens, attempt to restore as usual. We may have clobbered the
1985 * vector register we tried to read already.
1986 * For other failures, we just return error because things are probably
1987 * so messed up that attempting to restore isn't going to help. */
1988 result
= riscv_program_exec(&program
, target
);
1989 if (result
== ERROR_OK
) {
1991 if (register_read_direct(target
, &v
, GDB_REGNO_S0
) != ERROR_OK
)
1993 buf_set_u64(value
, xlen
* i
, xlen
, v
);
1999 if (cleanup_after_vector_access(target
, vtype
, vl
) != ERROR_OK
)
2002 if (cleanup_after_register_access(target
, mstatus
, regno
) != ERROR_OK
)
2004 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
2010 static int riscv013_set_register_buf(struct target
*target
,
2011 int regno
, const uint8_t *value
)
2013 assert(regno
>= GDB_REGNO_V0
&& regno
<= GDB_REGNO_V31
);
2015 if (riscv_select_current_hart(target
) != ERROR_OK
)
2019 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
2023 if (prep_for_register_access(target
, &mstatus
, regno
) != ERROR_OK
)
2028 if (prep_for_vector_access(target
, &vtype
, &vl
, &debug_vl
) != ERROR_OK
)
2031 unsigned vnum
= regno
- GDB_REGNO_V0
;
2032 unsigned xlen
= riscv_xlen(target
);
2034 struct riscv_program program
;
2035 riscv_program_init(&program
, target
);
2036 riscv_program_insert(&program
, vslide1down_vx(vnum
, vnum
, S0
, true));
2037 int result
= ERROR_OK
;
2038 for (unsigned i
= 0; i
< debug_vl
; i
++) {
2039 if (register_write_direct(target
, GDB_REGNO_S0
,
2040 buf_get_u64(value
, xlen
* i
, xlen
)) != ERROR_OK
)
2042 result
= riscv_program_exec(&program
, target
);
2043 if (result
!= ERROR_OK
)
2047 if (cleanup_after_vector_access(target
, vtype
, vl
) != ERROR_OK
)
2050 if (cleanup_after_register_access(target
, mstatus
, regno
) != ERROR_OK
)
2052 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
2058 static uint32_t sb_sbaccess(unsigned int size_bytes
)
2060 switch (size_bytes
) {
2062 return set_field(0, DM_SBCS_SBACCESS
, 0);
2064 return set_field(0, DM_SBCS_SBACCESS
, 1);
2066 return set_field(0, DM_SBCS_SBACCESS
, 2);
2068 return set_field(0, DM_SBCS_SBACCESS
, 3);
2070 return set_field(0, DM_SBCS_SBACCESS
, 4);
2076 static int sb_write_address(struct target
*target
, target_addr_t address
,
2077 bool ensure_success
)
2079 RISCV013_INFO(info
);
2080 unsigned int sbasize
= get_field(info
->sbcs
, DM_SBCS_SBASIZE
);
2081 /* There currently is no support for >64-bit addresses in OpenOCD. */
2083 dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, DM_SBADDRESS3
, 0, false, false);
2085 dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, DM_SBADDRESS2
, 0, false, false);
2087 dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, DM_SBADDRESS1
, address
>> 32, false, false);
2088 return dmi_op(target
, NULL
, NULL
, DMI_OP_WRITE
, DM_SBADDRESS0
, address
,
2089 false, ensure_success
);
2092 static int batch_run(const struct target
*target
, struct riscv_batch
*batch
)
2094 RISCV013_INFO(info
);
2096 if (r
->reset_delays_wait
>= 0) {
2097 r
->reset_delays_wait
-= batch
->used_scans
;
2098 if (r
->reset_delays_wait
<= 0) {
2099 batch
->idle_count
= 0;
2100 info
->dmi_busy_delay
= 0;
2101 info
->ac_busy_delay
= 0;
2104 return riscv_batch_run(batch
);
2107 static int sba_supports_access(struct target
*target
, unsigned int size_bytes
)
2109 RISCV013_INFO(info
);
2110 switch (size_bytes
) {
2112 return get_field(info
->sbcs
, DM_SBCS_SBACCESS8
);
2114 return get_field(info
->sbcs
, DM_SBCS_SBACCESS16
);
2116 return get_field(info
->sbcs
, DM_SBCS_SBACCESS32
);
2118 return get_field(info
->sbcs
, DM_SBCS_SBACCESS64
);
2120 return get_field(info
->sbcs
, DM_SBCS_SBACCESS128
);
2126 static int sample_memory_bus_v1(struct target
*target
,
2127 struct riscv_sample_buf
*buf
,
2128 const riscv_sample_config_t
*config
,
2131 RISCV013_INFO(info
);
2132 unsigned int sbasize
= get_field(info
->sbcs
, DM_SBCS_SBASIZE
);
2134 LOG_ERROR("Memory sampling is only implemented for sbasize <= 64.");
2135 return ERROR_NOT_IMPLEMENTED
;
2138 if (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) != 1) {
2139 LOG_ERROR("Memory sampling is only implemented for SBA version 1.");
2140 return ERROR_NOT_IMPLEMENTED
;
2144 uint32_t sbcs_valid
= false;
2146 uint32_t sbaddress0
= 0;
2147 bool sbaddress0_valid
= false;
2148 uint32_t sbaddress1
= 0;
2149 bool sbaddress1_valid
= false;
2151 /* How often to read each value in a batch. */
2152 const unsigned int repeat
= 5;
2154 unsigned int enabled_count
= 0;
2155 for (unsigned int i
= 0; i
< ARRAY_SIZE(config
->bucket
); i
++) {
2156 if (config
->bucket
[i
].enabled
)
2160 while (timeval_ms() < until_ms
) {
2162 * batch_run() adds to the batch, so we can't simply reuse the same
2163 * batch over and over. So we create a new one every time through the
2166 struct riscv_batch
*batch
= riscv_batch_alloc(
2167 target
, 1 + enabled_count
* 5 * repeat
,
2168 info
->dmi_busy_delay
+ info
->bus_master_read_delay
);
2172 unsigned int result_bytes
= 0;
2173 for (unsigned int n
= 0; n
< repeat
; n
++) {
2174 for (unsigned int i
= 0; i
< ARRAY_SIZE(config
->bucket
); i
++) {
2175 if (config
->bucket
[i
].enabled
) {
2176 if (!sba_supports_access(target
, config
->bucket
[i
].size_bytes
)) {
2177 LOG_ERROR("Hardware does not support SBA access for %d-byte memory sampling.",
2178 config
->bucket
[i
].size_bytes
);
2179 return ERROR_NOT_IMPLEMENTED
;
2182 uint32_t sbcs_write
= DM_SBCS_SBREADONADDR
;
2183 if (enabled_count
== 1)
2184 sbcs_write
|= DM_SBCS_SBREADONDATA
;
2185 sbcs_write
|= sb_sbaccess(config
->bucket
[i
].size_bytes
);
2186 if (!sbcs_valid
|| sbcs_write
!= sbcs
) {
2187 riscv_batch_add_dmi_write(batch
, DM_SBCS
, sbcs_write
);
2193 (!sbaddress1_valid
||
2194 sbaddress1
!= config
->bucket
[i
].address
>> 32)) {
2195 sbaddress1
= config
->bucket
[i
].address
>> 32;
2196 riscv_batch_add_dmi_write(batch
, DM_SBADDRESS1
, sbaddress1
);
2197 sbaddress1_valid
= true;
2199 if (!sbaddress0_valid
||
2200 sbaddress0
!= (config
->bucket
[i
].address
& 0xffffffff)) {
2201 sbaddress0
= config
->bucket
[i
].address
;
2202 riscv_batch_add_dmi_write(batch
, DM_SBADDRESS0
, sbaddress0
);
2203 sbaddress0_valid
= true;
2205 if (config
->bucket
[i
].size_bytes
> 4)
2206 riscv_batch_add_dmi_read(batch
, DM_SBDATA1
);
2207 riscv_batch_add_dmi_read(batch
, DM_SBDATA0
);
2208 result_bytes
+= 1 + config
->bucket
[i
].size_bytes
;
2213 if (buf
->used
+ result_bytes
>= buf
->size
) {
2214 riscv_batch_free(batch
);
2218 size_t sbcs_key
= riscv_batch_add_dmi_read(batch
, DM_SBCS
);
2220 int result
= batch_run(target
, batch
);
2221 if (result
!= ERROR_OK
)
2224 uint32_t sbcs_read
= riscv_batch_get_dmi_read_data(batch
, sbcs_key
);
2225 if (get_field(sbcs_read
, DM_SBCS_SBBUSYERROR
)) {
2226 /* Discard this batch (too much hassle to try to recover partial
2227 * data) and try again with a larger delay. */
2228 info
->bus_master_read_delay
+= info
->bus_master_read_delay
/ 10 + 1;
2229 dmi_write(target
, DM_SBCS
, sbcs_read
| DM_SBCS_SBBUSYERROR
| DM_SBCS_SBERROR
);
2230 riscv_batch_free(batch
);
2233 if (get_field(sbcs_read
, DM_SBCS_SBERROR
)) {
2234 /* The memory we're sampling was unreadable, somehow. Give up. */
2235 dmi_write(target
, DM_SBCS
, DM_SBCS_SBBUSYERROR
| DM_SBCS_SBERROR
);
2236 riscv_batch_free(batch
);
2240 unsigned int read
= 0;
2241 for (unsigned int n
= 0; n
< repeat
; n
++) {
2242 for (unsigned int i
= 0; i
< ARRAY_SIZE(config
->bucket
); i
++) {
2243 if (config
->bucket
[i
].enabled
) {
2244 assert(i
< RISCV_SAMPLE_BUF_TIMESTAMP_BEFORE
);
2246 if (config
->bucket
[i
].size_bytes
> 4)
2247 value
= ((uint64_t)riscv_batch_get_dmi_read_data(batch
, read
++)) << 32;
2248 value
|= riscv_batch_get_dmi_read_data(batch
, read
++);
2250 buf
->buf
[buf
->used
] = i
;
2251 buf_set_u64(buf
->buf
+ buf
->used
+ 1, 0, config
->bucket
[i
].size_bytes
* 8, value
);
2252 buf
->used
+= 1 + config
->bucket
[i
].size_bytes
;
2257 riscv_batch_free(batch
);
2263 static int sample_memory(struct target
*target
,
2264 struct riscv_sample_buf
*buf
,
2265 riscv_sample_config_t
*config
,
2268 if (!config
->enabled
)
2271 return sample_memory_bus_v1(target
, buf
, config
, until_ms
);
2274 static int init_target(struct command_context
*cmd_ctx
,
2275 struct target
*target
)
2278 RISCV_INFO(generic_info
);
2280 generic_info
->get_register
= &riscv013_get_register
;
2281 generic_info
->set_register
= &riscv013_set_register
;
2282 generic_info
->get_register_buf
= &riscv013_get_register_buf
;
2283 generic_info
->set_register_buf
= &riscv013_set_register_buf
;
2284 generic_info
->select_current_hart
= &riscv013_select_current_hart
;
2285 generic_info
->is_halted
= &riscv013_is_halted
;
2286 generic_info
->resume_go
= &riscv013_resume_go
;
2287 generic_info
->step_current_hart
= &riscv013_step_current_hart
;
2288 generic_info
->on_halt
= &riscv013_on_halt
;
2289 generic_info
->resume_prep
= &riscv013_resume_prep
;
2290 generic_info
->halt_prep
= &riscv013_halt_prep
;
2291 generic_info
->halt_go
= &riscv013_halt_go
;
2292 generic_info
->on_step
= &riscv013_on_step
;
2293 generic_info
->halt_reason
= &riscv013_halt_reason
;
2294 generic_info
->read_debug_buffer
= &riscv013_read_debug_buffer
;
2295 generic_info
->write_debug_buffer
= &riscv013_write_debug_buffer
;
2296 generic_info
->execute_debug_buffer
= &riscv013_execute_debug_buffer
;
2297 generic_info
->fill_dmi_write_u64
= &riscv013_fill_dmi_write_u64
;
2298 generic_info
->fill_dmi_read_u64
= &riscv013_fill_dmi_read_u64
;
2299 generic_info
->fill_dmi_nop_u64
= &riscv013_fill_dmi_nop_u64
;
2300 generic_info
->dmi_write_u64_bits
= &riscv013_dmi_write_u64_bits
;
2301 generic_info
->authdata_read
= &riscv013_authdata_read
;
2302 generic_info
->authdata_write
= &riscv013_authdata_write
;
2303 generic_info
->dmi_read
= &dmi_read
;
2304 generic_info
->dmi_write
= &dmi_write
;
2305 generic_info
->read_memory
= read_memory
;
2306 generic_info
->hart_count
= &riscv013_hart_count
;
2307 generic_info
->data_bits
= &riscv013_data_bits
;
2308 generic_info
->print_info
= &riscv013_print_info
;
2309 if (!generic_info
->version_specific
) {
2310 generic_info
->version_specific
= calloc(1, sizeof(riscv013_info_t
));
2311 if (!generic_info
->version_specific
)
2314 generic_info
->sample_memory
= sample_memory
;
2315 riscv013_info_t
*info
= get_info(target
);
2317 info
->progbufsize
= -1;
2319 info
->dmi_busy_delay
= 0;
2320 info
->bus_master_read_delay
= 0;
2321 info
->bus_master_write_delay
= 0;
2322 info
->ac_busy_delay
= 0;
2324 /* Assume all these abstract commands are supported until we learn
2326 * TODO: The spec allows eg. one CSR to be able to be accessed abstractly
2327 * while another one isn't. We don't track that this closely here, but in
2328 * the future we probably should. */
2329 info
->abstract_read_csr_supported
= true;
2330 info
->abstract_write_csr_supported
= true;
2331 info
->abstract_read_fpr_supported
= true;
2332 info
->abstract_write_fpr_supported
= true;
2334 info
->has_aampostincrement
= YNM_MAYBE
;
2339 static int assert_reset(struct target
*target
)
2345 uint32_t control_base
= set_field(0, DM_DMCONTROL_DMACTIVE
, 1);
2347 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
)) {
2348 /* Run the user-supplied script if there is one. */
2349 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
2350 } else if (target
->rtos
) {
2351 /* There's only one target, and OpenOCD thinks each hart is a thread.
2352 * We must reset them all. */
2354 /* TODO: Try to use hasel in dmcontrol */
2356 /* Set haltreq for each hart. */
2357 uint32_t control
= set_hartsel(control_base
, target
->coreid
);
2358 control
= set_field(control
, DM_DMCONTROL_HALTREQ
,
2359 target
->reset_halt
? 1 : 0);
2360 dmi_write(target
, DM_DMCONTROL
, control
);
2362 /* Assert ndmreset */
2363 control
= set_field(control
, DM_DMCONTROL_NDMRESET
, 1);
2364 dmi_write(target
, DM_DMCONTROL
, control
);
2367 /* Reset just this hart. */
2368 uint32_t control
= set_hartsel(control_base
, r
->current_hartid
);
2369 control
= set_field(control
, DM_DMCONTROL_HALTREQ
,
2370 target
->reset_halt
? 1 : 0);
2371 control
= set_field(control
, DM_DMCONTROL_NDMRESET
, 1);
2372 dmi_write(target
, DM_DMCONTROL
, control
);
2375 target
->state
= TARGET_RESET
;
2377 dm013_info_t
*dm
= get_dm(target
);
2381 /* The DM might have gotten reset if OpenOCD called us in some reset that
2382 * involves SRST being toggled. So clear our cache which may be out of
2384 memset(dm
->progbuf_cache
, 0, sizeof(dm
->progbuf_cache
));
/* Deassert ndmreset and wait for every relevant hart to leave reset,
 * preserving haltreq when the user requested reset-halt.
 * NOTE(review): this capture appears to have lines elided (declarations of
 * `r`, `dmstatus`, `index`, some braces and early returns); the tokens below
 * are kept exactly as captured -- confirm against the full file. */
static int deassert_reset(struct target *target)
	RISCV013_INFO(info);
	/* Clear the reset, but make sure haltreq is still set */
	uint32_t control = 0, control_haltreq;
	control = set_field(control, DM_DMCONTROL_DMACTIVE, 1);
	control_haltreq = set_field(control, DM_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
	/* ndmreset is simply absent from the value written, which deasserts it. */
	dmi_write(target, DM_DMCONTROL,
			set_hartsel(control_haltreq, r->current_hartid));
	/* Save the delay so it can be restored after the polling below. */
	int dmi_busy_delay = info->dmi_busy_delay;
	time_t start = time(NULL);
	for (int i = 0; i < riscv_count_harts(target); ++i) {
			if (index != target->coreid)
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control_haltreq, index));
			index = r->current_hartid;
		LOG_DEBUG("Waiting for hart %d to come out of reset.", index);
			int result = dmstatus_read_timeout(target, &dmstatus, true,
					riscv_reset_timeout_sec);
			if (result == ERROR_TIMEOUT_REACHED)
				LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
						"reset in %ds; Increase the timeout with riscv "
						"set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec);
			if (result != ERROR_OK)
			/* Certain debug modules, like the one in GD32VF103
			 * MCUs, violate the specification's requirement that
			 * each hart is in "exactly one of four states" and,
			 * during reset, report harts as both unavailable and
			 * halted/running. To work around this, we check for
			 * the absence of the unavailable state rather than
			 * the presence of any other state. */
			if (!get_field(dmstatus, DM_DMSTATUS_ALLUNAVAIL))
			if (time(NULL) - start > riscv_reset_timeout_sec) {
				LOG_ERROR("Hart %d didn't leave reset in %ds; "
						"Increase the timeout with riscv set_reset_timeout_sec.",
						index, riscv_reset_timeout_sec, dmstatus)
			target->state = TARGET_HALTED;
		if (get_field(dmstatus, DM_DMSTATUS_ALLHAVERESET)) {
			/* Ack reset and clear DM_DMCONTROL_HALTREQ if previously set */
			dmi_write(target, DM_DMCONTROL,
					set_hartsel(control, index) |
					DM_DMCONTROL_ACKHAVERESET);
	/* Restore the delay saved before the reset-wait polling. */
	info->dmi_busy_delay = dmi_busy_delay;
2461 static int execute_fence(struct target
*target
)
2463 /* FIXME: For non-coherent systems we need to flush the caches right
2464 * here, but there's no ISA-defined way of doing that. */
2466 struct riscv_program program
;
2467 riscv_program_init(&program
, target
);
2468 riscv_program_fence_i(&program
);
2469 riscv_program_fence(&program
);
2470 int result
= riscv_program_exec(&program
, target
);
2471 if (result
!= ERROR_OK
)
2472 LOG_DEBUG("Unable to execute pre-fence");
2478 static void log_memory_access(target_addr_t address
, uint64_t value
,
2479 unsigned size_bytes
, bool read
)
2481 if (debug_level
< LOG_LVL_DEBUG
)
2485 sprintf(fmt
, "M[0x%" TARGET_PRIxADDR
"] %ss 0x%%0%d" PRIx64
,
2486 address
, read
? "read" : "write", size_bytes
* 2);
2487 switch (size_bytes
) {
2495 value
&= 0xffffffffUL
;
2502 LOG_DEBUG(fmt
, value
);
2505 /* Read the relevant sbdata regs depending on size, and put the results into
2507 static int read_memory_bus_word(struct target
*target
, target_addr_t address
,
2508 uint32_t size
, uint8_t *buffer
)
2512 static int sbdata
[4] = { DM_SBDATA0
, DM_SBDATA1
, DM_SBDATA2
, DM_SBDATA3
};
2514 for (int i
= (size
- 1) / 4; i
>= 0; i
--) {
2515 result
= dmi_op(target
, &value
, NULL
, DMI_OP_READ
, sbdata
[i
], 0, false, true);
2516 if (result
!= ERROR_OK
)
2518 buf_set_u32(buffer
+ i
* 4, 0, 8 * MIN(size
, 4), value
);
2519 log_memory_access(address
+ i
* 4, value
, MIN(size
, 4), true);
2524 static target_addr_t
sb_read_address(struct target
*target
)
2526 RISCV013_INFO(info
);
2527 unsigned sbasize
= get_field(info
->sbcs
, DM_SBCS_SBASIZE
);
2528 target_addr_t address
= 0;
2531 dmi_read(target
, &v
, DM_SBADDRESS1
);
2535 dmi_read(target
, &v
, DM_SBADDRESS0
);
2540 static int read_sbcs_nonbusy(struct target
*target
, uint32_t *sbcs
)
2542 time_t start
= time(NULL
);
2544 if (dmi_read(target
, sbcs
, DM_SBCS
) != ERROR_OK
)
2546 if (!get_field(*sbcs
, DM_SBCS_SBBUSY
))
2548 if (time(NULL
) - start
> riscv_command_timeout_sec
) {
2549 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
2550 "Increase the timeout with riscv set_command_timeout_sec.",
2551 riscv_command_timeout_sec
, *sbcs
);
2557 static int modify_privilege(struct target
*target
, uint64_t *mstatus
, uint64_t *mstatus_old
)
2559 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5)) {
2562 if (register_read(target
, &dcsr
, GDB_REGNO_DCSR
) != ERROR_OK
)
2565 /* Read and save MSTATUS */
2566 if (register_read(target
, mstatus
, GDB_REGNO_MSTATUS
) != ERROR_OK
)
2568 *mstatus_old
= *mstatus
;
2570 /* If we come from m-mode with mprv set, we want to keep mpp */
2571 if (get_field(dcsr
, DCSR_PRV
) < 3) {
2573 *mstatus
= set_field(*mstatus
, MSTATUS_MPP
, get_field(dcsr
, DCSR_PRV
));
2576 *mstatus
= set_field(*mstatus
, MSTATUS_MPRV
, 1);
2579 if (*mstatus
!= *mstatus_old
)
2580 if (register_write_direct(target
, GDB_REGNO_MSTATUS
, *mstatus
) != ERROR_OK
)
/* Read memory through the sba v0 interface. Small reads are performed as
 * one-off "single read" commands; block reads program autoread +
 * autoincrement and stream data out of DM_SBDATA0.
 * NOTE(review): this capture appears to have lines elided (error returns,
 * buffer/address advancement, closing braces); the tokens below are kept
 * exactly as captured -- confirm against the full file. */
static int read_memory_bus_v0(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
	if (size != increment) {
		LOG_ERROR("sba v0 reads only support size==increment");
		return ERROR_NOT_IMPLEMENTED;
	LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
			TARGET_PRIxADDR, size, count, address);
	uint8_t *t_buffer = buffer;
	riscv_addr_t cur_addr = address;
	riscv_addr_t fin_addr = address + (count * size);
	uint32_t access = 0;
	/* sbcs fields that exist only in the v0 system bus interface. */
	const int DM_SBCS_SBSINGLEREAD_OFFSET = 20;
	const uint32_t DM_SBCS_SBSINGLEREAD = (0x1U << DM_SBCS_SBSINGLEREAD_OFFSET);
	const int DM_SBCS_SBAUTOREAD_OFFSET = 15;
	const uint32_t DM_SBCS_SBAUTOREAD = (0x1U << DM_SBCS_SBAUTOREAD_OFFSET);
	/* ww favorise one off reading if there is an issue */
	for (uint32_t i = 0; i < count; i++) {
		if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
		dmi_write(target, DM_SBADDRESS0, cur_addr);
		/* size/2 matching the bit access of the spec 0.13 */
		access = set_field(access, DM_SBCS_SBACCESS, size/2);
		access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
		LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
		dmi_write(target, DM_SBCS, access);
		/* Collect the data that this single read produced. */
		if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
		LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
		buf_set_u32(t_buffer, 0, 8 * size, value);
	/* has to be the same size if we want to read a block */
	LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
	if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
	/* set current address */
	dmi_write(target, DM_SBADDRESS0, cur_addr);
	/* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
	 * size/2 matching the bit access of the spec 0.13 */
	access = set_field(access, DM_SBCS_SBACCESS, size/2);
	access = set_field(access, DM_SBCS_SBAUTOREAD, 1);
	access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
	access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
	LOG_DEBUG("\r\naccess: 0x%08x", access);
	dmi_write(target, DM_SBCS, access);
	while (cur_addr < fin_addr) {
		LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
				PRIx64, size, count, cur_addr);
		/* Each DM_SBDATA0 read triggers the next autoincremented bus read. */
		if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
		buf_set_u32(t_buffer, 0, 8 * size, value);
		/* if we are reaching last address, we must clear autoread */
		if (cur_addr == fin_addr && count != 1) {
			dmi_write(target, DM_SBCS, 0);
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
			buf_set_u32(t_buffer, 0, 8 * size, value);
	/* Final sbcs readback - presumably error checking; tail elided here. */
	if (dmi_read(target, &sbcs, DM_SBCS) != ERROR_OK)
/*
 * Read the requested memory using the system bus interface (sba v1).
 * Programs readonaddr/readondata, streams sbdata through raw dmi_scan with
 * busy retry, and restarts at the stalled address on sbbusyerror.
 * NOTE(review): this capture appears to have lines elided (error returns,
 * retry-loop braces, the sberror switch arms); tokens below are kept exactly
 * as captured -- confirm against the full file.
 */
static int read_memory_bus_v1(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
	if (increment != size && increment != 0) {
		LOG_ERROR("sba v1 reads only support increment of size or 0");
		return ERROR_NOT_IMPLEMENTED;
	RISCV013_INFO(info);
	target_addr_t next_address = address;
	target_addr_t end_address = address + count * size;
	/* Outer loop: restart here after a recoverable sbbusyerror. */
	while (next_address < end_address) {
		uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
		sbcs_write |= sb_sbaccess(size);
		if (increment == size)
			sbcs_write = set_field(sbcs_write, DM_SBCS_SBAUTOINCREMENT, 1);
		sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, count > 1);
		if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
		/* This address write will trigger the first read. */
		if (sb_write_address(target, next_address, true) != ERROR_OK)
		if (info->bus_master_read_delay) {
			jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
			if (jtag_execute_queue() != ERROR_OK) {
				LOG_ERROR("Failed to scan idle sequence");
		/* First value has been read, and is waiting for us to issue a DMI read. */
		static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
		/* next_read trails one DMI transaction behind the issued reads;
		 * address - 1 is the "nothing pending yet" sentinel. */
		target_addr_t next_read = address - 1;
		for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
			for (int j = (size - 1) / 4; j >= 0; j--) {
				unsigned attempt = 0;
					if (attempt++ > 100) {
						LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
					dmi_status_t status = dmi_scan(target, NULL, &value,
							DMI_OP_READ, sbdata[j], 0, false);
					if (status == DMI_STATUS_BUSY)
						increase_dmi_busy_delay(target);
					else if (status == DMI_STATUS_SUCCESS)
				if (next_read != address - 1) {
					buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
					log_memory_access(next_read, value, MIN(size, 4), true);
				next_read = address + i * size + j * 4;
		uint32_t sbcs_read = 0;
			/* Collect the final pending value with a NOP scan. */
			unsigned attempt = 0;
				if (attempt++ > 100) {
					LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
				dmi_status_t status = dmi_scan(target, NULL, &value, DMI_OP_NOP, 0, 0, false);
				if (status == DMI_STATUS_BUSY)
					increase_dmi_busy_delay(target);
				else if (status == DMI_STATUS_SUCCESS)
			buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
			log_memory_access(next_read, value, MIN(size, 4), true);
			/* "Writes to sbcs while sbbusy is high result in undefined behavior.
			 * A debugger must not write to sbcs until it reads sbbusy as 0." */
			if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
			sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, 0);
			if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
		/* Read the last word, after we disabled sbreadondata if necessary. */
		if (!get_field(sbcs_read, DM_SBCS_SBERROR) &&
				!get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
			if (read_memory_bus_word(target, address + (count - 1) * size, size,
						buffer + (count - 1) * size) != ERROR_OK)
			if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
		if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
			/* We read while the target was busy. Slow down and try again. */
			if (dmi_write(target, DM_SBCS, sbcs_read | DM_SBCS_SBBUSYERROR) != ERROR_OK)
			next_address = sb_read_address(target);
			info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
		unsigned error = get_field(sbcs_read, DM_SBCS_SBERROR);
			next_address = end_address;
			/* Some error indicating the bus access failed, but not because of
			 * something we did wrong. */
			if (dmi_write(target, DM_SBCS, DM_SBCS_SBERROR) != ERROR_OK)
2811 static void log_mem_access_result(struct target
*target
, bool success
, int method
, bool read
)
2817 /* Compose the message */
2818 snprintf(msg
, 60, "%s to %s memory via %s.",
2819 success
? "Succeeded" : "Failed",
2820 read
? "read" : "write",
2821 (method
== RISCV_MEM_ACCESS_PROGBUF
) ? "program buffer" :
2822 (method
== RISCV_MEM_ACCESS_SYSBUS
) ? "system bus" : "abstract access");
2824 /* Determine the log message severity. Show warnings only once. */
2826 if (method
== RISCV_MEM_ACCESS_PROGBUF
) {
2827 warn
= r
->mem_access_progbuf_warn
;
2828 r
->mem_access_progbuf_warn
= false;
2830 if (method
== RISCV_MEM_ACCESS_SYSBUS
) {
2831 warn
= r
->mem_access_sysbus_warn
;
2832 r
->mem_access_sysbus_warn
= false;
2834 if (method
== RISCV_MEM_ACCESS_ABSTRACT
) {
2835 warn
= r
->mem_access_abstract_warn
;
2836 r
->mem_access_abstract_warn
= false;
2841 LOG_WARNING("%s", msg
);
2843 LOG_DEBUG("%s", msg
);
2846 static bool mem_should_skip_progbuf(struct target
*target
, target_addr_t address
,
2847 uint32_t size
, bool read
, char **skip_reason
)
2849 assert(skip_reason
);
2851 if (!has_sufficient_progbuf(target
, 3)) {
2852 LOG_DEBUG("Skipping mem %s via progbuf - insufficient progbuf size.",
2853 read
? "read" : "write");
2854 *skip_reason
= "skipped (insufficient progbuf)";
2857 if (target
->state
!= TARGET_HALTED
) {
2858 LOG_DEBUG("Skipping mem %s via progbuf - target not halted.",
2859 read
? "read" : "write");
2860 *skip_reason
= "skipped (target not halted)";
2863 if (riscv_xlen(target
) < size
* 8) {
2864 LOG_DEBUG("Skipping mem %s via progbuf - XLEN (%d) is too short for %d-bit memory access.",
2865 read
? "read" : "write", riscv_xlen(target
), size
* 8);
2866 *skip_reason
= "skipped (XLEN too short)";
2870 LOG_DEBUG("Skipping mem %s via progbuf - unsupported size.",
2871 read
? "read" : "write");
2872 *skip_reason
= "skipped (unsupported size)";
2875 if ((sizeof(address
) * 8 > riscv_xlen(target
)) && (address
>> riscv_xlen(target
))) {
2876 LOG_DEBUG("Skipping mem %s via progbuf - progbuf only supports %u-bit address.",
2877 read
? "read" : "write", riscv_xlen(target
));
2878 *skip_reason
= "skipped (too large address)";
2885 static bool mem_should_skip_sysbus(struct target
*target
, target_addr_t address
,
2886 uint32_t size
, uint32_t increment
, bool read
, char **skip_reason
)
2888 assert(skip_reason
);
2890 RISCV013_INFO(info
);
2891 if (!sba_supports_access(target
, size
)) {
2892 LOG_DEBUG("Skipping mem %s via system bus - unsupported size.",
2893 read
? "read" : "write");
2894 *skip_reason
= "skipped (unsupported size)";
2897 unsigned int sbasize
= get_field(info
->sbcs
, DM_SBCS_SBASIZE
);
2898 if ((sizeof(address
) * 8 > sbasize
) && (address
>> sbasize
)) {
2899 LOG_DEBUG("Skipping mem %s via system bus - sba only supports %u-bit address.",
2900 read
? "read" : "write", sbasize
);
2901 *skip_reason
= "skipped (too large address)";
2904 if (read
&& increment
!= size
&& (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) == 0 || increment
!= 0)) {
2905 LOG_DEBUG("Skipping mem read via system bus - "
2906 "sba reads only support size==increment or also size==0 for sba v1.");
2907 *skip_reason
= "skipped (unsupported increment)";
2914 static bool mem_should_skip_abstract(struct target
*target
, target_addr_t address
,
2915 uint32_t size
, uint32_t increment
, bool read
, char **skip_reason
)
2917 assert(skip_reason
);
2920 /* TODO: Add 128b support if it's ever used. Involves modifying
2921 read/write_abstract_arg() to work on two 64b values. */
2922 LOG_DEBUG("Skipping mem %s via abstract access - unsupported size: %d bits",
2923 read
? "read" : "write", size
* 8);
2924 *skip_reason
= "skipped (unsupported size)";
2927 if ((sizeof(address
) * 8 > riscv_xlen(target
)) && (address
>> riscv_xlen(target
))) {
2928 LOG_DEBUG("Skipping mem %s via abstract access - abstract access only supports %u-bit address.",
2929 read
? "read" : "write", riscv_xlen(target
));
2930 *skip_reason
= "skipped (too large address)";
2933 if (read
&& size
!= increment
) {
2934 LOG_ERROR("Skipping mem read via abstract access - "
2935 "abstract command reads only support size==increment.");
2936 *skip_reason
= "skipped (unsupported increment)";
2944 * Performs a memory read using memory access abstract commands. The read sizes
2945 * supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16 byte
2946 * aamsize fields in the memory access abstract command.
2948 static int read_memory_abstract(struct target
*target
, target_addr_t address
,
2949 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t increment
)
2951 RISCV013_INFO(info
);
2953 int result
= ERROR_OK
;
2954 bool use_aampostincrement
= info
->has_aampostincrement
!= YNM_NO
;
2956 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR
, count
,
2959 memset(buffer
, 0, count
* size
);
2961 /* Convert the size (bytes) to width (bits) */
2962 unsigned width
= size
<< 3;
2964 /* Create the command (physical address, postincrement, read) */
2965 uint32_t command
= access_memory_command(target
, false, width
, use_aampostincrement
, false);
2967 /* Execute the reads */
2968 uint8_t *p
= buffer
;
2969 bool updateaddr
= true;
2970 unsigned int width32
= (width
< 32) ? 32 : width
;
2971 for (uint32_t c
= 0; c
< count
; c
++) {
2972 /* Update the address if it is the first time or aampostincrement is not supported by the target. */
2974 /* Set arg1 to the address: address + c * size */
2975 result
= write_abstract_arg(target
, 1, address
+ c
* size
, riscv_xlen(target
));
2976 if (result
!= ERROR_OK
) {
2977 LOG_ERROR("Failed to write arg1 during read_memory_abstract().");
2982 /* Execute the command */
2983 result
= execute_abstract_command(target
, command
);
2985 if (info
->has_aampostincrement
== YNM_MAYBE
) {
2986 if (result
== ERROR_OK
) {
2987 /* Safety: double-check that the address was really auto-incremented */
2988 riscv_reg_t new_address
= read_abstract_arg(target
, 1, riscv_xlen(target
));
2989 if (new_address
== address
+ size
) {
2990 LOG_DEBUG("aampostincrement is supported on this target.");
2991 info
->has_aampostincrement
= YNM_YES
;
2993 LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
2994 info
->has_aampostincrement
= YNM_NO
;
2997 /* Try the same access but with postincrement disabled. */
2998 command
= access_memory_command(target
, false, width
, false, false);
2999 result
= execute_abstract_command(target
, command
);
3000 if (result
== ERROR_OK
) {
3001 LOG_DEBUG("aampostincrement is not supported on this target.");
3002 info
->has_aampostincrement
= YNM_NO
;
3007 if (result
!= ERROR_OK
)
3010 /* Copy arg0 to buffer (rounded width up to nearest 32) */
3011 riscv_reg_t value
= read_abstract_arg(target
, 0, width32
);
3012 buf_set_u64(p
, 0, 8 * size
, value
);
3014 if (info
->has_aampostincrement
== YNM_YES
)
3023 * Performs a memory write using memory access abstract commands. The write
3024 * sizes supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16
3025 * byte aamsize fields in the memory access abstract command.
3027 static int write_memory_abstract(struct target
*target
, target_addr_t address
,
3028 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
3030 RISCV013_INFO(info
);
3031 int result
= ERROR_OK
;
3032 bool use_aampostincrement
= info
->has_aampostincrement
!= YNM_NO
;
3034 LOG_DEBUG("writing %d words of %d bytes from 0x%" TARGET_PRIxADDR
, count
,
3037 /* Convert the size (bytes) to width (bits) */
3038 unsigned width
= size
<< 3;
3040 /* Create the command (physical address, postincrement, write) */
3041 uint32_t command
= access_memory_command(target
, false, width
, use_aampostincrement
, true);
3043 /* Execute the writes */
3044 const uint8_t *p
= buffer
;
3045 bool updateaddr
= true;
3046 for (uint32_t c
= 0; c
< count
; c
++) {
3047 /* Move data to arg0 */
3048 riscv_reg_t value
= buf_get_u64(p
, 0, 8 * size
);
3049 result
= write_abstract_arg(target
, 0, value
, riscv_xlen(target
));
3050 if (result
!= ERROR_OK
) {
3051 LOG_ERROR("Failed to write arg0 during write_memory_abstract().");
3055 /* Update the address if it is the first time or aampostincrement is not supported by the target. */
3057 /* Set arg1 to the address: address + c * size */
3058 result
= write_abstract_arg(target
, 1, address
+ c
* size
, riscv_xlen(target
));
3059 if (result
!= ERROR_OK
) {
3060 LOG_ERROR("Failed to write arg1 during write_memory_abstract().");
3065 /* Execute the command */
3066 result
= execute_abstract_command(target
, command
);
3068 if (info
->has_aampostincrement
== YNM_MAYBE
) {
3069 if (result
== ERROR_OK
) {
3070 /* Safety: double-check that the address was really auto-incremented */
3071 riscv_reg_t new_address
= read_abstract_arg(target
, 1, riscv_xlen(target
));
3072 if (new_address
== address
+ size
) {
3073 LOG_DEBUG("aampostincrement is supported on this target.");
3074 info
->has_aampostincrement
= YNM_YES
;
3076 LOG_WARNING("Buggy aampostincrement! Address not incremented correctly.");
3077 info
->has_aampostincrement
= YNM_NO
;
3080 /* Try the same access but with postincrement disabled. */
3081 command
= access_memory_command(target
, false, width
, false, true);
3082 result
= execute_abstract_command(target
, command
);
3083 if (result
== ERROR_OK
) {
3084 LOG_DEBUG("aampostincrement is not supported on this target.");
3085 info
->has_aampostincrement
= YNM_NO
;
3090 if (result
!= ERROR_OK
)
3093 if (info
->has_aampostincrement
== YNM_YES
)
/*
 * Read the requested memory, taking care to execute every read exactly once,
 * even if cmderr=busy is encountered.
 * Pipelines abstract-command reads through s1/dm_data0 using autoexec and
 * batched DMI reads; on cmderr=busy it determines how far the hart got and
 * resumes from there.
 * NOTE(review): this capture appears to have lines elided (early returns,
 * switch-case labels, loop braces); the tokens below are kept exactly as
 * captured -- confirm against the full file.
 */
static int read_memory_progbuf_inner(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
	RISCV013_INFO(info);
	int result = ERROR_OK;
	/* Write address to S0. */
	result = register_write_direct(target, GDB_REGNO_S0, address);
	if (result != ERROR_OK)
	/* increment==0: S2 counts completed reads instead of advancing S0. */
	if (increment == 0 &&
			register_write_direct(target, GDB_REGNO_S2, 0) != ERROR_OK)
	uint32_t command = access_register_command(target, GDB_REGNO_S1,
			AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
	if (execute_abstract_command(target, command) != ERROR_OK)
	/* First read has just triggered. Result is in s1. */
		if (register_read_direct(target, &value, GDB_REGNO_S1) != ERROR_OK)
		buf_set_u64(buffer, 0, 8 * size, value);
		log_memory_access(address, value, size, true);
	if (dmi_write(target, DM_ABSTRACTAUTO,
			1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
	/* Read garbage from dmi_data0, which triggers another execution of the
	 * program. Now dmi_data0 contains the first good result, and s1 the next.
	 */
	if (dmi_read_exec(target, NULL, DM_DATA0) != ERROR_OK)
	/* read_addr is the next address that the hart will read from, which is the
	 * value in s0 (remainder of this comment elided in the capture). */
	while (index < count) {
		riscv_addr_t read_addr = address + index * increment;
		LOG_DEBUG("i=%d, count=%d, read_addr=0x%" PRIx64, index, count, read_addr);
		/* The pipeline looks like this:
		 * memory -> s1 -> dm_data0 -> debugger
		 * s0 contains read_addr
		 * s1 contains mem[read_addr-size]
		 * dm_data0 contains[read_addr-size*2]
		 */
		struct riscv_batch *batch = riscv_batch_alloc(target, 32,
				info->dmi_busy_delay + info->ac_busy_delay);
		for (unsigned j = index; j < count; j++) {
			riscv_batch_add_dmi_read(batch, DM_DATA1);
			riscv_batch_add_dmi_read(batch, DM_DATA0);
			if (riscv_batch_full(batch))
		batch_run(target, batch);
		/* Wait for the target to finish performing the last abstract command,
		 * and update our copy of cmderr. If we see that DMI is busy here,
		 * dmi_busy_delay will be incremented. */
		uint32_t abstractcs;
		if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
		while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
			if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
		info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
		unsigned next_index;
		unsigned ignore_last = 0;
		switch (info->cmderr) {
			LOG_DEBUG("successful (partial?) memory read");
			next_index = index + reads;
			LOG_DEBUG("memory read resulted in busy response");
			increase_ac_busy_delay(target);
			riscv013_clear_abstract_error(target);
			dmi_write(target, DM_ABSTRACTAUTO, 0);
			uint32_t dmi_data0, dmi_data1 = 0;
			/* This is definitely a good version of the value that we
			 * attempted to read when we discovered that the target was
			 * busy. */
			if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK) {
				riscv_batch_free(batch);
			if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK) {
				riscv_batch_free(batch);
			/* See how far we got, clobbering dmi_data0. */
			if (increment == 0) {
				result = register_read_direct(target, &counter, GDB_REGNO_S2);
				next_index = counter;
				uint64_t next_read_addr;
				result = register_read_direct(target, &next_read_addr,
				next_index = (next_read_addr - address) / increment;
			if (result != ERROR_OK) {
				riscv_batch_free(batch);
			uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
			buf_set_u64(buffer + (next_index - 2) * size, 0, 8 * size, value64);
			log_memory_access(address + (next_index - 2) * size, value64, size, true);
			/* Restore the command, and execute it.
			 * Now DM_DATA0 contains the next value just as it would if no
			 * error had occurred. */
			dmi_write_exec(target, DM_COMMAND, command, true);
			dmi_write(target, DM_ABSTRACTAUTO,
					1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
			LOG_DEBUG("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
			riscv013_clear_abstract_error(target);
			riscv_batch_free(batch);
			result = ERROR_FAIL;
		/* Now read whatever we got out of the batch. */
		dmi_status_t status = DMI_STATUS_SUCCESS;
		for (unsigned j = index - 2; j < index + reads; j++) {
			LOG_DEBUG("index=%d, reads=%d, next_index=%d, ignore_last=%d, j=%d",
					index, reads, next_index, ignore_last, j);
			if (j + 3 + ignore_last > next_index)
				status = riscv_batch_get_dmi_read_op(batch, read);
				uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
				if (status != DMI_STATUS_SUCCESS) {
					/* If we're here because of busy count, dmi_busy_delay will
					 * already have been increased and busy state will have been
					 * cleared in dmi_read(). */
					/* In at least some implementations, we issue a read, and then
					 * can get busy back when we try to scan out the read result,
					 * and the actual read value is lost forever. Since this is
					 * rare in any case, we return error here and rely on our
					 * caller to reread the entire block. */
					LOG_WARNING("Batch memory read encountered DMI error %d. "
							"Falling back on slower reads.", status);
					riscv_batch_free(batch);
					result = ERROR_FAIL;
				status = riscv_batch_get_dmi_read_op(batch, read);
				if (status != DMI_STATUS_SUCCESS) {
					LOG_WARNING("Batch memory read encountered DMI error %d. "
							"Falling back on slower reads.", status);
					riscv_batch_free(batch);
					result = ERROR_FAIL;
				value |= riscv_batch_get_dmi_read_data(batch, read);
			riscv_addr_t offset = j * size;
			buf_set_u64(buffer + offset, 0, 8 * size, value);
			log_memory_access(address + j * increment, value, size, true);
		riscv_batch_free(batch);
	dmi_write(target, DM_ABSTRACTAUTO, 0);
	/* Read the penultimate word. */
	uint32_t dmi_data0, dmi_data1 = 0;
	if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK)
	if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK)
	uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
	buf_set_u64(buffer + size * (count - 2), 0, 8 * size, value64);
	log_memory_access(address + size * (count - 2), value64, size, true);
	/* Read the last word. */
		result = register_read_direct(target, &value, GDB_REGNO_S1);
		if (result != ERROR_OK)
		buf_set_u64(buffer + size * (count -1), 0, 8 * size, value);
		log_memory_access(address + size * (count -1), value, size, true);
	dmi_write(target, DM_ABSTRACTAUTO, 0);
3339 /* Only need to save/restore one GPR to read a single word, and the progbuf
3340 * program doesn't need to increment. */
3341 static int read_memory_progbuf_one(struct target
*target
, target_addr_t address
,
3342 uint32_t size
, uint8_t *buffer
)
3344 uint64_t mstatus
= 0;
3345 uint64_t mstatus_old
= 0;
3346 if (modify_privilege(target
, &mstatus
, &mstatus_old
) != ERROR_OK
)
3350 int result
= ERROR_FAIL
;
3352 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
3353 goto restore_mstatus
;
3355 /* Write the program (load, increment) */
3356 struct riscv_program program
;
3357 riscv_program_init(&program
, target
);
3358 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3359 riscv_program_csrrsi(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3362 riscv_program_lbr(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, 0);
3365 riscv_program_lhr(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, 0);
3368 riscv_program_lwr(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, 0);
3371 riscv_program_ldr(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, 0);
3374 LOG_ERROR("Unsupported size: %d", size
);
3375 goto restore_mstatus
;
3377 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3378 riscv_program_csrrci(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3380 if (riscv_program_ebreak(&program
) != ERROR_OK
)
3381 goto restore_mstatus
;
3382 if (riscv_program_write(&program
) != ERROR_OK
)
3383 goto restore_mstatus
;
3385 /* Write address to S0, and execute buffer. */
3386 if (write_abstract_arg(target
, 0, address
, riscv_xlen(target
)) != ERROR_OK
)
3387 goto restore_mstatus
;
3388 uint32_t command
= access_register_command(target
, GDB_REGNO_S0
,
3389 riscv_xlen(target
), AC_ACCESS_REGISTER_WRITE
|
3390 AC_ACCESS_REGISTER_TRANSFER
| AC_ACCESS_REGISTER_POSTEXEC
);
3391 if (execute_abstract_command(target
, command
) != ERROR_OK
)
3395 if (register_read(target
, &value
, GDB_REGNO_S0
) != ERROR_OK
)
3397 buf_set_u64(buffer
, 0, 8 * size
, value
);
3398 log_memory_access(address
, value
, size
, true);
3402 if (riscv_set_register(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
3403 result
= ERROR_FAIL
;
3406 if (mstatus
!= mstatus_old
)
3407 if (register_write_direct(target
, GDB_REGNO_MSTATUS
, mstatus_old
))
3408 result
= ERROR_FAIL
;
3414 * Read the requested memory, silently handling memory access errors.
3416 static int read_memory_progbuf(struct target
*target
, target_addr_t address
,
3417 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t increment
)
3419 if (riscv_xlen(target
) < size
* 8) {
3420 LOG_ERROR("XLEN (%d) is too short for %d-bit memory read.",
3421 riscv_xlen(target
), size
* 8);
3425 int result
= ERROR_OK
;
3427 LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR
, count
,
3432 memset(buffer
, 0, count
*size
);
3434 if (execute_fence(target
) != ERROR_OK
)
3438 return read_memory_progbuf_one(target
, address
, size
, buffer
);
3440 uint64_t mstatus
= 0;
3441 uint64_t mstatus_old
= 0;
3442 if (modify_privilege(target
, &mstatus
, &mstatus_old
) != ERROR_OK
)
3445 /* s0 holds the next address to read from
3446 * s1 holds the next data value read
3447 * s2 is a counter in case increment is 0
3449 uint64_t s0
, s1
, s2
;
3450 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
3452 if (register_read(target
, &s1
, GDB_REGNO_S1
) != ERROR_OK
)
3454 if (increment
== 0 && register_read(target
, &s2
, GDB_REGNO_S2
) != ERROR_OK
)
3457 /* Write the program (load, increment) */
3458 struct riscv_program program
;
3459 riscv_program_init(&program
, target
);
3460 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3461 riscv_program_csrrsi(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3465 riscv_program_lbr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3468 riscv_program_lhr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3471 riscv_program_lwr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3474 riscv_program_ldr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3477 LOG_ERROR("Unsupported size: %d", size
);
3481 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3482 riscv_program_csrrci(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3484 riscv_program_addi(&program
, GDB_REGNO_S2
, GDB_REGNO_S2
, 1);
3486 riscv_program_addi(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, increment
);
3488 if (riscv_program_ebreak(&program
) != ERROR_OK
)
3490 if (riscv_program_write(&program
) != ERROR_OK
)
3493 result
= read_memory_progbuf_inner(target
, address
, size
, count
, buffer
, increment
);
3495 if (result
!= ERROR_OK
) {
3496 /* The full read did not succeed, so we will try to read each word individually. */
3497 /* This will not be fast, but reading outside actual memory is a special case anyway. */
3498 /* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
3499 target_addr_t address_i
= address
;
3500 uint32_t count_i
= 1;
3501 uint8_t *buffer_i
= buffer
;
3503 for (uint32_t i
= 0; i
< count
; i
++, address_i
+= increment
, buffer_i
+= size
) {
3504 /* TODO: This is much slower than it needs to be because we end up
3505 * writing the address to read for every word we read. */
3506 result
= read_memory_progbuf_inner(target
, address_i
, size
, count_i
, buffer_i
, increment
);
3508 /* The read of a single word failed, so we will just return 0 for that instead */
3509 if (result
!= ERROR_OK
) {
3510 LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR
,
3513 buf_set_u64(buffer_i
, 0, 8 * size
, 0);
3519 riscv_set_register(target
, GDB_REGNO_S0
, s0
);
3520 riscv_set_register(target
, GDB_REGNO_S1
, s1
);
3522 riscv_set_register(target
, GDB_REGNO_S2
, s2
);
3524 /* Restore MSTATUS */
3525 if (mstatus
!= mstatus_old
)
3526 if (register_write_direct(target
, GDB_REGNO_MSTATUS
, mstatus_old
))
3532 static int read_memory(struct target
*target
, target_addr_t address
,
3533 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t increment
)
3538 if (size
!= 1 && size
!= 2 && size
!= 4 && size
!= 8 && size
!= 16) {
3539 LOG_ERROR("BUG: Unsupported size for memory read: %d", size
);
3543 int ret
= ERROR_FAIL
;
3545 RISCV013_INFO(info
);
3547 char *progbuf_result
= "disabled";
3548 char *sysbus_result
= "disabled";
3549 char *abstract_result
= "disabled";
3551 for (unsigned int i
= 0; i
< RISCV_NUM_MEM_ACCESS_METHODS
; i
++) {
3552 int method
= r
->mem_access_methods
[i
];
3554 if (method
== RISCV_MEM_ACCESS_PROGBUF
) {
3555 if (mem_should_skip_progbuf(target
, address
, size
, true, &progbuf_result
))
3558 ret
= read_memory_progbuf(target
, address
, size
, count
, buffer
, increment
);
3560 if (ret
!= ERROR_OK
)
3561 progbuf_result
= "failed";
3562 } else if (method
== RISCV_MEM_ACCESS_SYSBUS
) {
3563 if (mem_should_skip_sysbus(target
, address
, size
, increment
, true, &sysbus_result
))
3566 if (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) == 0)
3567 ret
= read_memory_bus_v0(target
, address
, size
, count
, buffer
, increment
);
3568 else if (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) == 1)
3569 ret
= read_memory_bus_v1(target
, address
, size
, count
, buffer
, increment
);
3571 if (ret
!= ERROR_OK
)
3572 sysbus_result
= "failed";
3573 } else if (method
== RISCV_MEM_ACCESS_ABSTRACT
) {
3574 if (mem_should_skip_abstract(target
, address
, size
, increment
, true, &abstract_result
))
3577 ret
= read_memory_abstract(target
, address
, size
, count
, buffer
, increment
);
3579 if (ret
!= ERROR_OK
)
3580 abstract_result
= "failed";
3581 } else if (method
== RISCV_MEM_ACCESS_UNSPECIFIED
)
3582 /* No further mem access method to try. */
3585 log_mem_access_result(target
, ret
== ERROR_OK
, method
, true);
3587 if (ret
== ERROR_OK
)
3591 LOG_ERROR("Target %s: Failed to read memory (addr=0x%" PRIx64
")", target_name(target
), address
);
3592 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result
, sysbus_result
, abstract_result
);
3596 static int write_memory_bus_v0(struct target
*target
, target_addr_t address
,
3597 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
3599 /*1) write sbaddress: for singlewrite and autoincrement, we need to write the address once*/
3600 LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
3601 TARGET_PRIxADDR
, size
, count
, address
);
3602 dmi_write(target
, DM_SBADDRESS0
, address
);
3605 riscv_addr_t offset
= 0;
3606 riscv_addr_t t_addr
= 0;
3607 const uint8_t *t_buffer
= buffer
+ offset
;
3609 /* B.8 Writing Memory, single write check if we write in one go */
3610 if (count
== 1) { /* count is in bytes here */
3611 value
= buf_get_u64(t_buffer
, 0, 8 * size
);
3614 access
= set_field(access
, DM_SBCS_SBACCESS
, size
/2);
3615 dmi_write(target
, DM_SBCS
, access
);
3616 LOG_DEBUG("\r\naccess: 0x%08" PRIx64
, access
);
3617 LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64
, value
);
3618 dmi_write(target
, DM_SBDATA0
, value
);
3622 /*B.8 Writing Memory, using autoincrement*/
3625 access
= set_field(access
, DM_SBCS_SBACCESS
, size
/2);
3626 access
= set_field(access
, DM_SBCS_SBAUTOINCREMENT
, 1);
3627 LOG_DEBUG("\r\naccess: 0x%08" PRIx64
, access
);
3628 dmi_write(target
, DM_SBCS
, access
);
3630 /*2)set the value according to the size required and write*/
3631 for (riscv_addr_t i
= 0; i
< count
; ++i
) {
3633 /* for monitoring only */
3634 t_addr
= address
+ offset
;
3635 t_buffer
= buffer
+ offset
;
3637 value
= buf_get_u64(t_buffer
, 0, 8 * size
);
3638 LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
3639 PRIx64
, (uint32_t)t_addr
, (uint32_t)value
);
3640 dmi_write(target
, DM_SBDATA0
, value
);
3642 /*reset the autoincrement when finished (something weird is happening if this is not done at the end*/
3643 access
= set_field(access
, DM_SBCS_SBAUTOINCREMENT
, 0);
3644 dmi_write(target
, DM_SBCS
, access
);
3649 static int write_memory_bus_v1(struct target
*target
, target_addr_t address
,
3650 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
3652 RISCV013_INFO(info
);
3653 uint32_t sbcs
= sb_sbaccess(size
);
3654 sbcs
= set_field(sbcs
, DM_SBCS_SBAUTOINCREMENT
, 1);
3655 dmi_write(target
, DM_SBCS
, sbcs
);
3657 target_addr_t next_address
= address
;
3658 target_addr_t end_address
= address
+ count
* size
;
3662 sb_write_address(target
, next_address
, true);
3663 while (next_address
< end_address
) {
3664 LOG_DEBUG("transferring burst starting at address 0x%" TARGET_PRIxADDR
,
3667 struct riscv_batch
*batch
= riscv_batch_alloc(
3670 info
->dmi_busy_delay
+ info
->bus_master_write_delay
);
3674 for (uint32_t i
= (next_address
- address
) / size
; i
< count
; i
++) {
3675 const uint8_t *p
= buffer
+ i
* size
;
3677 if (riscv_batch_available_scans(batch
) < (size
+ 3) / 4)
3681 riscv_batch_add_dmi_write(batch
, DM_SBDATA3
,
3682 ((uint32_t) p
[12]) |
3683 (((uint32_t) p
[13]) << 8) |
3684 (((uint32_t) p
[14]) << 16) |
3685 (((uint32_t) p
[15]) << 24));
3688 riscv_batch_add_dmi_write(batch
, DM_SBDATA2
,
3690 (((uint32_t) p
[9]) << 8) |
3691 (((uint32_t) p
[10]) << 16) |
3692 (((uint32_t) p
[11]) << 24));
3694 riscv_batch_add_dmi_write(batch
, DM_SBDATA1
,
3696 (((uint32_t) p
[5]) << 8) |
3697 (((uint32_t) p
[6]) << 16) |
3698 (((uint32_t) p
[7]) << 24));
3699 uint32_t value
= p
[0];
3701 value
|= ((uint32_t) p
[2]) << 16;
3702 value
|= ((uint32_t) p
[3]) << 24;
3705 value
|= ((uint32_t) p
[1]) << 8;
3706 riscv_batch_add_dmi_write(batch
, DM_SBDATA0
, value
);
3708 log_memory_access(address
+ i
* size
, value
, size
, false);
3709 next_address
+= size
;
3712 /* Execute the batch of writes */
3713 result
= batch_run(target
, batch
);
3714 riscv_batch_free(batch
);
3715 if (result
!= ERROR_OK
)
3719 * At the same time, detect if DMI busy has occurred during the batch write. */
3720 bool dmi_busy_encountered
;
3721 if (dmi_op(target
, &sbcs
, &dmi_busy_encountered
, DMI_OP_READ
,
3722 DM_SBCS
, 0, false, true) != ERROR_OK
)
3724 if (dmi_busy_encountered
)
3725 LOG_DEBUG("DMI busy encountered during system bus write.");
3727 /* Wait until sbbusy goes low */
3728 time_t start
= time(NULL
);
3729 while (get_field(sbcs
, DM_SBCS_SBBUSY
)) {
3730 if (time(NULL
) - start
> riscv_command_timeout_sec
) {
3731 LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
3732 "Increase the timeout with riscv set_command_timeout_sec.",
3733 riscv_command_timeout_sec
, sbcs
);
3736 if (dmi_read(target
, &sbcs
, DM_SBCS
) != ERROR_OK
)
3740 if (get_field(sbcs
, DM_SBCS_SBBUSYERROR
)) {
3741 /* We wrote while the target was busy. */
3742 LOG_DEBUG("Sbbusyerror encountered during system bus write.");
3743 /* Clear the sticky error flag. */
3744 dmi_write(target
, DM_SBCS
, sbcs
| DM_SBCS_SBBUSYERROR
);
3745 /* Slow down before trying again. */
3746 info
->bus_master_write_delay
+= info
->bus_master_write_delay
/ 10 + 1;
3749 if (get_field(sbcs
, DM_SBCS_SBBUSYERROR
) || dmi_busy_encountered
) {
3750 /* Recover from the case when the write commands were issued too fast.
3751 * Determine the address from which to resume writing. */
3752 next_address
= sb_read_address(target
);
3753 if (next_address
< address
) {
3754 /* This should never happen, probably buggy hardware. */
3755 LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
3756 " - buggy sbautoincrement in hw?", next_address
);
3757 /* Fail the whole operation. */
3760 /* Try again - resume writing. */
3764 unsigned int sberror
= get_field(sbcs
, DM_SBCS_SBERROR
);
3766 /* Sberror indicates the bus access failed, but not because we issued the writes
3767 * too fast. Cannot recover. Sbaddress holds the address where the error occurred
3768 * (unless sbautoincrement in the HW is buggy).
3770 target_addr_t sbaddress
= sb_read_address(target
);
3771 LOG_DEBUG("System bus access failed with sberror=%u (sbaddress=0x%" TARGET_PRIxADDR
")",
3772 sberror
, sbaddress
);
3773 if (sbaddress
< address
) {
3774 /* This should never happen, probably buggy hardware.
3775 * Make a note to the user not to trust the sbaddress value. */
3776 LOG_DEBUG("unexpected sbaddress=0x%" TARGET_PRIxADDR
3777 " - buggy sbautoincrement in hw?", next_address
);
3779 /* Clear the sticky error flag */
3780 dmi_write(target
, DM_SBCS
, DM_SBCS_SBERROR
);
3781 /* Fail the whole operation */
3789 static int write_memory_progbuf(struct target
*target
, target_addr_t address
,
3790 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
3792 RISCV013_INFO(info
);
3794 if (riscv_xlen(target
) < size
* 8) {
3795 LOG_ERROR("XLEN (%d) is too short for %d-bit memory write.",
3796 riscv_xlen(target
), size
* 8);
3800 LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count
, size
, (long)address
);
3804 uint64_t mstatus
= 0;
3805 uint64_t mstatus_old
= 0;
3806 if (modify_privilege(target
, &mstatus
, &mstatus_old
) != ERROR_OK
)
3809 /* s0 holds the next address to write to
3810 * s1 holds the next data value to write
3813 int result
= ERROR_OK
;
3815 if (register_read(target
, &s0
, GDB_REGNO_S0
) != ERROR_OK
)
3817 if (register_read(target
, &s1
, GDB_REGNO_S1
) != ERROR_OK
)
3820 /* Write the program (store, increment) */
3821 struct riscv_program program
;
3822 riscv_program_init(&program
, target
);
3823 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3824 riscv_program_csrrsi(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3828 riscv_program_sbr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3831 riscv_program_shr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3834 riscv_program_swr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3837 riscv_program_sdr(&program
, GDB_REGNO_S1
, GDB_REGNO_S0
, 0);
3840 LOG_ERROR("write_memory_progbuf(): Unsupported size: %d", size
);
3841 result
= ERROR_FAIL
;
3845 if (riscv_enable_virtual
&& has_sufficient_progbuf(target
, 5) && get_field(mstatus
, MSTATUS_MPRV
))
3846 riscv_program_csrrci(&program
, GDB_REGNO_ZERO
, CSR_DCSR_MPRVEN
, GDB_REGNO_DCSR
);
3847 riscv_program_addi(&program
, GDB_REGNO_S0
, GDB_REGNO_S0
, size
);
3849 result
= riscv_program_ebreak(&program
);
3850 if (result
!= ERROR_OK
)
3852 riscv_program_write(&program
);
3854 riscv_addr_t cur_addr
= address
;
3855 riscv_addr_t fin_addr
= address
+ (count
* size
);
3856 bool setup_needed
= true;
3857 LOG_DEBUG("writing until final address 0x%016" PRIx64
, fin_addr
);
3858 while (cur_addr
< fin_addr
) {
3859 LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64
,
3862 struct riscv_batch
*batch
= riscv_batch_alloc(
3865 info
->dmi_busy_delay
+ info
->ac_busy_delay
);
3869 /* To write another word, we put it in S1 and execute the program. */
3870 unsigned start
= (cur_addr
- address
) / size
;
3871 for (unsigned i
= start
; i
< count
; ++i
) {
3872 unsigned offset
= size
*i
;
3873 const uint8_t *t_buffer
= buffer
+ offset
;
3875 uint64_t value
= buf_get_u64(t_buffer
, 0, 8 * size
);
3877 log_memory_access(address
+ offset
, value
, size
, false);
3881 result
= register_write_direct(target
, GDB_REGNO_S0
,
3883 if (result
!= ERROR_OK
) {
3884 riscv_batch_free(batch
);
3890 dmi_write(target
, DM_DATA1
, value
>> 32);
3891 dmi_write(target
, DM_DATA0
, value
);
3893 /* Write and execute command that moves value into S1 and
3894 * executes program buffer. */
3895 uint32_t command
= access_register_command(target
,
3896 GDB_REGNO_S1
, riscv_xlen(target
),
3897 AC_ACCESS_REGISTER_POSTEXEC
|
3898 AC_ACCESS_REGISTER_TRANSFER
|
3899 AC_ACCESS_REGISTER_WRITE
);
3900 result
= execute_abstract_command(target
, command
);
3901 if (result
!= ERROR_OK
) {
3902 riscv_batch_free(batch
);
3906 /* Turn on autoexec */
3907 dmi_write(target
, DM_ABSTRACTAUTO
,
3908 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET
);
3910 setup_needed
= false;
3913 riscv_batch_add_dmi_write(batch
, DM_DATA1
, value
>> 32);
3914 riscv_batch_add_dmi_write(batch
, DM_DATA0
, value
);
3915 if (riscv_batch_full(batch
))
3920 result
= batch_run(target
, batch
);
3921 riscv_batch_free(batch
);
3922 if (result
!= ERROR_OK
)
3925 /* Note that if the scan resulted in a Busy DMI response, it
3926 * is this read to abstractcs that will cause the dmi_busy_delay
3927 * to be incremented if necessary. */
3929 uint32_t abstractcs
;
3930 bool dmi_busy_encountered
;
3931 result
= dmi_op(target
, &abstractcs
, &dmi_busy_encountered
,
3932 DMI_OP_READ
, DM_ABSTRACTCS
, 0, false, true);
3933 if (result
!= ERROR_OK
)
3935 while (get_field(abstractcs
, DM_ABSTRACTCS_BUSY
))
3936 if (dmi_read(target
, &abstractcs
, DM_ABSTRACTCS
) != ERROR_OK
)
3938 info
->cmderr
= get_field(abstractcs
, DM_ABSTRACTCS_CMDERR
);
3939 if (info
->cmderr
== CMDERR_NONE
&& !dmi_busy_encountered
) {
3940 LOG_DEBUG("successful (partial?) memory write");
3941 } else if (info
->cmderr
== CMDERR_BUSY
|| dmi_busy_encountered
) {
3942 if (info
->cmderr
== CMDERR_BUSY
)
3943 LOG_DEBUG("Memory write resulted in abstract command busy response.");
3944 else if (dmi_busy_encountered
)
3945 LOG_DEBUG("Memory write resulted in DMI busy response.");
3946 riscv013_clear_abstract_error(target
);
3947 increase_ac_busy_delay(target
);
3949 dmi_write(target
, DM_ABSTRACTAUTO
, 0);
3950 result
= register_read_direct(target
, &cur_addr
, GDB_REGNO_S0
);
3951 if (result
!= ERROR_OK
)
3953 setup_needed
= true;
3955 LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs
);
3956 riscv013_clear_abstract_error(target
);
3957 result
= ERROR_FAIL
;
3963 dmi_write(target
, DM_ABSTRACTAUTO
, 0);
3965 if (register_write_direct(target
, GDB_REGNO_S1
, s1
) != ERROR_OK
)
3967 if (register_write_direct(target
, GDB_REGNO_S0
, s0
) != ERROR_OK
)
3970 /* Restore MSTATUS */
3971 if (mstatus
!= mstatus_old
)
3972 if (register_write_direct(target
, GDB_REGNO_MSTATUS
, mstatus_old
))
3975 if (execute_fence(target
) != ERROR_OK
)
3981 static int write_memory(struct target
*target
, target_addr_t address
,
3982 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
3984 if (size
!= 1 && size
!= 2 && size
!= 4 && size
!= 8 && size
!= 16) {
3985 LOG_ERROR("BUG: Unsupported size for memory write: %d", size
);
3989 int ret
= ERROR_FAIL
;
3991 RISCV013_INFO(info
);
3993 char *progbuf_result
= "disabled";
3994 char *sysbus_result
= "disabled";
3995 char *abstract_result
= "disabled";
3997 for (unsigned int i
= 0; i
< RISCV_NUM_MEM_ACCESS_METHODS
; i
++) {
3998 int method
= r
->mem_access_methods
[i
];
4000 if (method
== RISCV_MEM_ACCESS_PROGBUF
) {
4001 if (mem_should_skip_progbuf(target
, address
, size
, false, &progbuf_result
))
4004 ret
= write_memory_progbuf(target
, address
, size
, count
, buffer
);
4006 if (ret
!= ERROR_OK
)
4007 progbuf_result
= "failed";
4008 } else if (method
== RISCV_MEM_ACCESS_SYSBUS
) {
4009 if (mem_should_skip_sysbus(target
, address
, size
, 0, false, &sysbus_result
))
4012 if (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) == 0)
4013 ret
= write_memory_bus_v0(target
, address
, size
, count
, buffer
);
4014 else if (get_field(info
->sbcs
, DM_SBCS_SBVERSION
) == 1)
4015 ret
= write_memory_bus_v1(target
, address
, size
, count
, buffer
);
4017 if (ret
!= ERROR_OK
)
4018 sysbus_result
= "failed";
4019 } else if (method
== RISCV_MEM_ACCESS_ABSTRACT
) {
4020 if (mem_should_skip_abstract(target
, address
, size
, 0, false, &abstract_result
))
4023 ret
= write_memory_abstract(target
, address
, size
, count
, buffer
);
4025 if (ret
!= ERROR_OK
)
4026 abstract_result
= "failed";
4027 } else if (method
== RISCV_MEM_ACCESS_UNSPECIFIED
)
4028 /* No further mem access method to try. */
4031 log_mem_access_result(target
, ret
== ERROR_OK
, method
, false);
4033 if (ret
== ERROR_OK
)
4037 LOG_ERROR("Target %s: Failed to write memory (addr=0x%" PRIx64
")", target_name(target
), address
);
4038 LOG_ERROR(" progbuf=%s, sysbus=%s, abstract=%s", progbuf_result
, sysbus_result
, abstract_result
);
4042 static int arch_state(struct target
*target
)
4047 struct target_type riscv013_target
= {
4050 .init_target
= init_target
,
4051 .deinit_target
= deinit_target
,
4054 .poll
= &riscv_openocd_poll
,
4055 .halt
= &riscv_halt
,
4056 .step
= &riscv_openocd_step
,
4058 .assert_reset
= assert_reset
,
4059 .deassert_reset
= deassert_reset
,
4061 .write_memory
= write_memory
,
4063 .arch_state
= arch_state
4066 /*** 0.13-specific implementations of various RISC-V helper functions. ***/
4067 static int riscv013_get_register(struct target
*target
,
4068 riscv_reg_t
*value
, int rid
)
4070 LOG_DEBUG("[%s] reading register %s", target_name(target
),
4071 gdb_regno_name(rid
));
4073 if (riscv_select_current_hart(target
) != ERROR_OK
)
4076 int result
= ERROR_OK
;
4077 if (rid
== GDB_REGNO_PC
) {
4078 /* TODO: move this into riscv.c. */
4079 result
= register_read(target
, value
, GDB_REGNO_DPC
);
4080 LOG_DEBUG("[%d] read PC from DPC: 0x%" PRIx64
, target
->coreid
, *value
);
4081 } else if (rid
== GDB_REGNO_PRIV
) {
4083 /* TODO: move this into riscv.c. */
4084 result
= register_read(target
, &dcsr
, GDB_REGNO_DCSR
);
4085 *value
= set_field(0, VIRT_PRIV_V
, get_field(dcsr
, CSR_DCSR_V
));
4086 *value
= set_field(*value
, VIRT_PRIV_PRV
, get_field(dcsr
, CSR_DCSR_PRV
));
4088 result
= register_read(target
, value
, rid
);
4089 if (result
!= ERROR_OK
)
4096 static int riscv013_set_register(struct target
*target
, int rid
, uint64_t value
)
4098 riscv013_select_current_hart(target
);
4099 LOG_DEBUG("[%d] writing 0x%" PRIx64
" to register %s",
4100 target
->coreid
, value
, gdb_regno_name(rid
));
4102 if (rid
<= GDB_REGNO_XPR31
) {
4103 return register_write_direct(target
, rid
, value
);
4104 } else if (rid
== GDB_REGNO_PC
) {
4105 LOG_DEBUG("[%d] writing PC to DPC: 0x%" PRIx64
, target
->coreid
, value
);
4106 register_write_direct(target
, GDB_REGNO_DPC
, value
);
4107 uint64_t actual_value
;
4108 register_read_direct(target
, &actual_value
, GDB_REGNO_DPC
);
4109 LOG_DEBUG("[%d] actual DPC written: 0x%016" PRIx64
, target
->coreid
, actual_value
);
4110 if (value
!= actual_value
) {
4111 LOG_ERROR("Written PC (0x%" PRIx64
") does not match read back "
4112 "value (0x%" PRIx64
")", value
, actual_value
);
4115 } else if (rid
== GDB_REGNO_PRIV
) {
4117 register_read(target
, &dcsr
, GDB_REGNO_DCSR
);
4118 dcsr
= set_field(dcsr
, CSR_DCSR_PRV
, get_field(value
, VIRT_PRIV_PRV
));
4119 dcsr
= set_field(dcsr
, CSR_DCSR_V
, get_field(value
, VIRT_PRIV_V
));
4120 return register_write_direct(target
, GDB_REGNO_DCSR
, dcsr
);
4122 return register_write_direct(target
, rid
, value
);
4128 static int riscv013_select_current_hart(struct target
*target
)
4132 dm013_info_t
*dm
= get_dm(target
);
4135 if (r
->current_hartid
== dm
->current_hartid
)
4139 /* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
4140 if (dmi_read(target
, &dmcontrol
, DM_DMCONTROL
) != ERROR_OK
)
4142 dmcontrol
= set_hartsel(dmcontrol
, r
->current_hartid
);
4143 int result
= dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4144 dm
->current_hartid
= r
->current_hartid
;
4148 /* Select all harts that were prepped and that are selectable, clearing the
4149 * prepped flag on the harts that actually were selected. */
4150 static int select_prepped_harts(struct target
*target
, bool *use_hasel
)
4152 dm013_info_t
*dm
= get_dm(target
);
4155 if (!dm
->hasel_supported
) {
4162 assert(dm
->hart_count
);
4163 unsigned hawindow_count
= (dm
->hart_count
+ 31) / 32;
4164 uint32_t hawindow
[hawindow_count
];
4166 memset(hawindow
, 0, sizeof(uint32_t) * hawindow_count
);
4168 target_list_t
*entry
;
4169 unsigned total_selected
= 0;
4170 list_for_each_entry(entry
, &dm
->target_list
, list
) {
4171 struct target
*t
= entry
->target
;
4172 struct riscv_info
*r
= riscv_info(t
);
4173 riscv013_info_t
*info
= get_info(t
);
4174 unsigned index
= info
->index
;
4175 LOG_DEBUG("index=%d, coreid=%d, prepped=%d", index
, t
->coreid
, r
->prepped
);
4176 r
->selected
= r
->prepped
;
4178 hawindow
[index
/ 32] |= 1 << (index
% 32);
4185 /* Don't use hasel if we only need to talk to one hart. */
4186 if (total_selected
<= 1) {
4191 for (unsigned i
= 0; i
< hawindow_count
; i
++) {
4192 if (dmi_write(target
, DM_HAWINDOWSEL
, i
) != ERROR_OK
)
4194 if (dmi_write(target
, DM_HAWINDOW
, hawindow
[i
]) != ERROR_OK
)
4202 static int riscv013_halt_prep(struct target
*target
)
4207 static int riscv013_halt_go(struct target
*target
)
4209 bool use_hasel
= false;
4210 if (select_prepped_harts(target
, &use_hasel
) != ERROR_OK
)
4214 LOG_DEBUG("halting hart %d", r
->current_hartid
);
4216 /* Issue the halt command, and then wait for the current hart to halt. */
4217 uint32_t dmcontrol
= DM_DMCONTROL_DMACTIVE
| DM_DMCONTROL_HALTREQ
;
4219 dmcontrol
|= DM_DMCONTROL_HASEL
;
4220 dmcontrol
= set_hartsel(dmcontrol
, r
->current_hartid
);
4221 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4222 for (size_t i
= 0; i
< 256; ++i
)
4223 if (riscv_is_halted(target
))
4226 if (!riscv_is_halted(target
)) {
4228 if (dmstatus_read(target
, &dmstatus
, true) != ERROR_OK
)
4230 if (dmi_read(target
, &dmcontrol
, DM_DMCONTROL
) != ERROR_OK
)
4233 LOG_ERROR("unable to halt hart %d", r
->current_hartid
);
4234 LOG_ERROR(" dmcontrol=0x%08x", dmcontrol
);
4235 LOG_ERROR(" dmstatus =0x%08x", dmstatus
);
4239 dmcontrol
= set_field(dmcontrol
, DM_DMCONTROL_HALTREQ
, 0);
4240 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4243 target_list_t
*entry
;
4244 dm013_info_t
*dm
= get_dm(target
);
4247 list_for_each_entry(entry
, &dm
->target_list
, list
) {
4248 struct target
*t
= entry
->target
;
4249 t
->state
= TARGET_HALTED
;
4250 if (t
->debug_reason
== DBG_REASON_NOTHALTED
)
4251 t
->debug_reason
= DBG_REASON_DBGRQ
;
4254 /* The "else" case is handled in halt_go(). */
4259 static int riscv013_resume_go(struct target
*target
)
4261 bool use_hasel
= false;
4262 if (select_prepped_harts(target
, &use_hasel
) != ERROR_OK
)
4265 return riscv013_step_or_resume_current_hart(target
, false, use_hasel
);
4268 static int riscv013_step_current_hart(struct target
*target
)
4270 return riscv013_step_or_resume_current_hart(target
, true, false);
4273 static int riscv013_resume_prep(struct target
*target
)
4275 return riscv013_on_step_or_resume(target
, false);
4278 static int riscv013_on_step(struct target
*target
)
4280 return riscv013_on_step_or_resume(target
, true);
4283 static int riscv013_on_halt(struct target
*target
)
4288 static bool riscv013_is_halted(struct target
*target
)
4291 if (dmstatus_read(target
, &dmstatus
, true) != ERROR_OK
)
4293 if (get_field(dmstatus
, DM_DMSTATUS_ANYUNAVAIL
))
4294 LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target
));
4295 if (get_field(dmstatus
, DM_DMSTATUS_ANYNONEXISTENT
))
4296 LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target
));
4297 if (get_field(dmstatus
, DM_DMSTATUS_ANYHAVERESET
)) {
4298 int hartid
= riscv_current_hartid(target
);
4299 LOG_INFO("Hart %d unexpectedly reset!", hartid
);
4300 /* TODO: Can we make this more obvious to eg. a gdb user? */
4301 uint32_t dmcontrol
= DM_DMCONTROL_DMACTIVE
|
4302 DM_DMCONTROL_ACKHAVERESET
;
4303 dmcontrol
= set_hartsel(dmcontrol
, hartid
);
4304 /* If we had been halted when we reset, request another halt. If we
4305 * ended up running out of reset, then the user will (hopefully) get a
4306 * message that a reset happened, that the target is running, and then
4307 * that it is halted again once the request goes through.
4309 if (target
->state
== TARGET_HALTED
)
4310 dmcontrol
|= DM_DMCONTROL_HALTREQ
;
4311 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4313 return get_field(dmstatus
, DM_DMSTATUS_ALLHALTED
);
4316 static enum riscv_halt_reason
riscv013_halt_reason(struct target
*target
)
4319 int result
= register_read(target
, &dcsr
, GDB_REGNO_DCSR
);
4320 if (result
!= ERROR_OK
)
4321 return RISCV_HALT_UNKNOWN
;
4323 LOG_DEBUG("dcsr.cause: 0x%" PRIx64
, get_field(dcsr
, CSR_DCSR_CAUSE
));
4325 switch (get_field(dcsr
, CSR_DCSR_CAUSE
)) {
4326 case CSR_DCSR_CAUSE_SWBP
:
4327 return RISCV_HALT_BREAKPOINT
;
4328 case CSR_DCSR_CAUSE_TRIGGER
:
4329 /* We could get here before triggers are enumerated if a trigger was
4330 * already set when we connected. Force enumeration now, which has the
4331 * side effect of clearing any triggers we did not set. */
4332 riscv_enumerate_triggers(target
);
4333 LOG_DEBUG("{%d} halted because of trigger", target
->coreid
);
4334 return RISCV_HALT_TRIGGER
;
4335 case CSR_DCSR_CAUSE_STEP
:
4336 return RISCV_HALT_SINGLESTEP
;
4337 case CSR_DCSR_CAUSE_DEBUGINT
:
4338 case CSR_DCSR_CAUSE_HALT
:
4339 return RISCV_HALT_INTERRUPT
;
4340 case CSR_DCSR_CAUSE_GROUP
:
4341 return RISCV_HALT_GROUP
;
4344 LOG_ERROR("Unknown DCSR cause field: 0x%" PRIx64
, get_field(dcsr
, CSR_DCSR_CAUSE
));
4345 LOG_ERROR(" dcsr=0x%016lx", (long)dcsr
);
4346 return RISCV_HALT_UNKNOWN
;
4349 int riscv013_write_debug_buffer(struct target
*target
, unsigned index
, riscv_insn_t data
)
4351 dm013_info_t
*dm
= get_dm(target
);
4354 if (dm
->progbuf_cache
[index
] != data
) {
4355 if (dmi_write(target
, DM_PROGBUF0
+ index
, data
) != ERROR_OK
)
4357 dm
->progbuf_cache
[index
] = data
;
4359 LOG_DEBUG("cache hit for 0x%" PRIx32
" @%d", data
, index
);
4364 riscv_insn_t
riscv013_read_debug_buffer(struct target
*target
, unsigned index
)
4367 dmi_read(target
, &value
, DM_PROGBUF0
+ index
);
4371 int riscv013_execute_debug_buffer(struct target
*target
)
4373 uint32_t run_program
= 0;
4374 run_program
= set_field(run_program
, AC_ACCESS_REGISTER_AARSIZE
, 2);
4375 run_program
= set_field(run_program
, AC_ACCESS_REGISTER_POSTEXEC
, 1);
4376 run_program
= set_field(run_program
, AC_ACCESS_REGISTER_TRANSFER
, 0);
4377 run_program
= set_field(run_program
, AC_ACCESS_REGISTER_REGNO
, 0x1000);
4379 return execute_abstract_command(target
, run_program
);
4382 void riscv013_fill_dmi_write_u64(struct target
*target
, char *buf
, int a
, uint64_t d
)
4384 RISCV013_INFO(info
);
4385 buf_set_u64((unsigned char *)buf
, DTM_DMI_OP_OFFSET
, DTM_DMI_OP_LENGTH
, DMI_OP_WRITE
);
4386 buf_set_u64((unsigned char *)buf
, DTM_DMI_DATA_OFFSET
, DTM_DMI_DATA_LENGTH
, d
);
4387 buf_set_u64((unsigned char *)buf
, DTM_DMI_ADDRESS_OFFSET
, info
->abits
, a
);
4390 void riscv013_fill_dmi_read_u64(struct target
*target
, char *buf
, int a
)
4392 RISCV013_INFO(info
);
4393 buf_set_u64((unsigned char *)buf
, DTM_DMI_OP_OFFSET
, DTM_DMI_OP_LENGTH
, DMI_OP_READ
);
4394 buf_set_u64((unsigned char *)buf
, DTM_DMI_DATA_OFFSET
, DTM_DMI_DATA_LENGTH
, 0);
4395 buf_set_u64((unsigned char *)buf
, DTM_DMI_ADDRESS_OFFSET
, info
->abits
, a
);
4398 void riscv013_fill_dmi_nop_u64(struct target
*target
, char *buf
)
4400 RISCV013_INFO(info
);
4401 buf_set_u64((unsigned char *)buf
, DTM_DMI_OP_OFFSET
, DTM_DMI_OP_LENGTH
, DMI_OP_NOP
);
4402 buf_set_u64((unsigned char *)buf
, DTM_DMI_DATA_OFFSET
, DTM_DMI_DATA_LENGTH
, 0);
4403 buf_set_u64((unsigned char *)buf
, DTM_DMI_ADDRESS_OFFSET
, info
->abits
, 0);
4406 int riscv013_dmi_write_u64_bits(struct target
*target
)
4408 RISCV013_INFO(info
);
4409 return info
->abits
+ DTM_DMI_DATA_LENGTH
+ DTM_DMI_OP_LENGTH
;
4412 static int maybe_execute_fence_i(struct target
*target
)
4414 if (has_sufficient_progbuf(target
, 3))
4415 return execute_fence(target
);
4419 /* Helper Functions. */
4420 static int riscv013_on_step_or_resume(struct target
*target
, bool step
)
4422 if (maybe_execute_fence_i(target
) != ERROR_OK
)
4425 /* We want to twiddle some bits in the debug CSR so debugging works. */
4427 int result
= register_read(target
, &dcsr
, GDB_REGNO_DCSR
);
4428 if (result
!= ERROR_OK
)
4430 dcsr
= set_field(dcsr
, CSR_DCSR_STEP
, step
);
4431 dcsr
= set_field(dcsr
, CSR_DCSR_EBREAKM
, riscv_ebreakm
);
4432 dcsr
= set_field(dcsr
, CSR_DCSR_EBREAKS
, riscv_ebreaks
);
4433 dcsr
= set_field(dcsr
, CSR_DCSR_EBREAKU
, riscv_ebreaku
);
4434 return riscv_set_register(target
, GDB_REGNO_DCSR
, dcsr
);
4437 static int riscv013_step_or_resume_current_hart(struct target
*target
,
4438 bool step
, bool use_hasel
)
4441 LOG_DEBUG("resuming hart %d (for step?=%d)", r
->current_hartid
, step
);
4442 if (!riscv_is_halted(target
)) {
4443 LOG_ERROR("Hart %d is not halted!", r
->current_hartid
);
4447 /* Issue the resume command, and then wait for the current hart to resume. */
4448 uint32_t dmcontrol
= DM_DMCONTROL_DMACTIVE
| DM_DMCONTROL_RESUMEREQ
;
4450 dmcontrol
|= DM_DMCONTROL_HASEL
;
4451 dmcontrol
= set_hartsel(dmcontrol
, r
->current_hartid
);
4452 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4454 dmcontrol
= set_field(dmcontrol
, DM_DMCONTROL_HASEL
, 0);
4455 dmcontrol
= set_field(dmcontrol
, DM_DMCONTROL_RESUMEREQ
, 0);
4458 for (size_t i
= 0; i
< 256; ++i
) {
4460 if (dmstatus_read(target
, &dmstatus
, true) != ERROR_OK
)
4462 if (get_field(dmstatus
, DM_DMSTATUS_ALLRESUMEACK
) == 0)
4464 if (step
&& get_field(dmstatus
, DM_DMSTATUS_ALLHALTED
) == 0)
4467 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4471 dmi_write(target
, DM_DMCONTROL
, dmcontrol
);
4473 LOG_ERROR("unable to resume hart %d", r
->current_hartid
);
4474 if (dmstatus_read(target
, &dmstatus
, true) != ERROR_OK
)
4476 LOG_ERROR(" dmstatus =0x%08x", dmstatus
);
4479 LOG_ERROR(" was stepping, halting");
4487 void riscv013_clear_abstract_error(struct target
*target
)
4489 /* Wait for busy to go away. */
4490 time_t start
= time(NULL
);
4491 uint32_t abstractcs
;
4492 dmi_read(target
, &abstractcs
, DM_ABSTRACTCS
);
4493 while (get_field(abstractcs
, DM_ABSTRACTCS_BUSY
)) {
4494 dmi_read(target
, &abstractcs
, DM_ABSTRACTCS
);
4496 if (time(NULL
) - start
> riscv_command_timeout_sec
) {
4497 LOG_ERROR("abstractcs.busy is not going low after %d seconds "
4498 "(abstractcs=0x%x). The target is either really slow or "
4499 "broken. You could increase the timeout with riscv "
4500 "set_command_timeout_sec.",
4501 riscv_command_timeout_sec
, abstractcs
);
4505 /* Clear the error status. */
4506 dmi_write(target
, DM_ABSTRACTCS
, DM_ABSTRACTCS_CMDERR
);