+ RISCV_INFO(r);
+
+ if (!r->is_halted) {
+ struct target_type *tt = get_target_type(target);
+ return tt->halt(target);
+ }
+
+ LOG_DEBUG("[%d] halting all harts", target->coreid);
+
+ int result = ERROR_OK;
+ if (target->smp) {
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ if (halt_prep(t) != ERROR_OK)
+ result = ERROR_FAIL;
+ }
+
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ riscv_info_t *i = riscv_info(t);
+ if (i->prepped) {
+ if (halt_go(t) != ERROR_OK)
+ result = ERROR_FAIL;
+ }
+ }
+
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ if (halt_finish(t) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ } else {
+ if (halt_prep(target) != ERROR_OK)
+ result = ERROR_FAIL;
+ if (halt_go(target) != ERROR_OK)
+ result = ERROR_FAIL;
+ if (halt_finish(target) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ return result;
+}
+
+static int riscv_assert_reset(struct target *target)
+{
+ LOG_DEBUG("[%d]", target->coreid);
+ struct target_type *tt = get_target_type(target);
+ riscv_invalidate_register_cache(target);
+ return tt->assert_reset(target);
+}
+
+static int riscv_deassert_reset(struct target *target)
+{
+ LOG_DEBUG("[%d]", target->coreid);
+ struct target_type *tt = get_target_type(target);
+ return tt->deassert_reset(target);
+}
+
+int riscv_resume_prep_all_harts(struct target *target)
+{
+ RISCV_INFO(r);
+
+ LOG_DEBUG("[%s] prep hart", target_name(target));
+ if (riscv_select_current_hart(target) != ERROR_OK)
+ return ERROR_FAIL;
+ if (riscv_is_halted(target)) {
+ if (r->resume_prep(target) != ERROR_OK)
+ return ERROR_FAIL;
+ } else {
+ LOG_DEBUG("[%s] hart requested resume, but was already resumed",
+ target_name(target));
+ }
+
+ LOG_DEBUG("[%s] mark as prepped", target_name(target));
+ r->prepped = true;
+
+ return ERROR_OK;
+}
+
+/* state must be riscv_reg_t state[RISCV_MAX_HWBPS] = {0}; */
+static int disable_triggers(struct target *target, riscv_reg_t *state)
+{
+ RISCV_INFO(r);
+
+ LOG_DEBUG("deal with triggers");
+
+ if (riscv_enumerate_triggers(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (r->manual_hwbp_set) {
+ /* Look at every trigger that may have been set. */
+ riscv_reg_t tselect;
+ if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
+ return ERROR_FAIL;
+ for (unsigned int t = 0; t < r->trigger_count; t++) {
+ if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
+ return ERROR_FAIL;
+ riscv_reg_t tdata1;
+ if (riscv_get_register(target, &tdata1, GDB_REGNO_TDATA1) != ERROR_OK)
+ return ERROR_FAIL;
+ if (tdata1 & MCONTROL_DMODE(riscv_xlen(target))) {
+ state[t] = tdata1;
+ if (riscv_set_register(target, GDB_REGNO_TDATA1, 0) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ }
+ if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)
+ return ERROR_FAIL;
+
+ } else {
+ /* Just go through the triggers we manage. */
+ struct watchpoint *watchpoint = target->watchpoints;
+ int i = 0;
+ while (watchpoint) {
+ LOG_DEBUG("watchpoint %d: set=%d", i, watchpoint->set);
+ state[i] = watchpoint->set;
+ if (watchpoint->set) {
+ if (riscv_remove_watchpoint(target, watchpoint) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ watchpoint = watchpoint->next;
+ i++;
+ }
+ }
+
+ return ERROR_OK;
+}
+
+static int enable_triggers(struct target *target, riscv_reg_t *state)
+{
+ RISCV_INFO(r);
+
+ if (r->manual_hwbp_set) {
+ /* Look at every trigger that may have been set. */
+ riscv_reg_t tselect;
+ if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
+ return ERROR_FAIL;
+ for (unsigned int t = 0; t < r->trigger_count; t++) {
+ if (state[t] != 0) {
+ if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
+ return ERROR_FAIL;
+ if (riscv_set_register(target, GDB_REGNO_TDATA1, state[t]) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ }
+ if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)
+ return ERROR_FAIL;
+
+ } else {
+ struct watchpoint *watchpoint = target->watchpoints;
+ int i = 0;
+ while (watchpoint) {
+ LOG_DEBUG("watchpoint %d: cleared=%" PRId64, i, state[i]);
+ if (state[i]) {
+ if (riscv_add_watchpoint(target, watchpoint) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ watchpoint = watchpoint->next;
+ i++;
+ }
+ }
+
+ return ERROR_OK;
+}
+
+/**
+ * Get everything ready to resume.
+ */
+static int resume_prep(struct target *target, int current,
+ target_addr_t address, int handle_breakpoints, int debug_execution)
+{
+ RISCV_INFO(r);
+ LOG_DEBUG("[%d]", target->coreid);
+
+ if (!current)
+ riscv_set_register(target, GDB_REGNO_PC, address);
+
+ if (target->debug_reason == DBG_REASON_WATCHPOINT) {
+ /* To be able to run off a trigger, disable all the triggers, step, and
+ * then resume as usual. */
+ riscv_reg_t trigger_state[RISCV_MAX_HWBPS] = {0};
+
+ if (disable_triggers(target, trigger_state) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (old_or_new_riscv_step(target, true, 0, false) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (enable_triggers(target, trigger_state) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ if (r->is_halted) {
+ if (riscv_resume_prep_all_harts(target) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ LOG_DEBUG("[%d] mark as prepped", target->coreid);
+ r->prepped = true;
+
+ return ERROR_OK;
+}
+
+/**
+ * Resume all the harts that have been prepped, as close to instantaneous as
+ * possible.
+ */
+static int resume_go(struct target *target, int current,
+ target_addr_t address, int handle_breakpoints, int debug_execution)
+{
+ riscv_info_t *r = riscv_info(target);
+ int result;
+ if (!r->is_halted) {
+ struct target_type *tt = get_target_type(target);
+ result = tt->resume(target, current, address, handle_breakpoints,
+ debug_execution);
+ } else {
+ result = riscv_resume_go_all_harts(target);
+ }
+
+ return result;
+}
+
/* Final phase of resuming: update OpenOCD's bookkeeping after the hart has
 * been told to run, and notify event listeners (e.g. GDB) of the resume. */
static int resume_finish(struct target *target)
{
	/* Cached register values are stale once the hart is running. */
	register_cache_invalidate(target->reg_cache);

	target->state = TARGET_RUNNING;
	target->debug_reason = DBG_REASON_NOTHALTED;
	return target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
}
+
+/**
+ * @par single_hart When true, only resume a single hart even if SMP is
+ * configured. This is used to run algorithms on just one hart.
+ */
+int riscv_resume(
+ struct target *target,
+ int current,
+ target_addr_t address,
+ int handle_breakpoints,
+ int debug_execution,
+ bool single_hart)
+{
+ LOG_DEBUG("handle_breakpoints=%d", handle_breakpoints);
+ int result = ERROR_OK;
+ if (target->smp && !single_hart) {
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ if (resume_prep(t, current, address, handle_breakpoints,
+ debug_execution) != ERROR_OK)
+ result = ERROR_FAIL;
+ }
+
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ riscv_info_t *i = riscv_info(t);
+ if (i->prepped) {
+ if (resume_go(t, current, address, handle_breakpoints,
+ debug_execution) != ERROR_OK)
+ result = ERROR_FAIL;
+ }
+ }
+
+ for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+ struct target *t = tlist->target;
+ if (resume_finish(t) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ } else {
+ if (resume_prep(target, current, address, handle_breakpoints,
+ debug_execution) != ERROR_OK)
+ result = ERROR_FAIL;
+ if (resume_go(target, current, address, handle_breakpoints,
+ debug_execution) != ERROR_OK)
+ result = ERROR_FAIL;
+ if (resume_finish(target) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ return result;
+}
+
/* target_type entry point: resume with single_hart = false, i.e. resume
 * every hart in an SMP group. */
static int riscv_target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	return riscv_resume(target, current, address, handle_breakpoints,
			debug_execution, false);
}
+
+static int riscv_mmu(struct target *target, int *enabled)
+{
+ if (!riscv_enable_virt2phys) {
+ *enabled = 0;
+ return ERROR_OK;
+ }
+
+ /* Don't use MMU in explicit or effective M (machine) mode */
+ riscv_reg_t priv;
+ if (riscv_get_register(target, &priv, GDB_REGNO_PRIV) != ERROR_OK) {
+ LOG_ERROR("Failed to read priv register.");
+ return ERROR_FAIL;
+ }
+
+ riscv_reg_t mstatus;
+ if (riscv_get_register(target, &mstatus, GDB_REGNO_MSTATUS) != ERROR_OK) {
+ LOG_ERROR("Failed to read mstatus register.");
+ return ERROR_FAIL;
+ }
+
+ if ((get_field(mstatus, MSTATUS_MPRV) ? get_field(mstatus, MSTATUS_MPP) : priv) == PRV_M) {
+ LOG_DEBUG("SATP/MMU ignored in Machine mode (mstatus=0x%" PRIx64 ").", mstatus);
+ *enabled = 0;
+ return ERROR_OK;
+ }
+
+ riscv_reg_t satp;
+ if (riscv_get_register(target, &satp, GDB_REGNO_SATP) != ERROR_OK) {
+ LOG_DEBUG("Couldn't read SATP.");
+ /* If we can't read SATP, then there must not be an MMU. */
+ *enabled = 0;
+ return ERROR_OK;
+ }
+
+ if (get_field(satp, RISCV_SATP_MODE(riscv_xlen(target))) == SATP_MODE_OFF) {
+ LOG_DEBUG("MMU is disabled.");
+ *enabled = 0;
+ } else {
+ LOG_DEBUG("MMU is enabled.");
+ *enabled = 1;
+ }
+
+ return ERROR_OK;
+}
+
/* Walk the page tables to translate a virtual address to a physical one.
 * Supports Sv32/Sv39/Sv48 (selected by the mode field of SATP). Returns
 * ERROR_FAIL when translation is off, the mode is unsupported, the virtual
 * address is not canonically sign-extended, or no valid leaf PTE is found. */
static int riscv_address_translate(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	RISCV_INFO(r);
	riscv_reg_t satp_value;
	int mode;
	uint64_t ppn_value;
	target_addr_t table_address;
	const virt2phys_info_t *info;
	uint64_t pte = 0;
	int i;

	int result = riscv_get_register(target, &satp_value, GDB_REGNO_SATP);
	if (result != ERROR_OK)
		return result;

	/* Pick the walk parameters (levels, shifts, masks) from the SATP mode. */
	unsigned xlen = riscv_xlen(target);
	mode = get_field(satp_value, RISCV_SATP_MODE(xlen));
	switch (mode) {
		case SATP_MODE_SV32:
			info = &sv32;
			break;
		case SATP_MODE_SV39:
			info = &sv39;
			break;
		case SATP_MODE_SV48:
			info = &sv48;
			break;
		case SATP_MODE_OFF:
			LOG_ERROR("No translation or protection." \
					" (satp: 0x%" PRIx64 ")", satp_value);
			return ERROR_FAIL;
		default:
			LOG_ERROR("The translation mode is not supported." \
					" (satp: 0x%" PRIx64 ")", satp_value);
			return ERROR_FAIL;
	}
	LOG_DEBUG("virtual=0x%" TARGET_PRIxADDR "; mode=%s", virtual, info->name);

	/* verify bits xlen-1:va_bits-1 are all equal */
	target_addr_t mask = ((target_addr_t)1 << (xlen - (info->va_bits - 1))) - 1;
	target_addr_t masked_msbs = (virtual >> (info->va_bits - 1)) & mask;
	if (masked_msbs != 0 && masked_msbs != mask) {
		LOG_ERROR("Virtual address 0x%" TARGET_PRIxADDR " is not sign-extended "
				"for %s mode.", virtual, info->name);
		return ERROR_FAIL;
	}

	/* The root page table lives at the PPN stored in SATP. */
	ppn_value = get_field(satp_value, RISCV_SATP_PPN(xlen));
	table_address = ppn_value << RISCV_PGSHIFT;
	/* Walk from the highest level down until a leaf PTE is found. */
	i = info->level - 1;
	while (i >= 0) {
		uint64_t vpn = virtual >> info->vpn_shift[i];
		vpn &= info->vpn_mask[i];
		target_addr_t pte_address = table_address +
				(vpn << info->pte_shift);
		uint8_t buffer[8];
		assert(info->pte_shift <= 3);
		/* Read one PTE (4 or 8 bytes) in 4-byte units. */
		int retval = r->read_memory(target, pte_address,
				4, (1 << info->pte_shift) / 4, buffer, 4);
		if (retval != ERROR_OK)
			return ERROR_FAIL;

		if (info->pte_shift == 2)
			pte = buf_get_u32(buffer, 0, 32);
		else
			pte = buf_get_u64(buffer, 0, 64);

		LOG_DEBUG("i=%d; PTE @0x%" TARGET_PRIxADDR " = 0x%" PRIx64, i,
				pte_address, pte);

		/* Invalid PTE, or the reserved W-without-R encoding: fail. */
		if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W)))
			return ERROR_FAIL;

		if ((pte & PTE_R) || (pte & PTE_X)) /* Found leaf PTE. */
			break;

		/* Non-leaf PTE: descend to the next-level table. */
		i--;
		if (i < 0)
			break;
		ppn_value = pte >> PTE_PPN_SHIFT;
		table_address = ppn_value << RISCV_PGSHIFT;
	}

	if (i < 0) {
		LOG_ERROR("Couldn't find the PTE.");
		return ERROR_FAIL;
	}

	/* Make sure to clear out the high bits that may be set. */
	*physical = virtual & (((target_addr_t)1 << info->va_bits) - 1);

	/* Combine the leaf PTE's PPN fields with the page-offset (and, for a
	 * superpage leaf found at level i > 0, the untranslated low VPN bits). */
	while (i < info->level) {
		ppn_value = pte >> info->pte_ppn_shift[i];
		ppn_value &= info->pte_ppn_mask[i];
		*physical &= ~(((target_addr_t)info->pa_ppn_mask[i]) <<
				info->pa_ppn_shift[i]);
		*physical |= (ppn_value << info->pa_ppn_shift[i]);
		i++;
	}
	LOG_DEBUG("0x%" TARGET_PRIxADDR " -> 0x%" TARGET_PRIxADDR, virtual,
			*physical);

	return ERROR_OK;
}
+
+static int riscv_virt2phys(struct target *target, target_addr_t virtual, target_addr_t *physical)
+{
+ int enabled;
+ if (riscv_mmu(target, &enabled) == ERROR_OK) {
+ if (!enabled)
+ return ERROR_FAIL;
+
+ if (riscv_address_translate(target, virtual, physical) == ERROR_OK)
+ return ERROR_OK;
+ }
+
+ return ERROR_FAIL;
+}
+
+static int riscv_read_phys_memory(struct target *target, target_addr_t phys_address,
+ uint32_t size, uint32_t count, uint8_t *buffer)
+{
+ RISCV_INFO(r);
+ if (riscv_select_current_hart(target) != ERROR_OK)
+ return ERROR_FAIL;
+ return r->read_memory(target, phys_address, size, count, buffer, size);
+}
+
+static int riscv_read_memory(struct target *target, target_addr_t address,
+ uint32_t size, uint32_t count, uint8_t *buffer)
+{
+ if (count == 0) {
+ LOG_WARNING("0-length read from 0x%" TARGET_PRIxADDR, address);
+ return ERROR_OK;
+ }
+
+ if (riscv_select_current_hart(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ target_addr_t physical_addr;
+ if (target->type->virt2phys(target, address, &physical_addr) == ERROR_OK)
+ address = physical_addr;
+
+ RISCV_INFO(r);
+ return r->read_memory(target, address, size, count, buffer, size);
+}
+
+static int riscv_write_phys_memory(struct target *target, target_addr_t phys_address,
+ uint32_t size, uint32_t count, const uint8_t *buffer)
+{
+ if (riscv_select_current_hart(target) != ERROR_OK)
+ return ERROR_FAIL;
+ struct target_type *tt = get_target_type(target);
+ return tt->write_memory(target, phys_address, size, count, buffer);
+}