+/* Final halt phase: fire the TARGET_EVENT_HALTED callbacks for this target. */
+static int halt_finish(struct target *target)
+{
+	int result = target_call_event_callbacks(target, TARGET_EVENT_HALTED);
+	return result;
+}
+
+/**
+ * Halt the given target (and, for SMP, every target in its group).
+ *
+ * Runs in three phases so all harts stop as close to simultaneously as
+ * possible: prep each hart, trigger the halt on every successfully
+ * prepped hart, then fire the TARGET_EVENT_HALTED callbacks.
+ *
+ * Returns ERROR_OK only if every phase succeeded on every hart.
+ */
+int riscv_halt(struct target *target)
+{
+	RISCV_INFO(r);
+
+	if (!r->is_halted) {
+		/* Version-specific code manages halting on its own. */
+		struct target_type *tt = get_target_type(target);
+		return tt->halt(target);
+	}
+
+	LOG_DEBUG("[%d] halting all harts", target->coreid);
+
+	int result = ERROR_OK;
+	if (target->smp) {
+		for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+			struct target *t = tlist->target;
+			if (halt_prep(t) != ERROR_OK)
+				result = ERROR_FAIL;
+		}
+
+		for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+			struct target *t = tlist->target;
+			riscv_info_t *i = riscv_info(t);
+			/* Only harts that prepped successfully are told to halt. */
+			if (i->prepped) {
+				if (halt_go(t) != ERROR_OK)
+					result = ERROR_FAIL;
+			}
+		}
+
+		for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+			struct target *t = tlist->target;
+			/* Accumulate failures instead of returning early, so every
+			 * remaining hart still gets its TARGET_EVENT_HALTED callbacks
+			 * (consistent with the prep and go phases above). */
+			if (halt_finish(t) != ERROR_OK)
+				result = ERROR_FAIL;
+		}
+
+	} else {
+		if (halt_prep(target) != ERROR_OK)
+			result = ERROR_FAIL;
+		if (halt_go(target) != ERROR_OK)
+			result = ERROR_FAIL;
+		if (halt_finish(target) != ERROR_OK)
+			result = ERROR_FAIL;
+	}
+
+	return result;
+}
+
+/* Assert reset via the version-specific hook; cached register values are
+ * stale once reset is asserted, so drop them first. */
+static int riscv_assert_reset(struct target *target)
+{
+	LOG_DEBUG("[%d]", target->coreid);
+	riscv_invalidate_register_cache(target);
+	struct target_type *tt = get_target_type(target);
+	return tt->assert_reset(target);
+}
+
+/* Deassert reset via the version-specific hook. */
+static int riscv_deassert_reset(struct target *target)
+{
+	struct target_type *tt;
+
+	LOG_DEBUG("[%d]", target->coreid);
+	tt = get_target_type(target);
+	return tt->deassert_reset(target);
+}
+
+/* Prepare the current hart of the given target to resume and mark it as
+ * prepped. A hart that is already running is skipped (but still marked). */
+int riscv_resume_prep_all_harts(struct target *target)
+{
+	RISCV_INFO(r);
+
+	LOG_DEBUG("[%s] prep hart", target_name(target));
+	if (riscv_select_current_hart(target) != ERROR_OK)
+		return ERROR_FAIL;
+	if (!riscv_is_halted(target)) {
+		LOG_DEBUG("[%s] hart requested resume, but was already resumed",
+				target_name(target));
+	} else if (r->resume_prep(target) != ERROR_OK) {
+		return ERROR_FAIL;
+	}
+
+	LOG_DEBUG("[%s] mark as prepped", target_name(target));
+	r->prepped = true;
+
+	return ERROR_OK;
+}
+
+/* Temporarily disable all hardware triggers so the hart can step or run
+ * past a trigger address. The previous trigger configuration is saved
+ * into *state so enable_triggers() can restore it afterwards.
+ * state must be riscv_reg_t state[RISCV_MAX_HWBPS] = {0}; */
+static int disable_triggers(struct target *target, riscv_reg_t *state)
+{
+ RISCV_INFO(r);
+
+ LOG_DEBUG("deal with triggers");
+
+ if (riscv_enumerate_triggers(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (r->manual_hwbp_set) {
+ /* Look at every trigger that may have been set. */
+ riscv_reg_t tselect;
+ /* Save the current trigger selection so it can be restored below. */
+ if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
+ return ERROR_FAIL;
+ for (unsigned int t = 0; t < r->trigger_count; t++) {
+ if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
+ return ERROR_FAIL;
+ riscv_reg_t tdata1;
+ if (riscv_get_register(target, &tdata1, GDB_REGNO_TDATA1) != ERROR_OK)
+ return ERROR_FAIL;
+ /* Only triggers with the dmode bit set (set from debug mode) are
+ * saved and cleared; writing 0 to tdata1 disables the trigger. */
+ if (tdata1 & MCONTROL_DMODE(riscv_xlen(target))) {
+ state[t] = tdata1;
+ if (riscv_set_register(target, GDB_REGNO_TDATA1, 0) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ }
+ /* Restore the original trigger selection. */
+ if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)
+ return ERROR_FAIL;
+
+ } else {
+ /* Just go through the triggers we manage. */
+ struct watchpoint *watchpoint = target->watchpoints;
+ int i = 0;
+ while (watchpoint) {
+ LOG_DEBUG("watchpoint %d: set=%d", i, watchpoint->set);
+ /* Remember whether each watchpoint was set, then remove the set
+ * ones; enable_triggers() re-adds exactly those. */
+ state[i] = watchpoint->set;
+ if (watchpoint->set) {
+ if (riscv_remove_watchpoint(target, watchpoint) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ watchpoint = watchpoint->next;
+ i++;
+ }
+ }
+
+ return ERROR_OK;
+}
+
+/* Restore the triggers saved by disable_triggers(). state must be the
+ * same array that was passed to disable_triggers(). */
+static int enable_triggers(struct target *target, riscv_reg_t *state)
+{
+	RISCV_INFO(r);
+
+	if (r->manual_hwbp_set) {
+		/* Look at every trigger that may have been set. */
+		riscv_reg_t tselect;
+		if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
+			return ERROR_FAIL;
+		for (unsigned int t = 0; t < r->trigger_count; t++) {
+			/* A zero entry means disable_triggers() didn't touch this one. */
+			if (state[t] != 0) {
+				if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
+					return ERROR_FAIL;
+				if (riscv_set_register(target, GDB_REGNO_TDATA1, state[t]) != ERROR_OK)
+					return ERROR_FAIL;
+			}
+		}
+		/* Restore the original trigger selection. */
+		if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)
+			return ERROR_FAIL;
+
+	} else {
+		/* Re-add the watchpoints that disable_triggers() removed. */
+		struct watchpoint *watchpoint = target->watchpoints;
+		int i = 0;
+		while (watchpoint) {
+			/* riscv_reg_t is unsigned, so print with PRIu64 (PRId64 would
+			 * misrender large values). */
+			LOG_DEBUG("watchpoint %d: cleared=%" PRIu64, i, state[i]);
+			if (state[i]) {
+				if (riscv_add_watchpoint(target, watchpoint) != ERROR_OK)
+					return ERROR_FAIL;
+			}
+			watchpoint = watchpoint->next;
+			i++;
+		}
+	}
+
+	return ERROR_OK;
+}
+
+/**
+ * Get everything ready to resume: optionally write a new PC, step off a
+ * watchpoint trigger if that's why we halted, run the version-specific
+ * resume prep for halted harts, and mark the target as prepped.
+ */
+static int resume_prep(struct target *target, int current,
+		target_addr_t address, int handle_breakpoints, int debug_execution)
+{
+	RISCV_INFO(r);
+	LOG_DEBUG("[%d]", target->coreid);
+
+	if (!current) {
+		/* Propagate a failed PC write instead of silently resuming at the
+		 * old PC (the return value was previously ignored). */
+		if (riscv_set_register(target, GDB_REGNO_PC, address) != ERROR_OK)
+			return ERROR_FAIL;
+	}
+
+	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
+		/* To be able to run off a trigger, disable all the triggers, step, and
+		 * then resume as usual. */
+		riscv_reg_t trigger_state[RISCV_MAX_HWBPS] = {0};
+
+		if (disable_triggers(target, trigger_state) != ERROR_OK)
+			return ERROR_FAIL;
+
+		if (old_or_new_riscv_step(target, true, 0, false) != ERROR_OK)
+			return ERROR_FAIL;
+
+		if (enable_triggers(target, trigger_state) != ERROR_OK)
+			return ERROR_FAIL;
+	}
+
+	if (r->is_halted) {
+		if (riscv_resume_prep_all_harts(target) != ERROR_OK)
+			return ERROR_FAIL;
+	}
+
+	LOG_DEBUG("[%d] mark as prepped", target->coreid);
+	r->prepped = true;
+
+	return ERROR_OK;
+}
+
+/**
+ * Resume all the harts that have been prepped, as close to instantaneous as
+ * possible.
+ */
+static int resume_go(struct target *target, int current,
+		target_addr_t address, int handle_breakpoints, int debug_execution)
+{
+	riscv_info_t *r = riscv_info(target);
+
+	if (r->is_halted)
+		return riscv_resume_go_all_harts(target);
+
+	/* Version-specific code handles the resume on its own. */
+	struct target_type *tt = get_target_type(target);
+	return tt->resume(target, current, address, handle_breakpoints,
+			debug_execution);
+}
+
+/* Final resume phase: drop cached registers, mark the target as running,
+ * and fire the TARGET_EVENT_RESUMED callbacks. */
+static int resume_finish(struct target *target)
+{
+	register_cache_invalidate(target->reg_cache);
+
+	target->debug_reason = DBG_REASON_NOTHALTED;
+	target->state = TARGET_RUNNING;
+	return target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
+}
+
+/**
+ * Resume the target (and, for SMP, every target in its group) in three
+ * phases: prep every hart, start every successfully prepped hart, then
+ * update OpenOCD's state and fire TARGET_EVENT_RESUMED.
+ *
+ * @par single_hart When true, only resume a single hart even if SMP is
+ * configured. This is used to run algorithms on just one hart.
+ */
+int riscv_resume(
+		struct target *target,
+		int current,
+		target_addr_t address,
+		int handle_breakpoints,
+		int debug_execution,
+		bool single_hart)
+{
+	LOG_DEBUG("handle_breakpoints=%d", handle_breakpoints);
+	int result = ERROR_OK;
+	if (target->smp && !single_hart) {
+		for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+			struct target *t = tlist->target;
+			if (resume_prep(t, current, address, handle_breakpoints,
+						debug_execution) != ERROR_OK)
+				result = ERROR_FAIL;
+		}
+
+		for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+			struct target *t = tlist->target;
+			riscv_info_t *i = riscv_info(t);
+			/* Only harts that prepped successfully are resumed. */
+			if (i->prepped) {
+				if (resume_go(t, current, address, handle_breakpoints,
+							debug_execution) != ERROR_OK)
+					result = ERROR_FAIL;
+			}
+		}
+
+		for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
+			struct target *t = tlist->target;
+			/* Accumulate failures instead of returning early, so every
+			 * remaining hart's state and event callbacks are still updated
+			 * (consistent with the prep and go phases above). */
+			if (resume_finish(t) != ERROR_OK)
+				result = ERROR_FAIL;
+		}
+
+	} else {
+		if (resume_prep(target, current, address, handle_breakpoints,
+					debug_execution) != ERROR_OK)
+			result = ERROR_FAIL;
+		if (resume_go(target, current, address, handle_breakpoints,
+					debug_execution) != ERROR_OK)
+			result = ERROR_FAIL;
+		if (resume_finish(target) != ERROR_OK)
+			result = ERROR_FAIL;
+	}
+
+	return result;
+}
+
+/* target_type resume hook: resume every hart, not just a single one. */
+static int riscv_target_resume(struct target *target, int current, target_addr_t address,
+		int handle_breakpoints, int debug_execution)
+{
+	return riscv_resume(target, current, address, handle_breakpoints,
+			debug_execution, /* single_hart */ false);
+}
+
+/* Report through *enabled whether address translation is currently active:
+ * virt2phys must be enabled, the hart must not be in (effective) M-mode,
+ * and satp must be readable with a mode other than OFF. Returns ERROR_FAIL
+ * only if priv or mstatus can't be read. */
+static int riscv_mmu(struct target *target, int *enabled)
+{
+ if (!riscv_enable_virt2phys) {
+ *enabled = 0;
+ return ERROR_OK;
+ }
+
+ /* Don't use MMU in explicit or effective M (machine) mode */
+ riscv_reg_t priv;
+ if (riscv_get_register(target, &priv, GDB_REGNO_PRIV) != ERROR_OK) {
+ LOG_ERROR("Failed to read priv register.");
+ return ERROR_FAIL;
+ }
+
+ riscv_reg_t mstatus;
+ if (riscv_get_register(target, &mstatus, GDB_REGNO_MSTATUS) != ERROR_OK) {
+ LOG_ERROR("Failed to read mstatus register.");
+ return ERROR_FAIL;
+ }
+
+ /* With MPRV set, loads/stores use the MPP privilege, so that is the
+ * effective mode to check; otherwise use the current privilege level. */
+ if ((get_field(mstatus, MSTATUS_MPRV) ? get_field(mstatus, MSTATUS_MPP) : priv) == PRV_M) {
+ LOG_DEBUG("SATP/MMU ignored in Machine mode (mstatus=0x%" PRIx64 ").", mstatus);
+ *enabled = 0;
+ return ERROR_OK;
+ }
+
+ riscv_reg_t satp;
+ if (riscv_get_register(target, &satp, GDB_REGNO_SATP) != ERROR_OK) {
+ LOG_DEBUG("Couldn't read SATP.");
+ /* If we can't read SATP, then there must not be an MMU. */
+ *enabled = 0;
+ return ERROR_OK;
+ }
+
+ /* The MODE field of satp selects the translation scheme; OFF means no
+ * translation. */
+ if (get_field(satp, RISCV_SATP_MODE(riscv_xlen(target))) == SATP_MODE_OFF) {
+ LOG_DEBUG("MMU is disabled.");
+ *enabled = 0;
+ } else {
+ LOG_DEBUG("MMU is enabled.");
+ *enabled = 1;
+ }
+
+ return ERROR_OK;
+}
+
+static int riscv_address_translate(struct target *target,
+ target_addr_t virtual, target_addr_t *physical)