+ uint8_t saved_apsel = dap_ap_get_select(swjdp);
+ dap_ap_select(swjdp, swjdp_debugap);
+
+ if (!debug_execution)
+ {
+ target_free_all_working_areas(target);
+// cortex_m3_enable_breakpoints(target);
+// cortex_m3_enable_watchpoints(target);
+ }
+
+#if 0
+ if (debug_execution)
+ {
+ /* Disable interrupts */
+ /* We disable interrupts in the PRIMASK register instead of
+ * masking with C_MASKINTS,
+ * This is probably the same issue as Cortex-M3 Errata 377493:
+ * C_MASKINTS in parallel with disabled interrupts can cause
+ * local faults to not be taken. */
+ buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
+ armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
+ armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
+
+ /* Make sure we are in Thumb mode */
+ buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
+ buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
+ armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
+ armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
+ }
+#endif
+
+ /* current = 1: continue on current pc, otherwise continue at <address> */
+ resume_pc = buf_get_u32(
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).value,
+ 0, 32);
+ if (!current)
+ resume_pc = address;
+
+ /* Make sure that the Armv7 gdb thumb fixups does not
+ * kill the return address
+ */
+ switch (armv4_5->core_state)
+ {
+ case ARMV4_5_STATE_ARM:
+ resume_pc &= 0xFFFFFFFC;
+ break;
+ case ARMV4_5_STATE_THUMB:
+ case ARM_STATE_THUMB_EE:
+ /* When the return address is loaded into PC
+ * bit 0 must be 1 to stay in Thumb state
+ */
+ resume_pc |= 0x1;
+ break;
+ case ARMV4_5_STATE_JAZELLE:
+ LOG_ERROR("How do I resume into Jazelle state??");
+ return ERROR_FAIL;
+ }
+ LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
+ buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).value,
+ 0, 32, resume_pc);
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).dirty = 1;
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).valid = 1;
+
+ cortex_a8_restore_context(target);
+// arm7_9_restore_context(target); TODO Context is currently NOT Properly restored
+#if 0
+ /* the front-end may request us not to handle breakpoints */
+ if (handle_breakpoints)
+ {
+ /* Single step past breakpoint at current address */
+ if ((breakpoint = breakpoint_find(target, resume_pc)))
+ {
+ LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
+ cortex_m3_unset_breakpoint(target, breakpoint);
+ cortex_m3_single_step_core(target);
+ cortex_m3_set_breakpoint(target, breakpoint);
+ }
+ }
+
+#endif
+ /* Restart core and wait for it to be started */
+ mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
+
+ do {
+ mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ } while ((dscr & (1 << DSCR_CORE_RESTARTED)) == 0);
+
+ target->debug_reason = DBG_REASON_NOTHALTED;
+ target->state = TARGET_RUNNING;
+
+ /* registers are now invalid */
+ register_cache_invalidate(armv4_5->core_cache);
+
+ if (!debug_execution)
+ {
+ target->state = TARGET_RUNNING;
+ target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
+ LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
+ }
+ else
+ {
+ target->state = TARGET_DEBUG_RUNNING;
+ target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
+ LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
+ }
+
+ dap_ap_select(swjdp, saved_apsel);
+
+ return ERROR_OK;
+}
+
+/*
+ * Debug-entry handler, run after the core halts.  Determines the halt
+ * reason from the DSCR snapshot taken at halt time, captures r0-r15 and
+ * CPSR, undoes the pipeline offset on the saved PC, and refreshes the
+ * ARMv4/5-style register cache so the rest of OpenOCD sees current
+ * register values.
+ *
+ * Returns the status of the last atomic DAP write (ERROR_OK on success).
+ */
+static int cortex_a8_debug_entry(struct target *target)
+{
+ int i;
+ uint32_t regfile[16], cpsr, dscr;
+ int retval = ERROR_OK;
+ struct working_area *regfile_working_area = NULL;
+ struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct armv4_5_common_s *armv4_5 = &armv7a->armv4_5_common;
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+ struct reg *reg;
+
+ LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
+
+ /* Enable the ITR execution once we are in debug mode */
+ mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ dscr |= (1 << DSCR_EXT_INT_EN);
+ retval = mem_ap_write_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, dscr);
+
+ /* Examine debug reason: DSCR[5:2] is the "method of debug entry"
+ * field -- encodings assumed per the ARMv7 debug architecture
+ * (DDI 0406); verify against the spec before extending. */
+ switch ((cortex_a8->cpudbg_dscr >> 2)&0xF)
+ {
+ case 0: /* halt request */
+ case 4: /* EDBGRQ */
+ target->debug_reason = DBG_REASON_DBGRQ;
+ break;
+ case 1: /* HW breakpoint */
+ case 3: /* BKPT instruction */
+ target->debug_reason = DBG_REASON_BREAKPOINT;
+ break;
+ case 10: /* watchpoint */
+ target->debug_reason = DBG_REASON_WATCHPOINT;
+ break;
+ default:
+ target->debug_reason = DBG_REASON_UNDEFINED;
+ break;
+ }
+
+ /* Examine target state and mode */
+ /* With fast_reg_read, dump r0-r15 to a scratch working area in one
+ * burst instead of 16 individual DAP accesses.  If allocation fails,
+ * regfile_working_area stays NULL and we fall back to the slow path. */
+ if (cortex_a8->fast_reg_read)
+ target_alloc_working_area(target, 64, &regfile_working_area);
+
+ /* First load register acessible through core debug port*/
+ if (!regfile_working_area)
+ {
+ /* FIXME we don't actually need all these registers;
+ * reading them slows us down. Just R0, PC, CPSR...
+ */
+ for (i = 0; i <= 15; i++)
+ cortex_a8_dap_read_coreregister_u32(target,
+ &regfile[i], i);
+ }
+ else
+ {
+ dap_ap_select(swjdp, swjdp_memoryap);
+ cortex_a8_read_regs_through_mem(target,
+ regfile_working_area->address, regfile);
+ /* BUGFIX: restore the debug AP (was a duplicated select of
+ * swjdp_memoryap), so the CPSR core-register read below goes
+ * through the debug AP as intended. */
+ dap_ap_select(swjdp, swjdp_debugap);
+ target_free_working_area(target, regfile_working_area);
+ }
+
+ /* read Current PSR -- index 16 maps to CPSR in the DAP accessor */
+ cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
+ dap_ap_select(swjdp, swjdp_debugap);
+ LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
+
+ /* CPSR[4:0] holds the processor mode */
+ armv4_5->core_mode = cpsr & 0x1F;
+
+ /* Decode the instruction-set state from CPSR T (bit 5) and J (bit 24):
+ * 00=ARM, 01=Thumb, 10=Jazelle, 11=ThumbEE. */
+ i = (cpsr >> 5) & 1; /* T */
+ /* BUGFIX: was `(cpsr >> 23) & 1`, which extracted bit 23 into the
+ * T slot; mask with 2 so J (bit 24) lands in bit position 1. */
+ i |= (cpsr >> 23) & 2; /* J << 1 */
+ switch (i) {
+ case 0: /* J = 0, T = 0 */
+ armv4_5->core_state = ARMV4_5_STATE_ARM;
+ break;
+ case 1: /* J = 0, T = 1 */
+ armv4_5->core_state = ARMV4_5_STATE_THUMB;
+ break;
+ case 2: /* J = 1, T = 0 */
+ LOG_WARNING("Jazelle state -- not handled");
+ armv4_5->core_state = ARMV4_5_STATE_JAZELLE;
+ break;
+ case 3: /* J = 1, T = 1 */
+ /* ThumbEE is very much like Thumb, but some of the
+ * instructions are different. Single stepping and
+ * breakpoints need updating...
+ */
+ LOG_WARNING("ThumbEE -- incomplete support");
+ armv4_5->core_state = ARM_STATE_THUMB_EE;
+ break;
+ }
+
+ /* update cache: CPSR first, then r0..r15 for the current mode */
+ reg = armv4_5->core_cache->reg_list + ARMV4_5_CPSR;
+ buf_set_u32(reg->value, 0, 32, cpsr);
+ reg->valid = 1;
+ reg->dirty = 0;
+
+ for (i = 0; i <= ARM_PC; i++)
+ {
+ reg = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, i);
+
+ buf_set_u32(reg->value, 0, 32, regfile[i]);
+ reg->valid = 1;
+ reg->dirty = 0;
+ }
+
+ /* Fixup PC Resume Address: the PC read in debug state carries the
+ * pipeline offset (-4 in Thumb/ThumbEE, -8 in ARM state). */
+ if (cpsr & (1 << 5))
+ {
+ /* T bit set for Thumb or ThumbEE state */
+ regfile[ARM_PC] -= 4;
+ }
+ else
+ {
+ /* ARM state */
+ regfile[ARM_PC] -= 8;
+ }
+ buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, ARM_PC).value,
+ 0, 32, regfile[ARM_PC]);
+
+ /* r0 and r15 may have been clobbered by the debug-state register
+ * access sequence: mark them dirty iff they are valid so they get
+ * written back on resume. */
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 0)
+ .dirty = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 0).valid;
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 15)
+ .dirty = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).valid;
+
+#if 0
+/* TODO, Move this */
+ uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
+ cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
+ LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
+
+ cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
+ LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
+
+ cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
+ LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
+#endif
+
+ /* Are we in an exception handler */
+// armv4_5->exception_number = 0;
+ if (armv7a->post_debug_entry)
+ armv7a->post_debug_entry(target);
+
+ return retval;
+}
+
+/*
+ * Post-halt housekeeping: read the CP15 control register, cache it in
+ * cortex_a8->cp15_control_reg, derive the MMU / D-cache / I-cache enable
+ * flags from it, and identify the cache geometry the first time through.
+ */
+static void cortex_a8_post_debug_entry(struct target *target)
+{
+ struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
+ struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
+ uint32_t ctrl;
+
+ /* examine cp15 control reg (CRn=1, CRm=0, op2=0) */
+ armv7a->read_cp15(target, 0, 0, 1, 0, &cortex_a8->cp15_control_reg);
+ jtag_execute_queue();
+ ctrl = cortex_a8->cp15_control_reg;
+ LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, ctrl);
+
+ if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
+ {
+ /* cache geometry not yet identified: read the cache type
+ * register (CRn=0, CRm=0, op2=1) and decode it */
+ uint32_t ctype;
+ armv7a->read_cp15(target, 0, 1, 0, 0, &ctype);
+ jtag_execute_queue();
+ /* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
+ armv4_5_identify_cache(ctype,
+ &armv7a->armv4_5_mmu.armv4_5_cache);
+ }
+
+ /* control reg bit 0 -> MMU, bit 2 -> D-cache, bit 12 -> I-cache */
+ armv7a->armv4_5_mmu.mmu_enabled = (ctrl & 0x1U) != 0;
+ armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
+ (ctrl & 0x4U) != 0;
+ armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
+ (ctrl & 0x1000U) != 0;
+}
+
+/*
+ * Single-step the core.  Cortex-A8 has no dedicated single-step, so this
+ * plants a hardware breakpoint in IVA-mismatch mode on the step address,
+ * resumes, and polls until the core halts on the next instruction.
+ *
+ * current = 1: step from the current PC; otherwise step from <address>.
+ * handle_breakpoints is forced on, because an existing breakpoint at the
+ * PC must be removed before stepping over it and restored afterwards.
+ */
+static int cortex_a8_step(struct target *target, int current, uint32_t address,
+ int handle_breakpoints)
+{
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct armv4_5_common_s *armv4_5 = &armv7a->armv4_5_common;
+ struct breakpoint *breakpoint = NULL;
+ struct breakpoint stepbreakpoint;
+
+ int timeout = 100;
+
+ if (target->state != TARGET_HALTED)
+ {
+ LOG_WARNING("target not halted");
+ return ERROR_TARGET_NOT_HALTED;
+ }
+
+ /* current = 1: continue on current pc, otherwise continue at <address> */
+ if (!current)
+ {
+ buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, ARM_PC).value,
+ 0, 32, address);
+ }
+ else
+ {
+ address = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, ARM_PC).value,
+ 0, 32);
+ }
+
+ /* The front-end may request us not to handle breakpoints.
+ * But since Cortex-A8 uses breakpoint for single step,
+ * we MUST handle breakpoints.
+ */
+ handle_breakpoints = 1;
+ if (handle_breakpoints) {
+ breakpoint = breakpoint_find(target,
+ buf_get_u32(ARMV4_5_CORE_REG_MODE(
+ armv4_5->core_cache,
+ armv4_5->core_mode, 15).value,
+ 0, 32));
+ if (breakpoint)
+ cortex_a8_unset_breakpoint(target, breakpoint);
+ }
+
+ /* Setup single step breakpoint.  NOTE(review): ThumbEE state also
+ * uses 16-bit units but only ARMV4_5_STATE_THUMB gets length 2 here
+ * -- confirm whether ARM_STATE_THUMB_EE needs the same treatment. */
+ stepbreakpoint.address = address;
+ stepbreakpoint.length = (armv4_5->core_state == ARMV4_5_STATE_THUMB)
+ ? 2 : 4;
+ stepbreakpoint.type = BKPT_HARD;
+ stepbreakpoint.set = 0;
+
+ /* Break on IVA mismatch */
+ cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
+
+ target->debug_reason = DBG_REASON_SINGLESTEP;
+
+ cortex_a8_resume(target, 1, address, 0, 0);
+
+ /* Poll until the mismatch breakpoint halts the core again */
+ while (target->state != TARGET_HALTED)
+ {
+ cortex_a8_poll(target);
+ if (--timeout == 0)
+ {
+ LOG_WARNING("timeout waiting for target halt");
+ break;
+ }
+ }
+
+ cortex_a8_unset_breakpoint(target, &stepbreakpoint);
+ if (timeout > 0) target->debug_reason = DBG_REASON_BREAKPOINT;
+
+ /* Re-arm the user breakpoint we removed above */
+ if (breakpoint)
+ cortex_a8_set_breakpoint(target, breakpoint, 0);
+
+ /* BUGFIX: condition was inverted (!=), which logged "target stepped"
+ * exactly when the step had timed out without halting. */
+ if (target->state == TARGET_HALTED)
+ LOG_DEBUG("target stepped");
+
+ return ERROR_OK;
+}
+
+/*
+ * Write every dirty cached core register back to the target before
+ * resuming, walking r15 down to r0 so the PC is restored first.
+ * Invokes the optional pre/post restore hooks around the write-back.
+ */
+static int cortex_a8_restore_context(struct target *target)
+{
+ int num;
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct armv4_5_common_s *armv4_5 = &armv7a->armv4_5_common;
+ struct reg *r;
+
+ LOG_DEBUG(" ");
+
+ if (armv7a->pre_restore_context)
+ armv7a->pre_restore_context(target);
+
+ for (num = 15; num >= 0; num--)
+ {
+ r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, num);
+ if (!r->dirty)
+ continue;
+ /* TODO Check return values */
+ cortex_a8_dap_write_coreregister_u32(target,
+ buf_get_u32(r->value, 0, 32), num);
+ }
+
+ if (armv7a->post_restore_context)
+ armv7a->post_restore_context(target);
+
+ return ERROR_OK;
+}
+
+
+#if 0
+/*
+ * Cortex-A8 Core register functions
+ */
+/* Read one core register (r0-r15 or CPSR) through the debug port into
+ * *value.  NOTE(review): this function is compiled out (#if 0) -- kept
+ * for reference only; the `mode` parameter is unused in this body. */
+static int cortex_a8_load_core_reg_u32(struct target *target, int num,
+ armv4_5_mode_t mode, uint32_t * value)
+{
+ int retval;
+ struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
+
+ if ((num <= ARM_CPSR))
+ {
+ /* read a normal core register */
+ retval = cortex_a8_dap_read_coreregister_u32(target, value, num);
+
+ if (retval != ERROR_OK)
+ {
+ LOG_ERROR("JTAG failure %i", retval);
+ return ERROR_JTAG_DEVICE_ERROR;
+ }
+ LOG_DEBUG("load from core reg %i value 0x%" PRIx32, num, *value);
+ }
+ else
+ {
+ return ERROR_INVALID_ARGUMENTS;
+ }
+
+ /* Register other than r0 - r14 uses r0 for access */
+ /* NOTE(review): the `if (num > 14)` below guards ONLY the first
+ * statement; the r15 dirty/valid update always runs.  The indentation
+ * suggests both were meant to be conditional -- confirm (and add
+ * braces) before re-enabling this code. */
+ if (num > 14)
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 0).dirty =
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 0).valid;
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).dirty =
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).valid;
+
+ return ERROR_OK;
+}