/*
 * Cortex-A8 basic debug access; very low level, assumes processor state
 * has already been saved.
 */
+static int cortex_a8_init_debug_access(struct target *target)
+{
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+
+ int retval;
+ uint32_t dummy;
+
+ LOG_DEBUG(" ");
+
+ /* Unlocking the debug registers for modification */
+ /* The debugport might be uninitialised so try twice */
+ retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
+ if (retval != ERROR_OK)
+ mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
+ /* Clear Sticky Power Down status Bit in PRSR to enable access to
+ the registers in the Core Power Domain */
+ retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
+ /* Enabling of instruction execution in debug mode is done in debug_entry code */
+
+ /* Resync breakpoint registers */
+
+ /* Since this is likley called from init or reset, update targtet state information*/
+ cortex_a8_poll(target);
+
+ return retval;
+}
+
+int cortex_a8_exec_opcode(struct target *target, uint32_t opcode)
+{
+ uint32_t dscr;
+ int retval;
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+
+ LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
+ do
+ {
+ retval = mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ if (retval != ERROR_OK)
+ {
+ LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
+ return retval;
+ }
+ }
+ while ((dscr & (1 << DSCR_INSTR_COMP)) == 0); /* Wait for InstrCompl bit to be set */
+
+ mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
+
+ do
+ {
+ retval = mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ if (retval != ERROR_OK)
+ {
+ LOG_ERROR("Could not read DSCR register");
+ return retval;
+ }
+ }
+ while ((dscr & (1 << DSCR_INSTR_COMP)) == 0); /* Wait for InstrCompl bit to be set */
+
+ return retval;
+}
+
/**************************************************************************
Read core registers with very few exec_opcode calls; fast, but needs a
work_area.  This can cause problems with the MMU active.
**************************************************************************/
+static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
+ uint32_t * regfile)
+{
+ int retval = ERROR_OK;
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+
+ cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
+ cortex_a8_dap_write_coreregister_u32(target, address, 0);
+ cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0));
+ dap_ap_select(swjdp, swjdp_memoryap);
+ mem_ap_read_buf_u32(swjdp, (uint8_t *)(®file[1]), 4*15, address);
+ dap_ap_select(swjdp, swjdp_debugap);
+
+ return retval;
+}
+
+static int cortex_a8_read_cp(struct target *target, uint32_t *value, uint8_t CP,
+ uint8_t op1, uint8_t CRn, uint8_t CRm, uint8_t op2)
+{
+ int retval;
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+
+ cortex_a8_exec_opcode(target, ARMV4_5_MRC(CP, op1, 0, CRn, CRm, op2));
+ /* Move R0 to DTRTX */
+ cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
+
+ /* Read DCCTX */
+ retval = mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DTRTX, value);
+
+ return retval;
+}
+
+static int cortex_a8_write_cp(struct target *target, uint32_t value,
+ uint8_t CP, uint8_t op1, uint8_t CRn, uint8_t CRm, uint8_t op2)
+{
+ int retval;
+ uint32_t dscr;
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+
+ LOG_DEBUG("CP%i, CRn %i, value 0x%08" PRIx32, CP, CRn, value);
+
+ /* Check that DCCRX is not full */
+ retval = mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ if (dscr & (1 << DSCR_DTR_RX_FULL))
+ {
+ LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
+ /* Clear DCCRX with MCR(p14, 0, Rd, c0, c5, 0), opcode 0xEE000E15 */
+ cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
+ }
+
+ retval = mem_ap_write_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DTRRX, value);
+ /* Move DTRRX to r0 */
+ cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
+
+ cortex_a8_exec_opcode(target, ARMV4_5_MCR(CP, op1, 0, CRn, CRm, op2));
+ return retval;
+}
+
/* Read a cp15 register; thin wrapper that reorders the arguments into
 * cortex_a8_read_cp()'s (value, CP, op1, CRn, CRm, op2) convention. */
static int cortex_a8_read_cp15(struct target *target, uint32_t op1, uint32_t op2,
		uint32_t CRn, uint32_t CRm, uint32_t *value)
{
	return cortex_a8_read_cp(target, value, 15, op1, CRn, CRm, op2);
}
+
/* Write a cp15 register; thin wrapper that reorders the arguments into
 * cortex_a8_write_cp()'s (value, CP, op1, CRn, CRm, op2) convention. */
static int cortex_a8_write_cp15(struct target *target, uint32_t op1, uint32_t op2,
		uint32_t CRn, uint32_t CRm, uint32_t value)
{
	return cortex_a8_write_cp(target, value, 15, op1, CRn, CRm, op2);
}
+
+static int cortex_a8_mrc(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t *value)
+{
+ if (cpnum!=15)
+ {
+ LOG_ERROR("Only cp15 is supported");
+ return ERROR_FAIL;
+ }
+ return cortex_a8_read_cp15(target, op1, op2, CRn, CRm, value);
+}
+
+static int cortex_a8_mcr(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t value)
+{
+ if (cpnum!=15)
+ {
+ LOG_ERROR("Only cp15 is supported");
+ return ERROR_FAIL;
+ }
+ return cortex_a8_write_cp15(target, op1, op2, CRn, CRm, value);
+}
+
+
+
+static int cortex_a8_dap_read_coreregister_u32(struct target *target,
+ uint32_t *value, int regnum)
+{
+ int retval = ERROR_OK;
+ uint8_t reg = regnum&0xFF;
+ uint32_t dscr;
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+
+ if (reg > 17)
+ return retval;
+
+ if (reg < 15)
+ {
+ /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
+ cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, reg, 0, 5, 0));
+ }
+ else if (reg == 15)
+ {
+ /* "MOV r0, r15"; then move r0 to DCCTX */
+ cortex_a8_exec_opcode(target, 0xE1A0000F);
+ cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
+ }
+ else
+ {
+ /* "MRS r0, CPSR" or "MRS r0, SPSR"
+ * then move r0 to DCCTX
+ */
+ cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1));
+ cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
+ }
+
+ /* Read DTRRTX */
+ do
+ {
+ retval = mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ }
+ while ((dscr & (1 << DSCR_DTR_TX_FULL)) == 0); /* Wait for DTRRXfull */
+
+ retval = mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DTRTX, value);
+ LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
+
+ return retval;
+}
+
+static int cortex_a8_dap_write_coreregister_u32(struct target *target,
+ uint32_t value, int regnum)
+{
+ int retval = ERROR_OK;
+ uint8_t Rd = regnum&0xFF;
+ uint32_t dscr;
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+
+ LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
+
+ /* Check that DCCRX is not full */
+ retval = mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ if (dscr & (1 << DSCR_DTR_RX_FULL))
+ {
+ LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
+ /* Clear DCCRX with MCR(p14, 0, Rd, c0, c5, 0), opcode 0xEE000E15 */
+ cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
+ }
+
+ if (Rd > 17)
+ return retval;
+
+ /* Write to DCCRX */
+ LOG_DEBUG("write DCC 0x%08" PRIx32, value);
+ retval = mem_ap_write_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DTRRX, value);
+
+ if (Rd < 15)
+ {
+ /* DCCRX to Rn, "MCR p14, 0, Rn, c0, c5, 0", 0xEE00nE15 */
+ cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0));
+ }
+ else if (Rd == 15)
+ {
+ /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15
+ * then "mov r15, r0"
+ */
+ cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
+ cortex_a8_exec_opcode(target, 0xE1A0F000);
+ }
+ else
+ {
+ /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15
+ * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
+ */
+ cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
+ cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1));
+
+ /* "Prefetch flush" after modifying execution status in CPSR */
+ if (Rd == 16)
+ cortex_a8_exec_opcode(target,
+ ARMV4_5_MCR(15, 0, 0, 7, 5, 4));
+ }
+
+ return retval;
+}
+
+/* Write to memory mapped registers directly with no cache or mmu handling */
+static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
+{
+ int retval;
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+
+ retval = mem_ap_write_atomic_u32(swjdp, address, value);
+
+ return retval;
+}
+
+/*
+ * Cortex-A8 Run control
+ */
+
+static int cortex_a8_poll(struct target *target)
+{
+ int retval = ERROR_OK;
+ uint32_t dscr;
+ struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
+ struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+ enum target_state prev_target_state = target->state;
+ uint8_t saved_apsel = dap_ap_get_select(swjdp);
+
+ dap_ap_select(swjdp, swjdp_debugap);
+ retval = mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ if (retval != ERROR_OK)
+ {
+ dap_ap_select(swjdp, saved_apsel);
+ return retval;
+ }
+ cortex_a8->cpudbg_dscr = dscr;
+
+ if ((dscr & 0x3) == 0x3)
+ {
+ if (prev_target_state != TARGET_HALTED)
+ {
+ /* We have a halting debug event */
+ LOG_DEBUG("Target halted");
+ target->state = TARGET_HALTED;
+ if ((prev_target_state == TARGET_RUNNING)
+ || (prev_target_state == TARGET_RESET))
+ {
+ retval = cortex_a8_debug_entry(target);
+ if (retval != ERROR_OK)
+ return retval;
+
+ target_call_event_callbacks(target,
+ TARGET_EVENT_HALTED);
+ }
+ if (prev_target_state == TARGET_DEBUG_RUNNING)
+ {
+ LOG_DEBUG(" ");
+
+ retval = cortex_a8_debug_entry(target);
+ if (retval != ERROR_OK)
+ return retval;
+
+ target_call_event_callbacks(target,
+ TARGET_EVENT_DEBUG_HALTED);
+ }
+ }
+ }
+ else if ((dscr & 0x3) == 0x2)
+ {
+ target->state = TARGET_RUNNING;
+ }
+ else
+ {
+ LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
+ target->state = TARGET_UNKNOWN;
+ }
+
+ dap_ap_select(swjdp, saved_apsel);
+
+ return retval;
+}
+
+static int cortex_a8_halt(struct target *target)
+{
+ int retval = ERROR_OK;
+ uint32_t dscr;
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+ uint8_t saved_apsel = dap_ap_get_select(swjdp);
+ dap_ap_select(swjdp, swjdp_debugap);
+
+ /*
+ * Tell the core to be halted by writing DRCR with 0x1
+ * and then wait for the core to be halted.
+ */
+ retval = mem_ap_write_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DRCR, 0x1);
+
+ /*
+ * enter halting debug mode
+ */
+ mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ retval = mem_ap_write_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, dscr | (1 << DSCR_HALT_DBG_MODE));
+
+ if (retval != ERROR_OK)
+ goto out;
+
+ do {
+ mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ } while ((dscr & (1 << DSCR_CORE_HALTED)) == 0);
+
+ target->debug_reason = DBG_REASON_DBGRQ;
+
+out:
+ dap_ap_select(swjdp, saved_apsel);
+ return retval;
+}
+
+static int cortex_a8_resume(struct target *target, int current,
+ uint32_t address, int handle_breakpoints, int debug_execution)
+{
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct armv4_5_common_s *armv4_5 = &armv7a->armv4_5_common;
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+
+// struct breakpoint *breakpoint = NULL;
+ uint32_t resume_pc, dscr;
+
+ uint8_t saved_apsel = dap_ap_get_select(swjdp);
+ dap_ap_select(swjdp, swjdp_debugap);
+
+ if (!debug_execution)
+ {
+ target_free_all_working_areas(target);
+// cortex_m3_enable_breakpoints(target);
+// cortex_m3_enable_watchpoints(target);
+ }
+
+#if 0
+ if (debug_execution)
+ {
+ /* Disable interrupts */
+ /* We disable interrupts in the PRIMASK register instead of
+ * masking with C_MASKINTS,
+ * This is probably the same issue as Cortex-M3 Errata 377493:
+ * C_MASKINTS in parallel with disabled interrupts can cause
+ * local faults to not be taken. */
+ buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
+ armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
+ armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
+
+ /* Make sure we are in Thumb mode */
+ buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
+ buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
+ armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
+ armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
+ }
+#endif
+
+ /* current = 1: continue on current pc, otherwise continue at <address> */
+ resume_pc = buf_get_u32(
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).value,
+ 0, 32);
+ if (!current)
+ resume_pc = address;
+
+ /* Make sure that the Armv7 gdb thumb fixups does not
+ * kill the return address
+ */
+ switch (armv4_5->core_state)
+ {
+ case ARMV4_5_STATE_ARM:
+ resume_pc &= 0xFFFFFFFC;
+ break;
+ case ARMV4_5_STATE_THUMB:
+ case ARM_STATE_THUMB_EE:
+ /* When the return address is loaded into PC
+ * bit 0 must be 1 to stay in Thumb state
+ */
+ resume_pc |= 0x1;
+ break;
+ case ARMV4_5_STATE_JAZELLE:
+ LOG_ERROR("How do I resume into Jazelle state??");
+ return ERROR_FAIL;
+ }
+ LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
+ buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).value,
+ 0, 32, resume_pc);
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).dirty = 1;
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).valid = 1;
+
+ cortex_a8_restore_context(target);
+
+#if 0
+ /* the front-end may request us not to handle breakpoints */
+ if (handle_breakpoints)
+ {
+ /* Single step past breakpoint at current address */
+ if ((breakpoint = breakpoint_find(target, resume_pc)))
+ {
+ LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
+ cortex_m3_unset_breakpoint(target, breakpoint);
+ cortex_m3_single_step_core(target);
+ cortex_m3_set_breakpoint(target, breakpoint);
+ }
+ }
+
+#endif
+ /* Restart core and wait for it to be started */
+ mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
+
+ do {
+ mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ } while ((dscr & (1 << DSCR_CORE_RESTARTED)) == 0);
+
+ target->debug_reason = DBG_REASON_NOTHALTED;
+ target->state = TARGET_RUNNING;
+
+ /* registers are now invalid */
+ register_cache_invalidate(armv4_5->core_cache);
+
+ if (!debug_execution)
+ {
+ target->state = TARGET_RUNNING;
+ target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
+ LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
+ }
+ else
+ {
+ target->state = TARGET_DEBUG_RUNNING;
+ target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
+ LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
+ }
+
+ dap_ap_select(swjdp, saved_apsel);
+
+ return ERROR_OK;
+}
+
+static int cortex_a8_debug_entry(struct target *target)
+{
+ int i;
+ uint32_t regfile[16], pc, cpsr, dscr;
+ int retval = ERROR_OK;
+ struct working_area *regfile_working_area = NULL;
+ struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct armv4_5_common_s *armv4_5 = &armv7a->armv4_5_common;
+ struct swjdp_common *swjdp = &armv7a->swjdp_info;
+ struct reg *reg;
+
+ LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
+
+ /* Enable the ITR execution once we are in debug mode */
+ mem_ap_read_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, &dscr);
+ dscr |= (1 << DSCR_EXT_INT_EN);
+ retval = mem_ap_write_atomic_u32(swjdp,
+ armv7a->debug_base + CPUDBG_DSCR, dscr);
+
+ /* Examine debug reason */
+ switch ((cortex_a8->cpudbg_dscr >> 2)&0xF)
+ {
+ case 0:
+ case 4:
+ target->debug_reason = DBG_REASON_DBGRQ;
+ break;
+ case 1:
+ case 3:
+ target->debug_reason = DBG_REASON_BREAKPOINT;
+ break;
+ case 10:
+ target->debug_reason = DBG_REASON_WATCHPOINT;
+ break;
+ default:
+ target->debug_reason = DBG_REASON_UNDEFINED;
+ break;
+ }
+
+ /* Examine target state and mode */
+ if (cortex_a8->fast_reg_read)
+ target_alloc_working_area(target, 64, ®file_working_area);
+
+ /* First load register acessible through core debug port*/
+ if (!regfile_working_area)
+ {
+ /* FIXME we don't actually need all these registers;
+ * reading them slows us down. Just R0, PC, CPSR...
+ */
+ for (i = 0; i <= 15; i++)
+ cortex_a8_dap_read_coreregister_u32(target,
+ ®file[i], i);
+ }
+ else
+ {
+ dap_ap_select(swjdp, swjdp_memoryap);
+ cortex_a8_read_regs_through_mem(target,
+ regfile_working_area->address, regfile);
+ dap_ap_select(swjdp, swjdp_memoryap);
+ target_free_working_area(target, regfile_working_area);
+ }
+
+ /* read Current PSR */
+ cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
+ pc = regfile[15];
+ dap_ap_select(swjdp, swjdp_debugap);
+ LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
+
+ armv4_5->core_mode = cpsr & 0x1F;
+
+ i = (cpsr >> 5) & 1; /* T */
+ i |= (cpsr >> 23) & 1; /* J << 1 */
+ switch (i) {
+ case 0: /* J = 0, T = 0 */
+ armv4_5->core_state = ARMV4_5_STATE_ARM;
+ break;
+ case 1: /* J = 0, T = 1 */
+ armv4_5->core_state = ARMV4_5_STATE_THUMB;
+ break;
+ case 2: /* J = 1, T = 0 */
+ LOG_WARNING("Jazelle state -- not handled");
+ armv4_5->core_state = ARMV4_5_STATE_JAZELLE;
+ break;
+ case 3: /* J = 1, T = 1 */
+ /* ThumbEE is very much like Thumb, but some of the
+ * instructions are different. Single stepping and
+ * breakpoints need updating...
+ */
+ LOG_WARNING("ThumbEE -- incomplete support");
+ armv4_5->core_state = ARM_STATE_THUMB_EE;
+ break;
+ }
+
+ /* update cache */
+ reg = armv4_5->core_cache->reg_list + ARMV4_5_CPSR;
+ buf_set_u32(reg->value, 0, 32, cpsr);
+ reg->valid = 1;
+ reg->dirty = 0;
+
+ for (i = 0; i <= ARM_PC; i++)
+ {
+ reg = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, i);
+
+ buf_set_u32(reg->value, 0, 32, regfile[i]);
+ reg->valid = 1;
+ reg->dirty = 0;
+ }
+
+ /* Fixup PC Resume Address */
+ if (cpsr & (1 << 5))
+ {
+ // T bit set for Thumb or ThumbEE state
+ regfile[ARM_PC] -= 4;
+ }
+ else
+ {
+ // ARM state
+ regfile[ARM_PC] -= 8;
+ }
+ buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, ARM_PC).value,
+ 0, 32, regfile[ARM_PC]);
+
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 0)
+ .dirty = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 0).valid;
+ ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 15)
+ .dirty = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, 15).valid;
+
+#if 0
+/* TODO, Move this */
+ uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
+ cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
+ LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
+
+ cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
+ LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
+
+ cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
+ LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
+#endif
+
+ /* Are we in an exception handler */
+// armv4_5->exception_number = 0;
+ if (armv7a->post_debug_entry)
+ armv7a->post_debug_entry(target);
+
+
+
+ return retval;
+
+}
+
/*
 * Hook run at the end of debug entry: read the cp15 control register,
 * identify caches on first entry, and mirror the MMU / D-cache /
 * I-cache enable bits into the armv4_5_mmu state used by the memory
 * access layer.
 */
static void cortex_a8_post_debug_entry(struct target *target)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;

//	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	/* examine cp15 control reg */
	armv7a->read_cp15(target, 0, 0, 1, 0, &cortex_a8->cp15_control_reg);
	jtag_execute_queue();
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);

	/* ctype == -1 means the cache geometry has not been probed yet */
	if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;
		/* identify caches */
		armv7a->read_cp15(target, 0, 1, 0, 0, &cache_type_reg);
		jtag_execute_queue();
		/* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
		armv4_5_identify_cache(cache_type_reg,
				&armv7a->armv4_5_mmu.armv4_5_cache);
	}

	/* Mirror the control register enable bits: bit 0 = MMU,
	 * bit 2 = data/unified cache, bit 12 = instruction cache */
	armv7a->armv4_5_mmu.mmu_enabled =
			(cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
}
+
+static int cortex_a8_step(struct target *target, int current, uint32_t address,
+ int handle_breakpoints)
+{
+ struct armv7a_common *armv7a = target_to_armv7a(target);
+ struct armv4_5_common_s *armv4_5 = &armv7a->armv4_5_common;
+ struct breakpoint *breakpoint = NULL;
+ struct breakpoint stepbreakpoint;
+
+ int timeout = 100;
+
+ if (target->state != TARGET_HALTED)
+ {
+ LOG_WARNING("target not halted");
+ return ERROR_TARGET_NOT_HALTED;
+ }
+
+ /* current = 1: continue on current pc, otherwise continue at <address> */
+ if (!current)
+ {
+ buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, ARM_PC).value,
+ 0, 32, address);
+ }
+ else
+ {
+ address = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
+ armv4_5->core_mode, ARM_PC).value,
+ 0, 32);
+ }
+
+ /* The front-end may request us not to handle breakpoints.
+ * But since Cortex-A8 uses breakpoint for single step,
+ * we MUST handle breakpoints.
+ */
+ handle_breakpoints = 1;
+ if (handle_breakpoints) {
+ breakpoint = breakpoint_find(target,
+ buf_get_u32(ARMV4_5_CORE_REG_MODE(
+ armv4_5->core_cache,
+ armv4_5->core_mode, 15).value,
+ 0, 32));
+ if (breakpoint)
+ cortex_a8_unset_breakpoint(target, breakpoint);
+ }
+
+ /* Setup single step breakpoint */
+ stepbreakpoint.address = address;
+ stepbreakpoint.length = (armv4_5->core_state == ARMV4_5_STATE_THUMB)
+ ? 2 : 4;
+ stepbreakpoint.type = BKPT_HARD;
+ stepbreakpoint.set = 0;
+
+ /* Break on IVA mismatch */
+ cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
+
+ target->debug_reason = DBG_REASON_SINGLESTEP;
+
+ cortex_a8_resume(target, 1, address, 0, 0);
+
+ while (target->state != TARGET_HALTED)
+ {
+ cortex_a8_poll(target);
+ if (--timeout == 0)
+ {
+ LOG_WARNING("timeout waiting for target halt");
+ break;
+ }
+ }
+
+ cortex_a8_unset_breakpoint(target, &stepbreakpoint);
+ if (timeout > 0) target->debug_reason = DBG_REASON_BREAKPOINT;
+
+ if (breakpoint)
+ cortex_a8_set_breakpoint(target, breakpoint, 0);
+
+ if (target->state != TARGET_HALTED)
+ LOG_DEBUG("target stepped");
+
+ return ERROR_OK;
+}
+
/*
 * Write all dirty registers from the cache back to the core over the
 * DCC, one mode at a time (switching mode via CPSR writes as needed).
 * CPSR and R0 are flushed last because the write path itself uses them:
 * R0 is the DCC staging register, and CPSR writes change banked-mode
 * visibility.  Always returns ERROR_OK (the individual DCC writes are
 * not checked — see the TODO below).
 */
static int cortex_a8_restore_context(struct target *target)
{
	uint32_t value;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct reg_cache *cache = armv7a->armv4_5_common.core_cache;
	unsigned max = cache->num_regs;
	struct reg *r;
	bool flushed, flush_cpsr = false;

	LOG_DEBUG(" ");

	if (armv7a->pre_restore_context)
		armv7a->pre_restore_context(target);

	/* Flush all dirty registers from the cache, one mode at a time so
	 * that we write CPSR as little as possible. Save CPSR and R0 for
	 * last; they're used to change modes and write other registers.
	 *
	 * REVISIT be smarter: save eventual mode for last loop, don't
	 * need to write CPSR an extra time.
	 */
	do {
		enum armv4_5_mode mode = ARMV4_5_MODE_ANY;
		unsigned i;

		flushed = false;

		/* write dirty non-{R0,CPSR} registers sharing the same mode */
		for (i = max - 1, r = cache->reg_list + 1; i > 0; i--, r++) {
			struct armv4_5_core_reg *reg;

			if (!r->dirty || i == ARMV4_5_CPSR)
				continue;
			reg = r->arch_info;

			/* TODO Check return values */

			/* Pick a mode and update CPSR; else ignore this
			 * register if it's for a different mode than what
			 * we're handling on this pass.
			 *
			 * REVISIT don't distinguish SYS and USR modes.
			 *
			 * FIXME if we restore from FIQ mode, R8..R12 will
			 * get wrongly flushed onto FIQ shadows...
			 */
			if (mode == ARMV4_5_MODE_ANY) {
				mode = reg->mode;
				if (mode != ARMV4_5_MODE_ANY) {
					/* regnum 16 = CPSR: switch the core
					 * into this register's mode */
					cortex_a8_dap_write_coreregister_u32(
							target, mode, 16);
					flush_cpsr = true;
				}
			} else if (mode != reg->mode)
				continue;

			/* Write this register (regnum 17 = SPSR of the
			 * current mode, selected when reg->num == 16) */
			value = buf_get_u32(r->value, 0, 32);
			cortex_a8_dap_write_coreregister_u32(target, value,
					(reg->num == 16) ? 17 : reg->num);
			r->dirty = false;
			flushed = true;
		}

	} while (flushed);

	/* now flush CPSR if needed ... */
	r = cache->reg_list + ARMV4_5_CPSR;
	if (flush_cpsr || r->dirty) {
		value = buf_get_u32(r->value, 0, 32);
		cortex_a8_dap_write_coreregister_u32(target, value, 16);
		r->dirty = false;
	}

	/* ... and R0 always (it was dirtied when we saved context) */
	r = cache->reg_list + 0;
	value = buf_get_u32(r->value, 0, 32);
	cortex_a8_dap_write_coreregister_u32(target, value, 0);
	r->dirty = false;

	if (armv7a->post_restore_context)
		armv7a->post_restore_context(target);

	return ERROR_OK;
}
+
+
+#if 0
+/*
+ * Cortex-A8 Core register functions
+ */
/*
 * (Currently #if 0'd out.)  Read core register `num` (0..ARM_CPSR)
 * through the DCC and mark the working registers dirty so they get
 * restored later.
 */
static int cortex_a8_load_core_reg_u32(struct target *target, int num,
		armv4_5_mode_t mode, uint32_t * value)
{
	int retval;
	struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);

	if ((num <= ARM_CPSR))
	{
		/* read a normal core register */
		retval = cortex_a8_dap_read_coreregister_u32(target, value, num);

		if (retval != ERROR_OK)
		{
			LOG_ERROR("JTAG failure %i", retval);
			return ERROR_JTAG_DEVICE_ERROR;
		}
		LOG_DEBUG("load from core reg %i value 0x%" PRIx32, num, *value);
	}
	else
	{
		return ERROR_INVALID_ARGUMENTS;
	}

	/* Register other than r0 - r14 uses r0 for access */
	if (num > 14)
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5->core_mode, 0).dirty =
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5->core_mode, 0).valid;
	/* NOTE(review): the PC statement below is NOT guarded by the "if"
	 * above — PC is always marked dirty-if-valid regardless of num.
	 * Confirm whether that is intended (code is disabled by #if 0). */
	ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
			armv4_5->core_mode, 15).dirty =
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
			armv4_5->core_mode, 15).valid;

	return ERROR_OK;
}
+
/*
 * (Currently #if 0'd out.)  Write `value` to core register `num`
 * (0..ARM_CPSR) through the DCC; on failure the cache entry is marked
 * dirty-if-valid so a later restore retries the write.
 */
static int cortex_a8_store_core_reg_u32(struct target *target, int num,
		armv4_5_mode_t mode, uint32_t value)
{
	int retval;
//	uint32_t reg;
	struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);

#ifdef ARMV7_GDB_HACKS
	/* If the LR register is being modified, make sure it will put us
	 * in "thumb" mode, or an INVSTATE exception will occur. This is a
	 * hack to deal with the fact that gdb will sometimes "forge"
	 * return addresses, and doesn't set the LSB correctly (i.e., when
	 * printing expressions containing function calls, it sets LR=0.) */

	if (num == 14)
		value |= 0x01;
#endif

	if ((num <= ARM_CPSR))
	{
		retval = cortex_a8_dap_write_coreregister_u32(target, value, num);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("JTAG failure %i", retval);
			/* keep the failed write pending: dirty if still valid */
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					armv4_5->core_mode, num).dirty =
				ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
					armv4_5->core_mode, num).valid;
			return ERROR_JTAG_DEVICE_ERROR;
		}
		LOG_DEBUG("write core reg %i value 0x%" PRIx32, num, value);
	}
	else
	{
		return ERROR_INVALID_ARGUMENTS;
	}

	return ERROR_OK;
}
+#endif