mirror of
https://github.com/yuzu-emu/unicorn.git
synced 2025-02-25 14:56:52 +00:00
target-arm: add SCTLR_EL3 and make SCTLR banked
Implements SCTLR_EL3 and uses secure/non-secure instance when needed. Backports commit 137feaa9a1622620adf19c0b707883dd990738e2 from qemu
This commit is contained in:
parent
ca0608f68d
commit
9087027b85
|
@ -105,7 +105,7 @@ static void arm_cpu_reset(CPUState *s)
|
|||
#if defined(CONFIG_USER_ONLY)
|
||||
env->pstate = PSTATE_MODE_EL0t;
|
||||
/* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
|
||||
env->cp15.c1_sys |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
|
||||
env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
|
||||
/* and to the FP/Neon instructions */
|
||||
env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3);
|
||||
#else
|
||||
|
@ -173,7 +173,11 @@ static void arm_cpu_reset(CPUState *s)
|
|||
// Unicorn: force Thumb mode by the setting of uc_open()
|
||||
env->thumb = env->uc->thumb;
|
||||
|
||||
if (env->cp15.c1_sys & SCTLR_V) {
|
||||
/* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
|
||||
* executing as AArch32 then check if highvecs are enabled and
|
||||
* adjust the PC accordingly.
|
||||
*/
|
||||
if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
|
||||
env->regs[15] = 0xFFFF0000;
|
||||
}
|
||||
|
||||
|
|
|
@ -180,7 +180,15 @@ typedef struct CPUARMState {
|
|||
struct {
|
||||
uint32_t c0_cpuid;
|
||||
uint64_t c0_cssel; /* Cache size selection. */
|
||||
uint64_t c1_sys; /* System control register. */
|
||||
union { /* System control register. */
|
||||
struct {
|
||||
uint64_t _unused_sctlr;
|
||||
uint64_t sctlr_ns;
|
||||
uint64_t hsctlr;
|
||||
uint64_t sctlr_s;
|
||||
};
|
||||
uint64_t sctlr_el[4];
|
||||
};
|
||||
uint64_t c1_coproc; /* Coprocessor access register. */
|
||||
uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
|
||||
uint64_t sder; /* Secure debug enable register. */
|
||||
|
|
|
@ -1625,7 +1625,7 @@ static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
|
||||
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
|
||||
{
|
||||
if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
|
||||
if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
|
||||
return CP_ACCESS_TRAP;
|
||||
}
|
||||
return CP_ACCESS_OK;
|
||||
|
@ -1643,7 +1643,7 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
|
|||
/* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
|
||||
* SCTLR_EL1.UCI is set.
|
||||
*/
|
||||
if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
|
||||
if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
|
||||
return CP_ACCESS_TRAP;
|
||||
}
|
||||
return CP_ACCESS_OK;
|
||||
|
@ -1714,7 +1714,7 @@ static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
|
|||
/* We don't implement EL2, so the only control on DC ZVA is the
|
||||
* bit in the SCTLR which can prohibit access for EL0.
|
||||
*/
|
||||
if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) {
|
||||
if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
|
||||
return CP_ACCESS_TRAP;
|
||||
}
|
||||
return CP_ACCESS_OK;
|
||||
|
@ -1753,6 +1753,24 @@ static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
|
|||
update_spsel(env, val);
|
||||
}
|
||||
|
||||
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
uint64_t value)
|
||||
{
|
||||
ARMCPU *cpu = arm_env_get_cpu(env);
|
||||
|
||||
if (raw_read(env, ri) == value) {
|
||||
/* Skip the TLB flush if nothing actually changed; Linux likes
|
||||
* to do a lot of pointless SCTLR writes.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
raw_write(env, ri, value);
|
||||
/* ??? Lots of these bits are not implemented. */
|
||||
/* This may enable/disable the MMU, so do a TLB flush. */
|
||||
tlb_flush(CPU(cpu), 1);
|
||||
}
|
||||
|
||||
static const ARMCPRegInfo v8_cp_reginfo[] = {
|
||||
/* Minimal set of EL0-visible registers. This will need to be expanded
|
||||
* significantly for system emulation of AArch64 CPUs.
|
||||
|
@ -1973,6 +1991,9 @@ static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
|
|||
};
|
||||
|
||||
static const ARMCPRegInfo v8_el3_cp_reginfo[] = {
|
||||
{ "SCTLR_EL3", 0,1,0, 3,6,0, ARM_CP_STATE_AA64,0,
|
||||
PL3_RW, 0, NULL, 0, offsetof(CPUARMState, cp15.sctlr_el[3]), {0, 0},
|
||||
NULL, NULL, sctlr_write, NULL, raw_write, },
|
||||
{ "ELR_EL3", 0,4,0, 3,6,1, ARM_CP_STATE_AA64,
|
||||
ARM_CP_NO_MIGRATE, PL3_RW, 0, NULL, 0, offsetof(CPUARMState, elr_el[3]) },
|
||||
{ "ESR_EL3", 0,5,2, 3,6,0, ARM_CP_STATE_AA64,
|
||||
|
@ -2010,30 +2031,12 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
|
|||
REGINFO_SENTINEL
|
||||
};
|
||||
|
||||
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
uint64_t value)
|
||||
{
|
||||
ARMCPU *cpu = arm_env_get_cpu(env);
|
||||
|
||||
if (raw_read(env, ri) == value) {
|
||||
/* Skip the TLB flush if nothing actually changed; Linux likes
|
||||
* to do a lot of pointless SCTLR writes.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
raw_write(env, ri, value);
|
||||
/* ??? Lots of these bits are not implemented. */
|
||||
/* This may enable/disable the MMU, so do a TLB flush. */
|
||||
tlb_flush(CPU(cpu), 1);
|
||||
}
|
||||
|
||||
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
|
||||
{
|
||||
/* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
|
||||
* but the AArch32 CTR has its own reginfo struct)
|
||||
*/
|
||||
if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
|
||||
if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
|
||||
return CP_ACCESS_TRAP;
|
||||
}
|
||||
return CP_ACCESS_OK;
|
||||
|
@ -2739,7 +2742,8 @@ void register_cp_regs_for_features(ARMCPU *cpu)
|
|||
{
|
||||
ARMCPRegInfo sctlr = {
|
||||
"SCTLR", 0,1,0, 3,0,0, ARM_CP_STATE_BOTH,
|
||||
0, PL1_RW, 0, NULL, cpu->reset_sctlr, offsetof(CPUARMState, cp15.c1_sys), {0, 0},
|
||||
0, PL1_RW, 0, NULL, cpu->reset_sctlr, 0,
|
||||
{offsetof(CPUARMState, cp15.sctlr_s), offsetof(CPUARMState, cp15.sctlr_ns)},
|
||||
NULL, NULL,sctlr_write, NULL,raw_write,
|
||||
};
|
||||
if (arm_feature(env, ARM_FEATURE_XSCALE)) {
|
||||
|
@ -3786,7 +3790,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
|
|||
/* High vectors. */
|
||||
if (new_mode == ARM_CPU_MODE_MON) {
|
||||
addr += env->cp15.mvbar;
|
||||
} else if (env->cp15.c1_sys & SCTLR_V) {
|
||||
} else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
|
||||
/* High vectors. When enabled, base address cannot be remapped. */
|
||||
addr += 0xffff0000;
|
||||
} else {
|
||||
|
@ -3816,7 +3820,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
|
|||
/* this is a lie, as there was no c1_sys on V4T/V5, but who cares
|
||||
* and we should just guard the thumb mode on V4 */
|
||||
if (arm_feature(env, ARM_FEATURE_V4T)) {
|
||||
env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0;
|
||||
env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
|
||||
}
|
||||
env->regs[14] = env->regs[15] + offset;
|
||||
env->regs[15] = addr;
|
||||
|
@ -3847,7 +3851,7 @@ static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
|
|||
}
|
||||
if (access_type == 1)
|
||||
return 0;
|
||||
switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) {
|
||||
switch (A32_BANKED_CURRENT_REG_GET(env, sctlr) & (SCTLR_S | SCTLR_R)) {
|
||||
case SCTLR_S:
|
||||
return is_user ? 0 : PAGE_READ;
|
||||
case SCTLR_R:
|
||||
|
@ -4096,7 +4100,8 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
|
|||
goto do_fault;
|
||||
|
||||
/* The simplified model uses AP[0] as an access control bit. */
|
||||
if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) {
|
||||
if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_AFE)
|
||||
&& (ap & 1) == 0) {
|
||||
/* Access flag fault. */
|
||||
code = (code == 15) ? 6 : 3;
|
||||
goto do_fault;
|
||||
|
@ -4431,11 +4436,16 @@ static inline int get_phys_addr(CPUARMState *env, target_ulong address,
|
|||
hwaddr *phys_ptr, int *prot,
|
||||
target_ulong *page_size)
|
||||
{
|
||||
/* This is not entirely correct as get_phys_addr() can also be called
|
||||
* from ats_write() for an address translation of a specific regime.
|
||||
*/
|
||||
uint32_t sctlr = A32_BANKED_CURRENT_REG_GET(env, sctlr);
|
||||
|
||||
/* Fast Context Switch Extension. */
|
||||
if (address < 0x02000000)
|
||||
address += env->cp15.c13_fcse;
|
||||
|
||||
if ((env->cp15.c1_sys & SCTLR_M) == 0) {
|
||||
if ((sctlr & SCTLR_M) == 0) {
|
||||
/* MMU/MPU disabled. */
|
||||
*phys_ptr = address;
|
||||
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
||||
|
@ -4448,7 +4458,7 @@ static inline int get_phys_addr(CPUARMState *env, target_ulong address,
|
|||
} else if (extended_addresses_enabled(env)) {
|
||||
return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
|
||||
prot, page_size);
|
||||
} else if (env->cp15.c1_sys & SCTLR_XP) {
|
||||
} else if (sctlr & SCTLR_XP) {
|
||||
return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
|
||||
prot, page_size);
|
||||
} else {
|
||||
|
|
|
@ -361,7 +361,7 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
|
|||
* Note that SPSel is never OK from EL0; we rely on handle_msr_i()
|
||||
* to catch that case at translate time.
|
||||
*/
|
||||
if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
|
||||
if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
|
||||
raise_exception(env, EXCP_UDEF);
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue