Mirror of https://github.com/yuzu-emu/unicorn.git (synced 2025-01-11 19:45:29 +00:00)
target-arm: Handle exception return from AArch64 to non-EL0 AArch32
Remove the assumptions that the AArch64 exception return code was making about a return to AArch32 always being a return to EL0. This includes pulling out the illegal-SPSR checks so we can apply them for return to 32-bit as well as return to 64-bit.

Backports commit 3809951bf61605974b91578c582de4da28f8ed07 from qemu
parent 134eeeeacc
commit 2ffb545ec3
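Before the diff itself, here is a minimal standalone sketch of the mapping that the new el_from_spsr() helper below implements: decode the saved SPSR to find which exception level (and register width) the return is targeting, rather than assuming that an AArch32 return means EL0. This is illustrative only, not the unicorn/QEMU source; the mode numbers and bit positions are the architectural encodings, and the main() driver is made up for demonstration.

#include <stdint.h>
#include <stdio.h>

#define PSTATE_nRW       (1u << 4)  /* SPSR M[4]: 1 = return to AArch32 */
#define CPSR_M           0x1fu      /* SPSR M[4:0]: mode field */

#define ARM_CPU_MODE_USR 0x10u
#define ARM_CPU_MODE_FIQ 0x11u
#define ARM_CPU_MODE_IRQ 0x12u
#define ARM_CPU_MODE_SVC 0x13u
#define ARM_CPU_MODE_MON 0x16u
#define ARM_CPU_MODE_ABT 0x17u
#define ARM_CPU_MODE_HYP 0x1au
#define ARM_CPU_MODE_UND 0x1bu
#define ARM_CPU_MODE_SYS 0x1fu

/* Return the EL this SPSR requests a return to, or -1 if it is illegal. */
static int el_from_spsr(uint32_t spsr)
{
    if (spsr & PSTATE_nRW) {
        /* AArch32 target: the EL is implied by the mode bits */
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:  /* no way back to Mon from AArch64 */
        default:
            return -1;
        }
    } else {
        /* AArch64 target: the EL is M[3:2], with reserved encodings rejected */
        if (spsr & (1u << 1)) {
            return -1;          /* reserved M[1] bit set */
        }
        if ((spsr & 0xfu) == 1) {
            return -1;          /* EL0 with the M[0] (SP) bit set */
        }
        return (spsr >> 2) & 3;
    }
}

int main(void)
{
    printf("AArch32 SVC  -> EL%d\n", el_from_spsr(PSTATE_nRW | ARM_CPU_MODE_SVC));
    printf("AArch64 EL1h -> EL%d\n", el_from_spsr(0x5u));
    printf("AArch32 Mon  -> %d (illegal)\n",
           el_from_spsr(PSTATE_nRW | ARM_CPU_MODE_MON));
    return 0;
}

Run as-is, the sketch prints EL1, EL1, and -1: an AArch32 SVC return and an AArch64 EL1h return both land at EL1, while a return to Monitor mode from AArch64 is rejected. This is the classification that the reworked HELPER(exception_return) in the diff below builds its legality checks on.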
@@ -5210,7 +5210,7 @@ static void arm_cpu_do_interrupt_aarch64_(CPUState *cs)
     /* Entry vector offset depends on whether the implemented EL
      * immediately lower than the target level is using AArch32 or AArch64
      */
-    bool is_aa64;
+    bool is_aa64 = false;
 
     switch (new_el) {
     case 3:
@@ -641,12 +641,51 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
     }
 }
 
+static int el_from_spsr(uint32_t spsr)
+{
+    /* Return the exception level that this SPSR is requesting a return to,
+     * or -1 if it is invalid (an illegal return)
+     */
+    if (spsr & PSTATE_nRW) {
+        switch (spsr & CPSR_M) {
+        case ARM_CPU_MODE_USR:
+            return 0;
+        case ARM_CPU_MODE_HYP:
+            return 2;
+        case ARM_CPU_MODE_FIQ:
+        case ARM_CPU_MODE_IRQ:
+        case ARM_CPU_MODE_SVC:
+        case ARM_CPU_MODE_ABT:
+        case ARM_CPU_MODE_UND:
+        case ARM_CPU_MODE_SYS:
+            return 1;
+        case ARM_CPU_MODE_MON:
+            /* Returning to Mon from AArch64 is never possible,
+             * so this is an illegal return.
+             */
+        default:
+            return -1;
+        }
+    } else {
+        if (extract32(spsr, 1, 1)) {
+            /* Return with reserved M[1] bit set */
+            return -1;
+        }
+        if (extract32(spsr, 0, 4) == 1) {
+            /* return to EL0 with M[0] bit set */
+            return -1;
+        }
+        return extract32(spsr, 2, 2);
+    }
+}
+
 void HELPER(exception_return)(CPUARMState *env)
 {
     int cur_el = arm_current_el(env);
     unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
     uint32_t spsr = env->banked_spsr[spsr_idx];
     int new_el;
+    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
 
     aarch64_save_sp(env, cur_el);
 
@@ -663,20 +702,10 @@ void HELPER(exception_return)(CPUARMState *env)
         spsr &= ~PSTATE_SS;
     }
 
-    if (spsr & PSTATE_nRW) {
-        /* TODO: We currently assume EL1/2/3 are running in AArch64. */
-        env->aarch64 = 0;
-        new_el = 0;
-        env->uncached_cpsr = 0x10;
-        cpsr_write(env, spsr, ~0);
-        if (!arm_singlestep_active(env)) {
-            env->uncached_cpsr &= ~PSTATE_SS;
-        }
-        aarch64_sync_64_to_32(env);
-
-        env->regs[15] = env->elr_el[1] & ~0x1;
-    } else {
-        new_el = extract32(spsr, 2, 2);
+    new_el = el_from_spsr(spsr);
+    if (new_el == -1) {
+        goto illegal_return;
+    }
     if (new_el > cur_el
         || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
         /* Disallow return to an EL which is unimplemented or higher
@@ -684,14 +713,23 @@ void HELPER(exception_return)(CPUARMState *env)
          */
         goto illegal_return;
     }
-    if (extract32(spsr, 1, 1)) {
-        /* Return with reserved M[1] bit set */
+
+    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
+        /* Return to an EL which is configured for a different register width */
         goto illegal_return;
     }
-    if (new_el == 0 && (spsr & PSTATE_SP)) {
-        /* Return to EL0 with M[0] bit set */
-        goto illegal_return;
+
+    if (!return_to_aa64) {
+        env->aarch64 = 0;
+        env->uncached_cpsr = spsr & CPSR_M;
+        cpsr_write(env, spsr, ~0);
+        if (!arm_singlestep_active(env)) {
+            env->uncached_cpsr &= ~PSTATE_SS;
+        }
+        aarch64_sync_64_to_32(env);
+
+        env->regs[15] = env->elr_el[cur_el] & ~0x1;
+    } else {
         env->aarch64 = 1;
         pstate_write(env, spsr);
         if (!arm_singlestep_active(env)) {
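The other rule this commit introduces can be stated on its own: once the target EL is known, a return whose SPSR asks for a different register width than the one the target EL is configured for is also illegal, except for a return to EL0, whose width is not fixed in the same way (a 64-bit EL1 may legally host a 32-bit EL0). A hedged, self-contained restatement of that check follows; the names here are invented for illustration and are not the project's helpers.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the new test
 *   new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64
 * from the diff above: true means the exception return must be rejected.
 */
static bool return_width_mismatch(int new_el, bool target_el_is_aa64,
                                  bool return_to_aa64)
{
    return new_el != 0 && target_el_is_aa64 != return_to_aa64;
}

int main(void)
{
    /* AArch64-configured EL1 asked to take an AArch32-style return: illegal. */
    printf("%d\n", return_width_mismatch(1, true, false));  /* prints 1 */
    /* Return to EL0: the SPSR alone decides the width, so no mismatch here. */
    printf("%d\n", return_width_mismatch(0, true, false));  /* prints 0 */
    return 0;
}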