mirror of https://github.com/yuzu-emu/unicorn.git
target-arm: Use correct mmu_idx for unprivileged loads and stores
The MMU index to use for unprivileged loads and stores is more complicated than we currently implement:

* for A64, it should be "if at EL1, access as if EL0; otherwise access at current EL"
* for A32/T32, it should be "if EL2, UNPREDICTABLE; otherwise access as if at EL0".

In both cases, if we want to make the access for Secure EL0, this is not the same mmu_idx as for Non-Secure EL0.

Backports commit 579d21cce63f3dd2f6ee49c0b02a14e92cb4a836 from qemu
parent 3261ed5801
commit da216e211f
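As an illustration of the rule stated in the commit message, here is a minimal, self-contained C sketch of the index mapping. Assumptions are labelled: the enum below only mirrors the real ARMMMUIdx_* enumerators from target-arm, and a64_user_mem_index(), a32_user_mem_index() and the main() harness are hypothetical stand-ins for the helpers added by this patch, not code taken from it.

#include <assert.h>
#include <stdio.h>

/* Stand-ins for the translation-regime indexes; the real ARMMMUIdx_*
 * enumerators live in target-arm and are only mirrored here so that
 * the sketch compiles on its own. */
typedef enum {
    IDX_S12NSE0,  /* Non-secure EL0 */
    IDX_S12NSE1,  /* Non-secure EL1 */
    IDX_S1E2,     /* Hyp (EL2) */
    IDX_S1E3,     /* Monitor (EL3) */
    IDX_S1SE0,    /* Secure EL0 */
    IDX_S1SE1,    /* Secure EL1 */
    IDX_S2NS      /* Stage 2; never the translator's current regime */
} MMUIdxSketch;

/* A64 rule: at EL1, access as if at the matching EL0 (Secure stays
 * Secure, Non-secure stays Non-secure); otherwise keep the current
 * regime. */
static MMUIdxSketch a64_user_mem_index(MMUIdxSketch cur)
{
    switch (cur) {
    case IDX_S12NSE1:
        return IDX_S12NSE0;
    case IDX_S1SE1:
        return IDX_S1SE0;
    case IDX_S2NS:
        assert(0);
        /* fall through */
    default:
        return cur;
    }
}

/* A32/T32 rule: UNPREDICTABLE at EL2 (treated as a PL0 access here),
 * otherwise access as if at PL0 in the current security state. */
static MMUIdxSketch a32_user_mem_index(MMUIdxSketch cur)
{
    switch (cur) {
    case IDX_S1E2:
    case IDX_S12NSE0:
    case IDX_S12NSE1:
        return IDX_S12NSE0;
    case IDX_S1E3:
    case IDX_S1SE0:
    case IDX_S1SE1:
        return IDX_S1SE0;
    default:
        assert(0);
        return cur;  /* not reached */
    }
}

int main(void)
{
    /* The same unprivileged access picks different translation regimes
     * depending on the current one: Non-secure EL1 maps to Non-secure
     * EL0, while Secure EL1 maps to Secure EL0, which is exactly the
     * distinction the patch adds. */
    printf("A64 NS EL1 -> %d (NS EL0 is %d)\n",
           a64_user_mem_index(IDX_S12NSE1), IDX_S12NSE0);
    printf("A64 S  EL1 -> %d (S  EL0 is %d)\n",
           a64_user_mem_index(IDX_S1SE1), IDX_S1SE0);
    printf("A32 Mon    -> %d (S  EL0 is %d)\n",
           a32_user_mem_index(IDX_S1E3), IDX_S1SE0);
    return 0;
}

The diff below installs this behaviour in the real helpers, get_a64_user_mem_index() and get_a32_user_mem_index().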
@@ -114,6 +114,23 @@ void a64_translate_init(struct uc_struct *uc)
 #endif
 }
 
+static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
+{
+    /* Return the mmu_idx to use for A64 "unprivileged load/store" insns:
+     *  if EL1, access as if EL0; otherwise access at current EL
+     */
+    switch (s->mmu_idx) {
+    case ARMMMUIdx_S12NSE1:
+        return ARMMMUIdx_S12NSE0;
+    case ARMMMUIdx_S1SE1:
+        return ARMMMUIdx_S1SE0;
+    case ARMMMUIdx_S2NS:
+        g_assert_not_reached();
+    default:
+        return s->mmu_idx;
+    }
+}
+
 #if 0
 void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                             fprintf_function cpu_fprintf, int flags)
@@ -2138,7 +2155,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
-        int memidx = is_unpriv ? MMU_USER_IDX : get_mem_index(s);
+        int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
 
         if (is_store) {
             do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx);
@@ -100,6 +100,28 @@ void arm_translate_init(struct uc_struct *uc)
     a64_translate_init(uc);
 }
 
+static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
+{
+    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
+     * insns:
+     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
+     *  otherwise, access as if at PL0.
+     */
+    switch (s->mmu_idx) {
+    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
+    case ARMMMUIdx_S12NSE0:
+    case ARMMMUIdx_S12NSE1:
+        return ARMMMUIdx_S12NSE0;
+    case ARMMMUIdx_S1E3:
+    case ARMMMUIdx_S1SE0:
+    case ARMMMUIdx_S1SE1:
+        return ARMMMUIdx_S1SE0;
+    case ARMMMUIdx_S2NS:
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static inline TCGv_i32 load_cpu_offset(struct uc_struct *uc, int offset)
 {
     TCGContext *tcg_ctx = uc->tcg_ctx;
@@ -8968,7 +8990,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) // qq
             tmp2 = load_reg(s, rn);
             if ((insn & 0x01200000) == 0x00200000) {
                 /* ldrt/strt */
-                i = MMU_USER_IDX;
+                i = get_a32_user_mem_index(s);
             } else {
                 i = get_mem_index(s);
             }
@@ -10442,7 +10464,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
                 break;
             case 0xe: /* User privilege. */
                 tcg_gen_addi_i32(tcg_ctx, addr, addr, imm);
-                memidx = MMU_USER_IDX;
+                memidx = get_a32_user_mem_index(s);
                 break;
             case 0x9: /* Post-decrement. */
                 imm = 0-imm;
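In short, the remaining hunks replace the old blanket MMU_USER_IDX with the regime-aware helpers: the A64 LDTR/STTR path in disas_ldst_reg_imm9() and the A32 ldrt/strt and Thumb-2 user-privilege paths now pass the selected index through their existing memidx parameters (for example into do_gpr_st_memidx()), so an unprivileged access made from a Secure regime is no longer forced onto a single user index that cannot distinguish Secure EL0 from Non-secure EL0.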