mirror of
https://github.com/yuzu-emu/unicorn.git
synced 2024-12-22 19:45:35 +00:00
target/riscv: Split the Hypervisor execute load helpers
Split the hypervisor execute load functions into two separate functions. This avoids us having to pass the memop to the C helper functions. Backports 7687537ab0c16e0b1e69e7707456573a64b8e13b
This commit is contained in:
parent
4762dcda3c
commit
416b2a0077
|
@ -6261,7 +6261,8 @@ riscv_symbols = (
|
|||
'helper_fsub_s',
|
||||
'helper_hyp_gvma_tlb_flush',
|
||||
'helper_hyp_tlb_flush',
|
||||
'helper_hyp_x_load',
|
||||
'helper_hyp_hlvx_hu',
|
||||
'helper_hyp_hlvx_wu',
|
||||
'helper_mret',
|
||||
'helper_tlb_flush',
|
||||
'helper_set_rounding_mode',
|
||||
|
|
|
@ -3697,7 +3697,8 @@
|
|||
#define helper_fsub_s helper_fsub_s_riscv32
|
||||
#define helper_hyp_gvma_tlb_flush helper_hyp_gvma_tlb_flush_riscv32
|
||||
#define helper_hyp_tlb_flush helper_hyp_tlb_flush_riscv32
|
||||
#define helper_hyp_x_load helper_hyp_x_load_riscv32
|
||||
#define helper_hyp_hlvx_hu helper_hyp_hlvx_hu_riscv32
|
||||
#define helper_hyp_hlvx_wu helper_hyp_hlvx_wu_riscv32
|
||||
#define helper_mret helper_mret_riscv32
|
||||
#define helper_tlb_flush helper_tlb_flush_riscv32
|
||||
#define helper_set_rounding_mode helper_set_rounding_mode_riscv32
|
||||
|
|
|
@ -3697,7 +3697,8 @@
|
|||
#define helper_fsub_s helper_fsub_s_riscv64
|
||||
#define helper_hyp_gvma_tlb_flush helper_hyp_gvma_tlb_flush_riscv64
|
||||
#define helper_hyp_tlb_flush helper_hyp_tlb_flush_riscv64
|
||||
#define helper_hyp_x_load helper_hyp_x_load_riscv64
|
||||
#define helper_hyp_hlvx_hu helper_hyp_hlvx_hu_riscv64
|
||||
#define helper_hyp_hlvx_wu helper_hyp_hlvx_wu_riscv64
|
||||
#define helper_mret helper_mret_riscv64
|
||||
#define helper_tlb_flush helper_tlb_flush_riscv64
|
||||
#define helper_set_rounding_mode helper_set_rounding_mode_riscv64
|
||||
|
|
|
@ -84,7 +84,8 @@ DEF_HELPER_1(tlb_flush, void, env)
|
|||
#ifndef CONFIG_USER_ONLY
|
||||
DEF_HELPER_1(hyp_tlb_flush, void, env)
|
||||
DEF_HELPER_1(hyp_gvma_tlb_flush, void, env)
|
||||
DEF_HELPER_4(hyp_x_load, tl, env, tl, tl, tl)
|
||||
DEF_HELPER_2(hyp_hlvx_hu, tl, env, tl)
|
||||
DEF_HELPER_2(hyp_hlvx_wu, tl, env, tl)
|
||||
#endif
|
||||
|
||||
/* Vector functions */
|
||||
|
|
|
@ -279,20 +279,15 @@ static bool trans_hlvx_hu(DisasContext *ctx, arg_hlvx_hu *a)
|
|||
TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
|
||||
TCGv t0 = tcg_temp_new(tcg_ctx);
|
||||
TCGv t1 = tcg_temp_new(tcg_ctx);
|
||||
TCGv mem_idx = tcg_temp_new(tcg_ctx);
|
||||
TCGv memop = tcg_temp_new(tcg_ctx);
|
||||
|
||||
check_access(ctx);
|
||||
|
||||
gen_get_gpr(ctx, t0, a->rs1);
|
||||
tcg_gen_movi_tl(tcg_ctx, mem_idx, ctx->mem_idx);
|
||||
tcg_gen_movi_tl(tcg_ctx, memop, MO_TEUW);
|
||||
|
||||
gen_helper_hyp_x_load(tcg_ctx, t1, tcg_ctx->cpu_env, t0, mem_idx, memop);
|
||||
gen_helper_hyp_hlvx_hu(tcg_ctx, t1, tcg_ctx->cpu_env, t0);
|
||||
gen_set_gpr(ctx, a->rd, t1);
|
||||
|
||||
tcg_temp_free(tcg_ctx, t0);
|
||||
tcg_temp_free(tcg_ctx, t1);
|
||||
tcg_temp_free(tcg_ctx, mem_idx);
|
||||
tcg_temp_free(tcg_ctx, memop);
|
||||
return true;
|
||||
#else
|
||||
return false;
|
||||
|
@ -306,20 +301,15 @@ static bool trans_hlvx_wu(DisasContext *ctx, arg_hlvx_wu *a)
|
|||
TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
|
||||
TCGv t0 = tcg_temp_new(tcg_ctx);
|
||||
TCGv t1 = tcg_temp_new(tcg_ctx);
|
||||
TCGv mem_idx = tcg_temp_new(tcg_ctx);
|
||||
TCGv memop = tcg_temp_new(tcg_ctx);
|
||||
|
||||
check_access(ctx);
|
||||
|
||||
gen_get_gpr(ctx, t0, a->rs1);
|
||||
tcg_gen_movi_tl(tcg_ctx, mem_idx, ctx->mem_idx);
|
||||
tcg_gen_movi_tl(tcg_ctx, memop, MO_TEUL);
|
||||
|
||||
gen_helper_hyp_x_load(tcg_ctx, t1, tcg_ctx->cpu_env, t0, mem_idx, memop);
|
||||
gen_helper_hyp_hlvx_wu(tcg_ctx, t1, tcg_ctx->cpu_env, t0);
|
||||
gen_set_gpr(ctx, a->rd, t1);
|
||||
|
||||
tcg_temp_free(tcg_ctx, t0);
|
||||
tcg_temp_free(tcg_ctx, t1);
|
||||
tcg_temp_free(tcg_ctx, mem_idx);
|
||||
tcg_temp_free(tcg_ctx, memop);
|
||||
return true;
|
||||
#else
|
||||
return false;
|
||||
|
|
|
@ -227,36 +227,18 @@ void helper_hyp_gvma_tlb_flush(CPURISCVState *env)
|
|||
helper_hyp_tlb_flush(env);
|
||||
}
|
||||
|
||||
target_ulong helper_hyp_x_load(CPURISCVState *env, target_ulong address,
|
||||
target_ulong attrs, target_ulong memop)
|
||||
target_ulong helper_hyp_hlvx_hu(CPURISCVState *env, target_ulong address)
|
||||
{
|
||||
if (env->priv == PRV_M ||
|
||||
(env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
|
||||
(env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
|
||||
get_field(env->hstatus, HSTATUS_HU))) {
|
||||
target_ulong pte;
|
||||
int mmu_idx = cpu_mmu_index(env, false) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
|
||||
int mmu_idx = cpu_mmu_index(env, true) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
|
||||
|
||||
switch (memop) {
|
||||
case MO_TEUW:
|
||||
pte = cpu_lduw_mmuidx_ra(env, address, mmu_idx, GETPC());
|
||||
break;
|
||||
case MO_TEUL:
|
||||
pte = cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
return cpu_lduw_mmuidx_ra(env, address, mmu_idx, GETPC());
|
||||
}
|
||||
|
||||
return pte;
|
||||
}
|
||||
target_ulong helper_hyp_hlvx_wu(CPURISCVState *env, target_ulong address)
|
||||
{
|
||||
int mmu_idx = cpu_mmu_index(env, true) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
|
||||
|
||||
if (riscv_cpu_virt_enabled(env)) {
|
||||
riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
|
||||
} else {
|
||||
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
|
||||
}
|
||||
return 0;
|
||||
return cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
|
||||
}
|
||||
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
|
Loading…
Reference in a new issue