Mirror of https://github.com/yuzu-emu/unicorn.git
tlb: Add ifetch argument to cpu_mmu_index()
This is set to true when the index is for an instruction fetch translation. The core get_page_addr_code() sets it, as do the SOFTMMU_CODE_ACCESS accessors. All targets ignore it for now, and all other callers pass "false". This will allow targets that wish to split the mmu index between instruction and data accesses to do so. A subsequent patch will do just that for PowerPC.

Backports commit 97ed5ccdee95f0b98bedc601ff979e368583472c from qemu
This commit is contained in:
parent 97ad660361
commit 1722be3e73
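To illustrate the intent of the new argument (this sketch is not part of the commit): a target that wants distinct TLB modes for instruction fetches and data accesses can now key off "ifetch". CPUFooState, MMU_DATA_IDX and MMU_IFETCH_IDX below are made-up names for the sketch, not identifiers this commit introduces.

    #include <stdbool.h>

    /* Hypothetical target with split instruction/data MMU modes. */
    enum { MMU_DATA_IDX = 0, MMU_IFETCH_IDX = 1 };

    typedef struct CPUFooState { int dummy; } CPUFooState;

    static inline int cpu_mmu_index(CPUFooState *env, bool ifetch)
    {
        (void)env;
        /* Instruction fetches translate through their own TLB mode. */
        return ifetch ? MMU_IFETCH_IDX : MMU_DATA_IDX;
    }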
@@ -289,7 +289,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
     CPUState *cpu = ENV_GET_CPU(env1);
 
     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    mmu_idx = cpu_mmu_index(env1);
+    mmu_idx = cpu_mmu_index(env1, true);
     if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                  (addr & TARGET_PAGE_MASK))) {
         cpu_ldub_code(env1, addr);
@@ -409,7 +409,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 #endif /* (NB_MMU_MODES > 12) */
 
 /* these access are slower, they must be as rare as possible */
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
 #define MEMSUFFIX _data
 #define DATA_SIZE 1
 #include "exec/cpu_ldst_template.h"
@@ -437,7 +437,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 #define stl(p, v) stl_data(p, v)
 #define stq(p, v) stq_data(p, v)
 
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
 #define MEMSUFFIX _code
 #define SOFTMMU_CODE_ACCESS
 
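The two hunks above redefine CPU_MMU_INDEX before each expansion of exec/cpu_ldst_template.h, so the generated _data accessors pick their TLB row with ifetch=false while the _code accessors pass true. Below is a minimal standalone imitation of that define/expand/undef pattern; it is illustrative only, not the real template.

    #include <stdbool.h>
    #include <stdio.h>

    static inline int cpu_mmu_index(int *env, bool ifetch)
    {
        (void)env;
        return ifetch ? 1 : 0; /* illustrative row numbers */
    }

    /* First expansion: data accessors see CPU_MMU_INDEX = (env, false). */
    #define CPU_MMU_INDEX (cpu_mmu_index(env, false))
    static int mmu_row_data(int *env) { return CPU_MMU_INDEX; }
    #undef CPU_MMU_INDEX

    /* Second expansion: code accessors see CPU_MMU_INDEX = (env, true). */
    #define CPU_MMU_INDEX (cpu_mmu_index(env, true))
    static int mmu_row_code(int *env) { return CPU_MMU_INDEX; }
    #undef CPU_MMU_INDEX

    int main(void)
    {
        int env = 0;
        printf("data row %d, code row %d\n",
               mmu_row_data(&env), mmu_row_code(&env));
        return 0;
    }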
@@ -1702,7 +1702,7 @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
 }
 
 /* Determine the current mmu_idx to use for normal loads/stores */
-static inline int cpu_mmu_index(CPUARMState *env)
+static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
 {
     int el = arm_current_el(env);
 
@@ -1935,7 +1935,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                    << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
     }
 
-    *flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
+    *flags |= (cpu_mmu_index(env, false) << ARM_TBFLAG_MMUIDX_SHIFT);
     /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
      * states defined in the ARM ARM for software singlestep:
      *  SS_ACTIVE   PSTATE.SS   State
@@ -6276,7 +6276,7 @@ hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     uint32_t fsr;
     MemTxAttrs attrs = {0};
 
-    ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr,
+    ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
                         &attrs, &prot, &page_size, &fsr);
 
     if (ret) {
@@ -6452,7 +6452,7 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
         void *hostaddr[maxidx];
 #endif
         int try, i;
-        unsigned mmu_idx = cpu_mmu_index(env);
+        unsigned mmu_idx = cpu_mmu_index(env, false);
         TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
 
         for (try = 0; try < 2; try++) {
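Context for the TCGMemOpIdx line above: make_memop_idx() packs the access type and the chosen mmu index into a single operand, so the index selected here travels with every generated memory operation. A rough standalone rendition follows; the real definition lives in QEMU's tcg headers, and the exact bit layout should be treated as an assumption.

    #include <stdint.h>

    typedef int TCGMemOp;
    typedef uint32_t TCGMemOpIdx;

    /* Assumed layout: memop in the high bits, mmu index in the low
     * four bits, mirroring QEMU's make_memop_idx() of this period. */
    static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
    {
        return ((uint32_t)op << 4) | idx;
    }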
@@ -1207,7 +1207,7 @@ CPUX86State *cpu_x86_init_user(struct uc_struct *uc, const char *cpu_model);
 #define MMU_KSMAP_IDX   0
 #define MMU_USER_IDX    1
 #define MMU_KNOSMAP_IDX 2
-static inline int cpu_mmu_index(CPUX86State *env)
+static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
 {
     return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
         (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
@@ -8621,7 +8621,7 @@ static inline void gen_intermediate_code_internal(uint8_t *gen_opc_cc_op,
     /* select memory access functions */
     dc->mem_index = 0;
     if (flags & HF_SOFTMMU_MASK) {
-        dc->mem_index = cpu_mmu_index(env);
+        dc->mem_index = cpu_mmu_index(env, false);
     }
     dc->cpuid_features = env->features[FEAT_1_EDX];
     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
@@ -234,7 +234,7 @@ static inline CPUM68KState *cpu_init(struct uc_struct *uc, const char *cpu_model
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUM68KState *env)
+static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
 {
     return (env->sr & SR_S) == 0 ? 1 : 0;
 }
@@ -640,7 +640,7 @@ extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
 #define MMU_MODE1_SUFFIX _super
 #define MMU_MODE2_SUFFIX _user
 #define MMU_USER_IDX 2
-static inline int cpu_mmu_index (CPUMIPSState *env)
+static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
 {
     return env->hflags & MIPS_HFLAG_KSU;
 }
@@ -3632,7 +3632,7 @@ FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
 #if !defined(CONFIG_USER_ONLY)
 #define MEMOP_IDX(DF) \
     TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
-                                    cpu_mmu_index(env));
+                                    cpu_mmu_index(env, false));
 #else
 #define MEMOP_IDX(DF)
 #endif
@@ -3677,7 +3677,7 @@ void helper_msa_st_ ## TYPE(CPUMIPSState *env, uint32_t wd, \
                             target_ulong addr) \
 { \
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
-    int mmu_idx = cpu_mmu_index(env); \
+    int mmu_idx = cpu_mmu_index(env, false); \
     int i; \
     MEMOP_IDX(DF) \
     ensure_writable_pages(env, addr, mmu_idx, GETRA()); \
@@ -654,7 +654,7 @@ static inline int cpu_supervisor_mode(CPUSPARCState *env1)
 }
 #endif
 
-static inline int cpu_mmu_index(CPUSPARCState *env1)
+static inline int cpu_mmu_index(CPUSPARCState *env1, bool ifetch)
 {
 #if defined(CONFIG_USER_ONLY)
     return MMU_USER_IDX;
@@ -849,7 +849,7 @@ hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     SPARCCPU *cpu = SPARC_CPU(cs->uc, cs);
     CPUSPARCState *env = &cpu->env;
     hwaddr phys_addr;
-    int mmu_idx = cpu_mmu_index(env);
+    int mmu_idx = cpu_mmu_index(env, false);
     MemoryRegionSection section;
 
     if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
@@ -5379,7 +5379,7 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
     last_pc = dc->pc;
     dc->npc = (target_ulong) tb->cs_base;
     dc->cc_op = CC_OP_DYNAMIC;
-    dc->mem_idx = cpu_mmu_index(env);
+    dc->mem_idx = cpu_mmu_index(env, false);
     dc->def = env->def;
     dc->fpu_enabled = tb_fpu_enabled(tb->flags);
     dc->address_mask_32bit = tb_am_enabled(tb->flags);