mirror of https://github.com/yuzu-emu/unicorn.git
synced 2025-01-11 04:05:37 +00:00
cputlb: Move VICTIM_TLB_HIT out of line
There are currently 22 invocations of this function, and we're about to increase that number.

Backports commit 7e9a7c50d9a400ef51242d661a261123c2cc9485 from qemu
This commit is contained in:
parent 2893a1c381
commit 9e2422032a
@@ -487,6 +487,35 @@ void tlb_unprotect_code(CPUState *cpu, ram_addr_t ram_addr)
     cpu_physical_memory_set_dirty_flag(cpu->uc, ram_addr, DIRTY_MEMORY_CODE);
 }
 
+/* Return true if ADDR is present in the victim tlb, and has been copied
+   back to the main tlb. */
+static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
+                           size_t elt_ofs, target_ulong page)
+{
+    size_t vidx;
+    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
+        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
+        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
+
+        if (cmp == page) {
+            /* Found entry in victim tlb, swap tlb and iotlb. */
+            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
+            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
+            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
+
+            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
+            tmpio = *io; *io = *vio; *vio = tmpio;
+            return true;
+        }
+    }
+    return false;
+}
+
+/* Macro to call the above, with local variables from the use context. */
+#define VICTIM_TLB_HIT(TY) \
+    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
+                   addr & TARGET_PAGE_MASK)
+
 #define MMUSUFFIX _mmu
 
 #define SHIFT 0
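For readers skimming the diff, here is a minimal standalone sketch of the pattern the new helper introduces: one out-of-line lookup that receives the comparator field as a byte offset, plus a thin macro that turns a field name into that offset with offsetof. DemoTLBEntry, DEMO_VTLB_SIZE, demo_victim_hit and DEMO_VICTIM_HIT below are invented stand-ins for illustration, not the real CPUTLBEntry, CPU_VTLB_SIZE, victim_tlb_hit or VICTIM_TLB_HIT, and the real helper also swaps the matching iotlb entry.

/* Standalone sketch, not part of the patch: toy tables and invented names. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;                    /* assumption for the demo */

typedef struct {
    target_ulong addr_read;
    target_ulong addr_write;
} DemoTLBEntry;

#define DEMO_VTLB_SIZE 4

static DemoTLBEntry tlb_table[1];                 /* "main" TLB slot */
static DemoTLBEntry tlb_v_table[DEMO_VTLB_SIZE];  /* victim entries */

/* One body covers both comparators: elt_ofs selects which field of the
 * entry is compared, as victim_tlb_hit() does with
 * "*(target_ulong *)((uintptr_t)vtlb + elt_ofs)".  On a hit the victim
 * entry is swapped into the main table. */
static bool demo_victim_hit(size_t index, size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < DEMO_VTLB_SIZE; ++vidx) {
        DemoTLBEntry *vtlb = &tlb_v_table[vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            DemoTLBEntry tmp = tlb_table[index];
            tlb_table[index] = *vtlb;
            *vtlb = tmp;
            return true;
        }
    }
    return false;
}

/* Thin macro in the spirit of VICTIM_TLB_HIT(TY): the field name becomes
 * an offset, so read and write lookups share one function body. */
#define DEMO_VICTIM_HIT(TY, page) \
    demo_victim_hit(0, offsetof(DemoTLBEntry, TY), (page))

int main(void)
{
    tlb_v_table[2].addr_read = 0x1000;

    printf("%d\n", DEMO_VICTIM_HIT(addr_read, 0x1000));  /* 1: hit, swapped into main table */
    printf("%d\n", DEMO_VICTIM_HIT(addr_read, 0x1000));  /* 0: entry now lives in the main table */
    printf("%d\n", DEMO_VICTIM_HIT(addr_write, 0x1000)); /* 0: write comparator never matched */
    return 0;
}

Because the field is reduced to an offset before the call, the same body serves VICTIM_TLB_HIT(ADDR_READ) and VICTIM_TLB_HIT(addr_write), which is what lets the per-field inline wrappers in the next hunk go away.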
@@ -119,42 +119,6 @@
 # define helper_te_st_name helper_le_st_name
 #endif
 
-/* macro to check the victim tlb */
-#define VICTIM_TLB_HIT(ty) \
-    /* we are about to do a page table walk. our last hope is the \
-     * victim tlb. try to refill from the victim tlb before walking the \
-     * page table. */ \
-    int vidx; \
-    CPUIOTLBEntry tmpiotlb; \
-    CPUTLBEntry tmptlb; \
-    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \
-        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) { \
-            /* found entry in victim tlb, swap tlb and iotlb */ \
-            tmptlb = env->tlb_table[mmu_idx][index]; \
-            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
-            env->tlb_v_table[mmu_idx][vidx] = tmptlb; \
-            tmpiotlb = env->iotlb[mmu_idx][index]; \
-            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \
-            env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \
-            break; \
-        } \
-    } \
-    /* return true when there is a vtlb hit, i.e. vidx >= 0 */ \
-    return (vidx >= 0)
-
-#ifndef victim_tlb_hit_funcs
-#define victim_tlb_hit_funcs
-static inline bool victim_tlb_hit_read(CPUArchState *env, target_ulong addr, int mmu_idx, int index)
-{
-    VICTIM_TLB_HIT(ADDR_READ);
-}
-
-static inline bool victim_tlb_hit_write(CPUArchState *env, target_ulong addr, int mmu_idx, int index)
-{
-    VICTIM_TLB_HIT(addr_write);
-}
-#endif // victim_tlb_hit_funcs
-
 #ifndef SOFTMMU_CODE_ACCESS
 static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                               CPUIOTLBEntry *iotlbentry,
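The block removed here is the statement-style macro the new helper replaces: its expansion declares locals, runs the loop and ends in a return, so it could only be used as the whole body of the victim_tlb_hit_read/victim_tlb_hit_write wrappers. The new VICTIM_TLB_HIT expands to an ordinary call returning bool, so the call sites in the hunks below can test it directly in an if condition. A small illustrative sketch of that shape difference, with invented names (table, LOOKUP_STMT, lookup_read, LOOKUP_EXPR, lookup):

/* Standalone sketch, not from the patch; all names are invented. */
#include <stdbool.h>
#include <stdio.h>

static int table[4] = { 5, 7, 9, 11 };

/* Statement-style, like the removed VICTIM_TLB_HIT(ty): declares locals and
 * ends in "return", so it must form the entire body of a wrapper function. */
#define LOOKUP_STMT(key)                 \
    int i;                               \
    for (i = 3; i >= 0; --i) {           \
        if (table[i] == (key)) {         \
            break;                       \
        }                                \
    }                                    \
    return (i >= 0)

static bool lookup_read(int key)
{
    LOOKUP_STMT(key);
}

/* Expression-style, like the new VICTIM_TLB_HIT(TY): just a call, usable
 * directly inside a condition. */
static bool lookup(int key)
{
    int i;
    for (i = 0; i < 4; ++i) {
        if (table[i] == key) {
            return true;
        }
    }
    return false;
}
#define LOOKUP_EXPR(key) lookup(key)

int main(void)
{
    printf("%d\n", lookup_read(7));      /* 1 */
    if (!LOOKUP_EXPR(42)) {              /* mirrors "if (!VICTIM_TLB_HIT(...))" */
        printf("miss, would refill\n");
    }
    return 0;
}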
@@ -297,7 +261,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) {
+        if (!VICTIM_TLB_HIT(ADDR_READ)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
@@ -487,7 +451,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) {
+        if (!VICTIM_TLB_HIT(ADDR_READ)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
@@ -674,7 +638,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) {
+        if (!VICTIM_TLB_HIT(addr_write)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -810,7 +774,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) {
+        if (!VICTIM_TLB_HIT(addr_write)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;