Mirror of https://github.com/yuzu-emu/unicorn.git
cputlb: update TLB entry/index after tlb_fill
We are failing to take into account that tlb_fill() can cause a TLB resize,
which renders prior TLB entry pointers/indices stale. Fix it by re-doing the
TLB entry lookups immediately after tlb_fill.

Fixes: 86e1eff8bc ("tcg: introduce dynamic TLB sizing", 2019-01-28)

Backports commit 6d967cb86d5b4a60ba15b497126b621ce9ca6609 from qemu
This commit is contained in:
parent 1b44fd94ac
commit f31764dd5b
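Why this matters, in isolation: below is a minimal, self-contained C sketch of
the failure mode (all names here are invented for illustration; this is not
unicorn/QEMU code). A lookup helper hands out a pointer into a table, a "fill"
operation may reallocate that table, and any pointer or index computed before
the fill dangles afterwards:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy model of the bug: a growable table whose "fill" may realloc it,
 * mirroring how tlb_fill() can trigger a dynamic TLB resize. */
typedef struct { unsigned long addr; } Entry;

typedef struct {
    Entry *table;
    size_t mask;              /* table size - 1, always a power of two */
} Tlb;

static size_t tlb_index(Tlb *t, unsigned long addr)
{
    return addr & t->mask;
}

static Entry *tlb_entry(Tlb *t, unsigned long addr)
{
    return &t->table[tlb_index(t, addr)];
}

/* A fill that grows the table, invalidating any previously computed
 * entry pointers and indices (error handling elided for brevity). */
static void tlb_fill(Tlb *t, unsigned long addr)
{
    size_t new_size = (t->mask + 1) * 2;
    t->table = realloc(t->table, new_size * sizeof(Entry));
    memset(t->table, 0, new_size * sizeof(Entry));
    t->mask = new_size - 1;
    t->table[tlb_index(t, addr)].addr = addr;
}

int main(void)
{
    Tlb t = { calloc(8, sizeof(Entry)), 7 };
    unsigned long addr = 42;

    Entry *entry = tlb_entry(&t, addr);  /* computed before the fill */
    tlb_fill(&t, addr);                  /* may realloc t.table      */

    /* Using the old 'entry' here would be a use-after-free; the fix,
     * as in this commit, is to re-derive the index and the pointer. */
    entry = tlb_entry(&t, addr);
    printf("entry->addr = %lu\n", entry->addr);
    free(t.table);
    return 0;
}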
cputlb.c:

@@ -349,7 +349,8 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
 
         tlb_fill(cpu, addr, 0, MMU_INST_FETCH, mmu_idx, 0);
 
-        index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        index = tlb_index(env, mmu_idx, addr);
+        entry = tlb_entry(env, mmu_idx, addr);
         tlb_addr = env->tlb_table[mmu_idx][index].addr_code;
         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
             /* RAM access. We can't handle this, so for now just stop */
@@ -528,7 +529,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
 
         tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
 
-        index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        index = tlb_index(env, mmu_idx, addr);
         tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
             /* RAM access */
@@ -584,7 +585,7 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
 
         tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
 
-        index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        index = tlb_index(env, mmu_idx, addr);
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
             /* RAM access */
@@ -711,6 +712,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                      mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            tlbe = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = tlb_addr_write(tlbe);
     }

softmmu_template.h:
@@ -226,6 +226,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = entry->ADDR_READ;
     }
@@ -412,6 +414,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = entry->ADDR_READ;
     }
@@ -585,6 +589,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = tlb_addr_write(entry);
     }
@@ -731,6 +737,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = tlb_addr_write(entry);
     }
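For reference, the tlb_index()/tlb_entry() helpers this patch switches to look
roughly like the following after the dynamic-sizing change. This is a sketch
modeled on upstream QEMU around commit 86e1eff8bc; the exact field names in
the unicorn tree may differ:

/* Index into the (dynamically sized) TLB for this mmu_idx. The per-mmu_idx
 * mask replaces the old compile-time CPU_TLB_SIZE - 1 constant. */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
                                  target_ulong addr)
{
    uintptr_t size_mask = env->tlb_mask[mmu_idx] >> CPU_TLB_ENTRY_BITS;

    return (addr >> TARGET_PAGE_BITS) & size_mask;
}

/* Pointer to the TLB entry for this address, valid only until the next
 * operation that can resize the table (e.g. a tlb_fill). */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
                                     target_ulong addr)
{
    return &env->tlb_table[mmu_idx][tlb_index(env, mmu_idx, addr)];
}

Because a resize reallocates env->tlb_table[mmu_idx], both the index and the
entry pointer must be recomputed after every tlb_fill(), which is exactly what
each hunk above adds.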