From f31764dd5b895ea6935368632c963709fcafbeed Mon Sep 17 00:00:00 2001
From: "Emilio G. Cota"
Date: Tue, 12 Feb 2019 11:48:43 -0500
Subject: [PATCH] cputlb: update TLB entry/index after tlb_fill

We are failing to take into account that tlb_fill() can cause a TLB
resize, which renders prior TLB entry pointers/indices stale. Fix it
by re-doing the TLB entry lookups immediately after tlb_fill.

Fixes: 86e1eff8bc ("tcg: introduce dynamic TLB sizing", 2019-01-28)

Backports commit 6d967cb86d5b4a60ba15b497126b621ce9ca6609 from qemu
---
 qemu/accel/tcg/cputlb.c           | 9 ++++++---
 qemu/accel/tcg/softmmu_template.h | 8 ++++++++
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/qemu/accel/tcg/cputlb.c b/qemu/accel/tcg/cputlb.c
index 876d4fb9..661cfe66 100644
--- a/qemu/accel/tcg/cputlb.c
+++ b/qemu/accel/tcg/cputlb.c
@@ -349,7 +349,8 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
         tlb_fill(cpu, addr, 0, MMU_INST_FETCH, mmu_idx, 0);
 
-        index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        index = tlb_index(env, mmu_idx, addr);
+        entry = tlb_entry(env, mmu_idx, addr);
         tlb_addr = env->tlb_table[mmu_idx][index].addr_code;
         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
             /* RAM access. We can't handle this, so for now just stop */
@@ -528,7 +529,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
 
-        index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        index = tlb_index(env, mmu_idx, addr);
         tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
             /* RAM access */
@@ -584,7 +585,7 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
 
-        index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        index = tlb_index(env, mmu_idx, addr);
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
             /* RAM access */
@@ -711,6 +712,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                      mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            tlbe = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = tlb_addr_write(tlbe);
     }
diff --git a/qemu/accel/tcg/softmmu_template.h b/qemu/accel/tcg/softmmu_template.h
index b0272c00..f519c5e8 100644
--- a/qemu/accel/tcg/softmmu_template.h
+++ b/qemu/accel/tcg/softmmu_template.h
@@ -226,6 +226,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = entry->ADDR_READ;
     }
@@ -412,6 +414,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = entry->ADDR_READ;
     }
@@ -585,6 +589,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = tlb_addr_write(entry);
     }
@@ -731,6 +737,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = tlb_addr_write(entry);
     }
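
A minimal C sketch of the failure mode, appended after the patch for
illustration only. The names Tlb, Entry, fill and lookup are hypothetical
stand-ins, not the QEMU API: if the fill step can resize (reallocate) the
table, a pointer or index taken before the fill is stale afterwards and must
be recomputed.

    #include <stdlib.h>
    #include <string.h>

    typedef struct {
        unsigned long addr;
    } Entry;

    typedef struct {
        Entry *table;
        unsigned long mask;     /* table holds mask + 1 entries */
    } Tlb;

    /* Grows (reallocates) the table on a miss; any Entry pointer or index
     * computed before this call is stale afterwards. Error handling and
     * initialization are omitted for brevity. */
    static void fill(Tlb *tlb, unsigned long addr)
    {
        unsigned long new_size = (tlb->mask + 1) * 2;

        tlb->table = realloc(tlb->table, new_size * sizeof(*tlb->table));
        memset(tlb->table, 0, new_size * sizeof(*tlb->table));
        tlb->mask = new_size - 1;
        tlb->table[addr & tlb->mask].addr = addr;
    }

    static Entry *lookup(Tlb *tlb, unsigned long addr)
    {
        unsigned long index = addr & tlb->mask;
        Entry *entry = &tlb->table[index];

        if (entry->addr != addr) {
            fill(tlb, addr);
            /* Without the next two lines, 'entry' may point into freed
             * memory and 'index' was computed with the old mask: the
             * stale-lookup bug this patch fixes after each tlb_fill(). */
            index = addr & tlb->mask;
            entry = &tlb->table[index];
        }
        return entry;
    }

In the patch itself, tlb_index() and tlb_entry() play the role of that
recomputation, re-deriving the index and entry pointer from the TLB's current
size immediately after each tlb_fill() call.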