From 6543f9ea2641c8a7792db62bfbd6b0b7a336408a Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Tue, 3 Jul 2018 19:21:22 -0400
Subject: [PATCH] tcg: Define and use new tlb_hit() and tlb_hit_page() functions

The condition to check whether an address has hit against a particular
TLB entry is not completely trivial. We do this in various places, and
in fact in one place (get_page_addr_code()) we have got the condition
wrong. Abstract it out into new tlb_hit() and tlb_hit_page() inline
functions (one for a known-page-aligned address and one for an
arbitrary address), and use them in all the places where we had the
condition correct.

This is a no-behaviour-change patch; we leave fixing the buggy code in
get_page_addr_code() to a subsequent patch.

Backports commit 334692bce7f0653a93b8d84ecde8c847b08dec38 from qemu
---
 qemu/accel/tcg/cputlb.c           | 15 +++++----------
 qemu/accel/tcg/softmmu_template.h | 16 ++++++----------
 qemu/include/exec/cpu-all.h       | 23 +++++++++++++++++++++++
 qemu/include/exec/cpu_ldst.h      |  3 +--
 4 files changed, 35 insertions(+), 22 deletions(-)

diff --git a/qemu/accel/tcg/cputlb.c b/qemu/accel/tcg/cputlb.c
index a02cc6e8..ac014929 100644
--- a/qemu/accel/tcg/cputlb.c
+++ b/qemu/accel/tcg/cputlb.c
@@ -423,12 +423,9 @@ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
 
 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 {
-    if (addr == (tlb_entry->addr_read &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_write &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_code &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (tlb_hit_page(tlb_entry->addr_read, addr) ||
+        tlb_hit_page(tlb_entry->addr_write, addr) ||
+        tlb_hit_page(tlb_entry->addr_code, addr)) {
         memset(tlb_entry, -1, sizeof(*tlb_entry));
     }
 }
@@ -572,8 +569,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
 
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         /* TLB entry is for a different page */
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
@@ -615,8 +611,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     }
 
     /* Check TLB entry and enforce page permissions. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                      mmu_idx, retaddr);
diff --git a/qemu/accel/tcg/softmmu_template.h b/qemu/accel/tcg/softmmu_template.h
index 48fbfb9c..68a51bdf 100644
--- a/qemu/accel/tcg/softmmu_template.h
+++ b/qemu/accel/tcg/softmmu_template.h
@@ -219,8 +219,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -404,8 +403,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -574,8 +572,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -618,7 +615,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
         index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+        if (!tlb_hit_page(tlb_addr2, page2)
             && !VICTIM_TLB_HIT(addr_write, page2)) {
             tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -719,8 +716,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -763,7 +759,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
         index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+        if (!tlb_hit_page(tlb_addr2, page2)
             && !VICTIM_TLB_HIT(addr_write, page2)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
diff --git a/qemu/include/exec/cpu-all.h b/qemu/include/exec/cpu-all.h
index 7b6b0dee..b16bde91 100644
--- a/qemu/include/exec/cpu-all.h
+++ b/qemu/include/exec/cpu-all.h
@@ -369,6 +369,29 @@ CPUArchState *cpu_copy(CPUArchState *env);
  */
 #define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)
 
+/**
+ * tlb_hit_page: return true if page aligned @addr is a hit against the
+ * TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (must be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
+{
+    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
+}
+
+/**
+ * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (need not be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
+{
+    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
+}
+
 ram_addr_t last_ram_offset(struct uc_struct *uc);
 void qemu_mutex_lock_ramlist(struct uc_struct *uc);
 void qemu_mutex_unlock_ramlist(struct uc_struct *uc);
diff --git a/qemu/include/exec/cpu_ldst.h b/qemu/include/exec/cpu_ldst.h
index bd37a55a..b0c6b83d 100644
--- a/qemu/include/exec/cpu_ldst.h
+++ b/qemu/include/exec/cpu_ldst.h
@@ -480,8 +480,7 @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
         g_assert_not_reached();
     }
 
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         /* TLB entry is for a different page */
         return NULL;
     }
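
Note (not part of the patch): below is a minimal standalone sketch of the semantics the new helpers capture, for readers unfamiliar with how the TLB entry address fields encode state. The constants are illustrative stand-ins only; the real TARGET_PAGE_BITS and TLB_INVALID_MASK values come from the target configuration and cpu-all.h.

/* Minimal sketch, not QEMU code: illustrative constants and a 64-bit
 * target_ulong are assumed for demonstration purposes only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;                        /* assumed 64-bit target */

#define TARGET_PAGE_BITS   12                         /* illustrative value */
#define TARGET_PAGE_MASK   ((target_ulong)-1 << TARGET_PAGE_BITS)
#define TLB_INVALID_MASK   (1 << 3)                   /* illustrative value */

/* Hit test for an address already rounded down to a page boundary.
 * If TLB_INVALID_MASK is set in tlb_addr it survives the mask, so a
 * page-aligned addr (which has that bit clear) can never compare equal. */
static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

/* Hit test for an arbitrary address anywhere within the page. */
static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}

int main(void)
{
    target_ulong entry = 0x4000;                      /* valid entry for page 0x4000 */
    target_ulong stale = 0x4000 | TLB_INVALID_MASK;   /* same page, but invalidated */

    printf("%d\n", tlb_hit(entry, 0x4123));           /* 1: same page, entry valid */
    printf("%d\n", tlb_hit(stale, 0x4123));           /* 0: invalid bit forces a miss */
    printf("%d\n", tlb_hit(entry, 0x5123));           /* 0: different page */
    return 0;
}

The stale-entry case is why this is a hit test rather than a plain page-number comparison: an invalidated entry for the right page must still miss, which masking with (TARGET_PAGE_MASK | TLB_INVALID_MASK) guarantees.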