Revert "cputlb: Filter flushes on already clean tlbs"

This reverts commit 5ab9723787.
Author: Lioncash
Date:   2019-06-30 19:21:20 -04:00
Commit: 802c626145
Parent: 576be63f06
3 changed files with 23 additions and 73 deletions
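
For context: the reverted commit kept a per-mmu_idx "dirty" bitmask (the
tlb_c.dirty field removed below) and narrowed every flush request to the
modes actually modified since their last flush. The following is a minimal,
standalone model of that idea, with hypothetical names; it sketches the
mechanism and is not the QEMU code itself:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical model of the reverted "dirty filter": bit N of `dirty`
     * is set when MMU mode N has been modified since its last flush. */
    static uint16_t dirty;

    static void tlb_mark_dirty(int mmu_idx)
    {
        dirty |= 1u << mmu_idx;
    }

    static void tlb_flush_by_mmuidx(uint16_t asked)
    {
        uint16_t to_clean = asked & dirty;   /* skip already-clean modes */

        dirty &= ~to_clean;
        /* Visit only the set bits: clear the lowest one each iteration. */
        for (uint16_t work = to_clean; work != 0; work &= work - 1) {
            printf("flush mmu_idx %d\n", __builtin_ctz(work));
        }
    }

    int main(void)
    {
        tlb_mark_dirty(1);
        tlb_flush_by_mmuidx(0x3);   /* flushes only mmu_idx 1 */
        tlb_flush_by_mmuidx(0x3);   /* no-op: both modes now clean */
        return 0;
    }

The revert removes this filtering, so every requested mmu_idx is flushed
unconditionally, as the hunks below show.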

cputlb.c:

@@ -64,10 +64,6 @@
 
 void tlb_init(CPUState *cpu)
 {
-    CPUArchState *env = cpu->env_ptr;
-
-    /* Ensure that cpu_reset performs a full flush.  */
-    env->tlb_c.dirty = ALL_MMUIDX_BITS;
 }
 
 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
@@ -82,20 +78,16 @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 {
     CPUArchState *env = cpu->env_ptr;
-    uint16_t asked = data.host_int;
-    uint16_t all_dirty, work, to_clean;
+    unsigned long mmu_idx_bitmask = data.host_int;
+    int mmu_idx;
 
-    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
+    tlb_debug("mmu_idx:0x%04lx\n", mmu_idx_bitmask);
 
-    all_dirty = env->tlb_c.dirty;
-    to_clean = asked & all_dirty;
-    all_dirty &= ~to_clean;
-    env->tlb_c.dirty = all_dirty;
-
-    for (work = to_clean; work != 0; work &= work - 1) {
-        int mmu_idx = ctz32(work);
-        tlb_flush_one_mmuidx_locked(env, mmu_idx);
+    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
+            tlb_flush_one_mmuidx_locked(env, mmu_idx);
+        }
     }
 
     cpu_tb_jmp_cache_clear(cpu);
 }
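
The restored worker above probes each of the NB_MMU_MODES indices with
test_bit instead of walking only the set bits of a pre-filtered mask. A
self-contained sketch of that shape, assuming a single-word bitmask (the
test_bit here is a simplified stand-in for QEMU's bitops helper):

    #include <stdbool.h>
    #include <stdio.h>

    #define NB_MMU_MODES 4   /* per-target in QEMU; 4 is just an example */

    /* Simplified stand-in for test_bit(); the real helper also handles
     * bitmaps wider than one word. */
    static bool test_bit(int nr, const unsigned long *addr)
    {
        return (*addr >> nr) & 1ul;
    }

    int main(void)
    {
        unsigned long mmu_idx_bitmask = 0x5;   /* request: modes 0 and 2 */
        for (int mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
                /* Flushed whenever requested, even if already clean. */
                printf("flush mmu_idx %d\n", mmu_idx);
            }
        }
        return 0;
    }
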
@@ -362,9 +354,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     target_ulong address;
     target_ulong code_address;
     uintptr_t addend;
-    CPUTLBEntry *te, tn;
+    CPUTLBEntry *te;
     hwaddr iotlb, xlat, sz, paddr_page;
     target_ulong vaddr_page;
+    unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
 
     if (size <= TARGET_PAGE_SIZE) {
@@ -409,24 +402,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     index = tlb_index(env, mmu_idx, vaddr_page);
     te = tlb_entry(env, mmu_idx, vaddr_page);
 
-    /* Note that the tlb is no longer clean.  */
-    env->tlb_c.dirty |= 1 << mmu_idx;
-
-    /* Make sure there's no cached translation for the new page.  */
-    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
-
-    /*
-     * Only evict the old entry to the victim tlb if it's for a
-     * different page; otherwise just overwrite the stale data.
-     */
-    if (!tlb_hit_page_anyprot(te, vaddr_page)) {
-        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
-        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
-
-        /* Evict the old entry into the victim tlb.  */
-        copy_tlb_helper_locked(tv, te);
-        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
-    }
+    /* do not discard the translation in te, evict it into a victim tlb */
+    env->tlb_v_table[mmu_idx][vidx] = *te;
+    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
 
     /* refill the tlb */
     /*
@@ -443,39 +421,31 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
      */
     env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
     env->iotlb[mmu_idx][index].attrs = attrs;
+    te->addend = addend - vaddr_page;
 
-    /* Now calculate the new entry */
-    tn.addend = addend - vaddr_page;
     if (prot & PAGE_READ) {
-        tn.addr_read = address;
+        te->addr_read = address;
     } else {
-        tn.addr_read = -1;
+        te->addr_read = -1;
     }
 
     if (prot & PAGE_EXEC) {
-        tn.addr_code = code_address;
+        te->addr_code = code_address;
     } else {
-        tn.addr_code = -1;
+        te->addr_code = -1;
     }
 
-    tn.addr_write = -1;
     if (prot & PAGE_WRITE) {
         if ((memory_region_is_ram(section->mr) && section->readonly)
             || memory_region_is_romd(section->mr)) {
             /* Write access calls the I/O callback.  */
-            tn.addr_write = address | TLB_MMIO;
+            te->addr_write = address | TLB_MMIO;
         } else if (memory_region_is_ram(section->mr)) {
-            tn.addr_write = address | TLB_NOTDIRTY;
+            te->addr_write = address | TLB_NOTDIRTY;
         } else {
-            tn.addr_write = address;
+            te->addr_write = address;
         }
-        if (prot & PAGE_WRITE_INV) {
-            tn.addr_write |= TLB_INVALID_MASK;
-        }
+    } else {
+        te->addr_write = -1;
     }
-
-    copy_tlb_helper_locked(te, &tn);
 }
 
 /* Add a new TLB entry, but without specifying the memory
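
Two shapes change in tlb_set_page_with_attrs: the restored code always
evicts the displaced direct-mapped entry into the round-robin victim TLB
and then writes the new fields into *te in place, whereas the reverted
code built the entry in a local tn, evicted only when the old entry was
for a different page, and published it with copy_tlb_helper_locked. A
simplified, self-contained model of the restored victim-TLB refill (sizes
and types are illustrative, not QEMU's):

    #include <stddef.h>
    #include <stdio.h>

    #define TLB_SIZE  256   /* illustrative, not CPU_TLB_SIZE */
    #define VTLB_SIZE 8     /* illustrative, not CPU_VTLB_SIZE */
    #define PAGE_BITS 12

    typedef struct { unsigned long vaddr; unsigned long addend; } TLBEntry;

    static TLBEntry tlb[TLB_SIZE];    /* direct-mapped main TLB */
    static TLBEntry vtlb[VTLB_SIZE];  /* small associative victim TLB */
    static size_t vindex;             /* round-robin eviction cursor */

    static void tlb_refill(unsigned long vaddr, unsigned long addend)
    {
        TLBEntry *te = &tlb[(vaddr >> PAGE_BITS) & (TLB_SIZE - 1)];

        /* Do not discard the displaced translation: park it in the
         * victim TLB so a near-term re-reference avoids a full refill. */
        vtlb[vindex++ % VTLB_SIZE] = *te;

        te->vaddr = vaddr;
        te->addend = addend;
    }

    int main(void)
    {
        tlb_refill(0x1000, 0xdead0000);
        tlb_refill(0x101000, 0xbeef0000);  /* same slot: 0x1000 is evicted */
        printf("victim slot 1 holds vaddr 0x%lx\n", vtlb[1].vaddr);
        return 0;
    }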

cpu-all.h:

@@ -249,9 +249,6 @@ void address_space_stq_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t va
 /* original state of the write flag (used when tracking self-modifying
    code */
 #define PAGE_WRITE_ORG 0x0010
-/* Invalidate the TLB entry immediately, helpful for s390x
- * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */
-#define PAGE_WRITE_INV 0x0040
 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
 /* FIXME: Code that sets/uses this is broken and needs to go away.  */
 #define PAGE_RESERVED 0x0020
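
The removed PAGE_WRITE_INV flag paired with the tn.addr_write |=
TLB_INVALID_MASK branch deleted from cputlb.c above: it let s390x
low-address protection install an entry that satisfies the current store
but is immediately invalid again, so the next store refaults into the
slow path. A hypothetical sketch of that flag composition (the values of
PAGE_WRITE and TLB_INVALID_MASK are stand-ins, not QEMU's constants):

    #include <stdio.h>

    #define PAGE_WRITE       0x0002   /* stand-in value */
    #define PAGE_WRITE_INV   0x0040   /* value from the hunk above */
    #define TLB_INVALID_MASK 0x0001   /* stand-in value */

    static unsigned long make_addr_write(unsigned long address, int prot)
    {
        unsigned long addr_write = -1ul;   /* no write permission */

        if (prot & PAGE_WRITE) {
            addr_write = address;
            if (prot & PAGE_WRITE_INV) {
                /* Poison the entry so the next write misses the TLB
                 * and re-enters the slow path. */
                addr_write |= TLB_INVALID_MASK;
            }
        }
        return addr_write;
    }

    int main(void)
    {
        printf("addr_write = 0x%lx\n",
               make_addr_write(0x2000, PAGE_WRITE | PAGE_WRITE_INV));
        return 0;
    }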

cpu-defs.h:

@@ -163,18 +163,6 @@ typedef struct CPUIOTLBEntry {
     MemTxAttrs attrs;
 } CPUIOTLBEntry;
 
-/*
- * Data elements that are shared between all MMU modes.
- */
-typedef struct CPUTLBCommon {
-    /*
-     * Within dirty, for each bit N, modifications have been made to
-     * mmu_idx N since the last time that mmu_idx was flushed.
-     * Protected by tlb_c.lock.
-     */
-    uint16_t dirty;
-} CPUTLBCommon;
-
 typedef struct CPUTLBDesc {
     /*
      * Describe a region covering all of the large pages allocated
@@ -188,13 +176,8 @@ typedef struct CPUTLBDesc {
     size_t vindex;
 } CPUTLBDesc;
 
-/*
- * The meaning of each of the MMU modes is defined in the target code.
- * Note that NB_MMU_MODES is not yet defined; we can only reference it
- * within preprocessor defines that will be expanded later.
- */
 #define CPU_COMMON_TLB \
-    CPUTLBCommon tlb_c;                                                 \
+    /* The meaning of the MMU modes is defined in the target code. */   \
     CPUTLBDesc tlb_d[NB_MMU_MODES];                                     \
     CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
     CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];               \