cputlb: Move TLB_RECHECK handling into load/store_helper

Having this in io_readx/io_writex meant that we forgot to
re-compute index after tlb_fill. It also means we can use
the normal aligned memory load path. It also fixes a bug
in that we had cached a use of index across a tlb_fill.

Backports commit f1be36969de2fb9b6b64397db1098f115210fcd9 from qemu
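For context on the first point of the message: tlb_fill() may evict or rewrite softmmu TLB entries, so an index or CPUTLBEntry pointer computed before the call can be stale afterwards; the patched load_helper/store_helper therefore redo index = tlb_index(...) and entry = tlb_entry(...) immediately after every tlb_fill(). Below is a minimal, self-contained sketch of that "recompute after fill" pattern. The Toy* types and helpers are hypothetical stand-ins for illustration only, not QEMU's real API.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_PAGE_BITS 12
#define TOY_PAGE_SIZE (1u << TOY_PAGE_BITS)
#define TOY_TLB_SIZE  8
#define TOY_RAM_SIZE  (4 * TOY_PAGE_SIZE)

typedef struct {
    uint64_t  addr_read;  /* guest page tag; UINT64_MAX means invalid */
    uintptr_t addend;     /* host address = guest address + addend */
} ToyTLBEntry;

typedef struct {
    ToyTLBEntry tlb[TOY_TLB_SIZE];
    uint8_t ram[TOY_RAM_SIZE];
} ToyCPU;

static size_t toy_tlb_index(uint64_t addr)
{
    return (addr >> TOY_PAGE_BITS) & (TOY_TLB_SIZE - 1);
}

/* Stand-in for tlb_fill(): installs a mapping, possibly clobbering entries. */
static void toy_tlb_fill(ToyCPU *cpu, uint64_t addr)
{
    size_t i = toy_tlb_index(addr);

    cpu->tlb[i].addr_read = addr & ~(uint64_t)(TOY_PAGE_SIZE - 1);
    cpu->tlb[i].addend = (uintptr_t)cpu->ram;   /* guest RAM starts at 0 */
}

static uint8_t toy_load8(ToyCPU *cpu, uint64_t addr)
{
    size_t index = toy_tlb_index(addr);
    ToyTLBEntry *entry = &cpu->tlb[index];
    uint64_t page = addr & ~(uint64_t)(TOY_PAGE_SIZE - 1);

    if (entry->addr_read != page) {
        toy_tlb_fill(cpu, addr);
        /*
         * The fill may have rewritten or moved the entry (the real QEMU TLB
         * can even be resized), so recompute index and entry instead of
         * reusing the values cached above -- the hazard this commit removes.
         */
        index = toy_tlb_index(addr);
        entry = &cpu->tlb[index];
    }
    return *(uint8_t *)(uintptr_t)(addr + entry->addend);
}

int main(void)
{
    ToyCPU cpu = {0};

    for (size_t i = 0; i < TOY_TLB_SIZE; i++) {
        cpu.tlb[i].addr_read = UINT64_MAX;
    }
    cpu.ram[0x123] = 42;
    printf("%d\n", toy_load8(&cpu, 0x123));   /* prints 42 */
    return 0;
}

The do_aligned_access label added at the end of load_helper/store_helper covers the second point: once the TLB_RECHECK path resolves to plain RAM, the helpers jump to the same aligned-access code used by ordinary TLB hits instead of duplicating a small load/store inside io_readx/io_writex.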
Richard Henderson 2019-05-14 07:26:01 -04:00 committed by Lioncash
parent ccee796272
commit 7991cd601f

@@ -565,9 +565,8 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
 }
 
 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
-                         int mmu_idx,
-                         target_ulong addr, uintptr_t retaddr,
-                         bool recheck, MMUAccessType access_type, int size)
+                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
+                         MMUAccessType access_type, int size)
 {
     CPUState *cpu = ENV_GET_CPU(env);
     hwaddr mr_offset;
@@ -576,30 +575,6 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     uint64_t val;
     MemTxResult r;
 
-    if (recheck) {
-        /*
-         * This is a TLB_RECHECK access, where the MMU protection
-         * covers a smaller range than a target page, and we must
-         * repeat the MMU check here. This tlb_fill() call might
-         * longjump out if this access should cause a guest exception.
-         */
-        CPUTLBEntry *entry;
-        target_ulong tlb_addr;
-
-        tlb_fill(cpu, addr, size, access_type, mmu_idx, retaddr);
-
-        entry = tlb_entry(env, mmu_idx, addr);
-        tlb_addr = (access_type == MMU_DATA_LOAD ?
-                    entry->addr_read : entry->addr_code);
-        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
-            /* RAM access */
-            uintptr_t haddr = addr + entry->addend;
-
-            return ldn_p((void *)haddr, size);
-        }
-        /* Fall through for handling IO accesses */
-    }
-
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@@ -625,9 +600,8 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
 }
 
 static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
-                      int mmu_idx,
-                      uint64_t val, target_ulong addr,
-                      uintptr_t retaddr, bool recheck, int size)
+                      int mmu_idx, uint64_t val, target_ulong addr,
+                      uintptr_t retaddr, int size)
 {
     CPUState *cpu = ENV_GET_CPU(env);
     hwaddr mr_offset;
@@ -635,30 +609,6 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     MemoryRegion *mr;
     MemTxResult r;
 
-    if (recheck) {
-        /*
-         * This is a TLB_RECHECK access, where the MMU protection
-         * covers a smaller range than a target page, and we must
-         * repeat the MMU check here. This tlb_fill() call might
-         * longjump out if this access should cause a guest exception.
-         */
-        CPUTLBEntry *entry;
-        target_ulong tlb_addr;
-
-        tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
-
-        entry = tlb_entry(env, mmu_idx, addr);
-        tlb_addr = tlb_addr_write(entry);
-        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
-            /* RAM access */
-            uintptr_t haddr = addr + entry->addend;
-
-            stn_p((void *)haddr, size, val);
-            return;
-        }
-        /* Fall through for handling IO accesses */
-    }
-
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@@ -863,6 +813,8 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
     target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
     const size_t tlb_off = code_read ?
         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
+    const MMUAccessType access_type =
+        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
     unsigned a_bits = get_alignment_bits(get_memop(oi));
     void *haddr;
     uint64_t res;
@@ -974,8 +926,7 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
 
     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr,
-                             code_read ? MMU_INST_FETCH : MMU_DATA_LOAD,
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
                              mmu_idx, retaddr);
     }
 
@@ -984,8 +935,7 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                             addr & TARGET_PAGE_MASK)) {
             tlb_fill(ENV_GET_CPU(env), addr, size,
-                     code_read ? MMU_INST_FETCH : MMU_DATA_LOAD,
-                     mmu_idx, retaddr);
+                     access_type, mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
         }
@@ -994,17 +944,33 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
 
     /* Handle an IO access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-        uint64_t tmp;
-
         if ((addr & (size - 1)) != 0) {
             goto do_unaligned_access;
         }
 
-        tmp = io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
-                       tlb_addr & TLB_RECHECK,
-                       code_read ? MMU_INST_FETCH : MMU_DATA_LOAD, size);
-        return handle_bswap(tmp, size, big_endian);
+        if (tlb_addr & TLB_RECHECK) {
+            /*
+             * This is a TLB_RECHECK access, where the MMU protection
+             * covers a smaller range than a target page, and we must
+             * repeat the MMU check here. This tlb_fill() call might
+             * longjump out if this access should cause a guest exception.
+             */
+            tlb_fill(ENV_GET_CPU(env), addr, size,
+                     access_type, mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
+
+            tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+            tlb_addr &= ~TLB_RECHECK;
+            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
+                /* RAM access */
+                goto do_aligned_access;
+            }
+        }
+
+        res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
+                       retaddr, access_type, size);
+        return handle_bswap(res, size, big_endian);
     }
 
     /* Handle slow unaligned access (it spans two pages or IO). */
@@ -1032,8 +998,8 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
         goto finished;
     }
 
+ do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
     switch (size) {
     case 1:
         res = ldub_p(haddr);
@@ -1270,15 +1236,33 @@ static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
     /* Handle an IO access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-
         if ((addr & (size - 1)) != 0) {
             goto do_unaligned_access;
         }
 
-        io_writex(env, iotlbentry, mmu_idx,
+        if (tlb_addr & TLB_RECHECK) {
+            /*
+             * This is a TLB_RECHECK access, where the MMU protection
+             * covers a smaller range than a target page, and we must
+             * repeat the MMU check here. This tlb_fill() call might
+             * longjump out if this access should cause a guest exception.
+             */
+            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
+
+            tlb_addr = tlb_addr_write(entry);
+            tlb_addr &= ~TLB_RECHECK;
+            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
+                /* RAM access */
+                goto do_aligned_access;
+            }
+        }
+
+        io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
                   handle_bswap(val, size, big_endian),
-                  addr, retaddr, tlb_addr & TLB_RECHECK, size);
+                  addr, retaddr, size);
         return;
     }
 
@@ -1326,8 +1310,8 @@ static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         return;
     }
 
+ do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
     switch (size) {
     case 1:
         stb_p(haddr, val);