accel/tcg: add size parameter in tlb_fill()

The MC68040 MMU provides the size of the access that
triggers the page fault.

This size is recorded in the Special Status Word, which
is written to the stack frame of the access fault
exception.

So we need the size in m68k_cpu_unassigned_access() and
m68k_cpu_handle_mmu_fault().

To make the size available there, this patch modifies the prototypes
of the handle_mmu_fault handler, tlb_fill() and probe_write().
do_unassigned_access() already takes a size parameter.

This patch also updates the handle_mmu_fault handlers and tlb_fill()
of all targets (parameter only, no code change).

Backports commit 98670d47cd8d63a529ff230fd39ddaa186156f8c from qemu
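
As a rough sketch (not part of the patch) of how a target is expected
to wire the new parameter through, modelled on the m68k/mips/sparc
hunks below; the "xyz" target prefix and its fault handler are
placeholders, only the tlb_fill() prototype itself comes from this
change:

    void tlb_fill(CPUState *cs, target_ulong addr, int size,
                  MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
    {
        int ret;

        /* Forward the access size to the per-target MMU fault handler,
           which can record it (for the MC68040, in the Special Status
           Word) before the access fault exception is raised. */
        ret = xyz_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
        if (unlikely(ret)) {
            /* A real CPU fault: unwind with the host return address so
               the guest exception is delivered at the faulting insn. */
            cpu_loop_exit_restore(cs, retaddr);
        }
    }

The softmmu load/store helpers pass DATA_SIZE, the atomic helpers pass
1 << s_bits, and probe_write() simply forwards the size it is given.
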
Laurent Vivier 2018-03-06 10:48:51 -05:00 committed by Lioncash
parent e039ae7a66
commit 0aecb15f3b
18 changed files with 49 additions and 44 deletions


@@ -553,7 +553,7 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
  * Otherwise the function will return, and there will be a valid
  * entry in the TLB for this access.
  */
-void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
+void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                  uintptr_t retaddr)
 {
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -563,7 +563,8 @@ void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         /* TLB entry is for a different page */
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
-            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
         }
     }
 }
@@ -604,7 +605,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
-            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
         }
         tlb_addr = tlbe->addr_write;
     }
@@ -624,7 +626,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     /* Let the guest notice RMW on a write-only page. */
     if (unlikely(tlbe->addr_read != tlb_addr)) {
-        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
+        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
+                 mmu_idx, retaddr);
         /* Since we don't support reads and writes to different addresses,
            and we do have the proper page loaded for write, this shouldn't
            ever return. But just in case, handle via stop-the-world. */


@@ -170,7 +170,7 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                   int mmu_idx, target_ulong size);
 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
-void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
+void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                  uintptr_t retaddr);
 #else
@@ -308,8 +308,8 @@ void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));
 struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                      hwaddr index, MemTxAttrs attrs);
-void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
-              int mmu_idx, uintptr_t retaddr);
+void tlb_fill(CPUState *cpu, target_ulong addr, int size,
+              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
 #endif
 #if defined(CONFIG_USER_ONLY)


@@ -154,7 +154,7 @@ typedef struct CPUClass {
                            Error **errp);
     void (*set_pc)(CPUState *cpu, vaddr value);
     void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
-    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
+    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw,
                             int mmu_index);
     hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
     hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,


@@ -222,7 +222,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
-            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
@@ -407,7 +407,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
-            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
@@ -577,7 +577,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
-            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     }
@@ -619,7 +620,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
         if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
             && !VICTIM_TLB_HIT(addr_write, page2)) {
-            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
         }
@@ -721,7 +722,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
-            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     }
@@ -763,7 +765,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
         if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
             && !VICTIM_TLB_HIT(addr_write, page2)) {
-            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
         }


@@ -1456,8 +1456,8 @@ static const ARMCPUInfo arm_cpus[] = {
 };
 #ifdef CONFIG_USER_ONLY
-static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
-                                    int mmu_idx)
+static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
+                                    int rw, int mmu_idx)
 {
     ARMCPU *cpu = ARM_CPU(NULL, cs);
     CPUARMState *env = &cpu->env;


@@ -168,8 +168,8 @@ static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
  * NULL, it means that the function was called in C code (i.e. not
  * from generated code or from helper.c)
  */
-void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
-              int mmu_idx, uintptr_t retaddr)
+void tlb_fill(CPUState *cs, target_ulong addr, int size,
+              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
     bool ret;
     ARMMMUFaultInfo fi = {0};


@@ -1435,7 +1435,7 @@ void host_cpuid(uint32_t function, uint32_t count,
 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);
 /* helper.c */
-int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
+int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr, int size,
                              int is_write, int mmu_idx);
 void x86_cpu_set_a20(X86CPU *cpu, int a20_state);


@@ -530,7 +530,7 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
 #if defined(CONFIG_USER_ONLY)
-int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
+int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                              int is_write, int mmu_idx)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -554,7 +554,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
  * 0 = nothing more to do
  * 1 = generate PF fault
  */
-int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
+int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                              int is_write1, int mmu_idx)
 {
     X86CPU *cpu = X86_CPU(cs->uc, cs);


@@ -199,12 +199,12 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
  * from generated code or from helper.c)
  */
 /* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
-              int mmu_idx, uintptr_t retaddr)
+void tlb_fill(CPUState *cs, target_ulong addr, int size,
+              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
     int ret;
-    ret = x86_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+    ret = x86_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
     if (ret) {
         X86CPU *cpu = X86_CPU(cs->uc, cs);
         CPUX86State *env = &cpu->env;


@@ -420,7 +420,7 @@ static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
     return (env->sr & SR_S) == 0 ? 1 : 0;
 }
-int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
                               int mmu_idx);
 #include "exec/cpu-all.h"


@@ -183,7 +183,7 @@ void m68k_switch_sp(CPUM68KState *env)
 #if defined(CONFIG_USER_ONLY)
-int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                               int mmu_idx)
 {
     M68kCPU *cpu = M68K_CPU(cs);
@@ -203,7 +203,7 @@ hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     return addr;
 }
-int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                               int mmu_idx)
 {
     int prot;


@@ -40,12 +40,12 @@ extern int semihosting_enabled;
 /* Try to fill the TLB and return an exception if error. If retaddr is
    NULL, it means that the function was called in C code (i.e. not
    from generated code or from helper.c) */
-void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
-              int mmu_idx, uintptr_t retaddr)
+void tlb_fill(CPUState *cs, target_ulong addr, int size,
+              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
     int ret;
-    ret = m68k_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+    ret = m68k_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
     if (unlikely(ret)) {
         /* now we have a real cpu fault */
         cpu_loop_exit_restore(cs, retaddr);


@@ -525,7 +525,7 @@ hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 }
 #endif
-int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                               int mmu_idx)
 {
     MIPSCPU *cpu = MIPS_CPU(cs->uc, cs);


@@ -201,7 +201,7 @@ void cpu_mips_start_count(CPUMIPSState *env);
 void cpu_mips_stop_count(CPUMIPSState *env);
 /* helper.c */
-int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
                               int mmu_idx);
 /* op_helper.c */


@@ -2447,12 +2447,12 @@ void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
     do_raise_exception_err(env, excp, error_code, retaddr);
 }
-void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
-              int mmu_idx, uintptr_t retaddr)
+void tlb_fill(CPUState *cs, target_ulong addr, int size,
+              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
     int ret;
-    ret = mips_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+    ret = mips_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
     if (ret) {
         MIPSCPU *cpu = MIPS_CPU(cs->uc, cs);
         CPUMIPSState *env = &cpu->env;
@@ -4170,10 +4170,10 @@ static inline void ensure_writable_pages(CPUMIPSState *env,
     target_ulong page_addr;
     if (unlikely(MSA_PAGESPAN(addr))) {
         /* first page */
-        probe_write(env, addr, mmu_idx, retaddr);
+        probe_write(env, addr, 0, mmu_idx, retaddr);
         /* second page */
         page_addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
-        probe_write(env, page_addr, mmu_idx, retaddr);
+        probe_write(env, page_addr, 0, mmu_idx, retaddr);
     }
 #endif
 }


@@ -574,7 +574,7 @@ SPARCCPU *cpu_sparc_init(struct uc_struct *uc, const char *cpu_model);
 void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
 void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf);
 /* mmu_helper.c */
-int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
                                int mmu_idx);
 target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
 void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env);


@@ -1934,12 +1934,12 @@ void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
    NULL, it means that the function was called in C code (i.e. not
    from generated code or from helper.c) */
 /* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
-              int mmu_idx, uintptr_t retaddr)
+void tlb_fill(CPUState *cs, target_ulong addr, int size,
+              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
     int ret;
-    ret = sparc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+    ret = sparc_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
     if (ret) {
         cpu_loop_exit_restore(cs, retaddr);
     }


@@ -25,7 +25,7 @@
 #if defined(CONFIG_USER_ONLY)
-int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                                int mmu_idx)
 {
     if (rw & 2) {
@@ -198,7 +198,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
 }
 /* Perform address translation */
-int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                                int mmu_idx)
 {
     SPARCCPU *cpu = SPARC_CPU(cs->uc, cs);
@@ -705,7 +705,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
 }
 /* Perform address translation */
-int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                                int mmu_idx)
 {
     SPARCCPU *cpu = SPARC_CPU(cs->uc, cs);