mirror of https://github.com/yuzu-emu/unicorn.git
target/m68k: Convert to CPUClass::tlb_fill
Backports commit fe5f7b1b3a2317f598687218c348b54e02a75e1f from qemu
This commit is contained in:
parent fe9ac6e1c4
commit 52998fe46d
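
The change replaces the legacy CPUClass::handle_mmu_fault hook with the newer CPUClass::tlb_fill hook for the m68k target. As a condensed sketch of the interface change, drawn from the prototypes in the diff below (the comments describe the contract and are not part of the source):

    /* Old hook, removed by this commit: returned 0 on success and non-zero
     * on a fault, leaving a generic tlb_fill() wrapper to raise the
     * exception. */
    int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
                                  int mmu_idx);

    /* New hook, installed as cc->tlb_fill: returns true once the translation
     * has been entered into the TLB.  On a miss it either returns false
     * (when probe is true) or raises the fault itself and unwinds via
     * cpu_loop_exit_restore(cs, retaddr). */
    bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                           MMUAccessType access_type, int mmu_idx,
                           bool probe, uintptr_t retaddr);
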
@@ -251,7 +251,7 @@ static void m68k_cpu_class_init(struct uc_struct *uc, ObjectClass *c, void *data
     cc->do_interrupt = m68k_cpu_do_interrupt;
     cc->cpu_exec_interrupt = m68k_cpu_exec_interrupt;
     cc->set_pc = m68k_cpu_set_pc;
-    cc->handle_mmu_fault = m68k_cpu_handle_mmu_fault;
+    cc->tlb_fill = m68k_cpu_tlb_fill;
 #if defined(CONFIG_SOFTMMU)
     cc->do_unassigned_access = m68k_cpu_unassigned_access;
     cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;

@@ -537,8 +537,9 @@ static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
     return (env->sr & SR_S) == 0 ? 1 : 0;
 }
 
-int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
-                              int mmu_idx);
+bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                       MMUAccessType access_type, int mmu_idx,
+                       bool probe, uintptr_t retaddr);
 void m68k_cpu_unassigned_access(CPUState *cs, hwaddr addr,
                                 bool is_write, bool is_exec, int is_asi,
                                 unsigned size);

@@ -214,20 +214,7 @@ void m68k_switch_sp(CPUM68KState *env)
     env->current_sp = new_sp;
 }
 
-#if defined(CONFIG_USER_ONLY)
-
-int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
-                              int mmu_idx)
-{
-    M68kCPU *cpu = M68K_CPU(cs->uc, cs);
-
-    cs->exception_index = EXCP_ACCESS;
-    cpu->env.mmu.ar = address;
-    return 1;
-}
-
-#else
-
+#if !defined(CONFIG_USER_ONLY)
 /* MMU: 68040 only */
 
 static int check_TTR(uint32_t ttr, int *prot, target_ulong addr,

@@ -441,11 +428,36 @@ hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     return phys_addr;
 }
 
-int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
-                              int mmu_idx)
+/*
+ * Notify CPU of a pending interrupt. Prioritization and vectoring should
+ * be handled by the interrupt controller. Real hardware only requests
+ * the vector when the interrupt is acknowledged by the CPU. For
+ * simplicity we calculate it when the interrupt is signalled.
+ */
+void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector)
+{
+    CPUState *cs = CPU(cpu);
+    CPUM68KState *env = &cpu->env;
+
+    env->pending_level = level;
+    env->pending_vector = vector;
+    if (level) {
+        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+    } else {
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+    }
+}
+
+#endif
+
+bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                       MMUAccessType qemu_access_type, int mmu_idx,
+                       bool probe, uintptr_t retaddr)
 {
     M68kCPU *cpu = M68K_CPU(cs->uc, cs);
     CPUM68KState *env = &cpu->env;
+
+#ifndef CONFIG_USER_ONLY
     hwaddr physical;
     int prot;
     int access_type;

@@ -458,32 +470,35 @@ int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                      address & TARGET_PAGE_MASK,
                      PAGE_READ | PAGE_WRITE | PAGE_EXEC,
                      mmu_idx, TARGET_PAGE_SIZE);
-        return 0;
+        return true;
     }
 
-    if (rw == 2) {
+    if (qemu_access_type == MMU_INST_FETCH) {
         access_type = ACCESS_CODE;
-        rw = 0;
     } else {
         access_type = ACCESS_DATA;
-        if (rw) {
+        if (qemu_access_type == MMU_DATA_STORE) {
             access_type |= ACCESS_STORE;
         }
     }
 
     if (mmu_idx != MMU_USER_IDX) {
         access_type |= ACCESS_SUPER;
     }
 
     ret = get_physical_address(&cpu->env, &physical, &prot,
                                address, access_type, &page_size);
-    if (ret == 0) {
+    if (likely(ret == 0)) {
         address &= TARGET_PAGE_MASK;
         physical += address & (page_size - 1);
         tlb_set_page(cs, address, physical,
                      prot, mmu_idx, TARGET_PAGE_SIZE);
-        return 0;
+        return true;
     }
 
+    if (probe) {
+        return false;
+    }
+
     /* page fault */
     env->mmu.ssw = M68K_ATC_040;
     switch (size) {

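The probe argument is the main behavioural addition in this hunk: when the caller only wants to know whether an address translates, a miss now returns false instead of entering the fault path that follows. A minimal illustrative sketch of the two calling modes (the helper names here are hypothetical and not part of unicorn or QEMU):

    /* Hypothetical callers, for illustration only. */
    static bool page_is_mapped(CPUState *cs, vaddr addr, int mmu_idx)
    {
        /* probe = true: a miss simply reports false; no exception is raised. */
        return m68k_cpu_tlb_fill(cs, addr, 1, MMU_DATA_LOAD, mmu_idx, true, 0);
    }

    static void fill_or_fault(CPUState *cs, vaddr addr, int mmu_idx,
                              uintptr_t retaddr)
    {
        /* probe = false: on a miss the hook sets cs->exception_index and
         * unwinds via cpu_loop_exit_restore(), so it only returns on success. */
        m68k_cpu_tlb_fill(cs, addr, 1, MMU_DATA_LOAD, mmu_idx, false, retaddr);
    }
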
@@ -508,29 +523,19 @@ int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
     if (!(access_type & ACCESS_STORE)) {
         env->mmu.ssw |= M68K_RW_040;
     }
-    env->mmu.ar = address;
+#endif
 
     cs->exception_index = EXCP_ACCESS;
-    return 1;
+    env->mmu.ar = address;
+    cpu_loop_exit_restore(cs, retaddr);
 }
 
-/* Notify CPU of a pending interrupt. Prioritization and vectoring should
-   be handled by the interrupt controller. Real hardware only requests
-   the vector when the interrupt is acknowledged by the CPU. For
-   simplicitly we calculate it when the interrupt is signalled. */
-void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector)
+#ifndef CONFIG_USER_ONLY
+void tlb_fill(CPUState *cs, target_ulong addr, int size,
+              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
-    CPUState *cs = CPU(cpu);
-    CPUM68KState *env = &cpu->env;
-
-    env->pending_level = level;
-    env->pending_vector = vector;
-    if (level) {
-        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
-    } else {
-        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
-    }
+    m68k_cpu_tlb_fill(cs, addr, size, access_type, mmu_idx, false, retaddr);
 }
 
 #endif
 
 uint32_t HELPER(bitrev)(uint32_t x)

@@ -37,21 +37,6 @@ static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
 
 extern int semihosting_enabled;
 
-/* Try to fill the TLB and return an exception if error. If retaddr is
-   NULL, it means that the function was called in C code (i.e. not
-   from generated code or from helper.c) */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
-              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
-    int ret;
-
-    ret = m68k_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
-    if (unlikely(ret)) {
-        /* now we have a real cpu fault */
-        cpu_loop_exit_restore(cs, retaddr);
-    }
-}
-
 static void cf_rte(CPUM68KState *env)
 {
     uint32_t sp;