tcg: Remove needless CPUState::current_tb
This field was used to tell cpu_interrupt() to unlink the chain of TBs being executed, back when interrupts worked that way. cpu_interrupt() no longer does this, so the field is no longer needed.

Backports commit 3213525f8ab48742db09dab18cb9ae6f36a6c921 from qemu
This commit is contained in:
parent 73c75b4cf7
commit 1a768018c2
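For context, here is a minimal, self-contained C sketch of the behaviour change the commit message describes. It is not QEMU/Unicorn code, and every name in it (ToyTB, ToyCPU, toy_interrupt, toy_exec) is invented for illustration: the old scheme kept a current_tb pointer so that cpu_interrupt() could unlink the chain of translation blocks being executed, while the current scheme only raises an exit-request flag that the execution loop polls between blocks, which is why the field can be dropped everywhere in the diff below.

/* Toy model only -- not QEMU code.  Illustrates why a per-CPU
 * "currently executing TB" pointer is unnecessary once interrupts
 * merely request an exit instead of unlinking TB chains. */
#include <stdbool.h>
#include <stdio.h>

typedef struct ToyTB {
    int id;
    struct ToyTB *next;        /* chained ("linked") translation block */
} ToyTB;

typedef struct ToyCPU {
    ToyTB *current_tb;         /* old scheme: the interrupt path reached in
                                  here to break the chain (the field this
                                  commit removes in QEMU) */
    bool exit_request;         /* new scheme: the interrupt path only sets
                                  this flag */
} ToyCPU;

static void toy_interrupt(ToyCPU *cpu)
{
    /* Previously: unlink cpu->current_tb's jump targets here. */
    cpu->exit_request = true;  /* all that is needed now */
}

static void toy_exec(ToyCPU *cpu, ToyTB *tb)
{
    /* The loop polls the flag between blocks, so nothing outside the
     * loop needs to know which TB is currently running. */
    while (tb && !cpu->exit_request) {
        printf("executing TB %d\n", tb->id);
        tb = tb->next;
    }
}

int main(void)
{
    ToyTB b2 = { 2, NULL }, b1 = { 1, &b2 };
    ToyCPU cpu = { 0 };

    toy_exec(&cpu, &b1);       /* runs TB 1 then TB 2 */
    toy_interrupt(&cpu);
    toy_exec(&cpu, &b1);       /* returns immediately: flag is set */
    return 0;
}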
@@ -38,7 +38,6 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
 
 void cpu_loop_exit(CPUState *cpu)
 {
-    cpu->current_tb = NULL;
     siglongjmp(cpu->jmp_env, 1);
 }
 
@@ -47,6 +46,5 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
     if (pc) {
         cpu_restore_state(cpu, pc);
     }
-    cpu->current_tb = NULL;
     siglongjmp(cpu->jmp_env, 1);
 }
@@ -192,10 +192,8 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
             }
             if (likely(!cpu->exit_request)) {
                 uintptr_t ret;
-                cpu->current_tb = tb;
                 /* execute the generated code */
                 ret = cpu_tb_exec(cpu, tb);
-                cpu->current_tb = NULL;
                 last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
                 tb_exit = ret & TB_EXIT_MASK;
                 switch (tb_exit) {
@@ -87,10 +87,6 @@ void tlb_flush(CPUState *cpu, int flush_global)
 
     tlb_debug("(%d)\n", flush_global);
 
-    /* must reset current TB so that interrupts cannot modify the
-       links while we are modifying them */
-    cpu->current_tb = NULL;
-
     memset(env->tlb_table, -1, sizeof(env->tlb_table));
     memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
     memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
@@ -118,9 +114,6 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
         tlb_flush(cpu, 1);
         return;
     }
-    /* must reset current TB so that interrupts cannot modify the
-       links while we are modifying them */
-    cpu->current_tb = NULL;
 
     addr &= TARGET_PAGE_MASK;
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -391,9 +384,6 @@ static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
     CPUArchState *env = cpu->env_ptr;
 
     tlb_debug("start\n");
-    /* must reset current TB so that interrupts cannot modify the
-       links while we are modifying them */
-    cpu->current_tb = NULL;
 
     for (;;) {
         int mmu_idx = va_arg(argp, int);
@@ -451,9 +441,6 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
         va_end(argp);
         return;
     }
-    /* must reset current TB so that interrupts cannot modify the
-       links while we are modifying them */
-    cpu->current_tb = NULL;
 
     addr &= TARGET_PAGE_MASK;
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -225,7 +225,6 @@ struct CPUAddressSpace {
  * @as: Pointer to the first AddressSpace, for the convenience of targets which
  *      only have a single AddressSpace
  * @env_ptr: Pointer to subclass-specific CPUArchState field.
- * @current_tb: Currently executing TB.
  * @next_cpu: Next CPU sharing TB cache.
  * @opaque: User data.
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
@@ -269,7 +268,6 @@ struct CPUState {
     MemoryRegion *memory;
 
     void *env_ptr; /* CPUArchState */
-    struct TranslationBlock *current_tb;
     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
     QTAILQ_ENTRY(CPUState) node;
 
@@ -162,7 +162,6 @@ static void cpu_common_reset(CPUState *cpu)
     }
 
     cpu->interrupt_request = 0;
-    cpu->current_tb = NULL;
     cpu->halted = 0;
     cpu->mem_io_pc = 0;
     cpu->mem_io_vaddr = 0;
@@ -297,7 +297,6 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
         cpu_restore_state_from_tb(cpu, tb, retaddr);
         if (tb->cflags & CF_NOCACHE) {
             /* one-shot translation, invalidate it immediately */
-            cpu->current_tb = NULL;
             tb_phys_invalidate(cpu->uc, tb, -1);
             tb_free(cpu->uc, tb);
         }
@@ -1417,7 +1416,7 @@ void tb_invalidate_phys_range(struct uc_struct *uc, tb_page_addr_t start, tb_pag
 void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access)
 {
-    TranslationBlock *tb, *tb_next, *saved_tb;
+    TranslationBlock *tb, *tb_next;
     CPUState *cpu = uc->current_cpu;
 #if defined(TARGET_HAS_PRECISE_SMC)
     CPUArchState *env = NULL;
@@ -1487,20 +1486,7 @@ void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, t
                                      &current_flags);
             }
 #endif /* TARGET_HAS_PRECISE_SMC */
-            /* we need to do that to handle the case where a signal
-               occurs while doing tb_phys_invalidate() */
-            saved_tb = NULL;
-            if (cpu != NULL) {
-                saved_tb = cpu->current_tb;
-                cpu->current_tb = NULL;
-            }
             tb_phys_invalidate(uc, tb, -1);
-            if (cpu != NULL) {
-                cpu->current_tb = saved_tb;
-                if (cpu->interrupt_request && cpu->current_tb) {
-                    cpu_interrupt(cpu, cpu->interrupt_request);
-                }
-            }
         }
         tb = tb_next;
     }
@@ -1516,7 +1502,6 @@ void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, t
         /* we generate a block containing just the instruction
            modifying the memory. It will ensure that it cannot modify
            itself */
-        cpu->current_tb = NULL;
         tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
         cpu_resume_from_signal(cpu, NULL);
     }
@@ -1620,7 +1605,6 @@ static void tb_invalidate_phys_page(struct uc_struct *uc, tb_page_addr_t addr,
         /* we generate a block containing just the instruction
            modifying the memory. It will ensure that it cannot modify
            itself */
-        cpu->current_tb = NULL;
         tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
         if (locked) {
             mmap_unlock();