mirror of
https://github.com/yuzu-emu/unicorn.git
synced 2025-01-18 14:47:17 +00:00
tcg: introduce tcg_current_cpu
This is already useful on Windows in order to remove tls.h, because accesses to current_cpu are done from a different thread on that platform. It will be used on POSIX platforms as soon as TCG stops using signals to interrupt the execution of translated code. Backports commit 9373e63297c43752f9cf085feb7f5aed57d959f8 from qemu
This commit is contained in:
parent
03da6ff6db
commit
96e5a7ced3
|
@ -174,15 +174,22 @@ struct uc_struct {
|
|||
|
||||
uc_insn_hook_validate insn_hook_validate;
|
||||
|
||||
MemoryRegion *system_memory; // qemu/exec.c
|
||||
MemoryRegion io_mem_rom; // qemu/exec.c
|
||||
MemoryRegion io_mem_notdirty; // qemu/exec.c
|
||||
MemoryRegion io_mem_unassigned; // qemu/exec.c
|
||||
MemoryRegion io_mem_watch; // qemu/exec.c
|
||||
RAMList ram_list; // qemu/exec.c
|
||||
BounceBuffer bounce; // qemu/cpu-exec.c
|
||||
volatile sig_atomic_t exit_request; // qemu/cpu-exec.c
|
||||
bool global_dirty_log; // qemu/memory.c
|
||||
// qemu/exec.c
|
||||
MemoryRegion *system_memory;
|
||||
MemoryRegion io_mem_rom;
|
||||
MemoryRegion io_mem_notdirty;
|
||||
MemoryRegion io_mem_unassigned;
|
||||
MemoryRegion io_mem_watch;
|
||||
RAMList ram_list;
|
||||
|
||||
// qemu/cpu-exec.c
|
||||
BounceBuffer bounce;
|
||||
volatile sig_atomic_t exit_request;
|
||||
CPUState *tcg_current_cpu;
|
||||
|
||||
// qemu/memory.c
|
||||
bool global_dirty_log;
|
||||
|
||||
/* This is a multi-level map on the virtual address space.
|
||||
The bottom level has pointers to PageDesc. */
|
||||
void **l1_map; // qemu/translate-all.c
|
||||
|
|
|
@ -88,14 +88,7 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
|
|||
}
|
||||
|
||||
uc->current_cpu = cpu;
|
||||
|
||||
/* As long as current_cpu is null, up to the assignment just above,
|
||||
* requests by other threads to exit the execution loop are expected to
|
||||
* be issued using the exit_request global. We must make sure that our
|
||||
* evaluation of the global value is performed past the current_cpu
|
||||
* value transition point, which requires a memory barrier as well as
|
||||
* an instruction scheduling constraint on modern architectures. */
|
||||
smp_mb();
|
||||
atomic_mb_set(&uc->tcg_current_cpu, cpu);
|
||||
|
||||
if (unlikely(uc->exit_request)) {
|
||||
cpu->exit_request = 1;
|
||||
|
@ -295,6 +288,8 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
|
|||
|
||||
/* fail safe : never use current_cpu outside cpu_exec() */
|
||||
uc->current_cpu = NULL;
|
||||
/* Does not need atomic_mb_set because a spurious wakeup is okay. */
|
||||
atomic_set(&uc->tcg_current_cpu, NULL);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue