cpu-exec: lookup/generate TB outside exclusive region during step_atomic

Now that all code generation has been converted to check CF_PARALLEL, we can
generate !CF_PARALLEL code without first clearing parallel_cpus --
and therefore without having to be in the exclusive region during
cpu_exec_step_atomic.

While at it, merge cpu_exec_step into cpu_exec_step_atomic.

Backports commit ac03ee5331612e44beb393df2b578c951d27dc0d from qemu
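
As an aside on the first point above: "checking CF_PARALLEL" means the choice
between the concurrency-safe and the plain code path is made at translation
time, per TB. A minimal sketch of that pattern follows; the operation and
gen_plain_op() are made up for illustration, while tb_cflags(), CF_PARALLEL,
gen_helper_exit_atomic() and EXCP_ATOMIC are the upstream QEMU mechanisms this
change builds on (the fork's translator API may differ in detail).

    /* Hypothetical translate-time check for an op with no lock-free
     * parallel implementation. */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        /* Unsafe to emulate alongside other vCPUs: raise EXCP_ATOMIC so the
         * execution loop retries this one instruction via
         * cpu_exec_step_atomic() with a fresh !CF_PARALLEL TB. */
        gen_helper_exit_atomic(cpu_env);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        /* !CF_PARALLEL TB: emit the plain, non-atomic sequence; the caller
         * guarantees nothing else runs concurrently. */
        gen_plain_op(s);
    }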
Author:    Emilio G. Cota
Date:      2018-03-13 15:22:46 -04:00
Committer: Lioncash
Parent:    f593db445a
Commit:    4f7dcf149e

@@ -473,23 +473,41 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     }
 }
 
-static void cpu_exec_step(struct uc_struct *uc, CPUState *cpu)
+void cpu_exec_step_atomic(struct uc_struct *uc, CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(uc, cpu);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
     uint32_t cflags = 1 | CF_IGNORE_ICOUNT;
+    uint32_t cf_mask = cflags & CF_HASH_MASK;
 
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
-        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags,
-                                  cflags & CF_HASH_MASK);
+        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
         if (tb == NULL) {
             mmap_lock();
             //tb_lock();
-            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+            tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
+            if (likely(tb == NULL)) {
+                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+            }
             //tb_unlock();
             mmap_unlock();
         }
 
+        // Unicorn: commented out
+        //start_exclusive();
+
+        /* Since we got here, we know that parallel_cpus must be true. */
+        uc->parallel_cpus = false;
         cc->cpu_exec_enter(cpu);
         /* execute the generated code */
         cpu_tb_exec(cpu, tb);
         cc->cpu_exec_exit(cpu);
+        uc->parallel_cpus = true;
+
+        // Unicorn: commented out
+        //end_exclusive()
     } else {
         /* We may have exited due to another problem here, so we need
          * to reset any tb_locks we may have taken but didn't release.
@@ -505,20 +523,6 @@ static void cpu_exec_step(struct uc_struct *uc, CPUState *cpu)
     }
 }
 
-void cpu_exec_step_atomic(struct uc_struct *uc, CPUState *cpu)
-{
-    // Unicorn: commented out
-    //start_exclusive();
-
-    /* Since we got here, we know that parallel_cpus must be true. */
-    uc->parallel_cpus = false;
-    cpu_exec_step(uc, cpu);
-    uc->parallel_cpus = true;
-
-    // Unicorn: commented out
-    //end_exclusive();
-}
-
 /* main execution loop */
 int cpu_exec(struct uc_struct *uc, CPUState *cpu)
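
For context, cpu_exec_step_atomic is reached when a CF_PARALLEL TB bails out
with EXCP_ATOMIC. A rough sketch of such a caller is below, modeled on how
upstream QEMU's vCPU loop reacts to EXCP_ATOMIC; the loop structure and the
tcg_cpu_exec()/cpu_can_run() signatures shown are assumptions, not this
repository's actual code.

    /* Hypothetical vCPU loop fragment. */
    while (cpu_can_run(cpu)) {
        int r = tcg_cpu_exec(uc, cpu);
        if (r == EXCP_ATOMIC) {
            /* Re-run the offending instruction as a single, exclusive,
             * !CF_PARALLEL TB. */
            cpu_exec_step_atomic(uc, cpu);
        }
    }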