tcg: Prepare safe access to tb_flushed out of tb_lock
Ensure atomicity and ordering of the CPU's 'tb_flushed' access so that future
translation block lookup can run out of 'tb_lock'. This field can only be
touched from another thread by tb_flush() in user mode emulation, so the only
accesses that need to be sequentially atomic are:

 * the single write in tb_flush();
 * reads/writes out of 'tb_lock'.

In the future, before enabling MTTCG in system mode, tb_flush() must be made
safe, and this field will become unnecessary.

Backports commit 118b07308a8cedc16ef63d7ab243a95f1701db40 from qemu

commit c0dda5fbe9
parent 9eb02a540d
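The contract the message describes is a single-writer flag with ordered
publication: tb_flush() must make its jump-cache clears visible before the
'tb_flushed = true' write, and the execution loop consumes the flag outside
'tb_lock'. Below is a minimal stand-alone model of that contract, using C11
atomics in place of QEMU's atomic_set()/atomic_mb_set() helpers; the *_model
and flush_writer/saw_flush names are illustrative, not part of the codebase.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define TB_JMP_CACHE_SIZE 4096

    /* Illustrative stand-ins for cpu->tb_jmp_cache and cpu->tb_flushed. */
    static void *_Atomic jmp_cache_model[TB_JMP_CACHE_SIZE];
    static atomic_bool tb_flushed_model;

    /* Writer side, mirroring tb_flush(): clear the cache with relaxed
     * stores (like atomic_set), then publish the flag with a
     * sequentially consistent store (the effect of atomic_mb_set). */
    static void flush_writer(void)
    {
        for (size_t i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_store_explicit(&jmp_cache_model[i], NULL,
                                  memory_order_relaxed);
        }
        /* Readers that observe 'true' also observe the cleared cache. */
        atomic_store(&tb_flushed_model, true);
    }

    /* Reader side, mirroring the lookup path: test-and-consume the flag
     * instead of chaining into a possibly flushed buffer. */
    static bool saw_flush(void)
    {
        if (atomic_load(&tb_flushed_model)) {
            atomic_store(&tb_flushed_model, false);
            return true; /* skip tb_add_jump(): the old TBs are gone */
        }
        return false;
    }

The test-then-clear on the reader side is only race-free because, as the
message notes, user mode emulation has a single flushing writer; making
tb_flush() itself safe under concurrent flushes is the MTTCG work the last
paragraph anticipates.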
@@ -200,13 +200,6 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
                  tb->flags != flags)) {
         tb = tb_find_slow(cpu, pc, cs_base, flags);
     }
-    if (cpu->tb_flushed) {
-        /* Ensure that no TB jump will be modified as the
-         * translation buffer has been flushed.
-         */
-        *last_tb = NULL;
-        cpu->tb_flushed = false;
-    }
 #ifndef CONFIG_USER_ONLY
     /* We don't take care of direct jumps when address mapping changes in
      * system emulation. So it's not safe to make a direct jump to a TB
@@ -218,7 +211,12 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
 #endif
     /* See if we can patch the calling TB. */
     if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-        tb_add_jump(*last_tb, tb_exit, tb);
+        /* Check if translation buffer has been flushed */
+        if (cpu->tb_flushed) {
+            cpu->tb_flushed = false;
+        } else {
+            tb_add_jump(*last_tb, tb_exit, tb);
+        }
     }
     // Unicorn: commented out
     //tb_unlock();
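Taken together, the two tb_find_fast() hunks relocate the flush check: instead
of clearing 'last_tb' up front, the flag is now tested (and consumed) at the
exact point where a jump would be patched, so a stale 'last_tb' can never be
chained to a freshly translated block. A sketch of the resulting tail of
tb_find_fast(), reassembled from the context lines above with added
commentary, not quoted verbatim from the unicorn source:

    /* See if we can patch the calling TB. */
    if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        /* Check if translation buffer has been flushed */
        if (cpu->tb_flushed) {
            /* Consume the flag: '*last_tb' points into the old,
             * now-invalid buffer, and its memory may already be
             * reused, so patching a jump into it is unsafe. */
            cpu->tb_flushed = false;
        } else {
            /* No flush since '*last_tb' executed: chaining is safe. */
            tb_add_jump(*last_tb, tb_exit, tb);
        }
    }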
@@ -442,7 +440,7 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
         }

         last_tb = NULL; /* forget the last executed TB after exception */
-        cpu->tb_flushed = false; /* reset before first TB lookup */
+        atomic_mb_set(&cpu->tb_flushed, false); /* reset before first TB lookup */
         for(;;) {
             cpu_handle_interrupt(cpu, &last_tb);
             tb = tb_find_fast(cpu, &last_tb, tb_exit);
@@ -917,13 +917,13 @@ void tb_flush(CPUState *cpu)
         > tcg_ctx->code_gen_buffer_size) {
         cpu_abort(cpu, "Internal error: code buffer overflow\n");
     }
     tcg_ctx->tb_ctx.nb_tbs = 0;

     for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
         atomic_set(&cpu->tb_jmp_cache[i], NULL);
     }
-    cpu->tb_flushed = true;
+    atomic_mb_set(&cpu->tb_flushed, true);

     tcg_ctx->tb_ctx.nb_tbs = 0;
     memset(tcg_ctx->tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx->tb_ctx.tb_phys_hash));
     page_flush_tb(uc);