tcg: Prepare safe access to tb_flushed out of tb_lock

Ensure atomicity and ordering of accesses to the CPU's 'tb_flushed' field,
in preparation for performing translation block lookup outside of 'tb_lock'.

In user mode emulation this field can only be touched from another thread
by tb_flush(), so the only accesses that need to be sequentially atomic are:
* the single write in tb_flush();
* reads/writes performed outside of 'tb_lock'.

In the future, before MTTCG can be enabled in system mode, tb_flush() must
be made thread-safe, at which point this field becomes unnecessary.
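
For illustration only (not part of this patch): a minimal sketch of how the
single sequentially consistent write in tb_flush() is expected to pair with a
lock-free read during TB lookup, using QEMU's atomic_mb_set()/atomic_mb_read()
helpers. The reader shown here is hypothetical; this commit only converts the
write sides.

/* Minimal sketch, assuming QEMU's "qemu/atomic.h" helpers and the CPUState
 * definition from the QEMU/Unicorn headers.  The writer mirrors the change
 * made in tb_flush(); the lock-free reader is a hypothetical later user of
 * 'tb_flushed' and is not introduced by this commit.
 */
#include "qemu/atomic.h"   /* atomic_mb_set(), atomic_mb_read() */
#include "qom/cpu.h"       /* CPUState */

static void flush_notify(CPUState *cpu)
{
    /* Writer side: a single store with a full memory barrier, as now done
     * in tb_flush(). */
    atomic_mb_set(&cpu->tb_flushed, true);
}

static bool tb_was_flushed(CPUState *cpu)
{
    /* Reader side: a TB lookup running outside 'tb_lock' can observe the
     * flush and must then refrain from chaining to stale TBs. */
    return atomic_mb_read(&cpu->tb_flushed);
}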

Backports commit 118b07308a8cedc16ef63d7ab243a95f1701db40 from qemu
Sergey Fedorov 2018-02-25 23:33:51 -05:00 committed by Lioncash
parent 9eb02a540d
commit c0dda5fbe9
2 changed files with 9 additions and 11 deletions


@@ -200,13 +200,6 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
                  tb->flags != flags)) {
         tb = tb_find_slow(cpu, pc, cs_base, flags);
     }
-    if (cpu->tb_flushed) {
-        /* Ensure that no TB jump will be modified as the
-         * translation buffer has been flushed.
-         */
-        last_tb = NULL;
-        cpu->tb_flushed = false;
-    }
 #ifndef CONFIG_USER_ONLY
     /* We don't take care of direct jumps when address mapping changes in
      * system emulation. So it's not safe to make a direct jump to a TB
@@ -218,8 +211,13 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
 #endif
     /* See if we can patch the calling TB. */
     if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-        tb_add_jump(last_tb, tb_exit, tb);
+        /* Check if translation buffer has been flushed */
+        if (cpu->tb_flushed) {
+            cpu->tb_flushed = false;
+        } else {
+            tb_add_jump(last_tb, tb_exit, tb);
+        }
     }
     // Unicorn: commented out
     //tb_unlock();
     return tb;
@@ -442,7 +440,7 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
         }
         last_tb = NULL; /* forget the last executed TB after exception */
-        cpu->tb_flushed = false; /* reset before first TB lookup */
+        atomic_mb_set(&cpu->tb_flushed, false); /* reset before first TB lookup */
         for(;;) {
             cpu_handle_interrupt(cpu, &last_tb);
             tb = tb_find_fast(cpu, last_tb, tb_exit);


@@ -917,13 +917,13 @@ void tb_flush(CPUState *cpu)
             > tcg_ctx->code_gen_buffer_size) {
         cpu_abort(cpu, "Internal error: code buffer overflow\n");
     }
-    tcg_ctx->tb_ctx.nb_tbs = 0;
     for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
         atomic_set(&cpu->tb_jmp_cache[i], NULL);
     }
-    cpu->tb_flushed = true;
+    atomic_mb_set(&cpu->tb_flushed, true);
+    tcg_ctx->tb_ctx.nb_tbs = 0;
     memset(tcg_ctx->tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx->tb_ctx.tb_phys_hash));
     page_flush_tb(uc);