tcg: consistently access cpu->tb_jmp_cache atomically

Some code paths can lead to atomic accesses racing with memset()
on cpu->tb_jmp_cache, which can result in torn reads/writes
and is undefined behaviour in C11.

These torn accesses are unlikely to show up as bugs, but from code
inspection they seem possible. For example, tb_phys_invalidate does:
/* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
atomic_set(&cpu->tb_jmp_cache[h], NULL);
}
}
Here atomic_set might race with a concurrent memset (such as the
ones scheduled via "unsafe" async work, e.g. tlb_flush_page) and
therefore we might end up with a torn pointer (or who knows what,
because we are under undefined behaviour).

This patch converts parallel accesses to cpu->tb_jmp_cache to use
atomic primitives, thereby bringing these accesses back to defined
behaviour. The price to pay is to potentially execute more instructions
when clearing cpu->tb_jmp_cache, but given how infrequently they happen
and the small size of the cache, the performance impact I have measured
is within noise range when booting debian-arm.

Note that under "safe async" work (e.g. do_tb_flush) we could use memset
because no other vcpus are running. However I'm keeping these accesses
atomic as well to keep things simple and to avoid confusing analysis
tools such as ThreadSanitizer.

Backports commit f3ced3c59287dabc253f83f0c70aa4934470c15e from qemu
This commit is contained in:
Emilio G. Cota 2018-03-03 21:12:34 -05:00 committed by Lioncash
parent 1a4e5da043
commit f66e74d65b
No known key found for this signature in database
GPG key ID: 4E3C3CC1031BA9C7
4 changed files with 26 additions and 16 deletions

View file

@ -77,7 +77,7 @@ void tlb_flush(CPUState *cpu)
memset(env->tlb_table, -1, sizeof(env->tlb_table));
memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
cpu_tb_jmp_cache_clear(cpu);
env->vtlb_index = 0;
env->tlb_flush_addr = -1;
@ -385,7 +385,7 @@ static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
}
}
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
cpu_tb_jmp_cache_clear(cpu);
}
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)

View file

@ -281,7 +281,7 @@ struct CPUState {
void *env_ptr; /* CPUArchState */
/* Writes protected by tb_lock, reads not thread-safe */
/* Accessed in parallel; all accesses must be atomic */
struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
QTAILQ_ENTRY(CPUState) node;
@ -328,6 +328,15 @@ struct CPUState {
struct uc_struct* uc;
};
static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
{
    /* Clear every jump-cache slot with atomic stores: other threads may
       read the cache concurrently, so a plain memset could tear. */
    unsigned int idx;

    for (idx = 0; idx < TB_JMP_CACHE_SIZE; idx++) {
        atomic_set(&cpu->tb_jmp_cache[idx], NULL);
    }
}
/**
* qemu_tcg_mttcg_enabled:
* Check whether we are running MultiThread TCG or not.

View file

@ -176,9 +176,7 @@ static void cpu_common_reset(CPUState *cpu)
// unicorn's crappy symbol deduplication
// makes it impossible right now
//if (tcg_enabled(cpu->uc)) {
for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
atomic_set(&cpu->tb_jmp_cache[i], NULL);
}
cpu_tb_jmp_cache_clear(cpu);
#ifdef CONFIG_SOFTMMU
tlb_flush(cpu);

View file

@ -963,9 +963,7 @@ void tb_flush(CPUState *cpu)
cpu_abort(cpu, "Internal error: code buffer overflow\n");
}
for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
atomic_set(&cpu->tb_jmp_cache[i], NULL);
}
cpu_tb_jmp_cache_clear(cpu);
atomic_mb_set(&cpu->tb_flushed, true);
tcg_ctx->tb_ctx.nb_tbs = 0;
@ -1860,19 +1858,24 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
cpu_loop_exit_noexc(cpu);
}
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    /* Atomically NULL out the run of jump-cache slots that can hold TBs
       from the given guest page (parallel readers forbid memset here). */
    unsigned int base = tb_jmp_cache_hash_page(page_addr);
    unsigned int off;

    for (off = 0; off < TB_JMP_PAGE_SIZE; off++) {
        atomic_set(&cpu->tb_jmp_cache[base + off], NULL);
    }
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.
       NOTE: the diff rendering had left both the old non-atomic memset
       calls and the new atomic helper calls in this body; only the
       atomic helper calls belong here — the memsets raced with
       concurrent atomic_read/atomic_set (UB under C11). */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}
#if 0