Mirror of https://github.com/yuzu-emu/unicorn.git, synced 2024-12-23 12:35:36 +00:00
tcg: Increase hit rate of lookup_tb_ptr
We can call tb_htable_lookup even when the tb_jmp_cache is completely empty.
Therefore, un-nest most of the code dependent on tb != NULL from the read
from the cache.

This improves the hit rate of lookup_tb_ptr; for instance, when booting and
immediately shutting down debian-arm, the hit rate improves from 93.2% to 99.4%.

Backports commit b97a879de980e99452063851597edb98e7e8039c from qemu
parent 9ec975448b
commit dd1473f582
@@ -149,23 +149,24 @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env, target_ulong addr)
     CPUState *cpu = ENV_GET_CPU(env);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
-    uint32_t flags;
+    uint32_t flags, addr_hash;
 
-    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(addr)]);
-    if (likely(tb)) {
-        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-        if (likely(tb->pc == addr && tb->cs_base == cs_base &&
-                   tb->flags == flags)) {
-            goto found;
-        }
+    addr_hash = tb_jmp_cache_hash_func(addr);
+    // Unicorn: atomic_read used instead of atomic_rcu_read
+    tb = atomic_read(&cpu->tb_jmp_cache[addr_hash]);
+    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+
+    if (unlikely(!(tb
+                   && tb->pc == addr
+                   && tb->cs_base == cs_base
+                   && tb->flags == flags))) {
         tb = tb_htable_lookup(cpu, addr, cs_base, flags);
-        if (likely(tb)) {
-            atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(addr)], tb);
-            goto found;
+        if (!tb) {
+            return tcg_ctx->code_gen_epilogue;
         }
+        atomic_set(&cpu->tb_jmp_cache[addr_hash], tb);
     }
-    return tcg_ctx->code_gen_epilogue;
- found:
+
     // Unicorn: commented out
     //qemu_log_mask_and_addr(CPU_LOG_EXEC, addr,
     //                       "Chain %p [%d: " TARGET_FMT_lx "] %s\n",
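A minimal sketch of the restructured fast path may make the reasoning in the commit message more concrete. Everything below is a simplified stand-in, not the real QEMU/Unicorn code: TranslationBlock is reduced to the fields the check needs, tb_jmp_cache_hash and tb_htable_lookup_sketch are hypothetical placeholders for tb_jmp_cache_hash_func and tb_htable_lookup, and RCU, atomics and the CPUState argument are omitted. The point it illustrates is that the NULL check is folded into one combined miss condition, so an empty jump-cache slot now reaches the hash-table lookup and gets refilled instead of returning straight to the epilogue.

/* Sketch only: simplified stand-ins for the real QEMU/Unicorn definitions. */
typedef struct TranslationBlock {
    unsigned long pc;        /* guest PC this block was translated from */
    unsigned long cs_base;   /* code segment base recorded at translation time */
    unsigned int flags;      /* CPU flags recorded at translation time */
    void *tc_ptr;            /* pointer to the generated host code */
} TranslationBlock;

#define TB_JMP_CACHE_SIZE 4096                      /* placeholder cache size */
static TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
static void *code_gen_epilogue;                     /* placeholder epilogue pointer */

/* Placeholder hash and slow-path lookup; the real ones live in QEMU. */
static unsigned tb_jmp_cache_hash(unsigned long addr)
{
    return (unsigned)(addr >> 2) & (TB_JMP_CACHE_SIZE - 1);
}
extern TranslationBlock *tb_htable_lookup_sketch(unsigned long addr,
                                                 unsigned long cs_base,
                                                 unsigned int flags);

/* Restructured lookup: one combined miss test instead of nested checks. */
static void *lookup_tb_ptr_sketch(unsigned long addr,
                                  unsigned long cs_base, unsigned int flags)
{
    unsigned addr_hash = tb_jmp_cache_hash(addr);
    TranslationBlock *tb = tb_jmp_cache[addr_hash];

    /* An empty slot and a stale slot are both treated as the same miss. */
    if (!(tb && tb->pc == addr && tb->cs_base == cs_base
             && tb->flags == flags)) {
        tb = tb_htable_lookup_sketch(addr, cs_base, flags);
        if (!tb) {
            return code_gen_epilogue;   /* nothing translated for addr yet */
        }
        tb_jmp_cache[addr_hash] = tb;   /* refill the slot, even when it was empty */
    }
    return tb->tc_ptr;                  /* jump target for the generated code */
}

In the pre-patch shape, tb_htable_lookup was only reached when the cache slot already held some block, so a cold slot was never filled by this helper; that is what limited the hit rate during the boot/shutdown run quoted in the commit message.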