tb hash: hash phys_pc, pc, and flags with xxhash
For some workloads such as arm bootup, tb_phys_hash is performance-critical. This is due to the high frequency of accesses to the hash table, originated by (frequent) TLB flushes that wipe out the cpu-private tb_jmp_cache's. More info:

  https://lists.nongnu.org/archive/html/qemu-devel/2016-03/msg05098.html

To dig further into this I modified an arm image booting debian jessie to immediately shut down after boot. Analysis revealed that quite a bit of time is unnecessarily spent in tb_phys_hash: the cause is poor hashing that results in very uneven loading of chains in the hash table's buckets; the longest observed chain had ~550 elements.

The appended patch addresses this with two changes:

1) Use xxhash as the hash table's hash function. xxhash is a fast, high-quality hashing function.

2) Feed the hashing function with not just tb_phys, but also pc and flags.

This improves performance over using just tb_phys for hashing, since that resulted in some hash buckets having many TBs while others got very few; with these changes, the longest observed chain on a single hash bucket is brought down from ~550 to ~40.

Tests show that the other element checked for in tb_find_physical, cs_base, is always a match when tb_phys+pc+flags are a match, so hashing cs_base is wasteful. It could be that this is an ARM-only thing, though.

UPDATE: On Tue, Apr 05, 2016 at 08:41:43 -0700, Richard Henderson wrote:

> The cs_base field is only used by i386 (in 16-bit modes), and sparc (for a TB
> consisting of only a delay slot).
> It may well still turn out to be reasonable to ignore cs_base for hashing.

BTW, after this change the hash table should not be called "tb_hash_phys" anymore; this is addressed later in this series.

This change gives consistent bootup time improvements. I tested two host machines:

- Intel Xeon E5-2690: 11.6% less time
- Intel i7-4790K: 19.2% less time

Increasing the number of hash buckets yields further improvements. However, using a larger, fixed number of buckets can degrade performance for other workloads that do not translate as many blocks (600K+ for debian-jessie arm bootup). This is dealt with later in this series.

Backports commit 42bd32287f3a18d823f2258b813824a39ed7c6d9 from qemu
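The hash function itself lives in the newly added exec/tb-hash-xx.h, which this page's diff does not show. For context, here is a minimal standalone sketch of tb_hash_func5, modeled on QEMU's xxh32-derived implementation: the primes are xxh32's, and the helper name rotl32 is local to the sketch. Treat the exact body as a reference sketch rather than the file's verbatim contents.

#include <stdint.h>

#define PRIME32_1   2654435761U
#define PRIME32_2   2246822519U
#define PRIME32_3   3266489917U
#define PRIME32_4    668265263U
#define TB_HASH_XX_SEED 1

static inline uint32_t rotl32(uint32_t x, int r)
{
    return (x << r) | (x >> (32 - r));
}

/* Hash exactly five 32-bit words: the two 64-bit inputs split into
 * halves, plus the 32-bit flags word. */
static inline uint32_t tb_hash_func5(uint64_t a0, uint64_t b0, uint32_t e)
{
    uint32_t v1 = TB_HASH_XX_SEED + PRIME32_1 + PRIME32_2;
    uint32_t v2 = TB_HASH_XX_SEED + PRIME32_2;
    uint32_t v3 = TB_HASH_XX_SEED + 0;
    uint32_t v4 = TB_HASH_XX_SEED - PRIME32_1;
    uint32_t a = (uint32_t)(a0 >> 32), b = (uint32_t)a0;
    uint32_t c = (uint32_t)(b0 >> 32), d = (uint32_t)b0;
    uint32_t h32;

    /* one unrolled xxh32 round over the first four words */
    v1 = rotl32(v1 + a * PRIME32_2, 13) * PRIME32_1;
    v2 = rotl32(v2 + b * PRIME32_2, 13) * PRIME32_1;
    v3 = rotl32(v3 + c * PRIME32_2, 13) * PRIME32_1;
    v4 = rotl32(v4 + d * PRIME32_2, 13) * PRIME32_1;

    h32 = rotl32(v1, 1) + rotl32(v2, 7) + rotl32(v3, 12) + rotl32(v4, 18);
    h32 += 20;                      /* total input length in bytes */

    h32 += e * PRIME32_3;           /* fold in the fifth word */
    h32  = rotl32(h32, 17) * PRIME32_4;

    /* xxh32 avalanche */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;
    return h32;
}

Because the input size is fixed at 20 bytes (five words), the generic xxh32 tail handling can be dropped and the length is folded in as the constant 20, which is what makes this specialization faster than a generic xxh32 call.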
This commit is contained in:
parent 9ef9de9cf8
commit ae3e22a689

Changed files: qemu/cpu-exec.c (125 lines changed), qemu/include/exec/tb-hash.h, qemu/translate-all.c
qemu/cpu-exec.c:

@@ -83,60 +83,97 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
     return ret;
 }
 
+static TranslationBlock *tb_find_physical(CPUState *cpu,
+                                          target_ulong pc,
+                                          target_ulong cs_base,
+                                          uint32_t flags)
+{
+    TCGContext *tcg_ctx = cpu->uc->tcg_ctx;
+    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
+    TranslationBlock *tb, **tb_hash_head, **ptb1;
+    uint32_t h;
+    tb_page_addr_t phys_pc, phys_page1;
+
+    /* find translated block using physical mappings */
+    phys_pc = get_page_addr_code(env, pc);
+    phys_page1 = phys_pc & TARGET_PAGE_MASK;
+    h = tb_hash_func(phys_pc, pc, flags);
+
+    /* Start at head of the hash entry */
+    ptb1 = tb_hash_head = &tcg_ctx->tb_ctx.tb_phys_hash[h];
+    tb = *ptb1;
+
+    while (tb) {
+        if (tb->pc == pc &&
+            tb->page_addr[0] == phys_page1 &&
+            tb->cs_base == cs_base &&
+            tb->flags == flags) {
+
+            if (tb->page_addr[1] == -1) {
+                /* done, we have a match */
+                break;
+            } else {
+                /* check next page if needed */
+                target_ulong virt_page2 = (pc & TARGET_PAGE_MASK) +
+                                          TARGET_PAGE_SIZE;
+                tb_page_addr_t phys_page2 = get_page_addr_code(env, virt_page2);
+
+                if (tb->page_addr[1] == phys_page2) {
+                    break;
+                }
+            }
+        }
+
+        ptb1 = &tb->phys_hash_next;
+        tb = *ptb1;
+    }
+
+    if (tb) {
+        /* Move the TB to the head of the list */
+        *ptb1 = tb->phys_hash_next;
+        tb->phys_hash_next = *tb_hash_head;
+        *tb_hash_head = tb;
+    }
+    return tb;
+}
+
 static TranslationBlock *tb_find_slow(CPUState *cpu,
                                       target_ulong pc,
                                       target_ulong cs_base,
-                                      uint64_t flags)
+                                      uint32_t flags)
 {
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
-    TCGContext *tcg_ctx = env->uc->tcg_ctx;
-    TranslationBlock *tb, **ptb1;
-    unsigned int h;
-    tb_page_addr_t phys_pc, phys_page1;
-    target_ulong virt_page2;
+    TranslationBlock *tb;
+    tb_page_addr_t phys_pc;
 
     /* find translated block using physical mappings */
     phys_pc = get_page_addr_code(env, pc); // qq
     if (phys_pc == -1) { // invalid code?
         return NULL;
     }
-    phys_page1 = phys_pc & TARGET_PAGE_MASK;
-    h = tb_phys_hash_func(phys_pc);
-    ptb1 = &tcg_ctx->tb_ctx.tb_phys_hash[h];
-    for(;;) {
-        tb = *ptb1;
-        if (!tb)
-            goto not_found;
-        if (tb->pc == pc &&
-            tb->page_addr[0] == phys_page1 &&
-            tb->cs_base == cs_base &&
-            tb->flags == flags) {
-            /* check next page if needed */
-            if (tb->page_addr[1] != -1) {
-                tb_page_addr_t phys_page2;
-
-                virt_page2 = (pc & TARGET_PAGE_MASK) +
-                    TARGET_PAGE_SIZE;
-                phys_page2 = get_page_addr_code(env, virt_page2);
-                if (tb->page_addr[1] == phys_page2)
-                    goto found;
-            } else {
-                goto found;
-            }
-        }
-        ptb1 = &tb->phys_hash_next;
-    }
-not_found:
+
+    tb = tb_find_physical(cpu, pc, cs_base, flags);
+    if (tb) {
+        goto found;
+    }
+
+#ifdef CONFIG_USER_ONLY
+    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
+     * taken outside tb_lock. Since we're momentarily dropping
+     * tb_lock, there's a chance that our desired tb has been
+     * translated.
+     */
+    // Unicorn: commented out
+    //tb_unlock();
+    mmap_lock();
+    //tb_lock();
+    tb = tb_find_physical(cpu, pc, cs_base, flags);
+    if (tb) {
+        mmap_unlock();
+        goto found;
+    }
+#endif
+
     /* if no translated code available, then translate it now */
-    tb = tb_gen_code(cpu, pc, cs_base, (int)flags, 0); // qq
+    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
+
+#ifdef CONFIG_USER_ONLY
+    mmap_unlock();
+#endif
 
 found:
-    /* Move the last found TB to the head of the list */
-    if (likely(*ptb1)) {
-        *ptb1 = tb->phys_hash_next;
-        tb->phys_hash_next = tcg_ctx->tb_ctx.tb_phys_hash[h];
-        tcg_ctx->tb_ctx.tb_phys_hash[h] = tb;
-    }
     /* we add the TB in the virtual pc hash table */
     cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
     return tb;
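The if (tb) tail of tb_find_physical is a move-to-front heuristic on the bucket's singly linked chain: a TB found deep in a chain is spliced out and pushed to the head, so repeated lookups of hot blocks stay cheap even when a chain is long. A minimal standalone sketch of the same pattern, using a hypothetical node type rather than Unicorn's TranslationBlock:

#include <stddef.h>

struct node {
    int key;
    struct node *next;
};

/* Search a singly linked chain for key; on a hit, splice the node out
 * and re-insert it at the head. pn always holds the address of the
 * link that points at the current node, mirroring ptb1/tb_hash_head
 * in tb_find_physical above. */
static struct node *find_move_to_front(struct node **head, int key)
{
    struct node **pn = head;
    struct node *n = *pn;

    while (n && n->key != key) {
        pn = &n->next;
        n = *pn;
    }
    if (n) {
        *pn = n->next;     /* unlink from current position */
        n->next = *head;   /* push onto the head of the chain */
        *head = n;
    }
    return n;
}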
@@ -160,7 +197,7 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
     tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
     if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                  tb->flags != flags)) {
-        tb = tb_find_slow(cpu, pc, cs_base, flags); // qq
+        tb = tb_find_slow(cpu, pc, cs_base, flags);
     }
     if (cpu->tb_flushed) {
         /* Ensure that no TB jump will be modified as the
qemu/include/exec/tb-hash.h:

@@ -20,6 +20,9 @@
 #ifndef EXEC_TB_HASH
 #define EXEC_TB_HASH
 
+#include "exec/exec-all.h"
+#include "exec/tb-hash-xx.h"
+
 /* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
    addresses on the same page. The top bits are the same. This allows
    TLB invalidation to quickly clear a subset of the hash table. */
@@ -43,9 +46,10 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
            | (tmp & TB_JMP_ADDR_MASK));
 }
 
-static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
+static inline
+uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags)
 {
-    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
+    return tb_hash_func5(phys_pc, pc, flags) & (CODE_GEN_PHYS_HASH_SIZE - 1);
 }
 
 #endif
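tb_hash_func5 returns a full 32-bit hash; tb_hash_func keeps only the low bits via the & (CODE_GEN_PHYS_HASH_SIZE - 1) mask. That mask is equivalent to a modulo only because the bucket count is a power of two, which is also why the commit message defers "increasing the number of hash buckets" to a later patch in the series rather than just bumping a constant here. A tiny standalone check; the size value is an assumption for illustration (the tree defines it as 1 << CODE_GEN_PHYS_HASH_BITS):

#include <assert.h>
#include <stdint.h>

/* Assumed value for illustration; the check only requires a power of two. */
#define CODE_GEN_PHYS_HASH_SIZE (1 << 15)

int main(void)
{
    uint32_t h = 0x42bd3228u;   /* any hash value */
    /* masking with (size - 1) picks the low bits, i.e. h mod size */
    assert((h & (CODE_GEN_PHYS_HASH_SIZE - 1)) == h % CODE_GEN_PHYS_HASH_SIZE);
    return 0;
}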
qemu/translate-all.c:

@@ -1070,12 +1070,12 @@ void tb_phys_invalidate(struct uc_struct *uc,
     TCGContext *tcg_ctx = uc->tcg_ctx;
     CPUState *cpu = uc->cpu;
     PageDesc *p;
-    unsigned int h;
+    uint32_t h;
     tb_page_addr_t phys_pc;
 
     /* remove the TB from the hash list */
     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
-    h = tb_phys_hash_func(phys_pc);
+    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
     tb_hash_remove(&tcg_ctx->tb_ctx.tb_phys_hash[h], tb);
 
     /* remove the TB from the page list */
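tb_hash_remove itself is untouched by this patch, so it does not appear in the diff. The invariant that matters is that invalidation here computes the same h as insertion in tb_link_page below; that is why both call sites switch from tb_phys_hash_func to tb_hash_func in lockstep, and why the hash inputs (phys_pc, pc, flags) must all be stable over a TB's lifetime. For reference, a sketch of the unlink helper in the shape QEMU uses; treat the exact body as an assumption, since it is not part of this diff:

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    /* Walk the bucket's chain until we reach tb, then splice it out.
     * The TB is assumed to be present: a mismatched hash (e.g. computed
     * from stale flags) would make this loop run off the chain's end. */
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}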
@@ -1220,19 +1220,19 @@ static inline void tb_alloc_page(struct uc_struct *uc, TranslationBlock *tb,
 }
 
 /* add a new TB and link it to the physical page tables. phys_page2 is
- (-1) to indicate that only one page contains the TB. */
+ * (-1) to indicate that only one page contains the TB.
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
 static void tb_link_page(struct uc_struct *uc,
     TranslationBlock *tb, tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
 {
     TCGContext *tcg_ctx = uc->tcg_ctx;
-    unsigned int h;
+    uint32_t h;
     TranslationBlock **ptb;
 
-    /* Grab the mmap lock to stop another thread invalidating this TB
-       before we are done. */
-    mmap_lock();
-    /* add in the physical hash table */
-    h = tb_phys_hash_func(phys_pc);
+    /* add in the hash table */
+    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
     ptb = &tcg_ctx->tb_ctx.tb_phys_hash[h];
     tb->phys_hash_next = *ptb;
     *ptb = tb;
@@ -1248,7 +1248,6 @@ static void tb_link_page(struct uc_struct *uc,
 #ifdef DEBUG_TB_CHECK
     tb_page_check();
 #endif
-    mmap_unlock();
 }
 
 TranslationBlock *tb_gen_code(CPUState *cpu,