tb hash: hash phys_pc, pc, and flags with xxhash

For some workloads such as arm bootup, tb_phys_hash is performance-critical.
This is due to the high frequency of accesses to the hash table, caused
by the (frequent) TLB flushes that wipe out the CPU-private tb_jmp_cache's.
More info:
https://lists.nongnu.org/archive/html/qemu-devel/2016-03/msg05098.html

To dig further into this I modified an arm image booting debian jessie to
immediately shut down after boot. Analysis revealed that quite a bit of time
is unnecessarily spent in tb_phys_hash: the cause is poor hashing that
results in very uneven loading of chains in the hash table's buckets;
the longest observed chain had ~550 elements.
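
For reference, a chain-length measurement like the one above can be made with
a small debug helper along the lines of the sketch below. The helper itself is
hypothetical (it is not part of this patch, and its name is made up), but it
only relies on things that already exist in the tree: the tb_phys_hash bucket
array, the phys_hash_next link and CODE_GEN_PHYS_HASH_SIZE.

/* Hypothetical debug helper, not part of this patch: walk every bucket of
 * tb_phys_hash and report occupancy and the longest chain, e.g. right
 * before a tb_flush. */
static void tb_phys_hash_report(TCGContext *tcg_ctx)
{
    size_t used = 0, longest = 0;
    size_t i;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        TranslationBlock *tb = tcg_ctx->tb_ctx.tb_phys_hash[i];
        size_t len = 0;

        while (tb != NULL) {
            len++;
            tb = tb->phys_hash_next;
        }
        used += (len != 0);
        if (len > longest) {
            longest = len;
        }
    }
    printf("tb_phys_hash: %zu/%d buckets in use, longest chain: %zu\n",
           used, CODE_GEN_PHYS_HASH_SIZE, longest);
}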

The appended patch addresses this with two changes:

1) Use xxhash as the hash table's hash function. xxhash is a fast,
high-quality hashing function.

2) Feed the hash function not just tb_phys, but also pc and flags.

This improves performance over hashing just tb_phys, since that
resulted in some hash buckets holding many TBs while others got very few;
with these changes, the longest observed chain on a single hash bucket is
brought down from ~550 to ~40.
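
For illustration, the new hash has roughly the shape sketched below. This is a
hedged, self-contained approximation in the spirit of xxh32, not a copy of the
tb_hash_func5() that this series adds in exec/tb-hash-xx.h; the prime constants
are the standard 32-bit xxhash primes, but the exact mixing schedule of the real
function may differ. The point is that phys_pc, pc and flags all feed the hash,
and the 32-bit result is then masked down to a bucket index (see the
tb_hash_func() hunk in the diff below).

#include <stdint.h>

#define PRIME32_2 0x85EBCA77u
#define PRIME32_3 0xC2B2AE3Du
#define PRIME32_4 0x27D4EB2Fu
#define PRIME32_5 0x165667B1u

static inline uint32_t rol32(uint32_t v, unsigned k)
{
    return (v << k) | (v >> (32 - k));
}

/* Sketch only: xxh32-style mix of (phys_pc, pc, flags), treated as five
 * 32-bit words, into a single 32-bit hash. */
static inline uint32_t example_tb_hash(uint64_t phys_pc, uint64_t pc,
                                       uint32_t flags)
{
    uint32_t in[5] = {
        (uint32_t)(phys_pc >> 32), (uint32_t)phys_pc,
        (uint32_t)(pc >> 32), (uint32_t)pc,
        flags,
    };
    uint32_t h = PRIME32_5 + (uint32_t)sizeof(in); /* seed 0, 20-byte input */
    int i;

    for (i = 0; i < 5; i++) {
        h += in[i] * PRIME32_3;
        h = rol32(h, 17) * PRIME32_4;
    }
    /* xxh32 final avalanche */
    h ^= h >> 15;
    h *= PRIME32_2;
    h ^= h >> 13;
    h *= PRIME32_3;
    h ^= h >> 16;
    return h;
}

/* The bucket index is then taken as h & (CODE_GEN_PHYS_HASH_SIZE - 1),
 * which is what tb_hash_func() does in the hunk below. */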

Tests show that the other field checked in tb_find_physical,
cs_base, always matches whenever tb_phys+pc+flags match,
so hashing cs_base is wasteful. It could be that this is an ARM-only
thing, though. UPDATE:
On Tue, Apr 05, 2016 at 08:41:43 -0700, Richard Henderson wrote:
> The cs_base field is only used by i386 (in 16-bit modes), and sparc (for a TB
> consisting of only a delay slot).
> It may well still turn out to be reasonable to ignore cs_base for hashing.

BTW, after this change the hash table should no longer be called "tb_phys_hash",
since the hash is no longer derived from the physical address alone; this is
addressed later in this series.

This change gives consistent bootup time improvements. I tested two
host machines:
- Intel Xeon E5-2690: 11.6% less time
- Intel i7-4790K: 19.2% less time

Increasing the number of hash buckets yields further improvements. However,
using a larger, fixed number of buckets can degrade performance for other
workloads that do not translate as many blocks (600K+ for debian-jessie arm
bootup). This is dealt with later in this series.
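
To make that trade-off concrete, here is a purely illustrative calculation (not
from this patch): with a power-of-two bucket count the hash is reduced to an
index by masking, the expected chain length for n TBs spread over b buckets by
a good hash is roughly n/b, and the bucket array costs b * sizeof(TranslationBlock *)
bytes up front whether or not the workload ever translates that many blocks.
The bucket counts below are example values, not the actual CODE_GEN_PHYS_HASH_SIZE.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t h = 0xdeadbeefu;          /* some 32-bit hash value */
    unsigned long nb_tbs = 600000;     /* ~TB count for debian-jessie arm bootup */
    unsigned long buckets;

    for (buckets = 1ul << 15; buckets <= (1ul << 18); buckets <<= 1) {
        printf("buckets=%lu index=0x%lx expected chain ~%lu array=%lu KiB\n",
               buckets,
               (unsigned long)(h & (buckets - 1)),
               nb_tbs / buckets,
               (unsigned long)(buckets * sizeof(void *) / 1024));
    }
    return 0;
}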

Backports commit 42bd32287f3a18d823f2258b813824a39ed7c6d9 from qemu
Emilio G. Cota 2018-02-24 17:45:39 -05:00 committed by Lioncash
parent 9ef9de9cf8
commit ae3e22a689
GPG key ID: 4E3C3CC1031BA9C7
3 changed files with 96 additions and 56 deletions


@@ -83,60 +83,97 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
     return ret;
 }
 
-static TranslationBlock *tb_find_slow(CPUState *cpu,
-                                      target_ulong pc,
-                                      target_ulong cs_base,
-                                      uint64_t flags)
+static TranslationBlock *tb_find_physical(CPUState *cpu,
+                                          target_ulong pc,
+                                          target_ulong cs_base,
+                                          uint32_t flags)
 {
+    TCGContext *tcg_ctx = cpu->uc->tcg_ctx;
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
-    TCGContext *tcg_ctx = env->uc->tcg_ctx;
-    TranslationBlock *tb, **ptb1;
-    unsigned int h;
+    TranslationBlock *tb, **tb_hash_head, **ptb1;
+    uint32_t h;
     tb_page_addr_t phys_pc, phys_page1;
-    target_ulong virt_page2;
 
     /* find translated block using physical mappings */
-    phys_pc = get_page_addr_code(env, pc); // qq
+    phys_pc = get_page_addr_code(env, pc);
     if (phys_pc == -1) { // invalid code?
         return NULL;
     }
     phys_page1 = phys_pc & TARGET_PAGE_MASK;
-    h = tb_phys_hash_func(phys_pc);
-    ptb1 = &tcg_ctx->tb_ctx.tb_phys_hash[h];
-    for(;;) {
-        tb = *ptb1;
-        if (!tb)
-            goto not_found;
-        if (tb->pc == pc &&
-            tb->page_addr[0] == phys_page1 &&
-            tb->cs_base == cs_base &&
-            tb->flags == flags) {
-            /* check next page if needed */
-            if (tb->page_addr[1] != -1) {
-                tb_page_addr_t phys_page2;
-
-                virt_page2 = (pc & TARGET_PAGE_MASK) +
-                    TARGET_PAGE_SIZE;
-                phys_page2 = get_page_addr_code(env, virt_page2);
-                if (tb->page_addr[1] == phys_page2)
-                    goto found;
-            } else {
-                goto found;
-            }
-        }
-        ptb1 = &tb->phys_hash_next;
-    }
- not_found:
-    /* if no translated code available, then translate it now */
-    tb = tb_gen_code(cpu, pc, cs_base, (int)flags, 0); // qq
-
- found:
-    /* Move the last found TB to the head of the list */
-    if (likely(*ptb1)) {
-        *ptb1 = tb->phys_hash_next;
-        tb->phys_hash_next = tcg_ctx->tb_ctx.tb_phys_hash[h];
-        tcg_ctx->tb_ctx.tb_phys_hash[h] = tb;
-    }
+    h = tb_hash_func(phys_pc, pc, flags);
+
+    /* Start at head of the hash entry */
+    ptb1 = tb_hash_head = &tcg_ctx->tb_ctx.tb_phys_hash[h];
+    tb = *ptb1;
+
+    while (tb) {
+        if (tb->pc == pc &&
+            tb->page_addr[0] == phys_page1 &&
+            tb->cs_base == cs_base &&
+            tb->flags == flags) {
+
+            if (tb->page_addr[1] == -1) {
+                /* done, we have a match */
+                break;
+            } else {
+                /* check next page if needed */
+                target_ulong virt_page2 = (pc & TARGET_PAGE_MASK) +
+                                          TARGET_PAGE_SIZE;
+                tb_page_addr_t phys_page2 = get_page_addr_code(env, virt_page2);
+
+                if (tb->page_addr[1] == phys_page2) {
+                    break;
+                }
+            }
+        }
+
+        ptb1 = &tb->phys_hash_next;
+        tb = *ptb1;
+    }
+
+    if (tb) {
+        /* Move the TB to the head of the list */
+        *ptb1 = tb->phys_hash_next;
+        tb->phys_hash_next = *tb_hash_head;
+        *tb_hash_head = tb;
+    }
+    return tb;
+}
+
+static TranslationBlock *tb_find_slow(CPUState *cpu,
+                                      target_ulong pc,
+                                      target_ulong cs_base,
+                                      uint32_t flags)
+{
+    TranslationBlock *tb;
+
+    tb = tb_find_physical(cpu, pc, cs_base, flags);
+    if (tb) {
+        goto found;
+    }
+
+#ifdef CONFIG_USER_ONLY
+    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
+     * taken outside tb_lock. Since we're momentarily dropping
+     * tb_lock, there's a chance that our desired tb has been
+     * translated.
+     */
+    // Unicorn: commented out
+    //tb_unlock();
+    mmap_lock();
+    //tb_lock();
+    tb = tb_find_physical(cpu, pc, cs_base, flags);
+    if (tb) {
+        mmap_unlock();
+        goto found;
+    }
+#endif
+
+    /* if no translated code available, then translate it now */
+    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
+
+#ifdef CONFIG_USER_ONLY
+    mmap_unlock();
+#endif
+
+found:
     /* we add the TB in the virtual pc hash table */
     cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
     return tb;

@@ -160,7 +197,7 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
     tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
     if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                  tb->flags != flags)) {
-        tb = tb_find_slow(cpu, pc, cs_base, flags); // qq
+        tb = tb_find_slow(cpu, pc, cs_base, flags);
     }
     if (cpu->tb_flushed) {
         /* Ensure that no TB jump will be modified as the


@@ -20,6 +20,9 @@
 #ifndef EXEC_TB_HASH
 #define EXEC_TB_HASH
 
+#include "exec/exec-all.h"
+#include "exec/tb-hash-xx.h"
+
 /* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
    addresses on the same page.  The top bits are the same.  This allows
    TLB invalidation to quickly clear a subset of the hash table.  */

@@ -43,9 +46,10 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
            | (tmp & TB_JMP_ADDR_MASK));
 }
 
-static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
+static inline
+uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags)
 {
-    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
+    return tb_hash_func5(phys_pc, pc, flags) & (CODE_GEN_PHYS_HASH_SIZE - 1);
 }
 
 #endif


@@ -1070,12 +1070,12 @@ void tb_phys_invalidate(struct uc_struct *uc,
     TCGContext *tcg_ctx = uc->tcg_ctx;
     CPUState *cpu = uc->cpu;
     PageDesc *p;
-    unsigned int h;
+    uint32_t h;
     tb_page_addr_t phys_pc;
 
     /* remove the TB from the hash list */
     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
-    h = tb_phys_hash_func(phys_pc);
+    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
     tb_hash_remove(&tcg_ctx->tb_ctx.tb_phys_hash[h], tb);
 
     /* remove the TB from the page list */

@@ -1220,19 +1220,19 @@ static inline void tb_alloc_page(struct uc_struct *uc, TranslationBlock *tb,
 }
 
 /* add a new TB and link it to the physical page tables. phys_page2 is
- (-1) to indicate that only one page contains the TB. */
+ * (-1) to indicate that only one page contains the TB.
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
 static void tb_link_page(struct uc_struct *uc,
     TranslationBlock *tb, tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
 {
     TCGContext *tcg_ctx = uc->tcg_ctx;
-    unsigned int h;
+    uint32_t h;
     TranslationBlock **ptb;
 
-    /* Grab the mmap lock to stop another thread invalidating this TB
-       before we are done. */
-    mmap_lock();
-    /* add in the physical hash table */
-    h = tb_phys_hash_func(phys_pc);
+    /* add in the hash table */
+    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
     ptb = &tcg_ctx->tb_ctx.tb_phys_hash[h];
     tb->phys_hash_next = *ptb;
     *ptb = tb;

@@ -1248,7 +1248,6 @@ static void tb_link_page(struct uc_struct *uc,
 #ifdef DEBUG_TB_CHECK
     tb_page_check();
 #endif
-    mmap_unlock();
 }
 
 TranslationBlock *tb_gen_code(CPUState *cpu,