commit 6bc05eeee4
Having a fixed-size hash table for keeping track of all translation blocks is suboptimal: some workloads are just too big or too small to get maximum performance from the hash table. The MRU promotion policy helps improve performance when the hash table is a little undersized, but it cannot make up for severely undersized hash tables.

Furthermore, frequent MRU promotions result in writes that are a scalability bottleneck. For scalability, lookups should only perform reads, not writes. This is not a big deal for now, but it will become one once MTTCG matures.

The appended patch fixes these issues by using qht as the implementation of the TB hash table. This solution is superior to the other alternatives considered, namely:

- master: implementation in QEMU before this patchset
- xxhash: before this patch, i.e. fixed buckets + xxhash hashing + MRU.
- xxhash-rcu: fixed buckets + xxhash + RCU list + MRU. MRU is implemented here by adding an intermediate struct that contains the u32 hash and a pointer to the TB; this allows us, on an MRU promotion, to copy said struct (which is not at the head), and put this new copy at the head. After a grace period, the original non-head struct can be eliminated, and after another grace period, freed.
- qht-fixed-nomru: fixed buckets + xxhash + qht without auto-resize + no MRU for lookups; MRU for inserts.

The appended solution is the following:

- qht-dyn-nomru: dynamic number of buckets + xxhash + qht w/ auto-resize + no MRU for lookups; MRU for inserts.

The plots below compare the considered solutions. The Y axis shows the boot time (in seconds) of a Debian jessie image with arm-softmmu; the X axis sweeps the number of buckets (or the initial number of buckets, for qht-autoresize). The plots in PNG format (and with error bars) can be seen here: http://imgur.com/a/Awgnq

Each test runs 5 times, and the entire QEMU process is pinned to a single core for repeatability of results.
[Plots: boot time (seconds) vs. log2 number of buckets (14-24). Host: Intel Xeon E5-2690 (Y range roughly 20-28 s) and Host: Intel i7-4790K (Y range roughly 9.5-14.5 s); curves: master, xxhash, xxhash-rcu, qht-fixed-nomru, qht-dyn-mru, qht-dyn-nomru.]

Note that the original point before this patch series is X=15 for "master"; the little sensitivity to the increased number of buckets is due to the poor hashing function in master.

xxhash-rcu has significant overhead due to the constant churn of allocating and deallocating intermediate structs for implementing MRU. An alternative would be to consider failed lookups as "maybe not there", and then acquire the external lock (tb_lock in this case) to really confirm that there was indeed a failed lookup. This, however, would not be enough to implement dynamic resizing; that is more complex: see "Resizable, Scalable, Concurrent Hash Tables via Relativistic Programming" by Triplett, McKenney and Walpole. This solution was discarded due to the very coarse RCU read critical sections that we have in MTTCG; resizing requires waiting for readers after every pointer update, and resizes require many pointer updates, so this would quickly become prohibitive.

qht-fixed-nomru shows that MRU promotion is advisable for undersized hash tables. However, qht-dyn-mru shows that MRU promotion is not important if the hash table is properly sized: there is virtually no difference in performance between qht-dyn-nomru and qht-dyn-mru.

Before this patch, we're at X=15 on "xxhash"; after this patch, we're at X=15 @ qht-dyn-nomru. This patch thus matches the best performance that we can achieve with optimum sizing of the hash table, while keeping the hash table scalable for readers.

The improvement we get before and after this patch for booting Debian jessie with arm-softmmu is:

- Intel Xeon E5-2690: 10.5% less time
- Intel i7-4790K: 5.2% less time

We could get this same improvement _for this particular workload_ by statically increasing the size of the hash table. But this would hurt workloads that do not need a large hash table. The dynamic (upward) resizing allows us to start small and enlarge the hash table as needed.
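To make the reads-vs-writes point concrete, here is a minimal, self-contained sketch of the two lookup disciplines. All toy_* names are invented for illustration; this is not QEMU or qht code. The MRU variant must update bucket pointers on most hits, so concurrent readers of the same bucket repeatedly invalidate each other's cache lines; the qht-style variant performs no writes at all on the lookup path. (Real qht additionally protects readers with per-bucket sequence locks so the table can be resized while lookups proceed; that machinery is omitted here.)

#include <stddef.h>
#include <stdint.h>

/* Toy chained bucket; illustrative only. */
struct toy_tb {
    uint64_t phys_pc;
    uint64_t pc;
    uint32_t flags;
    struct toy_tb *hash_next;
};

/* Pre-patch style: a hit that is not already at the bucket head gets
 * promoted to the head. The three pointer writes below are the
 * scalability problem: they turn every lookup into a writer. */
struct toy_tb *toy_lookup_mru(struct toy_tb **bucket, uint64_t phys_pc,
                              uint64_t pc, uint32_t flags)
{
    struct toy_tb **pprev = bucket;
    struct toy_tb *tb;

    for (tb = *bucket; tb != NULL; pprev = &tb->hash_next, tb = tb->hash_next) {
        if (tb->phys_pc == phys_pc && tb->pc == pc && tb->flags == flags) {
            if (tb != *bucket) {
                *pprev = tb->hash_next;   /* unlink from the middle */
                tb->hash_next = *bucket;  /* relink at the head */
                *bucket = tb;
            }
            return tb;
        }
    }
    return NULL;
}

/* qht-dyn-nomru style: the lookup path is read-only, so any number of
 * readers can traverse the same bucket without bouncing cache lines. */
struct toy_tb *toy_lookup_readonly(struct toy_tb *const *bucket,
                                   uint64_t phys_pc, uint64_t pc,
                                   uint32_t flags)
{
    struct toy_tb *tb;

    for (tb = *bucket; tb != NULL; tb = tb->hash_next) {
        if (tb->phys_pc == phys_pc && tb->pc == pc && tb->flags == flags) {
            return tb;
        }
    }
    return NULL;
}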
A quick note on downsizing: the table is resized back to 2**15 buckets on every tb_flush. This makes sense because it is not guaranteed that the table will reach the same number of TBs later on (e.g. most bootup code is thrown away after boot), so it is better to grow the hash table again as more code blocks are translated. This also avoids the complication of having to build downsizing-hysteresis logic into qht.

Backports commit 909eaac9bbc2ed4f3a82ce38e905b87d478a3e00 from qemu
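The sizing policy in that note is easy to state in code. Below is a self-contained toy with hypothetical toy_* names; this is not qht's API, and real qht rehashes concurrently under per-bucket locks rather than stop-the-world as done here. The shape is: grow upward by doubling once the table is full, and on flush free everything and snap back to 2**15 buckets, so the table itself needs no downsizing-hysteresis logic.

#include <stdint.h>
#include <stdlib.h>

#define TOY_INIT_BITS 15  /* start at 2**15 buckets, per the note above */

struct toy_node {
    uint64_t key;
    struct toy_node *next;
};

struct toy_ht {
    struct toy_node **buckets;
    size_t n_buckets;   /* always a power of two */
    size_t n_entries;
};

static size_t toy_idx(const struct toy_ht *ht, uint64_t key)
{
    return (size_t)key & (ht->n_buckets - 1);
}

void toy_ht_init(struct toy_ht *ht)
{
    ht->n_buckets = (size_t)1 << TOY_INIT_BITS;
    ht->buckets = calloc(ht->n_buckets, sizeof(*ht->buckets));
    ht->n_entries = 0;
}

/* Upward resize only: rehash into twice as many buckets once the load
 * factor reaches 1. */
static void toy_ht_maybe_grow(struct toy_ht *ht)
{
    size_t old_n = ht->n_buckets, i;
    struct toy_node **old = ht->buckets;

    if (ht->n_entries < old_n) {
        return;
    }
    ht->n_buckets = old_n * 2;
    ht->buckets = calloc(ht->n_buckets, sizeof(*ht->buckets));
    for (i = 0; i < old_n; i++) {
        struct toy_node *n = old[i], *next;
        for (; n != NULL; n = next) {
            next = n->next;
            n->next = ht->buckets[toy_idx(ht, n->key)];
            ht->buckets[toy_idx(ht, n->key)] = n;
        }
    }
    free(old);
}

void toy_ht_insert(struct toy_ht *ht, uint64_t key)
{
    struct toy_node *n = malloc(sizeof(*n));

    toy_ht_maybe_grow(ht);
    n->key = key;
    n->next = ht->buckets[toy_idx(ht, key)];
    ht->buckets[toy_idx(ht, key)] = n;
    ht->n_entries++;
}

/* tb_flush analogue: free all entries and reset to the initial size. */
void toy_ht_flush(struct toy_ht *ht)
{
    size_t i;

    for (i = 0; i < ht->n_buckets; i++) {
        struct toy_node *n = ht->buckets[i], *next;
        for (; n != NULL; n = next) {
            next = n->next;
            free(n);
        }
    }
    free(ht->buckets);
    toy_ht_init(ht);
}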
#ifndef UNICORN_COMMON_H_
#define UNICORN_COMMON_H_

#include "tcg.h"

// This header defines common patterns/code that will be included in all
// arch-specific code for unicorn's purposes.
// return true on success, false on failure
static inline bool cpu_physical_mem_read(AddressSpace *as, hwaddr addr,
                                         uint8_t *buf, int len)
{
    return !cpu_physical_memory_rw(as, addr, (void *)buf, len, 0);
}

static inline bool cpu_physical_mem_write(AddressSpace *as, hwaddr addr,
                                          const uint8_t *buf, int len)
{
    return !cpu_physical_memory_rw(as, addr, (void *)buf, len, 1);
}

void tb_cleanup(struct uc_struct *uc);
void free_code_gen_buffer(struct uc_struct *uc);
static inline void free_address_spaces(struct uc_struct *uc)
{
    int i;

    address_space_destroy(&uc->as);
    for (i = 0; i < uc->cpu->num_ases; i++) {
        AddressSpace *as = uc->cpu->cpu_ases[i].as;
        address_space_destroy(as);
        g_free(as);
    }
}
/* This is *supposed* to be done by the class finalizer but it never executes */
static inline void free_machine_class_name(struct uc_struct *uc)
{
    MachineClass *mc = MACHINE_GET_CLASS(uc, uc->machine_state);

    g_free(mc->name);
    mc->name = NULL;
}
static inline void free_tcg_temp_names(TCGContext *s)
{
#if TCG_TARGET_REG_BITS == 32
    int i;

    // On 32-bit hosts, 64-bit globals are split into two halves whose
    // names carry "_0"/"_1" suffixes and are heap-allocated; free them.
    for (i = 0; i < s->nb_globals; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->base_type == TCG_TYPE_I64) {
            if (ts->name && ((strcmp(ts->name + (strlen(ts->name) - 2), "_0") == 0) ||
                             (strcmp(ts->name + (strlen(ts->name) - 2), "_1") == 0))) {
                free((void *)ts->name);
            }
        }
    }
#endif
}
/** Freeing common resources */
static void release_common(void *t)
{
    TCGPool *po, *to;
    TCGContext *s = (TCGContext *)t;

    // Clean up TCG: per-opcode constraint arrays, the TB hash table,
    // and the opcode definition table itself.
    TCGOpDef *def = &s->tcg_op_defs[0];
    g_free(def->args_ct);
    g_free(def->sorted_args);
    qht_destroy(&s->tb_ctx.htable);
    g_free(s->tcg_op_defs);

    // Release every pool buffer in the allocation chain.
    for (po = s->pool_first; po; po = to) {
        to = po->next;
        g_free(po);
    }
    tcg_pool_reset(s);
    g_hash_table_destroy(s->helpers);

    // Destroy the flat view hash table.
    g_hash_table_destroy(s->uc->flat_views);
    unicorn_free_empty_flat_view(s->uc);

    // TODO(danghvu): these functions are not available outside qemu,
    // so we keep them here instead of outside uc_close.
    free_address_spaces(s->uc);
    memory_free(s->uc);
    tb_cleanup(s->uc);
    free_code_gen_buffer(s->uc);
    free_machine_class_name(s->uc);
    free_tcg_temp_names(s);
}
static inline void uc_common_init(struct uc_struct *uc)
{
    memory_register_types(uc);
    uc->write_mem = cpu_physical_mem_write;
    uc->read_mem = cpu_physical_mem_read;
    uc->tcg_enabled = tcg_enabled;
    uc->tcg_exec_init = tcg_exec_init;
    uc->cpu_exec_init_all = cpu_exec_init_all;
    uc->vm_start = vm_start;
    uc->memory_map = memory_map;
    uc->memory_map_ptr = memory_map_ptr;
    uc->memory_unmap = memory_unmap;
    uc->readonly_mem = memory_region_set_readonly;

    uc->target_page_size = TARGET_PAGE_SIZE;
    uc->target_page_align = TARGET_PAGE_SIZE - 1;

    if (!uc->release) {
        uc->release = release_common;
    }
}

#endif
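As a usage note: this header is consumed by each arch-specific build, whose init hook installs its own callbacks and then calls uc_common_init(). The sketch below is hedged and illustrative; only uc_common_init() is from this header, and the function name is hypothetical (real callers live in unicorn's per-arch sources).

#include "unicorn_common.h"

/* Hypothetical arch init; only uc_common_init() is real. */
void example_arch_uc_init(struct uc_struct *uc)
{
    /* Arch-specific setup (register read/write hooks, CPU model
     * registration, ...) would go here. */

    uc_common_init(uc);

    /* From here on, uc->read_mem/uc->write_mem point at the helpers
     * above, and uc->release defaults to release_common, which runs
     * when the instance is closed, unless the arch already set it. */
}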