unicorn/qemu/unicorn_common.h

#ifndef UNICORN_COMMON_H_
#define UNICORN_COMMON_H_
#include "tcg.h"

// This header defines common patterns/code that will be included in all
// arch-specific code for Unicorn's purposes.

// return true on success, false on failure
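// Thin wrappers over cpu_physical_memory_rw() (last argument selects the
// direction: 0 = read, 1 = write), which returns non-zero on failure --
// hence the negation. uc_common_init() below installs these as
// uc->read_mem / uc->write_mem.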
static inline bool cpu_physical_mem_read(AddressSpace *as, hwaddr addr,
                                         uint8_t *buf, int len)
{
    return !cpu_physical_memory_rw(as, addr, (void *)buf, len, 0);
}

static inline bool cpu_physical_mem_write(AddressSpace *as, hwaddr addr,
                                          const uint8_t *buf, int len)
{
    return !cpu_physical_memory_rw(as, addr, (void *)buf, len, 1);
}
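
// Implemented outside this header; declared here so that release_common()
// below can tear down translation-block state and the code-gen buffer.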
void tb_cleanup(struct uc_struct *uc);
void free_code_gen_buffer(struct uc_struct *uc);

static inline void free_address_spaces(struct uc_struct *uc)
{
    int i;

    address_space_destroy(&uc->as);
    for (i = 0; i < uc->cpu->num_ases; i++) {
        AddressSpace *as = uc->cpu->cpu_ases[i].as;
        address_space_destroy(as);
        g_free(as);
    }
}

/* This is *supposed* to be done by the class finalizer but it never executes */
static inline void free_machine_class_name(struct uc_struct *uc)
{
    MachineClass *mc = MACHINE_GET_CLASS(uc, uc->machine_state);

    g_free(mc->name);
    mc->name = NULL;
}
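
/*
 * On 32-bit TCG hosts, 64-bit globals are split into two 32-bit halves whose
 * names carry "_0"/"_1" suffixes; those name strings are heap-allocated, so
 * release them before the context itself is freed. No-op on 64-bit hosts.
 */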
static inline void free_tcg_temp_names(TCGContext *s)
{
#if TCG_TARGET_REG_BITS == 32
    int i;

    for (i = 0; i < s->nb_globals; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->base_type == TCG_TYPE_I64) {
            if (ts->name && ((strcmp(ts->name + (strlen(ts->name) - 2), "_0") == 0) ||
                             (strcmp(ts->name + (strlen(ts->name) - 2), "_1") == 0))) {
                free((void *)ts->name);
            }
        }
    }
#endif
}
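
/*
 * Release everything owned by a single TCGContext: the opcode definition
 * constraint arrays, the temporary-allocation pools, the helper hash table
 * and (on 32-bit hosts) the synthesized temp names, then the context itself.
 */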
static inline void free_tcg_context(TCGContext *s)
{
    TCGOpDef *def = &s->tcg_op_defs[0];
    TCGPool *po, *to;

    g_free(def->args_ct);
    g_free(def->sorted_args);
    g_free(s->tcg_op_defs);

    for (po = s->pool_first; po; po = to) {
        to = po->next;
        g_free(po);
    }
    tcg_pool_reset(s);

    g_hash_table_destroy(s->helpers);
    free_tcg_temp_names(s);
    g_free(s);
}
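
/* Free every TCG context recorded in uc->tcg_ctxs (one entry per vCPU
 * thread), then the pointer array itself. */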
static inline void free_tcg_contexts(struct uc_struct *uc)
{
    int i;
    TCGContext **tcg_ctxs = uc->tcg_ctxs;

    for (i = 0; i < uc->n_tcg_ctxs; i++) {
        free_tcg_context(tcg_ctxs[i]);
    }
    g_free(tcg_ctxs);
}

/**
 * Free resources common to all architectures. Installed as uc->release by
 * uc_common_init() below.
 */
static void release_common(void *t)
{
    TCGContext *s = (TCGContext *)t;
    struct uc_struct *uc = s->uc;

    // Clean TCG.
    free_tcg_contexts(uc);
    g_tree_destroy(uc->tb_ctx.tb_tree);
    qht_destroy(&uc->tb_ctx.htable);

    // Destroy the flat view hash table
    g_hash_table_destroy(uc->flat_views);
    unicorn_free_empty_flat_view(uc);

    // TODO(danghvu): these functions are not available outside qemu,
    // so we keep them here instead of outside uc_close.
    free_address_spaces(uc);
    memory_free(uc);
    tb_cleanup(uc);
    free_code_gen_buffer(uc);
    free_machine_class_name(uc);
}
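
/*
 * Wire the arch-independent callbacks and constants into uc_struct so the
 * core engine can reach QEMU's memory and TCG entry points through these
 * pointers; each architecture is expected to call this from its own init
 * code (the call sites live outside this header).
 */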
static inline void uc_common_init(struct uc_struct *uc)
{
    memory_register_types(uc);
    uc->write_mem = cpu_physical_mem_write;
    uc->read_mem = cpu_physical_mem_read;
    uc->tcg_enabled = tcg_enabled;
    uc->tcg_exec_init = tcg_exec_init;
    uc->cpu_exec_init_all = cpu_exec_init_all;
    uc->vm_start = vm_start;
    uc->memory_map = memory_map;
    uc->memory_map_ptr = memory_map_ptr;
    uc->memory_unmap = memory_unmap;
    uc->readonly_mem = memory_region_set_readonly;

    uc->target_page_size = TARGET_PAGE_SIZE;
    uc->target_page_align = TARGET_PAGE_SIZE - 1;

    if (!uc->release) {
        uc->release = release_common;
    }
}

#endif