2015-08-21 07:04:50 +00:00
|
|
|
/* Unicorn Emulator Engine */
|
|
|
|
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */
|
|
|
|
|
|
|
|
#ifndef UC_PRIV_H
|
|
|
|
#define UC_PRIV_H
|
|
|
|
|
2017-01-20 13:13:21 +00:00
|
|
|
#include "unicorn/platform.h"
|
2015-08-21 07:04:50 +00:00
|
|
|
#include <stdio.h>
|
|
|
|
|
|
|
|
#include "qemu.h"
|
2018-02-20 13:41:52 +00:00
|
|
|
#include "exec/ramlist.h"
|
2018-03-14 13:08:01 +00:00
|
|
|
#include "exec/tb-context.h"
|
2015-08-21 07:04:50 +00:00
|
|
|
#include "unicorn/unicorn.h"
|
2016-01-16 08:44:02 +00:00
|
|
|
#include "list.h"
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2016-01-22 22:47:29 +00:00
|
|
|
// These are masks of supported modes for each cpu/arch.
// They should be updated when changes are made to the uc_mode enum typedef.
#define UC_MODE_ARM_MASK    (UC_MODE_ARM|UC_MODE_THUMB|UC_MODE_LITTLE_ENDIAN|UC_MODE_MCLASS|UC_MODE_BIG_ENDIAN)
#define UC_MODE_MIPS_MASK   (UC_MODE_MIPS32|UC_MODE_MIPS64|UC_MODE_LITTLE_ENDIAN|UC_MODE_BIG_ENDIAN)
#define UC_MODE_X86_MASK    (UC_MODE_16|UC_MODE_32|UC_MODE_64|UC_MODE_LITTLE_ENDIAN)
#define UC_MODE_PPC_MASK    (UC_MODE_PPC64|UC_MODE_BIG_ENDIAN)
#define UC_MODE_SPARC_MASK  (UC_MODE_SPARC32|UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN)
#define UC_MODE_M68K_MASK   (UC_MODE_BIG_ENDIAN)
// Number of elements in a statically-sized array. Only valid on true
// arrays -- passing a pointer silently yields a wrong answer.
#define ARR_SIZE(a) (sizeof(a)/sizeof((a)[0]))

// Sub-register read accessors. Arguments are fully parenthesized so that
// expressions such as READ_WORD(a | b) expand with the intended precedence
// (previously `x & 0xffff` masked only the right operand of a `|`).
#define READ_QWORD(x) ((uint64_t)(x))
#define READ_DWORD(x) ((x) & 0xffffffff)
#define READ_WORD(x) ((x) & 0xffff)
#define READ_BYTE_H(x) (((x) & 0xffff) >> 8)
#define READ_BYTE_L(x) ((x) & 0xff)

// In-place writers for the low dword/word/bytes of a register value.
// The inverted masks (~0xffff etc.) sign-extend, so the untouched high
// bits of a 64-bit lvalue are preserved.
#define WRITE_DWORD(x, w) ((x) = ((x) & ~0xffffffffLL) | ((w) & 0xffffffff))
#define WRITE_WORD(x, w) ((x) = ((x) & ~0xffff) | ((w) & 0xffff))
#define WRITE_BYTE_H(x, b) ((x) = ((x) & ~0xff00) | (((b) & 0xff) << 8))
#define WRITE_BYTE_L(x, b) ((x) = ((x) & ~0xff) | ((b) & 0xff))
2015-08-21 07:04:50 +00:00
|
|
|
typedef struct ModuleEntry {
|
|
|
|
void (*init)(void);
|
|
|
|
QTAILQ_ENTRY(ModuleEntry) node;
|
|
|
|
module_init_type type;
|
|
|
|
} ModuleEntry;
|
|
|
|
|
|
|
|
typedef QTAILQ_HEAD(, ModuleEntry) ModuleTypeList;
|
|
|
|
|
2016-01-23 09:14:44 +00:00
|
|
|
typedef uc_err (*query_t)(struct uc_struct *uc, uc_query_type type, size_t *result);
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
// return 0 on success, -1 on failure
|
2016-04-04 15:25:30 +00:00
|
|
|
typedef int (*reg_read_t)(struct uc_struct *uc, unsigned int *regs, void **vals, int count);
|
|
|
|
typedef int (*reg_write_t)(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count);
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2015-08-26 12:00:00 +00:00
|
|
|
typedef void (*reg_reset_t)(struct uc_struct *uc);
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2015-08-24 15:02:14 +00:00
|
|
|
typedef bool (*uc_write_mem_t)(AddressSpace *as, hwaddr addr, const uint8_t *buf, int len);
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
typedef bool (*uc_read_mem_t)(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
|
|
|
|
|
|
|
|
typedef void (*uc_args_void_t)(void*);
|
|
|
|
|
|
|
|
typedef void (*uc_args_uc_t)(struct uc_struct*);
|
2015-11-11 17:43:41 +00:00
|
|
|
typedef int (*uc_args_int_uc_t)(struct uc_struct*);
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
typedef bool (*uc_args_tcg_enable_t)(struct uc_struct*);
|
|
|
|
|
|
|
|
typedef void (*uc_args_uc_long_t)(struct uc_struct*, unsigned long);
|
|
|
|
|
|
|
|
typedef void (*uc_args_uc_u64_t)(struct uc_struct *, uint64_t addr);
|
|
|
|
|
2016-08-27 13:49:11 +00:00
|
|
|
typedef MemoryRegion* (*uc_args_uc_ram_size_t)(struct uc_struct*, hwaddr begin, size_t size, uint32_t perms);
|
2015-08-26 20:29:54 +00:00
|
|
|
|
2016-08-27 13:49:11 +00:00
|
|
|
typedef MemoryRegion* (*uc_args_uc_ram_size_ptr_t)(struct uc_struct*, hwaddr begin, size_t size, uint32_t perms, void *ptr);
|
2015-11-28 01:25:53 +00:00
|
|
|
|
2015-08-30 04:17:30 +00:00
|
|
|
typedef void (*uc_mem_unmap_t)(struct uc_struct*, MemoryRegion *mr);
|
|
|
|
|
2015-08-26 20:29:54 +00:00
|
|
|
typedef void (*uc_readonly_mem_t)(MemoryRegion *mr, bool readonly);
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
// which interrupt should make emulation stop?
|
|
|
|
typedef bool (*uc_args_int_t)(int intno);
|
|
|
|
|
2015-10-27 06:37:03 +00:00
|
|
|
// some architecture redirect virtual memory to physical memory like Mips
|
|
|
|
typedef uint64_t (*uc_mem_redirect_t)(uint64_t address);
|
|
|
|
|
2017-05-13 17:16:17 +00:00
|
|
|
// validate if Unicorn supports hooking a given instruction
|
|
|
|
typedef bool(*uc_insn_hook_validate)(uint32_t insn_enum);
|
|
|
|
|
// A single registered hook. Hooks are reference-counted because one hook
// may be stored in several of the per-type lists in uc_struct.hook[].
struct hook {
    int type;            // UC_HOOK_*
    int insn;            // instruction for HOOK_INSN
    int refs;            // reference count to free hook stored in multiple lists
    uint64_t begin, end; // only trigger if PC or memory access is in this address (depends on hook type)
    void *callback;      // a uc_cb_* type
    void *user_data;     // opaque pointer passed back to the callback
};
// hook list offsets
// mirrors the order of uc_hook_type from include/unicorn/unicorn.h
enum uc_hook_idx {
    UC_HOOK_INTR_IDX,
    UC_HOOK_INSN_IDX,
    UC_HOOK_CODE_IDX,
    UC_HOOK_BLOCK_IDX,
    UC_HOOK_MEM_READ_UNMAPPED_IDX,
    UC_HOOK_MEM_WRITE_UNMAPPED_IDX,
    UC_HOOK_MEM_FETCH_UNMAPPED_IDX,
    UC_HOOK_MEM_READ_PROT_IDX,
    UC_HOOK_MEM_WRITE_PROT_IDX,
    UC_HOOK_MEM_FETCH_PROT_IDX,
    UC_HOOK_MEM_READ_IDX,
    UC_HOOK_MEM_WRITE_IDX,
    UC_HOOK_MEM_FETCH_IDX,
    UC_HOOK_MEM_READ_AFTER_IDX,

    UC_HOOK_MAX,    // number of hook lists; sizes uc_struct.hook[]
};
// Declares the cursor variable used by HOOK_FOREACH; must appear in the
// enclosing scope before the loop macro is used.
#define HOOK_FOREACH_VAR_DECLARE \
    struct list_item *cur

// for loop macro to loop over hook lists
// (uc) is parenthesized everywhere it expands, so any expression may be
// passed as the first argument.
#define HOOK_FOREACH(uc, hh, idx) \
    for ( \
        cur = (uc)->hook[idx##_IDX].head; \
        cur != NULL && ((hh) = (struct hook *)cur->data) \
            /* stop executing callbacks on stop request */ \
            && !(uc)->stop_request; \
        cur = cur->next)
|
|
|
// if statement to check hook bounds
// A hook registered with begin > end is treated as "catch all" and
// matches every address.
#define HOOK_BOUND_CHECK(hh, addr) \
    ((((addr) >= (hh)->begin && (addr) <= (hh)->end) \
        || (hh)->begin > (hh)->end))

// Quick checks for whether any hook of the given type is registered,
// optionally bounded to a specific address.
#define HOOK_EXISTS(uc, idx) ((uc)->hook[idx##_IDX].head != NULL)
#define HOOK_EXISTS_BOUNDED(uc, idx, addr) _hook_exists_bounded((uc)->hook[idx##_IDX].head, addr)
|
|
|
static inline bool _hook_exists_bounded(struct list_item *cur, uint64_t addr)
|
|
|
|
{
|
|
|
|
while (cur != NULL) {
|
|
|
|
if (HOOK_BOUND_CHECK((struct hook *)cur->data, addr))
|
|
|
|
return true;
|
|
|
|
cur = cur->next;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
2015-08-21 07:04:50 +00:00
|
|
|
|
// realloc increment for the mapped_blocks array, KEEP THIS A POWER OF 2!
#define MEM_BLOCK_INCR 32
2015-08-21 07:04:50 +00:00
|
|
|
struct uc_struct {
|
|
|
|
uc_arch arch;
|
|
|
|
uc_mode mode;
|
|
|
|
uc_err errnum; // qemu/cpu-exec.c
|
|
|
|
AddressSpace as;
|
2016-01-23 09:14:44 +00:00
|
|
|
query_t query;
|
2015-08-24 16:42:50 +00:00
|
|
|
reg_read_t reg_read;
|
|
|
|
reg_write_t reg_write;
|
2015-08-21 07:04:50 +00:00
|
|
|
reg_reset_t reg_reset;
|
|
|
|
|
|
|
|
uc_write_mem_t write_mem;
|
|
|
|
uc_read_mem_t read_mem;
|
|
|
|
uc_args_void_t release; // release resource when uc_close()
|
|
|
|
uc_args_uc_u64_t set_pc; // set PC for tracecode
|
|
|
|
uc_args_int_t stop_interrupt; // check if the interrupt should stop emulation
|
|
|
|
|
2016-03-26 00:24:28 +00:00
|
|
|
uc_args_uc_t init_arch, cpu_exec_init_all;
|
2015-11-11 17:43:41 +00:00
|
|
|
uc_args_int_uc_t vm_start;
|
2015-08-21 07:04:50 +00:00
|
|
|
uc_args_tcg_enable_t tcg_enabled;
|
|
|
|
uc_args_uc_long_t tcg_exec_init;
|
|
|
|
uc_args_uc_ram_size_t memory_map;
|
2015-11-28 01:25:53 +00:00
|
|
|
uc_args_uc_ram_size_ptr_t memory_map_ptr;
|
2015-08-30 04:17:30 +00:00
|
|
|
uc_mem_unmap_t memory_unmap;
|
2015-08-26 20:29:54 +00:00
|
|
|
uc_readonly_mem_t readonly_mem;
|
2015-10-27 06:37:03 +00:00
|
|
|
uc_mem_redirect_t mem_redirect;
|
2016-09-23 14:38:21 +00:00
|
|
|
// TODO: remove current_cpu, as it's a flag for something else ("cpu running"?)
|
|
|
|
CPUState *cpu, *current_cpu;
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2017-05-13 17:16:17 +00:00
|
|
|
uc_insn_hook_validate insn_hook_validate;
|
|
|
|
|
2018-03-02 14:17:49 +00:00
|
|
|
// qemu/cpus.c
|
|
|
|
bool mttcg_enabled;
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Backports commit e8feb96fcc6c16eab8923332e86ff4ef0e2ac276 from qemu
2018-03-14 15:14:31 +00:00
|
|
|
int tcg_region_inited;
|
2018-03-02 14:17:49 +00:00
|
|
|
|
2018-02-15 18:03:22 +00:00
|
|
|
// qemu/exec.c
|
|
|
|
MemoryRegion *system_memory;
|
|
|
|
MemoryRegion io_mem_rom;
|
|
|
|
MemoryRegion io_mem_notdirty;
|
|
|
|
MemoryRegion io_mem_unassigned;
|
|
|
|
MemoryRegion io_mem_watch;
|
|
|
|
RAMList ram_list;
|
2018-02-26 00:24:46 +00:00
|
|
|
// Renamed from "alloc_hint" in qemu.
|
|
|
|
unsigned phys_map_node_alloc_hint;
|
2018-02-26 16:54:41 +00:00
|
|
|
// Used when a target's page bits can vary
|
|
|
|
int target_page_bits;
|
|
|
|
bool target_page_bits_decided;
|
2018-02-15 18:03:22 +00:00
|
|
|
|
|
|
|
// qemu/cpu-exec.c
|
|
|
|
BounceBuffer bounce;
|
2018-03-02 14:26:31 +00:00
|
|
|
CPUState *tcg_current_rr_cpu;
|
2018-02-15 18:03:22 +00:00
|
|
|
|
2018-03-05 17:01:31 +00:00
|
|
|
// qemu/user-exec.c
|
2018-03-05 17:06:06 +00:00
|
|
|
uintptr_t helper_retaddr;
|
2018-03-05 17:01:31 +00:00
|
|
|
|
2018-02-15 18:03:22 +00:00
|
|
|
// qemu/memory.c
|
2018-03-12 02:23:21 +00:00
|
|
|
FlatView *empty_view;
|
2018-03-12 01:48:48 +00:00
|
|
|
GHashTable *flat_views;
|
2018-02-15 18:03:22 +00:00
|
|
|
bool global_dirty_log;
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
/* This is a multi-level map on the virtual address space.
|
|
|
|
The bottom level has pointers to PageDesc. */
|
2015-11-06 12:08:12 +00:00
|
|
|
void **l1_map; // qemu/translate-all.c
|
2015-08-21 07:04:50 +00:00
|
|
|
size_t l1_map_size;
|
2018-02-26 16:10:57 +00:00
|
|
|
int v_l1_size;
|
|
|
|
int v_l1_shift;
|
|
|
|
int v_l2_levels;
|
|
|
|
uintptr_t qemu_real_host_page_size;
|
|
|
|
intptr_t qemu_real_host_page_mask;
|
|
|
|
uintptr_t qemu_host_page_size;
|
|
|
|
intptr_t qemu_host_page_mask;
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
/* code generation context */
|
2018-03-14 13:49:41 +00:00
|
|
|
// translate-all.c
|
|
|
|
void *tcg_ctx; // actually "TCGContext *tcg_ctx"
|
|
|
|
void *tcg_init_ctx; // actually "TCGContext *init_tcg_contex"
|
2018-03-14 13:08:01 +00:00
|
|
|
TBContext tb_ctx;
|
2018-03-14 13:49:41 +00:00
|
|
|
bool parallel_cpus;
|
|
|
|
|
|
|
|
// tcg.c
|
|
|
|
void *tcg_ctxs; // actually "TCGContext **tcg_ctxs"
|
|
|
|
unsigned int n_tcg_ctxs;
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Backports commit e8feb96fcc6c16eab8923332e86ff4ef0e2ac276 from qemu
2018-03-14 15:14:31 +00:00
|
|
|
struct tcg_region_state region;
|
2018-03-14 19:22:54 +00:00
|
|
|
void *cpu_env; // actually "TCGv_env cpu_env"
|
2018-02-27 16:12:36 +00:00
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
/* memory.c */
|
|
|
|
unsigned memory_region_transaction_depth;
|
|
|
|
bool memory_region_update_pending;
|
|
|
|
bool ioeventfd_update_pending;
|
|
|
|
QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners;
|
|
|
|
QTAILQ_HEAD(, AddressSpace) address_spaces;
|
2016-01-07 23:41:45 +00:00
|
|
|
MachineState *machine_state;
|
2015-08-21 07:04:50 +00:00
|
|
|
// qom/object.c
|
|
|
|
GHashTable *type_table;
|
|
|
|
Type type_interface;
|
|
|
|
Object *root;
|
2016-01-07 23:41:45 +00:00
|
|
|
Object *owner;
|
2015-08-21 07:04:50 +00:00
|
|
|
bool enumerating_types;
|
|
|
|
// util/module.c
|
|
|
|
ModuleTypeList init_type_list[MODULE_INIT_MAX];
|
|
|
|
// hw/intc/apic_common.c
|
|
|
|
DeviceState *vapic;
|
|
|
|
int apic_no;
|
|
|
|
bool mmio_registered;
|
|
|
|
bool apic_report_tpr_access;
|
|
|
|
|
2016-01-16 08:44:02 +00:00
|
|
|
// linked lists containing hooks per type
|
|
|
|
struct list hook[UC_HOOK_MAX];
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
// hook to count number of instructions for uc_emu_start()
|
2016-01-16 08:44:02 +00:00
|
|
|
uc_hook count_hook;
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
size_t emu_counter; // current counter of uc_emu_start()
|
|
|
|
size_t emu_count; // save counter of uc_emu_start()
|
|
|
|
|
|
|
|
uint64_t block_addr; // save the last block address we hooked
|
|
|
|
|
|
|
|
bool init_tcg; // already initialized local TCGv variables?
|
|
|
|
bool stop_request; // request to immediately stop emulation - for uc_emu_stop()
|
2016-01-27 16:56:55 +00:00
|
|
|
bool quit_request; // request to quit the current TB, but continue to emulate - for uc_mem_protect()
|
2015-08-21 07:04:50 +00:00
|
|
|
bool emulation_done; // emulation is done by uc_emu_start()
|
|
|
|
QemuThread timer; // timer for emulation timeout
|
|
|
|
uint64_t timeout; // timeout for uc_emu_start()
|
|
|
|
|
|
|
|
uint64_t invalid_addr; // invalid address to be accessed
|
|
|
|
int invalid_error; // invalid memory code: 1 = READ, 2 = WRITE, 3 = CODE
|
|
|
|
|
|
|
|
uint64_t addr_end; // address where emulation stops (@end param of uc_emu_start())
|
|
|
|
|
|
|
|
int thumb; // thumb mode for ARM
|
2015-08-25 06:50:55 +00:00
|
|
|
// full TCG cache leads to middle-block break in the last translation?
|
|
|
|
bool block_full;
|
2016-07-14 16:40:45 +00:00
|
|
|
int size_arg; // what tcg arg slot do we need to update with the size of the block?
|
2015-08-28 06:19:32 +00:00
|
|
|
MemoryRegion **mapped_blocks;
|
2015-08-26 04:52:18 +00:00
|
|
|
uint32_t mapped_block_count;
|
2015-09-04 07:40:47 +00:00
|
|
|
uint32_t mapped_block_cache_index;
|
2015-09-02 08:13:12 +00:00
|
|
|
void *qemu_thread_data; // to support cross compile to Windows (qemu-thread-win32.c)
|
2015-08-31 08:00:44 +00:00
|
|
|
uint32_t target_page_size;
|
|
|
|
uint32_t target_page_align;
|
2015-09-28 02:58:43 +00:00
|
|
|
uint64_t next_pc; // save next PC for some special cases
|
2017-06-16 05:22:38 +00:00
|
|
|
bool hook_insert; // insert new hook at begin of the hook list (append by default)
|
2018-03-03 21:48:17 +00:00
|
|
|
|
|
|
|
// util/cacheinfo.c
|
|
|
|
int qemu_icache_linesize;
|
|
|
|
int qemu_dcache_linesize;
|
2015-08-21 07:04:50 +00:00
|
|
|
};
|
|
|
|
|
// Metadata stub for the variable-size cpu context used with uc_context_*().
// Presumably allocated as sizeof(struct uc_context) + size bytes, with
// `data` holding the raw CPU-state snapshot -- confirm at the allocation
// site in uc_context_alloc()/uc_context_save().
struct uc_context {
    size_t size;  // number of bytes in data[]
    char data[];  // C99 flexible array member (was a GNU zero-length array)
};
2015-08-21 07:04:50 +00:00
|
|
|
// check if this address is mapped in (via uc_mem_map())
|
2015-08-28 01:03:17 +00:00
|
|
|
MemoryRegion *memory_mapping(struct uc_struct* uc, uint64_t address);
|
2015-08-21 07:04:50 +00:00
|
|
|
|
2018-03-03 21:48:17 +00:00
|
|
|
// Defined in util/cacheinfo.c. Made externally linked to
|
|
|
|
// allow calling it directly.
|
|
|
|
void init_cache_info(struct uc_struct *uc);
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
#endif
|
2017-02-24 13:37:19 +00:00
|
|
|
/* vim: set ts=4 noet: */
|