mirror of https://github.com/yuzu-emu/unicorn.git
synced 2025-01-03 15:45:35 +00:00

remove uc->cpus

This commit is contained in:
parent 60a7371ec2
commit cb615fdba7
@@ -1,13 +0,0 @@
-/* By Dang Hoang Vu <dang.hvu -at- gmail.com>, 2015 */
-
-#ifndef UC_QEMU_MACRO_H
-#define UC_QEMU_MACRO_H
-
-#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
-#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &uc->cpus, node)
-#define CPU_FOREACH_SAFE(cpu, next_cpu) \
-    QTAILQ_FOREACH_SAFE(cpu, &cpu->uc->cpus, node, next_cpu)
-#define first_cpu QTAILQ_FIRST(&uc->cpus)
-
-#endif
-
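Side note, not part of the commit: the deleted header wrapped QEMU's intrusive tail-queue macros (QTAILQ_*, from qemu/queue.h), which is how CPU_FOREACH could walk every CPUState chained on uc->cpus without any separate list allocation. A minimal, self-contained sketch of the same iteration pattern; FakeCPU, FakeUC, and FAKE_CPU_FOREACH are illustrative stand-ins, not Unicorn names:

#include <stdio.h>
#include <stddef.h>

typedef struct FakeCPU {
    int cpu_index;
    struct FakeCPU *next;  /* stands in for QTAILQ_ENTRY(CPUState) node */
} FakeCPU;

typedef struct {
    FakeCPU *first;        /* stands in for QTAILQ_HEAD(CPUTailQ, CPUState) */
} FakeUC;

/* What CPU_FOREACH(cpu) effectively expanded to: walk every CPU owned by uc. */
#define FAKE_CPU_FOREACH(uc, cpu) \
    for ((cpu) = (uc)->first; (cpu) != NULL; (cpu) = (cpu)->next)

int main(void)
{
    FakeCPU c1 = { 1, NULL }, c0 = { 0, &c1 };
    FakeUC uc = { &c0 };
    FakeCPU *cpu;

    FAKE_CPU_FOREACH(&uc, cpu)  /* before this commit: possibly many CPUs */
        printf("cpu %d\n", cpu->cpu_index);

    /* After this commit Unicorn tracks exactly one CPU, so every such loop
       collapses to a single pointer dereference: uc->cpu. */
    return 0;
}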
@@ -33,8 +33,6 @@
 
 #define WRITE_BYTE_L(x, b) (x = (x & ~0xff) | (b & 0xff))
 
-QTAILQ_HEAD(CPUTailQ, CPUState);
-
 typedef struct ModuleEntry {
     void (*init)(void);
     QTAILQ_ENTRY(ModuleEntry) node;
@@ -148,7 +146,6 @@ struct uc_struct {
     QemuMutex qemu_global_mutex; // qemu/cpus.c
     QemuCond qemu_cpu_cond; // qemu/cpus.c
     QemuCond *tcg_halt_cond; // qemu/cpus.c
-    struct CPUTailQ cpus; // qemu/cpu-exec.c
     uc_err errnum; // qemu/cpu-exec.c
     AddressSpace as;
     query_t query;
@@ -171,8 +168,8 @@ struct uc_struct {
     uc_mem_unmap_t memory_unmap;
     uc_readonly_mem_t readonly_mem;
     uc_mem_redirect_t mem_redirect;
-    // list of cpu
-    void* cpu;
+    // TODO: remove current_cpu, as it's a flag for something else ("cpu running"?)
+    CPUState *cpu, *current_cpu;
 
     MemoryRegion *system_memory; // qemu/exec.c
     MemoryRegion io_mem_rom; // qemu/exec.c
@@ -180,7 +177,6 @@ struct uc_struct {
     MemoryRegion io_mem_unassigned; // qemu/exec.c
     MemoryRegion io_mem_watch; // qemu/exec.c
     RAMList ram_list; // qemu/exec.c
-    CPUState *next_cpu; // qemu/cpus.c
    BounceBuffer bounce; // qemu/cpu-exec.c
     volatile sig_atomic_t exit_request; // qemu/cpu-exec.c
     spinlock_t x86_global_cpu_lock; // for X86 arch only
@@ -212,7 +208,6 @@ struct uc_struct {
     int apic_no;
     bool mmio_registered;
     bool apic_report_tpr_access;
-    CPUState *current_cpu;
 
     // linked lists containing hooks per type
     struct list hook[UC_HOOK_MAX];
@@ -250,8 +245,6 @@ struct uc_struct {
     uint64_t next_pc; // save next PC for some special cases
 };
 
-#include "qemu_macro.h"
-
 // check if this address is mapped in (via uc_mem_map())
 MemoryRegion *memory_mapping(struct uc_struct* uc, uint64_t address);

qemu/cpus.c | 56

@@ -61,29 +61,18 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
 
 int resume_all_vcpus(struct uc_struct *uc)
 {
-    CPUState *cpu;
-
-    {
-        // Fix call multiple time (vu).
-        // We have to check whether this is the second time, then reset all CPU.
-        bool created = false;
-        CPU_FOREACH(cpu) {
-            created |= cpu->created;
-        }
-        if (!created) {
-            CPU_FOREACH(cpu) {
-                cpu->created = true;
-                cpu->halted = 0;
-                if (qemu_init_vcpu(cpu))
-                    return -1;
-            }
-        }
-    }
+    CPUState *cpu = uc->cpu;
+    // Fix call multiple time (vu).
+    // We have to check whether this is the second time, then reset all CPU.
+    if (!cpu->created) {
+        cpu->created = true;
+        cpu->halted = 0;
+        if (qemu_init_vcpu(cpu))
+            return -1;
+    }
 
     //qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
-    CPU_FOREACH(cpu) {
-        cpu_resume(cpu);
-    }
+    cpu_resume(cpu);
     qemu_tcg_cpu_loop(uc);
 
     return 0;
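Side note, not part of the commit: the created flag is what makes resume_all_vcpus() safe to call once per uc_emu_start() invocation; the vCPU is initialized lazily on the first call and only resumed on later ones. A hedged sketch of that guard in isolation (SketchCPU and the function names are hypothetical):

#include <stdbool.h>
#include <stdio.h>

typedef struct {
    bool created;
    int halted;
} SketchCPU;

static int sketch_init_vcpu(SketchCPU *cpu)
{
    (void)cpu;
    printf("init vcpu (runs once)\n");
    return 0;
}

/* Mirrors the shape of the new resume_all_vcpus(): init on first call,
 * resume on every call. */
static int sketch_resume(SketchCPU *cpu)
{
    if (!cpu->created) {           /* second call onwards skips this */
        cpu->created = true;
        cpu->halted = 0;
        if (sketch_init_vcpu(cpu))
            return -1;
    }
    printf("resume vcpu\n");
    return 0;
}

int main(void)
{
    SketchCPU cpu = { false, 1 };
    sketch_resume(&cpu);  /* init + resume */
    sketch_resume(&cpu);  /* resume only   */
    return 0;
}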
@@ -104,14 +93,12 @@ int qemu_init_vcpu(CPUState *cpu)
 
 static void *qemu_tcg_cpu_loop(struct uc_struct *uc)
 {
-    CPUState *cpu;
+    CPUState *cpu = uc->cpu;
 
     //qemu_tcg_init_cpu_signals();
 
     qemu_mutex_lock(&uc->qemu_global_mutex);
-    CPU_FOREACH(cpu) {
-        cpu->created = true;
-    }
+    cpu->created = true;
     qemu_cond_signal(&uc->qemu_cpu_cond);
 
     while (1) {
@@ -119,15 +106,12 @@ static void *qemu_tcg_cpu_loop(struct uc_struct *uc)
             break;
         }
 
-    CPU_FOREACH(cpu) {
-        cpu->created = false;
-        qemu_cond_destroy(cpu->halt_cond);
-        g_free(cpu->halt_cond);
-        cpu->halt_cond = NULL;
-    }
-
+    cpu->created = false;
+    qemu_cond_destroy(cpu->halt_cond);
+    g_free(cpu->halt_cond);
+    cpu->halt_cond = NULL;
 
     qemu_mutex_unlock(&uc->qemu_global_mutex);
 
     return NULL;
 }
@@ -158,14 +142,8 @@ static bool tcg_exec_all(struct uc_struct* uc)
 {
     int r;
     bool finish = false;
-    CPUState *next_cpu = uc->next_cpu;
-
-    if (next_cpu == NULL) {
-        next_cpu = first_cpu;
-    }
-
-    for (; next_cpu != NULL && !uc->exit_request; next_cpu = CPU_NEXT(next_cpu)) {
-        CPUState *cpu = next_cpu;
+    while (!uc->exit_request) {
+        CPUState *cpu = uc->cpu;
         CPUArchState *env = cpu->env_ptr;
 
         //qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
@@ -152,25 +152,23 @@ void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
 void cpu_tlb_reset_dirty_all(struct uc_struct *uc,
                              ram_addr_t start1, ram_addr_t length)
 {
-    CPUState *cpu;
+    CPUState *cpu = uc->cpu;
     CPUArchState *env;
 
-    CPU_FOREACH(cpu) {
-        int mmu_idx;
+    int mmu_idx;
 
-        env = cpu->env_ptr;
-        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-            unsigned int i;
+    env = cpu->env_ptr;
+    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+        unsigned int i;
 
-            for (i = 0; i < CPU_TLB_SIZE; i++) {
-                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
-                                      start1, length);
-            }
+        for (i = 0; i < CPU_TLB_SIZE; i++) {
+            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
+                                  start1, length);
+        }
 
-            for (i = 0; i < CPU_VTLB_SIZE; i++) {
-                tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
-                                      start1, length);
-            }
-        }
+        for (i = 0; i < CPU_VTLB_SIZE; i++) {
+            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
+                                  start1, length);
+        }
     }
 }

qemu/exec.c | 39

@@ -382,14 +382,10 @@ address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
 
 CPUState *qemu_get_cpu(struct uc_struct *uc, int index)
 {
-    CPUState *cpu;
-
-    CPU_FOREACH(cpu) {
-        if (cpu->cpu_index == index) {
-            return cpu;
-        }
+    CPUState *cpu = uc->cpu;
+    if (cpu->cpu_index == index) {
+        return cpu;
     }
 
     return NULL;
 }
@@ -413,31 +409,19 @@ void cpu_exec_init(CPUArchState *env, void *opaque)
 {
     struct uc_struct *uc = opaque;
     CPUState *cpu = ENV_GET_CPU(env);
-    CPUState *some_cpu;
-    int cpu_index;
 
     cpu->uc = uc;
     env->uc = uc;
 
-#if defined(CONFIG_USER_ONLY)
-    cpu_list_lock();
-#endif
-    cpu_index = 0;
-    CPU_FOREACH(some_cpu) {
-        cpu_index++;
-    }
-    cpu->cpu_index = cpu_index;
+    cpu->cpu_index = 0;
     cpu->numa_node = 0;
     QTAILQ_INIT(&cpu->breakpoints);
     QTAILQ_INIT(&cpu->watchpoints);
 
     cpu->as = &uc->as;
 
-    QTAILQ_INSERT_TAIL(&uc->cpus, cpu, node);
-#if defined(CONFIG_USER_ONLY)
-    cpu_list_unlock();
-#endif
+    //QTAILQ_INSERT_TAIL(&uc->cpus, cpu, node);
+    // TODO: assert uc does not already have a cpu?
+    uc->cpu = cpu;
 }
 
 #if defined(TARGET_HAS_ICE)
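Side note, not part of the commit: before this change cpu_exec_init() derived cpu_index by counting the CPUs already queued on uc->cpus; with exactly one CPU per engine the count is always zero, so registration collapses to a constant index plus a pointer store, which is also what the new "assert uc does not already have a cpu?" TODO hints at. A sketch under that assumption (Engine and Cpu are illustrative types, not Unicorn's):

#include <assert.h>
#include <stddef.h>

typedef struct Cpu { int cpu_index; } Cpu;
typedef struct Engine { Cpu *cpu; } Engine;

static void engine_register_cpu(Engine *uc, Cpu *cpu)
{
    assert(uc->cpu == NULL);  /* a second CPU per engine would be a bug */
    cpu->cpu_index = 0;       /* only CPU, so the index is constant */
    uc->cpu = cpu;
}

int main(void)
{
    Engine uc = { NULL };
    Cpu cpu;
    engine_register_cpu(&uc, &cpu);
    return 0;
}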
@@ -1518,19 +1502,10 @@ static void tcg_commit(MemoryListener *listener)
 {
     struct uc_struct* uc = listener->address_space_filter->uc;
 
-    CPUState *cpu;
-
     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
     /* XXX: slow ! */
-    CPU_FOREACH(cpu) {
-        /* FIXME: Disentangle the cpu.h circular files deps so we can
-           directly get the right CPU from listener. */
-        if (cpu->tcg_as_listener != listener) {
-            continue;
-        }
-        tlb_flush(cpu, 1);
-    }
+    tlb_flush(uc->cpu, 1);
 }
 
 void address_space_init_dispatch(AddressSpace *as)
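Side note, not part of the commit: tcg_commit() runs when a memory-map transaction commits; any guest translations a CPU has cached in its TLB may now point at stale mappings, so the whole TLB is dropped, and with a single CPU there is no listener matching left to do. A toy illustration of the invalidate-on-commit idea (SketchTLB is hypothetical and much simpler than QEMU's real TLB layout):

#include <stdio.h>
#include <string.h>

#define TLB_SLOTS 8

typedef struct {
    unsigned long vaddr[TLB_SLOTS];  /* cached translations */
    int valid[TLB_SLOTS];
} SketchTLB;

static void tlb_flush_all(SketchTLB *tlb)
{
    memset(tlb->valid, 0, sizeof(tlb->valid));  /* drop every entry */
}

static void memory_map_commit(SketchTLB *cpu_tlb)
{
    /* topology changed: cached translations can no longer be trusted */
    tlb_flush_all(cpu_tlb);
    printf("TLB flushed after memory-map commit\n");
}

int main(void)
{
    SketchTLB tlb = { { 0 }, { 0 } };
    tlb.valid[0] = 1;
    memory_map_commit(&tlb);
    return 0;
}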
@@ -58,7 +58,7 @@ void cpu_smm_update(CPUX86State *env)
 {
     struct uc_struct *uc = x86_env_get_cpu(env)->parent_obj.uc;
 
-    if (smm_set && smm_arg && CPU(x86_env_get_cpu(env)) == first_cpu) {
+    if (smm_set && smm_arg && CPU(x86_env_get_cpu(env)) == uc->cpu) {
         smm_set(!!(env->hflags & HF_SMM_MASK), smm_arg);
     }
 }
@@ -43,7 +43,7 @@ void qemu_mutex_lock_iothread(struct uc_struct* uc)
         qemu_mutex_lock(&uc->qemu_global_mutex);
     } else {
         if (qemu_mutex_trylock(&uc->qemu_global_mutex)) {
-            qemu_cpu_kick_thread(first_cpu);
+            qemu_cpu_kick_thread(uc->cpu);
             qemu_mutex_lock(&uc->qemu_global_mutex);
         }
     }
@@ -197,38 +197,21 @@ typedef struct GuestPhysListener {
     MemoryListener listener;
 } GuestPhysListener;
 
-static CPUState *find_paging_enabled_cpu(struct uc_struct* uc)
-{
-    CPUState *cpu;
-
-    CPU_FOREACH(cpu) {
-        if (cpu_paging_enabled(cpu)) {
-            return cpu;
-        }
-    }
-
-    return NULL;
-}
-
 void qemu_get_guest_memory_mapping(struct uc_struct *uc,
                                    MemoryMappingList *list,
                                    const GuestPhysBlockList *guest_phys_blocks,
                                    Error **errp)
 {
-    CPUState *cpu, *first_paging_enabled_cpu;
+    CPUState *cpu = uc->cpu;
     GuestPhysBlock *block;
     ram_addr_t offset, length;
 
-    first_paging_enabled_cpu = find_paging_enabled_cpu(uc);
-    if (first_paging_enabled_cpu) {
-        for (cpu = first_paging_enabled_cpu; cpu != NULL;
-             cpu = CPU_NEXT(cpu)) {
-            Error *err = NULL;
-            cpu_get_memory_mapping(cpu, list, &err);
-            if (err) {
-                error_propagate(errp, err);
-                return;
-            }
+    if (cpu_paging_enabled(cpu)) {
+        Error *err = NULL;
+        cpu_get_memory_mapping(cpu, list, &err);
+        if (err) {
+            error_propagate(errp, err);
+            return;
+        }
         return;
     }
@@ -24,14 +24,11 @@
 
 bool cpu_exists(struct uc_struct* uc, int64_t id)
 {
-    CPUState *cpu;
+    CPUState *cpu = uc->cpu;
+    CPUClass *cc = CPU_GET_CLASS(uc, cpu);
 
-    CPU_FOREACH(cpu) {
-        CPUClass *cc = CPU_GET_CLASS(uc, cpu);
-
-        if (cc->get_arch_id(cpu) == id) {
-            return true;
-        }
+    if (cc->get_arch_id(cpu) == id) {
+        return true;
     }
     return false;
 }
@@ -284,45 +284,33 @@ static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
-    CPUState *other_cs;
-    struct uc_struct *uc = env->uc;
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush(other_cs, 1);
-    }
+    // TODO: issue #642
+    // tlb_flush(other_cpu, 1);
 }
 
 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
 {
-    CPUState *other_cs;
-    struct uc_struct *uc = env->uc;
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush(other_cs, value == 0);
-    }
+    // TODO: issue #642
+    // tlb_flush(other_cpu, value == 0);
 }
 
 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
-    CPUState *other_cs;
-    struct uc_struct *uc = env->uc;
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
-    }
+    // TODO: issue #642
+    // tlb_flush(other_cpu, value & TARGET_PAGE_MASK);
 }
 
 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
 {
-    CPUState *other_cs;
-    struct uc_struct *uc = env->uc;
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
-    }
+    // TODO: issue #642
+    // tlb_flush(other_cpu, value & TARGET_PAGE_MASK);
 }
 
 static const ARMCPRegInfo cp_reginfo[] = {
@@ -1874,37 +1862,28 @@ static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbi_aa64_va_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
 {
-    CPUState *other_cs;
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
-    struct uc_struct *uc = env->uc;
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_page(other_cs, pageaddr);
-    }
+    // TODO: issue #642
+    // tlb_flush(other_cpu, pageaddr);
 }
 
 static void tlbi_aa64_vaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
 {
-    CPUState *other_cs;
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
-    struct uc_struct *uc = env->uc;
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_page(other_cs, pageaddr);
-    }
+    // TODO: issue #642
+    // tlb_flush(other_cpu, pageaddr);
 }
 
 static void tlbi_aa64_asid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
-    CPUState *other_cs;
     int asid = extract64(value, 48, 16);
-    struct uc_struct *uc = env->uc;
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush(other_cs, asid == 0);
-    }
+    // TODO: issue #642
+    // tlb_flush(other_cpu, asid == 0);
 }
 
 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
|
@@ -34,7 +34,7 @@ void arm64_release(void* ctx)
 
 void arm64_reg_reset(struct uc_struct *uc)
 {
-    CPUArchState *env = first_cpu->env_ptr;
+    CPUArchState *env = uc->cpu->env_ptr;
     memset(env->xregs, 0, sizeof(env->xregs));
 
     env->pc = 0;
@@ -42,7 +42,7 @@ void arm64_reg_reset(struct uc_struct *uc)
 
 int arm64_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -74,7 +74,7 @@ int arm64_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int co
 
 int arm64_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -38,7 +38,7 @@ void arm_reg_reset(struct uc_struct *uc)
     (void)uc;
     CPUArchState *env;
 
-    env = first_cpu->env_ptr;
+    env = uc->cpu->env_ptr;
     memset(env->regs, 0, sizeof(env->regs));
 
     env->pc = 0;
@@ -49,7 +49,7 @@ int arm_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int coun
     CPUState *mycpu;
     int i;
 
-    mycpu = first_cpu;
+    mycpu = uc->cpu;
 
     for (i = 0; i < count; i++) {
         unsigned int regid = regs[i];
@@ -84,7 +84,7 @@ int arm_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int coun
 
 int arm_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -135,7 +135,7 @@ static bool arm_stop_interrupt(int intno)
 
 static uc_err arm_query(struct uc_struct *uc, uc_query_type type, size_t *result)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     uint32_t mode;
 
     switch(type) {
@@ -578,11 +578,7 @@ void helper_mwait(CPUX86State *env, int next_eip_addend)
     cpu = x86_env_get_cpu(env);
     cs = CPU(cpu);
     /* XXX: not complete but not completely erroneous */
-    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
-        do_pause(cpu);
-    } else {
-        do_hlt(cpu);
-    }
+    do_hlt(cpu);
 }
 
 void helper_pause(CPUX86State *env, int next_eip_addend)
@@ -46,7 +46,7 @@ void x86_release(void *ctx)
 
 void x86_reg_reset(struct uc_struct *uc)
 {
-    CPUArchState *env = first_cpu->env_ptr;
+    CPUArchState *env = uc->cpu->env_ptr;
 
     env->features[FEAT_1_EDX] = CPUID_CX8 | CPUID_CMOV | CPUID_SSE2 | CPUID_FXSR | CPUID_SSE | CPUID_CLFLUSH;
     env->features[FEAT_1_ECX] = CPUID_EXT_SSSE3 | CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_AES;
@@ -139,7 +139,7 @@ void x86_reg_reset(struct uc_struct *uc)
 
 int x86_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -636,7 +636,7 @@ int x86_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int coun
 
 int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -17,7 +17,7 @@ static void m68k_set_pc(struct uc_struct *uc, uint64_t address)
 
 void m68k_reg_reset(struct uc_struct *uc)
 {
-    CPUArchState *env = first_cpu->env_ptr;
+    CPUArchState *env = uc->cpu->env_ptr;
 
     memset(env->aregs, 0, sizeof(env->aregs));
     memset(env->dregs, 0, sizeof(env->dregs));
@@ -27,7 +27,7 @@ void m68k_reg_reset(struct uc_struct *uc)
 
 int m68k_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -52,7 +52,7 @@ int m68k_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int cou
 
 int m68k_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -1798,37 +1798,43 @@ target_ulong helper_emt(void)
 target_ulong helper_dvpe(CPUMIPSState *env)
 {
     struct uc_struct *uc = env->uc;
-    CPUState *other_cs = first_cpu;
+    CPUState *other_cs = uc->cpu;
     target_ulong prev = env->mvp->CP0_MVPControl;
 
+    // TODO: #642 SMP groups
+    /*
     CPU_FOREACH(other_cs) {
         MIPSCPU *other_cpu = MIPS_CPU(uc, other_cs);
-        /* Turn off all VPEs except the one executing the dvpe. */
+        // Turn off all VPEs except the one executing the dvpe.
         if (&other_cpu->env != env) {
             other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
             mips_vpe_sleep(other_cpu);
         }
     }
+    */
     return prev;
 }
 
 target_ulong helper_evpe(CPUMIPSState *env)
 {
     struct uc_struct *uc = env->uc;
-    CPUState *other_cs = first_cpu;
+    CPUState *other_cs = uc->cpu;
     target_ulong prev = env->mvp->CP0_MVPControl;
 
+    // TODO: #642 SMP groups
+    /*
     CPU_FOREACH(other_cs) {
         MIPSCPU *other_cpu = MIPS_CPU(uc, other_cs);
 
         if (&other_cpu->env != env
-            /* If the VPE is WFI, don't disturb its sleep. */
+            // If the VPE is WFI, don't disturb its sleep.
             && !mips_vpe_is_wfi(other_cpu)) {
-            /* Enable the VPE. */
+            // Enable the VPE.
             other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
-            mips_vpe_wake(other_cpu); /* And wake it up. */
+            mips_vpe_wake(other_cpu); // And wake it up.
         }
     }
+    */
     return prev;
 }
 #endif /* !CONFIG_USER_ONLY */
@@ -61,7 +61,7 @@ void mips_release(void *ctx)
 void mips_reg_reset(struct uc_struct *uc)
 {
     (void)uc;
-    CPUArchState *env = first_cpu->env_ptr;
+    CPUArchState *env = uc->cpu->env_ptr;
     memset(env->active_tc.gpr, 0, sizeof(env->active_tc.gpr));
 
     env->active_tc.PC = 0;
@@ -69,7 +69,7 @@ void mips_reg_reset(struct uc_struct *uc)
 
 int mips_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -92,7 +92,7 @@ int mips_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int cou
 
 int mips_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -28,7 +28,7 @@ static void sparc_set_pc(struct uc_struct *uc, uint64_t address)
 
 void sparc_reg_reset(struct uc_struct *uc)
 {
-    CPUArchState *env = first_cpu->env_ptr;
+    CPUArchState *env = uc->cpu->env_ptr;
 
     memset(env->gregs, 0, sizeof(env->gregs));
     memset(env->fpr, 0, sizeof(env->fpr));
@@ -41,7 +41,7 @@ void sparc_reg_reset(struct uc_struct *uc)
 
 int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -70,7 +70,7 @@ int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int co
 
 int sparc_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -28,7 +28,7 @@ static void sparc_set_pc(struct uc_struct *uc, uint64_t address)
 
 void sparc_reg_reset(struct uc_struct *uc)
 {
-    CPUArchState *env = first_cpu->env_ptr;
+    CPUArchState *env = uc->cpu->env_ptr;
 
     memset(env->gregs, 0, sizeof(env->gregs));
     memset(env->fpr, 0, sizeof(env->fpr));
@@ -41,7 +41,7 @@ void sparc_reg_reset(struct uc_struct *uc)
 
 int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -70,7 +70,7 @@ int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int co
 
 int sparc_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count)
 {
-    CPUState *mycpu = first_cpu;
+    CPUState *mycpu = uc->cpu;
     int i;
 
     for (i = 0; i < count; i++) {
@@ -855,9 +855,7 @@ void tb_flush(CPUArchState *env1)
     }
     tcg_ctx->tb_ctx.nb_tbs = 0;
 
-    CPU_FOREACH(cpu) {
-        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
-    }
+    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
 
     memset(tcg_ctx->tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx->tb_ctx.tb_phys_hash));
     page_flush_tb(uc);
@@ -982,7 +980,7 @@ void tb_phys_invalidate(struct uc_struct *uc,
                         TranslationBlock *tb, tb_page_addr_t page_addr)
 {
     TCGContext *tcg_ctx = uc->tcg_ctx;
-    CPUState *cpu;
+    CPUState *cpu = uc->cpu;
     PageDesc *p;
     unsigned int h, n1;
     tb_page_addr_t phys_pc;
@@ -1009,10 +1007,8 @@ void tb_phys_invalidate(struct uc_struct *uc,
 
     /* remove the TB from the hash list */
     h = tb_jmp_cache_hash_func(tb->pc);
-    CPU_FOREACH(cpu) {
-        if (cpu->tb_jmp_cache[h] == tb) {
-            cpu->tb_jmp_cache[h] = NULL;
-        }
+    if (cpu->tb_jmp_cache[h] == tb) {
+        cpu->tb_jmp_cache[h] = NULL;
     }
 
     /* suppress this TB from the two jump lists */

uc.c | 14

@@ -156,9 +156,6 @@ uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result)
     uc->arch = arch;
     uc->mode = mode;
 
-    // uc->cpus = QTAILQ_HEAD_INITIALIZER(uc->cpus);
-    uc->cpus.tqh_first = NULL;
-    uc->cpus.tqh_last = &(uc->cpus.tqh_first);
     // uc->ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
     uc->ram_list.blocks.tqh_first = NULL;
     uc->ram_list.blocks.tqh_last = &(uc->ram_list.blocks.tqh_first);
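Side note, not part of the commit: the deleted lines are QTAILQ_HEAD_INITIALIZER expanded by hand; an empty tail queue holds tqh_first = NULL with tqh_last pointing at tqh_first's own address, so tail insertion never has to special-case the empty queue. A stand-alone sketch of that invariant (struct names here are illustrative):

#include <assert.h>
#include <stddef.h>

struct node;
struct head {
    struct node *tqh_first;   /* first element, NULL when empty */
    struct node **tqh_last;   /* address of the last next-pointer */
};

int main(void)
{
    struct head cpus;
    /* what uc_open() used to do before the cpus list was removed: */
    cpus.tqh_first = NULL;
    cpus.tqh_last = &cpus.tqh_first;
    assert(*cpus.tqh_last == NULL);   /* empty-queue invariant holds */
    return 0;
}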
@@ -289,7 +286,6 @@ uc_err uc_close(uc_engine *uc)
     int i;
     struct list_item *cur;
     struct hook *hook;
-    CPUState *cpu;
 
     // Cleanup internally.
     if (uc->release)
@@ -297,11 +293,9 @@ uc_err uc_close(uc_engine *uc)
     g_free(uc->tcg_ctx);
 
     // Cleanup CPU.
-    CPU_FOREACH(cpu) {
-        g_free(cpu->tcg_as_listener);
-        g_free(cpu->thread);
-        g_free(cpu->halt_cond);
-    }
+    g_free(uc->cpu->tcg_as_listener);
+    g_free(uc->cpu->thread);
+    g_free(uc->cpu->halt_cond);
 
     // Cleanup all objects.
     OBJECT(uc->machine_state->accelerator)->ref = 1;
@@ -311,7 +305,6 @@ uc_err uc_close(uc_engine *uc)
 
     object_unref(uc, OBJECT(uc->machine_state->accelerator));
     object_unref(uc, OBJECT(uc->machine_state));
-    object_unref(uc, uc->cpu);
     object_unref(uc, OBJECT(&uc->io_mem_notdirty));
     object_unref(uc, OBJECT(&uc->io_mem_unassigned));
     object_unref(uc, OBJECT(&uc->io_mem_rom));
@@ -634,6 +627,7 @@ uc_err uc_emu_stop(uc_engine *uc)
         return UC_ERR_OK;
 
     uc->stop_request = true;
+    // TODO: make this atomic somehow?
     if (uc->current_cpu) {
         // exit the current TB
         cpu_exit(uc->current_cpu);
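Side note, not part of the commit: the new TODO flags that stop_request is written from the thread calling uc_emu_stop() while the execution loop polls it from another thread. One way such a handshake can be made race-free is a C11 atomic flag; this sketch shows only the pattern the TODO points at and is not Unicorn's code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool stop_request = false;

static void emu_stop(void)              /* caller thread */
{
    atomic_store(&stop_request, true);  /* made visible to the loop thread */
}

static void emu_loop(void)              /* execution thread */
{
    int steps = 0;
    while (!atomic_load(&stop_request)) {
        steps++;                        /* stand-in for executing one TB */
        if (steps == 3)
            emu_stop();                 /* self-stop to end the demo */
    }
    printf("stopped after %d steps\n", steps);
}

int main(void)
{
    emu_loop();
    return 0;
}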