cpu: Move cpu_exec_* to tcg_ops

Backports 48c1a3e303b5a2cca48679645ad3fbb914db741a
This commit is contained in:
Eduardo Habkost 2021-03-04 16:56:51 -05:00 committed by Lioncash
parent eb38ac1809
commit 03cc62e39c
9 changed files with 33 additions and 28 deletions

View file

@@ -400,7 +400,8 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
if (cc->tcg_ops.cpu_exec_interrupt &&
cc->tcg_ops.cpu_exec_interrupt(cpu, interrupt_request)) {
cpu->exception_index = -1;
*last_tb = NULL;
}
@@ -539,7 +540,9 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
atomic_mb_set(&uc->current_cpu, cpu);
atomic_mb_set(&uc->tcg_current_rr_cpu, cpu);
cc->cpu_exec_enter(cpu);
if (cc->tcg_ops.cpu_exec_enter) {
cc->tcg_ops.cpu_exec_enter(cpu);
}
cpu->exception_index = -1;
env->invalid_error = UC_ERR_OK;
@@ -592,7 +595,9 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
}
}
cc->cpu_exec_exit(cpu);
if (cc->tcg_ops.cpu_exec_exit) {
cc->tcg_ops.cpu_exec_exit(cpu);
}
// Unicorn: flush JIT cache because emulation might stop in
// the middle of translation, thus generating incomplete code.

View file

@@ -98,6 +98,12 @@ typedef struct TcgCpuOperations {
*/
void (*synchronize_from_tb)(CPUState *cpu,
const struct TranslationBlock *tb);
/** @cpu_exec_enter: Callback for cpu_exec preparation */
void (*cpu_exec_enter)(CPUState *cpu);
/** @cpu_exec_exit: Callback for cpu_exec cleanup */
void (*cpu_exec_exit)(CPUState *cpu);
/** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
} TcgCpuOperations;
@@ -147,9 +153,6 @@ typedef struct TcgCpuOperations {
* @debug_check_watchpoint: Callback: return true if the architectural
* watchpoint whose address has matched should really fire.
* @vmsd: State description for migration.
* @cpu_exec_enter: Callback for cpu_exec preparation.
* @cpu_exec_exit: Callback for cpu_exec cleanup.
* @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
* @adjust_watchpoint_address: Perform a target-specific adjustment to an
* address before attempting to match it against watchpoints.
*
@@ -198,9 +201,6 @@ typedef struct CPUClass {
const struct VMStateDescription *vmsd;
void (*cpu_exec_enter)(CPUState *cpu);
void (*cpu_exec_exit)(CPUState *cpu);
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
/* Keep non-pointer data at the end to minimize holes. */

View file

@@ -295,9 +295,9 @@ static void cpu_class_init(struct uc_struct *uc, ObjectClass *klass, void *data)
k->get_memory_mapping = cpu_common_get_memory_mapping;
k->debug_excp_handler = cpu_common_noop;
k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
k->cpu_exec_enter = cpu_common_noop;
k->cpu_exec_exit = cpu_common_noop;
k->cpu_exec_interrupt = cpu_common_exec_interrupt;
k->tcg_ops.cpu_exec_enter = cpu_common_noop;
k->tcg_ops.cpu_exec_exit = cpu_common_noop;
k->tcg_ops.cpu_exec_interrupt = cpu_common_exec_interrupt;
k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
dc->realize = cpu_common_realizefn;
/*

View file

@@ -1513,7 +1513,7 @@ static void arm_v7m_class_init(struct uc_struct *uc, ObjectClass *oc, void *data
cc->do_interrupt = arm_v7m_cpu_do_interrupt;
#endif
cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
cc->tcg_ops.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
}
@@ -2097,7 +2097,6 @@ static void arm_cpu_class_init(struct uc_struct *uc, ObjectClass *oc, void *data
cc->class_by_name = arm_cpu_class_by_name;
cc->has_work = arm_cpu_has_work;
cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
//cc->dump_state = arm_cpu_dump_state;
cc->set_pc = arm_cpu_set_pc;
#ifndef CONFIG_USER_ONLY
@@ -2110,6 +2109,7 @@ static void arm_cpu_class_init(struct uc_struct *uc, ObjectClass *oc, void *data
#endif
#ifdef CONFIG_TCG
cc->tcg_ops.initialize = arm_translate_init;
cc->tcg_ops.cpu_exec_interrupt = arm_cpu_exec_interrupt;
cc->tcg_ops.synchronize_from_tb = arm_cpu_synchronize_from_tb;
cc->tlb_fill = arm_cpu_tlb_fill;
cc->debug_excp_handler = arm_debug_excp_handler;

View file

@@ -407,7 +407,9 @@ static void aarch64_cpu_class_init(struct uc_struct *uc, ObjectClass *oc, void *
{
CPUClass *cc = CPU_CLASS(uc, oc);
cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
#ifdef CONFIG_TCG
cc->tcg_ops.cpu_exec_interrupt = arm_cpu_exec_interrupt;
#endif /* CONFIG_TCG */
}
void aarch64_cpu_register(struct uc_struct *uc, const ARMCPUInfo *info)

View file

@@ -5873,13 +5873,8 @@ static void x86_cpu_common_class_init(struct uc_struct *uc, ObjectClass *oc, voi
cc->class_by_name = x86_cpu_class_by_name;
cc->parse_features = x86_cpu_parse_featurestr;
cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
cc->do_interrupt = x86_cpu_do_interrupt;
cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc;
cc->tcg_ops.synchronize_from_tb = x86_cpu_synchronize_from_tb;
cc->get_arch_id = x86_cpu_get_arch_id;
cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
@@ -5887,15 +5882,18 @@ static void x86_cpu_common_class_init(struct uc_struct *uc, ObjectClass *oc, voi
cc->get_memory_mapping = x86_cpu_get_memory_mapping;
cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
cc->debug_excp_handler = breakpoint_handler;
#endif
cc->cpu_exec_enter = x86_cpu_exec_enter;
cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
cc->tcg_ops.initialize = tcg_x86_init;
cc->tcg_ops.synchronize_from_tb = x86_cpu_synchronize_from_tb;
cc->tcg_ops.cpu_exec_enter = x86_cpu_exec_enter;
cc->tcg_ops.cpu_exec_exit = x86_cpu_exec_exit;
cc->tcg_ops.cpu_exec_interrupt = x86_cpu_exec_interrupt;
cc->do_interrupt = x86_cpu_do_interrupt;
cc->tlb_fill = x86_cpu_tlb_fill;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
cc->debug_excp_handler = breakpoint_handler;
#endif
}
void x86_cpu_register_types(void *opaque)

View file

@@ -266,7 +266,7 @@ static void m68k_cpu_class_init(struct uc_struct *uc, ObjectClass *c, void *data
cc->class_by_name = m68k_cpu_class_by_name;
cc->has_work = m68k_cpu_has_work;
cc->do_interrupt = m68k_cpu_do_interrupt;
cc->cpu_exec_interrupt = m68k_cpu_exec_interrupt;
cc->tcg_ops.cpu_exec_interrupt = m68k_cpu_exec_interrupt;
cc->set_pc = m68k_cpu_set_pc;
cc->tlb_fill = m68k_cpu_tlb_fill;
#if defined(CONFIG_SOFTMMU)

View file

@@ -174,7 +174,6 @@ static void mips_cpu_class_init(struct uc_struct *uc, ObjectClass *c, void *data
cc->class_by_name = mips_cpu_class_by_name;
cc->has_work = mips_cpu_has_work;
cc->do_interrupt = mips_cpu_do_interrupt;
cc->cpu_exec_interrupt = mips_cpu_exec_interrupt;
cc->set_pc = mips_cpu_set_pc;
#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = mips_cpu_do_transaction_failed;
@@ -183,6 +182,7 @@ static void mips_cpu_class_init(struct uc_struct *uc, ObjectClass *c, void *data
#endif
#ifdef CONFIG_TCG
cc->tcg_ops.initialize = mips_tcg_init;
cc->tcg_ops.cpu_exec_interrupt = mips_cpu_exec_interrupt;
cc->tcg_ops.synchronize_from_tb = mips_cpu_synchronize_from_tb;
cc->tlb_fill = mips_cpu_tlb_fill;
#endif

View file

@@ -841,7 +841,7 @@ static void sparc_cpu_class_init(struct uc_struct *uc, ObjectClass *oc, void *da
cc->parse_features = sparc_cpu_parse_features;
cc->has_work = sparc_cpu_has_work;
cc->do_interrupt = sparc_cpu_do_interrupt;
cc->cpu_exec_interrupt = sparc_cpu_exec_interrupt;
cc->tcg_ops.cpu_exec_interrupt = sparc_cpu_exec_interrupt;
//cc->dump_state = sparc_cpu_dump_state;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
cc->memory_rw_debug = sparc_cpu_memory_rw_debug;