tcg: Add EXCP_ATOMIC

When we cannot emulate an atomic operation within a parallel
context, this exception allows us to stop the world and try
again in a serial context.

Backports commit fdbc2b5722f6092e47181a947c90fd4bdcc1c121 from qemu

Also backports parts of commit 02d57ea115b7669f588371c86484a2e8ebc369be
This commit is contained in:
Richard Henderson 2018-02-27 11:12:36 -05:00 committed by Lioncash
parent d5510a546f
commit e35aacd5ae
No known key found for this signature in database
GPG key ID: 4E3C3CC1031BA9C7
22 changed files with 138 additions and 0 deletions

View file

@ -210,6 +210,8 @@ struct uc_struct {
/* code generation context */
void *tcg_ctx; // for "TCGContext tcg_ctx" in qemu/translate-all.c
bool parallel_cpus; // for "bool parallel_cpus" in qemu/translate-all.c
/* memory.c */
unsigned memory_region_transaction_depth;
bool memory_region_update_pending;

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_aarch64
#define cpu_exec_init cpu_exec_init_aarch64
#define cpu_exec_init_all cpu_exec_init_all_aarch64
#define cpu_exec_step_atomic cpu_exec_step_atomic_aarch64
#define cpu_flush_icache_range cpu_flush_icache_range_aarch64
#define cpu_gen_init cpu_gen_init_aarch64
#define cpu_get_address_space cpu_get_address_space_aarch64
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_aarch64
#define cpu_lduw_code cpu_lduw_code_aarch64
#define cpu_loop_exit cpu_loop_exit_aarch64
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_aarch64
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_aarch64
#define cpu_loop_exit_restore cpu_loop_exit_restore_aarch64
#define cpu_memory_rw_debug cpu_memory_rw_debug_aarch64

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_aarch64eb
#define cpu_exec_init cpu_exec_init_aarch64eb
#define cpu_exec_init_all cpu_exec_init_all_aarch64eb
#define cpu_exec_step_atomic cpu_exec_step_atomic_aarch64eb
#define cpu_flush_icache_range cpu_flush_icache_range_aarch64eb
#define cpu_gen_init cpu_gen_init_aarch64eb
#define cpu_get_address_space cpu_get_address_space_aarch64eb
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_aarch64eb
#define cpu_lduw_code cpu_lduw_code_aarch64eb
#define cpu_loop_exit cpu_loop_exit_aarch64eb
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_aarch64eb
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_aarch64eb
#define cpu_loop_exit_restore cpu_loop_exit_restore_aarch64eb
#define cpu_memory_rw_debug cpu_memory_rw_debug_aarch64eb

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_arm
#define cpu_exec_init cpu_exec_init_arm
#define cpu_exec_init_all cpu_exec_init_all_arm
#define cpu_exec_step_atomic cpu_exec_step_atomic_arm
#define cpu_flush_icache_range cpu_flush_icache_range_arm
#define cpu_gen_init cpu_gen_init_arm
#define cpu_get_address_space cpu_get_address_space_arm
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_arm
#define cpu_lduw_code cpu_lduw_code_arm
#define cpu_loop_exit cpu_loop_exit_arm
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_arm
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_arm
#define cpu_loop_exit_restore cpu_loop_exit_restore_arm
#define cpu_memory_rw_debug cpu_memory_rw_debug_arm

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_armeb
#define cpu_exec_init cpu_exec_init_armeb
#define cpu_exec_init_all cpu_exec_init_all_armeb
#define cpu_exec_step_atomic cpu_exec_step_atomic_armeb
#define cpu_flush_icache_range cpu_flush_icache_range_armeb
#define cpu_gen_init cpu_gen_init_armeb
#define cpu_get_address_space cpu_get_address_space_armeb
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_armeb
#define cpu_lduw_code cpu_lduw_code_armeb
#define cpu_loop_exit cpu_loop_exit_armeb
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_armeb
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_armeb
#define cpu_loop_exit_restore cpu_loop_exit_restore_armeb
#define cpu_memory_rw_debug cpu_memory_rw_debug_armeb

View file

@ -44,3 +44,9 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
}
siglongjmp(cpu->jmp_env, 1);
}
/* Raise EXCP_ATOMIC and longjmp out of the execution loop.
 *
 * Used when an atomic operation cannot be emulated from within a parallel
 * context: the caller of cpu_exec() sees EXCP_ATOMIC and is expected to
 * retry the instruction serially (see cpu_exec_step_atomic).
 *
 * @cpu: CPU raising the exception
 * @pc:  host return address; cpu_loop_exit_restore() uses it to recover
 *       guest state before the siglongjmp back to cpu->jmp_env
 */
void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
{
cpu->exception_index = EXCP_ATOMIC;
cpu_loop_exit_restore(cpu, pc);
}

View file

@ -23,6 +23,8 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "exec/tb-hash.h"
@ -83,6 +85,31 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
return ret;
}
/* Execute the code without caching the generated code. An interpreter
 * could be used if available.
 *
 * @cpu:           CPU to run
 * @max_cycles:    instruction budget for the one-shot block (clamped to
 *                 CF_COUNT_MASK)
 * @orig_tb:       the oversized/expired TB we are re-translating
 * @ignore_icount: when true, translate with CF_IGNORE_ICOUNT so the block
 *                 is not subject to icount accounting
 */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    /* Fix: honour ignore_icount — it was previously accepted but never
     * used.  Upstream QEMU ORs CF_IGNORE_ICOUNT into the cflags here
     * (the same flag this file already uses in cpu_exec_step). */
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = orig_tb;

    /* execute the generated code */
    // Unicorn: commented out
    //trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    /* The block was generated for one-shot use: discard it again. */
    tb_phys_invalidate(env->uc, tb, -1);
    tb_free(env->uc, tb);
}
static TranslationBlock *tb_htable_lookup(CPUState *cpu,
target_ulong pc,
target_ulong cs_base,
@ -385,11 +412,71 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
smp_rmb();
*last_tb = NULL;
break;
case TB_EXIT_ICOUNT_EXPIRED:
{
/* Instruction counter expired. */
#ifdef CONFIG_USER_ONLY
abort();
#else
int insns_left = cpu->icount_decr.u32;
if (cpu->icount_extra && insns_left >= 0) {
/* Refill decrementer and continue execution. */
cpu->icount_extra += insns_left;
insns_left = MIN(0xffff, cpu->icount_extra);
cpu->icount_extra -= insns_left;
cpu->icount_decr.u16.low = insns_left;
} else {
if (insns_left > 0) {
/* Execute remaining instructions. */
cpu_exec_nocache(cpu, insns_left, *last_tb, false);
// Unicorn: commented out
//align_clocks(sc, cpu);
}
cpu->exception_index = EXCP_INTERRUPT;
*last_tb = NULL;
cpu_loop_exit(cpu);
}
break;
#endif
}
default:
break;
}
}
/* Translate and run exactly one guest instruction as a throw-away,
 * uncached TB.  Helper for cpu_exec_step_atomic(). */
static void cpu_exec_step(struct uc_struct *uc, CPUState *cpu)
{
    CPUArchState *arch_env = (CPUArchState *)cpu->env_ptr;
    target_ulong pc, cs_base;
    uint32_t tb_flags;
    TranslationBlock *step_tb;

    /* Snapshot the CPU state that keys translation, then build a
     * single-instruction block outside the TB cache. */
    cpu_get_tb_cpu_state(arch_env, &pc, &cs_base, &tb_flags);
    step_tb = tb_gen_code(cpu, pc, cs_base, tb_flags,
                          1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
    step_tb->orig_tb = NULL;

    /* execute the generated code */
    // Unicorn: commented out
    //trace_exec_tb_nocache(tb, pc);
    cpu_tb_exec(cpu, step_tb);

    /* One-shot block: tear it down immediately after execution. */
    tb_phys_invalidate(uc, step_tb, -1);
    tb_free(uc, step_tb);
}
/* Handle EXCP_ATOMIC: re-execute the offending instruction serially.
 *
 * We only reach this point after cpu_loop_exit_atomic(), so parallel_cpus
 * is known to be true on entry; it is restored before returning. */
void cpu_exec_step_atomic(struct uc_struct *uc, CPUState *cpu)
{
    // Unicorn: commented out
    //start_exclusive();

    /* Since we got here, we know that parallel_cpus must be true. */
    uc->parallel_cpus = false;
    cpu_exec_step(uc, cpu);
    uc->parallel_cpus = true;

    // Unicorn: commented out
    //end_exclusive();
}
/* main execution loop */
int cpu_exec(struct uc_struct *uc, CPUState *cpu)

View file

@ -176,6 +176,8 @@ static bool tcg_exec_all(struct uc_struct* uc)
//printf(">>> got HLT!!!\n");
finish = true;
break;
} else if (r == EXCP_ATOMIC) {
cpu_exec_step_atomic(uc, cpu);
}
} else if (cpu->stop || cpu->stopped) {
printf(">>> got stopped!!!\n");

View file

@ -259,6 +259,7 @@ symbols = (
'cpu_dump_statistics',
'cpu_exec_init',
'cpu_exec_init_all',
'cpu_exec_step_atomic',
'cpu_flush_icache_range',
'cpu_gen_init',
'cpu_get_address_space',
@ -278,6 +279,7 @@ symbols = (
'cpu_ldub_code',
'cpu_lduw_code',
'cpu_loop_exit',
'cpu_loop_exit_atomic',
'cpu_loop_exit_noexc',
'cpu_loop_exit_restore',
'cpu_memory_rw_debug',

View file

@ -30,6 +30,7 @@
#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
/* some important defines:
*

View file

@ -71,6 +71,7 @@ void cpu_exec_init(CPUState *env, void *opaque);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
#if !defined(CONFIG_USER_ONLY)
/**
@ -232,6 +233,8 @@ struct TranslationBlock {
uint8_t *tc_search; /* pointer to search data */
/* next matching tb for physical address. */
struct TranslationBlock *phys_hash_next;
/* original tb when cflags has CF_NOCACHE */
struct TranslationBlock *orig_tb;
/* first and second physical page containing code. The lower bit
of the pointer tells the index in page_next[] */
struct TranslationBlock *page_next[2];

View file

@ -65,6 +65,7 @@ bool tcg_enabled(struct uc_struct *uc);
struct uc_struct;
void cpu_exec_init_all(struct uc_struct *uc);
void cpu_exec_step_atomic(struct uc_struct *uc, CPUState *cpu);
/**
* set_preferred_target_page_bits:

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_m68k
#define cpu_exec_init cpu_exec_init_m68k
#define cpu_exec_init_all cpu_exec_init_all_m68k
#define cpu_exec_step_atomic cpu_exec_step_atomic_m68k
#define cpu_flush_icache_range cpu_flush_icache_range_m68k
#define cpu_gen_init cpu_gen_init_m68k
#define cpu_get_address_space cpu_get_address_space_m68k
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_m68k
#define cpu_lduw_code cpu_lduw_code_m68k
#define cpu_loop_exit cpu_loop_exit_m68k
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_m68k
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_m68k
#define cpu_loop_exit_restore cpu_loop_exit_restore_m68k
#define cpu_memory_rw_debug cpu_memory_rw_debug_m68k

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_mips
#define cpu_exec_init cpu_exec_init_mips
#define cpu_exec_init_all cpu_exec_init_all_mips
#define cpu_exec_step_atomic cpu_exec_step_atomic_mips
#define cpu_flush_icache_range cpu_flush_icache_range_mips
#define cpu_gen_init cpu_gen_init_mips
#define cpu_get_address_space cpu_get_address_space_mips
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_mips
#define cpu_lduw_code cpu_lduw_code_mips
#define cpu_loop_exit cpu_loop_exit_mips
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips
#define cpu_loop_exit_restore cpu_loop_exit_restore_mips
#define cpu_memory_rw_debug cpu_memory_rw_debug_mips

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_mips64
#define cpu_exec_init cpu_exec_init_mips64
#define cpu_exec_init_all cpu_exec_init_all_mips64
#define cpu_exec_step_atomic cpu_exec_step_atomic_mips64
#define cpu_flush_icache_range cpu_flush_icache_range_mips64
#define cpu_gen_init cpu_gen_init_mips64
#define cpu_get_address_space cpu_get_address_space_mips64
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_mips64
#define cpu_lduw_code cpu_lduw_code_mips64
#define cpu_loop_exit cpu_loop_exit_mips64
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips64
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips64
#define cpu_loop_exit_restore cpu_loop_exit_restore_mips64
#define cpu_memory_rw_debug cpu_memory_rw_debug_mips64

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_mips64el
#define cpu_exec_init cpu_exec_init_mips64el
#define cpu_exec_init_all cpu_exec_init_all_mips64el
#define cpu_exec_step_atomic cpu_exec_step_atomic_mips64el
#define cpu_flush_icache_range cpu_flush_icache_range_mips64el
#define cpu_gen_init cpu_gen_init_mips64el
#define cpu_get_address_space cpu_get_address_space_mips64el
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_mips64el
#define cpu_lduw_code cpu_lduw_code_mips64el
#define cpu_loop_exit cpu_loop_exit_mips64el
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips64el
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips64el
#define cpu_loop_exit_restore cpu_loop_exit_restore_mips64el
#define cpu_memory_rw_debug cpu_memory_rw_debug_mips64el

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_mipsel
#define cpu_exec_init cpu_exec_init_mipsel
#define cpu_exec_init_all cpu_exec_init_all_mipsel
#define cpu_exec_step_atomic cpu_exec_step_atomic_mipsel
#define cpu_flush_icache_range cpu_flush_icache_range_mipsel
#define cpu_gen_init cpu_gen_init_mipsel
#define cpu_get_address_space cpu_get_address_space_mipsel
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_mipsel
#define cpu_lduw_code cpu_lduw_code_mipsel
#define cpu_loop_exit cpu_loop_exit_mipsel
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mipsel
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_mipsel
#define cpu_loop_exit_restore cpu_loop_exit_restore_mipsel
#define cpu_memory_rw_debug cpu_memory_rw_debug_mipsel

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_powerpc
#define cpu_exec_init cpu_exec_init_powerpc
#define cpu_exec_init_all cpu_exec_init_all_powerpc
#define cpu_exec_step_atomic cpu_exec_step_atomic_powerpc
#define cpu_flush_icache_range cpu_flush_icache_range_powerpc
#define cpu_gen_init cpu_gen_init_powerpc
#define cpu_get_address_space cpu_get_address_space_powerpc
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_powerpc
#define cpu_lduw_code cpu_lduw_code_powerpc
#define cpu_loop_exit cpu_loop_exit_powerpc
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_powerpc
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_powerpc
#define cpu_loop_exit_restore cpu_loop_exit_restore_powerpc
#define cpu_memory_rw_debug cpu_memory_rw_debug_powerpc

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_sparc
#define cpu_exec_init cpu_exec_init_sparc
#define cpu_exec_init_all cpu_exec_init_all_sparc
#define cpu_exec_step_atomic cpu_exec_step_atomic_sparc
#define cpu_flush_icache_range cpu_flush_icache_range_sparc
#define cpu_gen_init cpu_gen_init_sparc
#define cpu_get_address_space cpu_get_address_space_sparc
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_sparc
#define cpu_lduw_code cpu_lduw_code_sparc
#define cpu_loop_exit cpu_loop_exit_sparc
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_sparc
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_sparc
#define cpu_loop_exit_restore cpu_loop_exit_restore_sparc
#define cpu_memory_rw_debug cpu_memory_rw_debug_sparc

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_sparc64
#define cpu_exec_init cpu_exec_init_sparc64
#define cpu_exec_init_all cpu_exec_init_all_sparc64
#define cpu_exec_step_atomic cpu_exec_step_atomic_sparc64
#define cpu_flush_icache_range cpu_flush_icache_range_sparc64
#define cpu_gen_init cpu_gen_init_sparc64
#define cpu_get_address_space cpu_get_address_space_sparc64
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_sparc64
#define cpu_lduw_code cpu_lduw_code_sparc64
#define cpu_loop_exit cpu_loop_exit_sparc64
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_sparc64
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_sparc64
#define cpu_loop_exit_restore cpu_loop_exit_restore_sparc64
#define cpu_memory_rw_debug cpu_memory_rw_debug_sparc64

View file

@ -1753,6 +1753,14 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
cs_base = tb->cs_base;
flags = tb->flags;
tb_phys_invalidate(cpu->uc, tb, -1);
if (tb->cflags & CF_NOCACHE) {
if (tb->orig_tb) {
/* Invalidate original TB if this TB was generated in
* cpu_exec_nocache() */
tb_phys_invalidate(cpu->uc, tb->orig_tb, -1);
}
tb_free(env->uc, tb);
}
/* FIXME: In theory this could raise an exception. In practice
we have already translated the block once so it's probably ok. */
tb_gen_code(cpu, pc, cs_base, (int)flags, cflags);

View file

@ -253,6 +253,7 @@
#define cpu_dump_statistics cpu_dump_statistics_x86_64
#define cpu_exec_init cpu_exec_init_x86_64
#define cpu_exec_init_all cpu_exec_init_all_x86_64
#define cpu_exec_step_atomic cpu_exec_step_atomic_x86_64
#define cpu_flush_icache_range cpu_flush_icache_range_x86_64
#define cpu_gen_init cpu_gen_init_x86_64
#define cpu_get_address_space cpu_get_address_space_x86_64
@ -272,6 +273,7 @@
#define cpu_ldub_code cpu_ldub_code_x86_64
#define cpu_lduw_code cpu_lduw_code_x86_64
#define cpu_loop_exit cpu_loop_exit_x86_64
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_x86_64
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_x86_64
#define cpu_loop_exit_restore cpu_loop_exit_restore_x86_64
#define cpu_memory_rw_debug cpu_memory_rw_debug_x86_64