diff --git a/include/uc_priv.h b/include/uc_priv.h
index 25a05c4a..0ec411e5 100644
--- a/include/uc_priv.h
+++ b/include/uc_priv.h
@@ -210,6 +210,8 @@ struct uc_struct {
     /* code generation context */
     void *tcg_ctx;       // for "TCGContext tcg_ctx" in qemu/translate-all.c
 
+    bool parallel_cpus;  // for "bool parallel_cpus" in qemu/translate-all.c
+
     /* memory.c */
     unsigned memory_region_transaction_depth;
     bool memory_region_update_pending;
diff --git a/qemu/aarch64.h b/qemu/aarch64.h
index 16cf1571..7c3182bd 100644
--- a/qemu/aarch64.h
+++ b/qemu/aarch64.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_aarch64
 #define cpu_exec_init cpu_exec_init_aarch64
 #define cpu_exec_init_all cpu_exec_init_all_aarch64
+#define cpu_exec_step_atomic cpu_exec_step_atomic_aarch64
 #define cpu_flush_icache_range cpu_flush_icache_range_aarch64
 #define cpu_gen_init cpu_gen_init_aarch64
 #define cpu_get_address_space cpu_get_address_space_aarch64
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_aarch64
 #define cpu_lduw_code cpu_lduw_code_aarch64
 #define cpu_loop_exit cpu_loop_exit_aarch64
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_aarch64
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_aarch64
 #define cpu_loop_exit_restore cpu_loop_exit_restore_aarch64
 #define cpu_memory_rw_debug cpu_memory_rw_debug_aarch64
diff --git a/qemu/aarch64eb.h b/qemu/aarch64eb.h
index 55a17d2d..0bb37d42 100644
--- a/qemu/aarch64eb.h
+++ b/qemu/aarch64eb.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_aarch64eb
 #define cpu_exec_init cpu_exec_init_aarch64eb
 #define cpu_exec_init_all cpu_exec_init_all_aarch64eb
+#define cpu_exec_step_atomic cpu_exec_step_atomic_aarch64eb
 #define cpu_flush_icache_range cpu_flush_icache_range_aarch64eb
 #define cpu_gen_init cpu_gen_init_aarch64eb
 #define cpu_get_address_space cpu_get_address_space_aarch64eb
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_aarch64eb
 #define cpu_lduw_code cpu_lduw_code_aarch64eb
 #define cpu_loop_exit cpu_loop_exit_aarch64eb
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_aarch64eb
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_aarch64eb
 #define cpu_loop_exit_restore cpu_loop_exit_restore_aarch64eb
 #define cpu_memory_rw_debug cpu_memory_rw_debug_aarch64eb
diff --git a/qemu/arm.h b/qemu/arm.h
index 8e6867bf..f124969f 100644
--- a/qemu/arm.h
+++ b/qemu/arm.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_arm
 #define cpu_exec_init cpu_exec_init_arm
 #define cpu_exec_init_all cpu_exec_init_all_arm
+#define cpu_exec_step_atomic cpu_exec_step_atomic_arm
 #define cpu_flush_icache_range cpu_flush_icache_range_arm
 #define cpu_gen_init cpu_gen_init_arm
 #define cpu_get_address_space cpu_get_address_space_arm
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_arm
 #define cpu_lduw_code cpu_lduw_code_arm
 #define cpu_loop_exit cpu_loop_exit_arm
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_arm
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_arm
 #define cpu_loop_exit_restore cpu_loop_exit_restore_arm
 #define cpu_memory_rw_debug cpu_memory_rw_debug_arm
diff --git a/qemu/armeb.h b/qemu/armeb.h
index bb471f3b..eb6e3187 100644
--- a/qemu/armeb.h
+++ b/qemu/armeb.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_armeb
 #define cpu_exec_init cpu_exec_init_armeb
 #define cpu_exec_init_all cpu_exec_init_all_armeb
+#define cpu_exec_step_atomic cpu_exec_step_atomic_armeb
 #define cpu_flush_icache_range cpu_flush_icache_range_armeb
 #define cpu_gen_init cpu_gen_init_armeb
 #define cpu_get_address_space cpu_get_address_space_armeb
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_armeb
 #define cpu_lduw_code cpu_lduw_code_armeb
 #define cpu_loop_exit cpu_loop_exit_armeb
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_armeb
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_armeb
 #define cpu_loop_exit_restore cpu_loop_exit_restore_armeb
 #define cpu_memory_rw_debug cpu_memory_rw_debug_armeb
diff --git a/qemu/cpu-exec-common.c b/qemu/cpu-exec-common.c
index 8e0b7fc8..852661a5 100644
--- a/qemu/cpu-exec-common.c
+++ b/qemu/cpu-exec-common.c
@@ -44,3 +44,9 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
     }
     siglongjmp(cpu->jmp_env, 1);
 }
+
+void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
+{
+    cpu->exception_index = EXCP_ATOMIC;
+    cpu_loop_exit_restore(cpu, pc);
+}
diff --git a/qemu/cpu-exec.c b/qemu/cpu-exec.c
index b9123115..067bb8fe 100644
--- a/qemu/cpu-exec.c
+++ b/qemu/cpu-exec.c
@@ -23,6 +23,8 @@
 #include "cpu.h"
 #include "exec/exec-all.h"
 #include "tcg.h"
+#include "qemu/atomic.h"
+#include "qemu/timer.h"
 #include "sysemu/sysemu.h"
 #include "exec/address-spaces.h"
 #include "exec/tb-hash.h"
@@ -83,6 +85,31 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
     return ret;
 }
 
+/* Execute the code without caching the generated code. An interpreter
+   could be used if available. */
+static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
+                             TranslationBlock *orig_tb, bool ignore_icount)
+{
+    TranslationBlock *tb;
+    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
+
+    /* Should never happen.
+       We only end up here when an existing TB is too long.  */
+    if (max_cycles > CF_COUNT_MASK) {
+        max_cycles = CF_COUNT_MASK;
+    }
+
+    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
+                     max_cycles | CF_NOCACHE);
+    tb->orig_tb = orig_tb;
+    /* execute the generated code */
+    // Unicorn: commented out
+    //trace_exec_tb_nocache(tb, tb->pc);
+    cpu_tb_exec(cpu, tb);
+    tb_phys_invalidate(env->uc, tb, -1);
+    tb_free(env->uc, tb);
+}
+
 static TranslationBlock *tb_htable_lookup(CPUState *cpu,
                                           target_ulong pc,
                                           target_ulong cs_base,
@@ -385,11 +412,71 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
         smp_rmb();
         *last_tb = NULL;
         break;
+    case TB_EXIT_ICOUNT_EXPIRED:
+    {
+        /* Instruction counter expired.  */
+#ifdef CONFIG_USER_ONLY
+        abort();
+#else
+        int insns_left = cpu->icount_decr.u32;
+        if (cpu->icount_extra && insns_left >= 0) {
+            /* Refill decrementer and continue execution.  */
+            cpu->icount_extra += insns_left;
+            insns_left = MIN(0xffff, cpu->icount_extra);
+            cpu->icount_extra -= insns_left;
+            cpu->icount_decr.u16.low = insns_left;
+        } else {
+            if (insns_left > 0) {
+                /* Execute remaining instructions.  */
+                cpu_exec_nocache(cpu, insns_left, *last_tb, false);
+                // Unicorn: commented out
+                //align_clocks(sc, cpu);
+            }
+            cpu->exception_index = EXCP_INTERRUPT;
+            *last_tb = NULL;
+            cpu_loop_exit(cpu);
+        }
+        break;
+#endif
+    }
     default:
         break;
     }
 }
 
+static void cpu_exec_step(struct uc_struct *uc, CPUState *cpu)
+{
+    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
+    TranslationBlock *tb;
+    target_ulong cs_base, pc;
+    uint32_t flags;
+
+    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+    tb = tb_gen_code(cpu, pc, cs_base, flags,
+                     1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
+    tb->orig_tb = NULL;
+    /* execute the generated code */
+    // Unicorn: commented out
+    //trace_exec_tb_nocache(tb, pc);
+    cpu_tb_exec(cpu, tb);
+    tb_phys_invalidate(uc, tb, -1);
+    tb_free(uc, tb);
+}
+
+void cpu_exec_step_atomic(struct uc_struct *uc, CPUState *cpu)
+{
+    // Unicorn: commented out
+    //start_exclusive();
+
+    /* Since we got here, we know that parallel_cpus must be true.  */
+    uc->parallel_cpus = false;
+    cpu_exec_step(uc, cpu);
+    uc->parallel_cpus = true;
+
+    // Unicorn: commented out
+    //end_exclusive();
+}
+
 /* main execution loop */
 
 int cpu_exec(struct uc_struct *uc, CPUState *cpu)
diff --git a/qemu/cpus.c b/qemu/cpus.c
index e1c924bd..c74df596 100644
--- a/qemu/cpus.c
+++ b/qemu/cpus.c
@@ -176,6 +176,8 @@ static bool tcg_exec_all(struct uc_struct* uc)
                 //printf(">>> got HLT!!!\n");
                 finish = true;
                 break;
+            } else if (r == EXCP_ATOMIC) {
+                cpu_exec_step_atomic(uc, cpu);
             }
         } else if (cpu->stop || cpu->stopped) {
             printf(">>> got stopped!!!\n");
diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index ccd64d2e..37fb3579 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -259,6 +259,7 @@ symbols = (
     'cpu_dump_statistics',
     'cpu_exec_init',
     'cpu_exec_init_all',
+    'cpu_exec_step_atomic',
     'cpu_flush_icache_range',
     'cpu_gen_init',
     'cpu_get_address_space',
@@ -278,6 +279,7 @@ symbols = (
     'cpu_ldub_code',
     'cpu_lduw_code',
     'cpu_loop_exit',
+    'cpu_loop_exit_atomic',
     'cpu_loop_exit_noexc',
     'cpu_loop_exit_restore',
     'cpu_memory_rw_debug',
diff --git a/qemu/include/exec/cpu-all.h b/qemu/include/exec/cpu-all.h
index f14b2caa..a4e0c437 100644
--- a/qemu/include/exec/cpu-all.h
+++ b/qemu/include/exec/cpu-all.h
@@ -30,6 +30,7 @@
 #define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
 #define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
 #define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */
+#define EXCP_ATOMIC     0x10005 /* stop-the-world and emulate atomic */
 
 /* some important defines:
  *
diff --git a/qemu/include/exec/exec-all.h b/qemu/include/exec/exec-all.h
index b26d957f..1565c0c3 100644
--- a/qemu/include/exec/exec-all.h
+++ b/qemu/include/exec/exec-all.h
@@ -71,6 +71,7 @@
 void cpu_exec_init(CPUState *env, void *opaque);
 void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
 void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
+void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
 
 #if !defined(CONFIG_USER_ONLY)
 /**
@@ -232,6 +233,8 @@ struct TranslationBlock {
     uint8_t *tc_search;  /* pointer to search data */
     /* next matching tb for physical address. */
     struct TranslationBlock *phys_hash_next;
+    /* original tb when cflags has CF_NOCACHE */
+    struct TranslationBlock *orig_tb;
     /* first and second physical page containing code. The lower bit
        of the pointer tells the index in page_next[] */
     struct TranslationBlock *page_next[2];
diff --git a/qemu/include/qemu-common.h b/qemu/include/qemu-common.h
index 3209ea1a..4cdb41f2 100644
--- a/qemu/include/qemu-common.h
+++ b/qemu/include/qemu-common.h
@@ -65,6 +65,7 @@ bool tcg_enabled(struct uc_struct *uc);
 
 struct uc_struct;
 void cpu_exec_init_all(struct uc_struct *uc);
+void cpu_exec_step_atomic(struct uc_struct *uc, CPUState *cpu);
 
 /**
  * set_preferred_target_page_bits:
diff --git a/qemu/m68k.h b/qemu/m68k.h
index d0ecec66..61781513 100644
--- a/qemu/m68k.h
+++ b/qemu/m68k.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_m68k
 #define cpu_exec_init cpu_exec_init_m68k
 #define cpu_exec_init_all cpu_exec_init_all_m68k
+#define cpu_exec_step_atomic cpu_exec_step_atomic_m68k
 #define cpu_flush_icache_range cpu_flush_icache_range_m68k
 #define cpu_gen_init cpu_gen_init_m68k
 #define cpu_get_address_space cpu_get_address_space_m68k
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_m68k
 #define cpu_lduw_code cpu_lduw_code_m68k
 #define cpu_loop_exit cpu_loop_exit_m68k
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_m68k
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_m68k
 #define cpu_loop_exit_restore cpu_loop_exit_restore_m68k
 #define cpu_memory_rw_debug cpu_memory_rw_debug_m68k
diff --git a/qemu/mips.h b/qemu/mips.h
index 4698892d..67b2e280 100644
--- a/qemu/mips.h
+++ b/qemu/mips.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_mips
 #define cpu_exec_init cpu_exec_init_mips
 #define cpu_exec_init_all cpu_exec_init_all_mips
+#define cpu_exec_step_atomic cpu_exec_step_atomic_mips
 #define cpu_flush_icache_range cpu_flush_icache_range_mips
 #define cpu_gen_init cpu_gen_init_mips
 #define cpu_get_address_space cpu_get_address_space_mips
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_mips
 #define cpu_lduw_code cpu_lduw_code_mips
 #define cpu_loop_exit cpu_loop_exit_mips
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips
 #define cpu_loop_exit_restore cpu_loop_exit_restore_mips
 #define cpu_memory_rw_debug cpu_memory_rw_debug_mips
diff --git a/qemu/mips64.h b/qemu/mips64.h
index 318bc253..90fd4235 100644
--- a/qemu/mips64.h
+++ b/qemu/mips64.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_mips64
 #define cpu_exec_init cpu_exec_init_mips64
 #define cpu_exec_init_all cpu_exec_init_all_mips64
+#define cpu_exec_step_atomic cpu_exec_step_atomic_mips64
 #define cpu_flush_icache_range cpu_flush_icache_range_mips64
 #define cpu_gen_init cpu_gen_init_mips64
 #define cpu_get_address_space cpu_get_address_space_mips64
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_mips64
 #define cpu_lduw_code cpu_lduw_code_mips64
 #define cpu_loop_exit cpu_loop_exit_mips64
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips64
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips64
 #define cpu_loop_exit_restore cpu_loop_exit_restore_mips64
 #define cpu_memory_rw_debug cpu_memory_rw_debug_mips64
diff --git a/qemu/mips64el.h b/qemu/mips64el.h
index b3e1e15c..7a4e7856 100644
--- a/qemu/mips64el.h
+++ b/qemu/mips64el.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_mips64el
 #define cpu_exec_init cpu_exec_init_mips64el
 #define cpu_exec_init_all cpu_exec_init_all_mips64el
+#define cpu_exec_step_atomic cpu_exec_step_atomic_mips64el
 #define cpu_flush_icache_range cpu_flush_icache_range_mips64el
 #define cpu_gen_init cpu_gen_init_mips64el
 #define cpu_get_address_space cpu_get_address_space_mips64el
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_mips64el
 #define cpu_lduw_code cpu_lduw_code_mips64el
 #define cpu_loop_exit cpu_loop_exit_mips64el
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips64el
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips64el
 #define cpu_loop_exit_restore cpu_loop_exit_restore_mips64el
 #define cpu_memory_rw_debug cpu_memory_rw_debug_mips64el
diff --git a/qemu/mipsel.h b/qemu/mipsel.h
index b17c06d1..cdf9e93c 100644
--- a/qemu/mipsel.h
+++ b/qemu/mipsel.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_mipsel
 #define cpu_exec_init cpu_exec_init_mipsel
 #define cpu_exec_init_all cpu_exec_init_all_mipsel
+#define cpu_exec_step_atomic cpu_exec_step_atomic_mipsel
 #define cpu_flush_icache_range cpu_flush_icache_range_mipsel
 #define cpu_gen_init cpu_gen_init_mipsel
 #define cpu_get_address_space cpu_get_address_space_mipsel
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_mipsel
 #define cpu_lduw_code cpu_lduw_code_mipsel
 #define cpu_loop_exit cpu_loop_exit_mipsel
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mipsel
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_mipsel
 #define cpu_loop_exit_restore cpu_loop_exit_restore_mipsel
 #define cpu_memory_rw_debug cpu_memory_rw_debug_mipsel
diff --git a/qemu/powerpc.h b/qemu/powerpc.h
index 8ce59f0b..8a95a0f0 100644
--- a/qemu/powerpc.h
+++ b/qemu/powerpc.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_powerpc
 #define cpu_exec_init cpu_exec_init_powerpc
 #define cpu_exec_init_all cpu_exec_init_all_powerpc
+#define cpu_exec_step_atomic cpu_exec_step_atomic_powerpc
 #define cpu_flush_icache_range cpu_flush_icache_range_powerpc
 #define cpu_gen_init cpu_gen_init_powerpc
 #define cpu_get_address_space cpu_get_address_space_powerpc
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_powerpc
 #define cpu_lduw_code cpu_lduw_code_powerpc
 #define cpu_loop_exit cpu_loop_exit_powerpc
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_powerpc
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_powerpc
 #define cpu_loop_exit_restore cpu_loop_exit_restore_powerpc
 #define cpu_memory_rw_debug cpu_memory_rw_debug_powerpc
diff --git a/qemu/sparc.h b/qemu/sparc.h
index 5bd3d680..3268fed9 100644
--- a/qemu/sparc.h
+++ b/qemu/sparc.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_sparc
 #define cpu_exec_init cpu_exec_init_sparc
 #define cpu_exec_init_all cpu_exec_init_all_sparc
+#define cpu_exec_step_atomic cpu_exec_step_atomic_sparc
 #define cpu_flush_icache_range cpu_flush_icache_range_sparc
 #define cpu_gen_init cpu_gen_init_sparc
 #define cpu_get_address_space cpu_get_address_space_sparc
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_sparc
 #define cpu_lduw_code cpu_lduw_code_sparc
 #define cpu_loop_exit cpu_loop_exit_sparc
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_sparc
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_sparc
 #define cpu_loop_exit_restore cpu_loop_exit_restore_sparc
 #define cpu_memory_rw_debug cpu_memory_rw_debug_sparc
diff --git a/qemu/sparc64.h b/qemu/sparc64.h
index de9c7800..b7f692a1 100644
--- a/qemu/sparc64.h
+++ b/qemu/sparc64.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_sparc64
 #define cpu_exec_init cpu_exec_init_sparc64
 #define cpu_exec_init_all cpu_exec_init_all_sparc64
+#define cpu_exec_step_atomic cpu_exec_step_atomic_sparc64
 #define cpu_flush_icache_range cpu_flush_icache_range_sparc64
 #define cpu_gen_init cpu_gen_init_sparc64
 #define cpu_get_address_space cpu_get_address_space_sparc64
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_sparc64
 #define cpu_lduw_code cpu_lduw_code_sparc64
 #define cpu_loop_exit cpu_loop_exit_sparc64
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_sparc64
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_sparc64
 #define cpu_loop_exit_restore cpu_loop_exit_restore_sparc64
 #define cpu_memory_rw_debug cpu_memory_rw_debug_sparc64
diff --git a/qemu/translate-all.c b/qemu/translate-all.c
index 4c7a776b..72126c61 100644
--- a/qemu/translate-all.c
+++ b/qemu/translate-all.c
@@ -1753,6 +1753,14 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     cs_base = tb->cs_base;
     flags = tb->flags;
     tb_phys_invalidate(cpu->uc, tb, -1);
+    if (tb->cflags & CF_NOCACHE) {
+        if (tb->orig_tb) {
+            /* Invalidate original TB if this TB was generated in
+             * cpu_exec_nocache() */
+            tb_phys_invalidate(cpu->uc, tb->orig_tb, -1);
+        }
+        tb_free(env->uc, tb);
+    }
     /* FIXME: In theory this could raise an exception.  In practice
        we have already translated the block once so it's probably ok.  */
     tb_gen_code(cpu, pc, cs_base, (int)flags, cflags);
diff --git a/qemu/x86_64.h b/qemu/x86_64.h
index d98c3898..5e8d97bf 100644
--- a/qemu/x86_64.h
+++ b/qemu/x86_64.h
@@ -253,6 +253,7 @@
 #define cpu_dump_statistics cpu_dump_statistics_x86_64
 #define cpu_exec_init cpu_exec_init_x86_64
 #define cpu_exec_init_all cpu_exec_init_all_x86_64
+#define cpu_exec_step_atomic cpu_exec_step_atomic_x86_64
 #define cpu_flush_icache_range cpu_flush_icache_range_x86_64
 #define cpu_gen_init cpu_gen_init_x86_64
 #define cpu_get_address_space cpu_get_address_space_x86_64
@@ -272,6 +273,7 @@
 #define cpu_ldub_code cpu_ldub_code_x86_64
 #define cpu_lduw_code cpu_lduw_code_x86_64
 #define cpu_loop_exit cpu_loop_exit_x86_64
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_x86_64
 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_x86_64
 #define cpu_loop_exit_restore cpu_loop_exit_restore_x86_64
 #define cpu_memory_rw_debug cpu_memory_rw_debug_x86_64
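
Note (not part of the patch): a minimal sketch of how a target helper is expected to reach the new
EXCP_ATOMIC path. Only cpu_loop_exit_atomic(), cpu_exec_step_atomic() and uc->parallel_cpus come
from this patch; the helper name helper_atomic_fallback and the use of the QEMU-style ENV_GET_CPU()
and GETPC() accessors are illustrative assumptions, not code introduced here.

    /* Illustrative only -- not introduced by this patch. */
    static void helper_atomic_fallback(CPUArchState *env)
    {
        CPUState *cpu = ENV_GET_CPU(env);   /* assumed QEMU-style env->CPUState accessor */

        if (cpu->uc->parallel_cpus) {
            /* The atomic operation cannot be emulated lock-free while other
             * vCPUs may be running.  cpu_loop_exit_atomic() longjmps out of
             * the translated code with cpu->exception_index == EXCP_ATOMIC,
             * cpu_exec() returns EXCP_ATOMIC, and tcg_exec_all() (see
             * qemu/cpus.c above) calls cpu_exec_step_atomic(), which clears
             * uc->parallel_cpus, executes exactly one instruction through a
             * throw-away CF_NOCACHE TB, and restores the flag.  Does not
             * return. */
            cpu_loop_exit_atomic(cpu, GETPC());
        }

        /* Single-threaded case: emulate the operation in place. */
    }

Upstream QEMU brackets the single step with start_exclusive()/end_exclusive(); those calls are left
commented out here, presumably because each uc instance drives a single vCPU, so toggling
uc->parallel_cpus around the step is sufficient.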