diff --git a/qemu/aarch64.h b/qemu/aarch64.h
index ce4806c1..e75b598d 100644
--- a/qemu/aarch64.h
+++ b/qemu/aarch64.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_aarch64
 #define target_parse_constraint target_parse_constraint_aarch64
 #define target_words_bigendian target_words_bigendian_aarch64
-#define tb_add_jump tb_add_jump_aarch64
 #define tb_alloc tb_alloc_aarch64
 #define tb_alloc_page tb_alloc_page_aarch64
 #define tb_check_watchpoint tb_check_watchpoint_aarch64
diff --git a/qemu/aarch64eb.h b/qemu/aarch64eb.h
index ea3a27fc..8f3b92a0 100644
--- a/qemu/aarch64eb.h
+++ b/qemu/aarch64eb.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_aarch64eb
 #define target_parse_constraint target_parse_constraint_aarch64eb
 #define target_words_bigendian target_words_bigendian_aarch64eb
-#define tb_add_jump tb_add_jump_aarch64eb
 #define tb_alloc tb_alloc_aarch64eb
 #define tb_alloc_page tb_alloc_page_aarch64eb
 #define tb_check_watchpoint tb_check_watchpoint_aarch64eb
diff --git a/qemu/arm.h b/qemu/arm.h
index 3db03210..64ec7aa3 100644
--- a/qemu/arm.h
+++ b/qemu/arm.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_arm
 #define target_parse_constraint target_parse_constraint_arm
 #define target_words_bigendian target_words_bigendian_arm
-#define tb_add_jump tb_add_jump_arm
 #define tb_alloc tb_alloc_arm
 #define tb_alloc_page tb_alloc_page_arm
 #define tb_check_watchpoint tb_check_watchpoint_arm
diff --git a/qemu/armeb.h b/qemu/armeb.h
index 68d484a0..ed306d9b 100644
--- a/qemu/armeb.h
+++ b/qemu/armeb.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_armeb
 #define target_parse_constraint target_parse_constraint_armeb
 #define target_words_bigendian target_words_bigendian_armeb
-#define tb_add_jump tb_add_jump_armeb
 #define tb_alloc tb_alloc_armeb
 #define tb_alloc_page tb_alloc_page_armeb
 #define tb_check_watchpoint tb_check_watchpoint_armeb
diff --git a/qemu/cpu-exec.c b/qemu/cpu-exec.c
index 42095675..64cf51e1 100644
--- a/qemu/cpu-exec.c
+++ b/qemu/cpu-exec.c
@@ -162,6 +162,41 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
     return tb;
 }
 
+void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
+{
+    if (TCG_TARGET_HAS_direct_jump) {
+        uintptr_t offset = tb->jmp_target_arg[n];
+        uintptr_t tc_ptr = (uintptr_t)tb->tc_ptr;
+        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
+    } else {
+        tb->jmp_target_arg[n] = addr;
+    }
+}
+
+/* Called with tb_lock held. */
+static inline void tb_add_jump(TranslationBlock *tb, int n,
+                               TranslationBlock *tb_next)
+{
+    assert(n < ARRAY_SIZE(tb->jmp_list_next));
+    if (tb->jmp_list_next[n]) {
+        /* Another thread has already done this while we were
+         * outside of the lock; nothing to do in this case */
+        return;
+    }
+    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
+                           "Linking TBs %p [" TARGET_FMT_lx
+                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
+                           tb->tc_ptr, tb->pc, n,
+                           tb_next->tc_ptr, tb_next->pc);
+
+    /* patch the native jump address */
+    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
+
+    /* add in TB jmp circular list */
+    tb->jmp_list_next[n] = tb_next->jmp_list_first;
+    tb_next->jmp_list_first = (uintptr_t)tb | n;
+}
+
 static inline TranslationBlock *tb_find(CPUState *cpu,
                                         TranslationBlock *last_tb,
                                         int tb_exit)
diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index 971e2de6..875f41a4 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -2935,7 +2935,7 @@ symbols = (
     'target_el_table',
     'target_parse_constraint',
     'target_words_bigendian',
-    'tb_add_jump',
+    #'tb_add_jump',
     'tb_alloc',
     'tb_alloc_page',
     'tb_check_watchpoint',
diff --git a/qemu/include/exec/exec-all.h b/qemu/include/exec/exec-all.h
index 358fdfd2..df90b258 100644
--- a/qemu/include/exec/exec-all.h
+++ b/qemu/include/exec/exec-all.h
@@ -203,15 +203,6 @@ static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
 #define CODE_GEN_AVG_BLOCK_SIZE 150
 #endif
 
-#if defined(_ARCH_PPC) \
-    || defined(__x86_64__) || defined(__i386__) \
-    || defined(__sparc__) || defined(__aarch64__) \
-    || defined(__s390x__) || defined(__mips__) \
-    || defined(CONFIG_TCG_INTERPRETER)
-/* NOTE: Direct jump patching must be atomic to be thread-safe. */
-#define USE_DIRECT_JUMP
-#endif
-
 struct TranslationBlock {
     target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
     target_ulong cs_base; /* CS base for this block */
@@ -248,11 +239,8 @@
      */
     uint16_t jmp_reset_offset[2]; /* offset of original jump target */
 #define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
-#ifdef USE_DIRECT_JUMP
-    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
-#else
-    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
-#endif
+    uintptr_t jmp_target_arg[2]; /* target address or offset */
+
     /* Each TB has an assosiated circular list of TBs jumping to this one.
      * jmp_list_first points to the first TB jumping to this one.
      * jmp_list_next is used to point to the next TB in a list.
@@ -276,83 +264,7 @@ void tb_phys_invalidate(struct uc_struct *uc,
 TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                    target_ulong cs_base, uint32_t flags);
 
-#if defined(USE_DIRECT_JUMP)
-
-#if defined(CONFIG_TCG_INTERPRETER)
-static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
-{
-    /* patch the branch destination */
-    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
-    /* no need to flush icache explicitly */
-}
-#elif defined(_ARCH_PPC)
-void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
-#define tb_set_jmp_target1 ppc_tb_set_jmp_target
-#elif defined(__i386__) || defined(__x86_64__)
-static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
-{
-    /* patch the branch destination */
-    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
-    /* no need to flush icache explicitly */
-}
-#elif defined(__s390x__)
-static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
-{
-    /* patch the branch destination */
-    intptr_t disp = addr - (jmp_addr - 2);
-    atomic_set((int32_t *)jmp_addr, disp / 2);
-    /* no need to flush icache explicitly */
-}
-#elif defined(__aarch64__)
-void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
-#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
-#elif defined(__sparc__) || defined(__mips__)
-void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
-#else
-#error tb_set_jmp_target1 is missing
-#endif
-
-static inline void tb_set_jmp_target(TranslationBlock *tb,
-                                     int n, uintptr_t addr)
-{
-    uint16_t offset = tb->jmp_insn_offset[n];
-    tb_set_jmp_target1((uintptr_t)((char*)tb->tc_ptr + offset), addr);
-}
-
-#else
-
-/* set the jump target */
-static inline void tb_set_jmp_target(TranslationBlock *tb,
-                                     int n, uintptr_t addr)
-{
-    tb->jmp_target_addr[n] = addr;
-}
-
-#endif
-
-/* Called with tb_lock held. */
-static inline void tb_add_jump(TranslationBlock *tb, int n,
-                               TranslationBlock *tb_next)
-{
-    assert(n < ARRAY_SIZE(tb->jmp_list_next));
-    if (tb->jmp_list_next[n]) {
-        /* Another thread has already done this while we were
-         * outside of the lock; nothing to do in this case */
-        return;
-    }
-    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
-                           "Linking TBs %p [" TARGET_FMT_lx
-                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
-                           tb->tc_ptr, tb->pc, n,
-                           tb_next->tc_ptr, tb_next->pc);
-
-    /* patch the native jump address */
-    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
-
-    /* add in TB jmp circular list */
-    tb->jmp_list_next[n] = tb_next->jmp_list_first;
-    tb_next->jmp_list_first = (uintptr_t)tb | n;
-}
+void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
 
 /* GETPC is the true target of the return instruction that we'll execute. */
 #if defined(CONFIG_TCG_INTERPRETER)
diff --git a/qemu/m68k.h b/qemu/m68k.h
index 9cd9bed4..25ecab64 100644
--- a/qemu/m68k.h
+++ b/qemu/m68k.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_m68k
 #define target_parse_constraint target_parse_constraint_m68k
 #define target_words_bigendian target_words_bigendian_m68k
-#define tb_add_jump tb_add_jump_m68k
 #define tb_alloc tb_alloc_m68k
 #define tb_alloc_page tb_alloc_page_m68k
 #define tb_check_watchpoint tb_check_watchpoint_m68k
diff --git a/qemu/mips.h b/qemu/mips.h
index b61e94aa..f7d16289 100644
--- a/qemu/mips.h
+++ b/qemu/mips.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_mips
 #define target_parse_constraint target_parse_constraint_mips
 #define target_words_bigendian target_words_bigendian_mips
-#define tb_add_jump tb_add_jump_mips
 #define tb_alloc tb_alloc_mips
 #define tb_alloc_page tb_alloc_page_mips
 #define tb_check_watchpoint tb_check_watchpoint_mips
diff --git a/qemu/mips64.h b/qemu/mips64.h
index 01815173..2d97717c 100644
--- a/qemu/mips64.h
+++ b/qemu/mips64.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_mips64
 #define target_parse_constraint target_parse_constraint_mips64
 #define target_words_bigendian target_words_bigendian_mips64
-#define tb_add_jump tb_add_jump_mips64
 #define tb_alloc tb_alloc_mips64
 #define tb_alloc_page tb_alloc_page_mips64
 #define tb_check_watchpoint tb_check_watchpoint_mips64
diff --git a/qemu/mips64el.h b/qemu/mips64el.h
index bd2c9cf0..b2ba9b05 100644
--- a/qemu/mips64el.h
+++ b/qemu/mips64el.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_mips64el
 #define target_parse_constraint target_parse_constraint_mips64el
 #define target_words_bigendian target_words_bigendian_mips64el
-#define tb_add_jump tb_add_jump_mips64el
 #define tb_alloc tb_alloc_mips64el
 #define tb_alloc_page tb_alloc_page_mips64el
 #define tb_check_watchpoint tb_check_watchpoint_mips64el
diff --git a/qemu/mipsel.h b/qemu/mipsel.h
index 61699635..b8d6abed 100644
--- a/qemu/mipsel.h
+++ b/qemu/mipsel.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_mipsel
 #define target_parse_constraint target_parse_constraint_mipsel
 #define target_words_bigendian target_words_bigendian_mipsel
-#define tb_add_jump tb_add_jump_mipsel
 #define tb_alloc tb_alloc_mipsel
 #define tb_alloc_page tb_alloc_page_mipsel
 #define tb_check_watchpoint tb_check_watchpoint_mipsel
diff --git a/qemu/powerpc.h b/qemu/powerpc.h
index 738f0bbf..6fa8e5df 100644
--- a/qemu/powerpc.h
+++ b/qemu/powerpc.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_powerpc
 #define target_parse_constraint target_parse_constraint_powerpc
 #define target_words_bigendian target_words_bigendian_powerpc
-#define tb_add_jump tb_add_jump_powerpc
 #define tb_alloc tb_alloc_powerpc
 #define tb_alloc_page tb_alloc_page_powerpc
 #define tb_check_watchpoint tb_check_watchpoint_powerpc
diff --git a/qemu/sparc.h b/qemu/sparc.h
index 849f4d92..27a28376 100644
--- a/qemu/sparc.h
+++ b/qemu/sparc.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_sparc
 #define target_parse_constraint target_parse_constraint_sparc
 #define target_words_bigendian target_words_bigendian_sparc
-#define tb_add_jump tb_add_jump_sparc
 #define tb_alloc tb_alloc_sparc
 #define tb_alloc_page tb_alloc_page_sparc
 #define tb_check_watchpoint tb_check_watchpoint_sparc
diff --git a/qemu/sparc64.h b/qemu/sparc64.h
index 15b24f0a..ad72d6fb 100644
--- a/qemu/sparc64.h
+++ b/qemu/sparc64.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_sparc64
 #define target_parse_constraint target_parse_constraint_sparc64
 #define target_words_bigendian target_words_bigendian_sparc64
-#define tb_add_jump tb_add_jump_sparc64
 #define tb_alloc tb_alloc_sparc64
 #define tb_alloc_page tb_alloc_page_sparc64
 #define tb_check_watchpoint tb_check_watchpoint_sparc64
diff --git a/qemu/tcg/aarch64/tcg-target.h b/qemu/tcg/aarch64/tcg-target.h
index b41a248b..719861fe 100644
--- a/qemu/tcg/aarch64/tcg-target.h
+++ b/qemu/tcg/aarch64/tcg-target.h
@@ -111,12 +111,15 @@ typedef enum {
 #define TCG_TARGET_HAS_muls2_i64 0
 #define TCG_TARGET_HAS_muluh_i64 1
 #define TCG_TARGET_HAS_mulsh_i64 1
+#define TCG_TARGET_HAS_direct_jump 1
+
+#define TCG_TARGET_DEFAULT_MO (0)
 
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
     __builtin___clear_cache((char *)start, (char *)stop);
 }
 
-#define TCG_TARGET_DEFAULT_MO (0)
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
 
 #endif /* AARCH64_TCG_TARGET_H */
diff --git a/qemu/tcg/aarch64/tcg-target.inc.c b/qemu/tcg/aarch64/tcg-target.inc.c
index a36b3adb..09342d73 100644
--- a/qemu/tcg/aarch64/tcg-target.inc.c
+++ b/qemu/tcg/aarch64/tcg-target.inc.c
@@ -875,9 +875,8 @@ static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
     }
 }
 
-#ifdef USE_DIRECT_JUMP
-
-void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
+                              uintptr_t addr)
 {
     tcg_insn_unit i1, i2;
     TCGType rt = TCG_TYPE_I64;
@@ -902,8 +901,6 @@ void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
     flush_icache_range(jmp_addr, jmp_addr + 8);
 }
 
-#endif
-
 static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
 {
     if (!l->has_value) {
@@ -1427,7 +1424,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_goto_tb:
         if (s->tb_jmp_insn_offset != NULL) {
-            /* USE_DIRECT_JUMP */
+            /* TCG_TARGET_HAS_direct_jump */
             /* Ensure that ADRP+ADD are 8-byte aligned so that an atomic
                write can be used to patch the target address. */
             if ((uintptr_t)s->code_ptr & 7) {
@@ -1435,11 +1432,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
             }
             s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
             /* actual branch destination will be patched by
-               aarch64_tb_set_jmp_target later. */
+               tb_target_set_jmp_target later. */
             tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
             tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
         } else {
-            /* !USE_DIRECT_JUMP */
+            /* !TCG_TARGET_HAS_direct_jump */
             tcg_debug_assert(s->tb_jmp_target_addr != NULL);
             intptr_t offset = tcg_pcrel_diff(s, (s->tb_jmp_target_addr + a0)) >> 2;
             tcg_out_insn(s, 3305, LDR, offset, TCG_REG_TMP);
diff --git a/qemu/tcg/arm/tcg-target.h b/qemu/tcg/arm/tcg-target.h
index 0680f253..2a028795 100644
--- a/qemu/tcg/arm/tcg-target.h
+++ b/qemu/tcg/arm/tcg-target.h
@@ -125,16 +125,20 @@ extern bool use_idiv_instructions_rt;
 #define TCG_TARGET_HAS_div_i32 use_idiv_instructions
 #define TCG_TARGET_HAS_rem_i32 0
 #define TCG_TARGET_HAS_goto_ptr 1
+#define TCG_TARGET_HAS_direct_jump 0
 
 enum {
     TCG_AREG0 = TCG_REG_R6,
 };
 
+#define TCG_TARGET_DEFAULT_MO (0)
+
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
     __builtin___clear_cache((char *) start, (char *) stop);
 }
 
-#define TCG_TARGET_DEFAULT_MO (0)
+/* not defined -- call should be eliminated at compile time */
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
 
 #endif
diff --git a/qemu/tcg/i386/tcg-target.h b/qemu/tcg/i386/tcg-target.h
index ca528d76..32b44a25 100644
--- a/qemu/tcg/i386/tcg-target.h
+++ b/qemu/tcg/i386/tcg-target.h
@@ -108,6 +108,7 @@ extern bool have_popcnt;
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
 #define TCG_TARGET_HAS_goto_ptr 1
+#define TCG_TARGET_HAS_direct_jump 1
 
 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_extrl_i64_i32 0
@@ -166,6 +167,14 @@ static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
 }
 
+static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
+                                            uintptr_t jmp_addr, uintptr_t addr)
+{
+    /* patch the branch destination */
+    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
+    /* no need to flush icache explicitly */
+}
+
 /* This defines the natural memory order supported by this
  * architecture before guarantees made by various barrier
  * instructions.
diff --git a/qemu/tcg/mips/tcg-target.h b/qemu/tcg/mips/tcg-target.h
index e9558d15..928a762b 100644
--- a/qemu/tcg/mips/tcg-target.h
+++ b/qemu/tcg/mips/tcg-target.h
@@ -131,6 +131,7 @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_mulsh_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_goto_ptr 1
+#define TCG_TARGET_HAS_direct_jump 1
 
 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_add2_i32 0
@@ -201,11 +202,13 @@ extern bool use_mips32r2_instructions;
 #include <sys/cachectl.h>
 #endif
 
+#define TCG_TARGET_DEFAULT_MO (0)
+
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
     cacheflush ((void *)start, stop-start, ICACHE);
 }
 
-#define TCG_TARGET_DEFAULT_MO (0)
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
 
 #endif
diff --git a/qemu/tcg/mips/tcg-target.inc.c b/qemu/tcg/mips/tcg-target.inc.c
index 18345a9b..2f02e6b6 100644
--- a/qemu/tcg/mips/tcg-target.inc.c
+++ b/qemu/tcg/mips/tcg-target.inc.c
@@ -2657,7 +2657,8 @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */
 }
 
-void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
+                              uintptr_t addr)
 {
     atomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
     flush_icache_range(jmp_addr, jmp_addr + 4);
diff --git a/qemu/tcg/ppc/tcg-target.h b/qemu/tcg/ppc/tcg-target.h
index 88a0b2a4..dcf53214 100644
--- a/qemu/tcg/ppc/tcg-target.h
+++ b/qemu/tcg/ppc/tcg-target.h
@@ -83,6 +83,7 @@ extern bool have_isa_3_00;
 #define TCG_TARGET_HAS_muluh_i32 1
 #define TCG_TARGET_HAS_mulsh_i32 1
 #define TCG_TARGET_HAS_goto_ptr 0
+#define TCG_TARGET_HAS_direct_jump 1
 
 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_add2_i32 0
@@ -124,6 +125,7 @@ extern bool have_isa_3_00;
 #endif
 
 void flush_icache_range(uintptr_t start, uintptr_t stop);
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
 
 #define TCG_TARGET_DEFAULT_MO (0)
diff --git a/qemu/tcg/ppc/tcg-target.inc.c b/qemu/tcg/ppc/tcg-target.inc.c
index f50c086c..d51541c6 100644
--- a/qemu/tcg/ppc/tcg-target.inc.c
+++ b/qemu/tcg/ppc/tcg-target.inc.c
@@ -1301,7 +1301,8 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
     tcg_out32(s, insn);
 }
 
-void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
+                              uintptr_t addr)
 {
     tcg_insn_unit i1, i2;
     uint64_t pair;
diff --git a/qemu/tcg/s390/tcg-target.h b/qemu/tcg/s390/tcg-target.h
index bedda5ed..31a9eb4a 100644
--- a/qemu/tcg/s390/tcg-target.h
+++ b/qemu/tcg/s390/tcg-target.h
@@ -95,6 +95,7 @@ extern uint64_t s390_facilities;
 #define TCG_TARGET_HAS_extrl_i64_i32 0
 #define TCG_TARGET_HAS_extrh_i64_i32 0
 #define TCG_TARGET_HAS_goto_ptr 1
+#define TCG_TARGET_HAS_direct_jump 1
 
 #define TCG_TARGET_HAS_div2_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
@@ -145,4 +146,13 @@ static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
 }
 
+static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
+                                            uintptr_t jmp_addr, uintptr_t addr)
+{
+    /* patch the branch destination */
+    intptr_t disp = addr - (jmp_addr - 2);
+    atomic_set((int32_t *)jmp_addr, disp / 2);
+    /* no need to flush icache explicitly */
+}
+
 #endif
diff --git a/qemu/tcg/sparc/tcg-target.h b/qemu/tcg/sparc/tcg-target.h
index e474c4f4..ee688efb 100644
--- a/qemu/tcg/sparc/tcg-target.h
+++ b/qemu/tcg/sparc/tcg-target.h
@@ -124,6 +124,7 @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
 #define TCG_TARGET_HAS_goto_ptr 1
+#define TCG_TARGET_HAS_direct_jump 1
 
 #define TCG_TARGET_HAS_extrl_i64_i32 1
 #define TCG_TARGET_HAS_extrh_i64_i32 1
@@ -180,4 +181,6 @@ static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 }
 #endif
 
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
+
 #endif
diff --git a/qemu/tcg/sparc/tcg-target.inc.c b/qemu/tcg/sparc/tcg-target.inc.c
index 44de7f6a..8dd33b9c 100644
--- a/qemu/tcg/sparc/tcg-target.inc.c
+++ b/qemu/tcg/sparc/tcg-target.inc.c
@@ -1675,7 +1675,8 @@ static void tcg_target_init(TCGContext *s)
 # define ELF_HOST_FLAGS EF_SPARC_32PLUS
 #endif
 
-void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
+                              uintptr_t addr)
 {
     uint32_t *ptr = (uint32_t *)jmp_addr;
     uintptr_t disp = addr - jmp_addr;
diff --git a/qemu/tcg/tcg.h b/qemu/tcg/tcg.h
index 3c31f9f8..b2f6ce7f 100644
--- a/qemu/tcg/tcg.h
+++ b/qemu/tcg/tcg.h
@@ -782,8 +782,8 @@ struct TCGContext {
     /* goto_tb support */
     tcg_insn_unit *code_buf;
     uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
-    uint16_t *tb_jmp_insn_offset; /* tb->jmp_insn_offset if USE_DIRECT_JUMP */
-    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_addr if !USE_DIRECT_JUMP */
+    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
+    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
 
     TCGRegSet reserved_regs;
     intptr_t current_frame_offset;
diff --git a/qemu/translate-all.c b/qemu/translate-all.c
index 4835fd5a..d2f78642 100644
--- a/qemu/translate-all.c
+++ b/qemu/translate-all.c
@@ -1352,13 +1352,14 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
     tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
-#ifdef USE_DIRECT_JUMP
-    tcg_ctx->tb_jmp_insn_offset = tb->jmp_insn_offset;
-    tcg_ctx->tb_jmp_target_addr = NULL;
-#else
-    tcg_ctx->tb_jmp_insn_offset = NULL;
-    tcg_ctx->tb_jmp_target_addr = tb->jmp_target_addr;
-#endif
+
+    if (TCG_TARGET_HAS_direct_jump) {
+        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
+        tcg_ctx->tb_jmp_target_addr = NULL;
+    } else {
+        tcg_ctx->tb_jmp_insn_offset = NULL;
+        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
+    }
 
 #ifdef CONFIG_PROFILER
     tcg_ctx->tb_count++;
diff --git a/qemu/x86_64.h b/qemu/x86_64.h
index 1a16fc60..69438750 100644
--- a/qemu/x86_64.h
+++ b/qemu/x86_64.h
@@ -2929,7 +2929,6 @@
 #define target_el_table target_el_table_x86_64
 #define target_parse_constraint target_parse_constraint_x86_64
 #define target_words_bigendian target_words_bigendian_x86_64
-#define tb_add_jump tb_add_jump_x86_64
 #define tb_alloc tb_alloc_x86_64
 #define tb_alloc_page tb_alloc_page_x86_64
 #define tb_check_watchpoint tb_check_watchpoint_x86_64