diff --git a/qemu/cputlb.c b/qemu/cputlb.c
index 1e6c776c..2c131c4e 100644
--- a/qemu/cputlb.c
+++ b/qemu/cputlb.c
@@ -532,10 +532,8 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
 #undef MMUSUFFIX
 
 #define MMUSUFFIX _cmmu
-#undef GETPC_ADJ
-#define GETPC_ADJ 0
-#undef GETRA
-#define GETRA() ((uintptr_t)0)
+#undef GETPC
+#define GETPC() ((uintptr_t)0)
 #define SOFTMMU_CODE_ACCESS
 
 #define SHIFT 0
diff --git a/qemu/include/exec/exec-all.h b/qemu/include/exec/exec-all.h
index 2eb3aab3..b26d957f 100644
--- a/qemu/include/exec/exec-all.h
+++ b/qemu/include/exec/exec-all.h
@@ -351,16 +351,15 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
     tb_next->jmp_list_first = (uintptr_t)tb | n;
 }
 
-/* GETRA is the true target of the return instruction that we'll execute,
-   defined here for simplicity of defining the follow-up macros.  */
+/* GETPC is the true target of the return instruction that we'll execute. */
 #if defined(CONFIG_TCG_INTERPRETER)
 extern uintptr_t tci_tb_ptr;
-# define GETRA() tci_tb_ptr
+# define GETPC() tci_tb_ptr
 #elif defined(_MSC_VER)
 #include <intrin.h>
-# define GETRA() (uintptr_t)_ReturnAddress()
+# define GETPC() (uintptr_t)_ReturnAddress()
 #else
-# define GETRA() \
+# define GETPC() \
     ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
 #endif
 
@@ -377,8 +376,6 @@ extern uintptr_t tci_tb_ptr;
 # define GETPC_ADJ 2
 #endif
 
-#define GETPC() (GETRA() - GETPC_ADJ)
-
 #if !defined(CONFIG_USER_ONLY)
 
 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));
diff --git a/qemu/softmmu_template.h b/qemu/softmmu_template.h
index 3093e640..5f087696 100644
--- a/qemu/softmmu_template.h
+++ b/qemu/softmmu_template.h
@@ -250,9 +250,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         }
     }
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
-
     if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
@@ -302,10 +299,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     do_unaligned_access:
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
-        /* Note the adjustment at the beginning of the function.
-           Undo that for the recursion.  */
-        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
-        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
+        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
+        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 
         /* Little-endian combine.  */
@@ -440,9 +435,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         }
     }
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
-
     if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
@@ -491,10 +483,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     do_unaligned_access:
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
-        /* Note the adjustment at the beginning of the function.
-           Undo that for the recursion.  */
-        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
-        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
+        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
+        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 
         /* Big-endian combine.  */
@@ -627,9 +617,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         }
     }
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
-
     if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
@@ -691,10 +678,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         for (i = 0; i < DATA_SIZE; ++i) {
             /* Little-endian extract.  */
             uint8_t val8 = (uint8_t)(val >> (i * 8));
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion.  */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            oi, retaddr + GETPC_ADJ);
+                                            oi, retaddr);
             if (env->invalid_error != UC_ERR_OK)
                 break;
         }
@@ -776,9 +761,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         }
     }
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
-
     if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
@@ -840,10 +822,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = (uint8_t)(val >> (((DATA_SIZE - 1) * 8) - (i * 8)));
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion.  */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            oi, retaddr + GETPC_ADJ);
+                                            oi, retaddr);
             if (env->invalid_error != UC_ERR_OK)
                 break;
         }
diff --git a/qemu/target-arm/helper.c b/qemu/target-arm/helper.c
index 524b864a..3c4bdc0c 100644
--- a/qemu/target-arm/helper.c
+++ b/qemu/target-arm/helper.c
@@ -7569,12 +7569,12 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
              * this purpose use the actual register value passed to us
              * so that we get the fault address right.
              */
-            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETRA());
+            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
             /* Now we can populate the other TLB entries, if any */
             for (i = 0; i < maxidx; i++) {
                 uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                 if (va != (vaddr_in & TARGET_PAGE_MASK)) {
-                    helper_ret_stb_mmu(env, va, 0, oi, GETRA());
+                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                 }
             }
         }
@@ -7591,7 +7591,7 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
          * bounce buffer was in use
          */
         for (i = 0; i < blocklen; i++) {
-            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETRA());
+            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
         }
     }
 #else
diff --git a/qemu/target-mips/op_helper.c b/qemu/target-mips/op_helper.c
index e549966a..71f02db2 100644
--- a/qemu/target-mips/op_helper.c
+++ b/qemu/target-mips/op_helper.c
@@ -4135,17 +4135,17 @@ void helper_msa_st_ ## TYPE(CPUMIPSState *env, uint32_t wd, \
     int mmu_idx = cpu_mmu_index(env, false);                    \
     int i;                                                      \
     MEMOP_IDX(DF)                                               \
-    ensure_writable_pages(env, addr, mmu_idx, GETRA());         \
+    ensure_writable_pages(env, addr, mmu_idx, GETPC());         \
     for (i = 0; i < DF_ELEMENTS(DF); i++) {                     \
         ST_INSN(env, addr + (i << DF), pwd->TYPE[i], ##__VA_ARGS__); \
     }                                                           \
 }
 
 #if !defined(CONFIG_USER_ONLY)
-MSA_ST_DF(DF_BYTE, b, helper_ret_stb_mmu, oi, GETRA())
-MSA_ST_DF(DF_HALF, h, helper_ret_stw_mmu, oi, GETRA())
-MSA_ST_DF(DF_WORD, w, helper_ret_stl_mmu, oi, GETRA())
-MSA_ST_DF(DF_DOUBLE, d, helper_ret_stq_mmu, oi, GETRA())
+MSA_ST_DF(DF_BYTE, b, helper_ret_stb_mmu, oi, GETPC())
+MSA_ST_DF(DF_HALF, h, helper_ret_stw_mmu, oi, GETPC())
+MSA_ST_DF(DF_WORD, w, helper_ret_stl_mmu, oi, GETPC())
+MSA_ST_DF(DF_DOUBLE, d, helper_ret_stq_mmu, oi, GETPC())
 #else
 MSA_ST_DF(DF_BYTE, b, cpu_stb_data)
 MSA_ST_DF(DF_HALF, h, cpu_stw_data)
@@ -4155,10 +4155,10 @@ MSA_ST_DF(DF_DOUBLE, d, cpu_stq_data)
 
 
 #if !defined(CONFIG_USER_ONLY)
-MSA_LD_DF(DF_BYTE, b, helper_ret_ldub_mmu, oi, GETRA())
-MSA_LD_DF(DF_HALF, h, helper_ret_lduw_mmu, oi, GETRA())
-MSA_LD_DF(DF_WORD, w, helper_ret_ldul_mmu, oi, GETRA())
-MSA_LD_DF(DF_DOUBLE, d, helper_ret_ldq_mmu, oi, GETRA())
+MSA_LD_DF(DF_BYTE, b, helper_ret_ldub_mmu, oi, GETPC())
+MSA_LD_DF(DF_HALF, h, helper_ret_lduw_mmu, oi, GETPC())
+MSA_LD_DF(DF_WORD, w, helper_ret_ldul_mmu, oi, GETPC())
+MSA_LD_DF(DF_DOUBLE, d, helper_ret_ldq_mmu, oi, GETPC())
 #else
 MSA_LD_DF(DF_BYTE, b, cpu_ldub_data)
 MSA_LD_DF(DF_HALF, h, cpu_lduw_data)
diff --git a/qemu/translate-all.c b/qemu/translate-all.c
index ea73777f..5456362a 100644
--- a/qemu/translate-all.c
+++ b/qemu/translate-all.c
@@ -251,6 +251,8 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
     int64_t ti = profile_getclock();
 #endif
 
+    searched_pc -= GETPC_ADJ;
+
    if (searched_pc < host_pc) {
        return -1;
    }
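
Note (not part of the patch): the sketch below is a minimal, self-contained illustration of the retaddr flow this change establishes, assuming GCC/Clang builtins and an arbitrary GETPC_ADJ of 2; toy_store_helper, toy_target_helper, and restore_state are hypothetical stand-ins for helper_ret_stb_mmu() and cpu_restore_state_from_tb(). Helpers now take the raw return address from GETPC() and forward it unchanged, including across the unaligned-access recursion, while the single GETPC_ADJ subtraction is applied once at the point where the host PC is looked up.

/* Illustrative sketch only -- not QEMU code.  Assumes GCC/Clang builtins;
 * the GETPC_ADJ value and all function names are made up for the example. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GETPC_ADJ 2   /* host-dependent in the real tree */
#define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))

/* Stand-in for cpu_restore_state_from_tb(): the one place that adjusts. */
static void restore_state(uintptr_t searched_pc)
{
    searched_pc -= GETPC_ADJ;   /* single, centralized adjustment */
    printf("search translation covering host pc 0x%" PRIxPTR "\n", searched_pc);
}

/* Stand-in for a softmmu store helper: retaddr is forwarded as-is,
 * even when the helper recurses for an unaligned access. */
static void toy_store_helper(uintptr_t retaddr, int unaligned)
{
    if (unaligned) {
        toy_store_helper(retaddr, 0);   /* no +/- GETPC_ADJ on recursion */
        return;
    }
    restore_state(retaddr);   /* e.g. on a TLB fault */
}

/* Stand-in for a target helper such as dc_zva: passes GETPC() straight down. */
static void toy_target_helper(void)
{
    toy_store_helper(GETPC(), 1);   /* raw return address, no subtraction */
}

int main(void)
{
    toy_target_helper();
    return 0;
}

The design point is that only the restore path needs to know about GETPC_ADJ; every caller simply forwards GETPC().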