diff --git a/qemu/accel/tcg/cputlb.c b/qemu/accel/tcg/cputlb.c
index 34f6a2dd..6d8eb4cd 100644
--- a/qemu/accel/tcg/cputlb.c
+++ b/qemu/accel/tcg/cputlb.c
@@ -136,7 +136,7 @@ static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                               target_ulong page)
 {
     int k;
-    //assert_cpu_is_self(ENV_GET_CPU(env));
+    //assert_cpu_is_self(env_cpu(env));
     for (k = 0; k < CPU_VTLB_SIZE; k++) {
         tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page);
     }
@@ -535,7 +535,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
     MemoryRegion *mr;
     MemoryRegionSection *section;
     ram_addr_t ram_addr;
-    CPUState *cpu = ENV_GET_CPU(env);
+    CPUState *cpu = env_cpu(env);
     CPUIOTLBEntry *iotlbentry;
     hwaddr physaddr, mr_offset;

@@ -617,7 +617,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
                          MMUAccessType access_type, int size)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
+    CPUState *cpu = env_cpu(env);
     hwaddr mr_offset;
     MemoryRegionSection *section;
     MemoryRegion *mr;
@@ -652,7 +652,7 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                       int mmu_idx, uint64_t val, target_ulong addr,
                       uintptr_t retaddr, int size)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
+    CPUState *cpu = env_cpu(env);
     hwaddr mr_offset;
     MemoryRegionSection *section;
     MemoryRegion *mr;
@@ -737,7 +737,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
     if (!tlb_hit(tlb_addr_write(entry), addr)) {
         /* TLB entry is for a different page */
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
-            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                      mmu_idx, retaddr);
         }
     }
@@ -771,7 +771,7 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
         uintptr_t index = tlb_index(env, mmu_idx, addr);

         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
-            CPUState *cs = ENV_GET_CPU(env);
+            CPUState *cs = env_cpu(env);
             CPUClass *cc = CPU_GET_CLASS(cs->uc, cs);

             if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
@@ -812,7 +812,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     /* Enforce guest required alignment.  */
     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }

@@ -828,7 +828,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     /* Check TLB entry and enforce page permissions.  */
     if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
-            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
+            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
                      mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             tlbe = tlb_entry(env, mmu_idx, addr);
@@ -838,7 +838,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,

     /* Check notdirty */
     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
-        tlb_set_dirty(ENV_GET_CPU(env), addr);
+        tlb_set_dirty(env_cpu(env), addr);
         tlb_addr = tlb_addr & ~TLB_NOTDIRTY;
     }

@@ -851,7 +851,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,

     /* Let the guest notice RMW on a write-only page.  */
     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
-        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
+        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                  mmu_idx, retaddr);
         /* Since we don't support reads and writes to different addresses,
            and we do have the proper page loaded for write, this shouldn't
@@ -862,7 +862,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     return (void *)((uintptr_t)addr + tlbe->addend);

  stop_the_world:
-    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
+    cpu_loop_exit_atomic(env_cpu(env), retaddr);
 }

 #ifdef TARGET_WORDS_BIGENDIAN
@@ -1033,7 +1033,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,

     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
+        cpu_unaligned_access(env_cpu(env), addr, access_type,
                              mmu_idx, retaddr);
     }

@@ -1041,7 +1041,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(ENV_GET_CPU(env), addr, size,
+            tlb_fill(env_cpu(env), addr, size,
                      access_type, mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
@@ -1062,7 +1062,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
              * repeat the MMU check here. This tlb_fill() call might
              * longjump out if this access should cause a guest exception.
              */
-            tlb_fill(ENV_GET_CPU(env), addr, size,
+            tlb_fill(env_cpu(env), addr, size,
                      access_type, mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
@@ -1355,7 +1355,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,

     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }

@@ -1363,7 +1363,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                      mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
@@ -1384,7 +1384,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
              * repeat the MMU check here. This tlb_fill() call might
              * longjump out if this access should cause a guest exception.
              */
-            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                      mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
@@ -1424,7 +1424,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         if (!tlb_hit_page(tlb_addr2, page2)
             && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
                                page2 & TARGET_PAGE_MASK)) {
-            tlb_fill(ENV_GET_CPU(env), page2, size, MMU_DATA_STORE,
+            tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE,
                      mmu_idx, retaddr);
         }

diff --git a/qemu/accel/tcg/tcg-runtime.c b/qemu/accel/tcg/tcg-runtime.c
index 3411f0d0..f1a0971c 100644
--- a/qemu/accel/tcg/tcg-runtime.c
+++ b/qemu/accel/tcg/tcg-runtime.c
@@ -146,7 +146,7 @@ uint64_t HELPER(ctpop_i64)(uint64_t arg)
 void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 {
     TCGContext *tcg_ctx = env->uc->tcg_ctx;
-    CPUState *cpu = ENV_GET_CPU(env);
+    CPUState *cpu = env_cpu(env);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
@@ -167,5 +167,5 @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env)

 void HELPER(exit_atomic)(CPUArchState *env)
 {
-    cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
+    cpu_loop_exit_atomic(env_cpu(env), GETPC());
 }
diff --git a/qemu/accel/tcg/translate-all.c b/qemu/accel/tcg/translate-all.c
index c3123f1f..b99c3c60 100644
--- a/qemu/accel/tcg/translate-all.c
+++ b/qemu/accel/tcg/translate-all.c
@@ -1371,7 +1371,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,

     tcg_func_start(tcg_ctx);

-    tcg_ctx->cpu = ENV_GET_CPU(env);
+    tcg_ctx->cpu = env_cpu(env);
     gen_intermediate_code(cpu, tb, max_insns);
     tcg_ctx->cpu = NULL;

diff --git a/qemu/include/exec/cpu-all.h b/qemu/include/exec/cpu-all.h
index 8d465c1b..5f7e1d87 100644
--- a/qemu/include/exec/cpu-all.h
+++ b/qemu/include/exec/cpu-all.h
@@ -375,4 +375,16 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,

 int cpu_exec(struct uc_struct *uc, CPUState *cpu);

+/**
+ * env_cpu(env)
+ * @env: The architecture environment
+ *
+ * Return the CPUState associated with the environment.
+ */
+static inline CPUState *env_cpu(CPUArchState *env)
+{
+    ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
+    return &arch_cpu->parent_obj;
+}
+
 #endif /* CPU_ALL_H */
diff --git a/qemu/target/arm/cpu.h b/qemu/target/arm/cpu.h
index 79c9e346..bf8e52f6 100644
--- a/qemu/target/arm/cpu.h
+++ b/qemu/target/arm/cpu.h
@@ -892,8 +892,6 @@ static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
     return container_of(env, ARMCPU, env);
 }

-#define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e))
-
 #define ENV_OFFSET offsetof(ARMCPU, env)

 #ifndef CONFIG_USER_ONLY
diff --git a/qemu/target/arm/helper-a64.c b/qemu/target/arm/helper-a64.c
index 6c1b080f..496aa797 100644
--- a/qemu/target/arm/helper-a64.c
+++ b/qemu/target/arm/helper-a64.c
@@ -551,7 +551,7 @@ static uint64_t do_paired_cmpxchg64_le(CPUARMState *env, uint64_t addr,

     if (parallel) {
 #ifndef CONFIG_ATOMIC128
-        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+        cpu_loop_exit_atomic(env_cpu(env), ra);
 #else
         int mem_idx = cpu_mmu_index(env, false);
         TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
@@ -621,7 +621,7 @@ static uint64_t do_paired_cmpxchg64_be(CPUARMState *env, uint64_t addr,

     if (parallel) {
 #ifndef CONFIG_ATOMIC128
-        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+        cpu_loop_exit_atomic(env_cpu(env), ra);
 #else
         int mem_idx = cpu_mmu_index(env, false);
         TCGMemOpIdx oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
diff --git a/qemu/target/arm/helper.c b/qemu/target/arm/helper.c
index 5f1bd058..ce46b1b9 100644
--- a/qemu/target/arm/helper.c
+++ b/qemu/target/arm/helper.c
@@ -429,7 +429,7 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
-    //CPUState *cs = ENV_GET_CPU(env);
+    //CPUState *cs = env_cpu(env);

     //tlb_flush_all_cpus_synced(cs);
 }
@@ -437,7 +437,7 @@ static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
 {
-    //CPUState *cs = ENV_GET_CPU(env);
+    //CPUState *cs = env_cpu(env);

     //tlb_flush_all_cpus_synced(cs);
 }
@@ -445,7 +445,7 @@ static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
-    //CPUState *cs = ENV_GET_CPU(env);
+    //CPUState *cs = env_cpu(env);

     //tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 }
@@ -453,7 +453,7 @@ static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
 {
-    //CPUState *cs = ENV_GET_CPU(env);
+    //CPUState *cs = env_cpu(env);

     //tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 }
@@ -528,7 +528,7 @@ static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);

     tlb_flush_by_mmuidx(cs,
                         ARMMMUIdxBit_S12NSE1 |
@@ -541,7 +541,7 @@ static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     // Unicorn: if'd out. See issue 642
 #if 0
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);

     tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                         ARMMMUIdxBit_S12NSE1 |
@@ -559,7 +559,7 @@ static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
      * translation information.
      * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
      */
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);
     uint64_t pageaddr;

     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
@@ -593,7 +593,7 @@ static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);

     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
 }
@@ -603,7 +603,7 @@ static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     // Unicorn: if'd out. See issue 642
 #if 0
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);

     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
 #endif
@@ -612,7 +612,7 @@ static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);
     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
@@ -623,7 +623,7 @@ static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     // Unicorn: if'd out. See issue 642.
 #if 0
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);
     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
@@ -1702,7 +1702,7 @@ static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,

 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);
     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
     uint64_t ret = 0;

@@ -3563,7 +3563,7 @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
     // UNICORN: TODO: issue #642
 #if 0
     bool sec = arm_is_secure_below_el3(env);
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);

     if (sec) {
         tlb_flush_by_mmuidx_all_cpus_synced(cs,
@@ -3580,7 +3580,7 @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);

     if (tlb_force_broadcast(env)) {
         tlbi_aa64_vmalle1is_write(env, NULL, value);
@@ -3655,7 +3655,7 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 #if 0
     bool sec = arm_is_secure_below_el3(env);
     bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);

     if (sec) {
         tlb_flush_by_mmuidx_all_cpus_synced(cs,
@@ -3679,7 +3679,7 @@ static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     // UNICORN: TODO: issue #642
 #if 0
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);

     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
 #endif
@@ -3690,7 +3690,7 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     // UNICORN: TODO: issue #642
 #if 0
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);

     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
 #endif
@@ -3730,7 +3730,7 @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
     // UNICORN: TODO: issue #642
 #if 0
     bool sec = arm_is_secure_below_el3(env);
-    CPUState *cs = ENV_GET_CPU(env)
+    CPUState *cs = env_cpu(env)
     uint64_t pageaddr = sextract64(value << 12, 0, 56);

     if (sec) {
@@ -3778,7 +3778,7 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     // UNICORN: TODO: issue #642
 #if 0
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);

     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
@@ -3791,7 +3791,7 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     // UNICORN: TODO: issue #642
 #if 0
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);

     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
@@ -3826,7 +3826,7 @@ static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     // UNICORN: TODO: issue #642
 #if 0
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = env_cpu(env);
     uint64_t pageaddr;

     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
diff --git a/qemu/target/i386/cpu.h b/qemu/target/i386/cpu.h
index 45ec9cb9..fc78f986 100644
--- a/qemu/target/i386/cpu.h
+++ b/qemu/target/i386/cpu.h
@@ -1403,8 +1403,6 @@ static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
     return container_of(env, X86CPU, env);
 }

-#define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e))
-
 #define ENV_OFFSET offsetof(X86CPU, env)

 #ifndef CONFIG_USER_ONLY
diff --git a/qemu/target/i386/mem_helper.c b/qemu/target/i386/mem_helper.c
index bbee8d91..65ddb995 100644
--- a/qemu/target/i386/mem_helper.c
+++ b/qemu/target/i386/mem_helper.c
@@ -91,7 +91,7 @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
     }
     CC_SRC = eflags;
 #else
-    cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
+    cpu_loop_exit_atomic(env_cpu(env), GETPC());
 #endif /* CONFIG_ATOMIC64 */
 }

@@ -160,7 +160,7 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
         }
         CC_SRC = eflags;
     } else {
-        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+        cpu_loop_exit_atomic(env_cpu(env), ra);
     }
 }
 #endif
diff --git a/qemu/target/m68k/cpu.h b/qemu/target/m68k/cpu.h
index cc479a48..b70c544e 100644
--- a/qemu/target/m68k/cpu.h
+++ b/qemu/target/m68k/cpu.h
@@ -172,8 +172,6 @@ static inline M68kCPU *m68k_env_get_cpu(CPUM68KState *env)
     return container_of(env, M68kCPU, env);
 }

-#define ENV_GET_CPU(e) CPU(m68k_env_get_cpu(e))
-
 #define ENV_OFFSET offsetof(M68kCPU, env)

 void m68k_cpu_do_interrupt(CPUState *cpu);
diff --git a/qemu/target/m68k/op_helper.c b/qemu/target/m68k/op_helper.c
index 74434471..78e5944e 100644
--- a/qemu/target/m68k/op_helper.c
+++ b/qemu/target/m68k/op_helper.c
@@ -790,7 +790,7 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
 #endif
         {
             /* Tell the main loop we need to serialize this insn.  */
-            cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+            cpu_loop_exit_atomic(env_cpu(env), ra);
         }
     } else {
         /* We're executing in a serial context -- no need to be atomic.  */
diff --git a/qemu/target/mips/cpu.h b/qemu/target/mips/cpu.h
index 552fd057..3af2abb5 100644
--- a/qemu/target/mips/cpu.h
+++ b/qemu/target/mips/cpu.h
@@ -1074,8 +1074,6 @@ static inline MIPSCPU *mips_env_get_cpu(CPUMIPSState *env)
     return container_of(env, MIPSCPU, env);
 }

-#define ENV_GET_CPU(e) CPU(mips_env_get_cpu(e))
-
 #define ENV_OFFSET offsetof(MIPSCPU, env)

 void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf);
diff --git a/qemu/target/riscv/cpu.h b/qemu/target/riscv/cpu.h
index a624cb56..ae69ea3a 100644
--- a/qemu/target/riscv/cpu.h
+++ b/qemu/target/riscv/cpu.h
@@ -241,7 +241,6 @@ extern const char * const riscv_fpr_regnames[];
 extern const char * const riscv_excp_names[];
 extern const char * const riscv_intr_names[];

-#define ENV_GET_CPU(e) CPU(riscv_env_get_cpu(e))
 #define ENV_OFFSET offsetof(RISCVCPU, env)

 void riscv_cpu_do_interrupt(CPUState *cpu);
diff --git a/qemu/target/sparc/cpu.h b/qemu/target/sparc/cpu.h
index 771758b4..60417530 100644
--- a/qemu/target/sparc/cpu.h
+++ b/qemu/target/sparc/cpu.h
@@ -533,8 +533,6 @@ static inline SPARCCPU *sparc_env_get_cpu(CPUSPARCState *env)
     return container_of(env, SPARCCPU, env);
 }

-#define ENV_GET_CPU(e) CPU(sparc_env_get_cpu(e))
-
 #define ENV_OFFSET offsetof(SPARCCPU, env)

 void sparc_cpu_do_interrupt(CPUState *cpu);
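
Note (not part of the patch): the new env_cpu() helper in cpu-all.h works because, for whichever target is being built, ArchCPU is the per-target CPU struct whose first member is the embedded CPUState and which contains the architecture env, so container_of() can walk from the env pointer back to the containing CPU object. The sketch below is a minimal, self-contained illustration of that pattern; DemoCPU, DemoEnv, and demo_env_cpu() are made-up names, not the actual QEMU types.

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for CPUState / CPUArchState / ArchCPU. */
typedef struct CPUState { int cpu_index; } CPUState;
typedef struct DemoEnv  { int regs[4]; } DemoEnv;

typedef struct DemoCPU {
    CPUState parent_obj;   /* embedded CPUState, first member as in QEMU's ArchCPU structs */
    DemoEnv  env;          /* the per-target architecture state */
} DemoCPU;

/* Simplified container_of: recover the struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Same shape as the env_cpu() added in cpu-all.h, using the demo types. */
static inline CPUState *demo_env_cpu(DemoEnv *env)
{
    DemoCPU *cpu = container_of(env, DemoCPU, env);
    return &cpu->parent_obj;
}

int main(void)
{
    DemoCPU cpu = { .parent_obj = { .cpu_index = 7 } };
    /* Walking from &cpu.env back to the CPUState recovers the same object. */
    printf("%d\n", demo_env_cpu(&cpu.env)->cpu_index);   /* prints 7 */
    return 0;
}
```

Because the conversion is pure pointer arithmetic over the containing struct, replacing the per-target ENV_GET_CPU(e) macros with one inline env_cpu(env) keeps the behaviour identical while removing the need for each target header to define its own accessor.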