Mirror of https://github.com/yuzu-emu/unicorn.git
cpu: Move icount_decr to CPUNegativeOffsetState

Amusingly, we had already ignored the comment to keep this value at the
end of CPUState. This restores the minimum negative offset from TCG_AREG0
for code generation.

For the couple of uses within qom/cpu.c, without NEED_CPU_H, add a pointer
from the CPUState object to the IcountDecr object within
CPUNegativeOffsetState.

Backports commit 5e1401969b25f676fee6b1c564441759cf967a43 from qemu
parent 8f53f09a05
commit d7ea41c3a3
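The layout idea in the message can be sketched in isolation. TCG-generated code addresses per-CPU state relative to TCG_AREG0, which holds the address of the architecture-specific env member of ArchCPU, so a field stored in the "neg" block placed directly before env sits at a small negative offset from that register. The program below is a stand-alone illustration only; the Demo* types are invented stand-ins, not the real QEMU/unicorn structures.

/* Minimal sketch, assuming made-up Demo* types that only mimic the
 * ordering of ArchCPU: parent object, then "neg", then "env". */
#include <stdio.h>
#include <stddef.h>

typedef struct DemoIcountDecr { unsigned int u32; } DemoIcountDecr;
typedef struct DemoNegState { DemoIcountDecr icount_decr; } DemoNegState;
typedef struct DemoEnv { long regs[32]; } DemoEnv;

typedef struct DemoArchCPU {
    char parent[128];       /* stand-in for the CPUState parent object */
    DemoNegState neg;       /* lives immediately before env            */
    DemoEnv env;            /* TCG_AREG0 points here while code runs   */
} DemoArchCPU;

int main(void)
{
    /* Small negative offset: cheap to encode in host load/store insns. */
    long off = (long)offsetof(DemoArchCPU, neg.icount_decr.u32)
             - (long)offsetof(DemoArchCPU, env);
    printf("icount_decr.u32 sits at env%+ld bytes\n", off);
    return 0;
}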
					
				
			
@@ -442,14 +442,14 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
 #ifdef CONFIG_USER_ONLY
         abort();
 #else
-        int insns_left = cpu->icount_decr.u32;
+        int insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
         *last_tb = NULL;
         if (cpu->icount_extra && insns_left >= 0) {
             /* Refill decrementer and continue execution.  */
             cpu->icount_extra += insns_left;
             insns_left = MIN(0xffff, cpu->icount_extra);
             cpu->icount_extra -= insns_left;
-            cpu->icount_decr.u16.low = insns_left;
+            cpu_neg(cpu)->icount_decr.u16.low = insns_left;
         } else {
             if (insns_left > 0) {
                 /* Execute remaining instructions.  */
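The refill branch in the hunk above splits a large instruction budget into 16-bit windows, since only the low half of icount_decr counts instructions. Below is a compilable toy version of just that arithmetic, with plain local variables standing in for the CPUState fields; the names follow the diff, but nothing else here is real unicorn/QEMU code.

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    long icount_extra = 100000; /* instructions owed beyond the 16-bit window     */
    int insns_left = 7;         /* leftover budget read back from icount_decr.u32 */

    if (icount_extra && insns_left >= 0) {
        /* Refill decrementer and continue execution (as in the diff):
         * fold the leftover back in, then hand out at most 0xffff insns. */
        icount_extra += insns_left;
        insns_left = (int)MIN(0xffff, icount_extra);
        icount_extra -= insns_left;
    }
    printf("next window: %d insns, %ld still pending\n", insns_left, icount_extra);
    return 0;
}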
				
			
@@ -315,13 +315,15 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
     return -1;
 
 found:
-    // UNICORN: Commented out
-    //if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
-    //    assert(use_icount);
-    //    /* Reset the cycle counter to the start of the block
-    //       and shift if to the number of actually executed instructions */
-    //    cpu->icount_decr.u16.low += num_insns - i;
-    //}
+    // UNICORN: If'd out
+#if 0
+    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
+        assert(use_icount);
+        /* Reset the cycle counter to the start of the block
+           and shift if to the number of actually executed instructions */
+        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
+    }
+#endif
     restore_state_to_opc(env, tb, data);
 
 #ifdef CONFIG_PROFILER
@@ -1865,11 +1867,11 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                   (void *)retaddr);
     }
-    n = cpu->icount_decr.u16.low + tb->icount;
+    n = cpu_neg(cpu)->icount_decr.u16.low + tb->icount;
     cpu_restore_state_from_tb(cpu, tb, retaddr, true);
     /* Calculate how many instructions had been executed before the fault
        occurred.  */
-    n = n - cpu->icount_decr.u16.low;
+    n = n - cpu_neg(cpu)->icount_decr.u16.low;
     /* Generate a new TB ending on the I/O insn.  */
     n++;
     /* On MIPS and SH, delay slot instructions can only be restarted if
@@ -1879,14 +1881,14 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
 #if defined(TARGET_MIPS)
     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
         env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
-        cpu->icount_decr.u16.low++;
+        cpu_neg(cpu)->icount_decr.u16.low++;
         env->hflags &= ~MIPS_HFLAG_BMASK;
     }
 #elif defined(TARGET_SH4)
     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
             && n > 1) {
         env->pc -= 2;
-        cpu->icount_decr.u16.low++;
+        cpu_neg(cpu)->icount_decr.u16.low++;
         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
     }
 #endif
@@ -2003,6 +2005,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
 {
     cpu->interrupt_request |= mask;
     cpu->tcg_exit_req = 1;
+    atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
 }
 
 #if 0
@@ -384,6 +384,7 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu);
 static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
 {
     cpu->parent_obj.env_ptr = &cpu->env;
+    cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
 }
 
 /**
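This hunk is the second half of the commit message: qom/cpu.c is compiled without NEED_CPU_H, so it cannot name ArchCPU or its "neg" member, and instead follows a pointer that the subclass wires up here. Below is a stand-in sketch of that pattern, using invented Demo* types rather than the real ones.

#include <stdio.h>

typedef union DemoIcountDecr {
    unsigned int u32;
    struct { unsigned short low, high; } u16;  /* little-endian field order assumed */
} DemoIcountDecr;

typedef struct DemoCPUState {
    DemoIcountDecr *icount_decr_ptr;   /* all that base-object code gets to see */
} DemoCPUState;

typedef struct DemoArchCPU {
    DemoCPUState parent_obj;
    struct { DemoIcountDecr icount_decr; } neg;
    int env;
} DemoArchCPU;

/* Plays the role of cpu_set_cpustate_pointers(): the subclass knows the layout. */
static void demo_set_pointers(DemoArchCPU *cpu)
{
    cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
}

/* Plays the role of base-only code such as cpu_exit(): no ArchCPU in sight. */
static void demo_request_exit(DemoCPUState *cpu)
{
    cpu->icount_decr_ptr->u16.high = (unsigned short)-1;
}

int main(void)
{
    DemoArchCPU cpu = { 0 };
    demo_set_pointers(&cpu);
    demo_request_exit(&cpu.parent_obj);
    printf("icount_decr.u32 = 0x%08x\n", cpu.neg.icount_decr.u32);
    return 0;
}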
				
			
@@ -34,6 +34,7 @@
 #include "exec/hwaddr.h"
 #endif
 #include "exec/memattrs.h"
+#include "qom/cpu.h"
 
 #include "cpu-param.h"
 
@@ -220,7 +221,7 @@ typedef struct CPUTLBDesc {
  * before CPUArchState, as a field named "neg".
  */
 typedef struct CPUNegativeOffsetState {
-    /* Empty */
+    IcountDecr icount_decr;
 } CPUNegativeOffsetState;
 
 #endif
@@ -5,8 +5,6 @@
 
 /* Helpers for instruction counting code generation.  */
 
-#define ENV_OFFSET   offsetof(ArchCPU, env)
-
 //static TCGOp *icount_start_insn;
 
 static inline void gen_tb_start(TCGContext *tcg_ctx, TranslationBlock *tb)
@@ -17,7 +15,7 @@ static inline void gen_tb_start(TCGContext *tcg_ctx, TranslationBlock *tb)
     tcg_ctx->exitreq_label = gen_new_label(tcg_ctx);
     flag = tcg_temp_new_i32(tcg_ctx);
     tcg_gen_ld_i32(tcg_ctx, flag, tcg_ctx->cpu_env,
-                   offsetof(CPUState, tcg_exit_req) - ENV_OFFSET);
+                   offsetof(CPUState, tcg_exit_req) - offsetof(ArchCPU, env));
     tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, flag, 0, tcg_ctx->exitreq_label);
     tcg_temp_free_i32(tcg_ctx, flag);
 
@@ -30,7 +28,8 @@ static inline void gen_tb_start(TCGContext *tcg_ctx, TranslationBlock *tb)
     }
 
     tcg_gen_ld_i32(tcg_ctx, count, tcg_ctx->cpu_env,
-                   -ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
+                   offsetof(ArchCPU, neg.icount_decr.u32) -
+                   offsetof(ArchCPU, env));
 
     if (tb_cflags(tb) & CF_USE_ICOUNT) {
         imm = tcg_temp_new_i32(tcg_ctx);
@@ -48,7 +47,8 @@ static inline void gen_tb_start(TCGContext *tcg_ctx, TranslationBlock *tb)
 
     if (tb_cflags(tb) & CF_USE_ICOUNT) {
         tcg_gen_st16_i32(tcg_ctx, count, tcg_ctx->cpu_env,
-                         -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
+                         offsetof(ArchCPU, neg.icount_decr.u16.low) -
+                         offsetof(ArchCPU, env));
     }
 
     tcg_temp_free_i32(tcg_ctx, count);
@@ -69,20 +69,26 @@ static inline void gen_tb_end(TCGContext *tcg_ctx, TranslationBlock *tb, int num
     tcg_gen_exit_tb(tcg_ctx, tb, TB_EXIT_REQUESTED);
 }
 
-#if 0
 static inline void gen_io_start(TCGContext *tcg_ctx)
 {
+#if 0
     TCGv_i32 tmp = tcg_const_i32(tcg_ctx, 1);
-    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->tcg_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
+    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->tcg_env,
+                   offsetof(ArchCPU, parent_obj.can_do_io) -
+                   offsetof(ArchCPU, env));
     tcg_temp_free_i32(tcg_ctx, tmp);
+#endif
 }
 
 static inline void gen_io_end(TCGContext *tcg_ctx)
 {
+#if 0
     TCGv_i32 tmp = tcg_const_i32(tcg_ctx, 0);
-    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->tcg_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
+    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->tcg_env,
+                   offsetof(ArchCPU, parent_obj.can_do_io) -
+                   offsetof(ArchCPU, env));
     tcg_temp_free_i32(tcg_ctx, tmp);
+#endif
 }
-#endif
 
 #endif
@@ -189,17 +189,25 @@ typedef struct CPUClass {
     bool tcg_initialized;
 } CPUClass;
 
+/*
+ * Low 16 bits: number of cycles left, used only in icount mode.
+ * High 16 bits: Set to -1 to force TCG to stop executing linked TBs
+ * for this CPU and return to its top level loop (even in non-icount mode).
+ * This allows a single read-compare-cbranch-write sequence to test
+ * for both decrementer underflow and exceptions.
+ */
+typedef union IcountDecr {
+    uint32_t u32;
+    struct {
 #ifdef HOST_WORDS_BIGENDIAN
-typedef struct icount_decr_u16 {
-    uint16_t high;
-    uint16_t low;
-} icount_decr_u16;
+        uint16_t high;
+        uint16_t low;
 #else
-typedef struct icount_decr_u16 {
-    uint16_t low;
-    uint16_t high;
-} icount_decr_u16;
+        uint16_t low;
+        uint16_t high;
 #endif
+    } u16;
+} IcountDecr;
 
 typedef struct CPUBreakpoint {
     vaddr pc;
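The comment added in this hunk is the heart of the trick: the exit-request flag lives in the high half of the same 32-bit word as the cycle budget, so the TB prologue needs only one signed comparison. Below is a stand-alone demonstration with an invented Demo* union, assuming a little-endian host.

#include <stdio.h>
#include <stdint.h>

typedef union DemoIcountDecr {
    uint32_t u32;
    struct {
        uint16_t low;    /* cycles left, used only in icount mode        */
        uint16_t high;   /* set to -1 to force an exit from the TB chain */
    } u16;               /* field order assumes a little-endian host     */
} DemoIcountDecr;

int main(void)
{
    DemoIcountDecr d = { .u32 = 0 };

    d.u16.low = 1000;            /* plenty of cycle budget left...    */
    d.u16.high = (uint16_t)-1;   /* ...but an exit has been requested */

    /* The single read-compare-cbranch the generated code relies on: */
    int32_t signed_view = (int32_t)d.u32;
    printf("u32 = 0x%08lx, signed = %ld -> %s\n",
           (unsigned long)d.u32, (long)signed_view,
           signed_view < 0 ? "leave the execution loop" : "keep running");
    return 0;
}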
				
			
@@ -286,6 +294,7 @@ struct CPUAddressSpace {
  * @as: Pointer to the first AddressSpace, for the convenience of targets which
  *      only have a single AddressSpace
  * @env_ptr: Pointer to subclass-specific CPUArchState field.
+ * @icount_decr_ptr: Pointer to IcountDecr field within subclass.
  * @next_cpu: Next CPU sharing TB cache.
  * @opaque: User data.
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
@@ -329,6 +338,7 @@ struct CPUState {
     MemoryRegion *memory;
 
     void *env_ptr; /* CPUArchState */
+    IcountDecr *icount_decr_ptr;
 
     /* Accessed in parallel; all accesses must be atomic */
     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
@@ -364,10 +374,6 @@ struct CPUState {
     int cpu_index;
     int cluster_index;
     uint32_t halted;
-    union {
-        uint32_t u32;
-        icount_decr_u16 u16;
-    } icount_decr;
     uint32_t can_do_io;
     int32_t exception_index;
 
@@ -91,6 +91,7 @@ void cpu_exit(CPUState *cpu)
     /* Ensure cpu_exec will see the exit request after TCG has exited.  */
     smp_wmb();
     atomic_set(&cpu->tcg_exit_req, 1);
+    atomic_set(&cpu->icount_decr_ptr->u16.high, -1);
 }
 
 static void cpu_common_noop(CPUState *cpu)
@@ -145,7 +146,7 @@ static void cpu_common_reset(CPUState *cpu)
     cpu->mem_io_pc = 0;
     cpu->mem_io_vaddr = 0;
     cpu->icount_extra = 0;
-    atomic_set(&cpu->icount_decr.u32, 0);
+    atomic_set(&cpu->icount_decr_ptr->u32, 0);
     cpu->can_do_io = 0;
     cpu->exception_index = -1;
     cpu->crash_occurred = false;
@@ -2807,7 +2807,7 @@ void check_exit_request(TCGContext *tcg_ctx)
 
     flag = tcg_temp_new_i32(tcg_ctx);
     tcg_gen_ld_i32(tcg_ctx, flag, tcg_ctx->cpu_env,
-            offsetof(CPUState, tcg_exit_req) - ENV_OFFSET);
+            offsetof(CPUState, tcg_exit_req) - offsetof(ArchCPU, env));
     tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, flag, 0, tcg_ctx->exitreq_label);
     tcg_temp_free_i32(tcg_ctx, flag);
 }
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue