	target/arm: Add gen_mte_check1
Replace existing uses of clean_data_tbi in translate-a64.c that perform a single logical memory access. Leave the helper blank for now to reduce the patch size.

Backports commit 0a405be2b8fd9506a009b10d7d2d98c394b36db6 from qemu
This commit is contained in:
parent 4488858072
commit 582e64f348
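Concretely, every converted call site in translate-a64.c follows the same shape. A sketch of the before/after pattern, with the argument meanings of the new gen_mte_check1 annotated (taken from the hunks below):

    /* Before: only strip the top byte (TBI) from the address. */
    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));

    /* After: additionally emit the MTE check for a single access. */
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                true,       /* is_write: this site is a store          */
                                rn != 31,   /* tag_checked: SP-based accesses (rn == 31)
                                               are passed as unchecked at these sites   */
                                size);      /* log2 of the access size                  */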
					
@@ -3583,6 +3583,7 @@
 #define helper_msr_i_daifclear helper_msr_i_daifclear_aarch64
 #define helper_msr_i_daifset helper_msr_i_daifset_aarch64
 #define helper_msr_i_spsel helper_msr_i_spsel_aarch64
+#define helper_mte_check1 helper_mte_check1_aarch64
 #define helper_neon_addlp_s16 helper_neon_addlp_s16_aarch64
 #define helper_neon_addlp_s8 helper_neon_addlp_s8_aarch64
 #define helper_neon_addlp_u16 helper_neon_addlp_u16_aarch64
@@ -3583,6 +3583,7 @@
 #define helper_msr_i_daifclear helper_msr_i_daifclear_aarch64eb
 #define helper_msr_i_daifset helper_msr_i_daifset_aarch64eb
 #define helper_msr_i_spsel helper_msr_i_spsel_aarch64eb
+#define helper_mte_check1 helper_mte_check1_aarch64eb
 #define helper_neon_addlp_s16 helper_neon_addlp_s16_aarch64eb
 #define helper_neon_addlp_s8 helper_neon_addlp_s8_aarch64eb
 #define helper_neon_addlp_u16 helper_neon_addlp_u16_aarch64eb
@@ -3718,6 +3718,7 @@ aarch64_symbols = (
     'helper_msr_i_daifclear',
     'helper_msr_i_daifset',
     'helper_msr_i_spsel',
+    'helper_mte_check1',
     'helper_neon_addlp_s16',
     'helper_neon_addlp_s8',
     'helper_neon_addlp_u16',
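For context: this symbol list (presumably in header_gen.py, the generator for the two per-target headers shown above) produces one #define per entry and per target suffix, so the aarch64 and aarch64eb objects can be linked into a single Unicorn binary without symbol clashes. Illustrative generated output for the new entry, matching the first two hunks:

    /* generated into the aarch64 header */
    #define helper_mte_check1 helper_mte_check1_aarch64

    /* generated into the aarch64eb (big-endian) header */
    #define helper_mte_check1 helper_mte_check1_aarch64eb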
@@ -104,6 +104,7 @@ DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
 DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
 
+DEF_HELPER_FLAGS_3(mte_check1, TCG_CALL_NO_WG, i64, env, i32, i64)
 DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32)
 DEF_HELPER_FLAGS_3(ldg, TCG_CALL_NO_WG, i64, env, i64, i64)
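For readers unfamiliar with QEMU's helper macros: DEF_HELPER_FLAGS_3 declares a three-argument TCG helper, the i64/env/i32/i64 letters give the return and argument types, and TCG_CALL_NO_WG tells the code generator that the helper does not write TCG globals (it may still read them and raise exceptions). The declaration therefore corresponds to the prototype implemented by the stub in mte_helper.c below:

    /* Expansion sketch of DEF_HELPER_FLAGS_3(mte_check1, TCG_CALL_NO_WG, i64, env, i32, i64) */
    uint64_t helper_mte_check1(CPUARMState *env, uint32_t desc, uint64_t ptr);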
@@ -1314,6 +1314,14 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
 #define LOG2_TAG_GRANULE 4
 #define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)
 
+/* Bits within a descriptor passed to the helper_mte_check* functions. */
+FIELD(MTEDESC, MIDX,  0, 4)
+FIELD(MTEDESC, TBI,   4, 2)
+FIELD(MTEDESC, TCMA,  6, 2)
+FIELD(MTEDESC, WRITE, 8, 1)
+FIELD(MTEDESC, ESIZE, 9, 5)
+FIELD(MTEDESC, TSIZE, 14, 10)  /* mte_checkN only */
+
 static inline int allocation_tag_from_addr(uint64_t ptr)
 {
     return extract64(ptr, 56, 4);
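The FIELD() declarations describe the layout of the 32-bit descriptor passed to the helper and are used with QEMU's FIELD_DP32/FIELD_EX32 accessors (registerfields.h). A minimal sketch of packing on the translate side and unpacking on the helper side; the unpacking half is an assumption about how a later, fleshed-out helper would read the descriptor, and the variable names are illustrative only:

    /* Packing (mirrors gen_mte_check1_mmuidx, added below): */
    uint32_t desc = 0;
    desc = FIELD_DP32(desc, MTEDESC, MIDX,  mem_index);       /* bits [3:0]  */
    desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);        /* bit 8       */
    desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_size);  /* bits [13:9] */

    /* Unpacking (assumed helper-side usage): */
    int  midx  = FIELD_EX32(desc, MTEDESC, MIDX);
    bool write = FIELD_EX32(desc, MTEDESC, WRITE);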
@@ -358,3 +358,11 @@ void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
         memset(mem, tag_pair, tag_bytes);
     }
 }
+
+/*
+ * Perform an MTE checked access for a single logical or atomic access.
+ */
+uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+    return ptr;
+}
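The stub simply hands the pointer back; the call sites below use the helper's return value as the address for the actual memory access, so a later patch can substitute a checked (and possibly cleaned) address without touching the translator again. As background, the allocation tag that a finished helper would compare lives in bits [59:56] of the pointer, exactly what allocation_tag_from_addr() in the internals.h hunk extracts. A standalone illustration, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Same computation as extract64(ptr, 56, 4) in allocation_tag_from_addr(). */
    static int allocation_tag_from_addr(uint64_t ptr)
    {
        return (int)((ptr >> 56) & 0xf);
    }

    int main(void)
    {
        uint64_t tagged = 0x0b00007fdeadbeefULL;  /* logical tag 0xb in the top byte */
        printf("tag = 0x%x\n", allocation_tag_from_addr(tagged));
        return 0;
    }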
@@ -341,20 +341,19 @@ static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
 }
 
 /*
- * Return a "clean" address for ADDR according to TBID.
- * This is always a fresh temporary, as we need to be able to
- * increment this independently of a dirty write-back address.
+ * Handle MTE and/or TBI.
+ *
+ * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
+ * for the tag to be present in the FAR_ELx register.  But for user-only
+ * mode we do not have a TLB with which to implement this, so we must
+ * remove the top byte now.
+ *
+ * Always return a fresh temporary that we can increment independently
+ * of the write-back address.
  */
 static TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
 {
     TCGv_i64 clean = new_tmp_a64(s);
-    /*
-     * In order to get the correct value in the FAR_ELx register,
-     * we must present the memory subsystem with the "dirty" address
-     * including the TBI.  In system mode we can make this work via
-     * the TLB, dropping the TBI during translation.  But for user-only
-     * mode we don't have that option, and must remove the top byte now.
-     */
 #ifdef CONFIG_USER_ONLY
     gen_top_byte_ignore(s, clean, addr, s->tbid);
 #else
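For the user-only case described in the rewritten comment, "removing the top byte" of a data address means clearing bits [63:56]. A simplified standalone illustration; the real gen_top_byte_ignore emits TCG ops for this, and also handles the TBI-disabled case and regimes where the result must instead be sign-extended from bit 55, which this sketch ignores:

    #include <stdint.h>

    /* Data address with TBI in effect: drop the tag/TBI byte in bits [63:56]. */
    static uint64_t drop_top_byte(uint64_t dirty)
    {
        return dirty & 0x00ffffffffffffffULL;
    }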
@@ -383,6 +382,45 @@ static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
     tcg_temp_free_i32(tcg_ctx, t_size);
 }
 
+/*
+ * For MTE, check a single logical or atomic access.  This probes a single
+ * address, the exact one specified.  The size and alignment of the access
+ * is not relevant to MTE, per se, but watchpoints do require the size,
+ * and we want to recognize those before making any other changes to state.
+ */
+static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
+                                      bool is_write, bool tag_checked,
+                                      int log2_size, bool is_unpriv,
+                                      int core_idx)
+{
+    if (tag_checked && s->mte_active[is_unpriv]) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        TCGv_i32 tcg_desc;
+        TCGv_i64 ret;
+        int desc = 0;
+
+        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
+        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
+        desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_size);
+        tcg_desc = tcg_const_i32(tcg_ctx, desc);
+
+        ret = new_tmp_a64(s);
+        gen_helper_mte_check1(tcg_ctx, ret, tcg_ctx->cpu_env, tcg_desc, addr);
+        tcg_temp_free_i32(tcg_ctx, tcg_desc);
+
+        return ret;
+    }
+    return clean_data_tbi(s, addr);
+}
+
+static TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
+                               bool tag_checked, int log2_size)
+{
+    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
+                                 false, get_mem_index(s));
+}
+
 typedef struct DisasCompare64 {
     TCGCond cond;
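The new function has two paths: when the access is tag-checked and MTE is active for the current (possibly unprivileged) context, it packs the access parameters into an MTEDESC descriptor and calls the helper, whose result becomes the address used for the access; otherwise it falls back to the plain clean_data_tbi() strip. The _mmuidx variant exists for the unprivileged LDTR/STTR-style forms converted in disas_ldst_reg_imm9 below, where the MTE check must see the same memory index as the access itself. A condensed argument-to-descriptor map, offered as a reading aid for the call sites that follow rather than as additional code in the patch:

    /* gen_mte_check1(s, addr, is_write, tag_checked, log2_size)
     *   get_mem_index(s) (or the unprivileged index) -> MTEDESC.MIDX
     *   s->tbid, s->tcma                             -> MTEDESC.TBI, MTEDESC.TCMA
     *   is_write                                     -> MTEDESC.WRITE
     *   1 << log2_size                               -> MTEDESC.ESIZE
     * The helper's i64 result is the address the generated code then uses. */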
@@ -2555,7 +2593,7 @@ static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
     if (rn == 31) {
         gen_check_sp_alignment(s);
     }
-    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
     tcg_gen_atomic_cmpxchg_i64(tcg_ctx, tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
                                size | MO_ALIGN | s->be_data);
 }
@@ -2574,7 +2612,9 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
     if (rn == 31) {
         gen_check_sp_alignment(s);
     }
-    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+
+    /* This is a single atomic access, despite the "pair". */
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);
 
     if (size == 2) {
         TCGv_i64 cmp = tcg_temp_new_i64(tcg_ctx);
@@ -2695,7 +2735,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         if (is_lasr) {
             tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL);
         }
-        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+                                    true, rn != 31, size);
         gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
         return;
 
@@ -2704,7 +2745,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         if (rn == 31) {
             gen_check_sp_alignment(s);
         }
-        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+                                    false, rn != 31, size);
         s->is_ldex = true;
         gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
         if (is_lasr) {
@@ -2724,7 +2766,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
             gen_check_sp_alignment(s);
         }
         tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL);
-        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+                                    true, rn != 31, size);
         do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
                   disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         return;
@@ -2740,7 +2783,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         if (rn == 31) {
             gen_check_sp_alignment(s);
         }
-        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+                                    false, rn != 31, size);
         do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
                   disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ);
@@ -2755,7 +2799,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
             if (is_lasr) {
                 tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL);
             }
-            clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+                                        true, rn != 31, size);
             gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
             return;
         }
@@ -2773,7 +2818,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
             if (rn == 31) {
                 gen_check_sp_alignment(s);
             }
-            clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+                                        false, rn != 31, size);
             s->is_ldex = true;
             gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
             if (is_lasr) {
@@ -3069,6 +3115,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
     bool iss_valid = !is_vector;
     bool post_index;
     bool writeback;
+    int memidx;
 
     TCGv_i64 clean_addr, dirty_addr;
 
@@ -3126,7 +3173,11 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
     if (!post_index) {
         tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, imm9);
     }
-    clean_addr = clean_data_tbi(s, dirty_addr);
+
+    memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
+    clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
+                                       writeback || rn != 31,
+                                       size, is_unpriv, memidx);
 
     if (is_vector) {
         if (is_store) {
@@ -3136,7 +3187,6 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
-        int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
         bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
 
         if (is_store) {
@@ -3234,7 +3284,7 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
     ext_and_shift_reg(s, tcg_rm, tcg_rm, opt, shift ? size : 0);
 
     tcg_gen_add_i64(tcg_ctx, dirty_addr, dirty_addr, tcg_rm);
-    clean_addr = clean_data_tbi(s, dirty_addr);
+    clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);
 
     if (is_vector) {
         if (is_store) {
@@ -3320,7 +3370,7 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
     dirty_addr = read_cpu_reg_sp(s, rn, 1);
     offset = imm12 << size;
     tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset);
-    clean_addr = clean_data_tbi(s, dirty_addr);
+    clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);
 
     if (is_vector) {
         if (is_store) {
@@ -3414,7 +3464,7 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
     if (rn == 31) {
         gen_check_sp_alignment(s);
     }
-    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);
 
     if (o3_opc == 014) {
         /*
@@ -3493,7 +3543,8 @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
     tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset);
 
     /* Note that "clean" and "dirty" here refer to TBI not PAC.  */
-    clean_addr = clean_data_tbi(s, dirty_addr);
+    clean_addr = gen_mte_check1(s, dirty_addr, false,
+                                is_wback || rn != 31, size);
 
     tcg_rt = cpu_reg(s, rt);
     do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false,