Mirror of https://github.com/yuzu-emu/unicorn.git (synced 2025-01-08 22:25:27 +00:00)
target/arm: Rename neon_load_reg64 to vfp_load_reg64
The only uses of this function are for loading VFP double-precision values, which have nothing to do with NEON. Backports commit b38b96ca90827012ab8eb045c1337cea83a54c4b
parent 9d87b62578
commit 07c2b70234
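For orientation, here is a small standalone C sketch (not the unicorn/QEMU code) of what the renamed helpers conceptually do: read or write the 64-bit value of a VFP double-precision register D<reg>. The ToyVFPState type and its plain array-backed register file are invented for this illustration; in the real helpers shown at the end of the diff below, the access is a TCG load/store at vfp_reg_offset(true, reg) against cpu_env rather than a direct array access.

/*
 * Standalone sketch only -- models the intent of vfp_load_reg64/vfp_store_reg64.
 * ToyVFPState and the uint64_t array are hypothetical stand-ins for the real
 * TCG-based register file access in target/arm.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VFP_NUM_DREGS 32

typedef struct ToyVFPState {
    uint64_t dregs[VFP_NUM_DREGS];   /* D0..D31, 64 bits each */
} ToyVFPState;

/* Load the 64-bit value of VFP register D<reg> into *var. */
static void vfp_load_reg64(const ToyVFPState *s, uint64_t *var, int reg)
{
    assert(reg >= 0 && reg < VFP_NUM_DREGS);
    *var = s->dregs[reg];
}

/* Store a 64-bit value into VFP register D<reg>. */
static void vfp_store_reg64(ToyVFPState *s, uint64_t var, int reg)
{
    assert(reg >= 0 && reg < VFP_NUM_DREGS);
    s->dregs[reg] = var;
}

int main(void)
{
    ToyVFPState s = { { 0 } };
    uint64_t tmp;

    vfp_store_reg64(&s, 0x3ff0000000000000ull, 5);  /* D5 = bit pattern of 1.0 */
    vfp_load_reg64(&s, &tmp, 5);
    printf("D5 = 0x%016llx\n", (unsigned long long)tmp);
    return 0;
}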
@@ -239,8 +239,8 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
         tcg_gen_ext_i32_i64(tcg_ctx, nf, tcg_ctx->cpu_NF);
         tcg_gen_ext_i32_i64(tcg_ctx, vf, tcg_ctx->cpu_VF);
 
-        neon_load_reg64(s, frn, rn);
-        neon_load_reg64(s, frm, rm);
+        vfp_load_reg64(s, frn, rn);
+        vfp_load_reg64(s, frm, rm);
         switch (a->cc) {
         case 0: /* eq: Z */
             tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, dest, zf, zero,
@@ -267,7 +267,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
             tcg_temp_free_i64(tcg_ctx, tmp);
             break;
         }
-        neon_store_reg64(s, dest, rd);
+        vfp_store_reg64(s, dest, rd);
         tcg_temp_free_i64(tcg_ctx, frn);
         tcg_temp_free_i64(tcg_ctx, frm);
         tcg_temp_free_i64(tcg_ctx, dest);
@@ -390,9 +390,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
         TCGv_i64 tcg_res;
         tcg_op = tcg_temp_new_i64(tcg_ctx);
         tcg_res = tcg_temp_new_i64(tcg_ctx);
-        neon_load_reg64(s, tcg_op, rm);
+        vfp_load_reg64(s, tcg_op, rm);
         gen_helper_rintd(tcg_ctx, tcg_res, tcg_op, fpst);
-        neon_store_reg64(s, tcg_res, rd);
+        vfp_store_reg64(s, tcg_res, rd);
         tcg_temp_free_i64(tcg_ctx, tcg_op);
         tcg_temp_free_i64(tcg_ctx, tcg_res);
     } else {
@@ -469,7 +469,7 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
         tcg_double = tcg_temp_new_i64(tcg_ctx);
         tcg_res = tcg_temp_new_i64(tcg_ctx);
         tcg_tmp = tcg_temp_new_i32(tcg_ctx);
-        neon_load_reg64(s, tcg_double, rm);
+        vfp_load_reg64(s, tcg_double, rm);
         if (is_signed) {
             gen_helper_vfp_tosld(tcg_ctx, tcg_res, tcg_double, tcg_shift, fpst);
         } else {
@@ -1019,9 +1019,9 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
     tmp = tcg_temp_new_i64(tcg_ctx);
     if (a->l) {
         gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
-        neon_store_reg64(s, tmp, a->vd);
+        vfp_store_reg64(s, tmp, a->vd);
     } else {
-        neon_load_reg64(s, tmp, a->vd);
+        vfp_load_reg64(s, tmp, a->vd);
         gen_aa32_st64(s, tmp, addr, get_mem_index(s));
     }
     tcg_temp_free_i64(tcg_ctx, tmp);
@@ -1168,10 +1168,10 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
         if (a->l) {
             /* load */
             gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
-            neon_store_reg64(s, tmp, a->vd + i);
+            vfp_store_reg64(s, tmp, a->vd + i);
         } else {
             /* store */
-            neon_load_reg64(s, tmp, a->vd + i);
+            vfp_load_reg64(s, tmp, a->vd + i);
             gen_aa32_st64(s, tmp, addr, get_mem_index(s));
         }
         tcg_gen_addi_i32(tcg_ctx, addr, addr, offset);
@@ -1438,15 +1438,15 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
     fd = tcg_temp_new_i64(tcg_ctx);
     fpst = fpstatus_ptr(tcg_ctx, FPST_FPCR);
 
-    neon_load_reg64(s, f0, vn);
-    neon_load_reg64(s, f1, vm);
+    vfp_load_reg64(s, f0, vn);
+    vfp_load_reg64(s, f1, vm);
 
     for (;;) {
         if (reads_vd) {
-            neon_load_reg64(s, fd, vd);
+            vfp_load_reg64(s, fd, vd);
         }
         fn(tcg_ctx, fd, f0, f1, fpst);
-        neon_store_reg64(s, fd, vd);
+        vfp_store_reg64(s, fd, vd);
 
         if (veclen == 0) {
             break;
@@ -1455,10 +1455,10 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
         veclen--;
         vd = vfp_advance_dreg(vd, delta_d);
         vn = vfp_advance_dreg(vn, delta_d);
-        neon_load_reg64(s, f0, vn);
+        vfp_load_reg64(s, f0, vn);
         if (delta_m) {
             vm = vfp_advance_dreg(vm, delta_m);
-            neon_load_reg64(s, f1, vm);
+            vfp_load_reg64(s, f1, vm);
         }
     }
 
@@ -1624,11 +1624,11 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
     f0 = tcg_temp_new_i64(tcg_ctx);
     fd = tcg_temp_new_i64(tcg_ctx);
 
-    neon_load_reg64(s, f0, vm);
+    vfp_load_reg64(s, f0, vm);
 
     for (;;) {
         fn(tcg_ctx, fd, f0);
-        neon_store_reg64(s, fd, vd);
+        vfp_store_reg64(s, fd, vd);
 
         if (veclen == 0) {
             break;
@@ -1638,7 +1638,7 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
             /* single source one-many */
             while (veclen--) {
                 vd = vfp_advance_dreg(vd, delta_d);
-                neon_store_reg64(s, fd, vd);
+                vfp_store_reg64(s, fd, vd);
             }
             break;
         }
@@ -1647,7 +1647,7 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
         veclen--;
         vd = vfp_advance_dreg(vd, delta_d);
         vd = vfp_advance_dreg(vm, delta_m);
-        neon_load_reg64(s, f0, vm);
+        vfp_load_reg64(s, f0, vm);
     }
 
     tcg_temp_free_i64(tcg_ctx, f0);
@@ -2202,20 +2202,20 @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
     vm = tcg_temp_new_i64(tcg_ctx);
     vd = tcg_temp_new_i64(tcg_ctx);
 
-    neon_load_reg64(s, vn, a->vn);
-    neon_load_reg64(s, vm, a->vm);
+    vfp_load_reg64(s, vn, a->vn);
+    vfp_load_reg64(s, vm, a->vm);
     if (neg_n) {
         /* VFNMS, VFMS */
         gen_helper_vfp_negd(tcg_ctx, vn, vn);
     }
-    neon_load_reg64(s, vd, a->vd);
+    vfp_load_reg64(s, vd, a->vd);
     if (neg_d) {
         /* VFNMA, VFNMS */
         gen_helper_vfp_negd(tcg_ctx, vd, vd);
     }
     fpst = fpstatus_ptr(tcg_ctx, FPST_FPCR);
     gen_helper_vfp_muladdd(tcg_ctx, vd, vn, vm, vd, fpst);
-    neon_store_reg64(s, vd, a->vd);
+    vfp_store_reg64(s, vd, a->vd);
 
     tcg_temp_free_ptr(tcg_ctx, fpst);
     tcg_temp_free_i64(tcg_ctx, vn);
@@ -2357,7 +2357,7 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
     fd = tcg_const_i64(tcg_ctx, vfp_expand_imm(MO_64, a->imm));
 
     for (;;) {
-        neon_store_reg64(s, fd, vd);
+        vfp_store_reg64(s, fd, vd);
 
         if (veclen == 0) {
             break;
@@ -2515,11 +2515,11 @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
     vd = tcg_temp_new_i64(tcg_ctx);
     vm = tcg_temp_new_i64(tcg_ctx);
 
-    neon_load_reg64(s, vd, a->vd);
+    vfp_load_reg64(s, vd, a->vd);
     if (a->z) {
         tcg_gen_movi_i64(tcg_ctx, vm, 0);
     } else {
-        neon_load_reg64(s, vm, a->vm);
+        vfp_load_reg64(s, vm, a->vm);
     }
 
     if (a->e) {
@@ -2595,7 +2595,7 @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
     tcg_gen_ld16u_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, vfp_f16_offset(a->vm, a->t));
     vd = tcg_temp_new_i64(tcg_ctx);
     gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, vd, tmp, fpst, ahp_mode);
-    neon_store_reg64(s, vd, a->vd);
+    vfp_store_reg64(s, vd, a->vd);
     tcg_temp_free_i32(tcg_ctx, ahp_mode);
     tcg_temp_free_ptr(tcg_ctx, fpst);
     tcg_temp_free_i32(tcg_ctx, tmp);
@@ -2661,7 +2661,7 @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
     tmp = tcg_temp_new_i32(tcg_ctx);
     vm = tcg_temp_new_i64(tcg_ctx);
 
-    neon_load_reg64(s, vm, a->vm);
+    vfp_load_reg64(s, vm, a->vm);
     gen_helper_vfp_fcvt_f64_to_f16(tcg_ctx, tmp, vm, fpst, ahp_mode);
     tcg_temp_free_i64(tcg_ctx, vm);
     tcg_gen_st16_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, vfp_f16_offset(a->vd, a->t));
@@ -2743,10 +2743,10 @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
     }
 
     tmp = tcg_temp_new_i64(tcg_ctx);
-    neon_load_reg64(s, tmp, a->vm);
+    vfp_load_reg64(s, tmp, a->vm);
     fpst = fpstatus_ptr(tcg_ctx, FPST_FPCR);
     gen_helper_rintd(tcg_ctx, tmp, tmp, fpst);
-    neon_store_reg64(s, tmp, a->vd);
+    vfp_store_reg64(s, tmp, a->vd);
     tcg_temp_free_ptr(tcg_ctx, fpst);
     tcg_temp_free_i64(tcg_ctx, tmp);
     return true;
@@ -2835,13 +2835,13 @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
     }
 
     tmp = tcg_temp_new_i64(tcg_ctx);
-    neon_load_reg64(s, tmp, a->vm);
+    vfp_load_reg64(s, tmp, a->vm);
     fpst = fpstatus_ptr(tcg_ctx, FPST_FPCR);
    tcg_rmode = tcg_const_i32(tcg_ctx, float_round_to_zero);
     gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst);
     gen_helper_rintd(tcg_ctx, tmp, tmp, fpst);
     gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst);
-    neon_store_reg64(s, tmp, a->vd);
+    vfp_store_reg64(s, tmp, a->vd);
     tcg_temp_free_ptr(tcg_ctx, fpst);
     tcg_temp_free_i64(tcg_ctx, tmp);
     tcg_temp_free_i32(tcg_ctx, tcg_rmode);
@@ -2920,10 +2920,10 @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
     }
 
     tmp = tcg_temp_new_i64(tcg_ctx);
-    neon_load_reg64(s, tmp, a->vm);
+    vfp_load_reg64(s, tmp, a->vm);
     fpst = fpstatus_ptr(tcg_ctx, FPST_FPCR);
     gen_helper_rintd_exact(tcg_ctx, tmp, tmp, fpst);
-    neon_store_reg64(s, tmp, a->vd);
+    vfp_store_reg64(s, tmp, a->vd);
     tcg_temp_free_ptr(tcg_ctx, fpst);
     tcg_temp_free_i64(tcg_ctx, tmp);
     return true;
@@ -2952,7 +2952,7 @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
     vd = tcg_temp_new_i64(tcg_ctx);
     vfp_load_reg32(s, vm, a->vm);
     gen_helper_vfp_fcvtds(tcg_ctx, vd, vm, tcg_ctx->cpu_env);
-    neon_store_reg64(s, vd, a->vd);
+    vfp_store_reg64(s, vd, a->vd);
     tcg_temp_free_i32(tcg_ctx, vm);
     tcg_temp_free_i64(tcg_ctx, vd);
     return true;
@@ -2979,7 +2979,7 @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
 
     vd = tcg_temp_new_i32(tcg_ctx);
     vm = tcg_temp_new_i64(tcg_ctx);
-    neon_load_reg64(s, vm, a->vm);
+    vfp_load_reg64(s, vm, a->vm);
     gen_helper_vfp_fcvtsd(tcg_ctx, vd, vm, tcg_ctx->cpu_env);
     vfp_store_reg32(s, vd, a->vd);
     tcg_temp_free_i32(tcg_ctx, vd);
@@ -3078,7 +3078,7 @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
         /* u32 -> f64 */
         gen_helper_vfp_uitod(tcg_ctx, vd, vm, fpst);
     }
-    neon_store_reg64(s, vd, a->vd);
+    vfp_store_reg64(s, vd, a->vd);
     tcg_temp_free_i32(tcg_ctx, vm);
     tcg_temp_free_i64(tcg_ctx, vd);
     tcg_temp_free_ptr(tcg_ctx, fpst);
@@ -3110,7 +3110,7 @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
 
     vm = tcg_temp_new_i64(tcg_ctx);
     vd = tcg_temp_new_i32(tcg_ctx);
-    neon_load_reg64(s, vm, a->vm);
+    vfp_load_reg64(s, vm, a->vm);
     gen_helper_vjcvt(tcg_ctx, vd, vm, tcg_ctx->cpu_env);
     vfp_store_reg32(s, vd, a->vd);
     tcg_temp_free_i64(tcg_ctx, vm);
@@ -3262,7 +3262,7 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
     frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
 
     vd = tcg_temp_new_i64(tcg_ctx);
-    neon_load_reg64(s, vd, a->vd);
+    vfp_load_reg64(s, vd, a->vd);
 
     fpst = fpstatus_ptr(tcg_ctx, FPST_FPCR);
     shift = tcg_const_i32(tcg_ctx, frac_bits);
@@ -3297,7 +3297,7 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
         g_assert_not_reached();
     }
 
-    neon_store_reg64(s, vd, a->vd);
+    vfp_store_reg64(s, vd, a->vd);
     tcg_temp_free_i64(tcg_ctx, vd);
     tcg_temp_free_i32(tcg_ctx, shift);
     tcg_temp_free_ptr(tcg_ctx, fpst);
@@ -3401,7 +3401,7 @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
     fpst = fpstatus_ptr(tcg_ctx, FPST_FPCR);
     vm = tcg_temp_new_i64(tcg_ctx);
     vd = tcg_temp_new_i32(tcg_ctx);
-    neon_load_reg64(s, vm, a->vm);
+    vfp_load_reg64(s, vm, a->vm);
 
     if (a->s) {
         if (a->rz) {
@@ -1176,16 +1176,16 @@ static long vfp_reg_offset(bool dp, unsigned reg)
     }
 }
 
-static inline void neon_load_reg64(DisasContext *s, TCGv_i64 var, int reg)
+static inline void vfp_load_reg64(DisasContext *s, TCGv_i64 var, int reg)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(1, reg));
+    tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(true, reg));
 }
 
-static inline void neon_store_reg64(DisasContext *s, TCGv_i64 var, int reg)
+static inline void vfp_store_reg64(DisasContext *s, TCGv_i64 var, int reg)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(1, reg));
+    tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(true, reg));
 }
 
 static inline void vfp_load_reg32(DisasContext *s, TCGv_i32 var, int reg)