target/arm: Add helpers for VFP register loads and stores

The current VFP code has two different idioms for
loading and storing from the VFP register file:
 1 using the gen_mov_F0_vreg() and similar functions,
   which load and store to a fixed set of TCG globals
   cpu_F0s, cpu_F0d, etc
 2 by direct calls to tcg_gen_ld_f64() and friends

We want to phase out idiom 1 (because the use of the
fixed globals is a relic of a much older version of TCG),
but idiom 2 is quite long-winded:
  tcg_gen_ld_f64(tmp, cpu_env, vfp_reg_offset(true, reg))
requires us to specify the 64-bitness twice, once in
the function name and once by passing 'true' to
vfp_reg_offset(). There's no guard against accidentally
passing the wrong flag.

Instead, let's move to a convention of accessing 64-bit
registers via the existing neon_load_reg64() and
neon_store_reg64(), and provide new neon_load_reg32()
and neon_store_reg32() for the 32-bit equivalents.

Implement the new functions and use them in the code in
translate-vfp.inc.c. We will convert the rest of the VFP
code as we do the decodetree conversion in subsequent
commits.
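
As an illustration (mirroring the change below, so treat it as a
sketch of the convention rather than the whole patch), a 32-bit
load that currently reads

  tcg_gen_ld_f32(tcg_ctx, tmp, tcg_ctx->cpu_env, vfp_reg_offset(false, reg));

becomes simply

  neon_load_reg32(s, tmp, reg);

where neon_load_reg32() is a thin inline wrapper that computes the
register offset itself, following the existing neon_load_reg64()
pattern and this fork's tcg_ctx plumbing:

  static inline void neon_load_reg32(DisasContext *s, TCGv_i32 var, int reg)
  {
      TCGContext *tcg_ctx = s->uc->tcg_ctx;
      /* 32-bit VFP register: always a single-precision slot */
      tcg_gen_ld_i32(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(false, reg));
  }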

Backports commit 160f3b64c5cc4c8a09a1859edc764882ce6ad6bf from qemu
Peter Maydell, 2019-06-13 17:01:57 -04:00 (committed by Lioncash)
parent 033a386ffb
commit 7265161108
2 changed files with 32 additions and 20 deletions

@@ -182,8 +182,8 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
         tcg_gen_ext_i32_i64(tcg_ctx, nf, tcg_ctx->cpu_NF);
         tcg_gen_ext_i32_i64(tcg_ctx, vf, tcg_ctx->cpu_VF);
 
-        tcg_gen_ld_f64(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn));
-        tcg_gen_ld_f64(tcg_ctx, frm, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg64(s, frn, rn);
+        neon_load_reg64(s, frm, rm);
         switch (a->cc) {
         case 0: /* eq: Z */
             tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, dest, zf, zero,
@@ -210,7 +210,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
             tcg_temp_free_i64(tcg_ctx, tmp);
             break;
         }
-        tcg_gen_st_f64(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg64(s, dest, rd);
         tcg_temp_free_i64(tcg_ctx, frn);
         tcg_temp_free_i64(tcg_ctx, frm);
         tcg_temp_free_i64(tcg_ctx, dest);
@@ -229,8 +229,8 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
         frn = tcg_temp_new_i32(tcg_ctx);
         frm = tcg_temp_new_i32(tcg_ctx);
         dest = tcg_temp_new_i32(tcg_ctx);
-        tcg_gen_ld_f32(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn));
-        tcg_gen_ld_f32(tcg_ctx, frm, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg32(s, frn, rn);
+        neon_load_reg32(s, frm, rm);
         switch (a->cc) {
         case 0: /* eq: Z */
             tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, dest, tcg_ctx->cpu_ZF, zero,
@@ -257,7 +257,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
             tcg_temp_free_i32(tcg_ctx, tmp);
             break;
         }
-        tcg_gen_st_f32(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg32(s, dest, rd);
         tcg_temp_free_i32(tcg_ctx, frn);
         tcg_temp_free_i32(tcg_ctx, frm);
         tcg_temp_free_i32(tcg_ctx, dest);
@@ -302,14 +302,14 @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
         frm = tcg_temp_new_i64(tcg_ctx);
         dest = tcg_temp_new_i64(tcg_ctx);
 
-        tcg_gen_ld_f64(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn));
-        tcg_gen_ld_f64(tcg_ctx, frm, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg64(s, frn, rn);
+        neon_load_reg64(s, frm, rm);
         if (vmin) {
             gen_helper_vfp_minnumd(tcg_ctx, dest, frn, frm, fpst);
         } else {
             gen_helper_vfp_maxnumd(tcg_ctx, dest, frn, frm, fpst);
         }
-        tcg_gen_st_f64(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg64(s, dest, rd);
         tcg_temp_free_i64(tcg_ctx, frn);
         tcg_temp_free_i64(tcg_ctx, frm);
         tcg_temp_free_i64(tcg_ctx, dest);
@@ -320,14 +320,14 @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
         frm = tcg_temp_new_i32(tcg_ctx);
         dest = tcg_temp_new_i32(tcg_ctx);
 
-        tcg_gen_ld_f32(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn));
-        tcg_gen_ld_f32(tcg_ctx, frm, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg32(s, frn, rn);
+        neon_load_reg32(s, frm, rm);
         if (vmin) {
             gen_helper_vfp_minnums(tcg_ctx, dest, frn, frm, fpst);
         } else {
             gen_helper_vfp_maxnums(tcg_ctx, dest, frn, frm, fpst);
         }
-        tcg_gen_st_f32(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg32(s, dest, rd);
         tcg_temp_free_i32(tcg_ctx, frn);
         tcg_temp_free_i32(tcg_ctx, frm);
         tcg_temp_free_i32(tcg_ctx, dest);
@@ -384,9 +384,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
         TCGv_i64 tcg_res;
         tcg_op = tcg_temp_new_i64(tcg_ctx);
         tcg_res = tcg_temp_new_i64(tcg_ctx);
-        tcg_gen_ld_f64(tcg_ctx, tcg_op, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg64(s, tcg_op, rm);
         gen_helper_rintd(tcg_ctx, tcg_res, tcg_op, fpst);
-        tcg_gen_st_f64(tcg_ctx, tcg_res, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg64(s, tcg_res, rd);
         tcg_temp_free_i64(tcg_ctx, tcg_op);
         tcg_temp_free_i64(tcg_ctx, tcg_res);
     } else {
@@ -394,9 +394,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
         TCGv_i32 tcg_res;
         tcg_op = tcg_temp_new_i32(tcg_ctx);
         tcg_res = tcg_temp_new_i32(tcg_ctx);
-        tcg_gen_ld_f32(tcg_ctx, tcg_op, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm));
+        neon_load_reg32(s, tcg_op, rm);
         gen_helper_rints(tcg_ctx, tcg_res, tcg_op, fpst);
-        tcg_gen_st_f32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd));
+        neon_store_reg32(s, tcg_res, rd);
         tcg_temp_free_i32(tcg_ctx, tcg_op);
         tcg_temp_free_i32(tcg_ctx, tcg_res);
     }
@@ -446,14 +446,14 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
         tcg_double = tcg_temp_new_i64(tcg_ctx);
         tcg_res = tcg_temp_new_i64(tcg_ctx);
         tcg_tmp = tcg_temp_new_i32(tcg_ctx);
-        tcg_gen_ld_f64(tcg_ctx, tcg_double, tcg_ctx->cpu_env, vfp_reg_offset(1, rm));
+        neon_load_reg64(s, tcg_double, rm);
         if (is_signed) {
             gen_helper_vfp_tosld(tcg_ctx, tcg_res, tcg_double, tcg_shift, fpst);
         } else {
             gen_helper_vfp_tould(tcg_ctx, tcg_res, tcg_double, tcg_shift, fpst);
         }
         tcg_gen_extrl_i64_i32(tcg_ctx, tcg_tmp, tcg_res);
-        tcg_gen_st_f32(tcg_ctx, tcg_tmp, tcg_ctx->cpu_env, vfp_reg_offset(0, rd));
+        neon_store_reg32(s, tcg_tmp, rd);
         tcg_temp_free_i32(tcg_ctx, tcg_tmp);
         tcg_temp_free_i64(tcg_ctx, tcg_res);
         tcg_temp_free_i64(tcg_ctx, tcg_double);
@@ -461,13 +461,13 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
         TCGv_i32 tcg_single, tcg_res;
         tcg_single = tcg_temp_new_i32(tcg_ctx);
         tcg_res = tcg_temp_new_i32(tcg_ctx);
-        tcg_gen_ld_f32(tcg_ctx, tcg_single, tcg_ctx->cpu_env, vfp_reg_offset(0, rm));
+        neon_load_reg32(s, tcg_single, rm);
         if (is_signed) {
             gen_helper_vfp_tosls(tcg_ctx, tcg_res, tcg_single, tcg_shift, fpst);
         } else {
             gen_helper_vfp_touls(tcg_ctx, tcg_res, tcg_single, tcg_shift, fpst);
         }
-        tcg_gen_st_f32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, vfp_reg_offset(0, rd));
+        neon_store_reg32(s, tcg_res, rd);
         tcg_temp_free_i32(tcg_ctx, tcg_res);
         tcg_temp_free_i32(tcg_ctx, tcg_single);
     }

@@ -1764,6 +1764,18 @@ static inline void neon_store_reg64(DisasContext *s, TCGv_i64 var, int reg)
     tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(1, reg));
 }
 
+static inline void neon_load_reg32(DisasContext *s, TCGv_i32 var, int reg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    tcg_gen_ld_i32(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(false, reg));
+}
+
+static inline void neon_store_reg32(DisasContext *s, TCGv_i32 var, int reg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(false, reg));
+}
+
 static TCGv_ptr vfp_reg_ptr(DisasContext *s, bool dp, int reg)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;