target/arm: Convert the VCVT-from-f16 insns to decodetree

Convert the VCVTT, VCVTB instructions that deal with conversion
from half-precision floats to f32 or f64 to decodetree.

Since we're no longer constrained to the old decoder's style
using cpu_F0s and cpu_F0d we can perform a direct 16 bit
load of the right half of the input single-precision register
rather than loading the full 32 bits and then doing a
separate shift or sign-extension.

Backports commit b623d803dda805f07aadcbf098961fde27315c19 from qemu
This commit is contained in:
Peter Maydell 2019-06-13 18:59:06 -04:00 committed by Lioncash
parent e6cc2616d2
commit 7d927b2d0e
No known key found for this signature in database
GPG key ID: 4E3C3CC1031BA9C7
3 changed files with 91 additions and 55 deletions

View file

@ -30,6 +30,26 @@
#include "decode-vfp.inc.c"
#include "decode-vfp-uncond.inc.c"
/*
* Return the offset of a 16-bit half of the specified VFP single-precision
* register. If top is true, returns the top 16 bits; otherwise the bottom
* 16 bits.
*/
/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    /*
     * On a big-endian host the high half of the 32-bit register sits at
     * the lower address, so the byte adjustment is inverted.
     */
#ifdef HOST_WORDS_BIGENDIAN
    return vfp_reg_offset(false, reg) + (top ? 0 : 2);
#else
    return vfp_reg_offset(false, reg) + (top ? 2 : 0);
#endif
}
/*
* Check that VFP access is enabled. If it is, do the necessary
* M-profile lazy-FP handling and then return true.
@ -2040,3 +2060,67 @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
return true;
}
/*
 * VCVTB/VCVTT: convert a half-precision value held in one 16-bit half
 * of an S register to single precision. Returns false to UNDEF when the
 * half-precision extension is absent.
 */
static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_ptr fpst;
    TCGv_i32 ahp;
    TCGv_i32 rm;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(s, false);
    ahp = get_ahp_flag(s);

    /* The T bit tells us if we want the low or high 16 bits of Vm */
    rm = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_ld16u_i32(tcg_ctx, rm, tcg_ctx->cpu_env, vfp_f16_offset(a->vm, a->t));
    gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, rm, rm, fpst, ahp);
    neon_store_reg32(s, rm, a->vd);

    tcg_temp_free_i32(tcg_ctx, rm);
    tcg_temp_free_i32(tcg_ctx, ahp);
    tcg_temp_free_ptr(tcg_ctx, fpst);
    return true;
}
/*
 * VCVTB/VCVTT: convert a half-precision value held in one 16-bit half
 * of an S register to double precision. UNDEFs when the fp16 double
 * conversion extension is absent, or for D16-D31 without aa32_fp_d32.
 */
static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_ptr fpst;
    TCGv_i32 ahp;
    TCGv_i32 rm;
    TCGv_i64 rd;

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(s, false);
    ahp = get_ahp_flag(s);

    /* The T bit tells us if we want the low or high 16 bits of Vm */
    rm = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_ld16u_i32(tcg_ctx, rm, tcg_ctx->cpu_env, vfp_f16_offset(a->vm, a->t));

    rd = tcg_temp_new_i64(tcg_ctx);
    gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, rd, rm, fpst, ahp);
    neon_store_reg64(s, rd, a->vd);

    tcg_temp_free_i64(tcg_ctx, rd);
    tcg_temp_free_i32(tcg_ctx, rm);
    tcg_temp_free_i32(tcg_ctx, ahp);
    tcg_temp_free_ptr(tcg_ctx, fpst);
    return true;
}

View file

@ -3163,7 +3163,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
return 1;
case 15:
switch (rn) {
case 0 ... 3:
case 0 ... 5:
case 8 ... 11:
/* Already handled by decodetree */
return 1;
@ -3177,24 +3177,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
if (op == 15) {
/* rn is opcode, encoded as per VFP_SREG_N. */
switch (rn) {
case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */
case 0x05: /* vcvtt.f64.f16, vcvtt.f32.f16 */
/*
* VCVTB, VCVTT: only present with the halfprec extension
* UNPREDICTABLE if bit 8 is set prior to ARMv8
* (we choose to UNDEF)
*/
if (dp) {
if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
return 1;
}
} else {
if (!dc_isar_feature(aa32_fp16_spconv, s)) {
return 1;
}
}
rm_is_dp = false;
break;
case 0x06: /* vcvtb.f16.f32, vcvtb.f16.f64 */
case 0x07: /* vcvtt.f16.f32, vcvtt.f16.f64 */
if (dp) {
@ -3336,42 +3318,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
switch (op) {
case 15: /* extension space */
switch (rn) {
case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
{
TCGv_ptr fpst = get_fpstatus_ptr(s, false);
TCGv_i32 ahp_mode = get_ahp_flag(s);
tmp = gen_vfp_mrs(s);
tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp);
if (dp) {
gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, s->F0d, tmp,
fpst, ahp_mode);
} else {
gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, s->F0s, tmp,
fpst, ahp_mode);
}
tcg_temp_free_i32(tcg_ctx, ahp_mode);
tcg_temp_free_ptr(tcg_ctx, fpst);
tcg_temp_free_i32(tcg_ctx, tmp);
break;
}
case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
{
TCGv_ptr fpst = get_fpstatus_ptr(s, false);
TCGv_i32 ahp = get_ahp_flag(s);
tmp = gen_vfp_mrs(s);
tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16);
if (dp) {
gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, s->F0d, tmp,
fpst, ahp);
} else {
gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, s->F0s, tmp,
fpst, ahp);
}
tcg_temp_free_i32(tcg_ctx, tmp);
tcg_temp_free_i32(tcg_ctx, ahp);
tcg_temp_free_ptr(tcg_ctx, fpst);
break;
}
case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
{
TCGv_ptr fpst = get_fpstatus_ptr(s, false);

View file

@ -181,3 +181,9 @@ VCMP_sp ---- 1110 1.11 010 z:1 .... 1010 e:1 1.0 .... \
vd=%vd_sp vm=%vm_sp
VCMP_dp ---- 1110 1.11 010 z:1 .... 1011 e:1 1.0 .... \
vd=%vd_dp vm=%vm_dp
# VCVTT and VCVTB from f16: Vd format depends on size bit; Vm is always vm_sp
VCVT_f32_f16 ---- 1110 1.11 0010 .... 1010 t:1 1.0 .... \
vd=%vd_sp vm=%vm_sp
VCVT_f64_f16 ---- 1110 1.11 0010 .... 1011 t:1 1.0 .... \
vd=%vd_dp vm=%vm_sp