target/arm: Convert Neon 3-reg-diff saturating doubling multiplies

Convert the Neon 3-reg-diff insns VQDMULL, VQDMLAL and VQDMLSL:
these are all saturating doubling long multiplies with a possible
accumulate step.
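
For reference, here is a rough scalar sketch of the per-lane semantics for the 16-bit forms. It is illustrative only and not part of the patch; sat_s32, vqdmull_s16, vqdmlal_s16 and the qc flag are made-up stand-ins, not QEMU helpers.

#include <stdint.h>
#include <stdbool.h>

/* Saturate a 64-bit intermediate to the signed 32-bit range,
 * recording saturation in *qc (a stand-in for FPSCR.QC). */
static int32_t sat_s32(int64_t v, bool *qc)
{
    if (v > INT32_MAX) {
        *qc = true;
        return INT32_MAX;
    }
    if (v < INT32_MIN) {
        *qc = true;
        return INT32_MIN;
    }
    return (int32_t)v;
}

/* VQDMULL (16-bit lanes): widen, multiply, double, then saturate. */
static int32_t vqdmull_s16(int16_t a, int16_t b, bool *qc)
{
    return sat_s32(2 * (int64_t)a * (int64_t)b, qc);
}

/* VQDMLAL (16-bit lanes): saturating accumulate of the doubled product. */
static int32_t vqdmlal_s16(int32_t acc, int16_t a, int16_t b, bool *qc)
{
    return sat_s32((int64_t)acc + vqdmull_s16(a, b, qc), qc);
}

VQDMLSL has the same shape except that the doubled product is subtracted; the patch implements it by negating the product and reusing the saturating-add helper.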

These are the last insns in the group which use the pass-over-each-
element loop, so we can delete that code.

Backports commit 9546ca5998d3cbd98a81b2d46a2e92a11b0f78a4 from qemu
Authored by Peter Maydell on 2020-06-16 23:51:54 -04:00; committed by Lioncash
parent 5464405d5c
commit 090426b120
3 changed files with 92 additions and 54 deletions

@@ -454,10 +454,16 @@ Vimm_1r 1111 001 . 1 . 000 ... .... cmode:4 0 . op:1 1 .... @1reg_imm
 VMLAL_S_3d 1111 001 0 1 . .. .... .... 1000 . 0 . 0 .... @3diff
 VMLAL_U_3d 1111 001 1 1 . .. .... .... 1000 . 0 . 0 .... @3diff
+VQDMLAL_3d 1111 001 0 1 . .. .... .... 1001 . 0 . 0 .... @3diff
 VMLSL_S_3d 1111 001 0 1 . .. .... .... 1010 . 0 . 0 .... @3diff
 VMLSL_U_3d 1111 001 1 1 . .. .... .... 1010 . 0 . 0 .... @3diff
+VQDMLSL_3d 1111 001 0 1 . .. .... .... 1011 . 0 . 0 .... @3diff
 VMULL_S_3d 1111 001 0 1 . .. .... .... 1100 . 0 . 0 .... @3diff
 VMULL_U_3d 1111 001 1 1 . .. .... .... 1100 . 0 . 0 .... @3diff
+VQDMULL_3d 1111 001 0 1 . .. .... .... 1101 . 0 . 0 .... @3diff
 ]
 }

@@ -2249,3 +2249,85 @@ DO_VMLAL(VMLAL_S,mull_s,add)
 DO_VMLAL(VMLAL_U,mull_u,add)
 DO_VMLAL(VMLSL_S,mull_s,sub)
 DO_VMLAL(VMLSL_U,mull_u,sub)
+
+static void gen_VQDMULL_16(TCGContext *s, TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
+{
+    gen_helper_neon_mull_s16(s, rd, rn, rm);
+    gen_helper_neon_addl_saturate_s32(s, rd, s->cpu_env, rd, rd);
+}
+
+static void gen_VQDMULL_32(TCGContext *s, TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
+{
+    gen_mull_s32(s, rd, rn, rm);
+    gen_helper_neon_addl_saturate_s64(s, rd, s->cpu_env, rd, rd);
+}
+
+static bool trans_VQDMULL_3d(DisasContext *s, arg_3diff *a)
+{
+    static NeonGenTwoOpWidenFn * const opfn[] = {
+        NULL,
+        gen_VQDMULL_16,
+        gen_VQDMULL_32,
+        NULL,
+    };
+
+    return do_long_3d(s, a, opfn[a->size], NULL);
+}
+
+static void gen_VQDMLAL_acc_16(TCGContext *s, TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    gen_helper_neon_addl_saturate_s32(s, rd, s->cpu_env, rn, rm);
+}
+
+static void gen_VQDMLAL_acc_32(TCGContext *s, TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    gen_helper_neon_addl_saturate_s64(s, rd, s->cpu_env, rn, rm);
+}
+
+static bool trans_VQDMLAL_3d(DisasContext *s, arg_3diff *a)
+{
+    static NeonGenTwoOpWidenFn * const opfn[] = {
+        NULL,
+        gen_VQDMULL_16,
+        gen_VQDMULL_32,
+        NULL,
+    };
+    static NeonGenTwo64OpFn * const accfn[] = {
+        NULL,
+        gen_VQDMLAL_acc_16,
+        gen_VQDMLAL_acc_32,
+        NULL,
+    };
+
+    return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
+}
+
+static void gen_VQDMLSL_acc_16(TCGContext *s, TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    gen_helper_neon_negl_u32(s, rm, rm);
+    gen_helper_neon_addl_saturate_s32(s, rd, s->cpu_env, rn, rm);
+}
+
+static void gen_VQDMLSL_acc_32(TCGContext *s, TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    tcg_gen_neg_i64(s, rm, rm);
+    gen_helper_neon_addl_saturate_s64(s, rd, s->cpu_env, rn, rm);
+}
+
+static bool trans_VQDMLSL_3d(DisasContext *s, arg_3diff *a)
+{
+    static NeonGenTwoOpWidenFn * const opfn[] = {
+        NULL,
+        gen_VQDMULL_16,
+        gen_VQDMULL_32,
+        NULL,
+    };
+    static NeonGenTwo64OpFn * const accfn[] = {
+        NULL,
+        gen_VQDMLSL_acc_16,
+        gen_VQDMLSL_acc_32,
+        NULL,
+    };
+
+    return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
+}

@@ -5371,11 +5371,11 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     {0, 0, 0, 7}, /* VSUBHN: handled by decodetree */
                     {0, 0, 0, 7}, /* VABDL */
                     {0, 0, 0, 7}, /* VMLAL */
-                    {0, 0, 0, 9}, /* VQDMLAL */
+                    {0, 0, 0, 7}, /* VQDMLAL */
                     {0, 0, 0, 7}, /* VMLSL */
-                    {0, 0, 0, 9}, /* VQDMLSL */
+                    {0, 0, 0, 7}, /* VQDMLSL */
                     {0, 0, 0, 7}, /* Integer VMULL */
-                    {0, 0, 0, 9}, /* VQDMULL */
+                    {0, 0, 0, 7}, /* VQDMULL */
                     {0, 0, 0, 0xa}, /* Polynomial VMULL */
                     {0, 0, 0, 7}, /* Reserved: always UNDEF */
                 };
@@ -5407,57 +5407,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     return 0;
                 }
-                /* Avoid overlapping operands. Wide source operands are
-                   always aligned so will never overlap with wide
-                   destinations in problematic ways. */
-                if (rd == rm) {
-                    tmp = neon_load_reg(s, rm, 1);
-                    neon_store_scratch(s, 2, tmp);
-                } else if (rd == rn) {
-                    tmp = neon_load_reg(s, rn, 1);
-                    neon_store_scratch(s, 2, tmp);
-                }
-                tmp3 = NULL;
-                for (pass = 0; pass < 2; pass++) {
-                    if (pass == 1 && rd == rn) {
-                        tmp = neon_load_scratch(s, 2);
-                    } else {
-                        tmp = neon_load_reg(s, rn, pass);
-                    }
-                    if (pass == 1 && rd == rm) {
-                        tmp2 = neon_load_scratch(s, 2);
-                    } else {
-                        tmp2 = neon_load_reg(s, rm, pass);
-                    }
-                    switch (op) {
-                    case 9: case 11: case 13:
-                        /* VQDMLAL, VQDMLSL, VQDMULL */
-                        gen_neon_mull(s, s->V0, tmp, tmp2, size, u);
-                        break;
-                    default: /* 15 is RESERVED: caught earlier */
-                        abort();
-                    }
-                    if (op == 13) {
-                        /* VQDMULL */
-                        gen_neon_addl_saturate(s, s->V0, s->V0, size);
-                        neon_store_reg64(s, s->V0, rd + pass);
-                    } else {
-                        /* Accumulate. */
-                        neon_load_reg64(s, s->V1, rd + pass);
-                        switch (op) {
-                        case 9: case 11: /* VQDMLAL, VQDMLSL */
-                            gen_neon_addl_saturate(s, s->V0, s->V0, size);
-                            if (op == 11) {
-                                gen_neon_negl(s, s->V0, size);
-                            }
-                            gen_neon_addl_saturate(s, s->V0, s->V1, size);
-                            break;
-                        default:
-                            abort();
-                        }
-                        neon_store_reg64(s, s->V0, rd + pass);
-                    }
-                }
+                abort(); /* all others handled by decodetree */
             } else {
                 /* Two registers and a scalar. NB that for ops of this form
                  * the ARM ARM labels bit 24 as Q, but it is in our variable