target/arm: Convert Halfword multiply and multiply accumulate

Backports commit 26c6923de7131fa1cf223ab67131d1992dc17001 from qemu
This commit is contained in:
Richard Henderson 2019-11-19 13:52:23 -05:00 committed by Lioncash
parent 44416a6794
commit 543b598d45
No known key found for this signature in database
GPG key ID: 4E3C3CC1031BA9C7
3 changed files with 172 additions and 97 deletions

View file

@@ -114,6 +114,7 @@ MVN_rxi .... 001 1111 . 0000 .... ............ @s_rxi_rot
@s_rdamn ---- .... ... s:1 rd:4 ra:4 rm:4 .... rn:4 &s_rrrr
@s_rd0mn ---- .... ... s:1 rd:4 .... rm:4 .... rn:4 &s_rrrr ra=0
@rdamn ---- .... ... . rd:4 ra:4 rm:4 .... rn:4 &rrrr
@rd0mn ---- .... ... . rd:4 .... rm:4 .... rn:4 &rrrr ra=0
MUL .... 0000 000 . .... 0000 .... 1001 .... @s_rd0mn
MLA .... 0000 001 . .... .... .... 1001 .... @s_rdamn
@@ -132,3 +133,22 @@ QADD .... 0001 0000 .... .... 0000 0101 .... @rndm
QSUB .... 0001 0010 .... .... 0000 0101 .... @rndm
QDADD .... 0001 0100 .... .... 0000 0101 .... @rndm
QDSUB .... 0001 0110 .... .... 0000 0101 .... @rndm
# Halfword multiply and multiply accumulate
SMLABB .... 0001 0000 .... .... .... 1000 .... @rdamn
SMLABT .... 0001 0000 .... .... .... 1100 .... @rdamn
SMLATB .... 0001 0000 .... .... .... 1010 .... @rdamn
SMLATT .... 0001 0000 .... .... .... 1110 .... @rdamn
SMLAWB .... 0001 0010 .... .... .... 1000 .... @rdamn
SMULWB .... 0001 0010 .... 0000 .... 1010 .... @rd0mn
SMLAWT .... 0001 0010 .... .... .... 1100 .... @rdamn
SMULWT .... 0001 0010 .... 0000 .... 1110 .... @rd0mn
SMLALBB .... 0001 0100 .... .... .... 1000 .... @rdamn
SMLALBT .... 0001 0100 .... .... .... 1100 .... @rdamn
SMLALTB .... 0001 0100 .... .... .... 1010 .... @rdamn
SMLALTT .... 0001 0100 .... .... .... 1110 .... @rdamn
SMULBB .... 0001 0110 .... 0000 .... 1000 .... @rd0mn
SMULBT .... 0001 0110 .... 0000 .... 1100 .... @rd0mn
SMULTB .... 0001 0110 .... 0000 .... 1010 .... @rd0mn
SMULTT .... 0001 0110 .... 0000 .... 1110 .... @rd0mn

View file

@@ -119,6 +119,7 @@ RSB_rri 1111 0.0 1110 . .... 0 ... .... ........ @s_rri_rot
@s0_rn0dm .... .... .... rn:4 .... rd:4 .... rm:4 &s_rrrr ra=0 s=0
@rnadm .... .... .... rn:4 ra:4 rd:4 .... rm:4 &rrrr
@rndm .... .... .... rn:4 .... rd:4 .... rm:4 &rrr
@rn0dm .... .... .... rn:4 .... rd:4 .... rm:4 &rrrr ra=0
{
MUL 1111 1011 0000 .... 1111 .... 0000 .... @s0_rn0dm
@@ -130,6 +131,34 @@ UMULL 1111 1011 1010 .... .... .... 0000 .... @s0_rnadm
SMLAL 1111 1011 1100 .... .... .... 0000 .... @s0_rnadm
UMLAL 1111 1011 1110 .... .... .... 0000 .... @s0_rnadm
UMAAL 1111 1011 1110 .... .... .... 0110 .... @rnadm
{
SMULWB 1111 1011 0011 .... 1111 .... 0000 .... @rn0dm
SMLAWB 1111 1011 0011 .... .... .... 0000 .... @rnadm
}
{
SMULWT 1111 1011 0011 .... 1111 .... 0001 .... @rn0dm
SMLAWT 1111 1011 0011 .... .... .... 0001 .... @rnadm
}
{
SMULBB 1111 1011 0001 .... 1111 .... 0000 .... @rn0dm
SMLABB 1111 1011 0001 .... .... .... 0000 .... @rnadm
}
{
SMULBT 1111 1011 0001 .... 1111 .... 0001 .... @rn0dm
SMLABT 1111 1011 0001 .... .... .... 0001 .... @rnadm
}
{
SMULTB 1111 1011 0001 .... 1111 .... 0010 .... @rn0dm
SMLATB 1111 1011 0001 .... .... .... 0010 .... @rnadm
}
{
SMULTT 1111 1011 0001 .... 1111 .... 0011 .... @rn0dm
SMLATT 1111 1011 0001 .... .... .... 0011 .... @rnadm
}
SMLALBB 1111 1011 1100 .... .... .... 1000 .... @rnadm
SMLALBT 1111 1011 1100 .... .... .... 1001 .... @rnadm
SMLALTB 1111 1011 1100 .... .... .... 1010 .... @rnadm
SMLALTT 1111 1011 1100 .... .... .... 1011 .... @rnadm
# Data-processing (two source registers)

View file

@@ -8390,6 +8390,119 @@ DO_QADDSUB(QDSUB, false, true)
#undef DO_QADDSUB
/*
* Halfword multiply and multiply accumulate
*/
/*
 * Common emitter for the halfword multiply (accumulate) family
 * (SMULxy / SMLAxy / SMLALxy).
 *
 * @add_long selects the accumulate form:
 *   0 - no accumulate, store 32-bit product in rd (SMULxy)
 *   1 - 32-bit accumulate from ra with Q-flag detection (SMLAxy)
 *   2 - widen to 64 bits and accumulate into ra:rd (SMLALxy)
 * @nt/@mt pick which halfword of each operand feeds gen_mulxy.
 *
 * Returns false (undecoded) when the required architecture
 * feature is absent: Thumb needs THUMB_DSP, A32 needs ARMv5TE.
 */
static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
                       int add_long, bool nt, bool mt)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 va, vb;
    TCGv_i64 wide;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_5TE) {
        return false;
    }

    va = load_reg(s, a->rn);
    vb = load_reg(s, a->rm);
    /* 16x16 -> 32 signed multiply of the selected halfwords; result in va. */
    gen_mulxy(s, va, vb, nt, mt);
    tcg_temp_free_i32(tcg_ctx, vb);

    if (add_long == 0) {
        store_reg(s, a->rd, va);
    } else if (add_long == 1) {
        /* 32-bit accumulate; helper also sets Q on signed overflow. */
        vb = load_reg(s, a->ra);
        gen_helper_add_setq(tcg_ctx, va, tcg_ctx->cpu_env, va, vb);
        tcg_temp_free_i32(tcg_ctx, vb);
        store_reg(s, a->rd, va);
    } else if (add_long == 2) {
        /* Sign-extend the product and fold it into the ra:rd pair. */
        wide = tcg_temp_new_i64(tcg_ctx);
        tcg_gen_ext_i32_i64(tcg_ctx, wide, va);
        tcg_temp_free_i32(tcg_ctx, va);
        gen_addq(s, wide, a->ra, a->rd);
        gen_storeq_reg(s, a->ra, a->rd, wide);
        tcg_temp_free_i64(tcg_ctx, wide);
    } else {
        g_assert_not_reached();
    }
    return true;
}
#define DO_SMLAX(NAME, add, nt, mt) \
static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
{ \
return op_smlaxxx(s, a, add, nt, mt); \
}
DO_SMLAX(SMULBB, 0, 0, 0)
DO_SMLAX(SMULBT, 0, 0, 1)
DO_SMLAX(SMULTB, 0, 1, 0)
DO_SMLAX(SMULTT, 0, 1, 1)
DO_SMLAX(SMLABB, 1, 0, 0)
DO_SMLAX(SMLABT, 1, 0, 1)
DO_SMLAX(SMLATB, 1, 1, 0)
DO_SMLAX(SMLATT, 1, 1, 1)
DO_SMLAX(SMLALBB, 2, 0, 0)
DO_SMLAX(SMLALBT, 2, 0, 1)
DO_SMLAX(SMLALTB, 2, 1, 0)
DO_SMLAX(SMLALTT, 2, 1, 1)
#undef DO_SMLAX
/*
 * Common emitter for SMULWy / SMLAWy:
 * (32-bit rn x sign-extended halfword of rm) >> 16, with an
 * optional Q-setting 32-bit accumulate from ra when @add is set.
 * @mt selects the top (true) or bottom (false) halfword of rm.
 * Returns false when the core lacks ARMv5TE.
 */
static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 va, vb;
    TCGv_i64 wide;

    if (!ENABLE_ARCH_5TE) {
        return false;
    }

    va = load_reg(s, a->rn);
    vb = load_reg(s, a->rm);
    if (mt) {
        /* Arithmetic shift brings the top half down, sign-extended. */
        tcg_gen_sari_i32(tcg_ctx, vb, vb, 16);
    } else {
        gen_sxth(vb);
    }
    /* Widening signed multiply; both i32 temps are consumed by the helper
     * (hence the fresh temp below).  Keep bits [47:16] of the product. */
    wide = gen_muls_i64_i32(s, va, vb);
    tcg_gen_shri_i64(tcg_ctx, wide, wide, 16);
    vb = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_extrl_i64_i32(tcg_ctx, vb, wide);
    tcg_temp_free_i64(tcg_ctx, wide);
    if (add) {
        /* Accumulate variant: helper also sets Q on signed overflow. */
        va = load_reg(s, a->ra);
        gen_helper_add_setq(tcg_ctx, vb, tcg_ctx->cpu_env, vb, va);
        tcg_temp_free_i32(tcg_ctx, va);
    }
    store_reg(s, a->rd, vb);
    return true;
}
/*
 * Expand trans_* entry points for the word-by-halfword multiplies.
 * ADD: accumulate from ra when 1; MT: use the top halfword of rm.
 */
#define DO_SMLAWX(INSN, ADD, MT)                           \
static bool trans_##INSN(DisasContext *s, arg_rrrr *a)     \
{                                                          \
    return op_smlawx(s, a, ADD, MT);                       \
}
DO_SMLAWX(SMULWB, 0, 0)
DO_SMLAWX(SMULWT, 0, 1)
DO_SMLAWX(SMLAWB, 1, 0)
DO_SMLAWX(SMLAWT, 1, 1)
#undef DO_SMLAWX
/*
* Legacy decoder.
*/
@@ -8865,56 +8978,13 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
}
break;
}
case 0x8: /* signed multiply */
case 0x8:
case 0xa:
case 0xc:
case 0xe:
ARCH(5TE);
rs = (insn >> 8) & 0xf;
rn = (insn >> 12) & 0xf;
rd = (insn >> 16) & 0xf;
if (op1 == 1) {
/* (32 * 16) >> 16 */
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rs);
if (sh & 4)
tcg_gen_sari_i32(tcg_ctx, tmp2, tmp2, 16);
else
gen_sxth(tmp2);
tmp64 = gen_muls_i64_i32(s, tmp, tmp2);
tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 16);
tmp = tcg_temp_new_i32(tcg_ctx);
tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tmp64);
tcg_temp_free_i64(tcg_ctx, tmp64);
if ((sh & 2) == 0) {
tmp2 = load_reg(s, rn);
gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2);
tcg_temp_free_i32(tcg_ctx, tmp2);
}
store_reg(s, rd, tmp);
} else {
/* 16 * 16 */
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rs);
gen_mulxy(s, tmp, tmp2, sh & 2, sh & 4);
tcg_temp_free_i32(tcg_ctx, tmp2);
if (op1 == 2) {
tmp64 = tcg_temp_new_i64(tcg_ctx);
tcg_gen_ext_i32_i64(tcg_ctx, tmp64, tmp);
tcg_temp_free_i32(tcg_ctx, tmp);
gen_addq(s, tmp64, rn, rd);
gen_storeq_reg(s, rn, rd, tmp64);
tcg_temp_free_i64(tcg_ctx, tmp64);
} else {
if (op1 == 0) {
tmp2 = load_reg(s, rn);
gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2);
tcg_temp_free_i32(tcg_ctx, tmp2);
}
store_reg(s, rd, tmp);
}
}
break;
/* Halfword multiply and multiply accumulate. */
/* All done in decodetree. Reach here for illegal ops. */
goto illegal_op;
default:
goto illegal_op;
}
@@ -10370,13 +10440,14 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
break;
case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
switch ((insn >> 20) & 7) {
case 0: /* 32 x 32 -> 32, in decodetree */
case 0: /* 32 x 32 -> 32 */
case 1: /* 16 x 16 -> 32 */
case 3: /* 32 * 16 -> 32msb */
/* in decodetree */
goto illegal_op;
case 7: /* Unsigned sum of absolute differences. */
break;
case 1: /* 16 x 16 -> 32 */
case 2: /* Dual multiply add. */
case 3: /* 32 * 16 -> 32msb */
case 4: /* Dual multiply subtract. */
case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
@@ -10388,15 +10459,6 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
switch ((insn >> 20) & 7) {
case 1: /* 16 x 16 -> 32 */
gen_mulxy(s, tmp, tmp2, op & 2, op & 1);
tcg_temp_free_i32(tcg_ctx, tmp2);
if (rs != 15) {
tmp2 = load_reg(s, rs);
gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2);
tcg_temp_free_i32(tcg_ctx, tmp2);
}
break;
case 2: /* Dual multiply add. */
case 4: /* Dual multiply subtract. */
if (op)
@@ -10420,23 +10482,6 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
tcg_temp_free_i32(tcg_ctx, tmp2);
}
break;
case 3: /* 32 * 16 -> 32msb */
if (op)
tcg_gen_sari_i32(tcg_ctx, tmp2, tmp2, 16);
else
gen_sxth(tmp2);
tmp64 = gen_muls_i64_i32(s, tmp, tmp2);
tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 16);
tmp = tcg_temp_new_i32(tcg_ctx);
tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tmp64);
tcg_temp_free_i64(tcg_ctx, tmp64);
if (rs != 15)
{
tmp2 = load_reg(s, rs);
gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2);
tcg_temp_free_i32(tcg_ctx, tmp2);
}
break;
case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
tcg_gen_muls2_i32(tcg_ctx, tmp2, tmp, tmp, tmp2);
if (rs != 15) {
@@ -10517,29 +10562,10 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
gen_storeq_reg(s, rs, rd, tmp64);
tcg_temp_free_i64(tcg_ctx, tmp64);
} else {
if ((op & 0x20) || !(op & 8)) {
/* Signed/unsigned 64-bit multiply, in decodetree */
tcg_temp_free_i32(tcg_ctx, tmp2);
tcg_temp_free_i32(tcg_ctx, tmp);
goto illegal_op;
}
/* smlalxy */
if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
tcg_temp_free_i32(tcg_ctx, tmp2);
tcg_temp_free_i32(tcg_ctx, tmp);
goto illegal_op;
}
gen_mulxy(s, tmp, tmp2, op & 2, op & 1);
/* Signed/unsigned 64-bit multiply, in decodetree */
tcg_temp_free_i32(tcg_ctx, tmp2);
tmp64 = tcg_temp_new_i64(tcg_ctx);
tcg_gen_ext_i32_i64(tcg_ctx, tmp64, tmp);
tcg_temp_free_i32(tcg_ctx, tmp);
if (op & 0x40) {
/* 64-bit accumulate. */
gen_addq(s, tmp64, rs, rd);
}
gen_storeq_reg(s, rs, rd, tmp64);
tcg_temp_free_i64(tcg_ctx, tmp64);
goto illegal_op;
}
break;
}