target/arm: Convert Neon VEXT to decodetree

Convert the Neon VEXT insn to decodetree. Rather than keeping the
old implementation which used fixed temporaries cpu_V0 and cpu_V1
and did the extraction with by-hand shift and logic ops, we use
the TCG extract2 insn.

We don't need to special-case immediates of 0 or 8 any more, as the
optimizer is smart enough to throw away the dead code.

Backports commit 0aad761fb0aed40c99039eacac470cbd03d07019 from qemu
This commit is contained in:
Peter Maydell 2020-06-17 00:28:54 -04:00 committed by Lioncash
parent 1aa9046120
commit 4731a69d66
3 changed files with 87 additions and 57 deletions

View file

@ -413,7 +413,13 @@ Vimm_1r 1111 001 . 1 . 000 ... .... cmode:4 0 . op:1 1 .... @1reg_imm
# return false for size==3.
######################################################################
{
# 0b11 subgroup will go here
[
##################################################################
# Miscellaneous size=0b11 insns
##################################################################
VEXT 1111 001 0 1 . 11 .... .... imm:4 . q:1 . 0 .... \
vm=%vm_dp vn=%vn_dp vd=%vd_dp
]
# Subgroup for size != 0b11
[

View file

@ -2845,3 +2845,81 @@ static bool trans_VQDMLSL_2sc(DisasContext *s, arg_2scalar *a)
return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]);
}
/*
 * VEXT: extract a contiguous run of bytes from the concatenation of
 * two source registers, starting at byte offset imm.  For the 64-bit
 * form the window comes out of <Vm:Vn>; for the 128-bit (Q) form it
 * comes out of <Vm+1:Vm:Vn+1:Vn>.  The extraction itself is done with
 * the TCG extract2 op, which the optimizer reduces to a plain move
 * when the byte offset is 0 mod 8.
 */
static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
{
    TCGContext *tcg = s->uc->tcg_ctx;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    /* Q-form operands must be even-numbered register pairs. */
    if ((a->vn | a->vm | a->vd) & a->q) {
        return false;
    }

    /* Byte offsets above 7 are only reachable in the Q form. */
    if (a->imm > 7 && !a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (!a->q) {
        /* Extract 64 bits from <Vm:Vn> */
        TCGv_i64 hi = tcg_temp_new_i64(tcg);
        TCGv_i64 lo = tcg_temp_new_i64(tcg);
        TCGv_i64 res = tcg_temp_new_i64(tcg);

        neon_load_reg64(s, lo, a->vn);
        neon_load_reg64(s, hi, a->vm);
        tcg_gen_extract2_i64(tcg, res, lo, hi, a->imm * 8);
        neon_store_reg64(s, res, a->vd);

        tcg_temp_free_i64(tcg, hi);
        tcg_temp_free_i64(tcg, lo);
        tcg_temp_free_i64(tcg, res);
    } else {
        /* Extract 128 bits from <Vm+1:Vm:Vn+1:Vn> */
        TCGv_i64 hi = tcg_temp_new_i64(tcg);
        TCGv_i64 mid = tcg_temp_new_i64(tcg);
        TCGv_i64 lo = tcg_temp_new_i64(tcg);
        TCGv_i64 dest_hi = tcg_temp_new_i64(tcg);
        TCGv_i64 dest_lo = tcg_temp_new_i64(tcg);
        /* Bit offset within a 64-bit lane; identical for both halves. */
        int ofs = (a->imm & 7) * 8;

        if (a->imm < 8) {
            /* Window starts inside Vn: sources are Vn, Vn+1, Vm. */
            neon_load_reg64(s, lo, a->vn);
            neon_load_reg64(s, mid, a->vn + 1);
            tcg_gen_extract2_i64(tcg, dest_lo, lo, mid, ofs);
            neon_load_reg64(s, hi, a->vm);
            tcg_gen_extract2_i64(tcg, dest_hi, mid, hi, ofs);
        } else {
            /* Window starts inside Vn+1: sources are Vn+1, Vm, Vm+1. */
            neon_load_reg64(s, lo, a->vn + 1);
            neon_load_reg64(s, mid, a->vm);
            tcg_gen_extract2_i64(tcg, dest_lo, lo, mid, ofs);
            neon_load_reg64(s, hi, a->vm + 1);
            tcg_gen_extract2_i64(tcg, dest_hi, mid, hi, ofs);
        }

        neon_store_reg64(s, dest_lo, a->vd);
        neon_store_reg64(s, dest_hi, a->vd + 1);

        tcg_temp_free_i64(tcg, dest_lo);
        tcg_temp_free_i64(tcg, dest_hi);
        tcg_temp_free_i64(tcg, lo);
        tcg_temp_free_i64(tcg, mid);
        tcg_temp_free_i64(tcg, hi);
    }
    return true;
}

View file

@ -5143,10 +5143,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
int pass;
int u;
int vec_size;
uint32_t imm;
TCGv_i32 tmp, tmp2, tmp3, tmp5;
TCGv_ptr ptr1;
TCGv_i64 tmp64;
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
return 1;
@ -5189,60 +5187,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
return 1;
} else { /* size == 3 */
if (!u) {
/* Extract. */
imm = (insn >> 8) & 0xf;
if (imm > 7 && !q)
return 1;
if (q && ((rd | rn | rm) & 1)) {
return 1;
}
if (imm == 0) {
neon_load_reg64(s, s->V0, rn);
if (q) {
neon_load_reg64(s, s->V1, rn + 1);
}
} else if (imm == 8) {
neon_load_reg64(s, s->V0, rn + 1);
if (q) {
neon_load_reg64(s, s->V1, rm);
}
} else if (q) {
tmp64 = tcg_temp_new_i64(tcg_ctx);
if (imm < 8) {
neon_load_reg64(s, s->V0, rn);
neon_load_reg64(s, tmp64, rn + 1);
} else {
neon_load_reg64(s, s->V0, rn + 1);
neon_load_reg64(s, tmp64, rm);
}
tcg_gen_shri_i64(tcg_ctx, s->V0, s->V0, (imm & 7) * 8);
tcg_gen_shli_i64(tcg_ctx, s->V1, tmp64, 64 - ((imm & 7) * 8));
tcg_gen_or_i64(tcg_ctx, s->V0, s->V0, s->V1);
if (imm < 8) {
neon_load_reg64(s, s->V1, rm);
} else {
neon_load_reg64(s, s->V1, rm + 1);
imm -= 8;
}
tcg_gen_shli_i64(tcg_ctx, s->V1, s->V1, 64 - (imm * 8));
tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, imm * 8);
tcg_gen_or_i64(tcg_ctx, s->V1, s->V1, tmp64);
tcg_temp_free_i64(tcg_ctx, tmp64);
} else {
/* BUGFIX */
neon_load_reg64(s, s->V0, rn);
tcg_gen_shri_i64(tcg_ctx, s->V0, s->V0, imm * 8);
neon_load_reg64(s, s->V1, rm);
tcg_gen_shli_i64(tcg_ctx, s->V1, s->V1, 64 - (imm * 8));
tcg_gen_or_i64(tcg_ctx, s->V0, s->V0, s->V1);
}
neon_store_reg64(s, s->V0, rd);
if (q) {
neon_store_reg64(s, s->V1, rd + 1);
}
/* Extract: handled by decodetree */
return 1;
} else if ((insn & (1 << 11)) == 0) {
/* Two register misc. */
op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);