target/arm: Convert VQSHLU, VQSHL 2-reg-shift insns to decodetree

Convert the VQSHLU and VQSHL 2-reg-shift insns to decodetree.
These are the last of the simple shift-by-immediate insns.

Backports commit 37bfce81b10450071193c8495a07f182ec652e2a from qemu
This commit is contained in:
Peter Maydell 2020-06-15 12:18:54 -04:00 committed by Lioncash
parent 055c96f985
commit 69a3312e3a
3 changed files with 128 additions and 142 deletions

View file

VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_d
VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_s
VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_h
VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_b

# VQSHLU: signed-input, unsigned-saturating shift left by immediate.
# Only the U=1 (bit 24 set) encoding exists; size==3 uses the _64 pattern.
VQSHLU_64_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_d
VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_s
VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_h
VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_b

# VQSHL (immediate): saturating shift left; U bit (24) selects
# signed (0) vs unsigned (1) saturation.
VQSHL_S_64_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d
VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_s
VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_h
VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_b
VQSHL_U_64_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d
VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_s
VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_h
VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_b

View file

@ -1305,3 +1305,113 @@ static bool trans_VSHR_U_2sh(DisasContext *s, arg_2reg_shift *a)
return do_vector_2sh(s, a, tcg_gen_gvec_shri);
}
}
static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
                             NeonGenTwo64OpEnvFn *fn)
{
    /*
     * 2-reg-and-shift operations, size == 3 case, where the
     * function needs to be passed cpu_env (here, the saturating
     * shift helpers, which must record saturation in env).
     *
     * Returns false to UNDEF, true otherwise (including the case
     * where vfp_access_check() has raised an exception).
     */
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i64 constimm;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    /* Q-reg operands must be even-numbered D-reg pairs. */
    if ((a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * To avoid excessive duplication of ops we implement shift
     * by immediate using the variable shift operations.
     */
    constimm = tcg_const_i64(tcg_ctx, dup_const(a->size, a->shift));

    /* One pass per D reg: 2 passes for a Q-reg op, 1 otherwise. */
    for (pass = 0; pass < a->q + 1; pass++) {
        TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx);

        neon_load_reg64(s, tmp, a->vm + pass);
        fn(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, constimm);
        neon_store_reg64(s, tmp, a->vd + pass);
        /*
         * Fix: free the per-pass temp. Unlike the 32-bit
         * neon_store_reg(), neon_store_reg64() does not consume
         * its source, so the original leaked one i64 temp per pass.
         */
        tcg_temp_free_i64(tcg_ctx, tmp);
    }
    tcg_temp_free_i64(tcg_ctx, constimm);
    return true;
}
static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
                             NeonGenTwoOpEnvFn *fn)
{
    /*
     * 2-reg-and-shift operation for element sizes below 64 bits
     * (size < 3), for helpers that must be handed cpu_env.
     *
     * Returns false to UNDEF, true otherwise (including the case
     * where vfp_access_check() has raised an exception).
     */
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 shiftval;
    int pass, npasses;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    /* Q-reg operands must be even-numbered D-reg pairs. */
    if ((a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * Shift-by-immediate is implemented via the variable-shift
     * helpers, with the immediate replicated into every element
     * lane of a 32-bit shift-count operand.
     */
    shiftval = tcg_const_i32(tcg_ctx, dup_const(a->size, a->shift));

    npasses = a->q ? 4 : 2;  /* one pass per 32 bits of the operand */
    for (pass = 0; pass < npasses; pass++) {
        TCGv_i32 val = neon_load_reg(s, a->vm, pass);

        fn(tcg_ctx, val, tcg_ctx->cpu_env, val, shiftval);
        /* neon_store_reg() consumes (frees) val. */
        neon_store_reg(s, a->vd, pass, val);
    }
    tcg_temp_free_i32(tcg_ctx, shiftval);
    return true;
}
/*
 * Expand the pair of trans functions for one saturating
 * 2-reg-shift insn:
 *   trans_<INSN>_64_2sh - the size == 3 (64-bit element) form,
 *                         routed through do_2shift_env_64
 *   trans_<INSN>_2sh    - 8/16/32-bit elements, dispatching on
 *                         a->size into a helper table
 * FUNC is the helper-name stem, e.g. qshl_u expands to
 * gen_helper_neon_qshl_u8/16/32/64.
 */
#define DO_2SHIFT_ENV(INSN, FUNC) \
    static bool trans_##INSN##_64_2sh(DisasContext *s, arg_2reg_shift *a) \
    { \
        return do_2shift_env_64(s, a, gen_helper_neon_##FUNC##64); \
    } \
    static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
    { \
        static NeonGenTwoOpEnvFn * const fns[] = { \
            gen_helper_neon_##FUNC##8, \
            gen_helper_neon_##FUNC##16, \
            gen_helper_neon_##FUNC##32, \
        }; \
        assert(a->size < ARRAY_SIZE(fns)); \
        return do_2shift_env_32(s, a, fns[a->size]); \
    }

/* VQSHLU uses the signed-input/unsigned-saturate qshlu helpers. */
DO_2SHIFT_ENV(VQSHLU, qshlu_s)
DO_2SHIFT_ENV(VQSHL_U, qshl_u)
DO_2SHIFT_ENV(VQSHL_S, qshl_s)

View file

@ -3121,52 +3121,6 @@ static inline void gen_neon_rsb(DisasContext *s, int size, TCGv_i32 t0, TCGv_i32
}
}
/*
 * Call the gen_helper_neon_<name>_{s,u}{8,16,32} variant selected by
 * the surrounding scope's 'size' (0/1/2) and 'u' (0 signed, 1
 * unsigned).  Operands are the in-scope tmp/tmp2; the result is
 * written back to tmp.  This _ENV form passes cpu_env to the helper.
 * NOTE: 'default: return 1' returns from the *enclosing function*
 * (UNDEF) for size == 3 or other unhandled combinations, so this
 * macro is only usable inside functions returning int.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
/*
 * As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take
 * cpu_env: dispatches on the in-scope 'size'/'u' to the signed or
 * unsigned 8/16/32-bit helper, with tmp = helper(tmp, tmp2).
 * 'default: return 1' returns UNDEF from the enclosing function.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
static TCGv_i32 neon_load_scratch(DisasContext *s, int scratch)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;
@ -5401,7 +5355,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
int size;
int shift;
int pass;
int count;
int u;
int vec_size;
uint32_t imm;
@ -5451,6 +5404,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
case 3: /* VRSRA */
case 4: /* VSRI */
case 5: /* VSHL, VSLI */
case 6: /* VQSHLU */
case 7: /* VQSHL */
return 1; /* handled by decodetree */
default:
break;
@ -5468,101 +5423,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
size--;
}
shift = (insn >> 16) & ((1 << (3 + size)) - 1);
if (op < 8) {
/* Shift by immediate:
VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
if (q && ((rd | rm) & 1)) {
return 1;
}
if (!u && (op == 4 || op == 6)) {
return 1;
}
/* Right shifts are encoded as N - shift, where N is the
element size in bits. */
if (op <= 4) {
shift = shift - (1 << (size + 3));
}
if (size == 3) {
count = q + 1;
} else {
count = q ? 4: 2;
}
/* To avoid excessive duplication of ops we implement shift
* by immediate using the variable shift operations.
*/
imm = dup_const(size, shift);
for (pass = 0; pass < count; pass++) {
if (size == 3) {
neon_load_reg64(s, s->V0, rm + pass);
tcg_gen_movi_i64(tcg_ctx, s->V1, imm);
switch (op) {
case 6: /* VQSHLU */
gen_helper_neon_qshlu_s64(tcg_ctx, s->V0, tcg_ctx->cpu_env,
s->V0, s->V1);
break;
case 7: /* VQSHL */
if (u) {
gen_helper_neon_qshl_u64(tcg_ctx, s->V0, tcg_ctx->cpu_env,
s->V0, s->V1);
} else {
gen_helper_neon_qshl_s64(tcg_ctx, s->V0, tcg_ctx->cpu_env,
s->V0, s->V1);
}
break;
default:
g_assert_not_reached();
}
if (op == 3) {
/* Accumulate. */
neon_load_reg64(s, s->V1, rd + pass);
tcg_gen_add_i64(tcg_ctx, s->V0, s->V0, s->V1);
}
neon_store_reg64(s, s->V0, rd + pass);
} else { /* size < 3 */
/* Operands in T0 and T1. */
tmp = neon_load_reg(s, rm, pass);
tmp2 = tcg_temp_new_i32(tcg_ctx);
tcg_gen_movi_i32(tcg_ctx, tmp2, imm);
switch (op) {
case 6: /* VQSHLU */
switch (size) {
case 0:
gen_helper_neon_qshlu_s8(tcg_ctx, tmp, tcg_ctx->cpu_env,
tmp, tmp2);
break;
case 1:
gen_helper_neon_qshlu_s16(tcg_ctx, tmp, tcg_ctx->cpu_env,
tmp, tmp2);
break;
case 2:
gen_helper_neon_qshlu_s32(tcg_ctx, tmp, tcg_ctx->cpu_env,
tmp, tmp2);
break;
default:
abort();
}
break;
case 7: /* VQSHL */
GEN_NEON_INTEGER_OP_ENV(qshl);
break;
default:
g_assert_not_reached();
}
tcg_temp_free_i32(tcg_ctx, tmp2);
if (op == 3) {
/* Accumulate. */
tmp2 = neon_load_reg(s, rd, pass);
gen_neon_add(s, size, tmp, tmp2);
tcg_temp_free_i32(tcg_ctx, tmp2);
}
neon_store_reg(s, rd, pass, tmp);
}
} /* for pass */
} else if (op < 10) {
if (op < 10) {
/* Shift by immediate and narrow:
VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
int input_unsigned = (op == 8) ? !u : u;