target/arm: Create gen_gvec_{sri,sli}

The functions eliminate duplication of the special cases for
this operation. They match up with the GVecGen2iFn typedef.

Add out-of-line helpers. We got away with only having inline
expanders because the neon vector size is only 16 bytes, and
we know that the inline expansion will always succeed.
When we reuse this for SVE, tcg-gvec-op may decide to use an
out-of-line helper due to longer vector lengths.

Backports commit 893ab0542aa385a287cbe46d5535c8b9e95ce699 from qemu
Richard Henderson, 2020-05-15 20:37:01 -04:00 (committed by Lioncash)
parent 2609e6f319 | commit 6190be3191
10 changed files with 220 additions and 115 deletions
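Aside (not part of the commit): the value of matching the GVecGen2iFn shape is that SRI and SLI can now be selected through the same function-pointer type as the other vector shift expanders. A minimal sketch, under two assumptions — that this fork's GVecGen2iFn typedef in tcg-op-gvec.h carries the extra TCGContext* parameter like the rest of the fork's TCG API, and with expand_insert_shift() as a purely hypothetical caller:

    /* Sketch: the 2-operand + immediate expander shape, TCGContext-threaded
     * as in this fork; see tcg-op-gvec.h for the authoritative typedef. */
    typedef void GVecGen2iFn(TCGContext *s, unsigned vece, uint32_t dofs,
                             uint32_t aofs, int64_t c,
                             uint32_t oprsz, uint32_t maxsz);

    /* Hypothetical caller: both new expanders fit one pointer type. */
    static void expand_insert_shift(TCGContext *s, bool right, unsigned vece,
                                    uint32_t rd_ofs, uint32_t rm_ofs,
                                    int64_t shift, uint32_t opr_sz,
                                    uint32_t max_sz)
    {
        GVecGen2iFn *fn = right ? gen_gvec_sri : gen_gvec_sli;
        fn(s, vece, rd_ofs, rm_ofs, shift, opr_sz, max_sz);
    }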

qemu/aarch64.h

@@ -3423,7 +3423,9 @@
 #define fp_exception_el fp_exception_el_aarch64
 #define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64
 #define gen_cmtst_i64 gen_cmtst_i64_aarch64
+#define gen_gvec_sli gen_gvec_sli_aarch64
 #define gen_gvec_ssra gen_gvec_ssra_aarch64
+#define gen_gvec_sri gen_gvec_sri_aarch64
 #define gen_gvec_srshr gen_gvec_srshr_aarch64
 #define gen_gvec_srsra gen_gvec_srsra_aarch64
 #define gen_gvec_ursra gen_gvec_ursra_aarch64
@@ -3483,6 +3485,14 @@
 #define helper_gvec_rsqrts_d helper_gvec_rsqrts_d_aarch64
 #define helper_gvec_rsqrts_h helper_gvec_rsqrts_h_aarch64
 #define helper_gvec_rsqrts_s helper_gvec_rsqrts_s_aarch64
+#define helper_gvec_sli_b helper_gvec_sli_b_aarch64
+#define helper_gvec_sli_d helper_gvec_sli_d_aarch64
+#define helper_gvec_sli_h helper_gvec_sli_h_aarch64
+#define helper_gvec_sli_s helper_gvec_sli_s_aarch64
+#define helper_gvec_sri_b helper_gvec_sri_b_aarch64
+#define helper_gvec_sri_d helper_gvec_sri_d_aarch64
+#define helper_gvec_sri_h helper_gvec_sri_h_aarch64
+#define helper_gvec_sri_s helper_gvec_sri_s_aarch64
 #define helper_gvec_srshr_b helper_gvec_srshr_b_aarch64
 #define helper_gvec_srshr_d helper_gvec_srshr_d_aarch64
 #define helper_gvec_srshr_h helper_gvec_srshr_h_aarch64
@@ -4456,11 +4466,9 @@
 #define raise_exception_ra raise_exception_ra_aarch64
 #define read_cpu_reg read_cpu_reg_aarch64
 #define read_cpu_reg_sp read_cpu_reg_sp_aarch64
-#define sli_op sli_op_aarch64
 #define sqadd_op sqadd_op_aarch64
 #define sqsub_op sqsub_op_aarch64
 #define sshl_op sshl_op_aarch64
-#define sri_op sri_op_aarch64
 #define sve_access_check sve_access_check_aarch64
 #define sve_exception_el sve_exception_el_aarch64
 #define sve_zcr_len_for_el sve_zcr_len_for_el_aarch64

qemu/aarch64eb.h

@@ -3423,7 +3423,9 @@
 #define fp_exception_el fp_exception_el_aarch64eb
 #define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64eb
 #define gen_cmtst_i64 gen_cmtst_i64_aarch64eb
+#define gen_gvec_sli gen_gvec_sli_aarch64eb
 #define gen_gvec_ssra gen_gvec_ssra_aarch64eb
+#define gen_gvec_sri gen_gvec_sri_aarch64eb
 #define gen_gvec_srshr gen_gvec_srshr_aarch64eb
 #define gen_gvec_srsra gen_gvec_srsra_aarch64eb
 #define gen_gvec_ursra gen_gvec_ursra_aarch64eb
@@ -3483,6 +3485,14 @@
 #define helper_gvec_rsqrts_d helper_gvec_rsqrts_d_aarch64eb
 #define helper_gvec_rsqrts_h helper_gvec_rsqrts_h_aarch64eb
 #define helper_gvec_rsqrts_s helper_gvec_rsqrts_s_aarch64eb
+#define helper_gvec_sli_b helper_gvec_sli_b_aarch64eb
+#define helper_gvec_sli_d helper_gvec_sli_d_aarch64eb
+#define helper_gvec_sli_h helper_gvec_sli_h_aarch64eb
+#define helper_gvec_sli_s helper_gvec_sli_s_aarch64eb
+#define helper_gvec_sri_b helper_gvec_sri_b_aarch64eb
+#define helper_gvec_sri_d helper_gvec_sri_d_aarch64eb
+#define helper_gvec_sri_h helper_gvec_sri_h_aarch64eb
+#define helper_gvec_sri_s helper_gvec_sri_s_aarch64eb
 #define helper_gvec_srshr_b helper_gvec_srshr_b_aarch64eb
 #define helper_gvec_srshr_d helper_gvec_srshr_d_aarch64eb
 #define helper_gvec_srshr_h helper_gvec_srshr_h_aarch64eb
@@ -4456,11 +4466,9 @@
 #define raise_exception_ra raise_exception_ra_aarch64eb
 #define read_cpu_reg read_cpu_reg_aarch64eb
 #define read_cpu_reg_sp read_cpu_reg_sp_aarch64eb
-#define sli_op sli_op_aarch64eb
 #define sqadd_op sqadd_op_aarch64eb
 #define sqsub_op sqsub_op_aarch64eb
 #define sshl_op sshl_op_aarch64eb
-#define sri_op sri_op_aarch64eb
 #define sve_access_check sve_access_check_aarch64eb
 #define sve_exception_el sve_exception_el_aarch64eb
 #define sve_zcr_len_for_el sve_zcr_len_for_el_aarch64eb

qemu/arm.h

@@ -3408,7 +3408,9 @@
 #define cpu_mmu_index cpu_mmu_index_arm
 #define fp_exception_el fp_exception_el_arm
 #define gen_cmtst_i64 gen_cmtst_i64_arm
+#define gen_gvec_sli gen_gvec_sli_arm
 #define gen_gvec_ssra gen_gvec_ssra_arm
+#define gen_gvec_sri gen_gvec_sri_arm
 #define gen_gvec_srshr gen_gvec_srshr_arm
 #define gen_gvec_srsra gen_gvec_srsra_arm
 #define gen_gvec_ursra gen_gvec_ursra_arm
@@ -3420,6 +3422,14 @@
 #define gen_ushl_i32 gen_ushl_i32_arm
 #define gen_ushl_i64 gen_ushl_i64_arm
 #define helper_fjcvtzs helper_fjcvtzs_arm
+#define helper_gvec_sli_b helper_gvec_sli_b_arm
+#define helper_gvec_sli_d helper_gvec_sli_d_arm
+#define helper_gvec_sli_h helper_gvec_sli_h_arm
+#define helper_gvec_sli_s helper_gvec_sli_s_arm
+#define helper_gvec_sri_b helper_gvec_sri_b_arm
+#define helper_gvec_sri_d helper_gvec_sri_d_arm
+#define helper_gvec_sri_h helper_gvec_sri_h_arm
+#define helper_gvec_sri_s helper_gvec_sri_s_arm
 #define helper_gvec_srshr_b helper_gvec_srshr_b_arm
 #define helper_gvec_srshr_d helper_gvec_srshr_d_arm
 #define helper_gvec_srshr_h helper_gvec_srshr_h_arm
@@ -3455,11 +3465,9 @@
 #define pmu_post_el_change pmu_post_el_change_arm
 #define raise_exception raise_exception_arm
 #define raise_exception_ra raise_exception_ra_arm
-#define sli_op sli_op_arm
 #define sqadd_op sqadd_op_arm
 #define sqsub_op sqsub_op_arm
 #define sshl_op sshl_op_arm
-#define sri_op sri_op_arm
 #define sve_exception_el sve_exception_el_arm
 #define sve_zcr_len_for_el sve_zcr_len_for_el_arm
 #define uqadd_op uqadd_op_arm

qemu/armeb.h

@@ -3408,7 +3408,9 @@
 #define cpu_mmu_index cpu_mmu_index_armeb
 #define fp_exception_el fp_exception_el_armeb
 #define gen_cmtst_i64 gen_cmtst_i64_armeb
+#define gen_gvec_sli gen_gvec_sli_armeb
 #define gen_gvec_ssra gen_gvec_ssra_armeb
+#define gen_gvec_sri gen_gvec_sri_armeb
 #define gen_gvec_srshr gen_gvec_srshr_armeb
 #define gen_gvec_srsra gen_gvec_srsra_armeb
 #define gen_gvec_ursra gen_gvec_ursra_armeb
@@ -3420,6 +3422,14 @@
 #define gen_ushl_i32 gen_ushl_i32_armeb
 #define gen_ushl_i64 gen_ushl_i64_armeb
 #define helper_fjcvtzs helper_fjcvtzs_armeb
+#define helper_gvec_sli_b helper_gvec_sli_b_armeb
+#define helper_gvec_sli_d helper_gvec_sli_d_armeb
+#define helper_gvec_sli_h helper_gvec_sli_h_armeb
+#define helper_gvec_sli_s helper_gvec_sli_s_armeb
+#define helper_gvec_sri_b helper_gvec_sri_b_armeb
+#define helper_gvec_sri_d helper_gvec_sri_d_armeb
+#define helper_gvec_sri_h helper_gvec_sri_h_armeb
+#define helper_gvec_sri_s helper_gvec_sri_s_armeb
 #define helper_gvec_srshr_b helper_gvec_srshr_b_armeb
 #define helper_gvec_srshr_d helper_gvec_srshr_d_armeb
 #define helper_gvec_srshr_h helper_gvec_srshr_h_armeb
@@ -3455,11 +3465,9 @@
 #define pmu_post_el_change pmu_post_el_change_armeb
 #define raise_exception raise_exception_armeb
 #define raise_exception_ra raise_exception_ra_armeb
-#define sli_op sli_op_armeb
 #define sqadd_op sqadd_op_armeb
 #define sqsub_op sqsub_op_armeb
 #define sshl_op sshl_op_armeb
-#define sri_op sri_op_armeb
 #define sve_exception_el sve_exception_el_armeb
 #define sve_zcr_len_for_el sve_zcr_len_for_el_armeb
 #define uqadd_op uqadd_op_armeb

qemu/header_gen.py

@@ -3417,7 +3417,9 @@ arm_symbols = (
     'cpu_mmu_index',
     'fp_exception_el',
     'gen_cmtst_i64',
+    'gen_gvec_sli',
     'gen_gvec_ssra',
+    'gen_gvec_sri',
     'gen_gvec_srshr',
     'gen_gvec_srsra',
     'gen_gvec_ursra',
@@ -3429,6 +3431,14 @@ arm_symbols = (
     'gen_ushl_i32',
     'gen_ushl_i64',
     'helper_fjcvtzs',
+    'helper_gvec_sli_b',
+    'helper_gvec_sli_d',
+    'helper_gvec_sli_h',
+    'helper_gvec_sli_s',
+    'helper_gvec_sri_b',
+    'helper_gvec_sri_d',
+    'helper_gvec_sri_h',
+    'helper_gvec_sri_s',
     'helper_gvec_srshr_b',
     'helper_gvec_srshr_d',
     'helper_gvec_srshr_h',
@@ -3464,11 +3474,9 @@ arm_symbols = (
     'pmu_post_el_change',
     'raise_exception',
     'raise_exception_ra',
-    'sli_op',
     'sqadd_op',
     'sqsub_op',
     'sshl_op',
-    'sri_op',
     'sve_exception_el',
     'sve_zcr_len_for_el',
     'uqadd_op',
@@ -3522,7 +3530,9 @@ aarch64_symbols = (
     'fp_exception_el',
     'gen_a64_set_pc_im',
     'gen_cmtst_i64',
+    'gen_gvec_sli',
     'gen_gvec_ssra',
+    'gen_gvec_sri',
     'gen_gvec_srshr',
     'gen_gvec_srsra',
     'gen_gvec_ursra',
@@ -3582,6 +3592,14 @@ aarch64_symbols = (
     'helper_gvec_rsqrts_d',
     'helper_gvec_rsqrts_h',
     'helper_gvec_rsqrts_s',
+    'helper_gvec_sli_b',
+    'helper_gvec_sli_d',
+    'helper_gvec_sli_h',
+    'helper_gvec_sli_s',
+    'helper_gvec_sri_b',
+    'helper_gvec_sri_d',
+    'helper_gvec_sri_h',
+    'helper_gvec_sri_s',
     'helper_gvec_srshr_b',
     'helper_gvec_srshr_d',
     'helper_gvec_srshr_h',
@@ -4555,11 +4573,9 @@ aarch64_symbols = (
     'raise_exception_ra',
     'read_cpu_reg',
     'read_cpu_reg_sp',
-    'sli_op',
     'sqadd_op',
     'sqsub_op',
     'sshl_op',
-    'sri_op',
     'sve_access_check',
     'sve_exception_el',
     'sve_zcr_len_for_el',

qemu/target/arm/helper.h

@@ -717,6 +717,16 @@ DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_3(gvec_sri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_sli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
 #ifdef TARGET_ARM
 #define helper_clz helper_clz_arm
 #define gen_helper_clz gen_helper_clz_arm
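Aside (not part of the diff): under QEMU's helper-proto expansion, each of the new lines, e.g. DEF_HELPER_FLAGS_3(gvec_sri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32), declares roughly the prototype below (the parameter names here are illustrative); the TCG_CALL_NO_RWG flag tells the optimizer the helper neither reads nor writes TCG globals:

    void helper_gvec_sri_b(void *vd, void *vn, uint32_t desc);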

qemu/target/arm/translate-a64.c

@@ -751,18 +751,6 @@ static void gen_gvec_op2(DisasContext *s, bool is_q, int rd,
                 is_q ? 16 : 8, vec_full_reg_size(s), gvec_op);
 }
 
-/* Expand a 2-operand + immediate AdvSIMD vector operation using
- * an op descriptor.
- */
-static void gen_gvec_op2i(DisasContext *s, bool is_q, int rd,
-                          int rn, int64_t imm, const GVecGen2i *gvec_op)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    tcg_gen_gvec_2i(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
-                    is_q ? 16 : 8, vec_full_reg_size(s), imm, gvec_op);
-}
-
 /* Expand a 3-operand AdvSIMD vector operation using an op descriptor. */
 static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
                          int rn, int rm, const GVecGen3 *gvec_op)
@@ -10480,12 +10468,9 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
         gen_gvec_fn2i(s, is_q, rd, rn, shift,
                       is_u ? gen_gvec_usra : gen_gvec_ssra, size);
         return;
     case 0x08: /* SRI */
-        /* Shift count same as element size is valid but does nothing. */
-        if (shift == 8 << size) {
-            goto done;
-        }
-        gen_gvec_op2i(s, is_q, rd, rn, shift, &sri_op[size]);
+        gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sri, size);
         return;
     case 0x00: /* SSHR / USHR */
@@ -10536,7 +10521,6 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
     }
     tcg_temp_free_i64(tcg_ctx, tcg_round);
 
- done:
     clear_vec_high(s, is_q, rd);
 }
@@ -10561,7 +10545,7 @@ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
     }
 
     if (insert) {
-        gen_gvec_op2i(s, is_q, rd, rn, shift, &sli_op[size]);
+        gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
     } else {
         gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
     }

qemu/target/arm/translate.c

@@ -4582,47 +4582,62 @@ static void gen_shr64_ins_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t shift)
 static void gen_shr_ins_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
 {
-    if (sh == 0) {
-        tcg_gen_mov_vec(s, d, a);
-    } else {
-        TCGv_vec t = tcg_temp_new_vec_matching(s, d);
-        TCGv_vec m = tcg_temp_new_vec_matching(s, d);
+    TCGv_vec t = tcg_temp_new_vec_matching(s, d);
+    TCGv_vec m = tcg_temp_new_vec_matching(s, d);
 
-        tcg_gen_dupi_vec(s, vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
-        tcg_gen_shri_vec(s, vece, t, a, sh);
-        tcg_gen_and_vec(s, vece, d, d, m);
-        tcg_gen_or_vec(s, vece, d, d, t);
+    tcg_gen_dupi_vec(s, vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
+    tcg_gen_shri_vec(s, vece, t, a, sh);
+    tcg_gen_and_vec(s, vece, d, d, m);
+    tcg_gen_or_vec(s, vece, d, d, t);
 
-        tcg_temp_free_vec(s, t);
-        tcg_temp_free_vec(s, m);
-    }
+    tcg_temp_free_vec(s, t);
+    tcg_temp_free_vec(s, m);
 }
 
-static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };
+void gen_gvec_sri(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                  int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
+    const GVecGen2i ops[4] = {
+        { .fni8 = gen_shr8_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .fno = gen_helper_gvec_sri_b,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_shr16_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .fno = gen_helper_gvec_sri_h,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_shr32_ins_i32,
+          .fniv = gen_shr_ins_vec,
+          .fno = gen_helper_gvec_sri_s,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fni8 = gen_shr64_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .fno = gen_helper_gvec_sri_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
 
-const GVecGen2i sri_op[4] = {
-    { .fni8 = gen_shr8_ins_i64,
-      .fniv = gen_shr_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sri,
-      .vece = MO_8 },
-    { .fni8 = gen_shr16_ins_i64,
-      .fniv = gen_shr_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sri,
-      .vece = MO_16 },
-    { .fni4 = gen_shr32_ins_i32,
-      .fniv = gen_shr_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sri,
-      .vece = MO_32 },
-    { .fni8 = gen_shr64_ins_i64,
-      .fniv = gen_shr_ins_vec,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .load_dest = true,
-      .opt_opc = vecop_list_sri,
-      .vece = MO_64 },
-};
+    /* tszimm encoding produces immediates in the range [1..esize]. */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    /* Shift of esize leaves destination unchanged. */
+    if (shift < (8 << vece)) {
+        tcg_gen_gvec_2i(s, rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+    } else {
+        /* Nop, but we do need to clear the tail. */
+        tcg_gen_gvec_mov(s, vece, rd_ofs, rd_ofs, opr_sz, max_sz);
+    }
+}
 
 static void gen_shl8_ins_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t shift)
 {
@@ -4660,47 +4675,60 @@ static void gen_shl64_ins_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t shift)
 static void gen_shl_ins_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
 {
-    if (sh == 0) {
-        tcg_gen_mov_vec(s, d, a);
-    } else {
-        TCGv_vec t = tcg_temp_new_vec_matching(s, d);
-        TCGv_vec m = tcg_temp_new_vec_matching(s, d);
+    TCGv_vec t = tcg_temp_new_vec_matching(s, d);
+    TCGv_vec m = tcg_temp_new_vec_matching(s, d);
 
-        tcg_gen_dupi_vec(s, vece, m, MAKE_64BIT_MASK(0, sh));
-        tcg_gen_shli_vec(s, vece, t, a, sh);
-        tcg_gen_and_vec(s, vece, d, d, m);
-        tcg_gen_or_vec(s, vece, d, d, t);
+    tcg_gen_shli_vec(s, vece, t, a, sh);
+    tcg_gen_dupi_vec(s, vece, m, MAKE_64BIT_MASK(0, sh));
+    tcg_gen_and_vec(s, vece, d, d, m);
+    tcg_gen_or_vec(s, vece, d, d, t);
 
-        tcg_temp_free_vec(s, t);
-        tcg_temp_free_vec(s, m);
-    }
+    tcg_temp_free_vec(s, t);
+    tcg_temp_free_vec(s, m);
 }
 
-static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };
+void gen_gvec_sli(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                  int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
+    const GVecGen2i ops[4] = {
+        { .fni8 = gen_shl8_ins_i64,
+          .fniv = gen_shl_ins_vec,
+          .fno = gen_helper_gvec_sli_b,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_shl16_ins_i64,
+          .fniv = gen_shl_ins_vec,
+          .fno = gen_helper_gvec_sli_h,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_shl32_ins_i32,
+          .fniv = gen_shl_ins_vec,
+          .fno = gen_helper_gvec_sli_s,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fni8 = gen_shl64_ins_i64,
+          .fniv = gen_shl_ins_vec,
+          .fno = gen_helper_gvec_sli_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
 
-const GVecGen2i sli_op[4] = {
-    { .fni8 = gen_shl8_ins_i64,
-      .fniv = gen_shl_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sli,
-      .vece = MO_8 },
-    { .fni8 = gen_shl16_ins_i64,
-      .fniv = gen_shl_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sli,
-      .vece = MO_16 },
-    { .fni4 = gen_shl32_ins_i32,
-      .fniv = gen_shl_ins_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_sli,
-      .vece = MO_32 },
-    { .fni8 = gen_shl64_ins_i64,
-      .fniv = gen_shl_ins_vec,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .load_dest = true,
-      .opt_opc = vecop_list_sli,
-      .vece = MO_64 },
-};
+    /* tszimm encoding produces immediates in the range [0..esize-1]. */
+    tcg_debug_assert(shift >= 0);
+    tcg_debug_assert(shift < (8 << vece));
+
+    if (shift == 0) {
+        tcg_gen_gvec_mov(s, vece, rd_ofs, rm_ofs, opr_sz, max_sz);
+    } else {
+        tcg_gen_gvec_2i(s, rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+    }
+}
 
 static void gen_mla8_i32(TCGContext *s, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
@@ -5820,20 +5848,14 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             }
             /* Right shift comes here negative. */
             shift = -shift;
-            /* Shift out of range leaves destination unchanged. */
-            if (shift < 8 << size) {
-                tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size, vec_size,
-                                shift, &sri_op[size]);
-            }
+            gen_gvec_sri(tcg_ctx, size, rd_ofs, rm_ofs, shift,
+                         vec_size, vec_size);
             return 0;
 
         case 5: /* VSHL, VSLI */
             if (u) { /* VSLI */
-                /* Shift out of range leaves destination unchanged. */
-                if (shift < 8 << size) {
-                    tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size,
-                                    vec_size, shift, &sli_op[size]);
-                }
+                gen_gvec_sli(tcg_ctx, size, rd_ofs, rm_ofs, shift,
+                             vec_size, vec_size);
             } else { /* VSHL */
                 /* Shifts larger than the element size are
                  * architecturally valid and results in zero.
qemu/target/arm/translate.h

@@ -292,8 +292,6 @@ extern const GVecGen3 mls_op[4];
 extern const GVecGen3 cmtst_op[4];
 extern const GVecGen3 sshl_op[4];
 extern const GVecGen3 ushl_op[4];
-extern const GVecGen2i sri_op[4];
-extern const GVecGen2i sli_op[4];
 extern const GVecGen4 uqadd_op[4];
 extern const GVecGen4 sqadd_op[4];
 extern const GVecGen4 uqsub_op[4];
@@ -318,6 +316,11 @@ void gen_gvec_srsra(TCGContext *tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
 void gen_gvec_ursra(TCGContext *tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                     int64_t shift, uint32_t opr_sz, uint32_t max_sz);
 
+void gen_gvec_sri(TCGContext *tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sli(TCGContext *tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+
 /*
  * Forward to the isar_feature_* tests given a DisasContext pointer.
  */

qemu/target/arm/vec_helper.c

@@ -974,6 +974,44 @@ DO_RSRA(gvec_ursra_d, uint64_t)
 
 #undef DO_RSRA
 
+#define DO_SRI(NAME, TYPE)                              \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
+{                                                       \
+    intptr_t i, oprsz = simd_oprsz(desc);               \
+    int shift = simd_data(desc);                        \
+    TYPE *d = vd, *n = vn;                              \
+    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
+        d[i] = deposit64(d[i], 0, sizeof(TYPE) * 8 - shift, n[i] >> shift); \
+    }                                                   \
+    clear_tail(d, oprsz, simd_maxsz(desc));             \
+}
+
+DO_SRI(gvec_sri_b, uint8_t)
+DO_SRI(gvec_sri_h, uint16_t)
+DO_SRI(gvec_sri_s, uint32_t)
+DO_SRI(gvec_sri_d, uint64_t)
+
+#undef DO_SRI
+
+#define DO_SLI(NAME, TYPE)                              \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
+{                                                       \
+    intptr_t i, oprsz = simd_oprsz(desc);               \
+    int shift = simd_data(desc);                        \
+    TYPE *d = vd, *n = vn;                              \
+    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
+        d[i] = deposit64(d[i], shift, sizeof(TYPE) * 8 - shift, n[i]); \
+    }                                                   \
+    clear_tail(d, oprsz, simd_maxsz(desc));             \
+}
+
+DO_SLI(gvec_sli_b, uint8_t)
+DO_SLI(gvec_sli_h, uint16_t)
+DO_SLI(gvec_sli_s, uint32_t)
+DO_SLI(gvec_sli_d, uint64_t)
+
+#undef DO_SLI
+
 /*
  * Convert float16 to float32, raising no exceptions and
  * preserving exceptional values, including SNaN.
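A closing aside (not part of the commit): the new out-of-line helpers lean on qemu's deposit64(x, pos, len, y), which returns x with len bits starting at bit pos replaced by the low bits of y; SRI therefore keeps the top shift bits of each destination lane, and SLI keeps the low shift bits. A self-contained scalar sketch, using a local stand-in for deposit64 (the real one lives in qemu's bitops.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Local model of qemu's deposit64(); valid for 1 <= len <= 64 and
     * pos + len <= 64: replace @len bits of @x at bit @pos with @y. */
    static uint64_t deposit64(uint64_t x, int pos, int len, uint64_t y)
    {
        uint64_t mask = (~0ULL >> (64 - len)) << pos;
        return (x & ~mask) | ((y << pos) & mask);
    }

    int main(void)
    {
        uint8_t d = 0xab, n = 0xcd;
        int shift = 4;          /* 8-bit lane, as in gvec_sri_b/gvec_sli_b */

        /* SRI: low (8 - shift) bits become n >> shift; d's top bits stay. */
        uint8_t sri = deposit64(d, 0, 8 - shift, n >> shift);   /* 0xac */
        /* SLI: bits [shift, 8) become n; d's low bits stay. */
        uint8_t sli = deposit64(d, shift, 8 - shift, n);        /* 0xdb */

        printf("sri=%#x sli=%#x\n", sri, sli);
        return 0;
    }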