Mirror of https://github.com/yuzu-emu/unicorn.git
target/arm: Create gen_gvec_{u,s}{rshr,rsra}

Create vectorized versions of handle_shri_with_rndacc for shift+round and shift+round+accumulate. Add out-of-line helpers in preparation for longer vector lengths from SVE.

Backports commit 6ccd48d4ea244c1c46a24dfa50bfb547f11422dd from qemu.
parent 5d7c46204d
commit 2609e6f319
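The rounding shift implemented throughout this commit is round-half-up: res = (x + (1 << (sh - 1))) >> sh. The new helpers compute it without widening by shifting one less and folding the low (rounding) bit back in. A minimal standalone C sketch of that identity, not part of the commit (urshr8 is a hypothetical name):

```c
#include <assert.h>
#include <stdint.h>

/* Rounded unsigned shift right, as in the DO_RSHR helper below:
 * shift by one less, then fold the saved rounding bit back in.
 * This avoids computing x + (1 << (sh - 1)), which could overflow. */
static uint8_t urshr8(uint8_t x, int sh)   /* 1 <= sh <= 7 in this sketch */
{
    uint8_t tmp = x >> (sh - 1);
    return (tmp >> 1) + (tmp & 1);
}

int main(void)
{
    for (int x = 0; x < 256; x++) {
        for (int sh = 1; sh < 8; sh++) {
            /* Reference: round-half-up computed in wider arithmetic. */
            uint8_t ref = (uint8_t)(((unsigned)x + (1u << (sh - 1))) >> sh);
            assert(urshr8((uint8_t)x, sh) == ref);
        }
    }
    return 0;
}
```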
qemu/aarch64.h

@@ -3424,6 +3424,10 @@
 #define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64
 #define gen_cmtst_i64 gen_cmtst_i64_aarch64
 #define gen_gvec_ssra gen_gvec_ssra_aarch64
+#define gen_gvec_srshr gen_gvec_srshr_aarch64
+#define gen_gvec_srsra gen_gvec_srsra_aarch64
+#define gen_gvec_ursra gen_gvec_ursra_aarch64
+#define gen_gvec_urshr gen_gvec_urshr_aarch64
 #define gen_gvec_usra gen_gvec_usra_aarch64
 #define get_phys_addr get_phys_addr_aarch64
 #define gen_sshl_i32 gen_sshl_i32_aarch64
@@ -3479,10 +3483,26 @@
 #define helper_gvec_rsqrts_d helper_gvec_rsqrts_d_aarch64
 #define helper_gvec_rsqrts_h helper_gvec_rsqrts_h_aarch64
 #define helper_gvec_rsqrts_s helper_gvec_rsqrts_s_aarch64
+#define helper_gvec_srshr_b helper_gvec_srshr_b_aarch64
+#define helper_gvec_srshr_d helper_gvec_srshr_d_aarch64
+#define helper_gvec_srshr_h helper_gvec_srshr_h_aarch64
+#define helper_gvec_srshr_s helper_gvec_srshr_s_aarch64
+#define helper_gvec_srsra_b helper_gvec_srsra_b_aarch64
+#define helper_gvec_srsra_d helper_gvec_srsra_d_aarch64
+#define helper_gvec_srsra_h helper_gvec_srsra_h_aarch64
+#define helper_gvec_srsra_s helper_gvec_srsra_s_aarch64
 #define helper_gvec_ssra_b helper_gvec_ssra_b_aarch64
 #define helper_gvec_ssra_d helper_gvec_ssra_d_aarch64
 #define helper_gvec_ssra_h helper_gvec_ssra_h_aarch64
 #define helper_gvec_ssra_s helper_gvec_ssra_s_aarch64
+#define helper_gvec_urshr_b helper_gvec_urshr_b_aarch64
+#define helper_gvec_urshr_d helper_gvec_urshr_d_aarch64
+#define helper_gvec_urshr_h helper_gvec_urshr_h_aarch64
+#define helper_gvec_urshr_s helper_gvec_urshr_s_aarch64
+#define helper_gvec_ursra_b helper_gvec_ursra_b_aarch64
+#define helper_gvec_ursra_d helper_gvec_ursra_d_aarch64
+#define helper_gvec_ursra_h helper_gvec_ursra_h_aarch64
+#define helper_gvec_ursra_s helper_gvec_ursra_s_aarch64
 #define helper_gvec_usra_b helper_gvec_usra_b_aarch64
 #define helper_gvec_usra_d helper_gvec_usra_d_aarch64
 #define helper_gvec_usra_h helper_gvec_usra_h_aarch64

qemu/aarch64eb.h

@@ -3424,6 +3424,10 @@
 #define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64eb
 #define gen_cmtst_i64 gen_cmtst_i64_aarch64eb
 #define gen_gvec_ssra gen_gvec_ssra_aarch64eb
+#define gen_gvec_srshr gen_gvec_srshr_aarch64eb
+#define gen_gvec_srsra gen_gvec_srsra_aarch64eb
+#define gen_gvec_ursra gen_gvec_ursra_aarch64eb
+#define gen_gvec_urshr gen_gvec_urshr_aarch64eb
 #define gen_gvec_usra gen_gvec_usra_aarch64eb
 #define get_phys_addr get_phys_addr_aarch64eb
 #define gen_sshl_i32 gen_sshl_i32_aarch64eb
@@ -3479,10 +3483,26 @@
 #define helper_gvec_rsqrts_d helper_gvec_rsqrts_d_aarch64eb
 #define helper_gvec_rsqrts_h helper_gvec_rsqrts_h_aarch64eb
 #define helper_gvec_rsqrts_s helper_gvec_rsqrts_s_aarch64eb
+#define helper_gvec_srshr_b helper_gvec_srshr_b_aarch64eb
+#define helper_gvec_srshr_d helper_gvec_srshr_d_aarch64eb
+#define helper_gvec_srshr_h helper_gvec_srshr_h_aarch64eb
+#define helper_gvec_srshr_s helper_gvec_srshr_s_aarch64eb
+#define helper_gvec_srsra_b helper_gvec_srsra_b_aarch64eb
+#define helper_gvec_srsra_d helper_gvec_srsra_d_aarch64eb
+#define helper_gvec_srsra_h helper_gvec_srsra_h_aarch64eb
+#define helper_gvec_srsra_s helper_gvec_srsra_s_aarch64eb
 #define helper_gvec_ssra_b helper_gvec_ssra_b_aarch64eb
 #define helper_gvec_ssra_d helper_gvec_ssra_d_aarch64eb
 #define helper_gvec_ssra_h helper_gvec_ssra_h_aarch64eb
 #define helper_gvec_ssra_s helper_gvec_ssra_s_aarch64eb
+#define helper_gvec_urshr_b helper_gvec_urshr_b_aarch64eb
+#define helper_gvec_urshr_d helper_gvec_urshr_d_aarch64eb
+#define helper_gvec_urshr_h helper_gvec_urshr_h_aarch64eb
+#define helper_gvec_urshr_s helper_gvec_urshr_s_aarch64eb
+#define helper_gvec_ursra_b helper_gvec_ursra_b_aarch64eb
+#define helper_gvec_ursra_d helper_gvec_ursra_d_aarch64eb
+#define helper_gvec_ursra_h helper_gvec_ursra_h_aarch64eb
+#define helper_gvec_ursra_s helper_gvec_ursra_s_aarch64eb
 #define helper_gvec_usra_b helper_gvec_usra_b_aarch64eb
 #define helper_gvec_usra_d helper_gvec_usra_d_aarch64eb
 #define helper_gvec_usra_h helper_gvec_usra_h_aarch64eb
qemu/arm.h (20 additions)
@@ -3409,6 +3409,10 @@
 #define fp_exception_el fp_exception_el_arm
 #define gen_cmtst_i64 gen_cmtst_i64_arm
 #define gen_gvec_ssra gen_gvec_ssra_arm
+#define gen_gvec_srshr gen_gvec_srshr_arm
+#define gen_gvec_srsra gen_gvec_srsra_arm
+#define gen_gvec_ursra gen_gvec_ursra_arm
+#define gen_gvec_urshr gen_gvec_urshr_arm
 #define gen_gvec_usra gen_gvec_usra_arm
 #define get_phys_addr get_phys_addr_arm
 #define gen_sshl_i32 gen_sshl_i32_arm
@@ -3416,10 +3420,26 @@
 #define gen_ushl_i32 gen_ushl_i32_arm
 #define gen_ushl_i64 gen_ushl_i64_arm
 #define helper_fjcvtzs helper_fjcvtzs_arm
+#define helper_gvec_srshr_b helper_gvec_srshr_b_arm
+#define helper_gvec_srshr_d helper_gvec_srshr_d_arm
+#define helper_gvec_srshr_h helper_gvec_srshr_h_arm
+#define helper_gvec_srshr_s helper_gvec_srshr_s_arm
+#define helper_gvec_srsra_b helper_gvec_srsra_b_arm
+#define helper_gvec_srsra_d helper_gvec_srsra_d_arm
+#define helper_gvec_srsra_h helper_gvec_srsra_h_arm
+#define helper_gvec_srsra_s helper_gvec_srsra_s_arm
 #define helper_gvec_ssra_b helper_gvec_ssra_b_arm
 #define helper_gvec_ssra_d helper_gvec_ssra_d_arm
 #define helper_gvec_ssra_h helper_gvec_ssra_h_arm
 #define helper_gvec_ssra_s helper_gvec_ssra_s_arm
+#define helper_gvec_urshr_b helper_gvec_urshr_b_arm
+#define helper_gvec_urshr_d helper_gvec_urshr_d_arm
+#define helper_gvec_urshr_h helper_gvec_urshr_h_arm
+#define helper_gvec_urshr_s helper_gvec_urshr_s_arm
+#define helper_gvec_ursra_b helper_gvec_ursra_b_arm
+#define helper_gvec_ursra_d helper_gvec_ursra_d_arm
+#define helper_gvec_ursra_h helper_gvec_ursra_h_arm
+#define helper_gvec_ursra_s helper_gvec_ursra_s_arm
 #define helper_gvec_usra_b helper_gvec_usra_b_arm
 #define helper_gvec_usra_d helper_gvec_usra_d_arm
 #define helper_gvec_usra_h helper_gvec_usra_h_arm
qemu/armeb.h (20 additions)
@@ -3409,6 +3409,10 @@
 #define fp_exception_el fp_exception_el_armeb
 #define gen_cmtst_i64 gen_cmtst_i64_armeb
 #define gen_gvec_ssra gen_gvec_ssra_armeb
+#define gen_gvec_srshr gen_gvec_srshr_armeb
+#define gen_gvec_srsra gen_gvec_srsra_armeb
+#define gen_gvec_ursra gen_gvec_ursra_armeb
+#define gen_gvec_urshr gen_gvec_urshr_armeb
 #define gen_gvec_usra gen_gvec_usra_armeb
 #define get_phys_addr get_phys_addr_armeb
 #define gen_sshl_i32 gen_sshl_i32_armeb
@@ -3416,10 +3420,26 @@
 #define gen_ushl_i32 gen_ushl_i32_armeb
 #define gen_ushl_i64 gen_ushl_i64_armeb
 #define helper_fjcvtzs helper_fjcvtzs_armeb
+#define helper_gvec_srshr_b helper_gvec_srshr_b_armeb
+#define helper_gvec_srshr_d helper_gvec_srshr_d_armeb
+#define helper_gvec_srshr_h helper_gvec_srshr_h_armeb
+#define helper_gvec_srshr_s helper_gvec_srshr_s_armeb
+#define helper_gvec_srsra_b helper_gvec_srsra_b_armeb
+#define helper_gvec_srsra_d helper_gvec_srsra_d_armeb
+#define helper_gvec_srsra_h helper_gvec_srsra_h_armeb
+#define helper_gvec_srsra_s helper_gvec_srsra_s_armeb
 #define helper_gvec_ssra_b helper_gvec_ssra_b_armeb
 #define helper_gvec_ssra_d helper_gvec_ssra_d_armeb
 #define helper_gvec_ssra_h helper_gvec_ssra_h_armeb
 #define helper_gvec_ssra_s helper_gvec_ssra_s_armeb
+#define helper_gvec_urshr_b helper_gvec_urshr_b_armeb
+#define helper_gvec_urshr_d helper_gvec_urshr_d_armeb
+#define helper_gvec_urshr_h helper_gvec_urshr_h_armeb
+#define helper_gvec_urshr_s helper_gvec_urshr_s_armeb
+#define helper_gvec_ursra_b helper_gvec_ursra_b_armeb
+#define helper_gvec_ursra_d helper_gvec_ursra_d_armeb
+#define helper_gvec_ursra_h helper_gvec_ursra_h_armeb
+#define helper_gvec_ursra_s helper_gvec_ursra_s_armeb
 #define helper_gvec_usra_b helper_gvec_usra_b_armeb
 #define helper_gvec_usra_d helper_gvec_usra_d_armeb
 #define helper_gvec_usra_h helper_gvec_usra_h_armeb
qemu/header_gen.py

@@ -3418,6 +3418,10 @@ arm_symbols = (
     'fp_exception_el',
     'gen_cmtst_i64',
     'gen_gvec_ssra',
+    'gen_gvec_srshr',
+    'gen_gvec_srsra',
+    'gen_gvec_ursra',
+    'gen_gvec_urshr',
     'gen_gvec_usra',
     'get_phys_addr',
     'gen_sshl_i32',
@@ -3425,10 +3429,26 @@ arm_symbols = (
     'gen_ushl_i32',
     'gen_ushl_i64',
     'helper_fjcvtzs',
+    'helper_gvec_srshr_b',
+    'helper_gvec_srshr_d',
+    'helper_gvec_srshr_h',
+    'helper_gvec_srshr_s',
+    'helper_gvec_srsra_b',
+    'helper_gvec_srsra_d',
+    'helper_gvec_srsra_h',
+    'helper_gvec_srsra_s',
     'helper_gvec_ssra_b',
     'helper_gvec_ssra_d',
     'helper_gvec_ssra_h',
     'helper_gvec_ssra_s',
+    'helper_gvec_urshr_b',
+    'helper_gvec_urshr_d',
+    'helper_gvec_urshr_h',
+    'helper_gvec_urshr_s',
+    'helper_gvec_ursra_b',
+    'helper_gvec_ursra_d',
+    'helper_gvec_ursra_h',
+    'helper_gvec_ursra_s',
     'helper_gvec_usra_b',
     'helper_gvec_usra_d',
     'helper_gvec_usra_h',
@@ -3503,6 +3523,10 @@ aarch64_symbols = (
     'gen_a64_set_pc_im',
     'gen_cmtst_i64',
     'gen_gvec_ssra',
+    'gen_gvec_srshr',
+    'gen_gvec_srsra',
+    'gen_gvec_ursra',
+    'gen_gvec_urshr',
     'gen_gvec_usra',
     'get_phys_addr',
     'gen_sshl_i32',
@@ -3558,10 +3582,26 @@ aarch64_symbols = (
     'helper_gvec_rsqrts_d',
     'helper_gvec_rsqrts_h',
     'helper_gvec_rsqrts_s',
+    'helper_gvec_srshr_b',
+    'helper_gvec_srshr_d',
+    'helper_gvec_srshr_h',
+    'helper_gvec_srshr_s',
+    'helper_gvec_srsra_b',
+    'helper_gvec_srsra_d',
+    'helper_gvec_srsra_h',
+    'helper_gvec_srsra_s',
     'helper_gvec_ssra_b',
     'helper_gvec_ssra_d',
     'helper_gvec_ssra_h',
     'helper_gvec_ssra_s',
+    'helper_gvec_urshr_b',
+    'helper_gvec_urshr_d',
+    'helper_gvec_urshr_h',
+    'helper_gvec_urshr_s',
+    'helper_gvec_ursra_b',
+    'helper_gvec_ursra_d',
+    'helper_gvec_ursra_h',
+    'helper_gvec_ursra_s',
     'helper_gvec_usra_b',
     'helper_gvec_usra_d',
     'helper_gvec_usra_h',
qemu/target/arm/helper.h

@@ -697,6 +697,26 @@ DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_srsra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_ursra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
 #ifdef TARGET_ARM
 #define helper_clz helper_clz_arm
 #define gen_helper_clz gen_helper_clz_arm
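These DEF_HELPER_FLAGS_3 entries declare the out-of-line helpers that the .fno fields of the expanders below reference. Each expands, roughly, to a C prototype of this shape (a sketch of QEMU's helper convention, not text from the commit):

```c
/* Roughly what DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG,
 * void, ptr, ptr, i32) declares: destination and source vector
 * pointers plus a packed descriptor that encodes oprsz/maxsz and,
 * here, the shift amount as simd_data. */
void helper_gvec_srshr_b(void *vd, void *vn, uint32_t desc);
```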
qemu/target/arm/translate-a64.c

@@ -10507,10 +10507,15 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
         return;
 
     case 0x04: /* SRSHR / URSHR (rounding) */
-        break;
+        gen_gvec_fn2i(s, is_q, rd, rn, shift,
+                      is_u ? gen_gvec_urshr : gen_gvec_srshr, size);
+        return;
+
     case 0x06: /* SRSRA / URSRA (accum + rounding) */
-        accumulate = true;
-        break;
+        gen_gvec_fn2i(s, is_q, rd, rn, shift,
+                      is_u ? gen_gvec_ursra : gen_gvec_srsra, size);
+        return;
+
     default:
         g_assert_not_reached();
     }
qemu/target/arm/translate.c

@@ -4128,6 +4128,422 @@ void gen_gvec_usra(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
     }
 }
+
+/*
+ * Shift one less than the requested amount, and the low bit is
+ * the rounding bit.  For the 8 and 16-bit operations, because we
+ * mask the low bit, we can perform a normal integer shift instead
+ * of a vector shift.
+ */
+static void gen_srshr8_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    tcg_gen_shri_i64(s, t, a, sh - 1);
+    tcg_gen_andi_i64(s, t, t, dup_const(MO_8, 1));
+    tcg_gen_vec_sar8i_i64(s, d, a, sh);
+    tcg_gen_vec_add8_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_srshr16_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    tcg_gen_shri_i64(s, t, a, sh - 1);
+    tcg_gen_andi_i64(s, t, t, dup_const(MO_16, 1));
+    tcg_gen_vec_sar16i_i64(s, d, a, sh);
+    tcg_gen_vec_add16_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_srshr32_i32(TCGContext *s, TCGv_i32 d, TCGv_i32 a, int32_t sh)
+{
+    TCGv_i32 t = tcg_temp_new_i32(s);
+
+    tcg_gen_extract_i32(s, t, a, sh - 1, 1);
+    tcg_gen_sari_i32(s, d, a, sh);
+    tcg_gen_add_i32(s, d, d, t);
+    tcg_temp_free_i32(s, t);
+}
+
+static void gen_srshr64_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    tcg_gen_extract_i64(s, t, a, sh - 1, 1);
+    tcg_gen_sari_i64(s, d, a, sh);
+    tcg_gen_add_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_srshr_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(s, d);
+    TCGv_vec ones = tcg_temp_new_vec_matching(s, d);
+
+    tcg_gen_shri_vec(s, vece, t, a, sh - 1);
+    tcg_gen_dupi_vec(s, vece, ones, 1);
+    tcg_gen_and_vec(s, vece, t, t, ones);
+    tcg_gen_sari_vec(s, vece, d, a, sh);
+    tcg_gen_add_vec(s, vece, d, d, t);
+
+    tcg_temp_free_vec(s, t);
+    tcg_temp_free_vec(s, ones);
+}
+
+void gen_gvec_srshr(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                    int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2i ops[4] = {
+        { .fni8 = gen_srshr8_i64,
+          .fniv = gen_srshr_vec,
+          .fno = gen_helper_gvec_srshr_b,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_srshr16_i64,
+          .fniv = gen_srshr_vec,
+          .fno = gen_helper_gvec_srshr_h,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_srshr32_i32,
+          .fniv = gen_srshr_vec,
+          .fno = gen_helper_gvec_srshr_s,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fni8 = gen_srshr64_i64,
+          .fniv = gen_srshr_vec,
+          .fno = gen_helper_gvec_srshr_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
+
+    /* tszimm encoding produces immediates in the range [1..esize] */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    if (shift == (8 << vece)) {
+        /*
+         * Shifts larger than the element size are architecturally valid.
+         * Signed results in all sign bits.  With rounding, this produces
+         * (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
+         * I.e. always zero.
+         */
+        tcg_gen_gvec_dup_imm(s, vece, rd_ofs, opr_sz, max_sz, 0);
+    } else {
+        tcg_gen_gvec_2i(s, rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+    }
+}
+
+static void gen_srsra8_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    gen_srshr8_i64(s, t, a, sh);
+    tcg_gen_vec_add8_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_srsra16_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    gen_srshr16_i64(s, t, a, sh);
+    tcg_gen_vec_add16_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_srsra32_i32(TCGContext *s, TCGv_i32 d, TCGv_i32 a, int32_t sh)
+{
+    TCGv_i32 t = tcg_temp_new_i32(s);
+
+    gen_srshr32_i32(s, t, a, sh);
+    tcg_gen_add_i32(s, d, d, t);
+    tcg_temp_free_i32(s, t);
+}
+
+static void gen_srsra64_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    gen_srshr64_i64(s, t, a, sh);
+    tcg_gen_add_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_srsra_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(s, d);
+
+    gen_srshr_vec(s, vece, t, a, sh);
+    tcg_gen_add_vec(s, vece, d, d, t);
+    tcg_temp_free_vec(s, t);
+}
+
+void gen_gvec_srsra(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                    int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2i ops[4] = {
+        { .fni8 = gen_srsra8_i64,
+          .fniv = gen_srsra_vec,
+          .fno = gen_helper_gvec_srsra_b,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_8 },
+        { .fni8 = gen_srsra16_i64,
+          .fniv = gen_srsra_vec,
+          .fno = gen_helper_gvec_srsra_h,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_16 },
+        { .fni4 = gen_srsra32_i32,
+          .fniv = gen_srsra_vec,
+          .fno = gen_helper_gvec_srsra_s,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_32 },
+        { .fni8 = gen_srsra64_i64,
+          .fniv = gen_srsra_vec,
+          .fno = gen_helper_gvec_srsra_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_64 },
+    };
+
+    /* tszimm encoding produces immediates in the range [1..esize] */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    /*
+     * Shifts larger than the element size are architecturally valid.
+     * Signed results in all sign bits.  With rounding, this produces
+     * (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
+     * I.e. always zero.  With accumulation, this leaves D unchanged.
+     */
+    if (shift == (8 << vece)) {
+        /* Nop, but we do need to clear the tail. */
+        tcg_gen_gvec_mov(s, vece, rd_ofs, rd_ofs, opr_sz, max_sz);
+    } else {
+        tcg_gen_gvec_2i(s, rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+    }
+}
+
+static void gen_urshr8_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    tcg_gen_shri_i64(s, t, a, sh - 1);
+    tcg_gen_andi_i64(s, t, t, dup_const(MO_8, 1));
+    tcg_gen_vec_shr8i_i64(s, d, a, sh);
+    tcg_gen_vec_add8_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_urshr16_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    tcg_gen_shri_i64(s, t, a, sh - 1);
+    tcg_gen_andi_i64(s, t, t, dup_const(MO_16, 1));
+    tcg_gen_vec_shr16i_i64(s, d, a, sh);
+    tcg_gen_vec_add16_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_urshr32_i32(TCGContext *s, TCGv_i32 d, TCGv_i32 a, int32_t sh)
+{
+    TCGv_i32 t = tcg_temp_new_i32(s);
+
+    tcg_gen_extract_i32(s, t, a, sh - 1, 1);
+    tcg_gen_shri_i32(s, d, a, sh);
+    tcg_gen_add_i32(s, d, d, t);
+    tcg_temp_free_i32(s, t);
+}
+
+static void gen_urshr64_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    tcg_gen_extract_i64(s, t, a, sh - 1, 1);
+    tcg_gen_shri_i64(s, d, a, sh);
+    tcg_gen_add_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_urshr_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(s, d);
+    TCGv_vec ones = tcg_temp_new_vec_matching(s, d);
+
+    tcg_gen_shri_vec(s, vece, t, a, shift - 1);
+    tcg_gen_dupi_vec(s, vece, ones, 1);
+    tcg_gen_and_vec(s, vece, t, t, ones);
+    tcg_gen_shri_vec(s, vece, d, a, shift);
+    tcg_gen_add_vec(s, vece, d, d, t);
+
+    tcg_temp_free_vec(s, t);
+    tcg_temp_free_vec(s, ones);
+}
+
+void gen_gvec_urshr(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                    int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2i ops[4] = {
+        { .fni8 = gen_urshr8_i64,
+          .fniv = gen_urshr_vec,
+          .fno = gen_helper_gvec_urshr_b,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_urshr16_i64,
+          .fniv = gen_urshr_vec,
+          .fno = gen_helper_gvec_urshr_h,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_urshr32_i32,
+          .fniv = gen_urshr_vec,
+          .fno = gen_helper_gvec_urshr_s,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fni8 = gen_urshr64_i64,
+          .fniv = gen_urshr_vec,
+          .fno = gen_helper_gvec_urshr_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
+
+    /* tszimm encoding produces immediates in the range [1..esize] */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    if (shift == (8 << vece)) {
+        /*
+         * Shifts larger than the element size are architecturally valid.
+         * Unsigned results in zero.  With rounding, this produces a
+         * copy of the most significant bit.
+         */
+        tcg_gen_gvec_shri(s, vece, rd_ofs, rm_ofs, shift - 1, opr_sz, max_sz);
+    } else {
+        tcg_gen_gvec_2i(s, rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+    }
+}
+
+static void gen_ursra8_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    if (sh == 8) {
+        tcg_gen_vec_shr8i_i64(s, t, a, 7);
+    } else {
+        gen_urshr8_i64(s, t, a, sh);
+    }
+    tcg_gen_vec_add8_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_ursra16_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    if (sh == 16) {
+        tcg_gen_vec_shr16i_i64(s, t, a, 15);
+    } else {
+        gen_urshr16_i64(s, t, a, sh);
+    }
+    tcg_gen_vec_add16_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_ursra32_i32(TCGContext *s, TCGv_i32 d, TCGv_i32 a, int32_t sh)
+{
+    TCGv_i32 t = tcg_temp_new_i32(s);
+
+    if (sh == 32) {
+        tcg_gen_shri_i32(s, t, a, 31);
+    } else {
+        gen_urshr32_i32(s, t, a, sh);
+    }
+    tcg_gen_add_i32(s, d, d, t);
+    tcg_temp_free_i32(s, t);
+}
+
+static void gen_ursra64_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64(s);
+
+    if (sh == 64) {
+        tcg_gen_shri_i64(s, t, a, 63);
+    } else {
+        gen_urshr64_i64(s, t, a, sh);
+    }
+    tcg_gen_add_i64(s, d, d, t);
+    tcg_temp_free_i64(s, t);
+}
+
+static void gen_ursra_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(s, d);
+
+    if (sh == (8 << vece)) {
+        tcg_gen_shri_vec(s, vece, t, a, sh - 1);
+    } else {
+        gen_urshr_vec(s, vece, t, a, sh);
+    }
+    tcg_gen_add_vec(s, vece, d, d, t);
+    tcg_temp_free_vec(s, t);
+}
+
+void gen_gvec_ursra(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                    int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2i ops[4] = {
+        { .fni8 = gen_ursra8_i64,
+          .fniv = gen_ursra_vec,
+          .fno = gen_helper_gvec_ursra_b,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_8 },
+        { .fni8 = gen_ursra16_i64,
+          .fniv = gen_ursra_vec,
+          .fno = gen_helper_gvec_ursra_h,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_16 },
+        { .fni4 = gen_ursra32_i32,
+          .fniv = gen_ursra_vec,
+          .fno = gen_helper_gvec_ursra_s,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_32 },
+        { .fni8 = gen_ursra64_i64,
+          .fniv = gen_ursra_vec,
+          .fno = gen_helper_gvec_ursra_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_64 },
+    };
+
+    /* tszimm encoding produces immediates in the range [1..esize] */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    tcg_gen_gvec_2i(s, rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+}
 
 static void gen_shr8_ins_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t shift)
 {
     uint64_t mask = dup_const(MO_8, 0xff >> shift);
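The shift == element-size special cases called out in the comments above reduce, for byte lanes, to: SRSHR always 0, URSHR a copy of the most significant bit, SRSRA leaving D unchanged, URSRA adding the MSB to D. A small standalone C check of the first two (hypothetical helper names, not from the commit):

```c
#include <assert.h>
#include <stdint.h>

/* Rounded shift by the full element size (sh == 8 for bytes): the
 * "shift one less" trick becomes a shift by 7 plus the rounding bit. */
static int8_t srshr8_full(int8_t x)
{
    int8_t t = (int8_t)(x >> 7);             /* arithmetic: -1 or 0 */
    return (int8_t)((t >> 1) + (t & 1));
}

static uint8_t urshr8_full(uint8_t x)
{
    uint8_t t = x >> 7;                      /* logical: the MSB */
    return (uint8_t)((t >> 1) + (t & 1));
}

int main(void)
{
    for (int i = 0; i < 256; i++) {
        /* Signed: rounding cancels the sign fill, (-1 + 1) >> 1 == 0. */
        assert(srshr8_full((int8_t)i) == 0);
        /* Unsigned: only the rounding bit (the MSB) survives. */
        assert(urshr8_full((uint8_t)i) == ((uint8_t)i >> 7));
    }
    return 0;
}
```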
qemu/target/arm/translate.h

@@ -309,6 +309,15 @@ void gen_gvec_ssra(TCGContext* tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
 void gen_gvec_usra(TCGContext *tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
 
+void gen_gvec_srshr(TCGContext *tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_urshr(TCGContext *tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_srsra(TCGContext *tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_ursra(TCGContext *tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+
 /*
  * Forward to the isar_feature_* tests given a DisasContext pointer.
  */
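For orientation, a hypothetical call site (illustrative only, not from this commit) showing what the parameters of these expanders mean, for a rounding shift right by 3 over one 128-bit Q register of byte lanes:

```c
/* Hypothetical example call: expand SRSHR #3 on 16 bytes of MO_8 lanes.
 * rd_ofs/rm_ofs are cpu-env offsets of the destination and source vector
 * registers; opr_sz is the number of bytes actually operated on; max_sz
 * is the register size up to which the tail is made predictable. */
gen_gvec_srshr(tcg_ctx, MO_8, rd_ofs, rm_ofs, 3, /*opr_sz=*/16, /*max_sz=*/16);
```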
qemu/target/arm/vec_helper.c

@@ -924,6 +924,56 @@ DO_SRA(gvec_usra_d, uint64_t)
 
 #undef DO_SRA
 
+#define DO_RSHR(NAME, TYPE)                             \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
+{                                                       \
+    intptr_t i, oprsz = simd_oprsz(desc);               \
+    int shift = simd_data(desc);                        \
+    TYPE *d = vd, *n = vn;                              \
+    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
+        TYPE tmp = n[i] >> (shift - 1);                 \
+        d[i] = (tmp >> 1) + (tmp & 1);                  \
+    }                                                   \
+    clear_tail(d, oprsz, simd_maxsz(desc));             \
+}
+
+DO_RSHR(gvec_srshr_b, int8_t)
+DO_RSHR(gvec_srshr_h, int16_t)
+DO_RSHR(gvec_srshr_s, int32_t)
+DO_RSHR(gvec_srshr_d, int64_t)
+
+DO_RSHR(gvec_urshr_b, uint8_t)
+DO_RSHR(gvec_urshr_h, uint16_t)
+DO_RSHR(gvec_urshr_s, uint32_t)
+DO_RSHR(gvec_urshr_d, uint64_t)
+
+#undef DO_RSHR
+
+#define DO_RSRA(NAME, TYPE)                             \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
+{                                                       \
+    intptr_t i, oprsz = simd_oprsz(desc);               \
+    int shift = simd_data(desc);                        \
+    TYPE *d = vd, *n = vn;                              \
+    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
+        TYPE tmp = n[i] >> (shift - 1);                 \
+        d[i] += (tmp >> 1) + (tmp & 1);                 \
+    }                                                   \
+    clear_tail(d, oprsz, simd_maxsz(desc));             \
+}
+
+DO_RSRA(gvec_srsra_b, int8_t)
+DO_RSRA(gvec_srsra_h, int16_t)
+DO_RSRA(gvec_srsra_s, int32_t)
+DO_RSRA(gvec_srsra_d, int64_t)
+
+DO_RSRA(gvec_ursra_b, uint8_t)
+DO_RSRA(gvec_ursra_h, uint16_t)
+DO_RSRA(gvec_ursra_s, uint32_t)
+DO_RSRA(gvec_ursra_d, uint64_t)
+
+#undef DO_RSRA
+
 /*
  * Convert float16 to float32, raising no exceptions and
  * preserving exceptional values, including SNaN.
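The DO_RSHR body relies on the same shift-one-less identity for signed types, where the arithmetic shift makes the "(-1 + 1) >> 1 == 0" case from the expander comments fall out naturally. A standalone mirror of the macro for int8_t lanes (hypothetical srshr8 name, not from the commit):

```c
#include <assert.h>
#include <stdint.h>

/* Mirror of the DO_RSHR body for int8_t: shift by one less, then add
 * the saved rounding bit back in. */
static int8_t srshr8(int8_t x, int shift)
{
    int8_t tmp = (int8_t)(x >> (shift - 1));
    return (int8_t)((tmp >> 1) + (tmp & 1));
}

int main(void)
{
    for (int x = -128; x < 128; x++) {
        for (int shift = 1; shift <= 7; shift++) {
            /* Round-half-up reference computed in wider arithmetic. */
            int ref = (x + (1 << (shift - 1))) >> shift;
            assert(srshr8((int8_t)x, shift) == (int8_t)ref);
        }
        /* shift == esize: (-1 + 1) >> 1 == 0 and (0 + 1) >> 1 == 0,
         * i.e. always zero, matching the gen_gvec_srshr comment. */
        assert(srshr8((int8_t)x, 8) == 0);
    }
    return 0;
}
```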