Mirror of https://github.com/yuzu-emu/unicorn.git, synced 2025-01-03 19:35:26 +00:00
target/arm: Create gen_gvec_[us]sra
The functions eliminate duplication of the special cases for this operation.
They match up with the GVecGen2iFn typedef.

Add out-of-line helpers. We got away with only having inline expanders
because the neon vector size is only 16 bytes, and we know that the inline
expansion will always succeed. When we reuse this for SVE, tcg-gvec-op may
decide to use an out-of-line helper due to longer vector lengths.

Backports commit 631e565450c483e0622eec3d8b61d7fa41d16bca from qemu
parent 4be4ca57b1
commit 5d7c46204d
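
Before the diff, a brief sketch of the pattern the commit moves to. The new expanders match the GVecGen2iFn shape (in this fork the TCG context is passed explicitly, as the translate.h declarations below show), so a translator can select an expander with a plain function pointer instead of indexing the old per-size GVecGen2i tables. The wrapper below is hypothetical and for illustration only; the typedef is paraphrased from the tcg-op-gvec API rather than quoted from this commit.

/* GVecGen2iFn shape, paraphrased: expand a gvec operation with one
 * source operand and an immediate (here, the shift count). */
typedef void GVecGen2iFn(TCGContext *s, unsigned vece, uint32_t dofs,
                         uint32_t aofs, int64_t c,
                         uint32_t oprsz, uint32_t maxsz);

/* Hypothetical call site: the shift == esize special cases that every
 * caller used to open-code now live inside gen_gvec_[us]sra itself. */
static void expand_sra(TCGContext *s, bool is_u, unsigned vece,
                       uint32_t rd_ofs, uint32_t rm_ofs, int64_t shift,
                       uint32_t opr_sz, uint32_t max_sz)
{
    GVecGen2iFn *fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
    fn(s, vece, rd_ofs, rm_ofs, shift, opr_sz, max_sz);
}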
qemu/aarch64.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_aarch64
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_aarch64
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_aarch64
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_aarch64
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_aarch64
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_aarch64
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_aarch64
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_aarch64
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_aarch64
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_aarch64
@@ -3427,6 +3423,8 @@
 #define fp_exception_el fp_exception_el_aarch64
 #define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64
 #define gen_cmtst_i64 gen_cmtst_i64_aarch64
+#define gen_gvec_ssra gen_gvec_ssra_aarch64
+#define gen_gvec_usra gen_gvec_usra_aarch64
 #define get_phys_addr get_phys_addr_aarch64
 #define gen_sshl_i32 gen_sshl_i32_aarch64
 #define gen_sshl_i64 gen_sshl_i64_aarch64
@@ -3481,6 +3479,14 @@
 #define helper_gvec_rsqrts_d helper_gvec_rsqrts_d_aarch64
 #define helper_gvec_rsqrts_h helper_gvec_rsqrts_h_aarch64
 #define helper_gvec_rsqrts_s helper_gvec_rsqrts_s_aarch64
+#define helper_gvec_ssra_b helper_gvec_ssra_b_aarch64
+#define helper_gvec_ssra_d helper_gvec_ssra_d_aarch64
+#define helper_gvec_ssra_h helper_gvec_ssra_h_aarch64
+#define helper_gvec_ssra_s helper_gvec_ssra_s_aarch64
+#define helper_gvec_usra_b helper_gvec_usra_b_aarch64
+#define helper_gvec_usra_d helper_gvec_usra_d_aarch64
+#define helper_gvec_usra_h helper_gvec_usra_h_aarch64
+#define helper_gvec_usra_s helper_gvec_usra_s_aarch64
 #define helper_msr_i_daifclear helper_msr_i_daifclear_aarch64
 #define helper_msr_i_daifset helper_msr_i_daifset_aarch64
 #define helper_msr_i_spsel helper_msr_i_spsel_aarch64
@@ -4434,7 +4440,6 @@
 #define sqadd_op sqadd_op_aarch64
 #define sqsub_op sqsub_op_aarch64
 #define sshl_op sshl_op_aarch64
-#define ssra_op ssra_op_aarch64
 #define sri_op sri_op_aarch64
 #define sve_access_check sve_access_check_aarch64
 #define sve_exception_el sve_exception_el_aarch64
@@ -4443,7 +4448,6 @@
 #define uqadd_op uqadd_op_aarch64
 #define uqsub_op uqsub_op_aarch64
 #define ushl_op ushl_op_aarch64
-#define usra_op usra_op_aarch64
 #define v8m_security_lookup v8m_security_lookup_aarch64
 #define vfp_expand_imm vfp_expand_imm_aarch64
 #define write_fp_dreg write_fp_dreg_aarch64
qemu/aarch64eb.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_aarch64eb
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_aarch64eb
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_aarch64eb
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_aarch64eb
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_aarch64eb
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_aarch64eb
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_aarch64eb
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_aarch64eb
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_aarch64eb
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_aarch64eb
@@ -3427,6 +3423,8 @@
 #define fp_exception_el fp_exception_el_aarch64eb
 #define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64eb
 #define gen_cmtst_i64 gen_cmtst_i64_aarch64eb
+#define gen_gvec_ssra gen_gvec_ssra_aarch64eb
+#define gen_gvec_usra gen_gvec_usra_aarch64eb
 #define get_phys_addr get_phys_addr_aarch64eb
 #define gen_sshl_i32 gen_sshl_i32_aarch64eb
 #define gen_sshl_i64 gen_sshl_i64_aarch64eb
@@ -3481,6 +3479,14 @@
 #define helper_gvec_rsqrts_d helper_gvec_rsqrts_d_aarch64eb
 #define helper_gvec_rsqrts_h helper_gvec_rsqrts_h_aarch64eb
 #define helper_gvec_rsqrts_s helper_gvec_rsqrts_s_aarch64eb
+#define helper_gvec_ssra_b helper_gvec_ssra_b_aarch64eb
+#define helper_gvec_ssra_d helper_gvec_ssra_d_aarch64eb
+#define helper_gvec_ssra_h helper_gvec_ssra_h_aarch64eb
+#define helper_gvec_ssra_s helper_gvec_ssra_s_aarch64eb
+#define helper_gvec_usra_b helper_gvec_usra_b_aarch64eb
+#define helper_gvec_usra_d helper_gvec_usra_d_aarch64eb
+#define helper_gvec_usra_h helper_gvec_usra_h_aarch64eb
+#define helper_gvec_usra_s helper_gvec_usra_s_aarch64eb
 #define helper_msr_i_daifclear helper_msr_i_daifclear_aarch64eb
 #define helper_msr_i_daifset helper_msr_i_daifset_aarch64eb
 #define helper_msr_i_spsel helper_msr_i_spsel_aarch64eb
@@ -4434,7 +4440,6 @@
 #define sqadd_op sqadd_op_aarch64eb
 #define sqsub_op sqsub_op_aarch64eb
 #define sshl_op sshl_op_aarch64eb
-#define ssra_op ssra_op_aarch64eb
 #define sri_op sri_op_aarch64eb
 #define sve_access_check sve_access_check_aarch64eb
 #define sve_exception_el sve_exception_el_aarch64eb
@@ -4443,7 +4448,6 @@
 #define uqadd_op uqadd_op_aarch64eb
 #define uqsub_op uqsub_op_aarch64eb
 #define ushl_op ushl_op_aarch64eb
-#define usra_op usra_op_aarch64eb
 #define v8m_security_lookup v8m_security_lookup_aarch64eb
 #define vfp_expand_imm vfp_expand_imm_aarch64eb
 #define write_fp_dreg write_fp_dreg_aarch64eb
16 qemu/arm.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_arm
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_arm
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_arm
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_arm
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_arm
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_arm
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_arm
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_arm
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_arm
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_arm
@@ -3412,12 +3408,22 @@
 #define cpu_mmu_index cpu_mmu_index_arm
 #define fp_exception_el fp_exception_el_arm
 #define gen_cmtst_i64 gen_cmtst_i64_arm
+#define gen_gvec_ssra gen_gvec_ssra_arm
+#define gen_gvec_usra gen_gvec_usra_arm
 #define get_phys_addr get_phys_addr_arm
 #define gen_sshl_i32 gen_sshl_i32_arm
 #define gen_sshl_i64 gen_sshl_i64_arm
 #define gen_ushl_i32 gen_ushl_i32_arm
 #define gen_ushl_i64 gen_ushl_i64_arm
 #define helper_fjcvtzs helper_fjcvtzs_arm
+#define helper_gvec_ssra_b helper_gvec_ssra_b_arm
+#define helper_gvec_ssra_d helper_gvec_ssra_d_arm
+#define helper_gvec_ssra_h helper_gvec_ssra_h_arm
+#define helper_gvec_ssra_s helper_gvec_ssra_s_arm
+#define helper_gvec_usra_b helper_gvec_usra_b_arm
+#define helper_gvec_usra_d helper_gvec_usra_d_arm
+#define helper_gvec_usra_h helper_gvec_usra_h_arm
+#define helper_gvec_usra_s helper_gvec_usra_s_arm
 #define helper_vjcvt helper_vjcvt_arm
 #define pmu_init pmu_init_arm
 #define mla_op mla_op_arm
@@ -3433,14 +3439,12 @@
 #define sqadd_op sqadd_op_arm
 #define sqsub_op sqsub_op_arm
 #define sshl_op sshl_op_arm
-#define ssra_op ssra_op_arm
 #define sri_op sri_op_arm
 #define sve_exception_el sve_exception_el_arm
 #define sve_zcr_len_for_el sve_zcr_len_for_el_arm
 #define uqadd_op uqadd_op_arm
 #define uqsub_op uqsub_op_arm
 #define ushl_op ushl_op_arm
-#define usra_op usra_op_arm
 #define v8m_security_lookup v8m_security_lookup_arm
 #define vfp_expand_imm vfp_expand_imm_arm
 #endif
16 qemu/armeb.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_armeb
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_armeb
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_armeb
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_armeb
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_armeb
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_armeb
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_armeb
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_armeb
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_armeb
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_armeb
@@ -3412,12 +3408,22 @@
 #define cpu_mmu_index cpu_mmu_index_armeb
 #define fp_exception_el fp_exception_el_armeb
 #define gen_cmtst_i64 gen_cmtst_i64_armeb
+#define gen_gvec_ssra gen_gvec_ssra_armeb
+#define gen_gvec_usra gen_gvec_usra_armeb
 #define get_phys_addr get_phys_addr_armeb
 #define gen_sshl_i32 gen_sshl_i32_armeb
 #define gen_sshl_i64 gen_sshl_i64_armeb
 #define gen_ushl_i32 gen_ushl_i32_armeb
 #define gen_ushl_i64 gen_ushl_i64_armeb
 #define helper_fjcvtzs helper_fjcvtzs_armeb
+#define helper_gvec_ssra_b helper_gvec_ssra_b_armeb
+#define helper_gvec_ssra_d helper_gvec_ssra_d_armeb
+#define helper_gvec_ssra_h helper_gvec_ssra_h_armeb
+#define helper_gvec_ssra_s helper_gvec_ssra_s_armeb
+#define helper_gvec_usra_b helper_gvec_usra_b_armeb
+#define helper_gvec_usra_d helper_gvec_usra_d_armeb
+#define helper_gvec_usra_h helper_gvec_usra_h_armeb
+#define helper_gvec_usra_s helper_gvec_usra_s_armeb
 #define helper_vjcvt helper_vjcvt_armeb
 #define pmu_init pmu_init_armeb
 #define mla_op mla_op_armeb
@@ -3433,14 +3439,12 @@
 #define sqadd_op sqadd_op_armeb
 #define sqsub_op sqsub_op_armeb
 #define sshl_op sshl_op_armeb
-#define ssra_op ssra_op_armeb
 #define sri_op sri_op_armeb
 #define sve_exception_el sve_exception_el_armeb
 #define sve_zcr_len_for_el sve_zcr_len_for_el_armeb
 #define uqadd_op uqadd_op_armeb
 #define uqsub_op uqsub_op_armeb
 #define ushl_op ushl_op_armeb
-#define usra_op usra_op_armeb
 #define v8m_security_lookup v8m_security_lookup_armeb
 #define vfp_expand_imm vfp_expand_imm_armeb
 #endif
qemu/header_gen.py
@@ -3417,12 +3417,22 @@ arm_symbols = (
     'cpu_mmu_index',
     'fp_exception_el',
     'gen_cmtst_i64',
+    'gen_gvec_ssra',
+    'gen_gvec_usra',
     'get_phys_addr',
     'gen_sshl_i32',
     'gen_sshl_i64',
     'gen_ushl_i32',
     'gen_ushl_i64',
     'helper_fjcvtzs',
+    'helper_gvec_ssra_b',
+    'helper_gvec_ssra_d',
+    'helper_gvec_ssra_h',
+    'helper_gvec_ssra_s',
+    'helper_gvec_usra_b',
+    'helper_gvec_usra_d',
+    'helper_gvec_usra_h',
+    'helper_gvec_usra_s',
     'helper_vjcvt',
     'pmu_init',
     'mla_op',
@@ -3438,14 +3448,12 @@ arm_symbols = (
     'sqadd_op',
     'sqsub_op',
     'sshl_op',
-    'ssra_op',
     'sri_op',
     'sve_exception_el',
     'sve_zcr_len_for_el',
     'uqadd_op',
     'uqsub_op',
     'ushl_op',
-    'usra_op',
     'v8m_security_lookup',
     'vfp_expand_imm',
 )
@@ -3494,6 +3502,8 @@ aarch64_symbols = (
     'fp_exception_el',
     'gen_a64_set_pc_im',
     'gen_cmtst_i64',
+    'gen_gvec_ssra',
+    'gen_gvec_usra',
     'get_phys_addr',
     'gen_sshl_i32',
     'gen_sshl_i64',
@@ -3548,6 +3558,14 @@ aarch64_symbols = (
     'helper_gvec_rsqrts_d',
     'helper_gvec_rsqrts_h',
     'helper_gvec_rsqrts_s',
+    'helper_gvec_ssra_b',
+    'helper_gvec_ssra_d',
+    'helper_gvec_ssra_h',
+    'helper_gvec_ssra_s',
+    'helper_gvec_usra_b',
+    'helper_gvec_usra_d',
+    'helper_gvec_usra_h',
+    'helper_gvec_usra_s',
     'helper_msr_i_daifclear',
     'helper_msr_i_daifset',
     'helper_msr_i_spsel',
@@ -4501,7 +4519,6 @@ aarch64_symbols = (
     'sqadd_op',
     'sqsub_op',
     'sshl_op',
-    'ssra_op',
     'sri_op',
     'sve_access_check',
     'sve_exception_el',
@@ -4510,7 +4527,6 @@ aarch64_symbols = (
     'uqadd_op',
     'uqsub_op',
     'ushl_op',
-    'usra_op',
     'v8m_security_lookup',
     'vfp_expand_imm',
     'write_fp_dreg',
qemu/m68k.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_m68k
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_m68k
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_m68k
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_m68k
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_m68k
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_m68k
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_m68k
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_m68k
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_m68k
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_m68k
qemu/mips.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_mips
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_mips
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mips
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mips
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mips
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mips
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_mips
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mips
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mips
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_mips
qemu/mips64.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_mips64
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_mips64
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mips64
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mips64
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mips64
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mips64
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_mips64
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mips64
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mips64
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_mips64
qemu/mips64el.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_mips64el
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_mips64el
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mips64el
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mips64el
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mips64el
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mips64el
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_mips64el
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mips64el
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mips64el
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_mips64el
qemu/mipsel.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_mipsel
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_mipsel
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mipsel
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mipsel
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mipsel
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mipsel
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_mipsel
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mipsel
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mipsel
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_mipsel
qemu/powerpc.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_powerpc
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_powerpc
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_powerpc
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_powerpc
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_powerpc
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_powerpc
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_powerpc
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_powerpc
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_powerpc
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_powerpc
qemu/riscv32.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_riscv32
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_riscv32
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_riscv32
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_riscv32
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_riscv32
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_riscv32
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_riscv32
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_riscv32
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_riscv32
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_riscv32
qemu/riscv64.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_riscv64
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_riscv64
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_riscv64
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_riscv64
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_riscv64
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_riscv64
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_riscv64
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_riscv64
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_riscv64
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_riscv64
qemu/sparc.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_sparc
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_sparc
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_sparc
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_sparc
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_sparc
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_sparc
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_sparc
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_sparc
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_sparc
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_sparc
qemu/sparc64.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_sparc64
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_sparc64
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_sparc64
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_sparc64
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_sparc64
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_sparc64
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_sparc64
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_sparc64
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_sparc64
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_sparc64
qemu/target/arm/helper.h
@@ -687,6 +687,16 @@ DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_3(gvec_ssra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_usra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
 #ifdef TARGET_ARM
 #define helper_clz helper_clz_arm
 #define gen_helper_clz gen_helper_clz_arm
qemu/target/arm/translate-a64.c
@@ -10477,19 +10477,8 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
 
     switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
-        if (is_u) {
-            /* Shift count same as element size produces zero to add. */
-            if (shift == 8 << size) {
-                goto done;
-            }
-            gen_gvec_op2i(s, is_q, rd, rn, shift, &usra_op[size]);
-        } else {
-            /* Shift count same as element size produces all sign to add. */
-            if (shift == 8 << size) {
-                shift -= 1;
-            }
-            gen_gvec_op2i(s, is_q, rd, rn, shift, &ssra_op[size]);
-        }
+        gen_gvec_fn2i(s, is_q, rd, rn, shift,
+                      is_u ? gen_gvec_usra : gen_gvec_ssra, size);
         return;
    case 0x08: /* SRI */
         /* Shift count same as element size is valid but does nothing. */
qemu/target/arm/translate.c
@@ -4004,33 +4004,50 @@ static void gen_ssra_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a, i
     tcg_gen_add_vec(s, vece, d, d, a);
 }
 
-static const TCGOpcode vecop_list_ssra[] = {
-    INDEX_op_sari_vec, INDEX_op_add_vec, 0
-};
-
-const GVecGen2i ssra_op[4] = {
-    { .fni8 = gen_ssra8_i64,
-      .fniv = gen_ssra_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_ssra,
-      .vece = MO_8 },
-    { .fni8 = gen_ssra16_i64,
-      .fniv = gen_ssra_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_ssra,
-      .vece = MO_16 },
-    { .fni4 = gen_ssra32_i32,
-      .fniv = gen_ssra_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_ssra,
-      .vece = MO_32 },
-    { .fni8 = gen_ssra64_i64,
-      .fniv = gen_ssra_vec,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .load_dest = true,
-      .opt_opc = vecop_list_ssra,
-      .vece = MO_64 },
-};
+void gen_gvec_ssra(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                   int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_sari_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2i ops[4] = {
+        { .fni8 = gen_ssra8_i64,
+          .fniv = gen_ssra_vec,
+          .fno = gen_helper_gvec_ssra_b,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_ssra16_i64,
+          .fniv = gen_ssra_vec,
+          .fno = gen_helper_gvec_ssra_h,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_ssra32_i32,
+          .fniv = gen_ssra_vec,
+          .fno = gen_helper_gvec_ssra_s,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fni8 = gen_ssra64_i64,
+          .fniv = gen_ssra_vec,
+          .fno = gen_helper_gvec_ssra_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_64 },
+    };
+    /* tszimm encoding produces immediates in the range [1..esize]. */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    /*
+     * Shifts larger than the element size are architecturally valid.
+     * Signed results in all sign bits.
+     */
+    shift = MIN(shift, (8 << vece) - 1);
+    tcg_gen_gvec_2i(s, rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+}
 
 static void gen_usra8_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t shift)
 {
@@ -4062,33 +4079,54 @@ static void gen_usra_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a, i
     tcg_gen_add_vec(s, vece, d, d, a);
 }
 
-static const TCGOpcode vecop_list_usra[] = {
-    INDEX_op_shri_vec, INDEX_op_add_vec, 0
-};
-
-const GVecGen2i usra_op[4] = {
-    { .fni8 = gen_usra8_i64,
-      .fniv = gen_usra_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_usra,
-      .vece = MO_8, },
-    { .fni8 = gen_usra16_i64,
-      .fniv = gen_usra_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_usra,
-      .vece = MO_16, },
-    { .fni4 = gen_usra32_i32,
-      .fniv = gen_usra_vec,
-      .load_dest = true,
-      .opt_opc = vecop_list_usra,
-      .vece = MO_32, },
-    { .fni8 = gen_usra64_i64,
-      .fniv = gen_usra_vec,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .load_dest = true,
-      .opt_opc = vecop_list_usra,
-      .vece = MO_64, },
-};
+void gen_gvec_usra(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                   int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2i ops[4] = {
+        { .fni8 = gen_usra8_i64,
+          .fniv = gen_usra_vec,
+          .fno = gen_helper_gvec_usra_b,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_8, },
+        { .fni8 = gen_usra16_i64,
+          .fniv = gen_usra_vec,
+          .fno = gen_helper_gvec_usra_h,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_16, },
+        { .fni4 = gen_usra32_i32,
+          .fniv = gen_usra_vec,
+          .fno = gen_helper_gvec_usra_s,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_32, },
+        { .fni8 = gen_usra64_i64,
+          .fniv = gen_usra_vec,
+          .fno = gen_helper_gvec_usra_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_64, },
+    };
+    /* tszimm encoding produces immediates in the range [1..esize]. */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    /*
+     * Shifts larger than the element size are architecturally valid.
+     * Unsigned results in all zeros as input to accumulate: nop.
+     */
+    if (shift < (8 << vece)) {
+        tcg_gen_gvec_2i(s, rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+    } else {
+        /* Nop, but we do need to clear the tail. */
+        tcg_gen_gvec_mov(s, vece, rd_ofs, rd_ofs, opr_sz, max_sz);
+    }
+}
 
 static void gen_shr8_ins_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t shift)
 {
@@ -5351,19 +5389,12 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
            case 1: /* VSRA */
                /* Right shift comes here negative. */
                shift = -shift;
-               /* Shifts larger than the element size are architecturally
-                * valid. Unsigned results in all zeros; signed results
-                * in all sign bits.
-                */
-               if (!u) {
-                   tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size, vec_size,
-                                   MIN(shift, (8 << size) - 1),
-                                   &ssra_op[size]);
-               } else if (shift >= 8 << size) {
-                   /* rd += 0 */
-               } else {
-                   tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size, vec_size,
-                                   shift, &usra_op[size]);
-               }
+               if (u) {
+                   gen_gvec_usra(tcg_ctx, size, rd_ofs, rm_ofs, shift,
+                                 vec_size, vec_size);
+               } else {
+                   gen_gvec_ssra(tcg_ctx, size, rd_ofs, rm_ofs, shift,
+                                 vec_size, vec_size);
+               }
                return 0;
 
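
A quick numeric check of the two tail cases handled above, as standalone C (not unicorn code): for esize == 8, a signed shift clamped to esize-1 reproduces the sign in every bit, while an unsigned shift by the full element width contributes zero, which is why gen_gvec_usra can skip the expansion and only clear the tail.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int8_t  sn = -128;  /* 0x80 */
    uint8_t un = 0x80;
    assert((int8_t)(sn >> 7) == -1);  /* ssra: shift clamped to esize-1, all sign bits */
    assert((uint32_t)un >> 8 == 0);   /* usra: full-width shift adds 0, a nop */
    return 0;
}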
qemu/target/arm/translate.h
@@ -292,8 +292,6 @@ extern const GVecGen3 mls_op[4];
 extern const GVecGen3 cmtst_op[4];
 extern const GVecGen3 sshl_op[4];
 extern const GVecGen3 ushl_op[4];
-extern const GVecGen2i ssra_op[4];
-extern const GVecGen2i usra_op[4];
 extern const GVecGen2i sri_op[4];
 extern const GVecGen2i sli_op[4];
 extern const GVecGen4 uqadd_op[4];
@@ -306,6 +304,11 @@ void gen_sshl_i32(TCGContext* tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
 void gen_ushl_i64(TCGContext* tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
 void gen_sshl_i64(TCGContext* tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
 
+void gen_gvec_ssra(TCGContext* tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_usra(TCGContext *tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+
 /*
  * Forward to the isar_feature_* tests given a DisasContext pointer.
  */
qemu/target/arm/vec_helper.c
@@ -900,6 +900,30 @@ void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn,
     clear_tail(d, oprsz, simd_maxsz(desc));
 }
 
+#define DO_SRA(NAME, TYPE)                              \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
+{                                                       \
+    intptr_t i, oprsz = simd_oprsz(desc);               \
+    int shift = simd_data(desc);                        \
+    TYPE *d = vd, *n = vn;                              \
+    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
+        d[i] += n[i] >> shift;                          \
+    }                                                   \
+    clear_tail(d, oprsz, simd_maxsz(desc));             \
+}
+
+DO_SRA(gvec_ssra_b, int8_t)
+DO_SRA(gvec_ssra_h, int16_t)
+DO_SRA(gvec_ssra_s, int32_t)
+DO_SRA(gvec_ssra_d, int64_t)
+
+DO_SRA(gvec_usra_b, uint8_t)
+DO_SRA(gvec_usra_h, uint16_t)
+DO_SRA(gvec_usra_s, uint32_t)
+DO_SRA(gvec_usra_d, uint64_t)
+
+#undef DO_SRA
+
 /*
  * Convert float16 to float32, raising no exceptions and
  * preserving exceptional values, including SNaN.
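
For reference, expanding DO_SRA(gvec_ssra_b, int8_t) by hand gives the helper below; HELPER(NAME) resolves to helper_NAME, matching the DEF_HELPER_FLAGS_3 prototypes added to helper.h. The expansion is mine, not part of the diff.

void helper_gvec_ssra_b(void *vd, void *vn, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);   /* bytes to operate on */
    int shift = simd_data(desc);            /* immediate carried in desc */
    int8_t *d = vd, *n = vn;
    for (i = 0; i < oprsz / sizeof(int8_t); i++) {
        d[i] += n[i] >> shift;              /* signed TYPE: arithmetic shift */
    }
    clear_tail(d, oprsz, simd_maxsz(desc)); /* zero the bytes beyond oprsz */
}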
qemu/x86_64.h
@@ -2889,10 +2889,6 @@
 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_x86_64
 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_x86_64
 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_x86_64
-#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_x86_64
-#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_x86_64
-#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_x86_64
-#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_x86_64
 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_x86_64
 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_x86_64
 #define tcg_gen_gvec_dup_imm tcg_gen_gvec_dup_imm_x86_64