Mirror of https://github.com/yuzu-emu/unicorn.git
target/riscv: vector single-width floating-point fused multiply-add instructions
Backports commit 4aa5a8fed4a21fe2e132a9a21b251aa95e19de80 from qemu.
parent 14cbabde4f
commit 42116609f0
@@ -7027,6 +7027,54 @@ riscv_symbols = (
    'helper_vfwmul_vv_w',
    'helper_vfwmul_vf_h',
    'helper_vfwmul_vf_w',
    'helper_vfmacc_vv_h',
    'helper_vfmacc_vv_w',
    'helper_vfmacc_vv_d',
    'helper_vfnmacc_vv_h',
    'helper_vfnmacc_vv_w',
    'helper_vfnmacc_vv_d',
    'helper_vfmsac_vv_h',
    'helper_vfmsac_vv_w',
    'helper_vfmsac_vv_d',
    'helper_vfnmsac_vv_h',
    'helper_vfnmsac_vv_w',
    'helper_vfnmsac_vv_d',
    'helper_vfmadd_vv_h',
    'helper_vfmadd_vv_w',
    'helper_vfmadd_vv_d',
    'helper_vfnmadd_vv_h',
    'helper_vfnmadd_vv_w',
    'helper_vfnmadd_vv_d',
    'helper_vfmsub_vv_h',
    'helper_vfmsub_vv_w',
    'helper_vfmsub_vv_d',
    'helper_vfnmsub_vv_h',
    'helper_vfnmsub_vv_w',
    'helper_vfnmsub_vv_d',
    'helper_vfmacc_vf_h',
    'helper_vfmacc_vf_w',
    'helper_vfmacc_vf_d',
    'helper_vfnmacc_vf_h',
    'helper_vfnmacc_vf_w',
    'helper_vfnmacc_vf_d',
    'helper_vfmsac_vf_h',
    'helper_vfmsac_vf_w',
    'helper_vfmsac_vf_d',
    'helper_vfnmsac_vf_h',
    'helper_vfnmsac_vf_w',
    'helper_vfnmsac_vf_d',
    'helper_vfmadd_vf_h',
    'helper_vfmadd_vf_w',
    'helper_vfmadd_vf_d',
    'helper_vfnmadd_vf_h',
    'helper_vfnmadd_vf_w',
    'helper_vfnmadd_vf_d',
    'helper_vfmsub_vf_h',
    'helper_vfmsub_vf_w',
    'helper_vfmsub_vf_d',
    'helper_vfnmsub_vf_h',
    'helper_vfnmsub_vf_w',
    'helper_vfnmsub_vf_d',
    'pmp_hart_has_privs',
    'pmpaddr_csr_read',
    'pmpaddr_csr_write',

@@ -4463,6 +4463,54 @@
#define helper_vfwmul_vv_w helper_vfwmul_vv_w_riscv32
#define helper_vfwmul_vf_h helper_vfwmul_vf_h_riscv32
#define helper_vfwmul_vf_w helper_vfwmul_vf_w_riscv32
#define helper_vfmacc_vv_h helper_vfmacc_vv_h_riscv32
#define helper_vfmacc_vv_w helper_vfmacc_vv_w_riscv32
#define helper_vfmacc_vv_d helper_vfmacc_vv_d_riscv32
#define helper_vfnmacc_vv_h helper_vfnmacc_vv_h_riscv32
#define helper_vfnmacc_vv_w helper_vfnmacc_vv_w_riscv32
#define helper_vfnmacc_vv_d helper_vfnmacc_vv_d_riscv32
#define helper_vfmsac_vv_h helper_vfmsac_vv_h_riscv32
#define helper_vfmsac_vv_w helper_vfmsac_vv_w_riscv32
#define helper_vfmsac_vv_d helper_vfmsac_vv_d_riscv32
#define helper_vfnmsac_vv_h helper_vfnmsac_vv_h_riscv32
#define helper_vfnmsac_vv_w helper_vfnmsac_vv_w_riscv32
#define helper_vfnmsac_vv_d helper_vfnmsac_vv_d_riscv32
#define helper_vfmadd_vv_h helper_vfmadd_vv_h_riscv32
#define helper_vfmadd_vv_w helper_vfmadd_vv_w_riscv32
#define helper_vfmadd_vv_d helper_vfmadd_vv_d_riscv32
#define helper_vfnmadd_vv_h helper_vfnmadd_vv_h_riscv32
#define helper_vfnmadd_vv_w helper_vfnmadd_vv_w_riscv32
#define helper_vfnmadd_vv_d helper_vfnmadd_vv_d_riscv32
#define helper_vfmsub_vv_h helper_vfmsub_vv_h_riscv32
#define helper_vfmsub_vv_w helper_vfmsub_vv_w_riscv32
#define helper_vfmsub_vv_d helper_vfmsub_vv_d_riscv32
#define helper_vfnmsub_vv_h helper_vfnmsub_vv_h_riscv32
#define helper_vfnmsub_vv_w helper_vfnmsub_vv_w_riscv32
#define helper_vfnmsub_vv_d helper_vfnmsub_vv_d_riscv32
#define helper_vfmacc_vf_h helper_vfmacc_vf_h_riscv32
#define helper_vfmacc_vf_w helper_vfmacc_vf_w_riscv32
#define helper_vfmacc_vf_d helper_vfmacc_vf_d_riscv32
#define helper_vfnmacc_vf_h helper_vfnmacc_vf_h_riscv32
#define helper_vfnmacc_vf_w helper_vfnmacc_vf_w_riscv32
#define helper_vfnmacc_vf_d helper_vfnmacc_vf_d_riscv32
#define helper_vfmsac_vf_h helper_vfmsac_vf_h_riscv32
#define helper_vfmsac_vf_w helper_vfmsac_vf_w_riscv32
#define helper_vfmsac_vf_d helper_vfmsac_vf_d_riscv32
#define helper_vfnmsac_vf_h helper_vfnmsac_vf_h_riscv32
#define helper_vfnmsac_vf_w helper_vfnmsac_vf_w_riscv32
#define helper_vfnmsac_vf_d helper_vfnmsac_vf_d_riscv32
#define helper_vfmadd_vf_h helper_vfmadd_vf_h_riscv32
#define helper_vfmadd_vf_w helper_vfmadd_vf_w_riscv32
#define helper_vfmadd_vf_d helper_vfmadd_vf_d_riscv32
#define helper_vfnmadd_vf_h helper_vfnmadd_vf_h_riscv32
#define helper_vfnmadd_vf_w helper_vfnmadd_vf_w_riscv32
#define helper_vfnmadd_vf_d helper_vfnmadd_vf_d_riscv32
#define helper_vfmsub_vf_h helper_vfmsub_vf_h_riscv32
#define helper_vfmsub_vf_w helper_vfmsub_vf_w_riscv32
#define helper_vfmsub_vf_d helper_vfmsub_vf_d_riscv32
#define helper_vfnmsub_vf_h helper_vfnmsub_vf_h_riscv32
#define helper_vfnmsub_vf_w helper_vfnmsub_vf_w_riscv32
#define helper_vfnmsub_vf_d helper_vfnmsub_vf_d_riscv32
#define pmp_hart_has_privs pmp_hart_has_privs_riscv32
#define pmpaddr_csr_read pmpaddr_csr_read_riscv32
#define pmpaddr_csr_write pmpaddr_csr_write_riscv32

@@ -4463,6 +4463,54 @@
#define helper_vfwmul_vv_w helper_vfwmul_vv_w_riscv64
#define helper_vfwmul_vf_h helper_vfwmul_vf_h_riscv64
#define helper_vfwmul_vf_w helper_vfwmul_vf_w_riscv64
#define helper_vfmacc_vv_h helper_vfmacc_vv_h_riscv64
#define helper_vfmacc_vv_w helper_vfmacc_vv_w_riscv64
#define helper_vfmacc_vv_d helper_vfmacc_vv_d_riscv64
#define helper_vfnmacc_vv_h helper_vfnmacc_vv_h_riscv64
#define helper_vfnmacc_vv_w helper_vfnmacc_vv_w_riscv64
#define helper_vfnmacc_vv_d helper_vfnmacc_vv_d_riscv64
#define helper_vfmsac_vv_h helper_vfmsac_vv_h_riscv64
#define helper_vfmsac_vv_w helper_vfmsac_vv_w_riscv64
#define helper_vfmsac_vv_d helper_vfmsac_vv_d_riscv64
#define helper_vfnmsac_vv_h helper_vfnmsac_vv_h_riscv64
#define helper_vfnmsac_vv_w helper_vfnmsac_vv_w_riscv64
#define helper_vfnmsac_vv_d helper_vfnmsac_vv_d_riscv64
#define helper_vfmadd_vv_h helper_vfmadd_vv_h_riscv64
#define helper_vfmadd_vv_w helper_vfmadd_vv_w_riscv64
#define helper_vfmadd_vv_d helper_vfmadd_vv_d_riscv64
#define helper_vfnmadd_vv_h helper_vfnmadd_vv_h_riscv64
#define helper_vfnmadd_vv_w helper_vfnmadd_vv_w_riscv64
#define helper_vfnmadd_vv_d helper_vfnmadd_vv_d_riscv64
#define helper_vfmsub_vv_h helper_vfmsub_vv_h_riscv64
#define helper_vfmsub_vv_w helper_vfmsub_vv_w_riscv64
#define helper_vfmsub_vv_d helper_vfmsub_vv_d_riscv64
#define helper_vfnmsub_vv_h helper_vfnmsub_vv_h_riscv64
#define helper_vfnmsub_vv_w helper_vfnmsub_vv_w_riscv64
#define helper_vfnmsub_vv_d helper_vfnmsub_vv_d_riscv64
#define helper_vfmacc_vf_h helper_vfmacc_vf_h_riscv64
#define helper_vfmacc_vf_w helper_vfmacc_vf_w_riscv64
#define helper_vfmacc_vf_d helper_vfmacc_vf_d_riscv64
#define helper_vfnmacc_vf_h helper_vfnmacc_vf_h_riscv64
#define helper_vfnmacc_vf_w helper_vfnmacc_vf_w_riscv64
#define helper_vfnmacc_vf_d helper_vfnmacc_vf_d_riscv64
#define helper_vfmsac_vf_h helper_vfmsac_vf_h_riscv64
#define helper_vfmsac_vf_w helper_vfmsac_vf_w_riscv64
#define helper_vfmsac_vf_d helper_vfmsac_vf_d_riscv64
#define helper_vfnmsac_vf_h helper_vfnmsac_vf_h_riscv64
#define helper_vfnmsac_vf_w helper_vfnmsac_vf_w_riscv64
#define helper_vfnmsac_vf_d helper_vfnmsac_vf_d_riscv64
#define helper_vfmadd_vf_h helper_vfmadd_vf_h_riscv64
#define helper_vfmadd_vf_w helper_vfmadd_vf_w_riscv64
#define helper_vfmadd_vf_d helper_vfmadd_vf_d_riscv64
#define helper_vfnmadd_vf_h helper_vfnmadd_vf_h_riscv64
#define helper_vfnmadd_vf_w helper_vfnmadd_vf_w_riscv64
#define helper_vfnmadd_vf_d helper_vfnmadd_vf_d_riscv64
#define helper_vfmsub_vf_h helper_vfmsub_vf_h_riscv64
#define helper_vfmsub_vf_w helper_vfmsub_vf_w_riscv64
#define helper_vfmsub_vf_d helper_vfmsub_vf_d_riscv64
#define helper_vfnmsub_vf_h helper_vfnmsub_vf_h_riscv64
#define helper_vfnmsub_vf_w helper_vfnmsub_vf_w_riscv64
#define helper_vfnmsub_vf_d helper_vfnmsub_vf_d_riscv64
#define pmp_hart_has_privs pmp_hart_has_privs_riscv64
#define pmpaddr_csr_read pmpaddr_csr_read_riscv64
#define pmpaddr_csr_write pmpaddr_csr_write_riscv64

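The symbol list and the two header hunks above are Unicorn's usual renaming step: every new QEMU helper is registered so that the generated per-target headers give it an arch suffix, letting the riscv32 and riscv64 objects link into one library without symbol collisions. A minimal sketch of the effect (the declaration below is illustrative, not part of the patch):

/* With the generated define in scope, shared code that names
 * helper_vfmacc_vv_h actually compiles and links against the
 * arch-suffixed symbol. */
#define helper_vfmacc_vv_h helper_vfmacc_vv_h_riscv64

void helper_vfmacc_vv_h(void);   /* really declares helper_vfmacc_vv_h_riscv64 */
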
@@ -860,3 +860,52 @@ DEF_HELPER_6(vfwmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfwmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfwmul_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfwmul_vf_w, void, ptr, ptr, i64, ptr, env, i32)

DEF_HELPER_6(vfmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmacc_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmacc_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmsac_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmsac_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfnmsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmacc_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfmacc_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmacc_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmacc_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfmsac_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmsac_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfmadd_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfmadd_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfmadd_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmadd_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmadd_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmadd_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfmsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfmsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfmsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfnmsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)

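Each DEF_HELPER_6 line declares a TCG helper with six arguments. Reading it against the GEN_VEXT_* macros in the last hunk, the pointers are the destination group, the v0 mask register, and the source operands, followed by the CPU state and the packed descriptor; the _vf forms replace the vs1 pointer with a 64-bit scalar. A sketch of the prototypes this should expand to (parameter names are mine, not QEMU's):

#include <stdint.h>

/* Forward declaration so the sketch stands alone. */
typedef struct CPURISCVState CPURISCVState;

void helper_vfmacc_vv_h(void *vd, void *v0, void *vs1, void *vs2,
                        CPURISCVState *env, uint32_t desc);
void helper_vfmacc_vf_h(void *vd, void *v0, uint64_t s1, void *vs2,
                        CPURISCVState *env, uint32_t desc);
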
@@ -465,6 +465,22 @@ vfdiv_vf        100000 . ..... ..... 101 ..... 1010111 @r_vm
vfrdiv_vf       100001 . ..... ..... 101 ..... 1010111 @r_vm
vfwmul_vv       111000 . ..... ..... 001 ..... 1010111 @r_vm
vfwmul_vf       111000 . ..... ..... 101 ..... 1010111 @r_vm
vfmacc_vv       101100 . ..... ..... 001 ..... 1010111 @r_vm
vfnmacc_vv      101101 . ..... ..... 001 ..... 1010111 @r_vm
vfnmacc_vf      101101 . ..... ..... 101 ..... 1010111 @r_vm
vfmacc_vf       101100 . ..... ..... 101 ..... 1010111 @r_vm
vfmsac_vv       101110 . ..... ..... 001 ..... 1010111 @r_vm
vfmsac_vf       101110 . ..... ..... 101 ..... 1010111 @r_vm
vfnmsac_vv      101111 . ..... ..... 001 ..... 1010111 @r_vm
vfnmsac_vf      101111 . ..... ..... 101 ..... 1010111 @r_vm
vfmadd_vv       101000 . ..... ..... 001 ..... 1010111 @r_vm
vfmadd_vf       101000 . ..... ..... 101 ..... 1010111 @r_vm
vfnmadd_vv      101001 . ..... ..... 001 ..... 1010111 @r_vm
vfnmadd_vf      101001 . ..... ..... 101 ..... 1010111 @r_vm
vfmsub_vv       101010 . ..... ..... 001 ..... 1010111 @r_vm
vfmsub_vf       101010 . ..... ..... 101 ..... 1010111 @r_vm
vfnmsub_vv      101011 . ..... ..... 001 ..... 1010111 @r_vm
vfnmsub_vf      101011 . ..... ..... 101 ..... 1010111 @r_vm

vsetvli         0 ........... ..... 111 ..... 1010111 @r2_zimm
vsetvl          1000000 ..... ..... 111 ..... 1010111 @r

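All of these patterns live in the OP-V major opcode (1010111): bits 31:26 are funct6 selecting the operation, bit 25 is the mask bit vm, bits 24:20 and 19:15 are the vs2 and vs1 register fields, bits 14:12 are funct3 (001 for the .vv forms, 101 for the .vf forms), and bits 11:7 name the destination. A small stand-alone encoder, purely to illustrate the field layout (not part of QEMU):

#include <stdio.h>
#include <stdint.h>

/* Pack an OP-V instruction from its fields; 0x57 == 0b1010111. */
static uint32_t rvv_encode(uint32_t funct6, uint32_t vm, uint32_t vs2,
                           uint32_t vs1, uint32_t funct3, uint32_t vd)
{
    return (funct6 << 26) | (vm << 25) | (vs2 << 20) |
           (vs1 << 15) | (funct3 << 12) | (vd << 7) | 0x57;
}

int main(void)
{
    /* funct6=0b101100 (vfmacc), vm=1 (unmasked), vs2=v3, vs1=v2,
     * funct3=0b001 (.vv), vd=v1 -> prints 0xb23110d7 */
    printf("0x%08x\n", (unsigned)rvv_encode(0x2c, 1, 3, 2, 1, 1));
    return 0;
}
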
@@ -2093,3 +2093,21 @@ GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
/* Vector Widening Floating-Point Multiply */
GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)

/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)

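The GEN_OPFVV_TRANS/GEN_OPFVF_TRANS macros themselves are defined earlier in the translation source and are not part of this diff; each stamps out a trans_<insn> function that applies the given legality check and then dispatches to the per-element-width helper declared above. A standalone toy of that dispatch shape (names and the SEW encoding here are illustrative assumptions, not QEMU code):

#include <stdio.h>

typedef void fma_helper(void);

static void fma_h(void) { puts("16-bit element helper"); }
static void fma_w(void) { puts("32-bit element helper"); }
static void fma_d(void) { puts("64-bit element helper"); }

int main(void)
{
    /* One generated trans_* function covers all element widths by
     * indexing a table of <name>_{h,w,d} helpers with the current
     * SEW setting; sew is 1/2/3 for 16/32/64-bit elements here. */
    static fma_helper *const fns[3] = { fma_h, fma_w, fma_d };
    int sew = 2;                /* pretend vtype selects 32-bit elements */
    fns[sew - 1]();
    return 0;
}
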
@@ -3433,3 +3433,254 @@ RVVCALL(OPFVF2, vfwmul_vf_h, WOP_UUU_H, H4, H2, vfwmul16)
RVVCALL(OPFVF2, vfwmul_vf_w, WOP_UUU_W, H8, H4, vfwmul32)
GEN_VEXT_VF(vfwmul_vf_h, 2, 4, clearl)
GEN_VEXT_VF(vfwmul_vf_w, 4, 8, clearq)

/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
#define OPFVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)       \
static void do_##NAME(void *vd, void *vs1, void *vs2, int i,       \
        CPURISCVState *env)                                        \
{                                                                  \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                                \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                \
    TD d = *((TD *)vd + HD(i));                                    \
    *((TD *)vd + HD(i)) = OP(s2, s1, d, &env->fp_status);          \
}

static uint16_t fmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(a, b, d, 0, s);
}

static uint32_t fmacc32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(a, b, d, 0, s);
}

static uint64_t fmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(a, b, d, 0, s);
}

RVVCALL(OPFVV3, vfmacc_vv_h, OP_UUU_H, H2, H2, H2, fmacc16)
RVVCALL(OPFVV3, vfmacc_vv_w, OP_UUU_W, H4, H4, H4, fmacc32)
RVVCALL(OPFVV3, vfmacc_vv_d, OP_UUU_D, H8, H8, H8, fmacc64)
GEN_VEXT_VV_ENV(vfmacc_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfmacc_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfmacc_vv_d, 8, 8, clearq)

#define OPFVF3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)            \
static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i,     \
        CPURISCVState *env)                                        \
{                                                                  \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                \
    TD d = *((TD *)vd + HD(i));                                    \
    *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d, &env->fp_status); \
}

RVVCALL(OPFVF3, vfmacc_vf_h, OP_UUU_H, H2, H2, fmacc16)
RVVCALL(OPFVF3, vfmacc_vf_w, OP_UUU_W, H4, H4, fmacc32)
RVVCALL(OPFVF3, vfmacc_vf_d, OP_UUU_D, H8, H8, fmacc64)
GEN_VEXT_VF(vfmacc_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfmacc_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfmacc_vf_d, 8, 8, clearq)

static uint16_t fnmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(a, b, d,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

static uint32_t fnmacc32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(a, b, d,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

static uint64_t fnmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(a, b, d,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

RVVCALL(OPFVV3, vfnmacc_vv_h, OP_UUU_H, H2, H2, H2, fnmacc16)
RVVCALL(OPFVV3, vfnmacc_vv_w, OP_UUU_W, H4, H4, H4, fnmacc32)
RVVCALL(OPFVV3, vfnmacc_vv_d, OP_UUU_D, H8, H8, H8, fnmacc64)
GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfnmacc_vf_h, OP_UUU_H, H2, H2, fnmacc16)
RVVCALL(OPFVF3, vfnmacc_vf_w, OP_UUU_W, H4, H4, fnmacc32)
RVVCALL(OPFVF3, vfnmacc_vf_d, OP_UUU_D, H8, H8, fnmacc64)
GEN_VEXT_VF(vfnmacc_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfnmacc_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfnmacc_vf_d, 8, 8, clearq)

static uint16_t fmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(a, b, d, float_muladd_negate_c, s);
}

static uint32_t fmsac32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(a, b, d, float_muladd_negate_c, s);
}

static uint64_t fmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(a, b, d, float_muladd_negate_c, s);
}

RVVCALL(OPFVV3, vfmsac_vv_h, OP_UUU_H, H2, H2, H2, fmsac16)
RVVCALL(OPFVV3, vfmsac_vv_w, OP_UUU_W, H4, H4, H4, fmsac32)
RVVCALL(OPFVV3, vfmsac_vv_d, OP_UUU_D, H8, H8, H8, fmsac64)
GEN_VEXT_VV_ENV(vfmsac_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfmsac_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfmsac_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfmsac_vf_h, OP_UUU_H, H2, H2, fmsac16)
RVVCALL(OPFVF3, vfmsac_vf_w, OP_UUU_W, H4, H4, fmsac32)
RVVCALL(OPFVF3, vfmsac_vf_d, OP_UUU_D, H8, H8, fmsac64)
GEN_VEXT_VF(vfmsac_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfmsac_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfmsac_vf_d, 8, 8, clearq)

static uint16_t fnmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(a, b, d, float_muladd_negate_product, s);
}

static uint32_t fnmsac32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(a, b, d, float_muladd_negate_product, s);
}

static uint64_t fnmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(a, b, d, float_muladd_negate_product, s);
}

RVVCALL(OPFVV3, vfnmsac_vv_h, OP_UUU_H, H2, H2, H2, fnmsac16)
RVVCALL(OPFVV3, vfnmsac_vv_w, OP_UUU_W, H4, H4, H4, fnmsac32)
RVVCALL(OPFVV3, vfnmsac_vv_d, OP_UUU_D, H8, H8, H8, fnmsac64)
GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfnmsac_vf_h, OP_UUU_H, H2, H2, fnmsac16)
RVVCALL(OPFVF3, vfnmsac_vf_w, OP_UUU_W, H4, H4, fnmsac32)
RVVCALL(OPFVF3, vfnmsac_vf_d, OP_UUU_D, H8, H8, fnmsac64)
GEN_VEXT_VF(vfnmsac_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfnmsac_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfnmsac_vf_d, 8, 8, clearq)

static uint16_t fmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(d, b, a, 0, s);
}

static uint32_t fmadd32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(d, b, a, 0, s);
}

static uint64_t fmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(d, b, a, 0, s);
}

RVVCALL(OPFVV3, vfmadd_vv_h, OP_UUU_H, H2, H2, H2, fmadd16)
RVVCALL(OPFVV3, vfmadd_vv_w, OP_UUU_W, H4, H4, H4, fmadd32)
RVVCALL(OPFVV3, vfmadd_vv_d, OP_UUU_D, H8, H8, H8, fmadd64)
GEN_VEXT_VV_ENV(vfmadd_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfmadd_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfmadd_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfmadd_vf_h, OP_UUU_H, H2, H2, fmadd16)
RVVCALL(OPFVF3, vfmadd_vf_w, OP_UUU_W, H4, H4, fmadd32)
RVVCALL(OPFVF3, vfmadd_vf_d, OP_UUU_D, H8, H8, fmadd64)
GEN_VEXT_VF(vfmadd_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfmadd_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfmadd_vf_d, 8, 8, clearq)

static uint16_t fnmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(d, b, a,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

static uint32_t fnmadd32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(d, b, a,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

static uint64_t fnmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(d, b, a,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

RVVCALL(OPFVV3, vfnmadd_vv_h, OP_UUU_H, H2, H2, H2, fnmadd16)
RVVCALL(OPFVV3, vfnmadd_vv_w, OP_UUU_W, H4, H4, H4, fnmadd32)
RVVCALL(OPFVV3, vfnmadd_vv_d, OP_UUU_D, H8, H8, H8, fnmadd64)
GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfnmadd_vf_h, OP_UUU_H, H2, H2, fnmadd16)
RVVCALL(OPFVF3, vfnmadd_vf_w, OP_UUU_W, H4, H4, fnmadd32)
RVVCALL(OPFVF3, vfnmadd_vf_d, OP_UUU_D, H8, H8, fnmadd64)
GEN_VEXT_VF(vfnmadd_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfnmadd_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfnmadd_vf_d, 8, 8, clearq)

static uint16_t fmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(d, b, a, float_muladd_negate_c, s);
}

static uint32_t fmsub32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(d, b, a, float_muladd_negate_c, s);
}

static uint64_t fmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(d, b, a, float_muladd_negate_c, s);
}

RVVCALL(OPFVV3, vfmsub_vv_h, OP_UUU_H, H2, H2, H2, fmsub16)
RVVCALL(OPFVV3, vfmsub_vv_w, OP_UUU_W, H4, H4, H4, fmsub32)
RVVCALL(OPFVV3, vfmsub_vv_d, OP_UUU_D, H8, H8, H8, fmsub64)
GEN_VEXT_VV_ENV(vfmsub_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfmsub_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfmsub_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfmsub_vf_h, OP_UUU_H, H2, H2, fmsub16)
RVVCALL(OPFVF3, vfmsub_vf_w, OP_UUU_W, H4, H4, fmsub32)
RVVCALL(OPFVF3, vfmsub_vf_d, OP_UUU_D, H8, H8, fmsub64)
GEN_VEXT_VF(vfmsub_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfmsub_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfmsub_vf_d, 8, 8, clearq)

static uint16_t fnmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(d, b, a, float_muladd_negate_product, s);
}

static uint32_t fnmsub32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(d, b, a, float_muladd_negate_product, s);
}

static uint64_t fnmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(d, b, a, float_muladd_negate_product, s);
}

RVVCALL(OPFVV3, vfnmsub_vv_h, OP_UUU_H, H2, H2, H2, fnmsub16)
RVVCALL(OPFVV3, vfnmsub_vv_w, OP_UUU_W, H4, H4, H4, fnmsub32)
RVVCALL(OPFVV3, vfnmsub_vv_d, OP_UUU_D, H8, H8, H8, fnmsub64)
GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfnmsub_vf_h, OP_UUU_H, H2, H2, fnmsub16)
RVVCALL(OPFVF3, vfnmsub_vf_w, OP_UUU_W, H4, H4, fnmsub32)
RVVCALL(OPFVF3, vfnmsub_vf_d, OP_UUU_D, H8, H8, fnmsub64)
GEN_VEXT_VF(vfnmsub_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfnmsub_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfnmsub_vf_d, 8, 8, clearq)
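
The eight wrapper families map directly onto the RVV spec's operand forms. The *macc/*msac group multiplies vs1 by vs2 and accumulates into vd (OP(s2, s1, d) above), while the *madd/*msub group multiplies vs1 by the destination and adds or subtracts vs2 (hence the muladd(d, b, a, ...) argument order):

vfmacc:  vd[i] = +(vs1[i] * vs2[i]) + vd[i]
vfnmacc: vd[i] = -(vs1[i] * vs2[i]) - vd[i]
vfmsac:  vd[i] = +(vs1[i] * vs2[i]) - vd[i]
vfnmsac: vd[i] = -(vs1[i] * vs2[i]) + vd[i]
vfmadd:  vd[i] = +(vs1[i] * vd[i]) + vs2[i]
vfnmadd: vd[i] = -(vs1[i] * vd[i]) - vs2[i]
vfmsub:  vd[i] = +(vs1[i] * vd[i]) - vs2[i]
vfnmsub: vd[i] = -(vs1[i] * vd[i]) + vs2[i]

The sign choices come from softfloat's muladd flags: float_muladd_negate_product flips the product's sign and float_muladd_negate_c flips the addend's. A standalone double-precision mock of that behaviour (the NEGATE_* values are illustrative, not softfloat's):

#include <stdio.h>

#define NEGATE_C       1   /* mock of float_muladd_negate_c       */
#define NEGATE_PRODUCT 2   /* mock of float_muladd_negate_product */

/* a*b + c with the two sign controls used by the patch. */
static double muladd(double a, double b, double c, int flags)
{
    double p = a * b;
    if (flags & NEGATE_PRODUCT) p = -p;
    if (flags & NEGATE_C)       c = -c;
    return p + c;
}

int main(void)
{
    double vs1 = 2.0, vs2 = 3.0, vd = 10.0;
    printf("vfmacc  %+g\n", muladd(vs2, vs1, vd, 0));                         /* +16 */
    printf("vfnmacc %+g\n", muladd(vs2, vs1, vd, NEGATE_PRODUCT | NEGATE_C)); /* -16 */
    printf("vfmsac  %+g\n", muladd(vs2, vs1, vd, NEGATE_C));                  /*  -4 */
    printf("vfnmsac %+g\n", muladd(vs2, vs1, vd, NEGATE_PRODUCT));            /*  +4 */
    printf("vfmadd  %+g\n", muladd(vd, vs1, vs2, 0));                         /* +23 */
    printf("vfnmadd %+g\n", muladd(vd, vs1, vs2, NEGATE_PRODUCT | NEGATE_C)); /* -23 */
    printf("vfmsub  %+g\n", muladd(vd, vs1, vs2, NEGATE_C));                  /* +17 */
    printf("vfnmsub %+g\n", muladd(vd, vs1, vs2, NEGATE_PRODUCT));            /* -17 */
    return 0;
}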