From 0de56731ae6b53bfdd2fa3cfed7765b85f0468a6 Mon Sep 17 00:00:00 2001
From: LIU Zhiwei
Date: Sun, 7 Mar 2021 11:27:27 -0500
Subject: [PATCH] target/riscv: vector widening floating-point add/subtract instructions

eeffab2ec1b332a5eb2d2dcd2732cdb57179c6eb
---
 qemu/header_gen.py                           |  16 ++
 qemu/riscv32.h                               |  16 ++
 qemu/riscv64.h                               |  16 ++
 qemu/target/riscv/helper.h                   |  17 +++
 qemu/target/riscv/insn32.decode              |   8 +
 qemu/target/riscv/insn_trans/trans_rvv.inc.c | 151 +++++++++++++++++++
 qemu/target/riscv/vector_helper.c            |  83 ++++++++++
 7 files changed, 307 insertions(+)

diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index 5bf5ebee..41799c30 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -6992,6 +6992,22 @@ riscv_symbols = (
     'helper_vnsra_vx_b',
     'helper_vnsra_vx_h',
     'helper_vnsra_vx_w',
+    'helper_vfwadd_vv_h',
+    'helper_vfwadd_vv_w',
+    'helper_vfwsub_vv_h',
+    'helper_vfwsub_vv_w',
+    'helper_vfwadd_wv_h',
+    'helper_vfwadd_wv_w',
+    'helper_vfwsub_wv_h',
+    'helper_vfwsub_wv_w',
+    'helper_vfwadd_vf_h',
+    'helper_vfwadd_vf_w',
+    'helper_vfwsub_vf_h',
+    'helper_vfwsub_vf_w',
+    'helper_vfwadd_wf_h',
+    'helper_vfwadd_wf_w',
+    'helper_vfwsub_wf_h',
+    'helper_vfwsub_wf_w',
     'pmp_hart_has_privs',
     'pmpaddr_csr_read',
     'pmpaddr_csr_write',
diff --git a/qemu/riscv32.h b/qemu/riscv32.h
index ec06321c..fd7aa66b 100644
--- a/qemu/riscv32.h
+++ b/qemu/riscv32.h
@@ -4428,6 +4428,22 @@
 #define helper_vnsra_vx_b helper_vnsra_vx_b_riscv32
 #define helper_vnsra_vx_h helper_vnsra_vx_h_riscv32
 #define helper_vnsra_vx_w helper_vnsra_vx_w_riscv32
+#define helper_vfwadd_vv_h helper_vfwadd_vv_h_riscv32
+#define helper_vfwadd_vv_w helper_vfwadd_vv_w_riscv32
+#define helper_vfwsub_vv_h helper_vfwsub_vv_h_riscv32
+#define helper_vfwsub_vv_w helper_vfwsub_vv_w_riscv32
+#define helper_vfwadd_wv_h helper_vfwadd_wv_h_riscv32
+#define helper_vfwadd_wv_w helper_vfwadd_wv_w_riscv32
+#define helper_vfwsub_wv_h helper_vfwsub_wv_h_riscv32
+#define helper_vfwsub_wv_w helper_vfwsub_wv_w_riscv32
+#define helper_vfwadd_vf_h helper_vfwadd_vf_h_riscv32
+#define helper_vfwadd_vf_w helper_vfwadd_vf_w_riscv32
+#define helper_vfwsub_vf_h helper_vfwsub_vf_h_riscv32
+#define helper_vfwsub_vf_w helper_vfwsub_vf_w_riscv32
+#define helper_vfwadd_wf_h helper_vfwadd_wf_h_riscv32
+#define helper_vfwadd_wf_w helper_vfwadd_wf_w_riscv32
+#define helper_vfwsub_wf_h helper_vfwsub_wf_h_riscv32
+#define helper_vfwsub_wf_w helper_vfwsub_wf_w_riscv32
 #define pmp_hart_has_privs pmp_hart_has_privs_riscv32
 #define pmpaddr_csr_read pmpaddr_csr_read_riscv32
 #define pmpaddr_csr_write pmpaddr_csr_write_riscv32
diff --git a/qemu/riscv64.h b/qemu/riscv64.h
index ac256844..59f75f60 100644
--- a/qemu/riscv64.h
+++ b/qemu/riscv64.h
@@ -4428,6 +4428,22 @@
 #define helper_vnsra_vx_b helper_vnsra_vx_b_riscv64
 #define helper_vnsra_vx_h helper_vnsra_vx_h_riscv64
 #define helper_vnsra_vx_w helper_vnsra_vx_w_riscv64
+#define helper_vfwadd_vv_h helper_vfwadd_vv_h_riscv64
+#define helper_vfwadd_vv_w helper_vfwadd_vv_w_riscv64
+#define helper_vfwsub_vv_h helper_vfwsub_vv_h_riscv64
+#define helper_vfwsub_vv_w helper_vfwsub_vv_w_riscv64
+#define helper_vfwadd_wv_h helper_vfwadd_wv_h_riscv64
+#define helper_vfwadd_wv_w helper_vfwadd_wv_w_riscv64
+#define helper_vfwsub_wv_h helper_vfwsub_wv_h_riscv64
+#define helper_vfwsub_wv_w helper_vfwsub_wv_w_riscv64
+#define helper_vfwadd_vf_h helper_vfwadd_vf_h_riscv64
+#define helper_vfwadd_vf_w helper_vfwadd_vf_w_riscv64
+#define helper_vfwsub_vf_h helper_vfwsub_vf_h_riscv64
+#define helper_vfwsub_vf_w helper_vfwsub_vf_w_riscv64
+#define helper_vfwadd_wf_h helper_vfwadd_wf_h_riscv64
+#define helper_vfwadd_wf_w helper_vfwadd_wf_w_riscv64
+#define helper_vfwsub_wf_h helper_vfwsub_wf_h_riscv64
+#define helper_vfwsub_wf_w helper_vfwsub_wf_w_riscv64
 #define pmp_hart_has_privs pmp_hart_has_privs_riscv64
 #define pmpaddr_csr_read pmpaddr_csr_read_riscv64
 #define pmpaddr_csr_write pmpaddr_csr_write_riscv64
diff --git a/qemu/target/riscv/helper.h b/qemu/target/riscv/helper.h
index 1a915f8c..e154c524 100644
--- a/qemu/target/riscv/helper.h
+++ b/qemu/target/riscv/helper.h
@@ -822,3 +822,20 @@ DEF_HELPER_6(vfsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)
 DEF_HELPER_6(vfrsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
 DEF_HELPER_6(vfrsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
 DEF_HELPER_6(vfrsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_6(vfwadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwadd_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwadd_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwsub_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwsub_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwadd_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwadd_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwadd_wf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwadd_wf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwsub_wf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwsub_wf_w, void, ptr, ptr, i64, ptr, env, i32)
diff --git a/qemu/target/riscv/insn32.decode b/qemu/target/riscv/insn32.decode
index 9e26ed36..42d8a967 100644
--- a/qemu/target/riscv/insn32.decode
+++ b/qemu/target/riscv/insn32.decode
@@ -450,6 +450,14 @@
 vfadd_vf        000000 . ..... ..... 101 ..... 1010111 @r_vm
 vfsub_vv        000010 . ..... ..... 001 ..... 1010111 @r_vm
 vfsub_vf        000010 . ..... ..... 101 ..... 1010111 @r_vm
 vfrsub_vf       100111 . ..... ..... 101 ..... 1010111 @r_vm
+vfwadd_vv       110000 . ..... ..... 001 ..... 1010111 @r_vm
+vfwadd_vf       110000 . ..... ..... 101 ..... 1010111 @r_vm
+vfwadd_wv       110100 . ..... ..... 001 ..... 1010111 @r_vm
+vfwadd_wf       110100 . ..... ..... 101 ..... 1010111 @r_vm
+vfwsub_vv       110010 . ..... ..... 001 ..... 1010111 @r_vm
+vfwsub_vf       110010 . ..... ..... 101 ..... 1010111 @r_vm
+vfwsub_wv       110110 . ..... ..... 001 ..... 1010111 @r_vm
+vfwsub_wf       110110 . ..... ..... 101 ..... 1010111 @r_vm
 vsetvli         0 ........... ..... 111 ..... 1010111 @r2_zimm
 vsetvl          1000000 ..... ..... 111 ..... 1010111 @r
diff --git a/qemu/target/riscv/insn_trans/trans_rvv.inc.c b/qemu/target/riscv/insn_trans/trans_rvv.inc.c
index ead8c136..b8906cc8 100644
--- a/qemu/target/riscv/insn_trans/trans_rvv.inc.c
+++ b/qemu/target/riscv/insn_trans/trans_rvv.inc.c
@@ -1931,3 +1931,154 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
 GEN_OPFVF_TRANS(vfadd_vf, opfvf_check)
 GEN_OPFVF_TRANS(vfsub_vf, opfvf_check)
 GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
+
+/* Vector Widening Floating-Point Add/Subtract Instructions */
+static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
+{
+    return (vext_check_isa_ill(s) &&
+            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+            vext_check_reg(s, a->rd, true) &&
+            vext_check_reg(s, a->rs2, false) &&
+            vext_check_reg(s, a->rs1, false) &&
+            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+                                     1 << s->lmul) &&
+            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
+                                     1 << s->lmul) &&
+            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
+}
+
+/* OPFVV with WIDEN */
+#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK)                        \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
+{                                                                  \
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;                          \
+    if (CHECK(s, a)) {                                             \
+        uint32_t data = 0;                                         \
+        static gen_helper_gvec_4_ptr * const fns[2] = {            \
+            gen_helper_##NAME##_h, gen_helper_##NAME##_w,          \
+        };                                                         \
+        TCGLabel *over = gen_new_label(tcg_ctx);                   \
+        gen_set_rm(s, 7);                                          \
+        tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_vl_risc, 0, over); \
+                                                                   \
+        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+        tcg_gen_gvec_4_ptr(tcg_ctx, vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+                           vreg_ofs(s, a->rs1),                    \
+                           vreg_ofs(s, a->rs2), tcg_ctx->cpu_env, 0, \
+                           s->vlen / 8, data, fns[s->sew - 1]);    \
+        gen_set_label(tcg_ctx, over);                              \
+        return true;                                               \
+    }                                                              \
+    return false;                                                  \
+}
+
+GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
+
+static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
+{
+    return (vext_check_isa_ill(s) &&
+            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+            vext_check_reg(s, a->rd, true) &&
+            vext_check_reg(s, a->rs2, false) &&
+            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+                                     1 << s->lmul) &&
+            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
+}
+
+/* OPFVF with WIDEN */
+#define GEN_OPFVF_WIDEN_TRANS(NAME)                                \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
+{                                                                  \
+    if (opfvf_widen_check(s, a)) {                                 \
+        uint32_t data = 0;                                         \
+        static gen_helper_opfvf *const fns[2] = {                  \
+            gen_helper_##NAME##_h, gen_helper_##NAME##_w,          \
+        };                                                         \
+        gen_set_rm(s, 7);                                          \
+        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+        return opfvf_trans(a->rd, a->rs1, a->rs2, data,            \
+                           fns[s->sew - 1], s);                    \
+    }                                                              \
+    return false;                                                  \
+}
+
+GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
+GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
+
+static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
+{
+    return (vext_check_isa_ill(s) &&
+            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+            vext_check_reg(s, a->rd, true) &&
+            vext_check_reg(s, a->rs2, true) &&
+            vext_check_reg(s, a->rs1, false) &&
+            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
+                                     1 << s->lmul) &&
+            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
+}
+
+/* WIDEN OPFVV with WIDEN */
+#define GEN_OPFWV_WIDEN_TRANS(NAME)                                \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
+{                                                                  \
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;                          \
+    if (opfwv_widen_check(s, a)) {                                 \
+        uint32_t data = 0;                                         \
+        static gen_helper_gvec_4_ptr * const fns[2] = {            \
+            gen_helper_##NAME##_h, gen_helper_##NAME##_w,          \
+        };                                                         \
+        TCGLabel *over = gen_new_label(tcg_ctx);                   \
+        gen_set_rm(s, 7);                                          \
+        tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_vl_risc, 0, over); \
+                                                                   \
+        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+        tcg_gen_gvec_4_ptr(tcg_ctx, vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+                           vreg_ofs(s, a->rs1),                    \
+                           vreg_ofs(s, a->rs2), tcg_ctx->cpu_env, 0, \
+                           s->vlen / 8, data, fns[s->sew - 1]);    \
+        gen_set_label(tcg_ctx, over);                              \
+        return true;                                               \
+    }                                                              \
+    return false;                                                  \
+}
+
+GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
+GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
+
+static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
+{
+    return (vext_check_isa_ill(s) &&
+            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+            vext_check_reg(s, a->rd, true) &&
+            vext_check_reg(s, a->rs2, true) &&
+            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
+}
+
+/* WIDEN OPFVF with WIDEN */
+#define GEN_OPFWF_WIDEN_TRANS(NAME)                                \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
+{                                                                  \
+    if (opfwf_widen_check(s, a)) {                                 \
+        uint32_t data = 0;                                         \
+        static gen_helper_opfvf *const fns[2] = {                  \
+            gen_helper_##NAME##_h, gen_helper_##NAME##_w,          \
+        };                                                         \
+        gen_set_rm(s, 7);                                          \
+        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+        return opfvf_trans(a->rd, a->rs1, a->rs2, data,            \
+                           fns[s->sew - 1], s);                    \
+    }                                                              \
+    return false;                                                  \
+}
+
+GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
+GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)
diff --git a/qemu/target/riscv/vector_helper.c b/qemu/target/riscv/vector_helper.c
index 28fbd023..148d58b2 100644
--- a/qemu/target/riscv/vector_helper.c
+++ b/qemu/target/riscv/vector_helper.c
@@ -3279,3 +3279,86 @@ RVVCALL(OPFVF2, vfrsub_vf_d, OP_UUU_D, H8, H8, float64_rsub)
 GEN_VEXT_VF(vfrsub_vf_h, 2, 2, clearh)
 GEN_VEXT_VF(vfrsub_vf_w, 4, 4, clearl)
 GEN_VEXT_VF(vfrsub_vf_d, 8, 8, clearq)
+
+/* Vector Widening Floating-Point Add/Subtract Instructions */
+static uint32_t vfwadd16(uint16_t a, uint16_t b, float_status *s)
+{
+    return float32_add(float16_to_float32(a, true, s),
+                       float16_to_float32(b, true, s), s);
+}
+
+static uint64_t vfwadd32(uint32_t a, uint32_t b, float_status *s)
+{
+    return float64_add(float32_to_float64(a, s),
+                       float32_to_float64(b, s), s);
+
+}
+
+RVVCALL(OPFVV2, vfwadd_vv_h, WOP_UUU_H, H4, H2, H2, vfwadd16)
+RVVCALL(OPFVV2, vfwadd_vv_w, WOP_UUU_W, H8, H4, H4, vfwadd32)
+GEN_VEXT_VV_ENV(vfwadd_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwadd_vv_w, 4, 8, clearq)
+RVVCALL(OPFVF2, vfwadd_vf_h, WOP_UUU_H, H4, H2, vfwadd16)
+RVVCALL(OPFVF2, vfwadd_vf_w, WOP_UUU_W, H8, H4, vfwadd32)
+GEN_VEXT_VF(vfwadd_vf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwadd_vf_w, 4, 8, clearq)
+
+static uint32_t vfwsub16(uint16_t a, uint16_t b, float_status *s)
+{
+    return float32_sub(float16_to_float32(a, true, s),
+                       float16_to_float32(b, true, s), s);
+}
+
+static uint64_t vfwsub32(uint32_t a, uint32_t b, float_status *s)
+{
+    return float64_sub(float32_to_float64(a, s),
+                       float32_to_float64(b, s), s);
+
+}
+
+RVVCALL(OPFVV2, vfwsub_vv_h, WOP_UUU_H, H4, H2, H2, vfwsub16)
+RVVCALL(OPFVV2, vfwsub_vv_w, WOP_UUU_W, H8, H4, H4, vfwsub32)
+GEN_VEXT_VV_ENV(vfwsub_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwsub_vv_w, 4, 8, clearq)
+RVVCALL(OPFVF2, vfwsub_vf_h, WOP_UUU_H, H4, H2, vfwsub16)
+RVVCALL(OPFVF2, vfwsub_vf_w, WOP_UUU_W, H8, H4, vfwsub32)
+GEN_VEXT_VF(vfwsub_vf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwsub_vf_w, 4, 8, clearq)
+
+static uint32_t vfwaddw16(uint32_t a, uint16_t b, float_status *s)
+{
+    return float32_add(a, float16_to_float32(b, true, s), s);
+}
+
+static uint64_t vfwaddw32(uint64_t a, uint32_t b, float_status *s)
+{
+    return float64_add(a, float32_to_float64(b, s), s);
+}
+
+RVVCALL(OPFVV2, vfwadd_wv_h, WOP_WUUU_H, H4, H2, H2, vfwaddw16)
+RVVCALL(OPFVV2, vfwadd_wv_w, WOP_WUUU_W, H8, H4, H4, vfwaddw32)
+GEN_VEXT_VV_ENV(vfwadd_wv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwadd_wv_w, 4, 8, clearq)
+RVVCALL(OPFVF2, vfwadd_wf_h, WOP_WUUU_H, H4, H2, vfwaddw16)
+RVVCALL(OPFVF2, vfwadd_wf_w, WOP_WUUU_W, H8, H4, vfwaddw32)
+GEN_VEXT_VF(vfwadd_wf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwadd_wf_w, 4, 8, clearq)
+
+static uint32_t vfwsubw16(uint32_t a, uint16_t b, float_status *s)
+{
+    return float32_sub(a, float16_to_float32(b, true, s), s);
+}
+
+static uint64_t vfwsubw32(uint64_t a, uint32_t b, float_status *s)
+{
+    return float64_sub(a, float32_to_float64(b, s), s);
+}
+
+RVVCALL(OPFVV2, vfwsub_wv_h, WOP_WUUU_H, H4, H2, H2, vfwsubw16)
+RVVCALL(OPFVV2, vfwsub_wv_w, WOP_WUUU_W, H8, H4, H4, vfwsubw32)
+GEN_VEXT_VV_ENV(vfwsub_wv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwsub_wv_w, 4, 8, clearq)
+RVVCALL(OPFVF2, vfwsub_wf_h, WOP_WUUU_H, H4, H2, vfwsubw16)
+RVVCALL(OPFVF2, vfwsub_wf_w, WOP_WUUU_W, H8, H4, vfwsubw32)
+GEN_VEXT_VF(vfwsub_wf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwsub_wf_w, 4, 8, clearq)
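As context for the helpers above: every widening form converts its narrower operand(s) to the 2*SEW floating-point type first and then performs the add/subtract entirely at the wider precision, so the result is never rounded at the source width. The standalone C sketch below illustrates that effect for the float32 -> float64 case using plain host arithmetic; it deliberately does not use QEMU's softfloat API, and the function name widening_add_f32 is made up for illustration.

#include <stdio.h>

/*
 * Illustrative sketch only, not QEMU code: it mirrors the structure of
 * vfwadd32() in the patch (convert both single-precision operands to
 * double precision, then add at double precision), but with host
 * arithmetic instead of softfloat calls.
 */
static double widening_add_f32(float a, float b)
{
    return (double)a + (double)b;   /* widen first, then operate */
}

int main(void)
{
    float a = 16777216.0f;  /* 2^24: the next representable float is 2^24 + 2 */
    float b = 1.0f;

    /* Adding at single precision rounds the 1.0 away entirely... */
    printf("single-precision add: %.1f\n", (double)(a + b));         /* 16777216.0 */
    /* ...while the widening add keeps the exact sum. */
    printf("widening add:         %.1f\n", widening_add_f32(a, b));  /* 16777217.0 */
    return 0;
}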