diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index 2240f075..c29f8f00 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -6953,6 +6953,18 @@ riscv_symbols = (
     'helper_vssra_vx_h',
     'helper_vssra_vx_w',
     'helper_vssra_vx_d',
+    'helper_vnclip_vv_b',
+    'helper_vnclip_vv_h',
+    'helper_vnclip_vv_w',
+    'helper_vnclipu_vv_b',
+    'helper_vnclipu_vv_h',
+    'helper_vnclipu_vv_w',
+    'helper_vnclipu_vx_b',
+    'helper_vnclipu_vx_h',
+    'helper_vnclipu_vx_w',
+    'helper_vnclip_vx_b',
+    'helper_vnclip_vx_h',
+    'helper_vnclip_vx_w',
     'pmp_hart_has_privs',
     'pmpaddr_csr_read',
     'pmpaddr_csr_write',
diff --git a/qemu/riscv32.h b/qemu/riscv32.h
index 6cf1f0cb..3a900aca 100644
--- a/qemu/riscv32.h
+++ b/qemu/riscv32.h
@@ -4389,6 +4389,18 @@
 #define helper_vssra_vx_h helper_vssra_vx_h_riscv32
 #define helper_vssra_vx_w helper_vssra_vx_w_riscv32
 #define helper_vssra_vx_d helper_vssra_vx_d_riscv32
+#define helper_vnclip_vv_b helper_vnclip_vv_b_riscv32
+#define helper_vnclip_vv_h helper_vnclip_vv_h_riscv32
+#define helper_vnclip_vv_w helper_vnclip_vv_w_riscv32
+#define helper_vnclipu_vv_b helper_vnclipu_vv_b_riscv32
+#define helper_vnclipu_vv_h helper_vnclipu_vv_h_riscv32
+#define helper_vnclipu_vv_w helper_vnclipu_vv_w_riscv32
+#define helper_vnclipu_vx_b helper_vnclipu_vx_b_riscv32
+#define helper_vnclipu_vx_h helper_vnclipu_vx_h_riscv32
+#define helper_vnclipu_vx_w helper_vnclipu_vx_w_riscv32
+#define helper_vnclip_vx_b helper_vnclip_vx_b_riscv32
+#define helper_vnclip_vx_h helper_vnclip_vx_h_riscv32
+#define helper_vnclip_vx_w helper_vnclip_vx_w_riscv32
 #define pmp_hart_has_privs pmp_hart_has_privs_riscv32
 #define pmpaddr_csr_read pmpaddr_csr_read_riscv32
 #define pmpaddr_csr_write pmpaddr_csr_write_riscv32
diff --git a/qemu/riscv64.h b/qemu/riscv64.h
index 224bdea6..789bc82e 100644
--- a/qemu/riscv64.h
+++ b/qemu/riscv64.h
@@ -4389,6 +4389,18 @@
 #define helper_vssra_vx_h helper_vssra_vx_h_riscv64
 #define helper_vssra_vx_w helper_vssra_vx_w_riscv64
 #define helper_vssra_vx_d helper_vssra_vx_d_riscv64
+#define helper_vnclip_vv_b helper_vnclip_vv_b_riscv64
+#define helper_vnclip_vv_h helper_vnclip_vv_h_riscv64
+#define helper_vnclip_vv_w helper_vnclip_vv_w_riscv64
+#define helper_vnclipu_vv_b helper_vnclipu_vv_b_riscv64
+#define helper_vnclipu_vv_h helper_vnclipu_vv_h_riscv64
+#define helper_vnclipu_vv_w helper_vnclipu_vv_w_riscv64
+#define helper_vnclipu_vx_b helper_vnclipu_vx_b_riscv64
+#define helper_vnclipu_vx_h helper_vnclipu_vx_h_riscv64
+#define helper_vnclipu_vx_w helper_vnclipu_vx_w_riscv64
+#define helper_vnclip_vx_b helper_vnclip_vx_b_riscv64
+#define helper_vnclip_vx_h helper_vnclip_vx_h_riscv64
+#define helper_vnclip_vx_w helper_vnclip_vx_w_riscv64
 #define pmp_hart_has_privs pmp_hart_has_privs_riscv64
 #define pmpaddr_csr_read pmpaddr_csr_read_riscv64
 #define pmpaddr_csr_write pmpaddr_csr_write_riscv64
diff --git a/qemu/target/riscv/helper.h b/qemu/target/riscv/helper.h
index a2f7c4e5..bc8c2c37 100644
--- a/qemu/target/riscv/helper.h
+++ b/qemu/target/riscv/helper.h
@@ -780,3 +780,16 @@ DEF_HELPER_6(vssra_vx_b, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vssra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vssra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vssra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vnclip_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclip_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclip_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnclip_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnclip_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnclip_vx_w, void, ptr, ptr, tl, ptr, env, i32)
diff --git a/qemu/target/riscv/insn32.decode b/qemu/target/riscv/insn32.decode
index 6bff0328..80cf5413 100644
--- a/qemu/target/riscv/insn32.decode
+++ b/qemu/target/riscv/insn32.decode
@@ -433,6 +433,12 @@ vssrl_vi        101010 . ..... ..... 011 ..... 1010111 @r_vm
 vssra_vv        101011 . ..... ..... 000 ..... 1010111 @r_vm
 vssra_vx        101011 . ..... ..... 100 ..... 1010111 @r_vm
 vssra_vi        101011 . ..... ..... 011 ..... 1010111 @r_vm
+vnclipu_vv      101110 . ..... ..... 000 ..... 1010111 @r_vm
+vnclipu_vx      101110 . ..... ..... 100 ..... 1010111 @r_vm
+vnclipu_vi      101110 . ..... ..... 011 ..... 1010111 @r_vm
+vnclip_vv       101111 . ..... ..... 000 ..... 1010111 @r_vm
+vnclip_vx       101111 . ..... ..... 100 ..... 1010111 @r_vm
+vnclip_vi       101111 . ..... ..... 011 ..... 1010111 @r_vm
 vsetvli         0 ........... ..... 111 ..... 1010111 @r2_zimm
 vsetvl          1000000 ..... ..... 111 ..... 1010111 @r
diff --git a/qemu/target/riscv/insn_trans/trans_rvv.inc.c b/qemu/target/riscv/insn_trans/trans_rvv.inc.c
index 0b1906cb..677bbba4 100644
--- a/qemu/target/riscv/insn_trans/trans_rvv.inc.c
+++ b/qemu/target/riscv/insn_trans/trans_rvv.inc.c
@@ -1712,3 +1712,11 @@ GEN_OPIVX_TRANS(vssrl_vx, opivx_check)
 GEN_OPIVX_TRANS(vssra_vx, opivx_check)
 GEN_OPIVI_TRANS(vssrl_vi, 1, vssrl_vx, opivx_check)
 GEN_OPIVI_TRANS(vssra_vi, 0, vssra_vx, opivx_check)
+
+/* Vector Narrowing Fixed-Point Clip Instructions */
+GEN_OPIVV_NARROW_TRANS(vnclipu_vv)
+GEN_OPIVV_NARROW_TRANS(vnclip_vv)
+GEN_OPIVX_NARROW_TRANS(vnclipu_vx)
+GEN_OPIVX_NARROW_TRANS(vnclip_vx)
+GEN_OPIVI_NARROW_TRANS(vnclipu_vi, 1, vnclipu_vx)
+GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx)
diff --git a/qemu/target/riscv/vector_helper.c b/qemu/target/riscv/vector_helper.c
index f762ce91..c1e1c1e8 100644
--- a/qemu/target/riscv/vector_helper.c
+++ b/qemu/target/riscv/vector_helper.c
@@ -876,6 +876,12 @@ GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w, clearl)
 #define WOP_SSU_B int16_t, int8_t, uint8_t, int16_t, uint16_t
 #define WOP_SSU_H int32_t, int16_t, uint16_t, int32_t, uint32_t
 #define WOP_SSU_W int64_t, int32_t, uint32_t, int64_t, uint64_t
+#define NOP_SSS_B int8_t, int8_t, int16_t, int8_t, int16_t
+#define NOP_SSS_H int16_t, int16_t, int32_t, int16_t, int32_t
+#define NOP_SSS_W int32_t, int32_t, int64_t, int32_t, int64_t
+#define NOP_UUU_B uint8_t, uint8_t, uint16_t, uint8_t, uint16_t
+#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
+#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t

 /* operation of two vector elements */
 typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
@@ -2996,6 +3002,7 @@ vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
     res = (a >> shift) + round;
     return res;
 }
+
 RVVCALL(OPIVV2_RM, vssra_vv_b, OP_SSS_B, H1, H1, H1, vssra8)
 RVVCALL(OPIVV2_RM, vssra_vv_h, OP_SSS_H, H2, H2, H2, vssra16)
 RVVCALL(OPIVV2_RM, vssra_vv_w, OP_SSS_W, H4, H4, H4, vssra32)
@@ -3013,3 +3020,137 @@ GEN_VEXT_VX_RM(vssra_vx_b, 1, 1, clearb)
 GEN_VEXT_VX_RM(vssra_vx_h, 2, 2, clearh)
 GEN_VEXT_VX_RM(vssra_vx_w, 4, 4, clearl)
 GEN_VEXT_VX_RM(vssra_vx_d, 8, 8, clearq)
+
+/* Vector Narrowing Fixed-Point Clip Instructions */
+static inline int8_t
+vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b)
+{
+    uint8_t round, shift = b & 0xf;
+    int16_t res;
+
+    round = get_round(vxrm, a, shift);
+    res = (a >> shift) + round;
+    if (res > INT8_MAX) {
+        env->vxsat = 0x1;
+        return INT8_MAX;
+    } else if (res < INT8_MIN) {
+        env->vxsat = 0x1;
+        return INT8_MIN;
+    } else {
+        return res;
+    }
+}
+
+static inline int16_t
+vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b)
+{
+    uint8_t round, shift = b & 0x1f;
+    int32_t res;
+
+    round = get_round(vxrm, a, shift);
+    res = (a >> shift) + round;
+    if (res > INT16_MAX) {
+        env->vxsat = 0x1;
+        return INT16_MAX;
+    } else if (res < INT16_MIN) {
+        env->vxsat = 0x1;
+        return INT16_MIN;
+    } else {
+        return res;
+    }
+}
+
+static inline int32_t
+vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b)
+{
+    uint8_t round, shift = b & 0x3f;
+    int64_t res;
+
+    round = get_round(vxrm, a, shift);
+    res = (a >> shift) + round;
+    if (res > INT32_MAX) {
+        env->vxsat = 0x1;
+        return INT32_MAX;
+    } else if (res < INT32_MIN) {
+        env->vxsat = 0x1;
+        return INT32_MIN;
+    } else {
+        return res;
+    }
+}
+
+RVVCALL(OPIVV2_RM, vnclip_vv_b, NOP_SSS_B, H1, H2, H1, vnclip8)
+RVVCALL(OPIVV2_RM, vnclip_vv_h, NOP_SSS_H, H2, H4, H2, vnclip16)
+RVVCALL(OPIVV2_RM, vnclip_vv_w, NOP_SSS_W, H4, H8, H4, vnclip32)
+GEN_VEXT_VV_RM(vnclip_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vnclip_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vnclip_vv_w, 4, 4, clearl)
+
+RVVCALL(OPIVX2_RM, vnclip_vx_b, NOP_SSS_B, H1, H2, vnclip8)
+RVVCALL(OPIVX2_RM, vnclip_vx_h, NOP_SSS_H, H2, H4, vnclip16)
+RVVCALL(OPIVX2_RM, vnclip_vx_w, NOP_SSS_W, H4, H8, vnclip32)
+GEN_VEXT_VX_RM(vnclip_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vnclip_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vnclip_vx_w, 4, 4, clearl)
+
+static inline uint8_t
+vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
+{
+    uint8_t round, shift = b & 0xf;
+    uint16_t res;
+
+    round = get_round(vxrm, a, shift);
+    res = (a >> shift) + round;
+    if (res > UINT8_MAX) {
+        env->vxsat = 0x1;
+        return UINT8_MAX;
+    } else {
+        return res;
+    }
+}
+
+static inline uint16_t
+vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b)
+{
+    uint8_t round, shift = b & 0x1f;
+    uint32_t res;
+
+    round = get_round(vxrm, a, shift);
+    res = (a >> shift) + round;
+    if (res > UINT16_MAX) {
+        env->vxsat = 0x1;
+        return UINT16_MAX;
+    } else {
+        return res;
+    }
+}
+
+static inline uint32_t
+vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b)
+{
+    uint8_t round, shift = b & 0x3f;
+    uint64_t res;
+
+    round = get_round(vxrm, a, shift);
+    res = (a >> shift) + round;
+    if (res > UINT32_MAX) {
+        env->vxsat = 0x1;
+        return UINT32_MAX;
+    } else {
+        return res;
+    }
+}
+
+RVVCALL(OPIVV2_RM, vnclipu_vv_b, NOP_UUU_B, H1, H2, H1, vnclipu8)
+RVVCALL(OPIVV2_RM, vnclipu_vv_h, NOP_UUU_H, H2, H4, H2, vnclipu16)
+RVVCALL(OPIVV2_RM, vnclipu_vv_w, NOP_UUU_W, H4, H8, H4, vnclipu32)
+GEN_VEXT_VV_RM(vnclipu_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vnclipu_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vnclipu_vv_w, 4, 4, clearl)
+
+RVVCALL(OPIVX2_RM, vnclipu_vx_b, NOP_UUU_B, H1, H2, vnclipu8)
+RVVCALL(OPIVX2_RM, vnclipu_vx_h, NOP_UUU_H, H2, H4, vnclipu16)
+RVVCALL(OPIVX2_RM, vnclipu_vx_w, NOP_UUU_W, H4, H8, vnclipu32)
+GEN_VEXT_VX_RM(vnclipu_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vnclipu_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vnclipu_vx_w, 4, 4, clearl)
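
For reference (not part of the patch), a minimal standalone C sketch of the rounding-and-saturation behaviour that vnclip8 above implements. It assumes the round-to-nearest-up mode (vxrm == 0), in which get_round() reduces to taking bit (shift - 1) of the operand; env->vxrm and env->vxsat are simplified here to a fixed mode and an out-parameter. The function name vnclip8_rnu and the example values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Narrowing clip from 16 to 8 bits, round-to-nearest-up, with saturation. */
static int8_t vnclip8_rnu(int16_t a, uint8_t shift, int *sat)
{
    /* With vxrm == 0, the rounding increment is bit (shift - 1) of a
     * (and 0 when shift == 0, matching get_round()). */
    int16_t round = (shift > 0) ? ((a >> (shift - 1)) & 1) : 0;
    int16_t res = (int16_t)(a >> shift) + round;

    if (res > INT8_MAX) {        /* clip high and flag saturation */
        *sat = 1;
        return INT8_MAX;
    } else if (res < INT8_MIN) { /* clip low and flag saturation */
        *sat = 1;
        return INT8_MIN;
    }
    return (int8_t)res;
}

int main(void)
{
    int sat = 0;
    /* 0x1234 >> 4 = 291, which exceeds INT8_MAX, so the result clips to 127
     * and the saturation flag is set. */
    printf("%d sat=%d\n", vnclip8_rnu(0x1234, 4, &sat), sat);
    /* 0x0128 >> 4 = 18, and the dropped bits round the result up to 19;
     * no saturation occurs. */
    sat = 0;
    printf("%d sat=%d\n", vnclip8_rnu(0x0128, 4, &sat), sat);
    return 0;
}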