target/riscv: vector single-width fractional multiply with rounding and saturation
Backports 9f0ff9e51480f8f1d2d7a62b11aa156fcdb4ef95
Parent: 2343892c2e
Commit: e27aadfa4f
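Per the RVV spec, the new vsmul.vv/vsmul.vx instructions treat each SEW-bit lane as a signed fixed-point fraction with SEW-1 fraction bits and compute vd[i] = clip(roundoff_signed(vs2[i] * vs1[i], SEW-1)): the double-width product is shifted right by SEW-1 with a rounding increment chosen by vxrm, then saturated into SEW bits, setting vxsat when it clips. A minimal standalone sketch of the 8-bit case with round-to-nearest-up (vxrm = 0) hardwired; the function name and layout here are illustrative, not taken from the commit:

#include <stdint.h>
#include <stdio.h>

/* Illustrative Q7 fractional multiply: round-to-nearest-up, saturating. */
static int8_t vsmul8_rnu(int8_t a, int8_t b, int *sat)
{
    int16_t res = (int16_t)a * (int16_t)b;   /* exact Q14 product */
    int16_t round = (res >> 6) & 1;          /* top bit of the dropped fraction */
    res = (res >> 7) + round;                /* back to Q7, rounded */

    if (res > INT8_MAX) { *sat = 1; return INT8_MAX; }
    if (res < INT8_MIN) { *sat = 1; return INT8_MIN; }
    return (int8_t)res;
}

int main(void)
{
    int sat = 0;
    /* 0.5 * 0.5 = 0.25 in Q7: 0x40 * 0x40 -> 0x20 */
    printf("0x40 * 0x40 -> 0x%02x\n", (unsigned)(uint8_t)vsmul8_rnu(0x40, 0x40, &sat));
    /* -1.0 * -1.0 = +1.0 is not representable in Q7: clips to 0x7f */
    printf("0x80 * 0x80 -> 0x%02x (sat=%d)\n",
           (unsigned)(uint8_t)vsmul8_rnu(INT8_MIN, INT8_MIN, &sat), sat);
    return 0;
}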
@@ -6908,6 +6908,14 @@ riscv_symbols = (
     'helper_vasub_vx_h',
     'helper_vasub_vx_w',
     'helper_vasub_vx_d',
+    'helper_vsmul_vv_b',
+    'helper_vsmul_vv_h',
+    'helper_vsmul_vv_w',
+    'helper_vsmul_vv_d',
+    'helper_vsmul_vx_b',
+    'helper_vsmul_vx_h',
+    'helper_vsmul_vx_w',
+    'helper_vsmul_vx_d',
     'pmp_hart_has_privs',
     'pmpaddr_csr_read',
     'pmpaddr_csr_write',
@@ -4344,6 +4344,14 @@
 #define helper_vasub_vx_h helper_vasub_vx_h_riscv32
 #define helper_vasub_vx_w helper_vasub_vx_w_riscv32
 #define helper_vasub_vx_d helper_vasub_vx_d_riscv32
+#define helper_vsmul_vv_b helper_vsmul_vv_b_riscv32
+#define helper_vsmul_vv_h helper_vsmul_vv_h_riscv32
+#define helper_vsmul_vv_w helper_vsmul_vv_w_riscv32
+#define helper_vsmul_vv_d helper_vsmul_vv_d_riscv32
+#define helper_vsmul_vx_b helper_vsmul_vx_b_riscv32
+#define helper_vsmul_vx_h helper_vsmul_vx_h_riscv32
+#define helper_vsmul_vx_w helper_vsmul_vx_w_riscv32
+#define helper_vsmul_vx_d helper_vsmul_vx_d_riscv32
 #define pmp_hart_has_privs pmp_hart_has_privs_riscv32
 #define pmpaddr_csr_read pmpaddr_csr_read_riscv32
 #define pmpaddr_csr_write pmpaddr_csr_write_riscv32
@@ -4344,6 +4344,14 @@
 #define helper_vasub_vx_h helper_vasub_vx_h_riscv64
 #define helper_vasub_vx_w helper_vasub_vx_w_riscv64
 #define helper_vasub_vx_d helper_vasub_vx_d_riscv64
+#define helper_vsmul_vv_b helper_vsmul_vv_b_riscv64
+#define helper_vsmul_vv_h helper_vsmul_vv_h_riscv64
+#define helper_vsmul_vv_w helper_vsmul_vv_w_riscv64
+#define helper_vsmul_vv_d helper_vsmul_vv_d_riscv64
+#define helper_vsmul_vx_b helper_vsmul_vx_b_riscv64
+#define helper_vsmul_vx_h helper_vsmul_vx_h_riscv64
+#define helper_vsmul_vx_w helper_vsmul_vx_w_riscv64
+#define helper_vsmul_vx_d helper_vsmul_vx_d_riscv64
 #define pmp_hart_has_privs pmp_hart_has_privs_riscv64
 #define pmpaddr_csr_read pmpaddr_csr_read_riscv64
 #define pmpaddr_csr_write pmpaddr_csr_write_riscv64
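The two #define hunks are Unicorn-specific plumbing: Unicorn links every emulated target into a single library, so its generated headers suffix each QEMU symbol with the target name (_riscv32, _riscv64) to avoid link-time collisions, and every new helper must also be added to the riscv_symbols tuple above.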
@@ -732,3 +732,12 @@ DEF_HELPER_6(vasub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vasub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vasub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vasub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vsmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_d, void, ptr, ptr, tl, ptr, env, i32)
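Note the third slot: the vv helpers take a vector pointer for vs1, while the vx helpers replace it with a target-long scalar (tl). For orientation, DEF_HELPER_6 declares a six-argument TCG helper; the first vv declaration expands to roughly the following, where the argument names are illustrative rather than taken from the macro:

/* Approximate expansion of DEF_HELPER_6(vsmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32) */
void helper_vsmul_vv_b(void *vd, void *v0, void *vs1, void *vs2,
                       CPURISCVState *env, uint32_t desc);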
@@ -418,6 +418,8 @@ vaadd_vx        100100 . ..... ..... 100 ..... 1010111 @r_vm
 vaadd_vi        100100 . ..... ..... 011 ..... 1010111 @r_vm
 vasub_vv        100110 . ..... ..... 000 ..... 1010111 @r_vm
 vasub_vx        100110 . ..... ..... 100 ..... 1010111 @r_vm
+vsmul_vv        100111 . ..... ..... 000 ..... 1010111 @r_vm
+vsmul_vx        100111 . ..... ..... 100 ..... 1010111 @r_vm

 vsetvli         0 ........... ..... 111 ..... 1010111 @r2_zimm
 vsetvl          1000000 ..... ..... 111 ..... 1010111 @r
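Both decode rows share funct6 100111 and the OP-V major opcode 1010111; funct3 picks the operand form, 000 for vector-vector and 100 for vector-scalar, and the '.' after funct6 is the vm mask bit. A hypothetical standalone matcher for just these two rows (not QEMU code) makes the bit layout concrete:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative matcher for the two decode rows above. */
static bool is_vsmul(uint32_t insn, bool *is_vx)
{
    if ((insn & 0x7f) != 0x57) {      /* major opcode 1010111 (OP-V) */
        return false;
    }
    if ((insn >> 26) != 0x27) {       /* funct6 100111 */
        return false;
    }
    switch ((insn >> 12) & 0x7) {     /* funct3 */
    case 0x0: *is_vx = false; return true;  /* 000: vsmul.vv */
    case 0x4: *is_vx = true;  return true;  /* 100: vsmul.vx */
    default:  return false;
    }
}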
@ -1691,3 +1691,7 @@ GEN_OPIVV_TRANS(vasub_vv, opivv_check)
|
|||
GEN_OPIVX_TRANS(vaadd_vx, opivx_check)
|
||||
GEN_OPIVX_TRANS(vasub_vx, opivx_check)
|
||||
GEN_OPIVI_TRANS(vaadd_vi, 0, vaadd_vx, opivx_check)
|
||||
|
||||
/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
|
||||
GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
|
||||
GEN_OPIVX_TRANS(vsmul_vx, opivx_check)
|
||||
|
|
|
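Broadly, GEN_OPIVV_TRANS and GEN_OPIVX_TRANS generate the trans_vsmul_vv/trans_vsmul_vx callbacks invoked by the decoder built from the rows above, and the opivv_check/opivx_check predicates gate translation on a legal vector configuration (a valid vtype and operands respecting LMUL alignment and mask-overlap rules) before the call is routed to the helper_vsmul_* functions implemented below.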
@@ -2584,3 +2584,110 @@ GEN_VEXT_VX_RM(vasub_vx_b, 1, 1, clearb)
 GEN_VEXT_VX_RM(vasub_vx_h, 2, 2, clearh)
 GEN_VEXT_VX_RM(vasub_vx_w, 4, 4, clearl)
 GEN_VEXT_VX_RM(vasub_vx_d, 8, 8, clearq)
+
+/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
+static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+{
+    uint8_t round;
+    int16_t res;
+
+    res = (int16_t)a * (int16_t)b;
+    round = get_round(vxrm, res, 7);
+    res = (res >> 7) + round;
+
+    if (res > INT8_MAX) {
+        env->vxsat = 0x1;
+        return INT8_MAX;
+    } else if (res < INT8_MIN) {
+        env->vxsat = 0x1;
+        return INT8_MIN;
+    } else {
+        return res;
+    }
+}
+
+static int16_t vsmul16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
+{
+    uint8_t round;
+    int32_t res;
+
+    res = (int32_t)a * (int32_t)b;
+    round = get_round(vxrm, res, 15);
+    res = (res >> 15) + round;
+
+    if (res > INT16_MAX) {
+        env->vxsat = 0x1;
+        return INT16_MAX;
+    } else if (res < INT16_MIN) {
+        env->vxsat = 0x1;
+        return INT16_MIN;
+    } else {
+        return res;
+    }
+}
+
+static int32_t vsmul32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+{
+    uint8_t round;
+    int64_t res;
+
+    res = (int64_t)a * (int64_t)b;
+    round = get_round(vxrm, res, 31);
+    res = (res >> 31) + round;
+
+    if (res > INT32_MAX) {
+        env->vxsat = 0x1;
+        return INT32_MAX;
+    } else if (res < INT32_MIN) {
+        env->vxsat = 0x1;
+        return INT32_MIN;
+    } else {
+        return res;
+    }
+}
+
+static int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+{
+    uint8_t round;
+    uint64_t hi_64, lo_64;
+    int64_t res;
+
+    if (a == INT64_MIN && b == INT64_MIN) {
+        env->vxsat = 1;
+        return INT64_MAX;
+    }
+
+    muls64(&lo_64, &hi_64, a, b);
+    round = get_round(vxrm, lo_64, 63);
+    /*
+     * Cannot overflow, as there are always
+     * 2 sign bits after multiply.
+     */
+    res = (hi_64 << 1) | (lo_64 >> 63);
+    if (round) {
+        if (res == INT64_MAX) {
+            env->vxsat = 1;
+        } else {
+            res += 1;
+        }
+    }
+    return res;
+}
+
+RVVCALL(OPIVV2_RM, vsmul_vv_b, OP_SSS_B, H1, H1, H1, vsmul8)
+RVVCALL(OPIVV2_RM, vsmul_vv_h, OP_SSS_H, H2, H2, H2, vsmul16)
+RVVCALL(OPIVV2_RM, vsmul_vv_w, OP_SSS_W, H4, H4, H4, vsmul32)
+RVVCALL(OPIVV2_RM, vsmul_vv_d, OP_SSS_D, H8, H8, H8, vsmul64)
+GEN_VEXT_VV_RM(vsmul_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vsmul_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vsmul_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vsmul_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_RM, vsmul_vx_b, OP_SSS_B, H1, H1, vsmul8)
+RVVCALL(OPIVX2_RM, vsmul_vx_h, OP_SSS_H, H2, H2, vsmul16)
+RVVCALL(OPIVX2_RM, vsmul_vx_w, OP_SSS_W, H4, H4, vsmul32)
+RVVCALL(OPIVX2_RM, vsmul_vx_d, OP_SSS_D, H8, H8, vsmul64)
+GEN_VEXT_VX_RM(vsmul_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vsmul_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vsmul_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vsmul_vx_d, 8, 8, clearq)
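Two details in the hunk above are worth spelling out. The byte, half, and word lanes widen into the next integer size, shift the product right by SEW-1 with a vxrm-dependent increment, and clamp. The 64-bit lane has no wider type, so it calls muls64 for the full 128-bit product; the one input pair whose Q63 result is unrepresentable, INT64_MIN * INT64_MIN (that is, -1.0 * -1.0 = +1.0), is saturated up front, and because a product of two Q63 values always carries two sign bits, (hi_64 << 1) | (lo_64 >> 63) recovers bits 126..63 without overflow. Note also the post-round check: if res is already INT64_MAX, adding the increment would wrap, so the helper sets vxsat instead. The increment itself comes from get_round, which is not part of this commit; a reconstruction of its behaviour, with the exact shape to be treated as an assumption, looks like:

#include <stdint.h>

/* Sketch of the vxrm-driven rounding increment used by the helpers above
 * (reconstructed, not part of this diff). v is the value before shifting,
 * shift the number of fraction bits about to be dropped; the result is
 * the increment to add after the shift. */
static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift)
{
    if (shift == 0 || shift > 63) {
        return 0;                                         /* nothing to round */
    }

    uint8_t  d  = (v >> shift) & 1;                       /* future result LSB */
    uint8_t  d1 = (v >> (shift - 1)) & 1;                 /* top dropped bit */
    uint64_t D1 = v & ((UINT64_C(1) << shift) - 1);       /* all dropped bits */
    uint64_t D2 = v & ((UINT64_C(1) << (shift - 1)) - 1); /* dropped bits below d1 */

    switch (vxrm) {
    case 0:  /* rnu: round-to-nearest-up */
        return d1;
    case 1:  /* rne: round-to-nearest-even */
        return d1 & ((D2 != 0) | d);
    case 3:  /* rod: round-to-odd ("jam" dropped bits into the LSB) */
        return !d & (D1 != 0);
    default: /* rdn: round-down, i.e. truncate */
        return 0;
    }
}

With shift = 7/15/31/63 as the callers pass, this matches the per-width behaviour the helpers rely on.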