target/riscv: vector slide instructions
Backports ec17e03688ce4d0ae188db6d90b185b92a9a2087
commit 162ae6efd7
parent 0e0ac052cd
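
For context: the patch wires the four RVV slide operations through the unicorn fork of QEMU end to end: decode entries, translation-time checks, per-element helpers, and the riscv32/riscv64 symbol-renaming tables. The semantics, taken from the comments in the patch itself (vd[i+rs1] = vs2[i] and so on), reduce to simple index shifts. The stand-alone C model below is a sketch of those semantics only; the model_* names are invented for illustration and do not appear in the patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* vslideup.vx:   vd[i + offset] = vs2[i]; elements below offset untouched */
static void model_vslideup(uint8_t *vd, const uint8_t *vs2,
                           size_t offset, size_t vl)
{
    for (size_t i = offset; i < vl; i++) {
        vd[i] = vs2[i - offset];
    }
}

/* vslidedown.vx: vd[i] = vs2[i + offset]; zero once the source passes vlmax */
static void model_vslidedown(uint8_t *vd, const uint8_t *vs2,
                             size_t offset, size_t vl, size_t vlmax)
{
    for (size_t i = 0; i < vl; i++) {
        size_t j = i + offset;
        vd[i] = (j >= vlmax) ? 0 : vs2[j];
    }
}

/* vslide1up.vx:  vd[0] = x[rs1], vd[i] = vs2[i - 1] for the rest */
static void model_vslide1up(uint8_t *vd, const uint8_t *vs2,
                            uint8_t x, size_t vl)
{
    for (size_t i = 0; i < vl; i++) {
        vd[i] = (i == 0) ? x : vs2[i - 1];
    }
}

/* vslide1down.vx: vd[i] = vs2[i + 1], vd[vl-1] = x[rs1] */
static void model_vslide1down(uint8_t *vd, const uint8_t *vs2,
                              uint8_t x, size_t vl)
{
    for (size_t i = 0; i < vl; i++) {
        vd[i] = (i == vl - 1) ? x : vs2[i + 1];
    }
}

int main(void)
{
    const uint8_t src[4] = {10, 20, 30, 40};
    uint8_t dst[4] = {0, 0, 0, 0};

    model_vslideup(dst, src, 2, 4);
    printf("%u %u %u %u\n", dst[0], dst[1], dst[2], dst[3]); /* 0 0 10 20 */
    return 0;
}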
qemu/header_gen.py
@@ -7271,6 +7271,22 @@ riscv_symbols = (
     'helper_vid_v_h',
     'helper_vid_v_w',
     'helper_vid_v_d',
+    'helper_vslideup_vx_b',
+    'helper_vslideup_vx_h',
+    'helper_vslideup_vx_w',
+    'helper_vslideup_vx_d',
+    'helper_vslidedown_vx_b',
+    'helper_vslidedown_vx_h',
+    'helper_vslidedown_vx_w',
+    'helper_vslidedown_vx_d',
+    'helper_vslide1up_vx_b',
+    'helper_vslide1up_vx_h',
+    'helper_vslide1up_vx_w',
+    'helper_vslide1up_vx_d',
+    'helper_vslide1down_vx_b',
+    'helper_vslide1down_vx_h',
+    'helper_vslide1down_vx_w',
+    'helper_vslide1down_vx_d',
     'pmp_hart_has_privs',
     'pmpaddr_csr_read',
     'pmpaddr_csr_write',
qemu/riscv32.h
@@ -4707,6 +4707,22 @@
 #define helper_vid_v_h helper_vid_v_h_riscv32
 #define helper_vid_v_w helper_vid_v_w_riscv32
 #define helper_vid_v_d helper_vid_v_d_riscv32
+#define helper_vslideup_vx_b helper_vslideup_vx_b_riscv32
+#define helper_vslideup_vx_h helper_vslideup_vx_h_riscv32
+#define helper_vslideup_vx_w helper_vslideup_vx_w_riscv32
+#define helper_vslideup_vx_d helper_vslideup_vx_d_riscv32
+#define helper_vslidedown_vx_b helper_vslidedown_vx_b_riscv32
+#define helper_vslidedown_vx_h helper_vslidedown_vx_h_riscv32
+#define helper_vslidedown_vx_w helper_vslidedown_vx_w_riscv32
+#define helper_vslidedown_vx_d helper_vslidedown_vx_d_riscv32
+#define helper_vslide1up_vx_b helper_vslide1up_vx_b_riscv32
+#define helper_vslide1up_vx_h helper_vslide1up_vx_h_riscv32
+#define helper_vslide1up_vx_w helper_vslide1up_vx_w_riscv32
+#define helper_vslide1up_vx_d helper_vslide1up_vx_d_riscv32
+#define helper_vslide1down_vx_b helper_vslide1down_vx_b_riscv32
+#define helper_vslide1down_vx_h helper_vslide1down_vx_h_riscv32
+#define helper_vslide1down_vx_w helper_vslide1down_vx_w_riscv32
+#define helper_vslide1down_vx_d helper_vslide1down_vx_d_riscv32
 #define pmp_hart_has_privs pmp_hart_has_privs_riscv32
 #define pmpaddr_csr_read pmpaddr_csr_read_riscv32
 #define pmpaddr_csr_write pmpaddr_csr_write_riscv32
qemu/riscv64.h
@@ -4707,6 +4707,22 @@
 #define helper_vid_v_h helper_vid_v_h_riscv64
 #define helper_vid_v_w helper_vid_v_w_riscv64
 #define helper_vid_v_d helper_vid_v_d_riscv64
+#define helper_vslideup_vx_b helper_vslideup_vx_b_riscv64
+#define helper_vslideup_vx_h helper_vslideup_vx_h_riscv64
+#define helper_vslideup_vx_w helper_vslideup_vx_w_riscv64
+#define helper_vslideup_vx_d helper_vslideup_vx_d_riscv64
+#define helper_vslidedown_vx_b helper_vslidedown_vx_b_riscv64
+#define helper_vslidedown_vx_h helper_vslidedown_vx_h_riscv64
+#define helper_vslidedown_vx_w helper_vslidedown_vx_w_riscv64
+#define helper_vslidedown_vx_d helper_vslidedown_vx_d_riscv64
+#define helper_vslide1up_vx_b helper_vslide1up_vx_b_riscv64
+#define helper_vslide1up_vx_h helper_vslide1up_vx_h_riscv64
+#define helper_vslide1up_vx_w helper_vslide1up_vx_w_riscv64
+#define helper_vslide1up_vx_d helper_vslide1up_vx_d_riscv64
+#define helper_vslide1down_vx_b helper_vslide1down_vx_b_riscv64
+#define helper_vslide1down_vx_h helper_vslide1down_vx_h_riscv64
+#define helper_vslide1down_vx_w helper_vslide1down_vx_w_riscv64
+#define helper_vslide1down_vx_d helper_vslide1down_vx_d_riscv64
 #define pmp_hart_has_privs pmp_hart_has_privs_riscv64
 #define pmpaddr_csr_read pmpaddr_csr_read_riscv64
 #define pmpaddr_csr_write pmpaddr_csr_write_riscv64
qemu/target/riscv/helper.h
@@ -1122,3 +1122,20 @@ DEF_HELPER_4(vid_v_b, void, ptr, ptr, env, i32)
 DEF_HELPER_4(vid_v_h, void, ptr, ptr, env, i32)
 DEF_HELPER_4(vid_v_w, void, ptr, ptr, env, i32)
 DEF_HELPER_4(vid_v_d, void, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vslideup_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslideup_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslideup_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslideup_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
qemu/target/riscv/insn32.decode
@@ -568,6 +568,12 @@ vext_x_v        001100 1 ..... ..... 010 ..... 1010111 @r
 vmv_s_x         001101 1 00000 ..... 110 ..... 1010111 @r2
 vfmv_f_s        001100 1 ..... 00000 001 ..... 1010111 @r2rd
 vfmv_s_f        001101 1 00000 ..... 101 ..... 1010111 @r2
+vslideup_vx     001110 . ..... ..... 100 ..... 1010111 @r_vm
+vslideup_vi     001110 . ..... ..... 011 ..... 1010111 @r_vm
+vslide1up_vx    001110 . ..... ..... 110 ..... 1010111 @r_vm
+vslidedown_vx   001111 . ..... ..... 100 ..... 1010111 @r_vm
+vslidedown_vi   001111 . ..... ..... 011 ..... 1010111 @r_vm
+vslide1down_vx  001111 . ..... ..... 110 ..... 1010111 @r_vm
 
 vsetvli         0 ........... ..... 111 ..... 1010111 @r2_zimm
 vsetvl          1000000 ..... ..... 111 ..... 1010111 @r
qemu/target/riscv/insn_trans/trans_rvv.inc.c
@@ -2815,3 +2815,21 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
     }
     return false;
 }
+
+/* Vector Slide Instructions */
+static bool slideup_check(DisasContext *s, arg_rmrr *a)
+{
+    return (vext_check_isa_ill(s) &&
+            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+            vext_check_reg(s, a->rd, false) &&
+            vext_check_reg(s, a->rs2, false) &&
+            (a->rd != a->rs2));
+}
+
+GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
+GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
+GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check)
+
+GEN_OPIVX_TRANS(vslidedown_vx, opivx_check)
+GEN_OPIVX_TRANS(vslide1down_vx, opivx_check)
+GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, opivx_check)
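
One detail worth a note: slideup_check above demands a->rd != a->rs2, while the down variants reuse the plain opivx_check. Slideup reads vs2 at a lower index than the element it writes, so processing elements in ascending order over an overlapping register group would re-read values the loop has already overwritten; slidedown only reads ahead of its writes and has no such hazard. A minimal sketch of the hazard (plain C, not QEMU code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Slide up by 1 "in place": each write clobbers the element the
     * next iteration needs, so v[0] smears across the whole vector. */
    uint8_t v[4] = {1, 2, 3, 4};
    for (size_t i = 1; i < 4; i++) {
        v[i] = v[i - 1];
    }
    printf("%u %u %u %u\n", v[0], v[1], v[2], v[3]); /* 1 1 1 1, not 1 1 2 3 */
    return 0;
}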
qemu/target/riscv/vector_helper.c
@@ -4673,3 +4673,117 @@ GEN_VEXT_VID_V(vid_v_b, uint8_t, H1, clearb)
 GEN_VEXT_VID_V(vid_v_h, uint16_t, H2, clearh)
 GEN_VEXT_VID_V(vid_v_w, uint32_t, H4, clearl)
 GEN_VEXT_VID_V(vid_v_d, uint64_t, H8, clearq)
+
+/*
+ *** Vector Permutation Instructions
+ */
+
+/* Vector Slide Instructions */
+#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H, CLEAR_FN)                    \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                  CPURISCVState *env, uint32_t desc)                      \
+{                                                                         \
+    uint32_t mlen = vext_mlen(desc);                                      \
+    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
+    uint32_t vm = vext_vm(desc);                                          \
+    uint32_t vl = env->vl;                                                \
+    target_ulong offset = s1, i;                                          \
+                                                                          \
+    for (i = offset; i < vl; i++) {                                       \
+        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
+            continue;                                                     \
+        }                                                                 \
+        *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset));          \
+    }                                                                     \
+    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+}
+
+/* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8, clearq)
+
+#define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H, CLEAR_FN)                  \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                  CPURISCVState *env, uint32_t desc)                      \
+{                                                                         \
+    uint32_t mlen = vext_mlen(desc);                                      \
+    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
+    uint32_t vm = vext_vm(desc);                                          \
+    uint32_t vl = env->vl;                                                \
+    target_ulong offset = s1, i;                                          \
+                                                                          \
+    for (i = 0; i < vl; ++i) {                                            \
+        target_ulong j = i + offset;                                      \
+        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
+            continue;                                                     \
+        }                                                                 \
+        *((ETYPE *)vd + H(i)) = j >= vlmax ? 0 : *((ETYPE *)vs2 + H(j));  \
+    }                                                                     \
+    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+}
+
+/* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8, clearq)
+
+#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H, CLEAR_FN)                   \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                  CPURISCVState *env, uint32_t desc)                      \
+{                                                                         \
+    uint32_t mlen = vext_mlen(desc);                                      \
+    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
+    uint32_t vm = vext_vm(desc);                                          \
+    uint32_t vl = env->vl;                                                \
+    uint32_t i;                                                           \
+                                                                          \
+    for (i = 0; i < vl; i++) {                                            \
+        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
+            continue;                                                     \
+        }                                                                 \
+        if (i == 0) {                                                     \
+            *((ETYPE *)vd + H(i)) = s1;                                   \
+        } else {                                                          \
+            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1));           \
+        }                                                                 \
+    }                                                                     \
+    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+}
+
+/* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8, clearq)
+
+#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H, CLEAR_FN)                 \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                  CPURISCVState *env, uint32_t desc)                      \
+{                                                                         \
+    uint32_t mlen = vext_mlen(desc);                                      \
+    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
+    uint32_t vm = vext_vm(desc);                                          \
+    uint32_t vl = env->vl;                                                \
+    uint32_t i;                                                           \
+                                                                          \
+    for (i = 0; i < vl; i++) {                                            \
+        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
+            continue;                                                     \
+        }                                                                 \
+        if (i == vl - 1) {                                                \
+            *((ETYPE *)vd + H(i)) = s1;                                   \
+        } else {                                                          \
+            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1));           \
+        }                                                                 \
+    }                                                                     \
+    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+}
+
+/* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8, clearq)
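
Two behaviors are shared by all four helper macros above: when vm is clear, elements whose mask bit in v0 is unset are skipped, leaving the old destination value undisturbed, and the trailing CLEAR_FN call clears the tail from vl up to vlmax. The stand-alone model below sketches that combination under simplifying assumptions: a flat byte mask instead of QEMU's packed v0 layout, and a plain zeroing memset standing in for CLEAR_FN.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical masked vslide1down on bytes: mask[i] != 0 means active.
 * Inactive elements keep their previous value; the tail [vl, vlmax) is
 * zeroed, standing in for the CLEAR_FN call in the real helpers. */
static void model_vslide1down_masked(uint8_t *vd, const uint8_t *vs2,
                                     const uint8_t *mask, uint8_t x,
                                     size_t vl, size_t vlmax)
{
    for (size_t i = 0; i < vl; i++) {
        if (!mask[i]) {
            continue;                       /* undisturbed */
        }
        vd[i] = (i == vl - 1) ? x : vs2[i + 1];
    }
    memset(vd + vl, 0, vlmax - vl);         /* tail clear */
}

int main(void)
{
    const uint8_t src[8]  = {1, 2, 3, 4, 5, 6, 0, 0};
    uint8_t dst[8]        = {9, 9, 9, 9, 9, 9, 9, 9};
    const uint8_t mask[6] = {1, 0, 1, 1, 0, 1};

    model_vslide1down_masked(dst, src, mask, 77, 6, 8);
    for (int i = 0; i < 8; i++) {
        printf("%u ", dst[i]);              /* 2 9 4 5 9 77 0 0 */
    }
    printf("\n");
    return 0;
}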