diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index 505eefc6..17072a55 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -7250,6 +7250,14 @@ riscv_symbols = (
     'helper_vfredmin_vs_d',
     'helper_vfwredsum_vs_h',
     'helper_vfwredsum_vs_w',
+    'helper_vmand_mm',
+    'helper_vmnand_mm',
+    'helper_vmandnot_mm',
+    'helper_vmxor_mm',
+    'helper_vmor_mm',
+    'helper_vmnor_mm',
+    'helper_vmornot_mm',
+    'helper_vmxnor_mm',
     'pmp_hart_has_privs',
     'pmpaddr_csr_read',
     'pmpaddr_csr_write',
diff --git a/qemu/riscv32.h b/qemu/riscv32.h
index ec4a3c5d..807ed079 100644
--- a/qemu/riscv32.h
+++ b/qemu/riscv32.h
@@ -4686,6 +4686,14 @@
 #define helper_vfredmin_vs_d helper_vfredmin_vs_d_riscv32
 #define helper_vfwredsum_vs_h helper_vfwredsum_vs_h_riscv32
 #define helper_vfwredsum_vs_w helper_vfwredsum_vs_w_riscv32
+#define helper_vmand_mm helper_vmand_mm_riscv32
+#define helper_vmnand_mm helper_vmnand_mm_riscv32
+#define helper_vmandnot_mm helper_vmandnot_mm_riscv32
+#define helper_vmxor_mm helper_vmxor_mm_riscv32
+#define helper_vmor_mm helper_vmor_mm_riscv32
+#define helper_vmnor_mm helper_vmnor_mm_riscv32
+#define helper_vmornot_mm helper_vmornot_mm_riscv32
+#define helper_vmxnor_mm helper_vmxnor_mm_riscv32
 #define pmp_hart_has_privs pmp_hart_has_privs_riscv32
 #define pmpaddr_csr_read pmpaddr_csr_read_riscv32
 #define pmpaddr_csr_write pmpaddr_csr_write_riscv32
diff --git a/qemu/riscv64.h b/qemu/riscv64.h
index ed44ed51..e5a4bbad 100644
--- a/qemu/riscv64.h
+++ b/qemu/riscv64.h
@@ -4686,6 +4686,14 @@
 #define helper_vfredmin_vs_d helper_vfredmin_vs_d_riscv64
 #define helper_vfwredsum_vs_h helper_vfwredsum_vs_h_riscv64
 #define helper_vfwredsum_vs_w helper_vfwredsum_vs_w_riscv64
+#define helper_vmand_mm helper_vmand_mm_riscv64
+#define helper_vmnand_mm helper_vmnand_mm_riscv64
+#define helper_vmandnot_mm helper_vmandnot_mm_riscv64
+#define helper_vmxor_mm helper_vmxor_mm_riscv64
+#define helper_vmor_mm helper_vmor_mm_riscv64
+#define helper_vmnor_mm helper_vmnor_mm_riscv64
+#define helper_vmornot_mm helper_vmornot_mm_riscv64
+#define helper_vmxnor_mm helper_vmxnor_mm_riscv64
 #define pmp_hart_has_privs pmp_hart_has_privs_riscv64
 #define pmpaddr_csr_read pmpaddr_csr_read_riscv64
 #define pmpaddr_csr_write pmpaddr_csr_write_riscv64
diff --git a/qemu/target/riscv/helper.h b/qemu/target/riscv/helper.h
index eab71ff0..dc2f1e9f 100644
--- a/qemu/target/riscv/helper.h
+++ b/qemu/target/riscv/helper.h
@@ -1095,3 +1095,12 @@
 DEF_HELPER_6(vfredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vfwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vmand_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmnand_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmandnot_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmxor_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmor_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmnor_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmornot_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmxnor_mm, void, ptr, ptr, ptr, ptr, env, i32)
diff --git a/qemu/target/riscv/insn32.decode b/qemu/target/riscv/insn32.decode
index 2668d483..c71cbef1 100644
--- a/qemu/target/riscv/insn32.decode
+++ b/qemu/target/riscv/insn32.decode
@@ -547,6 +547,14 @@
 vfredmin_vs     000101 . ..... ..... 001 ..... 1010111 @r_vm
 vfredmax_vs     000111 . ..... ..... 001 ..... 1010111 @r_vm
 # Vector widening ordered and unordered float reduction sum
 vfwredsum_vs    1100-1 . ..... ..... 001 ..... 1010111 @r_vm
+vmand_mm        011001 - ..... ..... 010 ..... 1010111 @r
+vmnand_mm       011101 - ..... ..... 010 ..... 1010111 @r
+vmandnot_mm     011000 - ..... ..... 010 ..... 1010111 @r
+vmxor_mm        011011 - ..... ..... 010 ..... 1010111 @r
+vmor_mm         011010 - ..... ..... 010 ..... 1010111 @r
+vmnor_mm        011110 - ..... ..... 010 ..... 1010111 @r
+vmornot_mm      011100 - ..... ..... 010 ..... 1010111 @r
+vmxnor_mm       011111 - ..... ..... 010 ..... 1010111 @r
 vsetvli         0 ........... ..... 111 ..... 1010111 @r2_zimm
 vsetvl          1000000 ..... ..... 111 ..... 1010111 @r
diff --git a/qemu/target/riscv/insn_trans/trans_rvv.inc.c b/qemu/target/riscv/insn_trans/trans_rvv.inc.c
index b66d96eb..1e81a722 100644
--- a/qemu/target/riscv/insn_trans/trans_rvv.inc.c
+++ b/qemu/target/riscv/insn_trans/trans_rvv.inc.c
@@ -2391,3 +2391,39 @@ GEN_OPFVV_TRANS(vfredmin_vs, reduction_check)
 
 /* Vector Widening Floating-Point Reduction Instructions */
 GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check)
+
+/*
+ *** Vector Mask Operations
+ */
+
+/* Vector Mask-Register Logical Instructions */
+#define GEN_MM_TRANS(NAME)                                                 \
+static bool trans_##NAME(DisasContext *s, arg_r *a)                        \
+{                                                                          \
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;                                  \
+    if (vext_check_isa_ill(s)) {                                           \
+        uint32_t data = 0;                                                 \
+        gen_helper_gvec_4_ptr *fn = gen_helper_##NAME;                     \
+        TCGLabel *over = gen_new_label(tcg_ctx);                           \
+        tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_vl_risc, 0, over); \
+                                                                           \
+        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);                     \
+        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);                     \
+        tcg_gen_gvec_4_ptr(tcg_ctx, vreg_ofs(s, a->rd), vreg_ofs(s, 0),    \
+                           vreg_ofs(s, a->rs1),                            \
+                           vreg_ofs(s, a->rs2), tcg_ctx->cpu_env, 0,       \
+                           s->vlen / 8, data, fn);                         \
+        gen_set_label(tcg_ctx, over);                                      \
+        return true;                                                       \
+    }                                                                      \
+    return false;                                                          \
+}
+
+GEN_MM_TRANS(vmand_mm)
+GEN_MM_TRANS(vmnand_mm)
+GEN_MM_TRANS(vmandnot_mm)
+GEN_MM_TRANS(vmxor_mm)
+GEN_MM_TRANS(vmor_mm)
+GEN_MM_TRANS(vmnor_mm)
+GEN_MM_TRANS(vmornot_mm)
+GEN_MM_TRANS(vmxnor_mm)
diff --git a/qemu/target/riscv/vector_helper.c b/qemu/target/riscv/vector_helper.c
index 35cf1dae..f63b7276 100644
--- a/qemu/target/riscv/vector_helper.c
+++ b/qemu/target/riscv/vector_helper.c
@@ -4478,3 +4478,43 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
     *((uint64_t *)vd) = s1;
     clearq(vd, 1, sizeof(uint64_t), tot);
 }
+
+/*
+ *** Vector Mask Operations
+ */
+/* Vector Mask-Register Logical Instructions */
+#define GEN_VEXT_MASK_VV(NAME, OP)                        \
+void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+                  void *vs2, CPURISCVState *env,          \
+                  uint32_t desc)                          \
+{                                                         \
+    uint32_t mlen = vext_mlen(desc);                      \
+    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;   \
+    uint32_t vl = env->vl;                                \
+    uint32_t i;                                           \
+    int a, b;                                             \
+                                                          \
+    for (i = 0; i < vl; i++) {                            \
+        a = vext_elem_mask(vs1, mlen, i);                 \
+        b = vext_elem_mask(vs2, mlen, i);                 \
+        vext_set_elem_mask(vd, mlen, i, OP(b, a));        \
+    }                                                     \
+    for (; i < vlmax; i++) {                              \
+        vext_set_elem_mask(vd, mlen, i, 0);               \
+    }                                                     \
+}
+
+#define DO_NAND(N, M) (!(N & M))
+#define DO_ANDNOT(N, M) (N & !M)
+#define DO_NOR(N, M) (!(N | M))
+#define DO_ORNOT(N, M) (N | !M)
+#define DO_XNOR(N, M) (!(N ^ M))
+
+GEN_VEXT_MASK_VV(vmand_mm, DO_AND)
+GEN_VEXT_MASK_VV(vmnand_mm, DO_NAND)
+GEN_VEXT_MASK_VV(vmandnot_mm, DO_ANDNOT)
+GEN_VEXT_MASK_VV(vmxor_mm, DO_XOR)
+GEN_VEXT_MASK_VV(vmor_mm, DO_OR)
+GEN_VEXT_MASK_VV(vmnor_mm, DO_NOR)
+GEN_VEXT_MASK_VV(vmornot_mm, DO_ORNOT)
+GEN_VEXT_MASK_VV(vmxnor_mm, DO_XNOR)
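
Note for reviewers: the helpers treat each mask register as a packed bit
array, reading one bit per element at stride mlen and writing the result
back the same way; the second loop clears mask bits from vl up to vlmax.
The OP(b, a) operand order matters: per the RVV spec, vmandnot.mm vd, vs2,
vs1 computes vd.mask[i] = vs2.mask[i] & !vs1.mask[i], so the vs2 bit must
be the first macro argument. Below is a minimal standalone sketch of that
semantics, not code from this tree: elem_mask()/set_elem_mask() are
simplified one-bit-per-element stand-ins for vext_elem_mask()/
vext_set_elem_mask(), which additionally take the mlen stride. Using '!'
rather than '~' in DO_ANDNOT is safe only because the mask accessors
return exactly 0 or 1.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for vext_elem_mask()/vext_set_elem_mask():
 * one mask bit per element (mlen == 1), packed LSB-first into
 * uint64_t words. */
static int elem_mask(const uint64_t *v, int i)
{
    return (v[i / 64] >> (i % 64)) & 1;
}

static void set_elem_mask(uint64_t *v, int i, int value)
{
    uint64_t bit = UINT64_C(1) << (i % 64);

    v[i / 64] = value ? (v[i / 64] | bit) : (v[i / 64] & ~bit);
}

/* Same definition as the patch; '!' is correct here only because
 * elem_mask() returns exactly 0 or 1. */
#define DO_ANDNOT(N, M) (N & !M)

int main(void)
{
    uint64_t vs1[1] = { 0x0F };  /* mask bits 0-3 set     */
    uint64_t vs2[1] = { 0x33 };  /* mask bits 0,1,4,5 set */
    uint64_t vd[1]  = { 0 };
    int vl = 8;

    for (int i = 0; i < vl; i++) {
        int a = elem_mask(vs1, i);
        int b = elem_mask(vs2, i);

        /* OP(b, a): vmandnot.mm vd, vs2, vs1 -> vd = vs2 & ~vs1 */
        set_elem_mask(vd, i, DO_ANDNOT(b, a));
    }
    printf("vd = 0x%02" PRIx64 "\n", vd[0]);  /* 0x33 & ~0x0F = 0x30 */
    return 0;
}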