diff --git a/qemu/aarch64.h b/qemu/aarch64.h
index b9d26667..1dd5b28a 100644
--- a/qemu/aarch64.h
+++ b/qemu/aarch64.h
@@ -3497,6 +3497,10 @@
 #define helper_sve_tbl_d helper_sve_tbl_d_aarch64
 #define helper_sve_tbl_h helper_sve_tbl_h_aarch64
 #define helper_sve_tbl_s helper_sve_tbl_s_aarch64
+#define helper_sve_trn_b helper_sve_trn_b_aarch64
+#define helper_sve_trn_d helper_sve_trn_d_aarch64
+#define helper_sve_trn_h helper_sve_trn_h_aarch64
+#define helper_sve_trn_s helper_sve_trn_s_aarch64
 #define helper_sve_trn_p helper_sve_trn_p_aarch64
 #define helper_sve_uabd_zpzz_b helper_sve_uabd_zpzz_b_aarch64
 #define helper_sve_uabd_zpzz_d helper_sve_uabd_zpzz_d_aarch64
@@ -3542,7 +3546,15 @@
 #define helper_sve_uxth_d helper_sve_uxth_d_aarch64
 #define helper_sve_uxth_s helper_sve_uxth_s_aarch64
 #define helper_sve_uxtw_d helper_sve_uxtw_d_aarch64
+#define helper_sve_uzp_b helper_sve_uzp_b_aarch64
+#define helper_sve_uzp_d helper_sve_uzp_d_aarch64
+#define helper_sve_uzp_h helper_sve_uzp_h_aarch64
+#define helper_sve_uzp_s helper_sve_uzp_s_aarch64
 #define helper_sve_uzp_p helper_sve_uzp_p_aarch64
+#define helper_sve_zip_b helper_sve_zip_b_aarch64
+#define helper_sve_zip_d helper_sve_zip_d_aarch64
+#define helper_sve_zip_h helper_sve_zip_h_aarch64
+#define helper_sve_zip_s helper_sve_zip_s_aarch64
 #define helper_sve_zip_p helper_sve_zip_p_aarch64
 #define helper_udiv64 helper_udiv64_aarch64
 #define helper_vfp_cmpd_a64 helper_vfp_cmpd_a64_aarch64
diff --git a/qemu/aarch64eb.h b/qemu/aarch64eb.h
index d0593fab..60b95da5 100644
--- a/qemu/aarch64eb.h
+++ b/qemu/aarch64eb.h
@@ -3497,6 +3497,10 @@
 #define helper_sve_tbl_d helper_sve_tbl_d_aarch64eb
 #define helper_sve_tbl_h helper_sve_tbl_h_aarch64eb
 #define helper_sve_tbl_s helper_sve_tbl_s_aarch64eb
+#define helper_sve_trn_b helper_sve_trn_b_aarch64eb
+#define helper_sve_trn_d helper_sve_trn_d_aarch64eb
+#define helper_sve_trn_h helper_sve_trn_h_aarch64eb
+#define helper_sve_trn_s helper_sve_trn_s_aarch64eb
 #define helper_sve_trn_p helper_sve_trn_p_aarch64eb
 #define helper_sve_uabd_zpzz_b helper_sve_uabd_zpzz_b_aarch64eb
 #define helper_sve_uabd_zpzz_d helper_sve_uabd_zpzz_d_aarch64eb
@@ -3542,7 +3546,15 @@
 #define helper_sve_uxth_d helper_sve_uxth_d_aarch64eb
 #define helper_sve_uxth_s helper_sve_uxth_s_aarch64eb
 #define helper_sve_uxtw_d helper_sve_uxtw_d_aarch64eb
+#define helper_sve_uzp_b helper_sve_uzp_b_aarch64eb
+#define helper_sve_uzp_d helper_sve_uzp_d_aarch64eb
+#define helper_sve_uzp_h helper_sve_uzp_h_aarch64eb
+#define helper_sve_uzp_s helper_sve_uzp_s_aarch64eb
 #define helper_sve_uzp_p helper_sve_uzp_p_aarch64eb
+#define helper_sve_zip_b helper_sve_zip_b_aarch64eb
+#define helper_sve_zip_d helper_sve_zip_d_aarch64eb
+#define helper_sve_zip_h helper_sve_zip_h_aarch64eb
+#define helper_sve_zip_s helper_sve_zip_s_aarch64eb
 #define helper_sve_zip_p helper_sve_zip_p_aarch64eb
 #define helper_udiv64 helper_udiv64_aarch64eb
 #define helper_vfp_cmpd_a64 helper_vfp_cmpd_a64_aarch64eb
diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index 76ef59f1..dd2a1f93 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -3518,6 +3518,10 @@ aarch64_symbols = (
     'helper_sve_tbl_d',
     'helper_sve_tbl_h',
     'helper_sve_tbl_s',
+    'helper_sve_trn_b',
+    'helper_sve_trn_d',
+    'helper_sve_trn_h',
+    'helper_sve_trn_s',
     'helper_sve_trn_p',
     'helper_sve_uabd_zpzz_b',
     'helper_sve_uabd_zpzz_d',
@@ -3563,7 +3567,15 @@ aarch64_symbols = (
     'helper_sve_uxth_d',
     'helper_sve_uxth_s',
     'helper_sve_uxtw_d',
+    'helper_sve_uzp_b',
+    'helper_sve_uzp_d',
+    'helper_sve_uzp_h',
+    'helper_sve_uzp_s',
     'helper_sve_uzp_p',
+    'helper_sve_zip_b',
+    'helper_sve_zip_d',
+    'helper_sve_zip_h',
+    'helper_sve_zip_s',
     'helper_sve_zip_p',
     'helper_udiv64',
     'helper_vfp_cmpd_a64',
diff --git a/qemu/target/arm/helper-sve.h b/qemu/target/arm/helper-sve.h
index ff958fce..bab20345 100644
--- a/qemu/target/arm/helper-sve.h
+++ b/qemu/target/arm/helper-sve.h
@@ -445,6 +445,21 @@ DEF_HELPER_FLAGS_4(sve_trn_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve_rev_p, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve_punpk_p, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(sve_zip_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_zip_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_zip_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_zip_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_uzp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uzp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uzp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uzp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_trn_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_trn_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_trn_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_trn_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve_and_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_bic_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_eor_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/qemu/target/arm/sve.decode b/qemu/target/arm/sve.decode
index ead07ad9..7b55b1e7 100644
--- a/qemu/target/arm/sve.decode
+++ b/qemu/target/arm/sve.decode
@@ -414,6 +414,16 @@ REV_p           00000101 .. 11 0100 010 000 0 .... 0 ....       @pd_pn
 PUNPKLO         00000101 00 11 0000 010 000 0 .... 0 ....       @pd_pn_e0
 PUNPKHI         00000101 00 11 0001 010 000 0 .... 0 ....       @pd_pn_e0
 
+### SVE Permute - Interleaving Group
+
+# SVE permute vector elements
+ZIP1_z          00000101 .. 1 ..... 011 000 ..... .....         @rd_rn_rm
+ZIP2_z          00000101 .. 1 ..... 011 001 ..... .....         @rd_rn_rm
+UZP1_z          00000101 .. 1 ..... 011 010 ..... .....         @rd_rn_rm
+UZP2_z          00000101 .. 1 ..... 011 011 ..... .....         @rd_rn_rm
+TRN1_z          00000101 .. 1 ..... 011 100 ..... .....         @rd_rn_rm
+TRN2_z          00000101 .. 1 ..... 011 101 ..... .....         @rd_rn_rm
+
 ### SVE Predicate Logical Operations Group
 
 # SVE predicate logical operations
diff --git a/qemu/target/arm/sve_helper.c b/qemu/target/arm/sve_helper.c
index 3188c027..ccc77a7a 100644
--- a/qemu/target/arm/sve_helper.c
+++ b/qemu/target/arm/sve_helper.c
@@ -1963,3 +1963,75 @@ void HELPER(sve_punpk_p)(void *vd, void *vn, uint32_t pred_desc)
         }
     }
 }
+
+#define DO_ZIP(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc)       \
+{                                                                    \
+    intptr_t oprsz = simd_oprsz(desc);                               \
+    intptr_t i, oprsz_2 = oprsz / 2;                                 \
+    ARMVectorReg tmp_n, tmp_m;                                       \
+    /* Each input element yields two output elements, so vd is       \
+       written ahead of where vn/vm are read; if an input overlaps   \
+    if (unlikely((vn - vd) < (uintptr_t)oprsz)) {                    \
+        vn = memcpy(&tmp_n, vn, oprsz_2);                            \
+    }                                                                \
+    if (unlikely((vm - vd) < (uintptr_t)oprsz)) {                    \
+        vm = memcpy(&tmp_m, vm, oprsz_2);                            \
+    }                                                                \
+    for (i = 0; i < oprsz_2; i += sizeof(TYPE)) {                    \
+        *(TYPE *)(vd + H(2 * i + 0)) = *(TYPE *)(vn + H(i));         \
+        *(TYPE *)(vd + H(2 * i + sizeof(TYPE))) = *(TYPE *)(vm + H(i)); \
+    }                                                                \
+}
+
+DO_ZIP(sve_zip_b, uint8_t, H1)
+DO_ZIP(sve_zip_h, uint16_t, H1_2)
+DO_ZIP(sve_zip_s, uint32_t, H1_4)
+DO_ZIP(sve_zip_d, uint64_t, )
+
+#define DO_UZP(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc)         \
+{                                                                      \
+    intptr_t oprsz = simd_oprsz(desc);                                 \
+    intptr_t oprsz_2 = oprsz / 2;                                      \
+    intptr_t odd_ofs = simd_data(desc);                                \
+    intptr_t i;                                                        \
+    ARMVectorReg tmp_m;                                                \
+    if (unlikely((vm - vd) < (uintptr_t)oprsz)) {                      \
+        vm = memcpy(&tmp_m, vm, oprsz);                                \
+    }                                                                  \
+    for (i = 0; i < oprsz_2; i += sizeof(TYPE)) {                      \
+        *(TYPE *)(vd + H(i)) = *(TYPE *)(vn + H(2 * i + odd_ofs));     \
+    }                                                                  \
+    for (i = 0; i < oprsz_2; i += sizeof(TYPE)) {                      \
+        *(TYPE *)(vd + H(oprsz_2 + i)) = *(TYPE *)(vm + H(2 * i + odd_ofs)); \
+    }                                                                  \
+}
+
+DO_UZP(sve_uzp_b, uint8_t, H1)
+DO_UZP(sve_uzp_h, uint16_t, H1_2)
+DO_UZP(sve_uzp_s, uint32_t, H1_4)
+DO_UZP(sve_uzp_d, uint64_t, )
+
+#define DO_TRN(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc)         \
+{                                                                      \
+    intptr_t oprsz = simd_oprsz(desc);                                 \
+    intptr_t odd_ofs = simd_data(desc);                                \
+    intptr_t i;                                                        \
+    for (i = 0; i < oprsz; i += 2 * sizeof(TYPE)) {                    \
+        TYPE ae = *(TYPE *)(vn + H(i + odd_ofs));                      \
+        TYPE be = *(TYPE *)(vm + H(i + odd_ofs));                      \
+        *(TYPE *)(vd + H(i + 0)) = ae;                                 \
+        *(TYPE *)(vd + H(i + sizeof(TYPE))) = be;                      \
+    }                                                                  \
+}
+
+DO_TRN(sve_trn_b, uint8_t, H1)
+DO_TRN(sve_trn_h, uint16_t, H1_2)
+DO_TRN(sve_trn_s, uint32_t, H1_4)
+DO_TRN(sve_trn_d, uint64_t, )
+
+#undef DO_ZIP
+#undef DO_UZP
+#undef DO_TRN
diff --git a/qemu/target/arm/translate-sve.c b/qemu/target/arm/translate-sve.c
index f0f810cb..588d135d 100644
--- a/qemu/target/arm/translate-sve.c
+++ b/qemu/target/arm/translate-sve.c
@@ -2293,6 +2293,83 @@ static bool trans_PUNPKHI(DisasContext *s, arg_PUNPKHI *a, uint32_t insn)
     return do_perm_pred2(s, a, 1, gen_helper_sve_punpk_p);
 }
 
+/*
+ *** SVE Permute - Interleaving Group
+ */
+
+static bool do_zip(DisasContext *s, arg_rrr_esz *a, bool high)
+{
+    static gen_helper_gvec_3 * const fns[4] = {
+        gen_helper_sve_zip_b, gen_helper_sve_zip_h,
+        gen_helper_sve_zip_s, gen_helper_sve_zip_d,
+    };
+
+    if (sve_access_check(s)) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        unsigned vsz = vec_full_reg_size(s);
+        unsigned high_ofs = high ? vsz / 2 : 0;
+        tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd),
+                           vec_full_reg_offset(s, a->rn) + high_ofs,
+                           vec_full_reg_offset(s, a->rm) + high_ofs,
+                           vsz, vsz, 0, fns[a->esz]);
+    }
+    return true;
+}
+
+static bool do_zzz_data_ool(DisasContext *s, arg_rrr_esz *a, int data,
+                            gen_helper_gvec_3 *fn)
+{
+    if (sve_access_check(s)) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd),
+                           vec_full_reg_offset(s, a->rn),
+                           vec_full_reg_offset(s, a->rm),
+                           vsz, vsz, data, fn);
+    }
+    return true;
+}
+
+static bool trans_ZIP1_z(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
+{
+    return do_zip(s, a, false);
+}
+
+static bool trans_ZIP2_z(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
+{
+    return do_zip(s, a, true);
+}
+
+static gen_helper_gvec_3 * const uzp_fns[4] = {
+    gen_helper_sve_uzp_b, gen_helper_sve_uzp_h,
+    gen_helper_sve_uzp_s, gen_helper_sve_uzp_d,
+};
+
+static bool trans_UZP1_z(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
+{
+    return do_zzz_data_ool(s, a, 0, uzp_fns[a->esz]);
+}
+
+static bool trans_UZP2_z(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
+{
+    return do_zzz_data_ool(s, a, 1 << a->esz, uzp_fns[a->esz]);
+}
+
+static gen_helper_gvec_3 * const trn_fns[4] = {
+    gen_helper_sve_trn_b, gen_helper_sve_trn_h,
+    gen_helper_sve_trn_s, gen_helper_sve_trn_d,
+};
+
+static bool trans_TRN1_z(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
+{
+    return do_zzz_data_ool(s, a, 0, trn_fns[a->esz]);
+}
+
+static bool trans_TRN2_z(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
+{
+    return do_zzz_data_ool(s, a, 1 << a->esz, trn_fns[a->esz]);
+}
+
 /*
  *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
  */