From 03c8d3ff009ece4faffa799a7d49c4c8d13bf1c2 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Mon, 8 Mar 2021 11:29:36 -0500
Subject: [PATCH] target/arm: Speed up aarch64 TBL/TBX

Always perform one call instead of two for 16-byte operands.
Use byte loads/stores directly into the vector register file
instead of extractions and deposits to a 64-bit local variable.

In order to easily receive pointers into the vector register file,
convert the helper to the gvec out-of-line signature. Move the
helper into vec_helper.c, where it can make use of H1 and clear_tail.

Backports 519183d3fee58e52f7b51cf146c9dc9edc565059
---
 qemu/aarch64.h                  |  2 +-
 qemu/aarch64eb.h                |  2 +-
 qemu/header_gen.py              |  2 +-
 qemu/target/arm/helper-a64.c    | 32 ------------------
 qemu/target/arm/helper-a64.h    |  2 +-
 qemu/target/arm/translate-a64.c | 58 ++++-----------------------------
 qemu/target/arm/vec_helper.c    | 48 +++++++++++++++++++++++++++
 7 files changed, 59 insertions(+), 87 deletions(-)

diff --git a/qemu/aarch64.h b/qemu/aarch64.h
index a23e4bdc..ae03adeb 100644
--- a/qemu/aarch64.h
+++ b/qemu/aarch64.h
@@ -3858,7 +3858,7 @@
 #define helper_rsqrtsf_f32 helper_rsqrtsf_f32_aarch64
 #define helper_rsqrtsf_f64 helper_rsqrtsf_f64_aarch64
 #define helper_sdiv64 helper_sdiv64_aarch64
-#define helper_simd_tbl helper_simd_tbl_aarch64
+#define helper_simd_tblx helper_simd_tblx_aarch64
 #define helper_sqrt_f16 helper_sqrt_f16_aarch64
 #define helper_st2g helper_st2g_aarch64
 #define helper_st2g_parallel helper_st2g_parallel_aarch64
diff --git a/qemu/aarch64eb.h b/qemu/aarch64eb.h
index ef5be1f4..669b3b8b 100644
--- a/qemu/aarch64eb.h
+++ b/qemu/aarch64eb.h
@@ -3858,7 +3858,7 @@
 #define helper_rsqrtsf_f32 helper_rsqrtsf_f32_aarch64eb
 #define helper_rsqrtsf_f64 helper_rsqrtsf_f64_aarch64eb
 #define helper_sdiv64 helper_sdiv64_aarch64eb
-#define helper_simd_tbl helper_simd_tbl_aarch64eb
+#define helper_simd_tblx helper_simd_tblx_aarch64eb
 #define helper_sqrt_f16 helper_sqrt_f16_aarch64eb
 #define helper_st2g helper_st2g_aarch64eb
 #define helper_st2g_parallel helper_st2g_parallel_aarch64eb
diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index 81011ff3..929081db 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -4007,7 +4007,7 @@ aarch64_symbols = (
     'helper_rsqrtsf_f32',
     'helper_rsqrtsf_f64',
     'helper_sdiv64',
-    'helper_simd_tbl',
+    'helper_simd_tblx',
     'helper_sqrt_f16',
     'helper_st2g',
     'helper_st2g_parallel',
diff --git a/qemu/target/arm/helper-a64.c b/qemu/target/arm/helper-a64.c
index 6af845fb..af039b50 100644
--- a/qemu/target/arm/helper-a64.c
+++ b/qemu/target/arm/helper-a64.c
@@ -178,38 +178,6 @@ float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
     return float64_mul(a, b, fpst);
 }
 
-uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
-                          uint32_t rn, uint32_t numregs)
-{
-    /* Helper function for SIMD TBL and TBX. We have to do the table
-     * lookup part for the 64 bits worth of indices we're passed in.
-     * result is the initial results vector (either zeroes for TBL
-     * or some guest values for TBX), rn the register number where
-     * the table starts, and numregs the number of registers in the table.
-     * We return the results of the lookups.
-     */
-    int shift;
-
-    for (shift = 0; shift < 64; shift += 8) {
-        int index = extract64(indices, shift, 8);
-        if (index < 16 * numregs) {
-            /* Convert index (a byte offset into the virtual table
-             * which is a series of 128-bit vectors concatenated)
-             * into the correct register element plus a bit offset
-             * into that element, bearing in mind that the table
-             * can wrap around from V31 to V0.
-             */
-            int elt = (rn * 2 + (index >> 3)) % 64;
-            int bitidx = (index & 7) * 8;
-            uint64_t *q = aa64_vfp_qreg(env, elt >> 1);
-            uint64_t val = extract64(q[elt & 1], bitidx, 8);
-
-            result = deposit64(result, shift, 8, val);
-        }
-    }
-    return result;
-}
-
 /* 64bit/double versions of the neon float compare functions */
 uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
 {
diff --git a/qemu/target/arm/helper-a64.h b/qemu/target/arm/helper-a64.h
index 5f7a30fb..9a93f5fb 100644
--- a/qemu/target/arm/helper-a64.h
+++ b/qemu/target/arm/helper-a64.h
@@ -28,7 +28,7 @@ DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr)
 DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr)
 DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr)
 DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr)
-DEF_HELPER_FLAGS_5(simd_tbl, TCG_CALL_NO_RWG_SE, i64, env, i64, i64, i32, i32)
+DEF_HELPER_FLAGS_4(simd_tblx, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
 DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
 DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
diff --git a/qemu/target/arm/translate-a64.c b/qemu/target/arm/translate-a64.c
index 4311a132..b6f4e9fa 100644
--- a/qemu/target/arm/translate-a64.c
+++ b/qemu/target/arm/translate-a64.c
@@ -7793,10 +7793,8 @@ static void disas_simd_tb(DisasContext *s, uint32_t insn)
     int rm = extract32(insn, 16, 5);
     int rn = extract32(insn, 5, 5);
     int rd = extract32(insn, 0, 5);
-    int is_tblx = extract32(insn, 12, 1);
-    int len = extract32(insn, 13, 2);
-    TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
-    TCGv_i32 tcg_regno, tcg_numregs;
+    int is_tbx = extract32(insn, 12, 1);
+    int len = (extract32(insn, 13, 2) + 1) * 16;
 
     if (op2 != 0) {
         unallocated_encoding(s);
@@ -7807,53 +7805,11 @@
         return;
     }
 
-    /* This does a table lookup: for every byte element in the input
-     * we index into a table formed from up to four vector registers,
-     * and then the output is the result of the lookups. Our helper
-     * function does the lookup operation for a single 64 bit part of
-     * the input.
-     */
-    tcg_resl = tcg_temp_new_i64(tcg_ctx);
-    tcg_resh = NULL;
-
-    if (is_tblx) {
-        read_vec_element(s, tcg_resl, rd, 0, MO_64);
-    } else {
-        tcg_gen_movi_i64(tcg_ctx, tcg_resl, 0);
-    }
-
-    if (is_q) {
-        tcg_resh = tcg_temp_new_i64(tcg_ctx);
-        if (is_tblx) {
-            read_vec_element(s, tcg_resh, rd, 1, MO_64);
-        } else {
-            tcg_gen_movi_i64(tcg_ctx, tcg_resh, 0);
-        }
-    }
-
-    tcg_idx = tcg_temp_new_i64(tcg_ctx);
-    tcg_regno = tcg_const_i32(tcg_ctx, rn);
-    tcg_numregs = tcg_const_i32(tcg_ctx, len + 1);
-    read_vec_element(s, tcg_idx, rm, 0, MO_64);
-    gen_helper_simd_tbl(tcg_ctx, tcg_resl, tcg_ctx->cpu_env, tcg_resl, tcg_idx,
-                        tcg_regno, tcg_numregs);
-    if (is_q) {
-        read_vec_element(s, tcg_idx, rm, 1, MO_64);
-        gen_helper_simd_tbl(tcg_ctx, tcg_resh, tcg_ctx->cpu_env, tcg_resh, tcg_idx,
-                            tcg_regno, tcg_numregs);
-    }
-    tcg_temp_free_i64(tcg_ctx, tcg_idx);
-    tcg_temp_free_i32(tcg_ctx, tcg_regno);
-    tcg_temp_free_i32(tcg_ctx, tcg_numregs);
-
-    write_vec_element(s, tcg_resl, rd, 0, MO_64);
-    tcg_temp_free_i64(tcg_ctx, tcg_resl);
-
-    if (is_q) {
-        write_vec_element(s, tcg_resh, rd, 1, MO_64);
-        tcg_temp_free_i64(tcg_ctx, tcg_resh);
-    }
-    clear_vec_high(s, is_q, rd);
+    tcg_gen_gvec_2_ptr(tcg_ctx, vec_full_reg_offset(s, rd),
+                       vec_full_reg_offset(s, rm), tcg_ctx->cpu_env,
+                       is_q ? 16 : 8, vec_full_reg_size(s),
+                       (len << 6) | (is_tbx << 5) | rn,
+                       gen_helper_simd_tblx);
 }
 
 /* ZIP/UZP/TRN
diff --git a/qemu/target/arm/vec_helper.c b/qemu/target/arm/vec_helper.c
index c728e372..96344b22 100644
--- a/qemu/target/arm/vec_helper.c
+++ b/qemu/target/arm/vec_helper.c
@@ -1938,3 +1938,51 @@ DO_VRINT_RMODE(gvec_vrint_rm_h, helper_rinth, uint16_t)
 DO_VRINT_RMODE(gvec_vrint_rm_s, helper_rints, uint32_t)
 
 #undef DO_VRINT_RMODE
+
+#ifdef TARGET_AARCH64
+void HELPER(simd_tblx)(void *vd, void *vm, void *venv, uint32_t desc)
+{
+    const uint8_t *indices = vm;
+    CPUARMState *env = venv;
+    size_t oprsz = simd_oprsz(desc);
+    uint32_t rn = extract32(desc, SIMD_DATA_SHIFT, 5);
+    bool is_tbx = extract32(desc, SIMD_DATA_SHIFT + 5, 1);
+    uint32_t table_len = desc >> (SIMD_DATA_SHIFT + 6);
+    union {
+        uint8_t b[16];
+        uint64_t d[2];
+    } result;
+
+    /*
+     * We must construct the final result in a temp, lest the output
+     * overlaps the input table. For TBL, begin with zero; for TBX,
+     * begin with the original register contents. Note that we always
+     * copy 16 bytes here to avoid an extra branch; clearing the high
+     * bits of the register for oprsz == 8 is handled below.
+     */
+    if (is_tbx) {
+        memcpy(&result, vd, 16);
+    } else {
+        memset(&result, 0, 16);
+    }
+
+    for (size_t i = 0; i < oprsz; ++i) {
+        uint32_t index = indices[H1(i)];
+
+        if (index < table_len) {
+            /*
+             * Convert index (a byte offset into the virtual table
+             * which is a series of 128-bit vectors concatenated)
+             * into the correct register element, bearing in mind
+             * that the table can wrap around from V31 to V0.
+             */
+            const uint8_t *table = (const uint8_t *)
+                aa64_vfp_qreg(env, (rn + (index >> 4)) % 32);
+            result.b[H1(i)] = table[H1(index % 16)];
+        }
+    }
+
+    memcpy(vd, &result, 16);
+    clear_tail(vd, oprsz, simd_maxsz(desc));
+}
+#endif
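
Note (commentary, not part of the patch): disas_simd_tb() now folds rn, is_tbx and the
byte length of the table into the gvec descriptor's data field, and helper_simd_tblx()
unpacks the same three values with extract32() and a shift. The standalone sketch below
illustrates that round-trip; it assumes SIMD_DATA_SHIFT == 10 (its value in QEMU's
include/tcg/tcg-gvec-desc.h, where the low descriptor bits encode oprsz/maxsz) and uses
a simplified local extract32() in place of QEMU's.

/*
 * Illustrative sketch only; not part of the patch. SIMD_DATA_SHIFT and
 * extract32() below are simplified stand-ins for QEMU's definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SIMD_DATA_SHIFT 10   /* assumed: bits 0..9 of desc hold oprsz/maxsz */

static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

int main(void)
{
    /* Example: TBX with a three-register table starting at V30. */
    uint32_t rn = 30, is_tbx = 1, len = 3 * 16;

    /* disas_simd_tb() packs the data field; tcg_gen_gvec_2_ptr() places
     * it above SIMD_DATA_SHIFT inside the descriptor. */
    uint32_t desc = ((len << 6) | (is_tbx << 5) | rn) << SIMD_DATA_SHIFT;

    /* helper_simd_tblx() recovers the same three fields. */
    uint32_t rn_out        = extract32(desc, SIMD_DATA_SHIFT, 5);
    bool     is_tbx_out    = extract32(desc, SIMD_DATA_SHIFT + 5, 1);
    uint32_t table_len_out = desc >> (SIMD_DATA_SHIFT + 6);

    /* Prints: rn=30 is_tbx=1 table_len=48 */
    printf("rn=%u is_tbx=%d table_len=%u\n", rn_out, is_tbx_out, table_len_out);
    return 0;
}

For a three-register table starting at V30 the helper thus recovers rn = 30, is_tbx = 1
and table_len = 48. Any index of 48 or above is out of range, so the corresponding result
byte stays 0 for TBL or keeps the old destination byte for TBX, while the
(rn + (index >> 4)) % 32 step lets the table wrap from V31 back to V0.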