From d2d8e2fc33eeced5ab5a8975b36e6ce3267c183d Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 20 May 2018 00:25:00 -0400
Subject: [PATCH] target/arm: Introduce translate-a64.h

Move some stuff that will be common to both translate-a64.c
and translate-sve.c.

Backports commit 8c71baedb8055beaa681823206ee3a74f9f8649a from qemu
---
 qemu/aarch64.h                  |  11 +++
 qemu/aarch64eb.h                |  11 +++
 qemu/header_gen.py              |  13 +++-
 qemu/target/arm/translate-a64.c | 116 +++++--------------------
 qemu/target/arm/translate-a64.h | 119 ++++++++++++++++++++++++++++++++
 5 files changed, 169 insertions(+), 101 deletions(-)
 create mode 100644 qemu/target/arm/translate-a64.h

diff --git a/qemu/aarch64.h b/qemu/aarch64.h
index c696310c..2ae1cf2e 100644
--- a/qemu/aarch64.h
+++ b/qemu/aarch64.h
@@ -3214,6 +3214,8 @@
 #define arm_reset_cpu arm_reset_cpu_aarch64
 #define arm_set_cpu_off arm_set_cpu_off_aarch64
 #define arm_set_cpu_on arm_set_cpu_on_aarch64
+#define cpu_reg cpu_reg_aarch64
+#define cpu_reg_sp cpu_reg_sp_aarch64
 #define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64
 #define helper_advsimd_acge_f16 helper_advsimd_acge_f16_aarch64
 #define helper_advsimd_acgt_f16 helper_advsimd_acgt_f16_aarch64
@@ -3278,4 +3280,13 @@
 #define helper_vfp_cmps_a64 helper_vfp_cmps_a64_aarch64
 #define helper_vfp_mulxd helper_vfp_mulxd_aarch64
 #define helper_vfp_mulxs helper_vfp_mulxs_aarch64
+#define logic_imm_decode_wmask logic_imm_decode_wmask_aarch64
+#define new_tmp_a64 new_tmp_a64_aarch64
+#define new_tmp_a64_zero new_tmp_a64_zero_aarch64
+#define read_cpu_reg read_cpu_reg_aarch64
+#define read_cpu_reg_sp read_cpu_reg_sp_aarch64
+#define sve_access_check sve_access_check_aarch64
+#define unallocated_encoding unallocated_encoding_aarch64
+#define vfp_expand_imm vfp_expand_imm_aarch64
+#define write_fp_dreg write_fp_dreg_aarch64
 #endif
diff --git a/qemu/aarch64eb.h b/qemu/aarch64eb.h
index 8a6da386..392c9767 100644
--- a/qemu/aarch64eb.h
+++ b/qemu/aarch64eb.h
@@ -3214,6 +3214,8 @@
 #define arm_reset_cpu arm_reset_cpu_aarch64eb
 #define arm_set_cpu_off arm_set_cpu_off_aarch64eb
 #define arm_set_cpu_on arm_set_cpu_on_aarch64eb
+#define cpu_reg cpu_reg_aarch64eb
+#define cpu_reg_sp cpu_reg_sp_aarch64eb
 #define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64eb
 #define helper_advsimd_acge_f16 helper_advsimd_acge_f16_aarch64eb
 #define helper_advsimd_acgt_f16 helper_advsimd_acgt_f16_aarch64eb
@@ -3278,4 +3280,13 @@
 #define helper_vfp_cmps_a64 helper_vfp_cmps_a64_aarch64eb
 #define helper_vfp_mulxd helper_vfp_mulxd_aarch64eb
 #define helper_vfp_mulxs helper_vfp_mulxs_aarch64eb
+#define logic_imm_decode_wmask logic_imm_decode_wmask_aarch64eb
+#define new_tmp_a64 new_tmp_a64_aarch64eb
+#define new_tmp_a64_zero new_tmp_a64_zero_aarch64eb
+#define read_cpu_reg read_cpu_reg_aarch64eb
+#define read_cpu_reg_sp read_cpu_reg_sp_aarch64eb
+#define sve_access_check sve_access_check_aarch64eb
+#define unallocated_encoding unallocated_encoding_aarch64eb
+#define vfp_expand_imm vfp_expand_imm_aarch64eb
+#define write_fp_dreg write_fp_dreg_aarch64eb
 #endif
diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index 2aa447ff..8e79d59d 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -3235,6 +3235,8 @@ aarch64_symbols = (
     'arm_reset_cpu',
     'arm_set_cpu_off',
     'arm_set_cpu_on',
+    'cpu_reg',
+    'cpu_reg_sp',
     'gen_a64_set_pc_im',
     'helper_advsimd_acge_f16',
     'helper_advsimd_acgt_f16',
@@ -3298,7 +3300,16 @@ aarch64_symbols = (
     'helper_vfp_cmpes_a64',
     'helper_vfp_cmps_a64',
     'helper_vfp_mulxd',
-    'helper_vfp_mulxs'
+    'helper_vfp_mulxs',
+    'logic_imm_decode_wmask',
+    'new_tmp_a64',
+    'new_tmp_a64_zero',
+    'read_cpu_reg',
+    'read_cpu_reg_sp',
+    'sve_access_check',
+    'unallocated_encoding',
+    'vfp_expand_imm',
+    'write_fp_dreg',
 )
 
 mips_symbols = (
diff --git a/qemu/target/arm/translate-a64.c b/qemu/target/arm/translate-a64.c
index ef4fab92..75bc21e9 100644
--- a/qemu/target/arm/translate-a64.c
+++ b/qemu/target/arm/translate-a64.c
@@ -34,13 +34,13 @@
 #include "exec/semihost.h"
 #include "exec/gen-icount.h"
 
+#include "translate-a64.h"
+
 #ifdef CONFIG_USER_ONLY
 static TCGv_i64 cpu_exclusive_test;
 static TCGv_i32 cpu_exclusive_info;
 #endif
 
-static TCGv_i64 cpu_reg(DisasContext *s, int reg);
-
 static const char *regnames[] = {
     "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
     "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
@@ -83,13 +83,6 @@ typedef void CryptoThreeOpIntFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void AtomicThreeOpFn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);
 
-/* Note that the gvec expanders operate on offsets + sizes. */
-typedef void GVecGen2Fn(TCGContext *, unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
-typedef void GVecGen2iFn(TCGContext *, unsigned, uint32_t, uint32_t, int64_t,
-                         uint32_t, uint32_t);
-typedef void GVecGen3Fn(TCGContext *, unsigned, uint32_t, uint32_t,
-                        uint32_t, uint32_t, uint32_t);
-
 /* initialize TCG globals. */
 void a64_translate_init(struct uc_struct *uc)
 {
@@ -426,22 +419,13 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
     }
 }
 
-static void unallocated_encoding(DisasContext *s)
+void unallocated_encoding(DisasContext *s)
 {
     /* Unallocated and reserved encodings are uncategorized */
     gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                        default_exception_el(s));
 }
 
-#define unsupported_encoding(s, insn)                                    \
-    do {                                                                 \
-        qemu_log_mask(LOG_UNIMP,                                         \
-                      "%s:%d: unsupported instruction encoding 0x%08x "  \
-                      "at pc=%016" PRIx64 "\n",                          \
-                      __FILE__, __LINE__, insn, s->pc - 4);              \
-        unallocated_encoding(s);                                         \
-    } while (0)
-
 static void init_tmp_a64_array(DisasContext *s)
 {
 #ifdef CONFIG_DEBUG_TCG
@@ -460,14 +444,14 @@ static void free_tmp_a64(DisasContext *s)
 {
     init_tmp_a64_array(s);
 }
-static TCGv_i64 new_tmp_a64(DisasContext *s)
+TCGv_i64 new_tmp_a64(DisasContext *s)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     assert(s->tmp_a64_count < TMP_A64_MAX);
     return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64(tcg_ctx);
 }
 
-static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
+TCGv_i64 new_tmp_a64_zero(DisasContext *s)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     TCGv_i64 t = new_tmp_a64(s);
@@ -490,7 +474,7 @@ static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
 * to tcg_ctx->cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
-static TCGv_i64 cpu_reg(DisasContext *s, int reg)
+TCGv_i64 cpu_reg(DisasContext *s, int reg)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     if (reg == 31) {
@@ -501,7 +485,7 @@ static TCGv_i64 cpu_reg(DisasContext *s, int reg)
 }
 
 /* register access for when 31 == SP */
-static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
+TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     return tcg_ctx->cpu_X[reg];
@@ -511,7 +495,7 @@ static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
-static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
+TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     TCGv_i64 v = new_tmp_a64(s);
@@ -527,7 +511,7 @@ static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
     return v;
 }
 
-static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
+TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     TCGv_i64 v = new_tmp_a64(s);
@@ -539,74 +523,6 @@ static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
     return v;
 }
 
-/* We should have at some point before trying to access an FP register
- * done the necessary access check, so assert that
- * (a) we did the check and
- * (b) we didn't then just plough ahead anyway if it failed.
- * Print the instruction pattern in the abort message so we can figure
- * out what we need to fix if a user encounters this problem in the wild.
- */
-static inline void assert_fp_access_checked(DisasContext *s)
-{
-#ifdef CONFIG_DEBUG_TCG
-    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
-        fprintf(stderr, "target-arm: FP access check missing for "
-                "instruction 0x%08x\n", s->insn);
-        abort();
-    }
-#endif
-}
-
-/* Return the offset into CPUARMState of an element of specified
- * size, 'element' places in from the least significant end of
- * the FP/vector register Qn.
- */
-static inline int vec_reg_offset(DisasContext *s, int regno,
-                                 int element, TCGMemOp size)
-{
-    int offs = 0;
-#ifdef HOST_WORDS_BIGENDIAN
-    /* This is complicated slightly because vfp.zregs[n].d[0] is
-     * still the low half and vfp.zregs[n].d[1] the high half
-     * of the 128 bit vector, even on big endian systems.
-     * Calculate the offset assuming a fully bigendian 128 bits,
-     * then XOR to account for the order of the two 64 bit halves.
-     */
-    offs += (16 - ((element + 1) * (1 << size)));
-    offs ^= 8;
-#else
-    offs += element * (1 << size);
-#endif
-    offs += offsetof(CPUARMState, vfp.zregs[regno]);
-    assert_fp_access_checked(s);
-    return offs;
-}
-
-/* Return the offset info CPUARMState of the "whole" vector register Qn. */
-static inline int vec_full_reg_offset(DisasContext *s, int regno)
-{
-    assert_fp_access_checked(s);
-    return offsetof(CPUARMState, vfp.zregs[regno]);
-}
-
-/* Return a newly allocated pointer to the vector register. */
-static TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-
-    TCGv_ptr ret = tcg_temp_new_ptr(tcg_ctx);
-    tcg_gen_addi_ptr(tcg_ctx, ret, tcg_ctx->cpu_env, vec_full_reg_offset(s, regno));
-    return ret;
-}
-
-/* Return the byte size of the "whole" vector register, VL / 8. */
-static inline int vec_full_reg_size(DisasContext *s)
-{
-    /* FIXME SVE: We should put the composite ZCR_EL* value into tb->flags.
-       In the meantime this is just the AdvSIMD length of 128. */
-    return 128 / 8;
-}
-
 /* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
@@ -675,7 +591,7 @@ static void clear_vec_high(DisasContext *s, bool is_q, int rd)
     }
 }
 
-static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
+void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     unsigned ofs = fp_reg_offset(s, reg, MO_64);
@@ -694,7 +610,7 @@ static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
     tcg_temp_free_i64(tcg_ctx, tmp);
 }
 
-static TCGv_ptr get_fpstatus_ptr(TCGContext *tcg_ctx, bool is_f16)
+TCGv_ptr get_fpstatus_ptr(TCGContext *tcg_ctx, bool is_f16)
 {
     TCGv_ptr statusptr = tcg_temp_new_ptr(tcg_ctx);
     int offset;
@@ -1308,14 +1224,14 @@ static inline bool fp_access_check(DisasContext *s)
 /* Check that SVE access is enabled. If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 */
-static inline bool sve_access_check(DisasContext *s)
+bool sve_access_check(DisasContext *s)
 {
     if (s->sve_excp_el) {
         gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
                            s->sve_excp_el);
         return false;
     }
-    return true;
+    return fp_access_check(s);
 }
 
 /*
@@ -3487,8 +3403,8 @@ static inline uint64_t bitmask64(unsigned int length)
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
-static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
-                                   unsigned int imms, unsigned int immr)
+bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
+                            unsigned int imms, unsigned int immr)
 {
     uint64_t mask;
     unsigned e, levels, s, r;
@@ -5754,7 +5670,7 @@ static void disas_fp_3src(DisasContext *s, uint32_t insn)
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
-static uint64_t vfp_expand_imm(int size, uint8_t imm8)
+uint64_t vfp_expand_imm(int size, uint8_t imm8)
 {
     uint64_t imm;
 
diff --git a/qemu/target/arm/translate-a64.h b/qemu/target/arm/translate-a64.h
new file mode 100644
index 00000000..407ae4de
--- /dev/null
+++ b/qemu/target/arm/translate-a64.h
@@ -0,0 +1,119 @@
+/*
+ * AArch64 translation, common definitions.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARM_TRANSLATE_A64_H
+#define TARGET_ARM_TRANSLATE_A64_H
+
+void unallocated_encoding(DisasContext *s);
+
+#define unsupported_encoding(s, insn)                                    \
+    do {                                                                 \
+        qemu_log_mask(LOG_UNIMP,                                         \
+                      "%s:%d: unsupported instruction encoding 0x%08x "  \
+                      "at pc=%016" PRIx64 "\n",                          \
+                      __FILE__, __LINE__, insn, s->pc - 4);              \
+        unallocated_encoding(s);                                         \
+    } while (0)
+
+TCGv_i64 new_tmp_a64(DisasContext *s);
+TCGv_i64 new_tmp_a64_zero(DisasContext *s);
+TCGv_i64 cpu_reg(DisasContext *s, int reg);
+TCGv_i64 cpu_reg_sp(DisasContext *s, int reg);
+TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf);
+TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf);
+void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
+TCGv_ptr get_fpstatus_ptr(TCGContext *, bool);
+bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
+                            unsigned int imms, unsigned int immr);
+uint64_t vfp_expand_imm(int size, uint8_t imm8);
+bool sve_access_check(DisasContext *s);
+
+/* We should have at some point before trying to access an FP register
+ * done the necessary access check, so assert that
+ * (a) we did the check and
+ * (b) we didn't then just plough ahead anyway if it failed.
+ * Print the instruction pattern in the abort message so we can figure
+ * out what we need to fix if a user encounters this problem in the wild.
+ */
+static inline void assert_fp_access_checked(DisasContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
+        fprintf(stderr, "target-arm: FP access check missing for "
+                "instruction 0x%08x\n", s->insn);
+        abort();
+    }
+#endif
+}
+
+/* Return the offset into CPUARMState of an element of specified
+ * size, 'element' places in from the least significant end of
+ * the FP/vector register Qn.
+ */
+static inline int vec_reg_offset(DisasContext *s, int regno,
+                                 int element, TCGMemOp size)
+{
+    int offs = 0;
+#ifdef HOST_WORDS_BIGENDIAN
+    /* This is complicated slightly because vfp.zregs[n].d[0] is
+     * still the low half and vfp.zregs[n].d[1] the high half
+     * of the 128 bit vector, even on big endian systems.
+     * Calculate the offset assuming a fully bigendian 128 bits,
+     * then XOR to account for the order of the two 64 bit halves.
+     */
+    offs += (16 - ((element + 1) * (1 << size)));
+    offs ^= 8;
+#else
+    offs += element * (1 << size);
+#endif
+    offs += offsetof(CPUARMState, vfp.zregs[regno]);
+    assert_fp_access_checked(s);
+    return offs;
+}
+
+/* Return the offset info CPUARMState of the "whole" vector register Qn. */
+static inline int vec_full_reg_offset(DisasContext *s, int regno)
+{
+    assert_fp_access_checked(s);
+    return offsetof(CPUARMState, vfp.zregs[regno]);
+}
+
+/* Return a newly allocated pointer to the vector register. */
+static inline TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv_ptr ret = tcg_temp_new_ptr(tcg_ctx);
+    tcg_gen_addi_ptr(tcg_ctx, ret, tcg_ctx->cpu_env, vec_full_reg_offset(s, regno));
+    return ret;
+}
+
+/* Return the byte size of the "whole" vector register, VL / 8. */
+static inline int vec_full_reg_size(DisasContext *s)
+{
+    return s->sve_len;
+}
+
+bool disas_sve(DisasContext *, uint32_t);
+
+/* Note that the gvec expanders operate on offsets + sizes. */
+typedef void GVecGen2Fn(TCGContext *, unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
+typedef void GVecGen2iFn(TCGContext *, unsigned, uint32_t, uint32_t, int64_t,
+                         uint32_t, uint32_t);
+typedef void GVecGen3Fn(TCGContext *, unsigned, uint32_t, uint32_t,
+                        uint32_t, uint32_t, uint32_t);
+
+#endif /* TARGET_ARM_TRANSLATE_A64_H */
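
Editor's note, not part of the backported patch: the point of exporting these helpers is that translate-sve.c can reuse the same access-check and register-offset plumbing as translate-a64.c. Below is a minimal sketch of what a consumer in translate-sve.c might look like under this fork's conventions; the wrapper name do_zzz_gvec_fn and its argument layout are invented for illustration, while the calls it makes (sve_access_check, vec_full_reg_offset, vec_full_reg_size) and the GVecGen3Fn typedef come from the translate-a64.h added above.

/* Hypothetical helper in translate-sve.c (illustration only).
 * Expands a three-operand, vector-wide SVE operation using a gvec
 * expander, after performing the SVE enable check.
 */
#include "translate-a64.h"

static bool do_zzz_gvec_fn(DisasContext *s, int esz,
                           int rd, int rn, int rm, GVecGen3Fn *gvec_fn)
{
    if (!sve_access_check(s)) {
        /* An exception has already been generated; the insn is handled. */
        return true;
    }
    /* Operate on whole Z registers via their CPUARMState offsets. */
    gvec_fn(s->uc->tcg_ctx, esz,
            vec_full_reg_offset(s, rd),
            vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm),
            vec_full_reg_size(s), vec_full_reg_size(s));
    return true;
}

This mirrors the general shape of the upstream SVE decoder, where each trans_* function first calls sve_access_check() and then hands full-vector offsets and the vector length to a gvec expander.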