target/arm: Handle TBI for sve scalar + int memory ops

We still need to handle TBI for user-only when MTE is inactive.

Backports commit 9473d0ecafcffc8b258892b1f9f18e037bdba958 from qemu
Authored by Richard Henderson on 2021-02-25 22:12:00 -05:00; committed by Lioncash
parent 586235d02d
commit 5698b7badb
6 changed files with 8 additions and 3 deletions
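
As the commit message notes, TBI still has to be applied to data addresses in user-only mode even when MTE is inactive: system-mode emulation can drop the tag byte via the softmmu TLB, but user-only translation has to strip it explicitly. The following self-contained C sketch (illustration only; clean_address_sketch is a made-up name, not a QEMU or Unicorn function) shows what that cleaning amounts to for a translation regime with two address ranges, where bits 63:56 are replaced by a sign extension of bit 55:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustration only -- not Unicorn/QEMU code.  With TBI enabled, bits
     * 63:56 of a data address are replaced by a sign extension of bit 55,
     * so a tagged pointer is translated as if the tag byte were absent.
     */
    static uint64_t clean_address_sketch(uint64_t addr, bool tbi_enabled)
    {
        if (!tbi_enabled) {
            return addr;                /* TBI off: use the address unchanged */
        }
        /* Replace bits 63:56 with a sign extension of bit 55. */
        uint64_t low56 = addr & 0x00ffffffffffffffull;
        return (addr & (1ull << 55)) ? (0xffull << 56) | low56 : low56;
    }

    int main(void)
    {
        uint64_t tagged = (0x5aull << 56) | 0x0000aabbccddeeffull;
        printf("%016llx -> %016llx\n",
               (unsigned long long)tagged,
               (unsigned long long)clean_address_sketch(tagged, true));
        return 0;
    }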

View file

@@ -3494,6 +3494,7 @@
 #define bif_op bif_op_aarch64
 #define bit_op bit_op_aarch64
 #define bsl_op bsl_op_aarch64
+#define clean_data_tbi clean_data_tbi_aarch64
 #define cpu_mmu_index cpu_mmu_index_aarch64
 #define cpu_reg cpu_reg_aarch64
 #define cpu_reg_sp cpu_reg_sp_aarch64

View file

@@ -3494,6 +3494,7 @@
 #define bif_op bif_op_aarch64eb
 #define bit_op bit_op_aarch64eb
 #define bsl_op bsl_op_aarch64eb
+#define clean_data_tbi clean_data_tbi_aarch64eb
 #define cpu_mmu_index cpu_mmu_index_aarch64eb
 #define cpu_reg cpu_reg_aarch64eb
 #define cpu_reg_sp cpu_reg_sp_aarch64eb

View file

@@ -3634,6 +3634,7 @@ aarch64_symbols = (
     'bif_op',
     'bit_op',
     'bsl_op',
+    'clean_data_tbi',
     'cpu_mmu_index',
     'cpu_reg',
     'cpu_reg_sp',

View file

@@ -351,7 +351,7 @@ static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
  * Always return a fresh temporary that we can increment independently
  * of the write-back address.
  */
-static TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
+TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
 {
     TCGv_i64 clean = new_tmp_a64(s);
 #ifdef CONFIG_USER_ONLY
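
Only the storage class changes here; the helper body is untouched. For reference, the upstream QEMU body reads roughly as below (a sketch from the corresponding QEMU sources, not verbatim; the Unicorn port also threads an explicit TCGContext through the tcg_gen_* calls): under CONFIG_USER_ONLY the top byte is stripped according to s->tbid, while in system mode the address passes through unchanged and the TLB discards the TBI bits instead.

    /* Roughly the upstream QEMU body of this helper (sketch, not verbatim). */
    TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
    {
        TCGv_i64 clean = new_tmp_a64(s);
    #ifdef CONFIG_USER_ONLY
        /* User-only: no TLB to drop the tag, so strip it at translation time. */
        gen_top_byte_ignore(s, clean, addr, s->tbid);
    #else
        /* System mode: keep the "dirty" address; the TLB discards TBI bits. */
        tcg_gen_mov_i64(clean, addr);
    #endif
        return clean;
    }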

View file

@@ -119,6 +119,7 @@ bool disas_sve(DisasContext *, uint32_t);
 void gen_gvec_rax1(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
 TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                         bool tag_checked, int log2_size);
 TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,

View file

@@ -4738,9 +4738,8 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
      * For e.g. LD4, there are not enough arguments to pass all 4
      * registers as pointers, so encode the regno into the data field.
      * For consistency, do this even for LD1.
-     * TODO: mte_n check here while callers are updated.
      */
-    if (mte_n && s->mte_active[0]) {
+    if (s->mte_active[0]) {
         int msz = dtype_msz(dtype);
 
         desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
@@ -4750,6 +4749,8 @@
         desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << msz);
         desc = FIELD_DP32(desc, MTEDESC, TSIZE, mte_n << msz);
         desc <<= SVE_MTEDESC_SHIFT;
+    } else {
+        addr = clean_data_tbi(s, addr);
     }
     desc = simd_desc(vsz, vsz, zt | desc);
     t_desc = tcg_const_i32(tcg_ctx, desc);
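
The comment in the hunk above describes the constraint this change works around: the SVE memory helpers receive a single 32-bit descriptor, so the Zt register number is folded into the descriptor's data field and, when MTE is active, the MTE descriptor bits sit above it at SVE_MTEDESC_SHIFT. The sketch below illustrates that packing with plain bit operations; EXAMPLE_MTEDESC_SHIFT and the example_* names are hypothetical stand-ins, not the actual QEMU/Unicorn definitions. When MTE is inactive, the descriptor carries only zt, and with this commit the address itself is now cleaned through clean_data_tbi instead.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustration only: pack a register number (0..31) and an optional
     * MTE descriptor into one 32-bit data field, as described in the
     * comment above.  EXAMPLE_MTEDESC_SHIFT is a made-up stand-in for
     * the real SVE_MTEDESC_SHIFT defined in the translator headers.
     */
    #define EXAMPLE_MTEDESC_SHIFT 5   /* low 5 bits are enough for zt = 0..31 */

    static uint32_t example_pack(unsigned zt, uint32_t mtedesc)
    {
        assert(zt < 32);
        return (mtedesc << EXAMPLE_MTEDESC_SHIFT) | zt;
    }

    static unsigned example_unpack_zt(uint32_t data)
    {
        return data & ((1u << EXAMPLE_MTEDESC_SHIFT) - 1);
    }

    static uint32_t example_unpack_mtedesc(uint32_t data)
    {
        return data >> EXAMPLE_MTEDESC_SHIFT;
    }

    int main(void)
    {
        uint32_t data = example_pack(17, 0x2b);
        printf("zt=%u mtedesc=0x%x\n",
               example_unpack_zt(data), example_unpack_mtedesc(data));
        return 0;
    }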