target/arm: Use mte_checkN for sve unpredicated loads

Backports commit b2aa8879b884cd66acde4123899dd92a38fe6527 from qemu
Richard Henderson 2021-02-25 17:26:34 -05:00 committed by Lioncash
parent 4fdd05e1aa
commit 3ce14ebc78

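This switches do_ldr's unpredicated SVE register load over to the MTE-aware path: instead of recomputing base + imm + offset into a scratch addr for every 8-byte piece and feeding that dirty address straight to the loads, the dirty address is built once, a single gen_mte_checkN validates the whole len-byte transfer, and the loads then simply walk the returned clean address. As a rough illustration of why one range check is the right shape here, a self-contained toy model (plain C, not QEMU code; check_range_once and the tag handling are illustrative stand-ins for gen_mte_checkN):

#include <stdint.h>
#include <stdio.h>

static int checks; /* counts simulated MTE checks */

/* Stand-in for gen_mte_checkN: validate the whole [dirty, dirty+len)
 * range once and return the address with its top byte stripped
 * (MTE keeps its allocation tag in bits [59:56] of that byte). */
static uint64_t check_range_once(uint64_t dirty, unsigned len)
{
    checks++;
    (void)len; /* a real check would test every granule in the range */
    return dirty & 0x00ffffffffffffffULL;
}

int main(void)
{
    uint64_t dirty = 0xab00000000401000ULL; /* tagged pointer */
    unsigned len = 32;                      /* four 8-byte elements */
    uint64_t clean = check_range_once(dirty, len);

    for (unsigned off = 0; off < len; off += 8) {
        /* load 8 bytes from (clean + off); no further checks needed */
    }
    printf("range checks: %d (instead of %u per-element checks)\n",
           checks, len / 8);
    return 0;
}

Running it prints a single check covering all four element loads, which is the pattern the diff below installs in the generated TCG code.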

@@ -4490,15 +4490,17 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
     int len_remain = len % 8;
     int nparts = len / 8 + ctpop8(len_remain);
     int midx = get_mem_index(s);
-    TCGv_i64 addr, t0, t1;
+    TCGv_i64 dirty_addr, clean_addr, t0, t1;
 
-    addr = tcg_temp_new_i64(tcg_ctx);
-    t0 = tcg_temp_new_i64(tcg_ctx);
+    dirty_addr = tcg_temp_new_i64(tcg_ctx);
+    tcg_gen_addi_i64(tcg_ctx, dirty_addr, cpu_reg_sp(s, rn), imm);
+    clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
+    tcg_temp_free_i64(tcg_ctx, dirty_addr);
 
-    /* Note that unpredicated load/store of vector/predicate registers
+    /*
+     * Note that unpredicated load/store of vector/predicate registers
      * are defined as a stream of bytes, which equates to little-endian
-     * operations on larger quantities.  There is no nice way to force
-     * a little-endian load for aarch64_be-linux-user out of line.
+     * operations on larger quantities.
      *
      * Attempt to keep code expansion to a minimum by limiting the
      * amount of unrolling done.
@@ -4506,55 +4508,59 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
     if (nparts <= 4) {
         int i;
 
+        t0 = tcg_temp_new_i64(tcg_ctx);
         for (i = 0; i < len_align; i += 8) {
-            tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + i);
-            tcg_gen_qemu_ld_i64(s->uc, t0, addr, midx, MO_LEQ);
+            tcg_gen_qemu_ld_i64(s->uc, t0, clean_addr, midx, MO_LEQ);
             tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + i);
+            tcg_gen_addi_i64(tcg_ctx, clean_addr, cpu_reg_sp(s, rn), 8);
         }
+        tcg_temp_free_i64(tcg_ctx, t0);
     } else {
         TCGLabel *loop = gen_new_label(tcg_ctx);
         TCGv_ptr tp, i = tcg_const_local_ptr(tcg_ctx, 0);
 
+        /* Copy the clean address into a local temp, live across the loop. */
+        t0 = clean_addr;
+        clean_addr = tcg_temp_local_new_i64(tcg_ctx);
+        tcg_gen_mov_i64(tcg_ctx, clean_addr, t0);
+        tcg_temp_free_i64(tcg_ctx, t0);
+
         gen_set_label(tcg_ctx, loop);
 
-        /* Minimize the number of local temps that must be re-read from
-         * the stack each iteration.  Instead, re-compute values other
-         * than the loop counter.
-         */
-        tp = tcg_temp_new_ptr(tcg_ctx);
-        tcg_gen_addi_ptr(tcg_ctx, tp, i, imm);
-        tcg_gen_extu_ptr_i64(tcg_ctx, addr, tp);
-        tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, rn));
-        tcg_gen_qemu_ld_i64(s->uc, t0, addr, midx, MO_LEQ);
+        t0 = tcg_temp_new_i64(tcg_ctx);
+        tcg_gen_qemu_ld_i64(s->uc, t0, clean_addr, midx, MO_LEQ);
+        tcg_gen_addi_i64(tcg_ctx, clean_addr, clean_addr, 8);
 
+        tp = tcg_temp_new_ptr(tcg_ctx);
         tcg_gen_add_ptr(tcg_ctx, tp, tcg_ctx->cpu_env, i);
         tcg_gen_addi_ptr(tcg_ctx, i, i, 8);
         tcg_gen_st_i64(tcg_ctx, t0, tp, vofs);
         tcg_temp_free_ptr(tcg_ctx, tp);
+        tcg_temp_free_i64(tcg_ctx, t0);
 
         tcg_gen_brcondi_ptr(tcg_ctx, TCG_COND_LTU, i, len_align, loop);
         tcg_temp_free_ptr(tcg_ctx, i);
     }
 
-    /* Predicate register loads can be any multiple of 2.
+    /*
+     * Predicate register loads can be any multiple of 2.
      * Note that we still store the entire 64-bit unit into cpu_env.
      */
     if (len_remain) {
-        tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + len_align);
+        t0 = tcg_temp_new_i64(tcg_ctx);
 
         switch (len_remain) {
         case 2:
         case 4:
         case 8:
-            tcg_gen_qemu_ld_i64(s->uc, t0, addr, midx, MO_LE | ctz32(len_remain));
+            tcg_gen_qemu_ld_i64(s->uc, t0, clean_addr, midx,
+                                MO_LE | ctz32(len_remain));
             break;
 
         case 6:
             t1 = tcg_temp_new_i64(tcg_ctx);
-            tcg_gen_qemu_ld_i64(s->uc, t0, addr, midx, MO_LEUL);
-            tcg_gen_addi_i64(tcg_ctx, addr, addr, 4);
-            tcg_gen_qemu_ld_i64(s->uc, t1, addr, midx, MO_LEUW);
+            tcg_gen_qemu_ld_i64(s->uc, t0, clean_addr, midx, MO_LEUL);
+            tcg_gen_addi_i64(tcg_ctx, clean_addr, clean_addr, 4);
+            tcg_gen_qemu_ld_i64(s->uc, t1, clean_addr, midx, MO_LEUW);
             tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 32, 32);
             tcg_temp_free_i64(tcg_ctx, t1);
             break;
@@ -4563,9 +4569,9 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
             g_assert_not_reached();
         }
         tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + len_align);
+        tcg_temp_free_i64(tcg_ctx, t0);
     }
-    tcg_temp_free_i64(tcg_ctx, addr);
-    tcg_temp_free_i64(tcg_ctx, t0);
+    tcg_temp_free_i64(tcg_ctx, clean_addr);
 }
 
 /* Similarly for stores.  */
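Two notes on the shape of the change, for context. The tcg_temp_local_new_i64 copy in the looping path exists because ordinary TCG temporaries do not stay live across a branch back to a label, so clean_addr must be promoted to a local temp before entering the loop. And a single N-byte check at MO_8 granularity is the right unit because the caller hands do_ldr the whole transfer at once and the architecture defines these loads as a plain byte stream. Upstream's translate-sve.c caller has roughly this shape (simplified sketch from the upstream tree, not part of this diff; the unicorn fork threads its tcg_ctx/uc handles through the same calls):

static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
{
    if (sve_access_check(s)) {
        int size = vec_full_reg_size(s);          /* whole Z register, in bytes */
        int off = vec_full_reg_offset(s, a->rd);  /* its offset inside cpu_env */
        do_ldr(s, off, size, a->rn, a->imm * size);
    }
    return true;
}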