Mirror of https://github.com/yuzu-emu/unicorn.git (synced 2025-07-19 02:37:31 +00:00)
target/arm: Use gvec for NEON VLD all lanes
Backports commit 7377c2c97e20e64ed9b481eb2d9b9084bfd5b7e9 from qemu
parent 985acb9cde
commit a2239b9f5b
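For context, the backported change deletes the scalar helpers gen_neon_dup_u8 and gen_load_and_replicate and instead emits a single tcg_gen_gvec_dup_i32 per destination register, replicating the element loaded by gen_aa32_ld_i32 across all lanes in one operation. Below is a minimal, illustrative C sketch of that replicate-to-all-lanes semantics (not the TCG code itself; dup_to_lanes, the 8-byte D-register buffer, and the little-endian assumption are all hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: replicate one element of (1 << size) bytes across
     * an 8-byte NEON D register, mirroring at the data level what a
     * tcg_gen_gvec_dup_i32(size, ..., 8, 8, tmp) op produces at runtime.
     * Assumes a little-endian host so the low bytes of 'element' are copied. */
    static void dup_to_lanes(uint8_t dreg[8], uint32_t element, int size)
    {
        int esize = 1 << size;              /* 1, 2 or 4 bytes per lane */
        for (int ofs = 0; ofs < 8; ofs += esize) {
            memcpy(dreg + ofs, &element, esize);
        }
    }

    int main(void)
    {
        uint8_t d[8];
        dup_to_lanes(d, 0xAB, 0);           /* e.g. VLD1.8 {d0[]}: byte in every lane */
        for (int i = 0; i < 8; i++) {
            printf("%02x ", d[i]);          /* prints: ab ab ab ab ab ab ab ab */
        }
        printf("\n");
        return 0;
    }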
@@ -3104,20 +3104,6 @@ static void gen_vfp_msr(DisasContext *s, TCGv_i32 tmp)
     tcg_temp_free_i32(tcg_ctx, tmp);
 }
 
-static void gen_neon_dup_u8(DisasContext *s, TCGv_i32 var, int shift)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx);
-    if (shift)
-        tcg_gen_shri_i32(tcg_ctx, var, var, shift);
-    tcg_gen_ext8u_i32(tcg_ctx, var, var);
-    tcg_gen_shli_i32(tcg_ctx, tmp, var, 8);
-    tcg_gen_or_i32(tcg_ctx, var, var, tmp);
-    tcg_gen_shli_i32(tcg_ctx, tmp, var, 16);
-    tcg_gen_or_i32(tcg_ctx, var, var, tmp);
-    tcg_temp_free_i32(tcg_ctx, tmp);
-}
-
 static void gen_neon_dup_low16(DisasContext *s, TCGv_i32 var)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
@@ -3138,29 +3124,6 @@ static void gen_neon_dup_high16(DisasContext *s, TCGv_i32 var)
     tcg_temp_free_i32(tcg_ctx, tmp);
 }
 
-static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
-{
-    /* Load a single Neon element and replicate into a 32 bit TCG reg */
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx);
-    switch (size) {
-    case 0:
-        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
-        gen_neon_dup_u8(s, tmp, 0);
-        break;
-    case 1:
-        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
-        gen_neon_dup_low16(s, tmp);
-        break;
-    case 2:
-        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
-        break;
-    default: /* Avoid compiler warnings. */
-        abort();
-    }
-    return tmp;
-}
-
 static int handle_vsel(DisasContext *s, uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                        uint32_t dp)
 {
@@ -5097,6 +5060,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
     int load;
     int shift;
     int n;
+    int vec_size;
     TCGv_i32 addr;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;
@@ -5266,28 +5230,33 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
         }
         addr = tcg_temp_new_i32(tcg_ctx);
         load_reg_var(s, addr, rn);
-        if (nregs == 1) {
-            /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
-            tmp = gen_load_and_replicate(s, addr, size);
-            tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 0));
-            tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 1));
-            if (insn & (1 << 5)) {
-                tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd + 1, 0));
-                tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd + 1, 1));
-            }
-            tcg_temp_free_i32(tcg_ctx, tmp);
-        } else {
-            /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
-            stride = (insn & (1 << 5)) ? 2 : 1;
-            for (reg = 0; reg < nregs; reg++) {
-                tmp = gen_load_and_replicate(s, addr, size);
-                tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 0));
-                tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 1));
-                tcg_temp_free_i32(tcg_ctx, tmp);
-                tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size);
-                rd += stride;
+
+        /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
+         * VLD2/3/4 to all lanes: bit 5 indicates register stride.
+         */
+        stride = (insn & (1 << 5)) ? 2 : 1;
+        vec_size = nregs == 1 ? stride * 8 : 8;
+
+        tmp = tcg_temp_new_i32(tcg_ctx);
+        for (reg = 0; reg < nregs; reg++) {
+            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
+                            s->be_data | size);
+            if ((rd & 1) && vec_size == 16) {
+                /* We cannot write 16 bytes at once because the
+                 * destination is unaligned.
+                 */
+                tcg_gen_gvec_dup_i32(tcg_ctx, size, neon_reg_offset(rd, 0),
+                                     8, 8, tmp);
+                tcg_gen_gvec_mov(tcg_ctx, 0, neon_reg_offset(rd + 1, 0),
+                                 neon_reg_offset(rd, 0), 8, 8);
+            } else {
+                tcg_gen_gvec_dup_i32(tcg_ctx, size, neon_reg_offset(rd, 0),
+                                     vec_size, vec_size, tmp);
             }
+            tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size);
+            rd += stride;
         }
+        tcg_temp_free_i32(tcg_ctx, tmp);
         tcg_temp_free_i32(tcg_ctx, addr);
         stride = (1 << size) * nregs;
     } else {
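A note on the (rd & 1) && vec_size == 16 branch above: a VLD1 to all lanes with register stride 2 wants a single 16-byte dup covering two D registers, but the gvec helpers address the register file by byte offset and, as the in-diff comment says, cannot write 16 bytes to an unaligned destination, so an odd-numbered starting D register falls back to an 8-byte dup plus an 8-byte move. A hypothetical offset sketch, where dreg_offset stands in for neon_reg_offset() under an assumed flat 8-bytes-per-D-register layout:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical: D registers packed as consecutive 8-byte slots, so an
     * odd-numbered D register starts 8 bytes past the last 16-byte boundary. */
    static size_t dreg_offset(int rd)
    {
        return (size_t)rd * 8;
    }

    int main(void)
    {
        for (int rd = 0; rd < 4; rd++) {
            printf("d%d: offset %2zu, 16-byte aligned: %s\n",
                   rd, dreg_offset(rd),
                   dreg_offset(rd) % 16 == 0 ? "yes" : "no");
        }
        return 0;
    }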