diff --git a/qemu/target/arm/translate-vfp.inc.c b/qemu/target/arm/translate-vfp.inc.c
index 7077eb9e..c065128d 100644
--- a/qemu/target/arm/translate-vfp.inc.c
+++ b/qemu/target/arm/translate-vfp.inc.c
@@ -866,3 +866,78 @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a)
 
     return true;
 }
+
+static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    uint32_t offset;
+    TCGv_i32 addr;
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    offset = a->imm << 2;
+    if (!a->u) {
+        offset = -offset;
+    }
+
+    if (s->thumb && a->rn == 15) {
+        /* This is actually UNPREDICTABLE */
+        addr = tcg_temp_new_i32(tcg_ctx);
+        tcg_gen_movi_i32(tcg_ctx, addr, s->pc & ~2);
+    } else {
+        addr = load_reg(s, a->rn);
+    }
+    tcg_gen_addi_i32(tcg_ctx, addr, addr, offset);
+    if (a->l) {
+        gen_vfp_ld(s, false, addr);
+        gen_mov_vreg_F0(s, false, a->vd);
+    } else {
+        gen_mov_F0_vreg(s, false, a->vd);
+        gen_vfp_st(s, false, addr);
+    }
+    tcg_temp_free_i32(tcg_ctx, addr);
+
+    return true;
+}
+
+static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    uint32_t offset;
+    TCGv_i32 addr;
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    offset = a->imm << 2;
+    if (!a->u) {
+        offset = -offset;
+    }
+
+    if (s->thumb && a->rn == 15) {
+        /* This is actually UNPREDICTABLE */
+        addr = tcg_temp_new_i32(tcg_ctx);
+        tcg_gen_movi_i32(tcg_ctx, addr, s->pc & ~2);
+    } else {
+        addr = load_reg(s, a->rn);
+    }
+    tcg_gen_addi_i32(tcg_ctx, addr, addr, offset);
+    if (a->l) {
+        gen_vfp_ld(s, true, addr);
+        gen_mov_vreg_F0(s, true, a->vd);
+    } else {
+        gen_mov_F0_vreg(s, true, a->vd);
+        gen_vfp_st(s, true, addr);
+    }
+    tcg_temp_free_i32(tcg_ctx, addr);
+
+    return true;
+}
diff --git a/qemu/target/arm/translate.c b/qemu/target/arm/translate.c
index b0b97355..ef27d01f 100644
--- a/qemu/target/arm/translate.c
+++ b/qemu/target/arm/translate.c
@@ -3817,26 +3817,8 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
             else
                 rd = VFP_SREG_D(insn);
             if ((insn & 0x01200000) == 0x01000000) {
-                /* Single load/store */
-                offset = (insn & 0xff) << 2;
-                if ((insn & (1 << 23)) == 0)
-                    offset = 0-offset;
-                if (s->thumb && rn == 15) {
-                    /* This is actually UNPREDICTABLE */
-                    addr = tcg_temp_new_i32(tcg_ctx);
-                    tcg_gen_movi_i32(tcg_ctx, addr, s->pc & ~2);
-                } else {
-                    addr = load_reg(s, rn);
-                }
-                tcg_gen_addi_i32(tcg_ctx, addr, addr, offset);
-                if (insn & (1 << 20)) {
-                    gen_vfp_ld(s, dp, addr);
-                    gen_mov_vreg_F0(s, dp, rd);
-                } else {
-                    gen_mov_F0_vreg(s, dp, rd);
-                    gen_vfp_st(s, dp, addr);
-                }
-                tcg_temp_free_i32(tcg_ctx, addr);
+                /* Already handled by decodetree */
+                return 1;
             } else {
                 /* load/store multiple */
                 int w = insn & (1 << 21);
diff --git a/qemu/target/arm/vfp.decode b/qemu/target/arm/vfp.decode
index 134f1c9e..8fa7fa0b 100644
--- a/qemu/target/arm/vfp.decode
+++ b/qemu/target/arm/vfp.decode
@@ -71,3 +71,10 @@ VMOV_64_sp   ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... \
              vm=%vm_sp
 VMOV_64_dp   ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... \
              vm=%vm_dp
+
+# Note that the half-precision variants of VLDR and VSTR are
+# not part of this decodetree at all because they have bits [9:8] == 0b01
+VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 \
+             vd=%vd_sp
+VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 \
+             vd=%vd_dp