target/arm: Fix short-vector increment behaviour

For VFP short vectors, the VFP registers are divided into a
series of banks: for single-precision these are s0-s7, s8-s15,
s16-s23 and s24-s31; for double-precision they are d0-d3,
d4-d7, ... d28-d31. Some banks are "scalar" meaning that
use of a register within them triggers a pure-scalar or
mixed vector-scalar operation rather than a full vector
operation. The scalar banks are s0-s7, d0-d3 and d16-d19.
When using a bank as part of a vector operation, we
iterate through it, increasing the register number by
the specified stride each time, and wrapping around to
the beginning of the bank.
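As a standalone illustration (not part of this patch): iterating the
d4-d7 bank with stride 2 starting at d6 should visit d6, d4, d6, ...
because the low bits wrap around while the bank-selecting high bits
stay fixed:

    #include <stdio.h>

    int main(void)
    {
        int reg = 6;     /* start at d6, i.e. in bank d4-d7 */
        int stride = 2;

        for (int i = 0; i < 4; i++) {
            printf("d%d\n", reg);
            /* advance within the bank: low 2 bits wrap, high bits fixed */
            reg = ((reg + stride) & 0x3) | (reg & ~0x3);
        }
        return 0;        /* prints d6 d4 d6 d4 */
    }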

Unfortunately our calculation of the "increment" part of this
was incorrect:
    vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask)
will only do the intended thing if bank_mask has exactly
one set high bit. For instance for doubles (bank_mask = 0xc),
if we start with vd = 6 and delta_d = 2 then vd is updated
to 12 (escaping into the d12-d15 bank) rather than the
intended 4 (wrapping around within the d4-d7 bank).
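The miscalculation is easy to reproduce outside QEMU; a minimal
standalone sketch of the example above (vd = 6, delta_d = 2,
bank_mask = 0xc):

    #include <assert.h>

    int main(void)
    {
        int vd = 6, delta_d = 2, bank_mask = 0xc;

        /* the old expression: only correct if bank_mask has one set bit */
        int buggy = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
        /* what the architecture requires: wrap within the d4-d7 bank */
        int fixed = ((vd + delta_d) & 0x3) | (vd & ~0x3);

        assert(buggy == 12);   /* d6 + stride 2 wrongly escapes the bank */
        assert(fixed == 4);    /* correctly wraps around to d4 */
        return 0;
    }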

This only causes problems in the unlikely case that the
starting register is not the first in its bank: if the
register number doesn't have to wrap around then the
expression happens to give the right answer.

Fix this bug by abstracting out the "check whether register
is in a scalar bank" and "advance register within bank"
operations to utility functions which use the right
bit masking operations.
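For example, with the D-register helpers this patch adds (copied here
so the check is self-contained), the problem case now behaves as
intended:

    #include <assert.h>
    #include <stdbool.h>

    static inline bool vfp_dreg_is_scalar(int reg)
    {
        return (reg & 0xc) == 0;   /* true for d0-d3 and d16-d19 */
    }

    static inline int vfp_advance_dreg(int reg, int delta)
    {
        return ((reg + delta) & 0x3) | (reg & ~0x3);
    }

    int main(void)
    {
        assert(!vfp_dreg_is_scalar(6));       /* d6 is in vector bank d4-d7 */
        assert(vfp_dreg_is_scalar(18));       /* d18 is in scalar bank d16-d19 */
        assert(vfp_advance_dreg(6, 2) == 4);  /* wraps to d4, as intended */
        return 0;
    }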

Backports commit 18cf951af9a27ae573a6fa17f9d0c103f7b7679b from qemu
Peter Maydell 2019-06-13 19:44:25 -04:00, committed by Lioncash
parent 1a0d31c05e
commit 021da28bfd

@@ -1156,6 +1156,42 @@ typedef void VFPGen3OpDPFn(TCGContext *, TCGv_i64 vd,
 typedef void VFPGen2OpSPFn(TCGContext *, TCGv_i32 vd, TCGv_i32 vm);
 typedef void VFPGen2OpDPFn(TCGContext *, TCGv_i64 vd, TCGv_i64 vm);
 
+/*
+ * Return true if the specified S reg is in a scalar bank
+ * (ie if it is s0..s7)
+ */
+static inline bool vfp_sreg_is_scalar(int reg)
+{
+    return (reg & 0x18) == 0;
+}
+
+/*
+ * Return true if the specified D reg is in a scalar bank
+ * (ie if it is d0..d3 or d16..d19)
+ */
+static inline bool vfp_dreg_is_scalar(int reg)
+{
+    return (reg & 0xc) == 0;
+}
+
+/*
+ * Advance the S reg number forwards by delta within its bank
+ * (ie increment the low 3 bits but leave the rest the same)
+ */
+static inline int vfp_advance_sreg(int reg, int delta)
+{
+    return ((reg + delta) & 0x7) | (reg & ~0x7);
+}
+
+/*
+ * Advance the D reg number forwards by delta within its bank
+ * (ie increment the low 2 bits but leave the rest the same)
+ */
+static inline int vfp_advance_dreg(int reg, int delta)
+{
+    return ((reg + delta) & 0x3) | (reg & ~0x3);
+}
+
 /*
  * Perform a 3-operand VFP data processing instruction. fn is the
  * callback to do the actual operation; this function deals with the
@@ -1167,7 +1203,6 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i32 f0, f1, fd;
     TCGv_ptr fpst;
@@ -1182,16 +1217,14 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
     }
 
     if (veclen > 0) {
-        bank_mask = 0x18;
-
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_sreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = s->vec_stride + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_sreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -1222,11 +1255,11 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
+        vd = vfp_advance_sreg(vd, delta_d);
+        vn = vfp_advance_sreg(vn, delta_d);
         neon_load_reg32(s, f0, vn);
         if (delta_m) {
-            vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+            vm = vfp_advance_sreg(vm, delta_m);
             neon_load_reg32(s, f1, vm);
         }
     }
@@ -1245,7 +1278,6 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i64 f0, f1, fd;
     TCGv_ptr fpst;
@@ -1265,16 +1297,14 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
     }
 
     if (veclen > 0) {
-        bank_mask = 0xc;
-
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_dreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = (s->vec_stride >> 1) + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_dreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -1304,11 +1334,11 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
         }
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
+        vd = vfp_advance_dreg(vd, delta_d);
+        vn = vfp_advance_dreg(vn, delta_d);
         neon_load_reg64(s, f0, vn);
         if (delta_m) {
-            vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+            vm = vfp_advance_dreg(vm, delta_m);
             neon_load_reg64(s, f1, vm);
         }
     }
@@ -1326,7 +1356,6 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i32 f0, fd;
 
@@ -1340,16 +1369,14 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
     }
 
     if (veclen > 0) {
-        bank_mask = 0x18;
-
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_sreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = s->vec_stride + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_sreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -1375,7 +1402,7 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
         if (delta_m == 0) {
             /* single source one-many */
             while (veclen--) {
-                vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+                vd = vfp_advance_sreg(vd, delta_d);
                 neon_store_reg32(s, fd, vd);
             }
             break;
@@ -1383,8 +1410,8 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+        vd = vfp_advance_sreg(vd, delta_d);
+        vm = vfp_advance_sreg(vm, delta_m);
         neon_load_reg32(s, f0, vm);
     }
@@ -1399,7 +1426,6 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i64 f0, fd;
 
@@ -1418,16 +1444,14 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
     }
 
     if (veclen > 0) {
-        bank_mask = 0xc;
-
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_dreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = (s->vec_stride >> 1) + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_dreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -1453,7 +1477,7 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
         if (delta_m == 0) {
             /* single source one-many */
             while (veclen--) {
-                vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+                vd = vfp_advance_dreg(vd, delta_d);
                 neon_store_reg64(s, fd, vd);
             }
             break;
@@ -1461,8 +1485,8 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
 
         /* Set up the operands for the next iteration */
        veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+        vd = vfp_advance_dreg(vd, delta_d);
+        vm = vfp_advance_dreg(vm, delta_m);
         neon_load_reg64(s, f0, vm);
     }
@@ -1807,7 +1831,6 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i32 fd;
     uint32_t n, i, vd;
@@ -1828,9 +1851,8 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
     }
 
     if (veclen > 0) {
-        bank_mask = 0x18;
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_sreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
@@ -1859,7 +1881,7 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vd = vfp_advance_sreg(vd, delta_d);
     }
 
     tcg_temp_free_i32(tcg_ctx, fd);
@@ -1870,7 +1892,6 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i64 fd;
     uint32_t n, i, vd;
@@ -1896,9 +1917,8 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
     }
 
     if (veclen > 0) {
-        bank_mask = 0xc;
         /* Figure out what type of vector operation this is. */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_dreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
@@ -1927,7 +1947,7 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vd = vfp_advance_dreg(vd, delta_d);
     }
 
     tcg_temp_free_i64(tcg_ctx, fd);