target/arm: Fix sve_zip_p vs odd vector lengths

The low-half zip (zip1) path wrote too much data when vl % 512 != 0.
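
To make the overrun concrete (the vector length below is an illustrative
assumption, not taken from the patch): with vl = 384 bits a predicate
register holds 384 / 8 = 48 bits, so oprsz = 6 bytes. For zip1, high = 0,
so the old guard (high & 3) == 0 always chose the 64-bit path, whose loop
ran DIV_ROUND_UP(6, 8) = 1 iteration and stored a full uint64_t into d[0],
writing 8 bytes where only 6 are valid. The new guard (oprsz & 7) == 0
sends such sizes down the 16-bit store path, which stays within the
predicate's oprsz bytes.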

Adjust all of the x + (y << s) expressions to x | (y << s) as a style fix;
the two operands never have set bits in common, so the result is unchanged.
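
A minimal standalone check of that equivalence (the constants here are
arbitrary illustrative values, not from the patch): after expand_bits(),
nn occupies only the even-numbered esize-wide slots, so nn and
(mm << esize) are disjoint and addition can never carry:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Values shaped like expand_bits(x, 0) output:
           only even bit positions are set. */
        uint64_t nn = 0x0000000000005511u;
        uint64_t mm = 0x0000000000004401u;
        int esize = 1;                      /* 1 << 0 */

        assert((nn & (mm << esize)) == 0);  /* operands are disjoint...    */
        assert((nn + (mm << esize)) ==
               (nn | (mm << esize)));       /* ...so '+' and '|' coincide. */
        return 0;
    }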

We only ever have exact overlap between D, M, and N. Therefore
we only need a single temporary, and we do not need to check for
partial overlap.
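
The reasoning can be spelled out (the instruction operands below are
hypothetical examples): each of vd, vn, vm points at the start of a
predicate register held at a fixed offset in the CPU state, so any two of
the pointers are either identical or refer to disjoint storage.
Enumerating the aliasing patterns shows one temporary suffices:

    /* Aliasing patterns for zip1 pd.b, pn.b, pm.b (hypothetical choices):
     *   zip1 p0.b, p1.b, p2.b   -- no overlap:  no copy needed
     *   zip1 p0.b, p0.b, p1.b   -- vd == vn:    copy n into tmp
     *   zip1 p0.b, p1.b, p0.b   -- vd == vm:    copy m into tmp
     *   zip1 p0.b, p0.b, p0.b   -- all equal:   copy once, reuse for m
     * Partial overlap (0 < vn - vd < oprsz) cannot occur, so the old
     * range test was strictly more general than necessary. */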

Backports commit 8e7fefed1bdcc0f7e722ccf2a2fc2b4f79fe725e
Author:    Richard Henderson, 2021-03-30 14:29:31 -04:00
Committer: Lioncash
parent 1aed8cee64
commit 78c016ef83


@@ -1870,6 +1870,7 @@ void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
     intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
     int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
     intptr_t high = FIELD_EX32(pred_desc, PREDDESC, DATA);
+    int esize = 1 << esz;
     uint64_t *d = vd;
     intptr_t i;
 
@@ -1882,33 +1883,35 @@ void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
         mm = extract64(mm, high * half, half);
         nn = expand_bits(nn, esz);
         mm = expand_bits(mm, esz);
-        d[0] = nn + (mm << (1 << esz));
+        d[0] = nn | (mm << esize);
     } else {
-        ARMPredicateReg tmp_n, tmp_m;
+        ARMPredicateReg tmp;
 
         /* We produce output faster than we consume input.
            Therefore we must be mindful of possible overlap.  */
-        if ((vn - vd) < (uintptr_t)oprsz) {
-            vn = memcpy(&tmp_n, vn, oprsz);
-        }
-        if ((vm - vd) < (uintptr_t)oprsz) {
-            vm = memcpy(&tmp_m, vm, oprsz);
+        if (vd == vn) {
+            vn = memcpy(&tmp, vn, oprsz);
+            if (vd == vm) {
+                vm = vn;
+            }
+        } else if (vd == vm) {
+            vm = memcpy(&tmp, vm, oprsz);
         }
         if (high) {
             high = oprsz >> 1;
         }
 
-        if ((high & 3) == 0) {
+        if ((oprsz & 7) == 0) {
             uint32_t *n = vn, *m = vm;
             high >>= 2;
 
-            for (i = 0; i < DIV_ROUND_UP(oprsz, 8); i++) {
+            for (i = 0; i < oprsz / 8; i++) {
                 uint64_t nn = n[H4(high + i)];
                 uint64_t mm = m[H4(high + i)];
 
                 nn = expand_bits(nn, esz);
                 mm = expand_bits(mm, esz);
-                d[i] = nn + (mm << (1 << esz));
+                d[i] = nn | (mm << esize);
             }
         } else {
             uint8_t *n = vn, *m = vm;
@@ -1920,7 +1923,7 @@ void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
 
                 nn = expand_bits(nn, esz);
                 mm = expand_bits(mm, esz);
-                d16[H2(i)] = nn + (mm << (1 << esz));
+                d16[H2(i)] = nn | (mm << esize);
             }
         }
     }
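
For reference, the expand_bits() helper the diff relies on spreads
predicate bits apart so the two expanded operands interleave. A
self-contained sketch of that behavior (an assumption based on the
classic Hacker's Delight bit-interleaving expansion, not code copied
from this repository):

    #include <stdint.h>

    /* Spread groups of (1 << esz) bits so each group lands in every
     * other (1 << esz)-wide slot, leaving zeroed slots in between for
     * the other operand. */
    static uint64_t expand_bits_sketch(uint64_t x, int esz)
    {
        static const uint64_t masks[5] = {
            0x5555555555555555ull, 0x3333333333333333ull,
            0x0f0f0f0f0f0f0f0full, 0x00ff00ff00ff00ffull,
            0x0000ffff0000ffffull,
        };
        x &= 0xffffffffu;
        for (int i = 4; i >= esz; i--) {
            int sh = 1 << i;
            x = ((x << sh) | x) & masks[i];
        }
        return x;
    }

    /* e.g. expand_bits_sketch(0xb, 0) == 0x45: bits 0,1,3 -> bits 0,2,6,
     * so nn | (mm << esize) interleaves nn into even slots and mm into
     * the odd slots left free. */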