target/arm: Implement SVE Bitwise Logical - Unpredicated Group

These were the instructions that were stubbed out when
introducing the decode skeleton.

Backports commit 39eea56172e668cc4cca611ed9166779df54ac63 from qemu
Richard Henderson authored 2018-05-20 00:53:35 -04:00; committed by Lioncash
parent 49def4bbde
commit e6a0b2c2bc
2 changed files with 197 additions and 8 deletions

target/arm/sve.decode

@@ -19,11 +19,17 @@
 # This file is processed by scripts/decodetree.py
 #
 
+###########################################################################
+# Named fields.  These are primarily for disjoint fields.
+
+%imm9_16_10        16:s6 10:3
+
 ###########################################################################
 # Named attribute sets.  These are used to make nice(er) names
 # when creating helpers common to those for the individual
 # instruction patterns.
 
+&rri               rd rn imm
 &rrr_esz           rd rn rm esz
 
 ###########################################################################
@@ -33,6 +39,12 @@
 # Three operand with unused vector element size
 @rd_rn_rm_e0       ........ ... rm:5 ... ... rn:5 rd:5          &rrr_esz esz=0
 
+# Basic Load/Store with 9-bit immediate offset
+@pd_rn_i9          ........ ........ ...... rn:5 . rd:4         \
+                   &rri imm=%imm9_16_10
+@rd_rn_i9          ........ ........ ...... rn:5 rd:5           \
+                   &rri imm=%imm9_16_10
+
 ###########################################################################
 # Instruction patterns.  Grouped according to the SVE encodingindex.xhtml.
@@ -43,3 +55,11 @@ AND_zzz    00000100 00 1 ..... 001 100 ..... .....      @rd_rn_rm_e0
 ORR_zzz            00000100 01 1 ..... 001 100 ..... .....      @rd_rn_rm_e0
 EOR_zzz            00000100 10 1 ..... 001 100 ..... .....      @rd_rn_rm_e0
 BIC_zzz            00000100 11 1 ..... 001 100 ..... .....      @rd_rn_rm_e0
+
+### SVE Memory - 32-bit Gather and Unsized Contiguous Group
+
+# SVE load predicate register
+LDR_pri            10000101 10 ...... 000 ... ..... 0 ....      @pd_rn_i9
+
+# SVE load vector register
+LDR_zri            10000101 10 ...... 010 ... ..... .....       @rd_rn_i9
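
For reference, the new %imm9_16_10 named field assembles the signed 9-bit
immediate of LDR (vector/predicate) from two disjoint ranges: imm9h at
insn[21:16] (signed) and imm9l at insn[12:10]. Decodetree concatenates the
listed sub-fields most-significant first, with the s modifier sign-extending
the leading one. A stand-alone C sketch of the equivalent extraction
(illustrative only; the generated decoder uses QEMU's own extract helpers):

    #include <stdint.h>

    /* Sign-extend the LEN-bit field of INSN starting at bit POS.
     * (Relies on arithmetic right shift of signed values, as QEMU does.)
     */
    static int32_t sextract_field(uint32_t insn, int pos, int len)
    {
        return (int32_t)(insn << (32 - pos - len)) >> (32 - len);
    }

    /* Model of "%imm9_16_10 16:s6 10:3". */
    static int32_t decode_imm9_16_10(uint32_t insn)
    {
        int32_t hi = sextract_field(insn, 16, 6);   /* imm9h, signed */
        int32_t lo = (insn >> 10) & 0x7;            /* imm9l */
        return hi * 8 + lo;                         /* hi:lo, 9 bits */
    }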

target/arm/translate-sve.c

@@ -39,22 +39,191 @@
  * Implement all of the translator functions referenced by the decoder.
  */
 
-static bool trans_AND_zzz(DisasContext *s, arg_AND_zzz *a, uint32_t insn)
+/* Return the offset into CPUARMState of the predicate vector register Pn.
+ * Note for this purpose, FFR is P16.
+ */
+static inline int pred_full_reg_offset(DisasContext *s, int regno)
 {
-    return false;
+    return offsetof(CPUARMState, vfp.pregs[regno]);
 }
 
-static bool trans_ORR_zzz(DisasContext *s, arg_ORR_zzz *a, uint32_t insn)
+/* Return the byte size of the whole predicate register, VL / 64.  */
+static inline int pred_full_reg_size(DisasContext *s)
 {
-    return false;
+    return s->sve_len >> 3;
 }
 
-static bool trans_EOR_zzz(DisasContext *s, arg_EOR_zzz *a, uint32_t insn)
+/* Invoke a vector expander on two Zregs.  */
+static bool do_vector2_z(DisasContext *s, GVecGen2Fn *gvec_fn,
+                         int esz, int rd, int rn)
 {
-    return false;
+    if (sve_access_check(s)) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        unsigned vsz = vec_full_reg_size(s);
+        gvec_fn(tcg_ctx, esz, vec_full_reg_offset(s, rd),
+                vec_full_reg_offset(s, rn), vsz, vsz);
+    }
+    return true;
 }
 
-static bool trans_BIC_zzz(DisasContext *s, arg_BIC_zzz *a, uint32_t insn)
+/* Invoke a vector expander on three Zregs.  */
+static bool do_vector3_z(DisasContext *s, GVecGen3Fn *gvec_fn,
+                         int esz, int rd, int rn, int rm)
 {
-    return false;
+    if (sve_access_check(s)) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        unsigned vsz = vec_full_reg_size(s);
+        gvec_fn(tcg_ctx, esz, vec_full_reg_offset(s, rd),
+                vec_full_reg_offset(s, rn),
+                vec_full_reg_offset(s, rm), vsz, vsz);
+    }
+    return true;
 }
+
+/* Invoke a vector move on two Zregs.  */
+static bool do_mov_z(DisasContext *s, int rd, int rn)
+{
+    return do_vector2_z(s, tcg_gen_gvec_mov, 0, rd, rn);
+}
+
+/*
+ *** SVE Logical - Unpredicated Group
+ */
+
+static bool trans_AND_zzz(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
+{
+    return do_vector3_z(s, tcg_gen_gvec_and, 0, a->rd, a->rn, a->rm);
+}
+
+static bool trans_ORR_zzz(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
+{
+    if (a->rn == a->rm) { /* MOV */
+        return do_mov_z(s, a->rd, a->rn);
+    } else {
+        return do_vector3_z(s, tcg_gen_gvec_or, 0, a->rd, a->rn, a->rm);
+    }
+}
+
+static bool trans_EOR_zzz(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
+{
+    return do_vector3_z(s, tcg_gen_gvec_xor, 0, a->rd, a->rn, a->rm);
+}
+
+static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
+{
+    return do_vector3_z(s, tcg_gen_gvec_andc, 0, a->rd, a->rn, a->rm);
+}
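
All four of these operations are pure bitwise, so the element size cannot
affect the result; that is why the decode patterns above fix esz=0 through
@rd_rn_rm_e0 and the expanders operate on whole 64-bit lanes. A scalar model
of the one non-obvious case, BIC (an illustrative sketch; the commit itself
goes through the tcg_gen_gvec_* expanders):

    #include <stddef.h>
    #include <stdint.h>

    /* BIC is AND-NOT: d = n & ~m, lane by lane.  Processing 64-bit
     * chunks yields the same bytes as any other element size would.
     */
    static void model_bic_zzz(uint64_t *d, const uint64_t *n,
                              const uint64_t *m, size_t lanes)
    {
        for (size_t i = 0; i < lanes; i++) {
            d[i] = n[i] & ~m[i];
        }
    }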
+
+/*
+ *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
+ */
+
+/* Subroutine loading a vector register at VOFS of LEN bytes.
+ * The load should begin at the address Rn + IMM.
+ */
+static void do_ldr(DisasContext *s, uint32_t vofs, uint32_t len,
+                   int rn, int imm)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    uint32_t len_align = QEMU_ALIGN_DOWN(len, 8);
+    uint32_t len_remain = len % 8;
+    uint32_t nparts = len / 8 + ctpop8(len_remain);
+    int midx = get_mem_index(s);
+    TCGv_i64 addr, t0, t1;
+
+    addr = tcg_temp_new_i64(tcg_ctx);
+    t0 = tcg_temp_new_i64(tcg_ctx);
+
+    /* Note that unpredicated load/store of vector/predicate registers
+     * are defined as a stream of bytes, which equates to little-endian
+     * operations on larger quantities.  There is no nice way to force
+     * a little-endian load for aarch64_be-linux-user out of line.
+     *
+     * Attempt to keep code expansion to a minimum by limiting the
+     * amount of unrolling done.
+     */
+    if (nparts <= 4) {
+        int i;
+
+        for (i = 0; i < len_align; i += 8) {
+            tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + i);
+            tcg_gen_qemu_ld_i64(s->uc, t0, addr, midx, MO_LEQ);
+            tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + i);
+        }
+    } else {
+        TCGLabel *loop = gen_new_label(tcg_ctx);
+        TCGv_ptr tp, i = tcg_const_local_ptr(tcg_ctx, 0);
+
+        gen_set_label(tcg_ctx, loop);
+
+        /* Minimize the number of local temps that must be re-read from
+         * the stack each iteration.  Instead, re-compute values other
+         * than the loop counter.
+         */
+        tp = tcg_temp_new_ptr(tcg_ctx);
+        tcg_gen_addi_ptr(tcg_ctx, tp, i, imm);
+        tcg_gen_extu_ptr_i64(tcg_ctx, addr, tp);
+        tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, rn));
+
+        tcg_gen_qemu_ld_i64(s->uc, t0, addr, midx, MO_LEQ);
+
+        tcg_gen_add_ptr(tcg_ctx, tp, tcg_ctx->cpu_env, i);
+        tcg_gen_addi_ptr(tcg_ctx, i, i, 8);
+        tcg_gen_st_i64(tcg_ctx, t0, tp, vofs);
+        tcg_temp_free_ptr(tcg_ctx, tp);
+
+        tcg_gen_brcondi_ptr(tcg_ctx, TCG_COND_LTU, i, len_align, loop);
+        tcg_temp_free_ptr(tcg_ctx, i);
+    }
+
+    /* Predicate register loads can be any multiple of 2.
+     * Note that we still store the entire 64-bit unit into cpu_env.
+     */
+    if (len_remain) {
+        tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + len_align);
+
+        switch (len_remain) {
+        case 2:
+        case 4:
+        case 8:
+            tcg_gen_qemu_ld_i64(s->uc, t0, addr, midx, MO_LE | ctz32(len_remain));
+            break;
+
+        case 6:
+            t1 = tcg_temp_new_i64(tcg_ctx);
+            tcg_gen_qemu_ld_i64(s->uc, t0, addr, midx, MO_LEUL);
+            tcg_gen_addi_i64(tcg_ctx, addr, addr, 4);
+            tcg_gen_qemu_ld_i64(s->uc, t1, addr, midx, MO_LEUW);
+            tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 32, 32);
+            tcg_temp_free_i64(tcg_ctx, t1);
+            break;
+
+        default:
+            g_assert_not_reached();
+        }
+        tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + len_align);
+    }
+
+    tcg_temp_free_i64(tcg_ctx, addr);
+    tcg_temp_free_i64(tcg_ctx, t0);
+}
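
A note on the bookkeeping at the top of do_ldr: nparts counts the memory
operations to be emitted, one per aligned 8-byte chunk plus one per set bit
of the remainder. Since a predicate register is any multiple of 2 bytes, the
tail is 2, 4, or 6 bytes, and the 6-byte case costs two loads, which is
exactly the "case 6" arm above. A stand-alone sketch of the same arithmetic
(illustrative only, not part of the commit):

    #include <stdint.h>

    /* Mirror of do_ldr's nparts computation.  E.g. a predicate load at
     * VL = 384 bits has len = 384/64 = 6 bytes: len_align = 0,
     * len_remain = 6, nparts = 0 + popcount(6) = 2.
     */
    static uint32_t ldr_nparts(uint32_t len)
    {
        uint32_t len_align = len & ~(uint32_t)7;  /* QEMU_ALIGN_DOWN(len, 8) */
        uint32_t len_remain = len % 8;
        return len_align / 8 + (uint32_t)__builtin_popcount(len_remain);
    }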
+
+static bool trans_LDR_zri(DisasContext *s, arg_rri *a, uint32_t insn)
+{
+    if (sve_access_check(s)) {
+        int size = vec_full_reg_size(s);
+        int off = vec_full_reg_offset(s, a->rd);
+        do_ldr(s, off, size, a->rn, a->imm * size);
+    }
+    return true;
+}
+
+static bool trans_LDR_pri(DisasContext *s, arg_rri *a, uint32_t insn)
+{
+    if (sve_access_check(s)) {
+        int size = pred_full_reg_size(s);
+        int off = pred_full_reg_offset(s, a->rd);
+        do_ldr(s, off, size, a->rn, a->imm * size);
+    }
+    return true;
+}
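
Note the a->imm * size scaling in both translators: the 9-bit immediate of
LDR (vector) and LDR (predicate) is measured in whole registers, so the byte
offset tracks the current vector length. For example, at VL = 256 bits,
vec_full_reg_size(s) is 32 and pred_full_reg_size(s) is 4, so imm = -3
addresses Rn - 96 for LDR_zri but Rn - 12 for LDR_pri.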