target/arm: Fill in disas_ldst_atomic

This implements all of the v8.1-Atomics instructions except
for compare-and-swap, which is decoded elsewhere.

Backports commit 74608ea45434c9b07055b21885e093528c5ed98c from qemu
This commit is contained in:
Richard Henderson 2018-05-14 08:18:29 -04:00 committed by Lioncash
parent 7ae8671b5e
commit b23c543e1a
No known key found for this signature in database
GPG key ID: 4E3C3CC1031BA9C7

View file

@@ -81,6 +81,7 @@ typedef void NeonGenOneOpFn(TCGContext *t, TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGContext *, TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr);
/* Generator matching the tcg_gen_atomic_* helpers:
 * (ctx, result, address, operand, mem_index, memop).
 * Used by disas_ldst_atomic to dispatch the v8.1 atomic ops. */
typedef void AtomicThreeOpFn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);
/* Note that the gvec expanders operate on offsets + sizes. */
typedef void GVecGen2Fn(TCGContext *, unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
@@ -2827,10 +2828,13 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
/* Atomic memory operations (v8.1 LSE): LDADD, LDCLR, LDEOR, LDSET,
 * LD[S|U]MAX, LD[S|U]MIN and SWP.  Rs (insn bits 16-20) supplies the
 * operand, Rn (bits 5-9) the base address, and the previous memory
 * value is written back to Rt.  Compare-and-swap is decoded elsewhere.
 *
 * NOTE(review): the scraped diff had hunk headers embedded in the body,
 * stale pre-change lines "(void)rs; (void)rn;", and a truncated
 * arm_dc_feature() guard (only "return; }" survived); this restores the
 * coherent function — confirm against the upstream commit.
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    int feature = ARM_FEATURE_V8_ATOMICS;
    TCGv_i64 tcg_rn, tcg_rs;
    AtomicThreeOpFn *fn;

    if (is_vector) {
        unallocated_encoding(s);
        return;
    }
    /* N.B. the case labels are octal, mirroring the o3:opc encoding.  */
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_rn = cpu_reg_sp(s, rn);
    tcg_rs = read_cpu_reg(s, rs, true);

    if (o3_opc == 1) { /* LDCLR: bit-clear, i.e. AND with ~Rs */
        tcg_gen_not_i64(tcg_ctx, tcg_rs, tcg_rs);
    }

    /* The tcg atomic primitives are all full barriers.  Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(tcg_ctx, cpu_reg(s, rt), tcg_rn, tcg_rs, get_mem_index(s),
       s->be_data | size | MO_ALIGN);
}
/* Load/store register (all forms) */