target/arm: Convert LDM, STM

This includes a minor bug fix to LDM (user), which requires
bit 21 to be 0, meaning no writeback.

Backports commit c5c426d4c680f908a1e262091a17b088b5709200 from qemu
Richard Henderson, 2019-11-20 11:17:18 -05:00; committed by Lioncash
parent e4ca88f9d6
commit a501800ba6
3 changed files with 251 additions and 195 deletions
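Concretely, in the new decodetree path (do_ldm in translate.c below), an LDM with the S bit set but PC absent from the register list is LDM (user), and writeback is now rejected. A minimal sketch of that check, reusing the field and helper names from the diff:

    /* Sketch only: `a` carries the &ldst_block fields decoded below. */
    if (a->u && !extract32(a->list, 15, 1) && a->w) {
        /* LDM (user): bit 21 (W) must be 0, i.e. no writeback. */
        unallocated_encoding(s);
        return true;
    }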

qemu/target/arm/a32.decode

@@ -40,6 +40,7 @@
&mrs_bank rd r sysm
&ldst_rr p w u rn rt rm shimm shtype
&ldst_ri p w u rn rt imm
&ldst_block rn i b u w list
&strex rn rd rt rt2 imm
&ldrex rn rt rt2 imm
&bfx rd rn lsb widthm1
@@ -514,3 +515,8 @@ SMMLA .... 0111 0101 .... .... .... 0001 .... @rdamn
SMMLAR .... 0111 0101 .... .... .... 0011 .... @rdamn
SMMLS .... 0111 0101 .... .... .... 1101 .... @rdamn
SMMLSR .... 0111 0101 .... .... .... 1111 .... @rdamn

# Block data transfer

STM ---- 100 b:1 i:1 u:1 w:1 0 rn:4 list:16 &ldst_block
LDM_a32 ---- 100 b:1 i:1 u:1 w:1 1 rn:4 list:16 &ldst_block
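To make the field mapping concrete, here is an illustrative decode under the STM pattern above (an editorial example, not part of the patch): push {r4, r5, lr} assembles to the A32 word 0xe92d4030.

    uint32_t insn = 0xe92d4030;      /* stmdb sp!, {r4, r5, lr} */
    int b    = (insn >> 24) & 1;     /* 1: adjust address before each transfer */
    int i    = (insn >> 23) & 1;     /* 0: decrement */
    int u    = (insn >> 22) & 1;     /* 0: no user-bank transfer */
    int w    = (insn >> 21) & 1;     /* 1: write the final address back to rn */
    int rn   = (insn >> 16) & 0xf;   /* 13 (sp) */
    int list = insn & 0xffff;        /* 0x4030: r4, r5, lr */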

qemu/target/arm/t32.decode

@@ -37,6 +37,7 @@
&mrs_bank !extern rd r sysm
&ldst_rr !extern p w u rn rt rm shimm shtype
&ldst_ri !extern p w u rn rt imm
&ldst_block !extern rn i b u w list
&strex !extern rn rd rt rt2 imm
&ldrex !extern rn rt rt2 imm
&bfx !extern rd rn lsb widthm1
@@ -563,3 +564,12 @@ SXTAB16 1111 1010 0010 .... 1111 .... 10.. .... @rrr_rot
UXTAB16 1111 1010 0011 .... 1111 .... 10.. .... @rrr_rot
SXTAB 1111 1010 0100 .... 1111 .... 10.. .... @rrr_rot
UXTAB 1111 1010 0101 .... 1111 .... 10.. .... @rrr_rot

# Load/store multiple

@ldstm .... .... .. w:1 . rn:4 list:16 &ldst_block u=0

STM_t32 1110 1000 10.0 .... ................ @ldstm i=1 b=0
STM_t32 1110 1001 00.0 .... ................ @ldstm i=0 b=1
LDM_t32 1110 1000 10.1 .... ................ @ldstm i=1 b=0
LDM_t32 1110 1001 00.1 .... ................ @ldstm i=0 b=1
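T32 provides only the increment-after and decrement-before addressing modes and has no user/banked-register form, which is why each pattern pins i and b and the @ldstm format fixes u=0. For illustration (not part of the patch), pop.w {r4, r5, pc} assembles to 0xe8bd8030 and matches the first LDM_t32 line:

    uint32_t insn = 0xe8bd8030;      /* ldmia.w sp!, {r4, r5, pc} */
    int w    = (insn >> 21) & 1;     /* 1: writeback */
    int rn   = (insn >> 16) & 0xf;   /* 13 (sp) */
    int list = insn & 0xffff;        /* 0x8030: r4, r5, pc */
    /* i=1, b=0 and u=0 come from the pattern and format, not from insn bits. */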

qemu/target/arm/translate.c

@@ -10113,6 +10113,237 @@ static bool trans_UDIV(DisasContext *s, arg_rrr *a)
return op_div(s, a, true);
}

/*
* Block data transfer
*/
static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;
TCGv_i32 addr = load_reg(s, a->rn);
if (a->b) {
if (a->i) {
/* pre increment */
tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
} else {
/* pre decrement */
tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4));
}
} else if (!a->i && n != 1) {
/* post decrement */
tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4));
}
if (s->v8m_stackcheck && a->rn == 13 && a->w) {
/*
* If the writeback is incrementing SP rather than
* decrementing it, and the initial SP is below the
* stack limit but the final written-back SP would
* be above, then we must not perform any memory
* accesses, but it is IMPDEF whether we generate
* an exception. We choose to do so in this case.
* At this point 'addr' is the lowest address, so
* either the original SP (if incrementing) or our
* final SP (if decrementing), so that's what we check.
*/
gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, addr);
}
return addr;
}

static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
TCGv_i32 addr, int n)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;
if (a->w) {
/* write back */
if (!a->b) {
if (a->i) {
/* post increment */
tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
} else {
/* post decrement */
tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4));
}
} else if (!a->i && n != 1) {
/* pre decrement */
tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4));
}
store_reg(s, a->rn, addr);
} else {
tcg_temp_free_i32(tcg_ctx, addr);
}
}

static bool op_stm(DisasContext *s, arg_ldst_block *a)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;
int i, j, n, list, mem_idx;
bool user = a->u;
TCGv_i32 addr, tmp, tmp2;
if (user) {
/* STM (user) */
if (IS_USER(s)) {
/* Only usable in supervisor mode. */
unallocated_encoding(s);
return true;
}
}
list = a->list;
n = ctpop16(list);
/* TODO: test invalid n == 0 case */
addr = op_addr_block_pre(s, a, n);
mem_idx = get_mem_index(s);
for (i = j = 0; i < 16; i++) {
if (!(list & (1 << i))) {
continue;
}
if (user && i != 15) {
tmp = tcg_temp_new_i32(tcg_ctx);
tmp2 = tcg_const_i32(tcg_ctx, i);
gen_helper_get_user_reg(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp2);
tcg_temp_free_i32(tcg_ctx, tmp2);
} else {
tmp = load_reg(s, i);
}
gen_aa32_st32(s, tmp, addr, mem_idx);
tcg_temp_free_i32(tcg_ctx, tmp);
/* No need to add after the last transfer. */
if (++j != n) {
tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
}
}
op_addr_block_post(s, a, addr, n);
return true;
}

static bool trans_STM(DisasContext *s, arg_ldst_block *a)
{
return op_stm(s, a);
}

static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
{
/* Writeback register in register list is UNPREDICTABLE for T32. */
if (a->w && (a->list & (1 << a->rn))) {
unallocated_encoding(s);
return true;
}
return op_stm(s, a);
}

static bool do_ldm(DisasContext *s, arg_ldst_block *a)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;
int i, j, n, list, mem_idx;
bool loaded_base;
bool user = a->u;
bool exc_return = false;
TCGv_i32 addr, tmp, tmp2, loaded_var;
if (user) {
/* LDM (user), LDM (exception return) */
if (IS_USER(s)) {
/* Only usable in supervisor mode. */
unallocated_encoding(s);
return true;
}
if (extract32(a->list, 15, 1)) {
exc_return = true;
user = false;
} else {
/* LDM (user) does not allow writeback. */
if (a->w) {
unallocated_encoding(s);
return true;
}
}
}
list = a->list;
n = ctpop16(list);
/* TODO: test invalid n == 0 case */
addr = op_addr_block_pre(s, a, n);
mem_idx = get_mem_index(s);
loaded_base = false;
loaded_var = NULL;
for (i = j = 0; i < 16; i++) {
if (!(list & (1 << i))) {
continue;
}
tmp = tcg_temp_new_i32(tcg_ctx);
gen_aa32_ld32u(s, tmp, addr, mem_idx);
if (user) {
tmp2 = tcg_const_i32(tcg_ctx, i);
gen_helper_set_user_reg(tcg_ctx, tcg_ctx->cpu_env, tmp2, tmp);
tcg_temp_free_i32(tcg_ctx, tmp2);
tcg_temp_free_i32(tcg_ctx, tmp);
} else if (i == a->rn) {
loaded_var = tmp;
loaded_base = true;
} else if (i == 15 && exc_return) {
store_pc_exc_ret(s, tmp);
} else {
store_reg_from_load(s, i, tmp);
}
/* No need to add after the last transfer. */
if (++j != n) {
tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
}
}
op_addr_block_post(s, a, addr, n);
if (loaded_base) {
store_reg(s, a->rn, loaded_var);
}
if (exc_return) {
/* Restore CPSR from SPSR. */
tmp = load_cpu_field(s, spsr);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start(tcg_ctx);
}
gen_helper_cpsr_write_eret(tcg_ctx, tcg_ctx->cpu_env, tmp);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end(tcg_ctx);
}
tcg_temp_free_i32(tcg_ctx, tmp);
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
}
return true;
}

static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
{
return do_ldm(s, a);
}

static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
{
/* Writeback register in register list is UNPREDICTABLE for T32. */
if (a->w && (a->list & (1 << a->rn))) {
unallocated_encoding(s);
return true;
}
return do_ldm(s, a);
}
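As a sanity check on the split address arithmetic (op_addr_block_pre/op_addr_block_post above), a worked example under assumed register values, not part of the patch: for stmdb sp!, {r4, r5, lr} we have n = 3, b = 1, i = 0, w = 1, so the pre hook lowers addr to sp - 12 (the lowest address), the store loop steps +4 between the three transfers, and the post hook's decrement branch returns addr to sp - 12 for the writeback:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t sp = 0x20001000, addr = sp;
        int n = 3;                   /* r4, r5, lr */
        addr -= n * 4;               /* pre hook: b=1, i=0 -> pre decrement */
        for (int j = 1; j < n; j++) {
            addr += 4;               /* one +4 step between transfers */
        }
        addr -= (n - 1) * 4;         /* post hook: w=1, b=1, i=0 */
        assert(addr == sp - n * 4);  /* writeback is the lowest address */
        return 0;
    }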

/*
* Legacy decoder.
*/
@@ -10402,136 +10633,10 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
case 0x5:
case 0x6:
case 0x7:
/* All done in decodetree. Reach here for illegal ops. */
goto illegal_op;
case 0x08:
case 0x09:
{
int j, n, loaded_base;
bool exc_return = false;
bool is_load = extract32(insn, 20, 1);
bool user = false;
TCGv_i32 loaded_var;
/* load/store multiple words */
/* XXX: store correct base if write back */
if (insn & (1 << 22)) {
/* LDM (user), LDM (exception return) and STM (user) */
if (IS_USER(s))
goto illegal_op; /* only usable in supervisor mode */
if (is_load && extract32(insn, 15, 1)) {
exc_return = true;
} else {
user = true;
}
}
rn = (insn >> 16) & 0xf;
addr = load_reg(s, rn);
/* compute total size */
loaded_base = 0;
loaded_var = NULL;
n = 0;
for (i = 0; i < 16; i++) {
if (insn & (1 << i))
n++;
}
/* XXX: test invalid n == 0 case ? */
if (insn & (1 << 23)) {
if (insn & (1 << 24)) {
/* pre increment */
tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
} else {
/* post increment */
}
} else {
if (insn & (1 << 24)) {
/* pre decrement */
tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4));
} else {
/* post decrement */
if (n != 1)
tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4));
}
}
j = 0;
for (i = 0; i < 16; i++) {
if (insn & (1 << i)) {
if (is_load) {
/* load */
tmp = tcg_temp_new_i32(tcg_ctx);
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
if (user) {
tmp2 = tcg_const_i32(tcg_ctx, i);
gen_helper_set_user_reg(tcg_ctx, tcg_ctx->cpu_env, tmp2, tmp);
tcg_temp_free_i32(tcg_ctx, tmp2);
tcg_temp_free_i32(tcg_ctx, tmp);
} else if (i == rn) {
loaded_var = tmp;
loaded_base = 1;
} else if (i == 15 && exc_return) {
store_pc_exc_ret(s, tmp);
} else {
store_reg_from_load(s, i, tmp);
}
} else {
/* store */
if (i == 15) {
tmp = tcg_temp_new_i32(tcg_ctx);
tcg_gen_movi_i32(tcg_ctx, tmp, read_pc(s));
} else if (user) {
tmp = tcg_temp_new_i32(tcg_ctx);
tmp2 = tcg_const_i32(tcg_ctx, i);
gen_helper_get_user_reg(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp2);
tcg_temp_free_i32(tcg_ctx, tmp2);
} else {
tmp = load_reg(s, i);
}
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tcg_ctx, tmp);
}
j++;
/* no need to add after the last transfer */
if (j != n)
tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
}
}
if (insn & (1 << 21)) {
/* write back */
if (insn & (1 << 23)) {
if (insn & (1 << 24)) {
/* pre increment */
} else {
/* post increment */
tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
}
} else {
if (insn & (1 << 24)) {
/* pre decrement */
if (n != 1)
tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4));
} else {
/* post decrement */
tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4));
}
}
store_reg(s, rn, addr);
} else {
tcg_temp_free_i32(tcg_ctx, addr);
}
if (loaded_base) {
store_reg(s, rn, loaded_var);
}
if (exc_return) {
/* Restore CPSR from SPSR. */
tmp = load_cpu_field(s, spsr);
gen_helper_cpsr_write_eret(tcg_ctx, tcg_ctx->cpu_env, tmp);
tcg_temp_free_i32(tcg_ctx, tmp);
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
}
}
break;
/* All done in decodetree. Reach here for illegal ops. */
goto illegal_op;
case 0xa:
case 0xb:
{
@@ -10799,73 +10904,8 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
insn & (1 << 21));
}
} else {
int i, loaded_base = 0;
TCGv_i32 loaded_var;
bool wback = extract32(insn, 21, 1);
/* Load/store multiple. */
addr = load_reg(s, rn);
offset = 0;
for (i = 0; i < 16; i++) {
if (insn & (1 << i))
offset += 4;
}
if (insn & (1 << 24)) {
tcg_gen_addi_i32(tcg_ctx, addr, addr, 0-offset);
}
if (s->v8m_stackcheck && rn == 13 && wback) {
/*
* If the writeback is incrementing SP rather than
* decrementing it, and the initial SP is below the
* stack limit but the final written-back SP would
* be above, then we must not perform any memory
* accesses, but it is IMPDEF whether we generate
* an exception. We choose to do so in this case.
* At this point 'addr' is the lowest address, so
* either the original SP (if incrementing) or our
* final SP (if decrementing), so that's what we check.
*/
gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, addr);
}
loaded_var = NULL;
for (i = 0; i < 16; i++) {
if ((insn & (1 << i)) == 0)
continue;
if (insn & (1 << 20)) {
/* Load. */
tmp = tcg_temp_new_i32(tcg_ctx);
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
if (i == rn) {
loaded_var = tmp;
loaded_base = 1;
} else {
store_reg_from_load(s, i, tmp);
}
} else {
/* Store. */
tmp = load_reg(s, i);
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tcg_ctx, tmp);
}
tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
}
if (loaded_base) {
store_reg(s, rn, loaded_var);
}
if (wback) {
/* Base register writeback. */
if (insn & (1 << 24)) {
tcg_gen_addi_i32(tcg_ctx, addr, addr, 0-offset);
}
/* Fault if writeback register is in register list. */
if (insn & (1 << rn))
goto illegal_op;
store_reg(s, rn, addr);
} else {
tcg_temp_free_i32(tcg_ctx, addr);
}
/* Load/store multiple, in decodetree */
goto illegal_op;
}
}
break;