diff --git a/qemu/target/arm/translate.c b/qemu/target/arm/translate.c
index 6e49602b..d33c5477 100644
--- a/qemu/target/arm/translate.c
+++ b/qemu/target/arm/translate.c
@@ -8071,9 +8071,28 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
         TCGv_i32 tmp2 = tcg_temp_new_i32(tcg_ctx);
         TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);
 
-        gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
+        /* For AArch32, architecturally the 32-bit word at the lowest
+         * address is always Rt and the one at addr+4 is Rt2, even if
+         * the CPU is big-endian. That means we don't want to do a
+         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
+         * for an architecturally 64-bit access, but instead do a
+         * 64-bit access using MO_BE if appropriate and then split
+         * the two halves.
+         * This only makes a difference for BE32 user-mode, where
+         * frob64() must not flip the two halves of the 64-bit data
+         * but this code must treat BE32 user-mode like BE32 system.
+         */
+        TCGv taddr = gen_aa32_addr(s, addr, opc);
+
+        tcg_gen_qemu_ld_i64(s->uc, t64, taddr, get_mem_index(s), opc);
+        tcg_temp_free(tcg_ctx, taddr);
+        tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, t64);
-        tcg_gen_extr_i64_i32(tcg_ctx, tmp, tmp2, t64);
+        if (s->be_data == MO_BE) {
+            tcg_gen_extr_i64_i32(tcg_ctx, tmp2, tmp, t64);
+        } else {
+            tcg_gen_extr_i64_i32(tcg_ctx, tmp, tmp2, t64);
+        }
         tcg_temp_free_i64(tcg_ctx, t64);
 
         store_reg(s, rt2, tmp2);
@@ -8124,15 +8143,26 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
         TCGv_i64 n64 = tcg_temp_new_i64(tcg_ctx);
 
         t2 = load_reg(s, rt2);
-        tcg_gen_concat_i32_i64(tcg_ctx, n64, t1, t2);
+        /* For AArch32, architecturally the 32-bit word at the lowest
+         * address is always Rt and the one at addr+4 is Rt2, even if
+         * the CPU is big-endian. Since we're going to treat this as a
+         * single 64-bit BE store, we need to put the two halves in the
+         * opposite order for BE to LE, so that they end up in the right
+         * places.
+         * We don't want gen_aa32_frob64() because that does the wrong
+         * thing for BE32 usermode.
+         */
+        if (s->be_data == MO_BE) {
+            tcg_gen_concat_i32_i64(tcg_ctx, n64, t2, t1);
+        } else {
+            tcg_gen_concat_i32_i64(tcg_ctx, n64, t1, t2);
+        }
         tcg_temp_free_i32(tcg_ctx, t2);
-        gen_aa32_frob64(s, n64);
 
         tcg_gen_atomic_cmpxchg_i64(tcg_ctx, o64, taddr, tcg_ctx->cpu_exclusive_val, n64,
                                    get_mem_index(s), opc);
         tcg_temp_free_i64(tcg_ctx, n64);
-        gen_aa32_frob64(s, o64);
 
         tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, o64, o64,
                             tcg_ctx->cpu_exclusive_val);
         tcg_gen_extrl_i64_i32(tcg_ctx, t0, o64);
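
Note (not part of the patch): a minimal, self-contained C sketch of the half-ordering the comments above describe, using plain host code and made-up word values rather than TCG ops. After a single big-endian 64-bit load, the word at the lower guest address sits in the high half of the 64-bit value, so for LDREXD it must be routed to Rt and the low half to Rt2; that is why the MO_BE path swaps the outputs of tcg_gen_extr_i64_i32() and, symmetrically, the operands of tcg_gen_concat_i32_i64() for STREXD.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical guest memory: 0x11111111 at the lower address,
         * 0x22222222 at addr+4, laid out big-endian as for a BE32 guest. */
        uint8_t mem[8] = { 0x11, 0x11, 0x11, 0x11, 0x22, 0x22, 0x22, 0x22 };

        /* Single 64-bit big-endian load of the doubleword. */
        uint64_t t64 = 0;
        for (int i = 0; i < 8; i++) {
            t64 = (t64 << 8) | mem[i];
        }

        /* Big-endian case: the low-address word is the *high* half of t64,
         * so Rt takes the high half and Rt2 the low half (the swapped
         * extr in the patch). */
        uint32_t rt  = (uint32_t)(t64 >> 32);
        uint32_t rt2 = (uint32_t)t64;

        /* Prints Rt = 0x11111111, Rt2 = 0x22222222. */
        printf("Rt = 0x%08x, Rt2 = 0x%08x\n", (unsigned)rt, (unsigned)rt2);
        return 0;
    }

In the little-endian case the lower-address word ends up in the low half of t64, which is why the MO_LE path keeps the original, unswapped extr/concat calls.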