mirror of https://github.com/yuzu-emu/unicorn.git
target/arm: Correct load exclusive pair atomicity
We are not providing the required single-copy atomic semantics for the 64-bit operation that is the 32-bit paired load.

At the same time, leave the entire 64-bit value in cpu_exclusive_val and stop writing to cpu_exclusive_high. This means that we do not have to re-assemble the 64-bit quantity when it comes time to store.

At the same time, drop a redundant temporary and perform all loads directly into the cpu_exclusive_* globals.

Backports commit 19514cde3b92938df750acaecf2caaa85e1d36a6 from qemu
parent 009a52dd13
commit 4a8f556c29
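The core of the fix is the size == 2 (32-bit LDXP) case: instead of two separate 32-bit loads, the pair is read with one single-copy-atomic 64-bit load and then split into the two destination registers, with the whole value kept for the later store-exclusive comparison. The following is only a rough host-side sketch of that idea in plain C11, not the QEMU/Unicorn TCG API; the function name, its parameters, and the big_endian flag (standing in for s->be_data) are hypothetical.

/* Hypothetical analogue of the size == 2 load-exclusive path: one 64-bit
 * atomic load supplies both 32-bit registers of the pair. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static void paired_load_exclusive_32(_Atomic uint64_t *addr,
                                     uint32_t *rt, uint32_t *rt2,
                                     uint64_t *exclusive_val,
                                     bool big_endian)
{
    /* Single-copy atomic 64-bit access, also remembered for the later
     * store-exclusive comparison (the role of cpu_exclusive_val). */
    uint64_t val = atomic_load_explicit(addr, memory_order_relaxed);
    *exclusive_val = val;

    if (!big_endian) {
        /* Little-endian data: rt takes the low half, rt2 the high half. */
        *rt  = (uint32_t)val;
        *rt2 = (uint32_t)(val >> 32);
    } else {
        /* Big-endian data: the halves are swapped. */
        *rt  = (uint32_t)(val >> 32);
        *rt2 = (uint32_t)val;
    }
}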
@@ -1897,29 +1897,43 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                                TCGv_i64 addr, int size, bool is_pair)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx);
-    TCGMemOp memop = s->be_data + size;
+    int idx = get_mem_index(s);
+    TCGMemOp memop = s->be_data;
 
     g_assert(size <= 3);
-    tcg_gen_qemu_ld_i64(s->uc, tmp, addr, get_mem_index(s), memop);
-
     if (is_pair) {
-        TCGv_i64 addr2 = tcg_temp_new_i64(tcg_ctx);
-        TCGv_i64 hitmp = tcg_temp_new_i64(tcg_ctx);
-
         g_assert(size >= 2);
-        tcg_gen_addi_i64(tcg_ctx, addr2, addr, 1 << size);
-        tcg_gen_qemu_ld_i64(s->uc, hitmp, addr2, get_mem_index(s), memop);
-        tcg_temp_free_i64(tcg_ctx, addr2);
-        tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_high, hitmp);
-        tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt2), hitmp);
-        tcg_temp_free_i64(tcg_ctx, hitmp);
+        if (size == 2) {
+            /* The pair must be single-copy atomic for the doubleword. */
+            memop |= MO_64;
+            tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_exclusive_val, addr, idx, memop);
+            if (s->be_data == MO_LE) {
+                tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val, 0, 32);
+                tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt2), tcg_ctx->cpu_exclusive_val, 32, 32);
+            } else {
+                tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val, 32, 32);
+                tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt2), tcg_ctx->cpu_exclusive_val, 0, 32);
+            }
+        } else {
+            /* The pair must be single-copy atomic for *each* doubleword,
+               but not the entire quadword. */
+            memop |= MO_64;
+            tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_exclusive_val, addr, idx, memop);
+
+            TCGv_i64 addr2 = tcg_temp_new_i64(tcg_ctx);
+            tcg_gen_addi_i64(tcg_ctx, addr2, addr, 8);
+            tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_exclusive_high, addr2, idx, memop);
+            tcg_temp_free_i64(tcg_ctx, addr2);
+
+            tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val);
+            tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt2), tcg_ctx->cpu_exclusive_high);
+        }
+    } else {
+        memop |= size;
+        tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_exclusive_val, addr, idx, memop);
+        tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val);
     }
 
-    tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, tmp);
-    tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt), tmp);
-
-    tcg_temp_free_i64(tcg_ctx, tmp);
     tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, addr);
 }
 
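For the 64-bit pair (size == 3), the branch above instead issues two independent doubleword loads, so each doubleword is single-copy atomic while the 128-bit pair as a whole is not, and both halves stay in cpu_exclusive_val and cpu_exclusive_high for the store side. A comparable C11 sketch, again with hypothetical names rather than the real TCG helpers:

/* Hypothetical analogue of the size == 3 load-exclusive branch: two
 * independent single-copy-atomic doubleword loads at addr and addr + 8. */
#include <stdatomic.h>
#include <stdint.h>

static void paired_load_exclusive_64(_Atomic uint64_t *addr,
                                     uint64_t *rt, uint64_t *rt2,
                                     uint64_t *exclusive_val,
                                     uint64_t *exclusive_high)
{
    /* Each doubleword is atomic on its own; the quadword is not. */
    *exclusive_val  = atomic_load_explicit(&addr[0], memory_order_relaxed);
    *exclusive_high = atomic_load_explicit(&addr[1], memory_order_relaxed);

    /* The destination registers simply mirror the remembered values. */
    *rt  = *exclusive_val;
    *rt2 = *exclusive_high;
}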
@@ -1954,14 +1968,15 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
 
     if (is_pair) {
         if (size == 2) {
-            TCGv_i64 val = tcg_temp_new_i64(tcg_ctx);
-            tcg_gen_concat32_i64(tcg_ctx, tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
-            tcg_gen_concat32_i64(tcg_ctx, val, tcg_ctx->cpu_exclusive_val, tcg_ctx->cpu_exclusive_high);
-            tcg_gen_atomic_cmpxchg_i64(tcg_ctx, tmp, addr, val, tmp,
+            if (s->be_data == MO_LE) {
+                tcg_gen_concat32_i64(tcg_ctx, tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
+            } else {
+                tcg_gen_concat32_i64(tcg_ctx, tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
+            }
+            tcg_gen_atomic_cmpxchg_i64(tcg_ctx, tmp, addr, tcg_ctx->cpu_exclusive_val, tmp,
                                        get_mem_index(s),
                                        MO_64 | MO_ALIGN | s->be_data);
-            tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, tmp, tmp, val);
-            tcg_temp_free_i64(tcg_ctx, val);
+            tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, tmp, tmp, tcg_ctx->cpu_exclusive_val);
         } else if (s->be_data == MO_LE) {
             gen_helper_paired_cmpxchg64_le(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, cpu_reg(s, rt),
                                            cpu_reg(s, rt2));
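With the full 64-bit value already sitting in cpu_exclusive_val, the size == 2 store-exclusive no longer has to reassemble a comparison value from two halves: the new register pair is concatenated and compare-and-swapped against the remembered value in one 64-bit operation, and the NE setcond turns that into the 0/1 status result. A minimal C11 sketch of that path, with hypothetical names, a bool standing in for s->be_data, and leaving out the surrounding check that the address matches cpu_exclusive_addr:

/* Hypothetical analogue of the size == 2 store-exclusive path: one 64-bit
 * compare-and-swap against the value remembered by load-exclusive.
 * Returns 0 on success, 1 on failure (mirroring the TCG_COND_NE setcond). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t paired_store_exclusive_32(_Atomic uint64_t *addr,
                                          uint64_t exclusive_val,
                                          uint32_t rt, uint32_t rt2,
                                          bool big_endian)
{
    /* Concatenate the two registers; which one lands in the low half
     * depends on the data endianness, as with tcg_gen_concat32_i64. */
    uint64_t newval = big_endian ? ((uint64_t)rt << 32) | rt2
                                 : ((uint64_t)rt2 << 32) | rt;

    uint64_t expected = exclusive_val;
    /* The store only happens if memory still holds the exclusive value. */
    bool ok = atomic_compare_exchange_strong_explicit(
        addr, &expected, newval, memory_order_seq_cst, memory_order_seq_cst);
    return ok ? 0 : 1;
}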