mirror of
https://github.com/yuzu-emu/unicorn.git
synced 2024-12-23 14:45:48 +00:00
tcg: check CF_PARALLEL instead of parallel_cpus
This decouples the resulting translated code from the current state of the system. Because the tb->cflags field is not passed to the TCG generation functions, we add a field to TCGContext that stores a copy of tb->cflags. Most architectures have <= 32 registers, which leaves a 4-byte hole in TCGContext; use this hole for the new field. Backports commit e82d5a2460b0e176128027651ff9b104e4bdf5cc from qemu
This commit is contained in:
parent
915a8a92c8
commit
f593db445a
|
@ -1335,6 +1335,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
|||
tb->cs_base = cs_base;
|
||||
tb->flags = flags;
|
||||
tb->cflags = cflags;
|
||||
tb->trace_vcpu_dstate = 0;
|
||||
tcg_ctx->tb_cflags = cflags;
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
tcg_ctx->tb_count1++; /* includes aborted translations because of
|
||||
|
|
|
@ -97,7 +97,7 @@ void tcg_gen_op6(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2,
|
|||
|
||||
void tcg_gen_mb(TCGContext *ctx, TCGBar mb_type)
|
||||
{
|
||||
if (ctx->uc->parallel_cpus) {
|
||||
if (ctx->tb_cflags & CF_PARALLEL) {
|
||||
tcg_gen_op1(ctx, INDEX_op_mb, mb_type);
|
||||
}
|
||||
}
|
||||
|
@ -2819,7 +2819,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGContext *s,
|
|||
{
|
||||
memop = tcg_canonicalize_memop(memop, 0, 0);
|
||||
|
||||
if (!s->uc->parallel_cpus) {
|
||||
if (!(s->tb_cflags & CF_PARALLEL)) {
|
||||
TCGv_i32 t1 = tcg_temp_new_i32(s);
|
||||
TCGv_i32 t2 = tcg_temp_new_i32(s);
|
||||
|
||||
|
@ -2864,7 +2864,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGContext *s,
|
|||
{
|
||||
memop = tcg_canonicalize_memop(memop, 1, 0);
|
||||
|
||||
if (!s->uc->parallel_cpus) {
|
||||
if (!(s->tb_cflags & CF_PARALLEL)) {
|
||||
TCGv_i64 t1 = tcg_temp_new_i64(s);
|
||||
TCGv_i64 t2 = tcg_temp_new_i64(s);
|
||||
|
||||
|
@ -3071,7 +3071,7 @@ GEN_ATOMIC_TABLE(NAME) \
|
|||
void tcg_gen_atomic_##NAME##_i32 \
|
||||
(TCGContext *s, TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
|
||||
{ \
|
||||
if (s->uc->parallel_cpus) { \
|
||||
if (s->tb_cflags & CF_PARALLEL) { \
|
||||
do_atomic_op_i32(s, ret, addr, val, idx, memop, table_##NAME); \
|
||||
} else { \
|
||||
do_nonatomic_op_i32(s, ret, addr, val, idx, memop, NEW, \
|
||||
|
@ -3081,7 +3081,7 @@ void tcg_gen_atomic_##NAME##_i32 \
|
|||
void tcg_gen_atomic_##NAME##_i64 \
|
||||
(TCGContext *s, TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
|
||||
{ \
|
||||
if (s->uc->parallel_cpus) { \
|
||||
if (s->tb_cflags & CF_PARALLEL) { \
|
||||
do_atomic_op_i64(s, ret, addr, val, idx, memop, table_##NAME); \
|
||||
} else { \
|
||||
do_nonatomic_op_i64(s, ret, addr, val, idx, memop, NEW, \
|
||||
|
|
|
@ -731,6 +731,7 @@ struct TCGContext {
|
|||
uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
|
||||
|
||||
TCGRegSet reserved_regs;
|
||||
uint32_t tb_cflags; /* cflags of the current TB */
|
||||
intptr_t current_frame_offset;
|
||||
intptr_t frame_start;
|
||||
intptr_t frame_end;
|
||||
|
|
Loading…
Reference in a new issue