exec: Backport tb_cflags accessor

Lioncash 2019-04-22 06:07:36 -04:00
parent 9f0e469142
commit d844d7cc9d
9 changed files with 41 additions and 35 deletions


@@ -134,7 +134,7 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
             tb->page_addr[0] == phys_page1 &&
             tb->cs_base == cs_base &&
             tb->flags == flags &&
-            !(atomic_read(&tb->cflags) & CF_INVALID)) {
+            !(tb_cflags(tb) & CF_INVALID)) {
             if (tb->page_addr[1] == -1) {
                 /* done, we have a match */
@@ -251,7 +251,7 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
         /* Check if translation buffer has been flushed */
         if (cpu->tb_flushed) {
             cpu->tb_flushed = false;
-        } else if (!(tb->cflags & CF_INVALID)) {
+        } else if (!(tb_cflags(tb) & CF_INVALID)) {
             tb_add_jump(last_tb, tb_exit, tb);
         }
     }


@@ -316,7 +316,7 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
 found:
     // UNICORN: Commented out
-    //if (reset_icount && (tb->cflags & CF_USE_ICOUNT)) {
+    //if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
     //    assert(use_icount);
     //    /* Reset the cycle counter to the start of the block
     //       and shift if to the number of actually executed instructions */
@@ -359,7 +359,7 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
     tb = tb_find_pc(env->uc, host_pc);
     if (tb) {
         cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
-        if (tb->cflags & CF_NOCACHE) {
+        if (tb_cflags(tb) & CF_NOCACHE) {
             /* one-shot translation, invalidate it immediately */
             tb_phys_invalidate(cpu->uc, tb, -1);
             tb_free(cpu->uc, tb);
@@ -1591,7 +1591,7 @@ void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, t
                 }
             }
             if (current_tb == tb &&
-                (current_tb->cflags & CF_COUNT_MASK) != 1) {
+                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                 /* If we are modifying the current TB, we must stop
                    its execution. We could be more precise by checking
                    that the modification is after the current PC, but it
@@ -1711,7 +1711,7 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
 #ifdef TARGET_HAS_PRECISE_SMC
         if (current_tb == tb &&
-            (current_tb->cflags & CF_COUNT_MASK) != 1) {
+            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
             /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
@@ -1870,7 +1870,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     cs_base = tb->cs_base;
     flags = tb->flags;
     tb_phys_invalidate(cpu->uc, tb, -1);
-    if (tb->cflags & CF_NOCACHE) {
+    if (tb_cflags(tb) & CF_NOCACHE) {
         if (tb->orig_tb) {
             /* Invalidate original TB if this TB was generated in
              * cpu_exec_nocache() */


@@ -47,7 +47,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
     db->uc->block_full = false;
     /* Instruction counting */
-    db->max_insns = db->tb->cflags & CF_COUNT_MASK;
+    db->max_insns = tb_cflags(db->tb) & CF_COUNT_MASK;
     if (db->max_insns == 0) {
         db->max_insns = CF_COUNT_MASK;
     }
@@ -124,7 +124,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
            done next -- either exiting this loop or locate the start of
            the next instruction. */
         if (db->num_insns == db->max_insns
-            && (db->tb->cflags & CF_LAST_IO)) {
+            && (tb_cflags(db->tb) & CF_LAST_IO)) {
             /* Accept I/O on the last instruction. */
             //gen_io_start();
             ops->translate_insn(db, cpu);


@@ -286,6 +286,12 @@ struct TranslationBlock {
     uintptr_t jmp_list_first;
 };

+/* Hide the atomic_read to make code a little easier on the eyes */
+static inline uint32_t tb_cflags(const TranslationBlock *tb)
+{
+    return atomic_read(&tb->cflags);
+}
+
 void tb_free(struct uc_struct *uc, TranslationBlock *tb);
 void tb_flush(CPUState *cpu);
 void tb_phys_invalidate(struct uc_struct *uc,
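
For reference, here is a minimal self-contained sketch of the pattern being backported, written as a plain-C analogue that uses C11 atomics in place of QEMU's atomic_read() macro. The struct layout and CF_* values below are illustrative stand-ins, not the real definitions from exec-all.h:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the real CF_* flag definitions. */
#define CF_COUNT_MASK 0x00007fffu
#define CF_INVALID    0x00080000u

typedef struct TranslationBlock {
    /* cflags can be updated by another thread (e.g. to mark a TB invalid),
     * so readers should go through an atomic load rather than a plain read. */
    _Atomic uint32_t cflags;
} TranslationBlock;

/* Same shape as the tb_cflags() helper added by this commit; the real one
 * wraps QEMU's atomic_read() instead of C11 atomic_load_explicit(). */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_load_explicit(&tb->cflags, memory_order_relaxed);
}

int main(void)
{
    TranslationBlock tb = { .cflags = 3 };

    /* Call sites test flags through the accessor instead of tb->cflags. */
    if (!(tb_cflags(&tb) & CF_INVALID)) {
        printf("count field: %u\n", (unsigned)(tb_cflags(&tb) & CF_COUNT_MASK));
    }
    return 0;
}

The call sites converted throughout this diff follow the same shape: read the flags once through tb_cflags() and test bits on the returned value, so the field itself is no longer dereferenced directly.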


@@ -32,7 +32,7 @@ tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
         tb->pc == *pc &&
         tb->cs_base == *cs_base &&
         tb->flags == *flags &&
-        !(atomic_read(&tb->cflags) & CF_INVALID))) {
+        !(tb_cflags(tb) & CF_INVALID))) {
         return tb;
     }
     tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags);


@@ -458,7 +458,7 @@ static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
     /* No direct tb linking with singlestep (either QEMU's or the ARM
      * debug architecture kind) or deterministic io
      */
-    if (s->base.singlestep_enabled || s->ss_active || (s->base.tb->cflags & CF_LAST_IO)) {
+    if (s->base.singlestep_enabled || s->ss_active || (tb_cflags(s->base.tb) & CF_LAST_IO)) {
         return false;
     }
@@ -1953,7 +1953,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         // Unicorn: if'd out
 #if 0
-        if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
             gen_io_start();
         }
 #endif
@@ -1985,7 +1985,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         }
     }
-    if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
         /* I/O operations must end the TB here (whether read or write) */
         // Unicorn: commented out
         //gen_io_end();
@@ -2543,7 +2543,7 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
         }
         tcg_temp_free_i64(tcg_ctx, cmp);
     // Unicorn: commented out as parallel context support isn't implemented
-    /* } else if (s->base.tb->cflags & CF_PARALLEL) {
+    /* } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
         TCGv_i32 tcg_rs = tcg_const_i32(tcg_ctx, rs);
         if (s->be_data == MO_LE) {


@@ -8882,7 +8882,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
         // Unicorn: if'd out
 #if 0
-        if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
             gen_io_start();
         }
 #endif
@@ -8974,7 +8974,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
             }
         }
-        if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
             /* I/O operations must end the TB here (whether read or write) */
             // Unicorn: commented out
             //gen_io_end();
@@ -13818,7 +13818,7 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
     DisasContext *dc = container_of(dcbase, DisasContext, base);
     TCGContext *tcg_ctx = cpu->uc->tcg_ctx;
-    if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
+    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
         /* FIXME: This can theoretically happen with self-modifying code. */
         cpu_abort(cpu, "IO on conditional branch instruction");
     }


@@ -8162,12 +8162,12 @@ case 0x101:
         }
         gen_update_cc_op(s);
         gen_jmp_im(s, pc_start - s->cs_base);
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             // Unicorn: commented out
             //gen_io_start();
         }
         gen_helper_rdtscp(tcg_ctx, cpu_env);
-        if (s->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             // Unicorn: commented out
             //gen_io_end();
             gen_jmp(s, s->pc - s->cs_base);
@@ -8535,7 +8535,7 @@ case 0x101:
         if (b & 2) {
             // Unicorn: if'd out
 #if 0
-            if (s->base.tb->cflags & CF_USE_ICOUNT) {
+            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                 gen_io_start();
             }
 #endif
@@ -8545,7 +8545,7 @@ case 0x101:
             // Unicorn: if'd out
 #if 0
-            if (s->base.tb->cflags & CF_USE_ICOUNT) {
+            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                 gen_io_end();
             }
 #endif
@@ -8554,14 +8554,14 @@ case 0x101:
         } else {
             // Unicorn: if'd out
 #if 0
-            if (s->base.tb->cflags & CF_USE_ICOUNT) {
+            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                 gen_io_start();
             }
 #endif
             gen_helper_read_crN(tcg_ctx, s->T0, cpu_env, tcg_const_i32(tcg_ctx, reg));
             gen_op_mov_reg_v(s, ot, rm, s->T0);
 #if 0
-            if (s->base.tb->cflags & CF_USE_ICOUNT) {
+            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                 gen_io_end();
             }
 #endif
@@ -9105,7 +9105,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
        record/replay modes and there will always be an
        additional step for ecx=0 when icount is enabled.
      */
-    dc->repz_opt = !dc->jmp_opt && !(dc->base.tb->cflags & CF_USE_ICOUNT);
+    dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
 #if 0
     /* check addseg logic */
     if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
@@ -9183,7 +9183,7 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
            the flag and abort the translation to give the irqs a
            chance to happen */
         dc->base.is_jmp = DISAS_TOO_MANY;
-    } else if ((dc->base.tb->cflags & CF_USE_ICOUNT)
+    } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
                && ((pc_next & TARGET_PAGE_MASK)
                    != ((pc_next + TARGET_MAX_INSN_SIZE - 1)
                        & TARGET_PAGE_MASK)


@@ -7100,11 +7100,11 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
         switch (sel) {
         case 0:
             /* Mark as an IO operation because we read the time. */
-            //if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+            //if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
             //    gen_io_start();
             //}
             gen_helper_mfc0_count(tcg_ctx, arg, tcg_ctx->cpu_env);
-            //if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+            //if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
             //    gen_io_end();
             //}
             /* Break the TB to be able to take timer interrupts immediately
@@ -7542,7 +7542,7 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
     if (sel != 0)
         check_insn(ctx, ISA_MIPS32);
-    //if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+    //if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
     //    gen_io_start();
     //}
@@ -8262,7 +8262,7 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
     (void)register_name; /* avoid a compiler warning */
     LOG_DISAS("mtc0 %s (reg %d sel %d)\n", register_name, reg, sel);
     /* For simplicity assume that all writes can cause interrupts. */
-    //if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+    //if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
     //    gen_io_end();
     //    /* BS_STOP isn't sufficient, we need to ensure we break out of
     //     * translated code to check for pending interrupts. */
@@ -8561,11 +8561,11 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
         switch (sel) {
         case 0:
             /* Mark as an IO operation because we read the time. */
-            //if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+            //if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
             //    gen_io_start();
             //}
             gen_helper_mfc0_count(tcg_ctx, arg, tcg_ctx->cpu_env);
-            //if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+            //if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
             //    gen_io_end();
             //}
             /* Break the TB to be able to take timer interrupts immediately
@@ -8989,7 +8989,7 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
     if (sel != 0)
         check_insn(ctx, ISA_MIPS64);
-    //if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+    //if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
     //    gen_io_start();
     //}
@@ -9697,7 +9697,7 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
     (void)register_name; /* avoid a compiler warning */
     LOG_DISAS("dmtc0 %s (reg %d sel %d)\n", register_name, reg, sel);
     /* For simplicity assume that all writes can cause interrupts. */
-    //if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+    //if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
     //    gen_io_end();
     //    /* DISAS_STOP isn't sufficient, we need to ensure we break out of
     //     * translated code to check for pending interrupts. */
@@ -12757,13 +12757,13 @@ static void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel)
     case 2:
         // Unicorn: if'd out
 #if 0
-        if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
 #endif
        gen_helper_rdhwr_cc(tcg_ctx, t0, tcg_ctx->cpu_env);
 #if 0
-        if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
        }
 #endif