target/arm: [tcg] Port to DisasContextBase

Incrementally paves the way towards using the generic
instruction translation loop.

Backports commit dcba3a8d443842f7a30a2c52d50a6b50b6982b35 from qemu
Lluís Vilanova 2018-03-04 17:54:11 -05:00 committed by Lioncash
parent c40f5eb73e
commit 8581e6f6fe
3 changed files with 186 additions and 126 deletions
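
The change follows the pattern QEMU uses for its generic ("translator") loop: the target-specific DisasContext embeds a target-independent DisasContextBase, and the shared fields (is_jmp, num_insns, tb, pc_first, singlestep_enabled, ...) move into that base, which is why every s->is_jmp below becomes s->base.is_jmp. A minimal, self-contained sketch of the pattern follows; the struct layouts, field lists and helper names here are trimmed illustrations, not the real QEMU definitions.

    /* Sketch of the embed-a-base-context pattern; types are stand-ins. */
    #include <stddef.h>
    #include <stdio.h>

    typedef struct DisasContextBase {
        /* generic, target-independent translation state */
        int is_jmp;
        int num_insns;
        int singlestep_enabled;
    } DisasContextBase;

    typedef struct DisasContext {
        DisasContextBase base;   /* embedded base; QEMU places it first by convention */
        unsigned long pc;        /* target-specific state keeps living alongside it */
    } DisasContext;

    /* Same idea as QEMU's container_of(): recover the derived context
     * from a pointer to the embedded base member. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Generic code only ever sees the base... */
    static void generic_step(DisasContextBase *db)
    {
        db->num_insns++;
    }

    /* ...while target code recovers its own context and touches both parts. */
    static void target_step(DisasContextBase *db)
    {
        DisasContext *dc = container_of(db, DisasContext, base);
        dc->pc += 4;
        dc->base.is_jmp = 1;     /* stands in for a DISAS_* value */
    }

    int main(void)
    {
        DisasContext dc = { .base = { 0, 0, 0 }, .pc = 0x1000 };
        generic_step(&dc.base);
        target_step(&dc.base);
        printf("pc=%#lx insns=%d is_jmp=%d\n",
               dc.pc, dc.base.num_insns, dc.base.is_jmp);
        return 0;
    }
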

target/arm/translate-a64.c

@@ -320,7 +320,7 @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
 {
     gen_a64_set_pc_im(s, s->pc - offset);
     gen_exception_internal(s, excp);
-    s->is_jmp = DISAS_NORETURN;
+    s->base.is_jmp = DISAS_NORETURN;
 }
 
 static void gen_exception_insn(DisasContext *s, int offset, int excp,
@@ -328,7 +328,7 @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
 {
     gen_a64_set_pc_im(s, s->pc - offset);
     gen_exception(s, excp, syndrome, target_el);
-    s->is_jmp = DISAS_NORETURN;
+    s->base.is_jmp = DISAS_NORETURN;
 }
 
 static void gen_ss_advance(DisasContext *s)
@@ -357,7 +357,7 @@ static void gen_step_complete_exception(DisasContext *s)
     gen_ss_advance(s);
     gen_exception(s, EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                   default_exception_el(s));
-    s->is_jmp = DISAS_NORETURN;
+    s->base.is_jmp = DISAS_NORETURN;
 }
 
 static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
@@ -365,13 +365,13 @@ static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
     /* No direct tb linking with singlestep (either QEMU's or the ARM
      * debug architecture kind) or deterministic io
      */
-    if (s->singlestep_enabled || s->ss_active || (s->tb->cflags & CF_LAST_IO)) {
+    if (s->base.singlestep_enabled || s->ss_active || (s->base.tb->cflags & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Only link tbs from inside the same guest page */
-    if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
+    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif
@@ -384,21 +384,21 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
     TranslationBlock *tb;
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
 
-    tb = s->tb;
+    tb = s->base.tb;
     if (use_goto_tb(s, n, dest)) {
         tcg_gen_goto_tb(tcg_ctx, n);
         gen_a64_set_pc_im(s, dest);
         tcg_gen_exit_tb(tcg_ctx, (intptr_t)tb + n);
-        s->is_jmp = DISAS_NORETURN;
+        s->base.is_jmp = DISAS_NORETURN;
     } else {
         gen_a64_set_pc_im(s, dest);
         if (s->ss_active) {
             gen_step_complete_exception(s);
-        } else if (s->singlestep_enabled) {
+        } else if (s->base.singlestep_enabled) {
             gen_exception_internal(s, EXCP_DEBUG);
         } else {
             tcg_gen_lookup_and_goto_ptr(tcg_ctx, tcg_ctx->cpu_pc);
-            s->is_jmp = DISAS_NORETURN;
+            s->base.is_jmp = DISAS_NORETURN;
         }
     }
 }
@@ -1371,16 +1371,16 @@ static void handle_hint(DisasContext *s, uint32_t insn,
     case 0: /* NOP */
         return;
     case 3: /* WFI */
-        s->is_jmp = DISAS_WFI;
+        s->base.is_jmp = DISAS_WFI;
         return;
     case 1: /* YIELD */
         if (!s->uc->parallel_cpus) {
-            s->is_jmp = DISAS_YIELD;
+            s->base.is_jmp = DISAS_YIELD;
         }
         return;
     case 2: /* WFE */
         if (!s->uc->parallel_cpus) {
-            s->is_jmp = DISAS_WFE;
+            s->base.is_jmp = DISAS_WFE;
         }
         return;
     case 4: /* SEV */
@@ -1467,7 +1467,7 @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
         tcg_temp_free_i32(tcg_ctx, tcg_op);
         /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
         gen_a64_set_pc_im(s, s->pc);
-        s->is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
+        s->base.is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
         break;
     }
     default:
@@ -1603,6 +1603,13 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         break;
     }
 
+    // Unicorn: if'd out
+#if 0
+    if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+        gen_io_start();
+    }
+#endif
+
     tcg_rt = cpu_reg(s, rt);
 
     if (isread) {
@@ -1630,12 +1637,17 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         }
     }
 
-    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
+    if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+        /* I/O operations must end the TB here (whether read or write) */
+        // Unicorn: commented out
+        //gen_io_end();
+        s->base.is_jmp = DISAS_UPDATE;
+    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
         /* We default to ending the TB on a coprocessor register write,
          * but allow this to be suppressed by the register definition
          * (usually only necessary to work around guest bugs).
          */
-        s->is_jmp = DISAS_UPDATE;
+        s->base.is_jmp = DISAS_UPDATE;
     }
 }
 
@@ -1832,7 +1844,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
         }
         gen_helper_exception_return(tcg_ctx, tcg_ctx->cpu_env);
         /* Must exit loop to check un-masked IRQs */
-        s->is_jmp = DISAS_EXIT;
+        s->base.is_jmp = DISAS_EXIT;
         return;
     case 5: /* DRPS */
         if (rn != 0x1f) {
@@ -1846,7 +1858,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
         return;
     }
 
-    s->is_jmp = DISAS_JUMP;
+    s->base.is_jmp = DISAS_JUMP;
 }
 
 /* C3.2 Branches, exception generating and system instructions */
@@ -11382,7 +11394,7 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
     // Unicorn: end address tells us to stop emulation
     if (s->pc == s->uc->addr_end) {
         // imitate WFI instruction to halt emulation
-        s->is_jmp = DISAS_WFI;
+        s->base.is_jmp = DISAS_WFI;
         return;
     }
 
@@ -11432,26 +11444,27 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
     free_tmp_a64(s);
 }
 
-void gen_intermediate_code_a64(CPUState *cs, TranslationBlock *tb)
+void gen_intermediate_code_a64(DisasContextBase *dcbase, CPUState *cs,
+                               TranslationBlock *tb)
 {
     CPUARMState *env = cs->env_ptr;
     ARMCPU *cpu = arm_env_get_cpu(env);
-    DisasContext dc1, *dc = &dc1;
-    target_ulong pc_start;
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
     target_ulong next_page_start;
-    int num_insns;
     int max_insns;
     TCGContext *tcg_ctx = env->uc->tcg_ctx;
     bool block_full = false;
 
-    pc_start = tb->pc;
+    dc->base.tb = tb;
+    dc->base.pc_first = dc->base.tb->pc;
+    dc->base.pc_next = dc->base.pc_first;
+    dc->base.is_jmp = DISAS_NEXT;
+    dc->base.num_insns = 0;
+    dc->base.singlestep_enabled = cs->singlestep_enabled;
 
     dc->uc = env->uc;
-    dc->tb = tb;
 
-    dc->is_jmp = DISAS_NEXT;
-    dc->pc = pc_start;
-    dc->singlestep_enabled = cs->singlestep_enabled;
+    dc->pc = dc->base.pc_first;
     dc->condjmp = 0;
 
     dc->aarch64 = 1;
@@ -11462,17 +11475,17 @@ void gen_intermediate_code_a64(CPUState *cs, TranslationBlock *tb)
                                !arm_el_is_aa64(env, 3);
     dc->thumb = 0;
     dc->sctlr_b = 0;
-    dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
+    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
-    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(tb->flags));
-    dc->tbi0 = ARM_TBFLAG_TBI0(tb->flags);
-    dc->tbi1 = ARM_TBFLAG_TBI1(tb->flags);
+    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
+    dc->tbi0 = ARM_TBFLAG_TBI0(dc->base.tb->flags);
+    dc->tbi1 = ARM_TBFLAG_TBI1(dc->base.tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
-    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
+    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = cpu->cp_regs;
@@ -11493,16 +11506,15 @@ void gen_intermediate_code_a64(CPUState *cs, TranslationBlock *tb)
     * emit code to generate a software step exception
     * end the TB
     */
-    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
-    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
+    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
+    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);

    init_tmp_a64_array(dc);

-    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
-    num_insns = 0;
-    max_insns = tb->cflags & CF_COUNT_MASK;
+    next_page_start = (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+    max_insns = dc->base.tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
@@ -11516,18 +11528,18 @@ void gen_intermediate_code_a64(CPUState *cs, TranslationBlock *tb)
    if (tb->pc == env->uc->addr_end) {
        // imitate WFI instruction to halt emulation
        gen_tb_start(tcg_ctx, tb);
-        dc->is_jmp = DISAS_WFI;
+        dc->base.is_jmp = DISAS_WFI;
        goto tb_end;
    }

    // Unicorn: trace this block on request
    // Only hook this block if it is not broken from previous translation due to
    // full translation cache
-    if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) {
+    if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, dc->base.pc_first)) {
        // save block address to see if we need to patch block size later
-        env->uc->block_addr = pc_start;
+        env->uc->block_addr = dc->base.pc_first;
        env->uc->size_arg = tcg_ctx->gen_op_buf[tcg_ctx->gen_op_buf[0].prev].args;
-        gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start);
+        gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, dc->base.pc_first);
    } else {
        env->uc->size_arg = -1;
    }
@@ -11535,27 +11547,36 @@ void gen_intermediate_code_a64(CPUState *cs, TranslationBlock *tb)
    gen_tb_start(tcg_ctx, tb);

    do {
+        dc->base.num_insns++;
        dc->insn_start_idx = tcg_op_buf_count(tcg_ctx);
        tcg_gen_insn_start(tcg_ctx, dc->pc, 0, 0);
-        num_insns++;

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
-                    gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+                    if (bp->flags & BP_CPU) {
+                        gen_a64_set_pc_im(dc, dc->pc);
+                        gen_helper_check_breakpoints(tcg_ctx, tcg_ctx->cpu_env);
+                        /* End the TB early; it likely won't be executed */
+                        dc->base.is_jmp = DISAS_UPDATE;
+                    } else {
+                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
-                           included in [tb->pc, tb->pc + tb->size) in order
+                           included in [dc->base.tb->pc, dc->base.tb->pc + dc->base.tb->size) in order
                           to for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
-                           tb->size below does the right thing. */
-                        dc->pc += 2;
+                           dc->base.tb->size below does the right thing. */
+                        dc->pc += 4;
                        goto done_generating;
+                    }
+                    break;
                }
            }
        }

-        //if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+        //if (dc->base.num_insns == max_insns && (dc->base.tb->cflags & CF_LAST_IO)) {
        //    gen_io_start();
        //}
@@ -11570,10 +11591,10 @@ void gen_intermediate_code_a64(CPUState *cs, TranslationBlock *tb)
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
-            assert(num_insns == 1);
+            assert(dc->base.num_insns == 1);
            gen_exception(dc, EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
-            dc->is_jmp = DISAS_NORETURN;
+            dc->base.is_jmp = DISAS_NORETURN;
            break;
        }

@@ -11589,18 +11610,18 @@ void gen_intermediate_code_a64(CPUState *cs, TranslationBlock *tb)
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.
         */
-    } while (!dc->is_jmp && !tcg_op_buf_full(tcg_ctx) &&
+    } while (!dc->base.is_jmp && !tcg_op_buf_full(tcg_ctx) &&
             !cs->singlestep_enabled &&
             !dc->ss_active &&
             dc->pc < next_page_start &&
-             num_insns < max_insns);
+             dc->base.num_insns < max_insns);

    /* if too long translation, save this info */
-    if (tcg_op_buf_full(tcg_ctx) || num_insns >= max_insns) {
+    if (tcg_op_buf_full(tcg_ctx) || dc->base.num_insns >= max_insns) {
        block_full = true;
    }

-    //if (tb->cflags & CF_LAST_IO) {
+    //if (dc->base.tb->cflags & CF_LAST_IO) {
    //    gen_io_end();
    //}
@@ -11611,7 +11632,7 @@ tb_end:
     * gen_goto_tb() has already handled emitting the debug exception
     * (and thus a tb-jump is not possible when singlestepping).
     */
-    switch (dc->is_jmp) {
+    switch (dc->base.is_jmp) {
    default:
        gen_a64_set_pc_im(dc, dc->pc);
        /* fall through */
@@ -11626,7 +11647,7 @@ tb_end:
            break;
        }
    } else {
-        switch (dc->is_jmp) {
+        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
@@ -11666,10 +11687,24 @@ tb_end:
    }

done_generating:
-    gen_tb_end(tcg_ctx, tb, num_insns);
+    gen_tb_end(tcg_ctx, tb, dc->base.num_insns);

-    tb->size = dc->pc - pc_start;
-    tb->icount = num_insns;
+    // Unicorn: commented out
+#if 0
+    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
+        qemu_log_in_addr_range(dc->base.pc_first)) {
+        qemu_log_lock();
+        qemu_log("----------------\n");
+        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
+        log_target_disas(cs, dc->base.pc_first, dc->pc - dc->base.pc_first,
+                         4 | (bswap_code(dc->sctlr_b) ? 2 : 0));
+        qemu_log("\n");
+        qemu_log_unlock();
+    }
+#endif
+
+    dc->base.tb->size = dc->pc - dc->base.pc_first;
+    dc->base.tb->icount = dc->base.num_insns;

    env->uc->block_full = block_full;
}

target/arm/translate.c

@@ -214,7 +214,7 @@ static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(tcg_ctx, var, var, s->thumb ? ~1 : ~3);
-        s->is_jmp = DISAS_JUMP;
+        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_R[reg], var);
    tcg_temp_free_i32(tcg_ctx, var);
@@ -291,7 +291,7 @@ static void gen_step_complete_exception(DisasContext *s)
    gen_ss_advance(s);
    gen_exception(s, EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
-    s->is_jmp = DISAS_NORETURN;
+    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
@@ -315,7 +315,7 @@ static inline bool is_singlestepping(DisasContext *s)
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
-    return s->singlestep_enabled || s->ss_active;
+    return s->base.singlestep_enabled || s->ss_active;
}

static void gen_smul_dual(DisasContext *s, TCGv_i32 a, TCGv_i32 b)
@@ -955,7 +955,7 @@ static inline void gen_bx_im(DisasContext *s, uint32_t addr)
    TCGv_i32 tmp;
    TCGContext *tcg_ctx = s->uc->tcg_ctx;

-    s->is_jmp = DISAS_JUMP;
+    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32(tcg_ctx);
        tcg_gen_movi_i32(tcg_ctx, tmp, addr & 1);
@@ -970,7 +970,7 @@ static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;

-    s->is_jmp = DISAS_JUMP;
+    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_R[15], var, ~1);
    tcg_gen_andi_i32(tcg_ctx, var, var, 1);
    store_cpu_field(tcg_ctx, var, thumb);
@@ -984,11 +984,11 @@ static inline void gen_bx(DisasContext *s, TCGv_i32 var)
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
-     * s->is_jmp that we need to do the rest of the work later.
+     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
-        s->is_jmp = DISAS_BX_EXCRET;
+        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

@@ -1198,7 +1198,7 @@ static inline void gen_hvc(DisasContext *s, int imm16)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
-    s->is_jmp = DISAS_HVC;
+    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
@@ -1214,7 +1214,7 @@ static inline void gen_smc(DisasContext *s)
    gen_helper_pre_smc(tcg_ctx, tcg_ctx->cpu_env, tmp);
    tcg_temp_free_i32(tcg_ctx, tmp);
    gen_set_pc_im(s, s->pc);
-    s->is_jmp = DISAS_SMC;
+    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
@@ -1222,7 +1222,7 @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(s, excp);
-    s->is_jmp = DISAS_NORETURN;
+    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
@@ -1231,7 +1231,7 @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(s, excp, syn, target_el);
-    s->is_jmp = DISAS_NORETURN;
+    s->base.is_jmp = DISAS_NORETURN;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
@@ -1239,7 +1239,7 @@ static inline void gen_lookup_tb(DisasContext *s)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[15], s->pc & ~1);
-    s->is_jmp = DISAS_EXIT;
+    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
@@ -4250,7 +4250,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
-    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
+    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
@@ -4278,12 +4278,12 @@ static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(tcg_ctx, n);
        gen_set_pc_im(s, dest);
-        tcg_gen_exit_tb(tcg_ctx, (uintptr_t)s->tb + n);
+        tcg_gen_exit_tb(tcg_ctx, (uintptr_t)s->base.tb + n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr(s);
    }
-    s->is_jmp = DISAS_NORETURN;
+    s->base.is_jmp = DISAS_NORETURN;
}

static inline void gen_jmp(DisasContext *s, uint32_t dest)
@@ -4567,7 +4567,7 @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
    tcg_temp_free_i32(tcg_ctx, tcg_tgtmode);
    tcg_temp_free_i32(tcg_ctx, tcg_regno);
    tcg_temp_free_i32(tcg_ctx, tcg_reg);
-    s->is_jmp = DISAS_UPDATE;
+    s->base.is_jmp = DISAS_UPDATE;
}

static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
@@ -4590,7 +4590,7 @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
    tcg_temp_free_i32(tcg_ctx, tcg_tgtmode);
    tcg_temp_free_i32(tcg_ctx, tcg_regno);
    store_reg(s, rn, tcg_reg);
-    s->is_jmp = DISAS_UPDATE;
+    s->base.is_jmp = DISAS_UPDATE;
}

/* Store value to PC as for an exception return (ie don't
@@ -4618,7 +4618,7 @@ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
    gen_helper_cpsr_write_eret(tcg_ctx, tcg_ctx->cpu_env, cpsr);
    tcg_temp_free_i32(tcg_ctx, cpsr);
    /* Must exit loop to check un-masked IRQs */
-    s->is_jmp = DISAS_EXIT;
+    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
@@ -4641,17 +4641,17 @@ static void gen_nop_hint(DisasContext *s, int val)
    case 1: /* yield */
        if (!s->uc->parallel_cpus) {
            gen_set_pc_im(s, s->pc);
-            s->is_jmp = DISAS_YIELD;
+            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
-        s->is_jmp = DISAS_WFI;
+        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!s->uc->parallel_cpus) {
            gen_set_pc_im(s, s->pc);
-            s->is_jmp = DISAS_WFE;
+            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
@@ -7809,12 +7809,19 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
                return 1;
            }
            gen_set_pc_im(s, s->pc);
-            s->is_jmp = DISAS_WFI;
+            s->base.is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

+        // Unicorn: if'd out
+#if 0
+        if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+            gen_io_start();
+        }
+#endif
+
        if (isread) {
            /* Read */
            if (is64) {
@@ -7902,7 +7909,12 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
            }
        }

-        if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
+        if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+            /* I/O operations must end the TB here (whether read or write) */
+            // Unicorn: commented out
+            //gen_io_end();
+            gen_lookup_tb(s);
+        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
@@ -8220,7 +8232,7 @@ static void gen_srs(DisasContext *s,
        tcg_temp_free_i32(tcg_ctx, tmp);
    }
    tcg_temp_free_i32(tcg_ctx, addr);
-    s->is_jmp = DISAS_UPDATE;
+    s->base.is_jmp = DISAS_UPDATE;
}

static void disas_arm_insn(DisasContext *s, unsigned int insn)  // qq
@@ -8317,7 +8329,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)  // qq
            /* setend */
            if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
                gen_helper_setend(tcg_ctx, tcg_ctx->cpu_env);
-                s->is_jmp = DISAS_UPDATE;
+                s->base.is_jmp = DISAS_UPDATE;
            }
            return;
        } else if ((insn & 0x0fffff00) == 0x057ff000) {
@@ -9691,7 +9703,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)  // qq
                gen_helper_cpsr_write_eret(tcg_ctx, tcg_ctx->cpu_env, tmp);
                tcg_temp_free_i32(tcg_ctx, tmp);
                /* Must exit loop to check un-masked IRQs */
-                s->is_jmp = DISAS_EXIT;
+                s->base.is_jmp = DISAS_EXIT;
            }
        }
        break;
@@ -9729,7 +9741,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)  // qq
        /* swi */
        gen_set_pc_im(s, s->pc);
        s->svc_imm = extract32(insn, 0, 24);
-        s->is_jmp = DISAS_SWI;
+        s->base.is_jmp = DISAS_SWI;
        break;
    default:
    illegal_op:
@@ -11214,7 +11226,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)  // qq
    // Unicorn: end address tells us to stop emulation
    if (s->pc == s->uc->addr_end) {
        // imitate WFI instruction to halt emulation
-        s->is_jmp = DISAS_WFI;
+        s->base.is_jmp = DISAS_WFI;
        return;
    }

@@ -11852,7 +11864,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)  // qq
            ARCH(6);
            if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                gen_helper_setend(tcg_ctx, tcg_ctx->cpu_env);
-                s->is_jmp = DISAS_UPDATE;
+                s->base.is_jmp = DISAS_UPDATE;
            }
            break;
        case 3:
@@ -11946,7 +11958,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)  // qq
        /* swi */
        gen_set_pc_im(s, s->pc);
        s->svc_imm = extract32(insn, 0, 8);
-        s->is_jmp = DISAS_SWI;
+        s->base.is_jmp = DISAS_SWI;
        break;
    }
    /* generate a conditional jump to next instruction */
@@ -12026,9 +12038,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);
    DisasContext dc1, *dc = &dc1;
-    target_ulong pc_start;
    target_ulong next_page_start;
-    int num_insns;
    int max_insns;
    bool end_of_page;
    TCGContext *tcg_ctx = env->uc->tcg_ctx;
@@ -12040,19 +12050,20 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
-        gen_intermediate_code_a64(cs, tb);
+        gen_intermediate_code_a64(&dc->base, cs, tb);
        return;
    }

-    pc_start = tb->pc;
+    dc->base.tb = tb;
+    dc->base.pc_first = tb->pc;
+    dc->base.pc_next = dc->base.pc_first;
+    dc->base.is_jmp = DISAS_NEXT;
+    dc->base.num_insns = 0;
+    dc->base.singlestep_enabled = cs->singlestep_enabled;

    dc->uc = env->uc;
-    dc->tb = tb;

-    dc->is_jmp = DISAS_NEXT;
-    dc->pc = pc_start;
-    dc->singlestep_enabled = cs->singlestep_enabled;
+    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
@@ -12109,8 +12120,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
    tcg_ctx->cpu_V1 = tcg_ctx->cpu_F1d;
    /* FIXME: tcg_ctx->cpu_M0 can probably be the same as tcg_ctx->cpu_V0. */
    tcg_ctx->cpu_M0 = tcg_temp_new_i64(tcg_ctx);
-    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
-    num_insns = 0;
+    next_page_start = (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
@@ -12125,18 +12135,18 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
    if (tb->pc == env->uc->addr_end) {
        // imitate WFI instruction to halt emulation
        gen_tb_start(tcg_ctx, tb);
-        dc->is_jmp = DISAS_WFI;
+        dc->base.is_jmp = DISAS_WFI;
        goto tb_end;
    }

    // Unicorn: trace this block on request
    // Only hook this block if it is not broken from previous translation due to
    // full translation cache
-    if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) {
+    if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, dc->base.pc_first)) {
        // save block address to see if we need to patch block size later
-        env->uc->block_addr = pc_start;
+        env->uc->block_addr = dc->base.pc_first;
        env->uc->size_arg = tcg_ctx->gen_op_buf[tcg_ctx->gen_op_buf[0].prev].args;
-        gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start);
+        gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, dc->base.pc_first);
    } else {
        env->uc->size_arg = -1;
    }
@@ -12182,11 +12192,11 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
        store_cpu_field(tcg_ctx, tmp, condexec_bits);
    }

    do {
+        dc->base.num_insns++;
        dc->insn_start_idx = tcg_op_buf_count(tcg_ctx);
        tcg_gen_insn_start(tcg_ctx, dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                           0);
-        num_insns++;

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
@@ -12197,7 +12207,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(tcg_ctx, tcg_ctx->cpu_env);
                        /* End the TB early; it's likely not going to be executed */
-                        dc->is_jmp = DISAS_UPDATE;
+                        dc->base.is_jmp = DISAS_UPDATE;
                    } else {
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
@@ -12215,7 +12225,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
            }
        }

-        //if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+        //if (dc->base.num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
        //    gen_io_start();
        //}

@@ -12225,7 +12235,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(dc, EXCP_KERNEL_TRAP);
-            dc->is_jmp = DISAS_NORETURN;
+            dc->base.is_jmp = DISAS_NORETURN;
            break;
        }
#endif
@@ -12241,10 +12251,11 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
-            assert(num_insns == 1);
+            assert(dc->base.num_insns == 1);
            gen_exception(dc, EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
-            goto done_generating;
+            dc->base.is_jmp = DISAS_NORETURN;
+            break;
        }

        if (dc->thumb) {  // qq
@@ -12263,7 +12274,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
            // end address tells us to stop emulation
            if (dc->pc == dc->uc->addr_end) {
                // imitate WFI instruction to halt emulation
-                dc->is_jmp = DISAS_WFI;
+                dc->base.is_jmp = DISAS_WFI;
            } else {
                insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
                dc->pc += 4;
@@ -12271,7 +12282,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
            }
        }

-        if (dc->condjmp && !dc->is_jmp) {
+        if (dc->condjmp && !dc->base.is_jmp) {
            gen_set_label(tcg_ctx, dc->condlabel);
            dc->condjmp = 0;
        }
@@ -12298,10 +12309,10 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));

-    } while (!dc->is_jmp && !tcg_op_buf_full(tcg_ctx) &&
+    } while (!dc->base.is_jmp && !tcg_op_buf_full(tcg_ctx) &&
             !is_singlestepping(dc) &&
             !end_of_page &&
-             num_insns < max_insns);
+             dc->base.num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
@@ -12313,7 +12324,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
    }

    /* if too long translation, save this info */
-    if (tcg_op_buf_full(tcg_ctx) || num_insns >= max_insns) {
+    if (tcg_op_buf_full(tcg_ctx) || dc->base.num_insns >= max_insns) {
        block_full = true;
    }

@@ -12323,7 +12334,7 @@ tb_end:
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
-    if (dc->is_jmp == DISAS_BX_EXCRET) {
+    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
@@ -12332,7 +12343,7 @@ tb_end:
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
-        switch (dc->is_jmp) {
+        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(dc, EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
@@ -12366,7 +12377,7 @@ tb_end:
         - Hardware watchpoints.
       Hardware breakpoints have already been handled and skip this code.
     */
-    switch(dc->is_jmp) {
+    switch(dc->base.is_jmp) {
    case DISAS_NEXT:
        gen_goto_tb(dc, 1, dc->pc);
        break;
@@ -12422,10 +12433,24 @@ tb_end:
    }

done_generating:
-    gen_tb_end(tcg_ctx, tb, num_insns);
+    gen_tb_end(tcg_ctx, tb, dc->base.num_insns);

-    tb->size = dc->pc - pc_start;
-    tb->icount = num_insns;
+    // Unicorn: commented out
+#if 0
+    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
+        qemu_log_in_addr_range(dc->base.pc_first)) {
+        qemu_log_lock();
+        qemu_log("----------------\n");
+        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
+        log_target_disas(cs, dc->base.pc_first, dc->pc - dc->base.pc_first,
+                         dc->thumb | (dc->sctlr_b << 1));
+        qemu_log("\n");
+        qemu_log_unlock();
+    }
+#endif
+
+    tb->size = dc->pc - dc->base.pc_first;
+    tb->icount = dc->base.num_insns;

    env->uc->block_full = block_full;
}

target/arm/translate.h

@@ -5,9 +5,9 @@

/* internal defines */
typedef struct DisasContext {
+    DisasContextBase base;
    target_ulong pc;
    uint32_t insn;
-    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
@@ -15,8 +15,6 @@ typedef struct DisasContext {
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
-    struct TranslationBlock *tb;
-    int singlestep_enabled;
    int thumb;
    int sctlr_b;
    TCGMemOp be_data;
@@ -149,14 +147,16 @@ static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)

#ifdef TARGET_AARCH64
void a64_translate_init(struct uc_struct *uc);
-void gen_intermediate_code_a64(CPUState *cpu, TranslationBlock *tb);
+void gen_intermediate_code_a64(DisasContextBase *db, CPUState *cpu,
+                               TranslationBlock *tb);
void gen_a64_set_pc_im(DisasContext *s, uint64_t val);
#else
static inline void a64_translate_init(struct uc_struct *uc)
{
}

-static inline void gen_intermediate_code_a64(CPUState *cpu, TranslationBlock *tb)
+static inline void gen_intermediate_code_a64(DisasContextBase *db, CPUState *cpu,
+                                             TranslationBlock *tb)
{
}
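
For reference, the base-structure fields this backport starts relying on, reconstructed purely from the dc->base.* accesses in the diffs above; the authoritative definition lives in QEMU's include/exec/translator.h, and the exact types, field order and any additional members there may differ from this sketch.

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t target_ulong;                    /* stand-in; per-target in QEMU */
    typedef struct TranslationBlock TranslationBlock; /* opaque here */

    /* Illustrative reconstruction, not the real header. */
    typedef struct DisasContextBase {
        TranslationBlock *tb;     /* set from the tb argument; ->pc/->flags/->cflags read above */
        target_ulong pc_first;    /* first PC of the TB being translated */
        target_ulong pc_next;     /* initialised to pc_first in this commit */
        int is_jmp;               /* holds the DISAS_* reason the TB ends */
        int num_insns;            /* replaces the local num_insns counters */
        bool singlestep_enabled;  /* copied from cs->singlestep_enabled */
    } DisasContextBase;
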