target/arm: [tcg,a64] Port to breakpoint_check

Incrementally paves the way towards using the generic instruction translation
loop.

Backports commit 0cb56b373da70047979b61b042f59aaff4012e1b from qemu
This commit is contained in:
Lluís Vilanova 2018-03-04 19:34:02 -05:00 committed by Lioncash
parent 67e0d99080
commit 7b89c4c813
No known key found for this signature in database
GPG key ID: 4E3C3CC1031BA9C7

View file

@ -11515,6 +11515,31 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(tcg_ctx, dc->pc, 0, 0);
}
/*
 * Emit code for a breakpoint that matched at the current PC.
 *
 * CPU-internal breakpoints (BP_CPU) get a runtime check helper and end
 * the TB early; all other breakpoints raise EXCP_DEBUG immediately.
 * Always returns true: the breakpoint is fully handled here and the
 * translation loop must stop scanning the breakpoint list.
 */
static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                        const CPUBreakpoint *bp)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);
    TCGContext *tcg_ctx = cpu->uc->tcg_ctx;

    if (!(bp->flags & BP_CPU)) {
        gen_exception_internal_insn(s, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be included in
           [tb->pc, tb->pc + tb->size) for it to be properly cleared,
           so advance the PC here; the tb->size computation later then
           covers this instruction.  */
        s->pc += 4;
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(s, s->pc);
        gen_helper_check_breakpoints(tcg_ctx, tcg_ctx->cpu_env);
        /* End the TB early; it likely won't be executed.  */
        s->base.is_jmp = DISAS_TOO_MANY;
    }

    return true;
}
void gen_intermediate_code_a64(DisasContextBase *dcbase, CPUState *cs,
TranslationBlock *tb)
{
@ -11573,25 +11598,15 @@ void gen_intermediate_code_a64(DisasContextBase *dcbase, CPUState *cs,
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
CPUBreakpoint *bp;
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
if (bp->flags & BP_CPU) {
gen_a64_set_pc_im(dc, dc->pc);
gen_helper_check_breakpoints(tcg_ctx, tcg_ctx->cpu_env);
/* End the TB early; it likely won't be executed */
dc->base.is_jmp = DISAS_UPDATE;
} else {
gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
/* The address covered by the breakpoint must be
included in [dc->base.tb->pc, dc->base.tb->pc + dc->base.tb->size) in order
to for it to be properly cleared -- thus we
increment the PC here so that the logic setting
dc->base.tb->size below does the right thing. */
dc->pc += 4;
goto done_generating;
if (bp->pc == dc->base.pc_next) {
if (aarch64_tr_breakpoint_check(&dc->base, cs, bp)) {
break;
}
break;
}
}
if (dc->base.is_jmp > DISAS_TOO_MANY) {
break;
}
}
//if (dc->base.num_insns == max_insns && (dc->base.tb->cflags & CF_LAST_IO)) {
@ -11667,6 +11682,7 @@ tb_end:
} else {
switch (dc->base.is_jmp) {
case DISAS_NEXT:
case DISAS_TOO_MANY:
gen_goto_tb(dc, 1, dc->pc);
break;
case DISAS_JUMP:
@ -11704,7 +11720,6 @@ tb_end:
}
}
done_generating:
gen_tb_end(tcg_ctx, tb, dc->base.num_insns);
// Unicorn: commented out