tcg: Clean up direct block chaining safety checks

We don't take care of direct jumps when the address mapping changes. Thus we must be sure to generate direct jumps so that they always remain valid even if the address mapping changes. Luckily, we only allow a TB to execute if it was generated from pages which match the current mapping. Document the tcg_gen_goto_tb() declaration and note the reason for the destination PC limitations.

Some targets with variable-length instructions allow a TB to straddle a page boundary. However, when looking up TBs we make sure that both of a TB's pages match the current address mapping, so it is safe to do direct jumps into both pages. Correct the checks for some of those targets.

Given that, we can safely patch a TB which spans two pages. Remove the unnecessary check in cpu_exec() and allow such TBs to be patched.

Backports commit 5b053a4a28278bca606eeff7d1c0730df1b047e9 from qemu
commit 73c59faad5
parent 39d262f0d2
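For context, the safety argument above rests on the TB lookup path checking both of a TB's pages against the current mapping before the TB may run. A minimal self-contained sketch of that check (it paraphrases tb_find_physical() from this QEMU generation; lookup_phys_page() and the trimmed-down TranslationBlock are illustrative stand-ins, not the real definitions):

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(uint32_t)(TARGET_PAGE_SIZE - 1))

typedef struct {
    uint32_t pc;            /* guest virtual PC the TB was generated for */
    uint32_t page_addr[2];  /* physical pages the TB was translated from;
                               page_addr[1] == -1u for a single-page TB */
} TranslationBlock;

/* Stub mapping for illustration: identity, except virtual page 0x2000
 * has been remapped since the TB was translated. */
static uint32_t lookup_phys_page(uint32_t virt_page)
{
    return virt_page == 0x2000 ? 0x7000 : virt_page;
}

/* A cached TB may only run if every page it was translated from still maps
 * to the same physical page -- including the second page of a straddler. */
static int tb_pages_match(const TranslationBlock *tb)
{
    uint32_t virt_page1 = tb->pc & TARGET_PAGE_MASK;
    if (tb->page_addr[0] != lookup_phys_page(virt_page1)) {
        return 0;
    }
    if (tb->page_addr[1] != (uint32_t)-1) {
        if (tb->page_addr[1] != lookup_phys_page(virt_page1 + TARGET_PAGE_SIZE)) {
            return 0;
        }
    }
    return 1;
}

int main(void)
{
    TranslationBlock single   = { 0x1000, { 0x1000, (uint32_t)-1 } };
    TranslationBlock straddle = { 0x1ffc, { 0x1000, 0x2000 } };

    printf("single-page TB reusable: %d\n", tb_pages_match(&single));   /* 1 */
    printf("straddling TB reusable:  %d\n", tb_pages_match(&straddle)); /* 0 */
    return 0;
}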
@@ -198,11 +198,8 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
                     next_tb = 0;
                     tcg_ctx->tb_ctx.tb_invalidated_flag = 0;
                 }
-                /* see if we can patch the calling TB. When the TB
-                   spans two pages, we cannot safely do a direct
-                   jump. */
-                if (next_tb != 0 && tb->page_addr[1] == -1
-                    && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+                /* See if we can patch the calling TB. */
+                if (next_tb != 0 && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
                     tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                 next_tb & TB_EXIT_MASK, tb);
                 }
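A side note on the next_tb bookkeeping above: tcg_gen_exit_tb() is handed (uintptr_t)tb + n, so the value returned from generated code packs the calling TB's address with the jump-slot index in its low bits. A small self-contained decoding sketch (TB_EXIT_MASK's value is the one this QEMU generation uses; the struct is left opaque and the packed value is made up):

#include <stdint.h>
#include <stdio.h>

#define TB_EXIT_MASK 3  /* low bits of the value returned by generated code */

typedef struct TranslationBlock TranslationBlock;  /* opaque for the sketch */

/* Recover the calling TB from the packed value generated code returned. */
static TranslationBlock *exit_tb_ptr(uintptr_t next_tb)
{
    return (TranslationBlock *)(next_tb & ~(uintptr_t)TB_EXIT_MASK);
}

/* Recover which goto_tb slot (0 or 1) the previous TB left through; values
 * above 1 mark special exits that must not be chained. */
static unsigned exit_tb_slot(uintptr_t next_tb)
{
    return (unsigned)(next_tb & TB_EXIT_MASK);
}

int main(void)
{
    uintptr_t packed = (uintptr_t)0x55501000 + 1;  /* fake TB address, slot 1 */
    printf("tb=%p slot=%u\n", (void *)exit_tb_ptr(packed), exit_tb_slot(packed));
    return 0;
}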
@@ -4155,7 +4155,8 @@ static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
     TranslationBlock *tb;
 
     tb = s->tb;
-    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
+    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
+        ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
         tcg_gen_goto_tb(tcg_ctx, n);
         gen_set_pc_im(s, dest);
         tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + n);
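The added (s->pc - 1) term matters in this ARM hunk because a 32-bit Thumb-2 instruction, and hence the TB, can straddle a page boundary: s->pc points past the current instruction, so s->pc - 1 lands on the page holding the instruction's last byte, which is by construction one of the TB's pages. A self-contained worked example of the arithmetic (4 KiB pages assumed; the addresses are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_MASK (~(uint32_t)0xfff)  /* 4 KiB pages assumed */

int main(void)
{
    uint32_t tb_pc = 0x1ffc;  /* TB starts near the end of page 0x1000 */
    uint32_t s_pc  = 0x2002;  /* decoder PC after a 32-bit insn at 0x1ffe */
    uint32_t dest  = 0x2004;  /* branch target on the TB's second page */

    int first_page  = (tb_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
    int second_page = ((s_pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);

    /* prints "old check: 0, new check: 1" -- the direct jump is now allowed */
    printf("old check: %d, new check: %d\n", first_page, first_page || second_page);
    return 0;
}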
@@ -2373,7 +2373,7 @@ static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
     tb = s->tb;
     /* NOTE: we handle the case where the TB spans two pages here */
     if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
-        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
+        (pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK)) {
         /* jump to same page: we can use a direct jump */
         tcg_gen_goto_tb(tcg_ctx, tb_num);
         gen_jmp_im(s, eip);
@@ -851,7 +851,7 @@ static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
     if (unlikely(s->singlestep_enabled)) {
         gen_exception(s, dest, EXCP_DEBUG);
     } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
-               (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
+               (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
         tcg_gen_goto_tb(tcg_ctx, n);
         tcg_gen_movi_i32(tcg_ctx, tcg_ctx->QREG_PC, dest);
         tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + n);
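The m68k change swaps s->pc for s->insn_pc because s->pc has already advanced past the variable-length instruction; if the instruction ends exactly at a page boundary and the TB ends with it, s->pc points into a page the TB is not associated with, so the old check could wrongly allow a direct jump that would never be re-validated. A worked illustration of that corner case (4 KiB pages assumed; addresses made up; the scenario assumes the TB ends at the boundary):

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_MASK (~(uint32_t)0xfff)  /* 4 KiB pages assumed */

int main(void)
{
    uint32_t insn_pc = 0x1ffc;  /* current insn: last 4 bytes of page 0x1000 */
    uint32_t pc      = 0x2000;  /* s->pc already points into the next page */
    uint32_t dest    = 0x2008;  /* branch target on that next page */

    /* Old check passes even though page 0x2000 is not one of the TB's pages. */
    printf("old: %d\n", (pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK));
    /* New check uses the insn's start address, which is always inside the TB. */
    printf("new: %d\n", (insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK));
    return 0;
}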
@@ -753,6 +753,16 @@ static inline void tcg_gen_exit_tb(TCGContext *s, uintptr_t val)
     tcg_gen_op1i(s, INDEX_op_exit_tb, val);
 }
 
+/**
+ * tcg_gen_goto_tb() - output goto_tb TCG operation
+ * @idx: Direct jump slot index (0 or 1)
+ *
+ * See tcg/README for more info about this TCG operation.
+ *
+ * NOTE: Direct jumps with goto_tb are only safe within the pages this TB
+ * resides in because we don't take care of direct jumps when address mapping
+ * changes, e.g. in tlb_flush().
+ */
 void tcg_gen_goto_tb(TCGContext *s, unsigned idx);
 
 #if TARGET_LONG_BITS == 32
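Taken together with the new comment, the emission idiom all the target hunks above follow looks like this (a sketch, not code from this commit: the stub declarations stand in for the real tcg/ headers, and set_pc_for_target() is a hypothetical placeholder for each target's own PC store such as gen_set_pc_im() or gen_jmp_im()):

#include <stdint.h>

/* Stubs so the sketch stands alone; the real declarations live in tcg/. */
typedef struct TCGContext TCGContext;
typedef struct TranslationBlock TranslationBlock;
typedef uint32_t target_ulong;
extern void tcg_gen_goto_tb(TCGContext *s, unsigned idx);
extern void tcg_gen_exit_tb(TCGContext *s, uintptr_t val);
extern void set_pc_for_target(target_ulong dest);  /* hypothetical stand-in */

/* Each of the two goto_tb slots (idx 0 or 1) may be used at most once per
 * TB, and only for destinations on the TB's own pages, per the NOTE above. */
static void gen_chained_jump(TCGContext *tcg_ctx, TranslationBlock *tb,
                             unsigned idx, target_ulong dest)
{
    tcg_gen_goto_tb(tcg_ctx, idx);                  /* patchable direct jump */
    set_pc_for_target(dest);                        /* keep guest PC coherent */
    tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + idx);  /* return (tb | idx) */
}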