Mirror of https://github.com/yuzu-emu/unicorn.git
tcg: Rearrange tb_link_page() to avoid forward declaration
Backports commit e90d96b158665a684ab89b4f002838034b5fafc8 from qemu
commit 87f2bb42d4
parent fbc0a1105f
@@ -118,8 +118,6 @@ static intptr_t qemu_real_host_page_mask;
 static uintptr_t qemu_host_page_size;
 static intptr_t qemu_host_page_mask;
 
-static void tb_link_page(struct uc_struct *uc, TranslationBlock *tb,
-                         tb_page_addr_t phys_pc, tb_page_addr_t phys_page2);
 static TranslationBlock *tb_find_pc(struct uc_struct *uc, uintptr_t tc_ptr);
 
 // Unicorn: for cleaning up memory later.
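The hunk above drops the forward declaration of tb_link_page(); the commit instead moves the definition above its first caller, while tb_find_pc() keeps its prototype because it is still called before its definition. A minimal sketch of the pattern, with hypothetical names in a single translation unit (not the unicorn code):

#include <stdio.h>

/* Before the rearrangement a prototype was needed:
 *
 *     static void do_work(int n);     -- forward declaration
 *     static void run(void) { do_work(42); }
 *     static void do_work(int n) { ... }
 */

/* After: defining do_work() above run() makes the prototype redundant. */
static void do_work(int n)
{
    printf("work(%d)\n", n);
}

static void run(void)
{
    do_work(42);
}

int main(void)
{
    run();
    return 0;
}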
@@ -1161,6 +1159,106 @@ static void build_page_bitmap(PageDesc *p)
     }
 }
 
+/* add the tb in the target page and protect it if necessary */
+static inline void tb_alloc_page(struct uc_struct *uc, TranslationBlock *tb,
+                                 unsigned int n, tb_page_addr_t page_addr)
+{
+    PageDesc *p;
+#ifndef CONFIG_USER_ONLY
+    bool page_already_protected;
+#endif
+
+    tb->page_addr[n] = page_addr;
+    p = page_find_alloc(uc, page_addr >> TARGET_PAGE_BITS, 1);
+    tb->page_next[n] = p->first_tb;
+#ifndef CONFIG_USER_ONLY
+    page_already_protected = p->first_tb != NULL;
+#endif
+    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
+    invalidate_page_bitmap(p);
+
+#if defined(CONFIG_USER_ONLY)
+    if (p->flags & PAGE_WRITE) {
+        target_ulong addr;
+        PageDesc *p2;
+        int prot;
+
+        /* force the host page as non writable (writes will have a
+           page fault + mprotect overhead) */
+        page_addr &= qemu_host_page_mask;
+        prot = 0;
+        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
+             addr += TARGET_PAGE_SIZE) {
+
+            p2 = page_find(addr >> TARGET_PAGE_BITS);
+            if (!p2) {
+                continue;
+            }
+            prot |= p2->flags;
+            p2->flags &= ~PAGE_WRITE;
+        }
+        mprotect(g2h(page_addr), qemu_host_page_size,
+                 (prot & PAGE_BITS) & ~PAGE_WRITE);
+#ifdef DEBUG_TB_INVALIDATE
+        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
+               page_addr);
+#endif
+    }
+#else
+    /* if some code is already present, then the pages are already
+       protected. So we handle the case where only the first TB is
+       allocated in a physical page */
+    if (!page_already_protected) {
+        tlb_protect_code(uc, page_addr);
+    }
+#endif
+}
+
+/* add a new TB and link it to the physical page tables. phys_page2 is
+   (-1) to indicate that only one page contains the TB. */
+static void tb_link_page(struct uc_struct *uc,
+    TranslationBlock *tb, tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
+{
+    TCGContext *tcg_ctx = uc->tcg_ctx;
+    unsigned int h;
+    TranslationBlock **ptb;
+
+    /* Grab the mmap lock to stop another thread invalidating this TB
+       before we are done. */
+    mmap_lock();
+    /* add in the physical hash table */
+    h = tb_phys_hash_func(phys_pc);
+    ptb = &tcg_ctx->tb_ctx.tb_phys_hash[h];
+    tb->phys_hash_next = *ptb;
+    *ptb = tb;
+
+    /* add in the page list */
+    tb_alloc_page(uc, tb, 0, phys_pc & TARGET_PAGE_MASK);
+    if (phys_page2 != -1) {
+        tb_alloc_page(uc, tb, 1, phys_page2);
+    } else {
+        tb->page_addr[1] = -1;
+    }
+
+    assert(((uintptr_t)tb & 3) == 0);
+    tb->jmp_list_first = (uintptr_t)tb | 2;
+    tb->jmp_list_next[0] = (uintptr_t)NULL;
+    tb->jmp_list_next[1] = (uintptr_t)NULL;
+
+    /* init original jump addresses */
+    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
+        tb_reset_jump(tb, 0);
+    }
+    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
+        tb_reset_jump(tb, 1);
+    }
+
+#ifdef DEBUG_TB_CHECK
+    tb_page_check();
+#endif
+    mmap_unlock();
+}
+
 TranslationBlock *tb_gen_code(CPUState *cpu,
                               target_ulong pc, target_ulong cs_base,
                               uint32_t flags, int cflags)
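Both functions added above lean on pointer tagging: tb_alloc_page() stores the page slot in the low bits of the TB pointer (p->first_tb = (TranslationBlock *)((uintptr_t)tb | n)), and tb_link_page() asserts 4-byte alignment before setting tb->jmp_list_first = (uintptr_t)tb | 2. A self-contained sketch of the trick, using hypothetical types rather than the unicorn structures:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Block { int id; } Block;

/* Pack a small tag (0..3) into the low bits of a 4-byte-aligned pointer. */
static uintptr_t tag_ptr(Block *b, unsigned tag)
{
    assert(((uintptr_t)b & 3) == 0);   /* low two bits must be free */
    assert(tag < 4);
    return (uintptr_t)b | tag;
}

static Block *untag_ptr(uintptr_t v)
{
    return (Block *)(v & ~(uintptr_t)3);   /* mask the tag bits off */
}

static unsigned ptr_tag(uintptr_t v)
{
    return (unsigned)(v & 3);
}

int main(void)
{
    static Block b = { 7 };            /* int alignment >= 4 on common ABIs */
    uintptr_t v = tag_ptr(&b, 1);      /* like p->first_tb = tb | n */
    printf("id=%d tag=%u\n", untag_ptr(v)->id, ptr_tag(v));
    return 0;
}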
@@ -1486,61 +1584,6 @@ static void tb_invalidate_phys_page(struct uc_struct *uc, tb_page_addr_t addr,
 }
 #endif
 
-/* add the tb in the target page and protect it if necessary */
-static inline void tb_alloc_page(struct uc_struct *uc, TranslationBlock *tb,
-                                 unsigned int n, tb_page_addr_t page_addr)
-{
-    PageDesc *p;
-#ifndef CONFIG_USER_ONLY
-    bool page_already_protected;
-#endif
-
-    tb->page_addr[n] = page_addr;
-    p = page_find_alloc(uc, page_addr >> TARGET_PAGE_BITS, 1);
-    tb->page_next[n] = p->first_tb;
-#ifndef CONFIG_USER_ONLY
-    page_already_protected = p->first_tb != NULL;
-#endif
-    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
-    invalidate_page_bitmap(p);
-
-#if defined(CONFIG_USER_ONLY)
-    if (p->flags & PAGE_WRITE) {
-        target_ulong addr;
-        PageDesc *p2;
-        int prot;
-
-        /* force the host page as non writable (writes will have a
-           page fault + mprotect overhead) */
-        page_addr &= qemu_host_page_mask;
-        prot = 0;
-        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
-             addr += TARGET_PAGE_SIZE) {
-
-            p2 = page_find(addr >> TARGET_PAGE_BITS);
-            if (!p2) {
-                continue;
-            }
-            prot |= p2->flags;
-            p2->flags &= ~PAGE_WRITE;
-        }
-        mprotect(g2h(page_addr), qemu_host_page_size,
-                 (prot & PAGE_BITS) & ~PAGE_WRITE);
-#ifdef DEBUG_TB_INVALIDATE
-        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
-               page_addr);
-#endif
-    }
-#else
-    /* if some code is already present, then the pages are already
-       protected. So we handle the case where only the first TB is
-       allocated in a physical page */
-    if (!page_already_protected) {
-        tlb_protect_code(uc, page_addr);
-    }
-#endif
-}
-
 void tb_invalidate_phys_page_fast(struct uc_struct* uc, tb_page_addr_t start, int len)
 {
     PageDesc *p;
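This hunk deletes the old copy of tb_alloc_page(), which moved wholesale to its new position earlier in the file. Its CONFIG_USER_ONLY branch write-protects the host pages backing guest code so that a later guest write faults and the stale translation can be invalidated. A rough standalone POSIX sketch of that mprotect() idiom (a demo under assumed page handling, not the unicorn code):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);

    /* An anonymous page standing in for memory that holds guest code. */
    char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    memset(p, 0x90, page);             /* "translate" the code once */

    /* Drop the write permission, as tb_alloc_page() does: any future
     * write faults, giving the emulator a chance to invalidate TBs. */
    if (mprotect(p, page, PROT_READ) != 0) {
        perror("mprotect");
        return 1;
    }
    printf("page %p is now read-only\n", (void *)p);
    /* p[0] = 0; */                    /* would raise SIGSEGV here */
    munmap(p, page);
    return 0;
}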
@@ -1578,51 +1621,6 @@ void tb_invalidate_phys_page_fast(struct uc_struct* uc, tb_page_addr_t start, int len)
     }
 }
 
-/* add a new TB and link it to the physical page tables. phys_page2 is
-   (-1) to indicate that only one page contains the TB. */
-static void tb_link_page(struct uc_struct *uc,
-    TranslationBlock *tb, tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
-{
-    TCGContext *tcg_ctx = uc->tcg_ctx;
-    unsigned int h;
-    TranslationBlock **ptb;
-
-    /* Grab the mmap lock to stop another thread invalidating this TB
-       before we are done. */
-    mmap_lock();
-    /* add in the physical hash table */
-    h = tb_phys_hash_func(phys_pc);
-    ptb = &tcg_ctx->tb_ctx.tb_phys_hash[h];
-    tb->phys_hash_next = *ptb;
-    *ptb = tb;
-
-    /* add in the page list */
-    tb_alloc_page(uc, tb, 0, phys_pc & TARGET_PAGE_MASK);
-    if (phys_page2 != -1) {
-        tb_alloc_page(uc, tb, 1, phys_page2);
-    } else {
-        tb->page_addr[1] = -1;
-    }
-
-    assert(((uintptr_t)tb & 3) == 0);
-    tb->jmp_list_first = (uintptr_t)tb | 2;
-    tb->jmp_list_next[0] = (uintptr_t)NULL;
-    tb->jmp_list_next[1] = (uintptr_t)NULL;
-
-    /* init original jump addresses */
-    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
-        tb_reset_jump(tb, 0);
-    }
-    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
-        tb_reset_jump(tb, 1);
-    }
-
-#ifdef DEBUG_TB_CHECK
-    tb_page_check();
-#endif
-    mmap_unlock();
-}
-
 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
    tb[1].tc_ptr. Return NULL if not found */
 static TranslationBlock *tb_find_pc(struct uc_struct *uc, uintptr_t tc_ptr)
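The tb_link_page() body removed here is identical to the one added near the top of the file; its hash-table step is a constant-time head insertion into a chained bucket (tb->phys_hash_next = *ptb; *ptb = tb;). A minimal sketch of that insertion with hypothetical types and a toy hash:

#include <stdio.h>

#define NBUCKETS 256

typedef struct TB {
    unsigned long phys_pc;
    struct TB *phys_hash_next;
} TB;

static TB *phys_hash[NBUCKETS];        /* bucket heads, like tb_phys_hash[] */

static unsigned hash_func(unsigned long phys_pc)
{
    return (unsigned)((phys_pc >> 2) % NBUCKETS);   /* toy hash */
}

/* Mirrors the insertion in tb_link_page(): push onto the bucket head. */
static void link_tb(TB *tb)
{
    TB **ptb = &phys_hash[hash_func(tb->phys_pc)];
    tb->phys_hash_next = *ptb;         /* old head becomes our successor */
    *ptb = tb;                         /* new TB becomes the head */
}

int main(void)
{
    TB a = { 0x1000, NULL };
    TB b = { 0x1000 + NBUCKETS * 4, NULL };   /* hashes to the same bucket */
    link_tb(&a);
    link_tb(&b);
    TB *head = phys_hash[hash_func(0x1000)];
    printf("head pc=%#lx, next pc=%#lx\n",
           head->phys_pc, head->phys_hash_next->phys_pc);
    return 0;
}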