tcg: Allocate a guard page after code_gen_buffer

This will catch any overflow of the buffer.

Add a native win32 alternative for alloc_code_gen_buffer;
remove the malloc alternative.

Backports commit f293709c6af7a65a9bcec09cdba7a60183657a3e from qemu
Richard Henderson 2018-02-16 14:16:06 -05:00 committed by Lioncash
parent 19a3c7e03f
commit 71221baf40

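As background for the change below: the patch catches overflow of code_gen_buffer by mapping one extra, inaccessible page directly after the usable part of the buffer, so the first write past the end faults instead of silently corrupting memory. A minimal standalone sketch of that idea for the POSIX/mmap case follows; the helper name, the fixed 1 MiB size and the bare-bones error handling are illustrative only and are not part of this commit.

#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Reserve `size` usable bytes plus one trailing guard page.  The whole
   range starts out PROT_NONE; only the usable part is then made
   accessible, so any write running past the end faults immediately. */
static void *alloc_with_guard(size_t size)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    void *buf = mmap(NULL, size + page, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }
    if (mprotect(buf, size, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
        munmap(buf, size + page);
        return NULL;
    }
    return buf;
}

int main(void)
{
    size_t size = 1024 * 1024;
    unsigned char *buf = alloc_with_guard(size);
    assert(buf != NULL);
    memset(buf, 0x90, size);   /* filling the buffer to the brim is fine */
    /* buf[size] = 0x90; */    /* one byte further would hit the guard page */
    munmap(buf, size + (size_t)sysconf(_SC_PAGESIZE));
    return 0;
}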

@@ -303,31 +303,6 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
     return false;
 }
 
-#ifdef _WIN32
-static inline QEMU_UNUSED_FUNC void map_exec(void *addr, long size)
-{
-    DWORD old_protect;
-    VirtualProtect(addr, size,
-                   PAGE_EXECUTE_READWRITE, &old_protect);
-}
-#else
-static inline QEMU_UNUSED_FUNC void map_exec(void *addr, long size)
-{
-    unsigned long start, end, page_size;
-
-    page_size = getpagesize();
-    start = (unsigned long)addr;
-    start &= ~(page_size - 1);
-
-    end = (unsigned long)addr + size;
-    end += page_size - 1;
-    end &= ~(page_size - 1);
-
-    mprotect((void *)start, end - start,
-             PROT_READ | PROT_WRITE | PROT_EXEC);
-}
-#endif
-
 static void page_size_init(void)
 {
     /* NOTE: we can always suppose that qemu_host_page_size >=
@@ -479,14 +454,6 @@ static inline PageDesc *page_find(struct uc_struct *uc, tb_page_addr_t index)
 #define USE_STATIC_CODE_GEN_BUFFER
 #endif
 
-/* ??? Should configure for this, not list operating systems here.  */
-#if (defined(__linux__) \
-    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
-    || defined(__DragonFly__) || defined(__OpenBSD__) \
-    || defined(__NetBSD__))
-# define USE_MMAP
-#endif
-
 /* Minimum size of the code gen buffer.  This number is randomly chosen,
    but not so small that we can't have a fair number of TB's live.  */
 #define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
@@ -581,22 +548,107 @@ void free_code_gen_buffer(struct uc_struct *uc)
     // Do nothing, we use a static buffer.
 }
 
+# ifdef _WIN32
+static inline void do_protect(void *addr, long size, int prot)
+{
+    DWORD old_protect;
+    VirtualProtect(addr, size, prot, &old_protect);
+}
+
+static inline void map_exec(void *addr, long size)
+{
+    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
+}
+
+static inline void map_none(void *addr, long size)
+{
+    do_protect(addr, size, PAGE_NOACCESS);
+}
+# else
+static inline void do_protect(void *addr, long size, int prot)
+{
+    uintptr_t start, end;
+
+    start = (uintptr_t)addr;
+    start &= qemu_real_host_page_mask;
+
+    end = (uintptr_t)addr + size;
+    end = ROUND_UP(end, qemu_real_host_page_size);
+
+    mprotect((void *)start, end - start, prot);
+}
+
+static inline void map_exec(void *addr, long size)
+{
+    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
+}
+
+static inline void map_none(void *addr, long size)
+{
+    do_protect(addr, size, PROT_NONE);
+}
+# endif /* WIN32 */
+
 static inline void *alloc_code_gen_buffer(struct uc_struct *uc)
 {
     TCGContext *tcg_ctx = uc->tcg_ctx;
     void *buf = static_code_gen_buffer;
+    size_t full_size, size;
+
+    /* The size of the buffer, rounded down to end on a page boundary.  */
+    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
+                 & qemu_real_host_page_mask) - (uintptr_t)buf;
+
+    /* Reserve a guard page.  */
+    size = full_size - qemu_real_host_page_size;
+
+    /* Honor a command-line option limiting the size of the buffer.  */
+    if (size > tcg_ctx->code_gen_buffer_size) {
+        size = (((uintptr_t)buf + tcg_ctx->code_gen_buffer_size)
+                & qemu_real_host_page_mask) - (uintptr_t)buf;
+    }
+    tcg_ctx->code_gen_buffer_size = size;
+
 #ifdef __mips__
-    if (cross_256mb(buf, tcg_ctx->code_gen_buffer_size)) {
-        buf = split_cross_256mb(buf, tcg_ctx->code_gen_buffer_size);
+    if (cross_256mb(buf, size)) {
+        buf = split_cross_256mb(buf, size);
+        size = tcg_ctx->code_gen_buffer_size;
     }
 #endif
-    map_exec(buf, tcg_ctx->code_gen_buffer_size);
+
+    map_exec(buf, size);
+    map_none(buf + size, qemu_real_host_page_size);
+    // Unicorn: commented out
+    //qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
+
     return buf;
 }
-#elif defined(USE_MMAP)
+#elif defined(_WIN32)
+static inline void *alloc_code_gen_buffer(struct uc_struct *uc)
+{
+    TCGContext *tcg_ctx = uc->tcg_ctx;
+    size_t size = tcg_ctx->code_gen_buffer_size;
+    void *buf1, *buf2;
+
+    /* Perform the allocation in two steps, so that the guard page
+       is reserved but uncommitted.  */
+    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
+                        MEM_RESERVE, PAGE_NOACCESS);
+    if (buf1 != NULL) {
+        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
+        assert(buf1 == buf2);
+    }
+
+    return buf1;
+}
+
 void free_code_gen_buffer(struct uc_struct *uc)
 {
     TCGContext *tcg_ctx = uc->tcg_ctx;
+    void *prologue = tcg_ctx->code_gen_prologue;
+
+    if (!prologue) {
+        return;
+    }
 
     // Unicorn: Free the prologue rather than the buffer directly, as the prologue
     //          has the starting address of the same memory block that the code
@@ -607,16 +659,16 @@ void free_code_gen_buffer(struct uc_struct *uc)
     //
     // See tcg_prologue_init in tcg.c for more info.
     //
-    if (tcg_ctx->code_gen_prologue)
-        munmap(tcg_ctx->code_gen_prologue, tcg_ctx->code_gen_buffer_size);
+    VirtualFree(prologue, 0, MEM_RELEASE);
 }
 
+#else
 static inline void *alloc_code_gen_buffer(struct uc_struct *uc)
 {
+    TCGContext *tcg_ctx = uc->tcg_ctx;
     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
     uintptr_t start = 0;
+    size_t size = tcg_ctx->code_gen_buffer_size;
     void *buf;
-    TCGContext *tcg_ctx = uc->tcg_ctx;
 
     /* Constrain the position of the buffer based on the host cpu.
        Note that these addresses are chosen in concert with the
@@ -631,16 +683,14 @@ static inline void *alloc_code_gen_buffer(struct uc_struct *uc)
        Leave the choice of exact location with the kernel.  */
     flags |= MAP_32BIT;
     /* Cannot expect to map more than 800MB in low memory.  */
-    if (tcg_ctx->code_gen_buffer_size > 800u * 1024 * 1024) {
-        tcg_ctx->code_gen_buffer_size = 800u * 1024 * 1024;
+    if (size > 800u * 1024 * 1024) {
+        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
     }
 # elif defined(__sparc__)
     start = 0x40000000ul;
 # elif defined(__s390x__)
     start = 0x90000000ul;
-# elif defined(__mips__)
-    /* ??? We ought to more explicitly manage layout for softmmu too.  */
-#  ifdef CONFIG_USER_ONLY
+#  if _MIPS_SIM == _ABI64
     start = 0x68000000ul;
 #  elif _MIPS_SIM == _ABI64
     start = 0x128000000ul;
@@ -649,43 +699,65 @@ static inline void *alloc_code_gen_buffer(struct uc_struct *uc)
 #  endif
 # endif
 
-    buf = mmap((void *)start, tcg_ctx->code_gen_buffer_size,
-               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
+    buf = mmap((void *)start, size + qemu_real_host_page_size,
+               PROT_NONE, flags, -1, 0);
     if (buf == MAP_FAILED) {
         return NULL;
     }
 
 #ifdef __mips__
-    if (cross_256mb(buf, tcg_ctx->code_gen_buffer_size)) {
+    if (cross_256mb(buf, size)) {
         /* Try again, with the original still mapped, to avoid re-acquiring
            that 256mb crossing.  This time don't specify an address.  */
-        size_t size2, size1 = tcg_ctx->code_gen_buffer_size;
-        void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
-                          flags, -1, 0);
-        if (buf2 != MAP_FAILED) {
-            if (!cross_256mb(buf2, size1)) {
+        size_t size2;
+        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
+                          PROT_NONE, flags, -1, 0);
+        switch (buf2 != MAP_FAILED) {
+        case 1:
+            if (!cross_256mb(buf2, size)) {
                 /* Success!  Use the new buffer.  */
-                munmap(buf, size1);
-                return buf2;
+                munmap(buf, size);
+                break;
             }
             /* Failure.  Work with what we had.  */
-            munmap(buf2, size1);
+            munmap(buf2, size);
+            /* fallthru */
+        default:
+            /* Split the original buffer.  Free the smaller half.  */
+            buf2 = split_cross_256mb(buf, size);
+            size2 = tcg_ctx->code_gen_buffer_size;
+            if (buf == buf2) {
+                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
+            } else {
+                munmap(buf, size - size2);
+            }
+            size = size2;
+            break;
         }
-        /* Split the original buffer.  Free the smaller half.  */
-        buf2 = split_cross_256mb(buf, size1);
-        size2 = tcg_ctx->code_gen_buffer_size;
-        munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
-        return buf2;
+        buf = buf2;
     }
 #endif
 
+    /* Make the final buffer accessible.  The guard page at the end
+       will remain inaccessible with PROT_NONE.  */
+    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
+
+    /* Request large pages for the buffer.  */
+    // Unicorn: Commented out
+    //qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
+
     return buf;
 }
-#else
 
 void free_code_gen_buffer(struct uc_struct *uc)
 {
     TCGContext *tcg_ctx = uc->tcg_ctx;
+    void *prologue = tcg_ctx->code_gen_prologue;
+
+    if (!prologue) {
+        return;
+    }
 
     // Unicorn: Free the prologue rather than the buffer directly, as the prologue
     //          has the starting address of the same memory block that the code
@@ -696,37 +768,7 @@ void free_code_gen_buffer(struct uc_struct *uc)
     //
     // See tcg_prologue_init in tcg.c for more info.
     //
-    if (tcg_ctx->code_gen_prologue)
-        g_free(tcg_ctx->code_gen_prologue);
-}
-
-static inline void *alloc_code_gen_buffer(struct uc_struct *uc)
-{
-    TCGContext *tcg_ctx = uc->tcg_ctx;
-    void *buf = g_malloc(tcg_ctx->code_gen_buffer_size);
-    if (buf == NULL) {
-        return NULL;
-    }
-
-#ifdef __mips__
-    if (cross_256mb(buf, tcg_ctx->code_gen_buffer_size)) {
-        void *buf2 = g_malloc(tcg_ctx->code_gen_buffer_size);
-        if (buf2 != NULL && !cross_256mb(buf2, size1)) {
-            /* Success!  Use the new buffer.  */
-            free(buf);
-            buf = buf2;
-        } else {
-            /* Failure.  Work with what we had.  Since this is malloc
-               and not mmap, we can't free the other half.  */
-            free(buf2);
-            buf = split_cross_256mb(buf, tcg_ctx->code_gen_buffer_size);
-        }
-    }
-#endif
-
-    map_exec(buf, tcg_ctx->code_gen_buffer_size);
-    return buf;
+    munmap(prologue, tcg_ctx->code_gen_buffer_size);
 }
 #endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
@@ -761,10 +803,10 @@ void tcg_exec_init(struct uc_struct *uc, unsigned long tb_size)
     TCGContext *tcg_ctx;
 
     cpu_gen_init(uc);
-    code_gen_alloc(uc, tb_size);
     tcg_ctx = uc->tcg_ctx;
     tcg_ctx->uc = uc;
     page_init();
+    code_gen_alloc(uc, tb_size);
 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
     /* There's no guest base to take into account, so go ahead and
        initialize the prologue now.  */
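
For comparison with the VirtualAlloc calls added above, here is a standalone sketch of the two-step Win32 scheme: reserve the buffer plus one guard page with MEM_RESERVE, then commit only the usable part, leaving the guard page reserved but uncommitted so that touching it raises an access violation. The function name, the fixed size and the minimal error handling are illustrative only, not the Unicorn wiring itself.

#include <assert.h>
#include <stdio.h>
#include <windows.h>

/* Reserve `size` + one page, commit only `size`.  The uncommitted
   trailing page acts as the guard: any access to it faults. */
static void *alloc_with_guard_win32(SIZE_T size)
{
    SYSTEM_INFO si;
    void *buf1, *buf2;

    GetSystemInfo(&si);
    buf1 = VirtualAlloc(NULL, size + si.dwPageSize, MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 == NULL) {
        return NULL;
    }
    /* Committing at the base of the reservation returns the same address. */
    buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
    assert(buf1 == buf2);
    return buf1;
}

int main(void)
{
    unsigned char *buf = alloc_with_guard_win32(1024 * 1024);
    printf("code buffer at %p\n", (void *)buf);
    /* buf[1024 * 1024] = 0x90;  would raise an access violation */
    VirtualFree(buf, 0, MEM_RELEASE);   /* releases the whole reservation */
    return 0;
}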