mmap-alloc: unfold qemu_ram_mmap()

Unfold parts of qemu_ram_mmap() for the sake of understanding, moving
declarations to the top, and keeping architecture-specifics in the
ifdef-else blocks. No changes in the function behaviour.

Give ptr and ptr1 meaningful names:
ptr -> guardptr : pointer to the PROT_NONE guard region
ptr1 -> ptr : pointer to the mapped memory returned to caller

Backports commit 2044c3e7116eeac0449dcb4a4130cc8f8b9310da from qemu
This commit is contained in:
Murilo Opsfelder Araujo 2019-02-05 16:49:35 -05:00 committed by Lioncash
parent 0b7f1ff086
commit 22e3feb162
No known key found for this signature in database
GPG key ID: 4E3C3CC1031BA9C7

View file

@@ -17,11 +17,19 @@
 void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
 {
+    int flags;
+    int guardfd;
+    size_t offset;
+    size_t total;
+    void *guardptr;
+    void *ptr;
+
     /*
      * Note: this always allocates at least one extra page of virtual address
      * space, even if size is already aligned.
      */
-    size_t total = size + align;
+    total = size + align;
+
 #if defined(__powerpc64__) && defined(__linux__)
     /* On ppc64 mappings in the same segment (aka slice) must share the same
      * page size. Since we will be re-allocating part of this segment
@@ -31,16 +39,21 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
      * We do this unless we are using the system page size, in which case
      * anonymous memory is OK.
      */
-    int anonfd = fd == -1 || qemu_fd_getpagesize(fd) == getpagesize() ? -1 : fd;
-    int flags = anonfd == -1 ? MAP_ANONYMOUS : MAP_NORESERVE;
-    void *ptr = mmap(0, total, PROT_NONE, flags | MAP_PRIVATE, anonfd, 0);
+    flags = MAP_PRIVATE;
+    if (fd == -1 || qemu_fd_getpagesize(fd) == getpagesize()) {
+        guardfd = -1;
+        flags |= MAP_ANONYMOUS;
+    } else {
+        guardfd = fd;
+        flags |= MAP_NORESERVE;
+    }
 #else
-    void *ptr = mmap(0, total, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+    guardfd = -1;
+    flags = MAP_PRIVATE | MAP_ANONYMOUS;
 #endif
-    size_t offset;
-    void *ptr1;
 
-    if (ptr == MAP_FAILED) {
+    guardptr = mmap(0, total, PROT_NONE, flags, guardfd, 0);
+
+    if (guardptr == MAP_FAILED) {
         return MAP_FAILED;
     }
@@ -49,19 +62,20 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
     /* Always align to host page size */
     assert(align >= getpagesize());
 
-    offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;
-    ptr1 = mmap(ptr + offset, size, PROT_READ | PROT_WRITE,
-                MAP_FIXED |
-                (fd == -1 ? MAP_ANONYMOUS : 0) |
-                (shared ? MAP_SHARED : MAP_PRIVATE),
-                fd, 0);
-    if (ptr1 == MAP_FAILED) {
-        munmap(ptr, total);
+    flags = MAP_FIXED;
+    flags |= fd == -1 ? MAP_ANONYMOUS : 0;
+    flags |= shared ? MAP_SHARED : MAP_PRIVATE;
+    offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;
+
+    ptr = mmap(guardptr + offset, size, PROT_READ | PROT_WRITE, flags, fd, 0);
+
+    if (ptr == MAP_FAILED) {
+        munmap(guardptr, total);
         return MAP_FAILED;
     }
 
     if (offset > 0) {
-        munmap(ptr, offset);
+        munmap(guardptr, offset);
     }
 
     /*
@@ -70,10 +84,10 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
      */
     total -= offset;
     if (total > size + getpagesize()) {
-        munmap(ptr1 + size + getpagesize(), total - size - getpagesize());
+        munmap(ptr + size + getpagesize(), total - size - getpagesize());
     }
 
-    return ptr1;
+    return ptr;
 }
 
 void qemu_ram_munmap(void *ptr, size_t size)