From: David Hildenbrand
Subject: [PATCH v1 4/9] util/mmap-alloc: factor out activating of memory to mmap_activate()
Date: Tue, 9 Feb 2021 14:49:34 +0100
We want to activate memory within a reserved memory region, to make it
accessible. Let's factor that out.
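[Editor's note, not part of the patch: a minimal standalone sketch of the reserve-then-activate
pattern that mmap_reserve()/mmap_activate() implement. A PROT_NONE reservation is later made
accessible by mapping over it with MAP_FIXED. The helper names reserve_region() and
activate_region() below are illustrative only and do not appear in QEMU.]

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stddef.h>
#include <stdio.h>

/* Reserve address space only: inaccessible, no backing memory committed. */
static void *reserve_region(size_t size)
{
    return mmap(NULL, size, PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
}

/* Activate the reserved range: MAP_FIXED replaces the PROT_NONE mapping
 * with an accessible anonymous mapping at the same address. */
static void *activate_region(void *ptr, size_t size)
{
    return mmap(ptr, size, PROT_READ | PROT_WRITE,
                MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

int main(void)
{
    size_t size = 2 * 1024 * 1024;
    void *region = reserve_region(size);

    if (region == MAP_FAILED || activate_region(region, size) == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    ((char *)region)[0] = 1;   /* accessible only after activation */
    munmap(region, size);
    return 0;
}

[The patch applies the same split inside qemu_ram_mmap(): the guard/reservation mapping is
created first, and the accessible mapping (including the pmem MAP_SYNC handling) is now done
by the factored-out mmap_activate().]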
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: Murilo Opsfelder Araujo <muriloo@linux.ibm.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
util/mmap-alloc.c | 91 +++++++++++++++++++++++++----------------------
1 file changed, 48 insertions(+), 43 deletions(-)
diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index 5c2bfe4c99..b50dc86a3c 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -114,6 +114,51 @@ static void *mmap_reserve(size_t size, int fd)
return mmap(0, size, PROT_NONE, flags, fd, 0);
}
+/*
+ * Activate memory in a reserved region from the given fd (if any), to make
+ * it accessible.
+ */
+static void *mmap_activate(void *ptr, size_t size, int fd, bool readonly,
+ bool shared, bool is_pmem)
+{
+ const int prot = PROT_READ | (readonly ? 0 : PROT_WRITE);
+ int map_sync_flags = 0;
+ int flags = MAP_FIXED;
+ void *activated_ptr;
+
+ flags |= fd == -1 ? MAP_ANONYMOUS : 0;
+ flags |= shared ? MAP_SHARED : MAP_PRIVATE;
+ if (shared && is_pmem) {
+ map_sync_flags = MAP_SYNC | MAP_SHARED_VALIDATE;
+ }
+
+ activated_ptr = mmap(ptr, size, prot, flags | map_sync_flags, fd, 0);
+ if (activated_ptr == MAP_FAILED && map_sync_flags) {
+ if (errno == ENOTSUP) {
+ char *proc_link = g_strdup_printf("/proc/self/fd/%d", fd);
+ char *file_name = g_malloc0(PATH_MAX);
+ int len = readlink(proc_link, file_name, PATH_MAX - 1);
+
+ if (len < 0) {
+ len = 0;
+ }
+ file_name[len] = '\0';
+ fprintf(stderr, "Warning: requesting persistence across crashes "
+ "for backend file %s failed. Proceeding without "
+ "persistence, data might become corrupted in case of host "
+ "crash.\n", file_name);
+ g_free(proc_link);
+ g_free(file_name);
+ }
+ /*
+ * If mmap failed with MAP_SHARED_VALIDATE | MAP_SYNC, we will try
+ * again without these flags to handle backwards compatibility.
+ */
+ activated_ptr = mmap(ptr, size, prot, flags, fd, 0);
+ }
+ return activated_ptr;
+}
+
static inline size_t mmap_guard_pagesize(int fd)
{
#if defined(__powerpc64__) && defined(__linux__)
@@ -132,13 +177,8 @@ void *qemu_ram_mmap(int fd,
bool is_pmem)
{
const size_t guard_pagesize = mmap_guard_pagesize(fd);
- int prot;
- int flags;
- int map_sync_flags = 0;
- size_t offset;
- size_t total;
- void *guardptr;
- void *ptr;
+ size_t offset, total;
+ void *ptr, *guardptr;
/*
* Note: this always allocates at least one extra page of virtual address
@@ -155,44 +195,9 @@ void *qemu_ram_mmap(int fd,
/* Always align to host page size */
assert(align >= guard_pagesize);
- flags = MAP_FIXED;
- flags |= fd == -1 ? MAP_ANONYMOUS : 0;
- flags |= shared ? MAP_SHARED : MAP_PRIVATE;
- if (shared && is_pmem) {
- map_sync_flags = MAP_SYNC | MAP_SHARED_VALIDATE;
- }
-
offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;
- prot = PROT_READ | (readonly ? 0 : PROT_WRITE);
-
- ptr = mmap(guardptr + offset, size, prot, flags | map_sync_flags, fd, 0);
-
- if (ptr == MAP_FAILED && map_sync_flags) {
- if (errno == ENOTSUP) {
- char *proc_link, *file_name;
- int len;
- proc_link = g_strdup_printf("/proc/self/fd/%d", fd);
- file_name = g_malloc0(PATH_MAX);
- len = readlink(proc_link, file_name, PATH_MAX - 1);
- if (len < 0) {
- len = 0;
- }
- file_name[len] = '\0';
- fprintf(stderr, "Warning: requesting persistence across crashes "
- "for backend file %s failed. Proceeding without "
- "persistence, data might become corrupted in case of host "
- "crash.\n", file_name);
- g_free(proc_link);
- g_free(file_name);
- }
- /*
- * if map failed with MAP_SHARED_VALIDATE | MAP_SYNC,
- * we will remove these flags to handle compatibility.
- */
- ptr = mmap(guardptr + offset, size, prot, flags, fd, 0);
- }
-
+    ptr = mmap_activate(guardptr + offset, size, fd, readonly, shared,
+                        is_pmem);
if (ptr == MAP_FAILED) {
munmap(guardptr, total);
return MAP_FAILED;
--
2.29.2