From: Chao Peng
Subject: [PATCH v3 kvm/queue 07/16] KVM: Refactor hva based memory invalidation code
Date: Thu, 23 Dec 2021 20:30:02 +0800

The purpose of this patch is to let fd-based memslots reuse the same
mmu_notifier based guest memory invalidation code for private pages.

No functional change intended, except for renaming 'hva' to the more
neutral 'useraddr' so that it can also cover the 'offset' in an fd
that private pages live in.

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
---
 include/linux/kvm_host.h |  8 ++++--
 virt/kvm/kvm_main.c      | 55 ++++++++++++++++++++++------------------
 2 files changed, 36 insertions(+), 27 deletions(-)
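
As a side note for reviewers (kept below the '---' so it stays out of the
commit message): here is a minimal standalone sketch of what the generalized
helper computes — the same gfn is reached whether the lookup key is an hva
(relative to slot->userspace_addr) or an offset into the backing fd (relative
to the slot's fd offset, named 'ofs' in this series). The struct below is a
stripped-down stand-in, not the real struct kvm_memory_slot, and the example
values are made up; it compiles as plain userspace C.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef unsigned long long gfn_t;

/* Stand-in carrying only the fields the helper touches. */
struct kvm_memory_slot {
	gfn_t base_gfn;			/* first gfn covered by the slot */
	unsigned long npages;		/* slot size in pages */
	unsigned long userspace_addr;	/* hva where the slot is mapped */
	unsigned long ofs;		/* offset into the backing fd */
};

static gfn_t useraddr_to_gfn_memslot(unsigned long useraddr,
				     struct kvm_memory_slot *slot,
				     bool addr_is_hva)
{
	/* Pick the base the caller's address is relative to. */
	unsigned long base = addr_is_hva ? slot->userspace_addr : slot->ofs;

	return slot->base_gfn + ((useraddr - base) >> PAGE_SHIFT);
}

int main(void)
{
	struct kvm_memory_slot slot = {
		.base_gfn	= 0x100,
		.npages		= 512,
		.userspace_addr	= 0x7f0000000000UL,
		.ofs		= 0x200000UL,
	};

	/* Page 3 of the slot, addressed both ways; both print gfn 0x103. */
	printf("via hva:       gfn 0x%llx\n",
	       useraddr_to_gfn_memslot(slot.userspace_addr + 0x3000, &slot, true));
	printf("via fd offset: gfn 0x%llx\n",
	       useraddr_to_gfn_memslot(slot.ofs + 0x3000, &slot, false));
	return 0;
}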

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 21f8b1880723..07863ff855cd 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1464,9 +1464,13 @@ static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
 }
 
 static inline gfn_t
-hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
+useraddr_to_gfn_memslot(unsigned long useraddr, struct kvm_memory_slot *slot,
+                       bool addr_is_hva)
 {
-       gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
+       unsigned long useraddr_base = addr_is_hva ? slot->userspace_addr
+                                                 : slot->ofs;
+
+       gfn_t gfn_offset = (useraddr - useraddr_base) >> PAGE_SHIFT;
 
        return slot->base_gfn + gfn_offset;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 47e96d1eb233..b7a1c4d7eaaa 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -486,16 +486,16 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
        srcu_read_unlock(&kvm->srcu, idx);
 }
 
-typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
+typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 
 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
                             unsigned long end);
 
-struct kvm_hva_range {
+struct kvm_useraddr_range {
        unsigned long start;
        unsigned long end;
        pte_t pte;
-       hva_handler_t handler;
+       gfn_handler_t handler;
        on_lock_fn_t on_lock;
        bool flush_on_ret;
        bool may_block;
@@ -515,13 +515,13 @@ static void kvm_null_fn(void)
 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
 
 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
-#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)         \
-       for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
+#define kvm_for_each_memslot_in_useraddr_range(node, tree, start, last)     \
+       for (node = interval_tree_iter_first(tree, start, last);             \
             node;                                                           \
             node = interval_tree_iter_next(node, start, last))      \
 
-static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
-                                                 const struct kvm_hva_range *range)
+static __always_inline int __kvm_handle_useraddr_range(struct kvm *kvm,
+                                       const struct kvm_useraddr_range *range)
 {
        bool ret = false, locked = false;
        struct kvm_gfn_range gfn_range;
@@ -540,17 +540,19 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
        idx = srcu_read_lock(&kvm->srcu);
 
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+               struct rb_root_cached *useraddr_tree;
                struct interval_tree_node *node;
 
                slots = __kvm_memslots(kvm, i);
-               kvm_for_each_memslot_in_hva_range(node, slots,
+               useraddr_tree = &slots->hva_tree;
+               kvm_for_each_memslot_in_useraddr_range(node, useraddr_tree,
                                                  range->start, range->end - 1) {
-                       unsigned long hva_start, hva_end;
+                       unsigned long useraddr_start, useraddr_end;
 
                        slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
-                       hva_start = max(range->start, slot->userspace_addr);
-                       hva_end = min(range->end, slot->userspace_addr +
-                                                 (slot->npages << PAGE_SHIFT));
+                       useraddr_start = max(range->start, slot->userspace_addr);
+                       useraddr_end = min(range->end, slot->userspace_addr +
+                                                      (slot->npages << PAGE_SHIFT));
 
                        /*
                         * To optimize for the likely case where the address
@@ -562,11 +564,14 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
                        gfn_range.may_block = range->may_block;
 
                        /*
-                        * {gfn(page) | page intersects with [hva_start, hva_end)} =
+                        * {gfn(page) | page intersects with [useraddr_start, useraddr_end)} =
                         * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                         */
-                       gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
-                       gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
+                       gfn_range.start = useraddr_to_gfn_memslot(useraddr_start,
+                                                                 slot, true);
+                       gfn_range.end = useraddr_to_gfn_memslot(
+                                               useraddr_end + PAGE_SIZE - 1,
+                                               slot, true);
                        gfn_range.slot = slot;
 
                        if (!locked) {
@@ -597,10 +602,10 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
                                                unsigned long start,
                                                unsigned long end,
                                                pte_t pte,
-                                               hva_handler_t handler)
+                                               gfn_handler_t handler)
 {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
-       const struct kvm_hva_range range = {
+       const struct kvm_useraddr_range range = {
                .start          = start,
                .end            = end,
                .pte            = pte,
@@ -610,16 +615,16 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
                .may_block      = false,
        };
 
-       return __kvm_handle_hva_range(kvm, &range);
+       return __kvm_handle_useraddr_range(kvm, &range);
 }
 
 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
                                                         unsigned long start,
                                                         unsigned long end,
-                                                        hva_handler_t handler)
+                                                        gfn_handler_t handler)
 {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
-       const struct kvm_hva_range range = {
+       const struct kvm_useraddr_range range = {
                .start          = start,
                .end            = end,
                .pte            = __pte(0),
@@ -629,7 +634,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
                .may_block      = false,
        };
 
-       return __kvm_handle_hva_range(kvm, &range);
+       return __kvm_handle_useraddr_range(kvm, &range);
 }
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
@@ -687,7 +692,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                        const struct mmu_notifier_range *range)
 {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
-       const struct kvm_hva_range hva_range = {
+       const struct kvm_useraddr_range useraddr_range = {
                .start          = range->start,
                .end            = range->end,
                .pte            = __pte(0),
@@ -711,7 +716,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
        kvm->mn_active_invalidate_count++;
        spin_unlock(&kvm->mn_invalidate_lock);
 
-       __kvm_handle_hva_range(kvm, &hva_range);
+       __kvm_handle_useraddr_range(kvm, &useraddr_range);
 
        return 0;
 }
@@ -738,7 +743,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                        const struct mmu_notifier_range *range)
 {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
-       const struct kvm_hva_range hva_range = {
+       const struct kvm_useraddr_range useraddr_range = {
                .start          = range->start,
                .end            = range->end,
                .pte            = __pte(0),
@@ -749,7 +754,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
        };
        bool wake;
 
-       __kvm_handle_hva_range(kvm, &hva_range);
+       __kvm_handle_useraddr_range(kvm, &useraddr_range);
 
        /* Pairs with the increment in range_start(). */
        spin_lock(&kvm->mn_invalidate_lock);
-- 
2.17.1