qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v3 kvm/queue 11/16] KVM: Add kvm_map_gfn_range


From: Chao Peng
Subject: [PATCH v3 kvm/queue 11/16] KVM: Add kvm_map_gfn_range
Date: Thu, 23 Dec 2021 20:30:06 +0800

This new function establishes the mapping in KVM page tables for a
given gfn range. It can be used in the memory fallocate callback for
memfd based memory to establish the mapping for KVM secondary MMU when
the pages are allocated in the memory backend.

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
---
 arch/x86/kvm/mmu/mmu.c   | 47 ++++++++++++++++++++++++++++++++++++++++
 include/linux/kvm_host.h |  2 ++
 virt/kvm/kvm_main.c      |  5 +++++
 3 files changed, 54 insertions(+)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 1d275e9d76b5..2856eb662a21 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1568,6 +1568,53 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
        return ret;
 }
 
+bool kvm_map_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+       struct kvm_vcpu *vcpu;
+       kvm_pfn_t pfn;
+       gfn_t gfn;
+       int idx;
+       bool ret = true;
+
+       /* Need vcpu context for kvm_mmu_do_page_fault. */
+       vcpu = kvm_get_vcpu(kvm, 0);
+       if (mutex_lock_killable(&vcpu->mutex))
+               return false;
+
+       vcpu_load(vcpu);
+       idx = srcu_read_lock(&kvm->srcu);
+
+       kvm_mmu_reload(vcpu);
+
+       gfn = range->start;
+       while (gfn < range->end) {
+               if (signal_pending(current)) {
+                       ret = false;
+                       break;
+               }
+
+               if (need_resched())
+                       cond_resched();
+
+               pfn = kvm_mmu_do_page_fault(vcpu, gfn << PAGE_SHIFT,
+                                       PFERR_WRITE_MASK | PFERR_USER_MASK,
+                                       false);
+               if (is_error_noslot_pfn(pfn) || kvm->vm_bugged) {
+                       ret = false;
+                       break;
+               }
+
+               gfn++;
+       }
+
+       srcu_read_unlock(&kvm->srcu, idx);
+       vcpu_put(vcpu);
+
+       mutex_unlock(&vcpu->mutex);
+
+       return ret;
+}
+
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
        bool flush = false;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index be567925831b..8c2359175509 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -241,6 +241,8 @@ struct kvm_gfn_range {
        pte_t pte;
        bool may_block;
 };
+
+bool kvm_map_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f495c1a313bd..660ce15973ad 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -471,6 +471,11 @@ EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
 #if defined(CONFIG_MEMFD_OPS) ||\
        (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
 
+bool __weak kvm_map_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+       return false;
+}
+
 typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 
 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
-- 
2.17.1




reply via email to

[Prev in Thread] Current Thread [Next in Thread]