From: Krish Sadhukhan
Subject: [PATCH 5/6 v3] KVM: nVMX: Fill in conforming vmx_nested_ops via macro
Date: Tue, 28 Jul 2020 00:10:49 +0000

The names of some of the functions referenced by vmx_nested_ops lack the
corresponding 'nested_vmx_' prefix. Rename those functions and generate the
initializers with a macro so that the names stay conformant. Consistent
naming improves the readability and maintainability of the code.

Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/kvm/vmx/nested.c | 24 +++++++++++++-----------
 arch/x86/kvm/vmx/nested.h |  2 +-
 arch/x86/kvm/vmx/vmx.c    |  4 ++--
 3 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index a898b53..fc09bb0 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3105,7 +3105,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_vmx_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -3295,7 +3295,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
        prepare_vmcs02_early(vmx, vmcs12);
 
        if (from_vmentry) {
-               if (unlikely(!nested_get_vmcs12_pages(vcpu)))
+               if (unlikely(!nested_vmx_get_vmcs12_pages(vcpu)))
                        return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
 
                if (nested_vmx_check_vmentry_hw(vcpu)) {
@@ -3711,7 +3711,7 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
               to_vmx(vcpu)->nested.preemption_timer_expired;
 }
 
-static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
+static int nested_vmx_check_events(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qual;
@@ -5907,7 +5907,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
        return true;
 }
 
-static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
+static int nested_vmx_get_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                u32 user_data_size)
 {
@@ -6031,7 +6031,7 @@ void vmx_leave_nested(struct kvm_vcpu *vcpu)
        free_nested(vcpu);
 }
 
-static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
+static int nested_vmx_set_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                struct kvm_nested_state *kvm_state)
 {
@@ -6448,7 +6448,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
        msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
 }
 
-void nested_vmx_hardware_unsetup(void)
+void nested_vmx_hardware_teardown(void)
 {
        int i;
 
@@ -6473,7 +6473,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
                        vmx_bitmap[i] = (unsigned long *)
                                __get_free_page(GFP_KERNEL);
                        if (!vmx_bitmap[i]) {
-                               nested_vmx_hardware_unsetup();
+                               nested_vmx_hardware_teardown();
                                return -ENOMEM;
                        }
                }
@@ -6497,12 +6497,14 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
        return 0;
 }
 
+#define KVM_X86_NESTED_OP(name) .name = nested_vmx_##name
+
 struct kvm_x86_nested_ops vmx_nested_ops = {
-       .check_events = vmx_check_nested_events,
+       KVM_X86_NESTED_OP(check_events),
        .hv_timer_pending = nested_vmx_preemption_timer_pending,
-       .get_state = vmx_get_nested_state,
-       .set_state = vmx_set_nested_state,
-       .get_vmcs12_pages = nested_get_vmcs12_pages,
+       KVM_X86_NESTED_OP(get_state),
+       KVM_X86_NESTED_OP(set_state),
+       KVM_X86_NESTED_OP(get_vmcs12_pages),
        .enable_evmcs = nested_enable_evmcs,
        .get_evmcs_version = nested_get_evmcs_version,
 };
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 758bccc..ac6b561 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -18,7 +18,7 @@ enum nvmx_vmentry_status {
 
 void vmx_leave_nested(struct kvm_vcpu *vcpu);
 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
-void nested_vmx_hardware_unsetup(void);
+void nested_vmx_hardware_teardown(void);
 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
 void nested_vmx_set_vmcs_shadowing_bitmap(void);
 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f6a6674..6512e6e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7830,7 +7830,7 @@ static void vmx_migrate_timers(struct kvm_vcpu *vcpu)
 static void vmx_hardware_teardown(void)
 {
        if (nested)
-               nested_vmx_hardware_unsetup();
+               nested_vmx_hardware_teardown();
 
        free_kvm_area();
 }
@@ -8144,7 +8144,7 @@ static __init int hardware_setup(void)
 
        r = alloc_kvm_area();
        if (r)
-               nested_vmx_hardware_unsetup();
+               nested_vmx_hardware_teardown();
        return r;
 }
 
-- 
1.8.3.1
