From: Krish Sadhukhan
Subject: [PATCH 5/5 v5] KVM: nVMX: Fill in conforming vmx_nested_ops via macro
Date: Sat, 14 Nov 2020 01:49:55 +0000

The names of some of the vmx_nested_ops callbacks lack the 'nested_vmx_'
prefix. Rename them to 'nested_vmx_<field>' and generate the struct
initializers with a macro so that the names stay conformant. Consistent
naming improves the readability and maintainability of the code.
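
For illustration only, the token-pasting pattern applied in this patch can be
sketched in a self-contained way. The struct and callback names below are
hypothetical stand-ins, not the actual kvm_x86_nested_ops definition:

    /*
     * Minimal sketch of generating designated initializers whose function
     * names mirror the struct field names via token pasting.
     */
    #include <stdio.h>

    struct demo_nested_ops {
            int (*check_events)(int vcpu);
            int (*get_state)(int vcpu);
    };

    /* Callbacks named "nested_vmx_<field>", matching the fields above. */
    static int nested_vmx_check_events(int vcpu) { return vcpu + 1; }
    static int nested_vmx_get_state(int vcpu)    { return vcpu + 2; }

    /* Expands to:  .name = nested_vmx_name  */
    #define OP_NAME(name) .name = nested_vmx_##name

    static const struct demo_nested_ops demo_ops = {
            OP_NAME(check_events),
            OP_NAME(get_state),
    };

    int main(void)
    {
            printf("%d %d\n", demo_ops.check_events(0), demo_ops.get_state(0));
            return 0;
    }

With this pattern, a mismatch between a field name and its callback name
becomes a compile error, which is what keeps the naming conformant.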

Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/kvm/vmx/evmcs.c  |  6 +++---
 arch/x86/kvm/vmx/evmcs.h  |  4 ++--
 arch/x86/kvm/vmx/nested.c | 35 +++++++++++++++++++++--------------
 3 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index f3199bb02f22..e54b366ea114 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -324,7 +324,7 @@ bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa)
        return true;
 }
 
-uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
+uint16_t nested_vmx_get_evmcs_version(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        /*
@@ -418,7 +418,7 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
        return ret;
 }
 
-int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+int nested_vmx_enable_evmcs(struct kvm_vcpu *vcpu,
                        uint16_t *vmcs_version)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -426,7 +426,7 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
        vmx->nested.enlightened_vmcs_enabled = true;
 
        if (vmcs_version)
-               *vmcs_version = nested_get_evmcs_version(vcpu);
+               *vmcs_version = nested_vmx_get_evmcs_version(vcpu);
 
        return 0;
 }
diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
index bd41d9462355..150e7921b5fd 100644
--- a/arch/x86/kvm/vmx/evmcs.h
+++ b/arch/x86/kvm/vmx/evmcs.h
@@ -205,8 +205,8 @@ enum nested_evmptrld_status {
 };
 
 bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
-uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
-int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+uint16_t nested_vmx_get_evmcs_version(struct kvm_vcpu *vcpu);
+int nested_vmx_enable_evmcs(struct kvm_vcpu *vcpu,
                        uint16_t *vmcs_version);
 void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata);
 int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 104d6782ddc3..ecff1117f598 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3223,7 +3223,12 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
        return true;
 }
 
-static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
+static bool nested_vmx_get_pages(struct kvm_vcpu *vcpu)
+{
+       return nested_get_vmcs12_pages(vcpu);
+}
+
+static int nested_vmx_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
        struct vmcs12 *vmcs12;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -3769,13 +3774,13 @@ static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
                            vcpu->arch.exception.payload);
 }
 
-static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
+static bool nested_vmx_hv_timer_pending(struct kvm_vcpu *vcpu)
 {
        return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
               to_vmx(vcpu)->nested.preemption_timer_expired;
 }
 
-static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
+static int nested_vmx_check_events(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qual;
@@ -3830,7 +3835,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
                return 0;
        }
 
-       if (nested_vmx_preemption_timer_pending(vcpu)) {
+       if (nested_vmx_hv_timer_pending(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
@@ -5964,7 +5969,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
        return true;
 }
 
-static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
+static int nested_vmx_get_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                u32 user_data_size)
 {
@@ -6088,7 +6093,7 @@ void vmx_leave_nested(struct kvm_vcpu *vcpu)
        free_nested(vcpu);
 }
 
-static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
+static int nested_vmx_set_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                struct kvm_nested_state *kvm_state)
 {
@@ -6568,13 +6573,15 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
        return 0;
 }
 
+#define KVM_X86_NESTED_OP_NAME(name) .name = nested_vmx_##name
+
 struct kvm_x86_nested_ops vmx_nested_ops = {
-       .check_events = vmx_check_nested_events,
-       .hv_timer_pending = nested_vmx_preemption_timer_pending,
-       .get_state = vmx_get_nested_state,
-       .set_state = vmx_set_nested_state,
-       .get_pages = nested_get_vmcs12_pages,
-       .write_log_dirty = nested_vmx_write_pml_buffer,
-       .enable_evmcs = nested_enable_evmcs,
-       .get_evmcs_version = nested_get_evmcs_version,
+       KVM_X86_NESTED_OP_NAME(check_events),
+       KVM_X86_NESTED_OP_NAME(hv_timer_pending),
+       KVM_X86_NESTED_OP_NAME(get_state),
+       KVM_X86_NESTED_OP_NAME(set_state),
+       KVM_X86_NESTED_OP_NAME(get_pages),
+       KVM_X86_NESTED_OP_NAME(write_log_dirty),
+       KVM_X86_NESTED_OP_NAME(enable_evmcs),
+       KVM_X86_NESTED_OP_NAME(get_evmcs_version),
 };
-- 
2.27.0



