From: Philippe Mathieu-Daudé
Subject: [RFC PATCH 12/19] accel/kvm: Use kvm_vcpu_state() when possible
Date: Wed, 3 Mar 2021 19:22:12 +0100

In preparation for moving the kvm_state field out of CPUState in a few
commits, replace the CPUState->kvm_state dereferences with calls to
kvm_vcpu_state().

Patch created mechanically using:

  $ sed -i -e 's/cpu->kvm_state/kvm_vcpu_state(cpu)/' \
           -e 's/cs->kvm_state/kvm_vcpu_state(cs)/' \
           -e 's/c->kvm_state/kvm_vcpu_state(c)/' $(git grep -l kvm_state)
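
For context, this patch assumes a kvm_vcpu_state() accessor that resolves
a vCPU to its KVMState; the sketch below only illustrates the expected
shape, it is not the exact definition from the earlier patch in the series
that introduces the helper:

    /* Hypothetical sketch of the accessor this patch relies on; the real
     * definition lives in the earlier patch that introduces it. */
    static inline KVMState *kvm_vcpu_state(CPUState *cpu)
    {
        /* Today this is equivalent to the old cpu->kvm_state dereference;
         * routing it through one helper lets a later commit move the
         * KVMState out of CPUState without touching these call sites. */
        return cpu->kvm_state;
    }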

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
 accel/kvm/kvm-all.c   | 10 +++++-----
 target/arm/kvm.c      |  2 +-
 target/arm/kvm64.c    | 12 ++++++------
 target/i386/kvm/kvm.c | 34 +++++++++++++++++-----------------
 target/ppc/kvm.c      | 16 ++++++++--------
 5 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index b787d590a9a..8259e89bbaf 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -2760,7 +2760,7 @@ struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
 {
     struct kvm_sw_breakpoint *bp;
 
-    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
+    QTAILQ_FOREACH(bp, &kvm_vcpu_state(cpu)->kvm_sw_breakpoints, entry) {
         if (bp->pc == pc) {
             return bp;
         }
@@ -2770,7 +2770,7 @@ struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
 
 int kvm_sw_breakpoints_active(CPUState *cpu)
 {
-    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
+    return !QTAILQ_EMPTY(&kvm_vcpu_state(cpu)->kvm_sw_breakpoints);
 }
 
 struct kvm_set_guest_debug_data {
@@ -2825,7 +2825,7 @@ int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
             return err;
         }
 
-        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
+        QTAILQ_INSERT_HEAD(&kvm_vcpu_state(cpu)->kvm_sw_breakpoints, bp, entry);
     } else {
         err = kvm_arch_insert_hw_breakpoint(addr, len, type);
         if (err) {
@@ -2864,7 +2864,7 @@ int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
             return err;
         }
 
-        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
+        QTAILQ_REMOVE(&kvm_vcpu_state(cpu)->kvm_sw_breakpoints, bp, entry);
         g_free(bp);
     } else {
         err = kvm_arch_remove_hw_breakpoint(addr, len, type);
@@ -2885,7 +2885,7 @@ int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
 void kvm_remove_all_breakpoints(CPUState *cpu)
 {
     struct kvm_sw_breakpoint *bp, *next;
-    KVMState *s = cpu->kvm_state;
+    KVMState *s = kvm_vcpu_state(cpu);
     CPUState *tmpcpu;
 
     QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 00e124c8123..ed7c4e4815c 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -61,7 +61,7 @@ int kvm_arm_vcpu_finalize(CPUState *cs, int feature)
 
 void kvm_arm_init_serror_injection(CPUState *cs)
 {
-    cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
+    cap_has_inject_serror_esr = kvm_check_extension(kvm_vcpu_state(cs),
                                     KVM_CAP_ARM_INJECT_SERROR_ESR);
 }
 
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index dff85f6db94..c15df0cb1b7 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -85,14 +85,14 @@ GArray *hw_breakpoints, *hw_watchpoints;
  */
 static void kvm_arm_init_debug(CPUState *cs)
 {
-    have_guest_debug = kvm_check_extension(cs->kvm_state,
+    have_guest_debug = kvm_check_extension(kvm_vcpu_state(cs),
                                            KVM_CAP_SET_GUEST_DEBUG);
 
-    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
+    max_hw_wps = kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_GUEST_DEBUG_HW_WPS);
     hw_watchpoints = g_array_sized_new(true, true,
                                        sizeof(HWWatchpoint), max_hw_wps);
 
-    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
+    max_hw_bps = kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_GUEST_DEBUG_HW_BPS);
     hw_breakpoints = g_array_sized_new(true, true,
                                        sizeof(HWBreakpoint), max_hw_bps);
     return;
@@ -837,14 +837,14 @@ int kvm_arch_init_vcpu(CPUState *cs)
     if (cs->start_powered_off) {
         cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
     }
-    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
+    if (kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_ARM_PSCI_0_2)) {
         cpu->psci_version = 2;
         cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
     }
     if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
         cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
     }
-    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
+    if (!kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_ARM_PMU_V3)) {
         cpu->has_pmu = false;
     }
     if (cpu->has_pmu) {
@@ -1411,7 +1411,7 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
             object_property_get_bool(obj, "ras", NULL)) {
         ram_addr = qemu_ram_addr_from_host(addr);
         if (ram_addr != RAM_ADDR_INVALID &&
-            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
+            kvm_physical_memory_addr_from_host(kvm_vcpu_state(c), addr, &paddr)) {
             kvm_hwpoison_page_add(ram_addr);
             /*
              * If this is a BUS_MCEERR_AR, we know we have been called
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 0b5755e42b8..b2facf4f7c1 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -583,7 +583,7 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
     if ((env->mcg_cap & MCG_SER_P) && addr) {
         ram_addr = qemu_ram_addr_from_host(addr);
         if (ram_addr != RAM_ADDR_INVALID &&
-            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
+            kvm_physical_memory_addr_from_host(kvm_vcpu_state(c), addr, &paddr)) {
             kvm_hwpoison_page_add(ram_addr);
             kvm_mce_inject(cpu, paddr, code);
 
@@ -715,7 +715,7 @@ unsigned long kvm_arch_vcpu_id(CPUState *cs)
 static bool hyperv_enabled(X86CPU *cpu)
 {
     CPUState *cs = CPU(cpu);
-    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
+    return kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_HYPERV) > 0 &&
         ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
          cpu->hyperv_features || cpu->hyperv_passthrough);
 }
@@ -747,13 +747,13 @@ static int kvm_arch_set_tsc_khz(CPUState *cs)
         return 0;
     }
 
-    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+    cur_freq = kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_GET_TSC_KHZ) ?
                kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;
 
     /*
      * If TSC scaling is supported, attempt to set TSC frequency.
      */
-    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
+    if (kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_TSC_CONTROL)) {
         set_ioctl = true;
     }
 
@@ -773,9 +773,9 @@ static int kvm_arch_set_tsc_khz(CPUState *cs)
         /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
          * TSC frequency doesn't match the one we want.
          */
-        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
-                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
-                   -ENOTSUP;
+        cur_freq = kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_GET_TSC_KHZ)
+                   ? kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ)
+                   : -ENOTSUP;
         if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
             warn_report("TSC frequency mismatch between "
                         "VM (%" PRId64 " kHz) and host (%d kHz), "
@@ -994,7 +994,7 @@ static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
     entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
     entry_recomm->ebx = cpu->hyperv_spinlock_attempts;
 
-    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
+    if (kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_HYPERV) > 0) {
         entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
         entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
         entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
@@ -1002,7 +1002,7 @@ static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
         entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
     }
 
-    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
+    if (kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_HYPERV_TIME) > 0) {
         entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
         entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
     }
@@ -1036,7 +1036,7 @@ static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
         unsigned int cap = cpu->hyperv_synic_kvm_only ?
             KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
 
-        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
+        if (kvm_check_extension(kvm_vcpu_state(cs), cap) > 0) {
             entry_feat->eax |= HV_SYNIC_AVAILABLE;
         }
     }
@@ -1045,18 +1045,18 @@ static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
         entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
     }
 
-    if (kvm_check_extension(cs->kvm_state,
+    if (kvm_check_extension(kvm_vcpu_state(cs),
                             KVM_CAP_HYPERV_TLBFLUSH) > 0) {
         entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
         entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
     }
 
-    if (kvm_check_extension(cs->kvm_state,
+    if (kvm_check_extension(kvm_vcpu_state(cs),
                             KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
         entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
     }
 
-    if (kvm_check_extension(cs->kvm_state,
+    if (kvm_check_extension(kvm_vcpu_state(cs),
                             KVM_CAP_HYPERV_SEND_IPI) > 0) {
         entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
         entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
@@ -1200,7 +1200,7 @@ static int hyperv_handle_properties(CPUState *cs,
         }
     }
 
-    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
+    if (kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_HYPERV_CPUID) > 0) {
         cpuid = get_supported_hv_cpuid(cs);
     } else {
         cpuid = get_supported_hv_cpuid_legacy(cs);
@@ -1504,7 +1504,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
      * so that vcpu's TSC frequency can be migrated later via this field.
      */
     if (!env->tsc_khz) {
-        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+        r = kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_GET_TSC_KHZ) ?
             kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
             -ENOTSUP;
         if (r > 0) {
@@ -1746,12 +1746,12 @@ int kvm_arch_init_vcpu(CPUState *cs)
     if (((env->cpuid_version >> 8)&0xF) >= 6
         && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)
-        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
+        && kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_MCE) > 0) {
         uint64_t mcg_cap, unsupported_caps;
         int banks;
         int ret;
 
-        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
+        ret = kvm_get_mce_cap_supported(kvm_vcpu_state(cs), &mcg_cap, &banks);
         if (ret < 0) {
             fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
             return ret;
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 298c1f882c6..d9a8f019a74 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -205,7 +205,7 @@ static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
     int ret, i;
 
     if (!kvm_enabled() ||
-        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
+        !kvm_check_extension(kvm_vcpu_state(cs), KVM_CAP_SW_TLB)) {
         return 0;
     }
 
@@ -303,7 +303,7 @@ target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
         flags |= KVM_PPC_MMUV3_GTSE;
     }
     cfg.flags = flags;
-    ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
+    ret = kvm_vm_ioctl(kvm_vcpu_state(cs), KVM_PPC_CONFIGURE_V3_MMU, &cfg);
     switch (ret) {
     case 0:
         return H_SUCCESS;
@@ -483,7 +483,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
         ret = kvm_booke206_tlb_init(cpu);
         break;
     case POWERPC_MMU_2_07:
-        if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
+        if (!cap_htm && !kvmppc_is_pr(kvm_vcpu_state(cs))) {
             /*
              * KVM-HV has transactional memory on POWER8 also without
              * the KVM_CAP_PPC_HTM extension, so enable it here
@@ -1947,8 +1947,8 @@ static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
 {
     CPUState *cs = env_cpu(env);
 
-    if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
-        !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
+    if (kvm_vm_check_extension(kvm_vcpu_state(cs), KVM_CAP_PPC_GET_PVINFO) &&
+        !kvm_vm_ioctl(kvm_vcpu_state(cs), KVM_PPC_GET_PVINFO, pvinfo)) {
         return 0;
     }
 
@@ -2864,7 +2864,7 @@ int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
         return -ENOSYS;
     }
 
-    return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
+    return kvm_vm_ioctl(kvm_vcpu_state(cs), KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
 }
 
 int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
@@ -2879,7 +2879,7 @@ int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
         return -ENOSYS;
     }
 
-    return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
+    return kvm_vm_ioctl(kvm_vcpu_state(cs), KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
 }
 
 /*
@@ -2909,7 +2909,7 @@ bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
         return false;
     }
 
-    return !kvmppc_is_pr(cs->kvm_state);
+    return !kvmppc_is_pr(kvm_vcpu_state(cs));
 }
 
 void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
-- 
2.26.2



