qemu-devel
From: Andrey Smetanin
Subject: Re: [Qemu-devel] [PATCH v2 2/5] target-i386/kvm: Hyper-V SynIC MSR's support
Date: Wed, 11 Nov 2015 12:09:08 +0300
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Thunderbird/38.0.1



On 11/10/2015 04:14 PM, Paolo Bonzini wrote:


On 10/11/2015 13:52, Andrey Smetanin wrote:
This patch adds support and migration for the
Hyper-V Synthetic Interrupt Controller
(Hyper-V SynIC) MSRs. Hyper-V SynIC is enabled
by the cpu's 'hv-synic' option.

This patch does not allow cpu creation if the
'hv-synic' option is specified but the kernel
does not support Hyper-V SynIC.
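
For reference, turning this on would look something like the command line
below (a sketch only: memory size and disk image are placeholders, and the
host kernel must expose the SynIC capability):

    # Sketch: enable the Hyper-V SynIC enlightenment for a KVM guest.
    # 'hv-synic=on' sets the boolean CPU property added by this patch.
    qemu-system-x86_64 -enable-kvm -m 1024 \
        -cpu host,hv-synic=on \
        -drive file=disk.img,format=raw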

Changes v2:
* activate Hyper-V SynIC by enabling corresponding vcpu cap
* reject cpu initialization if user requested Hyper-V SynIC
   but kernel does not support Hyper-V SynIC

Signed-off-by: Andrey Smetanin <address@hidden>
Reviewed-by: Roman Kagan <address@hidden>
Signed-off-by: Denis V. Lunev <address@hidden>
CC: Paolo Bonzini <address@hidden>
CC: Richard Henderson <address@hidden>
CC: Eduardo Habkost <address@hidden>
CC: "Andreas Färber" <address@hidden>
CC: Marcelo Tosatti <address@hidden>
CC: Roman Kagan <address@hidden>
CC: Denis V. Lunev <address@hidden>
CC: address@hidden

---
  target-i386/cpu-qom.h |  1 +
  target-i386/cpu.c     |  1 +
  target-i386/cpu.h     |  5 ++++
  target-i386/kvm.c     | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++-
  target-i386/machine.c | 39 ++++++++++++++++++++++++++++++
  5 files changed, 112 insertions(+), 1 deletion(-)

diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index e3bfe9d..7ea5b34 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -94,6 +94,7 @@ typedef struct X86CPU {
      bool hyperv_reset;
      bool hyperv_vpindex;
      bool hyperv_runtime;
+    bool hyperv_synic;
      bool check_cpuid;
      bool enforce_cpuid;
      bool expose_kvm;
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index e5f1c5b..1462e19 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -3142,6 +3142,7 @@ static Property x86_cpu_properties[] = {
      DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
      DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
      DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
+    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
      DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
      DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
      DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index fc4a605..8cf33df 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -918,6 +918,11 @@ typedef struct CPUX86State {
      uint64_t msr_hv_tsc;
      uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
      uint64_t msr_hv_runtime;
+    uint64_t msr_hv_synic_control;
+    uint64_t msr_hv_synic_version;
+    uint64_t msr_hv_synic_evt_page;
+    uint64_t msr_hv_synic_msg_page;
+    uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];

      /* exception/interrupt handling */
      int error_code;
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 2a9953b..cfcd01d 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -86,6 +86,7 @@ static bool has_msr_hv_crash;
  static bool has_msr_hv_reset;
  static bool has_msr_hv_vpindex;
  static bool has_msr_hv_runtime;
+static bool has_msr_hv_synic;
  static bool has_msr_mtrr;
  static bool has_msr_xss;

@@ -521,7 +522,8 @@ static bool hyperv_enabled(X86CPU *cpu)
              cpu->hyperv_crash ||
              cpu->hyperv_reset ||
              cpu->hyperv_vpindex ||
-            cpu->hyperv_runtime);
+            cpu->hyperv_runtime ||
+            cpu->hyperv_synic);
  }

  static Error *invtsc_mig_blocker;
@@ -610,6 +612,14 @@ int kvm_arch_init_vcpu(CPUState *cs)
          if (cpu->hyperv_runtime && has_msr_hv_runtime) {
              c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
          }
+        if (cpu->hyperv_synic) {
+            if (!has_msr_hv_synic ||
+                kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
+                fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
+                return -ENOSYS;
+            }
+            c->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
+        }
          c = &cpuid_data.entries[cpuid_i++];
          c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
          if (cpu->hyperv_relaxed_timing) {
@@ -950,6 +960,10 @@ static int kvm_get_supported_msrs(KVMState *s)
                      has_msr_hv_runtime = true;
                      continue;
                  }
+                if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
+                    has_msr_hv_synic = true;
+                    continue;
+                }
              }
          }

@@ -1511,6 +1525,31 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
              kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
                                env->msr_hv_runtime);
          }
+        if (cpu->hyperv_synic) {
+            int j;
+
+            if (!env->msr_hv_synic_version) {
+                /* First time initialization */
+                env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
+                for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
+                    env->msr_hv_synic_sint[j] = HV_SYNIC_SINT_MASKED;
+                }
+            }

I would prefer to put this in kvm_arch_init_vcpu, if possible.

Ok. I think kvm_arch_init_vcpu() is called after migration restores the cpu->env->msr_hv_synic_* values, so unconditional initialization of cpu->env->msr_hv_synic_* there could overwrite the migrated values. The check "if (!env->msr_hv_synic_version) {" is necessary so that first-time initialization does not overwrite them. This is also why this code migrates the 'msr_hv_synic_version' value.
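
To make that explicit, here is a minimal sketch of what the guard protects
(names as in the hunk above; it assumes the flow in this patch, where
kvm_put_msrs() runs after the incoming vmstate has been loaded):

    /* Sketch only, flow simplified.  On the migration target the vmstate
     * load has already restored env->msr_hv_synic_* by the time
     * kvm_put_msrs() runs, so msr_hv_synic_version is non-zero, the guard
     * skips re-initialization, and the migrated SINT values survive.
     * If msr_hv_synic_version were not migrated it would still be 0 here,
     * the "first time" branch would run, and every SINT would be reset
     * to HV_SYNIC_SINT_MASKED, losing the guest-programmed routing.
     */
    if (!env->msr_hv_synic_version) {
        /* fresh vcpu: apply the architectural reset values */
        env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
        for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
            env->msr_hv_synic_sint[j] = HV_SYNIC_SINT_MASKED;
        }
    }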

+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SCONTROL,
+                              env->msr_hv_synic_control);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SVERSION,
+                              env->msr_hv_synic_version);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIEFP,
+                              env->msr_hv_synic_evt_page);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIMP,
+                              env->msr_hv_synic_msg_page);
+
+            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
+                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SINT0 + j,
+                                  env->msr_hv_synic_sint[j]);
+            }
+        }
          if (has_msr_mtrr) {
              kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
              kvm_msr_entry_set(&msrs[n++],
@@ -1879,6 +1918,17 @@ static int kvm_get_msrs(X86CPU *cpu)
      if (has_msr_hv_runtime) {
          msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
      }
+    if (cpu->hyperv_synic) {
+        uint32_t msr;
+
+        msrs[n++].index = HV_X64_MSR_SCONTROL;
+        msrs[n++].index = HV_X64_MSR_SVERSION;
+        msrs[n++].index = HV_X64_MSR_SIEFP;
+        msrs[n++].index = HV_X64_MSR_SIMP;
+        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
+            msrs[n++].index = msr;
+        }
+    }
      if (has_msr_mtrr) {
          msrs[n++].index = MSR_MTRRdefType;
          msrs[n++].index = MSR_MTRRfix64K_00000;
@@ -2035,6 +2085,21 @@ static int kvm_get_msrs(X86CPU *cpu)
          case HV_X64_MSR_VP_RUNTIME:
              env->msr_hv_runtime = msrs[i].data;
              break;
+        case HV_X64_MSR_SCONTROL:
+            env->msr_hv_synic_control = msrs[i].data;
+            break;
+        case HV_X64_MSR_SVERSION:
+            env->msr_hv_synic_version = msrs[i].data;
+            break;
+        case HV_X64_MSR_SIEFP:
+            env->msr_hv_synic_evt_page = msrs[i].data;
+            break;
+        case HV_X64_MSR_SIMP:
+            env->msr_hv_synic_msg_page = msrs[i].data;
+            break;
+        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
+            break;
          case MSR_MTRRdefType:
              env->mtrr_deftype = msrs[i].data;
              break;
diff --git a/target-i386/machine.c b/target-i386/machine.c
index a18e16e..bdb2997 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -710,6 +710,44 @@ static const VMStateDescription vmstate_msr_hyperv_runtime = {
      }
  };

+static bool hyperv_synic_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    if (env->msr_hv_synic_control != 0 ||
+        env->msr_hv_synic_version != 0 ||
+        env->msr_hv_synic_evt_page != 0 ||
+        env->msr_hv_synic_msg_page != 0) {
+        return true;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+        if (env->msr_hv_synic_sint[i] != 0) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_synic = {
+    .name = "cpu/msr_hyperv_synic",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_synic_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_synic_version, X86CPU),

This one need not be migrated, since it's always the same value.

see my comment above
Paolo

+        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
+        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
+                             HV_SYNIC_SINT_COUNT),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
  static bool avx512_needed(void *opaque)
  {
      X86CPU *cpu = opaque;
@@ -893,6 +931,7 @@ VMStateDescription vmstate_x86_cpu = {
          &vmstate_msr_hyperv_time,
          &vmstate_msr_hyperv_crash,
          &vmstate_msr_hyperv_runtime,
+        &vmstate_msr_hyperv_synic,
          &vmstate_avx512,
          &vmstate_xss,
          NULL



