
Re: [PATCH RESEND v2 5/6] target/arm/cpu: Enable 'el2' to work with host/max cpu


From: Andrew Jones
Subject: Re: [PATCH RESEND v2 5/6] target/arm/cpu: Enable 'el2' to work with host/max cpu
Date: Tue, 27 Apr 2021 11:24:23 +0200

On Thu, Apr 01, 2021 at 05:55:37AM -0700, Haibo Xu wrote:
> Turn off the 'el2' CPU property by default to keep in line with
> TCG mode, i.e. we can now use '-cpu max|host,el2=on' to enable
> nested virtualization.
> 
> Signed-off-by: Haibo Xu <haibo.xu@linaro.org>
> ---
>  hw/arm/virt.c      | 14 ++++++++++----
>  target/arm/cpu.c   |  3 ++-
>  target/arm/cpu64.c |  1 +
>  target/arm/kvm64.c | 10 ++++++++++
>  4 files changed, 23 insertions(+), 5 deletions(-)
> 
> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
> index 92d46ebcfe..74340e21bd 100644
> --- a/hw/arm/virt.c
> +++ b/hw/arm/virt.c
> @@ -454,6 +454,7 @@ static void fdt_add_gic_node(VirtMachineState *vms)
>  {
>      MachineState *ms = MACHINE(vms);
>      char *nodename;
> +    bool has_el2 = object_property_get_bool(OBJECT(first_cpu), "el2", NULL);
>  
>      vms->gic_phandle = qemu_fdt_alloc_phandle(ms->fdt);
>      qemu_fdt_setprop_cell(ms->fdt, "/", "interrupt-parent", vms->gic_phandle);
> @@ -491,7 +492,7 @@ static void fdt_add_gic_node(VirtMachineState *vms)
>                                   2, vms->memmap[VIRT_HIGH_GIC_REDIST2].size);
>          }
>  
> -        if (vms->virt) {
> +        if (vms->virt || has_el2) {
>              qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
>                                     GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
>                                     GIC_FDT_IRQ_FLAGS_LEVEL_HI);
> @@ -1911,8 +1912,8 @@ static void machvirt_init(MachineState *machine)
>      }
>  
>      if (vms->virt && kvm_enabled()) {
> -        error_report("mach-virt: KVM does not support providing "
> -                     "Virtualization extensions to the guest CPU");
> +        error_report("mach-virt: VM 'virtualization' feature is not 
> supported "
> +                     "in KVM mode, please use CPU feature 'el2' instead");
>          exit(1);
>      }
>  
> @@ -1950,11 +1951,16 @@ static void machvirt_init(MachineState *machine)
>              object_property_set_bool(cpuobj, "has_el3", false, NULL);
>          }
>  
> -        if (!vms->virt && object_property_find(cpuobj, "has_el2")) {
> +        if (!vms->virt && !kvm_enabled() &&
> +            object_property_find(cpuobj, "has_el2")) {
>              object_property_set_bool(cpuobj, "has_el2", false, NULL);
>          }
>  
>          if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) {
> +            if (kvm_enabled() && ARM_CPU(cpuobj)->has_el2) {
> +                vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC;
> +            }
> +
>              object_property_set_int(cpuobj, "psci-conduit", vms->psci_conduit,
>                                      NULL);

Is there any reason not to do

  vms->virt = object_property_get_bool(OBJECT(first_cpu), "el2", NULL);

right after the cpu realize loop here in machvirt_init()? If we did that,
we wouldn't need to scatter object_property_get_bool() calls around; we'd
just use 'vms->virt'. Actually, shouldn't vms->virt be consistent with
cpu->has_el2 anyway?
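
Something like the following at the end of the CPU creation loop, perhaps
(untested sketch; assumes the 'el2' property this series adds to the KVM
cpu objects):

  /* machvirt_init(), after all CPUs have been created/realized */
  if (kvm_enabled()) {
      vms->virt = object_property_get_bool(OBJECT(first_cpu), "el2", NULL);
  }

Then fdt_add_gic_node() could keep testing only vms->virt rather than
looking the property up again.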

>  
> diff --git a/target/arm/cpu.c b/target/arm/cpu.c
> index 30cc330f50..9530a2c4bf 100644
> --- a/target/arm/cpu.c
> +++ b/target/arm/cpu.c
> @@ -1099,7 +1099,7 @@ static Property arm_cpu_rvbar_property =
>  
>  #ifndef CONFIG_USER_ONLY
>  static Property arm_cpu_has_el2_property =
> -            DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
> +            DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, false);

Doesn't this break TCG's enablement of the feature?
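
If the default does have to flip for KVM, one way to keep TCG working
might be for machvirt to enable the property explicitly when its
'virtualization' option is on (sketch only, reusing the property helpers
from the hunk above):

  if (vms->virt && !kvm_enabled() &&
      object_property_find(cpuobj, "has_el2")) {
      object_property_set_bool(cpuobj, "has_el2", true, NULL);
  }

Though whether other machine types relied on the old default would still
need checking.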

>  
>  static Property arm_cpu_has_el3_property =
>              DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
> @@ -2018,6 +2018,7 @@ static void arm_host_initfn(Object *obj)
>      kvm_arm_set_cpu_features_from_host(cpu);
>      if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
>          aarch64_add_sve_properties(obj);
> +        aarch64_add_el2_properties(obj);
>      }
>      arm_cpu_post_init(obj);
>  }
> diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
> index 3f3f2c5495..ae8811d09e 100644
> --- a/target/arm/cpu64.c
> +++ b/target/arm/cpu64.c
> @@ -666,6 +666,7 @@ static void aarch64_max_initfn(Object *obj)
>  
>      if (kvm_enabled()) {
>          kvm_arm_set_cpu_features_from_host(cpu);
> +        aarch64_add_el2_properties(obj);
>      } else {
>          uint64_t t;
>          uint32_t u;
> diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
> index 9cacaf2eb8..7bf892404f 100644
> --- a/target/arm/kvm64.c
> +++ b/target/arm/kvm64.c
> @@ -500,6 +500,7 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
>       */
>      int fdarray[3];
>      bool sve_supported;
> +    bool el2_supported;
>      uint64_t features = 0;
>      uint64_t t;
>      int err;
> @@ -646,6 +647,7 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
>      }
>  
>      sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
> +    el2_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_EL2) > 0;
>  
>      kvm_arm_destroy_scratch_host_vcpu(fdarray);
>  
> @@ -660,6 +662,11 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
>          ahcf->isar.id_aa64pfr0 = t;
>      }
>  
> +    /* Use the ARM_FEATURE_EL2 bit to keep in line with TCG mode. */
> +    if (el2_supported) {
> +        features |= 1ULL << ARM_FEATURE_EL2;
> +    }

Do we need to do this? Why not just use kvm_arm_el2_supported()? Note, we
add a check for SVE here because we want to update ID_AA64PFR0. Unless you
want to update the ID registers (which maybe you should), I don't think we
need to touch kvm_arm_get_host_cpu_features().
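
I.e. in kvm_arch_init_vcpu(), mirror the SVE hunk directly (sketch;
assuming the kvm_arm_el2_supported() helper from earlier in the series
wraps the KVM_CAP_ARM_EL2 check):

  if (cpu->has_el2) {
      assert(kvm_arm_el2_supported());
      cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
  }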

> +
>      /*
>       * We can assume any KVM supporting CPU is at least a v8
>       * with VFPv4+Neon; this in turn implies most of the other
> @@ -861,6 +868,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
>          assert(kvm_arm_sve_supported());
>          cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
>      }
> +    if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
> +        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
> +    }

I feel like there are way too many ways to track this feature now. If I
didn't lose count we have

 1) cpu->has_el2
 2) cpu->env & ARM_FEATURE_EL2
 3) (for mach-virt) vms->virt
 4) possibly (and probably should) some ID register bits

I realize the first three are already in use for TCG, but I'm guessing
we'll want to clean those up. What's the plan going forward? I presume
it'll be (4), but maybe something like (1) and/or (3) will stick around
for convenience. I'm pretty sure we want to avoid (2).

I suggest figuring out the best way forward (at least as a next step),
then posting a patch that switches TCG over to it, and then using the
same approach for KVM.
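
For (4), the natural shape would probably be an isar_feature_* style
predicate (hypothetical, not in the tree; modelled on
isar_feature_aa64_sve()):

  static inline bool isar_feature_aa64_el2(const ARMISARegisters *id)
  {
      /* ID_AA64PFR0.EL2 != 0 means EL2 is implemented */
      return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL2) != 0;
  }

and then both TCG and KVM could key off the ID register state.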

>  
>      /* Do KVM_ARM_VCPU_INIT ioctl */
>      ret = kvm_arm_vcpu_init(cs);
> -- 
> 2.17.1
> 
> 

Thanks,
drew



