[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v2 10/14] target/arm/kvm64: Add kvm_arch_get/put_sve
From: |
Andrew Jones |
Subject: |
[Qemu-devel] [PATCH v2 10/14] target/arm/kvm64: Add kvm_arch_get/put_sve |
Date: |
Fri, 21 Jun 2019 18:34:18 +0200 |
These are the SVE equivalents to kvm_arch_get/put_fpsimd. Note, the
swabbing is different than it is for fpsimd because the vector format
is a little-endian stream of words.
Signed-off-by: Andrew Jones <address@hidden>
---
target/arm/kvm64.c | 135 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 131 insertions(+), 4 deletions(-)
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index a2485d447e6a..706541327491 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -673,11 +673,12 @@ int kvm_arch_destroy_vcpu(CPUState *cs)
bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
/* Return true if the regidx is a register we should synchronize
- * via the cpreg_tuples array (ie is not a core reg we sync by
- * hand in kvm_arch_get/put_registers())
+ * via the cpreg_tuples array (ie is not a core or sve reg that
+ * we sync by hand in kvm_arch_get/put_registers())
*/
switch (regidx & KVM_REG_ARM_COPROC_MASK) {
case KVM_REG_ARM_CORE:
+ case KVM_REG_ARM64_SVE:
return false;
default:
return true;
@@ -763,6 +764,70 @@ static int kvm_arch_put_fpsimd(CPUState *cs)
return 0;
}
+/*
+ * If ARM_MAX_VQ is increased to be greater than 16, then we can no
+ * longer hard code slices to 1 in kvm_arch_put/get_sve().
+ */
+QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
+
+static int kvm_arch_put_sve(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int slices = 1;
+ int i, n, ret;
+
+ for (i = 0; i < slices; i++) {
+ for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; n++) {
+ uint64_t *q = aa64_vfp_qreg(env, n);
+#ifdef HOST_WORDS_BIGENDIAN
+ uint64_t d[ARM_MAX_VQ * 2];
+ int j;
+ for (j = 0; j < cpu->sve_max_vq * 2; j++) {
+ d[j] = bswap64(q[j]);
+ }
+ reg.addr = (uintptr_t)d;
+#else
+ reg.addr = (uintptr_t)q;
+#endif
+ reg.id = KVM_REG_ARM64_SVE_ZREG(n, i);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; n++) {
+ uint64_t *q = &env->vfp.pregs[n].p[0];
+#ifdef HOST_WORDS_BIGENDIAN
+ uint64_t d[ARM_MAX_VQ * 2 / 8];
+ int j;
+ for (j = 0; j < cpu->sve_max_vq * 2 / 8; j++) {
+ d[j] = bswap64(q[j]);
+ }
+ reg.addr = (uintptr_t)d;
+#else
+ reg.addr = (uintptr_t)q;
+#endif
+ reg.id = KVM_REG_ARM64_SVE_PREG(n, i);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ reg.addr = (uintptr_t)&env->vfp.pregs[FFR_PRED_NUM].p[0];
+ reg.id = KVM_REG_ARM64_SVE_FFR(i);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
int kvm_arch_put_registers(CPUState *cs, int level)
{
struct kvm_one_reg reg;
@@ -857,7 +922,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
}
- ret = kvm_arch_put_fpsimd(cs);
+ if (!cpu->sve_max_vq) {
+ ret = kvm_arch_put_fpsimd(cs);
+ } else {
+ ret = kvm_arch_put_sve(cs);
+ }
if (ret) {
return ret;
}
@@ -920,6 +989,60 @@ static int kvm_arch_get_fpsimd(CPUState *cs)
return 0;
}
+static int kvm_arch_get_sve(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int slices = 1;
+ int i, n, ret;
+
+ for (i = 0; i < slices; i++) {
+ for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; n++) {
+ uint64_t *q = aa64_vfp_qreg(env, n);
+ reg.id = KVM_REG_ARM64_SVE_ZREG(n, i);
+ reg.addr = (uintptr_t)q;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ } else {
+#ifdef HOST_WORDS_BIGENDIAN
+ int j;
+ for (j = 0; j < cpu->sve_max_vq * 2; j++) {
+ q[j] = bswap64(q[j]);
+ }
+#endif
+ }
+ }
+
+ for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; n++) {
+ uint64_t *q = &env->vfp.pregs[n].p[0];
+ reg.id = KVM_REG_ARM64_SVE_PREG(n, i);
+ reg.addr = (uintptr_t)q;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ } else {
+#ifdef HOST_WORDS_BIGENDIAN
+ int j;
+ for (j = 0; j < cpu->sve_max_vq * 2 / 8; j++) {
+ q[j] = bswap64(q[j]);
+ }
+#endif
+ }
+ }
+
+ reg.addr = (uintptr_t)&env->vfp.pregs[FFR_PRED_NUM].p[0];
+ reg.id = KVM_REG_ARM64_SVE_FFR(i);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
int kvm_arch_get_registers(CPUState *cs)
{
struct kvm_one_reg reg;
@@ -1014,7 +1137,11 @@ int kvm_arch_get_registers(CPUState *cs)
env->spsr = env->banked_spsr[i];
}
- ret = kvm_arch_get_fpsimd(cs);
+ if (!cpu->sve_max_vq) {
+ ret = kvm_arch_get_fpsimd(cs);
+ } else {
+ ret = kvm_arch_get_sve(cs);
+ }
if (ret) {
return ret;
}
--
2.20.1
- Re: [Qemu-devel] [PATCH v2 06/14] target/arm: Allow SVE to be disabled via a CPU property, (continued)
Re: [Qemu-devel] [PATCH v2 10/14] target/arm/kvm64: Add kvm_arch_get/put_sve, Auger Eric, 2019/06/27
[Qemu-devel] [PATCH v2 11/14] target/arm/kvm64: max cpu: Enable SVE when available, Andrew Jones, 2019/06/21