[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH] s390x/kvm: cleanup partial register handling
From: |
Alexander Graf |
Subject: |
Re: [Qemu-devel] [PATCH] s390x/kvm: cleanup partial register handling |
Date: |
Thu, 30 Jan 2014 22:34:18 +0100 |
> Am 30.01.2014 um 17:18 schrieb Christian Borntraeger <address@hidden>:
>
> From: Dominik Dingel <address@hidden>
>
> Alex, if you have no complaints, I will push this patch to s390-next for 1.8.
> This brings s390 back in line with other kvm users and simply use
> cpu_synchronize_state.
>
> Ok?
Ack.
Alex
>
> Christian
>
> ---snip---
>
>
> The partial register handling (introduced with commits
> 420840e58b85f7f4e5493dca3f273566f261090a
> 3474b679486caa8f6448bae974e131370f360c13 ) aimed to improve intercept
> handling performance.
>
> It made the code more complicated though. During development for live
> migration/init/reset etc it turned out that this might cause several hard to
> debug programming
> errors. With the introduction of ioeventfd (and future irqfd patches) the
> errors. With the introduction of ioeventfd (and future irqfd patches) the
> qemu intercept
> handlers are no longer hot-path. And therefore the partial register handling
> can be
> removed to simplify the code.
>
> Signed-off-by: Dominik Dingel <address@hidden>
> CC: Jason J. Herne <address@hidden>
> Signed-off-by: Christian Borntraeger <address@hidden>
> ---
> target-s390x/cpu.h | 17 -------
> target-s390x/kvm.c | 136 +++++++++++++++++++++--------------------------------
> 2 files changed, 53 insertions(+), 100 deletions(-)
>
> diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
> index 68b5ab7..96c2b4a 100644
> --- a/target-s390x/cpu.h
> +++ b/target-s390x/cpu.h
> @@ -78,11 +78,6 @@ typedef struct MchkQueue {
> uint16_t type;
> } MchkQueue;
>
> -/* Defined values for CPUS390XState.runtime_reg_dirty_mask */
> -#define KVM_S390_RUNTIME_DIRTY_NONE 0
> -#define KVM_S390_RUNTIME_DIRTY_PARTIAL 1
> -#define KVM_S390_RUNTIME_DIRTY_FULL 2
> -
> typedef struct CPUS390XState {
> uint64_t regs[16]; /* GP registers */
> CPU_DoubleU fregs[16]; /* FP registers */
> @@ -126,13 +121,6 @@ typedef struct CPUS390XState {
> uint64_t cputm;
> uint32_t todpr;
>
> - /* on S390 the runtime register set has two dirty states:
> - * a partial dirty state in which only the registers that
> - * are needed all the time are fetched. And a fully dirty
> - * state in which all runtime registers are fetched.
> - */
> - uint32_t runtime_reg_dirty_mask;
> -
> CPU_COMMON
>
> /* reset does memset(0) up to here */
> @@ -1076,7 +1064,6 @@ void kvm_s390_io_interrupt(S390CPU *cpu, uint16_t
> subchannel_id,
> uint32_t io_int_word);
> void kvm_s390_crw_mchk(S390CPU *cpu);
> void kvm_s390_enable_css_support(S390CPU *cpu);
> -int kvm_s390_get_registers_partial(CPUState *cpu);
> int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
> int vq, bool assign);
> int kvm_s390_cpu_restart(S390CPU *cpu);
> @@ -1094,10 +1081,6 @@ static inline void kvm_s390_crw_mchk(S390CPU *cpu)
> static inline void kvm_s390_enable_css_support(S390CPU *cpu)
> {
> }
> -static inline int kvm_s390_get_registers_partial(CPUState *cpu)
> -{
> - return -ENOSYS;
> -}
> static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier,
> uint32_t sch, int vq,
> bool assign)
> diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
> index f7b7726..f60ccdc 100644
> --- a/target-s390x/kvm.c
> +++ b/target-s390x/kvm.c
> @@ -152,33 +152,30 @@ int kvm_arch_put_registers(CPUState *cs, int level)
> }
> }
>
> - if (env->runtime_reg_dirty_mask == KVM_S390_RUNTIME_DIRTY_FULL) {
> - reg.id = KVM_REG_S390_CPU_TIMER;
> - reg.addr = (__u64)&(env->cputm);
> -        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
> - if (ret < 0) {
> - return ret;
> - }
> + /* Do we need to save more than that? */
> + if (level == KVM_PUT_RUNTIME_STATE) {
> + return 0;
> + }
>
> - reg.id = KVM_REG_S390_CLOCK_COMP;
> - reg.addr = (__u64)&(env->ckc);
> -        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
> - if (ret < 0) {
> - return ret;
> - }
> + reg.id = KVM_REG_S390_CPU_TIMER;
> + reg.addr = (__u64)&(env->cputm);
> +    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
> + if (ret < 0) {
> + return ret;
> + }
>
> - reg.id = KVM_REG_S390_TODPR;
> - reg.addr = (__u64)&(env->todpr);
> -        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
> - if (ret < 0) {
> - return ret;
> - }
> + reg.id = KVM_REG_S390_CLOCK_COMP;
> + reg.addr = (__u64)&(env->ckc);
> +    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
> + if (ret < 0) {
> + return ret;
> }
> - env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_NONE;
>
> - /* Do we need to save more than that? */
> - if (level == KVM_PUT_RUNTIME_STATE) {
> - return 0;
> + reg.id = KVM_REG_S390_TODPR;
> + reg.addr = (__u64)&(env->todpr);
> +    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
> + if (ret < 0) {
> + return ret;
> }
>
> if (cap_sync_regs &&
> @@ -216,50 +213,9 @@ int kvm_arch_get_registers(CPUState *cs)
> S390CPU *cpu = S390_CPU(cs);
> CPUS390XState *env = &cpu->env;
> struct kvm_one_reg reg;
> - int r;
> -
> - r = kvm_s390_get_registers_partial(cs);
> - if (r < 0) {
> - return r;
> - }
> -
> - reg.id = KVM_REG_S390_CPU_TIMER;
> - reg.addr = (__u64)&(env->cputm);
> -    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
> - if (r < 0) {
> - return r;
> - }
> -
> - reg.id = KVM_REG_S390_CLOCK_COMP;
> - reg.addr = (__u64)&(env->ckc);
> -    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
> - if (r < 0) {
> - return r;
> - }
> -
> - reg.id = KVM_REG_S390_TODPR;
> - reg.addr = (__u64)&(env->todpr);
> -    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
> - if (r < 0) {
> - return r;
> - }
> -
> - env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_FULL;
> - return 0;
> -}
> -
> -int kvm_s390_get_registers_partial(CPUState *cs)
> -{
> - S390CPU *cpu = S390_CPU(cs);
> - CPUS390XState *env = &cpu->env;
> struct kvm_sregs sregs;
> struct kvm_regs regs;
> - int ret;
> - int i;
> -
> - if (env->runtime_reg_dirty_mask) {
> - return 0;
> - }
> + int i, r;
>
> /* get the PSW */
> env->psw.addr = cs->kvm_run->psw_addr;
> @@ -271,9 +227,9 @@ int kvm_s390_get_registers_partial(CPUState *cs)
> env->regs[i] = cs->kvm_run->s.regs.gprs[i];
> }
> } else {
> -        ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
> - if (ret < 0) {
> - return ret;
> +        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
> + if (r < 0) {
> + return r;
> }
> for (i = 0; i < 16; i++) {
> env->regs[i] = regs.gprs[i];
> @@ -289,9 +245,9 @@ int kvm_s390_get_registers_partial(CPUState *cs)
> env->cregs[i] = cs->kvm_run->s.regs.crs[i];
> }
> } else {
> - ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
> - if (ret < 0) {
> - return ret;
> + r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
> + if (r < 0) {
> + return r;
> }
> for (i = 0; i < 16; i++) {
> env->aregs[i] = sregs.acrs[i];
> @@ -299,14 +255,33 @@ int kvm_s390_get_registers_partial(CPUState *cs)
> }
> }
>
> - /* Finally the prefix */
> + /* The prefix */
> if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
> env->psa = cs->kvm_run->s.regs.prefix;
> - } else {
> - /* no prefix without sync regs */
> }
>
> - env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_PARTIAL;
> + /* One Regs */
> + reg.id = KVM_REG_S390_CPU_TIMER;
> + reg.addr = (__u64)&(env->cputm);
> +    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
> + if (r < 0) {
> + return r;
> + }
> +
> + reg.id = KVM_REG_S390_CLOCK_COMP;
> + reg.addr = (__u64)&(env->ckc);
> +    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
> + if (r < 0) {
> + return r;
> + }
> +
> + reg.id = KVM_REG_S390_TODPR;
> + reg.addr = (__u64)&(env->todpr);
> +    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
> + if (r < 0) {
> + return r;
> + }
> +
> return 0;
> }
>
> @@ -442,15 +417,13 @@ static int kvm_handle_css_inst(S390CPU *cpu, struct
> kvm_run *run,
> uint8_t ipa0, uint8_t ipa1, uint8_t ipb)
> {
> CPUS390XState *env = &cpu->env;
> - CPUState *cs = CPU(cpu);
>
> if (ipa0 != 0xb2) {
> /* Not handled for now. */
> return -1;
> }
>
> - kvm_s390_get_registers_partial(cs);
> - cs->kvm_vcpu_dirty = true;
> + cpu_synchronize_state(CPU(cpu));
>
> switch (ipa1) {
> case PRIV_XSCH:
> @@ -537,11 +510,9 @@ static int handle_priv(S390CPU *cpu, struct kvm_run *run,
>
> static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
> {
> - CPUState *cs = CPU(cpu);
> CPUS390XState *env = &cpu->env;
>
> - kvm_s390_get_registers_partial(cs);
> - cs->kvm_vcpu_dirty = true;
> + cpu_synchronize_state(CPU(cpu));
> env->regs[2] = s390_virtio_hypercall(env);
>
> return 0;
> @@ -767,8 +738,7 @@ static int handle_tsch(S390CPU *cpu)
> struct kvm_run *run = cs->kvm_run;
> int ret;
>
> - kvm_s390_get_registers_partial(cs);
> - cs->kvm_vcpu_dirty = true;
> + cpu_synchronize_state(cs);
>
> ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb);
> if (ret >= 0) {
> --
> 1.8.4.2
>