qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [RFC PATCH V3 1/3] cpus: protect queued_work_* with work_mutex.


From: Alex Bennée
Subject: Re: [Qemu-devel] [RFC PATCH V3 1/3] cpus: protect queued_work_* with work_mutex.
Date: Mon, 20 Jul 2015 17:22:27 +0100

address@hidden writes:

> From: KONRAD Frederic <address@hidden>
>
> This protects queued_work_* used by async_run_on_cpu, run_on_cpu and
> flush_queued_work with a new lock (work_mutex) to prevent multiple
> (concurrent) accesses.
>
> Signed-off-by: KONRAD Frederic <address@hidden>
Reviewed-by: Alex Bennée <address@hidden>

>
> Changes V1 -> V2:
>   * Unlock the mutex while running the callback.
> ---
>  cpus.c            | 11 +++++++++++
>  include/qom/cpu.h |  3 +++
>  qom/cpu.c         |  1 +
>  3 files changed, 15 insertions(+)
>
> diff --git a/cpus.c b/cpus.c
> index b00a423..eabd4b1 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -845,6 +845,8 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), 
> void *data)
>      wi.func = func;
>      wi.data = data;
>      wi.free = false;
> +
> +    qemu_mutex_lock(&cpu->work_mutex);
>      if (cpu->queued_work_first == NULL) {
>          cpu->queued_work_first = &wi;
>      } else {
> @@ -853,6 +855,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), 
> void *data)
>      cpu->queued_work_last = &wi;
>      wi.next = NULL;
>      wi.done = false;
> +    qemu_mutex_unlock(&cpu->work_mutex);
>  
>      qemu_cpu_kick(cpu);
>      while (!wi.done) {
> @@ -876,6 +879,8 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void 
> *data), void *data)
>      wi->func = func;
>      wi->data = data;
>      wi->free = true;
> +
> +    qemu_mutex_lock(&cpu->work_mutex);
>      if (cpu->queued_work_first == NULL) {
>          cpu->queued_work_first = wi;
>      } else {
> @@ -884,6 +889,7 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void 
> *data), void *data)
>      cpu->queued_work_last = wi;
>      wi->next = NULL;
>      wi->done = false;
> +    qemu_mutex_unlock(&cpu->work_mutex);
>  
>      qemu_cpu_kick(cpu);
>  }
> @@ -896,15 +902,20 @@ static void flush_queued_work(CPUState *cpu)
>          return;
>      }
>  
> +    qemu_mutex_lock(&cpu->work_mutex);
>      while ((wi = cpu->queued_work_first)) {
>          cpu->queued_work_first = wi->next;
> +        qemu_mutex_unlock(&cpu->work_mutex);
>          wi->func(wi->data);
> +        qemu_mutex_lock(&cpu->work_mutex);
>          wi->done = true;
>          if (wi->free) {
>              g_free(wi);
>          }
>      }
>      cpu->queued_work_last = NULL;
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +
>      qemu_cond_broadcast(&qemu_work_cond);
>  }
>  
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index 20aabc9..efa9624 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -242,6 +242,8 @@ struct kvm_run;
>   * @mem_io_pc: Host Program Counter at which the memory was accessed.
>   * @mem_io_vaddr: Target virtual address at which the memory was accessed.
>   * @kvm_fd: vCPU file descriptor for KVM.
> + * @work_mutex: Lock to prevent multiple access to queued_work_*.
> + * @queued_work_first: First asynchronous work pending.
>   *
>   * State of one CPU core or thread.
>   */
> @@ -262,6 +264,7 @@ struct CPUState {
>      uint32_t host_tid;
>      bool running;
>      struct QemuCond *halt_cond;
> +    QemuMutex work_mutex;
>      struct qemu_work_item *queued_work_first, *queued_work_last;
>      bool thread_kicked;
>      bool created;
> diff --git a/qom/cpu.c b/qom/cpu.c
> index eb9cfec..4e12598 100644
> --- a/qom/cpu.c
> +++ b/qom/cpu.c
> @@ -316,6 +316,7 @@ static void cpu_common_initfn(Object *obj)
>      cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
>      QTAILQ_INIT(&cpu->breakpoints);
>      QTAILQ_INIT(&cpu->watchpoints);
> +    qemu_mutex_init(&cpu->work_mutex);
>  }
>  
>  static void cpu_common_finalize(Object *obj)

-- 
Alex Bennée



reply via email to

[Prev in Thread] Current Thread [Next in Thread]