From: Emilio G. Cota
Subject: Re: [Qemu-devel] [PATCH 07/16] cpus-common: move CPU work item management to common code
Date: Wed, 21 Sep 2016 13:03:34 -0400
User-agent: Mutt/1.5.23 (2014-03-12)

On Mon, Sep 19, 2016 at 14:50:50 +0200, Paolo Bonzini wrote:
> From: Sergey Fedorov <address@hidden>
> 
> Make the core CPU work functions common between system and user-mode
> emulation. User-mode does not use run_on_cpu, so do not implement it.
> 
> Signed-off-by: Sergey Fedorov <address@hidden>
> Signed-off-by: Sergey Fedorov <address@hidden>
> Reviewed-by: Alex Bennée <address@hidden>
> Signed-off-by: Alex Bennée <address@hidden>
> Message-Id: <address@hidden>
> Signed-off-by: Paolo Bonzini <address@hidden>
> ---
>  bsd-user/main.c   | 11 +++++--
>  cpus-common.c     | 94 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  cpus.c            | 82 +-----------------------------------------------
>  include/qom/cpu.h | 27 +++++++++++-----
>  linux-user/main.c | 25 +++++++++++++++
>  5 files changed, 148 insertions(+), 91 deletions(-)
> 
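
A quick note on the API being made common, for anyone reviewing this cold:
run_on_cpu() executes a callback on the target vCPU's thread and blocks
until it has run; async_run_on_cpu() queues the callback and returns
immediately. An illustrative caller (do_nop is made up):

static void do_nop(CPUState *cpu, void *data)
{
    /* Runs on cpu's own thread, or inline if cpu is the calling CPU.  */
}

    run_on_cpu(cpu, do_nop, NULL);        /* blocks until do_nop has run */
    async_run_on_cpu(cpu, do_nop, NULL);  /* queues the item and returns */
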
> diff --git a/bsd-user/main.c b/bsd-user/main.c
> index 591c424..6dfa912 100644
> --- a/bsd-user/main.c
> +++ b/bsd-user/main.c
> @@ -68,11 +68,11 @@ int cpu_get_pic_interrupt(CPUX86State *env)
>  #endif
>  
>  /* These are no-ops because we are not threadsafe.  */
> -static inline void cpu_exec_start(CPUArchState *env)
> +static inline void cpu_exec_start(CPUState *cpu)
>  {
>  }
>  
> -static inline void cpu_exec_end(CPUArchState *env)
> +static inline void cpu_exec_end(CPUState *cpu)
>  {
>  }
>  
> @@ -164,7 +164,11 @@ void cpu_loop(CPUX86State *env)
>      //target_siginfo_t info;
>  
>      for(;;) {
> +        cpu_exec_start(cs);
>          trapnr = cpu_exec(cs);
> +        cpu_exec_end(cs);
> +        process_queued_cpu_work(cs);
> +
>          switch(trapnr) {
>          case 0x80:
>              /* syscall from int $0x80 */
> @@ -505,7 +509,10 @@ void cpu_loop(CPUSPARCState *env)
>      //target_siginfo_t info;
>  
>      while (1) {
> +        cpu_exec_start(cs);
>          trapnr = cpu_exec(cs);
> +        cpu_exec_end(cs);
> +        process_queued_cpu_work(cs);
>  
>          switch (trapnr) {
>  #ifndef TARGET_SPARC64
> diff --git a/cpus-common.c b/cpus-common.c
> index ca367ad..a739e66 100644
> --- a/cpus-common.c
> +++ b/cpus-common.c
> @@ -23,10 +23,12 @@
>  #include "sysemu/cpus.h"
>  
>  static QemuMutex qemu_cpu_list_mutex;
> +static QemuCond qemu_work_cond;
>  
>  void qemu_init_cpu_list(void)
>  {
>      qemu_mutex_init(&qemu_cpu_list_mutex);
> +    qemu_cond_init(&qemu_work_cond);
>  }
>  
>  void cpu_list_lock(void)
> @@ -81,3 +83,95 @@ void cpu_list_remove(CPUState *cpu)
>      cpu->cpu_index = UNASSIGNED_CPU_INDEX;
>      qemu_mutex_unlock(&qemu_cpu_list_mutex);
>  }
> +
> +struct qemu_work_item {
> +    struct qemu_work_item *next;
> +    run_on_cpu_func func;
> +    void *data;
> +    int done;
> +    bool free;
> +};
> +
> +static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
> +{
> +    qemu_mutex_lock(&cpu->work_mutex);
> +    if (cpu->queued_work_first == NULL) {
> +        cpu->queued_work_first = wi;
> +    } else {
> +        cpu->queued_work_last->next = wi;
> +    }
> +    cpu->queued_work_last = wi;
> +    wi->next = NULL;
> +    wi->done = false;
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +
> +    qemu_cpu_kick(cpu);
> +}
> +
> +void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
> +                   QemuMutex *mutex)
> +{
> +    struct qemu_work_item wi;
> +
> +    if (qemu_cpu_is_self(cpu)) {
> +        func(cpu, data);
> +        return;
> +    }
> +
> +    wi.func = func;
> +    wi.data = data;
> +    wi.free = false;
> +
> +    queue_work_on_cpu(cpu, &wi);
> +    while (!atomic_mb_read(&wi.done)) {
> +        CPUState *self_cpu = current_cpu;
> +
> +        qemu_cond_wait(&qemu_work_cond, mutex);
> +        current_cpu = self_cpu;
> +    }
> +}
(snip)
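
For reference, since the snip hides it: the rest of this hunk adds the
consumer, process_queued_cpu_work(), which the user-mode loops above now
call. Judging from the queue fields shown here, it drains the list roughly
as follows (a sketch of the mechanism, not the exact patch text):

void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }
    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        /* Pop the head of the singly-linked queue.  */
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        /* Drop work_mutex while running the callback; it may take locks.  */
        qemu_mutex_unlock(&cpu->work_mutex);
        wi->func(cpu, wi->data);
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            /* Async item: heap-allocated by async_run_on_cpu, free it.  */
            g_free(wi);
        } else {
            /* Sync item: flag completion for the waiter in do_run_on_cpu.  */
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    /* Wake any thread blocked in do_run_on_cpu.  */
    qemu_cond_broadcast(&qemu_work_cond);
}
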
> diff --git a/cpus.c b/cpus.c
> index 28d6206..c3afd18 100644
> --- a/cpus.c
> +++ b/cpus.c
(snip)
>  void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
>  {
> -    struct qemu_work_item wi;
> -
> -    if (qemu_cpu_is_self(cpu)) {
> -        func(cpu, data);
> -        return;
> -    }
> -
> -    wi.func = func;
> -    wi.data = data;
> -    wi.free = false;
> -
> -    queue_work_on_cpu(cpu, &wi);
> -    while (!atomic_mb_read(&wi.done)) {
> -        CPUState *self_cpu = current_cpu;
> -
> -        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
> -        current_cpu = self_cpu;
> -    }
> -}
> -
> -void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
> -{
> -    struct qemu_work_item *wi;
> -
> -    if (qemu_cpu_is_self(cpu)) {
> -        func(cpu, data);
> -        return;
> -    }
> -
> -    wi = g_malloc0(sizeof(struct qemu_work_item));
> -    wi->func = func;
> -    wi->data = data;
> -    wi->free = true;
> -
> -    queue_work_on_cpu(cpu, wi);
> +    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
>  }

AFAICT this is the only caller of do_run_on_cpu. Is qemu_global_mutex
necessary here? I wonder if we could just use cpu->work_mutex to wait
on a per-cpu work_cond. Contending for a global lock here doesn't
make much sense unless I'm missing something. Furthermore, that change
would allow us to get rid of the atomic accesses to wi.done, which I
dislike.
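
Something along these lines (completely untested; it assumes a new per-CPU
work_cond field next to work_mutex in CPUState, signalled by the consumer
under work_mutex, so the mutex parameter of do_run_on_cpu disappears):

void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;

    queue_work_on_cpu(cpu, &wi);

    /* wi.done is written by the consumer under cpu->work_mutex, so plain
     * accesses are enough once we hold that mutex; no atomics needed.
     */
    qemu_mutex_lock(&cpu->work_mutex);
    while (!wi.done) {
        qemu_cond_wait(&cpu->work_cond, &cpu->work_mutex);
    }
    qemu_mutex_unlock(&cpu->work_mutex);
}

(Whether the caller would first have to drop the BQL before blocking here
is the part I haven't thought through.)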

                E.


