From: Alex Bennée
Subject: Re: [Qemu-devel] [PATCH 08/12] cpus-common: move exclusive work infrastructure from linux-user
Date: Mon, 05 Sep 2016 15:55:04 +0100
User-agent: mu4e 0.9.17; emacs 25.1.11


Paolo Bonzini <address@hidden> writes:

> This will serve as the base for async_safe_run_on_cpu.
>
> Signed-off-by: Paolo Bonzini <address@hidden>
> ---
>  bsd-user/main.c   | 17 -----------
>  cpus-common.c     | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  cpus.c            |  2 ++
>  include/qom/cpu.h | 40 ++++++++++++++++++++++++-
>  linux-user/main.c | 87 -------------------------------------------------------
>  5 files changed, 123 insertions(+), 105 deletions(-)
>
> diff --git a/bsd-user/main.c b/bsd-user/main.c
> index 125067a..f29e0e3 100644
> --- a/bsd-user/main.c
> +++ b/bsd-user/main.c
> @@ -66,23 +66,6 @@ int cpu_get_pic_interrupt(CPUX86State *env)
>  }
>  #endif
>
> -/* These are no-ops because we are not threadsafe.  */
> -static inline void cpu_exec_start(CPUState *cpu)
> -{
> -}
> -
> -static inline void cpu_exec_end(CPUState *cpu)
> -{
> -}
> -
> -static inline void start_exclusive(void)
> -{
> -}
> -
> -static inline void end_exclusive(void)
> -{
> -}
> -

Does this mean bsd-user now gets exclusive support as well, or is
other plumbing required for the tb_flush to work?
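
(For what it's worth, I'd expect bsd-user's cpu_loop to need the same
bracketing linux-user already has, something like this sketch, assuming
the usual trapnr loop in bsd-user/main.c:

    for (;;) {
        cpu_exec_start(cs);      /* wait out any exclusive section */
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);        /* let a pending start_exclusive() run */
        /* ... dispatch on trapnr as before ... */
    }

Without that a bsd-user vCPU never sets ->running, so start_exclusive
would not wait for it and the section wouldn't actually be exclusive.)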

>  void fork_start(void)
>  {
>  }
> diff --git a/cpus-common.c b/cpus-common.c
> index 12729e5..47f7c06 100644
> --- a/cpus-common.c
> +++ b/cpus-common.c
> @@ -23,11 +23,21 @@
>  #include "exec/memory-internal.h"
>
>  static QemuMutex qemu_cpu_list_mutex;
> +static QemuCond exclusive_cond;
> +static QemuCond exclusive_resume;
>  static QemuCond qemu_work_cond;
>
> +static int pending_cpus;
> +
>  void qemu_init_cpu_list(void)
>  {
> +    /* This is needed because qemu_init_cpu_list is also called by the
> +     * child process in a fork.  */
> +    pending_cpus = 0;
> +
>      qemu_mutex_init(&qemu_cpu_list_mutex);
> +    qemu_cond_init(&exclusive_cond);
> +    qemu_cond_init(&exclusive_resume);
>      qemu_cond_init(&qemu_work_cond);
>  }
>
> @@ -52,6 +62,12 @@ static int cpu_get_free_index(void)
>      return cpu_index;
>  }
>
> +static void finish_safe_work(CPUState *cpu)
> +{
> +    cpu_exec_start(cpu);
> +    cpu_exec_end(cpu);
> +}
> +
>  void cpu_list_add(CPUState *cpu)
>  {
>      qemu_mutex_lock(&qemu_cpu_list_mutex);
> @@ -62,6 +78,8 @@ void cpu_list_add(CPUState *cpu)
>
>      QTAILQ_INSERT_TAIL(&cpus, cpu, node);
>      qemu_mutex_unlock(&qemu_cpu_list_mutex);
> +
> +    finish_safe_work(cpu);
>  }
>
>  void cpu_list_remove(CPUState *cpu)
> @@ -131,6 +149,70 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
>      queue_work_on_cpu(cpu, wi);
>  }
>
> +/* Wait for pending exclusive operations to complete.  The exclusive lock
> +   must be held.  */
> +static inline void exclusive_idle(void)
> +{
> +    while (pending_cpus) {
> +        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_mutex);
> +    }
> +}
> +
> +/* Start an exclusive operation.
> +   Must only be called from outside cpu_exec, takes
> +   qemu_cpu_list_mutex.   */
> +void start_exclusive(void)
> +{
> +    CPUState *other_cpu;
> +
> +    qemu_mutex_lock(&qemu_cpu_list_mutex);
> +    exclusive_idle();
> +
> +    /* Make all other cpus stop executing.  */
> +    pending_cpus = 1;
> +    CPU_FOREACH(other_cpu) {
> +        if (other_cpu->running) {
> +            pending_cpus++;
> +            qemu_cpu_kick(other_cpu);
> +        }
> +    }
> +    if (pending_cpus > 1) {
> +        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_mutex);
> +    }
> +}
> +
> +/* Finish an exclusive operation.  Releases qemu_cpu_list_mutex.  */
> +void end_exclusive(void)
> +{
> +    pending_cpus = 0;
> +    qemu_cond_broadcast(&exclusive_resume);
> +    qemu_mutex_unlock(&qemu_cpu_list_mutex);
> +}
> +
> +/* Wait for exclusive ops to finish, and begin cpu execution.  */
> +void cpu_exec_start(CPUState *cpu)
> +{
> +    qemu_mutex_lock(&qemu_cpu_list_mutex);
> +    exclusive_idle();
> +    cpu->running = true;
> +    qemu_mutex_unlock(&qemu_cpu_list_mutex);
> +}
> +
> +/* Mark cpu as not executing, and release pending exclusive ops.  */
> +void cpu_exec_end(CPUState *cpu)
> +{
> +    qemu_mutex_lock(&qemu_cpu_list_mutex);
> +    cpu->running = false;
> +    if (pending_cpus > 1) {
> +        pending_cpus--;
> +        if (pending_cpus == 1) {
> +            qemu_cond_signal(&exclusive_cond);
> +        }
> +    }
> +    exclusive_idle();
> +    qemu_mutex_unlock(&qemu_cpu_list_mutex);
> +}
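
Took me a moment to parse the pending_cpus protocol: 1 means "an
exclusive section is in progress", and each vCPU that was still running
when start_exclusive() scanned the list adds one more, so the requester
sleeps until the count drains back down to 1. For reference, this is
how I read the intended caller-side usage (do_work() here is purely
illustrative, not from this patch):

    start_exclusive();   /* returns with qemu_cpu_list_mutex held and
                            all other vCPUs out of guest code */
    do_work();           /* e.g. a tb_flush-style operation */
    end_exclusive();     /* wakes vCPUs blocked in exclusive_idle()
                            and drops the mutex */
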
> +
>  void process_queued_cpu_work(CPUState *cpu)
>  {
>      struct qemu_work_item *wi;
> diff --git a/cpus.c b/cpus.c
> index e1bdc16..b024604 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -1456,7 +1456,9 @@ static int tcg_cpu_exec(CPUState *cpu)
>          cpu->icount_decr.u16.low = decr;
>          cpu->icount_extra = count;
>      }
> +    cpu_exec_start(cpu);
>      ret = cpu_exec(cpu);
> +    cpu_exec_end(cpu);
>  #ifdef CONFIG_PROFILER
>      tcg_time += profile_getclock() - ti;
>  #endif
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index d7688f6..0e04e8f 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -249,7 +249,8 @@ struct qemu_work_item {
>   * @nr_threads: Number of threads within this CPU.
>   * @numa_node: NUMA node this CPU is belonging to.
>   * @host_tid: Host thread ID.
> - * @running: #true if CPU is currently running (usermode).
> + * @running: #true if CPU is currently running;
> + * valid under cpu_list_lock.
>   * @created: Indicates whether the CPU thread has been successfully created.
>   * @interrupt_request: Indicates a pending interrupt request.
>   * @halted: Nonzero if the CPU is in suspended state.
> @@ -826,6 +827,43 @@ void cpu_remove_sync(CPUState *cpu);
>  void process_queued_cpu_work(CPUState *cpu);
>
>  /**
> + * cpu_exec_start:
> + * @cpu: The CPU for the current thread.
> + *
> + * Record that a CPU has started execution and can be interrupted with
> + * cpu_exit.
> + */
> +void cpu_exec_start(CPUState *cpu);
> +
> +/**
> + * cpu_exec_end:
> + * @cpu: The CPU for the current thread.
> + *
> + * Record that a CPU has stopped execution and safe work can be executed
> + * without interrupting it.
> + */
> +void cpu_exec_end(CPUState *cpu);
> +
> +/**
> + * start_exclusive:
> + * @cpu: The CPU for the current thread.
> + *
> + * Similar to async_safe_work_run_on_cpu, but the work item is only
> + * run on one CPU and is between start_exclusive and end_exclusive.
> + * Returns with the CPU list lock taken (which nests outside all
> + * other locks except the BQL).
> + */
> +void start_exclusive(void);

Hmm, the comment here looks as though it has been transplanted or
somehow mangled. The function doesn't take @cpu and isn't like
async_safe_work_run_on_cpu.
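
Maybe something more along these lines (just a suggested rewording on
my part, not text from anywhere in the patch):

    /**
     * start_exclusive:
     *
     * Kick all running vCPUs out of guest code and wait until they
     * have stopped, so the caller can do work that no vCPU may
     * observe half-done.  Returns with the CPU list lock taken
     * (which nests outside all other locks except the BQL).
     */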

> +
> +/**
> + * end_exclusive:
> + *
> + * Concludes an exclusive execution section started by start_exclusive.
> + * Releases the CPU list lock.
> + */
> +void end_exclusive(void);
> +
> +/**
>   * qemu_init_vcpu:
>   * @cpu: The vCPU to initialize.
>   *
> diff --git a/linux-user/main.c b/linux-user/main.c
> index 4972bbe..cecab8d 100644
> --- a/linux-user/main.c
> +++ b/linux-user/main.c
> @@ -107,28 +107,11 @@ int cpu_get_pic_interrupt(CPUX86State *env)
>  /***********************************************************/
>  /* Helper routines for implementing atomic operations.  */
>
> -/* To implement exclusive operations we force all cpus to syncronise.
> -   We don't require a full sync, only that no cpus are executing guest code.
> -   The alternative is to map target atomic ops onto host equivalents,
> -   which requires quite a lot of per host/target work.  */
> -static QemuMutex exclusive_lock;
> -static QemuCond exclusive_cond;
> -static QemuCond exclusive_resume;
> -static int pending_cpus;
> -
> -void qemu_init_cpu_loop(void)
> -{
> -    qemu_mutex_init(&exclusive_lock);
> -    qemu_cond_init(&exclusive_cond);
> -    qemu_cond_init(&exclusive_resume);
> -}
> -
>  /* Make sure everything is in a consistent state for calling fork().  */
>  void fork_start(void)
>  {
>      cpu_list_lock();
>      qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
> -    qemu_mutex_lock(&exclusive_lock);
>      mmap_fork_start();
>  }
>
> @@ -144,84 +127,15 @@ void fork_end(int child)
>                  QTAILQ_REMOVE(&cpus, cpu, node);
>              }
>          }
> -        pending_cpus = 0;
> -        qemu_mutex_init(&exclusive_lock);
> -        qemu_cond_init(&exclusive_cond);
> -        qemu_cond_init(&exclusive_resume);
>          qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
>          qemu_init_cpu_list();
>          gdbserver_fork(thread_cpu);
>      } else {
> -        qemu_mutex_unlock(&exclusive_lock);
>          qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
>          cpu_list_unlock();
>      }
>  }
>
> -/* Wait for pending exclusive operations to complete.  The exclusive lock
> -   must be held.  */
> -static inline void exclusive_idle(void)
> -{
> -    while (pending_cpus) {
> -        qemu_cond_wait(&exclusive_resume, &exclusive_lock);
> -    }
> -}
> -
> -/* Start an exclusive operation.
> -   Must only be called from outside cpu_exec.   */
> -static inline void start_exclusive(void)
> -{
> -    CPUState *other_cpu;
> -
> -    qemu_mutex_lock(&exclusive_lock);
> -    exclusive_idle();
> -
> -    pending_cpus = 1;
> -    /* Make all other cpus stop executing.  */
> -    CPU_FOREACH(other_cpu) {
> -        if (other_cpu->running) {
> -            pending_cpus++;
> -            cpu_exit(other_cpu);
> -        }
> -    }
> -    if (pending_cpus > 1) {
> -        qemu_cond_wait(&exclusive_cond, &exclusive_lock);
> -    }
> -}
> -
> -/* Finish an exclusive operation.  */
> -static inline void __attribute__((unused)) end_exclusive(void)
> -{
> -    pending_cpus = 0;
> -    qemu_cond_broadcast(&exclusive_resume);
> -    qemu_mutex_unlock(&exclusive_lock);
> -}
> -
> -/* Wait for exclusive ops to finish, and begin cpu execution.  */
> -static inline void cpu_exec_start(CPUState *cpu)
> -{
> -    qemu_mutex_lock(&exclusive_lock);
> -    exclusive_idle();
> -    cpu->running = true;
> -    qemu_mutex_unlock(&exclusive_lock);
> -}
> -
> -/* Mark cpu as not executing, and release pending exclusive ops.  */
> -static inline void cpu_exec_end(CPUState *cpu)
> -{
> -    qemu_mutex_lock(&exclusive_lock);
> -    cpu->running = false;
> -    if (pending_cpus > 1) {
> -        pending_cpus--;
> -        if (pending_cpus == 1) {
> -            qemu_cond_signal(&exclusive_cond);
> -        }
> -    }
> -    exclusive_idle();
> -    qemu_mutex_unlock(&exclusive_lock);
> -}
> -
> -
>  #ifdef TARGET_I386
>  /***********************************************************/
>  /* CPUX86 core interface */
> @@ -4256,7 +4170,6 @@ int main(int argc, char **argv, char **envp)
>      int execfd;
>
>      qemu_init_cpu_list();
> -    qemu_init_cpu_loop();
>      module_call_init(MODULE_INIT_QOM);
>
>      if ((envlist = envlist_create()) == NULL) {


Aside from a few niggles about the comments it looks good to me. Fix
those and you can have a:

Reviewed-by: Alex Bennée <address@hidden>

--
Alex Bennée


