


From: Alex Bennée
Subject: Re: [Qemu-devel] [RFC PATCH V6 02/18] replace spinlock by QemuMutex.
Date: Tue, 07 Jul 2015 11:15:08 +0100

address@hidden writes:

> From: KONRAD Frederic <address@hidden>
>
> spinlock is only used in two cases:
>   * cpu-exec.c: to protect TranslationBlock
>   * mem_helper.c: for lock helper in target-i386 (which seems broken).
>
> It's a pthread_mutex_t in user-mode, so it is better to use QemuMutex directly
> in this case.
> This also allows reusing the tb_lock mutex of TBContext for multithreaded TCG.
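
Side note for context: QemuMutex here is a thin wrapper around
pthread_mutex_t on POSIX hosts, but unlike the old spinlock_t it has no
static initializer, so every converted lock needs an explicit
qemu_mutex_init() somewhere at startup (which is presumably why the
tcg-target.c hunk below grows a qemu_mutex_init() call). A minimal
sketch of the pattern the patch moves to; demo_lock and the demo_*
functions are made-up names, only the qemu/thread.h calls are the real
API:

    #include "qemu/thread.h"            /* QemuMutex, qemu_mutex_*             */

    static QemuMutex demo_lock;         /* hypothetical lock, for illustration */

    static void demo_init(void)
    {
        /* replaces the old "= SPIN_LOCK_UNLOCKED" static initializer */
        qemu_mutex_init(&demo_lock);
    }

    static void demo_touch_shared_state(void)
    {
        qemu_mutex_lock(&demo_lock);    /* was spin_lock(&...)   */
        /* ... access shared TCG state under the lock ... */
        qemu_mutex_unlock(&demo_lock);  /* was spin_unlock(&...) */
    }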
>
> Signed-off-by: KONRAD Frederic <address@hidden>
> ---
>  cpu-exec.c               | 15 +++++++++++----
>  include/exec/exec-all.h  |  4 ++--
>  linux-user/main.c        |  6 +++---
>  target-i386/mem_helper.c | 16 +++++++++++++---
>  tcg/i386/tcg-target.c    |  8 ++++++++
>  5 files changed, 37 insertions(+), 12 deletions(-)
>
> diff --git a/cpu-exec.c b/cpu-exec.c
> index 2ffeb6e..d6336d9 100644
> --- a/cpu-exec.c
> +++ b/cpu-exec.c
> @@ -362,7 +362,9 @@ int cpu_exec(CPUArchState *env)
>      SyncClocks sc;
>  
>      /* This must be volatile so it is not trashed by longjmp() */
> +#if defined(CONFIG_USER_ONLY)
>      volatile bool have_tb_lock = false;
> +#endif
>  
>      if (cpu->halted) {
>          if (!cpu_has_work(cpu)) {
> @@ -480,8 +482,10 @@ int cpu_exec(CPUArchState *env)
>                      cpu->exception_index = EXCP_INTERRUPT;
>                      cpu_loop_exit(cpu);
>                  }
> -                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
> +#if defined(CONFIG_USER_ONLY)
> +                qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
>                  have_tb_lock = true;
> +#endif

Why are the locking rules different for CONFIG_USER versus system
emulation? Looking at the final tree:

>                  tb = tb_find_fast(env);

this eventually ends up doing a tb_lock on the find_slow path, which IIRC
is when we might end up doing the actual code generation.
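
If that's right, then in the user-mode build cpu_exec would already hold
tb_lock while the slow path tries to take it again, and a default pthread
mutex (which is what QemuMutex boils down to in user mode) is not
recursive. A throwaway standalone illustration, plain pthreads rather
than QEMU code, just to show that a second acquisition by the same thread
does not succeed:

    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
        pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;  /* default: non-recursive */

        pthread_mutex_lock(&m);
        /* Same thread tries again, as a nested cpu_exec -> tb_find_slow
         * acquisition would. trylock reports EBUSY instead of nesting. */
        if (pthread_mutex_trylock(&m) != 0) {
            printf("second lock attempt refused: default mutexes do not nest\n");
        }
        pthread_mutex_unlock(&m);
        return 0;
    }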

>                  /* Note: we do it here to avoid a gcc bug on Mac OS X when
>                     doing it in tb_find_slow */
> @@ -503,9 +507,10 @@ int cpu_exec(CPUArchState *env)
>                     tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
>                                  next_tb & TB_EXIT_MASK, tb);
>                  }
> +#if defined(CONFIG_USER_ONLY)
>                  have_tb_lock = false;
> -                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
> -
> +                qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
> +#endif
>                  /* cpu_interrupt might be called while translating the
>                     TB, but before it is linked into a potentially
>                     infinite loop and becomes env->current_tb. Avoid
> @@ -572,10 +577,12 @@ int cpu_exec(CPUArchState *env)
>  #ifdef TARGET_I386
>              x86_cpu = X86_CPU(cpu);
>  #endif
> +#if defined(CONFIG_USER_ONLY)
>              if (have_tb_lock) {
> -                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
> +                qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
>                  have_tb_lock = false;
>              }
> +#endif
>          }
>      } /* for(;;) */
>  
> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
> index 2573e8c..44f3336 100644
> --- a/include/exec/exec-all.h
> +++ b/include/exec/exec-all.h
> @@ -176,7 +176,7 @@ struct TranslationBlock {
>      struct TranslationBlock *jmp_first;
>  };
>  
> -#include "exec/spinlock.h"
> +#include "qemu/thread.h"
>  
>  typedef struct TBContext TBContext;
>  
> @@ -186,7 +186,7 @@ struct TBContext {
>      TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
>      int nb_tbs;
>      /* any access to the tbs or the page table must use this lock */
> -    spinlock_t tb_lock;
> +    QemuMutex tb_lock;
>  
>      /* statistics */
>      int tb_flush_count;
> diff --git a/linux-user/main.c b/linux-user/main.c
> index c855bcc..bce3a98 100644
> --- a/linux-user/main.c
> +++ b/linux-user/main.c
> @@ -107,7 +107,7 @@ static int pending_cpus;
>  /* Make sure everything is in a consistent state for calling fork().  */
>  void fork_start(void)
>  {
> -    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
> +    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
>      pthread_mutex_lock(&exclusive_lock);
>      mmap_fork_start();
>  }
> @@ -129,11 +129,11 @@ void fork_end(int child)
>          pthread_mutex_init(&cpu_list_mutex, NULL);
>          pthread_cond_init(&exclusive_cond, NULL);
>          pthread_cond_init(&exclusive_resume, NULL);
> -        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
> +        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
>          gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
>      } else {
>          pthread_mutex_unlock(&exclusive_lock);
> -        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
> +        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
>      }
>  }
>  
> diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c
> index 1aec8a5..7106cc3 100644
> --- a/target-i386/mem_helper.c
> +++ b/target-i386/mem_helper.c
> @@ -23,17 +23,27 @@
>  
>  /* broken thread support */
>  
> -static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
> +#if defined(CONFIG_USER_ONLY)
> +QemuMutex global_cpu_lock;
>  
>  void helper_lock(void)
>  {
> -    spin_lock(&global_cpu_lock);
> +    qemu_mutex_lock(&global_cpu_lock);
>  }
>  
>  void helper_unlock(void)
>  {
> -    spin_unlock(&global_cpu_lock);
> +    qemu_mutex_unlock(&global_cpu_lock);
>  }
> +#else
> +void helper_lock(void)
> +{
> +}
> +
> +void helper_unlock(void)
> +{
> +}
> +#endif
>  
>  void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
>  {
> diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
> index ff4d9cf..0d7c99c 100644
> --- a/tcg/i386/tcg-target.c
> +++ b/tcg/i386/tcg-target.c
> @@ -24,6 +24,10 @@
>  
>  #include "tcg-be-ldst.h"
>  
> +#if defined(CONFIG_USER_ONLY)
> +extern QemuMutex global_cpu_lock;
> +#endif
> +
>  #ifndef NDEBUG
>  static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
>  #if TCG_TARGET_REG_BITS == 64
> @@ -2342,6 +2346,10 @@ static void tcg_target_init(TCGContext *s)
>      tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
>  
>      tcg_add_target_add_op_defs(x86_op_defs);
> +
> +#if defined(CONFIG_USER_ONLY)
> +    qemu_mutex_init(global_cpu_lock);
> +#endif
>  }
>  
>  typedef struct {

I wonder if it would be better to split the patches:

 - Convert tb spinlocks to use tb_lock
 - i386: convert lock helpers to QemuMutex

before the final

  - Remove spinlocks

-- 
Alex Bennée


