From: Paolo Bonzini
Subject: Re: [Qemu-devel] [RFC v4 19/28] tcg: move locking for tb_invalidate_phys_page_range up
Date: Tue, 27 Sep 2016 17:56:16 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Thunderbird/45.3.0


On 11/08/2016 17:24, Alex Bennée wrote:
> In the linux-user case all things that involve 'l1_map' and PageDesc
> tweaks are protected by the memory lock (mmap_lock). For SoftMMU mode
> we previously relied on single-threaded behaviour; with MTTCG we now use
> the tb_lock().
> 
> As a result we need to do a little refactoring and push the taking of
> this lock up the call tree. This requires a slightly different entry for
> the SoftMMU and user-mode cases from tb_invalidate_phys_range.

What exactly requires pushing the lock up?  IIUC it's really just for
the code bitmap, but then the commit message should say so.
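
(For context, the "code bitmap" is a per-page bitmap with one bit per
byte of the page that lies inside some TB; tb_invalidate_phys_page_fast()
only falls back to a full invalidate when the written bytes overlap a
set bit. A self-contained sketch of that check follows, with an invented
helper name; the real test lives inline in tb_invalidate_phys_page_fast():

    #include <limits.h>
    #include <stdbool.h>

    #define TARGET_PAGE_SIZE 4096
    #define BITS_PER_LONG    (CHAR_BIT * sizeof(unsigned long))

    typedef struct PageDesc {
        /* one bit per byte of the page covered by translated code */
        unsigned long code_bitmap[TARGET_PAGE_SIZE / BITS_PER_LONG];
    } PageDesc;

    /* len <= 8 and offset is a multiple of len, as in the fast path */
    static bool write_hits_code(const PageDesc *p, unsigned offset,
                                unsigned len)
    {
        unsigned long word = p->code_bitmap[offset / BITS_PER_LONG];
        unsigned long bits = word >> (offset % BITS_PER_LONG);
        return (bits & ((1ul << len) - 1)) != 0;
    }

Since invalidation frees the bitmap and the fast path rebuilds it,
readers have to be serialized against the rebuild, which is why the
lock moves out to the notdirty_mem_write() caller.)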

> This also means user-mode breakpoint insertion needs to take two locks,
> but it hadn't taken any previously, so this is an improvement.

Was it really a problem?  Walking the l1_map needs no lock---only
growing it and accessing the PageDesc do.
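
(Concretely, the scheme that makes the walk safe is: lookups do an
acquire load of each radix-tree level, while growth allocates under the
lock and publishes with a release store. A self-contained C11 sketch of
that pattern, reduced to a single level with invented names rather than
the real multi-level l1_map code:

    #include <stdatomic.h>
    #include <stdlib.h>

    #define L2_SIZE 256

    typedef struct PageDesc { void *first_tb; } PageDesc;

    static _Atomic(PageDesc *) level;  /* stands in for one l1_map slot */

    /* Walking: one acquire load, no lock, because a published level is
     * never freed out from under readers. */
    static PageDesc *page_find(unsigned long index)
    {
        PageDesc *pd = atomic_load_explicit(&level, memory_order_acquire);
        return pd ? &pd[index % L2_SIZE] : NULL;
    }

    /* Growing: the caller holds the lock (mmap_lock in user mode), so
     * there is one grower at a time; the release store publishes a
     * fully zeroed level to the lock-free readers. */
    static PageDesc *page_find_alloc(unsigned long index)
    {
        PageDesc *pd = atomic_load_explicit(&level, memory_order_acquire);
        if (!pd) {
            pd = calloc(L2_SIZE, sizeof(*pd));
            atomic_store_explicit(&level, pd, memory_order_release);
        }
        return &pd[index % L2_SIZE];
    }

Accessing the PageDesc contents themselves (first_tb, the code bitmap)
is the part that still needs the lock.)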

Paolo

> Signed-off-by: Alex Bennée <address@hidden>
> 
> ---
> v4
>  - reword commit message
> ---
>  exec.c          | 16 ++++++++++++++++
>  translate-all.c | 37 +++++++++++++++++++++++++++++--------
>  2 files changed, 45 insertions(+), 8 deletions(-)
> 
> diff --git a/exec.c b/exec.c
> index a39a200..f418725 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -722,7 +722,11 @@ void cpu_exec_init(CPUState *cpu, Error **errp)
>  #if defined(CONFIG_USER_ONLY)
>  static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
>  {
> +    mmap_lock();
> +    tb_lock();
>      tb_invalidate_phys_page_range(pc, pc + 1, 0);
> +    tb_unlock();
> +    mmap_unlock();
>  }
>  #else
>  static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
> @@ -731,6 +735,7 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
>      hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
>      int asidx = cpu_asidx_from_attrs(cpu, attrs);
>      if (phys != -1) {
> +        /* Locks grabbed by tb_invalidate_phys_addr */
>          tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
>                                  phys | (pc & ~TARGET_PAGE_MASK));
>      }
> @@ -2009,7 +2014,11 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
>  static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
>                                 uint64_t val, unsigned size)
>  {
> +    bool locked = false;
> +
>      if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
> +        locked = true;
> +        tb_lock();
>          tb_invalidate_phys_page_fast(ram_addr, size);
>      }
>      switch (size) {
> @@ -2025,6 +2034,11 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
>      default:
>          abort();
>      }
> +
> +    if (locked) {
> +        tb_unlock();
> +    }
> +
>      /* Set both VGA and migration bits for simplicity and to remove
>       * the notdirty callback faster.
>       */
> @@ -2505,7 +2519,9 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
>              cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
>      }
>      if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
> +        tb_lock();
>          tb_invalidate_phys_range(addr, addr + length);
> +        tb_unlock();
>          dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
>      }
>      cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
> diff --git a/translate-all.c b/translate-all.c
> index c650696..c53ae8c 100644
> --- a/translate-all.c
> +++ b/translate-all.c
> @@ -1363,12 +1363,11 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
>   * access: the virtual CPU will exit the current TB if code is modified inside
>   * this TB.
>   *
> - * Called with mmap_lock held for user-mode emulation
> + * Called with mmap_lock held for user-mode emulation, grabs tb_lock
> + * Called with tb_lock held for system-mode emulation
>   */
> -void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
> +static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
>  {
> -    assert_memory_lock();
> -
>      while (start < end) {
>          tb_invalidate_phys_page_range(start, end, 0);
>          start &= TARGET_PAGE_MASK;
> @@ -1376,6 +1375,21 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
>      }
>  }
>  
> +#ifdef CONFIG_SOFTMMU
> +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
> +{
> +    assert_tb_lock();
> +    tb_invalidate_phys_range_1(start, end);
> +}
> +#else
> +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
> +{
> +    assert_memory_lock();
> +    tb_lock();
> +    tb_invalidate_phys_range_1(start, end);
> +    tb_unlock();
> +}
> +#endif
>  /*
>   * Invalidate all TBs which intersect with the target physical address range
>   * [start;end[. NOTE: start and end must refer to the *same* physical page.
> @@ -1383,7 +1397,8 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
>   * access: the virtual CPU will exit the current TB if code is modified inside
>   * this TB.
>   *
> - * Called with mmap_lock held for user-mode emulation
> + * Called with tb_lock/mmap_lock held for user-mode emulation
> + * Called with tb_lock held for system-mode emulation
>   */
>  void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
>                                     int is_cpu_write_access)
> @@ -1406,6 +1421,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
>  #endif /* TARGET_HAS_PRECISE_SMC */
>  
>      assert_memory_lock();
> +    assert_tb_lock();
>  
>      p = page_find(start >> TARGET_PAGE_BITS);
>      if (!p) {
> @@ -1420,7 +1436,6 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
>      /* we remove all the TBs in the range [start, end[ */
>      /* XXX: see if in some cases it could be faster to invalidate all
>         the code */
> -    tb_lock();
>      tb = p->first_tb;
>      while (tb != NULL) {
>          n = (uintptr_t)tb & 3;
> @@ -1480,12 +1495,12 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
>          cpu_loop_exit_noexc(cpu);
>      }
>  #endif
> -    tb_unlock();
>  }
>  
>  #ifdef CONFIG_SOFTMMU
>  /* len must be <= 8 and start must be a multiple of len.
> - * Called via softmmu_template.h, with iothread mutex not held.
> + * Called via softmmu_template.h when code areas are written to with
> + * tb_lock held.
>   */
>  void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
>  {
> @@ -1500,6 +1515,8 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
>                    (intptr_t)cpu_single_env->segs[R_CS].base);
>      }
>  #endif
> +    assert_memory_lock();
> +
>      p = page_find(start >> TARGET_PAGE_BITS);
>      if (!p) {
>          return;
> @@ -1547,6 +1564,8 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
>      uint32_t current_flags = 0;
>  #endif
>  
> +    assert_memory_lock();
> +
>      addr &= TARGET_PAGE_MASK;
>      p = page_find(addr >> TARGET_PAGE_BITS);
>      if (!p) {
> @@ -1650,7 +1669,9 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
>          return;
>      }
>      ram_addr = memory_region_get_ram_addr(mr) + addr;
> +    tb_lock();
>      tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
> +    tb_unlock();
>      rcu_read_unlock();
>  }
>  #endif /* !defined(CONFIG_USER_ONLY) */
> 


