qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [RFC 5/7] breakpoints: put breakpoints under RCU control


From: Paolo Bonzini
Subject: Re: [Qemu-devel] [RFC 5/7] breakpoints: put breakpoints under RCU control
Date: Fri, 17 Jun 2016 18:59:48 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Thunderbird/45.1.0


On 17/06/2016 18:33, Alex Bennée wrote:
> Each time breakpoints are added/removed from the array it's done using
> a read-copy-update cycle. Simultaneous writes are protected by the
> debug_update_lock.
> 
> Signed-off-by: Alex Bennée <address@hidden>
> ---
>  cpus.c            |   3 +
>  exec.c            | 167 
> ++++++++++++++++++++++++++++++++++++++++++++----------
>  include/qom/cpu.h |  42 ++++++++++----
>  linux-user/main.c |  11 +---
>  4 files changed, 175 insertions(+), 48 deletions(-)
> 
> diff --git a/cpus.c b/cpus.c
> index 84c3520..b76300b 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -1441,6 +1441,9 @@ void qemu_init_vcpu(CPUState *cpu)
>          cpu_address_space_init(cpu, as, 0);
>      }
>  
> +    /* protects updates to debug info */

No need to comment here.

> +    qemu_mutex_init(&cpu->update_debug_lock);

Missing qemu_mutex_destroy.

>      if (kvm_enabled()) {
>          qemu_kvm_start_vcpu(cpu);
>      } else if (tcg_enabled()) {
> diff --git a/exec.c b/exec.c
> index c8e8823..d1d14c1 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -919,31 +919,94 @@ static inline bool 
> cpu_watchpoint_address_matches(CPUWatchpoint *wp,
>  }
>  
>  #endif
> -/* Find watchpoint with external reference */
> -CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref)
> +
> +/* Create a working copy of the breakpoints.
> + *
> + * The rcu_read_lock() may already be held depending on where this
> + * update has been triggered from. However it is safe to nest
> + * rcu_read_locks() so we do the copy under the lock here.

More precisely, the rcu_read_lock() and atomic_rcu_read() are
unnecessary in update paths because these already take the update lock.
However it is safe to nest rcu_read_lock() within itself or within the
update lock, so your code is fine.

> + */
> +
> +static GArray *rcu_copy_breakpoints(CPUState *cpu)
>  {
> -    CPUBreakpoint *bp = NULL;
> +    GArray *old, *new;
> +
> +    rcu_read_lock();
> +    old = atomic_rcu_read(&cpu->breakpoints);
> +    if (old) {
> +        new = g_array_sized_new(false, false, sizeof(CPUBreakpoint), 
> old->len);
> +        memcpy(new->data, old->data, old->len * sizeof(CPUBreakpoint));
> +        new->len = old->len;
> +    } else {
> +        new = g_array_new(false, false, sizeof(CPUBreakpoint));
> +    }
> +    rcu_read_unlock();
> +
> +    return new;
> +}
> +
> +struct BreakRCU {

What about creating a generic g_array_free_rcu(GArray *array, gboolean
free_segment)?  Or better g_array_unref_rcu(GArray *array) since we
require glib 2.22.

> +    struct rcu_head rcu;
> +    GArray *bkpts;
> +};
> +
> +/* RCU reclaim step */
> +static void rcu_free_breakpoints(struct BreakRCU *rcu_free)
> +{
> +    g_array_free(rcu_free->bkpts, false);

Why false?  Doesn't this leak?  (g_array_unref is another valid
replacement, as mentioned above).

> +    g_free(rcu_free);
> +}
> +
> +/* Called with update lock held */
> +static void rcu_update_breakpoints(CPUState *cpu, GArray *new_bkpts)
> +{
> +    GArray *bpts = atomic_rcu_read(&cpu->breakpoints);
> +    atomic_rcu_set(&cpu->breakpoints, new_bkpts);
> +
> +    if (bpts) {
> +        struct BreakRCU *rcu_free = g_malloc(sizeof(*rcu_free));
> +        rcu_free->bkpts = bpts;
> +        call_rcu(rcu_free, rcu_free_breakpoints, rcu);
> +    }
> +}
> +
> +/* Find watchpoint with external reference, only valid for duration of
> + * rcu_read_lock */
> +static CPUBreakpoint *find_bkpt_with_ref(GArray *bkpts, int ref)

breakpoint_get_by_ref, please.

> +{
> +    CPUBreakpoint *bp;
>      int i = 0;
> +
>      do {
> -        bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i++);
> -    } while (i < cpu->breakpoints->len && bp && bp->ref != ref);
> +        bp = &g_array_index(bkpts, CPUBreakpoint, i);
> +        if (bp->ref == ref) {
> +            return bp;
> +        }
> +    } while (i++ < bkpts->len);
>  
> -    return bp;
> +    return NULL;
> +}
> +
> +CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref)
> +{
> +    GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
> +    return find_bkpt_with_ref(bkpts, ref);
>  }
>  
>  /* Add a breakpoint.  */
>  int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr pc, int flags, int 
> ref)
>  {
> +    GArray *bkpts;
>      CPUBreakpoint *bp = NULL;
>  
> -    /* Allocate if no previous breakpoints */
> -    if (!cpu->breakpoints) {
> -        cpu->breakpoints = g_array_new(false, true, sizeof(CPUBreakpoint));
> -    }
> +    qemu_mutex_lock(&cpu->update_debug_lock);
> +
> +    /* This will allocate if no previous breakpoints */
> +    bkpts = rcu_copy_breakpoints(cpu);
>  
>      /* Find old watchpoint */
>      if (ref != BPWP_NOREF) {
> -        bp = cpu_breakpoint_get_by_ref(cpu, ref);
> +        bp = find_bkpt_with_ref(bkpts, ref);
>      }
>  
>      if (bp) {
> @@ -958,14 +1021,17 @@ int cpu_breakpoint_insert_with_ref(CPUState *cpu, 
> vaddr pc, int flags, int ref)
>  
>          /* keep all GDB-injected breakpoints in front */
>          if (flags & BP_GDB) {
> -            g_array_prepend_val(cpu->breakpoints, brk);
> +            g_array_prepend_val(bkpts, brk);
>          } else {
> -            g_array_append_val(cpu->breakpoints, brk);
> +            g_array_append_val(bkpts, brk);
>          }
>      }
>  
>      breakpoint_invalidate(cpu, pc);
>  
> +    rcu_update_breakpoints(cpu, bkpts);
> +    qemu_mutex_unlock(&cpu->update_debug_lock);
> +
>      return 0;
>  }
>  
> @@ -974,67 +1040,110 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, 
> int flags)
>      return cpu_breakpoint_insert_with_ref(cpu, pc, flags, BPWP_NOREF);
>  }
>  
> -static void cpu_breakpoint_delete(CPUState *cpu, int index)
> +/* Called with update_debug_lock held */
> +static void cpu_breakpoint_delete(CPUState *cpu, GArray *bkpts, int index)
>  {
>      CPUBreakpoint *bp;
> -    bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, index);
> +    bp = &g_array_index(bkpts, CPUBreakpoint, index);
>      breakpoint_invalidate(cpu, bp->pc);
> -    g_array_remove_index(cpu->breakpoints, index);
> +    g_array_remove_index(bkpts, index);
>  }
>  
>  /* Remove a specific breakpoint.  */
>  int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
>  {
> +    GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
>      CPUBreakpoint *bp;
> +    int retval = -ENOENT;
>  
> -    if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
> +    if (unlikely(bkpts) && unlikely(bkpts->len)) {
>          int i = 0;
> +
> +        qemu_mutex_lock(&cpu->update_debug_lock);
> +        bkpts = rcu_copy_breakpoints(cpu);
> +
>          do {
> -            bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
> +            bp = &g_array_index(bkpts, CPUBreakpoint, i);
>              if (bp && bp->pc == pc && bp->flags == flags) {
> -                cpu_breakpoint_delete(cpu, i);
> +                cpu_breakpoint_delete(cpu, bkpts, i);
> +                retval = 0;
>              } else {
>                  i++;
>              }
> -        } while (i < cpu->breakpoints->len);
> +        } while (i < bkpts->len);
> +
> +        rcu_update_breakpoints(cpu, bkpts);
> +        qemu_mutex_unlock(&cpu->update_debug_lock);
> +
>      }
>  
> -    return -ENOENT;
> +    return retval;
>  }
>  
> +#ifdef CONFIG_USER_ONLY
> +void cpu_breakpoints_clone(CPUState *old_cpu, CPUState *new_cpu)
> +{
> +   GArray *bkpts = atomic_rcu_read(&old_cpu->breakpoints);
> +
> +   if (unlikely(bkpts) && unlikely(bkpts->len)) {
> +        qemu_mutex_lock(&new_cpu->update_debug_lock);
> +        bkpts = rcu_copy_breakpoints(old_cpu);
> +        rcu_update_breakpoints(new_cpu, bkpts);
> +        qemu_mutex_unlock(&new_cpu->update_debug_lock);
> +   }
> +}
> +#endif
> +
> +
>  /* Remove a specific breakpoint by reference.  */
>  void cpu_breakpoint_remove_by_ref(CPUState *cpu, int ref)
>  {
> +    GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
>      CPUBreakpoint *bp;
>  
> -    if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
> +    if (unlikely(bkpts) && unlikely(bkpts->len)) {
>          int i = 0;
> +
> +        qemu_mutex_lock(&cpu->update_debug_lock);
> +        bkpts = rcu_copy_breakpoints(cpu);
> +
>          do {
> -            bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
> +            bp = &g_array_index(bkpts, CPUBreakpoint, i);
>              if (bp && bp->ref == ref) {
> -                cpu_breakpoint_delete(cpu, i);
> +                cpu_breakpoint_delete(cpu, bkpts, i);
>              } else {
>                  i++;
>              }
> -        } while (i < cpu->breakpoints->len);
> +        } while (i < bkpts->len);
> +
> +        rcu_update_breakpoints(cpu, bkpts);
> +        qemu_mutex_unlock(&cpu->update_debug_lock);
>      }
>  }
>  
>  /* Remove all matching breakpoints. */
>  void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
>  {
> +    GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
>      CPUBreakpoint *bp;
>  
> -    if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
> +    if (unlikely(bkpts) && unlikely(bkpts->len)) {
>          int i = 0;
> +
> +        qemu_mutex_lock(&cpu->update_debug_lock);
> +        bkpts = rcu_copy_breakpoints(cpu);
> +
>          do {
> -            bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
> +            bp = &g_array_index(bkpts, CPUBreakpoint, i);
>              if (bp->flags & mask) {
> -                cpu_breakpoint_delete(cpu, i);
> +                cpu_breakpoint_delete(cpu, bkpts, i);
>              } else {
>                  i++;
>              }
> -        } while (i < cpu->breakpoints->len);
> +        } while (i < bkpts->len);
> +
> +        rcu_update_breakpoints(cpu, bkpts);
> +        qemu_mutex_unlock(&cpu->update_debug_lock);
>      }
>  }
>  
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index f695afb..51ca28c 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -326,8 +326,11 @@ struct CPUState {
>      /* Debugging support:
>       *
>       * Both the gdbstub and architectural debug support will add
> -     * references to these arrays.
> +     * references to these arrays. The list of breakpoints and
> +     * watchpoints are updated with RCU. The update_debug_lock is only
> +     * required for updating, not for reading.
>       */
> +    QemuMutex update_debug_lock;
>      GArray *breakpoints;
>      GArray *watchpoints;
>      CPUWatchpoint *watchpoint_hit;
> @@ -849,12 +852,13 @@ int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr 
> pc, int flags, int ref);
>   * @cpu: CPU to monitor
>   * @ref: unique reference
>   *
> - * @return: a pointer to working copy of the breakpoint.
> + * @return: a pointer to working copy of the breakpoint or NULL.
>   *
> - * Return a working copy of the current referenced breakpoint. This
> - * obviously only works for breakpoints inserted with a reference. The
> - * lifetime of this objected will be limited and should not be kept
> - * beyond its immediate use. Otherwise return NULL.
> + * Return a short-term working copy of a breakpoint marked with an
> + * external reference. This obviously only works for breakpoints
> + * inserted with a reference. The lifetime of this object is the
> + * duration of the rcu_read_lock() and it may be released when
> + * rcu_synchronize is called.

"and it may be released..." is implicit.

>   */
>  CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref);
>  
> @@ -871,14 +875,32 @@ void cpu_breakpoint_remove_by_ref(CPUState *cpu, int 
> ref);
>  
>  void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
>  
> -/* Return true if PC matches an installed breakpoint.  */
> +#ifdef CONFIG_USER_ONLY
> +/**
> + * cpu_breakpoints_clone:
> + *
> + * @old_cpu: source of breakpoints
> + * @new_cpu: destination for breakpoints
> + *
> + * When a new user-mode thread is created the CPU structure behind it
> + * needs a copy of the existing breakpoint conditions. This helper does
> + * the copying.
> + */
> +void cpu_breakpoints_clone(CPUState *old_cpu, CPUState *new_cpu);
> +#endif
> +
> +/* Return true if PC matches an installed breakpoint.
> + * Called while the RCU read lock is held.

The "standard" phrasing is "Called from RCU critical section".

> + */
>  static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
>  {
> -    if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
> +    GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
> +
> +    if (unlikely(bkpts) && unlikely(bkpts->len)) {
>          CPUBreakpoint *bp;
>          int i;
> -        for (i = 0; i < cpu->breakpoints->len; i++) {
> -            bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
> +        for (i = 0; i < bkpts->len; i++) {
> +            bp = &g_array_index(bkpts, CPUBreakpoint, i);
>              if (bp->pc == pc && (bp->flags & mask)) {
>                  return true;
>              }
> diff --git a/linux-user/main.c b/linux-user/main.c
> index 179f2f2..2901626 100644
> --- a/linux-user/main.c
> +++ b/linux-user/main.c
> @@ -3808,17 +3808,10 @@ CPUArchState *cpu_copy(CPUArchState *env)
>  
>      memcpy(new_env, env, sizeof(CPUArchState));
>  
> -    /* Clone all break/watchpoints.
> +    /* Clone all breakpoints.
>         Note: Once we support ptrace with hw-debug register access, make sure
>         BP_CPU break/watchpoints are handled correctly on clone. */
> -    if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
> -        CPUBreakpoint *bp;
> -        int i;
> -        for (i = 0; i < cpu->breakpoints->len; i++) {
> -            bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
> -            cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags);
> -        }
> -    }
> +    cpu_breakpoints_clone(cpu, new_cpu);
>      if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
>          CPUWatchpoint *wp;
>          int i;
> 



reply via email to

[Prev in Thread] Current Thread [Next in Thread]