Re: [Qemu-devel] [PATCH v4 07/22] vhost: alloc shareable log


From: Michael S. Tsirkin
Subject: Re: [Qemu-devel] [PATCH v4 07/22] vhost: alloc shareable log
Date: Mon, 21 Sep 2015 22:29:29 +0300

On Mon, Sep 21, 2015 at 10:02:02AM -0400, Marc-André Lureau wrote:
> Hi
> 
> ----- Original Message -----
> > On Sat, Sep 19, 2015 at 12:11:58PM +0200, address@hidden wrote:
> > > From: Marc-André Lureau <address@hidden>
> > > 
> > > If the backend is of type VHOST_BACKEND_TYPE_USER, allocate
> > > shareable memory. Next patch will only allocate when the backend
> > > has the required feature.
> > > 
> > > Note: vhost_log_get() can use a global "vhost_log" that can be shared by
> > > several vhost devices. We may want instead a common shareable log and a
> > > common non-shareable one.
> > > 
> > > Signed-off-by: Marc-André Lureau <address@hidden>
> > 
> > Well you do at least need to count the number of times the log is
> > shared.  Otherwise, if you share the log, then unshare it, you are left
> > with a shared one.
> 
> Do you mean that if the device is removed (or stopped, so that the log
> is unref) and a shm log is no longer needed, it should replace the log
> of other devices with a non-shm log?

Yes. Basically, you have code to switch non-shared -> shared
but not back. That's asymmetrical.

> In this case, wouldn't it make
> more sense to have a log shm per device, since replacing other
> devices' logs does not seem simple without more tracking of the
> devices sharing the log?

A per device log slows down syncs. Jason coded the shared one
specifically to avoid the need to do that.

> Can this be considered a future enhancement?

What's the big issue? Just count the devices that need a shared one; if
that count drops to 0, reallocate with share == false.
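
A rough sketch of what that counting could look like, built on
vhost_log_alloc() from this patch. vhost_log_shm_users and
vhost_log_maybe_unshare() are invented names for illustration only, and
the re-pointing of remaining devices noted in the comment is exactly the
extra tracking being discussed:

/* illustration only: count the devices that still need an shm-backed log */
static unsigned int vhost_log_shm_users;

static void vhost_log_maybe_unshare(void)
{
    if (vhost_log_shm_users == 0 && vhost_log && vhost_log->fd != -1) {
        struct vhost_log *plain = vhost_log_alloc(vhost_log->size, false);

        /* keep the dirty bits accumulated so far */
        memcpy(plain->log, vhost_log->log,
               vhost_log->size * sizeof(*plain->log));
        plain->refcnt = vhost_log->refcnt;

        /* each remaining device would still have to re-issue
         * VHOST_SET_LOG_BASE with the new log address before the shm
         * log could actually be released */
        vhost_log = plain;
    }
}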

> > 
> > > ---
> > >  hw/virtio/vhost.c         | 38 +++++++++++++++++++++++++++++++-------
> > >  include/hw/virtio/vhost.h |  3 ++-
> > >  2 files changed, 33 insertions(+), 8 deletions(-)
> > > 
> > > diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> > > index a08c36b..cd3af16 100644
> > > --- a/hw/virtio/vhost.c
> > > +++ b/hw/virtio/vhost.c
> > > @@ -18,6 +18,7 @@
> > >  #include "qemu/atomic.h"
> > >  #include "qemu/range.h"
> > >  #include "qemu/error-report.h"
> > > +#include "qemu/memfd.h"
> > >  #include <linux/vhost.h>
> > >  #include "exec/address-spaces.h"
> > >  #include "hw/virtio/virtio-bus.h"
> > > @@ -286,20 +287,34 @@ static uint64_t vhost_get_log_size(struct vhost_dev *dev)
> > >      }
> > >      return log_size;
> > >  }
> > > -static struct vhost_log *vhost_log_alloc(uint64_t size)
> > > +
> > > +static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
> > >  {
> > > -    struct vhost_log *log = g_malloc0(sizeof *log + size * sizeof(*(log->log)));
> > > +    struct vhost_log *log;
> > > +    uint64_t logsize = size * sizeof(*(log->log));
> > > +    int fd = -1;
> > > +
> > > +    log = g_new0(struct vhost_log, 1);
> > > +    if (share) {
> > > +        log->log = qemu_memfd_alloc("vhost-log", logsize,
> > > +                                    F_SEAL_GROW|F_SEAL_SHRINK|F_SEAL_SEAL, &fd);
> > > +        memset(log->log, 0, logsize);
> > > +    } else {
> > > +        log->log = g_malloc0(logsize);
> > > +    }
> > >  
> > >      log->size = size;
> > >      log->refcnt = 1;
> > > +    log->fd = fd;
> > >  
> > >      return log;
> > >  }
> > >  
> > > -static struct vhost_log *vhost_log_get(uint64_t size)
> > > +static struct vhost_log *vhost_log_get(uint64_t size, bool share)
> > >  {
> > > -    if (!vhost_log || vhost_log->size != size) {
> > > -        vhost_log = vhost_log_alloc(size);
> > > +    if (!vhost_log || vhost_log->size != size ||
> > > +        (share && vhost_log->fd == -1)) {
> > > +        vhost_log = vhost_log_alloc(size, share);
> > >      } else {
> > >          ++vhost_log->refcnt;
> > >      }
> > > @@ -324,13 +339,21 @@ static void vhost_log_put(struct vhost_dev *dev, bool sync)
> > >          if (vhost_log == log) {
> > >              vhost_log = NULL;
> > >          }
> > > +
> > > +        if (log->fd == -1) {
> > > +            g_free(log->log);
> > > +        } else {
> > > +            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
> > > +                            log->fd);
> > > +        }
> > >          g_free(log);
> > >      }
> > >  }
> > >  
> > >  static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size)
> > >  {
> > > -    struct vhost_log *log = vhost_log_get(size);
> > > +    bool share = dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER;
> > > +    struct vhost_log *log = vhost_log_get(size, share);
> > >      uint64_t log_base = (uintptr_t)log->log;
> > >      int r;
> > >  
> > > @@ -1136,9 +1159,10 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
> > >  
> > >      if (hdev->log_enabled) {
> > >          uint64_t log_base;
> > > +        bool share = hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER;
> > >  
> > >          hdev->log_size = vhost_get_log_size(hdev);
> > > -        hdev->log = vhost_log_get(hdev->log_size);
> > > +        hdev->log = vhost_log_get(hdev->log_size, share);
> > >          log_base = (uintptr_t)hdev->log->log;
> > >          r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_LOG_BASE,
> > >                                          hdev->log_size ? &log_base : NULL);
> > > diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> > > index 6467c73..ab1dcac 100644
> > > --- a/include/hw/virtio/vhost.h
> > > +++ b/include/hw/virtio/vhost.h
> > > @@ -31,7 +31,8 @@ typedef unsigned long vhost_log_chunk_t;
> > >  struct vhost_log {
> > >      unsigned long long size;
> > >      int refcnt;
> > > -    vhost_log_chunk_t log[0];
> > > +    int fd;
> > > +    vhost_log_chunk_t *log;
> > >  };
> > >  
> > >  struct vhost_memory;
> > > --
> > > 2.4.3
> > 
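
For context on what a shareable log buys here: the memfd allocated above
is meant to be handed to the vhost-user backend over the control socket,
so that QEMU and the backend update the same dirty bitmap. A minimal
sketch of that consumer side, purely illustrative and not part of this
series (backend_map_log is an invented name):

#include <stdint.h>
#include <sys/mman.h>

/* vhost-user backend side (illustrative): map the log fd received
 * together with the log-base request so the dirty bitmap is shared */
static void *backend_map_log(int log_fd, uint64_t log_size)
{
    void *log = mmap(NULL, log_size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, log_fd, 0);
    return log == MAP_FAILED ? NULL : log;
}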


