From: Ming Lei
Subject: Re: [Qemu-devel] [PATCH 07/15] dataplane: use object pool to speed up allocation for virtio blk request
Date: Thu, 31 Jul 2014 11:22:50 +0800

On Wed, Jul 30, 2014 at 10:14 PM, Paolo Bonzini <address@hidden> wrote:
> On 30/07/2014 13:39, Ming Lei wrote:
>> g_slice_new(VirtIOBlockReq), its free pair, and accessing the instance
>> are a bit slow since sizeof(VirtIOBlockReq) is more than 40KB, so use
>> an object pool to speed up allocation and release.
>>
>> With this patch, a ~5% throughput improvement is observed in a VM
>> running on a server machine.
>>
>> Signed-off-by: Ming Lei <address@hidden>
>> ---
>>  hw/block/dataplane/virtio-blk.c |   12 ++++++++++++
>>  hw/block/virtio-blk.c           |   13 +++++++++++--
>>  include/hw/virtio/virtio-blk.h  |    2 ++
>>  3 files changed, 25 insertions(+), 2 deletions(-)
>>
>> diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
>> index 2093e4a..828fe99 100644
>> --- a/hw/block/dataplane/virtio-blk.c
>> +++ b/hw/block/dataplane/virtio-blk.c
>> @@ -24,6 +24,8 @@
>>  #include "hw/virtio/virtio-bus.h"
>>  #include "qom/object_interfaces.h"
>>
>> +#define REQ_POOL_SZ 128
>> +
>>  struct VirtIOBlockDataPlane {
>>      bool started;
>>      bool starting;
>> @@ -51,6 +53,10 @@ struct VirtIOBlockDataPlane {
>>      Error *blocker;
>>      void (*saved_complete_request)(struct VirtIOBlockReq *req,
>>                                     unsigned char status);
>> +
>> +    VirtIOBlockReq  reqs[REQ_POOL_SZ];
>> +    void *free_reqs[REQ_POOL_SZ];
>> +    ObjPool  req_pool;
>>  };
>>
>>  /* Raise an interrupt to signal guest, if necessary */
>> @@ -238,6 +244,10 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
>>          return;
>>      }
>>
>> +    vblk->obj_pool = &s->req_pool;
>> +    obj_pool_init(vblk->obj_pool, s->reqs, s->free_reqs,
>> +                  sizeof(VirtIOBlockReq), REQ_POOL_SZ);
>> +
>>      /* Set up guest notifier (irq) */
>>      if (k->set_guest_notifiers(qbus->parent, 1, true) != 0) {
>>          fprintf(stderr, "virtio-blk failed to set guest notifier, "
>> @@ -298,6 +308,8 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
>>
>>      aio_context_release(s->ctx);
>>
>> +    vblk->obj_pool = NULL;
>> +
>>      if (s->raw_format) {
>>          qemu_aio_set_bypass_co(s->ctx, false);
>>      }
>> diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
>> index c241c50..2a11bc4 100644
>> --- a/hw/block/virtio-blk.c
>> +++ b/hw/block/virtio-blk.c
>> @@ -31,7 +31,11 @@
>>
>>  VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
>>  {
>> -    VirtIOBlockReq *req = g_slice_new(VirtIOBlockReq);
>> +    VirtIOBlockReq *req = obj_pool_get(s->obj_pool);
>> +
>> +    if (!req) {
>> +        req = g_slice_new(VirtIOBlockReq);
>> +    }
>>      req->dev = s;
>>      req->qiov.size = 0;
>>      req->next = NULL;
>> @@ -41,7 +45,11 @@ VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
>>  void virtio_blk_free_request(VirtIOBlockReq *req)
>>  {
>>      if (req) {
>> -        g_slice_free(VirtIOBlockReq, req);
>> +        if (obj_pool_has_obj(req->dev->obj_pool, req)) {
>> +            obj_pool_put(req->dev->obj_pool, req);
>> +        } else {
>> +            g_slice_free(VirtIOBlockReq, req);
>> +        }
>>      }
>>  }
>>
>> @@ -801,6 +809,7 @@ static void virtio_blk_instance_init(Object *obj)
>>  {
>>      VirtIOBlock *s = VIRTIO_BLK(obj);
>>
>> +    s->obj_pool = NULL;
>>      object_property_add_link(obj, "iothread", TYPE_IOTHREAD,
>>                               (Object **)&s->blk.iothread,
>>                               qdev_prop_allow_set_link_before_realize,
>> diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
>> index afb7b8d..49ac234 100644
>> --- a/include/hw/virtio/virtio-blk.h
>> +++ b/include/hw/virtio/virtio-blk.h
>> @@ -18,6 +18,7 @@
>>  #include "hw/block/block.h"
>>  #include "sysemu/iothread.h"
>>  #include "block/block.h"
>> +#include "qemu/obj_pool.h"
>>
>>  #define TYPE_VIRTIO_BLK "virtio-blk-device"
>>  #define VIRTIO_BLK(obj) \
>> @@ -135,6 +136,7 @@ typedef struct VirtIOBlock {
>>      Notifier migration_state_notifier;
>>      struct VirtIOBlockDataPlane *dataplane;
>>  #endif
>> +    ObjPool *obj_pool;
>>  } VirtIOBlock;
>>
>>  typedef struct MultiReqBuffer {
>>
>
> The problem is that g_slice here is not using the slab-style allocator
> because the object is larger than roughly 500 bytes.  One solution would
> be to make virtqueue_pop/vring_pop allocate a VirtQueueElement of the
> right size (and virtqueue_push/vring_push free it), as mentioned in the
> review of patch 8.

Unluckily, neither the iovec nor the addr array can fit into 500 bytes. :-(
Not to mention that all users of VirtQueueElement would need to be changed
too; I'd hate to pull that work into this patchset. :-)

>
> However, I now remembered that VirtQueueElement is a mess because it's
> serialized directly into the migration state. :(  So you basically
> cannot change it without mucking with migration.  Please leave out patch
> 8 for now.

The save_device code serializes elem in this way:

    qemu_put_buffer(f, (unsigned char *)&req->elem,
                    sizeof(VirtQueueElement));

so I am wondering why this patch would break migration.
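
Presumably the load side does the mirror image of that (a sketch from
memory, not quoted from the tree):

    qemu_get_buffer(f, (unsigned char *)&req->elem,
                    sizeof(VirtQueueElement));

and this patch does not touch the layout of VirtQueueElement at all, so
the raw copy in and out should be unaffected.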

And in my tests, live migration survives with the patch applied.

>
> Let's use this object pool, however, please simplify the API, it should
> be just:
>
> void obj_pool_init(ObjPool *pool, unsigned obj_size, unsigned cnt);

With that signature, the obj pool can't be backed by a static buffer,
which is sometimes very convenient since users don't need to worry about
releasing it.

But that won't be a big deal for the dataplane case.
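
To make that concrete, here is a minimal sketch of a free-list pool backed
by a caller-provided buffer, matching the obj_pool_init()/obj_pool_get()/
obj_pool_put()/obj_pool_has_obj() calls in the patch above.  The real
qemu/obj_pool.h from this series isn't quoted in this mail, so the
internals below are only my guess at a plausible implementation:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct ObjPool {
        char *objs;        /* caller-provided backing buffer */
        void **free_objs;  /* caller-provided stack of free slots */
        unsigned obj_size;
        unsigned cnt;
        unsigned free_cnt; /* entries left on the free stack */
    } ObjPool;

    void obj_pool_init(ObjPool *pool, void *objs_buf, void **free_objs,
                       unsigned obj_size, unsigned cnt)
    {
        unsigned i;

        pool->objs = objs_buf;
        pool->free_objs = free_objs;
        pool->obj_size = obj_size;
        pool->cnt = cnt;

        /* Initially every slot of the backing buffer is free */
        for (i = 0; i < cnt; i++) {
            pool->free_objs[i] = pool->objs + (size_t)i * obj_size;
        }
        pool->free_cnt = cnt;
    }

    void *obj_pool_get(ObjPool *pool)
    {
        /* Returns NULL when unset or exhausted; the caller falls back */
        return (pool && pool->free_cnt) ?
               pool->free_objs[--pool->free_cnt] : NULL;
    }

    bool obj_pool_has_obj(ObjPool *pool, void *obj)
    {
        /* An object belongs to the pool iff it lies inside the buffer */
        return pool && (char *)obj >= pool->objs &&
               (char *)obj < pool->objs + (size_t)pool->cnt * pool->obj_size;
    }

    void obj_pool_put(ObjPool *pool, void *obj)
    {
        pool->free_objs[pool->free_cnt++] = obj;
    }

Since the caller hands in both reqs[] and free_reqs[], nothing here ever
needs to be freed, which is the convenience I mean.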

> void *obj_pool_alloc(ObjPool *pool);
> void obj_pool_free(ObjPool *pool, void *obj);
> void obj_pool_destroy(ObjPool *pool);
>
> All allocations of the objs buffer, and all logic like
>
> +    VirtIOBlockReq *req = obj_pool_get(s->obj_pool);
> +
> +    if (!req) {
> +        req = g_slice_new(VirtIOBlockReq);
> +    }
>
> +        if (obj_pool_has_obj(req->dev->obj_pool, req)) {
> +            obj_pool_put(req->dev->obj_pool, req);
> +        } else {
> +            g_slice_free(VirtIOBlockReq, req);
> +        }
>
> should be part of the object pool, not its users.

Yeah, we could do that, but some callers may prefer to use malloc()
while others may prefer calloc(); that is why I didn't put the fallback
allocation into the obj pool APIs.

The current way is still reasonably simple anyway.
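
For comparison, a rough sketch of the API you propose, with the fallback
folded into the pool itself (hypothetical code, reusing the ObjPool fields
from the sketch above; note how the fallback has to hard-code one
allocator, g_slice here, which is exactly the flexibility that gets lost):

    #include <glib.h>

    void obj_pool_init(ObjPool *pool, unsigned obj_size, unsigned cnt)
    {
        unsigned i;

        /* The pool owns its storage now, so it must also be destroyed */
        pool->objs = g_malloc((size_t)obj_size * cnt);
        pool->free_objs = g_new(void *, cnt);
        pool->obj_size = obj_size;
        pool->cnt = cnt;
        for (i = 0; i < cnt; i++) {
            pool->free_objs[i] = pool->objs + (size_t)i * obj_size;
        }
        pool->free_cnt = cnt;
    }

    void *obj_pool_alloc(ObjPool *pool)
    {
        void *obj = obj_pool_get(pool);

        if (!obj) {
            /* Pool exhausted: fall back to the heap */
            obj = g_slice_alloc(pool->obj_size);
        }
        return obj;
    }

    void obj_pool_free(ObjPool *pool, void *obj)
    {
        if (obj_pool_has_obj(pool, obj)) {
            obj_pool_put(pool, obj);
        } else {
            /* Must match the fallback allocator in obj_pool_alloc() */
            g_slice_free1(pool->obj_size, obj);
        }
    }

    void obj_pool_destroy(ObjPool *pool)
    {
        g_free(pool->free_objs);
        g_free(pool->objs);
    }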

Thanks,


