From: Paolo Bonzini
Subject: Re: [Qemu-stable] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free
Date: Wed, 26 Mar 2014 10:55:37 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:24.0) Gecko/20100101 Thunderbird/24.3.0

On 26/03/2014 03:02, Li, Zhen-Hua wrote:
From: "Li, ZhenHua" <address@hidden>

In the virtio-blk module, a new req structure is allocated with malloc
for every request. Using a req pool instead of malloc/free for each
request increases performance.

Improvement: about 5% to 10%.

Can you try g_slice_new/g_slice_free instead?

Paolo
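
[For reference, a minimal sketch of what the suggested
g_slice_new/g_slice_free variant could look like -- an illustration,
not part of the posted patch. GLib's slice allocator already recycles
same-sized chunks internally, so it gives pool-like reuse without any
hand-written bookkeeping:

#include <glib.h>

static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
    /* g_slice_new() hands back a cached VirtIOBlockReq-sized chunk
     * when one is available, instead of hitting malloc every time. */
    VirtIOBlockReq *req = g_slice_new(VirtIOBlockReq);

    req->dev = s;
    req->qiov.size = 0;
    req->next = NULL;
    return req;
}

static void virtio_blk_free_request(VirtIOBlockReq *req)
{
    /* Return the chunk to the slice allocator's per-size cache. */
    g_slice_free(VirtIOBlockReq, req);
}

Since the slice cache is also thread-aware, this sidesteps the
locking questions a global hand-rolled pool would raise.]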

Signed-off-by: Li, ZhenHua <address@hidden>
---
 hw/block/virtio-blk.c |   87 ++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 75 insertions(+), 12 deletions(-)

diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 8a568e5..da5b570 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -39,6 +39,70 @@ typedef struct VirtIOBlockReq
     BlockAcctCookie acct;
 } VirtIOBlockReq;

+/* Simple request pool, grown in chunks of POOL_PAGE entries. */
+#define POOL_PAGE 512
+static VirtIOBlockReq **req_pool;        /* preallocated requests */
+static char *req_pool_used;              /* nonzero if the slot is in use */
+static unsigned long req_pool_size = 0;
+
+/* Grow the pool by POOL_PAGE slots, preserving the existing entries. */
+static void remalloc_reqs(void)
+{
+    unsigned long old_size = req_pool_size;
+    unsigned long i;
+    char *old_used = req_pool_used;
+    VirtIOBlockReq **old_pool = req_pool;
+
+    req_pool_size += POOL_PAGE;
+    req_pool_used = g_malloc(req_pool_size * sizeof(char));
+    req_pool = g_malloc(req_pool_size * sizeof(VirtIOBlockReq *));
+
+    if (old_size != 0) {
+        memcpy(req_pool_used, old_used, old_size * sizeof(char));
+        memcpy(req_pool, old_pool, old_size * sizeof(VirtIOBlockReq *));
+    }
+    for (i = old_size; i < req_pool_size; i++) {
+        req_pool[i] = g_malloc(sizeof(VirtIOBlockReq));
+        req_pool_used[i] = 0;
+    }
+
+    if (old_size != 0) {
+        g_free(old_used);
+        g_free(old_pool);
+    }
+}
+
+/* Return an unused request from the pool, growing the pool if needed. */
+static VirtIOBlockReq *req_pool_get_new(void)
+{
+    unsigned long i;
+
+    if (req_pool_size == 0) {
+        remalloc_reqs();
+    }
+    for (i = 0; i < req_pool_size; i++) {
+        if (req_pool_used[i] == 0) {
+            req_pool_used[i] = 1;
+            return req_pool[i];
+        }
+    }
+    /* Every slot is in use: grow the pool and take the first new slot. */
+    remalloc_reqs();
+    req_pool_used[req_pool_size - POOL_PAGE] = 1;
+    return req_pool[req_pool_size - POOL_PAGE];
+}
+
+/* Return a request to the pool.  Note: linear scan over all slots. */
+static void virtio_blk_free_request(VirtIOBlockReq *req0)
+{
+    unsigned long i;
+
+    for (i = 0; i < req_pool_size; i++) {
+        if (req_pool[i] == req0) {
+            req_pool_used[i] = 0;
+            return;
+        }
+    }
+}
+
 static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
 {
     VirtIOBlock *s = req->dev;
@@ -63,7 +127,7 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
     } else if (action == BDRV_ACTION_REPORT) {
         virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
         bdrv_acct_done(s->bs, &req->acct);
-        g_free(req);
+        virtio_blk_free_request(req);
     }

     bdrv_error_action(s->bs, action, is_read, error);
@@ -84,7 +148,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret)

     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
     bdrv_acct_done(req->dev->bs, &req->acct);
-    g_free(req);
+    virtio_blk_free_request(req);
 }

 static void virtio_blk_flush_complete(void *opaque, int ret)
@@ -99,25 +163,24 @@ static void virtio_blk_flush_complete(void *opaque, int ret)

     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
     bdrv_acct_done(req->dev->bs, &req->acct);
-    g_free(req);
+    virtio_blk_free_request(req);
 }
-
 static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
 {
-    VirtIOBlockReq *req = g_malloc(sizeof(*req));
+    VirtIOBlockReq *req = req_pool_get_new();
     req->dev = s;
     req->qiov.size = 0;
     req->next = NULL;
     return req;
 }
-
 static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
 {
     VirtIOBlockReq *req = virtio_blk_alloc_request(s);

     if (req != NULL) {
         if (!virtqueue_pop(s->vq, &req->elem)) {
-            g_free(req);
+            virtio_blk_free_request(req);
             return NULL;
         }
     }
@@ -142,7 +205,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
      */
     if (req->elem.out_num < 2 || req->elem.in_num < 3) {
         virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
-        g_free(req);
+        virtio_blk_free_request(req);
         return;
     }

@@ -232,7 +295,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
     stl_p(&req->scsi->data_len, hdr.dxfer_len);

     virtio_blk_req_complete(req, status);
-    g_free(req);
+    virtio_blk_free_request(req);
     return;
 #else
     abort();
@@ -242,7 +305,7 @@ fail:
     /* Just put anything nonzero so that the ioctl fails in the guest.  */
     stl_p(&req->scsi->errors, 255);
     virtio_blk_req_complete(req, status);
-    g_free(req);
+    virtio_blk_free_request(req);
 }

 typedef struct MultiReqBuffer {
@@ -375,7 +438,7 @@ static void virtio_blk_handle_request(VirtIOBlockReq *req,
                 s->blk.serial ? s->blk.serial : "",
                 MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
         virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
-        g_free(req);
+        virtio_blk_free_request(req);
     } else if (type & VIRTIO_BLK_T_OUT) {
         qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
                                  req->elem.out_num - 1);
@@ -387,7 +450,7 @@ static void virtio_blk_handle_request(VirtIOBlockReq *req,
         virtio_blk_handle_read(req);
     } else {
         virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
-        g_free(req);
+        virtio_blk_free_request(req);
     }
 }





