From: Christoph Hellwig
Subject: Re: [Qemu-devel] [PATCH 0/3] better I/O accounting V2
Date: Wed, 24 Aug 2011 20:45:06 +0200
User-agent: Mutt/1.5.17 (2007-11-01)

On Mon, Aug 22, 2011 at 11:46:00AM -0500, Ryan Harper wrote:
> So, I believe this is how it's happening.
> 
> We start accounting on a write, which is turned into a multiwrite via
> virtio_blk_handle_write(), which in turn calls virtio_submit_multiwrite().
> 
> Then when the multiwrite completes, we invoke virtio_blk_rw_complete()
> on each part of the multiwrite.  None of these requests have had their
> acct structure initialized since there was just *one* initial write.
> We could do a bdrv_acct_start() on each req, but that would break the
> concept of hiding the additional writes under the initial request.
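
For reference, the submission path described above looks roughly like
this (a simplified sketch of hw/virtio-blk.c, with the overflow checks
and the multiwrite merge details elided, so not the exact tree):

static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    BlockRequest *blkreq;

    /* Accounting starts here, once per guest write request, before
     * the request is queued into the multiwrite buffer. */
    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size,
                    BDRV_ACCT_WRITE);

    blkreq = &mrb->blkreq[mrb->num_writes++];
    blkreq->sector = ldq_p(&req->out->sector);
    blkreq->nb_sectors = req->qiov.size / BDRV_SECTOR_SIZE;
    blkreq->qiov = &req->qiov;
    blkreq->cb = virtio_blk_rw_complete;
    /* Each merged sub-write keeps its own req as the callback cookie,
     * so virtio_blk_rw_complete() runs once per guest request and
     * every req->acct it sees has been initialized above. */
    blkreq->opaque = req;
    blkreq->error = 0;
}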

We do initialize the accounting fields correctly for all writes.  Where
we fail right now is for non-read/write requests.  I can reproduce your
issue by reading the serial attribute in sysfs; any chance Fedora does
that during boot?
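
To see why, note that reading the serial attribute (e.g.
/sys/block/vda/serial in the guest) makes the virtio-blk driver issue a
VIRTIO_BLK_T_GET_ID request, and the common completion helper currently
finishes accounting unconditionally, even for requests that never
started it.  Roughly (simplified; the statements match the first hunk
below, the local variable is assumed):

static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
    VirtIOBlock *s = req->dev;

    stb_p(&req->in->status, status);
    virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
    virtio_notify(&s->vdev, s->vq);

    /* For VIRTIO_BLK_T_GET_ID, SCSI passthrough and other
     * non-read/write requests bdrv_acct_start() was never called,
     * so this touches an uninitialized req->acct. */
    bdrv_acct_done(s->bs, &req->acct);
    g_free(req);
}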

The patch below should take care of these cases:


Index: qemu/hw/virtio-blk.c
===================================================================
--- qemu.orig/hw/virtio-blk.c   2011-08-24 20:32:03.764503179 +0200
+++ qemu/hw/virtio-blk.c        2011-08-24 20:35:55.716579922 +0200
@@ -59,9 +59,6 @@ static void virtio_blk_req_complete(Virt
     stb_p(&req->in->status, status);
     virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
     virtio_notify(&s->vdev, s->vq);
-
-    bdrv_acct_done(s->bs, &req->acct);
-    g_free(req);
 }
 
 static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
@@ -83,6 +80,8 @@ static int virtio_blk_handle_rw_error(Vi
         vm_stop(VMSTOP_DISKFULL);
     } else {
         virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
+        bdrv_acct_done(s->bs, &req->acct);
+        g_free(req);
         bdrv_mon_event(s->bs, BDRV_ACTION_REPORT, is_read);
     }
 
@@ -102,6 +101,8 @@ static void virtio_blk_rw_complete(void
     }
 
     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
+    bdrv_acct_done(req->dev->bs, &req->acct);
+    g_free(req);
 }
 
 static void virtio_blk_flush_complete(void *opaque, int ret)
@@ -115,6 +116,8 @@ static void virtio_blk_flush_complete(vo
     }
 
     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
+    bdrv_acct_done(req->dev->bs, &req->acct);
+    g_free(req);
 }
 
 static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
@@ -157,6 +160,7 @@ static void virtio_blk_handle_scsi(VirtI
      */
     if (req->elem.out_num < 2 || req->elem.in_num < 3) {
         virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
+        g_free(req);
         return;
     }
 
@@ -165,6 +169,7 @@ static void virtio_blk_handle_scsi(VirtI
      */
     if (req->elem.out_num > 2 && req->elem.in_num > 3) {
         virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
+        g_free(req);
         return;
     }
 
@@ -231,11 +236,13 @@ static void virtio_blk_handle_scsi(VirtI
     stl_p(&req->scsi->data_len, hdr.dxfer_len);
 
     virtio_blk_req_complete(req, status);
+    g_free(req);
 }
 #else
 static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
 {
     virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
+    g_free(req);
 }
 #endif /* __linux__ */
 
@@ -378,6 +385,7 @@ static void virtio_blk_handle_request(Vi
                 s->serial ? s->serial : "",
                 MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
         virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
+        g_free(req);
     } else if (type & VIRTIO_BLK_T_OUT) {
         qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
                                  req->elem.out_num - 1);
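
With this applied, each completion path owns the request's lifetime:
the read/write and flush paths finish the accounting they started,
while the SCSI, unsupported and GET_ID paths only free the request.
The resulting read/write completion looks roughly like this
(reconstructed from the hunks above; the error-path details are
assumed):

static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    if (ret) {
        int is_read = !(req->out->type & VIRTIO_BLK_T_OUT);
        /* This may retry the request or stop the VM; if it consumed
         * the request we must not touch it again here. */
        if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
            return;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    /* Accounting was started in the submission path, so it is paired
     * here instead of in virtio_blk_req_complete(). */
    bdrv_acct_done(req->dev->bs, &req->acct);
    g_free(req);
}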


