[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-block] [PATCH 2/5] qemu-img bench: Sequential writes
From: Kevin Wolf
Subject: [Qemu-block] [PATCH 2/5] qemu-img bench: Sequential writes
Date: Fri, 3 Jun 2016 14:30:18 +0200
This extends qemu-img bench with an option that makes it use sequential
writes instead of reads for the test run.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
qemu-img-cmds.hx | 4 ++--
qemu-img.c | 24 +++++++++++++++++++-----
qemu-img.texi | 10 ++++++----
3 files changed, 27 insertions(+), 11 deletions(-)
diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx
index f3bd546..d651674 100644
--- a/qemu-img-cmds.hx
+++ b/qemu-img-cmds.hx
@@ -10,9 +10,9 @@ STEXI
ETEXI
DEF("bench", img_bench,
-    "bench [-c count] [-d depth] [-f fmt] [-n] [-q] [-s buffer_size] [-t cache] filename")
+    "bench [-c count] [-d depth] [-f fmt] [-n] [-q] [-s buffer_size] [-t cache] [-w] filename")
STEXI
-@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [-n] [-q] [-s @var{buffer_size}] [-t @var{cache}] @var{filename}
+@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [-n] [-q] [-s @var{buffer_size}] [-t @var{cache}] [-w] @var{filename}
ETEXI
DEF("check", img_check,
diff --git a/qemu-img.c b/qemu-img.c
index d471d10..142efb1 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -3462,6 +3462,7 @@ out_no_progress:
typedef struct BenchData {
BlockBackend *blk;
uint64_t image_size;
+ bool write;
int bufsize;
int nrreq;
int n;
@@ -3487,8 +3488,13 @@ static void bench_cb(void *opaque, int ret)
}
while (b->n > b->in_flight && b->in_flight < b->nrreq) {
- acb = blk_aio_preadv(b->blk, b->offset, b->qiov, 0,
- bench_cb, b);
+ if (b->write) {
+ acb = blk_aio_pwritev(b->blk, b->offset, b->qiov, 0,
+ bench_cb, b);
+ } else {
+ acb = blk_aio_preadv(b->blk, b->offset, b->qiov, 0,
+ bench_cb, b);
+ }
if (!acb) {
error_report("Failed to issue request");
exit(EXIT_FAILURE);
@@ -3505,6 +3511,7 @@ static int img_bench(int argc, char **argv)
const char *fmt = NULL, *filename;
bool quiet = false;
bool image_opts = false;
+ bool is_write = false;
int count = 75000;
int depth = 64;
size_t bufsize = 4096;
@@ -3522,7 +3529,7 @@ static int img_bench(int argc, char **argv)
{"image-opts", no_argument, 0, OPTION_IMAGE_OPTS},
{0, 0, 0, 0}
};
- c = getopt_long(argc, argv, "hc:d:f:nqs:t:", long_options, NULL);
+ c = getopt_long(argc, argv, "hc:d:f:nqs:t:w", long_options, NULL);
if (c == -1) {
break;
}
@@ -3585,6 +3592,10 @@ static int img_bench(int argc, char **argv)
goto out;
}
break;
+ case 'w':
+ flags |= BDRV_O_RDWR;
+ is_write = true;
+ break;
case OPTION_IMAGE_OPTS:
image_opts = true;
break;
@@ -3614,11 +3625,14 @@ static int img_bench(int argc, char **argv)
.bufsize = bufsize,
.nrreq = depth,
.n = count,
+ .write = is_write,
};
- printf("Sending %d requests, %d bytes each, %d in parallel\n",
- data.n, data.bufsize, data.nrreq);
+ printf("Sending %d %s requests, %d bytes each, %d in parallel\n",
+ data.n, data.write ? "write" : "read", data.bufsize, data.nrreq);
data.buf = blk_blockalign(blk, data.nrreq * data.bufsize);
+ memset(data.buf, 0, data.nrreq * data.bufsize);
+
data.qiov = g_new(QEMUIOVector, data.nrreq);
for (i = 0; i < data.nrreq; i++) {
qemu_iovec_init(&data.qiov[i], 1);
diff --git a/qemu-img.texi b/qemu-img.texi
index b6b28e3..34e94db 100644
--- a/qemu-img.texi
+++ b/qemu-img.texi
@@ -131,11 +131,13 @@ Skip the creation of the target volume
Command description:
@table @option
-@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [-n] [-q] [-s @var{buffer_size}] [-t @var{cache}] @var{filename}
+@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [-n] [-q] [-s @var{buffer_size}] [-t @var{cache}] [-w] @var{filename}
-Run a simple sequential read benchmark on the specified image. A total number
-of @var{count} I/O requests is performed, each @var{buffer_size} bytes in size,
-and with @var{depth} requests in parallel.
+Run a simple sequential I/O benchmark on the specified image. If @code{-w} is
+specified, a write test is performed, otherwise a read test is performed.
+
+A total number of @var{count} I/O requests is performed, each @var{buffer_size}
+bytes in size, and with @var{depth} requests in parallel.
If @code{-n} is specified, the native AIO backend is used if possible. On
Linux, this option only works if @code{-t none} or @code{-t directsync} is
--
1.8.3.1