[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v2 1/6] block/block-copy: allocate buffer in block_copy_with_boun
From: |
Vladimir Sementsov-Ogievskiy |
Subject: |
[PATCH v2 1/6] block/block-copy: allocate buffer in block_copy_with_bounce_buffer |
Date: |
Wed, 16 Oct 2019 20:09:00 +0300 |
Move bounce_buffer allocation to block_copy_with_bounce_buffer. This
commit simplifies further work on implementing copying by larger chunks
(of different size) and further asynchronous handling of block_copy
iterations (with help of block/aio_task API).
Allocation is fast, a lot faster than disk I/O, so it is not a problem
that we now allocate/free the bounce_buffer more often. And we will have
to allocate several bounce_buffers anyway for parallel execution of loop
iterations in the future.
Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
Reviewed-by: Max Reitz <address@hidden>
---
block/block-copy.c | 21 ++++++++-------------
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index 0f76ea1e63..22b0bd7d07 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -126,20 +126,17 @@ void block_copy_set_callbacks(
static int coroutine_fn block_copy_with_bounce_buffer(BlockCopyState *s,
int64_t start,
int64_t end,
- bool *error_is_read,
- void **bounce_buffer)
+ bool *error_is_read)
{
int ret;
int nbytes;
+ void *bounce_buffer = qemu_blockalign(s->source->bs, s->cluster_size);
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
bdrv_reset_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
nbytes = MIN(s->cluster_size, s->len - start);
- if (!*bounce_buffer) {
- *bounce_buffer = qemu_blockalign(s->source->bs, s->cluster_size);
- }
- ret = bdrv_co_pread(s->source, start, nbytes, *bounce_buffer, 0);
+ ret = bdrv_co_pread(s->source, start, nbytes, bounce_buffer, 0);
if (ret < 0) {
trace_block_copy_with_bounce_buffer_read_fail(s, start, ret);
if (error_is_read) {
@@ -148,7 +145,7 @@ static int coroutine_fn
block_copy_with_bounce_buffer(BlockCopyState *s,
goto fail;
}
- ret = bdrv_co_pwrite(s->target, start, nbytes, *bounce_buffer,
+ ret = bdrv_co_pwrite(s->target, start, nbytes, bounce_buffer,
s->write_flags);
if (ret < 0) {
trace_block_copy_with_bounce_buffer_write_fail(s, start, ret);
@@ -158,8 +155,11 @@ static int coroutine_fn
block_copy_with_bounce_buffer(BlockCopyState *s,
goto fail;
}
+ qemu_vfree(bounce_buffer);
+
return nbytes;
fail:
+ qemu_vfree(bounce_buffer);
bdrv_set_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
return ret;
@@ -271,7 +271,6 @@ int coroutine_fn block_copy(BlockCopyState *s,
{
int ret = 0;
int64_t end = bytes + start; /* bytes */
- void *bounce_buffer = NULL;
int64_t status_bytes;
BlockCopyInFlightReq req;
@@ -324,7 +323,7 @@ int coroutine_fn block_copy(BlockCopyState *s,
}
if (!s->use_copy_range) {
ret = block_copy_with_bounce_buffer(s, start, dirty_end,
- error_is_read, &bounce_buffer);
+ error_is_read);
}
if (ret < 0) {
break;
@@ -335,10 +334,6 @@ int coroutine_fn block_copy(BlockCopyState *s,
ret = 0;
}
- if (bounce_buffer) {
- qemu_vfree(bounce_buffer);
- }
-
block_copy_inflight_req_end(&req);
return ret;
--
2.21.0
- [PATCH v2 0/6] block-copy: memory limit, Vladimir Sementsov-Ogievskiy, 2019/10/16
- [PATCH v2 6/6] block/block-copy: increase buffered copy request, Vladimir Sementsov-Ogievskiy, 2019/10/16
- [PATCH v2 4/6] util: introduce SharedResource, Vladimir Sementsov-Ogievskiy, 2019/10/16
- [PATCH v2 1/6] block/block-copy: allocate buffer in block_copy_with_bounce_buffer,
Vladimir Sementsov-Ogievskiy <=
- [PATCH v2 5/6] block/block-copy: add memory limit, Vladimir Sementsov-Ogievskiy, 2019/10/16
- [PATCH v2 2/6] block/block-copy: limit copy_range_size to 16 MiB, Vladimir Sementsov-Ogievskiy, 2019/10/16
- [PATCH v2 3/6] block/block-copy: refactor copying, Vladimir Sementsov-Ogievskiy, 2019/10/16