From: Vladimir Sementsov-Ogievskiy
Subject: [Qemu-devel] [PATCH 7/8] block/backup: merge duplicated logic into backup_do_cow
Date: Wed, 7 Aug 2019 11:07:49 +0300
backup_cow_with_offload and backup_cow_with_bounce_buffer contain a
lot of duplicated logic. Move it into backup_do_cow.
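
For reviewers, a simplified sketch of the per-cluster loop in backup_do_cow()
after this patch (condensed from the hunks below; locking, tracing and
progress reporting elided):

    cur_bytes = MIN(dirty_end - start, job->len - start);
    bdrv_reset_dirty_bitmap(job->copy_bitmap, start, dirty_end - start);

    if (job->use_copy_range) {
        ret = backup_cow_with_offload(job, start, cur_bytes, read_flags);
        if (ret < 0) {
            /* offload failed: disable it and fall back to the bounce buffer */
            job->use_copy_range = false;
        }
    }
    if (!job->use_copy_range) {
        ret = backup_cow_with_bounce_buffer(job, start, cur_bytes,
                                            read_flags, error_is_read);
    }
    if (ret < 0) {
        /* re-dirty the range so it can be retried later */
        bdrv_set_dirty_bitmap(job->copy_bitmap, start, dirty_end - start);
        break;
    }

So the bitmap handling, clipping to job->len and error recovery now live only
in the caller, and the two copy helpers just perform the copy itself.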
Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
---
block/backup.c | 83 +++++++++++++++++++-------------------------------
1 file changed, 31 insertions(+), 52 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index c765c073ad..f19c9195fe 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -100,84 +100,60 @@ static void cow_request_end(CowRequest *req)
/* Copy range to target with a bounce buffer and return the bytes copied. If
* error occurred, return a negative error number */
-static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
- int64_t start,
- int64_t end,
- bool is_write_notifier,
- bool *error_is_read)
+static int coroutine_fn backup_cow_with_bounce_buffer(
+ BackupBlockJob *job, int64_t offset, int64_t bytes,
+ BdrvRequestFlags read_flags, bool *error_is_read)
{
- int ret;
+ int ret = 0;
BlockBackend *blk = job->common.blk;
- int nbytes;
- int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
- void *bounce_buffer = blk_try_blockalign(blk, end);
+ void *bounce_buffer = blk_try_blockalign(blk, bytes);
if (!bounce_buffer) {
return -ENOMEM;
}
- assert(QEMU_IS_ALIGNED(start, job->cluster_size));
- bdrv_reset_dirty_bitmap(job->copy_bitmap, start, end - start);
-
- nbytes = MIN(end - start, job->len - start);
-
- ret = blk_co_pread(blk, start, nbytes, bounce_buffer, read_flags);
+ ret = blk_co_pread(blk, offset, bytes, bounce_buffer, read_flags);
if (ret < 0) {
- trace_backup_do_cow_read_fail(job, start, ret);
+ trace_backup_do_cow_read_fail(job, offset, ret);
if (error_is_read) {
*error_is_read = true;
}
- goto fail;
+ goto out;
}
- ret = blk_co_pwrite(job->target, start, nbytes, bounce_buffer,
+ ret = blk_co_pwrite(job->target, offset, bytes, bounce_buffer,
job->write_flags);
if (ret < 0) {
- trace_backup_do_cow_write_fail(job, start, ret);
+ trace_backup_do_cow_write_fail(job, offset, ret);
if (error_is_read) {
*error_is_read = false;
}
- goto fail;
+ goto out;
}
+out:
qemu_vfree(bounce_buffer);
- return nbytes;
-fail:
- bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
- qemu_vfree(bounce_buffer);
return ret;
-
}
/* Copy range to target and return the bytes copied. If error occurred, return a
 * negative error number. */
static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
- int64_t start,
- int64_t end,
- bool is_write_notifier)
+ int64_t offset,
+ int64_t bytes,
+ BdrvRequestFlags read_flags)
{
int ret;
- int nr_clusters;
BlockBackend *blk = job->common.blk;
- int nbytes = MIN(end - start, job->len - start);
- int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
-
- assert(end - start < INT_MAX);
- assert(QEMU_IS_ALIGNED(start, job->cluster_size));
- nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
- bdrv_reset_dirty_bitmap(job->copy_bitmap, start,
- job->cluster_size * nr_clusters);
- ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
+
+ ret = blk_co_copy_range(blk, offset, job->target, offset, bytes,
read_flags, job->write_flags);
if (ret < 0) {
- trace_backup_do_cow_copy_range_fail(job, start, ret);
- bdrv_set_dirty_bitmap(job->copy_bitmap, start,
- job->cluster_size * nr_clusters);
- return ret;
+ trace_backup_do_cow_copy_range_fail(job, offset, ret);
}
- return nbytes;
+ return ret;
}
/*
@@ -260,6 +236,8 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
CowRequest cow_request;
int ret = 0;
int64_t start, end; /* bytes */
+ BdrvRequestFlags read_flags =
+ is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
qemu_co_rwlock_rdlock(&job->flush_rwlock);
@@ -285,6 +263,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
while (start < end) {
int64_t dirty_end;
+ int64_t cur_bytes;
if (!bdrv_dirty_bitmap_get(job->copy_bitmap, start)) {
trace_backup_do_cow_skip(job, start);
@@ -299,30 +278,30 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
}
trace_backup_do_cow_process(job, start);
+ cur_bytes = MIN(dirty_end - start, job->len - start);
+ bdrv_reset_dirty_bitmap(job->copy_bitmap, start, dirty_end - start);
if (job->use_copy_range) {
- ret = backup_cow_with_offload(job, start, dirty_end,
- is_write_notifier);
+ ret = backup_cow_with_offload(job, start, cur_bytes, read_flags);
if (ret < 0) {
job->use_copy_range = false;
}
}
if (!job->use_copy_range) {
- ret = backup_cow_with_bounce_buffer(job, start, dirty_end,
- is_write_notifier,
- error_is_read);
+ ret = backup_cow_with_bounce_buffer(job, start, cur_bytes,
+ read_flags, error_is_read);
}
if (ret < 0) {
+ bdrv_set_dirty_bitmap(job->copy_bitmap, start, dirty_end - start);
break;
}
/* Publish progress, guest I/O counts as progress too. Note that the
* offset field is an opaque progress value, it is not a disk offset.
*/
- start += ret;
- job->bytes_read += ret;
- job_progress_update(&job->common.job, ret);
- ret = 0;
+ start += cur_bytes;
+ job->bytes_read += cur_bytes;
+ job_progress_update(&job->common.job, cur_bytes);
}
cow_request_end(&cow_request);
--
2.18.0