[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 3/8] block/io: handle alignment and max_transfer for
From: |
Vladimir Sementsov-Ogievskiy |
Subject: |
[Qemu-devel] [PATCH 3/8] block/io: handle alignment and max_transfer for copy_range |
Date: |
Wed, 7 Aug 2019 11:07:45 +0300 |
copy_range ignores these limitations; let's improve it. block/backup
code handles max_transfer for copy_range by itself; it is no longer
needed there, so drop it.
Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
---
block/backup.c | 11 ++---------
block/io.c | 41 +++++++++++++++++++++++++++++++++--------
2 files changed, 35 insertions(+), 17 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 3cdbe973e6..11e27c844d 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -54,7 +54,6 @@ typedef struct BackupBlockJob {
QLIST_HEAD(, CowRequest) inflight_reqs;
bool use_copy_range;
- int64_t copy_range_size;
BdrvRequestFlags write_flags;
bool initializing_bitmap;
@@ -156,12 +155,11 @@ static int coroutine_fn
backup_cow_with_offload(BackupBlockJob *job,
int ret;
int nr_clusters;
BlockBackend *blk = job->common.blk;
- int nbytes;
+ int nbytes = end - start;
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
- assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
+ assert(end - start < INT_MAX);
assert(QEMU_IS_ALIGNED(start, job->cluster_size));
- nbytes = MIN(job->copy_range_size, end - start);
nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
bdrv_reset_dirty_bitmap(job->copy_bitmap, start,
job->cluster_size * nr_clusters);
@@ -756,11 +754,6 @@ BlockJob *backup_job_create(const char *job_id,
BlockDriverState *bs,
job->copy_bitmap = copy_bitmap;
copy_bitmap = NULL;
job->use_copy_range = !compress; /* compression isn't supported for it */
- job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
- blk_get_max_transfer(job->target));
- job->copy_range_size = MAX(job->cluster_size,
- QEMU_ALIGN_UP(job->copy_range_size,
- job->cluster_size));
/* Required permissions are already taken with target's blk_new() */
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
diff --git a/block/io.c b/block/io.c
index 06305c6ea6..5abbd0fff2 100644
--- a/block/io.c
+++ b/block/io.c
@@ -3005,6 +3005,12 @@ static int coroutine_fn bdrv_co_copy_range_internal(
{
BdrvTrackedRequest req;
int ret;
+ uint32_t align = MAX(src->bs->bl.request_alignment,
+ dst->bs->bl.request_alignment);
+ uint32_t max_transfer =
+ QEMU_ALIGN_DOWN(MIN_NON_ZERO(MIN_NON_ZERO(src->bs->bl.max_transfer,
+                                                  dst->bs->bl.max_transfer),
+ INT_MAX), align);
/* TODO We can support BDRV_REQ_NO_FALLBACK here */
assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
@@ -3031,7 +3037,10 @@ static int coroutine_fn bdrv_co_copy_range_internal(
if (!src->bs->drv->bdrv_co_copy_range_from
|| !dst->bs->drv->bdrv_co_copy_range_to
- || src->bs->encrypted || dst->bs->encrypted) {
+ || src->bs->encrypted || dst->bs->encrypted ||
+ !QEMU_IS_ALIGNED(src_offset, src->bs->bl.request_alignment) ||
+ !QEMU_IS_ALIGNED(dst_offset, dst->bs->bl.request_alignment) ||
+ !QEMU_IS_ALIGNED(bytes, align)) {
return -ENOTSUP;
}
@@ -3046,11 +3055,22 @@ static int coroutine_fn bdrv_co_copy_range_internal(
wait_serialising_requests(&req);
}
- ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
- src, src_offset,
- dst, dst_offset,
- bytes,
- read_flags, write_flags);
+ while (bytes) {
+ int num = MIN(bytes, max_transfer);
+
+ ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
+ src, src_offset,
+ dst, dst_offset,
+ num,
+ read_flags,
+ write_flags);
+ if (ret < 0) {
+ break;
+ }
+ bytes -= num;
+ src_offset += num;
+ dst_offset += num;
+ }
tracked_request_end(&req);
bdrv_dec_in_flight(src->bs);
@@ -3060,12 +3080,17 @@ static int coroutine_fn bdrv_co_copy_range_internal(
BDRV_TRACKED_WRITE);
ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
write_flags);
- if (!ret) {
+ while (!ret && bytes) {
+ int num = MIN(bytes, max_transfer);
+
ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
src, src_offset,
dst, dst_offset,
- bytes,
+ num,
read_flags, write_flags);
+ bytes -= num;
+ src_offset += num;
+ dst_offset += num;
}
bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
tracked_request_end(&req);
--
2.18.0
- [Qemu-devel] [PATCH 1/8] block/backup: deal with zero detection, (continued)
- [Qemu-devel] [PATCH 1/8] block/backup: deal with zero detection, Vladimir Sementsov-Ogievskiy, 2019/08/07
- [Qemu-devel] [PATCH 4/8] block/backup: improve unallocated clusters skipping, Vladimir Sementsov-Ogievskiy, 2019/08/07
- Re: [Qemu-devel] [PATCH 4/8] block/backup: improve unallocated clusters skipping, Max Reitz, 2019/08/07
- Re: [Qemu-devel] [PATCH 4/8] block/backup: improve unallocated clusters skipping, Vladimir Sementsov-Ogievskiy, 2019/08/09
- Re: [Qemu-devel] [PATCH 4/8] block/backup: improve unallocated clusters skipping, Max Reitz, 2019/08/09
- Re: [Qemu-devel] [PATCH 4/8] block/backup: improve unallocated clusters skipping, Vladimir Sementsov-Ogievskiy, 2019/08/09
- Re: [Qemu-devel] [PATCH 4/8] block/backup: improve unallocated clusters skipping, Max Reitz, 2019/08/09
[Qemu-devel] [PATCH 3/8] block/io: handle alignment and max_transfer for copy_range,
Vladimir Sementsov-Ogievskiy <=
[Qemu-devel] [PATCH 7/8] block/backup: merge duplicated logic into backup_do_cow, Vladimir Sementsov-Ogievskiy, 2019/08/07
[Qemu-devel] [PATCH 6/8] block/backup: teach backup_cow_with_bounce_buffer to copy more at once, Vladimir Sementsov-Ogievskiy, 2019/08/07
[Qemu-devel] [PATCH 8/8] block/backup: backup_do_cow: use bdrv_dirty_bitmap_next_dirty_area, Vladimir Sementsov-Ogievskiy, 2019/08/07
[Qemu-devel] [PATCH 2/8] block/backup: refactor write_flags, Vladimir Sementsov-Ogievskiy, 2019/08/07