+{
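+    /* Chunk size to use for one copy operation, as determined by s->method */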
+ switch (s->method) {
+ case COPY_READ_WRITE_CLUSTER:
+ return s->cluster_size;
+ case COPY_READ_WRITE:
+ case COPY_RANGE_SMALL:
+        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
+                   s->max_transfer);
+    case COPY_RANGE_FULL:
+        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
+                   s->max_transfer);
+ default:
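+        /* Should not happen: all BlockCopyMethod values are handled above */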
+ abort();
+ }
+}
+
 /*
  * Search for the first dirty area in offset/bytes range and create task at
  * the beginning of it.
@@ -157,8 +181,9 @@ static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
                                              int64_t offset, int64_t bytes)
 {
BlockCopyTask *task;
-    int64_t max_chunk = MIN_NON_ZERO(s->copy_size, call_state->max_chunk);
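+    /* Cap the method-derived chunk size by the per-call max_chunk */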
+ int64_t max_chunk = block_copy_chunk_size(s);
+ max_chunk = MIN_NON_ZERO(max_chunk, call_state->max_chunk);
     if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                            offset, offset + bytes,
                                            max_chunk, &offset, &bytes))
@@ -265,28 +290,27 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
.len = bdrv_dirty_bitmap_size(copy_bitmap),
.write_flags = write_flags,
.mem = shres_create(BLOCK_COPY_MAX_MEM),
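+        /* Round max_transfer down to a whole number of clusters */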
+        .max_transfer = QEMU_ALIGN_DOWN(block_copy_max_transfer(source, target)
+                                        , cluster_size),
     };
- if (block_copy_max_transfer(source, target) < cluster_size) {
+ if (s->max_transfer < cluster_size) {
         /*
          * copy_range does not respect max_transfer. We don't want to bother
          * with requests smaller than block-copy cluster size, so fall back to
          * buffered copying (read and write respect max_transfer on their
          * behalf).
          */
- s->use_copy_range = false;
- s->copy_size = cluster_size;
+ s->method = COPY_READ_WRITE_CLUSTER;
} else if (write_flags & BDRV_REQ_WRITE_COMPRESSED) {
         /* Compression supports only cluster-size writes and no copy-range. */
- s->use_copy_range = false;
- s->copy_size = cluster_size;
+ s->method = COPY_READ_WRITE_CLUSTER;
} else {
/*
* We enable copy-range, but keep small copy_size, until first
* successful copy_range (look at block_copy_do_copy).
*/
- s->use_copy_range = use_copy_range;
- s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
+ s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
}
ratelimit_init(&s->rate_limit);
@@ -369,30 +393,25 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
return ret;
}
- if (s->use_copy_range) {
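+    /* Covers both COPY_RANGE_SMALL and COPY_RANGE_FULL (last in the enum) */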
+ if (s->method >= COPY_RANGE_SMALL) {
         ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                  0, s->write_flags);
if (ret < 0) {
trace_block_copy_copy_range_fail(s, offset, ret);
- s->use_copy_range = false;
- s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
+ s->method = COPY_READ_WRITE;
             /* Fall back to read+write with allocated buffer */
} else {
- if (s->use_copy_range) {
+ if (s->method == COPY_RANGE_SMALL) {
                 /*
                  * Successful copy-range. Now increase copy_size.  copy_range
                  * does not respect max_transfer (it's a TODO), so we factor
                  * that in here.
                  *
-                 * Note: we double-check s->use_copy_range for the case when
+                 * Note: we double-check s->method for the case when
                  * parallel block-copy request unsets it during previous
                  * bdrv_co_copy_range call.
                  */
-                s->copy_size =
-                        MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
-                            QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
-                                                                    s->target),
-                                            s->cluster_size));
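+                /* The larger chunk size now comes from block_copy_chunk_size() */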
+ s->method = COPY_RANGE_FULL;
}
goto out;
}