[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[RFC 3/6] migration: Implement dirtylimit convergence algo
From: |
huangy81 |
Subject: |
[RFC 3/6] migration: Implement dirtylimit convergence algo |
Date: |
Tue, 17 May 2022 14:35:03 +0800 |
From: Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
Implement the dirty-limit convergence algorithm for live migration.
It is similar to the auto-converge algorithm, but uses the dirty
limit instead of CPU throttling to make migration converge.
Signed-off-by: Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
---
migration/ram.c | 53 +++++++++++++++++++++++++++++++++++++-------------
migration/trace-events | 1 +
2 files changed, 41 insertions(+), 13 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 3532f64..5dd3e69 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -44,6 +44,7 @@
#include "qapi/error.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-events-migration.h"
+#include "qapi/qapi-commands-migration.h"
#include "qapi/qmp/qerror.h"
#include "trace.h"
#include "exec/ram_addr.h"
@@ -56,6 +57,8 @@
#include "qemu/iov.h"
#include "multifd.h"
#include "sysemu/runstate.h"
+#include "sysemu/dirtylimit.h"
+#include "sysemu/kvm.h"
#include "hw/boards.h" /* for machine_dump_guest_core() */
@@ -1082,6 +1085,21 @@ static void migration_update_rates(RAMState *rs, int64_t
end_time)
}
}
+/*
+ * Enable dirtylimit to throttle down the guest
+ */
+static void migration_dirtylimit_guest(void)
+{
+ if (!dirtylimit_in_service()) {
+ MigrationState *s = migrate_get_current();
+ int64_t quota_dirtyrate = s->parameters.vcpu_dirtylimit;
+
+ /* Set quota dirtyrate if dirty limit not in service */
+ qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL);
+ trace_migration_dirtylimit_guest(quota_dirtyrate);
+ }
+}
+
static void migration_trigger_throttle(RAMState *rs)
{
MigrationState *s = migrate_get_current();
@@ -1091,22 +1109,31 @@ static void migration_trigger_throttle(RAMState *rs)
uint64_t bytes_dirty_period = rs->num_dirty_pages_period *
TARGET_PAGE_SIZE;
uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
- /* During block migration the auto-converge logic incorrectly detects
- * that ram migration makes no progress. Avoid this by disabling the
- * throttling logic during the bulk phase of block migration. */
- if (migrate_auto_converge() && !blk_mig_bulk_active()) {
- /* The following detection logic can be refined later. For now:
- Check to see if the ratio between dirtied bytes and the approx.
- amount of bytes that just got transferred since the last time
- we were in this routine reaches the threshold. If that happens
- twice, start or increase throttling. */
-
- if ((bytes_dirty_period > bytes_dirty_threshold) &&
- (++rs->dirty_rate_high_cnt >= 2)) {
+ /*
+ * The following detection logic can be refined later. For now:
+ * Check to see if the ratio between dirtied bytes and the approx.
+ * amount of bytes that just got transferred since the last time
+ * we were in this routine reaches the threshold. If that happens
+ * twice, start or increase throttling.
+ */
+
+ if ((bytes_dirty_period > bytes_dirty_threshold) &&
+ (++rs->dirty_rate_high_cnt >= 2)) {
+ rs->dirty_rate_high_cnt = 0;
+ /*
+ * During block migration the auto-converge logic incorrectly detects
+ * that ram migration makes no progress. Avoid this by disabling the
+ * throttling logic during the bulk phase of block migration.
+ */
+
+ if (migrate_auto_converge() && !blk_mig_bulk_active()) {
trace_migration_throttle();
- rs->dirty_rate_high_cnt = 0;
mig_throttle_guest_down(bytes_dirty_period,
bytes_dirty_threshold);
+ } else if (migrate_dirtylimit() &&
+ kvm_dirty_ring_enabled() &&
+ migration_is_active(s)) {
+ migration_dirtylimit_guest();
}
}
}
diff --git a/migration/trace-events b/migration/trace-events
index 1aec580..2c341fc 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -89,6 +89,7 @@ migration_bitmap_sync_start(void) ""
migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64
migration_bitmap_clear_dirty(char *str, uint64_t start, uint64_t size,
unsigned long page) "rb %s start 0x%"PRIx64" size 0x%"PRIx64" page 0x%lx"
migration_throttle(void) ""
+migration_dirtylimit_guest(int64_t dirtyrate) "guest dirty page rate limit %"
PRIi64 " MB/s"
ram_discard_range(const char *rbname, uint64_t start, size_t len) "%s: start:
%" PRIx64 " %zx"
ram_load_loop(const char *rbname, uint64_t addr, int flags, void *host) "%s:
addr: 0x%" PRIx64 " flags: 0x%x host: %p"
ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64 " %x"
--
1.8.3.1
- [RFC 0/6] migration: introduce dirtylimit capability, huangy81, 2022/05/17
- [RFC 4/6] migration: Introduce dirtylimit capability, huangy81, 2022/05/17
- [RFC 2/6] qapi/migration: Introduce vcpu-dirtylimit parameters, huangy81, 2022/05/17
- [RFC 1/6] qapi/migration: Introduce vcpu-dirtylimit-period parameters, huangy81, 2022/05/17
- [RFC 5/6] migration: Add dirtylimit data into migration info, huangy81, 2022/05/17
- [RFC 6/6] tests: Add migration dirtylimit capability test, huangy81, 2022/05/17
- [RFC 3/6] migration: Implement dirtylimit convergence algo,
huangy81 <=