From: huangy81
Subject: [PATCH v2 2/3] memory: introduce DirtyRateIncreasedPages and util function
Date: Tue, 13 Jul 2021 00:56:51 +0800
From: Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
Introduce DirtyRateIncreasedPages to count the pages newly dirtied during
the calculation period; it is accumulated while ramblock_sync_dirty_bitmap
syncs the dirty bitmap.

Introduce util functions to reset the DIRTY_MEMORY_MIGRATION dirty bits,
making it convenient to track the dirty bitmap when calculating the
dirty rate.
Signed-off-by: Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
---
include/exec/ram_addr.h | 87 ++++++++++++++++++++++++++++++++++++++++++++-----
softmmu/physmem.c | 35 ++++++++++++++++++++
2 files changed, 113 insertions(+), 9 deletions(-)
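For context, below is a rough sketch of how a caller might drive these
helpers during one dirty-rate measurement pass. It is illustrative only
and not part of this patch: dirtyrate_measure_pass() is a hypothetical
function, and it assumes dirty logging has already been started with
GLOBAL_DIRTY_DIRTY_RATE set (which the rest of this series is expected
to arrange).

    /*
     * Sketch of a hypothetical caller, e.g. in migration/dirtyrate.c.
     * Assumes GLOBAL_DIRTY_DIRTY_RATE dirty logging is already enabled.
     */
    static uint64_t dirtyrate_measure_pass(int64_t calc_time_ms)
    {
        RAMBlock *rb;

        /* Drop write protection and stale DIRTY_MEMORY_MIGRATION bits. */
        WITH_RCU_READ_LOCK_GUARD() {
            RAMBLOCK_FOREACH(rb) {
                cpu_physical_memory_dirtyrate_reset_protect(rb);
            }
        }

        DirtyRateIncreasedPages = 0;

        /* Let the guest run and dirty memory for the calculation period. */
        g_usleep(calc_time_ms * 1000);

        /* Pull the dirty log from the accelerator and walk it; with only
         * GLOBAL_DIRTY_DIRTY_RATE enabled, the sync pass accumulates
         * DirtyRateIncreasedPages without consuming the migration bitmap. */
        memory_global_dirty_log_sync();
        WITH_RCU_READ_LOCK_GUARD() {
            RAMBLOCK_FOREACH(rb) {
                cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
            }
        }

        return DirtyRateIncreasedPages;
    }

Splitting the GLOBAL_DIRTY_DIRTY_RATE and GLOBAL_DIRTY_MIGRATION cases in
cpu_physical_memory_sync_dirty_bitmap() is what lets such a pass count
newly dirtied pages without disturbing the migration bitmap.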
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 45c9132..c47d1a7 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -26,6 +26,8 @@
#include "exec/ramlist.h"
#include "exec/ramblock.h"
+static uint64_t DirtyRateIncreasedPages = 0;
+
/**
* clear_bmap_size: calculate clear bitmap size
*
@@ -422,6 +424,9 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client);
+void cpu_physical_memory_dirtyrate_clear_bit(ram_addr_t start,
+ ram_addr_t length);
+
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
(MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
@@ -449,6 +454,8 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
uint64_t num_dirty = 0;
unsigned long *dest = rb->bmap;
+ assert(global_dirty_tracking);
+
/* start address and length is aligned at the start of a word? */
if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
(start + rb->offset) &&
@@ -466,12 +473,20 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
for (k = page; k < page + nr; k++) {
if (src[idx][offset]) {
- unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
- unsigned long new_dirty;
- new_dirty = ~dest[k];
- dest[k] |= bits;
- new_dirty &= bits;
- num_dirty += ctpopl(new_dirty);
+ unsigned long bits;
+ if (global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE) {
+ bits = qatomic_read(&src[idx][offset]);
+ DirtyRateIncreasedPages += ctpopl(bits);
+ }
+
+ if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
+ unsigned long new_dirty;
+ bits = qatomic_xchg(&src[idx][offset], 0);
+ new_dirty = ~dest[k];
+ dest[k] |= bits;
+ new_dirty &= bits;
+ num_dirty += ctpopl(new_dirty);
+ }
}
if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
@@ -500,9 +515,15 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
start + addr + offset,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
- long k = (start + addr) >> TARGET_PAGE_BITS;
- if (!test_and_set_bit(k, dest)) {
- num_dirty++;
+ if (global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE) {
+ DirtyRateIncreasedPages++;
+ }
+
+ if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
+ long k = (start + addr) >> TARGET_PAGE_BITS;
+ if (!test_and_set_bit(k, dest)) {
+ num_dirty++;
+ }
}
}
}
@@ -510,5 +531,53 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
return num_dirty;
}
+
+static inline
+void cpu_physical_memory_dirtyrate_clear_dirty_bits(RAMBlock *rb)
+{
+ ram_addr_t addr;
+ ram_addr_t length = rb->used_length;
+ unsigned long word = BIT_WORD(rb->offset >> TARGET_PAGE_BITS);
+
+ /* start address and length is aligned at the start of a word? */
+ if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == rb->offset &&
+ !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
+ int k;
+ int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
+ unsigned long * const *src;
+ unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
+ DIRTY_MEMORY_BLOCK_SIZE);
+
+ src = qatomic_rcu_read(
+ &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
+
+ for (k = 0; k < nr; k++) {
+ if (src[idx][offset]) {
+ qatomic_set(&src[idx][offset], 0);
+ }
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
+ }
+ } else {
+ ram_addr_t offset = rb->offset;
+
+ for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+ cpu_physical_memory_dirtyrate_clear_bit(addr + offset,
+ TARGET_PAGE_SIZE);
+ }
+ }
+
+ return;
+}
+
+static inline
+void cpu_physical_memory_dirtyrate_reset_protect(RAMBlock *rb)
+{
+ memory_region_clear_dirty_bitmap(rb->mr, 0, rb->used_length);
+ cpu_physical_memory_dirtyrate_clear_dirty_bits(rb);
+}
#endif
#endif
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 3c1912a..67cff31 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -1068,6 +1068,41 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
return dirty;
}
+void cpu_physical_memory_dirtyrate_clear_bit(ram_addr_t start,
+ ram_addr_t length)
+{
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ RAMBlock *ramblock;
+
+ if (length == 0) {
+ return;
+ }
+
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+
+ WITH_RCU_READ_LOCK_GUARD() {
+ blocks =
+ qatomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ ramblock = qemu_get_ram_block(start);
+ /* Range sanity check on the ramblock */
+ assert(start >= ramblock->offset &&
+ start + length <= ramblock->offset + ramblock->used_length);
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page,
+ DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+ bitmap_clear(blocks->blocks[idx], offset, num);
+ page += num;
+ }
+ }
+
+ return;
+}
+
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
(MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
--
1.8.3.1