[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [RFC][PATCH 09/14 v4] run dump at the background
From: |
Wen Congyang |
Subject: |
[Qemu-devel] [RFC][PATCH 09/14 v4] run dump at the background |
Date: |
Wed, 04 Jan 2012 14:12:06 +0800 |
User-agent: |
Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.9) Gecko/20100413 Fedora/3.0.4-2.fc13 Thunderbird/3.0.4 |
The new monitor command dump may take a long time to finish, so we need to run
it in the background.
Signed-off-by: Wen Congyang <address@hidden>
---
dump.c | 158 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------
1 files changed, 139 insertions(+), 19 deletions(-)
diff --git a/dump.c b/dump.c
index ab29a4c..09ed96a 100644
--- a/dump.c
+++ b/dump.c
@@ -75,12 +75,20 @@ typedef struct DumpState {
Monitor *mon;
int fd;
target_phys_addr_t memory_offset;
+ int64_t bandwidth;
+ RAMBlock *block;
+ ram_addr_t start;
+ target_phys_addr_t offset;
+ QEMUTimer *timer;
} DumpState;
+#define DEFAULT_THROTTLE (32 << 20) /* Default dump speed throttling */
+
static DumpState *dump_get_current(void)
{
static DumpState current_dump = {
.state = DUMP_STATE_SETUP,
+ .bandwidth = DEFAULT_THROTTLE,
};
return &current_dump;
@@ -91,11 +99,21 @@ static int dump_cleanup(DumpState *s)
int ret = 0;
free_memory_mapping_list(&s->list);
+
if (s->fd != -1) {
close(s->fd);
s->fd = -1;
}
+ if (s->timer) {
+ qemu_del_timer(s->timer);
+ qemu_free_timer(s->timer);
+ }
+
+ if (s->mon) {
+ monitor_resume(s->mon);
+ }
+
return ret;
}
@@ -330,25 +348,40 @@ static int write_data(DumpState *s, void *buf, int length,
}
/* write the memroy to vmcore. 1 page per I/O. */
-static int write_memory(DumpState *s, RAMBlock *block,
- target_phys_addr_t *offset)
+static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
+ target_phys_addr_t *offset, int64_t *size,
+ int64_t deadline)
{
int i, ret;
+ int64_t writen_size = 0;
+ int64_t time;
- for (i = 0; i < block->length / TARGET_PAGE_SIZE; i++) {
- ret = write_data(s, block->host + i * TARGET_PAGE_SIZE,
+ for (i = 0; i < *size / TARGET_PAGE_SIZE; i++) {
+ ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
TARGET_PAGE_SIZE, offset);
if (ret < 0) {
return -1;
}
+ writen_size += TARGET_PAGE_SIZE;
+ time = qemu_get_clock_ms(rt_clock);
+ if (time >= deadline) {
+ /* time out */
+ *size = writen_size;
+ return 1;
+ }
}
- if ((block->length % TARGET_PAGE_SIZE) != 0) {
- ret = write_data(s, block->host + i * TARGET_PAGE_SIZE,
- block->length % TARGET_PAGE_SIZE, offset);
+ if ((*size % TARGET_PAGE_SIZE) != 0) {
+ ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
+ *size % TARGET_PAGE_SIZE, offset);
if (ret < 0) {
return -1;
}
+ time = qemu_get_clock_ms(rt_clock);
+ if (time >= deadline) {
+ /* time out */
+ return 1;
+ }
}
return 0;
@@ -383,6 +416,9 @@ static DumpState *dump_init(Monitor *mon, int fd)
s->error = NULL;
s->mon = mon;
s->fd = fd;
+ s->block = QLIST_FIRST(&ram_list.blocks);
+ s->start = 0;
+ s->timer = NULL;
/*
* get dump info: endian, class and architecture.
@@ -425,6 +461,11 @@ static DumpState *dump_init(Monitor *mon, int fd)
s->phdr_num += s->list.num;
}
+ if (monitor_suspend(mon) != 0) {
+ monitor_printf(mon, "terminal does not allow synchronous "
+ "dump, continuing detached\n");
+ }
+
return s;
}
@@ -482,6 +523,7 @@ static int dump_begin(DumpState *s)
}
s->memory_offset = offset;
+ s->offset = offset;
return 0;
}
@@ -509,38 +551,116 @@ static int dump_completed(DumpState *s)
return 0;
}
-/* write all memory to vmcore */
-static int dump_iterate(DumpState *s)
+/*
+ * write memory to vmcore.
+ *
+ * this function has three return values:
+ * -1 : there was one error
+ * 0 : We haven't finished, caller have to go again
+ * 1 : We have finished, we can go to complete phase
+ */
+static int dump_iterate(DumpState *s, int64_t deadline)
{
- RAMBlock *block;
- target_phys_addr_t offset = s->memory_offset;
+ RAMBlock *block = s->block;
+ target_phys_addr_t offset = s->offset;
+ int64_t size, remain, writen_size;
+ int64_t total = s->bandwidth / 10;
int ret;
- /* write all memory to vmcore */
- QLIST_FOREACH(block, &ram_list.blocks, next) {
- ret = write_memory(s, block, &offset);
+ if ((block->length - s->start) >= total) {
+ size = total;
+ } else {
+ size = block->length - s->start;
+ }
+
+ ret = write_memory(s, block, s->start, &offset, &size, deadline);
+ if (ret < 0) {
+ return -1;
+ }
+
+ if (size == total || ret == 1) {
+ if ((size + s->start) == block->length) {
+ s->block = QLIST_NEXT(block, next);
+ s->start = 0;
+ } else {
+ s->start += size;
+ }
+ goto end;
+ }
+
+ while (size < total) {
+ block = QLIST_NEXT(block, next);
+ if (!block) {
+ /* we have finished */
+ return 1;
+ }
+
+ remain = total - size;
+ if (remain >= block->length) {
+ writen_size = block->length;
+ } else {
+ writen_size = remain;
+ }
+ ret = write_memory(s, block, 0, &offset, &writen_size, deadline);
if (ret < 0) {
return -1;
+ } else if (ret == 1) {
+ break;
}
+ size += writen_size;
+ }
+ if (writen_size == block->length) {
+ s->block = QLIST_NEXT(block, next);
+ s->start = 0;
+ } else {
+ s->block = block;
+ s->start = writen_size;
+ }
+
+end:
+ s->offset = offset;
+ if (!s->block) {
+ /* we have finished */
+ return 1;
}
- return dump_completed(s);
+ return 0;
}
-static int create_vmcore(DumpState *s)
+static void dump_rate_tick(void *opaque)
{
+ DumpState *s = opaque;
+ int64_t begin, end;
int ret;
- ret = dump_begin(s);
+ begin = qemu_get_clock_ms(rt_clock);
+ ret = dump_iterate(s, begin + 100);
if (ret < 0) {
- return -1;
+ return;
+ } else if (ret == 1) {
+ dump_completed(s);
+ return;
+ }
+ end = qemu_get_clock_ms(rt_clock);
+ if (end - begin >= 100) {
+ qemu_mod_timer(s->timer, end + 10);
+ } else {
+ qemu_mod_timer(s->timer, begin + 100);
}
+}
- ret = dump_iterate(s);
+static int create_vmcore(DumpState *s)
+{
+ int ret;
+
+ ret = dump_begin(s);
if (ret < 0) {
return -1;
}
+ s->timer = qemu_new_timer_ms(rt_clock, dump_rate_tick, s);
+ qemu_mod_timer(s->timer, qemu_get_clock_ms(rt_clock) + 100);
+
return 0;
}
--
1.7.1
- [Qemu-devel] [RFC][PATCH 03/14 v4] target-i386: implement cpu_get_memory_mapping(), (continued)
- [Qemu-devel] [RFC][PATCH 03/14 v4] target-i386: implement cpu_get_memory_mapping(), Wen Congyang, 2012/01/04
- [Qemu-devel] [RFC][PATCH 04/14 v4] Add API to get memory mapping, Wen Congyang, 2012/01/04
- [Qemu-devel] [RFC][PATCH 05/14 v4] target-i386: Add API to write elf notes to core file, Wen Congyang, 2012/01/04
- [Qemu-devel] [RFC][PATCH 06/14 v4] target-i386: Add API to add extra memory mapping, Wen Congyang, 2012/01/04
- [Qemu-devel] [RFC][PATCH 07/14 v4] target-i386: add API to get dump info, Wen Congyang, 2012/01/04
- [Qemu-devel] [RFC][PATCH 08/14 v4] introduce a new monitor command 'dump' to dump guest's memory, Wen Congyang, 2012/01/04
[Qemu-devel] [RFC][PATCH 09/14 v4] run dump at the background,
Wen Congyang <=
[Qemu-devel] [RFC][PATCH 10/14 v4] support detached dump, Wen Congyang, 2012/01/04
[Qemu-devel] [RFC][PATCH 11/14 v4] support to cancel the current dumping, Wen Congyang, 2012/01/04
[Qemu-devel] [RFC][PATCH 12/14 v4] support to set dumping speed, Wen Congyang, 2012/01/04
[Qemu-devel] [RFC][PATCH 13/14 v4] support to query dumping status, Wen Congyang, 2012/01/04
[Qemu-devel] [RFC][PATCH 14/14 v4] auto cancel dumping after vm state is changed to run, Wen Congyang, 2012/01/04
Re: [Qemu-devel] [RFC][PATCT 0/14 v4] dump memory when host pci device is used by guest, Wen Congyang, 2012/01/10