[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v2 04/11] qemu_flush_work for remote vcpu execution
From: Glauber Costa
Subject: [Qemu-devel] [PATCH v2 04/11] qemu_flush_work for remote vcpu execution
Date: Tue, 1 Dec 2009 10:51:30 -0200
This function is similar to qemu-kvm's on_vcpu mechanism. Totally synchronous,
and guarantees that a given function will be executed on the specified vcpu.
This patch also converts usage within the breakpoints system.
Signed-off-by: Glauber Costa <address@hidden>
---
cpu-all.h | 3 ++
cpu-defs.h | 14 +++++++++++++
kvm-all.c | 17 +--------------
vl.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++++++----
4 files changed, 75 insertions(+), 20 deletions(-)
diff --git a/cpu-all.h b/cpu-all.h
index e214374..8270d43 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -763,6 +763,9 @@ extern CPUState *cpu_single_env;
extern int64_t qemu_icount;
extern int use_icount;
+void qemu_queue_work(CPUState *env, void (*func)(void *data), void *data);
+void qemu_flush_work(CPUState *env);
+
#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20
case) */
#define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */
diff --git a/cpu-defs.h b/cpu-defs.h
index 95068b5..18792fc 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -31,6 +31,8 @@
#include "qemu-queue.h"
#include "targphys.h"
+#include "qemu-thread.h"
+
#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
#endif
@@ -134,6 +136,13 @@ typedef struct CPUWatchpoint {
QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;
+typedef struct QemuWorkItem {
+ void (*func)(void *data);
+ void *data;
+ int done;
+} QemuWorkItem;
+
+
#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON \
struct TranslationBlock *current_tb; /* currently executing TB */ \
@@ -175,6 +184,10 @@ typedef struct CPUWatchpoint {
QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
CPUWatchpoint *watchpoint_hit; \
\
+ QemuWorkItem queued_work; \
+ uint64_t queued_local, queued_total; \
+ struct QemuMutex queue_lock; \
+ \
struct GDBRegisterState *gdb_regs; \
\
/* Core interrupt code */ \
@@ -194,6 +207,7 @@ typedef struct CPUWatchpoint {
uint32_t created; \
struct QemuThread *thread; \
struct QemuCond *halt_cond; \
+ struct QemuCond work_cond; \
const char *cpu_model_str; \
struct KVMState *kvm_state; \
struct kvm_run *kvm_run; \
diff --git a/kvm-all.c b/kvm-all.c
index 0c0b3c3..40203f0 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -636,7 +636,7 @@ int kvm_cpu_exec(CPUState *env)
struct kvm_run *run = env->kvm_run;
int ret;
- dprintf("kvm_cpu_exec()\n");
+ dprintf("kvm_cpu_exec() %d\n", env->cpu_index);
do {
if (env->exit_request) {
@@ -951,19 +951,6 @@ void kvm_setup_guest_memory(void *start, size_t size)
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
-static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
-{
-#ifdef CONFIG_IOTHREAD
- if (env == cpu_single_env) {
- func(data);
- return;
- }
- abort();
-#else
- func(data);
-#endif
-}
-
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
target_ulong pc)
{
@@ -1011,7 +998,7 @@ int kvm_update_guest_debug(CPUState *env, unsigned long
reinject_trap)
data.dbg.control |= reinject_trap;
data.env = env;
- on_vcpu(env, kvm_invoke_set_guest_debug, &data);
+ qemu_queue_work(env, kvm_invoke_set_guest_debug, &data);
return data.err;
}
diff --git a/vl.c b/vl.c
index 321b18d..c7b46a9 100644
--- a/vl.c
+++ b/vl.c
@@ -3403,6 +3403,11 @@ void qemu_notify_event(void)
}
}
+void qemu_queue_work(CPUState *env, void (*func)(void *data), void *data)
+{
+ func(data);
+}
+
void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}
@@ -3436,8 +3441,7 @@ static int tcg_has_work(void);
static pthread_key_t current_env;
-CPUState *qemu_get_current_env(void);
-CPUState *qemu_get_current_env(void)
+static CPUState *qemu_get_current_env(void)
{
return pthread_getspecific(current_env);
}
@@ -3474,8 +3478,10 @@ static int qemu_init_main_loop(void)
static void qemu_wait_io_event(CPUState *env)
{
- while (!tcg_has_work())
+ while (!tcg_has_work()) {
+ qemu_flush_work(env);
qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
+ }
qemu_mutex_unlock(&qemu_global_mutex);
@@ -3488,6 +3494,7 @@ static void qemu_wait_io_event(CPUState *env)
qemu_mutex_unlock(&qemu_fair_mutex);
qemu_mutex_lock(&qemu_global_mutex);
+
if (env->stop) {
env->stop = 0;
env->stopped = 1;
@@ -3514,8 +3521,11 @@ static void *kvm_cpu_thread_fn(void *arg)
qemu_cond_signal(&qemu_cpu_cond);
/* and wait for machine initialization */
- while (!qemu_system_ready)
+ while (!qemu_system_ready) {
+ /* system reset and initialization is a heavy caller of queue_work() */
+ qemu_flush_work(env);
qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
+ }
while (1) {
if (cpu_can_run(env))
@@ -3542,8 +3552,9 @@ static void *tcg_cpu_thread_fn(void *arg)
qemu_cond_signal(&qemu_cpu_cond);
/* and wait for machine initialization */
- while (!qemu_system_ready)
+ while (!qemu_system_ready) {
qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
+ }
while (1) {
tcg_cpu_exec();
@@ -3561,6 +3572,44 @@ void qemu_cpu_kick(void *_env)
qemu_thread_signal(env->thread, SIGUSR1);
}
+void qemu_queue_work(CPUState *env, void (*func)(void *data), void *data)
+{
+ QemuWorkItem *wii;
+
+ env->queued_total++;
+
+ if (env == qemu_get_current_env()) {
+ env->queued_local++;
+ func(data);
+ return;
+ }
+
+ wii = &env->queued_work;
+ wii->func = func;
+ wii->data = data;
+ wii->done = 0;
+
+ qemu_thread_signal(env->thread, SIGUSR1);
+
+ while (!wii->done) {
+ qemu_cond_wait(&env->work_cond, &qemu_global_mutex);
+ }
+
+ wii->func = NULL;
+}
+
+void qemu_flush_work(CPUState *env)
+{
+ QemuWorkItem *wi;
+
+ wi = &env->queued_work;
+ if (wi->func) {
+ wi->func(wi->data);
+ wi->done = 1;
+ }
+ qemu_cond_broadcast(&env->work_cond);
+}
+
int qemu_cpu_self(void *_env)
{
CPUState *env = _env;
@@ -3719,6 +3768,8 @@ void qemu_init_vcpu(void *_env)
{
CPUState *env = _env;
+ qemu_cond_init(&env->work_cond);
+
if (kvm_enabled())
kvm_start_vcpu(env);
else
--
1.6.5.2
- [Qemu-devel] [PATCH v2 0/11], Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 01/11] Don't mess with halted state., Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 02/11] store thread-specific env information, Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 03/11] update halted state on mp_state sync, Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 04/11] qemu_flush_work for remote vcpu execution,
Glauber Costa <=
- [Qemu-devel] [PATCH v2 05/11] tell kernel about all registers instead of just mp_state, Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 06/11] flush state in migration post_load, Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 07/11] Don't call kvm cpu reset on initialization, Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 08/11] use cpu_kick instead of direct signalling., Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 09/11] Use per-cpu reset handlers., Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 10/11] Use __thread where available., Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 11/11] remove smp restriction from kvm, Glauber Costa, 2009/12/01
- Re: [Qemu-devel] [PATCH v2 05/11] tell kernel about all registers instead of just mp_state, Gleb Natapov, 2009/12/02
- Re: [Qemu-devel] [PATCH v2 05/11] tell kernel about all registers instead of just mp_state, Glauber Costa, 2009/12/02
- [Qemu-devel] Re: [PATCH v2 05/11] tell kernel about all registers instead of just mp_state, Jan Kiszka, 2009/12/02