qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [patch 2/7] qemu: separate thread for io


From: mtosatti
Subject: [Qemu-devel] [patch 2/7] qemu: separate thread for io
Date: Thu, 19 Mar 2009 11:57:07 -0300
User-agent: quilt/0.46-1

Index: qemu/exec.c
===================================================================
--- qemu.orig/exec.c
+++ qemu/exec.c
@@ -1513,6 +1513,20 @@ void cpu_interrupt(CPUState *env, int ma
     /* FIXME: This is probably not threadsafe.  A different thread could
        be in the middle of a read-modify-write operation.  */
     env->interrupt_request |= mask;
+
+    switch(mask) {
+    case CPU_INTERRUPT_HARD:
+    case CPU_INTERRUPT_SMI:
+    case CPU_INTERRUPT_NMI:
+    case CPU_INTERRUPT_EXIT:
+    /*
+     * Only unlink the TBs if we're called from CPU thread context;
+     * otherwise, signal the CPU thread to do it.
+     */
+        if (qemu_notify_event(env))
+               return;
+    }
+
 #if defined(USE_NPTL)
     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
        problem and hope the cpu will stop of its own accord.  For userspace
Index: qemu/qemu-common.h
===================================================================
--- qemu.orig/qemu-common.h
+++ qemu/qemu-common.h
@@ -190,6 +190,8 @@ int cpu_load(QEMUFile *f, void *opaque, 
 
 /* Force QEMU to stop what it's doing and service IO */
 void qemu_service_io(void);
+void main_loop_break(void);
+int qemu_notify_event(void *env);
 
 typedef struct QEMUIOVector {
     struct iovec *iov;
Index: qemu/vl.c
===================================================================
--- qemu.orig/vl.c
+++ qemu/vl.c
@@ -155,6 +155,7 @@
 //#define DEBUG_NET
 //#define DEBUG_SLIRP
 
+#define SIG_IPI (SIGRTMIN+4)
 
 #ifdef DEBUG_IOPORT
 #  define LOG_IOPORT(...) qemu_log_mask(CPU_LOG_IOPORT, ## __VA_ARGS__)
@@ -265,6 +266,10 @@ static QEMUTimer *nographic_timer;
 uint8_t qemu_uuid[16];
 
 QemuMutex qemu_global_mutex;
+QemuMutex qemu_fair_mutex;
+
+QemuThread io_thread;
+QemuThread cpus_thread;
 
 /***********************************************************/
 /* x86 ISA bus support */
@@ -1328,7 +1333,6 @@ static void host_alarm_handler(int host_
                                qemu_get_clock(vm_clock))) ||
         qemu_timer_expired(active_timers[QEMU_TIMER_REALTIME],
                            qemu_get_clock(rt_clock))) {
-        CPUState *env = next_cpu;
 
 #ifdef _WIN32
         struct qemu_alarm_win32 *data = ((struct qemu_alarm_timer*)dwUser)->priv;
@@ -1339,15 +1343,6 @@ static void host_alarm_handler(int host_
 #endif
         alarm_timer->flags |= ALARM_FLAG_EXPIRED;
 
-        if (env) {
-            /* stop the currently executing cpu because a timer occured */
-            cpu_interrupt(env, CPU_INTERRUPT_EXIT);
-#ifdef USE_KQEMU
-            if (env->kqemu_enabled) {
-                kqemu_cpu_interrupt(env);
-            }
-#endif
-        }
         event_pending = 1;
     }
 }
@@ -2878,6 +2873,7 @@ int qemu_set_fd_handler2(int fd,
         ioh->opaque = opaque;
         ioh->deleted = 0;
     }
+    main_loop_break();
     return 0;
 }
 
@@ -3334,6 +3330,7 @@ void qemu_bh_schedule(QEMUBH *bh)
     if (env) {
         cpu_interrupt(env, CPU_INTERRUPT_EXIT);
     }
+    main_loop_break();
 }
 
 void qemu_bh_cancel(QEMUBH *bh)
@@ -3611,6 +3608,155 @@ static void host_main_loop_wait(int *tim
 }
 #endif
 
+static int wait_signal(int timeout)
+{
+    struct timespec ts;
+    sigset_t waitset;
+
+    ts.tv_sec = timeout / 1000;
+    ts.tv_nsec = (timeout % 1000) * 1000000;
+    sigemptyset(&waitset);
+    sigaddset(&waitset, SIGUSR1);
+
+    return sigtimedwait(&waitset, NULL, &ts);
+}
+
+static int has_work(CPUState *env)
+{
+    int r = 0;
+    if (!env->halted)
+        r = 1;
+    if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
+        r = 1;
+    return r;
+}
+
+static void qemu_wait_io_event(CPUState *env, int timeout)
+{
+    qemu_mutex_unlock(&qemu_global_mutex);
+
+    if (timeout && !has_work(env))
+        wait_signal(timeout);
+    /*
+     * Users of qemu_global_mutex can be starved, having no chance
+     * to acquire it since this path will get to it first.
+     * So use another lock to provide fairness.
+     */
+    qemu_mutex_lock(&qemu_fair_mutex);
+    qemu_mutex_unlock(&qemu_fair_mutex);
+
+    qemu_mutex_lock(&qemu_global_mutex);
+}
+
+static void cpu_signal(int sig)
+{
+    QemuThread self;
+    CPUState *env = cpu_single_env;
+
+    event_pending = 1;
+
+    qemu_thread_self(&self);
+    if (!qemu_thread_equal(&self, &cpus_thread))
+        return;
+
+    if (env) {
+        /* stop the currently executing CPU because an event occurred */
+        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
+#ifdef USE_KQEMU
+        if (env->kqemu_enabled) {
+            kqemu_cpu_interrupt(env);
+        }
+#endif
+    }
+}
+
+static void block_io_signals(void)
+{
+    sigset_t set;
+    struct sigaction sigact;
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR2);
+    sigaddset(&set, SIGIO);
+    sigaddset(&set, SIGALRM);
+    sigaddset(&set, SIG_IPI);
+    pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR1);
+    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+    memset(&sigact, 0, sizeof(sigact));
+    sigact.sa_handler = cpu_signal;
+    sigaction(SIGUSR1, &sigact, NULL);
+}
+
+/* used to wake up the io thread */
+static void sig_ipi_handler(int sig)
+{
+}
+
+static void unblock_io_signals(void)
+{
+    sigset_t set;
+    struct sigaction sigact;
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR2);
+    sigaddset(&set, SIGIO);
+    sigaddset(&set, SIGALRM);
+    sigaddset(&set, SIG_IPI);
+    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR1);
+    pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+    memset(&sigact, 0, sizeof(sigact));
+    sigact.sa_handler = sig_ipi_handler;
+    sigaction(SIG_IPI, &sigact, NULL);
+}
+
+int qemu_notify_event(void *cpu_env)
+{
+    QemuThread me;
+    qemu_thread_self(&me);
+
+    if (qemu_thread_equal(&cpus_thread, &me))
+        return 0;
+    qemu_thread_signal(&cpus_thread, SIGUSR1);
+    return 1;
+}
+
+void main_loop_break(void)
+{
+    QemuThread me;
+    qemu_thread_self(&me);
+    if (qemu_thread_equal(&io_thread, &me))
+        return;
+    qemu_thread_signal(&io_thread, SIG_IPI);
+}
+
+static void *io_thread_fn(void *arg)
+{
+    unblock_io_signals();
+    qemu_mutex_lock(&qemu_global_mutex);
+    while (1)
+        main_loop_wait(1000);
+}
+
+static void qemu_signal_lock(unsigned int msecs)
+{
+    qemu_mutex_lock(&qemu_fair_mutex);
+
+    while (qemu_mutex_trylock(&qemu_global_mutex)) {
+        qemu_thread_signal(&cpus_thread, SIGUSR1);
+        if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
+            break;
+    }
+    qemu_mutex_unlock(&qemu_fair_mutex);
+}
+
 void main_loop_wait(int timeout)
 {
     IOHandlerRecord *ioh;
@@ -3660,7 +3806,7 @@ void main_loop_wait(int timeout)
      */
     qemu_mutex_unlock(&qemu_global_mutex);
     ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
-    qemu_mutex_lock(&qemu_global_mutex);
+    qemu_signal_lock(100);
     if (ret > 0) {
         IOHandlerRecord **pioh;
 
@@ -3718,9 +3864,15 @@ static int main_loop(void)
 #endif
     CPUState *env;
 
+    qemu_mutex_init(&qemu_fair_mutex);
     qemu_mutex_init(&qemu_global_mutex);
     qemu_mutex_lock(&qemu_global_mutex);
 
+    qemu_thread_create(&io_thread, io_thread_fn, NULL);
+    block_io_signals();
+
+    qemu_thread_self(&cpus_thread);
+
     cur_cpu = first_cpu;
     next_cpu = cur_cpu->next_cpu ?: first_cpu;
     for(;;) {
@@ -3853,7 +4005,7 @@ static int main_loop(void)
 #ifdef CONFIG_PROFILER
         ti = profile_getclock();
 #endif
-        main_loop_wait(timeout);
+        qemu_wait_io_event(env, timeout);
 #ifdef CONFIG_PROFILER
         dev_time += profile_getclock() - ti;
 #endif

-- 





reply via email to

[Prev in Thread] Current Thread [Next in Thread]