qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PULL 13/28] linux-user: Use QemuMutex and QemuCond


From: Paolo Bonzini
Subject: [Qemu-devel] [PULL 13/28] linux-user: Use QemuMutex and QemuCond
Date: Mon, 26 Sep 2016 15:40:43 +0200

From: Sergey Fedorov <address@hidden>

Convert pthread_mutex_t and pthread_cond_t to QemuMutex and QemuCond.
This will make it possible to share some locks and condition variables
between user-mode and system-mode emulation.

Signed-off-by: Sergey Fedorov <address@hidden>
Signed-off-by: Sergey Fedorov <address@hidden>
Reviewed-by: Alex Bennée <address@hidden>
Signed-off-by: Alex Bennée <address@hidden>
Message-Id: <address@hidden>
Reviewed-by: Richard Henderson <address@hidden>
Signed-off-by: Paolo Bonzini <address@hidden>
---
 linux-user/main.c | 55 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 32 insertions(+), 23 deletions(-)

diff --git a/linux-user/main.c b/linux-user/main.c
index 8daebe0..7a056fc 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -111,17 +111,25 @@ int cpu_get_pic_interrupt(CPUX86State *env)
    We don't require a full sync, only that no cpus are executing guest code.
    The alternative is to map target atomic ops onto host equivalents,
    which requires quite a lot of per host/target work.  */
-static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
-static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
+static QemuMutex cpu_list_lock;
+static QemuMutex exclusive_lock;
+static QemuCond exclusive_cond;
+static QemuCond exclusive_resume;
 static int pending_cpus;
 
+void qemu_init_cpu_loop(void)
+{
+    qemu_mutex_init(&cpu_list_lock);
+    qemu_mutex_init(&exclusive_lock);
+    qemu_cond_init(&exclusive_cond);
+    qemu_cond_init(&exclusive_resume);
+}
+
 /* Make sure everything is in a consistent state for calling fork().  */
 void fork_start(void)
 {
     qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     mmap_fork_start();
 }
 
@@ -138,14 +146,14 @@ void fork_end(int child)
             }
         }
         pending_cpus = 0;
-        pthread_mutex_init(&exclusive_lock, NULL);
-        pthread_mutex_init(&cpu_list_mutex, NULL);
-        pthread_cond_init(&exclusive_cond, NULL);
-        pthread_cond_init(&exclusive_resume, NULL);
+        qemu_mutex_init(&exclusive_lock);
+        qemu_mutex_init(&cpu_list_lock);
+        qemu_cond_init(&exclusive_cond);
+        qemu_cond_init(&exclusive_resume);
         qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
-        pthread_mutex_unlock(&exclusive_lock);
+        qemu_mutex_unlock(&exclusive_lock);
         qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
     }
 }
@@ -155,7 +163,7 @@ void fork_end(int child)
 static inline void exclusive_idle(void)
 {
     while (pending_cpus) {
-        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
+        qemu_cond_wait(&exclusive_resume, &exclusive_lock);
     }
 }
 
@@ -165,7 +173,7 @@ static inline void start_exclusive(void)
 {
     CPUState *other_cpu;
 
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     exclusive_idle();
 
     pending_cpus = 1;
@@ -176,8 +184,8 @@ static inline void start_exclusive(void)
             cpu_exit(other_cpu);
         }
     }
-    if (pending_cpus > 1) {
-        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
+    while (pending_cpus > 1) {
+        qemu_cond_wait(&exclusive_cond, &exclusive_lock);
     }
 }
 
@@ -185,42 +193,42 @@ static inline void start_exclusive(void)
 static inline void __attribute__((unused)) end_exclusive(void)
 {
     pending_cpus = 0;
-    pthread_cond_broadcast(&exclusive_resume);
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_cond_broadcast(&exclusive_resume);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 /* Wait for exclusive ops to finish, and begin cpu execution.  */
 static inline void cpu_exec_start(CPUState *cpu)
 {
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     exclusive_idle();
     cpu->running = true;
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 /* Mark cpu as not executing, and release pending exclusive ops.  */
 static inline void cpu_exec_end(CPUState *cpu)
 {
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     cpu->running = false;
     if (pending_cpus > 1) {
         pending_cpus--;
         if (pending_cpus == 1) {
-            pthread_cond_signal(&exclusive_cond);
+            qemu_cond_signal(&exclusive_cond);
         }
     }
     exclusive_idle();
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 void cpu_list_lock(void)
 {
-    pthread_mutex_lock(&cpu_list_mutex);
+    qemu_mutex_lock(&cpu_list_lock);
 }
 
 void cpu_list_unlock(void)
 {
-    pthread_mutex_unlock(&cpu_list_mutex);
+    qemu_mutex_unlock(&cpu_list_lock);
 }
 
 
@@ -4211,6 +4219,7 @@ int main(int argc, char **argv, char **envp)
     int ret;
     int execfd;
 
+    qemu_init_cpu_loop();
     module_call_init(MODULE_INIT_QOM);
 
     if ((envlist = envlist_create()) == NULL) {
-- 
2.7.4





reply via email to

[Prev in Thread] Current Thread [Next in Thread]