[Qemu-devel] Re: [PATCH v5] vnc: don't mess up with iohandlers in the vnc thread


From: Peter Lieven
Subject: [Qemu-devel] Re: [PATCH v5] vnc: don't mess up with iohandlers in the vnc thread
Date: Tue, 15 Mar 2011 19:07:10 +0100
User-agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.13) Gecko/20101208 Thunderbird/3.1.7

On 15.03.2011 17:55, Peter Lieven wrote:
On 14.03.2011 10:19, Corentin Chary wrote:
On Thu, Mar 10, 2011 at 3:13 PM, Corentin Chary <address@hidden> wrote:
The threaded VNC servers messed up with QEMU fd handlers without
any kind of locking, and that can cause some nasty race conditions.

Using qemu_mutex_lock_iothread() won't work because vnc_dpy_cpy(),
which will wait for the current job queue to finish, can be called with
the iothread lock held.

Instead, we now store the data in a temporary buffer, and use a bottom
half to notify the main thread that new data is available.

vnc_[un]lock_output() is still needed to access VncState members like
abort, csock or jobs_buffer.

Signed-off-by: Corentin Chary <address@hidden>
---
  ui/vnc-jobs-async.c |   48 +++++++++++++++++++++++++++++-------------------
  ui/vnc-jobs.h       |    1 +
  ui/vnc.c            |   12 ++++++++++++
  ui/vnc.h            |    2 ++
  4 files changed, 44 insertions(+), 19 deletions(-)

diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs-async.c
index f596247..4438776 100644
--- a/ui/vnc-jobs-async.c
+++ b/ui/vnc-jobs-async.c
@@ -28,6 +28,7 @@

  #include "vnc.h"
  #include "vnc-jobs.h"
+#include "qemu_socket.h"

  /*
  * Locking:
@@ -155,6 +156,24 @@ void vnc_jobs_join(VncState *vs)
         qemu_cond_wait(&queue->cond, &queue->mutex);
     }
     vnc_unlock_queue(queue);
+    vnc_jobs_consume_buffer(vs);
+}
+
+void vnc_jobs_consume_buffer(VncState *vs)
+{
+    bool flush;
+
+    vnc_lock_output(vs);
+    if (vs->jobs_buffer.offset) {
+        vnc_write(vs, vs->jobs_buffer.buffer, vs->jobs_buffer.offset);
+        buffer_reset(&vs->jobs_buffer);
+    }
+    flush = vs->csock != -1 && vs->abort != true;
+    vnc_unlock_output(vs);
+
+    if (flush) {
+      vnc_flush(vs);
+    }
  }

  /*
@@ -197,7 +216,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
     VncState vs;
     int n_rectangles;
     int saved_offset;
-    bool flush;

     vnc_lock_queue(queue);
     while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {
@@ -213,6 +231,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)

     vnc_lock_output(job->vs);
     if (job->vs->csock == -1 || job->vs->abort == true) {
+        vnc_unlock_output(job->vs);
         goto disconnected;
     }
     vnc_unlock_output(job->vs);
@@ -233,10 +252,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)

         if (job->vs->csock == -1) {
             vnc_unlock_display(job->vs->vd);
-            /* output mutex must be locked before going to
-             * disconnected:
-             */
-            vnc_lock_output(job->vs);
             goto disconnected;
         }

@@ -254,24 +269,19 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
     vs.output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
     vs.output.buffer[saved_offset + 1] = n_rectangles & 0xFF;

-    /* Switch back buffers */
     vnc_lock_output(job->vs);
-    if (job->vs->csock == -1) {
-        goto disconnected;
+    if (job->vs->csock != -1) {
+        buffer_reserve(&job->vs->jobs_buffer, vs.output.offset);
+        buffer_append(&job->vs->jobs_buffer, vs.output.buffer,
+                      vs.output.offset);
+        /* Copy persistent encoding data */
+        vnc_async_encoding_end(job->vs, &vs);
+
+        qemu_bh_schedule(job->vs->bh);
     }
-
-    vnc_write(job->vs, vs.output.buffer, vs.output.offset);
-
-disconnected:
-    /* Copy persistent encoding data */
-    vnc_async_encoding_end(job->vs, &vs);
-    flush = (job->vs->csock != -1 && job->vs->abort != true);
     vnc_unlock_output(job->vs);

-    if (flush) {
-        vnc_flush(job->vs);
-    }
-
+disconnected:
     vnc_lock_queue(queue);
     QTAILQ_REMOVE(&queue->jobs, job, next);
     vnc_unlock_queue(queue);
diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h
index b8dab81..4c661f9 100644
--- a/ui/vnc-jobs.h
+++ b/ui/vnc-jobs.h
@@ -40,6 +40,7 @@ void vnc_jobs_join(VncState *vs);

  #ifdef CONFIG_VNC_THREAD

+void vnc_jobs_consume_buffer(VncState *vs);
  void vnc_start_worker_thread(void);
  bool vnc_worker_thread_running(void);
  void vnc_stop_worker_thread(void);
diff --git a/ui/vnc.c b/ui/vnc.c
index 610f884..f28873b 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -1011,7 +1011,10 @@ static void vnc_disconnect_finish(VncState *vs)

  #ifdef CONFIG_VNC_THREAD
     qemu_mutex_destroy(&vs->output_mutex);
+    qemu_bh_delete(vs->bh);
+    buffer_free(&vs->jobs_buffer);
  #endif
+
     for (i = 0; i < VNC_STAT_ROWS; ++i) {
         qemu_free(vs->lossy_rect[i]);
     }
@@ -1226,6 +1229,14 @@ static long vnc_client_read_plain(VncState *vs)
     return ret;
  }

+#ifdef CONFIG_VNC_THREAD
+static void vnc_jobs_bh(void *opaque)
+{
+    VncState *vs = opaque;
+
+    vnc_jobs_consume_buffer(vs);
+}
+#endif

  /*
  * First function called whenever there is more data to be read from
@@ -2525,6 +2536,7 @@ static void vnc_connect(VncDisplay *vd, int csock)

  #ifdef CONFIG_VNC_THREAD
     qemu_mutex_init(&vs->output_mutex);
+    vs->bh = qemu_bh_new(vnc_jobs_bh, vs);
  #endif

     QTAILQ_INSERT_HEAD(&vd->clients, vs, next);
diff --git a/ui/vnc.h b/ui/vnc.h
index 8a1e7b9..bca5f87 100644
--- a/ui/vnc.h
+++ b/ui/vnc.h
@@ -283,6 +283,8 @@ struct VncState
     VncJob job;
  #else
     QemuMutex output_mutex;
+    QEMUBH *bh;
+    Buffer jobs_buffer;
  #endif

     /* Encoding specific, if you add something here, don't forget to
--
1.7.3.4
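
For readers who want to see the handoff pattern the commit message describes in isolation, here is a minimal standalone sketch (plain pthreads, not QEMU code): a worker thread appends its output to a shared buffer under a lock and then wakes the main loop, which drains the buffer on its own thread. The self-pipe wakeup stands in for qemu_bh_schedule(), and all names below (worker, consume_buffer, jobs_buffer) are illustrative assumptions, not the functions added by the patch.

/* Standalone sketch of the worker -> main-thread handoff; build with
 * "gcc sketch.c -o sketch -lpthread".  Not QEMU code. */
#include <pthread.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static pthread_mutex_t output_lock = PTHREAD_MUTEX_INITIALIZER;
static char jobs_buffer[4096];   /* plays the role of vs->jobs_buffer */
static size_t jobs_offset;
static int wake_fd[2];           /* self-pipe: [0] read end, [1] write end */

static void *worker(void *arg)
{
    const char *payload = "encoded rectangle data";
    (void)arg;

    /* Like the worker thread: append under the output lock... */
    pthread_mutex_lock(&output_lock);
    memcpy(jobs_buffer + jobs_offset, payload, strlen(payload));
    jobs_offset += strlen(payload);
    pthread_mutex_unlock(&output_lock);

    /* ...then wake the main loop (stand-in for qemu_bh_schedule()). */
    write(wake_fd[1], "x", 1);
    return NULL;
}

/* Runs in the main thread, like vnc_jobs_consume_buffer(). */
static void consume_buffer(void)
{
    pthread_mutex_lock(&output_lock);
    if (jobs_offset) {
        fwrite(jobs_buffer, 1, jobs_offset, stdout);   /* like vnc_write() */
        putchar('\n');
        jobs_offset = 0;                               /* like buffer_reset() */
    }
    pthread_mutex_unlock(&output_lock);
}

int main(void)
{
    pthread_t tid;
    struct pollfd pfd = { .events = POLLIN };
    char c;

    pipe(wake_fd);
    pfd.fd = wake_fd[0];
    pthread_create(&tid, NULL, worker, NULL);

    /* Minimal "main loop": block until the wakeup, then run the handler. */
    poll(&pfd, 1, -1);
    read(wake_fd[0], &c, 1);
    consume_buffer();

    pthread_join(tid, NULL);
    return 0;
}

As in the patch, the worker never touches the socket or the fd handlers itself; it only fills the buffer and signals, leaving the vnc_write()/vnc_flush()-style work to the main thread.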


Paolo, Anthony, Jan, are you ok with that one?
Peter Lieven, could you test that patch?
I have seen one segfault; unfortunately, I had no debugger attached.

But what's worse: during the stress test I managed to trigger the OOM killer.
It seems we have a memory leak somewhere...
Commit c53af37f375ce9c4999ff451c51173bdc1167e67 seems to fix this. Remember that this
is not in 0.14.0, which could cause a lot of trouble for 0.14.0 users when they do a lot
of VNC work...

peter

Thanks





