From: Amit Shah
Subject: [Qemu-devel] [PATCH v7 6/6] virtio-console: Throttle virtio-serial-bus if we can't consume any more guest data
Date: Wed, 5 May 2010 03:09:30 +0530

If the char device we're connected to is overwhelmed with data and it
can't accept any more, signal to the virtio-serial-bus to stop sending
us more data until we indicate otherwise.

If the current buffer being processed hasn't been completely written out
to the char device, we have to keep it around and re-try sending it
since the virtio-serial-bus code assumes we consume the entire buffer.

Allow the chardev backends to return -EAGAIN; we register a
write-unblocked callback handler that flushes the remainder of the
buffer once the chardev can accept data again.
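
As an illustration of the same pattern outside QEMU, here is a minimal,
self-contained sketch using a plain non-blocking POSIX fd in place of
the chardev layer; the struct and function names below are hypothetical
and not part of this patch:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical holder for the bytes a short write left behind. */
struct pending {
    uint8_t *buf;       /* unwritten remainder, NULL if nothing pending */
    size_t rem_len;
    size_t offset;
};

/* Write as much as the fd accepts; stash the rest and tell the caller
 * to throttle its producer by returning -EAGAIN. */
static int write_or_save(int fd, struct pending *p,
                         const uint8_t *buf, size_t len)
{
    size_t written = 0;

    while (written < len) {
        ssize_t ret = write(fd, buf + written, len - written);
        if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
            p->buf = malloc(len - written);
            if (!p->buf) {
                return -ENOMEM;
            }
            memcpy(p->buf, buf + written, len - written);
            p->rem_len = len - written;
            p->offset = 0;
            return -EAGAIN;
        }
        if (ret < 0) {
            return -errno;
        }
        written += ret;
    }
    return 0;
}

/* Call when poll(2) reports the fd writable again; returns 0 once the
 * remainder is flushed and the producer can be unthrottled. */
static int flush_pending(int fd, struct pending *p)
{
    while (p->rem_len > 0) {
        ssize_t ret = write(fd, p->buf + p->offset, p->rem_len);
        if (ret < 0) {
            return (errno == EAGAIN || errno == EWOULDBLOCK) ? -EAGAIN
                                                             : -errno;
        }
        p->offset += ret;
        p->rem_len -= ret;
    }
    free(p->buf);
    p->buf = NULL;
    return 0;
}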

Also register with savevm so that we save/restore such a buffer across
migration.

Signed-off-by: Amit Shah <address@hidden>
---
 hw/virtio-console.c |  120 +++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 117 insertions(+), 3 deletions(-)

diff --git a/hw/virtio-console.c b/hw/virtio-console.c
index 862a431..b0b4351 100644
--- a/hw/virtio-console.c
+++ b/hw/virtio-console.c
@@ -13,18 +13,90 @@
 #include "qemu-char.h"
 #include "virtio-serial.h"
 
+typedef struct Buffer {
+    uint8_t *buf;
+    size_t rem_len;
+    size_t offset;
+} Buffer;
+
 typedef struct VirtConsole {
     VirtIOSerialPort port;
     CharDriverState *chr;
+    Buffer unflushed_buf;
 } VirtConsole;
 
+static void add_unflushed_buf(VirtConsole *vcon, const uint8_t *buf, size_t len)
+{
+    assert(!vcon->unflushed_buf.buf);
+
+    vcon->unflushed_buf.buf = qemu_malloc(len);
+
+    memcpy(vcon->unflushed_buf.buf, buf, len);
+    vcon->unflushed_buf.rem_len = len;
+    vcon->unflushed_buf.offset = 0;
+}
+
+static void free_unflushed_buf(VirtConsole *vcon)
+{
+    if (vcon->unflushed_buf.buf) {
+        qemu_free(vcon->unflushed_buf.buf);
+        vcon->unflushed_buf.buf = NULL;
+    }
+}
+
+static ssize_t buffered_write_to_chardev(VirtConsole *vcon, const uint8_t *buf,
+                                         size_t len)
+{
+    size_t written;
+    ssize_t ret;
+
+    written = 0;
+    do {
+        ret = qemu_chr_write_nb(vcon->chr, buf + written, len - written);
+        if (ret < 0) {
+            if (vcon->unflushed_buf.buf) {
+                vcon->unflushed_buf.offset += written;
+                vcon->unflushed_buf.rem_len -= written;
+            } else {
+                virtio_serial_throttle_port(&vcon->port, true);
+                add_unflushed_buf(vcon, buf + written, len - written);
+            }
+            return ret;
+        }
+        written += ret;
+    } while (written != len);
+
+    return 0;
+}
+
+/* Callback function called when the chardev can accept more data */
+static void chr_write_unblocked(void *opaque)
+{
+    VirtConsole *vcon = opaque;
+
+    if (vcon->unflushed_buf.buf) {
+        ssize_t ret;
+
+        ret = buffered_write_to_chardev(vcon, vcon->unflushed_buf.buf
+                                              + vcon->unflushed_buf.offset,
+                                        vcon->unflushed_buf.rem_len);
+        if (ret < 0) {
+            return;
+        }
+        free_unflushed_buf(vcon);
+    }
+    virtio_serial_throttle_port(&vcon->port, false);
+}
 
 /* Callback function that's called when the guest sends us data */
 static void flush_buf(VirtIOSerialPort *port, const uint8_t *buf, size_t len)
 {
     VirtConsole *vcon = DO_UPCAST(VirtConsole, port, port);
 
-    qemu_chr_write(vcon->chr, buf, len);
+    /* If a previous write was incomplete, we should've been throttled. */
+    assert(!vcon->unflushed_buf.buf);
+
+    buffered_write_to_chardev(vcon, buf, len);
 }
 
 /* Readiness of the guest to accept data on a port */
@@ -48,19 +120,58 @@ static void chr_event(void *opaque, int event)
     VirtConsole *vcon = opaque;
 
     switch (event) {
-    case CHR_EVENT_OPENED: {
+    case CHR_EVENT_OPENED:
         virtio_serial_open(&vcon->port);
         break;
-    }
+
     case CHR_EVENT_CLOSED:
+        free_unflushed_buf(vcon);
         virtio_serial_close(&vcon->port);
         break;
     }
 }
 
+static void virtio_console_port_save(QEMUFile *f, void *opaque)
+{
+    VirtConsole *vcon = opaque;
+    uint32_t have_buffer;
+
+    have_buffer = vcon->unflushed_buf.buf ? true : false;
+
+    qemu_put_be32s(f, &have_buffer);
+    if (have_buffer) {
+        qemu_put_be64s(f, &vcon->unflushed_buf.rem_len);
+        qemu_put_buffer(f, vcon->unflushed_buf.buf
+                           + vcon->unflushed_buf.offset,
+                        vcon->unflushed_buf.rem_len);
+    }
+}
+
+static int virtio_console_port_load(QEMUFile *f, void *opaque, int version_id)
+{
+    VirtConsole *vcon = opaque;
+    uint32_t have_buffer;
+
+    if (version_id > 1) {
+        return -EINVAL;
+    }
+
+    qemu_get_be32s(f, &have_buffer);
+    if (have_buffer) {
+        qemu_get_be64s(f, &vcon->unflushed_buf.rem_len);
+        vcon->unflushed_buf.buf = qemu_malloc(vcon->unflushed_buf.rem_len);
+        vcon->unflushed_buf.offset = 0;
+
+        qemu_get_buffer(f, vcon->unflushed_buf.buf,
+                        vcon->unflushed_buf.rem_len);
+    }
+    return 0;
+}
+
 static const QemuChrHandlers chr_handlers = {
     .fd_can_read = chr_can_read,
     .fd_read = chr_read,
+    .fd_write_unblocked = chr_write_unblocked,
     .fd_event = chr_event,
 };
 
@@ -72,6 +183,8 @@ static int generic_port_init(VirtConsole *vcon, VirtIOSerialDevice *dev)
         qemu_chr_add_handlers(vcon->chr, &chr_handlers, vcon);
         vcon->port.info->have_data = flush_buf;
     }
+    register_savevm("virtio-console-ports", -1, 1, virtio_console_port_save,
+                   virtio_console_port_load, vcon);
     return 0;
 }
 
@@ -93,6 +206,7 @@ static int virtconsole_exitfn(VirtIOSerialDevice *dev)
     if (vcon->chr) {
         port->info->have_data = NULL;
         qemu_chr_close(vcon->chr);
+        free_unflushed_buf(vcon);
     }
 
     return 0;
-- 
1.6.2.5




