From: Jeff Cody
Subject: [Qemu-devel] [PATCH 2/2] block: gluster - add reopen support.
Date: Tue, 4 Feb 2014 14:26:59 -0500

The gluster driver parses the open flags in its .bdrv_open()
implementation, so the .bdrv_reopen_* implementations need to do the same.
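
For reference, the flag-parsing helper reused below looks roughly like
this (a simplified sketch, not a verbatim copy; see
qemu_gluster_parse_flags() in block/gluster.c):

    static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
    {
        assert(open_flags != NULL);

        *open_flags = O_BINARY;
        *open_flags &= ~O_ACCMODE;

        /* Map QEMU's read-write flag onto the POSIX access mode. */
        if (bdrv_flags & BDRV_O_RDWR) {
            *open_flags |= O_RDWR;
        } else {
            *open_flags |= O_RDONLY;
        }

        /* Bypass the host page cache when requested. */
        if (bdrv_flags & BDRV_O_NOCACHE) {
            *open_flags |= O_DIRECT;
        }
    }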

In .bdrv_reopen_prepare(), a new gluster connection to the image file
is established, and the image file is opened with the new flags.

If this is successful, the old image file is closed and the old
connection torn down, and the relevant pointers in the gluster state
structure are updated to point to the new connection.

If it is not successful, the new file handle and connection are
abandoned (if they exist), while the old connection is left untouched.
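
For context, the block layer drives these three callbacks
transactionally. A minimal sketch of the caller side (reopen_one() is a
hypothetical helper for illustration; the real driver loop is
bdrv_reopen_multiple() in block.c, which prepares every queue entry
before committing or aborting):

    /* Sketch only, not QEMU code: prepare the new state, then either
     * commit the switch-over or roll back to the old state. */
    static int reopen_one(BlockDriver *drv, BDRVReopenState *state,
                          BlockReopenQueue *queue, Error **errp)
    {
        int ret = drv->bdrv_reopen_prepare(state, queue, errp);
        if (ret == 0) {
            drv->bdrv_reopen_commit(state);  /* use new fd/connection */
        } else {
            drv->bdrv_reopen_abort(state);   /* drop new, keep old */
        }
        return ret;
    }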

With reopen supported, block-commit (and offline commit) now also works
for image files whose base image uses the native gluster protocol
driver.
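
For example, an offline commit like the following (hypothetical host,
volume, and image names) becomes possible, since committing requires
reopening the base image read-write:

    qemu-img commit gluster://server/volume/active.qcow2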

Signed-off-by: Jeff Cody <address@hidden>
---
 block/gluster.c | 106 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 106 insertions(+)

diff --git a/block/gluster.c b/block/gluster.c
index 79af3fd..f813ac8 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -342,6 +342,100 @@ out:
     return ret;
 }
 
+typedef struct BDRVGlusterReopenState {
+    struct glfs *glfs;
+    struct glfs_fd *fd;
+    int open_flags;
+} BDRVGlusterReopenState;
+
+
+static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
+                                       BlockReopenQueue *queue, Error **errp)
+{
+    int ret = 0;
+    BDRVGlusterState *s;
+    BDRVGlusterReopenState *reop_s;
+    GlusterConf *gconf = NULL;
+
+    assert(state != NULL);
+    assert(state->bs != NULL);
+
+    s = state->bs->opaque;
+
+    state->opaque = g_malloc0(sizeof(BDRVGlusterReopenState));
+    reop_s = state->opaque;
+
+    qemu_gluster_parse_flags(state->flags, &reop_s->open_flags);
+
+    gconf = g_malloc0(sizeof(GlusterConf));
+
+    reop_s->glfs = qemu_gluster_init(gconf, s->filename);
+    if (reop_s->glfs == NULL) {
+        ret = -errno;
+        goto exit;
+    }
+
+    reop_s->fd = glfs_open(reop_s->glfs, gconf->image, reop_s->open_flags);
+    if (reop_s->fd == NULL) {
+        /* reop_s->glfs will be cleaned up in _abort */
+        ret = -errno;
+        goto exit;
+    }
+
+exit:
+    /* state->opaque will be freed in either the _abort or _commit */
+    qemu_gluster_gconf_free(gconf);
+    return ret;
+}
+
+static void qemu_gluster_reopen_commit(BDRVReopenState *state)
+{
+    BDRVGlusterReopenState *reop_s = state->opaque;
+    BDRVGlusterState *s = state->bs->opaque;
+
+
+    /* close the old */
+    if (s->fd) {
+        glfs_close(s->fd);
+    }
+    if (s->glfs) {
+        glfs_fini(s->glfs);
+    }
+
+    /* use the newly opened image / connection */
+    s->fd         = reop_s->fd;
+    s->glfs       = reop_s->glfs;
+    s->open_flags = reop_s->open_flags;
+
+    g_free(state->opaque);
+    state->opaque = NULL;
+
+    return;
+}
+
+
+static void qemu_gluster_reopen_abort(BDRVReopenState *state)
+{
+    BDRVGlusterReopenState *reop_s = state->opaque;
+
+    if (reop_s == NULL) {
+        return;
+    }
+
+    if (reop_s->fd) {
+        glfs_close(reop_s->fd);
+    }
+
+    if (reop_s->glfs) {
+        glfs_fini(reop_s->glfs);
+    }
+
+    g_free(state->opaque);
+    state->opaque = NULL;
+
+    return;
+}
+
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
 static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
@@ -633,6 +727,9 @@ static BlockDriver bdrv_gluster = {
     .instance_size                = sizeof(BDRVGlusterState),
     .bdrv_needs_filename          = true,
     .bdrv_file_open               = qemu_gluster_open,
+    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
+    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
+    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
     .bdrv_close                   = qemu_gluster_close,
     .bdrv_create                  = qemu_gluster_create,
     .bdrv_getlength               = qemu_gluster_getlength,
@@ -657,6 +754,9 @@ static BlockDriver bdrv_gluster_tcp = {
     .instance_size                = sizeof(BDRVGlusterState),
     .bdrv_needs_filename          = true,
     .bdrv_file_open               = qemu_gluster_open,
+    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
+    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
+    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
     .bdrv_close                   = qemu_gluster_close,
     .bdrv_create                  = qemu_gluster_create,
     .bdrv_getlength               = qemu_gluster_getlength,
@@ -681,6 +781,9 @@ static BlockDriver bdrv_gluster_unix = {
     .instance_size                = sizeof(BDRVGlusterState),
     .bdrv_needs_filename          = true,
     .bdrv_file_open               = qemu_gluster_open,
+    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
+    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
+    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
     .bdrv_close                   = qemu_gluster_close,
     .bdrv_create                  = qemu_gluster_create,
     .bdrv_getlength               = qemu_gluster_getlength,
@@ -705,6 +808,9 @@ static BlockDriver bdrv_gluster_rdma = {
     .instance_size                = sizeof(BDRVGlusterState),
     .bdrv_needs_filename          = true,
     .bdrv_file_open               = qemu_gluster_open,
+    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
+    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
+    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
     .bdrv_close                   = qemu_gluster_close,
     .bdrv_create                  = qemu_gluster_create,
     .bdrv_getlength               = qemu_gluster_getlength,
-- 
1.8.3.1