qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [RFC PATCH] virtio-9p: Use clone approach to fix TOCTOU


From: Stefan Hajnoczi
Subject: Re: [Qemu-devel] [RFC PATCH] virtio-9p: Use clone approach to fix TOCTOU vulnerability
Date: Wed, 15 Jun 2011 16:24:12 +0100

On Tue, Jun 14, 2011 at 9:12 AM, M. Mohan Kumar <address@hidden> wrote:
> [RFC PATCH] virtio-9p: Use clone approach to fix TOCTOU vulnerability
>
> In passthrough security model, following a symbolic link in the server
> side could result in TOCTTOU vulnerability.
>
> Use clone system call to create a thread which runs in chrooted
> environment. All passthrough model file operations are done from this
> thread to avoid TOCTTOU vulnerability.
>
> Signed-off-by: Venkateswararao Jujjuri <address@hidden>
> Signed-off-by: M. Mohan Kumar <address@hidden>
> ---
>  fsdev/file-op-9p.h         |    1 +
>  hw/9pfs/virtio-9p-coth.c   |  105 +++++++++++++++++++++++++++++++++++++++++--
>  hw/9pfs/virtio-9p-coth.h   |   13 +++++-
>  hw/9pfs/virtio-9p-device.c |    7 +++-
>  hw/9pfs/virtio-9p.h        |    6 ++-
>  5 files changed, 124 insertions(+), 8 deletions(-)

This patch isn't against upstream virtio-9p.  Please post a link to a
repo or more information.

>
> diff --git a/fsdev/file-op-9p.h b/fsdev/file-op-9p.h
> index 5d088d4..c54f6bf 100644
> --- a/fsdev/file-op-9p.h
> +++ b/fsdev/file-op-9p.h
> @@ -60,6 +60,7 @@ typedef struct FsContext
>     SecModel fs_sm;
>     uid_t uid;
>     int open_flags;
> +    int enable_chroot;

Please use bool.  Using int is like using void *; it throws away type information.

>     struct xattr_operations **xops;
>     /* fs driver specific data */
>     void *private;
> diff --git a/hw/9pfs/virtio-9p-coth.c b/hw/9pfs/virtio-9p-coth.c
> index e61b656..aa71a83 100644
> --- a/hw/9pfs/virtio-9p-coth.c
> +++ b/hw/9pfs/virtio-9p-coth.c
> @@ -17,6 +17,8 @@
>  #include "qemu-thread.h"
>  #include "qemu-coroutine.h"
>  #include "virtio-9p-coth.h"
> +#include <sys/syscall.h>

Do you need this header?  Please include system headers first, then
QEMU headers.

> +#include "qemu-error.h"
>
>  /* v9fs glib thread pool */
>  static V9fsThPool v9fs_pool;
> @@ -56,7 +58,96 @@ static void v9fs_thread_routine(gpointer data, gpointer 
> user_data)
>     } while (len == -1 && errno == EINTR);
>  }
>
> -int v9fs_init_worker_threads(void)
> +static int v9fs_chroot_function(void *arg)
> +{
> +    GError *err;
> +    V9fsChrootState *csp = arg;
> +    FsContext *fs_ctx = csp->fs_ctx;
> +    V9fsThPool *p = &v9fs_pool;
> +    int notifier_fds[2];
> +

Must acquire csp->mutex before calling any QEMU functions here -
otherwise this thread runs concurrently with QEMU threads and could
lead to race conditions.

> +    if (chroot(fs_ctx->fs_root) < 0) {
> +        error_report("chroot");
> +        goto error;
> +    }
> +
> +    if (qemu_pipe(notifier_fds) == -1) {
> +        return -1;
> +    }
> +    p->pool = g_thread_pool_new(v9fs_thread_routine, p, 10, TRUE, &err);
> +    if (!p->pool) {
> +        error_report("g_thread_pool_new");
> +        goto error;
> +    }
> +
> +    p->completed = g_async_queue_new();
> +    if (!p->completed) {
> +        /*
> +         * We are going to terminate.
> +         * So don't worry about cleanup
> +         */
> +        goto error;
> +    }
> +    p->rfd = notifier_fds[0];
> +    p->wfd = notifier_fds[1];
> +
> +    fcntl(p->rfd, F_SETFL, O_NONBLOCK);
> +    fcntl(p->wfd, F_SETFL, O_NONBLOCK);
> +
> +    qemu_set_fd_handler(p->rfd, v9fs_qemu_process_req_done, NULL, NULL);

These should be in a common function that v9fs_init_worker_threads()
can call instead of copy-paste.

> +
> +    /* Signal parent thread that thread pools are created */
> +    g_cond_signal(csp->cond);

Now csp->mutex can be released and the QEMU thread can continue.

> +    g_mutex_lock(csp->mutex_term);
> +    /* TODO: Wake up cs->terminate during 9p hotplug support */
> +    g_cond_wait(csp->terminate, csp->mutex);
> +    g_mutex_unlock(csp->mutex_term);
> +    g_mutex_free(csp->mutex);
> +    g_cond_free(csp->cond);
> +    g_mutex_free(csp->mutex_term);
> +    g_cond_free(csp->terminate);
> +    qemu_free(csp->stack);

There goes my stack!  It's only safe to free this in the QEMU thread
that signalled, but you need a way to join this thread.

Did you try syscall(SYS_exit, 0) instead?  I think this would make
cleanup much easier and you wouldn't have to juggle around many heap
allocated resources.

> +    qemu_free(csp);
> +    return 0;
> +error:
> +    p->pool = NULL;
> +    g_cond_signal(csp->cond);
> +    return 0;
> +}
> +
> +static int v9fs_clone_chroot_th(FsContext *fs_ctx)
> +{
> +    pid_t pid;
> +    V9fsChrootState *cs;
> +
> +    cs = qemu_malloc(sizeof(*cs));
> +    cs->stack = qemu_malloc(THREAD_STACK);
> +    cs->mutex = g_mutex_new();
> +    cs->cond = g_cond_new();
> +    cs->mutex_term = g_mutex_new();
> +    cs->terminate = g_cond_new();
> +    cs->fs_ctx = fs_ctx;
> +
> +    g_mutex_lock(cs->mutex);
> +    pid = clone(&v9fs_chroot_function, cs->stack + THREAD_STACK, CLONE_FILES 
> |
> +                                CLONE_SIGHAND | CLONE_VM | CLONE_THREAD, cs);

I'm a little concerned that thread-local storage is going to be broken
and glib will act weird, but I don't know the implementation details.

> +    if (pid == -1) {
> +        error_report("clone");
> +        g_mutex_unlock(cs->mutex);
> +        g_mutex_free(cs->mutex);
> +        g_cond_free(cs->cond);
> +        g_mutex_free(cs->mutex_term);
> +        g_cond_free(cs->terminate);
> +        qemu_free(cs->stack);
> +        qemu_free(cs);
> +        return pid;

You can avoid all the freeing by only creating the stack and mutex
before cloning.  If clone(2) fails you only need to free the stack,
mutex, and cs.  Allocate the rest after clone has succeeded but before
unlocking cs->mutex.

Stefan



reply via email to

[Prev in Thread] Current Thread [Next in Thread]