From: Greg Kurz
Subject: Re: [Qemu-devel] [PATCH 3/4] 9pfs: use v9fs_init_qiov_from_pdu instead of v9fs_pack
Date: Thu, 24 Nov 2016 15:48:11 +0100

On Mon, 21 Nov 2016 13:39:31 -0800
Stefano Stabellini <address@hidden> wrote:

> v9fs_xattr_read should not access VirtQueueElement elems directly.
> Move v9fs_init_qiov_from_pdu up in the file and call
> v9fs_init_qiov_from_pdu instead of v9fs_pack.
> 

"instead of"? I see that v9fs_init_qiov_from_pdu() gets called before v9fs_pack(),
which is still being called.
Also, I don't see the corresponding call to qemu_iovec_destroy() to free the
allocated iovec.
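
Something like the following maybe (completely untested sketch, just to
illustrate what I mean -- assuming qemu_iovec_init() is indeed what allocates
the iovec array here):

    v9fs_init_qiov_from_pdu(&qiov_full, pdu, 0, read_count, false);
    err = v9fs_pack(qiov_full.iov, qiov_full.niov, offset,
                    ((char *)fidp->fs.xattr.value) + off,
                    read_count);
    /* release the iovec array allocated by qemu_iovec_init() in
     * v9fs_init_qiov_from_pdu(), on both the success and the error path */
    qemu_iovec_destroy(&qiov_full);
    if (err < 0) {
        return err;
    }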

> Signed-off-by: Stefano Stabellini <address@hidden>
> ---
>  hw/9pfs/9p.c | 58 +++++++++++++++++++++++++++++-----------------------------
>  1 file changed, 29 insertions(+), 29 deletions(-)
> 
> diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
> index 5a20a13..b6ec042 100644
> --- a/hw/9pfs/9p.c
> +++ b/hw/9pfs/9p.c
> @@ -1633,14 +1633,39 @@ out_nofid:
>      pdu_complete(pdu, err);
>  }
>  
> +/*
> + * Create a QEMUIOVector for a sub-region of PDU iovecs
> + *
> + * @qiov:       uninitialized QEMUIOVector
> + * @skip:       number of bytes to skip from beginning of PDU
> + * @size:       number of bytes to include
> + * @is_write:   true - write, false - read
> + *
> + * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
> + * with qemu_iovec_destroy().
> + */
> +static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
> +                                    size_t skip, size_t size,
> +                                    bool is_write)
> +{
> +    QEMUIOVector elem;
> +    struct iovec *iov;
> +    unsigned int niov;
> +
> +    pdu->s->transport->init_iov_from_pdu(pdu, &iov, &niov, is_write);
> +
> +    qemu_iovec_init_external(&elem, iov, niov);
> +    qemu_iovec_init(qiov, niov);
> +    qemu_iovec_concat(qiov, &elem, skip, size);
> +}
> +
>  static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
>                             uint64_t off, uint32_t max_count)
>  {
>      ssize_t err;
>      size_t offset = 7;
>      uint64_t read_count;
> -    V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
> -    VirtQueueElement *elem = v->elems[pdu->idx];
> +    QEMUIOVector qiov_full;
>  
>      if (fidp->fs.xattr.len < off) {
>          read_count = 0;
> @@ -1656,7 +1681,8 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
>      }
>      offset += err;
>  
> -    err = v9fs_pack(elem->in_sg, elem->in_num, offset,
> +    v9fs_init_qiov_from_pdu(&qiov_full, pdu, 0, read_count, false);
> +    err = v9fs_pack(qiov_full.iov, qiov_full.niov, offset,
>                      ((char *)fidp->fs.xattr.value) + off,
>                      read_count);
>      if (err < 0) {
> @@ -1732,32 +1758,6 @@ static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
>      return count;
>  }
>  
> -/*
> - * Create a QEMUIOVector for a sub-region of PDU iovecs
> - *
> - * @qiov:       uninitialized QEMUIOVector
> - * @skip:       number of bytes to skip from beginning of PDU
> - * @size:       number of bytes to include
> - * @is_write:   true - write, false - read
> - *
> - * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
> - * with qemu_iovec_destroy().
> - */
> -static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
> -                                    size_t skip, size_t size,
> -                                    bool is_write)
> -{
> -    QEMUIOVector elem;
> -    struct iovec *iov;
> -    unsigned int niov;
> -
> -    pdu->s->transport->init_iov_from_pdu(pdu, &iov, &niov, is_write);
> -
> -    qemu_iovec_init_external(&elem, iov, niov);
> -    qemu_iovec_init(qiov, niov);
> -    qemu_iovec_concat(qiov, &elem, skip, size);
> -}
> -
>  static void coroutine_fn v9fs_read(void *opaque)
>  {
>      int32_t fid;



