From: Max Reitz
Subject: Re: [Qemu-devel] [PATCH v4] block/nvme: add support for discard
Date: Fri, 5 Jul 2019 15:50:07 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.7.0

On 03.07.19 18:07, Maxim Levitsky wrote:
> Signed-off-by: Maxim Levitsky <address@hidden>
> ---
>  block/nvme.c       | 81 ++++++++++++++++++++++++++++++++++++++++++++++
>  block/trace-events |  2 ++
>  2 files changed, 83 insertions(+)
> 
> diff --git a/block/nvme.c b/block/nvme.c
> index 02e0846643..96a715dcc1 100644
> --- a/block/nvme.c
> +++ b/block/nvme.c

[...]

> @@ -460,6 +461,7 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
>                            s->page_size / sizeof(uint64_t) * s->page_size);
>  
>      s->supports_write_zeros = (idctrl->oncs & NVME_ONCS_WRITE_ZEROS) != 0;
> +    s->supports_discard = (idctrl->oncs & NVME_ONCS_DSM) != 0;

Shouldn’t this be le16_to_cpu(idctrl->oncs)?  Same in the previous
patch, now that I think about it.
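
I.e., something along these lines for both flags (just an untested
sketch; le16_to_cpu is the usual helper from qemu/bswap.h):

    uint16_t oncs = le16_to_cpu(idctrl->oncs);

    s->supports_write_zeros = (oncs & NVME_ONCS_WRITE_ZEROS) != 0;
    s->supports_discard = (oncs & NVME_ONCS_DSM) != 0;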

>  
>      memset(resp, 0, 4096);
>  
> @@ -1149,6 +1151,84 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
>  }
>  
>  
> +static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
> +                                         int64_t offset,
> +                                         int bytes)
> +{
> +    BDRVNVMeState *s = bs->opaque;
> +    NVMeQueuePair *ioq = s->queues[1];
> +    NVMeRequest *req;
> +    NvmeDsmRange *buf;
> +    QEMUIOVector local_qiov;
> +    int r;
> +
> +    NvmeCmd cmd = {
> +        .opcode = NVME_CMD_DSM,
> +        .nsid = cpu_to_le32(s->nsid),
> +        .cdw10 = 0, /*number of ranges - 0 based*/

I’d make this cpu_to_le32(0).  Sure, there is no effect for 0, but in
theory this is a variable value, so...
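
I.e. (sketch):

        .cdw10 = cpu_to_le32(0), /* number of ranges - 0 based */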

> +        .cdw11 = cpu_to_le32(1 << 2), /*deallocate bit*/
> +    };
> +
> +    NVMeCoData data = {
> +        .ctx = bdrv_get_aio_context(bs),
> +        .ret = -EINPROGRESS,
> +    };
> +
> +    if (!s->supports_discard) {
> +        return -ENOTSUP;
> +    }
> +
> +    assert(s->nr_queues > 1);
> +
> +    buf = qemu_try_blockalign0(bs, 4096);

I’m not sure whether this needs to be 4096 or whether 16 would suffice,
but I suppose this gets us the least trouble.

> +    if (!buf) {
> +            return -ENOMEM;

Indentation is off.
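
I.e. (sketch):

    if (!buf) {
        return -ENOMEM;
    }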

> +    }
> +
> +    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
> +    buf->slba = cpu_to_le64(offset >> s->blkshift);
> +    buf->cattr = 0;
> +
> +    qemu_iovec_init(&local_qiov, 1);
> +    qemu_iovec_add(&local_qiov, buf, 4096);
> +
> +    req = nvme_get_free_req(ioq);
> +    assert(req);
> +
> +    qemu_co_mutex_lock(&s->dma_map_lock);
> +    r = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
> +    qemu_co_mutex_unlock(&s->dma_map_lock);
> +
> +    if (r) {
> +        req->busy = false;
> +        return r;

Leaking buf and local_qiov here.
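
Both this leak and the one further down could go through a common
cleanup label; an untested sketch of how the tail of the function might
look, using only what the patch already has:

    if (r) {
        req->busy = false;
        goto out;
    }

    trace_nvme_dsm(s, offset, bytes);

    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (!r) {
        trace_nvme_dsm_done(s, offset, bytes, data.ret);
        r = data.ret;
    }

out:
    /* reached on both success and error, so buf and local_qiov are
     * freed on every path */
    qemu_iovec_destroy(&local_qiov);
    qemu_vfree(buf);
    return r;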

> +    }
> +
> +    trace_nvme_dsm(s, offset, bytes);
> +
> +    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
> +
> +    data.co = qemu_coroutine_self();
> +    while (data.ret == -EINPROGRESS) {
> +        qemu_coroutine_yield();
> +    }
> +
> +    qemu_co_mutex_lock(&s->dma_map_lock);
> +    r = nvme_cmd_unmap_qiov(bs, &local_qiov);
> +    qemu_co_mutex_unlock(&s->dma_map_lock);
> +    if (r) {
> +        return r;

Leaking buf and local_qiov here, too.
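(The same out: label sketched above would take care of this path, too.)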

Max

> +    }
> +
> +    trace_nvme_dsm_done(s, offset, bytes, data.ret);
> +
> +    qemu_iovec_destroy(&local_qiov);
> +    qemu_vfree(buf);
> +    return data.ret;
> +
> +}
> +
> +
>  static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
>                                 BlockReopenQueue *queue, Error **errp)
>  {
