From: John Snow
Subject: Re: [Qemu-block] [Qemu-devel] [PATCH v2 2/2] block/nvme: add support for discard
Date: Wed, 18 Sep 2019 16:24:01 -0400
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Thunderbird/68.1.0


On 9/13/19 9:36 AM, Maxim Levitsky wrote:
> Signed-off-by: Maxim Levitsky <address@hidden>

Same comments as on 1/2, but nothing worth holding this up over. We'll
find out from users if there are problems; I just wish we had a nicer
way to test it.
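For anyone who wants to poke at it by hand: with a spare controller
bound to vfio-pci, something like

    $ qemu-io -c 'discard 0 1M' nvme://0000:01:00.0/1

ought to exercise the new path. That's an untested sketch: the PCI
address and namespace are made up, and the discard only goes through
if the controller advertises DSM support in ONCS. The new
nvme_dsm/nvme_dsm_done trace points should confirm either way.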

Reviewed-by: John Snow <address@hidden>

> ---
>  block/nvme.c       | 83 ++++++++++++++++++++++++++++++++++++++++++++++
>  block/trace-events |  2 ++
>  2 files changed, 85 insertions(+)
> 
> diff --git a/block/nvme.c b/block/nvme.c
> index d95265fae4..c17edd6aae 100644
> --- a/block/nvme.c
> +++ b/block/nvme.c
> @@ -112,6 +112,7 @@ typedef struct {
>      bool plugged;
>  
>      bool supports_write_zeroes;
> +    bool supports_discard;
>  
>      CoMutex dma_map_lock;
>      CoQueue dma_flush_queue;
> @@ -463,6 +464,7 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
>  
>      oncs = le16_to_cpu(idctrl->oncs);
>      s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROS);
> +    s->supports_discard = !!(oncs & NVME_ONCS_DSM);
>  
>      memset(resp, 0, 4096);
>  
> @@ -1153,6 +1155,86 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
>  }
>  
>  
> +static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
> +                                         int64_t offset,
> +                                         int bytes)
> +{
> +    BDRVNVMeState *s = bs->opaque;
> +    NVMeQueuePair *ioq = s->queues[1];
> +    NVMeRequest *req;
> +    NvmeDsmRange *buf;
> +    QEMUIOVector local_qiov;
> +    int ret;
> +
> +    NvmeCmd cmd = {
> +        .opcode = NVME_CMD_DSM,
> +        .nsid = cpu_to_le32(s->nsid),
> +        .cdw10 = cpu_to_le32(0), /*number of ranges - 0 based*/
> +        .cdw11 = cpu_to_le32(1 << 2), /*deallocate bit*/
> +    };
> +
> +    NVMeCoData data = {
> +        .ctx = bdrv_get_aio_context(bs),
> +        .ret = -EINPROGRESS,
> +    };
> +
> +    if (!s->supports_discard) {
> +        return -ENOTSUP;
> +    }
> +
> +    assert(s->nr_queues > 1);
> +
> +    buf = qemu_try_blockalign0(bs, s->page_size);
> +    if (!buf) {
> +        return -ENOMEM;
> +    }
> +
> +    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
> +    buf->slba = cpu_to_le64(offset >> s->blkshift);
> +    buf->cattr = 0;
> +
> +    qemu_iovec_init(&local_qiov, 1);
> +    qemu_iovec_add(&local_qiov, buf, 4096);
> +
> +    req = nvme_get_free_req(ioq);
> +    assert(req);
> +
> +    qemu_co_mutex_lock(&s->dma_map_lock);
> +    ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
> +    qemu_co_mutex_unlock(&s->dma_map_lock);
> +
> +    if (ret) {
> +        req->busy = false;
> +        goto out;
> +    }
> +
> +    trace_nvme_dsm(s, offset, bytes);
> +
> +    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
> +
> +    data.co = qemu_coroutine_self();
> +    while (data.ret == -EINPROGRESS) {
> +        qemu_coroutine_yield();
> +    }
> +
> +    qemu_co_mutex_lock(&s->dma_map_lock);
> +    ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
> +    qemu_co_mutex_unlock(&s->dma_map_lock);
> +
> +    if (ret) {
> +        goto out;
> +    }
> +
> +    ret = data.ret;
> +    trace_nvme_dsm_done(s, offset, bytes, ret);
> +out:
> +    qemu_iovec_destroy(&local_qiov);
> +    qemu_vfree(buf);
> +    return ret;
> +
> +}
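
For future readers: the buffer built above holds a single NvmeDsmRange
descriptor. From memory (double-check include/block/nvme.h), the layout
is roughly:

    typedef struct NvmeDsmRange {
        uint32_t cattr;   /* context attributes; left as 0 here */
        uint32_t nlb;     /* length of the range, in logical blocks */
        uint64_t slba;    /* starting LBA of the range */
    } NvmeDsmRange;

so cdw10 = 0 really does mean "one range" (the count is zero-based),
and cdw11 bit 2 is the deallocate attribute asking the controller to
actually discard the blocks.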
> +
> +
>  static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
>                                 BlockReopenQueue *queue, Error **errp)
>  {
> @@ -1259,6 +1341,7 @@ static BlockDriver bdrv_nvme = {
>      .bdrv_co_pwritev          = nvme_co_pwritev,
>  
>      .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
> +    .bdrv_co_pdiscard         = nvme_co_pdiscard,
>  
>      .bdrv_co_flush_to_disk    = nvme_co_flush,
>      .bdrv_reopen_prepare      = nvme_reopen_prepare,
> diff --git a/block/trace-events b/block/trace-events
> index 651aa461d5..c61553b4b8 100644
> --- a/block/trace-events
> +++ b/block/trace-events
> @@ -153,6 +153,8 @@ nvme_write_zeroes(void *s, uint64_t offset, uint64_t bytes, int flags) "s %p off
>  nvme_qiov_unaligned(const void *qiov, int n, void *base, size_t size, int align) "qiov %p n %d base %p size 0x%zx align 0x%x"
>  nvme_prw_buffered(void *s, uint64_t offset, uint64_t bytes, int niov, int is_write) "s %p offset %"PRId64" bytes %"PRId64" niov %d is_write %d"
>  nvme_rw_done(void *s, int is_write, uint64_t offset, uint64_t bytes, int ret) "s %p is_write %d offset %"PRId64" bytes %"PRId64" ret %d"
> +nvme_dsm(void *s, uint64_t offset, uint64_t bytes) "s %p offset %"PRId64" bytes %"PRId64""
> +nvme_dsm_done(void *s, uint64_t offset, uint64_t bytes, int ret) "s %p offset %"PRId64" bytes %"PRId64" ret %d"
>  nvme_dma_map_flush(void *s) "s %p"
>  nvme_free_req_queue_wait(void *q) "q %p"
>  nvme_cmd_map_qiov(void *s, void *cmd, void *req, void *qiov, int entries) "s %p cmd %p req %p qiov %p entries %d"
> 

-- 
—js


