qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH 3/3] hw/block/nvme: report non-mdts command size limit for dsm


From: Minwoo Im
Subject: Re: [PATCH 3/3] hw/block/nvme: report non-mdts command size limit for dsm
Date: Mon, 22 Feb 2021 21:11:05 +0900
User-agent: Mutt/1.11.4 (2019-03-13)

On 21-02-22 08:06:15, Klaus Jensen wrote:
> From: Gollu Appalanaidu <anaidu.gollu@samsung.com>
> 
> Dataset Management is not subject to MDTS, but exceeding a certain size
> per range causes internal looping. Report this limit (DMRSL) in the NVM
> command set specific identify controller data structure.
> 
> Signed-off-by: Gollu Appalanaidu <anaidu.gollu@samsung.com>
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> ---
>  hw/block/nvme.h       |  1 +
>  include/block/nvme.h  | 11 +++++++++++
>  hw/block/nvme.c       | 30 ++++++++++++++++++++----------
>  hw/block/trace-events |  1 +
>  4 files changed, 33 insertions(+), 10 deletions(-)
> 
> diff --git a/hw/block/nvme.h b/hw/block/nvme.h
> index cb2b5175f1a1..3046b82b3da1 100644
> --- a/hw/block/nvme.h
> +++ b/hw/block/nvme.h
> @@ -172,6 +172,7 @@ typedef struct NvmeCtrl {
>      int         aer_queued;
>  
>      uint8_t     zasl;
> +    uint32_t    dmrsl;
>  
>      NvmeSubsystem   *subsys;
>  
> diff --git a/include/block/nvme.h b/include/block/nvme.h
> index b23f3ae2279f..16d8c4c90f7e 100644
> --- a/include/block/nvme.h
> +++ b/include/block/nvme.h
> @@ -1041,6 +1041,16 @@ typedef struct NvmeIdCtrlZoned {
>      uint8_t     rsvd1[4095];
>  } NvmeIdCtrlZoned;
>  
> +typedef struct NvmeIdCtrlNvm {
> +    uint8_t     vsl;
> +    uint8_t     wzsl;
> +    uint8_t     wusl;
> +    uint8_t     dmrl;
> +    uint32_t    dmrsl;
> +    uint64_t    dmsl;
> +    uint8_t     rsvd16[4080];
> +} NvmeIdCtrlNvm;
> +
>  enum NvmeIdCtrlOacs {
>      NVME_OACS_SECURITY  = 1 << 0,
>      NVME_OACS_FORMAT    = 1 << 1,
> @@ -1396,6 +1406,7 @@ static inline void _nvme_check_size(void)
>      QEMU_BUILD_BUG_ON(sizeof(NvmeEffectsLog) != 4096);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlZoned) != 4096);
> +    QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlNvm) != 4096);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeLBAF) != 4);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeLBAFE) != 16);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096);
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 897b9ff0db91..5d6bba5fcb0d 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -1777,6 +1777,10 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
>              trace_pci_nvme_dsm_deallocate(nvme_cid(req), nvme_nsid(ns), slba,
>                                            nlb);
>  
> +            if (nlb > n->dmrsl) {
> +                trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, 
> n->dmrsl);
> +            }
> +
>              offset = nvme_l2b(ns, slba);
>              len = nvme_l2b(ns, nlb);
>  
> @@ -3202,21 +3206,24 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, 
> NvmeRequest *req)
>  static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
>  {
>      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> -    NvmeIdCtrlZoned id = {};
> +    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
>  
>      trace_pci_nvme_identify_ctrl_csi(c->csi);
>  
> -    if (c->csi == NVME_CSI_NVM) {
> -        return nvme_rpt_empty_id_struct(n, req);
> -    } else if (c->csi == NVME_CSI_ZONED) {
> -        if (n->params.zasl_bs) {
> -            id.zasl = n->zasl;
> -        }
> -        return nvme_dma(n, (uint8_t *)&id, sizeof(id),
> -                        DMA_DIRECTION_FROM_DEVICE, req);
> +    switch (c->csi) {
> +    case NVME_CSI_NVM:
> +        ((NvmeIdCtrlNvm *)&id)->dmrsl = cpu_to_le32(n->dmrsl);
> +        break;
> +
> +    case NVME_CSI_ZONED:
> +        ((NvmeIdCtrlZoned *)&id)->zasl = n->zasl;

Question.  Are we okay without checking this like above ? :)

        if (n->params.zasl_bs) {
                ((NvmeIdCtrlZoned *)&id)->zasl = n->zasl;
        }

> +        break;
> +
> +    default:
> +        return NVME_INVALID_FIELD | NVME_DNR;
>      }
>  
> -    return NVME_INVALID_FIELD | NVME_DNR;
> +    return nvme_dma(n, id, sizeof(id), DMA_DIRECTION_FROM_DEVICE, req);
>  }
>  
>  static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
> @@ -4670,6 +4677,9 @@ int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace 
> *ns, Error **errp)
>  
>      n->namespaces[nsid - 1] = ns;
>  
> +    n->dmrsl = MIN_NON_ZERO(n->dmrsl,
> +                            BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
> +
>      return 0;
>  }
>  
> diff --git a/hw/block/trace-events b/hw/block/trace-events
> index 1f958d09d2a9..27940fe2e98a 100644
> --- a/hw/block/trace-events
> +++ b/hw/block/trace-events
> @@ -51,6 +51,7 @@ pci_nvme_copy_cb(uint16_t cid) "cid %"PRIu16""
>  pci_nvme_block_status(int64_t offset, int64_t bytes, int64_t pnum, int ret, 
> bool zeroed) "offset %"PRId64" bytes %"PRId64" pnum %"PRId64" ret 0x%x zeroed 
> %d"
>  pci_nvme_dsm(uint16_t cid, uint32_t nsid, uint32_t nr, uint32_t attr) "cid 
> %"PRIu16" nsid %"PRIu32" nr %"PRIu32" attr 0x%"PRIx32""
>  pci_nvme_dsm_deallocate(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t 
> nlb) "cid %"PRIu16" nsid %"PRIu32" slba %"PRIu64" nlb %"PRIu32""
> +pci_nvme_dsm_single_range_limit_exceeded(uint32_t nlb, uint32_t dmrsl) "nlb 
> %"PRIu32" dmrsl %"PRIu32""
>  pci_nvme_compare(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) 
> "cid %"PRIu16" nsid %"PRIu32" slba 0x%"PRIx64" nlb %"PRIu32""
>  pci_nvme_compare_cb(uint16_t cid) "cid %"PRIu16""
>  pci_nvme_aio_discard_cb(uint16_t cid) "cid %"PRIu16""
> -- 
> 2.30.1
> 
> 



reply via email to

[Prev in Thread] Current Thread [Next in Thread]