
Re: [PATCH 14/16] hw/block/nvme: consolidate qsg/iov clearing


From: Klaus Jensen
Subject: Re: [PATCH 14/16] hw/block/nvme: consolidate qsg/iov clearing
Date: Wed, 29 Jul 2020 21:49:04 +0200

On Jul 29 21:18, Maxim Levitsky wrote:
> On Mon, 2020-07-20 at 13:37 +0200, Klaus Jensen wrote:
> > From: Klaus Jensen <k.jensen@samsung.com>
> > 
> > Always destroy the request qsg/iov at the end of request use.
> > 
> > Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> > ---
> >  hw/block/nvme.c | 48 +++++++++++++++++-------------------------------
> >  1 file changed, 17 insertions(+), 31 deletions(-)
> > 
> > diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> > index 54cd20f1ce22..b53afdeb3fb6 100644
> > --- a/hw/block/nvme.c
> > +++ b/hw/block/nvme.c
> > @@ -213,6 +213,14 @@ static void nvme_req_clear(NvmeRequest *req)
> >  {
> >      req->ns = NULL;
> >      memset(&req->cqe, 0x0, sizeof(req->cqe));
> > +
> > +    if (req->qsg.sg) {
> > +        qemu_sglist_destroy(&req->qsg);
> > +    }
> > +
> > +    if (req->iov.iov) {
> > +        qemu_iovec_destroy(&req->iov);
> > +    }
> >  }
> >  
> >  static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
> > @@ -297,15 +305,14 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> >  
> >      status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
> >      if (status) {
> > -        goto unmap;
> > +        return status;
> >      }
> >  
> >      len -= trans_len;
> >      if (len) {
> >          if (unlikely(!prp2)) {
> >              trace_pci_nvme_err_invalid_prp2_missing();
> > -            status = NVME_INVALID_FIELD | NVME_DNR;
> > -            goto unmap;
> > +            return NVME_INVALID_FIELD | NVME_DNR;
> >          }
> >  
> >          if (len > n->page_size) {
> > @@ -326,13 +333,11 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> >                  if (i == n->max_prp_ents - 1 && len > n->page_size) {
> >                      if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
> >                          trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> > -                        status = NVME_INVALID_FIELD | NVME_DNR;
> > -                        goto unmap;
> > +                        return NVME_INVALID_FIELD | NVME_DNR;
> >                      }
> >  
> >                      if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
> > -                        status = NVME_INVALID_USE_OF_CMB | NVME_DNR;
> > -                        goto unmap;
> > +                        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
> >                      }
> >  
> >                      i = 0;
> > @@ -345,14 +350,13 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> >  
> >                  if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
> >                      trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> > -                    status = NVME_INVALID_FIELD | NVME_DNR;
> > -                    goto unmap;
> > +                    return NVME_INVALID_FIELD | NVME_DNR;
> >                  }
> >  
> >                  trans_len = MIN(len, n->page_size);
> >                  status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
> >                  if (status) {
> > -                    goto unmap;
> > +                    return status;
> >                  }
> >  
> >                  len -= trans_len;
> > @@ -361,27 +365,16 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> >          } else {
> >              if (unlikely(prp2 & (n->page_size - 1))) {
> >                  trace_pci_nvme_err_invalid_prp2_align(prp2);
> > -                status = NVME_INVALID_FIELD | NVME_DNR;
> > -                goto unmap;
> > +                return NVME_INVALID_FIELD | NVME_DNR;
> >              }
> >              status = nvme_map_addr(n, qsg, iov, prp2, len);
> >              if (status) {
> > -                goto unmap;
> > +                return status;
> >              }
> >          }
> >      }
> > +
> >      return NVME_SUCCESS;
> > -
> > -unmap:
> > -    if (iov && iov->iov) {
> > -        qemu_iovec_destroy(iov);
> > -    }
> > -
> > -    if (qsg && qsg->sg) {
> > -        qemu_sglist_destroy(qsg);
> > -    }
> > -
> > -    return status;
> >  }
> >  
> >  static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> > @@ -601,13 +594,6 @@ static void nvme_rw_cb(void *opaque, int ret)
> >          req->status = NVME_INTERNAL_DEV_ERROR;
> >      }
> >  
> > -    if (req->qsg.nalloc) {
> > -        qemu_sglist_destroy(&req->qsg);
> > -    }
> > -    if (req->iov.nalloc) {
> > -        qemu_iovec_destroy(&req->iov);
> > -    }
> > -
> >      nvme_enqueue_req_completion(cq, req);
> >  }
> >  
> 
> This and the previous patch, I guess, answer my own question about why the
> request is cleared after its cqe got posted.
> 
> Looks reasonable.
> 

I ended up with a compromise. I keep clearing as a "before-use" job, but
we don't want to keep the qsg and iov hanging around until the request
gets reused, so I'm adding an nvme_req_exit() to free that memory once
the cqe has been posted.
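Roughly, the split looks something like this (just a sketch; the exact
call site and final shape of nvme_req_exit() may differ):

    static void nvme_req_clear(NvmeRequest *req)
    {
        /* reset per-request state when the slot is taken into use */
        req->ns = NULL;
        memset(&req->cqe, 0x0, sizeof(req->cqe));
    }

    static void nvme_req_exit(NvmeRequest *req)
    {
        /* free the qsg/iov as soon as the cqe has been posted instead
         * of holding the memory until the request slot is reused */
        if (req->qsg.sg) {
            qemu_sglist_destroy(&req->qsg);
        }

        if (req->iov.iov) {
            qemu_iovec_destroy(&req->iov);
        }
    }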


