Re: [PATCH v6 35/42] nvme: handle dma errors
From: Maxim Levitsky
Subject: Re: [PATCH v6 35/42] nvme: handle dma errors
Date: Wed, 25 Mar 2020 12:58:25 +0200
On Mon, 2020-03-16 at 07:29 -0700, Klaus Jensen wrote:
> From: Klaus Jensen <address@hidden>
>
> Handling DMA errors gracefully is required for the device to pass the
> block/011 test ("disable PCI device while doing I/O") in the blktests
> suite.
>
> With this patch the device passes the test by retrying "critical"
> transfers (posting of completion entries and processing of submission
> queue entries).
>
> If DMA errors occur at any other point in the execution of the command
> (say, while mapping the PRPs), the command is aborted with a Data
> Transfer Error status code.
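
The split makes sense to me: as I read it, a DMA error while the controller is
doing its own queue housekeeping (fetching SQEs, posting CQEs) is treated as
transient and retried, while a DMA error while transferring command data fails
just that command. Condensed from the hunks below (the names are the patch's
own), the two paths are roughly:

    /* command data transfer: fail the command with Data Transfer Error */
    ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
    if (ret) {
        trace_nvme_dev_err_addr_read(prp2);
        status = NVME_DATA_TRANSFER_ERROR;
        goto unmap;
    }

    /* queue processing: back off and retry ~500 ms later */
    if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
        trace_nvme_dev_err_addr_read(addr);
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  500 * SCALE_MS);
        break;
    }
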
>
> Signed-off-by: Klaus Jensen <address@hidden>
> Acked-by: Keith Busch <address@hidden>
> ---
> hw/block/nvme.c | 45 ++++++++++++++++++++++++++++++++-----------
> hw/block/trace-events | 2 ++
> include/block/nvme.h | 2 +-
> 3 files changed, 37 insertions(+), 12 deletions(-)
>
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 15ca2417af04..49d323566393 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -74,14 +74,14 @@ static inline bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
> return addr >= low && addr < hi;
> }
>
> -static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
> +static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
> {
> if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
> memcpy(buf, nvme_addr_to_cmb(n, addr), size);
> - return;
> + return 0;
> }
>
> - pci_dma_read(&n->parent_obj, addr, buf, size);
> + return pci_dma_read(&n->parent_obj, addr, buf, size);
> }
>
> static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
> @@ -164,7 +164,7 @@ static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
> size_t len)
> {
> if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len)) {
> - return NVME_DATA_TRAS_ERROR;
> + return NVME_DATA_TRANSFER_ERROR;
Minor nitpick: this is also a non-functional refactoring.
I don't think that each piece of refactoring needs to be in a separate patch,
so I usually group all the non-functional (aka cosmetic) refactoring into one
patch, usually the first in the series, and try not to leave such refactoring
in the functional patches.
However, since there aren't that many cases like that left, I don't mind
leaving this particular one as is.
> }
>
> qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
> @@ -213,6 +213,7 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> int num_prps = (len >> n->page_bits) + 1;
> uint16_t status;
> bool prp_list_in_cmb = false;
> + int ret;
>
> trace_nvme_dev_map_prp(nvme_cid(req), trans_len, len, prp1, prp2,
> num_prps);
> @@ -252,7 +253,12 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
>
> nents = (len + n->page_size - 1) >> n->page_bits;
> prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
> - nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
> + ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
> + if (ret) {
> + trace_nvme_dev_err_addr_read(prp2);
> + status = NVME_DATA_TRANSFER_ERROR;
> + goto unmap;
> + }
> while (len != 0) {
> uint64_t prp_ent = le64_to_cpu(prp_list[i]);
>
> @@ -271,8 +277,13 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> i = 0;
> nents = (len + n->page_size - 1) >> n->page_bits;
> prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
> - nvme_addr_read(n, prp_ent, (void *)prp_list,
> - prp_trans);
> + ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
> + prp_trans);
> + if (ret) {
> + trace_nvme_dev_err_addr_read(prp_ent);
> + status = NVME_DATA_TRANSFER_ERROR;
> + goto unmap;
> + }
> prp_ent = le64_to_cpu(prp_list[i]);
> }
>
> @@ -466,6 +477,7 @@ static void nvme_post_cqes(void *opaque)
> NvmeCQueue *cq = opaque;
> NvmeCtrl *n = cq->ctrl;
> NvmeRequest *req, *next;
> + int ret;
>
> QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
> NvmeSQueue *sq;
> @@ -475,15 +487,21 @@ static void nvme_post_cqes(void *opaque)
> break;
> }
>
> - QTAILQ_REMOVE(&cq->req_list, req, entry);
> sq = req->sq;
> req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
> req->cqe.sq_id = cpu_to_le16(sq->sqid);
> req->cqe.sq_head = cpu_to_le16(sq->head);
> addr = cq->dma_addr + cq->tail * n->cqe_size;
> + ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
> + sizeof(req->cqe));
> + if (ret) {
> + trace_nvme_dev_err_addr_write(addr);
> + timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
> + 500 * SCALE_MS);
OK, this looks good.
> + break;
> + }
> + QTAILQ_REMOVE(&cq->req_list, req, entry);
> nvme_inc_cq_tail(cq);
> - pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
> - sizeof(req->cqe));
> nvme_req_clear(req);
> QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
> }
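
I also like that QTAILQ_REMOVE is now done only after the CQE has actually been
written: a failed pci_dma_write leaves the request on cq->req_list, so the
re-armed timer simply retries the same entry. Condensed, the new ordering is:

    ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
                        sizeof(req->cqe));
    if (ret) {
        trace_nvme_dev_err_addr_write(addr);
        /* req stays on cq->req_list; the timer callback retries the post */
        timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  500 * SCALE_MS);
        break;
    }
    QTAILQ_REMOVE(&cq->req_list, req, entry); /* only after a successful post */
    nvme_inc_cq_tail(cq);
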
> @@ -1650,7 +1668,12 @@ static void nvme_process_sq(void *opaque)
>
> while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
> addr = sq->dma_addr + sq->head * n->sqe_size;
> - nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
> + if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
> + trace_nvme_dev_err_addr_read(addr);
> + timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
> + 500 * SCALE_MS);
> + break;
> + }
> nvme_inc_sq_head(sq);
>
> req = QTAILQ_FIRST(&sq->req_list);
> diff --git a/hw/block/trace-events b/hw/block/trace-events
> index aa449e314818..d51c09a4e454 100644
> --- a/hw/block/trace-events
> +++ b/hw/block/trace-events
> @@ -87,6 +87,8 @@ nvme_dev_mmio_doorbell_sq(uint16_t sqid, uint16_t new_tail) "cqid %"PRIu16" new_
> nvme_dev_err_mdts(uint16_t cid, size_t mdts, size_t len) "cid %"PRIu16" mdts %"PRIu64" len %"PRIu64""
> nvme_dev_err_prinfo(uint16_t cid, uint16_t ctrl) "cid %"PRIu16" ctrl %"PRIu16""
> nvme_dev_err_aio(uint16_t cid, void *aio, const char *blkname, uint64_t offset, const char *opc, void *req, uint16_t status) "cid %"PRIu16" aio %p blk \"%s\" offset %"PRIu64" opc \"%s\" req %p status 0x%"PRIx16""
> +nvme_dev_err_addr_read(uint64_t addr) "addr 0x%"PRIx64""
> +nvme_dev_err_addr_write(uint64_t addr) "addr 0x%"PRIx64""
> nvme_dev_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
> nvme_dev_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
> nvme_dev_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
> diff --git a/include/block/nvme.h b/include/block/nvme.h
> index 293d68553538..d1ccde4cda4b 100644
> --- a/include/block/nvme.h
> +++ b/include/block/nvme.h
> @@ -458,7 +458,7 @@ enum NvmeStatusCodes {
> NVME_INVALID_OPCODE = 0x0001,
> NVME_INVALID_FIELD = 0x0002,
> NVME_CID_CONFLICT = 0x0003,
> - NVME_DATA_TRAS_ERROR = 0x0004,
> + NVME_DATA_TRANSFER_ERROR = 0x0004,
> NVME_POWER_LOSS_ABORT = 0x0005,
> NVME_INTERNAL_DEV_ERROR = 0x0006,
> NVME_CMD_ABORT_REQ = 0x0007,
Reviewed-by: Maxim Levitsky <address@hidden>
Best regards,
Maxim Levitsky