From: Stefano Stabellini
Subject: Re: [Qemu-devel] [PATCH v3] xen_disk: split discard input to match internal representation
Date: Wed, 23 Nov 2016 10:46:18 -0800 (PST)
User-agent: Alpine 2.10 (DEB 1266 2009-07-14)

On Wed, 23 Nov 2016, Olaf Hering wrote:
> The guest sends discard requests as u64 sector/count pairs, but the
> block layer operates internally with s64/s32 pairs. The conversion
> leads to IO errors in the guest; the discard request is not processed.
> 
>   domU.cfg:
>   'vdev=xvda, format=qcow2, backendtype=qdisk, target=/x.qcow2'
>   domU:
>   mkfs.ext4 -F /dev/xvda
>   Discarding device blocks: failed - Input/output error
> 
> Fix this by splitting the request into chunks of BDRV_REQUEST_MAX_SECTORS.
> Add input range checking to avoid overflow.
> 
> Fixes f313520 ("xen_disk: add discard support")
> 
> Signed-off-by: Olaf Hering <address@hidden>

Reviewed-by: Stefano Stabellini <address@hidden>


> v3:
>  turn tab into spaces to fix checkpatch warning
> v2:
>  adjust overflow check
>  add Fixes revspec because the initial commit also failed to convert u64 to s32
>  adjust summary
> 
>  hw/block/xen_disk.c | 42 ++++++++++++++++++++++++++++++++++++------
>  1 file changed, 36 insertions(+), 6 deletions(-)
> 
> diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
> index 3a7dc19..456a2d5 100644
> --- a/hw/block/xen_disk.c
> +++ b/hw/block/xen_disk.c
> @@ -660,6 +660,38 @@ static void qemu_aio_complete(void *opaque, int ret)
>      qemu_bh_schedule(ioreq->blkdev->bh);
>  }
>  
> +static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
> +                              uint64_t nr_sectors)
> +{
> +    struct XenBlkDev *blkdev = ioreq->blkdev;
> +    int64_t byte_offset;
> +    int byte_chunk;
> +    uint64_t byte_remaining, limit;
> +    uint64_t sec_start = sector_number;
> +    uint64_t sec_count = nr_sectors;
> +
> +    /* Wrap around, or overflowing byte limit? */
> +    if (sec_start + sec_count < sec_count ||
> +        sec_start + sec_count > INT64_MAX >> BDRV_SECTOR_BITS) {
> +        return false;
> +    }
> +
> +    limit = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
> +    byte_offset = sec_start << BDRV_SECTOR_BITS;
> +    byte_remaining = sec_count << BDRV_SECTOR_BITS;
> +
> +    do {
> +        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
> +        ioreq->aio_inflight++;
> +        blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
> +                         qemu_aio_complete, ioreq);
> +        byte_remaining -= byte_chunk;
> +        byte_offset += byte_chunk;
> +    } while (byte_remaining > 0);
> +
> +    return true;
> +}
> +
>  static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
>  {
>      struct XenBlkDev *blkdev = ioreq->blkdev;
> @@ -708,12 +740,10 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
>          break;
>      case BLKIF_OP_DISCARD:
>      {
> -        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
> -        ioreq->aio_inflight++;
> -        blk_aio_pdiscard(blkdev->blk,
> -                         discard_req->sector_number << BDRV_SECTOR_BITS,
> -                         discard_req->nr_sectors << BDRV_SECTOR_BITS,
> -                         qemu_aio_complete, ioreq);
> +        struct blkif_request_discard *req = (void *)&ioreq->req;
> +        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
> +            goto err;
> +        }
>          break;
>      }
>      default:
> 
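(Not part of the patch above: the following is a minimal, standalone sketch of
the splitting logic the commit message describes, so it can be compiled and
tried on its own. The constant values and the issue_discard()/split_discard()
names are stand-ins for BDRV_SECTOR_BITS, BDRV_REQUEST_MAX_SECTORS and
blk_aio_pdiscard(), not the real QEMU block-layer API.)

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_BITS  9         /* stand-in for BDRV_SECTOR_BITS */
    #define MAX_SECTORS  0x3fffff  /* stand-in for BDRV_REQUEST_MAX_SECTORS */

    /* Stand-in for blk_aio_pdiscard(): just report each chunk. */
    static void issue_discard(int64_t offset, int bytes)
    {
        printf("discard: offset=%" PRId64 " bytes=%d\n", offset, bytes);
    }

    static bool split_discard(uint64_t sector_number, uint64_t nr_sectors)
    {
        uint64_t limit = (uint64_t)MAX_SECTORS << SECTOR_BITS;
        int64_t byte_offset;
        uint64_t byte_remaining;
        int byte_chunk;

        /* Reject requests that wrap around the u64 sector space or that
         * exceed the signed 64-bit byte range used internally. */
        if (sector_number + nr_sectors < nr_sectors ||
            sector_number + nr_sectors > (uint64_t)INT64_MAX >> SECTOR_BITS) {
            return false;
        }

        byte_offset = sector_number << SECTOR_BITS;
        byte_remaining = nr_sectors << SECTOR_BITS;

        do {
            /* Each chunk fits the block layer's signed 32-bit byte count. */
            byte_chunk = byte_remaining > limit ? limit : byte_remaining;
            issue_discard(byte_offset, byte_chunk);
            byte_remaining -= byte_chunk;
            byte_offset += byte_chunk;
        } while (byte_remaining > 0);

        return true;
    }

    int main(void)
    {
        /* A 4 GiB discard starting at sector 0 is issued as three chunks. */
        split_discard(0, ((uint64_t)4 * 1024 * 1024 * 1024) >> SECTOR_BITS);
        return 0;
    }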


