[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH v5 3/7] vhost: Simplify ring verification checks
From: |
Dr. David Alan Gilbert |
Subject: |
Re: [Qemu-devel] [PATCH v5 3/7] vhost: Simplify ring verification checks |
Date: |
Tue, 16 Jan 2018 16:48:03 +0000 |
User-agent: |
Mutt/1.9.1 (2017-09-22) |
* Igor Mammedov (address@hidden) wrote:
> On Mon, 18 Dec 2017 20:13:36 +0000
> "Dr. David Alan Gilbert (git)" <address@hidden> wrote:
>
> > From: "Dr. David Alan Gilbert" <address@hidden>
> >
> > vhost_verify_ring_mappings() were used to verify that
> > rings are still accessible and related memory hasn't
> > been moved after flatview is updated.
> >
> > It was doing checks by mapping ring's GPA+len and
> > checking that HVA hadn't changed with new memory map.
> > To avoid maybe expensive mapping call, we were
> > identifying address range that changed and were doing
> > mapping only if ring was in changed range.
> >
> > However it's not necessary to perform ring's GPA
> > mapping as we already have its current HVA and all
> > we need is to verify that ring's GPA translates to
> > the same HVA in updated flatview.
> >
> > This will allow the following patches to simplify the range
> > comparison that was previously needed to avoid expensive
> > verify_ring_mapping calls.
> >
> > Signed-off-by: Igor Mammedov <address@hidden>
> > with modifications by:
> > Signed-off-by: Dr. David Alan Gilbert <address@hidden>
>
> an additional question,
>
> in the iommu case ring_hva == ring_gpa if we look into vhost_memory_map();
> have you checked whether the iommu case is working with the new code?
It seems to be; I've tested a simple dpdk test that Maxime gave me.
Dave
>
> > ---
> > hw/virtio/vhost.c | 75
> > +++++++++++++++++++++++++++++--------------------------
> > 1 file changed, 40 insertions(+), 35 deletions(-)
> >
> > diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> > index d4710fc05c..18611f0d40 100644
> > --- a/hw/virtio/vhost.c
> > +++ b/hw/virtio/vhost.c
> > @@ -450,35 +450,37 @@ static void vhost_memory_unmap(struct vhost_dev *dev,
> > void *buffer,
> > }
> > }
> >
> > -static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
> > - void *part,
> > - uint64_t part_addr,
> > - uint64_t part_size,
> > - uint64_t start_addr,
> > - uint64_t size)
> > +static int vhost_verify_ring_part_mapping(void *ring_hva,
> > + uint64_t ring_gpa,
> > + uint64_t ring_size,
> > + void *reg_hva,
> > + uint64_t reg_gpa,
> > + uint64_t reg_size)
> > {
> > - hwaddr l;
> > - void *p;
> > - int r = 0;
> > + uint64_t hva_ring_offset;
> > + uint64_t ring_last = range_get_last(ring_gpa, ring_size);
> > + uint64_t reg_last = range_get_last(reg_gpa, reg_size);
> >
> > - if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
> > + if (ring_last < reg_gpa || ring_gpa > reg_last) {
> > return 0;
> > }
> > - l = part_size;
> > - p = vhost_memory_map(dev, part_addr, &l, 1);
> > - if (!p || l != part_size) {
> > - r = -ENOMEM;
> > +    /* check that the whole ring is mapped */
> > + if (ring_last > reg_last) {
> > + return -ENOMEM;
> > }
> > - if (p != part) {
> > - r = -EBUSY;
> > + /* check that ring's MemoryRegion wasn't replaced */
> > + hva_ring_offset = ring_gpa - reg_gpa;
> > + if (ring_hva != reg_hva + hva_ring_offset) {
> > + return -EBUSY;
> > }
> > - vhost_memory_unmap(dev, p, l, 0, 0);
> > - return r;
> > +
> > + return 0;
> > }
> >
> > static int vhost_verify_ring_mappings(struct vhost_dev *dev,
> > - uint64_t start_addr,
> > - uint64_t size)
> > + void *reg_hva,
> > + uint64_t reg_gpa,
> > + uint64_t reg_size)
> > {
> > int i, j;
> > int r = 0;
> > @@ -492,22 +494,25 @@ static int vhost_verify_ring_mappings(struct
> > vhost_dev *dev,
> > struct vhost_virtqueue *vq = dev->vqs + i;
> >
> > j = 0;
> > - r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
> > - vq->desc_size, start_addr,
> > size);
> > + r = vhost_verify_ring_part_mapping(
> > + vq->desc, vq->desc_phys, vq->desc_size,
> > + reg_hva, reg_gpa, reg_size);
> > if (r) {
> > break;
> > }
> >
> > j++;
> > - r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
> > - vq->avail_size, start_addr,
> > size);
> > + r = vhost_verify_ring_part_mapping(
> > +             vq->avail, vq->avail_phys, vq->avail_size,
> > + reg_hva, reg_gpa, reg_size);
> > if (r) {
> > break;
> > }
> >
> > j++;
> > - r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
> > - vq->used_size, start_addr,
> > size);
> > + r = vhost_verify_ring_part_mapping(
> > +             vq->used, vq->used_phys, vq->used_size,
> > + reg_hva, reg_gpa, reg_size);
> > if (r) {
> > break;
> > }
> > @@ -635,13 +640,11 @@ static void vhost_commit(MemoryListener *listener)
> > {
> > struct vhost_dev *dev = container_of(listener, struct vhost_dev,
> > memory_listener);
> > - hwaddr start_addr = 0;
> > - ram_addr_t size = 0;
> > MemoryRegionSection *old_sections;
> > int n_old_sections;
> > -
> > uint64_t log_size;
> > int r;
> > + int i;
> >
> > old_sections = dev->mem_sections;
> > n_old_sections = dev->n_mem_sections;
> > @@ -658,12 +661,14 @@ static void vhost_commit(MemoryListener *listener)
> > goto out;
> > }
> >
> > - if (dev->started) {
> > - start_addr = dev->mem_changed_start_addr;
> > - size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;
> > -
> > - r = vhost_verify_ring_mappings(dev, start_addr, size);
> > - assert(r >= 0);
> > + for (i = 0; i < dev->mem->nregions; i++) {
> > + if (vhost_verify_ring_mappings(dev,
> > + (void *)dev->mem->regions[i].userspace_addr,
> > + dev->mem->regions[i].guest_phys_addr,
> > + dev->mem->regions[i].memory_size)) {
> > + error_report("Verify ring failure on region %d", i);
> > + abort();
> > + }
> > }
> >
> > if (!dev->log_enabled) {
>
--
Dr. David Alan Gilbert / address@hidden / Manchester, UK