Re: [Qemu-devel] [PATCH v2] vhost-user: fix VHOST_USER_SET_MEM_TABLE


From: Damjan Marion (damarion)
Subject: Re: [Qemu-devel] [PATCH v2] vhost-user: fix VHOST_USER_SET_MEM_TABLE
Date: Thu, 26 Jun 2014 11:35:13 +0000

On 26 Jun 2014, at 13:30, Michael S. Tsirkin <address@hidden> wrote:

> On Thu, Jun 26, 2014 at 12:22:59PM +0200, Damjan Marion wrote:
>> Old code was affected by memory gaps which
>> resulted in buffer pointers pointing to
>> addresses outside of the mapped regions.
>> 
>> Signed-off-by: Damjan Marion <address@hidden>
>> ---
> 
> changelog?

Ok

> 
> does not look like all comments have been addressed.

Like?

> 
>> docs/specs/vhost-user.txt |  7 ++++---
>> exec.c                    |  7 +++++++
>> hw/virtio/vhost-user.c    | 23 ++++++++++++++---------
>> include/exec/ram_addr.h   |  1 +
>> 4 files changed, 26 insertions(+), 12 deletions(-)
>> 
>> diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
>> index 2641390..c108d07 100644
>> --- a/docs/specs/vhost-user.txt
>> +++ b/docs/specs/vhost-user.txt
>> @@ -78,13 +78,14 @@ Depending on the request type, payload can be:
>>    Padding: 32-bit
>> 
>>    A region is:
>> -   ---------------------------------------
>> -   | guest address | size | user address |
>> -   ---------------------------------------
>> +   -----------------------------------------------------------
>> +   | guest address | size | user address | shared mem offset |
>> +   -----------------------------------------------------------
>> 
>>    Guest address: a 64-bit guest address of the region
>>    Size: a 64-bit size
>>    User address: a 64-bit user address
>> +   Shared mem offset: 64-bit offset where region is located in the shared memory
>> 
>> 
>> In QEMU the vhost-user message is implemented with the following struct:
>> diff --git a/exec.c b/exec.c
>> index c849405..a94c583 100644
>> --- a/exec.c
>> +++ b/exec.c
>> @@ -1456,6 +1456,13 @@ int qemu_get_ram_fd(ram_addr_t addr)
>>     return block->fd;
>> }
>> 
>> +void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
>> +{
>> +    RAMBlock *block = qemu_get_ram_block(addr);
>> +
>> +    return block->host;
>> +}
>> +
>> /* Return a host pointer to ram allocated with qemu_ram_alloc.
>>    With the exception of the softmmu code in this file, this should
>>    only be used for local memory (e.g. video ram) that the device owns,
>> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>> index 0df6a93..b9d7baa 100644
>> --- a/hw/virtio/vhost-user.c
>> +++ b/hw/virtio/vhost-user.c
>> @@ -14,6 +14,7 @@
>> #include "sysemu/kvm.h"
>> #include "qemu/error-report.h"
>> #include "qemu/sockets.h"
>> +#include "exec/ram_addr.h"
>> 
>> #include <fcntl.h>
>> #include <unistd.h>
>> @@ -47,6 +48,7 @@ typedef struct VhostUserMemoryRegion {
>>     uint64_t guest_phys_addr;
>>     uint64_t memory_size;
>>     uint64_t userspace_addr;
>> +    uint64_t shm_offset;
>> } VhostUserMemoryRegion;
>> 
>> typedef struct VhostUserMemory {
>> @@ -183,10 +185,10 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
>> {
>>     VhostUserMsg msg;
>>     VhostUserRequest msg_request;
>> -    RAMBlock *block = 0;
>>     struct vhost_vring_file *file = 0;
>>     int need_reply = 0;
>>     int fds[VHOST_MEMORY_MAX_NREGIONS];
>> +    int i, fd;
>>     size_t fd_num = 0;
>> 
>>     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
>> @@ -212,16 +214,19 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
>>         break;
>> 
>>     case VHOST_SET_MEM_TABLE:
>> -        QTAILQ_FOREACH(block, &ram_list.blocks, next)
>> -        {
>> -            if (block->fd > 0) {
>> -                msg.memory.regions[fd_num].userspace_addr =
>> -                    (uintptr_t) block->host;
>> -                msg.memory.regions[fd_num].memory_size = block->length;
>> -                msg.memory.regions[fd_num].guest_phys_addr = block->offset;
>> -                fds[fd_num++] = block->fd;
>> +        for (i = 0; i < dev->mem->nregions; ++i) {
>> +            struct vhost_memory_region *reg = dev->mem->regions + i;
>> +            fd = qemu_get_ram_fd(reg->guest_phys_addr);
>> +            if (fd > 0) {
>> +                msg.memory.regions[fd_num].userspace_addr = reg->userspace_addr;
>> +                msg.memory.regions[fd_num].memory_size  = reg->memory_size;
>> +                msg.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr;
>> +                msg.memory.regions[fd_num].shm_offset = reg->userspace_addr -
>> +                    (uint64_t) qemu_get_ram_block_host_ptr(reg->guest_phys_addr);
>> +                fds[fd_num++] = fd;
>>             }
>>         }
>> +        assert(fd_num <= VHOST_MEMORY_MAX_NREGIONS);
>> 
>>         msg.memory.nregions = fd_num;
>> 
>> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
>> index 55ca676..e9eb831 100644
>> --- a/include/exec/ram_addr.h
>> +++ b/include/exec/ram_addr.h
>> @@ -29,6 +29,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
>>                                    MemoryRegion *mr);
>> ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
>> int qemu_get_ram_fd(ram_addr_t addr);
>> +void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
>> void *qemu_get_ram_ptr(ram_addr_t addr);
>> void qemu_ram_free(ram_addr_t addr);
>> void qemu_ram_free_from_ptr(ram_addr_t addr);
>> -- 
>> 1.9.1
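
For context on what the new field buys the client side: the shared mem offset lets a
vhost-user backend mmap() the memory fd it received with the message and locate each
region inside that mapping, instead of assuming the regions sit back-to-back. The
sketch below is illustrative only and not part of this patch: the helper names
map_region() and gpa_to_ptr() are made up, it assumes shm_offset is page-aligned
(as mmap() requires for its offset), and it maps each region separately even though
a real backend might map the whole object once.

/* Hypothetical backend-side usage of the proposed shm_offset field. */
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/mman.h>

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t shm_offset;
} VhostUserMemoryRegion;

/* Map the part of the shared memory object that backs this region,
 * using the fd passed as ancillary data with the message. */
static void *map_region(int fd, const VhostUserMemoryRegion *reg)
{
    void *p = mmap(NULL, reg->memory_size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, fd, (off_t)reg->shm_offset);
    return p == MAP_FAILED ? NULL : p;
}

/* Translate a guest physical address that falls inside this region
 * into a pointer within the local mapping returned by map_region(). */
static void *gpa_to_ptr(void *base, const VhostUserMemoryRegion *reg,
                        uint64_t gpa)
{
    if (gpa < reg->guest_phys_addr ||
        gpa - reg->guest_phys_addr >= reg->memory_size) {
        return NULL;        /* address outside this mapped region */
    }
    return (uint8_t *)base + (gpa - reg->guest_phys_addr);
}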



