Re: [PATCH v6 1/2] net: tulip: check frame size and r/w data length


From: Jason Wang
Subject: Re: [PATCH v6 1/2] net: tulip: check frame size and r/w data length
Date: Fri, 27 Mar 2020 10:53:22 +0800
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.9.0


On 2020/3/27 10:35 AM, Li Qiang wrote:


Jason Wang <address@hidden> wrote on Fri, Mar 27, 2020 at 10:09 AM:


    On 2020/3/24 10:54 PM, Li Qiang wrote:
    >
    >
    > Jason Wang <address@hidden> wrote on Tue, Mar 24, 2020 at 1:45 PM:
    >
    >
    >     On 2020/3/24 9:29 AM, Li Qiang wrote:
    >     >
    >     >
    >     > P J P <address@hidden> wrote on Mon, Mar 23, 2020 at 8:24 PM:
    >     >
    >     >     From: Prasad J Pandit <address@hidden>
    >     >
    >     >     Tulip network driver while copying tx/rx buffers does not check
    >     >     frame size against r/w data length. This may lead to OOB buffer
    >     >     access. Add check to avoid it.
    >     >
    >     >     Limit iterations over descriptors to avoid potential infinite
    >     >     loop issue in tulip_xmit_list_update.
    >     >
    >     >     Reported-by: Li Qiang <address@hidden>
    >     >     Reported-by: Ziming Zhang <address@hidden>
    >     >     Reported-by: Jason Wang <address@hidden>
    >     >     Signed-off-by: Prasad J Pandit <address@hidden>
    >     >
    >     >
    >     >
    >     > Tested-by: Li Qiang <address@hidden>
    >     > But I have a minor question....
    >     >
    >     >     ---
    >     >      hw/net/tulip.c | 36 +++++++++++++++++++++++++++---------
    >     >      1 file changed, 27 insertions(+), 9 deletions(-)
    >     >
    >     >     Update v3: return a value from tulip_copy_tx_buffers() and avoid
    >     >     infinite loop
    >     >       -> https://lists.gnu.org/archive/html/qemu-devel/2020-02/msg06275.html
    >     >
    >     >     diff --git a/hw/net/tulip.c b/hw/net/tulip.c
    >     >     index cfac2719d3..fbe40095da 100644
    >     >     --- a/hw/net/tulip.c
    >     >     +++ b/hw/net/tulip.c
    >     >     @@ -170,6 +170,10 @@ static void tulip_copy_rx_bytes(TULIPState *s, struct tulip_descriptor *desc)
    >     >              } else {
    >     >                  len = s->rx_frame_len;
    >     >              }
    >     >     +
    >     >     +        if (s->rx_frame_len + len >= sizeof(s->rx_frame)) {
    >     >     +            return;
    >     >     +        }
    >     >
    >     >
    >     >
    >     > Why is '>=' used here instead of '>'?
    >     > IIUC the total sending length can reach sizeof(s->rx_frame).
    >     > The same applies in the other places in this patch.
    >
    >
    >     Yes, this needs to be fixed.
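A sketch of the relaxed bound being discussed (not the posted patch, just to make the boundary concrete: a frame may legitimately fill rx_frame exactly, so only lengths strictly beyond the buffer need to be rejected):

        if (s->rx_frame_len + len > sizeof(s->rx_frame)) {
            return;
        }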
    >
    >
    >     >
    >     > PS: I have planned to write a qtest case, but my personal qemu dev
    >     > environment is broken.
    >     > I will try to write it tonight or tomorrow night.
    >
    >
    >     Cool, good to know this.
    >
    >
    > Hi all,
    > I have encountered an interesting issue. Let's look at the definition of
    > TULIPState.
    >
    >   21 typedef struct TULIPState {
    >   22     PCIDevice dev;
    >   23     MemoryRegion io;
    >   24     MemoryRegion memory;
    >   25     NICConf c;
    >   26     qemu_irq irq;
    >   27     NICState *nic;
    >   28     eeprom_t *eeprom;
    >   29     uint32_t csr[16];
    >   30
    >   31     /* state for MII */
    >   32     uint32_t old_csr9;
    >   33     uint32_t mii_word;
    >   34     uint32_t mii_bitcnt;
    >   35
    >   36     hwaddr current_rx_desc;
    >   37     hwaddr current_tx_desc;
    >   38
    >   39     uint8_t rx_frame[2048];
    >   40     uint8_t tx_frame[2048];
    >   41     uint16_t tx_frame_len;
    >   42     uint16_t rx_frame_len;
    >   43     uint16_t rx_frame_size;
    >   44
    >   45     uint32_t rx_status;
    >   46     uint8_t filter[16][6];
    >   47 } TULIPState;
    >
    > Here we can see the overflow occurs after 'tx_frame'.
    > In my qtest, I have seen the overflow (s->tx_frame_len gets big).
    > However, it doesn't cause a SEGV in qtest.
    > In a real case, the qemu process will access the data after TULIPState
    > in the heap and trigger a segv.
    > However, in qtest mode I don't know how to trigger this.


    If it's just the mangling of tx_frame_len, it won't hit SIGSEGV.

    I wonder whether, somehow, that large tx_frame_len is used for buffer
    copying or other stuff that can lead to the crash.


This is because in the real qemu process, the OOB copy corrupts the heap data after the 'TULIPState' struct. Maybe later another thread accesses the corrupted data, thus leading to a crash.


OK, so I think ASAN can detect this crash. But I'm not sure whether or not it is enabled for qtest. If not, we probably need that.

I wrote a qtest for virtio-net that can lead to an OOB, so it should probably work for tulip too, but I need to check.

Or, if you don't want to depend on ASAN, we can check guest-visible state after tx_frame[], but it looks to me like all the other fields are not visible to the guest.

Maybe we can reorder the struct to place csr[] after tx_frame[], then check csr[] afterwards.
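A rough sketch of how the qtest could then detect the corruption (this assumes csr[] really ends up right after tx_frame[] in the reordered struct, and that CSR3 at register offset 0x18 in the CSR(n) = n * 8 layout simply reads back the value written to it; both assumptions need checking):

    /* plant a canary in a guest-visible register, kick off the oversized
     * transmit from the descriptor list, then verify the canary survived */
    qpci_io_writel(dev, bar, 0x18, 0xdeadbeef);      /* CSR3 canary */
    qpci_io_writel(dev, bar, 0x20, context_pa);      /* CSR4: TX list base */
    qpci_io_writel(dev, bar, 0x30, 1 << 13);         /* CSR6: start transmit */
    g_assert_cmphex(qpci_io_readl(dev, bar, 0x18), ==, 0xdeadbeef);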


However, I don't remember the core code of qtest. It seems it's not really a VM, just an interface emulation?


If my memory is correct, it's not a VM.

Thanks


In my case, the backtrace is like this:
Program received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0x7ffbdb7fe700 (LWP 60813)]
0x0000000000000000 in ?? ()
...
(gdb) bt
#0  0x0000000000000000 in  ()
#1  0x0000555555a7dfa3 in qemu_set_irq (irq=0x5555579e0710, level=1) at hw/core/irq.c:44
#2  0x0000555555b2b87a in tulip_update_int (s=0x5555579da7c0) at hw/net/tulip.c:121
#3  0x0000555555b2cdd9 in tulip_xmit_list_update (s=0x5555579da7c0) at hw/net/tulip.c:667
#4  0x0000555555b2d19d in tulip_write (opaque=0x5555579da7c0, addr=32, data=931909632, size=4) at hw/net/tulip.c:759
#5  0x000055555587eed1 in memory_region_write_accessor (mr=0x5555579db0b0, addr=32, value=0x7ffbdb7fd6a8, size=4, shift=0, mask=4294967295, attrs=...) at /xxx/qemu/memory.c:483
#6  0x000055555587f0da in access_with_adjusted_size (addr=32, value=0x7ffbdb7fd6a8, size=4, access_size_min=4, access_size_max=4, access_fn=0x55555587edf2 <memory_region_write_accessor>, mr=0x5555579db0b0, attrs=...) at /xxx/qemu/memory.c:544
#7  0x000055555588213b in memory_region_dispatch_write (mr=0x5555579db0b0, addr=32, data=931909632, op=MO_32, attrs=...) at /xxx/qemu/memory.c:1476
#8  0x000055555581fe9c in flatview_write_continue (fv=0x7ffbb001eae0, addr=49184, attrs=..., ptr=0x7ffff7e13000, len=4, addr1=32, l=4, mr=0x5555579db0b0) at /xxx/qemu/exec.c:3125
#9  0x000055555581fff4 in flatview_write (fv=0x7ffbb001eae0, addr=49184, attrs=..., buf=0x7ffff7e13000, len=4) at /xxx/qemu/exec.c:3165
#10 0x0000555555820368 in address_space_write (as=0x555556762560 <address_space_io>, addr=49184, attrs=..., buf=0x7ffff7e13000, len=4) at /xxx/qemu/exec.c:3256
#11 0x00005555558203da in address_space_rw (as=0x555556762560 <address_space_io>, addr=49184, attrs=..., buf=0x7ffff7e13000, len=4, is_write=true) at /xxx/qemu/exec.c:3266
#12 0x000055555589723b in kvm_handle_io (port=49184, attrs=..., data=0x7ffff7e13000, direction=1, size=4, count=1) at /xxx/qemu/accel/kvm/kvm-all.c:2140
#13 0x00005555558979d6 in kvm_cpu_exec (cpu=0x555556b1e220) at /xxx/qemu/accel/kvm/kvm-all.c:2386
#14 0x00005555558701c5 in qemu_kvm_cpu_thread_fn (arg=0x555556b1e220) at /xxx/qemu/cpus.c:1246
#15 0x0000555555de3ce4 in qemu_thread_start (args=0x555556b44040) at util/qemu-thread-posix.c:519
#16 0x00007ffff5bb0e25 in start_thread () at /lib64/libpthread.so.0
#17 0x00007ffff58d8f1d in clone () at /lib64/libc.so.6
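For reference, frame #1 is the indirect call through the IRQ handler in hw/core/irq.c; a sketch of qemu_set_irq() (paraphrased from memory, exact code may differ between versions) shows why a PC of 0x0 in frame #0 suggests the handler pointer itself was overwritten by the corruption:

    /* hw/core/irq.c (sketch) */
    void qemu_set_irq(qemu_irq irq, int level)
    {
        if (!irq) {
            return;
        }
        /* around line 44: if irq->handler has been clobbered with 0,
         * this call jumps to address 0 and raises the SIGSEGV above */
        irq->handler(irq->opaque, irq->n, level);
    }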

I will try to dig into the qtest code and hope to find a way to trigger a segv in qtest.

Thanks,
Li Qiang


    Thanks


    >
    > The core code is like this:
    >
    >     qpci_device_enable(dev);
    >     bar = qpci_iomap(dev, 0, NULL);
    >     context_pa = guest_alloc(alloc, sizeof(context));
    >     guest_pa = guest_alloc(alloc, 4096);
    >     memset(guest_data, 'A', sizeof(guest_data));
    >
    >     /* every descriptor is device-owned (OWN bit) with both buffer
    >      * lengths at the 0x7ff maximum, so a single descriptor already
    >      * asks for more than the 2048-byte tx_frame */
    >     context[0].status = 1 << 31;
    >     context[0].control = 0x7ff << 11 | 0x7ff;
    >     context[0].buf_addr2 = context_pa + sizeof(struct tulip_descriptor);
    >     context[0].buf_addr1 = guest_pa;
    >     for (i = 1; i < ARRAY_SIZE(context); ++i) {
    >         context_pa += sizeof(struct tulip_descriptor);
    >         context[i].status = 1 << 31;
    >         context[i].control = 0x7ff << 11 | 0x7ff;
    >         context[i].buf_addr2 = context_pa + sizeof(struct tulip_descriptor);
    >         context[i].buf_addr1 = guest_pa;
    >     }
    >
    >     qtest_memwrite(dev->bus->qts, context_pa, context, sizeof(context));
    >     qtest_memwrite(dev->bus->qts, guest_pa, guest_data, sizeof(guest_data));
    >     qpci_io_writel(dev, bar, 0x20, context_pa);    /* CSR4: TX list base */
    >     qpci_io_writel(dev, bar, 0x30, 1 << 13);       /* CSR6 ST: start transmit */
    >
    > Maybe Paolo can give some hints?
    >
    > Thanks,
    > Li Qiang
    >
    >     Thanks
    >
    >
    >     >
    >     > Thanks,
    >     > Li Qiang
    >     >
    >     >
    >     >
    >     >
    >     >              pci_dma_write(&s->dev, desc->buf_addr1, s->rx_frame +
    >     >                  (s->rx_frame_size - s->rx_frame_len), len);
    >     >              s->rx_frame_len -= len;
    >     >     @@ -181,6 +185,10 @@ static void tulip_copy_rx_bytes(TULIPState *s, struct tulip_descriptor *desc)
    >     >              } else {
    >     >                  len = s->rx_frame_len;
    >     >              }
    >     >     +
    >     >     +        if (s->rx_frame_len + len >= sizeof(s->rx_frame)) {
    >     >     +            return;
    >     >     +        }
    >     >              pci_dma_write(&s->dev, desc->buf_addr2, s->rx_frame +
    >     >                  (s->rx_frame_size - s->rx_frame_len), len);
    >     >              s->rx_frame_len -= len;
    >     >     @@ -227,7 +235,8 @@ static ssize_t tulip_receive(TULIPState *s, const uint8_t *buf, size_t size)
    >     >
    >     >          trace_tulip_receive(buf, size);
    >     >
    >     >     -    if (size < 14 || size > 2048 || s->rx_frame_len || tulip_rx_stopped(s)) {
    >     >     +    if (size < 14 || size > sizeof(s->rx_frame) - 4
    >     >     +        || s->rx_frame_len || tulip_rx_stopped(s)) {
    >     >              return 0;
    >     >          }
    >     >
    >     >     @@ -275,7 +284,6 @@ static ssize_t tulip_receive_nc(NetClientState *nc,
    >     >          return tulip_receive(qemu_get_nic_opaque(nc), buf, size);
    >     >      }
    >     >
    >     >     -
    >     >      static NetClientInfo net_tulip_info = {
    >     >          .type = NET_CLIENT_DRIVER_NIC,
    >     >          .size = sizeof(NICState),
    >     >     @@ -558,7 +566,7 @@ static void tulip_tx(TULIPState *s, struct tulip_descriptor *desc)
    >     >              if ((s->csr[6] >> CSR6_OM_SHIFT) & CSR6_OM_MASK) {
    >     >                  /* Internal or external Loopback */
    >     >                  tulip_receive(s, s->tx_frame, s->tx_frame_len);
    >     >     -        } else {
    >     >     +        } else if (s->tx_frame_len <= sizeof(s->tx_frame)) {
    >     >                  qemu_send_packet(qemu_get_queue(s->nic),
    >     >                      s->tx_frame, s->tx_frame_len);
    >     >              }
    >     >     @@ -570,23 +578,31 @@ static void tulip_tx(TULIPState *s, struct tulip_descriptor *desc)
    >     >          }
    >     >      }
    >     >
    >     >     -static void tulip_copy_tx_buffers(TULIPState *s, struct tulip_descriptor *desc)
    >     >     +static int tulip_copy_tx_buffers(TULIPState *s, struct tulip_descriptor *desc)
    >     >      {
    >     >          int len1 = (desc->control >> TDES1_BUF1_SIZE_SHIFT) & TDES1_BUF1_SIZE_MASK;
    >     >          int len2 = (desc->control >> TDES1_BUF2_SIZE_SHIFT) & TDES1_BUF2_SIZE_MASK;
    >     >
    >     >     +    if (s->tx_frame_len + len1 >= sizeof(s->tx_frame)) {
    >     >     +        return -1;
    >     >     +    }
    >     >          if (len1) {
    >     >              pci_dma_read(&s->dev, desc->buf_addr1,
    >     >                  s->tx_frame + s->tx_frame_len, len1);
    >     >              s->tx_frame_len += len1;
    >     >          }
    >     >
    >     >     +    if (s->tx_frame_len + len2 >= sizeof(s->tx_frame)) {
    >     >     +        return -1;
    >     >     +    }
    >     >          if (len2) {
    >     >              pci_dma_read(&s->dev, desc->buf_addr2,
    >     >                  s->tx_frame + s->tx_frame_len, len2);
    >     >              s->tx_frame_len += len2;
    >     >          }
    >     >          desc->status = (len1 + len2) ? 0 : 0x7fffffff;
    >     >     +
    >     >     +    return 0;
    >     >      }
    >     >
    >     >      static void tulip_setup_filter_addr(TULIPState *s, uint8_t *buf, int n)
    >     >     @@ -651,13 +667,15 @@ static uint32_t tulip_ts(TULIPState *s)
    >     >
    >     >      static void tulip_xmit_list_update(TULIPState *s)
    >     >      {
    >     >     +#define TULIP_DESC_MAX 128
    >     >     +    uint8_t i = 0;
    >     >          struct tulip_descriptor desc;
    >     >
    >     >          if (tulip_ts(s) != CSR5_TS_SUSPENDED) {
    >     >              return;
    >     >          }
    >     >
    >     >     -    for (;;) {
    >     >     +    for (i = 0; i < TULIP_DESC_MAX; i++) {
    >     >              tulip_desc_read(s, s->current_tx_desc, &desc);
    >     >              tulip_dump_tx_descriptor(s, &desc);
    >     >
    >     >     @@ -675,10 +693,10 @@ static void tulip_xmit_list_update(TULIPState *s)
    >     >                      s->tx_frame_len = 0;
    >     >                  }
    >     >
    >     >     -            tulip_copy_tx_buffers(s, &desc);
    >     >     -
    >     >     -            if (desc.control & TDES1_LS) {
    >     >     -                tulip_tx(s, &desc);
    >     >     +            if (!tulip_copy_tx_buffers(s, &desc)) {
    >     >     +                if (desc.control & TDES1_LS) {
    >     >     +                    tulip_tx(s, &desc);
    >     >     +                }
    >     >                  }
    >     >              }
    >     >              tulip_desc_write(s, s->current_tx_desc, &desc);
    >     >     --
    >     >     2.25.1
    >     >
    >     >
    >




