
Re: [RFC v1 1/4] net: Introduce qemu_get_peer


From: Jason Wang
Subject: Re: [RFC v1 1/4] net: Introduce qemu_get_peer
Date: Tue, 21 Apr 2020 11:23:20 +0800
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.9.0


On 2020/4/20 5:32 PM, Cindy Lu wrote:
This is a small function  that can get the peer from given NetClientState and queue_index


Unnecessary space between 'function' and 'that'.



Signed-off-by: Cindy Lu <address@hidden>


Please split this patch into two parts:

1) introduce the function
2) the actual user of this function


---
  hw/net/vhost_net.c | 16 ++++++++++------
  include/net/net.h  |  1 +
  net/net.c          |  6 ++++++
  3 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 6b82803fa7..4096d64aaf 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -306,7 +306,9 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
     VirtioBusState *vbus = VIRTIO_BUS(qbus);
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+    struct vhost_net *net;
     int r, e, i;
+    NetClientState *peer;
 
     if (!k->set_guest_notifiers) {
         error_report("binding does not support guest notifiers");
@@ -314,9 +316,9 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
     }
 
     for (i = 0; i < total_queues; i++) {
-        struct vhost_net *net;
 
-        net = get_vhost_net(ncs[i].peer);
+        peer = qemu_get_peer(ncs, i);
+        net = get_vhost_net(peer);
         vhost_net_set_vq_index(net, i * 2);
 
         /* Suppress the masking guest notifiers on vhost user
@@ -335,15 +337,16 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
     }
 
     for (i = 0; i < total_queues; i++) {
-        r = vhost_net_start_one(get_vhost_net(ncs[i].peer), dev);
+        peer = qemu_get_peer(ncs, i);
+        r = vhost_net_start_one(get_vhost_net(peer), dev);
 
         if (r < 0) {
             goto err_start;
         }
 
-        if (ncs[i].peer->vring_enable) {
+        if (peer->vring_enable) {
             /* restore vring enable state */
-            r = vhost_set_vring_enable(ncs[i].peer, ncs[i].peer->vring_enable);
+            r = vhost_set_vring_enable(peer, peer->vring_enable);
 
             if (r < 0) {
                 goto err_start;
@@ -355,7 +358,8 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
 
 err_start:
     while (--i >= 0) {
-        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
+        peer = qemu_get_peer(ncs , i);
+        vhost_net_stop_one(get_vhost_net(peer), dev);
     }
     e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
     if (e < 0) {
diff --git a/include/net/net.h b/include/net/net.h
index e175ba9677..0a74324ccd 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -175,6 +175,7 @@ void hmp_info_network(Monitor *mon, const QDict *qdict);
  void net_socket_rs_init(SocketReadState *rs,
                          SocketReadStateFinalize *finalize,
                          bool vnet_hdr);
+NetClientState *qemu_get_peer(NetClientState *nc, int queue_index);
 
 /* NIC info */
diff --git a/net/net.c b/net/net.c
index 84aa6d8d00..ac5080dda1 100644
--- a/net/net.c
+++ b/net/net.c
@@ -324,6 +324,12 @@ void *qemu_get_nic_opaque(NetClientState *nc)
     return nic->opaque;
 }
+NetClientState *qemu_get_peer(NetClientState *nc, int queue_index)
+{
+    NetClientState *ncs  =  nc + queue_index;


Unnecessary space around '='.

Thanks


+    assert(ncs != NULL);
+    return ncs->peer;
+}
 static void qemu_cleanup_net_client(NetClientState *nc)
 {
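
For reference, a minimal sketch of how a caller is expected to use the new helper, mirroring the vhost_net_start() hunks above (the wrapper name example_set_vq_indexes is only illustrative and not part of the patch):

/* Illustrative only: qemu_get_peer(ncs, i) returns ncs[i].peer, so
 * per-queue code no longer open-codes the ncs[i].peer indexing. */
static void example_set_vq_indexes(NetClientState *ncs, int total_queues)
{
    int i;

    for (i = 0; i < total_queues; i++) {
        NetClientState *peer = qemu_get_peer(ncs, i);  /* peer of queue i */
        struct vhost_net *net = get_vhost_net(peer);   /* its vhost_net */

        vhost_net_set_vq_index(net, i * 2);            /* two vqs per queue pair */
    }
}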



