Re: [RFC PATCH v3 5/6] virtio-net: Added eBPF RSS to virtio-net.
From: Yuri Benditovich
Subject: Re: [RFC PATCH v3 5/6] virtio-net: Added eBPF RSS to virtio-net.
Date: Sun, 17 Jan 2021 11:04:07 +0200
On Fri, Jan 15, 2021 at 9:20 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2021/1/15 5:16 AM, Andrew Melnychenko wrote:
> > From: Andrew <andrew@daynix.com>
> >
> > When RSS is enabled, the device tries to load the eBPF program
> > that selects the RX virtqueue in the TUN. If the eBPF program can be
> > loaded, RSS also functions with vhost (works with kernel 5.8 and later).
> > Software RSS is used as a fallback with vhost=off when eBPF can't be
> > loaded or when hash population is requested by the guest.
> >
> > Signed-off-by: Yuri Benditovich <yuri.benditovich@daynix.com>
> > Signed-off-by: Andrew Melnychenko <andrew@daynix.com>
> > ---
> > hw/net/vhost_net.c             |   2 +
> > hw/net/virtio-net.c            | 125 +++++++++++++++++++++++++++++++--
> > include/hw/virtio/virtio-net.h |   4 ++
> > net/vhost-vdpa.c               |   2 +
> > 4 files changed, 129 insertions(+), 4 deletions(-)
> >
> > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> > index 24d555e764..16124f99c3 100644
> > --- a/hw/net/vhost_net.c
> > +++ b/hw/net/vhost_net.c
> > @@ -71,6 +71,8 @@ static const int user_feature_bits[] = {
> > VIRTIO_NET_F_MTU,
> > VIRTIO_F_IOMMU_PLATFORM,
> > VIRTIO_F_RING_PACKED,
> > + VIRTIO_NET_F_RSS,
> > + VIRTIO_NET_F_HASH_REPORT,
> >
> > /* This bit implies RARP isn't sent by QEMU out of band */
> > VIRTIO_NET_F_GUEST_ANNOUNCE,
> > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > index 09ceb02c9d..37016fc73a 100644
> > --- a/hw/net/virtio-net.c
> > +++ b/hw/net/virtio-net.c
> > @@ -691,6 +691,19 @@ static void virtio_net_set_queues(VirtIONet *n)
> >
> > static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
> >
> > +static uint64_t fix_ebpf_vhost_features(uint64_t features)
> > +{
> > + /* If vhost=on and CONFIG_EBPF is not set, disable the RSS feature */
>
>
> I still think we should not clear the feature silently. This may break
> migration if the feature is cleared on the destination.
Do I understand correctly that if we do not clear features silently, and
instead implement a graceful fallback to vhost=off when we can't do what
we need with vhost, then we do not need to add any migration blocker?
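
For illustration, a minimal sketch of such a blocker, assuming QEMU's
migrate_add_blocker() from include/migration/blocker.h; the Error field
rss_migration_blocker is hypothetical, not something this series adds:

    /* Hypothetical sketch: block migration while the device runs in a
     * mode the destination may not reproduce (e.g. after eBPF RSS
     * failed to load). rss_migration_blocker is an illustrative
     * Error * field, not part of this patch. */
    static int virtio_net_block_rss_migration(VirtIONet *n, Error **errp)
    {
        error_setg(&n->rss_migration_blocker,
                   "virtio-net: eBPF RSS program not loaded");
        return migrate_add_blocker(n->rss_migration_blocker, errp);
    }

If the graceful fallback is acceptable, we could avoid the blocker
entirely.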
>
>
> > + uint64_t ret = features;
> > +#ifndef CONFIG_EBPF
> > + virtio_clear_feature(&ret, VIRTIO_NET_F_RSS);
> > +#endif
> > + /* for now, there is no solution for populating the hash from eBPF */
> > + virtio_clear_feature(&ret, VIRTIO_NET_F_HASH_REPORT);
> > +
> > + return ret;
> > +}
> > +
> > static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
> >                                         Error **errp)
> > {
> > @@ -725,9 +738,9 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
> > return features;
> > }
> >
> > - virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> > - virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
> > - features = vhost_net_get_features(get_vhost_net(nc->peer), features);
> > + features = fix_ebpf_vhost_features(
> > + vhost_net_get_features(get_vhost_net(nc->peer), features));
> > +
> > vdev->backend_features = features;
> >
> > if (n->mtu_bypass_backend &&
> > @@ -1151,12 +1164,79 @@ static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
> > }
> > }
> >
> > +static void virtio_net_detach_epbf_rss(VirtIONet *n);
> > +
> > static void virtio_net_disable_rss(VirtIONet *n)
> > {
> > if (n->rss_data.enabled) {
> > trace_virtio_net_rss_disable();
> > }
> > n->rss_data.enabled = false;
> > +
> > + virtio_net_detach_epbf_rss(n);
> > +}
> > +
> > +static bool virtio_net_attach_ebpf_to_backend(NICState *nic, int prog_fd)
> > +{
> > + NetClientState *nc = qemu_get_peer(qemu_get_queue(nic), 0);
> > + if (nc == NULL || nc->info->set_steering_ebpf == NULL) {
> > + return false;
> > + }
> > +
> > + return nc->info->set_steering_ebpf(nc, prog_fd);
> > +}
> > +
> > +static void rss_data_to_rss_config(struct VirtioNetRssData *data,
> > + struct EBPFRSSConfig *config)
> > +{
> > + config->redirect = data->redirect;
> > + config->populate_hash = data->populate_hash;
> > + config->hash_types = data->hash_types;
> > + config->indirections_len = data->indirections_len;
> > + config->default_queue = data->default_queue;
> > +}
> > +
> > +static bool virtio_net_attach_epbf_rss(VirtIONet *n)
> > +{
> > + struct EBPFRSSConfig config = {};
> > +
> > + if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> > + return false;
> > + }
> > +
> > + rss_data_to_rss_config(&n->rss_data, &config);
> > +
> > + if (!ebpf_rss_set_all(&n->ebpf_rss, &config,
> > + n->rss_data.indirections_table, n->rss_data.key)) {
> > + return false;
> > + }
> > +
> > + if (!virtio_net_attach_ebpf_to_backend(n->nic, n->ebpf_rss.program_fd)) {
> > + return false;
> > + }
> > +
> > + return true;
> > +}
> > +
> > +static void virtio_net_detach_epbf_rss(VirtIONet *n)
> > +{
> > + virtio_net_attach_ebpf_to_backend(n->nic, -1);
> > +}
> > +
> > +static bool virtio_net_load_ebpf(VirtIONet *n)
> > +{
> > + if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
> > + /* backend doesn't support steering eBPF */
> > + return false;
> > + }
> > +
> > + return ebpf_rss_load(&n->ebpf_rss);
> > +}
> > +
> > +static void virtio_net_unload_ebpf(VirtIONet *n)
> > +{
> > + virtio_net_attach_ebpf_to_backend(n->nic, -1);
> > + ebpf_rss_unload(&n->ebpf_rss);
> > }
> >
> > static uint16_t virtio_net_handle_rss(VirtIONet *n,
> > @@ -1271,6 +1351,25 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
> > goto error;
> > }
> > n->rss_data.enabled = true;
> > +
> > + if (!n->rss_data.populate_hash) {
> > + if (!virtio_net_attach_epbf_rss(n)) {
> > + /* EBPF must be loaded for vhost */
> > + if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
> > + warn_report("Can't load eBPF RSS for vhost");
> > + goto error;
>
>
> How about stopping vhost in this case?
>
> Thanks
>
>
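
Regarding stopping vhost: that sounds reasonable. A rough sketch of the
fallback, reusing the existing vhost_net_stop() helper; the placement
and error handling here are illustrative only, not the final patch:

    /* Hypothetical sketch: if the eBPF program can't be attached while
     * vhost is active, stop vhost and let QEMU's software RSS take
     * over instead of failing the control command. */
    if (!virtio_net_attach_epbf_rss(n)) {
        if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
            warn_report("Can't load eBPF RSS - disabling vhost");
            vhost_net_stop(VIRTIO_DEVICE(n), n->nic->ncs, n->max_queues);
        }
        /* software RSS path handles steering from here */
    }

We will look into this for the next version.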