[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20191126100744.5083-15-prashantbhole.linux@gmail.com>
Date: Tue, 26 Nov 2019 19:07:40 +0900
From: Prashant Bhole <prashantbhole.linux@...il.com>
To: "David S . Miller" <davem@...emloft.net>,
"Michael S . Tsirkin" <mst@...hat.com>
Cc: Jason Wang <jasowang@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jakub Kicinski <jakub.kicinski@...ronome.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
Andrii Nakryiko <andriin@...com>, netdev@...r.kernel.org,
qemu-devel@...gnu.org, kvm@...r.kernel.org,
Prashant Bhole <prashantbhole.linux@...il.com>
Subject: [RFC net-next 14/18] virtio_net: add XDP prog offload infrastructure
From: Jason Wang <jasowang@...hat.com>
This patch prepares virtio_net for XDP offloading. It adds the data
structures and blank callback implementations for bpf_prog_offload_ops.
It also implements the ndo_init and ndo_uninit operations for setting up
and tearing down the offload-related data structures.
Signed-off-by: Jason Wang <jasowang@...hat.com>
Co-developed-by: Prashant Bhole <prashantbhole.linux@...il.com>
Signed-off-by: Prashant Bhole <prashantbhole.linux@...il.com>
---
drivers/net/virtio_net.c | 103 +++++++++++++++++++++++++++++++++++++++
1 file changed, 103 insertions(+)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cee5c2b15c62..a1088d0114f2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -229,8 +229,14 @@ struct virtnet_info {
struct failover *failover;
struct bpf_prog __rcu *xdp_prog;
+ struct bpf_prog __rcu *offload_xdp_prog;
struct xdp_attachment_info xdp;
+ struct xdp_attachment_info xdp_hw;
+
+ struct bpf_offload_dev *bpf_dev;
+
+ struct list_head bpf_bound_progs;
};
struct padded_vnet_hdr {
@@ -258,6 +264,14 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
+/* Per-program state for a BPF program bound to this device for offload.
+ * Linked into vi->bpf_bound_progs via @list.  @len and the trailing
+ * @insnsi array hold a copy of the program's instructions (presumably
+ * @len counts bpf_insn entries -- TODO confirm units once the setup
+ * callback is filled in; it is unused in this patch).
+ */
+struct virtnet_bpf_bound_prog {
+ struct virtnet_info *vi;
+ struct bpf_prog *prog;
+ struct list_head list;
+ u32 len;
+ struct bpf_insn insnsi[0]; /* NOTE(review): kernel style now prefers a
+                             * flexible array member (insnsi[]) over [0] */
+};
+
+
/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
@@ -2506,13 +2520,63 @@ static int virtnet_xdp_set(struct net_device *dev, struct netdev_bpf *bpf)
return err;
}
+/* bpf_prog_offload_ops.insn_hook: invoked by the BPF verifier per
+ * instruction of a program being offloaded.  Stub for now -- accepts
+ * every instruction unconditionally.
+ */
+static int virtnet_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx,
+ int prev_insn)
+{
+ return 0;
+}
+
+/* bpf_prog_offload_ops.destroy: release per-program offload state.
+ * Stub -- nothing is allocated yet in this patch, so nothing to free.
+ */
+static void virtnet_bpf_destroy_prog(struct bpf_prog *prog)
+{
+}
+
+/* Handle XDP_SETUP_PROG_HW: attach/detach an offloaded XDP program.
+ * Placeholder -- always rejects with -EBUSY (presumably replaced by a
+ * real implementation in a later patch of this series).
+ */
+static int virtnet_xdp_set_offload(struct virtnet_info *vi,
+ struct netdev_bpf *bpf)
+{
+ return -EBUSY;
+}
+
+/* bpf_prog_offload_ops.setup: allocate state for verifying/binding a
+ * program to this device.  Placeholder -- fails with -ENOMEM until the
+ * real implementation lands.
+ */
+static int virtnet_bpf_verifier_setup(struct bpf_prog *prog)
+{
+ return -ENOMEM;
+}
+
+/* bpf_prog_offload_ops.prepare: pre-verification hook.  Stub -- no
+ * preparation needed yet; reports success.
+ */
+static int virtnet_bpf_verifier_prep(struct bpf_prog *prog)
+{
+ return 0;
+}
+
+/* bpf_prog_offload_ops.translate: translate the verified program for
+ * the target device.  Stub -- no translation performed yet.
+ */
+static int virtnet_bpf_translate(struct bpf_prog *prog)
+{
+ return 0;
+}
+
+/* bpf_prog_offload_ops.finalize: post-verification hook.  Stub --
+ * nothing to finalize yet; reports success.
+ */
+static int virtnet_bpf_finalize(struct bpf_verifier_env *env)
+{
+ return 0;
+}
+
+/* Offload callback table registered with the BPF core via
+ * bpf_offload_dev_create() in virtnet_bpf_init().  All entries are the
+ * blank stubs above; later patches presumably fill in real behavior.
+ */
+static const struct bpf_prog_offload_ops virtnet_bpf_dev_ops = {
+ .setup = virtnet_bpf_verifier_setup,
+ .prepare = virtnet_bpf_verifier_prep,
+ .insn_hook = virtnet_bpf_verify_insn,
+ .finalize = virtnet_bpf_finalize,
+ .translate = virtnet_bpf_translate,
+ .destroy = virtnet_bpf_destroy_prog,
+};
+
static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
+ struct virtnet_info *vi = netdev_priv(dev);
switch (xdp->command) {
case XDP_SETUP_PROG:
return virtnet_xdp_set(dev, xdp);
case XDP_QUERY_PROG:
return xdp_attachment_query(&vi->xdp, xdp);
+ case XDP_SETUP_PROG_HW:
+ return virtnet_xdp_set_offload(vi, xdp);
+ case XDP_QUERY_PROG_HW:
+ return xdp_attachment_query(&vi->xdp_hw, xdp);
default:
return -EINVAL;
}
@@ -2559,7 +2623,46 @@ static int virtnet_set_features(struct net_device *dev,
return 0;
}
+/* Set up BPF offload state for this device: create the offload device
+ * with our callback table, register the netdev against it, and init the
+ * list of bound programs.  Returns 0 or a negative errno; on failure
+ * the offload device is torn down again (goto unwind).
+ */
+static int virtnet_bpf_init(struct virtnet_info *vi)
+{
+ int err;
+
+ /* bpf_offload_dev_create() returns an ERR_PTR on failure */
+ vi->bpf_dev = bpf_offload_dev_create(&virtnet_bpf_dev_ops, NULL);
+ err = PTR_ERR_OR_ZERO(vi->bpf_dev);
+ if (err)
+ return err;
+
+ err = bpf_offload_dev_netdev_register(vi->bpf_dev, vi->dev);
+ if (err)
+ goto err_netdev_register;
+
+ /* NOTE(review): list is initialized after the netdev is registered
+  * with the offload core -- confirm nothing can reach
+  * vi->bpf_bound_progs in that window */
+ INIT_LIST_HEAD(&vi->bpf_bound_progs);
+
+ return 0;
+
+err_netdev_register:
+ bpf_offload_dev_destroy(vi->bpf_dev);
+ return err;
+}
+
+/* ndo_init: called by the networking core during register_netdev().
+ * Only sets up the BPF offload infrastructure.
+ */
+static int virtnet_init(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ return virtnet_bpf_init(vi);
+}
+
+/* ndo_uninit: counterpart of virtnet_init(); unregisters the netdev
+ * from the BPF offload core and destroys the offload device created in
+ * virtnet_bpf_init().
+ */
+static void virtnet_uninit(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ bpf_offload_dev_netdev_unregister(vi->bpf_dev, vi->dev);
+ bpf_offload_dev_destroy(vi->bpf_dev);
+}
+
static const struct net_device_ops virtnet_netdev = {
+ .ndo_init = virtnet_init,
+ .ndo_uninit = virtnet_uninit,
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
.ndo_start_xmit = start_xmit,
--
2.20.1
Powered by blists - more mailing lists