Message-ID: <152214314536.9023.10525071999910155063.stgit@firesoul>
Date: Tue, 27 Mar 2018 11:32:25 +0200
From: Jesper Dangaard Brouer <brouer@...hat.com>
To: netdev@...r.kernel.org,
	Björn Töpel <bjorn.topel@...el.com>,
magnus.karlsson@...el.com
Cc: eugenia@...lanox.com, Jason Wang <jasowang@...hat.com>,
John Fastabend <john.fastabend@...il.com>,
Eran Ben Elisha <eranbe@...lanox.com>,
Saeed Mahameed <saeedm@...lanox.com>, galp@...lanox.com,
Jesper Dangaard Brouer <brouer@...hat.com>,
Daniel Borkmann <borkmann@...earbox.net>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
Tariq Toukan <tariqt@...lanox.com>
Subject: [bpf-next V6 PATCH 06/15] tun: convert to use generic xdp_frame and
xdp_return_frame API
The tuntap driver invented its own driver-specific way of queuing
XDP packets, by storing the xdp_buff information at the top of
the XDP frame data.
Convert it over to use the more generic xdp_frame structure. The
main problem with the in-driver method is that the xdp_rxq_info pointer
cannot be trusted/used when dequeueing the frame.
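
For reference, the generic xdp_frame (introduced earlier in this series)
stores its metadata at the top of the packet headroom and carries the
xdp_mem_info by value, so the frame stays usable after the RX NAPI
context (and its xdp_rxq_info) is gone. A rough sketch of that
conversion, simplified from include/net/xdp.h as it looks in this
series (not the authoritative definition):

  struct xdp_frame {
  	void *data;
  	u16 len;
  	u16 headroom;
  	u16 metasize;
  	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
  	 * while mem info is valid on remote CPU.
  	 */
  	struct xdp_mem_info mem;
  };

  static inline
  struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
  {
  	struct xdp_frame *xdp_frame;
  	int metasize;
  	int headroom;

  	/* Assure headroom is large enough to hold the xdp_frame */
  	headroom = xdp->data - xdp->data_hard_start;
  	metasize = xdp->data - xdp->data_meta;
  	metasize = metasize > 0 ? metasize : 0;
  	if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
  		return NULL;

  	/* Store frame info in the top of the packet headroom */
  	xdp_frame = xdp->data_hard_start;
  	xdp_frame->data = xdp->data;
  	xdp_frame->len  = xdp->data_end - xdp->data;
  	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
  	xdp_frame->metasize = metasize;

  	/* rxq only valid until napi_schedule ends; keep a copy of mem info */
  	xdp_frame->mem = xdp->rxq->mem;

  	return xdp_frame;
  }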
V3: Remove check based on feedback from Jason
Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
---
drivers/net/tun.c | 43 ++++++++++++++++++++-----------------------
drivers/vhost/net.c | 7 ++++---
include/linux/if_tun.h | 4 ++--
3 files changed, 26 insertions(+), 28 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index baeafa004463..6750980d9f30 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -248,11 +248,11 @@ struct veth {
__be16 h_vlan_TCI;
};
-bool tun_is_xdp_buff(void *ptr)
+bool tun_is_xdp_frame(void *ptr)
{
return (unsigned long)ptr & TUN_XDP_FLAG;
}
-EXPORT_SYMBOL(tun_is_xdp_buff);
+EXPORT_SYMBOL(tun_is_xdp_frame);
void *tun_xdp_to_ptr(void *ptr)
{
@@ -660,10 +660,10 @@ static void tun_ptr_free(void *ptr)
{
if (!ptr)
return;
- if (tun_is_xdp_buff(ptr)) {
- struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+ if (tun_is_xdp_frame(ptr)) {
+ struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
- put_page(virt_to_head_page(xdp->data));
+ xdp_return_frame(xdpf->data, &xdpf->mem);
} else {
__skb_array_destroy_skb(ptr);
}
@@ -1290,17 +1290,14 @@ static const struct net_device_ops tun_netdev_ops = {
static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
{
struct tun_struct *tun = netdev_priv(dev);
- struct xdp_buff *buff = xdp->data_hard_start;
- int headroom = xdp->data - xdp->data_hard_start;
+ struct xdp_frame *frame;
struct tun_file *tfile;
u32 numqueues;
int ret = 0;
- /* Assure headroom is available and buff is properly aligned */
- if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
- return -ENOSPC;
-
- *buff = *xdp;
+ frame = convert_to_xdp_frame(xdp);
+ if (unlikely(!frame))
+ return -EOVERFLOW;
rcu_read_lock();
@@ -1315,7 +1312,7 @@ static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
/* Encode the XDP flag into lowest bit for consumer to differ
* XDP buffer from sk_buff.
*/
- if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
+ if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(frame))) {
this_cpu_inc(tun->pcpu_stats->tx_dropped);
ret = -ENOSPC;
}
@@ -1993,11 +1990,11 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
static ssize_t tun_put_user_xdp(struct tun_struct *tun,
struct tun_file *tfile,
- struct xdp_buff *xdp,
+ struct xdp_frame *xdp_frame,
struct iov_iter *iter)
{
int vnet_hdr_sz = 0;
- size_t size = xdp->data_end - xdp->data;
+ size_t size = xdp_frame->len;
struct tun_pcpu_stats *stats;
size_t ret;
@@ -2013,7 +2010,7 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
}
- ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
+ ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
@@ -2181,11 +2178,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
return err;
}
- if (tun_is_xdp_buff(ptr)) {
- struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+ if (tun_is_xdp_frame(ptr)) {
+ struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
- ret = tun_put_user_xdp(tun, tfile, xdp, to);
- put_page(virt_to_head_page(xdp->data));
+ ret = tun_put_user_xdp(tun, tfile, xdpf, to);
+ xdp_return_frame(xdpf->data, &xdpf->mem);
} else {
struct sk_buff *skb = ptr;
@@ -2424,10 +2421,10 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
static int tun_ptr_peek_len(void *ptr)
{
if (likely(ptr)) {
- if (tun_is_xdp_buff(ptr)) {
- struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+ if (tun_is_xdp_frame(ptr)) {
+ struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
- return xdp->data_end - xdp->data;
+ return xdpf->len;
}
return __skb_array_len_with_tag(ptr);
} else {
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index b5fb56b822fd..5aee3aaf6c8c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -32,6 +32,7 @@
#include <linux/skbuff.h>
#include <net/sock.h>
+#include <net/xdp.h>
#include "vhost.h"
@@ -177,10 +178,10 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
static int vhost_net_buf_peek_len(void *ptr)
{
- if (tun_is_xdp_buff(ptr)) {
- struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+ if (tun_is_xdp_frame(ptr)) {
+ struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
- return xdp->data_end - xdp->data;
+ return xdpf->len;
}
return __skb_array_len_with_tag(ptr);
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index c5b0a75a7812..33b817b172af 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -22,7 +22,7 @@
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
-bool tun_is_xdp_buff(void *ptr);
+bool tun_is_xdp_frame(void *ptr);
void *tun_xdp_to_ptr(void *ptr);
void *tun_ptr_to_xdp(void *ptr);
#else
@@ -38,7 +38,7 @@ static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline bool tun_is_xdp_buff(void *ptr)
+static inline bool tun_is_xdp_frame(void *ptr)
{
return false;
}
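
For readers following the ptr_ring producer/consumer split: tun tags
xdp_frame pointers with the low bit so the consumer can tell them apart
from sk_buff pointers sharing the same ring. A minimal sketch of the
tagging helpers this patch renames, mirroring drivers/net/tun.c
(TUN_XDP_FLAG is the existing 0x1UL tag bit):

  #define TUN_XDP_FLAG 0x1UL

  void *tun_xdp_to_ptr(void *ptr)
  {
  	/* Set the low bit to mark this pointer as an xdp_frame */
  	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
  }

  void *tun_ptr_to_xdp(void *ptr)
  {
  	/* Clear the tag bit to recover the real xdp_frame pointer */
  	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
  }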