[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20180423135619.7179-12-bjorn.topel@gmail.com>
Date: Mon, 23 Apr 2018 15:56:15 +0200
From: Björn Töpel <bjorn.topel@...il.com>
To: bjorn.topel@...il.com, magnus.karlsson@...el.com,
alexander.h.duyck@...el.com, alexander.duyck@...il.com,
john.fastabend@...il.com, ast@...com, brouer@...hat.com,
willemdebruijn.kernel@...il.com, daniel@...earbox.net,
mst@...hat.com, netdev@...r.kernel.org
Cc: michael.lundkvist@...csson.com, jesse.brandeburg@...el.com,
anjali.singhai@...el.com, qi.z.zhang@...el.com
Subject: [PATCH bpf-next 11/15] xsk: add umem completion queue support and mmap
From: Magnus Karlsson <magnus.karlsson@...el.com>
Here, we add another setsockopt for registered user memory (umem)
called XDP_UMEM_COMPLETION_RING. Using this socket option, the
process can ask the kernel to allocate a queue (ring buffer) and also
mmap it (XDP_UMEM_PGOFF_COMPLETION_RING) into the process.
The queue is used to explicitly pass ownership of umem frames from the
kernel to user process. This will be used by the TX path to tell user
space that a certain frame has been transmitted and user space can use
it for something else, if it wishes.
Signed-off-by: Magnus Karlsson <magnus.karlsson@...el.com>
---
include/uapi/linux/if_xdp.h | 2 ++
net/xdp/xdp_umem.c | 7 ++++++-
net/xdp/xdp_umem.h | 1 +
net/xdp/xsk.c | 7 ++++++-
4 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
index e5091881f776..71581a139f26 100644
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -36,6 +36,7 @@ struct sockaddr_xdp {
#define XDP_RX_RING 1
#define XDP_UMEM_REG 3
#define XDP_UMEM_FILL_RING 4
+#define XDP_UMEM_COMPLETION_RING 5
struct xdp_umem_reg {
__u64 addr; /* Start of packet data area */
@@ -47,6 +48,7 @@ struct xdp_umem_reg {
/* Pgoff for mmaping the rings */
#define XDP_PGOFF_RX_RING 0
#define XDP_UMEM_PGOFF_FILL_RING 0x100000000
+#define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000
struct xdp_desc {
__u32 idx;
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 6b36bb365c01..f1e835e46c03 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -67,6 +67,11 @@ static void xdp_umem_release(struct xdp_umem *umem)
umem->fq = NULL;
}
+ if (umem->cq) {
+ xskq_destroy(umem->cq);
+ umem->cq = NULL;
+ }
+
if (umem->pgs) {
xdp_umem_unpin_pages(umem);
@@ -247,5 +252,5 @@ int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
- return umem->fq;
+ return (umem->fq && umem->cq);
}
diff --git a/net/xdp/xdp_umem.h b/net/xdp/xdp_umem.h
index 8706c904d732..f8c2e27dc105 100644
--- a/net/xdp/xdp_umem.h
+++ b/net/xdp/xdp_umem.h
@@ -23,6 +23,7 @@
struct xdp_umem {
struct xsk_queue *fq;
+ struct xsk_queue *cq;
struct page **pgs;
struct xdp_umem_props props;
u32 npgs;
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 2ae501a8814a..e8eec4ac08d4 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -268,6 +268,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
} else {
/* This xsk has its own umem. */
xskq_set_umem(xs->umem->fq, &xs->umem->props);
+ xskq_set_umem(xs->umem->cq, &xs->umem->props);
}
/* Rebind? */
@@ -347,6 +348,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
return 0;
}
case XDP_UMEM_FILL_RING:
+ case XDP_UMEM_COMPLETION_RING:
{
struct xsk_queue **q;
int entries;
@@ -358,7 +360,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
return -EFAULT;
mutex_lock(&xs->mutex);
- q = &xs->umem->fq;
+ q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
+ &xs->umem->cq;
err = xsk_init_queue(entries, q, true);
mutex_unlock(&xs->mutex);
return err;
@@ -388,6 +391,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
if (offset == XDP_UMEM_PGOFF_FILL_RING)
q = xs->umem->fq;
+ else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
+ q = xs->umem->cq;
else
return -EINVAL;
}
--
2.14.1
Powered by blists - more mailing lists