Message-Id: <20180604115715.17895-2-bjorn.topel@gmail.com>
Date: Mon, 4 Jun 2018 13:57:11 +0200
From: Björn Töpel <bjorn.topel@...il.com>
To: bjorn.topel@...il.com, magnus.karlsson@...el.com,
magnus.karlsson@...il.com, alexander.h.duyck@...el.com,
alexander.duyck@...il.com, ast@...com, brouer@...hat.com,
daniel@...earbox.net, netdev@...r.kernel.org,
mykyta.iziumtsev@...aro.org
Cc: Björn Töpel <bjorn.topel@...el.com>,
john.fastabend@...il.com, willemdebruijn.kernel@...il.com,
mst@...hat.com, michael.lundkvist@...csson.com,
jesse.brandeburg@...el.com, anjali.singhai@...el.com,
qi.z.zhang@...el.com, francois.ozog@...aro.org,
ilias.apalodimas@...aro.org, brian.brooks@...aro.org,
andy@...yhouse.net, michael.chan@...adcom.com
Subject: [PATCH bpf-next 1/5] xsk: proper fill queue descriptor validation
From: Björn Töpel <bjorn.topel@...el.com>
Previously the fill queue descriptor was not copied to kernel space
prior to validating it, making it possible for userland to change the
descriptor post-kernel-validation. Fix this by reading the descriptor
into kernel memory with READ_ONCE() before validating it, so that
validation and all subsequent use operate on a kernel-local copy.
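
Below is a minimal, self-contained userspace sketch of the
copy-before-validate pattern applied here. It is an illustration only:
demo_ring, ring_peek(), id_is_valid() and NENTRIES are made-up names,
and __atomic_load_n() stands in for the kernel's READ_ONCE().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NENTRIES 64	/* power of two, mirrors the ring_mask idiom */

struct demo_ring {
	uint32_t cons_tail;
	uint32_t cons_head;
	uint32_t desc[NENTRIES];	/* shared with an untrusted producer */
};

static bool id_is_valid(uint32_t id)
{
	return id < NENTRIES;	/* stand-in for xskq_is_valid_id() */
}

/* Peek the next valid id; returns false if the ring is empty. */
static bool ring_peek(struct demo_ring *q, uint32_t *id)
{
	while (q->cons_tail != q->cons_head) {
		uint32_t idx = q->cons_tail & (NENTRIES - 1);

		/*
		 * One racy load into private storage (READ_ONCE() in the
		 * kernel); validation and every later use see this copy
		 * only, so the producer cannot flip the value between
		 * the check and the use.
		 */
		*id = __atomic_load_n(&q->desc[idx], __ATOMIC_RELAXED);
		if (id_is_valid(*id))
			return true;

		q->cons_tail++;	/* skip invalid entries */
	}
	return false;
}

int main(void)
{
	struct demo_ring q = { .cons_tail = 0, .cons_head = 2 };
	uint32_t id;

	q.desc[0] = 1000;	/* out of range: skipped */
	q.desc[1] = 7;		/* valid */

	if (ring_peek(&q, &id))
		printf("validated id %u\n", id);
	return 0;
}

The point mirrored by xskq_validate_id()/xskq_validate_desc() below is
that the shared ring entry is loaded exactly once into private storage;
the old code validated ring->desc[idx] in place and then returned a
pointer into the shared ring, leaving a window for userland to rewrite
the entry between validation and use.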
Signed-off-by: Björn Töpel <bjorn.topel@...el.com>
---
 net/xdp/xsk.c       | 11 +++++------
 net/xdp/xsk_queue.h | 32 +++++++++++-----------------
 2 files changed, 14 insertions(+), 29 deletions(-)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index cce0e4f8a536..43554eb56fe6 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -41,20 +41,19 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 
 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	u32 *id, len = xdp->data_end - xdp->data;
+	u32 id, len = xdp->data_end - xdp->data;
 	void *buffer;
-	int err = 0;
+	int err;
 
 	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
 		return -EINVAL;
 
-	id = xskq_peek_id(xs->umem->fq);
-	if (!id)
+	if (!xskq_peek_id(xs->umem->fq, &id))
 		return -ENOSPC;
 
-	buffer = xdp_umem_get_data_with_headroom(xs->umem, *id);
+	buffer = xdp_umem_get_data_with_headroom(xs->umem, id);
 	memcpy(buffer, xdp->data, len);
-	err = xskq_produce_batch_desc(xs->rx, *id, len,
+	err = xskq_produce_batch_desc(xs->rx, id, len,
 				      xs->umem->frame_headroom);
 	if (!err)
 		xskq_discard_id(xs->umem->fq);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index cb8e5be35110..b5924e7aeb2b 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -85,14 +85,15 @@ static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
 	return true;
 }
 
-static inline u32 *xskq_validate_id(struct xsk_queue *q)
+static inline u32 *xskq_validate_id(struct xsk_queue *q, u32 *id)
 {
 	while (q->cons_tail != q->cons_head) {
 		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 		unsigned int idx = q->cons_tail & q->ring_mask;
 
-		if (xskq_is_valid_id(q, ring->desc[idx]))
-			return &ring->desc[idx];
+		*id = READ_ONCE(ring->desc[idx]);
+		if (xskq_is_valid_id(q, *id))
+			return id;
 
 		q->cons_tail++;
 	}
@@ -100,28 +101,22 @@ static inline u32 *xskq_validate_id(struct xsk_queue *q)
 	return NULL;
 }
 
-static inline u32 *xskq_peek_id(struct xsk_queue *q)
+static inline u32 *xskq_peek_id(struct xsk_queue *q, u32 *id)
 {
-	struct xdp_umem_ring *ring;
-
 	if (q->cons_tail == q->cons_head) {
 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
 		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
 		/* Order consumer and data */
 		smp_rmb();
-
-		return xskq_validate_id(q);
 	}
 
-	ring = (struct xdp_umem_ring *)q->ring;
-	return &ring->desc[q->cons_tail & q->ring_mask];
+	return xskq_validate_id(q, id);
 }
 
 static inline void xskq_discard_id(struct xsk_queue *q)
 {
 	q->cons_tail++;
-	(void)xskq_validate_id(q);
 }
 
 static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
@@ -174,11 +169,9 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
 		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
 		unsigned int idx = q->cons_tail & q->ring_mask;
 
-		if (xskq_is_valid_desc(q, &ring->desc[idx])) {
-			if (desc)
-				*desc = ring->desc[idx];
+		*desc = READ_ONCE(ring->desc[idx]);
+		if (xskq_is_valid_desc(q, desc))
 			return desc;
-		}
 
 		q->cons_tail++;
 	}
@@ -189,27 +182,20 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
 static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
 					      struct xdp_desc *desc)
 {
-	struct xdp_rxtx_ring *ring;
-
 	if (q->cons_tail == q->cons_head) {
 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
 		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
 		/* Order consumer and data */
 		smp_rmb();
-
-		return xskq_validate_desc(q, desc);
 	}
 
-	ring = (struct xdp_rxtx_ring *)q->ring;
-	*desc = ring->desc[q->cons_tail & q->ring_mask];
-	return desc;
+	return xskq_validate_desc(q, desc);
 }
 
 static inline void xskq_discard_desc(struct xsk_queue *q)
 {
 	q->cons_tail++;
-	(void)xskq_validate_desc(q, NULL);
 }
 
 static inline int xskq_produce_batch_desc(struct xsk_queue *q,
--
2.14.1