Message-ID: <4AF447F7.6000700@gmail.com>
Date: Fri, 06 Nov 2009 16:59:51 +0100
From: Eric Dumazet <eric.dumazet@...il.com>
To: Lucian Adrian Grijincu <lgrijincu@...acom.com>,
David Miller <davem@...emloft.net>
CC: netdev@...r.kernel.org, opurdila@...acom.com
Subject: [PATCH net-next-2.6] udp: Optimise multicast reception
Eric Dumazet wrote:
> Yes, but this single skb freeing is on a very slow path of the multicast
> code (it happens when we receive a multicast packet with no listener,
> which should not happen on a multicast-aware network...)
>
>
> If you really want to optimize this part, we could use an array of
> 32 (or 64) socket pointers, to be able to perform the really expensive
> work (skb_clone(), udp_queue_rcv_skb()) outside of the lock.
>
> Something like this untested patch:
I did some tests and made one fix.
With this kind of stacking, we could eventually try an RCU lookup as well.
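For the curious, here is the shape of the idea in a user-space form (a purely
illustrative sketch, not kernel code: a pthread mutex stands in for
hslot->lock, and struct listener / deliver() stand in for the socket and
udp_queue_rcv_skb(); the 32-entry cap is handled by simply stopping, whereas
the real patch flushes the batch and keeps scanning):

/* Collect matching entries under the lock, then do the expensive
 * per-entry work with the lock released.
 */
#include <pthread.h>
#include <stdio.h>

struct listener {
	int id;
	struct listener *next;
};

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct listener *slot_head;	/* shared hash-slot chain */

static void deliver(struct listener *l, const char *msg)
{
	/* stand-in for the expensive part (skb_clone() + udp_queue_rcv_skb()) */
	printf("listener %d gets \"%s\"\n", l->id, msg);
}

static void mcast_deliver(const char *msg)
{
	struct listener *stack[32];	/* small on-stack batch of pointers */
	unsigned int i, count = 0;
	struct listener *l;

	pthread_mutex_lock(&slot_lock);
	for (l = slot_head; l && count < 32; l = l->next)
		stack[count++] = l;	/* cheap: only remember the pointer */
	pthread_mutex_unlock(&slot_lock);

	for (i = 0; i < count; i++)	/* expensive work, lock already dropped */
		deliver(stack[i], msg);
}

int main(void)
{
	struct listener a = { 1, NULL }, b = { 2, &a }, c = { 3, &b };

	slot_head = &c;
	mcast_deliver("hello");
	return 0;
}

One thing this toy version glosses over: in the kernel, a socket can go away
as soon as hslot->lock is released, so the patch takes a reference
(sock_hold()) on every stacked socket before unlocking and drops it
(sock_put()) after delivery. The listeners above live on main()'s stack, so
there is no such lifetime issue.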
[PATCH net-next-2.6] udp: Optimise multicast reception
The UDP multicast receive path is a bit complex and can hold a spinlock
for a long time.

Using a small stack of socket pointers (32 or 64 entries) lets us perform
the expensive operations (skb_clone(), udp_queue_rcv_skb()) outside of
the lock in most cases.
Signed-off-by: Eric Dumazet <eric.dumazet@...il.com>
---
net/ipv4/udp.c | 70 ++++++++++++++++++++++++++++++-----------------
1 files changed, 46 insertions(+), 24 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d5e75e9..89637d0 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1190,6 +1190,24 @@ drop:
 	return -1;
 }
 
+
+static void flush_stack(struct sock **stack, unsigned int count,
+			struct sk_buff *skb, unsigned int final)
+{
+	unsigned int i;
+	struct sk_buff *skb1 = NULL;
+
+	for (i = 0; i < count; i++) {
+		if (likely(skb1 == NULL))
+			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+
+		if (skb1 && udp_queue_rcv_skb(stack[i], skb1) <= 0)
+			skb1 = NULL;
+	}
+	if (skb1)
+		consume_skb(skb1);
+}
+
 /*
  *	Multicasts and broadcasts go to each listener.
  *
@@ -1201,38 +1219,42 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 				    struct udphdr  *uh,
 				    __be32 saddr, __be32 daddr,
 				    struct udp_table *udptable)
 {
-	struct sock *sk;
+	struct sock *sk, *stack[256 / sizeof(struct sock *)];
 	struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
 	int dif;
+	unsigned int i, count = 0;
 
 	spin_lock(&hslot->lock);
 	sk = sk_nulls_head(&hslot->head);
 	dif = skb->dev->ifindex;
 	sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
-	if (sk) {
-		struct sock *sknext = NULL;
-
-		do {
-			struct sk_buff *skb1 = skb;
-
-			sknext = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
-						   daddr, uh->source, saddr,
-						   dif);
-			if (sknext)
-				skb1 = skb_clone(skb, GFP_ATOMIC);
-
-			if (skb1) {
-				int ret = udp_queue_rcv_skb(sk, skb1);
-				if (ret > 0)
-					/* we should probably re-process instead
-					 * of dropping packets here. */
-					kfree_skb(skb1);
-			}
-			sk = sknext;
-		} while (sknext);
-	} else
-		consume_skb(skb);
+	while (sk) {
+		stack[count++] = sk;
+		sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
+				       daddr, uh->source, saddr, dif);
+		if (unlikely(count == ARRAY_SIZE(stack))) {
+			if (!sk)
+				break;
+			flush_stack(stack, count, skb, ~0);
+			count = 0;
+		}
+
+	}
+	/*
+	 * before releasing the lock, we must take reference on sockets
+	 */
+	for (i = 0; i < count; i++)
+		sock_hold(stack[i]);
+
 	spin_unlock(&hslot->lock);
+
+	/*
+	 * do the slow work with no lock held
+	 */
+	flush_stack(stack, count, skb, count - 1);
+
+	for (i = 0; i < count; i++)
+		sock_put(stack[i]);
 	return 0;
 }
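A note on the 'final' argument of flush_stack() above: the receiver at index
'final' is handed the original skb while the others are handed clones (an skb
whose delivery fails is reused for the next receiver), so the normal call
passes count - 1 and the last receiver avoids one skb_clone(). The overflow
flush done while the chain is still being scanned passes ~0, which matches no
index, so every receiver in that full batch gets a clone and the original skb
stays usable for the rest of the scan. A tiny stand-alone illustration of just
the (i == final) index logic (flush() and the printed strings are invented for
the example):

#include <stdio.h>

/* Mirrors only the (i == final) test of flush_stack(): the receiver at
 * index 'final' would take the original skb, the others would take clones.
 */
static void flush(unsigned int count, unsigned int final)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		printf("  receiver %u: %s\n", i,
		       (i == final) ? "original skb" : "clone");
}

int main(void)
{
	printf("final flush, 3 receivers (final = count - 1):\n");
	flush(3, 3 - 1);

	printf("overflow flush, 3 receivers (final = ~0):\n");
	flush(3, ~0u);
	return 0;
}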