Message-ID: <4AF43C5E.4060300@gmail.com>
Date: Fri, 06 Nov 2009 16:10:22 +0100
From: Eric Dumazet <eric.dumazet@...il.com>
To: Lucian Adrian Grijincu <lgrijincu@...acom.com>
CC: David Miller <davem@...emloft.net>, netdev@...r.kernel.org,
opurdila@...acom.com
Subject: Re: [PATCH 1/2] udp: cleanup __udp4_lib_mcast_deliver
Lucian Adrian Grijincu wrote:
>
> As far as I understand it, the spin locks protect the hslot, and freeing the
> skb does not walk/change or interact with the hslot in any way.
>
Yes, but this single skb freeing is on a very slow multicast path
(it happens when we receive a multicast packet with no listener, which should
not happen on a multicast-aware network...)
If you really want to optimize this part, we could use an array of
32 (or 64) socket pointers, so that the really expensive work
(skb_clone(), udp_queue_rcv_skb()) can be performed outside of the lock.
Something like this untested patch:
 net/ipv4/udp.c |   72 ++++++++++++++++++++++++++++++++++++++++++++++++------------------------
 1 files changed, 48 insertions(+), 24 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d5e75e9..5d71aee 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1190,6 +1190,24 @@ drop:
         return -1;
 }
 
+
+static void flush_stack(struct sock **stack, unsigned int count,
+                        struct sk_buff *skb, unsigned int final)
+{
+        unsigned int i;
+        struct sk_buff *skb1 = NULL;
+
+        for (i = 0; i < count; i++) {
+                if (likely(skb1 == NULL))
+                        skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+
+                if (likely(skb1 && udp_queue_rcv_skb(stack[i], skb1) <= 0))
+                        skb1 = NULL;
+        }
+        if (unlikely(skb1))
+                consume_skb(skb1);
+}
+
 /*
  *      Multicasts and broadcasts go to each listener.
  *
@@ -1201,38 +1219,44 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
                                     __be32 saddr, __be32 daddr,
                                     struct udp_table *udptable)
 {
-        struct sock *sk;
+        struct sock *sk, *stack[256 / sizeof(void *)];
         struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
         int dif;
+        unsigned int i, count = 0;
 
         spin_lock(&hslot->lock);
         sk = sk_nulls_head(&hslot->head);
         dif = skb->dev->ifindex;
         sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
-        if (sk) {
-                struct sock *sknext = NULL;
-
-                do {
-                        struct sk_buff *skb1 = skb;
-
-                        sknext = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
-                                                   daddr, uh->source, saddr,
-                                                   dif);
-                        if (sknext)
-                                skb1 = skb_clone(skb, GFP_ATOMIC);
-
-                        if (skb1) {
-                                int ret = udp_queue_rcv_skb(sk, skb1);
-                                if (ret > 0)
-                                        /* we should probably re-process instead
-                                         * of dropping packets here. */
-                                        kfree_skb(skb1);
-                        }
-                        sk = sknext;
-                } while (sknext);
-        } else
-                consume_skb(skb);
+        while (sk) {
+                stack[count++] = sk;
+                if (unlikely(count == ARRAY_SIZE(stack))) {
+                        flush_stack(stack, count, skb, ~0);
+                        count = 0;
+                }
+
+                sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
+                                       daddr, uh->source, saddr, dif);
+        }
+        /*
+         * before releasing the lock, we must take a reference on each socket
+         */
+        for (i = 0; i < count; i++)
+                sock_hold(stack[i]);
+
         spin_unlock(&hslot->lock);
+
+        /*
+         * do the slow work with no lock held
+         */
+        if (count) {
+                flush_stack(stack, count, skb, count - 1);
+
+                for (i = 0; i < count; i++)
+                        sock_put(stack[i]);
+        } else {
+                consume_skb(skb);
+        }
         return 0;
 }
 
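Note the "final" trick in flush_stack(): every receiver but the last gets a
clone, while the last one consumes the original skb, saving one skb_clone()
per delivered packet.

FWIW, here is the general shape of this "snapshot under the lock, do the
expensive work outside it" pattern in a minimal stand-alone form (a
user-space sketch with made-up names and plain pthreads, nothing kernel
specific, untested in the same spirit as the patch):

#include <pthread.h>
#include <stdio.h>

#define BATCH 32                /* bounded, like stack[256 / sizeof(void *)] */

struct item {
        int refcnt;             /* toy refcount standing in for sock_hold()/sock_put() */
        int id;
        struct item *next;
};

static struct item *list_head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void expensive_work(struct item *it)
{
        /* stand-in for skb_clone() + udp_queue_rcv_skb() */
        printf("delivering to item %d\n", it->id);
}

static void deliver_all(void)
{
        struct item *batch[BATCH];
        struct item *it;
        unsigned int i, count = 0;

        pthread_mutex_lock(&list_lock);
        for (it = list_head; it; it = it->next) {
                batch[count++] = it;
                if (count == BATCH) {
                        /* array full: flush this batch under the lock, as the patch does */
                        for (i = 0; i < count; i++)
                                expensive_work(batch[i]);
                        count = 0;
                }
        }
        /* references keep the items alive once the lock is dropped */
        for (i = 0; i < count; i++)
                batch[i]->refcnt++;
        pthread_mutex_unlock(&list_lock);

        /* the expensive per-item work runs with no lock held */
        for (i = 0; i < count; i++) {
                expensive_work(batch[i]);
                batch[i]->refcnt--;     /* sock_put() equivalent (no freeing in this toy) */
        }
}

int main(void)
{
        struct item b = { 1, 2, NULL };
        struct item a = { 1, 1, &b };

        list_head = &a;
        deliver_all();
        return 0;
}

The array is bounded on purpose: if it fills up, the batch is flushed while
still holding the lock, so lock hold time stays bounded either way.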
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html