Message-ID: <20170421194515.GB8853@breakpoint.cc>
Date: Fri, 21 Apr 2017 21:45:15 +0200
From: Florian Westphal <fw@...len.de>
To: Florian Westphal <fw@...len.de>
Cc: Eric Dumazet <edumazet@...gle.com>,
Andrey Konovalov <andreyknvl@...gle.com>,
Cong Wang <xiyou.wangcong@...il.com>,
netdev <netdev@...r.kernel.org>,
LKML <linux-kernel@...r.kernel.org>,
Dmitry Vyukov <dvyukov@...gle.com>,
Kostya Serebryany <kcc@...gle.com>,
syzkaller <syzkaller@...glegroups.com>
Subject: Re: net: cleanup_net is slow

Florian Westphal <fw@...len.de> wrote:
> Indeed. Setting net.netfilter.nf_conntrack_default_on=0 cuts cleanup
> time by 2/3 ...
>
> nf unregister is way too happy to issue synchronize_net(); I'll work on
> a fix.
I'll test this patch as a start. Maybe we can also leverage exit_batch
more on the netfilter side.
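
The gist of the change, as a rough sketch (__unlink_hook is a stand-in
for the __nf_unregister_net_hook helper introduced below, not literal
code):

	/* before: every hook pays for its own pair of grace periods */
	while (n-- > 0) {
		p = __unlink_hook(net, &reg[n]);
		synchronize_net();
		nf_queue_nf_hook_drop(net, p);
		synchronize_net();
		kfree(p);
	}

	/* after: one pair of grace periods per batch of up to 16 hooks */
	for (i = 0; i < n; i++)
		to_free[i] = __unlink_hook(net, &reg[i]);

	synchronize_net();

	for (i = 0; i < n; i++)
		nf_queue_nf_hook_drop(net, to_free[i]);

	synchronize_net();

	for (i = 0; i < n; i++)
		kfree(to_free[i]);
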
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index a87a6f8a74d8..08fe1f526265 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -126,14 +126,15 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
 }
 EXPORT_SYMBOL(nf_register_net_hook);
 
-void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
+static struct nf_hook_entry *
+__nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 {
 	struct nf_hook_entry __rcu **pp;
 	struct nf_hook_entry *p;
 
 	pp = nf_hook_entry_head(net, reg);
 	if (WARN_ON_ONCE(!pp))
-		return;
+		return NULL;
 
 	mutex_lock(&nf_hook_mutex);
 	for (; (p = nf_entry_dereference(*pp)) != NULL; pp = &p->next) {
@@ -145,7 +146,7 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 	mutex_unlock(&nf_hook_mutex);
 	if (!p) {
 		WARN(1, "nf_unregister_net_hook: hook not found!\n");
-		return;
+		return NULL;
 	}
 #ifdef CONFIG_NETFILTER_INGRESS
 	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
@@ -154,6 +155,17 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 #ifdef HAVE_JUMP_LABEL
 	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
+
+	return p;
+}
+
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+	struct nf_hook_entry *p = __nf_unregister_net_hook(net, reg);
+
+	if (!p)
+		return;
+
 	synchronize_net();
 	nf_queue_nf_hook_drop(net, p);
 	/* other cpu might still process nfqueue verdict that used reg */
@@ -183,10 +195,36 @@ int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
 EXPORT_SYMBOL(nf_register_net_hooks);
 
 void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
-			     unsigned int n)
+			     unsigned int hookcount)
 {
-	while (n-- > 0)
-		nf_unregister_net_hook(net, &reg[n]);
+	struct nf_hook_entry *to_free[16];
+	unsigned int i, n;
+
+	WARN_ON_ONCE(hookcount > ARRAY_SIZE(to_free));
+
+next_round:
+	n = min_t(unsigned int, hookcount, ARRAY_SIZE(to_free));
+
+	for (i = 0; i < n; i++)
+		to_free[i] = __nf_unregister_net_hook(net, &reg[i]);
+
+	synchronize_net();
+
+	for (i = 0; i < n; i++) {
+		if (to_free[i])
+			nf_queue_nf_hook_drop(net, to_free[i]);
+	}
+
+	synchronize_net();
+
+	for (i = 0; i < n; i++)
+		kfree(to_free[i]);
+
+	if (n < hookcount) {
+		hookcount -= n;
+		reg += n;
+		goto next_round;
+	}
 }
 EXPORT_SYMBOL(nf_unregister_net_hooks);
 
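
Callers don't change; anything that already unregisters its hooks as an
array via a single nf_unregister_net_hooks() call gets the batching for
free. Hypothetical caller for illustration only (my_hook and my_ops are
made-up names, the ops layout is the usual one):

	static unsigned int my_hook(void *priv, struct sk_buff *skb,
				    const struct nf_hook_state *state)
	{
		return NF_ACCEPT;
	}

	static struct nf_hook_ops my_ops[] = {
		{
			.hook	  = my_hook,
			.pf	  = NFPROTO_IPV4,
			.hooknum  = NF_INET_LOCAL_IN,
			.priority = NF_IP_PRI_FILTER,
		},
		{
			.hook	  = my_hook,
			.pf	  = NFPROTO_IPV4,
			.hooknum  = NF_INET_LOCAL_OUT,
			.priority = NF_IP_PRI_FILTER,
		},
	};

	/* in the pernet exit path: both hooks now share two grace
	 * periods instead of paying two synchronize_net() each.
	 */
	nf_unregister_net_hooks(net, my_ops, ARRAY_SIZE(my_ops));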