Message-Id: <20190121134914.168149053@linuxfoundation.org>
Date: Mon, 21 Jan 2019 14:47:58 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Florian Westphal <fw@...len.de>,
Shawn Bohrer <sbohrer@...udflare.com>,
Pablo Neira Ayuso <pablo@...filter.org>
Subject: [PATCH 4.19 06/99] netfilter: nf_conncount: replace CONNCOUNT_LOCK_SLOTS with CONNCOUNT_SLOTS

4.19-stable review patch. If anyone has any objections, please let me know.

------------------

From: Shawn Bohrer <sbohrer@...udflare.com>

commit c78e7818f16f687389174c4569243abbec8dc68f upstream.

Most of the time these were the same value anyway, but when
CONFIG_LOCKDEP was enabled we would use a smaller number of locks to
reduce overhead. Unfortunately having two values is confusing and not
worth the complexity.

This fixes a bug where tree_gc_worker() would only GC up to
CONNCOUNT_LOCK_SLOTS trees which meant when CONFIG_LOCKDEP was enabled
not all trees would be GCed by tree_gc_worker().
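
To illustrate the bug, here is a standalone userspace sketch (not kernel
code; the constants match the old LOCKDEP values and the index walk is a
simplified version of the old tree_gc_worker() scheduling): with 8 lock
slots but 256 trees, the worker's tree index can never leave the first 8
trees.

#include <stdio.h>
#include <stdbool.h>

#define CONNCOUNT_SLOTS		256U	/* number of trees */
#define CONNCOUNT_LOCK_SLOTS	8U	/* old value under CONFIG_LOCKDEP */

int main(void)
{
	bool visited[CONNCOUNT_SLOTS] = { false };
	unsigned int gc_tree = 0, reached = 0;

	/* Emulate repeated GC passes: the old worker picked the tree via
	 * "gc_tree % CONNCOUNT_LOCK_SLOTS" while advancing the stored
	 * index modulo CONNCOUNT_SLOTS, so the walk wraps after 8 trees. */
	for (unsigned int pass = 0; pass < 10 * CONNCOUNT_SLOTS; pass++) {
		unsigned int tree = gc_tree % CONNCOUNT_LOCK_SLOTS;

		visited[tree] = true;
		gc_tree = (tree + 1) % CONNCOUNT_SLOTS;
	}

	for (unsigned int i = 0; i < CONNCOUNT_SLOTS; i++)
		reached += visited[i];

	printf("trees reachable by GC: %u of %u\n", reached, CONNCOUNT_SLOTS);
	return 0;
}

This prints "trees reachable by GC: 8 of 256"; with a single constant, as
after this patch, the same walk reaches all 256 trees.
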
Fixes: 5c789e131cbb9 ("netfilter: nf_conncount: Add list lock and gc worker, and RCU for init tree search")
Signed-off-by: Florian Westphal <fw@...len.de>
Signed-off-by: Shawn Bohrer <sbohrer@...udflare.com>
Signed-off-by: Pablo Neira Ayuso <pablo@...filter.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
net/netfilter/nf_conncount.c | 19 +++++--------------
1 file changed, 5 insertions(+), 14 deletions(-)

--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -33,12 +33,6 @@
#define CONNCOUNT_SLOTS 256U
-#ifdef CONFIG_LOCKDEP
-#define CONNCOUNT_LOCK_SLOTS 8U
-#else
-#define CONNCOUNT_LOCK_SLOTS 256U
-#endif
-
#define CONNCOUNT_GC_MAX_NODES 8
#define MAX_KEYLEN 5
@@ -60,7 +54,7 @@ struct nf_conncount_rb {
struct rcu_head rcu_head;
};
-static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
+static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;
struct nf_conncount_data {
unsigned int keylen;
@@ -353,7 +347,7 @@ insert_tree(struct net *net,
unsigned int count = 0, gc_count = 0;
bool node_found = false;
- spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+ spin_lock_bh(&nf_conncount_locks[hash]);
parent = NULL;
rbnode = &(root->rb_node);
@@ -430,7 +424,7 @@ insert_tree(struct net *net,
rb_link_node_rcu(&rbconn->node, parent, rbnode);
rb_insert_color(&rbconn->node, root);
out_unlock:
- spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+ spin_unlock_bh(&nf_conncount_locks[hash]);
return count;
}
@@ -499,7 +493,7 @@ static void tree_gc_worker(struct work_s
struct rb_node *node;
unsigned int tree, next_tree, gc_count = 0;
- tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS;
+ tree = data->gc_tree % CONNCOUNT_SLOTS;
root = &data->root[tree];
rcu_read_lock();
@@ -621,10 +615,7 @@ static int __init nf_conncount_modinit(v
{
int i;
- BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
- BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
-
- for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
+ for (i = 0; i < CONNCOUNT_SLOTS; ++i)
spin_lock_init(&nf_conncount_locks[i]);
conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",