[NETFILTER]: nf_conntrack: early_drop improvement

When the maximum number of conntrack entries is reached and a new one
needs to be allocated, conntrack tries to drop an unassured connection
from the same hash bucket the new conntrack would hash to. Since with a
properly sized hash the average number of entries per bucket is 1, the
chances of actually finding one are not very good. This patch increases
the range of scanned buckets to 8 to improve those chances.

Based on patch by Vasily Averin.

Signed-off-by: Patrick McHardy

---
commit e43d18c1b499b10971954aead30fb403aed89180
tree ce34b0c817a29d78727705e42e267756917aef28
parent 665d98d03473cab252830129f414e1b38fb2b038
author Patrick McHardy Wed, 27 Jun 2007 13:57:47 +0200
committer Patrick McHardy Wed, 27 Jun 2007 13:57:47 +0200

 net/netfilter/nf_conntrack_core.c |   25 ++++++++++++++++---------
 1 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index d7e62ad..72b3ba0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -377,24 +377,32 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
+#define NF_CT_EVICTION_RANGE	8
+
 /* There's a small race here where we may free a just-assured
    connection.  Too bad: we're in trouble anyway. */
-static int early_drop(struct hlist_head *chain)
+static int early_drop(unsigned int hash)
 {
 	/* Use oldest entry, which is roughly LRU */
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct = NULL, *tmp;
 	struct hlist_node *n;
+	unsigned int i;
 	int dropped = 0;
 
 	read_lock_bh(&nf_conntrack_lock);
-	hlist_for_each_entry(h, n, chain, hnode) {
-		tmp = nf_ct_tuplehash_to_ctrack(h);
-		if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
-			ct = tmp;
+	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
+		hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
+			tmp = nf_ct_tuplehash_to_ctrack(h);
+			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
+				ct = tmp;
+		}
+		if (ct) {
+			atomic_inc(&ct->ct_general.use);
+			break;
+		}
+		hash = (hash + 1) % nf_conntrack_htable_size;
 	}
-	if (ct)
-		atomic_inc(&ct->ct_general.use);
 	read_unlock_bh(&nf_conntrack_lock);
 
 	if (!ct)
@@ -425,8 +433,7 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 	if (nf_conntrack_max
 	    && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
 		unsigned int hash = hash_conntrack(orig);
-		/* Try dropping from this hash chain. */
-		if (!early_drop(&nf_conntrack_hash[hash])) {
+		if (!early_drop(hash)) {
 			atomic_dec(&nf_conntrack_count);
 			if (net_ratelimit())
 				printk(KERN_WARNING
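
For readers outside the kernel tree, the control flow of the new
early_drop() can be illustrated with a minimal userspace sketch. This is
only an illustration under assumed, simplified data structures: the names
entry, table, find_unassured() and early_drop_candidate() are hypothetical
and not kernel API, and locking and reference counting are omitted. Only
the wrap-around scan over up to NF_CT_EVICTION_RANGE buckets mirrors the
patch:

/* Userspace sketch of the eviction-range scan; hypothetical names,
 * not kernel code. Build with: cc -std=c99 sketch.c */
#include <stdbool.h>
#include <stdio.h>

#define EVICTION_RANGE 8	/* mirrors NF_CT_EVICTION_RANGE */
#define HTABLE_SIZE    16	/* stand-in for nf_conntrack_htable_size */

struct entry {
	bool assured;		/* stand-in for IPS_ASSURED_BIT */
	struct entry *next;
};

static struct entry *table[HTABLE_SIZE];

/* Walk one chain and remember the last unassured entry; since new
 * entries are inserted at the head, the last one is roughly the
 * oldest, i.e. approximately LRU. */
static struct entry *find_unassured(unsigned int hash)
{
	struct entry *e, *victim = NULL;

	for (e = table[hash]; e != NULL; e = e->next)
		if (!e->assured)
			victim = e;
	return victim;
}

/* Scan up to EVICTION_RANGE buckets, wrapping around the end of the
 * table, and return the first eviction candidate found. */
static struct entry *early_drop_candidate(unsigned int hash)
{
	struct entry *victim = NULL;
	unsigned int i;

	for (i = 0; i < EVICTION_RANGE; i++) {
		victim = find_unassured(hash);
		if (victim)
			break;
		hash = (hash + 1) % HTABLE_SIZE;
	}
	return victim;
}

int main(void)
{
	static struct entry a = { .assured = true };
	static struct entry b = { .assured = false };

	table[3] = &a;	/* assured: never a candidate */
	table[5] = &b;	/* unassured: reached by the scan starting at 3 */
	printf("victim found: %s\n",
	       early_drop_candidate(3) == &b ? "yes" : "no");
	return 0;
}

Starting from the bucket the new conntrack hashes to, the scan visits
adjacent buckets modulo the table size, so with an average of about one
entry per bucket it inspects roughly eight candidates instead of one
before giving up.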