Message-ID: <152929037048.23173.1713873344580842520.stgit@noble>
Date: Mon, 18 Jun 2018 12:52:50 +1000
From: NeilBrown <neilb@...e.com>
To: Thomas Graf <tgraf@...g.ch>,
Herbert Xu <herbert@...dor.apana.org.au>,
David Miller <davem@...emloft.net>
Cc: netdev@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 5/7] rhashtable: simplify nested_table_alloc() and
rht_bucket_nested_insert()
Now that we don't use the hash value or shift in nested_table_alloc(),
there is room for simplification.
We only need to pass an "is this a leaf" flag to nested_table_alloc(),
and don't need to track as much information in
rht_bucket_nested_insert().
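
For illustration only, here is a rough userspace sketch of the
simplified interface.  calloc, plain pointers and NULL stand in for
kzalloc/GFP_ATOMIC, the __rcu annotations and INIT_RHT_NULLS_HEAD, and
the struct rhashtable argument is dropped, so this is not the kernel
code; it only shows that the caller says "this level is a leaf" and
nothing else needs to be threaded through:

#include <stdbool.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

union nested_table {
	union nested_table *table;	/* interior level: next page down */
	void *bucket;			/* leaf level: head of a hash chain */
};

static union nested_table *nested_table_alloc(union nested_table **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	size_t i;

	if (*prev)			/* someone already allocated this page */
		return *prev;

	ntbl = calloc(1, PAGE_SIZE);
	if (ntbl && leaf) {
		/* only leaf pages hold buckets that need a list head;
		 * the kernel uses INIT_RHT_NULLS_HEAD here, not NULL */
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			ntbl[i].bucket = NULL;
	}

	*prev = ntbl;
	return ntbl;
}

int main(void)
{
	union nested_table *top = NULL;

	/* the top level is never a leaf ... */
	nested_table_alloc(&top, false);
	/* ... but its first slot can point at a leaf page of buckets */
	if (top) {
		nested_table_alloc(&top[0].table, true);
		free(top[0].table);
	}
	free(top);
	return 0;
}
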
Note there is another minor cleanup in nested_table_alloc() here.
The number of elements in a page of "union nested_table" entries is
most naturally

	PAGE_SIZE / sizeof(ntbl[0])

The previous code had

	PAGE_SIZE / sizeof(ntbl[0].bucket)

which happens to be the correct value only because the bucket member
uses all the space in the union.
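
A minimal standalone check of that claim (void pointers stand in for
the kernel's __rcu annotated members, so the layout is only an
approximation of lib/rhashtable.c): both members of the union are
pointers, so the bucket member fills the union and the two divisors
happen to agree.

#include <assert.h>
#include <stdio.h>

union nested_table {
	union nested_table *table;
	void *bucket;
};

int main(void)
{
	union nested_table ntbl[1];

	/* the bucket member occupies the whole union */
	assert(sizeof(ntbl[0]) == sizeof(ntbl[0].bucket));
	printf("%zu elements per 4096-byte page\n",
	       (size_t)4096 / sizeof(ntbl[0]));
	return 0;
}
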
Acked-by: Herbert Xu <herbert@...dor.apana.org.au>
Signed-off-by: NeilBrown <neilb@...e.com>
---
lib/rhashtable.c | 18 ++++++------------
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a81cd27d518c..2aa41c15df17 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -116,7 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
static union nested_table *nested_table_alloc(struct rhashtable *ht,
union nested_table __rcu **prev,
- unsigned int shifted)
+ bool leaf)
{
union nested_table *ntbl;
int i;
@@ -127,8 +127,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
- if (ntbl && shifted) {
- for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
+ if (ntbl && leaf) {
+ for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
}
@@ -155,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
return NULL;
if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
- 0)) {
+ false)) {
kfree(tbl);
return NULL;
}
@@ -1207,24 +1207,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
unsigned int index = hash & ((1 << tbl->nest) - 1);
unsigned int size = tbl->size >> tbl->nest;
union nested_table *ntbl;
- unsigned int shifted;
- unsigned int nhash;
ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
hash >>= tbl->nest;
- nhash = index;
- shifted = tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0);
+ size <= (1 << shift));
while (ntbl && size > (1 << shift)) {
index = hash & ((1 << shift) - 1);
size >>= shift;
hash >>= shift;
- nhash |= index << shifted;
- shifted += shift;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0);
+ size <= (1 << shift));
}
if (!ntbl)
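
As a toy walk of the index arithmetic in rht_bucket_nested_insert()
after this change (the table size, nest and shift values below are made
up for the example, and the allocation itself is replaced by a printf):
each level peels "shift" bits off the hash, and the only question asked
per level is whether the next page down is a leaf.

#include <stdio.h>

int main(void)
{
	unsigned int shift = 9;			/* pointers per page: 1 << 9 on 64-bit */
	unsigned int nest = 5;			/* example top-level width */
	unsigned int tbl_size = 1u << 23;	/* example full table size */
	unsigned int hash = 0x0badc0deu;	/* example hash value */
	unsigned int index = hash & ((1u << nest) - 1);
	unsigned int size = tbl_size >> nest;

	hash >>= nest;
	printf("top level: slot %u, child is %s\n", index,
	       size <= (1u << shift) ? "a leaf" : "interior");

	while (size > (1u << shift)) {
		index = hash & ((1u << shift) - 1);
		size >>= shift;
		hash >>= shift;
		printf("next level: slot %u, child is %s\n", index,
		       size <= (1u << shift) ? "a leaf" : "interior");
	}
	return 0;
}
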