[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CANn89iKGa9TWnGZJJZAL-B-onmJ7gRXQsWxy=7FvwJr+Y2DuCg@mail.gmail.com>
Date: Tue, 22 Oct 2024 16:40:34 +0200
From: Eric Dumazet <edumazet@...gle.com>
To: Gilad Naaman <gnaaman@...venets.com>
Cc: netdev <netdev@...r.kernel.org>, "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Kuniyuki Iwashima <kuniyu@...zon.com>
Subject: Re: [PATCH net-next v7 1/6] neighbour: Add hlist_node to struct neighbour
On Tue, Oct 22, 2024 at 3:44 PM Gilad Naaman <gnaaman@...venets.com> wrote:
>
> Add a doubly-linked node to neighbours, so that they
> can be deleted without iterating the entire bucket they're in.
>
> Signed-off-by: Gilad Naaman <gnaaman@...venets.com>
> ---
> include/net/neighbour.h | 2 ++
> net/core/neighbour.c | 40 ++++++++++++++++++++++++++++++++++++++--
> 2 files changed, 40 insertions(+), 2 deletions(-)
>
> diff --git a/include/net/neighbour.h b/include/net/neighbour.h
> index 3887ed9e5026..0402447854c7 100644
> --- a/include/net/neighbour.h
> +++ b/include/net/neighbour.h
> @@ -136,6 +136,7 @@ struct neigh_statistics {
>
> struct neighbour {
> struct neighbour __rcu *next;
> + struct hlist_node hash;
> struct neigh_table *tbl;
> struct neigh_parms *parms;
> unsigned long confirmed;
> @@ -191,6 +192,7 @@ struct pneigh_entry {
>
> struct neigh_hash_table {
> struct neighbour __rcu **hash_buckets;
> + struct hlist_head *hash_heads;
> unsigned int hash_shift;
> __u32 hash_rnd[NEIGH_NUM_HASH_RND];
> struct rcu_head rcu;
> diff --git a/net/core/neighbour.c b/net/core/neighbour.c
> index 395ae1626eef..7df4cfc0ac9a 100644
> --- a/net/core/neighbour.c
> +++ b/net/core/neighbour.c
> @@ -217,6 +217,7 @@ static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
> neigh = rcu_dereference_protected(n->next,
> lockdep_is_held(&tbl->lock));
> rcu_assign_pointer(*np, neigh);
> + hlist_del_rcu(&n->hash);
> neigh_mark_dead(n);
> retval = true;
> }
> @@ -403,6 +404,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
> rcu_assign_pointer(*np,
> rcu_dereference_protected(n->next,
> lockdep_is_held(&tbl->lock)));
> + hlist_del_rcu(&n->hash);
> write_lock(&n->lock);
> neigh_del_timer(n);
> neigh_mark_dead(n);
> @@ -530,27 +532,47 @@ static void neigh_get_hash_rnd(u32 *x)
>
> static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
> {
> + size_t hash_heads_size = (1 << shift) * sizeof(struct hlist_head);
> size_t size = (1 << shift) * sizeof(struct neighbour *);
> - struct neigh_hash_table *ret;
> struct neighbour __rcu **buckets;
> + struct hlist_head *hash_heads;
> + struct neigh_hash_table *ret;
> int i;
>
> + hash_heads = NULL;
> +
> ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
> if (!ret)
> return NULL;
> if (size <= PAGE_SIZE) {
> buckets = kzalloc(size, GFP_ATOMIC);
> +
> + if (buckets) {
> + hash_heads = kzalloc(hash_heads_size, GFP_ATOMIC);
> + if (!hash_heads)
> + kfree(buckets);
> + }
Oh well, I strongly suggest we first switch to kvzalloc() and
kvfree(), instead of copy/pasting old workarounds...
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 395ae1626eef2f22f5b81051671371ed67eb5943..a44511218a600ff55513a7255e90641cd7c2e983
100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -538,14 +538,7 @@ static struct neigh_hash_table
*neigh_hash_alloc(unsigned int shift)
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
return NULL;
- if (size <= PAGE_SIZE) {
- buckets = kzalloc(size, GFP_ATOMIC);
- } else {
- buckets = (struct neighbour __rcu **)
- __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
- get_order(size));
- kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
- }
+ buckets = kvzalloc(size, GFP_ATOMIC);
if (!buckets) {
kfree(ret);
return NULL;
@@ -562,15 +555,8 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
struct neigh_hash_table *nht = container_of(head,
struct neigh_hash_table,
rcu);
- size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
- struct neighbour __rcu **buckets = nht->hash_buckets;
- if (size <= PAGE_SIZE) {
- kfree(buckets);
- } else {
- kmemleak_free(buckets);
- free_pages((unsigned long)buckets, get_order(size));
- }
+ kvfree(nht->hash_buckets);
kfree(nht);
}
Powered by blists - more mailing lists