Message-Id: <20250916064614.605075-3-xuanqiang.luo@linux.dev>
Date: Tue, 16 Sep 2025 14:46:13 +0800
From: xuanqiang.luo@...ux.dev
To: edumazet@...gle.com,
kuniyu@...gle.com
Cc: kerneljasonxing@...il.com,
davem@...emloft.net,
kuba@...nel.org,
netdev@...r.kernel.org,
Xuanqiang Luo <luoxuanqiang@...inos.cn>
Subject: [PATCH net-next v2 2/3] inet: Avoid ehash lookup race in inet_ehash_insert()

From: Xuanqiang Luo <luoxuanqiang@...inos.cn>

Since ehash lookups are lockless, if one CPU performs a lookup while
another concurrently deletes and inserts on the same chain (removing
the reqsk and inserting the sk), the lookup may fail to find either
socket, and an RST may be sent.

The race looks like this:

CPU 0                                CPU 1
-----                                -----
inet_ehash_insert()
spin_lock()
sk_nulls_del_node_init_rcu(osk)
                                     __inet_lookup_established()
                                         (lookup failed)
__sk_nulls_add_node_rcu(sk, list)
spin_unlock()
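
For reference, the reader side is roughly the following (abridged from
__inet_lookup_established() for illustration; address matching and
refcount handling elided). The nulls check only restarts the walk when
the chain ends on an unexpected nulls marker, so a walk that runs
entirely inside the window above terminates on the expected marker and
simply reports no socket:

	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
	const struct hlist_nulls_node *node;
	struct sock *sk;

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		/* ... inet_match() + refcount_inc_not_zero() ... */
	}
	/* Only a walk that drifted onto another chain is restarted. */
	if (get_nulls_value(node) != slot)
		goto begin;
	return NULL;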

Since both the deletion and the insertion operate on the same ehash
chain, introduce two sk_nulls_replace_* helpers that swap the old node
for the new one in a single replacement, so a concurrent lookup always
observes either the old or the new socket.
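
The hlist_nulls_replace_init_rcu() primitive used below is added by
patch 1/3 of this series (not quoted here). Modeled on
hlist_replace_rcu(), a minimal sketch of it could look like this; the
single rcu_assign_pointer() publishes the new node at the old node's
position, so readers never see the chain without one of the two
sockets:

	static inline void
	hlist_nulls_replace_init_rcu(struct hlist_nulls_node *old,
				     struct hlist_nulls_node *new)
	{
		struct hlist_nulls_node *next = old->next;

		/* Take over old's links before becoming visible. */
		new->next = next;
		WRITE_ONCE(new->pprev, old->pprev);
		/* Single atomic publish at old's position in the chain. */
		rcu_assign_pointer(*(struct hlist_nulls_node __rcu **)new->pprev,
				   new);
		if (!is_a_nulls(next))
			WRITE_ONCE(next->pprev, &new->next);
		/* "init": mark old unhashed, as the del_init_rcu helpers do. */
		WRITE_ONCE(old->pprev, NULL);
	}
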
Fixes: 5e0724d027f0 ("tcp/dccp: fix hashdance race for passive sessions")
Signed-off-by: Xuanqiang Luo <luoxuanqiang@...inos.cn>
---
 include/net/sock.h         | 23 +++++++++++++++++++++++
 net/ipv4/inet_hashtables.c |  4 +++-
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 0fd465935334..e709376eaf0a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -854,6 +854,29 @@ static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
 	return rc;
 }
 
+static inline bool __sk_nulls_replace_node_init_rcu(struct sock *old,
+						    struct sock *new)
+{
+	if (sk_hashed(old)) {
+		hlist_nulls_replace_init_rcu(&old->sk_nulls_node,
+					     &new->sk_nulls_node);
+		return true;
+	}
+	return false;
+}
+
+static inline bool sk_nulls_replace_node_init_rcu(struct sock *old,
+						  struct sock *new)
+{
+	bool rc = __sk_nulls_replace_node_init_rcu(old, new);
+
+	if (rc) {
+		WARN_ON(refcount_read(&old->sk_refcnt) == 1);
+		__sock_put(old);
+	}
+	return rc;
+}
+
 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
 {
 	hlist_add_head(&sk->sk_node, list);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index ef4ccfd46ff6..83c9ec625419 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -685,7 +685,8 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
 	spin_lock(lock);
 	if (osk) {
 		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
-		ret = sk_nulls_del_node_init_rcu(osk);
+		ret = sk_nulls_replace_node_init_rcu(osk, sk);
+		goto unlock;
 	} else if (found_dup_sk) {
 		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
 		if (*found_dup_sk)
@@ -695,6 +696,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
 	if (ret)
 		__sk_nulls_add_node_rcu(sk, list);
 
+unlock:
 	spin_unlock(lock);
 
 	return ret;
--
2.25.1