Message-Id: <20260207-unix-proto-update-null-ptr-deref-v2-2-9f091330e7cd@rbox.co>
Date: Sat, 07 Feb 2026 15:34:55 +0100
From: Michal Luczaj <mhal@...x.co>
To: John Fastabend <john.fastabend@...il.com>,
	Jakub Sitnicki <jakub@...udflare.com>,
	Kuniyuki Iwashima <kuniyu@...gle.com>,
	"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
	Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
	Simon Horman <horms@...nel.org>, Daniel Borkmann <daniel@...earbox.net>,
	Willem de Bruijn <willemb@...gle.com>, Cong Wang <cong.wang@...edance.com>,
	Alexei Starovoitov <ast@...nel.org>, Yonghong Song <yhs@...com>,
	Andrii Nakryiko <andrii@...nel.org>, Eduard Zingerman <eddyz87@...il.com>,
	Martin KaFai Lau <martin.lau@...ux.dev>, Song Liu <song@...nel.org>,
	Yonghong Song <yonghong.song@...ux.dev>, KP Singh <kpsingh@...nel.org>,
	Stanislav Fomichev <sdf@...ichev.me>, Hao Luo <haoluo@...gle.com>,
	Jiri Olsa <jolsa@...nel.org>, Shuah Khan <shuah@...nel.org>
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org,
	linux-kernel@...r.kernel.org, linux-kselftest@...r.kernel.org,
	Michal Luczaj <mhal@...x.co>
Subject: [PATCH bpf v2 2/4] bpf, sockmap: Use
 sock_map_sk_{acquire,release}() where open-coded

Instead of repeating the same (un)locking pattern, reuse
sock_map_sk_{acquire,release}(). This centralizes the code and makes it
easier to adapt sockmap to af_unix-specific locking.

Signed-off-by: Michal Luczaj <mhal@...x.co>
---
 net/core/sock_map.c | 21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)
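
For review context, a minimal sketch of the reused helpers as they
already sit in net/core/sock_map.c (the __acquires/__releases sparse
annotations are assumed from the in-tree definitions, not part of this
patch):

static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
{
	/* Take the socket lock, then enter an RCU read-side section. */
	lock_sock(sk);
	rcu_read_lock();
}

static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* Unwind in reverse order. */
	rcu_read_unlock();
	release_sock(sk);
}

Funneling every open-coded site through this pair means a follow-up
patch can make the acquire/release step protocol-aware (e.g. for
af_unix) by touching exactly one place.
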
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index d4f15b846ad4..b6586d9590b7 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -353,11 +353,9 @@ static void sock_map_free(struct bpf_map *map)
 		sk = xchg(psk, NULL);
 		if (sk) {
 			sock_hold(sk);
-			lock_sock(sk);
-			rcu_read_lock();
+			sock_map_sk_acquire(sk);
 			sock_map_unref(sk, psk);
-			rcu_read_unlock();
-			release_sock(sk);
+			sock_map_sk_release(sk);
 			sock_put(sk);
 		}
 	}
@@ -1176,11 +1174,9 @@ static void sock_hash_free(struct bpf_map *map)
 		 */
 		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
 			hlist_del(&elem->node);
-			lock_sock(elem->sk);
-			rcu_read_lock();
+			sock_map_sk_acquire(elem->sk);
 			sock_map_unref(elem->sk, elem);
-			rcu_read_unlock();
-			release_sock(elem->sk);
+			sock_map_sk_release(elem->sk);
 			sock_put(elem->sk);
 			sock_hash_free_elem(htab, elem);
 		}
@@ -1676,8 +1672,7 @@ void sock_map_close(struct sock *sk, long timeout)
 	void (*saved_close)(struct sock *sk, long timeout);
 	struct sk_psock *psock;
 
-	lock_sock(sk);
-	rcu_read_lock();
+	sock_map_sk_acquire(sk);
 	psock = sk_psock(sk);
 	if (likely(psock)) {
 		saved_close = psock->saved_close;
@@ -1685,16 +1680,14 @@ void sock_map_close(struct sock *sk, long timeout)
 		psock = sk_psock_get(sk);
 		if (unlikely(!psock))
 			goto no_psock;
-		rcu_read_unlock();
 		sk_psock_stop(psock);
-		release_sock(sk);
+		sock_map_sk_release(sk);
 		cancel_delayed_work_sync(&psock->work);
 		sk_psock_put(sk, psock);
 	} else {
 		saved_close = READ_ONCE(sk->sk_prot)->close;
 no_psock:
-		rcu_read_unlock();
-		release_sock(sk);
+		sock_map_sk_release(sk);
 	}
 
 	/* Make sure we do not recurse. This is a bug.
--
2.52.0