Message-Id: <20220412202613.234896-4-axboe@kernel.dk>
Date: Tue, 12 Apr 2022 14:26:12 -0600
From: Jens Axboe <axboe@kernel.dk>
To: io-uring@vger.kernel.org, netdev@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>
Subject: [PATCH 3/4] net: add support for socket no-lock

If we have a guaranteed single user of a socket, then we can optimize
the lock/release cycle for it. With sk->sk_no_lock set, lock_sock()
and release_sock() reduce to plain stores of sk->sk_lock.owned paired
with memory barriers, avoiding the spinlock, the wait queue, and the
lockdep bookkeeping; release_sock() still falls back to the locked
slow path when backlog packets have accumulated.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
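A note on intended usage, not part of the applied patch: below is a
minimal sketch, assuming a hypothetical owner that can guarantee it is
the only lock_sock()/release_sock() user of the socket. The helper
name is illustrative only, and the sk_no_lock field itself is
introduced elsewhere in this series.

	/*
	 * Hypothetical opt-in helper: must run while no other context
	 * can reach the socket (e.g. right after creation), since the
	 * flag changes how lock_sock()/release_sock() behave.
	 */
	static void sock_enable_no_lock(struct sock *sk)
	{
		sk->sk_no_lock = true;
	}
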
include/net/sock.h | 10 ++++++++--
net/core/sock.c    | 31 +++++++++++++++++++++++++++++++
2 files changed, 39 insertions(+), 2 deletions(-)
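With the flag set, the fast path from the single-owner context looks
like this (a sketch of the call sequence per the hunks below, with the
locking reduced to the plain stores and barriers they introduce):

	lock_sock(sk);		/* lock_sock_nolock(): owned = 1; smp_wmb() */
	/* ... rx/tx processing while owning the socket ... */
	release_sock(sk);	/* release_sock_nolock(): run
				 * sk_prot->release_cb() if set, then
				 * drop ownership: owned = 0; smp_wmb().
				 * Falls back to the locked slow path
				 * only if backlog entries accumulated
				 * in the meantime. */
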
diff --git a/include/net/sock.h b/include/net/sock.h
index 99fcc4d7eed9..aefc94677c94 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1674,7 +1674,7 @@ do { \
static inline bool lockdep_sock_is_held(const struct sock *sk)
{
- return lockdep_is_held(&sk->sk_lock) ||
+ return sk->sk_no_lock || lockdep_is_held(&sk->sk_lock) ||
lockdep_is_held(&sk->sk_lock.slock);
}
@@ -1774,18 +1774,20 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
static inline void sock_owned_by_me(const struct sock *sk)
{
#ifdef CONFIG_LOCKDEP
- WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
+ WARN_ON_ONCE(!sk->sk_no_lock && !lockdep_sock_is_held(sk) && debug_locks);
#endif
}
static inline bool sock_owned_by_user(const struct sock *sk)
{
sock_owned_by_me(sk);
+ smp_rmb();
return sk->sk_lock.owned;
}
static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
{
+ smp_rmb();
return sk->sk_lock.owned;
}
@@ -1794,6 +1796,10 @@ static inline void sock_release_ownership(struct sock *sk)
if (sock_owned_by_user_nocheck(sk)) {
sk->sk_lock.owned = 0;
+ if (sk->sk_no_lock) {
+ smp_wmb();
+ return;
+ }
/* The sk_lock has mutex_unlock() semantics: */
mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
}
diff --git a/net/core/sock.c b/net/core/sock.c
index fec892b384a4..d7eea29c5699 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2764,6 +2764,9 @@ void __lock_sock(struct sock *sk)
{
DEFINE_WAIT(wait);
+ if (WARN_ON_ONCE(sk->sk_no_lock))
+ return;
+
for (;;) {
prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
TASK_UNINTERRUPTIBLE);
@@ -3307,8 +3310,21 @@ void sock_init_data(struct socket *sock, struct sock *sk)
}
EXPORT_SYMBOL(sock_init_data);
+static inline bool lock_sock_nolock(struct sock *sk)
+{
+ if (sk->sk_no_lock) {
+ sk->sk_lock.owned = 1;
+ smp_wmb();
+ return true;
+ }
+ return false;
+}
+
void lock_sock_nested(struct sock *sk, int subclass)
{
+ if (lock_sock_nolock(sk))
+ return;
+
/* The sk_lock has mutex_lock() semantics here. */
mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
@@ -3321,8 +3337,23 @@ void lock_sock_nested(struct sock *sk, int subclass)
}
EXPORT_SYMBOL(lock_sock_nested);
+static inline bool release_sock_nolock(struct sock *sk)
+{
+ if (!sk->sk_no_lock)
+ return false;
+ if (READ_ONCE(sk->sk_backlog.tail))
+ return false;
+ if (sk->sk_prot->release_cb)
+ sk->sk_prot->release_cb(sk);
+ sock_release_ownership(sk);
+ return true;
+}
+
void release_sock(struct sock *sk)
{
+ if (release_sock_nolock(sk))
+ return;
+
spin_lock_bh(&sk->sk_lock.slock);
if (sk->sk_backlog.tail)
__release_sock(sk);
--
2.35.1