Message-ID: <20220727060909.2371812-1-kafai@fb.com>
Date: Tue, 26 Jul 2022 23:09:09 -0700
From: Martin KaFai Lau <kafai@...com>
To: <bpf@...r.kernel.org>, <netdev@...r.kernel.org>
CC: Alexei Starovoitov <ast@...nel.org>,
Andrii Nakryiko <andrii@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
David Miller <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, <kernel-team@...com>,
Paolo Abeni <pabeni@...hat.com>
Subject: [PATCH bpf-next 02/14] bpf: net: Avoid sock_setsockopt() taking sk lock when called from bpf
Most of the code in bpf_setsockopt(SOL_SOCKET) is duplicated from
sock_setsockopt(). The number of supported options keeps
increasing, and so does the duplicated code.
One issue in reusing sock_setsockopt() is that the bpf prog
has already acquired the sk lock. sockptr_t is useful to handle this:
sockptr_t already has an 'is_kernel' bit to handle the kernel-or-user
memory copy. This patch adds an 'is_bpf' bit to tell whether sk locking
has already been ensured by the bpf prog.
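
For illustration, a bpf-side caller that already holds the sk lock
could then reuse sock_setsockopt() along these lines
(bpf_sol_socket_setsockopt() is a hypothetical name used only for
this sketch, not part of this patch):

	/* Hypothetical sketch only: optval is kernel memory and the
	 * bpf prog already owns the sk lock, so mark the sockptr
	 * with both is_kernel and is_bpf.
	 */
	static int bpf_sol_socket_setsockopt(struct sock *sk, int optname,
					     char *optval, int optlen)
	{
		return sock_setsockopt(sk, SOL_SOCKET, optname,
				       KERNEL_SOCKPTR_BPF(optval), optlen);
	}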
{lock,release}_sock_sockopt() helpers are added for
sock_setsockopt() to use. These helpers handle the new
'is_bpf' bit: they only take and release the sk lock when the
sockptr did not come from a bpf prog.
Note on the change in sock_setbindtodevice(): lock_sock_sockopt()
is done in sock_setbindtodevice() instead of doing the lock_sock
in sock_bindtoindex(..., lock_sk = true). Otherwise,
sock_bindtoindex() would try to acquire the sk lock a second time
when the caller is a bpf prog that already holds it.
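
For reference, a condensed before/after of that path (the full hunk
is in the net/core/sock.c diff below):

	/* before: lock_sock() is taken unconditionally inside
	 * sock_bindtoindex(), which a bpf caller that already
	 * holds the sk lock cannot tolerate
	 */
	return sock_bindtoindex(sk, index, true);

	/* after: lock/release only for the non-bpf (syscall) path */
	lock_sock_sockopt(sk, optval);
	ret = sock_bindtoindex_locked(sk, index);
	release_sock_sockopt(sk, optval);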
Signed-off-by: Martin KaFai Lau <kafai@...com>
---
 include/linux/sockptr.h |  8 +++++++-
 include/net/sock.h      | 12 ++++++++++++
 net/core/sock.c         |  8 +++++---
 3 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
index d45902fb4cad..787d0be08fb7 100644
--- a/include/linux/sockptr.h
+++ b/include/linux/sockptr.h
@@ -16,7 +16,8 @@ typedef struct {
 		void		*kernel;
 		void __user	*user;
 	};
-	bool		is_kernel : 1;
+	bool		is_kernel : 1,
+			is_bpf : 1;
 } sockptr_t;
 
 static inline bool sockptr_is_kernel(sockptr_t sockptr)
@@ -24,6 +25,11 @@ static inline bool sockptr_is_kernel(sockptr_t sockptr)
 	return sockptr.is_kernel;
 }
 
+static inline sockptr_t KERNEL_SOCKPTR_BPF(void *p)
+{
+	return (sockptr_t) { .kernel = p, .is_kernel = true, .is_bpf = true };
+}
+
 static inline sockptr_t KERNEL_SOCKPTR(void *p)
 {
 	return (sockptr_t) { .kernel = p, .is_kernel = true };
diff --git a/include/net/sock.h b/include/net/sock.h
index 9e2539dcc293..2f913bdf0035 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1721,6 +1721,18 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
 	}
 }
 
+static inline void lock_sock_sockopt(struct sock *sk, sockptr_t optval)
+{
+	if (!optval.is_bpf)
+		lock_sock(sk);
+}
+
+static inline void release_sock_sockopt(struct sock *sk, sockptr_t optval)
+{
+	if (!optval.is_bpf)
+		release_sock(sk);
+}
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming
diff --git a/net/core/sock.c b/net/core/sock.c
index 18bb4f269cf1..61d927a5f6cb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -703,7 +703,9 @@ static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
 			goto out;
 	}
 
-	return sock_bindtoindex(sk, index, true);
+	lock_sock_sockopt(sk, optval);
+	ret = sock_bindtoindex_locked(sk, index);
+	release_sock_sockopt(sk, optval);
 out:
 #endif
@@ -1067,7 +1069,7 @@ int sock_setsockopt(struct sock *sk, int level, int optname,
 
 	valbool = val ? 1 : 0;
 
-	lock_sock(sk);
+	lock_sock_sockopt(sk, optval);
 
 	switch (optname) {
 	case SO_DEBUG:
@@ -1496,7 +1498,7 @@ int sock_setsockopt(struct sock *sk, int level, int optname,
 		ret = -ENOPROTOOPT;
 		break;
 	}
-	release_sock(sk);
+	release_sock_sockopt(sk, optval);
 	return ret;
 }
 EXPORT_SYMBOL(sock_setsockopt);
--
2.30.2