Message-ID: <20250829010026.347440-4-kuniyu@google.com>
Date: Fri, 29 Aug 2025 01:00:06 +0000
From: Kuniyuki Iwashima <kuniyu@...gle.com>
To: Alexei Starovoitov <ast@...nel.org>, Andrii Nakryiko <andrii@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>, Martin KaFai Lau <martin.lau@...ux.dev>
Cc: John Fastabend <john.fastabend@...il.com>, Stanislav Fomichev <sdf@...ichev.me>,
Johannes Weiner <hannes@...xchg.org>, Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>, Shakeel Butt <shakeel.butt@...ux.dev>,
"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Neal Cardwell <ncardwell@...gle.com>, Willem de Bruijn <willemb@...gle.com>,
Mina Almasry <almasrymina@...gle.com>, Kuniyuki Iwashima <kuniyu@...gle.com>,
Kuniyuki Iwashima <kuni1840@...il.com>, bpf@...r.kernel.org, netdev@...r.kernel.org
Subject: [PATCH v4 bpf-next/net 3/5] bpf: Introduce SK_BPF_MEMCG_FLAGS and SK_BPF_MEMCG_SOCK_ISOLATED.

We will decouple sockets from the global protocol memory accounting
if the SK_BPF_MEMCG_SOCK_ISOLATED flag is set on them.

The flag can be set (and cleared) via bpf_setsockopt() at the
BPF_CGROUP_INET_SOCK_CREATE hook and is inherited by child sockets:

	u32 flags = SK_BPF_MEMCG_SOCK_ISOLATED;

	bpf_setsockopt(ctx, SOL_SOCKET, SK_BPF_MEMCG_FLAGS,
		       &flags, sizeof(flags));
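
For illustration, a complete BPF_CGROUP_INET_SOCK_CREATE program might
look like the sketch below.  The program name, the SOL_SOCKET fallback
define, and the choice to always allow the socket are illustrative
assumptions, not part of this patch:

	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>

	#ifndef SOL_SOCKET
	#define SOL_SOCKET 1	/* a macro, so not in vmlinux.h; 1 on most arches */
	#endif

	SEC("cgroup/sock_create")
	int isolate_sock_memcg(struct bpf_sock *ctx)
	{
		u32 flags = SK_BPF_MEMCG_SOCK_ISOLATED;

		/* Charge this socket's memory to its memcg only and
		 * skip the global protocol accounting (next patch).
		 */
		bpf_setsockopt(ctx, SOL_SOCKET, SK_BPF_MEMCG_FLAGS,
			       &flags, sizeof(flags));

		return 1;	/* allow socket creation either way */
	}

	char _license[] SEC("license") = "GPL";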

bpf_setsockopt(SK_BPF_MEMCG_FLAGS) is only supported in
bpf_unlocked_sock_setsockopt() and is not supported on other hooks
for the following reasons:

  1. UDP charges memory under sk->sk_receive_queue.lock instead
     of lock_sock()

  2. For TCP child sockets, memory accounting is adjusted only in
     __inet_accept(), to which sk->sk_memcg allocation is deferred

  3. Modifying the flag after skbs have been charged to the socket
     would require the same adjustment during bpf_setsockopt() and
     complicate the logic unnecessarily

We can support other hooks later if a real use case justifies it.
OTOH, bpf_getsockopt() is supported on other hooks, e.g. bpf_iter,
for debugging purposes.
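
For instance, a socket iterator could check the flag as in this
sketch; ctx here stands for whatever socket pointer the hook hands
to bpf_getsockopt():

	u32 flags = 0;

	if (!bpf_getsockopt(ctx, SOL_SOCKET, SK_BPF_MEMCG_FLAGS,
			    &flags, sizeof(flags)) &&
	    (flags & SK_BPF_MEMCG_SOCK_ISOLATED))
		bpf_printk("sk isolated from global protocol accounting");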

Given that sk->sk_memcg can be accessed in the fast path, it would
be preferable to place the flag field in the same cache line as
sk->sk_memcg.  However, struct sock does not have such a 1-byte hole.

Let's store the flag in the lowest bit of sk->sk_memcg instead and
add a helper to check the bit.
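
Since struct mem_cgroup is at least word-aligned, the lowest bit of
sk->sk_memcg is always zero.  Conceptually, the helpers below reduce
to the following (memcg and flags being local variables here):

	/* set: fold the flag into the pointer's unused low bit */
	sk->sk_memcg = (struct mem_cgroup *)((unsigned long)memcg | flags);

	/* get: mask the flag off to recover the real pointer */
	memcg = (struct mem_cgroup *)((unsigned long)sk->sk_memcg &
				      SK_BPF_MEMCG_PTR_MASK);
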
In the next patch, if mem_cgroup_sk_isolated() returns true,
the socket will not be charged to sk->sk_prot->memory_allocated.
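
Roughly, the charge path will then branch as in this sketch (not the
exact code of the next patch; the mem_cgroup_sk_charge() naming
follows the earlier patches in this series):

	if (mem_cgroup_sk_enabled(sk)) {
		if (!mem_cgroup_sk_charge(sk, amt, gfp_memcg_charge()))
			goto suppress_allocation;

		if (mem_cgroup_sk_isolated(sk))
			return true;	/* skip sk_prot->memory_allocated */
	}
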
Signed-off-by: Kuniyuki Iwashima <kuniyu@...gle.com>
---
v4:
* Only allow inet_create() to set flags
* Inherit flags from listener to child in sk_clone_lock()
* Support clearing flags
v3:
* Allow setting flags without sk->sk_memcg in sk_bpf_set_get_memcg_flags()
* Preserve flags in __inet_accept()
v2:
* s/mem_cgroup_sk_set_flag/mem_cgroup_sk_set_flags/ when CONFIG_MEMCG=n
* Use CONFIG_CGROUP_BPF instead of CONFIG_BPF_SYSCALL for ifdef
---
include/net/sock.h | 50 ++++++++++++++++++++++++++++++++++
include/uapi/linux/bpf.h | 6 ++++
net/core/filter.c | 43 ++++++++++++++++++++++++++++-
net/core/sock.c | 1 +
net/ipv4/af_inet.c | 4 +++
tools/include/uapi/linux/bpf.h | 6 ++++
6 files changed, 109 insertions(+), 1 deletion(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 63a6a48afb48..703cb9116c6e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2596,10 +2596,41 @@ static inline gfp_t gfp_memcg_charge(void)
 	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
 
+#define SK_BPF_MEMCG_FLAG_MASK (SK_BPF_MEMCG_FLAG_MAX - 1)
+#define SK_BPF_MEMCG_PTR_MASK (~SK_BPF_MEMCG_FLAG_MASK)
+
 #ifdef CONFIG_MEMCG
 static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
 {
+#ifdef CONFIG_CGROUP_BPF
+	unsigned long val = (unsigned long)sk->sk_memcg;
+
+	val &= SK_BPF_MEMCG_PTR_MASK;
+	return (struct mem_cgroup *)val;
+#else
 	return sk->sk_memcg;
+#endif
+}
+
+static inline void mem_cgroup_sk_set_flags(struct sock *sk, unsigned short flags)
+{
+#ifdef CONFIG_CGROUP_BPF
+	unsigned long val = (unsigned long)mem_cgroup_from_sk(sk);
+
+	val |= flags;
+	sk->sk_memcg = (struct mem_cgroup *)val;
+#endif
+}
+
+static inline unsigned short mem_cgroup_sk_get_flags(const struct sock *sk)
+{
+#ifdef CONFIG_CGROUP_BPF
+	unsigned long val = (unsigned long)sk->sk_memcg;
+
+	return val & SK_BPF_MEMCG_FLAG_MASK;
+#else
+	return 0;
+#endif
 }
 
 static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
@@ -2607,6 +2638,11 @@ static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
 	return mem_cgroup_sockets_enabled && mem_cgroup_from_sk(sk);
 }
 
+static inline bool mem_cgroup_sk_isolated(const struct sock *sk)
+{
+	return mem_cgroup_sk_get_flags(sk) & SK_BPF_MEMCG_SOCK_ISOLATED;
+}
+
 static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
@@ -2629,11 +2665,25 @@ static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
 	return NULL;
 }
 
+static inline void mem_cgroup_sk_set_flags(struct sock *sk, unsigned short flags)
+{
+}
+
+static inline unsigned short mem_cgroup_sk_get_flags(const struct sock *sk)
+{
+	return 0;
+}
+
 static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
 {
 	return false;
 }
 
+static inline bool mem_cgroup_sk_isolated(const struct sock *sk)
+{
+	return false;
+}
+
 static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
 {
 	return false;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 233de8677382..52b8c2278589 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -7182,6 +7182,7 @@ enum {
 	TCP_BPF_SYN_MAC         = 1007, /* Copy the MAC, IP[46], and TCP header */
 	TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
 	SK_BPF_CB_FLAGS		= 1009, /* Get or set sock ops flags in socket */
+	SK_BPF_MEMCG_FLAGS	= 1010, /* Get or Set flags saved in sk->sk_memcg */
 };
 
 enum {
@@ -7204,6 +7205,11 @@ enum {
 	 */
 };
 
+enum {
+	SK_BPF_MEMCG_SOCK_ISOLATED = (1UL << 0),
+	SK_BPF_MEMCG_FLAG_MAX = (1UL << 1),
+};
+
 struct bpf_perf_event_value {
 	__u64 counter;
 	__u64 enabled;
diff --git a/net/core/filter.c b/net/core/filter.c
index b6d514039cf8..eb2f87a732ef 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5267,6 +5267,35 @@ static int sk_bpf_set_get_cb_flags(struct sock *sk, char *optval, bool getopt)
 	return 0;
 }
 
+static int sk_bpf_get_memcg_flags(struct sock *sk, char *optval)
+{
+	if (!sk_has_account(sk))
+		return -EOPNOTSUPP;
+
+	*(u32 *)optval = mem_cgroup_sk_get_flags(sk);
+
+	return 0;
+}
+
+static int sk_bpf_set_memcg_flags(struct sock *sk, char *optval, int optlen)
+{
+	u32 flags;
+
+	if (optlen != sizeof(u32))
+		return -EINVAL;
+
+	if (!sk_has_account(sk))
+		return -EOPNOTSUPP;
+
+	flags = *(u32 *)optval;
+	if (flags >= SK_BPF_MEMCG_FLAG_MAX)
+		return -EINVAL;
+
+	mem_cgroup_sk_set_flags(sk, flags);
+
+	return 0;
+}
+
 static int sol_socket_sockopt(struct sock *sk, int optname,
 			      char *optval, int *optlen,
 			      bool getopt)
@@ -5284,6 +5313,7 @@ static int sol_socket_sockopt(struct sock *sk, int optname,
 	case SO_BINDTOIFINDEX:
 	case SO_TXREHASH:
 	case SK_BPF_CB_FLAGS:
+	case SK_BPF_MEMCG_FLAGS:
 		if (*optlen != sizeof(int))
 			return -EINVAL;
 		break;
@@ -5293,8 +5323,15 @@ static int sol_socket_sockopt(struct sock *sk, int optname,
 		return -EINVAL;
 	}
 
-	if (optname == SK_BPF_CB_FLAGS)
+	switch (optname) {
+	case SK_BPF_CB_FLAGS:
 		return sk_bpf_set_get_cb_flags(sk, optval, getopt);
+	case SK_BPF_MEMCG_FLAGS:
+		if (!IS_ENABLED(CONFIG_MEMCG) || !getopt)
+			return -EOPNOTSUPP;
+
+		return sk_bpf_get_memcg_flags(sk, optval);
+	}
 
 	if (getopt) {
 		if (optname == SO_BINDTODEVICE)
@@ -5726,6 +5763,10 @@ static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
 BPF_CALL_5(bpf_unlocked_sock_setsockopt, struct sock *, sk, int, level,
 	   int, optname, char *, optval, int, optlen)
 {
+	if (IS_ENABLED(CONFIG_MEMCG) &&
+	    level == SOL_SOCKET && optname == SK_BPF_MEMCG_FLAGS)
+		return sk_bpf_set_memcg_flags(sk, optval, optlen);
+
 	return __bpf_setsockopt(sk, level, optname, optval, optlen);
 }
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 8002ac6293dc..ae30d7d54498 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2515,6 +2515,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 #ifdef CONFIG_MEMCG
 	/* sk->sk_memcg will be populated at accept() time */
 	newsk->sk_memcg = NULL;
+	mem_cgroup_sk_set_flags(newsk, mem_cgroup_sk_get_flags(sk));
 #endif
 
 	cgroup_sk_clone(&newsk->sk_cgrp_data);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d42757f74c6e..9b62f1ae13ba 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -758,12 +758,16 @@ void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk)
 	    (!IS_ENABLED(CONFIG_IP_SCTP) ||
 	     sk_is_tcp(newsk) || sk_is_mptcp(newsk))) {
 		gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
+		unsigned short flags;
 
+		flags = mem_cgroup_sk_get_flags(newsk);
 		mem_cgroup_sk_alloc(newsk);
 
 		if (mem_cgroup_from_sk(newsk)) {
 			int amt;
 
+			mem_cgroup_sk_set_flags(newsk, flags);
+
 			/* The socket has not been accepted yet, no need
 			 * to look at newsk->sk_wmem_queued.
 			 */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 233de8677382..52b8c2278589 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -7182,6 +7182,7 @@ enum {
 	TCP_BPF_SYN_MAC         = 1007, /* Copy the MAC, IP[46], and TCP header */
 	TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
 	SK_BPF_CB_FLAGS		= 1009, /* Get or set sock ops flags in socket */
+	SK_BPF_MEMCG_FLAGS	= 1010, /* Get or Set flags saved in sk->sk_memcg */
 };
 
 enum {
@@ -7204,6 +7205,11 @@ enum {
 	 */
 };
 
+enum {
+	SK_BPF_MEMCG_SOCK_ISOLATED = (1UL << 0),
+	SK_BPF_MEMCG_FLAG_MAX = (1UL << 1),
+};
+
 struct bpf_perf_event_value {
 	__u64 counter;
 	__u64 enabled;
--
2.51.0.318.gd7df087d1a-goog