[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20250910192057.1045711-6-kuniyu@google.com>
Date: Wed, 10 Sep 2025 19:19:32 +0000
From: Kuniyuki Iwashima <kuniyu@...gle.com>
To: Alexei Starovoitov <ast@...nel.org>, Andrii Nakryiko <andrii@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>, Martin KaFai Lau <martin.lau@...ux.dev>
Cc: John Fastabend <john.fastabend@...il.com>, Stanislav Fomichev <sdf@...ichev.me>,
Johannes Weiner <hannes@...xchg.org>, Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>, Shakeel Butt <shakeel.butt@...ux.dev>,
"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Neal Cardwell <ncardwell@...gle.com>, Willem de Bruijn <willemb@...gle.com>,
Mina Almasry <almasrymina@...gle.com>, Kuniyuki Iwashima <kuniyu@...gle.com>,
Kuniyuki Iwashima <kuni1840@...il.com>, bpf@...r.kernel.org, netdev@...r.kernel.org
Subject: [PATCH v8 bpf-next/net 5/6] bpf: Introduce SK_BPF_MEMCG_FLAGS and SK_BPF_MEMCG_EXCLUSIVE.
If a socket has sk->sk_memcg with SK_MEMCG_EXCLUSIVE, it is decoupled
from the global protocol memory accounting.
This is controlled by net.core.memcg_exclusive sysctl, but it lacks
flexibility.
Let's support flagging (and clearing) SK_MEMCG_EXCLUSIVE via
bpf_setsockopt() at the BPF_CGROUP_INET_SOCK_CREATE hook.
u32 flags = SK_BPF_MEMCG_EXCLUSIVE;
bpf_setsockopt(ctx, SOL_SOCKET, SK_BPF_MEMCG_FLAGS,
&flags, sizeof(flags));
As with net.core.memcg_exclusive, this is inherited by child sockets,
and BPF always takes precedence over sysctl at socket(2) and accept(2).
SK_BPF_MEMCG_FLAGS is only supported at BPF_CGROUP_INET_SOCK_CREATE
and not supported on other hooks for the following reasons:
1. UDP charges memory under sk->sk_receive_queue.lock instead
of lock_sock()
2. For TCP child sockets, memory accounting is adjusted only in
__inet_accept(), to which sk->sk_memcg allocation is deferred
3. Modifying the flag after skb is charged to sk requires such
adjustment during bpf_setsockopt() and complicates the logic
unnecessarily
We can support other hooks later if a real use case justifies that.
Most changes are inlined and hard to trace, but a microbenchmark of
__sk_mem_raise_allocated() during neper/tcp_stream showed that more
samples completed faster with SK_MEMCG_EXCLUSIVE. This will be more
visible under tcp_mem pressure.
# bpftrace -e 'kprobe:__sk_mem_raise_allocated { @start[tid] = nsecs; }
kretprobe:__sk_mem_raise_allocated /@...rt[tid]/
{ @end[tid] = nsecs - @start[tid]; @times = hist(@end[tid]); delete(@start[tid]); }'
# tcp_stream -6 -F 1000 -N -T 256
Without bpf prog:
[128, 256) 3846 | |
[256, 512) 1505326 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@|
[512, 1K) 1371006 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |
[1K, 2K) 198207 |@@@@@@ |
[2K, 4K) 31199 |@ |
With bpf prog in the next patch:
(must be attached before tcp_stream)
# bpftool prog load sk_memcg.bpf.o /sys/fs/bpf/sk_memcg type cgroup/sock_create
# bpftool cgroup attach /sys/fs/cgroup/test cgroup_inet_sock_create pinned /sys/fs/bpf/sk_memcg
[128, 256) 6413 | |
[256, 512) 1868425 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@|
[512, 1K) 1101697 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |
[1K, 2K) 117031 |@@@@ |
[2K, 4K) 11773 | |
Signed-off-by: Kuniyuki Iwashima <kuniyu@...gle.com>
---
v7:
* Update commit message.
v5:
* Limit getsockopt() to BPF_CGROUP_INET_SOCK_CREATE
v4:
* Only allow inet_create() to set flags
* Inherit flags from listener to child in sk_clone_lock()
* Support clearing flags
v3:
* Allow setting flags without sk->sk_memcg in sk_bpf_set_get_memcg_flags()
* Preserve flags in __inet_accept()
v2:
* s/mem_cgroup_sk_set_flag/mem_cgroup_sk_set_flags/ when CONFIG_MEMCG=n
* Use CONFIG_CGROUP_BPF instead of CONFIG_BPF_SYSCALL for ifdef
---
include/uapi/linux/bpf.h | 6 ++++++
mm/memcontrol.c | 3 +++
net/core/filter.c | 34 ++++++++++++++++++++++++++++++++++
tools/include/uapi/linux/bpf.h | 6 ++++++
4 files changed, 49 insertions(+)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 233de8677382..35e3ce40ac90 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -7182,6 +7182,7 @@ enum {
TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
SK_BPF_CB_FLAGS = 1009, /* Get or set sock ops flags in socket */
+ SK_BPF_MEMCG_FLAGS = 1010, /* Get or Set flags saved in sk->sk_memcg */
};
enum {
@@ -7204,6 +7205,11 @@ enum {
*/
};
+enum {
+ SK_BPF_MEMCG_EXCLUSIVE = (1UL << 0),
+ SK_BPF_MEMCG_FLAG_MAX = (1UL << 1),
+};
+
struct bpf_perf_event_value {
__u64 counter;
__u64 enabled;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 88028af8ac28..b7d405b57e23 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4997,6 +4997,9 @@ EXPORT_SYMBOL(memcg_sockets_enabled_key);
static void mem_cgroup_sk_set(struct sock *sk, struct mem_cgroup *memcg)
{
+ BUILD_BUG_ON((unsigned short)SK_MEMCG_EXCLUSIVE != SK_BPF_MEMCG_EXCLUSIVE);
+ BUILD_BUG_ON((unsigned short)SK_MEMCG_FLAG_MAX != SK_BPF_MEMCG_FLAG_MAX);
+
sk->sk_memcg = memcg;
#ifdef CONFIG_NET
diff --git a/net/core/filter.c b/net/core/filter.c
index 31b259f02ee9..df2496120076 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5723,9 +5723,39 @@ static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
.arg5_type = ARG_CONST_SIZE,
};
+static int sk_bpf_set_get_memcg_flags(struct sock *sk,
+ char *optval, int optlen,
+ bool getopt)
+{
+ u32 flags;
+
+ if (optlen != sizeof(u32))
+ return -EINVAL;
+
+ if (!sk_has_account(sk))
+ return -EOPNOTSUPP;
+
+ if (getopt) {
+ *(u32 *)optval = mem_cgroup_sk_get_flags(sk);
+ return 0;
+ }
+
+ flags = *(u32 *)optval;
+ if (flags >= SK_BPF_MEMCG_FLAG_MAX)
+ return -EINVAL;
+
+ mem_cgroup_sk_set_flags(sk, flags);
+
+ return 0;
+}
+
BPF_CALL_5(bpf_sock_create_setsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{
+ if (IS_ENABLED(CONFIG_MEMCG) &&
+ level == SOL_SOCKET && optname == SK_BPF_MEMCG_FLAGS)
+ return sk_bpf_set_get_memcg_flags(sk, optval, optlen, false);
+
return __bpf_setsockopt(sk, level, optname, optval, optlen);
}
@@ -5743,6 +5773,10 @@ static const struct bpf_func_proto bpf_sock_create_setsockopt_proto = {
BPF_CALL_5(bpf_sock_create_getsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{
+ if (IS_ENABLED(CONFIG_MEMCG) &&
+ level == SOL_SOCKET && optname == SK_BPF_MEMCG_FLAGS)
+ return sk_bpf_set_get_memcg_flags(sk, optval, optlen, true);
+
return __bpf_getsockopt(sk, level, optname, optval, optlen);
}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 233de8677382..35e3ce40ac90 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -7182,6 +7182,7 @@ enum {
TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
SK_BPF_CB_FLAGS = 1009, /* Get or set sock ops flags in socket */
+ SK_BPF_MEMCG_FLAGS = 1010, /* Get or Set flags saved in sk->sk_memcg */
};
enum {
@@ -7204,6 +7205,11 @@ enum {
*/
};
+enum {
+ SK_BPF_MEMCG_EXCLUSIVE = (1UL << 0),
+ SK_BPF_MEMCG_FLAG_MAX = (1UL << 1),
+};
+
struct bpf_perf_event_value {
__u64 counter;
__u64 enabled;
--
2.51.0.384.g4c02a37b29-goog
Powered by blists - more mailing lists