Message-ID: <20250910192057.1045711-4-kuniyu@google.com>
Date: Wed, 10 Sep 2025 19:19:30 +0000
From: Kuniyuki Iwashima <kuniyu@...gle.com>
To: Alexei Starovoitov <ast@...nel.org>, Andrii Nakryiko <andrii@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>, Martin KaFai Lau <martin.lau@...ux.dev>
Cc: John Fastabend <john.fastabend@...il.com>, Stanislav Fomichev <sdf@...ichev.me>,
Johannes Weiner <hannes@...xchg.org>, Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>, Shakeel Butt <shakeel.butt@...ux.dev>,
"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Neal Cardwell <ncardwell@...gle.com>, Willem de Bruijn <willemb@...gle.com>,
Mina Almasry <almasrymina@...gle.com>, Kuniyuki Iwashima <kuniyu@...gle.com>,
Kuniyuki Iwashima <kuni1840@...il.com>, bpf@...r.kernel.org, netdev@...r.kernel.org
Subject: [PATCH v8 bpf-next/net 3/6] net-memcg: Introduce net.core.memcg_exclusive
sysctl.
If net.core.memcg_exclusive is 1 when sk->sk_memcg is allocated,
the socket is flagged with SK_MEMCG_EXCLUSIVE internally and skips
the global per-protocol memory accounting.
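Roughly, the intended effect on the charging path is the following
(illustration only; mem_cgroup_sk_enabled() and mem_cgroup_sk_exclusive()
are real helpers touched by this series, but the wrapper name below is
made up and not part of this patch):

	/* Sketch, not part of this patch: shows where the flag is
	 * consulted.  The real charging-path changes live in other
	 * patches of the series.
	 */
	static inline bool sk_should_charge_global(const struct sock *sk)
	{
		/* A memcg-charged socket flagged exclusive skips the
		 * per-protocol accounting (net.ipv4.tcp_mem and friends).
		 */
		return !(mem_cgroup_sk_enabled(sk) &&
			 mem_cgroup_sk_exclusive(sk));
	}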
On the other hand, for accept()ed child sockets, the flag is inherited
from the listening socket in sk_clone_lock() and set again in
__inet_accept() once sk->sk_memcg is allocated.
This is to preserve the decision made by BPF, support for which will be
added later in the series.
Given sk->sk_memcg can be accessed in the fast path, it would
be preferable to place the flag field in the same cache line as
sk->sk_memcg.
However, struct sock does not have such a 1-byte hole.
Let's store the flag in the lowest bit of sk->sk_memcg and check
it in mem_cgroup_sk_exclusive().
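This relies on struct mem_cgroup pointers being at least 2-byte aligned,
so bit 0 is always zero and free to carry the flag.  A standalone sketch
of the same pointer-tagging trick (illustration only, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	#define FLAG_MASK	0x1UL	/* bit 0 carries SK_MEMCG_EXCLUSIVE */
	#define PTR_MASK	(~FLAG_MASK)

	static inline void *tag_ptr(void *ptr, unsigned long flags)
	{
		/* Only valid because allocations are at least 2-byte aligned. */
		assert(!((uintptr_t)ptr & FLAG_MASK));
		return (void *)((uintptr_t)ptr | flags);
	}

	static inline void *untag_ptr(void *tagged)
	{
		return (void *)((uintptr_t)tagged & PTR_MASK);
	}

	static inline unsigned long tag_flags(void *tagged)
	{
		return (uintptr_t)tagged & FLAG_MASK;
	}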
Tested with a script that creates local socket pairs and send()s a
bunch of data without recv()ing.
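The script itself is not included in this patch; a rough C equivalent of
what it does (connection count and payload size below are placeholders):

	/* Create many local TCP pairs and queue data on them without ever
	 * recv()ing.  Error handling omitted; raise RLIMIT_NOFILE (e.g.
	 * with prlimit) before running.
	 */
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <unistd.h>

	#define NR_PAIRS	2000
	#define PAYLOAD		1000

	int main(void)
	{
		struct sockaddr_in addr = { .sin_family = AF_INET };
		socklen_t len = sizeof(addr);
		char buf[PAYLOAD] = { 0 };
		int listener, i;

		addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

		listener = socket(AF_INET, SOCK_STREAM, 0);
		bind(listener, (struct sockaddr *)&addr, sizeof(addr));
		listen(listener, NR_PAIRS);
		getsockname(listener, (struct sockaddr *)&addr, &len);

		for (i = 0; i < NR_PAIRS; i++) {
			int c = socket(AF_INET, SOCK_STREAM, 0);

			connect(c, (struct sockaddr *)&addr, sizeof(addr));
			accept(listener, NULL, NULL);

			/* Queue data until the kernel refuses to take more. */
			while (send(c, buf, sizeof(buf), MSG_DONTWAIT) > 0)
				;
		}

		pause();	/* keep the sockets (and their memory) alive */
		return 0;
	}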
Setup:
# mkdir /sys/fs/cgroup/test
# echo $$ >> /sys/fs/cgroup/test/cgroup.procs
# sysctl -q net.ipv4.tcp_mem="1000 1000 1000"
Without net.core.memcg_exclusive, charged to memcg & tcp_mem:
# prlimit -n=524288:524288 bash -c "python3 pressure.py" &
# cat /sys/fs/cgroup/test/memory.stat | grep sock
sock 22642688 <-------------------------------------- charged to memcg
# cat /proc/net/sockstat | grep TCP
TCP: inuse 2006 orphan 0 tw 0 alloc 2008 mem 5376 <-- charged to tcp_mem
# ss -tn | head -n 5
State Recv-Q Send-Q Local Address:Port Peer Address:Port
ESTAB 2000 0 127.0.0.1:34479 127.0.0.1:53188
ESTAB 2000 0 127.0.0.1:34479 127.0.0.1:49972
ESTAB 2000 0 127.0.0.1:34479 127.0.0.1:53868
ESTAB 2000 0 127.0.0.1:34479 127.0.0.1:53554
# nstat | grep Pressure || echo no pressure
TcpExtTCPMemoryPressures 1 0.0
With net.core.memcg_exclusive=1, only charged to memcg:
# sysctl -q net.core.memcg_exclusive=1
# prlimit -n=524288:524288 bash -c "python3 pressure.py" &
# cat /sys/fs/cgroup/test/memory.stat | grep sock
sock 2757468160 <------------------------------------ charged to memcg
# cat /proc/net/sockstat | grep TCP
TCP: inuse 2006 orphan 0 tw 0 alloc 2008 mem 0 <- NOT charged to tcp_mem
# ss -tn | head -n 5
State Recv-Q Send-Q Local Address:Port Peer Address:Port
ESTAB 111000 0 127.0.0.1:36019 127.0.0.1:49026
ESTAB 110000 0 127.0.0.1:36019 127.0.0.1:45630
ESTAB 110000 0 127.0.0.1:36019 127.0.0.1:44870
ESTAB 111000 0 127.0.0.1:36019 127.0.0.1:45274
# nstat | grep Pressure || echo no pressure
no pressure
Signed-off-by: Kuniyuki Iwashima <kuniyu@...gle.com>
---
v8: Fix build failure when CONFIG_NET=n
---
Documentation/admin-guide/sysctl/net.rst | 9 ++++++
include/net/netns/core.h | 3 ++
include/net/sock.h | 39 ++++++++++++++++++++++--
mm/memcontrol.c | 12 +++++++-
net/core/sock.c | 1 +
net/core/sysctl_net_core.c | 11 +++++++
net/ipv4/af_inet.c | 4 +++
7 files changed, 76 insertions(+), 3 deletions(-)
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 2ef50828aff1..7272194dcf45 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -212,6 +212,15 @@ mem_pcpu_rsv
Per-cpu reserved forward alloc cache size in page units. Default 1MB per CPU.
+memcg_exclusive
+---------------
+
+Skip charging socket buffers to the per-protocol global memory accounting
+(controlled by net.ipv4.tcp_mem, etc) if they are already charged to the
+cgroup memory controller ("sock" in memory.stat file).
+
+Default: 0
+
rmem_default
------------
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 9b36f0ff0c20..ec511088e67d 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -16,6 +16,9 @@ struct netns_core {
int sysctl_optmem_max;
u8 sysctl_txrehash;
u8 sysctl_tstamp_allow_data;
+#ifdef CONFIG_MEMCG
+ u8 sysctl_memcg_exclusive;
+#endif
#ifdef CONFIG_PROC_FS
struct prot_inuse __percpu *prot_inuse;
diff --git a/include/net/sock.h b/include/net/sock.h
index 66501ab670eb..0f597f4deaa3 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2596,10 +2596,36 @@ static inline gfp_t gfp_memcg_charge(void)
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
+enum {
+ SK_MEMCG_EXCLUSIVE = (1UL << 0),
+ SK_MEMCG_FLAG_MAX = (1UL << 1),
+};
+
+#define SK_MEMCG_FLAG_MASK (SK_MEMCG_FLAG_MAX - 1)
+#define SK_MEMCG_PTR_MASK ~SK_MEMCG_FLAG_MASK
+
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
{
- return sk->sk_memcg;
+ unsigned long val = (unsigned long)sk->sk_memcg;
+
+ val &= SK_MEMCG_PTR_MASK;
+ return (struct mem_cgroup *)val;
+}
+
+static inline void mem_cgroup_sk_set_flags(struct sock *sk, unsigned short flags)
+{
+ unsigned long val = (unsigned long)mem_cgroup_from_sk(sk);
+
+ val |= flags;
+ sk->sk_memcg = (struct mem_cgroup *)val;
+}
+
+static inline unsigned short mem_cgroup_sk_get_flags(const struct sock *sk)
+{
+ unsigned long val = (unsigned long)sk->sk_memcg;
+
+ return val & SK_MEMCG_FLAG_MASK;
}
static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
@@ -2609,7 +2635,7 @@ static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
static inline bool mem_cgroup_sk_exclusive(const struct sock *sk)
{
- return false;
+ return mem_cgroup_sk_get_flags(sk) & SK_MEMCG_EXCLUSIVE;
}
static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
@@ -2634,6 +2660,15 @@ static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
return NULL;
}
+static inline void mem_cgroup_sk_set_flags(struct sock *sk, unsigned short flags)
+{
+}
+
+static inline unsigned short mem_cgroup_sk_get_flags(const struct sock *sk)
+{
+ return 0;
+}
+
static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
{
return false;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index df3e9205c9e6..88028af8ac28 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4995,6 +4995,16 @@ void mem_cgroup_migrate(struct folio *old, struct folio *new)
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);
+static void mem_cgroup_sk_set(struct sock *sk, struct mem_cgroup *memcg)
+{
+ sk->sk_memcg = memcg;
+
+#ifdef CONFIG_NET
+ if (READ_ONCE(sock_net(sk)->core.sysctl_memcg_exclusive))
+ mem_cgroup_sk_set_flags(sk, SK_MEMCG_EXCLUSIVE);
+#endif
+}
+
void mem_cgroup_sk_alloc(struct sock *sk)
{
struct mem_cgroup *memcg;
@@ -5013,7 +5023,7 @@ void mem_cgroup_sk_alloc(struct sock *sk)
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
goto out;
if (css_tryget(&memcg->css))
- sk->sk_memcg = memcg;
+ mem_cgroup_sk_set(sk, memcg);
out:
rcu_read_unlock();
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 814966309b0e..348e599c3fbc 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2519,6 +2519,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
#ifdef CONFIG_MEMCG
/* sk->sk_memcg will be populated at accept() time */
newsk->sk_memcg = NULL;
+ mem_cgroup_sk_set_flags(newsk, mem_cgroup_sk_get_flags(sk));
#endif
cgroup_sk_clone(&newsk->sk_cgrp_data);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 8cf04b57ade1..c8b5fc3b8435 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -676,6 +676,17 @@ static struct ctl_table netns_core_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE
},
+#ifdef CONFIG_MEMCG
+ {
+ .procname = "memcg_exclusive",
+ .data = &init_net.core.sysctl_memcg_exclusive,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE
+ },
+#endif
/* sysctl_core_net_init() will set the values after this
* to readonly in network namespaces
*/
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 52d060bc9009..3fbb076ffd36 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -759,12 +759,16 @@ void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *new
(!IS_ENABLED(CONFIG_IP_SCTP) ||
sk_is_tcp(newsk) || sk_is_mptcp(newsk))) {
gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
+ unsigned short flags;
+ flags = mem_cgroup_sk_get_flags(newsk);
mem_cgroup_sk_alloc(newsk);
if (mem_cgroup_from_sk(newsk)) {
int amt;
+ mem_cgroup_sk_set_flags(newsk, flags);
+
/* The socket has not been accepted yet, no need
* to look at newsk->sk_wmem_queued.
*/
--
2.51.0.384.g4c02a37b29-goog