Message-Id: <20230508020801.10702-3-cathy.zhang@intel.com>
Date: Sun, 7 May 2023 19:08:01 -0700
From: Cathy Zhang <cathy.zhang@...el.com>
To: edumazet@...gle.com,
davem@...emloft.net,
kuba@...nel.org,
pabeni@...hat.com
Cc: jesse.brandeburg@...el.com,
suresh.srinivas@...el.com,
tim.c.chen@...el.com,
lizhen.you@...el.com,
cathy.zhang@...el.com,
eric.dumazet@...il.com,
netdev@...r.kernel.org
Subject: [PATCH net-next 2/2] net: Add sysctl_reclaim_threshold
Add a new sysctl, /proc/sys/net/core/reclaim_threshold, which controls
how much forward-allocated memory a socket keeps reserved before
sk_mem_uncharge() starts reclaiming. Lowering it keeps
sk->sk_forward_alloc as small as possible while the system is under
memory pressure; raising it avoids memcg charge overhead and improves
performance when the system is not under memory pressure.

The previous hard-coded reclaim threshold of 2MB per socket becomes the
maximum allowed value, while the default is 64KB, which is close to the
maximum size of an sk_buff.

Issue the following command as root to change the value, e.g.:

echo 16384 > /proc/sys/net/core/reclaim_threshold
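
For reference, a hypothetical session showing the default value and the
range checking done by the sysctl handler (exact shell output may differ):

  $ cat /proc/sys/net/core/reclaim_threshold
  65536
  # echo 2097152 > /proc/sys/net/core/reclaim_threshold   # accepted (max)
  # echo 1024 > /proc/sys/net/core/reclaim_threshold      # below 4096, rejected
  bash: echo: write error: Invalid argument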
Signed-off-by: Cathy Zhang <cathy.zhang@...el.com>
Signed-off-by: Lizhen You <lizhen.you@...el.com>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@...el.com>
Reviewed-by: Tim Chen <tim.c.chen@...ux.intel.com>
Reviewed-by: Suresh Srinivas <suresh.srinivas@...el.com>
---
Documentation/admin-guide/sysctl/net.rst | 12 ++++++++++++
include/net/sock.h | 13 +++++++++++--
net/core/sysctl_net_core.c | 14 ++++++++++++++
3 files changed, 37 insertions(+), 2 deletions(-)
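
Not part of the patch, for review discussion only: a minimal userspace
sketch of the arithmetic the modified sk_mem_uncharge() performs. Names
here are illustrative; "forward_alloc" stands in for the reclaimable
amount derived from sk->sk_forward_alloc, and "threshold" for the value
read from sysctl_reclaim_threshold with READ_ONCE().

/* Illustrative model of the threshold-based reclaim; not kernel code. */
#include <stdio.h>

static int reclaim(int forward_alloc, int threshold)
{
	int reclaimable = forward_alloc;
	int reclaimed = 0;

	if (reclaimable > threshold) {
		/* Give back only the part above the threshold, so the
		 * socket keeps "threshold" bytes reserved and avoids
		 * re-charging memcg on the next allocation.
		 */
		reclaimed = reclaimable - threshold;
	}
	return reclaimed;
}

int main(void)
{
	/* 100 KB forward-allocated, 64 KB default threshold:
	 * 36 KB is reclaimed, 64 KB stays reserved.
	 */
	printf("%d\n", reclaim(100 * 1024, 64 * 1024));	/* 36864 */

	/* The same socket with a 16 KB threshold reclaims more. */
	printf("%d\n", reclaim(100 * 1024, 16 * 1024));	/* 86016 */
	return 0;
}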
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 466c560b0c30..2981278af3d9 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -413,6 +413,18 @@ historical importance.
Default: 0
+reclaim_threshold
+-----------------
+
+The amount of forward-allocated memory, in bytes, that a socket may keep
+reserved before reclaim starts. When the per-socket forward-allocated memory
+exceeds this threshold, the part above the threshold is reclaimed in
+sk_mem_uncharge(). Keeping forward-allocated memory at a suitable size helps
+improve performance while keeping the system away from memory pressure. The
+threshold may be set to any value in the range [4096, 2097152].
+
+Default: 65536 (64 KB)
+
2. /proc/sys/net/unix - Parameters for Unix domain sockets
----------------------------------------------------------
diff --git a/include/net/sock.h b/include/net/sock.h
index 6d2960479a80..3ca4c03a23ba 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -89,6 +89,10 @@ void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
}
#endif
+#if IS_ENABLED(CONFIG_SYSCTL)
+extern unsigned int sysctl_reclaim_threshold __read_mostly;
+#endif
+
/* This is the per-socket lock. The spinlock provides a synchronization
* between user contexts and software interrupt processing, whereas the
* mini-semaphore synchronizes multiple users amongst themselves.
@@ -1663,6 +1667,11 @@ static inline void sk_mem_charge(struct sock *sk, int size)
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
int reclaimable;
+#if IS_ENABLED(CONFIG_SYSCTL)
+ int reclaim_threshold = READ_ONCE(sysctl_reclaim_threshold);
+#else
+ int reclaim_threshold = SK_RECLAIM_THRESHOLD;
+#endif
if (!sk_has_account(sk))
return;
@@ -1680,8 +1689,8 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
* In order to avoid the above issue, it's necessary to keep
* sk->sk_forward_alloc with a proper size while doing reclaim.
*/
- if (reclaimable > SK_RECLAIM_THRESHOLD) {
- reclaimable -= SK_RECLAIM_THRESHOLD;
+ if (reclaimable > reclaim_threshold) {
+ reclaimable -= reclaim_threshold;
__sk_mem_reclaim(sk, reclaimable);
}
}
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 782273bb93c2..82aee37769ba 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -46,6 +46,11 @@ EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net);
int sysctl_devconf_inherit_init_net __read_mostly;
EXPORT_SYMBOL(sysctl_devconf_inherit_init_net);
+static unsigned int min_reclaim = PAGE_SIZE;
+static unsigned int max_reclaim = 2 * 1024 * 1024;
+unsigned int sysctl_reclaim_threshold __read_mostly = 64 * 1024;
+EXPORT_SYMBOL(sysctl_reclaim_threshold);
+
#if IS_ENABLED(CONFIG_NET_FLOW_LIMIT) || IS_ENABLED(CONFIG_RPS)
static void dump_cpumask(void *buffer, size_t *lenp, loff_t *ppos,
struct cpumask *mask)
@@ -407,6 +412,15 @@ static struct ctl_table net_core_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_rcvbuf,
},
+ {
+ .procname = "reclaim_threshold",
+ .data = &sysctl_reclaim_threshold,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = &min_reclaim,
+ .extra2 = &max_reclaim,
+ },
{
.procname = "dev_weight",
.data = &weight_p,
--
2.34.1