Message-Id: <20230506042958.15051-3-cathy.zhang@intel.com>
Date: Fri,  5 May 2023 21:29:58 -0700
From: Cathy Zhang <cathy.zhang@...el.com>
To: edumazet@...gle.com,
	davem@...emloft.net,
	kuba@...nel.org,
	pabeni@...hat.com
Cc: jesse.brandeburg@...el.com,
	suresh.srinivas@...el.com,
	tim.c.chen@...el.com,
	lizhen.you@...el.com,
	cathy.zhang@...el.com,
	eric.dumazet@...il.com,
	netdev@...r.kernel.org
Subject: [PATCH 2/2] net: Add sysctl_reclaim_threshold

Add a new ABI, /proc/sys/net/core/reclaim_threshold, which allows changing
the amount of reserved per-socket memory that is exempt from reclaim in
sk_mem_uncharge(). Lowering it keeps sk->sk_forward_alloc as small as
possible when the system is under memory pressure, while raising it avoids
memcg charge overhead and improves performance when the system is not under
memory pressure. The historical per-socket reclaim threshold for reserved
memory was 2MB, which is used here as the maximum value; the default is
64KB, which is close to the maximum size of an sk_buff.
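
A minimal userspace sketch of the reclaim arithmetic (not the kernel code;
the function name below is made up for illustration): everything above the
threshold is handed to __sk_mem_reclaim(), and the threshold itself stays
reserved on the socket.

	#include <stdio.h>

	/* Amount sk_mem_uncharge() would pass to __sk_mem_reclaim():
	 * memory above the threshold is released, the threshold itself
	 * stays reserved on the socket.
	 */
	static int reclaim_amount(int reclaimable, int threshold)
	{
		return reclaimable > threshold ? reclaimable - threshold : 0;
	}

	int main(void)
	{
		/* With the 64KB default, 200KB of reclaimable memory
		 * releases 136KB and keeps 64KB on the socket.
		 */
		printf("reclaim %d bytes\n",
		       reclaim_amount(200 * 1024, 64 * 1024));
		return 0;
	}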

Issue the following command as root to change the threshold, e.g. to 16KB:

	echo 16384 > /proc/sys/net/core/reclaim_threshold

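The same value can also be read or updated from a program through the proc
file; a minimal sketch (error handling trimmed, the write requires root):

	#include <stdio.h>

	#define RECLAIM_PATH "/proc/sys/net/core/reclaim_threshold"

	int main(void)
	{
		unsigned int val = 0;
		FILE *f;

		/* Read the current threshold. */
		f = fopen(RECLAIM_PATH, "r");
		if (f) {
			if (fscanf(f, "%u", &val) == 1)
				printf("reclaim_threshold = %u\n", val);
			fclose(f);
		}

		/* Lower it to 16KB; writing requires root. */
		f = fopen(RECLAIM_PATH, "w");
		if (f) {
			fprintf(f, "%u\n", 16384);
			fclose(f);
		}
		return 0;
	}
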
Signed-off-by: Cathy Zhang <cathy.zhang@...el.com>
Signed-off-by: Lizhen You <lizhen.you@...el.com>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@...el.com>
Reviewed-by: Tim Chen <tim.c.chen@...ux.intel.com>
Reviewed-by: Suresh Srinivas <suresh.srinivas@...el.com>
---
 Documentation/admin-guide/sysctl/net.rst | 12 ++++++++++++
 include/net/sock.h                       | 11 ++++++-----
 net/core/sysctl_net_core.c               | 14 ++++++++++++++
 3 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 466c560b0c30..2981278af3d9 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -413,6 +413,18 @@ historical importance.
 
 Default: 0
 
+reclaim_threshold
+-----------------
+
+The threshold controls when memory can start to be reclaimed during the
+lifetime of a TCP connection. If the per-socket forward allocated memory
+exceeds the threshold, the part above this value is reclaimed. Keeping
+per-socket forward allocated memory at a proper size improves performance
+while keeping the system away from memory pressure. The threshold may be
+set to any value in the range [4096, 2097152] bytes.
+
+Default: 64 KB
+
 2. /proc/sys/net/unix - Parameters for Unix domain sockets
 ----------------------------------------------------------
 
diff --git a/include/net/sock.h b/include/net/sock.h
index 6d2960479a80..bd8162ed2056 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -89,6 +89,8 @@ void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
 }
 #endif
 
+extern unsigned int sysctl_reclaim_threshold __read_mostly;
+
 /* This is the per-socket lock.  The spinlock provides a synchronization
  * between user contexts and software interrupt processing, whereas the
  * mini-semaphore synchronizes multiple users amongst themselves.
@@ -1657,12 +1659,10 @@ static inline void sk_mem_charge(struct sock *sk, int size)
 	sk->sk_forward_alloc -= size;
 }
 
-/* The following macro controls memory reclaiming in sk_mem_uncharge().
- */
-#define SK_RECLAIM_THRESHOLD	(1 << 16)
 static inline void sk_mem_uncharge(struct sock *sk, int size)
 {
 	int reclaimable;
+	int reclaim_threshold;
 
 	if (!sk_has_account(sk))
 		return;
@@ -1680,8 +1680,9 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 	 * In order to avoid the above issue, it's necessary to keep
 	 * sk->sk_forward_alloc with a proper size while doing reclaim.
 	 */
-	if (reclaimable > SK_RECLAIM_THRESHOLD) {
-		reclaimable -= SK_RECLAIM_THRESHOLD;
+	reclaim_threshold = READ_ONCE(sysctl_reclaim_threshold);
+	if (reclaimable > reclaim_threshold) {
+		reclaimable -= reclaim_threshold;
 		__sk_mem_reclaim(sk, reclaimable);
 	}
 }
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 782273bb93c2..82aee37769ba 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -46,6 +46,11 @@ EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net);
 int sysctl_devconf_inherit_init_net __read_mostly;
 EXPORT_SYMBOL(sysctl_devconf_inherit_init_net);
 
+static unsigned int min_reclaim = PAGE_SIZE;
+static unsigned int max_reclaim = 2 * 1024 * 1024;
+unsigned int sysctl_reclaim_threshold __read_mostly = 64 * 1024;
+EXPORT_SYMBOL(sysctl_reclaim_threshold);
+
 #if IS_ENABLED(CONFIG_NET_FLOW_LIMIT) || IS_ENABLED(CONFIG_RPS)
 static void dump_cpumask(void *buffer, size_t *lenp, loff_t *ppos,
 			 struct cpumask *mask)
@@ -407,6 +412,15 @@ static struct ctl_table net_core_table[] = {
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &min_rcvbuf,
 	},
+	{
+		.procname	= "reclaim_threshold",
+		.data		= &sysctl_reclaim_threshold,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec_minmax,
+		.extra1		= &min_reclaim,
+		.extra2		= &max_reclaim,
+	},
 	{
 		.procname	= "dev_weight",
 		.data		= &weight_p,
-- 
2.34.1