Message-Id: <1522320104-6573-1-git-send-email-zhaoyang.huang@spreadtrum.com>
Date:   Thu, 29 Mar 2018 18:41:44 +0800
From:   Zhaoyang Huang <huangzhaoyang@...il.com>
To:     Steven Rostedt <rostedt@...dmis.org>,
        Ingo Molnar <mingo@...nel.org>, linux-kernel@...r.kernel.org,
        kernel-patch-test@...ts.linaro.org
Subject: [PATCH v1] kernel/trace: check the val against the available mem

It is reported that some user applications echo a huge number to
"/sys/kernel/debug/tracing/buffer_size_kb" regardless of the available
memory, which makes the subsequent page allocations fail and can
trigger the OOM killer. Check the requested value against the available
memory first to avoid those doomed allocations.

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@...eadtrum.com>
---
 kernel/trace/trace.c | 39 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 38 insertions(+), 1 deletion(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2d0ffcc..a4a4237 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,6 +43,8 @@
 #include <linux/trace.h>
 #include <linux/sched/rt.h>
 
+#include <linux/mm.h>
+#include <linux/swap.h>
 #include "trace.h"
 #include "trace_output.h"
 
@@ -5967,6 +5969,39 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	return ret;
 }
 
+static long get_available_mem(void)
+{
+	struct sysinfo i;
+	long available;
+	unsigned long pagecache;
+	unsigned long wmark_low = 0;
+	unsigned long pages[NR_LRU_LISTS];
+	struct zone *zone;
+	int lru;
+
+	si_meminfo(&i);
+	si_swapinfo(&i);
+
+	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
+		pages[lru] = global_page_state(NR_LRU_BASE + lru);
+
+	for_each_zone(zone)
+		wmark_low += zone->watermark[WMARK_LOW];
+
+	available = i.freeram - wmark_low;
+
+	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
+	pagecache -= min(pagecache / 2, wmark_low);
+	available += pagecache;
+
+	available += global_page_state(NR_SLAB_RECLAIMABLE) -
+		min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+
+	if (available < 0)
+		available = 0;
+	return available;
+}
+
 static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
 		      size_t cnt, loff_t *ppos)
@@ -5975,13 +6010,15 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	struct trace_array *tr = inode->i_private;
 	unsigned long val;
 	int ret;
+	long available;
 
+	available = get_available_mem();
 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 	if (ret)
 		return ret;
 
 	/* must have at least 1 entry */
-	if (!val)
+	if (!val || (val > available))
 		return -EINVAL;
 
 	/* value is in KB */
-- 
1.9.1
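
For illustration only (not part of the patch): a minimal user-space sketch of
the same guard, comparing a requested buffer_size_kb against MemAvailable from
/proc/meminfo before it would be echoed into the tracefs file. Using
MemAvailable and a plain kB comparison are assumptions of this sketch; the
in-kernel helper above builds its own estimate from free pages, reclaimable
page cache and reclaimable slab instead.

/* Sketch only: check a requested ring-buffer size against MemAvailable. */
#include <stdio.h>
#include <stdlib.h>

/* Read MemAvailable (in kB) from /proc/meminfo; return -1 on failure. */
static long mem_available_kb(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[256];
	long kb = -1;

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "MemAvailable: %ld kB", &kb) == 1)
			break;
	}
	fclose(f);
	return kb;
}

int main(int argc, char **argv)
{
	long want_kb, avail_kb;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <buffer_size_kb>\n", argv[0]);
		return 1;
	}

	want_kb = strtol(argv[1], NULL, 10);
	avail_kb = mem_available_kb();

	/* Mirror the patch's intent: reject sizes beyond what looks available. */
	if (want_kb <= 0 || (avail_kb >= 0 && want_kb > avail_kb)) {
		fprintf(stderr, "refusing %ld kB (MemAvailable: %ld kB)\n",
			want_kb, avail_kb);
		return 1;
	}

	printf("%ld kB looks safe to echo into buffer_size_kb\n", want_kb);
	return 0;
}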
