Message-ID: <1413430551-22392-4-git-send-email-zhuhui@xiaomi.com>
Date:	Thu, 16 Oct 2014 11:35:50 +0800
From:	Hui Zhu <zhuhui@...omi.com>
To:	<rjw@...ysocki.net>, <len.brown@...el.com>, <pavel@....cz>,
	<m.szyprowski@...sung.com>, <akpm@...ux-foundation.org>,
	<mina86@...a86.com>, <aneesh.kumar@...ux.vnet.ibm.com>,
	<iamjoonsoo.kim@....com>, <hannes@...xchg.org>, <riel@...hat.com>,
	<mgorman@...e.de>, <minchan@...nel.org>, <nasa4836@...il.com>,
	<ddstreet@...e.org>, <hughd@...gle.com>, <mingo@...nel.org>,
	<rientjes@...gle.com>, <peterz@...radead.org>,
	<keescook@...omium.org>, <atomlin@...hat.com>, <raistlin@...ux.it>,
	<axboe@...com>, <paulmck@...ux.vnet.ibm.com>,
	<kirill.shutemov@...ux.intel.com>, <n-horiguchi@...jp.nec.com>,
	<k.khlebnikov@...sung.com>, <msalter@...hat.com>, <deller@....de>,
	<tangchen@...fujitsu.com>, <ben@...adent.org.uk>,
	<akinobu.mita@...il.com>, <lauraa@...eaurora.org>,
	<vbabka@...e.cz>, <sasha.levin@...cle.com>,
	<vdavydov@...allels.com>, <suleiman@...gle.com>
CC:	<linux-kernel@...r.kernel.org>, <linux-pm@...r.kernel.org>,
	<linux-mm@...ck.org>, Hui Zhu <zhuhui@...omi.com>
Subject: [PATCH 3/4] (CMA_AGGRESSIVE) Update reserve custom contiguous area code

Add cma_alloc_counter, cma_aggressive_switch, cma_aggressive_free_min and
cma_aggressive_shrink_switch.

cma_aggressive_switch is the master switch for all CMA_AGGRESSIVE functionality.
It can be controlled through the sysctl "vm.cma-aggressive-switch".

cma_aggressive_free_min can be controlled through the sysctl
"vm.cma-aggressive-free-min".  If the number of free CMA pages is smaller than
this value, CMA_AGGRESSIVE will not work in the page allocation code.
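With the default value of 500 pages and a typical 4 KiB page size, this
threshold corresponds to roughly 2 MiB of free CMA memory.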

cma_aggressive_shrink_switch can be controlled through the sysctl
"vm.cma-aggressive-shrink-switch".  If it is true and the amount of free normal
memory is smaller than the size being allocated, memory is shrunk with
shrink_all_memory() before the driver allocates pages from CMA.

When the Linux kernel tries to reserve a custom contiguous area, the value of
cma_alloc_counter is increased, so that CMA_AGGRESSIVE will not work in the
page allocation code.  After the reservation function returns, the value of
cma_alloc_counter is decreased.

Signed-off-by: Hui Zhu <zhuhui@...omi.com>
---
 include/linux/cma.h |  7 +++++++
 kernel/sysctl.c     | 27 +++++++++++++++++++++++++++
 mm/cma.c            | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 88 insertions(+)
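
For illustration only, here is a rough sketch of how the page allocation side
(the subject of another patch in this series, not shown here) might consult
these knobs.  The helper name is hypothetical; only the variables come from
this patch:

#ifdef CONFIG_CMA_AGGRESSIVE
/* Hypothetical helper: the name is made up for illustration; the
 * variables are the ones introduced by this patch. */
static bool cma_aggressive_should_use_cma(void)
{
	/* Master switch, set in cma_init_reserved_areas() or via the
	 * "vm.cma-aggressive-switch" sysctl. */
	if (!cma_aggressive_switch)
		return false;

	/* Stay out of the way while a custom contiguous reservation
	 * (cma_alloc()) is in flight. */
	if (atomic_read(&cma_alloc_counter) != 0)
		return false;

	/* Keep a reserve of free CMA pages for real contiguous
	 * requests ("vm.cma-aggressive-free-min"). */
	if (global_page_state(NR_FREE_CMA_PAGES) < cma_aggressive_free_min)
		return false;

	return true;
}
#endif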

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 0430ed0..df96abf 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -15,6 +15,13 @@
 
 struct cma;
 
+#ifdef CONFIG_CMA_AGGRESSIVE
+extern atomic_t cma_alloc_counter;
+extern int cma_aggressive_switch;
+extern unsigned long cma_aggressive_free_min;
+extern int cma_aggressive_shrink_switch;
+#endif
+
 extern phys_addr_t cma_get_base(struct cma *cma);
 extern unsigned long cma_get_size(struct cma *cma);
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4aada6d..646929e2 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -92,6 +92,10 @@
 #include <linux/nmi.h>
 #endif
 
+#ifdef CONFIG_CMA_AGGRESSIVE
+#include <linux/cma.h>
+#endif
+
 
 #if defined(CONFIG_SYSCTL)
 
@@ -1485,6 +1489,29 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
 	},
+#ifdef CONFIG_CMA_AGGRESSIVE
+	{
+		.procname	= "cma-aggressive-switch",
+		.data		= &cma_aggressive_switch,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "cma-aggressive-free-min",
+		.data		= &cma_aggressive_free_min,
+		.maxlen		= sizeof(unsigned long),
+		.mode		= 0600,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
+	{
+		.procname	= "cma-aggressive-shrink-switch",
+		.data		= &cma_aggressive_shrink_switch,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec,
+	},
+#endif
 	{ }
 };
 
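
As a usage sketch (assuming this patch is applied and CONFIG_CMA_AGGRESSIVE is
set), the knobs can be tuned from user space through procfs; note the 0600
mode above means root only.  The helper below is hypothetical:

/* Hypothetical user-space helper for tuning the new sysctls. */
#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Paths follow the procnames registered in vm_table above. */
	write_sysctl("/proc/sys/vm/cma-aggressive-switch", "1");
	write_sysctl("/proc/sys/vm/cma-aggressive-free-min", "500");
	write_sysctl("/proc/sys/vm/cma-aggressive-shrink-switch", "1");
	return 0;
}
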
diff --git a/mm/cma.c b/mm/cma.c
index 963bc4a..566ed5f 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -33,6 +33,7 @@
 #include <linux/log2.h>
 #include <linux/cma.h>
 #include <linux/highmem.h>
+#include <linux/swap.h>
 
 struct cma {
 	unsigned long	base_pfn;
@@ -127,6 +128,27 @@ err:
 	return -EINVAL;
 }
 
+#ifdef CONFIG_CMA_AGGRESSIVE
+/* The counter for dma_alloc_from_contiguous() and
+   dma_release_from_contiguous().  */
+atomic_t cma_alloc_counter = ATOMIC_INIT(0);
+
+/* Switch for CMA_AGGRESSIVE.  */
+int cma_aggressive_switch __read_mostly;
+
+/* If the number of free CMA pages is smaller than this value, CMA_AGGRESSIVE
+   will not work. */
+#ifdef CONFIG_CMA_AGGRESSIVE_FREE_MIN
+unsigned long cma_aggressive_free_min __read_mostly =
+					CONFIG_CMA_AGGRESSIVE_FREE_MIN;
+#else
+unsigned long cma_aggressive_free_min __read_mostly = 500;
+#endif
+
+/* Switch for CMA_AGGRESSIVE shrink.  */
+int cma_aggressive_shrink_switch __read_mostly;
+#endif
+
 static int __init cma_init_reserved_areas(void)
 {
 	int i;
@@ -138,6 +160,22 @@ static int __init cma_init_reserved_areas(void)
 			return ret;
 	}
 
+#ifdef CONFIG_CMA_AGGRESSIVE
+	cma_aggressive_switch = 0;
+#ifdef CONFIG_CMA_AGGRESSIVE_PHY_MAX
+	if (memblock_phys_mem_size() <= CONFIG_CMA_AGGRESSIVE_PHY_MAX)
+#else
+	if (memblock_phys_mem_size() <= 0x40000000)
+#endif
+		cma_aggressive_switch = 1;
+
+	cma_aggressive_shrink_switch = 0;
+#ifdef CONFIG_CMA_AGGRESSIVE_SHRINK
+	if (cma_aggressive_switch)
+		cma_aggressive_shrink_switch = 1;
+#endif
+#endif
+
 	return 0;
 }
 core_initcall(cma_init_reserved_areas);
@@ -312,6 +350,11 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
 	struct page *page = NULL;
 	int ret;
+#ifdef CONFIG_CMA_AGGRESSIVE
+	int free = global_page_state(NR_FREE_PAGES)
+			- global_page_state(NR_FREE_CMA_PAGES)
+			- totalreserve_pages;
+#endif
 
 	if (!cma || !cma->count)
 		return NULL;
@@ -326,6 +369,13 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
 	bitmap_maxno = cma_bitmap_maxno(cma);
 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
+#ifdef CONFIG_CMA_AGGRESSIVE
+	atomic_inc(&cma_alloc_counter);
+	if (cma_aggressive_switch && cma_aggressive_shrink_switch
+	    && free < count)
+		shrink_all_memory(count - free, false);
+#endif
+
 	for (;;) {
 		mutex_lock(&cma->lock);
 		bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
@@ -361,6 +411,10 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
 		start = bitmap_no + mask + 1;
 	}
 
+#ifdef CONFIG_CMA_AGGRESSIVE
+	atomic_dec(&cma_alloc_counter);
+#endif
+
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
 }
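
For context, the counter brackets the path that drivers usually reach through
the DMA contiguous helpers named in the comment above.  A hypothetical
driver-side caller:

/* Hypothetical driver snippet; dma_alloc_from_contiguous() ends up in
 * cma_alloc(), so cma_alloc_counter stays elevated for the call's
 * duration. */
#include <linux/dma-contiguous.h>
#include <linux/mm.h>

static struct page *grab_cma_buffer(struct device *dev, size_t bytes)
{
	int count = PAGE_ALIGN(bytes) >> PAGE_SHIFT;

	return dma_alloc_from_contiguous(dev, count, get_order(bytes));
}

static void drop_cma_buffer(struct device *dev, struct page *page,
			    size_t bytes)
{
	dma_release_from_contiguous(dev, page,
				    PAGE_ALIGN(bytes) >> PAGE_SHIFT);
}
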
-- 
1.9.1
