Message-id: <1333462221-3987-13-git-send-email-m.szyprowski@samsung.com>
Date: Tue, 03 Apr 2012 16:10:17 +0200
From: Marek Szyprowski <m.szyprowski@...sung.com>
To: linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
linux-media@...r.kernel.org, linux-mm@...ck.org,
linaro-mm-sig@...ts.linaro.org
Cc: Michal Nazarewicz <mina86@...a86.com>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Kyungmin Park <kyungmin.park@...sung.com>,
Russell King <linux@....linux.org.uk>,
Andrew Morton <akpm@...ux-foundation.org>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>,
Daniel Walker <dwalker@...eaurora.org>,
Mel Gorman <mel@....ul.ie>, Arnd Bergmann <arnd@...db.de>,
Jesse Barker <jesse.barker@...aro.org>,
Jonathan Corbet <corbet@....net>,
Chunsang Jeong <chunsang.jeong@...aro.org>,
Dave Hansen <dave@...ux.vnet.ibm.com>,
Benjamin Gaignard <benjamin.gaignard@...aro.org>,
Rob Clark <rob.clark@...aro.org>,
Ohad Ben-Cohen <ohad@...ery.com>,
Sandeep Patil <psandeep.s@...il.com>
Subject: [PATCHv24 12/16] mm: trigger page reclaim in alloc_contig_range() to
stabilise watermarks
alloc_contig_range() performs memory allocation, so it should also keep
the memory watermarks at their correct levels. This commit adds a call to
a *_slowpath-style reclaim to grab enough pages to ensure that the final
collection of contiguous pages from the freelists will not starve the
system.
Signed-off-by: Marek Szyprowski <m.szyprowski@...sung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@...sung.com>
CC: Michal Nazarewicz <mina86@...a86.com>
Tested-by: Rob Clark <rob.clark@...aro.org>
Tested-by: Ohad Ben-Cohen <ohad@...ery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@...aro.org>
Tested-by: Robert Nelson <robertcnelson@...il.com>
Tested-by: Barry Song <Baohua.Song@....com>
---
include/linux/mmzone.h | 9 +++++++
mm/page_alloc.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 69 insertions(+), 0 deletions(-)
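
For illustration only, outside the patch itself: a standalone C model of the
watermark arithmetic changed below. The struct, helper names and numbers are
invented; only the "add the CMA reserve on top of min/low/high" step mirrors
what the patched __setup_per_zone_wmarks() does via cma_wmark_pages().

/* Standalone model of the watermark bump; all values are made up. */
#include <stdio.h>

struct zone_model {
        unsigned long wmark_min;
        unsigned long wmark_low;
        unsigned long wmark_high;
        unsigned long min_cma_pages;    /* models zone->min_cma_pages */
};

/* models cma_wmark_pages(zone); evaluates to 0 when CONFIG_CMA is off */
static unsigned long cma_wmark_pages(const struct zone_model *z)
{
        return z->min_cma_pages;
}

static void setup_wmarks(struct zone_model *z, unsigned long min_pages)
{
        z->wmark_min  = min_pages;
        z->wmark_low  = min_pages + (min_pages >> 2);   /* min + 25% */
        z->wmark_high = min_pages + (min_pages >> 1);   /* min + 50% */

        /* the step added by this patch: keep the pending CMA reserve
         * above every watermark so reclaim targets account for it */
        z->wmark_min  += cma_wmark_pages(z);
        z->wmark_low  += cma_wmark_pages(z);
        z->wmark_high += cma_wmark_pages(z);
}

int main(void)
{
        struct zone_model z = { .min_cma_pages = 4096 };  /* pending contiguous allocation */

        setup_wmarks(&z, 1024);
        printf("min=%lu low=%lu high=%lu\n",
               z.wmark_min, z.wmark_low, z.wmark_high);
        return 0;
}
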
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8c1335f..26f2040 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,8 +63,10 @@ enum {
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define cma_wmark_pages(zone) zone->min_cma_pages
#else
# define is_migrate_cma(migratetype) false
+# define cma_wmark_pages(zone) 0
#endif
#define for_each_migratetype_order(order, type) \
@@ -371,6 +373,13 @@ struct zone {
/* see spanned/present_pages for more description */
seqlock_t span_seqlock;
#endif
+#ifdef CONFIG_CMA
+ /*
+ * CMA needs to increase watermark levels during the allocation
+ * process to make sure that the system is not starved.
+ */
+ unsigned long min_cma_pages;
+#endif
struct free_area free_area[MAX_ORDER];
#ifndef CONFIG_SPARSEMEM
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5a90ada..17a5521 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5079,6 +5079,11 @@ static void __setup_per_zone_wmarks(void)
zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+ zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+ zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+ zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
setup_zone_migrate_reserve(zone);
spin_unlock_irqrestore(&zone->lock, flags);
}
@@ -5684,6 +5689,54 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
return ret > 0 ? 0 : ret;
}
+/*
+ * Update zone's cma pages counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&zone->lock, flags);
+ zone->min_cma_pages += count;
+ spin_unlock_irqrestore(&zone->lock, flags);
+ setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger memory pressure bump to reclaim some pages in order to be able to
+ * allocate 'count' pages in single page units. Does similar work as
+ * __alloc_pages_slowpath() function.
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+ enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+ struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+ int did_some_progress = 0;
+ int order = 1;
+
+ /*
+ * Increase the watermark levels to force kswapd to do its job and
+ * stabilise at the new watermark level.
+ */
+ __update_cma_watermarks(zone, count);
+
+ /* Obey watermarks as if the page was being allocated */
+ while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+ wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+ did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+ NULL);
+ if (!did_some_progress) {
+ /* Exhausted what can be done so it's blamo time */
+ out_of_memory(zonelist, gfp_mask, order, NULL, false);
+ }
+ }
+
+ /* Restore original watermark levels. */
+ __update_cma_watermarks(zone, -count);
+
+ return count;
+}
+
/**
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
@@ -5782,6 +5835,13 @@ int alloc_contig_range(unsigned long start, unsigned long end,
goto done;
}
+ /*
+ * Reclaim enough pages to make sure that contiguous allocation
+ * will not starve the system.
+ */
+ __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+
+ /* Grab isolated pages from freelists. */
outer_end = isolate_freepages_range(outer_start, end);
if (!outer_end) {
ret = -EBUSY;
--
1.7.1.569.g6f426
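
For illustration only, outside the patch itself: __reclaim_pages() follows the
same pattern as the allocator slow path: raise the watermarks by the number of
pages about to be pulled out, run reclaim until the zone is back above its low
watermark, fall back to the OOM killer only when reclaim makes no progress,
then restore the original levels. The standalone C sketch below models just
that control flow; reclaim_some() is a stub standing in for __perform_reclaim()
and every number is invented.

/* Standalone model of the __reclaim_pages() control flow; not kernel code. */
#include <stdio.h>

struct zone_model {
        unsigned long free_pages;
        unsigned long wmark_low;
        unsigned long min_cma_pages;
};

/* models __update_cma_watermarks(): bump (or later restore) the reserve
 * that setup_per_zone_wmarks() folds into every watermark */
static void update_cma_watermarks(struct zone_model *z, long count)
{
        z->min_cma_pages += count;
        z->wmark_low += count;
}

/* stand-in for __perform_reclaim(): pretend each pass frees 128 pages */
static unsigned long reclaim_some(struct zone_model *z)
{
        unsigned long progress = 128;

        z->free_pages += progress;
        return progress;
}

static void reclaim_pages(struct zone_model *z, unsigned long count)
{
        update_cma_watermarks(z, count);

        /* obey the (raised) watermark as if 'count' pages were being allocated */
        while (z->free_pages < z->wmark_low) {
                if (!reclaim_some(z)) {
                        fprintf(stderr, "no progress: this is where the OOM killer would run\n");
                        break;
                }
        }

        update_cma_watermarks(z, -(long)count);
}

int main(void)
{
        struct zone_model z = { .free_pages = 500, .wmark_low = 1280 };

        reclaim_pages(&z, 1024);        /* make room for a 1024-page contiguous block */
        printf("free=%lu low=%lu\n", z.free_pages, z.wmark_low);
        return 0;
}
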