Message-ID: <20120220172339.22196.87364.stgit@zurg>
Date: Mon, 20 Feb 2012 21:23:39 +0400
From: Konstantin Khlebnikov <khlebnikov@...nvz.org>
To: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org
Cc: Hugh Dickins <hughd@...gle.com>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
Subject: [PATCH v2 17/22] mm: handle lruvec relocks on lumpy reclaim
Prepare for lock splitting in the lumpy reclaim logic:
move_active_pages_to_lru() and putback_inactive_pages() can now put
pages into different lruvecs.

* relock the lruvec before SetPageLRU()
* update the reclaim_stat pointer after relocks
* return the currently locked lruvec (a hedged sketch of the relock
  helper follows below)
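
For context, a minimal sketch of what the relock helper is assumed to
look like (the real __relock_page_lruvec() is introduced earlier in
this series; the lock_lruvec()/unlock_lruvec() primitives and the
exact signature are assumptions made for illustration only):

/*
 * Sketch only: switch from the currently held lruvec lock to the
 * lock of the lruvec that owns @page.  The caller already holds
 * @locked with interrupts disabled, so the plain (non-_irq) lock
 * variants suffice.
 */
static struct lruvec *__relock_page_lruvec(struct lruvec *locked,
					   struct page *page)
{
	struct lruvec *lruvec = page_lruvec(page);

	if (lruvec != locked) {
		unlock_lruvec(locked);
		lock_lruvec(lruvec);
	}
	return lruvec;
}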
Signed-off-by: Konstantin Khlebnikov <khlebnikov@...nvz.org>
---
mm/vmscan.c | 48 ++++++++++++++++++++++++++++++++++--------------
1 files changed, 34 insertions(+), 14 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4dba1df..39b4525 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1120,6 +1120,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
unsigned long *nr_scanned, struct scan_control *sc,
isolate_mode_t mode, int active, int file)
{
+ struct lruvec *cursor_lruvec = lruvec;
struct list_head *src;
unsigned long nr_taken = 0;
unsigned long nr_lumpy_taken = 0;
@@ -1203,14 +1204,16 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
!PageSwapCache(cursor_page))
break;
+ /* Switch cursor_lruvec lock for lumpy isolate */
+ if (!catch_page_lruvec(&cursor_lruvec, cursor_page))
+ continue;
+
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
unsigned int isolated_pages;
- struct lruvec *cursor_lruvec;
int cursor_lru = page_lru(cursor_page);
list_move(&cursor_page->lru, dst);
isolated_pages = hpage_nr_pages(cursor_page);
- cursor_lruvec = page_lruvec(cursor_page);
cursor_lruvec->pages_count[cursor_lru] -=
isolated_pages;
VM_BUG_ON((long)cursor_lruvec->
@@ -1241,6 +1244,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
}
}
+ /* Restore original lruvec lock */
+ cursor_lruvec = __relock_page_lruvec(cursor_lruvec, page);
+
/* If we break out of the loop above, lumpy reclaim failed */
if (pfn < end_pfn)
nr_lumpy_failed++;
@@ -1331,7 +1337,10 @@ static int too_many_isolated(struct zone *zone, int file,
return isolated > inactive;
}
-static noinline_for_stack void
+/*
+ * Returns currently locked lruvec
+ */
+static noinline_for_stack struct lruvec *
putback_inactive_pages(struct lruvec *lruvec,
struct list_head *page_list)
{
@@ -1353,11 +1362,14 @@ putback_inactive_pages(struct lruvec *lruvec,
lock_lruvec_irq(lruvec);
continue;
}
+
+ /* can differ only on lumpy reclaim */
+ lruvec = __relock_page_lruvec(lruvec, page);
+ reclaim_stat = &lruvec->reclaim_stat;
+
SetPageLRU(page);
lru = page_lru(page);
- /* can differ only on lumpy reclaim */
- lruvec = page_lruvec(page);
add_page_to_lru_list(lruvec, page, lru);
if (is_active_lru(lru)) {
int file = is_file_lru(lru);
@@ -1382,6 +1394,8 @@ putback_inactive_pages(struct lruvec *lruvec,
* To save our caller's stack, now use input list for pages to free.
*/
list_splice(&pages_to_free, page_list);
+
+ return lruvec;
}
static noinline_for_stack void
@@ -1551,7 +1565,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
- putback_inactive_pages(lruvec, &page_list);
+ lruvec = putback_inactive_pages(lruvec, &page_list);
__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
@@ -1610,12 +1624,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
*
* The downside is that we have to touch page->_count against each page.
* But we had to alter page->flags anyway.
+ *
+ * Returns currently locked lruvec
*/
-static void move_active_pages_to_lru(struct lruvec *lruvec,
- struct list_head *list,
- struct list_head *pages_to_free,
- enum lru_list lru)
+static struct lruvec *
+move_active_pages_to_lru(struct lruvec *lruvec,
+ struct list_head *list,
+ struct list_head *pages_to_free,
+ enum lru_list lru)
{
unsigned long pgmoved = 0;
struct page *page;
@@ -1637,11 +1654,12 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
page = lru_to_page(list);
+ /* can differ only on lumpy reclaim */
+ lruvec = __relock_page_lruvec(lruvec, page);
+
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
- /* can differ only on lumpy reclaim */
- lruvec = page_lruvec(page);
list_move(&page->lru, &lruvec->pages_lru[lru]);
numpages = hpage_nr_pages(page);
lruvec->pages_count[lru] += numpages;
@@ -1663,6 +1681,8 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, pgmoved);
if (!is_active_lru(lru))
__count_vm_events(PGDEACTIVATE, pgmoved);
+
+ return lruvec;
}
static void shrink_active_list(unsigned long nr_to_scan,
@@ -1751,9 +1771,9 @@ static void shrink_active_list(unsigned long nr_to_scan,
*/
reclaim_stat->recent_rotated[file] += nr_rotated;
- move_active_pages_to_lru(lruvec, &l_active, &l_hold,
+ lruvec = move_active_pages_to_lru(lruvec, &l_active, &l_hold,
LRU_ACTIVE + file * LRU_FILE);
- move_active_pages_to_lru(lruvec, &l_inactive, &l_hold,
+ lruvec = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold,
LRU_BASE + file * LRU_FILE);
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
unlock_lruvec_irq(lruvec);
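
The resulting caller-side pattern, sketched under the same assumptions:
helpers that may relock return the lruvec whose lock they ended up
holding, and the caller must unlock that one rather than the lruvec it
started with:

	lock_lruvec_irq(lruvec);
	/* ... isolate pages, shrink the list ... */
	lruvec = putback_inactive_pages(lruvec, &page_list);
	/* unlock whichever lruvec lock putback left us holding */
	unlock_lruvec_irq(lruvec);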
--