Message-ID: <20090512025153.GB7518@localhost>
Date: Tue, 12 May 2009 10:51:53 +0800
From: Wu Fengguang <fengguang.wu@...el.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: "hannes@...xchg.org" <hannes@...xchg.org>,
"peterz@...radead.org" <peterz@...radead.org>,
"riel@...hat.com" <riel@...hat.com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"tytso@....edu" <tytso@....edu>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"elladan@...imo.com" <elladan@...imo.com>,
"npiggin@...e.de" <npiggin@...e.de>,
"cl@...ux-foundation.org" <cl@...ux-foundation.org>,
"kosaki.motohiro@...fujitsu.com" <kosaki.motohiro@...fujitsu.com>,
"minchan.kim@...il.com" <minchan.kim@...il.com>
Subject: [PATCH -mm] vmscan: report vm_flags in page_referenced()

Collect vma->vm_flags of all VMAs that page_referenced() walked.

Note that the walked VMAs
- may not actually have the page installed in their PTEs
- may not be the complete set of VMAs that map the page

This is preparation for more informed reclaim heuristics, e.g. to
protect executable file pages more aggressively.

For now only VM_EXEC is used by the caller, in which case we don't
care whether the VMAs actually have the page installed in their PTEs.
The fact that the page is covered by some VM_EXEC VMA, and is
therefore likely part of an executable's text section, is enough
information.
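
For illustration only, a later caller could consume the collected
flags roughly as below. This is a hypothetical sketch of the intended
VM_EXEC heuristic, not part of this patch; the l_active list and the
exact placement are assumptions about the follow-up change:

	unsigned long vm_flags;
	int referenced;

	referenced = page_referenced(page, 0, sc->mem_cgroup, &vm_flags);
	if (referenced && (vm_flags & VM_EXEC)) {
		/*
		 * Covered by at least one VM_EXEC VMA, so likely an
		 * executable text page - keep it on the active list.
		 */
		list_add(&page->lru, &l_active);
		continue;
	}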
CC: Johannes Weiner <hannes@...xchg.org>
CC: Minchan Kim <minchan.kim@...il.com>
Signed-off-by: Wu Fengguang <fengguang.wu@...el.com>
---
 include/linux/rmap.h |    5 +++--
 mm/rmap.c            |   30 +++++++++++++++++++++---------
 mm/vmscan.c          |    7 +++++--
 3 files changed, 29 insertions(+), 13 deletions(-)

--- linux.orig/include/linux/rmap.h
+++ linux/include/linux/rmap.h
@@ -83,7 +83,8 @@ static inline void page_dup_rmap(struct
/*
* Called from mm/vmscan.c to handle paging out
*/
-int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
+int page_referenced(struct page *, int is_locked,
+ struct mem_cgroup *cnt, unsigned long *vm_flags);
int try_to_unmap(struct page *, int ignore_refs);
/*
@@ -128,7 +129,7 @@ int page_wrprotect(struct page *page, in
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)
-#define page_referenced(page,l,cnt) TestClearPageReferenced(page)
+#define page_referenced(page, locked, cnt, flags) TestClearPageReferenced(page)
#define try_to_unmap(page, refs) SWAP_FAIL
static inline int page_mkclean(struct page *page)
--- linux.orig/mm/rmap.c
+++ linux/mm/rmap.c
@@ -333,7 +333,8 @@ static int page_mapped_in_vma(struct pag
* repeatedly from either page_referenced_anon or page_referenced_file.
*/
static int page_referenced_one(struct page *page,
- struct vm_area_struct *vma, unsigned int *mapcount)
+ struct vm_area_struct *vma,
+ unsigned int *mapcount)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
@@ -385,7 +386,8 @@ out:
}
static int page_referenced_anon(struct page *page,
- struct mem_cgroup *mem_cont)
+ struct mem_cgroup *mem_cont,
+ unsigned long *vm_flags)
{
unsigned int mapcount;
struct anon_vma *anon_vma;
@@ -406,6 +408,7 @@ static int page_referenced_anon(struct p
if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
continue;
referenced += page_referenced_one(page, vma, &mapcount);
+ *vm_flags |= vma->vm_flags;
if (!mapcount)
break;
}
@@ -418,6 +421,7 @@ static int page_referenced_anon(struct p
* page_referenced_file - referenced check for object-based rmap
* @page: the page we're checking references on.
* @mem_cont: target memory controller
+ * @vm_flags: collect encountered vma->vm_flags
*
* For an object-based mapped page, find all the places it is mapped and
* check/clear the referenced flag. This is done by following the page->mapping
@@ -427,7 +431,8 @@ static int page_referenced_anon(struct p
* This function is only called from page_referenced for object-based pages.
*/
static int page_referenced_file(struct page *page,
- struct mem_cgroup *mem_cont)
+ struct mem_cgroup *mem_cont,
+ unsigned long *vm_flags)
{
unsigned int mapcount;
struct address_space *mapping = page->mapping;
@@ -468,6 +473,7 @@ static int page_referenced_file(struct p
if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
continue;
referenced += page_referenced_one(page, vma, &mapcount);
+ *vm_flags |= vma->vm_flags;
if (!mapcount)
break;
}
@@ -481,29 +487,35 @@ static int page_referenced_file(struct p
* @page: the page to test
* @is_locked: caller holds lock on the page
* @mem_cont: target memory controller
+ * @vm_flags: collect encountered vma->vm_flags
*
* Quick test_and_clear_referenced for all mappings to a page,
* returns the number of ptes which referenced the page.
*/
-int page_referenced(struct page *page, int is_locked,
- struct mem_cgroup *mem_cont)
+int page_referenced(struct page *page,
+ int is_locked,
+ struct mem_cgroup *mem_cont,
+ unsigned long *vm_flags)
{
int referenced = 0;
if (TestClearPageReferenced(page))
referenced++;
+ *vm_flags = 0;
if (page_mapped(page) && page->mapping) {
if (PageAnon(page))
- referenced += page_referenced_anon(page, mem_cont);
+ referenced += page_referenced_anon(page, mem_cont,
+ vm_flags);
else if (is_locked)
- referenced += page_referenced_file(page, mem_cont);
+ referenced += page_referenced_file(page, mem_cont,
+ vm_flags);
else if (!trylock_page(page))
referenced++;
else {
if (page->mapping)
- referenced +=
- page_referenced_file(page, mem_cont);
+ referenced += page_referenced_file(page,
+ mem_cont, vm_flags);
unlock_page(page);
}
}
--- linux.orig/mm/vmscan.c
+++ linux/mm/vmscan.c
@@ -598,6 +598,7 @@ static unsigned long shrink_page_list(st
struct pagevec freed_pvec;
int pgactivate = 0;
unsigned long nr_reclaimed = 0;
+ unsigned long vm_flags;
cond_resched();
@@ -648,7 +649,8 @@ static unsigned long shrink_page_list(st
goto keep_locked;
}
- referenced = page_referenced(page, 1, sc->mem_cgroup);
+ referenced = page_referenced(page, 1,
+ sc->mem_cgroup, &vm_flags);
/* In active use or really unfreeable? Activate it. */
if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
referenced && page_mapping_inuse(page))
@@ -1229,6 +1231,7 @@ static void shrink_active_list(unsigned
{
unsigned long pgmoved;
unsigned long pgscanned;
+ unsigned long vm_flags;
LIST_HEAD(l_hold); /* The pages which were snipped off */
LIST_HEAD(l_inactive);
struct page *page;
@@ -1269,7 +1272,7 @@ static void shrink_active_list(unsigned
/* page_referenced clears PageReferenced */
if (page_mapping_inuse(page) &&
- page_referenced(page, 0, sc->mem_cgroup))
+ page_referenced(page, 0, sc->mem_cgroup, &vm_flags))
pgmoved++;
list_add(&page->lru, &l_inactive);
--