Message-Id: <20200228033819.3857058-3-ying.huang@intel.com>
Date: Fri, 28 Feb 2020 11:38:18 +0800
From: "Huang, Ying" <ying.huang@...el.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Huang Ying <ying.huang@...el.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
David Hildenbrand <david@...hat.com>,
Mel Gorman <mgorman@...e.de>, Vlastimil Babka <vbabka@...e.cz>,
Zi Yan <ziy@...dia.com>, Michal Hocko <mhocko@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Minchan Kim <minchan@...nel.org>,
Johannes Weiner <hannes@...xchg.org>,
Hugh Dickins <hughd@...gle.com>
Subject: [RFC 2/3] mm: Add a new page flag PageLazyFree() for MADV_FREE
From: Huang Ying <ying.huang@...el.com>
Currently, !PageSwapBacked() is used as the flag for pages freed lazily
via MADV_FREE. This isn't obvious to readers of the code, so Dave
suggested adding a new page flag for it to improve code readability.
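For context, here is a minimal userspace sketch (not part of this patch,
and assuming a kernel and libc that expose MADV_FREE, i.e. Linux 4.5+) of
how pages end up in the lazy-free state that the new helper describes:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 1024 * 1024;
	/* Anonymous private mapping; its pages start out SwapBacked. */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0x5a, len);	/* dirty the pages */

	/*
	 * After MADV_FREE the kernel may reclaim these pages lazily
	 * instead of swapping them out, unless they are dirtied again.
	 * On the kernel side this is the state the new PageLazyFree()
	 * helper makes explicit.
	 */
	if (madvise(buf, len, MADV_FREE) != 0) {
		perror("madvise(MADV_FREE)");
		return 1;
	}

	munmap(buf, len);
	return 0;
}

On the kernel side, checks such as "PageAnon(page) && !PageSwapBacked(page)"
in mm/vmscan.c become "PageLazyFree(page)", which is the readability gain
this patch is after.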
Signed-off-by: "Huang, Ying" <ying.huang@...el.com>
Suggested-by: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: David Hildenbrand <david@...hat.com>
Cc: Mel Gorman <mgorman@...e.de>
Cc: Vlastimil Babka <vbabka@...e.cz>
Cc: Zi Yan <ziy@...dia.com>
Cc: Michal Hocko <mhocko@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Minchan Kim <minchan@...nel.org>
Cc: Johannes Weiner <hannes@...xchg.org>
Cc: Hugh Dickins <hughd@...gle.com>
---
 include/linux/page-flags.h | 25 +++++++++++++++++++++++++
 mm/rmap.c                  |  6 +++---
 mm/swap.c                  | 11 +++--------
 mm/vmscan.c                |  7 +++----
4 files changed, 34 insertions(+), 15 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 49c2697046b9..759748fbcfad 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -498,6 +498,31 @@ static __always_inline int PageKsm(struct page *page)
TESTPAGEFLAG_FALSE(Ksm)
#endif
+/*
+ * For pages freed lazily via MADV_FREE. Lazyfree pages are clean
+ * anonymous pages. They have the SwapBacked flag cleared to
+ * distinguish them from normal anonymous pages.
+ */
+static __always_inline int PageLazyFree(struct page *page)
+{
+ page = compound_head(page);
+ return PageAnon(page) && !PageSwapBacked(page);
+}
+
+static __always_inline void SetPageLazyFree(struct page *page)
+{
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ VM_BUG_ON_PAGE(!PageAnon(page), page);
+ ClearPageSwapBacked(page);
+}
+
+static __always_inline void ClearPageLazyFree(struct page *page)
+{
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ VM_BUG_ON_PAGE(!PageAnon(page), page);
+ SetPageSwapBacked(page);
+}
+
u64 stable_page_flags(struct page *page);
static inline int PageUptodate(struct page *page)
diff --git a/mm/rmap.c b/mm/rmap.c
index 03c5b116d30e..1dcbb1771dd7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1598,7 +1598,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* Store the swap location in the pte.
* See handle_pte_fault() ...
*/
- if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
+ if (unlikely(PageLazyFree(page) == PageSwapCache(page))) {
WARN_ON_ONCE(1);
ret = false;
/* We have to invalidate as we cleared the pte */
@@ -1609,7 +1609,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
/* MADV_FREE page check */
- if (!PageSwapBacked(page)) {
+ if (PageLazyFree(page)) {
if (!PageDirty(page)) {
/* Invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm,
@@ -1623,7 +1623,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* discarded. Remap the page to page table.
*/
set_pte_at(mm, address, pvmw.pte, pteval);
- SetPageSwapBacked(page);
+ ClearPageLazyFree(page);
ret = false;
page_vma_mapped_walk_done(&pvmw);
break;
diff --git a/mm/swap.c b/mm/swap.c
index f502a2155e85..bd5e40e14c94 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -564,7 +564,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
- if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+ if (PageLRU(page) && PageAnon(page) && !PageLazyFree(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
bool active = PageActive(page);
@@ -572,12 +572,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
LRU_INACTIVE_ANON + active);
ClearPageActive(page);
ClearPageReferenced(page);
- /*
- * lazyfree pages are clean anonymous pages. They have
- * SwapBacked flag cleared to distinguish normal anonymous
- * pages
- */
- ClearPageSwapBacked(page);
+ SetPageLazyFree(page);
add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
@@ -678,7 +673,7 @@ void deactivate_page(struct page *page)
*/
void mark_page_lazyfree(struct page *page)
{
- if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+ if (PageLRU(page) && PageAnon(page) && !PageLazyFree(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f14c8c6069a6..0aaee7052fb0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1043,8 +1043,7 @@ static void page_check_dirty_writeback(struct page *page,
* Anonymous pages are not handled by flushers and must be written
* from reclaim context. Do not stall reclaim based on them
*/
- if (!page_is_file_cache(page) ||
- (PageAnon(page) && !PageSwapBacked(page))) {
+ if (!page_is_file_cache(page) || PageLazyFree(page)) {
*dirty = false;
*writeback = false;
return;
@@ -1235,7 +1234,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* Try to allocate it some swap space here.
* Lazyfree page could be freed directly
*/
- if (PageAnon(page) && PageSwapBacked(page)) {
+ if (PageAnon(page) && !PageLazyFree(page)) {
if (!PageSwapCache(page)) {
if (!(sc->gfp_mask & __GFP_IO))
goto keep_locked;
@@ -1411,7 +1410,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
}
- if (PageAnon(page) && !PageSwapBacked(page)) {
+ if (PageLazyFree(page)) {
/* follow __remove_mapping for reference */
if (!page_ref_freeze(page, 1))
goto keep_locked;
--
2.25.0