Message-Id: <20200928175428.4110504-23-zi.yan@sent.com>
Date: Mon, 28 Sep 2020 13:54:20 -0400
From: Zi Yan <zi.yan@...t.com>
To: linux-mm@...ck.org
Cc: "Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Roman Gushchin <guro@...com>, Rik van Riel <riel@...riel.com>,
Matthew Wilcox <willy@...radead.org>,
Shakeel Butt <shakeelb@...gle.com>,
Yang Shi <shy828301@...il.com>,
Jason Gunthorpe <jgg@...dia.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Michal Hocko <mhocko@...e.com>,
David Hildenbrand <david@...hat.com>,
William Kucharski <william.kucharski@...cle.com>,
Andrea Arcangeli <aarcange@...hat.com>,
John Hubbard <jhubbard@...dia.com>,
David Nellans <dnellans@...dia.com>,
linux-kernel@...r.kernel.org, Zi Yan <ziy@...dia.com>
Subject: [RFC PATCH v2 22/30] mm: thp: split PUD THPs at page reclaim.
From: Zi Yan <ziy@...dia.com>
We cannot swap PUD THPs, so split them before swapping them out. PUD
THPs will be split into PMD THPs, so that if THP_SWAP is enabled, the
resulting PMD THPs can be swapped out as a whole.
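
For scale: on x86_64 a PUD THP is order 18 (1GB) and a PMD THP is order
9 (2MB), so one split yields 512 PMD THPs. The following is a minimal
userspace sketch, not part of the patch, of the nr_scanned/nr_pages
fixup the vmscan hunk below performs after a successful split; the
order constants assume x86_64 and the variable names mirror
shrink_page_list():

/*
 * Illustration only -- NOT kernel code and not part of this patch.
 * Models the accounting shrink_page_list() does after
 * split_huge_pud_page_to_list() succeeds. Orders assume x86_64
 * (PAGE_SHIFT = 12): PMD order 9 (2MB), PUD order 18 (1GB).
 */
#include <stdio.h>

#define HPAGE_PMD_ORDER 9
#define HPAGE_PUD_ORDER 18
#define HPAGE_PMD_NR (1UL << HPAGE_PMD_ORDER)  /* 512 base pages */
#define HPAGE_PUD_NR (1UL << HPAGE_PUD_ORDER)  /* 262144 base pages */

int main(void)
{
        unsigned long nr_pages = HPAGE_PUD_NR;  /* base pages in the PUD THP */
        unsigned long nr_scanned = nr_pages;    /* reclaim counted all of them */

        /*
         * After the split, the page still being processed is only the
         * head PMD THP; the other 511 PMD THPs were put on page_list
         * and will be counted when they are scanned themselves.
         */
        nr_scanned -= (nr_pages - HPAGE_PMD_NR);
        nr_pages = HPAGE_PMD_NR;

        printf("1 PUD THP (%lu pages) -> %lu PMD THPs (%lu pages each)\n",
               HPAGE_PUD_NR, HPAGE_PUD_NR / HPAGE_PMD_NR, HPAGE_PMD_NR);
        printf("after split: nr_pages = %lu, nr_scanned = %lu\n",
               nr_pages, nr_scanned);
        return 0;
}

Compiled and run, this prints that one 262144-page PUD THP becomes 512
PMD THPs of 512 pages each, with nr_pages and nr_scanned dropping to
512, matching the sc->nr_scanned adjustment in the hunk below.
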
Signed-off-by: Zi Yan <ziy@...dia.com>
---
 mm/swap_slots.c |  2 ++
 mm/vmscan.c     | 33 +++++++++++++++++++++++++++------
 2 files changed, 29 insertions(+), 6 deletions(-)

diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 3e6453573a89..65b8742a0446 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -312,6 +312,8 @@ swp_entry_t get_swap_page(struct page *page)
         entry.val = 0;
 
         if (PageTransHuge(page)) {
+                if (compound_order(page) == HPAGE_PUD_ORDER)
+                        return entry;
                 if (IS_ENABLED(CONFIG_THP_SWAP))
                         get_swap_pages(1, &entry, HPAGE_PMD_NR);
                 goto out;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eae57d092931..12e169af663c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1244,7 +1244,21 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                         if (!PageSwapCache(page)) {
                                 if (!(sc->gfp_mask & __GFP_IO))
                                         goto keep_locked;
-                                if (PageTransHuge(page)) {
+                                if (!PageTransHuge(page))
+                                        goto try_to_swap;
+                                if (compound_order(page) == HPAGE_PUD_ORDER) {
+                                        /* cannot split THP, skip it */
+                                        if (!can_split_huge_pud_page(page, NULL))
+                                                goto activate_locked;
+                                        /* Split PUD THPs before swapping */
+                                        if (split_huge_pud_page_to_list(page, page_list))
+                                                goto activate_locked;
+                                        else {
+                                                sc->nr_scanned -= (nr_pages - HPAGE_PMD_NR);
+                                                nr_pages = HPAGE_PMD_NR;
+                                        }
+                                }
+                                if (compound_order(page) == HPAGE_PMD_ORDER) {
                                         /* cannot split THP, skip it */
                                         if (!can_split_huge_page(page, NULL))
                                                 goto activate_locked;
@@ -1254,14 +1268,17 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                                          * tail pages can be freed without IO.
                                          */
                                         if (!compound_mapcount(page) &&
-                                            split_huge_page_to_list(page,
-                                                                    page_list))
+                                                split_huge_page_to_list(page,
+                                                                        page_list))
                                                 goto activate_locked;
                                 }
+try_to_swap:
                                 if (!add_to_swap(page)) {
                                         if (!PageTransHuge(page))
                                                 goto activate_locked_split;
                                         /* Fallback to swap normal pages */
+                                        VM_BUG_ON_PAGE(compound_order(page) != HPAGE_PMD_ORDER,
+                                                       page);
                                         if (split_huge_page_to_list(page,
                                                                     page_list))
                                                 goto activate_locked;
@@ -1278,6 +1295,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                                 mapping = page_mapping(page);
                         }
                 } else if (unlikely(PageTransHuge(page))) {
+                        VM_BUG_ON_PAGE(compound_order(page) != HPAGE_PMD_ORDER, page);
                         /* Split file THP */
                         if (split_huge_page_to_list(page, page_list))
                                 goto keep_locked;
@@ -1303,9 +1321,12 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                         enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
                         bool was_swapbacked = PageSwapBacked(page);
 
-                        if (unlikely(PageTransHuge(page)))
-                                flags |= TTU_SPLIT_HUGE_PMD;
-
+                        if (unlikely(PageTransHuge(page))) {
+                                if (compound_order(page) == HPAGE_PMD_ORDER)
+                                        flags |= TTU_SPLIT_HUGE_PMD;
+                                else if (compound_order(page) == HPAGE_PUD_ORDER)
+                                        flags |= TTU_SPLIT_HUGE_PUD;
+                        }
                         if (!try_to_unmap(page, flags)) {
                                 stat->nr_unmap_fail += nr_pages;
                                 if (!was_swapbacked && PageSwapBacked(page))
--
2.28.0