Message-ID: <20230218002819.1486479-29-jthoughton@google.com>
Date: Sat, 18 Feb 2023 00:28:01 +0000
From: James Houghton <jthoughton@...gle.com>
To: Mike Kravetz <mike.kravetz@...cle.com>,
Muchun Song <songmuchun@...edance.com>,
Peter Xu <peterx@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: David Hildenbrand <david@...hat.com>,
David Rientjes <rientjes@...gle.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Mina Almasry <almasrymina@...gle.com>,
"Zach O'Keefe" <zokeefe@...gle.com>,
Manish Mishra <manish.mishra@...anix.com>,
Naoya Horiguchi <naoya.horiguchi@....com>,
"Dr . David Alan Gilbert" <dgilbert@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Yang Shi <shy828301@...il.com>,
Frank van der Linden <fvdl@...gle.com>,
Jiaqi Yan <jiaqiyan@...gle.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
James Houghton <jthoughton@...gle.com>
Subject: [PATCH v2 28/46] mm: rmap: in try_to_{migrate,unmap}, check head page
for hugetlb page flags

The main complication here is that HugeTLB pages store their poison
status in the head page, as the HWPoison page flag. Because HugeTLB
high-granularity mapping can create PTEs that point to subpages rather
than always to the head page of a hugepage, we need to check the
compound head when reading these page flags.

Signed-off-by: James Houghton <jthoughton@...gle.com>
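
(Illustration only, not part of the patch.) The pattern the diff below
applies can be summarized with a rough sketch against the 6.2-era folio
API; the helper names hgm_page_flags_page() and hgm_mapping_poisoned()
are hypothetical, while folio_test_hugetlb(), PageHWPoison() and
&folio->page are the existing kernel interfaces:

#include <linux/mm.h>
#include <linux/page-flags.h>

/*
 * Hypothetical helper: return the page whose flags describe this
 * mapping.  HugeTLB keeps per-hugepage flags such as HWPoison on the
 * head page, so a high-granularity mapping of a subpage must look
 * there; for everything else the subpage carries its own flags.
 */
static struct page *hgm_page_flags_page(struct folio *folio,
					struct page *subpage)
{
	return folio_test_hugetlb(folio) ? &folio->page : subpage;
}

/*
 * Hypothetical example mirroring try_to_unmap_one(): the poison check
 * consults the head page for HugeTLB folios.
 */
static bool hgm_mapping_poisoned(struct folio *folio, struct page *subpage)
{
	return PageHWPoison(hgm_page_flags_page(folio, subpage));
}

Note that in the real code the hwpoison swap entry is still built from
the subpage (make_hwpoison_entry(subpage)); only the flag lookups move
to the head page.
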
diff --git a/mm/rmap.c b/mm/rmap.c
index 0a019ae32f04..4908ede83173 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1456,10 +1456,11 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 	pte_t pteval;
-	struct page *subpage;
+	struct page *subpage, *page_flags_page;
 	bool anon_exclusive, ret = true;
 	struct mmu_notifier_range range;
 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
+	bool page_poisoned;
 
 	/*
 	 * When racing against e.g. zap_pte_range() on another cpu,
@@ -1512,9 +1513,17 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 
 		subpage = folio_page(folio,
 					pte_pfn(*pvmw.pte) - folio_pfn(folio));
+		/*
+		 * We check the page flags of HugeTLB pages by checking the
+		 * head page.
+		 */
+		page_flags_page = folio_test_hugetlb(folio)
+					? &folio->page
+					: subpage;
+		page_poisoned = PageHWPoison(page_flags_page);
 		address = pvmw.address;
 		anon_exclusive = folio_test_anon(folio) &&
-				 PageAnonExclusive(subpage);
+				 PageAnonExclusive(page_flags_page);
 
 		if (folio_test_hugetlb(folio)) {
 			bool anon = folio_test_anon(folio);
@@ -1523,7 +1532,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * The try_to_unmap() is only passed a hugetlb page
 			 * in the case where the hugetlb page is poisoned.
 			 */
-			VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
+			VM_BUG_ON_FOLIO(!page_poisoned, folio);
 			/*
 			 * huge_pmd_unshare may unmap an entire PMD page.
 			 * There is no way of knowing exactly which PMDs may
@@ -1606,7 +1615,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		/* Update high watermark before we lower rss */
 		update_hiwater_rss(mm);
 
-		if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
+		if (page_poisoned && !(flags & TTU_IGNORE_HWPOISON)) {
 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
 			if (folio_test_hugetlb(folio)) {
 				hugetlb_count_sub(1UL << pvmw.pte_order, mm);
@@ -1632,7 +1641,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			mmu_notifier_invalidate_range(mm, address,
 						      address + PAGE_SIZE);
 		} else if (folio_test_anon(folio)) {
-			swp_entry_t entry = { .val = page_private(subpage) };
+			swp_entry_t entry = {
+				.val = page_private(page_flags_page)
+			};
 			pte_t swp_pte;
 			/*
 			 * Store the swap location in the pte.
@@ -1822,7 +1833,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 	pte_t pteval;
-	struct page *subpage;
+	struct page *subpage, *page_flags_page;
 	bool anon_exclusive, ret = true;
 	struct mmu_notifier_range range;
 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
@@ -1902,9 +1913,16 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			subpage = folio_page(folio,
 					pte_pfn(*pvmw.pte) - folio_pfn(folio));
 		}
+		/*
+		 * We check the page flags of HugeTLB pages by checking the
+		 * head page.
+		 */
+		page_flags_page = folio_test_hugetlb(folio)
+					? &folio->page
+					: subpage;
 		address = pvmw.address;
 		anon_exclusive = folio_test_anon(folio) &&
-				 PageAnonExclusive(subpage);
+				 PageAnonExclusive(page_flags_page);
 
 		if (folio_test_hugetlb(folio)) {
 			bool anon = folio_test_anon(folio);
@@ -2023,7 +2041,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
 			 */
-		} else if (PageHWPoison(subpage)) {
+		} else if (PageHWPoison(page_flags_page)) {
 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
 			if (folio_test_hugetlb(folio)) {
 				hugetlb_count_sub(1L << pvmw.pte_order, mm);
--
2.39.2.637.g21b0678d19-goog