Message-ID: <20230105101844.1893104-24-jthoughton@google.com>
Date: Thu, 5 Jan 2023 10:18:21 +0000
From: James Houghton <jthoughton@...gle.com>
To: Mike Kravetz <mike.kravetz@...cle.com>,
Muchun Song <songmuchun@...edance.com>,
Peter Xu <peterx@...hat.com>
Cc: David Hildenbrand <david@...hat.com>,
David Rientjes <rientjes@...gle.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Mina Almasry <almasrymina@...gle.com>,
"Zach O'Keefe" <zokeefe@...gle.com>,
Manish Mishra <manish.mishra@...anix.com>,
Naoya Horiguchi <naoya.horiguchi@....com>,
"Dr . David Alan Gilbert" <dgilbert@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Yang Shi <shy828301@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
James Houghton <jthoughton@...gle.com>
Subject: [PATCH 23/46] mm: rmap: make page_vma_mapped_walk callers use pte_order
Make the page_vma_mapped_walk callers use pvmw.pte_order to determine the
size of the PTE that the walk stopped at, instead of assuming that hugetlb
pages are always mapped at huge_page_shift granularity. This also updates
the callers' hugetlb mapcounting code to handle mapcount properly for
subpage-mapped hugetlb pages.
Signed-off-by: James Houghton <jthoughton@...gle.com>
---
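Not for the commit message, just an aside for reviewers: a minimal sketch
of the idea, assuming only the pvmw.pte_order field introduced earlier in
this series. Callers derive the mapping size from the PTE that the walk
stopped at, rather than from the hstate:

	/*
	 * Illustration only: pvmw.pte_order describes the PTE that
	 * page_vma_mapped_walk() stopped at, which may map less than a
	 * full hugepage.
	 */
	unsigned long nr_pages = 1UL << pvmw.pte_order;     /* base pages mapped */
	unsigned int shift = pvmw.pte_order + PAGE_SHIFT;   /* mapping shift */
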
 mm/migrate.c |  2 +-
 mm/rmap.c    | 17 +++++++++++++----
 2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 832f639fc49a..0062689f4878 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -244,7 +244,7 @@ static bool remove_migration_pte(struct folio *folio,
 
 #ifdef CONFIG_HUGETLB_PAGE
 		if (folio_test_hugetlb(folio)) {
-			unsigned int shift = huge_page_shift(hstate_vma(vma));
+			unsigned int shift = pvmw.pte_order + PAGE_SHIFT;
 
 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
 			if (folio_test_anon(folio))
diff --git a/mm/rmap.c b/mm/rmap.c
index 8a24b90d9531..ff7e6c770b0a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1608,7 +1608,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
 			if (folio_test_hugetlb(folio)) {
-				hugetlb_count_sub(folio_nr_pages(folio), mm);
+				hugetlb_count_sub(1UL << pvmw.pte_order, mm);
 				set_huge_pte_at(mm, address, pvmw.pte, pteval);
 			} else {
 				dec_mm_counter(mm, mm_counter(&folio->page));
@@ -1767,7 +1767,11 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		 *
 		 * See Documentation/mm/mmu_notifier.rst
 		 */
-		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+		if (folio_test_hugetlb(folio))
+			page_remove_rmap(&folio->page, vma, true);
+		else
+			page_remove_rmap(subpage, vma, false);
+
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_page_drain_local();
 		folio_put(folio);
@@ -2030,7 +2034,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 		} else if (PageHWPoison(subpage)) {
 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
 			if (folio_test_hugetlb(folio)) {
-				hugetlb_count_sub(folio_nr_pages(folio), mm);
+				hugetlb_count_sub(1L << pvmw.pte_order, mm);
 				set_huge_pte_at(mm, address, pvmw.pte, pteval);
 			} else {
 				dec_mm_counter(mm, mm_counter(&folio->page));
@@ -2122,7 +2126,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 		 *
 		 * See Documentation/mm/mmu_notifier.rst
 		 */
-		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+		if (folio_test_hugetlb(folio))
+			page_remove_rmap(&folio->page, vma, true);
+		else
+			page_remove_rmap(subpage, vma, false);
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_page_drain_local();
 		folio_put(folio);
@@ -2206,6 +2213,8 @@ static bool page_make_device_exclusive_one(struct folio *folio,
 				      args->owner);
 	mmu_notifier_invalidate_range_start(&range);
 
+	VM_BUG_ON_FOLIO(folio_test_hugetlb(folio), folio);
+
 	while (page_vma_mapped_walk(&pvmw)) {
 		/* Unexpected PMD-mapped THP? */
 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
--
2.39.0.314.g84b9a713c41-goog