Message-Id: <20211217113049.23850-11-david@redhat.com>
Date: Fri, 17 Dec 2021 12:30:48 +0100
From: David Hildenbrand <david@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Hugh Dickins <hughd@...gle.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
David Rientjes <rientjes@...gle.com>,
Shakeel Butt <shakeelb@...gle.com>,
John Hubbard <jhubbard@...dia.com>,
Jason Gunthorpe <jgg@...dia.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Mike Rapoport <rppt@...ux.ibm.com>,
Yang Shi <shy828301@...il.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Matthew Wilcox <willy@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>, Jann Horn <jannh@...gle.com>,
Michal Hocko <mhocko@...nel.org>,
Nadav Amit <namit@...are.com>, Rik van Riel <riel@...riel.com>,
Roman Gushchin <guro@...com>,
Andrea Arcangeli <aarcange@...hat.com>,
Peter Xu <peterx@...hat.com>,
Donald Dutile <ddutile@...hat.com>,
Christoph Hellwig <hch@....de>,
Oleg Nesterov <oleg@...hat.com>, Jan Kara <jack@...e.cz>,
linux-mm@...ck.org, linux-kselftest@...r.kernel.org,
linux-doc@...r.kernel.org, David Hildenbrand <david@...hat.com>
Subject: [PATCH v1 10/11] mm: thp: introduce and use page_trans_huge_anon_shared()

Let's add an optimized way to check "page_trans_huge_mapcount() > 1" that
is allowed to break out of the mapcount loop as soon as a second mapping
is detected, instead of always iterating over all subpages.

This commit is based on a prototype patch by Andrea.

Co-developed-by: Andrea Arcangeli <aarcange@...hat.com>
Signed-off-by: Andrea Arcangeli <aarcange@...hat.com>
Reviewed-by: Peter Xu <peterx@...hat.com>
Signed-off-by: David Hildenbrand <david@...hat.com>
---
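Note: to illustrate what the new check computes, here is a minimal
userspace model of the early-exit logic. This is only a sketch: struct
thp_model and thp_anon_shared() are made-up stand-ins, not kernel code;
only the "-1 means unmapped" _mapcount encoding, the PageDoubleMap()
adjustment and the early bail-out mirror the actual patch below.

#include <stdbool.h>
#include <stdio.h>

#define NR_SUBPAGES 512	/* subpages of a 2 MiB THP on x86-64 */

/* Hypothetical stand-in for the relevant parts of struct page. */
struct thp_model {
	int compound_mapcount;			/* PMD-level mappings */
	bool double_map;			/* also mapped via PTEs? */
	int subpage_mapcount[NR_SUBPAGES];	/* -1 == subpage unmapped */
};

/* Early-exit variant of "page_trans_huge_mapcount() > 1". */
static bool thp_anon_shared(const struct thp_model *thp)
{
	int i, mapcount = thp->compound_mapcount;

	if (mapcount > 1)
		return true;		/* more than one PMD mapping */
	if (thp->double_map)
		mapcount -= 1;		/* PMD mapping re-counted per subpage */
	for (i = 0; i < NR_SUBPAGES; i++) {
		/* "+ 1" undoes the -1 encoding of an unmapped subpage. */
		if (thp->subpage_mapcount[i] + mapcount + 1 > 1)
			return true;	/* stop at the first shared subpage */
	}
	return false;
}

int main(void)
{
	struct thp_model thp = { .compound_mapcount = 1 };
	int i;

	for (i = 0; i < NR_SUBPAGES; i++)
		thp.subpage_mapcount[i] = -1;	/* no PTE mappings */

	printf("shared: %d\n", thp_anon_shared(&thp));	/* 0 -> exclusive */
	thp.compound_mapcount = 2;			/* e.g., after fork() */
	printf("shared: %d\n", thp_anon_shared(&thp));	/* 1 -> shared */
	return 0;
}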
include/linux/huge_mm.h | 7 +++++++
mm/gup.c | 2 +-
mm/huge_memory.c | 34 ++++++++++++++++++++++++++++++++++
3 files changed, 42 insertions(+), 1 deletion(-)
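Note: the retry loop in page_trans_huge_anon_shared() relies on
thp_mapcount_read_begin()/thp_mapcount_read_retry() introduced earlier
in this series. The following userspace sketch models the assumed
seqcount-style reader semantics (an odd counter means a writer is
active; a changed counter means the mapcounts moved under us). The
names and details here are hypothetical, not the kernel implementation.

#include <stdatomic.h>
#include <stdbool.h>

/* Stand-in for the per-page sequence counter; writers make it odd. */
static _Atomic unsigned int mapcount_seq;

static unsigned int mapcount_read_begin(void)
{
	unsigned int seq;

	/* Wait until no writer is active (even sequence number). */
	while ((seq = atomic_load_explicit(&mapcount_seq,
					   memory_order_acquire)) & 1)
		;
	return seq;
}

static bool mapcount_read_retry(unsigned int seq)
{
	atomic_thread_fence(memory_order_acquire);
	/* A changed counter means a writer ran; the reader must retry. */
	return atomic_load_explicit(&mapcount_seq,
				    memory_order_relaxed) != seq;
}

/* Reader pattern mirroring page_trans_huge_anon_shared() below. */
static bool read_shared_consistently(void)
{
	unsigned int seq;
	bool shared;

	do {
		seq = mapcount_read_begin();
		shared = false;	/* read the mapcounts here */
	} while (mapcount_read_retry(seq));

	return shared;
}

int main(void)
{
	return read_shared_consistently() ? 1 : 0;
}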
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 44e02d47c65a..3a9d8cf64219 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -361,6 +361,8 @@ static inline void thp_mapcount_unlock(struct page *page,
 	local_irq_restore(irq_flags);
 }
 
+extern bool page_trans_huge_anon_shared(struct page *page);
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -532,6 +534,11 @@ static inline void thp_mapcount_unlock(struct page *page,
 {
 }
 
+static inline bool page_trans_huge_anon_shared(struct page *page)
+{
+	return false;
+}
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /**
diff --git a/mm/gup.c b/mm/gup.c
index 35d1b28e3829..496575ff9ac8 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -70,7 +70,7 @@ bool gup_must_unshare(unsigned int flags, struct page *page, bool is_head)
 		return __page_mapcount(page) > 1;
 	if (is_head) {
 		VM_BUG_ON(!PageTransHuge(page));
-		return page_trans_huge_mapcount(page, NULL) > 1;
+		return page_trans_huge_anon_shared(page);
 	}
 	return page_mapcount(page) > 1;
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 57842e8b13d4..dced82274f1d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1281,6 +1281,40 @@ void huge_pmd_set_accessed(struct vm_fault *vmf)
 	spin_unlock(vmf->ptl);
 }
 
+
+static bool __page_trans_huge_anon_shared(struct page *page)
+{
+	int i, mapcount;
+
+	mapcount = head_compound_mapcount(page);
+	if (mapcount > 1)
+		return true;
+	if (PageDoubleMap(page))
+		mapcount -= 1;
+	for (i = 0; i < thp_nr_pages(page); i++) {
+		if (atomic_read(&page[i]._mapcount) + mapcount + 1 > 1)
+			return true;
+	}
+	return false;
+}
+
+/* A lightweight check corresponding to "page_trans_huge_mapcount() > 1". */
+bool page_trans_huge_anon_shared(struct page *page)
+{
+	unsigned int seqcount;
+	bool shared;
+
+	VM_BUG_ON_PAGE(PageHuge(page) || PageTail(page), page);
+	VM_BUG_ON_PAGE(!PageAnon(page) || !PageTransHuge(page), page);
+
+	do {
+		seqcount = thp_mapcount_read_begin(page);
+		shared = __page_trans_huge_anon_shared(page);
+	} while (thp_mapcount_read_retry(page, seqcount));
+
+	return shared;
+}
+
 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
--
2.31.1