Message-ID: <20230105101844.1893104-8-jthoughton@google.com>
Date: Thu, 5 Jan 2023 10:18:05 +0000
From: James Houghton <jthoughton@...gle.com>
To: Mike Kravetz <mike.kravetz@...cle.com>,
Muchun Song <songmuchun@...edance.com>,
Peter Xu <peterx@...hat.com>
Cc: David Hildenbrand <david@...hat.com>,
David Rientjes <rientjes@...gle.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Mina Almasry <almasrymina@...gle.com>,
"Zach O'Keefe" <zokeefe@...gle.com>,
Manish Mishra <manish.mishra@...anix.com>,
Naoya Horiguchi <naoya.horiguchi@....com>,
"Dr . David Alan Gilbert" <dgilbert@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Yang Shi <shy828301@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
James Houghton <jthoughton@...gle.com>
Subject: [PATCH 07/46] hugetlb: rename __vma_shareable_lock to __vma_has_hugetlb_vma_lock
Previously, the presence of the hugetlb VMA lock meant that the VMA
was PMD-shareable. Now the lock may be allocated even when the VMA is
not PMD-shareable: namely, when it is a high-granularity VMA. A
high-granularity VMA may also lack the VMA lock entirely; in that
case, MADV_COLLAPSE will not be able to collapse its mappings.
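
Illustrative sketch (not part of this patch) of the distinction the
rename captures; after this change, lock presence and PMD-shareability
are separate questions:

	/*
	 * The lock exists for PMD-shareable VMAs and, optionally, for
	 * high-granularity VMAs (so MADV_COLLAPSE can collapse them).
	 */
	if (__vma_has_hugetlb_vma_lock(vma)) {
		/* Safe to synchronize against PMD unsharing. */
	}

	/* PMD-shareability itself must be checked separately: */
	if (want_pmd_share(vma, addr)) {
		/* PMD sharing may actually be attempted here. */
	}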
Signed-off-by: James Houghton <jthoughton@...gle.com>
---
 include/linux/hugetlb.h | 15 ++++++++++-----
 mm/hugetlb.c            | 16 ++++++++--------
2 files changed, 18 insertions(+), 13 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b6b10101bea7..aa49fd8cb47c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1235,7 +1235,8 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif
-static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
+static inline bool
+__vma_has_hugetlb_vma_lock(struct vm_area_struct *vma)
{
return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}
@@ -1252,13 +1253,17 @@ hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
/*
- * If pmd sharing possible, locking needed to safely walk the
- * hugetlb pgtables. More information can be found at the comment
- * above huge_pte_offset() in the same file.
+ * If the VMA has the hugetlb vma lock (PMD-shareable or HGM
+ * collapsible), locking is needed to safely walk the hugetlb pgtables.
+ * More information can be found at the comment above huge_pte_offset()
+ * in the same file.
+ *
+ * This doesn't do a full high-granularity walk, so we are concerned
+ * only with PMD unsharing.
*
* NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
*/
- if (__vma_shareable_lock(vma))
+ if (__vma_has_hugetlb_vma_lock(vma))
WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
!lockdep_is_held(
&vma->vm_file->f_mapping->i_mmap_rwsem));
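
For context, a usage sketch (not part of this patch) of how a caller
would satisfy the lockdep assertion above:

	struct hstate *h = hstate_vma(vma);
	pte_t *ptep;

	/* Hold the hugetlb vma lock (or i_mmap_rwsem) across the walk. */
	hugetlb_vma_lock_read(vma);
	ptep = hugetlb_walk(vma, addr, huge_page_size(h));
	if (ptep) {
		/* PTE can be inspected; PMD unsharing is blocked. */
	}
	hugetlb_vma_unlock_read(vma);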
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 99fadd7680ec..2f86fedef283 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -260,7 +260,7 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
*/
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
- if (__vma_shareable_lock(vma)) {
+ if (__vma_has_hugetlb_vma_lock(vma)) {
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
down_read(&vma_lock->rw_sema);
@@ -269,7 +269,7 @@ void hugetlb_vma_lock_read(struct vm_area_struct *vma)
void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
- if (__vma_shareable_lock(vma)) {
+ if (__vma_has_hugetlb_vma_lock(vma)) {
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
up_read(&vma_lock->rw_sema);
@@ -278,7 +278,7 @@ void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
- if (__vma_shareable_lock(vma)) {
+ if (__vma_has_hugetlb_vma_lock(vma)) {
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
down_write(&vma_lock->rw_sema);
@@ -287,7 +287,7 @@ void hugetlb_vma_lock_write(struct vm_area_struct *vma)
void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
- if (__vma_shareable_lock(vma)) {
+ if (__vma_has_hugetlb_vma_lock(vma)) {
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
up_write(&vma_lock->rw_sema);
@@ -298,7 +298,7 @@ int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
- if (!__vma_shareable_lock(vma))
+ if (!__vma_has_hugetlb_vma_lock(vma))
return 1;
return down_write_trylock(&vma_lock->rw_sema);
@@ -306,7 +306,7 @@ int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
- if (__vma_shareable_lock(vma)) {
+ if (__vma_has_hugetlb_vma_lock(vma)) {
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
lockdep_assert_held(&vma_lock->rw_sema);
@@ -338,7 +338,7 @@ static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
- if (__vma_shareable_lock(vma)) {
+ if (__vma_has_hugetlb_vma_lock(vma)) {
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
__hugetlb_vma_unlock_write_put(vma_lock);
@@ -350,7 +350,7 @@ static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
/*
* Only present in sharable vmas.
*/
- if (!vma || !__vma_shareable_lock(vma))
+ if (!vma || !__vma_has_hugetlb_vma_lock(vma))
return;
if (vma->vm_private_data) {
--
2.39.0.314.g84b9a713c41-goog