Message-ID: <a5dce57-6dfa-5559-4698-e817eb2f993@google.com>
Date: Tue, 11 Jul 2023 21:43:36 -0700 (PDT)
From: Hugh Dickins <hughd@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
cc: Mike Kravetz <mike.kravetz@...cle.com>,
Mike Rapoport <rppt@...nel.org>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Matthew Wilcox <willy@...radead.org>,
David Hildenbrand <david@...hat.com>,
Suren Baghdasaryan <surenb@...gle.com>,
Qi Zheng <zhengqi.arch@...edance.com>,
Yang Shi <shy828301@...il.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Peter Xu <peterx@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Will Deacon <will@...nel.org>, Yu Zhao <yuzhao@...gle.com>,
Alistair Popple <apopple@...dia.com>,
Ralph Campbell <rcampbell@...dia.com>,
Ira Weiny <ira.weiny@...el.com>,
Steven Price <steven.price@....com>,
SeongJae Park <sj@...nel.org>,
Lorenzo Stoakes <lstoakes@...il.com>,
Huang Ying <ying.huang@...el.com>,
Naoya Horiguchi <naoya.horiguchi@....com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Zack Rusin <zackr@...are.com>, Jason Gunthorpe <jgg@...pe.ca>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Pasha Tatashin <pasha.tatashin@...een.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Minchan Kim <minchan@...nel.org>,
Christoph Hellwig <hch@...radead.org>,
Song Liu <song@...nel.org>,
Thomas Hellstrom <thomas.hellstrom@...ux.intel.com>,
Russell King <linux@...linux.org.uk>,
"David S. Miller" <davem@...emloft.net>,
Michael Ellerman <mpe@...erman.id.au>,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>,
Heiko Carstens <hca@...ux.ibm.com>,
Christian Borntraeger <borntraeger@...ux.ibm.com>,
Claudio Imbrenda <imbrenda@...ux.ibm.com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Gerald Schaefer <gerald.schaefer@...ux.ibm.com>,
Vasily Gorbik <gor@...ux.ibm.com>,
Jann Horn <jannh@...gle.com>,
Vishal Moola <vishal.moola@...il.com>,
Vlastimil Babka <vbabka@...e.cz>, Zi Yan <ziy@...dia.com>,
linux-arm-kernel@...ts.infradead.org, sparclinux@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org, linux-s390@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: [PATCH v3 11/13] mm/khugepaged: delete
 khugepaged_collapse_pte_mapped_thps()

Now that retract_page_tables() can retract page tables reliably, without
depending on trylocks, delete all the apparatus for khugepaged to try
again later: khugepaged_collapse_pte_mapped_thps() etc.; and free up the
per-mm memory which was set aside for that in the khugepaged_mm_slot.

But one part of that is worth keeping: when hpage_collapse_scan_file()
found SCAN_PTE_MAPPED_HUGEPAGE, that address was noted in the mm_slot
to be tried for retraction later - catching, for example, page tables
where a reversible mprotect() of a portion had required splitting the
pmd, but now it can be recollapsed. Call collapse_pte_mapped_thp()
directly in this case (why was it deferred before? I assume an issue
with needing mmap_lock for write, but now it's only needed for read).

Signed-off-by: Hugh Dickins <hughd@...gle.com>
---
mm/khugepaged.c | 125 +++++++-------------------------------------------
1 file changed, 16 insertions(+), 109 deletions(-)
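
For reviewers, the effect of the final hunk condenses to roughly the
following sketch (not a compilable excerpt: the surrounding scan loop
and its indentation are elided, and addr here abbreviates
khugepaged_scan.address):

	*result = hpage_collapse_scan_file(mm, addr, file, pgoff, cc);
	if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
		/* scan_file ran without mmap_lock: retake it, read only */
		mmap_read_lock(mm);
		mmap_locked = true;
		if (hpage_collapse_test_exit(mm)) {
			fput(file);
			goto breakouterloop;
		}
		*result = collapse_pte_mapped_thp(mm, addr, false);
		/* already pmd-mapped (e.g. a racing collapse): success */
		if (*result == SCAN_PMD_MAPPED)
			*result = SCAN_SUCCEED;
	}
	fput(file);

So the SCAN_PTE_MAPPED_HUGEPAGE case is now handled inline, under
mmap_lock held for read, instead of being queued on pte_mapped_thp[]
for a later pass under mmap_write_trylock().
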
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 46986eb4eebb..7c7aaddbe130 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -92,8 +92,6 @@ static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
static struct kmem_cache *mm_slot_cache __read_mostly;

-#define MAX_PTE_MAPPED_THP 8
-
struct collapse_control {
bool is_khugepaged;
@@ -107,15 +105,9 @@ struct collapse_control {
/**
* struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
* @slot: hash lookup from mm to mm_slot
- * @nr_pte_mapped_thp: number of pte mapped THP
- * @pte_mapped_thp: address array corresponding pte mapped THP
*/
struct khugepaged_mm_slot {
struct mm_slot slot;
-
- /* pte-mapped THP in this mm */
- int nr_pte_mapped_thp;
- unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
@@ -1439,50 +1431,6 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
}

#ifdef CONFIG_SHMEM
-/*
- * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
- * khugepaged should try to collapse the page table.
- *
- * Note that following race exists:
- * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
- * emptying the A's ->pte_mapped_thp[] array.
- * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
- * retract_page_tables() finds a VMA in mm_struct A mapping the same extent
- * (at virtual address X) and adds an entry (for X) into mm_struct A's
- * ->pte-mapped_thp[] array.
- * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
- * sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
- * (for X) into mm_struct A's ->pte-mapped_thp[] array.
- * Thus, it's possible the same address is added multiple times for the same
- * mm_struct. Should this happen, we'll simply attempt
- * collapse_pte_mapped_thp() multiple times for the same address, under the same
- * exclusive mmap_lock, and assuming the first call is successful, subsequent
- * attempts will return quickly (without grabbing any additional locks) when
- * a huge pmd is found in find_pmd_or_thp_or_none(). Since this is a cheap
- * check, and since this is a rare occurrence, the cost of preventing this
- * "multiple-add" is thought to be more expensive than just handling it, should
- * it occur.
- */
-static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
-{
- struct khugepaged_mm_slot *mm_slot;
- struct mm_slot *slot;
- bool ret = false;
-
- VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
-
- spin_lock(&khugepaged_mm_lock);
- slot = mm_slot_lookup(mm_slots_hash, mm);
- mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
- if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
- mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
- ret = true;
- }
- spin_unlock(&khugepaged_mm_lock);
- return ret;
-}
-
/* hpage must be locked, and mmap_lock must be held */
static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmdp, struct page *hpage)
@@ -1706,29 +1654,6 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
return result;
}

-static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
-{
- struct mm_slot *slot = &mm_slot->slot;
- struct mm_struct *mm = slot->mm;
- int i;
-
- if (likely(mm_slot->nr_pte_mapped_thp == 0))
- return;
-
- if (!mmap_write_trylock(mm))
- return;
-
- if (unlikely(hpage_collapse_test_exit(mm)))
- goto out;
-
- for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
- collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
-
-out:
- mm_slot->nr_pte_mapped_thp = 0;
- mmap_write_unlock(mm);
-}
-
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
struct vm_area_struct *vma;
@@ -2370,16 +2295,6 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
{
BUILD_BUG();
}
-
-static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
-{
-}
-
-static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
-{
- return false;
-}
#endif

static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
@@ -2409,7 +2324,6 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
khugepaged_scan.mm_slot = mm_slot;
}
spin_unlock(&khugepaged_mm_lock);
- khugepaged_collapse_pte_mapped_thps(mm_slot);

mm = slot->mm;
/*
@@ -2462,36 +2376,29 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
khugepaged_scan.address);

mmap_read_unlock(mm);
- *result = hpage_collapse_scan_file(mm,
- khugepaged_scan.address,
- file, pgoff, cc);
mmap_locked = false;
+ *result = hpage_collapse_scan_file(mm,
+ khugepaged_scan.address, file, pgoff, cc);
+ if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
+ mmap_read_lock(mm);
+ mmap_locked = true;
+ if (hpage_collapse_test_exit(mm)) {
+ fput(file);
+ goto breakouterloop;
+ }
+ *result = collapse_pte_mapped_thp(mm,
+ khugepaged_scan.address, false);
+ if (*result == SCAN_PMD_MAPPED)
+ *result = SCAN_SUCCEED;
+ }
fput(file);
} else {
*result = hpage_collapse_scan_pmd(mm, vma,
- khugepaged_scan.address,
- &mmap_locked,
- cc);
+ khugepaged_scan.address, &mmap_locked, cc);
}
- switch (*result) {
- case SCAN_PTE_MAPPED_HUGEPAGE: {
- pmd_t *pmd;
- *result = find_pmd_or_thp_or_none(mm,
- khugepaged_scan.address,
- &pmd);
- if (*result != SCAN_SUCCEED)
- break;
- if (!khugepaged_add_pte_mapped_thp(mm,
- khugepaged_scan.address))
- break;
- } fallthrough;
- case SCAN_SUCCEED:
+ if (*result == SCAN_SUCCEED)
++khugepaged_pages_collapsed;
- break;
- default:
- break;
- }

/* move to next address */
khugepaged_scan.address += HPAGE_PMD_SIZE;
--
2.35.3