Message-Id: <20210114175934.13070-7-will@kernel.org>
Date: Thu, 14 Jan 2021 17:59:32 +0000
From: Will Deacon <will@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, linux-arm-kernel@...ts.infradead.org,
Will Deacon <will@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
Jan Kara <jack@...e.cz>, Minchan Kim <minchan@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Vinayak Menon <vinmenon@...eaurora.org>,
Hugh Dickins <hughd@...gle.com>, kernel-team@...roid.com
Subject: [RFC PATCH 6/8] mm: Avoid modifying vmf.info.address in __collapse_huge_page_swapin()

In preparation for const-ifying the 'info' field of 'struct vm_fault',
rework __collapse_huge_page_swapin() to avoid continuously updating
vmf.info.address and instead populate a new 'struct vm_fault' on the
stack for each page being processed.
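As a userspace illustration (a minimal sketch, not kernel code; the
'fault' and 'fault_info' structures below are hypothetical stand-ins
for 'struct vm_fault'), building a fresh object per iteration is what
makes the const qualifier possible: any later write, such as the old
"vmf.info.address += PAGE_SIZE" loop increment, becomes a compile
error.

  #include <stdio.h>

  #define PAGE_SIZE 4096UL
  #define NR_PAGES  4

  /* Stand-in for 'struct vm_fault_info' with a const-ified address. */
  struct fault_info {
          const unsigned long address;
  };

  struct fault {
          struct fault_info info;
  };

  int main(void)
  {
          unsigned long haddr = 0x100000UL;
          unsigned long end = haddr + NR_PAGES * PAGE_SIZE;
          unsigned long address;

          for (address = haddr; address < end; address += PAGE_SIZE) {
                  /*
                   * A fresh object per iteration: the const field is
                   * set exactly once, at initialisation, and cannot
                   * be incremented afterwards.
                   */
                  struct fault vmf = { .info = { .address = address } };

                  printf("fault at %#lx\n", vmf.info.address);
          }

          return 0;
  }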
Cc: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Signed-off-by: Will Deacon <will@...nel.org>
---
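Note: with this change the pte is mapped and unmapped inside each loop
iteration rather than being held across the whole loop and released
once at the end, which is why the !is_swap_pte() path below gains an
explicit pte_unmap() before continuing. A userspace analogue of that
acquire/release-per-iteration shape (a hedged sketch; map_slot() and
unmap_slot() are made-up stand-ins for pte_offset_map()/pte_unmap()):

  #include <stdio.h>

  #define NR_SLOTS 4

  /* Made-up stand-ins for pte_offset_map()/pte_unmap(). */
  static int *map_slot(int *table, unsigned long idx)
  {
          return &table[idx];
  }

  static void unmap_slot(int *slot)
  {
          (void)slot;     /* the release would happen here */
  }

  int main(void)
  {
          int table[NR_SLOTS] = { 0, 7, 0, 9 };
          unsigned long i;

          for (i = 0; i < NR_SLOTS; i++) {
                  int *slot = map_slot(table, i);

                  if (*slot == 0) {
                          /* Early exit paths must release, too. */
                          unmap_slot(slot);
                          continue;
                  }

                  printf("slot %lu: %d\n", i, *slot);
                  unmap_slot(slot);
          }

          return 0;
  }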
 mm/khugepaged.c | 41 ++++++++++++++++++++---------------------
 1 file changed, 20 insertions(+), 21 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 4494c90075fb..86c51a5d92d2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -991,40 +991,43 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 
 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 					struct vm_area_struct *vma,
-					unsigned long address, pmd_t *pmd,
+					unsigned long haddr, pmd_t *pmd,
 					int referenced)
 {
 	int swapped_in = 0;
 	vm_fault_t ret = 0;
-	struct vm_fault vmf = {
-		.info = {
-			.vma = vma,
-			.address = address,
-			.pgoff = linear_page_index(vma, address),
-		},
-		.flags = FAULT_FLAG_ALLOW_RETRY,
-		.pmd = pmd,
-	};
-
-	vmf.pte = pte_offset_map(pmd, address);
-	for (; vmf.info.address < address + HPAGE_PMD_NR*PAGE_SIZE;
-	     vmf.pte++, vmf.info.address += PAGE_SIZE) {
+	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
+
+	for (address = haddr; address < end; address += PAGE_SIZE) {
+		struct vm_fault vmf = {
+			.info = {
+				.vma = vma,
+				.address = address,
+				.pgoff = linear_page_index(vma, haddr),
+			},
+			.flags = FAULT_FLAG_ALLOW_RETRY,
+			.pmd = pmd,
+		};
+
+		vmf.pte = pte_offset_map(pmd, address);
 		vmf.orig_pte = *vmf.pte;
-		if (!is_swap_pte(vmf.orig_pte))
+		if (!is_swap_pte(vmf.orig_pte)) {
+			pte_unmap(vmf.pte);
 			continue;
+		}
 		swapped_in++;
 		ret = do_swap_page(&vmf);
 
 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
 		if (ret & VM_FAULT_RETRY) {
 			mmap_read_lock(mm);
-			if (hugepage_vma_revalidate(mm, address, &vmf.info.vma)) {
+			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
 				/* vma is no longer available, don't continue to swapin */
 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 				return false;
 			}
 			/* check if the pmd is still valid */
-			if (mm_find_pmd(mm, address) != pmd) {
+			if (mm_find_pmd(mm, haddr) != pmd) {
 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 				return false;
 			}
@@ -1033,11 +1036,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 			return false;
 		}
-		/* pte is unmapped now, we need to map it */
-		vmf.pte = pte_offset_map(pmd, vmf.info.address);
 	}
-	vmf.pte--;
-	pte_unmap(vmf.pte);
 
 	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
 	if (swapped_in)
--
2.30.0.284.gd98b1dd5eaa7-goog