Message-Id: <20210225072910.2811795-6-namit@vmware.com>
Date: Wed, 24 Feb 2021 23:29:09 -0800
From: Nadav Amit <nadav.amit@...il.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: Hugh Dickins <hughd@...gle.com>, Andy Lutomirski <luto@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Nadav Amit <namit@...are.com>,
Sean Christopherson <seanjc@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>, x86@...nel.org
Subject: [RFC 5/6] mm: use lightweight reclaim on FAULT_FLAG_RETRY_NOWAIT
From: Nadav Amit <namit@...are.com>
When FAULT_FLAG_RETRY_NOWAIT is set, the caller arguably wants only a
lightweight reclaim, since a long reclamation would not respect the
"NOWAIT" semantic. Honor the request in swap and file-backed page faults
by adding __GFP_NORETRY | __GFP_NOWARN to the allocation mask during the
first fault attempt.
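
For illustration only (a sketch of the intended effect, not a quote of
the code in the patch; the local variables here are made up): a swap
fault that is still on its first, NOWAIT-able attempt ends up allocating
with a relaxed mask, roughly:

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	gfp_t gfp_mask = massage_page_gfp_mask(GFP_HIGHUSER_MOVABLE, flags);

	/*
	 * gfp_mask is now GFP_HIGHUSER_MOVABLE | __GFP_NORETRY | __GFP_NOWARN:
	 * the allocator performs at most one lightweight reclaim pass and
	 * bails out quietly instead of retrying or invoking the OOM killer.
	 */
	page = alloc_page_vma(gfp_mask, vma, vmf->address);

Once the fault is retried with FAULT_FLAG_TRIED set,
fault_flag_allow_retry_first() is false and the mask is left untouched,
so the retry can still perform full reclaim.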
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Sean Christopherson <seanjc@...gle.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: x86@...nel.org
Signed-off-by: Nadav Amit <namit@...are.com>
---
mm/memory.c | 32 ++++++++++++++++++++++----------
1 file changed, 22 insertions(+), 10 deletions(-)
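
As background (illustration only, not part of the change): one path that
reaches the NOWAIT case is get_user_pages() with FOLL_NOWAIT, e.g. KVM's
async page faults; faultin_page() then sets FAULT_FLAG_ALLOW_RETRY |
FAULT_FLAG_RETRY_NOWAIT, which is what the helper below keys off. A rough
caller-side sketch, with addr standing in for the faulting user address
and error handling simplified:

	struct page *page;
	long npages;

	/* Try to fault the page in without blocking on reclaim or I/O. */
	npages = get_user_pages_unlocked(addr, 1, &page,
					 FOLL_WRITE | FOLL_NOWAIT);
	if (npages != 1)
		return -EAGAIN;	/* could not fault without waiting; retry later */
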
diff --git a/mm/memory.c b/mm/memory.c
index 13b9cf36268f..70899c92a9e6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2679,18 +2679,31 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 	return ret;
 }
 
-static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
+static gfp_t massage_page_gfp_mask(gfp_t gfp_mask, unsigned long vmf_flags)
 {
-	struct file *vm_file = vma->vm_file;
+	if (fault_flag_allow_retry_first(vmf_flags) &&
+	    (vmf_flags & FAULT_FLAG_RETRY_NOWAIT))
+		gfp_mask |= __GFP_NORETRY | __GFP_NOWARN;
 
-	if (vm_file)
-		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
+	return gfp_mask;
+}
+
+static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma,
+				  unsigned long flags)
+{
+	struct file *vm_file = vma->vm_file;
+	gfp_t gfp_mask;
 
 	/*
 	 * Special mappings (e.g. VDSO) do not have any file so fake
 	 * a default GFP_KERNEL for them.
 	 */
-	return GFP_KERNEL;
+	if (!vm_file)
+		return GFP_KERNEL;
+
+	gfp_mask = mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
+
+	return massage_page_gfp_mask(gfp_mask, flags);
 }
 
 /*
@@ -3253,6 +3266,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
  */
 vm_fault_t do_swap_page(struct vm_fault *vmf)
 {
+	gfp_t gfp_mask = massage_page_gfp_mask(GFP_HIGHUSER_MOVABLE, vmf->flags);
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page = NULL, *swapcache;
 	swp_entry_t entry;
@@ -3293,8 +3307,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
 		    __swap_count(entry) == 1) {
 			/* skip swapcache */
-			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
-							vmf->address);
+			page = alloc_page_vma(gfp_mask, vma, vmf->address);
 			if (page) {
 				int err;
 
@@ -3320,8 +3333,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 				swap_readpage(page, true);
 			}
 		} else {
-			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
-						vmf);
+			page = swapin_readahead(entry, gfp_mask, vmf);
 			swapcache = page;
 		}
 
@@ -4452,7 +4464,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		.address = address & PAGE_MASK,
 		.flags = flags,
 		.pgoff = linear_page_index(vma, address),
-		.gfp_mask = __get_fault_gfp_mask(vma),
+		.gfp_mask = __get_fault_gfp_mask(vma, flags),
 	};
 	unsigned int dirty = flags & FAULT_FLAG_WRITE;
 	struct mm_struct *mm = vma->vm_mm;
--
2.25.1