Message-ID: <15efa3c57935f7f8894094b94c1803c2f322c511.1752770784.git.lorenzo.stoakes@oracle.com>
Date: Thu, 17 Jul 2025 17:55:53 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Peter Xu <peterx@...hat.com>, Alexander Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>, Jan Kara <jack@...e.cz>,
"Liam R . Howlett" <Liam.Howlett@...cle.com>,
Vlastimil Babka <vbabka@...e.cz>, Jann Horn <jannh@...gle.com>,
Pedro Falcato <pfalcato@...e.de>, Rik van Riel <riel@...riel.com>,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-api@...r.kernel.org
Subject: [PATCH v4 03/10] mm/mremap: put VMA check and prep logic into helper function
Rather than lumping everything together in do_mremap(), add a new helper
function, check_prep_vma(), to perform the per-VMA check and preparation work.
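For context (not part of this patch): the checks consolidated into
check_prep_vma() correspond to user-visible mremap() failure modes. A
minimal userspace sketch of two of them follows; it assumes kernel
headers that define SYS_mseal (mseal() landed in 6.10; the second check
is compiled out otherwise) and that the probed source address is
unmapped, as is very likely in a small test program:

  #define _GNU_SOURCE
  #include <errno.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
      size_t len = 4096; /* Assumes 4K pages; fine for a sketch. */
      char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      if (p == MAP_FAILED)
          return 1;

      /* No VMA at the source address: do_mremap() returns -EFAULT. */
      if (mremap(p + (1UL << 30), len, 2 * len, 0) == MAP_FAILED)
          printf("unmapped source: %s\n", strerror(errno));

  #ifdef SYS_mseal
      /* An mseal()'d VMA may not be remapped: -EPERM. */
      if (syscall(SYS_mseal, p, len, 0) == 0 &&
          mremap(p, len, 2 * len, MREMAP_MAYMOVE) == MAP_FAILED)
          printf("sealed vma: %s\n", strerror(errno));
  #endif

      return 0;
  }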
Extracting this logic further lays the groundwork for subsequent patches
that will allow for batched VMA mremap().
Additionally, setting vrm->new_addr == vrm->addr when prepping the VMA
avoids having to do so in the mlocked expand_vma() case.
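In other words, after check_prep_vma() succeeds, the invariant for
in-place operations is roughly (illustrative only; the assertion below
is a sketch, not something this patch adds):

  if (!vrm_implies_new_addr(vrm))
      VM_WARN_ON_ONCE(vrm->new_addr != vrm->addr);

which is what lets expand_vma() drop its mlocked-only assignment.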
No functional change intended.
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
Reviewed-by: Vlastimil Babka <vbabka@...e.cz>
---
mm/mremap.c | 58 ++++++++++++++++++++++++++---------------------------
1 file changed, 28 insertions(+), 30 deletions(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index 9ce20c238ffd..60eb0ac8634b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1634,7 +1634,6 @@ static bool align_hugetlb(struct vma_remap_struct *vrm)
static unsigned long expand_vma(struct vma_remap_struct *vrm)
{
unsigned long err;
- unsigned long addr = vrm->addr;
err = remap_is_valid(vrm);
if (err)
@@ -1649,16 +1648,8 @@ static unsigned long expand_vma(struct vma_remap_struct *vrm)
if (err)
return err;
- /*
- * We want to populate the newly expanded portion of the VMA to
- * satisfy the expectation that mlock()'ing a VMA maintains all
- * of its pages in memory.
- */
- if (vrm->mlocked)
- vrm->new_addr = addr;
-
/* OK we're done! */
- return addr;
+ return vrm->addr;
}
/*
@@ -1714,10 +1705,33 @@ static unsigned long mremap_at(struct vma_remap_struct *vrm)
return -EINVAL;
}
+static int check_prep_vma(struct vma_remap_struct *vrm)
+{
+ struct vm_area_struct *vma = vrm->vma;
+
+ if (!vma)
+ return -EFAULT;
+
+ /* If mseal()'d, mremap() is prohibited. */
+ if (!can_modify_vma(vma))
+ return -EPERM;
+
+ /* Align to hugetlb page size, if required. */
+ if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm))
+ return -EINVAL;
+
+ vrm_set_delta(vrm);
+ vrm->remap_type = vrm_remap_type(vrm);
+ /* For convenience, we set new_addr even if VMA won't move. */
+ if (!vrm_implies_new_addr(vrm))
+ vrm->new_addr = vrm->addr;
+
+ return 0;
+}
+
static unsigned long do_mremap(struct vma_remap_struct *vrm)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
unsigned long res;
vrm->old_len = PAGE_ALIGN(vrm->old_len);
@@ -1731,26 +1745,10 @@ static unsigned long do_mremap(struct vma_remap_struct *vrm)
return -EINTR;
vrm->mmap_locked = true;
- vma = vrm->vma = vma_lookup(mm, vrm->addr);
- if (!vma) {
- res = -EFAULT;
- goto out;
- }
-
- /* If mseal()'d, mremap() is prohibited. */
- if (!can_modify_vma(vma)) {
- res = -EPERM;
- goto out;
- }
-
- /* Align to hugetlb page size, if required. */
- if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm)) {
- res = -EINVAL;
+ vrm->vma = vma_lookup(current->mm, vrm->addr);
+ res = check_prep_vma(vrm);
+ if (res)
goto out;
- }
-
- vrm_set_delta(vrm);
- vrm->remap_type = vrm_remap_type(vrm);
/* Actually execute mremap. */
res = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);
--
2.50.1