Message-ID: <20260115182720.1691130-6-Liam.Howlett@oracle.com>
Date: Thu, 15 Jan 2026 13:27:15 -0500
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Suren Baghdasaryan <surenb@...gle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Pedro Falcato <pfalcato@...e.de>, David Hildenbrand <david@...hat.com>,
Vlastimil Babka <vbabka@...e.cz>, Michal Hocko <mhocko@...e.com>,
Jann Horn <jannh@...gle.com>, shikemeng@...weicloud.com,
kasong@...cent.com, nphamcs@...il.com, bhe@...hat.com,
baohua@...nel.org, chrisl@...nel.org,
Matthew Wilcox <willy@...radead.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>
Subject: [PATCH v2 05/10] mm/vma: Add page table limit to unmap_region()
The unmap_region() callers need to pass the page table limit through so
that a future patch can make use of it.

No functional changes intended.
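
For reference, a sketch of the resulting interface (illustration only,
mirroring the __mmap_new_file_vma() hunk below, not an additional
change): unmap_region() gains a pg_max argument between vma_end and
prev, and the caller supplies the page table limit - here the start of
the next VMA, or USER_PGTABLES_CEILING when there is no next VMA.

	void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
			  unsigned long vma_start, unsigned long vma_end,
			  unsigned long pg_max, struct vm_area_struct *prev,
			  struct vm_area_struct *next);

	/* e.g. in __mmap_new_file_vma(), undoing a partial mapping: */
	unmap_region(&vmi->mas, vma, vma->vm_start, vma->vm_end,
		     map->next ? map->next->vm_start : USER_PGTABLES_CEILING,
		     map->prev, map->next);
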
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
Reviewed-by: Pedro Falcato <pfalcato@...e.de>
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
mm/vma.c | 7 ++++---
mm/vma.h | 3 ++-
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/vma.c b/mm/vma.c
index b2b9e7b3284f3..b92383e5eebd1 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -474,7 +474,8 @@ void remove_vma(struct vm_area_struct *vma)
*/
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
unsigned long vma_start, unsigned long vma_end,
- struct vm_area_struct *prev, struct vm_area_struct *next)
+ unsigned long pg_max, struct vm_area_struct *prev,
+ struct vm_area_struct *next)
{
struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb;
@@ -484,8 +485,7 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
unmap_vmas(&tlb, mas, vma, vma_start, vma_end, vma_end);
mas_set(mas, vma->vm_end);
free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
- next ? next->vm_start : USER_PGTABLES_CEILING,
- next ? next->vm_start : USER_PGTABLES_CEILING,
+ pg_max, next ? next->vm_start : USER_PGTABLES_CEILING,
/* mm_wr_locked = */ true);
tlb_finish_mmu(&tlb);
}
@@ -2469,6 +2469,7 @@ static int __mmap_new_file_vma(struct mmap_state *map,
vma_iter_set(vmi, vma->vm_end);
/* Undo any partial mapping done by a device driver. */
unmap_region(&vmi->mas, vma, vma->vm_start, vma->vm_end,
+ map->next ? map->next->vm_start : USER_PGTABLES_CEILING,
map->prev, map->next);
return error;
diff --git a/mm/vma.h b/mm/vma.h
index e671adced3a03..7c2c95fef240b 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -265,7 +265,8 @@ void remove_vma(struct vm_area_struct *vma);
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
unsigned long vma_start, unsigned long vma_end,
- struct vm_area_struct *prev, struct vm_area_struct *next);
+ unsigned long pg_max, struct vm_area_struct *prev,
+ struct vm_area_struct *next);
/**
* vma_modify_flags() - Perform any necessary split/merge in preparation for
--
2.47.3