Message-Id: <20240723173719.e0894a5855e1b46d95cbc3cd@linux-foundation.org>
Date: Tue, 23 Jul 2024 17:37:19 -0700
From: Andrew Morton <akpm@...ux-foundation.org>
To: Oscar Salvador <osalvador@...e.de>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org, Peter Xu
<peterx@...hat.com>, Muchun Song <muchun.song@...ux.dev>, Michal Hocko
<mhocko@...e.com>, Donet Tom <donettom@...ux.ibm.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>, Matthew Wilcox
<willy@...radead.org>, Vlastimil Babka <vbabka@...e.cz>
Subject: Re: [PATCH 7/9] mm: Drop hugetlb_get_unmapped_area{_*} functions
On Thu, 18 Jul 2024 12:59:01 +0200 Oscar Salvador <osalvador@...e.de> wrote:
> Hugetlb mappings are now handled through normal channels just like any
> other mapping, so we no longer need hugetlb_get_unmapped_area* specific
> functions.
>
> ...
>
> fs/hugetlbfs/inode.c | 86 ------------------------
The fs/hugetlbfs/inode.c code which this patch removes is quite
different from the code which is currently present.
The concern is that important recent changes to this code were not
carried over into the code which replaced it ("normal channels"), and
that this patchset hasn't been tested against the latest trees.
So can we please have a redo?
(Here's my diff of what-is-there versus what-this-patch-removes)
--- /tmp/1 2024-07-23 17:29:43.704770012 -0700
+++ /tmp/2 2024-07-23 17:29:42.232748301 -0700
@@ -1,29 +1,28 @@
-static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
-		unsigned long addr, unsigned long len,
-		unsigned long pgoff, unsigned long flags)
+static unsigned long
+hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
 	struct vm_unmapped_area_info info = {};
 
 	info.length = len;
 	info.low_limit = current->mm->mmap_base;
-	info.high_limit = TASK_SIZE;
+	info.high_limit = arch_get_mmap_end(addr, len, flags);
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	return vm_unmapped_area(&info);
 }
 
-static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
-		unsigned long addr0, unsigned long len,
-		unsigned long pgoff, unsigned long flags)
+static unsigned long
+hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
 	struct vm_unmapped_area_info info = {};
-	unsigned long addr;
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
-	info.high_limit = current->mm->mmap_base;
+	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 
 	addr = vm_unmapped_area(&info);
@@ -33,53 +32,56 @@
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	if (addr & ~PAGE_MASK) {
+	if (unlikely(offset_in_page(addr))) {
 		VM_BUG_ON(addr != -ENOMEM);
 		info.flags = 0;
-		info.low_limit = TASK_UNMAPPED_BASE;
-		info.high_limit = TASK_SIZE;
+		info.low_limit = current->mm->mmap_base;
+		info.high_limit = arch_get_mmap_end(addr, len, flags);
 		addr = vm_unmapped_area(&info);
 	}
 
 	return addr;
 }
 
-unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-		unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long
+generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff,
+		unsigned long flags)
 {
-	struct hstate *h = hstate_file(file);
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
+	struct hstate *h = hstate_file(file);
+	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
 
 	if (len & ~huge_page_mask(h))
 		return -EINVAL;
-	if (len > TASK_SIZE - mmap_min_addr)
+	if (len > mmap_end - mmap_min_addr)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
 		if (prepare_hugepage_range(file, addr, len))
 			return -EINVAL;
-		goto check_asce_limit;
+		return addr;
 	}
 
 	if (addr) {
 		addr = ALIGN(addr, huge_page_size(h));
-		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vm_start_gap(vma)))
-			goto check_asce_limit;
+		vma = find_vma_prev(mm, addr, &prev);
+		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
+			return addr;
 	}
 
-	if (!test_bit(MMF_TOPDOWN, &mm->flags))
-		addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
-				pgoff, flags);
-	else
-		addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
+	/*
+	 * Use MMF_TOPDOWN flag as a hint to use topdown routine.
+	 * If architectures have special needs, they should define their own
+	 * version of hugetlb_get_unmapped_area.
+	 */
+	if (test_bit(MMF_TOPDOWN, &mm->flags))
+		return hugetlb_get_unmapped_area_topdown(file, addr, len,
 				pgoff, flags);
-	if (offset_in_page(addr))
-		return addr;
-
-check_asce_limit:
-	return check_asce_limit(mm, addr, len);
+	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
 }
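
To summarize the functional deltas hidden in all the reflowing above:
TASK_SIZE/TASK_UNMAPPED_BASE were replaced by arch_get_mmap_end() and
arch_get_mmap_base(), the s390-specific check_asce_limit() exit path is
gone, and the hint-address check grew a find_vma_prev()-based guard
against the previous VMA's gap.  A minimal sketch of that last check,
pulled out on its own (hint_addr_is_usable() is just an illustrative
name, not something in the tree; it assumes the usual <linux/mm.h>
helpers):

/*
 * Sketch of the stricter hint validation done by the current
 * generic_hugetlb_get_unmapped_area().  Illustrative only:
 * hint_addr_is_usable() does not exist in the kernel.
 */
static bool hint_addr_is_usable(struct mm_struct *mm, unsigned long addr,
				unsigned long len, unsigned long mmap_end)
{
	struct vm_area_struct *vma, *prev;

	vma = find_vma_prev(mm, addr, &prev);

	/* Must fit between mmap_min_addr and the arch limit. */
	if (mmap_end - len < addr || addr < mmap_min_addr)
		return false;

	/* Must not run into the next VMA (or its stack guard gap). */
	if (vma && addr + len > vm_start_gap(vma))
		return false;

	/* New: must also clear the previous VMA's guard gap. */
	return !prev || addr >= vm_end_gap(prev);
}

The removed copy only consulted find_vma() and never looked at the
previous mapping's vm_end_gap(), which is precisely the kind of recent
fix that gets lost when a series is based on an older tree.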