Message-Id: <1638336766.91gluy2ru8.astroid@bobo.none>
Date:   Wed, 01 Dec 2021 15:43:06 +1000
From:   Nicholas Piggin <npiggin@...il.com>
To:     alex@...ti.fr, Benjamin Herrenschmidt <benh@...nel.crashing.org>,
        Christophe Leroy <christophe.leroy@...roup.eu>,
        Michael Ellerman <mpe@...erman.id.au>,
        Paul Mackerras <paulus@...ba.org>
Cc:     linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        linuxppc-dev@...ts.ozlabs.org
Subject: Re: [PATCH v2 rebased 2/9] powerpc/mm: Move vma_mmu_pagesize() and
 hugetlb_get_unmapped_area() to slice.c

Excerpts from Christophe Leroy's message of November 26, 2021 3:52 am:
> vma_mmu_pagesize() is only required for slices,
> otherwise there is a generic weak version.

Fine.
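
(For reference, the generic fallback mentioned above is the __weak
definition in mm/hugetlb.c, which simply defers to vma_kernel_pagesize().
Roughly, from memory, not verbatim:

	/* Generic weak fallback; architectures may override it. */
	__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
	{
		return vma_kernel_pagesize(vma);
	}

so with the powerpc override moved under the slice code, configurations
without slices should keep the same behaviour via this fallback.)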

> hugetlb_get_unmapped_area() is dedicated to slices.
> radix__hugetlb_get_unmapped_area() as well.

Hmm, that's interesting. It would be nice if we could just use the
generic version for it, but that will require some hacking.

This means my patch series changes behaviour (as in the FIXME) when
HASH=n, I think? I will have to fix that somehow.
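
(For context, the "as x86 does" in that FIXME refers to x86's
hugetlb_get_unmapped_area() choosing bottom-up vs. topdown based on which
get_unmapped_area the mm is using, roughly like this, paraphrased from
memory rather than verbatim:

	/* Follow the process's normal mmap layout instead of forcing topdown. */
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
							  pgoff, flags);
	return hugetlb_get_unmapped_area_topdown(file, addr, len,
						 pgoff, flags);

whereas the radix path in the patch below always searches topdown.)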

Thanks,
Nick

> 
> Move them to slice.c
> 
> Signed-off-by: Christophe Leroy <christophe.leroy@...roup.eu>
> ---
>  arch/powerpc/include/asm/book3s/64/hugetlb.h |  4 --
>  arch/powerpc/mm/book3s64/radix_hugetlbpage.c | 55 --------------
>  arch/powerpc/mm/book3s64/slice.c             | 76 ++++++++++++++++++++
>  arch/powerpc/mm/hugetlbpage.c                | 28 --------
>  4 files changed, 76 insertions(+), 87 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
> index 12e150e615b7..b37a28f62cf6 100644
> --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
> +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
> @@ -8,10 +8,6 @@
>   */
>  void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
>  void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
> -extern unsigned long
> -radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
> -				unsigned long len, unsigned long pgoff,
> -				unsigned long flags);
>  
>  extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
>  						unsigned long addr, pte_t *ptep,
> diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
> index 23d3e08911d3..d2fb776febb4 100644
> --- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
> +++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
> @@ -41,61 +41,6 @@ void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long st
>  		radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
>  }
>  
> -/*
> - * A vairant of hugetlb_get_unmapped_area doing topdown search
> - * FIXME!! should we do as x86 does or non hugetlb area does ?
> - * ie, use topdown or not based on mmap_is_legacy check ?
> - */
> -unsigned long
> -radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
> -				unsigned long len, unsigned long pgoff,
> -				unsigned long flags)
> -{
> -	struct mm_struct *mm = current->mm;
> -	struct vm_area_struct *vma;
> -	struct hstate *h = hstate_file(file);
> -	int fixed = (flags & MAP_FIXED);
> -	unsigned long high_limit;
> -	struct vm_unmapped_area_info info;
> -
> -	high_limit = DEFAULT_MAP_WINDOW;
> -	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
> -		high_limit = TASK_SIZE;
> -
> -	if (len & ~huge_page_mask(h))
> -		return -EINVAL;
> -	if (len > high_limit)
> -		return -ENOMEM;
> -
> -	if (fixed) {
> -		if (addr > high_limit - len)
> -			return -ENOMEM;
> -		if (prepare_hugepage_range(file, addr, len))
> -			return -EINVAL;
> -		return addr;
> -	}
> -
> -	if (addr) {
> -		addr = ALIGN(addr, huge_page_size(h));
> -		vma = find_vma(mm, addr);
> -		if (high_limit - len >= addr && addr >= mmap_min_addr &&
> -		    (!vma || addr + len <= vm_start_gap(vma)))
> -			return addr;
> -	}
> -	/*
> -	 * We are always doing an topdown search here. Slice code
> -	 * does that too.
> -	 */
> -	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
> -	info.length = len;
> -	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
> -	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
> -	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
> -	info.align_offset = 0;
> -
> -	return vm_unmapped_area(&info);
> -}
> -
>  void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
>  					 unsigned long addr, pte_t *ptep,
>  					 pte_t old_pte, pte_t pte)
> diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
> index c83be371c6e7..4c3e9601fdf6 100644
> --- a/arch/powerpc/mm/book3s64/slice.c
> +++ b/arch/powerpc/mm/book3s64/slice.c
> @@ -777,4 +777,80 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
>  
>  	return !slice_check_range_fits(mm, maskp, addr, len);
>  }
> +
> +unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
> +{
> +	/* With radix we don't use slice, so derive it from vma*/
> +	if (radix_enabled())
> +		return vma_kernel_pagesize(vma);
> +
> +	return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
> +}
> +
> +/*
> + * A variant of hugetlb_get_unmapped_area() doing topdown search
> + * FIXME!! should we do as x86 does or non hugetlb area does ?
> + * ie, use topdown or not based on mmap_is_legacy check ?
> + */
> +static unsigned long
> +radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
> +				 unsigned long pgoff, unsigned long flags)
> +{
> +	struct mm_struct *mm = current->mm;
> +	struct vm_area_struct *vma;
> +	struct hstate *h = hstate_file(file);
> +	int fixed = (flags & MAP_FIXED);
> +	unsigned long high_limit;
> +	struct vm_unmapped_area_info info;
> +
> +	high_limit = DEFAULT_MAP_WINDOW;
> +	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
> +		high_limit = TASK_SIZE;
> +
> +	if (len & ~huge_page_mask(h))
> +		return -EINVAL;
> +	if (len > high_limit)
> +		return -ENOMEM;
> +
> +	if (fixed) {
> +		if (addr > high_limit - len)
> +			return -ENOMEM;
> +		if (prepare_hugepage_range(file, addr, len))
> +			return -EINVAL;
> +		return addr;
> +	}
> +
> +	if (addr) {
> +		addr = ALIGN(addr, huge_page_size(h));
> +		vma = find_vma(mm, addr);
> +		if (high_limit - len >= addr && addr >= mmap_min_addr &&
> +		    (!vma || addr + len <= vm_start_gap(vma)))
> +			return addr;
> +	}
> +	/*
> +	 * We are always doing an topdown search here. Slice code
> +	 * does that too.
> +	 */
> +	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
> +	info.length = len;
> +	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
> +	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
> +	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
> +	info.align_offset = 0;
> +
> +	return vm_unmapped_area(&info);
> +}
> +
> +unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
> +					unsigned long len, unsigned long pgoff,
> +					unsigned long flags)
> +{
> +	struct hstate *hstate = hstate_file(file);
> +	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
> +
> +	if (radix_enabled())
> +		return radix__hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
> +
> +	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
> +}
>  #endif
> diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
> index 82d8b368ca6d..eb9de09e49a3 100644
> --- a/arch/powerpc/mm/hugetlbpage.c
> +++ b/arch/powerpc/mm/hugetlbpage.c
> @@ -542,34 +542,6 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
>  	return page;
>  }
>  
> -#ifdef CONFIG_PPC_MM_SLICES
> -unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
> -					unsigned long len, unsigned long pgoff,
> -					unsigned long flags)
> -{
> -	struct hstate *hstate = hstate_file(file);
> -	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
> -
> -#ifdef CONFIG_PPC_RADIX_MMU
> -	if (radix_enabled())
> -		return radix__hugetlb_get_unmapped_area(file, addr, len,
> -						       pgoff, flags);
> -#endif
> -	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
> -}
> -#endif
> -
> -unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
> -{
> -	/* With radix we don't use slice, so derive it from vma*/
> -	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
> -		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
> -
> -		return 1UL << mmu_psize_to_shift(psize);
> -	}
> -	return vma_kernel_pagesize(vma);
> -}
> -
>  bool __init arch_hugetlb_valid_size(unsigned long size)
>  {
>  	int shift = __ffs(size);
> -- 
> 2.33.1
> 
> 
> 
