Message-Id: <B06C1160-D40B-4D38-8ECF-F8BDE80F6DC0@oracle.com>
Date:   Tue, 26 May 2020 16:05:58 -0600
From:   William Kucharski <william.kucharski@...cle.com>
To:     Matthew Wilcox <willy@...radead.org>
Cc:     linux-fsdevel@...r.kernel.org, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 36/36] mm: Align THP mappings for non-DAX

Thinking about this, if the intent is to make THP usable for any page
size greater than PAGE_SIZE, this routine should probably go back to
taking a size (or perhaps order) parameter so it could be called to
align addresses accordingly rather than hard-coding PMD_SIZE.
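
Roughly, that would mean keeping the size-parameterized helper this
patch removes and adding an order-based entry point on top of it.  A
rough sketch only; the thp_get_unmapped_area_order() wrapper name is
mine, not something from the patch:

/*
 * Sketch: keep a size-parameterized helper so callers can align to any
 * huge page size, not just PMD_SIZE.  This is essentially the
 * __thp_get_unmapped_area() that the patch below inlines away.
 */
static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					     off >> PAGE_SHIFT, flags);
	if (IS_ERR_VALUE(ret))
		return 0;

	/* Allocation at the address hint succeeded; don't adjust it. */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

/*
 * Hypothetical order-based wrapper so a caller could request alignment
 * for an arbitrary THP order instead of assuming PMD_SIZE.
 */
static unsigned long thp_get_unmapped_area_order(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags, unsigned int order)
{
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
	unsigned long ret;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags,
				      PAGE_SIZE << order);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}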


> On May 15, 2020, at 7:16 AM, Matthew Wilcox <willy@...radead.org> wrote:
> 
> From: William Kucharski <william.kucharski@...cle.com>
> 
> When we have the opportunity to use transparent huge pages to map a
> file, we want to follow the same rules as DAX.
> 
> Signed-off-by: William Kucharski <william.kucharski@...cle.com>
> [Inline __thp_get_unmapped_area() into thp_get_unmapped_area()]
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
> ---
> mm/huge_memory.c | 40 +++++++++++++---------------------------
> 1 file changed, 13 insertions(+), 27 deletions(-)
> 
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 15a86b06befc..e78686b628ae 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -535,30 +535,30 @@ bool is_transparent_hugepage(struct page *page)
> }
> EXPORT_SYMBOL_GPL(is_transparent_hugepage);
> 
> -static unsigned long __thp_get_unmapped_area(struct file *filp,
> -		unsigned long addr, unsigned long len,
> -		loff_t off, unsigned long flags, unsigned long size)
> +unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
> +		unsigned long len, unsigned long pgoff, unsigned long flags)
> {
> +	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
> 	loff_t off_end = off + len;
> -	loff_t off_align = round_up(off, size);
> +	loff_t off_align = round_up(off, PMD_SIZE);
> 	unsigned long len_pad, ret;
> 
> -	if (off_end <= off_align || (off_end - off_align) < size)
> -		return 0;
> +	if (off_end <= off_align || (off_end - off_align) < PMD_SIZE)
> +		goto regular;
> 
> -	len_pad = len + size;
> +	len_pad = len + PMD_SIZE;
> 	if (len_pad < len || (off + len_pad) < off)
> -		return 0;
> +		goto regular;
> 
> 	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
> 					      off >> PAGE_SHIFT, flags);
> 
> 	/*
> -	 * The failure might be due to length padding. The caller will retry
> -	 * without the padding.
> +	 * The failure might be due to length padding.  Retry without
> +	 * the padding.
> 	 */
> 	if (IS_ERR_VALUE(ret))
> -		return 0;
> +		goto regular;
> 
> 	/*
> 	 * Do not try to align to THP boundary if allocation at the address
> @@ -567,23 +567,9 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
> 	if (ret == addr)
> 		return addr;
> 
> -	ret += (off - ret) & (size - 1);
> +	ret += (off - ret) & (PMD_SIZE - 1);
> 	return ret;
> -}
> -
> -unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
> -		unsigned long len, unsigned long pgoff, unsigned long flags)
> -{
> -	unsigned long ret;
> -	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
> -
> -	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
> -		goto out;
> -
> -	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
> -	if (ret)
> -		return ret;
> -out:
> +regular:
> 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
> }
> EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
> -- 
> 2.26.2
> 
