lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Thu, 16 Sep 2021 17:16:29 +0100
From:   Catalin Marinas <catalin.marinas@....com>
To:     Anshuman Khandual <anshuman.khandual@....com>
Cc:     linux-arm-kernel@...ts.infradead.org, mark.rutland@....com,
        suzuki.poulose@....com, Will Deacon <will@...nel.org>,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH] arm64/mm: Add pud_sect_supported()

On Wed, Sep 15, 2021 at 09:14:19AM +0530, Anshuman Khandual wrote:
> diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
> index 23505fc35324..641854f0e8ee 100644
> --- a/arch/arm64/mm/hugetlbpage.c
> +++ b/arch/arm64/mm/hugetlbpage.c
> @@ -40,11 +40,10 @@ void __init arm64_hugetlb_cma_reserve(void)
>  {
>  	int order;
>  
> -#ifdef CONFIG_ARM64_4K_PAGES
> -	order = PUD_SHIFT - PAGE_SHIFT;
> -#else
> -	order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
> -#endif
> +	if (pud_sect_supported())
> +		order = PUD_SHIFT - PAGE_SHIFT;
> +	else
> +		order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
>  	/*
>  	 * HugeTLB CMA reservation is required for gigantic
>  	 * huge pages which could not be allocated via the
> @@ -62,8 +61,9 @@ bool arch_hugetlb_migration_supported(struct hstate *h)
>  	size_t pagesize = huge_page_size(h);
>  
>  	switch (pagesize) {
> -#ifdef CONFIG_ARM64_4K_PAGES
> +#ifndef __PAGETABLE_PUD_FOLDED
>  	case PUD_SIZE:
> +		return pud_sect_supported();
>  #endif
>  	case PMD_SIZE:
>  	case CONT_PMD_SIZE:

Is this the same thing? With 4K pages and 3-levels (39-bit VA), the PUD
is folded but we do have a valid PUD_SIZE == PGDIR_SIZE and different
from PMD_SIZE. Do we disallow section mappings at the top level in this
case? If not, we should have a check for __PAGETABLE_PMD_FOLDED instead.

> @@ -126,8 +126,11 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
>  	*pgsize = size;
>  
>  	switch (size) {
> -#ifdef CONFIG_ARM64_4K_PAGES
> +#ifndef __PAGETABLE_PUD_FOLDED
>  	case PUD_SIZE:
> +		if (pud_sect_supported())
> +			contig_ptes = 1;
> +		break;
>  #endif
>  	case PMD_SIZE:
>  		contig_ptes = 1;

Same here.

> @@ -489,9 +492,9 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
>  
>  static int __init hugetlbpage_init(void)
>  {
> -#ifdef CONFIG_ARM64_4K_PAGES
> -	hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
> -#endif
> +	if (pud_sect_supported())
> +		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
> +
>  	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
>  	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
>  	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);
> @@ -503,8 +506,9 @@ arch_initcall(hugetlbpage_init);
>  bool __init arch_hugetlb_valid_size(unsigned long size)
>  {
>  	switch (size) {
> -#ifdef CONFIG_ARM64_4K_PAGES
> +#ifndef __PAGETABLE_PUD_FOLDED
>  	case PUD_SIZE:
> +		return pud_sect_supported();
>  #endif
>  	case CONT_PMD_SIZE:
>  	case PMD_SIZE:

And here.

-- 
Catalin

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ