lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1623922742.sam09kpmhp.astroid@bobo.none>
Date:   Thu, 17 Jun 2021 19:40:49 +1000
From:   Nicholas Piggin <npiggin@...il.com>
To:     akpm@...ux-foundation.org, Daniel Axtens <dja@...ens.net>,
        kasan-dev@...glegroups.com, linux-kernel@...r.kernel.org,
        linux-mm@...ck.org
Cc:     Andrey Konovalov <andreyknvl@...il.com>,
        David Gow <davidgow@...gle.com>,
        Dmitry Vyukov <dvyukov@...gle.com>,
        Uladzislau Rezki <urezki@...il.com>
Subject: Re: [PATCH] mm/vmalloc: unbreak kasan vmalloc support

Excerpts from Daniel Axtens's message of June 17, 2021 6:13 pm:
> In commit 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings"),
> __vmalloc_node_range was changed such that __get_vm_area_node was no
> longer called with the requested/real size of the vmalloc allocation, but
> rather with a rounded-up size.
> 
> This means that __get_vm_area_node called kasan_unpoison_vmalloc() with
> a rounded up size rather than the real size. This led to it allowing
> access to too much memory and so missing vmalloc OOBs and failing the
> kasan kunit tests.
> 
> Pass the real size and the desired shift into __get_vm_area_node. This
> allows it to round up the size for the underlying allocators while
> still unpoisoning the correct quantity of shadow memory.
> 
> Adjust the other call-sites to pass in PAGE_SHIFT for the shift value.
> 
> Cc: Nicholas Piggin <npiggin@...il.com>
> Cc: David Gow <davidgow@...gle.com>
> Cc: Dmitry Vyukov <dvyukov@...gle.com>
> Cc: Andrey Konovalov <andreyknvl@...il.com>
> Cc: Uladzislau Rezki (Sony) <urezki@...il.com>
> Link: https://bugzilla.kernel.org/show_bug.cgi?id=213335
> Fixes: 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings")

Thanks Daniel, good debugging.

Reviewed-by: Nicholas Piggin <npiggin@...il.com>

> Signed-off-by: Daniel Axtens <dja@...ens.net>
> ---
>  mm/vmalloc.c | 24 ++++++++++++++----------
>  1 file changed, 14 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index aaad569e8963..3471cbeb083c 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2362,15 +2362,16 @@ static void clear_vm_uninitialized_flag(struct vm_struct *vm)
>  }
>  
>  static struct vm_struct *__get_vm_area_node(unsigned long size,
> -		unsigned long align, unsigned long flags, unsigned long start,
> -		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
> +		unsigned long align, unsigned long shift, unsigned long flags,
> +		unsigned long start, unsigned long end, int node,
> +		gfp_t gfp_mask, const void *caller)
>  {
>  	struct vmap_area *va;
>  	struct vm_struct *area;
>  	unsigned long requested_size = size;
>  
>  	BUG_ON(in_interrupt());
> -	size = PAGE_ALIGN(size);
> +	size = ALIGN(size, 1ul << shift);
>  	if (unlikely(!size))
>  		return NULL;
>  
> @@ -2402,8 +2403,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
>  				       unsigned long start, unsigned long end,
>  				       const void *caller)
>  {
> -	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
> -				  GFP_KERNEL, caller);
> +	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
> +				  NUMA_NO_NODE, GFP_KERNEL, caller);
>  }
>  
>  /**
> @@ -2419,7 +2420,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
>   */
>  struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
>  {
> -	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
> +	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
> +				  VMALLOC_START, VMALLOC_END,
>  				  NUMA_NO_NODE, GFP_KERNEL,
>  				  __builtin_return_address(0));
>  }
> @@ -2427,7 +2429,8 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
>  struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
>  				const void *caller)
>  {
> -	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
> +	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
> +				  VMALLOC_START, VMALLOC_END,
>  				  NUMA_NO_NODE, GFP_KERNEL, caller);
>  }
>  
> @@ -2949,9 +2952,9 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
>  	}
>  
>  again:
> -	size = PAGE_ALIGN(size);
> -	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
> -				vm_flags, start, end, node, gfp_mask, caller);
> +	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
> +				  VM_UNINITIALIZED | vm_flags, start, end, node,
> +				  gfp_mask, caller);
>  	if (!area) {
>  		warn_alloc(gfp_mask, NULL,
>  			"vmalloc error: size %lu, vm_struct allocation failed",
> @@ -2970,6 +2973,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
>  	 */
>  	clear_vm_uninitialized_flag(area);
>  
> +	size = PAGE_ALIGN(size);
>  	kmemleak_vmalloc(area, size, gfp_mask);
>  
>  	return addr;
> -- 
> 2.30.2
> 
> 

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ