Message-ID: <ZerLB/LNWAOvC2HM@MiWiFi-R3L-srv>
Date: Fri, 8 Mar 2024 16:23:35 +0800
From: Baoquan He <bhe@...hat.com>
To: Uladzislau Rezki <urezki@...il.com>
Cc: rulinhuang <rulin.huang@...el.com>, akpm@...ux-foundation.org,
	colin.king@...el.com, hch@...radead.org,
	linux-kernel@...r.kernel.org, linux-mm@...ck.org,
	lstoakes@...il.com, tianyou.li@...el.com, tim.c.chen@...el.com,
	wangyang.guo@...el.com, zhiguo.zhou@...el.com
Subject: Re: [PATCH v7 1/2] mm/vmalloc: Moved macros with no functional
 change happened

On 03/07/24 at 08:16pm, Uladzislau Rezki wrote:
> On Thu, Mar 07, 2024 at 09:23:10AM +0800, Baoquan He wrote:
> > On 03/06/24 at 08:01pm, Uladzislau Rezki wrote:
> > > On Fri, Mar 01, 2024 at 10:54:16AM -0500, rulinhuang wrote:
> > ......
> > > 
> > > Sorry for the late answer, I also just noticed this email. It was not in
> > > my inbox...
> > > 
> > > OK, now you move part of the per-cpu allocator to the top and leave the
> > > other part below, splitting it. This is just for the:
> > > 
> > > BUG_ON(va_flags & VMAP_RAM);
> > > 
> > > VMAP_RAM macro. Do we really need this BUG_ON()?
> > 
> > Sorry, I suggested that when reviewing v5:
> > https://lore.kernel.org/all/ZdiltpK5fUvwVWtD@MiWiFi-R3L-srv/T/#u
> > 
> > About moving part of the per-cpu kva allocator and the split it causes, I
> > would argue that we will have the vmap_nodes definition and basic helper
> > functions like addr_to_node_id() etc. at the top, and leave the other part,
> > like size_to_va_pool(), node_pool_add_va() etc., below. These cases are similar.
> > 
> > As for whether we should add 'BUG_ON(va_flags & VMAP_RAM);', I am not
> > sure. When I suggested it, I was also hesitant. In the current code,
> > alloc_vmap_area() is called from the three functions below, and only
> > __get_vm_area_node() passes a non-NULL vm.
> >  new_vmap_block()     -|
> >  vm_map_ram()         ----> alloc_vmap_area()
> >  __get_vm_area_node() -|
> > 
> > Could a wrong value be passed in the future? Only checking whether vm is
> > non-NULL makes me feel a little unsafe. That said, I am fine with removing
> > the BUG_ON, because there's nothing to worry about in the current code. We
> > can wait and see.
> > 
> >        if (vm) {
> >                BUG_ON(va_flags & VMAP_RAM);
> >                setup_vmalloc_vm(vm, va, flags, caller);
> >        }
> > 
> I would remove it, because it is really hard to get this wrong; there is
> only one such place, and BUG_ON() is really a show stopper. I really
> appreciate what rulinhuang <rulin.huang@...el.com> is doing, and I
> understand that it might not be so easy.

I agree. I was hesitant before; this firms up my mind.
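
If we ever want a sanity check back without the show stopper, the softer
WARN_ON_ONCE() would be the usual choice. Just a sketch of what I mean, not
something I am proposing for this series:

	if (vm) {
		/*
		 * A VMAP_RAM va should never carry a vm area; warn once
		 * instead of panicking the machine if that ever changes.
		 */
		WARN_ON_ONCE(va_flags & VMAP_RAM);
		setup_vmalloc_vm(vm, va, flags, caller);
	}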

> 
> So, if we can avoid moving the code, it looks to me like we can. And if we
> can pass fewer arguments into alloc_vmap_area(), since it is overloaded,
> that would be great.

Agreed, fewer arguments is much better. Though I personally prefer open
coding it a little, like below. The __pre/__post_setup_vmalloc_vm() wrapping
smells of excessive packaging; they are just a few simple assignments after
all.

---
 mm/vmalloc.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0fd8ebaad17b..0c738423976d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1924,8 +1924,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 				unsigned long align,
 				unsigned long vstart, unsigned long vend,
 				int node, gfp_t gfp_mask,
-				unsigned long va_flags, struct vm_struct *vm,
-				unsigned long flags, const void *caller)
+				unsigned long va_flags, struct vm_struct *vm)
 {
 	struct vmap_node *vn;
 	struct vmap_area *va;
@@ -1988,8 +1987,11 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	va->vm = NULL;
 	va->flags = (va_flags | vn_id);
 
-	if (vm)
-		setup_vmalloc_vm(vm, va, flags, caller);
+	if (vm) {
+		vm->addr = (void *)va->va_start;
+		vm->size = va->va_end - va->va_start;
+		va->vm = vm;
+	}
 
 	vn = addr_to_node(va->va_start);
 
@@ -2565,8 +2567,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 					VMALLOC_START, VMALLOC_END,
 					node, gfp_mask,
-					VMAP_RAM|VMAP_BLOCK, NULL,
-					0, NULL);
+					VMAP_RAM|VMAP_BLOCK, NULL);
 	if (IS_ERR(va)) {
 		kfree(vb);
 		return ERR_CAST(va);
@@ -2924,7 +2925,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
 		va = alloc_vmap_area(size, PAGE_SIZE,
 				VMALLOC_START, VMALLOC_END,
 				node, GFP_KERNEL, VMAP_RAM,
-				NULL, 0, NULL);
+				NULL);
 		if (IS_ERR(va))
 			return NULL;
 
@@ -3063,7 +3064,10 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 	if (!(flags & VM_NO_GUARD))
 		size += PAGE_SIZE;
 
-	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area, flags, caller);
+	area->flags = flags;
+	area->caller = caller;
+
+	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
 	if (IS_ERR(va)) {
 		kfree(area);
 		return NULL;
-- 
2.41.0
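
By the way, one subtle point, just my reading of the code: in both versions,
vm->addr, vm->size and va->vm are assigned before the va is inserted into the
busy tree, so the vn->busy.lock that the old setup_vmalloc_vm() took is not
needed; nothing else can look the va up yet. Roughly (the insertion lines
paraphrased from memory):

	if (vm) {
		/*
		 * The va is not linked into any vmap_node busy tree yet,
		 * so nobody else can observe it; no vn->busy.lock needed.
		 */
		vm->addr = (void *)va->va_start;
		vm->size = va->va_end - va->va_start;
		va->vm = vm;
	}

	vn = addr_to_node(va->va_start);

	spin_lock(&vn->busy.lock);
	insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
	spin_unlock(&vn->busy.lock);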


> 
> Just an example:
> 
> <snip>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 25a8df497255..b6050e018539 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1841,6 +1841,30 @@ node_alloc(unsigned long size, unsigned long align,
>  	return va;
>  }
>  
> +static inline void
> +__pre_setup_vmalloc_vm(struct vm_struct *vm,
> +		unsigned long flags, const void *caller)
> +{
> +	vm->flags = flags;
> +	vm->caller = caller;
> +}
> +
> +static inline void
> +__post_setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va)
> +{
> +	vm->addr = (void *)va->va_start;
> +	vm->size = va->va_end - va->va_start;
> +	va->vm = vm;
> +}
> +
> +static inline void
> +setup_vmalloc_vm_locked(struct vm_struct *vm, struct vmap_area *va,
> +		unsigned long flags, const void *caller)
> +{
> +	__pre_setup_vmalloc_vm(vm, flags, caller);
> +	__post_setup_vmalloc_vm(vm, va);
> +}
> +
>  /*
>   * Allocate a region of KVA of the specified size and alignment, within the
>   * vstart and vend.
> @@ -1849,7 +1873,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  				unsigned long align,
>  				unsigned long vstart, unsigned long vend,
>  				int node, gfp_t gfp_mask,
> -				unsigned long va_flags)
> +				unsigned long va_flags, struct vm_struct *vm)
>  {
>  	struct vmap_node *vn;
>  	struct vmap_area *va;
> @@ -1912,6 +1936,9 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	va->vm = NULL;
>  	va->flags = (va_flags | vn_id);
>  
> +	if (vm)
> +		__post_setup_vmalloc_vm(vm, va);
> +
>  	vn = addr_to_node(va->va_start);
>  
>  	spin_lock(&vn->busy.lock);
> @@ -2486,7 +2513,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
>  	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
>  					VMALLOC_START, VMALLOC_END,
>  					node, gfp_mask,
> -					VMAP_RAM|VMAP_BLOCK);
> +					VMAP_RAM|VMAP_BLOCK, NULL);
>  	if (IS_ERR(va)) {
>  		kfree(vb);
>  		return ERR_CAST(va);
> @@ -2843,7 +2870,8 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
>  		struct vmap_area *va;
>  		va = alloc_vmap_area(size, PAGE_SIZE,
>  				VMALLOC_START, VMALLOC_END,
> -				node, GFP_KERNEL, VMAP_RAM);
> +				node, GFP_KERNEL, VMAP_RAM, NULL);
> +
>  		if (IS_ERR(va))
>  			return NULL;
>  
> @@ -2946,26 +2974,6 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
>  	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
>  }
>  
> -static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
> -	struct vmap_area *va, unsigned long flags, const void *caller)
> -{
> -	vm->flags = flags;
> -	vm->addr = (void *)va->va_start;
> -	vm->size = va->va_end - va->va_start;
> -	vm->caller = caller;
> -	va->vm = vm;
> -}
> -
> -static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
> -			      unsigned long flags, const void *caller)
> -{
> -	struct vmap_node *vn = addr_to_node(va->va_start);
> -
> -	spin_lock(&vn->busy.lock);
> -	setup_vmalloc_vm_locked(vm, va, flags, caller);
> -	spin_unlock(&vn->busy.lock);
> -}
> -
>  static void clear_vm_uninitialized_flag(struct vm_struct *vm)
>  {
>  	/*
> @@ -3002,14 +3010,15 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
>  	if (!(flags & VM_NO_GUARD))
>  		size += PAGE_SIZE;
>  
> -	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
> +	/* post-setup is done in the alloc_vmap_area(). */
> +	__pre_setup_vmalloc_vm(area, flags, caller);
> +
> +	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
>  	if (IS_ERR(va)) {
>  		kfree(area);
>  		return NULL;
>  	}
>  
> -	setup_vmalloc_vm(area, va, flags, caller);
> -
>  	/*
>  	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
>  	 * best-effort approach, as they can be mapped outside of vmalloc code.
> <snip>
> 
> --
> Uladzislau Rezki
> 

