Message-ID: <ZeoSiP-hOeHG89BJ@pc638.lan>
Date: Thu, 7 Mar 2024 20:16:24 +0100
From: Uladzislau Rezki <urezki@...il.com>
To: Baoquan He <bhe@...hat.com>, rulinhuang <rulin.huang@...el.com>
Cc: Uladzislau Rezki <urezki@...il.com>, rulinhuang <rulin.huang@...el.com>,
	akpm@...ux-foundation.org, colin.king@...el.com, hch@...radead.org,
	linux-kernel@...r.kernel.org, linux-mm@...ck.org,
	lstoakes@...il.com, tianyou.li@...el.com, tim.c.chen@...el.com,
	wangyang.guo@...el.com, zhiguo.zhou@...el.com
Subject: Re: [PATCH v7 1/2] mm/vmalloc: Moved macros with no functional
 change happened

On Thu, Mar 07, 2024 at 09:23:10AM +0800, Baoquan He wrote:
> On 03/06/24 at 08:01pm, Uladzislau Rezki wrote:
> > On Fri, Mar 01, 2024 at 10:54:16AM -0500, rulinhuang wrote:
> ......
> > 
> > Sorry for the late answer, I also only just noticed this email. It was
> > not in my inbox...
> > 
> > OK, so now you move one part of the per-cpu allocator to the top and
> > leave another part below, splitting it. This is just for the:
> > 
> > BUG_ON(va_flags & VMAP_RAM);
> > 
> > VMAP_RAM macro. Do we really need this BUG_ON()?
> 
> Sorry, I suggested that when reviewing v5:
> https://lore.kernel.org/all/ZdiltpK5fUvwVWtD@MiWiFi-R3L-srv/T/#u
> 
> About moving part of the per-cpu kva allocator and the split it creates,
> I would argue that we will have the vmap_nodes definition and basic
> helper functions like addr_to_node_id() at the top, and leave the other
> part, like size_to_va_pool() and node_pool_add_va(), further down. These
> are similar cases.
> 
> As for whether we should add 'BUG_ON(va_flags & VMAP_RAM);', I am not
> sure. I was also hesitant when I suggested it. In the current code,
> alloc_vmap_area() is called from the three functions below, and only
> __get_vm_area_node() passes a non-NULL vm.
>  new_vmap_block()     -|
>  vm_map_ram()         ----> alloc_vmap_area()
>  __get_vm_area_node() -|
> 
> Could a wrong value be passed in the future? Only checking whether vm is
> non-NULL makes me feel a little unsafe. That said, I am fine with
> removing the BUG_ON, because there is nothing to worry about in the
> current code. We can wait and see.
> 
>        if (vm) {
>                BUG_ON(va_flags & VMAP_RAM);
>                setup_vmalloc_vm(vm, va, flags, caller);
>        }
> 
I would remove it, because it is really hard to get this wrong: there is
only one caller that passes a vm, and BUG_ON() is a real show stopper. I
really appreciate what rulinhuang <rulin.huang@...el.com> is doing, and I
understand that it might not be so easy.

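If some sanity check is still wanted there, WARN_ON_ONCE() would be a
softer option than a full stop. Just a sketch of the v7 hunk with the
check relaxed, not something the current code strictly needs:

<snip>
	if (vm) {
		/* Complain once and keep going instead of halting the kernel. */
		WARN_ON_ONCE(va_flags & VMAP_RAM);
		setup_vmalloc_vm(vm, va, flags, caller);
	}
<snip>
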
So, if we can avoid moving the code, it looks to me like we can. And if
we can pass fewer arguments into alloc_vmap_area(), which is already
overloaded, that would be great.

Just an example:

<snip>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 25a8df497255..b6050e018539 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1841,6 +1841,30 @@ node_alloc(unsigned long size, unsigned long align,
 	return va;
 }
 
+static inline void
+__pre_setup_vmalloc_vm(struct vm_struct *vm,
+		unsigned long flags, const void *caller)
+{
+	vm->flags = flags;
+	vm->caller = caller;
+}
+
+static inline void
+__post_setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va)
+{
+	vm->addr = (void *)va->va_start;
+	vm->size = va->va_end - va->va_start;
+	va->vm = vm;
+}
+
+static inline void
+setup_vmalloc_vm_locked(struct vm_struct *vm, struct vmap_area *va,
+		unsigned long flags, const void *caller)
+{
+	__pre_setup_vmalloc_vm(vm, flags, caller);
+	__post_setup_vmalloc_vm(vm, va);
+}
+
 /*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
@@ -1849,7 +1873,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 				unsigned long align,
 				unsigned long vstart, unsigned long vend,
 				int node, gfp_t gfp_mask,
-				unsigned long va_flags)
+				unsigned long va_flags, struct vm_struct *vm)
 {
 	struct vmap_node *vn;
 	struct vmap_area *va;
@@ -1912,6 +1936,9 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	va->vm = NULL;
 	va->flags = (va_flags | vn_id);
 
+	if (vm)
+		__post_setup_vmalloc_vm(vm, va);
+
 	vn = addr_to_node(va->va_start);
 
 	spin_lock(&vn->busy.lock);
@@ -2486,7 +2513,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 					VMALLOC_START, VMALLOC_END,
 					node, gfp_mask,
-					VMAP_RAM|VMAP_BLOCK);
+					VMAP_RAM|VMAP_BLOCK, NULL);
 	if (IS_ERR(va)) {
 		kfree(vb);
 		return ERR_CAST(va);
@@ -2843,7 +2870,8 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
 		struct vmap_area *va;
 		va = alloc_vmap_area(size, PAGE_SIZE,
 				VMALLOC_START, VMALLOC_END,
-				node, GFP_KERNEL, VMAP_RAM);
+				node, GFP_KERNEL, VMAP_RAM, NULL);
+
 		if (IS_ERR(va))
 			return NULL;
 
@@ -2946,26 +2974,6 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
 	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
 }
 
-static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
-	struct vmap_area *va, unsigned long flags, const void *caller)
-{
-	vm->flags = flags;
-	vm->addr = (void *)va->va_start;
-	vm->size = va->va_end - va->va_start;
-	vm->caller = caller;
-	va->vm = vm;
-}
-
-static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-			      unsigned long flags, const void *caller)
-{
-	struct vmap_node *vn = addr_to_node(va->va_start);
-
-	spin_lock(&vn->busy.lock);
-	setup_vmalloc_vm_locked(vm, va, flags, caller);
-	spin_unlock(&vn->busy.lock);
-}
-
 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
 {
 	/*
@@ -3002,14 +3010,15 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 	if (!(flags & VM_NO_GUARD))
 		size += PAGE_SIZE;
 
-	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
+	/* The post-setup is done in alloc_vmap_area(). */
+	__pre_setup_vmalloc_vm(area, flags, caller);
+
+	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
 	if (IS_ERR(va)) {
 		kfree(area);
 		return NULL;
 	}
 
-	setup_vmalloc_vm(area, va, flags, caller);
-
 	/*
 	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
 	 * best-effort approach, as they can be mapped outside of vmalloc code.
<snip>
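
One note on the sketch: __post_setup_vmalloc_vm() runs before the va is
inserted into the busy tree under vn->busy.lock, i.e. while the area is
not yet visible to other CPUs, so setting the vm fields without holding
the lock should be fine in this path.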

--
Uladzislau Rezki
