Message-ID: <CY8PR11MB77473CF2A8F9BFBFE3B3A09E8D5E2@CY8PR11MB7747.namprd11.prod.outlook.com>
Date: Fri, 1 Mar 2024 09:14:25 +0000
From: "King, Colin" <colin.king@...el.com>
To: "Huang, Rulin" <rulin.huang@...el.com>, "urezki@...il.com"
<urezki@...il.com>, "bhe@...hat.com" <bhe@...hat.com>
CC: "akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
"hch@...radead.org" <hch@...radead.org>, "linux-kernel@...r.kernel.org"
<linux-kernel@...r.kernel.org>, "linux-mm@...ck.org" <linux-mm@...ck.org>,
"lstoakes@...il.com" <lstoakes@...il.com>, "Li, Tianyou"
<tianyou.li@...el.com>, "Chen, Tim C" <tim.c.chen@...el.com>, "Guo, Wangyang"
<wangyang.guo@...el.com>, "Zhou, Zhiguo" <zhiguo.zhou@...el.com>
Subject: RE: [PATCH v6] mm/vmalloc: lock contention optimization under
multi-threading
Just to confirm, looks good to me. Thanks Rulin.
Colin
-----Original Message-----
From: Huang, Rulin <rulin.huang@...el.com>
Sent: Thursday, February 29, 2024 8:26 AM
To: urezki@...il.com; bhe@...hat.com
Cc: akpm@...ux-foundation.org; King, Colin <colin.king@...el.com>; hch@...radead.org; linux-kernel@...r.kernel.org; linux-mm@...ck.org; lstoakes@...ilcom; Huang, Rulin <rulin.huang@...el.com>; Li, Tianyou <tianyou.li@...el.com>; Chen, Tim C <tim.c.chen@...el.com>; Guo, Wangyang <wangyang.guo@...el.com>; Zhou, Zhiguo <zhiguo.zhou@...el.com>
Subject: [PATCH v6] mm/vmalloc: lock contention optimization under multi-threading
When allocating a new memory area where the mapping address range is known, it is observed that the vmap_node->busy.lock is acquired twice.
The first acquisition occurs in alloc_vmap_area() when inserting the vm area into the vm mapping red-black tree. The second occurs in setup_vmalloc_vm() when updating the properties of the vm, such as its flags and address.

Combine these two operations in alloc_vmap_area(), reducing the two lock acquisitions to one and improving scalability when vmap_node->busy.lock is contended, as sketched below.
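For illustration only, here is a minimal userspace model of the two locking patterns (pthread mutexes stand in for the spinlock; struct node, struct area, and insert_area() are hypothetical stand-ins, not the actual vmalloc code):

#include <pthread.h>

struct node {
	pthread_mutex_t busy_lock;	/* models vmap_node->busy.lock */
};

struct area {
	void *vm;			/* models vmap_area->vm */
};

/* Placeholder for the red-black tree insertion done under the lock. */
static void insert_area(struct node *vn, struct area *va)
{
	(void)vn;
	(void)va;
}

/* Before: insertion and property setup each take the per-node lock. */
static void alloc_then_setup(struct node *vn, struct area *va, void *vm)
{
	pthread_mutex_lock(&vn->busy_lock);
	insert_area(vn, va);		/* alloc_vmap_area() */
	pthread_mutex_unlock(&vn->busy_lock);

	pthread_mutex_lock(&vn->busy_lock);
	va->vm = vm;			/* setup_vmalloc_vm() */
	pthread_mutex_unlock(&vn->busy_lock);
}

/* After: bind vm before the area is published, then insert under one lock. */
static void alloc_combined(struct node *vn, struct area *va, void *vm)
{
	va->vm = vm;			/* safe: va is not yet visible to others */
	pthread_mutex_lock(&vn->busy_lock);
	insert_area(vn, va);
	pthread_mutex_unlock(&vn->busy_lock);
}

Under contention, each avoided acquisition of a hot lock removes one pass through its wait queue per allocation, which is where the scalability gain comes from.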
With the above change, tested on an Intel Sapphire Rapids platform (224 vCPUs), a 4% performance improvement is gained on stress-ng/pthread (https://github.com/ColinIanKing/stress-ng), which stress-tests thread creation.
Reviewed-by: Uladzislau Rezki <urezki@...il.com>
Reviewed-by: Baoquan He <bhe@...hat.com>
Reviewed-by: "Chen, Tim C" <tim.c.chen@...el.com>
Reviewed-by: "King, Colin" <colin.king@...el.com>
Signed-off-by: rulinhuang <rulin.huang@...el.com>
---
V1 -> V2: Avoided the partial initialization issue of vm and separated insert_vmap_area() from alloc_vmap_area()
V2 -> V3: Rebased on 6.8-rc5
V3 -> V4: Rebased on mm-unstable branch
V4 -> V5: Canceled the split of alloc_vmap_area() and kept insert_vmap_area()
V5 -> V6: Added a BUG_ON() check
---
mm/vmalloc.c | 132 +++++++++++++++++++++++++--------------------------
1 file changed, 64 insertions(+), 68 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 25a8df497255..5ae028b0d58d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1841,15 +1841,66 @@ node_alloc(unsigned long size, unsigned long align,
return va;
}
+/*** Per cpu kva allocator ***/
+
+/*
+ * vmap space is limited especially on 32 bit architectures. Ensure there is
+ * room for at least 16 percpu vmap blocks per CPU.
+ */
+/*
+ * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
+ * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
+ * instead (we just need a rough idea)
+ */
+#if BITS_PER_LONG == 32
+#define VMALLOC_SPACE (128UL*1024*1024)
+#else
+#define VMALLOC_SPACE (128UL*1024*1024*1024)
+#endif
+
+#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
+#define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
+#define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
+#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
+#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
+#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
+#define VMAP_BBMAP_BITS \
+ VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
+ VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
+ VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
+
+#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
+
+/*
+ * Purge threshold to prevent overeager purging of fragmented blocks for
+ * regular operations: Purge if vb->free is less than 1/4 of the capacity.
+ */
+#define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4)
+
+#define VMAP_RAM 0x1 /* indicates vm_map_ram area*/
+#define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/
+#define VMAP_FLAGS_MASK 0x3
+
+static inline void setup_vmalloc_vm(struct vm_struct *vm,
+ struct vmap_area *va, unsigned long flags, const void *caller)
+{
+ vm->flags = flags;
+ vm->addr = (void *)va->va_start;
+ vm->size = va->va_end - va->va_start;
+ vm->caller = caller;
+ va->vm = vm;
+}
+
/*
* Allocate a region of KVA of the specified size and alignment, within the
- * vstart and vend.
+ * vstart and vend. If vm is passed in, the two will also be bound.
*/
static struct vmap_area *alloc_vmap_area(unsigned long size,
unsigned long align,
unsigned long vstart, unsigned long vend,
int node, gfp_t gfp_mask,
- unsigned long va_flags)
+ unsigned long va_flags, struct vm_struct *vm,
+ unsigned long flags, const void *caller)
{
struct vmap_node *vn;
struct vmap_area *va;
@@ -1912,6 +1963,11 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
va->vm = NULL;
va->flags = (va_flags | vn_id);
+ if (vm) {
+ BUG_ON(va_flags & VMAP_RAM);
+ setup_vmalloc_vm(vm, va, flags, caller);
+ }
+
vn = addr_to_node(va->va_start);
spin_lock(&vn->busy.lock);
@@ -2325,46 +2381,6 @@ static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
return NULL;
}
-/*** Per cpu kva allocator ***/
-
-/*
- * vmap space is limited especially on 32 bit architectures. Ensure there is
- * room for at least 16 percpu vmap blocks per CPU.
- */
-/*
- * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
- * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
- * instead (we just need a rough idea)
- */
-#if BITS_PER_LONG == 32
-#define VMALLOC_SPACE (128UL*1024*1024)
-#else
-#define VMALLOC_SPACE (128UL*1024*1024*1024)
-#endif
-
-#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
-#define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
-#define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
-#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
-#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
-#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
-#define VMAP_BBMAP_BITS \
- VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
- VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
- VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
-
-#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
-
-/*
- * Purge threshold to prevent overeager purging of fragmented blocks for
- * regular operations: Purge if vb->free is less than 1/4 of the capacity.
- */
-#define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4)
-
-#define VMAP_RAM 0x1 /* indicates vm_map_ram area*/
-#define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/
-#define VMAP_FLAGS_MASK 0x3
-
struct vmap_block_queue {
spinlock_t lock;
struct list_head free;
@@ -2486,7 +2502,8 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
VMALLOC_START, VMALLOC_END,
node, gfp_mask,
- VMAP_RAM|VMAP_BLOCK);
+ VMAP_RAM|VMAP_BLOCK, NULL,
+ 0, NULL);
if (IS_ERR(va)) {
kfree(vb);
return ERR_CAST(va);
@@ -2843,7 +2860,8 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
struct vmap_area *va;
va = alloc_vmap_area(size, PAGE_SIZE,
VMALLOC_START, VMALLOC_END,
- node, GFP_KERNEL, VMAP_RAM);
+ node, GFP_KERNEL, VMAP_RAM,
+ NULL, 0, NULL);
if (IS_ERR(va))
return NULL;
@@ -2946,26 +2964,6 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
}
-static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
- struct vmap_area *va, unsigned long flags, const void *caller)
-{
- vm->flags = flags;
- vm->addr = (void *)va->va_start;
- vm->size = va->va_end - va->va_start;
- vm->caller = caller;
- va->vm = vm;
-}
-
-static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
- unsigned long flags, const void *caller)
-{
- struct vmap_node *vn = addr_to_node(va->va_start);
-
- spin_lock(&vn->busy.lock);
- setup_vmalloc_vm_locked(vm, va, flags, caller);
- spin_unlock(&vn->busy.lock);
-}
-
static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
/*
@@ -3002,14 +3000,12 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
if (!(flags & VM_NO_GUARD))
size += PAGE_SIZE;
- va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
+ va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area, flags, caller);
if (IS_ERR(va)) {
kfree(area);
return NULL;
}
- setup_vmalloc_vm(area, va, flags, caller);
-
/*
* Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
* best-effort approach, as they can be mapped outside of vmalloc code.
@@ -4584,7 +4580,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
spin_lock(&vn->busy.lock);
insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
- setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
+ setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
pcpu_get_vm_areas);
spin_unlock(&vn->busy.lock);
}
base-commit: 7e6ae2db7f319bf9613ec6db8fa3c9bc1de1b346
--
2.43.0