Message-Id: <20190212175648.28738-2-guro@fb.com>
Date: Tue, 12 Feb 2019 09:56:46 -0800
From: Roman Gushchin <guroan@...il.com>
To: linux-mm@...ck.org
Cc: Matthew Wilcox <willy@...radead.org>,
Johannes Weiner <hannes@...xchg.org>, kernel-team@...com,
Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org, Roman Gushchin <guro@...com>
Subject: [PATCH v2 1/3] mm: refactor __vunmap() to avoid duplicated call to find_vm_area()

__vunmap() calls find_vm_area() twice without an obvious reason:
first directly, to get the area pointer, and then indirectly, by
calling remove_vm_area(), which searches for the same area again.
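
Roughly, the pre-patch pattern looks like this (a simplified sketch,
not the exact kernel code):

	static void __vunmap(const void *addr, int deallocate_pages)
	{
		struct vm_struct *area = find_vm_area(addr);	/* lookup #1 */
		...
		remove_vm_area(addr);	/* calls find_vmap_area(): lookup #2 */
		...
	}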
To remove this redundancy, let's split remove_vm_area() into
__remove_vm_area(struct vmap_area *), which performs the actual area
removal, and the remove_vm_area(const void *addr) wrapper, which can
be used everywhere remove_vm_area() was used before.
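
With the split in place, __vunmap() can look the area up once and
reuse the result (this is the pattern the patch below introduces):

	va = find_vmap_area((unsigned long)addr);	/* single lookup */
	...
	__remove_vm_area(va);				/* no second search */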
On my test setup, I see a 5-10% speedup when vfree()'ing 1000000
4-page vmalloc blocks.
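
For reference, a rough sketch of the kind of test used (a hypothetical
test module; vfree_bench() and NR_BLOCKS are made-up names, not the
actual harness):

	#include <linux/vmalloc.h>
	#include <linux/ktime.h>
	#include <linux/mm.h>
	#include <linux/printk.h>

	#define NR_BLOCKS 1000000	/* ~16G of RAM at 4 pages each; shrink to taste */

	static void vfree_bench(void)
	{
		/* the pointer array itself (8 MB) also comes from vmalloc */
		void **ptrs = vmalloc(NR_BLOCKS * sizeof(*ptrs));
		u64 start;
		int i;

		if (!ptrs)
			return;

		for (i = 0; i < NR_BLOCKS; i++)
			ptrs[i] = vmalloc(4 * PAGE_SIZE);

		/* time only the vfree() side */
		start = ktime_get_ns();
		for (i = 0; i < NR_BLOCKS; i++)
			vfree(ptrs[i]);
		pr_info("vfree()'ing %d blocks: %llu ns\n",
			NR_BLOCKS, ktime_get_ns() - start);

		vfree(ptrs);
	}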
Signed-off-by: Roman Gushchin <guro@...com>
Cc: Johannes Weiner <hannes@...xchg.org>
Cc: Matthew Wilcox <willy@...radead.org>
---
mm/vmalloc.c | 47 +++++++++++++++++++++++++++--------------------
1 file changed, 27 insertions(+), 20 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b7455d4c8c12..8f0179895fb5 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1477,6 +1477,24 @@ struct vm_struct *find_vm_area(const void *addr)
 	return NULL;
 }
 
+static struct vm_struct *__remove_vm_area(struct vmap_area *va)
+{
+	struct vm_struct *vm = va->vm;
+
+	might_sleep();
+
+	spin_lock(&vmap_area_lock);
+	va->vm = NULL;
+	va->flags &= ~VM_VM_AREA;
+	va->flags |= VM_LAZY_FREE;
+	spin_unlock(&vmap_area_lock);
+
+	kasan_free_shadow(vm);
+	free_unmap_vmap_area(va);
+
+	return vm;
+}
+
 /**
  * remove_vm_area - find and remove a continuous kernel virtual area
  * @addr: base address
@@ -1489,31 +1507,20 @@ struct vm_struct *find_vm_area(const void *addr)
  */
 struct vm_struct *remove_vm_area(const void *addr)
 {
+	struct vm_struct *vm = NULL;
 	struct vmap_area *va;
 
-	might_sleep();
-
 	va = find_vmap_area((unsigned long)addr);
-	if (va && va->flags & VM_VM_AREA) {
-		struct vm_struct *vm = va->vm;
-
-		spin_lock(&vmap_area_lock);
-		va->vm = NULL;
-		va->flags &= ~VM_VM_AREA;
-		va->flags |= VM_LAZY_FREE;
-		spin_unlock(&vmap_area_lock);
-
-		kasan_free_shadow(vm);
-		free_unmap_vmap_area(va);
+	if (va && va->flags & VM_VM_AREA)
+		vm = __remove_vm_area(va);
 
-		return vm;
-	}
-	return NULL;
+	return vm;
 }
 
 static void __vunmap(const void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
+	struct vmap_area *va;
 
 	if (!addr)
 		return;
@@ -1522,17 +1529,18 @@ static void __vunmap(const void *addr, int deallocate_pages)
 			addr))
 		return;
 
-	area = find_vm_area(addr);
-	if (unlikely(!area)) {
+	va = find_vmap_area((unsigned long)addr);
+	if (unlikely(!va || !(va->flags & VM_VM_AREA))) {
 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
 				addr);
 		return;
 	}
+	area = va->vm;
 
 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
 
-	remove_vm_area(addr);
+	__remove_vm_area(va);
 	if (deallocate_pages) {
 		int i;
 
@@ -1547,7 +1555,6 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	}
 
 	kfree(area);
-	return;
 }
 
 static inline void __vfree_deferred(const void *addr)
--
2.20.1