Message-Id: <20201129004548.1619714-13-namit@vmware.com>
Date:   Sat, 28 Nov 2020 16:45:47 -0800
From:   Nadav Amit <nadav.amit@...il.com>
To:     linux-fsdevel@...r.kernel.org
Cc:     Nadav Amit <namit@...are.com>, Jens Axboe <axboe@...nel.dk>,
        Andrea Arcangeli <aarcange@...hat.com>,
        Peter Xu <peterx@...hat.com>,
        Alexander Viro <viro@...iv.linux.org.uk>,
        io-uring@...r.kernel.org, linux-kernel@...r.kernel.org,
        linux-mm@...ck.org
Subject: [RFC PATCH 12/13] fs/userfaultfd: kmem-cache for wait-queue objects

From: Nadav Amit <namit@...are.com>

Allocating wait-queue objects on the stack usually has negative
performance side-effects. First, it is hard to ensure cache-line
alignment without increasing the stack size. Second, it might cause
false sharing. Third, TLB misses are more likely, as the objects are
more likely to reside on different pages.

Allocate userfaultfd wait-queue objects on the heap from a dedicated
kmem-cache instead, for better performance.
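
The change below follows the standard kmem-cache idiom. A minimal,
self-contained sketch of that pattern (the names here are illustrative,
not the ones used in this patch, and it adds the allocation-failure
check that such code generally wants):

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/slab.h>

	struct example_obj {
		unsigned long val;
	};

	static struct kmem_cache *example_cachep;

	static int __init example_init(void)
	{
		/* SLAB_HWCACHE_ALIGN keeps objects cache-line aligned. */
		example_cachep = kmem_cache_create("example_cache",
						   sizeof(struct example_obj),
						   0, SLAB_HWCACHE_ALIGN, NULL);
		return example_cachep ? 0 : -ENOMEM;
	}

	static void example_use(void)
	{
		/* Zeroed object from the dedicated, aligned cache. */
		struct example_obj *obj =
			kmem_cache_zalloc(example_cachep, GFP_KERNEL);

		if (!obj)
			return;
		obj->val = 1;
		kmem_cache_free(example_cachep, obj);
	}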

Cc: Jens Axboe <axboe@...nel.dk>
Cc: Andrea Arcangeli <aarcange@...hat.com>
Cc: Peter Xu <peterx@...hat.com>
Cc: Alexander Viro <viro@...iv.linux.org.uk>
Cc: io-uring@...r.kernel.org
Cc: linux-fsdevel@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org
Signed-off-by: Nadav Amit <namit@...are.com>
---
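For reference, the object being moved off the stack is defined in
fs/userfaultfd.c roughly as follows (paraphrased from the base tree,
reproduced here only for readability):

	struct userfaultfd_wait_queue {
		struct uffd_msg msg;
		wait_queue_entry_t wq;
		struct userfaultfd_ctx *ctx;
		bool waken;
	};
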
 fs/userfaultfd.c | 66 ++++++++++++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 42 insertions(+), 24 deletions(-)

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 5c22170544e3..224b595ec758 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -32,6 +32,7 @@
 int sysctl_unprivileged_userfaultfd __read_mostly = 1;
 
 static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
+static struct kmem_cache *userfaultfd_wait_queue_cachep __read_mostly;
 
 enum userfaultfd_state {
 	UFFD_STATE_WAIT_API,
@@ -904,14 +905,17 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
 static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
 {
 	struct userfaultfd_ctx *ctx = fctx->orig;
-	struct userfaultfd_wait_queue ewq;
+	struct userfaultfd_wait_queue *ewq;
+
+	ewq = kmem_cache_zalloc(userfaultfd_wait_queue_cachep, GFP_KERNEL);
 
-	msg_init(&ewq.msg);
+	msg_init(&ewq->msg);
 
-	ewq.msg.event = UFFD_EVENT_FORK;
-	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
+	ewq->msg.event = UFFD_EVENT_FORK;
+	ewq->msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
 
-	userfaultfd_event_wait_completion(ctx, &ewq);
+	userfaultfd_event_wait_completion(ctx, ewq);
+	kmem_cache_free(userfaultfd_wait_queue_cachep, ewq);
 }
 
 void dup_userfaultfd_complete(struct list_head *fcs)
@@ -951,7 +955,7 @@ void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
 				 unsigned long len)
 {
 	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
-	struct userfaultfd_wait_queue ewq;
+	struct userfaultfd_wait_queue *ewq;
 
 	if (!ctx)
 		return;
@@ -961,14 +965,17 @@ void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
 		return;
 	}
 
-	msg_init(&ewq.msg);
+	ewq = kmem_cache_zalloc(userfaultfd_wait_queue_cachep, GFP_KERNEL);
+
+	msg_init(&ewq->msg);
 
-	ewq.msg.event = UFFD_EVENT_REMAP;
-	ewq.msg.arg.remap.from = from;
-	ewq.msg.arg.remap.to = to;
-	ewq.msg.arg.remap.len = len;
+	ewq->msg.event = UFFD_EVENT_REMAP;
+	ewq->msg.arg.remap.from = from;
+	ewq->msg.arg.remap.to = to;
+	ewq->msg.arg.remap.len = len;
 
-	userfaultfd_event_wait_completion(ctx, &ewq);
+	userfaultfd_event_wait_completion(ctx, ewq);
+	kmem_cache_free(userfaultfd_wait_queue_cachep, ewq);
 }
 
 bool userfaultfd_remove(struct vm_area_struct *vma,
@@ -976,23 +983,25 @@ bool userfaultfd_remove(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct userfaultfd_ctx *ctx;
-	struct userfaultfd_wait_queue ewq;
+	struct userfaultfd_wait_queue *ewq;
 
 	ctx = vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
 		return true;
 
+	ewq = kmem_cache_zalloc(userfaultfd_wait_queue_cachep, GFP_KERNEL);
 	userfaultfd_ctx_get(ctx);
 	WRITE_ONCE(ctx->mmap_changing, true);
 	mmap_read_unlock(mm);
 
-	msg_init(&ewq.msg);
+	msg_init(&ewq->msg);
 
-	ewq.msg.event = UFFD_EVENT_REMOVE;
-	ewq.msg.arg.remove.start = start;
-	ewq.msg.arg.remove.end = end;
+	ewq->msg.event = UFFD_EVENT_REMOVE;
+	ewq->msg.arg.remove.start = start;
+	ewq->msg.arg.remove.end = end;
 
-	userfaultfd_event_wait_completion(ctx, &ewq);
+	userfaultfd_event_wait_completion(ctx, ewq);
+	kmem_cache_free(userfaultfd_wait_queue_cachep, ewq);
 
 	return false;
 }
@@ -1040,20 +1049,23 @@ int userfaultfd_unmap_prep(struct vm_area_struct *vma,
 void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
 {
 	struct userfaultfd_unmap_ctx *ctx, *n;
-	struct userfaultfd_wait_queue ewq;
+	struct userfaultfd_wait_queue *ewq;
+
+	ewq = kmem_cache_zalloc(userfaultfd_wait_queue_cachep, GFP_KERNEL);
 
 	list_for_each_entry_safe(ctx, n, uf, list) {
-		msg_init(&ewq.msg);
+		msg_init(&ewq->msg);
 
-		ewq.msg.event = UFFD_EVENT_UNMAP;
-		ewq.msg.arg.remove.start = ctx->start;
-		ewq.msg.arg.remove.end = ctx->end;
+		ewq->msg.event = UFFD_EVENT_UNMAP;
+		ewq->msg.arg.remove.start = ctx->start;
+		ewq->msg.arg.remove.end = ctx->end;
 
-		userfaultfd_event_wait_completion(ctx->ctx, &ewq);
+		userfaultfd_event_wait_completion(ctx->ctx, ewq);
 
 		list_del(&ctx->list);
 		kfree(ctx);
 	}
+	kmem_cache_free(userfaultfd_wait_queue_cachep, ewq);
 }
 
 static void userfaultfd_cancel_async_reads(struct userfaultfd_ctx *ctx)
@@ -2471,6 +2483,12 @@ static int __init userfaultfd_init(void)
 						0,
 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 						init_once_userfaultfd_ctx);
+
+	userfaultfd_wait_queue_cachep = kmem_cache_create("userfaultfd_wait_queue_cache",
+						sizeof(struct userfaultfd_wait_queue),
+						0,
+						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+						NULL);
 	return 0;
 }
 __initcall(userfaultfd_init);
-- 
2.25.1
