Date:   Thu, 29 Aug 2019 13:49:53 +0800
From:   Peikan Tsai <peikantsai@...il.com>
To:     gregkh@...uxfoundation.org, arve@...roid.com, tkjos@...roid.com,
        maco@...roid.com, joel@...lfernandes.org, christian@...uner.io
Cc:     devel@...verdev.osuosl.org, linux-kernel@...r.kernel.org
Subject: [PATCH] binder: Use kmem_cache for binder_thread

Hi,

Currently each binder_thread is allocated with kzalloc(), which serves the
request from the 512-byte kmalloc slab even though struct binder_thread is
only 304 bytes. Creating a dedicated kmem_cache for binder_thread sizes the
allocation to the object itself, saving 208 bytes per binder_thread.

Signed-off-by: Peikan Tsai <peikantsai@...il.com>
---
 drivers/android/binder.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
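
Note (illustrative only, not part of the patch): a rough before/after sketch
of the two allocation paths, using the sizes quoted in the changelog. kzalloc()
rounds the request up to the next generic kmalloc slab, so the 304-byte object
is carved out of kmalloc-512; a dedicated cache is object-sized.

	struct binder_thread *t;
	static struct kmem_cache *cachep;

	/* before: generic slab, each object consumes a 512-byte slot */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	kfree(t);

	/* after: dedicated cache sized to sizeof(struct binder_thread) */
	cachep = kmem_cache_create("binder_thread",
				   sizeof(struct binder_thread), 0, 0, NULL);
	t = kmem_cache_zalloc(cachep, GFP_KERNEL);
	kmem_cache_free(cachep, t);
	kmem_cache_destroy(cachep);	/* on teardown / error unwind */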

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index dc1c83eafc22..043e0ebd0fe7 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -87,6 +87,8 @@ static struct dentry *binder_debugfs_dir_entry_root;
 static struct dentry *binder_debugfs_dir_entry_proc;
 static atomic_t binder_last_id;

+static struct kmem_cache *binder_thread_cachep;
+
 static int proc_show(struct seq_file *m, void *unused);
 DEFINE_SHOW_ATTRIBUTE(proc);

@@ -4696,14 +4698,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
 	thread = binder_get_thread_ilocked(proc, NULL);
 	binder_inner_proc_unlock(proc);
 	if (!thread) {
-		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+		new_thread = kmem_cache_zalloc(binder_thread_cachep,
+					       GFP_KERNEL);
 		if (new_thread == NULL)
 			return NULL;
 		binder_inner_proc_lock(proc);
 		thread = binder_get_thread_ilocked(proc, new_thread);
 		binder_inner_proc_unlock(proc);
 		if (thread != new_thread)
-			kfree(new_thread);
+			kmem_cache_free(binder_thread_cachep, new_thread);
 	}
 	return thread;
 }
@@ -4723,7 +4726,7 @@ static void binder_free_thread(struct binder_thread *thread)
 	BUG_ON(!list_empty(&thread->todo));
 	binder_stats_deleted(BINDER_STAT_THREAD);
 	binder_proc_dec_tmpref(thread->proc);
-	kfree(thread);
+	kmem_cache_free(binder_thread_cachep, thread);
 }

 static int binder_thread_release(struct binder_proc *proc,
@@ -6095,6 +6098,12 @@ static int __init binder_init(void)
 	if (ret)
 		return ret;

+	binder_thread_cachep = kmem_cache_create("binder_thread",
+						 sizeof(struct binder_thread),
+						 0, 0, NULL);
+	if (!binder_thread_cachep)
+		return -ENOMEM;
+
 	atomic_set(&binder_transaction_log.cur, ~0U);
 	atomic_set(&binder_transaction_log_failed.cur, ~0U);

@@ -6167,6 +6176,7 @@ static int __init binder_init(void)

 err_alloc_device_names_failed:
 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+	kmem_cache_destroy(binder_thread_cachep);

 	return ret;
 }
--
2.17.1
