[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20190829134329.GA63638@google.com>
Date: Thu, 29 Aug 2019 09:43:29 -0400
From: Joel Fernandes <joel@...lfernandes.org>
To: Peikan Tsai <peikantsai@...il.com>
Cc: gregkh@...uxfoundation.org, arve@...roid.com, tkjos@...roid.com,
maco@...roid.com, christian@...uner.io, devel@...verdev.osuosl.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] binder: Use kmem_cache for binder_thread
On Thu, Aug 29, 2019 at 01:49:53PM +0800, Peikan Tsai wrote:
> Hi,
>
> The allocated size for each binder_thread via kzalloc is 512 bytes,
> even though the size of struct binder_thread is fixed at only 304
> bytes. Creating a dedicated kmem_cache for binder_thread therefore
> saves 208 bytes per binder_thread.
Awesome change and observation!!!
Reviewed-by: Joel Fernandes (Google) <joel@...lfernandes.org>
(Another thought: how did you discover this? Are you using some tool to
look into slab fragmentation?)
thanks,
- Joel
> Signed-off-by: Peikan Tsai <peikantsai@...il.com>
> ---
> drivers/android/binder.c | 16 +++++++++++++---
> 1 file changed, 13 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/android/binder.c b/drivers/android/binder.c
> index dc1c83eafc22..043e0ebd0fe7 100644
> --- a/drivers/android/binder.c
> +++ b/drivers/android/binder.c
> @@ -87,6 +87,8 @@ static struct dentry *binder_debugfs_dir_entry_root;
> static struct dentry *binder_debugfs_dir_entry_proc;
> static atomic_t binder_last_id;
>
> +static struct kmem_cache *binder_thread_cachep;
> +
> static int proc_show(struct seq_file *m, void *unused);
> DEFINE_SHOW_ATTRIBUTE(proc);
>
> @@ -4696,14 +4698,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
> thread = binder_get_thread_ilocked(proc, NULL);
> binder_inner_proc_unlock(proc);
> if (!thread) {
> - new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
> + new_thread = kmem_cache_zalloc(binder_thread_cachep,
> + GFP_KERNEL);
> if (new_thread == NULL)
> return NULL;
> binder_inner_proc_lock(proc);
> thread = binder_get_thread_ilocked(proc, new_thread);
> binder_inner_proc_unlock(proc);
> if (thread != new_thread)
> - kfree(new_thread);
> + kmem_cache_free(binder_thread_cachep, new_thread);
> }
> return thread;
> }
> @@ -4723,7 +4726,7 @@ static void binder_free_thread(struct binder_thread *thread)
> BUG_ON(!list_empty(&thread->todo));
> binder_stats_deleted(BINDER_STAT_THREAD);
> binder_proc_dec_tmpref(thread->proc);
> - kfree(thread);
> + kmem_cache_free(binder_thread_cachep, thread);
> }
>
> static int binder_thread_release(struct binder_proc *proc,
> @@ -6095,6 +6098,12 @@ static int __init binder_init(void)
> if (ret)
> return ret;
>
> + binder_thread_cachep = kmem_cache_create("binder_thread",
> + sizeof(struct binder_thread),
> + 0, 0, NULL);
> + if (!binder_thread_cachep)
> + return -ENOMEM;
> +
> atomic_set(&binder_transaction_log.cur, ~0U);
> atomic_set(&binder_transaction_log_failed.cur, ~0U);
>
> @@ -6167,6 +6176,7 @@ static int __init binder_init(void)
>
> err_alloc_device_names_failed:
> debugfs_remove_recursive(binder_debugfs_dir_entry_root);
> + kmem_cache_destroy(binder_thread_cachep);
>
> return ret;
> }
> --
> 2.17.1
>
Powered by blists - more mailing lists