Message-Id: <20190520141450.7575-4-longman@redhat.com>
Date: Mon, 20 May 2019 10:14:48 -0400
From: Waiman Long <longman@...hat.com>
To: Thomas Gleixner <tglx@...utronix.de>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-kernel@...r.kernel.org,
Yang Shi <yang.shi@...ux.alibaba.com>,
"Joel Fernandes (Google)" <joel@...lfernandes.org>,
Qian Cai <cai@....us>, Zhong Jiang <zhongjiang@...wei.com>,
Waiman Long <longman@...hat.com>
Subject: [PATCH 3/5] debugobjects: Reduce number of pool_lock acquisitions in fill_pool()

In fill_pool(), the pool_lock is acquired and released once per debug
object. If many objects have to be filled, these repeated lock and
unlock operations add unnecessary overhead. To reduce this overhead,
batch the allocations and add 4 objects to the pool per lock/unlock
sequence.

Signed-off-by: Waiman Long <longman@...hat.com>
---
lib/debugobjects.c | 24 ++++++++++++++++--------
1 file changed, 16 insertions(+), 8 deletions(-)
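
For reference, the batching idea can be sketched as a stand-alone
user-space program. This is only an illustration, not part of the
patch: demo_obj, demo_pool, demo_pool_lock and DEMO_BATCH_SIZE are
made-up stand-ins for debug_obj, obj_pool, pool_lock and
ODEBUG_BATCH_SIZE, and a pthread mutex stands in for the raw spinlock.

/* Illustrative sketch of batched pool refill (not kernel code). */
#include <pthread.h>
#include <stdlib.h>

#define DEMO_BATCH_SIZE	4

struct demo_obj {
	struct demo_obj *next;
};

static struct demo_obj *demo_pool;	/* singly linked free-object list */
static int demo_pool_free;		/* number of objects in the pool */
static pthread_mutex_t demo_pool_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_fill_pool(int min_level)
{
	while (demo_pool_free < min_level) {
		struct demo_obj *new[DEMO_BATCH_SIZE];
		int cnt;

		/* Allocate a batch of objects without holding the lock. */
		for (cnt = 0; cnt < DEMO_BATCH_SIZE; cnt++) {
			new[cnt] = calloc(1, sizeof(*new[cnt]));
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		/* A single lock/unlock pair covers the whole batch. */
		pthread_mutex_lock(&demo_pool_lock);
		while (cnt) {
			new[--cnt]->next = demo_pool;
			demo_pool = new[cnt];
			demo_pool_free++;
		}
		pthread_mutex_unlock(&demo_pool_lock);
	}
}

Compared with taking the lock once per object, the refill path above
acquires the lock roughly DEMO_BATCH_SIZE times less often.
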
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index c30c9d308e9f..7573c736da7d 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -120,7 +120,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 static void fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-	struct debug_obj *new, *obj;
+	struct debug_obj *obj;
 	unsigned long flags;
 
 	if (likely(obj_pool_free >= debug_objects_pool_min_level))
@@ -136,7 +136,7 @@ static void fill_pool(void)
 		 * Recheck with the lock held as the worker thread might have
 		 * won the race and freed the global free list already.
 		 */
-		if (obj_nr_tofree) {
+		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 			hlist_del(&obj->node);
 			obj_nr_tofree--;
@@ -150,15 +150,23 @@ static void fill_pool(void)
 		return;
 
 	while (obj_pool_free < debug_objects_pool_min_level) {
+		struct debug_obj *new[ODEBUG_BATCH_SIZE];
+		int cnt;
 
-		new = kmem_cache_zalloc(obj_cache, gfp);
-		if (!new)
+		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
+			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
+			if (!new[cnt])
+				break;
+		}
+		if (!cnt)
 			return;
 
 		raw_spin_lock_irqsave(&pool_lock, flags);
-		hlist_add_head(&new->node, &obj_pool);
-		debug_objects_allocated++;
-		obj_pool_free++;
+		while (cnt) {
+			hlist_add_head(&new[--cnt]->node, &obj_pool);
+			debug_objects_allocated++;
+			obj_pool_free++;
+		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
 }
@@ -280,7 +288,7 @@ static void free_obj_work(struct work_struct *work)
 	/*
 	 * The objs on the pool list might be allocated before the work is
 	 * run, so recheck if pool list it full or not, if not fill pool
-	 * list from the global free list
+	 * list from the global free list.
 	 */
 	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
 		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
--
2.18.1