Message-Id: <20240627075959.611783-2-chengming.zhou@linux.dev>
Date: Thu, 27 Jun 2024 15:59:59 +0800
From: Chengming Zhou <chengming.zhou@linux.dev>
To: minchan@kernel.org,
	senozhatsky@chromium.org,
	akpm@linux-foundation.org
Cc: linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,
	chengming.zhou@linux.dev
Subject: [PATCH 2/2] mm/zsmalloc: move record_obj() into obj_malloc()
We always call record_obj() after obj_malloc() to make the handle point
to the allocated object, so simplify the code by moving record_obj()
into obj_malloc(). There should be no functional change.
Signed-off-by: Chengming Zhou <chengming.zhou@...ux.dev>
---
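For reviewers who want a feel for the calling-convention change, here is a
minimal userspace sketch (not part of the patch). The bodies of
obj_malloc_old()/obj_malloc_new() below are illustrative stand-ins only;
the real obj_malloc() and record_obj() live in mm/zsmalloc.c and do much
more.

#include <stdio.h>

#define OBJ_ALLOCATED_TAG	1UL

/* record_obj() makes the handle slot point to the object. */
static void record_obj(unsigned long handle, unsigned long obj)
{
	*(unsigned long *)handle = obj;
}

/*
 * Before: obj_malloc() returned obj and every caller had to pair it
 * with record_obj(handle, obj).
 */
static unsigned long obj_malloc_old(unsigned long handle)
{
	(void)handle;	/* the real code stores handle | OBJ_ALLOCATED_TAG */
	return 0x1000;	/* pretend freshly allocated object */
}

/*
 * After: obj_malloc() records the object itself, and OBJ_ALLOCATED_TAG
 * is OR-ed in where the handle is written into the chunk header,
 * instead of being folded into @handle up front.
 */
static void obj_malloc_new(unsigned long handle)
{
	unsigned long obj = 0x1000;	/* pretend freshly allocated object */
	/* ... link->handle = handle | OBJ_ALLOCATED_TAG; ... */
	record_obj(handle, obj);
}

int main(void)
{
	unsigned long slot = 0;
	unsigned long handle = (unsigned long)&slot;

	record_obj(handle, obj_malloc_old(handle));	/* old convention */
	obj_malloc_new(handle);				/* new convention */
	printf("handle now points to %#lx\n", slot);
	return 0;
}
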
mm/zsmalloc.c | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 7fc25fa4e6b3..c2f4e62ffb46 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1306,7 +1306,6 @@ static unsigned long obj_malloc(struct zs_pool *pool,
void *vaddr;
class = pool->size_class[zspage->class];
- handle |= OBJ_ALLOCATED_TAG;
obj = get_freeobj(zspage);
offset = obj * class->size;
@@ -1322,15 +1321,16 @@ static unsigned long obj_malloc(struct zs_pool *pool,
set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
if (likely(!ZsHugePage(zspage)))
/* record handle in the header of allocated chunk */
- link->handle = handle;
+ link->handle = handle | OBJ_ALLOCATED_TAG;
else
/* record handle to page->index */
- zspage->first_page->index = handle;
+ zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
kunmap_atomic(vaddr);
mod_zspage_inuse(zspage, 1);
obj = location_to_obj(m_page, obj);
+ record_obj(handle, obj);
return obj;
}
@@ -1348,7 +1348,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
*/
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
- unsigned long handle, obj;
+ unsigned long handle;
struct size_class *class;
int newfg;
struct zspage *zspage;
@@ -1371,10 +1371,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
spin_lock(&class->lock);
zspage = find_get_zspage(class);
if (likely(zspage)) {
- obj = obj_malloc(pool, zspage, handle);
+ obj_malloc(pool, zspage, handle);
/* Now move the zspage to another fullness group, if required */
fix_fullness_group(class, zspage);
- record_obj(handle, obj);
class_stat_inc(class, ZS_OBJS_INUSE, 1);
goto out;
@@ -1389,10 +1388,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
}
spin_lock(&class->lock);
- obj = obj_malloc(pool, zspage, handle);
+ obj_malloc(pool, zspage, handle);
newfg = get_fullness_group(class, zspage);
insert_zspage(class, zspage, newfg);
- record_obj(handle, obj);
atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
class_stat_inc(class, ZS_OBJS_INUSE, 1);
@@ -1591,7 +1589,6 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
free_obj = obj_malloc(pool, dst_zspage, handle);
zs_object_copy(class, free_obj, used_obj);
obj_idx++;
- record_obj(handle, free_obj);
obj_free(class->size, used_obj);
/* Stop if there is no more space */
--
2.20.1