Message-ID: <20230709025817.3842416-1-senozhatsky@chromium.org>
Date: Sun, 9 Jul 2023 11:56:26 +0900
From: Sergey Senozhatsky <senozhatsky@...omium.org>
To: Andrew Morton <akpm@...ux-foundation.org>,
Minchan Kim <minchan@...nel.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Sergey Senozhatsky <senozhatsky@...omium.org>
Subject: [PATCH] zsmalloc: remove obj_tagged()

obj_tagged() is not needed at this point, because objects can
only have one tag: OBJ_ALLOCATED_TAG. We needed obj_tagged()
for the zsmalloc LRU implementation, which has now been
removed. Simplify zsmalloc code and revert to the implementation
that was in place before the zsmalloc LRU series.

Signed-off-by: Sergey Senozhatsky <senozhatsky@...omium.org>
---
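Not part of the patch, just an illustration for reviewers of why the
generic helper can go: with the LRU series removed, the only tag that
can ever be set in a stored handle is OBJ_ALLOCATED_TAG (bit 0), so the
check can test and strip that single bit directly. The snippet below is
a simplified userspace sketch of that scheme, not kernel code;
tag_handle() and the reduced obj_allocated() signature here are
illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define OBJ_ALLOCATED_TAG	1UL
#define OBJ_TAG_MASK		OBJ_ALLOCATED_TAG

/* An allocated slot stores its handle with the tag bit set. */
static unsigned long tag_handle(unsigned long handle)
{
	return handle | OBJ_ALLOCATED_TAG;
}

/*
 * With a single possible tag there is no need for a generic
 * obj_tagged(..., tag): test the one bit and clear it before
 * returning the handle.
 */
static bool obj_allocated(unsigned long stored, unsigned long *phandle)
{
	if (!(stored & OBJ_ALLOCATED_TAG))
		return false;

	*phandle = stored & ~OBJ_TAG_MASK;
	return true;
}

int main(void)
{
	unsigned long handle = 0xabcd00;	/* fake handle, tag bits clear */
	unsigned long out;

	if (obj_allocated(tag_handle(handle), &out))
		printf("allocated, handle=%#lx\n", out);	/* 0xabcd00 */

	if (!obj_allocated(0, &out))
		printf("free slot\n");

	return 0;
}
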
 mm/zsmalloc.c | 29 +++++++----------------------
 1 file changed, 7 insertions(+), 22 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 84beadc088b8..32f5bc4074df 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -795,8 +795,8 @@ static unsigned long handle_to_obj(unsigned long handle)
 	return *(unsigned long *)handle;
 }
 
-static bool obj_tagged(struct page *page, void *obj, unsigned long *phandle,
-		int tag)
+static inline bool obj_allocated(struct page *page, void *obj,
+				 unsigned long *phandle)
 {
 	unsigned long handle;
 	struct zspage *zspage = get_zspage(page);
@@ -807,7 +807,7 @@ static bool obj_tagged(struct page *page, void *obj, unsigned long *phandle,
 	} else
 		handle = *(unsigned long *)obj;
 
-	if (!(handle & tag))
+	if (!(handle & OBJ_ALLOCATED_TAG))
 		return false;
 
 	/* Clear all tags before returning the handle */
@@ -815,11 +815,6 @@ static bool obj_tagged(struct page *page, void *obj, unsigned long *phandle,
 	return true;
 }
 
-static inline bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
-{
-	return obj_tagged(page, obj, phandle, OBJ_ALLOCATED_TAG);
-}
-
 static void reset_page(struct page *page)
 {
 	__ClearPageMovable(page);
@@ -1551,11 +1546,11 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
 }
 
 /*
- * Find object with a certain tag in zspage from index object and
+ * Find alloced object in zspage from index object and
  * return handle.
  */
-static unsigned long find_tagged_obj(struct size_class *class,
-					struct page *page, int *obj_idx, int tag)
+static unsigned long find_alloced_obj(struct size_class *class,
+					struct page *page, int *obj_idx)
 {
 	unsigned int offset;
 	int index = *obj_idx;
@@ -1566,7 +1561,7 @@ static unsigned long find_tagged_obj(struct size_class *class,
 	offset += class->size * index;
 
 	while (offset < PAGE_SIZE) {
-		if (obj_tagged(page, addr + offset, &handle, tag))
+		if (obj_allocated(page, addr + offset, &handle))
 			break;
 
 		offset += class->size;
@@ -1580,16 +1575,6 @@ static unsigned long find_tagged_obj(struct size_class *class,
 	return handle;
 }
 
-/*
- * Find alloced object in zspage from index object and
- * return handle.
- */
-static unsigned long find_alloced_obj(struct size_class *class,
-					struct page *page, int *obj_idx)
-{
-	return find_tagged_obj(class, page, obj_idx, OBJ_ALLOCATED_TAG);
-}
-
 static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
 			   struct zspage *dst_zspage)
 {
--
2.41.0.255.g8b1d071c50-goog