[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230713042037.980211-11-42.hyeyoo@gmail.com>
Date: Thu, 13 Jul 2023 13:20:25 +0900
From: Hyeonggon Yoo <42.hyeyoo@...il.com>
To: Minchan Kim <minchan@...nel.org>,
Sergey Senozhatsky <senozhatsky@...omium.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, Matthew Wilcox <willy@...radead.org>,
Mike Rapoport <rppt@...nel.org>,
Hyeonggon Yoo <42.hyeyoo@...il.com>
Subject: [RFC PATCH v2 10/21] mm/zsmalloc: convert obj_allocated() and related helpers to use zsdesc
Convert obj_allocated() and related helpers to take zsdesc. Also make
its callers convert (struct page *) to (struct zsdesc *) when calling them.
The users will be converted gradually as there are many.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@...il.com>
---
mm/zsmalloc.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 48bfdbbe3b1e..efd7a0f78962 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -942,15 +942,15 @@ static unsigned long handle_to_obj(unsigned long handle)
return *(unsigned long *)handle;
}
-static inline bool obj_allocated(struct page *page, void *obj,
+static inline bool obj_allocated(struct zsdesc *zsdesc, void *obj,
unsigned long *phandle)
{
unsigned long handle;
- struct zspage *zspage = get_zspage(page);
+ struct zspage *zspage = get_zspage(zsdesc_page(zsdesc));
if (unlikely(ZsHugePage(zspage))) {
- VM_BUG_ON_PAGE(!is_first_page(page), page);
- handle = page->index;
+ VM_BUG_ON_PAGE(!is_first_zsdesc(zsdesc), zsdesc_page(zsdesc));
+ handle = zsdesc->handle;
} else
handle = *(unsigned long *)obj;
@@ -1698,18 +1698,18 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
* return handle.
*/
static unsigned long find_alloced_obj(struct size_class *class,
- struct page *page, int *obj_idx)
+ struct zsdesc *zsdesc, int *obj_idx)
{
unsigned int offset;
int index = *obj_idx;
unsigned long handle = 0;
- void *addr = kmap_atomic(page);
+ void *addr = zsdesc_kmap_atomic(zsdesc);
- offset = get_first_obj_offset(page);
+ offset = get_first_obj_offset(zsdesc_page(zsdesc));
offset += class->size * index;
while (offset < PAGE_SIZE) {
- if (obj_allocated(page, addr + offset, &handle))
+ if (obj_allocated(zsdesc, addr + offset, &handle))
break;
offset += class->size;
@@ -1733,7 +1733,7 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
struct size_class *class = pool->size_class[src_zspage->class];
while (1) {
- handle = find_alloced_obj(class, s_page, &obj_idx);
+ handle = find_alloced_obj(class, page_zsdesc(s_page), &obj_idx);
if (!handle) {
s_page = get_next_page(s_page);
if (!s_page)
@@ -1990,7 +1990,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
addr += class->size) {
- if (obj_allocated(page, addr, &handle)) {
+ if (obj_allocated(page_zsdesc(page), addr, &handle)) {
old_obj = handle_to_obj(handle);
obj_to_location(old_obj, &dummy, &obj_idx);
--
2.41.0
Powered by blists - more mailing lists