Message-ID: <3xsreqvvclcuqyllgdz5avxwdvhc3rqri4565xj2hbwk6r6uol@6mnhvdgaxfrl>
Date: Wed, 7 Jan 2026 12:03:37 +0900
From: Sergey Senozhatsky <senozhatsky@...omium.org>
To: Yosry Ahmed <yosry.ahmed@...ux.dev>
Cc: Sergey Senozhatsky <senozhatsky@...omium.org>,
	Andrew Morton <akpm@...ux-foundation.org>, Nhat Pham <nphamcs@...il.com>,
	Minchan Kim <minchan@...nel.org>, Johannes Weiner <hannes@...xchg.org>,
	Brian Geffon <bgeffon@...gle.com>, linux-kernel@...r.kernel.org,
	linux-mm@...ck.org
Subject: Re: [PATCH] zsmalloc: use actual object size to detect spans

On (26/01/07 02:10), Yosry Ahmed wrote:
> I think the changes need to be shuffled around to avoid this, or just
> have a combined patch, which would be less pretty.

Dunno. Do we want to completely separate HugePage handling and turn it
into a fast path? With that, things seem to start working (diff below).
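
To spell out the layout assumption the fast path relies on (my reading
of the code, not something the diff states): huge size-class objects
fill a whole zspage page and keep their handle in the zpdesc instead of
inlined in front of the payload, so they never cross a page boundary
and can be kmap'ed directly. Only normal classes prepend ZS_HANDLE_SIZE
bytes, and only they can span two pages. A standalone sketch of the
resulting span check (illustrative only; the constants and the numbers
in main() are made up, not taken from zsmalloc):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE	4096
#define ZS_HANDLE_SIZE	8	/* one unsigned long on 64-bit */

/* mirrors the normal-class (non-huge) check in zs_obj_read_begin() */
static bool object_spans_pages(size_t class_size, unsigned int obj_idx,
			       size_t mem_len)
{
	/* offset of the object within its page */
	size_t off = (class_size * obj_idx) % PAGE_SIZE;

	/* normal classes have the handle inlined in front of the payload */
	off += ZS_HANDLE_SIZE;
	return off + mem_len > PAGE_SIZE;
}

int main(void)
{
	/* class size 3264, object #5 starts at page offset 4032: spans */
	printf("spans: %d\n", object_spans_pages(3264, 5, 3000));
	/* object #0 starts at offset 0 and trivially fits */
	printf("spans: %d\n", object_spans_pages(3264, 0, 3000));
	return 0;
}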
---
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index cb449acc8809..9b067853b6c2 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1077,6 +1077,7 @@ void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle,
 	unsigned long obj, off;
 	unsigned int obj_idx;
 	struct size_class *class;
+	size_t sizes[2];
 	void *addr;
 
 	/* Guarantee we can get zspage from handle safely */
@@ -1089,35 +1090,27 @@ void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle,
 	zspage_read_lock(zspage);
 	read_unlock(&pool->lock);
 
+	/* Fast path for huge size class */
+	if (ZsHugePage(zspage))
+		return kmap_local_zpdesc(zpdesc);
+
 	class = zspage_class(pool, zspage);
 	off = offset_in_page(class->size * obj_idx);
 
-	/* Normal classes have inlined handle */
-	if (!ZsHugePage(zspage))
-		mem_len += ZS_HANDLE_SIZE;
-
+	off += ZS_HANDLE_SIZE;
 	if (off + mem_len <= PAGE_SIZE) {
 		/* this object is contained entirely within a page */
-		addr = kmap_local_zpdesc(zpdesc);
-		addr += off;
-	} else {
-		size_t sizes[2];
-
-		/* this object spans two pages */
-		sizes[0] = PAGE_SIZE - off;
-		sizes[1] = mem_len - sizes[0];
-		addr = local_copy;
-
-		memcpy_from_page(addr, zpdesc_page(zpdesc),
-				 off, sizes[0]);
-		zpdesc = get_next_zpdesc(zpdesc);
-		memcpy_from_page(addr + sizes[0],
-				 zpdesc_page(zpdesc),
-				 0, sizes[1]);
+		return kmap_local_zpdesc(zpdesc) + off;
 	}
 
-	if (!ZsHugePage(zspage))
-		addr += ZS_HANDLE_SIZE;
+	/* this object spans two pages */
+	sizes[0] = PAGE_SIZE - off;
+	sizes[1] = mem_len - sizes[0];
+	addr = local_copy;
+
+	memcpy_from_page(addr, zpdesc_page(zpdesc), off, sizes[0]);
+	zpdesc = get_next_zpdesc(zpdesc);
+	memcpy_from_page(addr + sizes[0], zpdesc_page(zpdesc), 0, sizes[1]);
 
 	return addr;
 }
@@ -1135,20 +1128,21 @@ void zs_obj_read_end(struct zs_pool *pool, unsigned long handle,
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &zpdesc, &obj_idx);
 	zspage = get_zspage(zpdesc);
+
+	/* Fast path for huge size class */
+	if (ZsHugePage(zspage)) {
+		kunmap_local(handle_mem);
+		goto unlock;
+	}
+
 	class = zspage_class(pool, zspage);
 	off = offset_in_page(class->size * obj_idx);
 
-	/* Normal classes have inlined handle */
-	if (!ZsHugePage(zspage))
-		mem_len += ZS_HANDLE_SIZE;
-
-	if (off + mem_len <= PAGE_SIZE) {
-		if (!ZsHugePage(zspage))
-			off += ZS_HANDLE_SIZE;
-		handle_mem -= off;
+	off += ZS_HANDLE_SIZE;
+	if (off + mem_len <= PAGE_SIZE)
 		kunmap_local(handle_mem);
-	}
 
+unlock:
 	zspage_read_unlock(zspage);
 }
 EXPORT_SYMBOL_GPL(zs_obj_read_end);
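
For completeness, the caller side is unchanged by this shuffle. A
fragment sketching the pairing, modeled on how zram drives this API
(the position of the mem_len argument is my assumption, following the
patch under discussion; local_copy must still be large enough to hold
a whole object, since the spanning case falls back to memcpy):

	void *mem;

	mem = zs_obj_read_begin(pool, handle, local_copy, mem_len);
	/* consume mem_len bytes of payload at mem */
	zs_obj_read_end(pool, handle, mem, mem_len);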