Message-ID: <20240902072136.578720-4-alexs@kernel.org>
Date: Mon, 2 Sep 2024 15:21:14 +0800
From: alexs@...nel.org
To: Vitaly Wool <vitaly.wool@...sulko.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
minchan@...nel.org,
willy@...radead.org,
senozhatsky@...omium.org,
david@...hat.com,
42.hyeyoo@...il.com,
Yosry Ahmed <yosryahmed@...gle.com>,
nphamcs@...il.com
Cc: Alex Shi <alexs@...nel.org>
Subject: [PATCH v7 03/21] mm/zsmalloc: convert __zs_map_object/__zs_unmap_object to use zpdesc

From: Hyeonggon Yoo <42.hyeyoo@...il.com>

These two functions take a pointer to an array of struct page. Introduce
zpdesc_kmap_atomic() and make __zs_{map,unmap}_object() take a pointer
to an array of zpdesc instead of page.

Add silly type casts when calling them; the casts will be removed later
in the series.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@...il.com>
Signed-off-by: Alex Shi <alexs@...nel.org>
---
mm/zsmalloc.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
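
Note: the (struct zpdesc **) casts in this patch are safe only because
struct zpdesc is a memory overlay of struct page, so a pointer to one
may be reinterpreted as a pointer to the other. As a minimal sketch of
why the cast is free at runtime, assuming the zpdesc_page() helper from
patch 01 follows the ptdesc_page() pattern from include/linux/mm_types.h
(the exact definition lives in mm/zpdesc.h and may differ):

	/* Sketch only, not the exact series code: converting between
	 * the two pointer types is a pure type-level cast. */
	#define zpdesc_page(zp) (_Generic((zp),				\
		const struct zpdesc *:	(const struct page *)(zp),	\
		struct zpdesc *:	(struct page *)(zp)))

	/* Patch 01 enforces the overlay at build time, roughly: */
	static_assert(sizeof(struct zpdesc) <= sizeof(struct page),
		      "zpdesc must not outgrow struct page");

Once every caller builds zpdesc arrays directly, the casts go away and
the compiler enforces the page/zpdesc type boundary.
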
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 9806b1629880..1b49f74fd728 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -243,6 +243,11 @@ struct zs_pool {
atomic_t compaction_in_progress;
};

+static inline void *zpdesc_kmap_atomic(struct zpdesc *zpdesc)
+{
+ return kmap_atomic(zpdesc_page(zpdesc));
+}
+
struct zspage {
struct {
unsigned int huge:HUGE_BITS;
@@ -1054,7 +1059,7 @@ static inline void __zs_cpu_down(struct mapping_area *area)
}

static void *__zs_map_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
+ struct zpdesc *zpdescs[2], int off, int size)
{
int sizes[2];
void *addr;
@@ -1071,10 +1076,10 @@ static void *__zs_map_object(struct mapping_area *area,
sizes[1] = size - sizes[0];

/* copy object to per-cpu buffer */
- addr = kmap_atomic(pages[0]);
+ addr = zpdesc_kmap_atomic(zpdescs[0]);
memcpy(buf, addr + off, sizes[0]);
kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
+ addr = zpdesc_kmap_atomic(zpdescs[1]);
memcpy(buf + sizes[0], addr, sizes[1]);
kunmap_atomic(addr);
out:
@@ -1082,7 +1087,7 @@ static void *__zs_map_object(struct mapping_area *area,
}

static void __zs_unmap_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
+ struct zpdesc *zpdescs[2], int off, int size)
{
int sizes[2];
void *addr;
@@ -1101,10 +1106,10 @@ static void __zs_unmap_object(struct mapping_area *area,
sizes[1] = size - sizes[0];

/* copy per-cpu buffer to object */
- addr = kmap_atomic(pages[0]);
+ addr = zpdesc_kmap_atomic(zpdescs[0]);
memcpy(addr + off, buf, sizes[0]);
kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
+ addr = zpdesc_kmap_atomic(zpdescs[1]);
memcpy(addr, buf + sizes[0], sizes[1]);
kunmap_atomic(addr);

@@ -1245,7 +1250,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
pages[1] = get_next_page(page);
BUG_ON(!pages[1]);

- ret = __zs_map_object(area, pages, off, class->size);
+ ret = __zs_map_object(area, (struct zpdesc **)pages, off, class->size);
out:
if (likely(!ZsHugePage(zspage)))
ret += ZS_HANDLE_SIZE;
@@ -1280,7 +1285,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
pages[1] = get_next_page(page);
BUG_ON(!pages[1]);

- __zs_unmap_object(area, pages, off, class->size);
+ __zs_unmap_object(area, (struct zpdesc **)pages, off, class->size);
}
local_unlock(&zs_map_area.lock);

--
2.46.0