Message-ID: <20240806022311.3924442-5-alexs@kernel.org>
Date: Tue, 6 Aug 2024 10:22:51 +0800
From: alexs@...nel.org
To: Vitaly Wool <vitaly.wool@...sulko.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
minchan@...nel.org,
willy@...radead.org,
senozhatsky@...omium.org,
david@...hat.com,
42.hyeyoo@...il.com,
Yosry Ahmed <yosryahmed@...gle.com>,
nphamcs@...il.com
Cc: Alex Shi <alexs@...nel.org>
Subject: [PATCH v5 04/21] mm/zsmalloc: add and use pfn/zpdesc seeking funcs
From: Hyeonggon Yoo <42.hyeyoo@...il.com>
Add the zpdesc_pfn()/pfn_zpdesc() conversion helpers, convert
obj_to_location() to take a zpdesc, and convert its users to use
zpdesc as well.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@...il.com>
Signed-off-by: Alex Shi <alexs@...nel.org>
---
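For reviewers, a minimal sketch (illustration only, not part of the
diff) of how the new helpers compose once this patch is applied.
obj_to_zpdesc() and zpdesc_pfn_roundtrips() are hypothetical names
made up for this example; pfn_zpdesc()/zpdesc_pfn() come from the
mm/zpdesc.h hunk below, and OBJ_INDEX_BITS is the existing shift
already used in mm/zsmalloc.c:

/* Hypothetical example, not in this patch. */
static struct zpdesc *obj_to_zpdesc(unsigned long obj)
{
	/* the high bits of an encoded object value hold the pfn */
	return pfn_zpdesc(obj >> OBJ_INDEX_BITS);
}

static bool zpdesc_pfn_roundtrips(struct zpdesc *zpdesc)
{
	/* zpdesc <-> pfn conversion goes through struct page underneath */
	return pfn_zpdesc(zpdesc_pfn(zpdesc)) == zpdesc;
}

The low OBJ_INDEX_BITS of the encoded value still carry the object
index, which obj_to_location() keeps returning through *obj_idx.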
mm/zpdesc.h | 9 +++++++
mm/zsmalloc.c | 75 ++++++++++++++++++++++++++-------------------------
2 files changed, 47 insertions(+), 37 deletions(-)
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 421eeeef6f8f..2101de23d16d 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -98,4 +98,13 @@ static inline void zpdesc_put(struct zpdesc *zpdesc)
folio_put(zpdesc_folio(zpdesc));
}
+static inline unsigned long zpdesc_pfn(struct zpdesc *zpdesc)
+{
+ return page_to_pfn(zpdesc_page(zpdesc));
+}
+
+static inline struct zpdesc *pfn_zpdesc(unsigned long pfn)
+{
+ return page_zpdesc(pfn_to_page(pfn));
+}
#endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b9b5e2824f2c..384a5ba49788 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -773,15 +773,15 @@ static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc)
}
/**
- * obj_to_location - get (<page>, <obj_idx>) from encoded object value
+ * obj_to_location - get (<zpdesc>, <obj_idx>) from encoded object value
* @obj: the encoded object value
- * @page: page object resides in zspage
+ * @zpdesc: zpdesc in which the object resides
* @obj_idx: object index
*/
-static void obj_to_location(unsigned long obj, struct page **page,
+static void obj_to_location(unsigned long obj, struct zpdesc **zpdesc,
unsigned int *obj_idx)
{
- *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+ *zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS);
*obj_idx = (obj & OBJ_INDEX_MASK);
}
@@ -1208,13 +1208,13 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm)
{
struct zspage *zspage;
- struct page *page;
+ struct zpdesc *zpdesc;
unsigned long obj, off;
unsigned int obj_idx;
struct size_class *class;
struct mapping_area *area;
- struct page *pages[2];
+ struct zpdesc *zpdescs[2];
void *ret;
/*
@@ -1227,8 +1227,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
/* It guarantees it can get zspage from handle safely */
read_lock(&pool->migrate_lock);
obj = handle_to_obj(handle);
- obj_to_location(obj, &page, &obj_idx);
- zspage = get_zspage(page);
+ obj_to_location(obj, &zpdesc, &obj_idx);
+ zspage = get_zspage(zpdesc_page(zpdesc));
/*
* migration cannot move any zpages in this zspage. Here, class->lock
@@ -1247,17 +1247,17 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
- area->vm_addr = kmap_atomic(page);
+ area->vm_addr = zpdesc_kmap_atomic(zpdesc);
ret = area->vm_addr + off;
goto out;
}
/* this object spans two pages */
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
+ zpdescs[0] = zpdesc;
+ zpdescs[1] = get_next_zpdesc(zpdesc);
+ BUG_ON(!zpdescs[1]);
- ret = __zs_map_object(area, (struct zpdesc **)pages, off, class->size);
+ ret = __zs_map_object(area, zpdescs, off, class->size);
out:
if (likely(!ZsHugePage(zspage)))
ret += ZS_HANDLE_SIZE;
@@ -1269,7 +1269,7 @@ EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
struct zspage *zspage;
- struct page *page;
+ struct zpdesc *zpdesc;
unsigned long obj, off;
unsigned int obj_idx;
@@ -1277,8 +1277,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
struct mapping_area *area;
obj = handle_to_obj(handle);
- obj_to_location(obj, &page, &obj_idx);
- zspage = get_zspage(page);
+ obj_to_location(obj, &zpdesc, &obj_idx);
+ zspage = get_zspage(zpdesc_page(zpdesc));
class = zspage_class(pool, zspage);
off = offset_in_page(class->size * obj_idx);
@@ -1286,13 +1286,13 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
if (off + class->size <= PAGE_SIZE)
kunmap_atomic(area->vm_addr);
else {
- struct page *pages[2];
+ struct zpdesc *zpdescs[2];
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
+ zpdescs[0] = zpdesc;
+ zpdescs[1] = get_next_zpdesc(zpdesc);
+ BUG_ON(!zpdescs[1]);
- __zs_unmap_object(area, (struct zpdesc **)pages, off, class->size);
+ __zs_unmap_object(area, zpdescs, off, class->size);
}
local_unlock(&zs_map_area.lock);
@@ -1434,23 +1434,24 @@ static void obj_free(int class_size, unsigned long obj)
{
struct link_free *link;
struct zspage *zspage;
- struct page *f_page;
+ struct zpdesc *f_zpdesc;
unsigned long f_offset;
unsigned int f_objidx;
void *vaddr;
- obj_to_location(obj, &f_page, &f_objidx);
+
+ obj_to_location(obj, &f_zpdesc, &f_objidx);
f_offset = offset_in_page(class_size * f_objidx);
- zspage = get_zspage(f_page);
+ zspage = get_zspage(zpdesc_page(f_zpdesc));
- vaddr = kmap_atomic(f_page);
+ vaddr = zpdesc_kmap_atomic(f_zpdesc);
link = (struct link_free *)(vaddr + f_offset);
/* Insert this object in containing zspage's freelist */
if (likely(!ZsHugePage(zspage)))
link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
else
- f_page->index = 0;
+ f_zpdesc->next = NULL;
set_freeobj(zspage, f_objidx);
kunmap_atomic(vaddr);
@@ -1495,7 +1496,7 @@ EXPORT_SYMBOL_GPL(zs_free);
static void zs_object_copy(struct size_class *class, unsigned long dst,
unsigned long src)
{
- struct page *s_page, *d_page;
+ struct zpdesc *s_zpdesc, *d_zpdesc;
unsigned int s_objidx, d_objidx;
unsigned long s_off, d_off;
void *s_addr, *d_addr;
@@ -1504,8 +1505,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
s_size = d_size = class->size;
- obj_to_location(src, &s_page, &s_objidx);
- obj_to_location(dst, &d_page, &d_objidx);
+ obj_to_location(src, &s_zpdesc, &s_objidx);
+ obj_to_location(dst, &d_zpdesc, &d_objidx);
s_off = offset_in_page(class->size * s_objidx);
d_off = offset_in_page(class->size * d_objidx);
@@ -1516,8 +1517,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
if (d_off + class->size > PAGE_SIZE)
d_size = PAGE_SIZE - d_off;
- s_addr = kmap_atomic(s_page);
- d_addr = kmap_atomic(d_page);
+ s_addr = zpdesc_kmap_atomic(s_zpdesc);
+ d_addr = zpdesc_kmap_atomic(d_zpdesc);
while (1) {
size = min(s_size, d_size);
@@ -1542,17 +1543,17 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
if (s_off >= PAGE_SIZE) {
kunmap_atomic(d_addr);
kunmap_atomic(s_addr);
- s_page = get_next_page(s_page);
- s_addr = kmap_atomic(s_page);
- d_addr = kmap_atomic(d_page);
+ s_zpdesc = get_next_zpdesc(s_zpdesc);
+ s_addr = zpdesc_kmap_atomic(s_zpdesc);
+ d_addr = zpdesc_kmap_atomic(d_zpdesc);
s_size = class->size - written;
s_off = 0;
}
if (d_off >= PAGE_SIZE) {
kunmap_atomic(d_addr);
- d_page = get_next_page(d_page);
- d_addr = kmap_atomic(d_page);
+ d_zpdesc = get_next_zpdesc(d_zpdesc);
+ d_addr = zpdesc_kmap_atomic(d_zpdesc);
d_size = class->size - written;
d_off = 0;
}
@@ -1791,7 +1792,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
struct zs_pool *pool;
struct size_class *class;
struct zspage *zspage;
- struct page *dummy;
+ struct zpdesc *dummy;
void *s_addr, *d_addr, *addr;
unsigned int offset;
unsigned long handle;
--
2.43.0