Message-ID: <20230713042037.980211-15-42.hyeyoo@gmail.com>
Date: Thu, 13 Jul 2023 13:20:29 +0900
From: Hyeonggon Yoo <42.hyeyoo@...il.com>
To: Minchan Kim <minchan@...nel.org>,
Sergey Senozhatsky <senozhatsky@...omium.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, Matthew Wilcox <willy@...radead.org>,
Mike Rapoport <rppt@...nel.org>,
Hyeonggon Yoo <42.hyeyoo@...il.com>
Subject: [RFC PATCH v2 14/21] mm/zsmalloc: convert zs_page_{isolate,migrate,putback} to use zsdesc

Convert zsmalloc's movable_operations callbacks (zs_page_isolate(),
zs_page_migrate() and zs_page_putback()) to use zsdesc, and introduce
the zsdesc_is_isolated() and zsdesc_zone() helpers they rely on.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@...il.com>
---
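Note for reviewers (below the cut, so it stays out of the commit log):
the diff uses a handful of zsdesc wrappers introduced by earlier patches
in this series (page_zsdesc(), zsdesc_page(), zsdesc_kmap_atomic(),
zsdesc_get()/zsdesc_put() and the zone page state helpers). Their exact
definitions live in those earlier patches; the sketch below is only a
plausible reconstruction, assuming zsdesc overlays struct page the way
struct folio does, so this patch can be read stand-alone:

/*
 * Illustrative sketch only -- not part of this patch. Assumes zsdesc
 * overlays struct page, so conversions are plain casts and each helper
 * is a thin wrapper around the corresponding page-level call.
 */
static inline struct zsdesc *page_zsdesc(struct page *page)
{
	return (struct zsdesc *)page;
}

static inline struct page *zsdesc_page(struct zsdesc *zsdesc)
{
	return (struct page *)zsdesc;
}

static inline void *zsdesc_kmap_atomic(struct zsdesc *zsdesc)
{
	return kmap_atomic(zsdesc_page(zsdesc));
}

static inline void zsdesc_get(struct zsdesc *zsdesc)
{
	get_page(zsdesc_page(zsdesc));
}

static inline void zsdesc_put(struct zsdesc *zsdesc)
{
	put_page(zsdesc_page(zsdesc));
}

static inline void zsdesc_dec_zone_page_state(struct zsdesc *zsdesc)
{
	dec_zone_page_state(zsdesc_page(zsdesc), NR_ZSPAGES);
}

static inline void zsdesc_inc_zone_page_state(struct zsdesc *zsdesc)
{
	inc_zone_page_state(zsdesc_page(zsdesc), NR_ZSPAGES);
}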
mm/zsmalloc.c | 50 ++++++++++++++++++++++++++++++++------------------
1 file changed, 32 insertions(+), 18 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 085f5c791a03..9e4ced14e1eb 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -399,6 +399,16 @@ static void reset_zsdesc(struct zsdesc *zsdesc)
page->index = 0;
}
+static inline bool zsdesc_is_isolated(struct zsdesc *zsdesc)
+{
+ return PageIsolated(zsdesc_page(zsdesc));
+}
+
+static inline struct zone *zsdesc_zone(struct zsdesc *zsdesc)
+{
+ return page_zone(zsdesc_page(zsdesc));
+}
+
/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetZsHugePage(struct zspage *zspage)
{
@@ -1928,14 +1938,15 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
struct zspage *zspage;
+ struct zsdesc *zsdesc = page_zsdesc(page);
/*
* Page is locked so zspage couldn't be destroyed. For detail, look at
* lock_zspage in free_zspage.
*/
- VM_BUG_ON_PAGE(PageIsolated(page), page);
+ VM_BUG_ON_PAGE(zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
- zspage = get_zspage(page);
+ zspage = get_zspage(zsdesc_page(zsdesc));
migrate_write_lock(zspage);
inc_zspage_isolation(zspage);
migrate_write_unlock(zspage);
@@ -1950,6 +1961,8 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
struct size_class *class;
struct zspage *zspage;
struct zsdesc *dummy;
+ struct zsdesc *new_zsdesc = page_zsdesc(newpage);
+ struct zsdesc *zsdesc = page_zsdesc(page);
void *s_addr, *d_addr, *addr;
unsigned int offset;
unsigned long handle;
@@ -1964,10 +1977,10 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
if (mode == MIGRATE_SYNC_NO_COPY)
return -EINVAL;
- VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ VM_BUG_ON_PAGE(!zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
/* The page is locked, so this pointer must remain valid */
- zspage = get_zspage(page);
+ zspage = get_zspage(zsdesc_page(zsdesc));
pool = zspage->pool;
/*
@@ -1980,30 +1993,30 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
/* the migrate_write_lock protects zpage access via zs_map_object */
migrate_write_lock(zspage);
- offset = get_first_obj_offset(page);
- s_addr = kmap_atomic(page);
+ offset = get_first_obj_offset(zsdesc_page(zsdesc));
+ s_addr = zsdesc_kmap_atomic(zsdesc);
/*
* Here, any user cannot access all objects in the zspage so let's move.
*/
- d_addr = kmap_atomic(newpage);
+ d_addr = zsdesc_kmap_atomic(new_zsdesc);
memcpy(d_addr, s_addr, PAGE_SIZE);
kunmap_atomic(d_addr);
for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
addr += class->size) {
- if (obj_allocated(page_zsdesc(page), addr, &handle)) {
+ if (obj_allocated(zsdesc, addr, &handle)) {
old_obj = handle_to_obj(handle);
obj_to_location(old_obj, &dummy, &obj_idx);
- new_obj = (unsigned long)location_to_obj(newpage,
+ new_obj = (unsigned long)location_to_obj(zsdesc_page(new_zsdesc),
obj_idx);
record_obj(handle, new_obj);
}
}
kunmap_atomic(s_addr);
- replace_sub_page(class, zspage, page_zsdesc(newpage), page_zsdesc(page));
+ replace_sub_page(class, zspage, new_zsdesc, zsdesc);
/*
* Since we complete the data copy and set up new zspage structure,
* it's okay to release the pool's lock.
@@ -2012,14 +2025,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
dec_zspage_isolation(zspage);
migrate_write_unlock(zspage);
- get_page(newpage);
- if (page_zone(newpage) != page_zone(page)) {
- dec_zone_page_state(page, NR_ZSPAGES);
- inc_zone_page_state(newpage, NR_ZSPAGES);
+ zsdesc_get(new_zsdesc);
+ if (zsdesc_zone(new_zsdesc) != zsdesc_zone(zsdesc)) {
+ zsdesc_dec_zone_page_state(zsdesc);
+ zsdesc_inc_zone_page_state(new_zsdesc);
}
- reset_zsdesc(page_zsdesc(page));
- put_page(page);
+ reset_zsdesc(zsdesc);
+ zsdesc_put(zsdesc);
return MIGRATEPAGE_SUCCESS;
}
@@ -2027,10 +2040,11 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
static void zs_page_putback(struct page *page)
{
struct zspage *zspage;
+ struct zsdesc *zsdesc = page_zsdesc(page);
- VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ VM_BUG_ON_PAGE(!zsdesc_is_isolated(zsdesc), zsdesc_page(zsdesc));
- zspage = get_zspage(page);
+ zspage = get_zspage(zsdesc_page(zsdesc));
migrate_write_lock(zspage);
dec_zspage_isolation(zspage);
migrate_write_unlock(zspage);
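
Review context, not part of the diff: these three callbacks are what
zsmalloc registers as its movable_operations (the registration itself
is not modified by this patch), reproduced from mm/zsmalloc.c as a
reading aid:

static const struct movable_operations zsmalloc_mops = {
	.isolate_page = zs_page_isolate,
	.migrate_page = zs_page_migrate,
	.putback_page = zs_page_putback,
};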
--
2.41.0