Message-Id: <1463754225-31311-10-git-send-email-minchan@kernel.org>
Date: Fri, 20 May 2016 23:23:42 +0900
From: Minchan Kim <minchan@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Minchan Kim <minchan@...nel.org>,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>
Subject: [PATCH v6 09/12] zsmalloc: separate free_zspage from putback_zspage
Currently, putback_zspage frees the zspage under class->lock when its
fullness group becomes ZS_EMPTY, but that complicates the locking
scheme needed for the new zspage migration.
So this patch separates free_zspage from putback_zspage and frees the
zspage outside class->lock, as preparation for zspage migration.
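To illustrate the caller-side pattern this enables, here is a minimal
userspace sketch (the pthread mutex, struct layouts and helper bodies
are simplified stand-ins, not the zsmalloc code): putback_zspage only
reinserts the zspage and reports its fullness group, so the caller can
remember an empty zspage and free it after dropping the lock.
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL };
struct zspage {
	int inuse;			/* objects still allocated in this zspage */
};
struct size_class {
	pthread_mutex_t lock;		/* stand-in for the kernel's class->lock */
};
/* Reinsert @zspage into @class's fullness list; it no longer frees anything. */
static enum fullness_group putback_zspage(struct size_class *class,
					  struct zspage *zspage)
{
	(void)class;
	return zspage->inuse ? ZS_ALMOST_EMPTY : ZS_EMPTY;
}
static void free_zspage(struct zspage *zspage)
{
	free(zspage);
}
/* What the separation enables: remember an empty zspage and free it
 * only after the class lock has been dropped. */
static void putback_and_maybe_free(struct size_class *class, struct zspage *src)
{
	struct zspage *to_free = NULL;
	pthread_mutex_lock(&class->lock);
	if (putback_zspage(class, src) == ZS_EMPTY)
		to_free = src;		/* caller decides, not putback_zspage */
	pthread_mutex_unlock(&class->lock);
	if (to_free)
		free_zspage(to_free);	/* freeing happens outside the lock */
}
int main(void)
{
	struct size_class class = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct zspage *zspage = calloc(1, sizeof(*zspage));
	if (!zspage)
		return 1;
	putback_and_maybe_free(&class, zspage);	/* inuse == 0, so it is freed */
	printf("empty zspage freed outside class->lock\n");
	return 0;
}
Note that in __zs_compact below the free still happens with class->lock
held; the separation is what lets the later migration patches move it.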
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
Signed-off-by: Minchan Kim <minchan@...nel.org>
---
mm/zsmalloc.c | 27 +++++++++++----------------
1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b24842bd4537..8fc16cc4d76d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1683,14 +1683,12 @@ static struct zspage *isolate_zspage(struct size_class *class, bool source)
/*
* putback_zspage - add @zspage into right class's fullness list
- * @pool: target pool
* @class: destination class
* @zspage: target page
*
* Return @zspage's fullness_group
*/
-static enum fullness_group putback_zspage(struct zs_pool *pool,
- struct size_class *class,
+static enum fullness_group putback_zspage(struct size_class *class,
struct zspage *zspage)
{
enum fullness_group fullness;
@@ -1699,15 +1697,6 @@ static enum fullness_group putback_zspage(struct zs_pool *pool,
insert_zspage(class, zspage, fullness);
set_zspage_mapping(zspage, class->index, fullness);
- if (fullness == ZS_EMPTY) {
- zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
- class->size, class->pages_per_zspage));
- atomic_long_sub(class->pages_per_zspage,
- &pool->pages_allocated);
-
- free_zspage(pool, zspage);
- }
-
return fullness;
}
@@ -1756,23 +1745,29 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
if (!migrate_zspage(pool, class, &cc))
break;
- putback_zspage(pool, class, dst_zspage);
+ putback_zspage(class, dst_zspage);
}
/* Stop if we couldn't find slot */
if (dst_zspage == NULL)
break;
- putback_zspage(pool, class, dst_zspage);
- if (putback_zspage(pool, class, src_zspage) == ZS_EMPTY)
+ putback_zspage(class, dst_zspage);
+ if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
+ zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+ class->size, class->pages_per_zspage));
+ atomic_long_sub(class->pages_per_zspage,
+ &pool->pages_allocated);
+ free_zspage(pool, src_zspage);
pool->stats.pages_compacted += class->pages_per_zspage;
+ }
spin_unlock(&class->lock);
cond_resched();
spin_lock(&class->lock);
}
if (src_zspage)
- putback_zspage(pool, class, src_zspage);
+ putback_zspage(class, src_zspage);
spin_unlock(&class->lock);
}
--
1.9.1