[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1380761503-14509-10-git-send-email-john.stultz@linaro.org>
Date: Wed, 2 Oct 2013 17:51:38 -0700
From: John Stultz <john.stultz@...aro.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Minchan Kim <minchan@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Android Kernel Team <kernel-team@...roid.com>,
Robert Love <rlove@...gle.com>, Mel Gorman <mel@....ul.ie>,
Hugh Dickins <hughd@...gle.com>,
Dave Hansen <dave.hansen@...el.com>,
Rik van Riel <riel@...hat.com>,
Dmitry Adamushko <dmitry.adamushko@...il.com>,
Dave Chinner <david@...morbit.com>, Neil Brown <neilb@...e.de>,
Andrea Righi <andrea@...terlinux.com>,
Andrea Arcangeli <aarcange@...hat.com>,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.vnet.ibm.com>,
Mike Hommey <mh@...ndium.org>, Taras Glek <tglek@...illa.com>,
Dhaval Giani <dhaval.giani@...il.com>, Jan Kara <jack@...e.cz>,
KOSAKI Motohiro <kosaki.motohiro@...il.com>,
Michel Lespinasse <walken@...gle.com>,
Rob Clark <robdclark@...il.com>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
John Stultz <john.stultz@...aro.org>
Subject: [PATCH 09/14] vrange: Add vrange LRU list for purging
From: Minchan Kim <minchan@...nel.org>
This patch adds a vrange LRU list for managing vranges that are candidates
for purging (in this implementation, purging will be driven by the slab
shrinker introduced in upcoming patches).
This is necessary to purge vranges on swapless system because currently
the VM only ages anonymous pages if the system has a swap device.
In this case, rather than duplicating the page LRU's tracking of
hot/cold pages, we utilize a vrange LRU to manage the shrinking order.
Thus the shrinker will discard an entire vrange at once, and vranges
are purged in the order they were marked volatile.
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Android Kernel Team <kernel-team@...roid.com>
Cc: Robert Love <rlove@...gle.com>
Cc: Mel Gorman <mel@....ul.ie>
Cc: Hugh Dickins <hughd@...gle.com>
Cc: Dave Hansen <dave.hansen@...el.com>
Cc: Rik van Riel <riel@...hat.com>
Cc: Dmitry Adamushko <dmitry.adamushko@...il.com>
Cc: Dave Chinner <david@...morbit.com>
Cc: Neil Brown <neilb@...e.de>
Cc: Andrea Righi <andrea@...terlinux.com>
Cc: Andrea Arcangeli <aarcange@...hat.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@...ux.vnet.ibm.com>
Cc: Mike Hommey <mh@...ndium.org>
Cc: Taras Glek <tglek@...illa.com>
Cc: Dhaval Giani <dhaval.giani@...il.com>
Cc: Jan Kara <jack@...e.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@...il.com>
Cc: Michel Lespinasse <walken@...gle.com>
Cc: Rob Clark <robdclark@...il.com>
Cc: Minchan Kim <minchan@...nel.org>
Cc: linux-mm@...ck.org <linux-mm@...ck.org>
Signed-off-by: Minchan Kim <minchan@...nel.org>
Signed-off-by: John Stultz <john.stultz@...aro.org>
---
include/linux/vrange_types.h | 2 ++
mm/vrange.c | 61 ++++++++++++++++++++++++++++++++++++++++----
2 files changed, 58 insertions(+), 5 deletions(-)
diff --git a/include/linux/vrange_types.h b/include/linux/vrange_types.h
index 0d48b42..d7d451c 100644
--- a/include/linux/vrange_types.h
+++ b/include/linux/vrange_types.h
@@ -20,6 +20,8 @@ struct vrange {
struct interval_tree_node node;
struct vrange_root *owner;
int purged;
+ struct list_head lru;
+ atomic_t refcount;
};
#endif
diff --git a/mm/vrange.c b/mm/vrange.c
index c19a966..33e3ac1 100644
--- a/mm/vrange.c
+++ b/mm/vrange.c
@@ -14,8 +14,21 @@
static struct kmem_cache *vrange_cachep;
+static struct vrange_list {
+ struct list_head list;
+ unsigned long size;
+ struct mutex lock;
+} vrange_list;
+
+static inline unsigned int vrange_size(struct vrange *range)
+{
+ return range->node.last + 1 - range->node.start;
+}
+
static int __init vrange_init(void)
{
+ INIT_LIST_HEAD(&vrange_list.list);
+ mutex_init(&vrange_list.lock);
vrange_cachep = KMEM_CACHE(vrange, SLAB_PANIC);
return 0;
}
@@ -27,19 +40,56 @@ static struct vrange *__vrange_alloc(gfp_t flags)
if (!vrange)
return vrange;
vrange->owner = NULL;
+ INIT_LIST_HEAD(&vrange->lru);
+ atomic_set(&vrange->refcount, 1);
+
return vrange;
}
static void __vrange_free(struct vrange *range)
{
WARN_ON(range->owner);
+ WARN_ON(atomic_read(&range->refcount) != 0);
+ WARN_ON(!list_empty(&range->lru));
+
kmem_cache_free(vrange_cachep, range);
}
+static inline void __vrange_lru_add(struct vrange *range)
+{
+ mutex_lock(&vrange_list.lock);
+ WARN_ON(!list_empty(&range->lru));
+ list_add(&range->lru, &vrange_list.list);
+ vrange_list.size += vrange_size(range);
+ mutex_unlock(&vrange_list.lock);
+}
+
+static inline void __vrange_lru_del(struct vrange *range)
+{
+ mutex_lock(&vrange_list.lock);
+ if (!list_empty(&range->lru)) {
+ list_del_init(&range->lru);
+ vrange_list.size -= vrange_size(range);
+ WARN_ON(range->owner);
+ }
+ mutex_unlock(&vrange_list.lock);
+}
+
static void __vrange_add(struct vrange *range, struct vrange_root *vroot)
{
range->owner = vroot;
interval_tree_insert(&range->node, &vroot->v_rb);
+
+ WARN_ON(atomic_read(&range->refcount) <= 0);
+ __vrange_lru_add(range);
+}
+
+static inline void __vrange_put(struct vrange *range)
+{
+ if (atomic_dec_and_test(&range->refcount)) {
+ __vrange_lru_del(range);
+ __vrange_free(range);
+ }
}
static void __vrange_remove(struct vrange *range)
@@ -64,6 +114,7 @@ static inline void __vrange_resize(struct vrange *range,
bool purged = range->purged;
__vrange_remove(range);
+ __vrange_lru_del(range);
__vrange_set(range, start_idx, end_idx, purged);
__vrange_add(range, vroot);
}
@@ -100,7 +151,7 @@ static int vrange_add(struct vrange_root *vroot,
range = vrange_from_node(node);
/* old range covers new range fully */
if (node->start <= start_idx && node->last >= end_idx) {
- __vrange_free(new_range);
+ __vrange_put(new_range);
goto out;
}
@@ -109,7 +160,7 @@ static int vrange_add(struct vrange_root *vroot,
purged |= range->purged;
__vrange_remove(range);
- __vrange_free(range);
+ __vrange_put(range);
node = next;
}
@@ -150,7 +201,7 @@ static int vrange_remove(struct vrange_root *vroot,
if (start_idx <= node->start && end_idx >= node->last) {
/* argumented range covers the range fully */
__vrange_remove(range);
- __vrange_free(range);
+ __vrange_put(range);
} else if (node->start >= start_idx) {
/*
* Argumented range covers over the left of the
@@ -181,7 +232,7 @@ static int vrange_remove(struct vrange_root *vroot,
vrange_unlock(vroot);
if (!used_new)
- __vrange_free(new_range);
+ __vrange_put(new_range);
return 0;
}
@@ -204,7 +255,7 @@ void vrange_root_cleanup(struct vrange_root *vroot)
while ((node = rb_first(&vroot->v_rb))) {
range = vrange_entry(node);
__vrange_remove(range);
- __vrange_free(range);
+ __vrange_put(range);
}
vrange_unlock(vroot);
}
--
1.8.1.2
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists