Message-Id: <1266516162-14154-4-git-send-email-mel@csn.ul.ie>
Date: Thu, 18 Feb 2010 18:02:33 +0000
From: Mel Gorman <mel@csn.ul.ie>
To: Andrea Arcangeli <aarcange@...hat.com>
Cc: Christoph Lameter <cl@...ux-foundation.org>,
Adam Litke <agl@...ibm.com>, Avi Kivity <avi@...hat.com>,
David Rientjes <rientjes@...gle.com>,
KOSAKI Motohiro <kosaki.motohiro@...fujitsu.com>,
Rik van Riel <riel@...hat.com>, Mel Gorman <mel@csn.ul.ie>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: [PATCH 03/12] mm: Share the anon_vma ref counts between KSM and page migration

For clarity of review, KSM and page migration have separate refcounts on
the anon_vma. While clear, this is a waste of memory. This patch gets
KSM and page migration to share their toys in a spirit of harmony.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
---
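Not part of the patch itself, just an illustration for review: a minimal
sketch of the take/release pattern that KSM and page migration now share
on the merged counter. The wrapper names take_anon_vma() and put_anon_vma()
are hypothetical, and the final anon_vma_free() call is an assumption
standing in for whatever free path the caller already uses.

	/* Take: pin the anon_vma beyond the life of the vma/page tables */
	static inline void take_anon_vma(struct anon_vma *anon_vma)
	{
		atomic_inc(&anon_vma->refcount);
	}

	/* Release: the last user is responsible for freeing the anon_vma */
	static void put_anon_vma(struct anon_vma *anon_vma)
	{
		if (atomic_dec_and_lock(&anon_vma->refcount, &anon_vma->lock)) {
			int empty = list_empty(&anon_vma->head);
			spin_unlock(&anon_vma->lock);
			if (empty)
				anon_vma_free(anon_vma); /* assumed free helper */
		}
	}

With a single counter, anon_vma_unlink() only needs to test
anonvma_refcount() before garbage collecting, as the mm/rmap.c hunk
below shows.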
 include/linux/rmap.h |   50 ++++++++++++++++++--------------------------------
 mm/ksm.c             |    4 ++--
 mm/migrate.c         |    4 ++--
 mm/rmap.c            |    6 ++----
 4 files changed, 24 insertions(+), 40 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 6b5a1a9..55c0e9e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,11 +26,17 @@
*/
struct anon_vma {
spinlock_t lock; /* Serialize access to vma list */
-#ifdef CONFIG_KSM
- atomic_t ksm_refcount;
-#endif
-#ifdef CONFIG_MIGRATION
- atomic_t migrate_refcount;
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+
+ /*
+ * The refcount is taken by either KSM or page migration
+ * to take a reference to an anon_vma when there is no
+ * guarantee that the vma of page tables will exist for
+ * the duration of the operation. A caller that takes
+ * the reference is responsible for clearing up the
+ * anon_vma if they are the last user on release
+ */
+ atomic_t refcount;
#endif
/*
* NOTE: the LSB of the head.next is set by
@@ -44,46 +50,26 @@ struct anon_vma {
};
#ifdef CONFIG_MMU
-#ifdef CONFIG_KSM
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+static inline void anonvma_refcount_init(struct anon_vma *anon_vma)
{
- atomic_set(&anon_vma->ksm_refcount, 0);
+ atomic_set(&anon_vma->refcount, 0);
}
-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_refcount(struct anon_vma *anon_vma)
{
- return atomic_read(&anon_vma->ksm_refcount);
+ return atomic_read(&anon_vma->refcount);
}
#else
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+static inline void anonvma_refcount_init(struct anon_vma *anon_vma)
{
}
-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_refcount(struct anon_vma *anon_vma)
{
return 0;
}
#endif /* CONFIG_KSM */
-#ifdef CONFIG_MIGRATION
-static inline void migrate_refcount_init(struct anon_vma *anon_vma)
-{
- atomic_set(&anon_vma->migrate_refcount, 0);
-}
-
-static inline int migrate_refcount(struct anon_vma *anon_vma)
-{
- return atomic_read(&anon_vma->migrate_refcount);
-}
-#else
-static inline void migrate_refcount_init(struct anon_vma *anon_vma)
-{
-}
-
-static inline int migrate_refcount(struct anon_vma *anon_vma)
-{
- return 0;
-}
-#endif /* CONFIG_MIGRATE */
static inline struct anon_vma *page_anon_vma(struct page *page)
{
diff --git a/mm/ksm.c b/mm/ksm.c
index 56a0da1..7decf73 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -318,14 +318,14 @@ static void hold_anon_vma(struct rmap_item *rmap_item,
struct anon_vma *anon_vma)
{
rmap_item->anon_vma = anon_vma;
- atomic_inc(&anon_vma->ksm_refcount);
+ atomic_inc(&anon_vma->refcount);
}
static void drop_anon_vma(struct rmap_item *rmap_item)
{
struct anon_vma *anon_vma = rmap_item->anon_vma;
- if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) {
+ if (atomic_dec_and_lock(&anon_vma->refcount, &anon_vma->lock)) {
int empty = list_empty(&anon_vma->head);
spin_unlock(&anon_vma->lock);
if (empty)
diff --git a/mm/migrate.c b/mm/migrate.c
index 1ce6a2f..00777b0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -619,7 +619,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
rcu_read_lock();
rcu_locked = 1;
anon_vma = page_anon_vma(page);
- atomic_inc(&anon_vma->migrate_refcount);
+ atomic_inc(&anon_vma->refcount);
}
/*
@@ -661,7 +661,7 @@ skip_unmap:
rcu_unlock:
/* Drop an anon_vma reference if we took one */
- if (anon_vma && atomic_dec_and_lock(&anon_vma->migrate_refcount, &anon_vma->lock)) {
+ if (anon_vma && atomic_dec_and_lock(&anon_vma->refcount, &anon_vma->lock)) {
int empty = list_empty(&anon_vma->head);
spin_unlock(&anon_vma->lock);
if (empty)
diff --git a/mm/rmap.c b/mm/rmap.c
index 11ba74a..96b5905 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -172,8 +172,7 @@ void anon_vma_unlink(struct vm_area_struct *vma)
list_del(&vma->anon_vma_node);
/* We must garbage collect the anon_vma if it's empty */
- empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma) &&
- !migrate_refcount(anon_vma);
+ empty = list_empty(&anon_vma->head) && !anonvma_refcount(anon_vma);
spin_unlock(&anon_vma->lock);
if (empty)
@@ -185,8 +184,7 @@ static void anon_vma_ctor(void *data)
struct anon_vma *anon_vma = data;
spin_lock_init(&anon_vma->lock);
- ksm_refcount_init(anon_vma);
- migrate_refcount_init(anon_vma);
+ anonvma_refcount_init(anon_vma);
INIT_LIST_HEAD(&anon_vma->head);
}
--
1.6.5