[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20100727170225.64f78b15.kamezawa.hiroyu@jp.fujitsu.com>
Date: Tue, 27 Jul 2010 17:02:25 +0900
From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
To: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
Cc: "linux-mm@...ck.org" <linux-mm@...ck.org>,
"nishimura@....nes.nec.co.jp" <nishimura@....nes.nec.co.jp>,
"balbir@...ux.vnet.ibm.com" <balbir@...ux.vnet.ibm.com>,
gthelen@...gle.com, m-ikeda@...jp.nec.com,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: [RFC][PATCH 7/7][memcg] use spin lock instead of bit_spin_lock in
page_cgroup
From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
This patch replaces page_cgroup's bit_spinlock with a spinlock. In general,
spinlock has a better implementation than bit_spin_lock, and we should use
it whenever we have room for it. On 64-bit architectures, we have an extra
4 bytes available. Let's use them.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
--
Index: mmotm-0719/include/linux/page_cgroup.h
===================================================================
--- mmotm-0719.orig/include/linux/page_cgroup.h
+++ mmotm-0719/include/linux/page_cgroup.h
@@ -10,8 +10,14 @@
* All page cgroups are allocated at boot or memory hotplug event,
* then the page cgroup for pfn always exists.
*/
+#ifdef CONFIG_64BIT
+#define PCG_HAS_SPINLOCK
+#endif
struct page_cgroup {
unsigned long flags;
+#ifdef PCG_HAS_SPINLOCK
+ spinlock_t lock;
+#endif
unsigned short mem_cgroup; /* ID of assigned memory cgroup */
unsigned short blk_cgroup; /* Not Used..but will be. */
struct page *page;
@@ -90,6 +96,16 @@ static inline enum zone_type page_cgroup
return page_zonenum(pc->page);
}
+#ifdef PCG_HAS_SPINLOCK
+static inline void lock_page_cgroup(struct page_cgroup *pc)
+{
+ spin_lock(&pc->lock);
+}
+static inline void unlock_page_cgroup(struct page_cgroup *pc)
+{
+ spin_unlock(&pc->lock);
+}
+#else
static inline void lock_page_cgroup(struct page_cgroup *pc)
{
bit_spin_lock(PCG_LOCK, &pc->flags);
@@ -99,6 +115,7 @@ static inline void unlock_page_cgroup(st
{
bit_spin_unlock(PCG_LOCK, &pc->flags);
}
+#endif
static inline void SetPCGFileFlag(struct page_cgroup *pc, int idx)
{
Index: mmotm-0719/mm/page_cgroup.c
===================================================================
--- mmotm-0719.orig/mm/page_cgroup.c
+++ mmotm-0719/mm/page_cgroup.c
@@ -17,6 +17,9 @@ __init_page_cgroup(struct page_cgroup *p
pc->mem_cgroup = 0;
pc->page = pfn_to_page(pfn);
INIT_LIST_HEAD(&pc->lru);
+#ifdef PCG_HAS_SPINLOCK
+ spin_lock_init(&pc->lock);
+#endif
}
static unsigned long total_usage;
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists