Message-Id: <20080820190324.f723d222.kamezawa.hiroyu@jp.fujitsu.com>
Date: Wed, 20 Aug 2008 19:03:24 +0900
From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
To: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
Cc: LKML <linux-kernel@...r.kernel.org>,
"balbir@...ux.vnet.ibm.com" <balbir@...ux.vnet.ibm.com>,
"yamamoto@...inux.co.jp" <yamamoto@...inux.co.jp>,
"nishimura@....nes.nec.co.jp" <nishimura@....nes.nec.co.jp>,
ryov@...inux.co.jp, "linux-mm@...ck.org" <linux-mm@...ck.org>
Subject: [RFC][PATCH -mm 3/7] memcg: freeing page_cgroup by rcu.patch
With delayed_batch_freeing_of_page_cgroup.patch, page_cgroup objects can already be
freed lazily. After this patch, obsolete page_cgroup objects are freed via RCU,
making page_cgroup access RCU-safe. This is a prerequisite for the lockless
page_cgroup patch.
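
For readers new to the pattern, below is a minimal, self-contained sketch of the
call_rcu() deferred-free scheme this patch adopts. The names pcg_rcu_work,
pcg_free_cb and pcg_defer_free, and the ->next link field, are illustrative
assumptions rather than the patch's own identifiers (the patch detaches the
chain from the per-cpu memcg_sink_list instead):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Minimal stand-in for the real struct page_cgroup (sketch only). */
struct page_cgroup {
	struct page_cgroup *next;	/* assumed drop-list link */
	/* ...real fields elided... */
};

struct pcg_rcu_work {
	struct rcu_head head;
	struct page_cgroup *list;	/* detached drop list */
};

static void pcg_free_cb(struct rcu_head *head)
{
	struct pcg_rcu_work *work =
		container_of(head, struct pcg_rcu_work, head);
	struct page_cgroup *pc, *next;

	/*
	 * A grace period has elapsed: no reader that started before
	 * the detach can still hold a pointer into this chain.
	 */
	for (pc = work->list; pc; pc = next) {
		next = pc->next;
		kfree(pc);
	}
	kfree(work);
}

static int pcg_defer_free(struct page_cgroup *detached)
{
	struct pcg_rcu_work *work = kmalloc(sizeof(*work), GFP_ATOMIC);

	if (!work)
		return -ENOMEM;
	work->list = detached;
	/* The kfree()s run asynchronously, after the next grace period. */
	call_rcu(&work->head, pcg_free_cb);
	return 0;
}

The extra allocation exists because an rcu_head must live in stable storage
until its callback runs; embedding one in a small work item lets a whole
detached chain ride a single grace period.
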
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
---
mm/memcontrol.c | 44 ++++++++++++++++++++++++++++++++++++--------
1 file changed, 36 insertions(+), 8 deletions(-)
Index: mmtom-2.6.27-rc3+/mm/memcontrol.c
===================================================================
--- mmtom-2.6.27-rc3+.orig/mm/memcontrol.c
+++ mmtom-2.6.27-rc3+/mm/memcontrol.c
@@ -577,19 +577,23 @@ unsigned long mem_cgroup_isolate_pages(u
* Free obsolete page_cgroups which is linked to per-cpu drop list.
*/
-static void __free_obsolete_page_cgroup(void)
+struct page_cgroup_rcu_work {
+ struct rcu_head head;
+ struct page_cgroup *list;
+};
+
+static void __free_obsolete_page_cgroup_cb(struct rcu_head *head)
{
struct mem_cgroup *memcg;
struct page_cgroup *pc, *next;
struct mem_cgroup_per_zone *mz, *page_mz;
- struct mem_cgroup_sink_list *mcsl;
+ struct page_cgroup_rcu_work *work;
unsigned long flags;
- mcsl = &get_cpu_var(memcg_sink_list);
- next = mcsl->next;
- mcsl->next = NULL;
- mcsl->count = 0;
- put_cpu_var(memcg_sink_list);
+
+ work = container_of(head, struct page_cgroup_rcu_work, head);
+ next = work->list;
+ kfree(work);
mz = NULL;
@@ -616,6 +620,26 @@ static void __free_obsolete_page_cgroup(
local_irq_restore(flags);
}
+static int __free_obsolete_page_cgroup(void)
+{
+ struct page_cgroup_rcu_work *work;
+ struct mem_cgroup_sink_list *mcsl;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+ INIT_RCU_HEAD(&work->head);
+
+ mcsl = &get_cpu_var(memcg_sink_list);
+ work->list = mcsl->next;
+ mcsl->next = NULL;
+ mcsl->count = 0;
+ put_cpu_var(memcg_sink_list);
+
+ call_rcu(&work->head, __free_obsolete_page_cgroup_cb);
+ return 0;
+}
+
static void free_obsolete_page_cgroup(struct page_cgroup *pc)
{
int count;
@@ -638,13 +662,17 @@ static DEFINE_MUTEX(memcg_force_drain_mu
static void mem_cgroup_local_force_drain(struct work_struct *work)
{
- __free_obsolete_page_cgroup();
+ int ret;
+ do {
+ ret = __free_obsolete_page_cgroup();
+ } while (ret);
}
static void mem_cgroup_all_force_drain(void)
{
mutex_lock(&memcg_force_drain_mutex);
schedule_on_each_cpu(mem_cgroup_local_force_drain);
+ synchronize_rcu();
mutex_unlock(&memcg_force_drain_mutex);
}
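
One note on the drain path above: synchronize_rcu() only waits for a grace
period to elapse; it does not wait for the call_rcu() callbacks queued by
__free_obsolete_page_cgroup() to finish executing. If the intent is that every
obsolete page_cgroup is actually freed before mem_cgroup_all_force_drain()
returns, rcu_barrier() is the primitive that gives that guarantee. A sketch of
that variant (not the patch's code):

static void mem_cgroup_all_force_drain(void)
{
	mutex_lock(&memcg_force_drain_mutex);
	schedule_on_each_cpu(mem_cgroup_local_force_drain);
	/*
	 * Wait for all pending call_rcu() callbacks to complete,
	 * not merely for current readers to drain.
	 */
	rcu_barrier();
	mutex_unlock(&memcg_force_drain_mutex);
}
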
--