Message-Id: <20080819174132.3fb69e31.kamezawa.hiroyu@jp.fujitsu.com>
Date: Tue, 19 Aug 2008 17:41:32 +0900
From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
To: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
Cc: LKML <linux-kernel@...r.kernel.org>,
"balbir@...ux.vnet.ibm.com" <balbir@...ux.vnet.ibm.com>,
"yamamoto@...inux.co.jp" <yamamoto@...inux.co.jp>,
"nishimura@....nes.nec.co.jp" <nishimura@....nes.nec.co.jp>,
ryov@...inux.co.jp
Subject: [PATCH -mm][preview] memcg: a patch series for next [5/9]
Make freeing of page_cgroup an RCU routine.

This patch avoids freeing the per-cpu page_cgroup free lists directly
and instead passes each free queue to an RCU callback.

This patch is a base patch for removing lock_page_cgroup(). With this,
a page_cgroup object remains valid while rcu_read_lock() is held.
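
To illustrate the intended rule, a reader-side lookup can then run
entirely under rcu_read_lock(). This is a minimal sketch only, assuming
the existing page_get_page_cgroup() helper and some page in hand; it is
not code from this patch:

	struct page_cgroup *pc;

	/*
	 * Because the kfree() of page_cgroup is deferred through
	 * call_rcu(), an object found here cannot be freed before
	 * the matching rcu_read_unlock().
	 */
	rcu_read_lock();
	pc = page_get_page_cgroup(page);
	if (pc) {
		/* safe to dereference pc without lock_page_cgroup() */
	}
	rcu_read_unlock();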
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
---
mm/memcontrol.c | 39 +++++++++++++++++++++++++++++++++------
1 file changed, 33 insertions(+), 6 deletions(-)
Index: linux-2.6.27-rc1-mm1/mm/memcontrol.c
===================================================================
--- linux-2.6.27-rc1-mm1.orig/mm/memcontrol.c
+++ linux-2.6.27-rc1-mm1/mm/memcontrol.c
@@ -638,21 +638,25 @@ unsigned long mem_cgroup_isolate_pages(u
 	return nr_taken;
 }
 
-void __mem_cgroup_drop_lru(void)
+struct memcg_rcu_work {
+	struct rcu_head head;
+	struct page_cgroup *list;
+};
+
+
+void __mem_cgroup_drop_lru(struct rcu_head *head)
 {
 	struct mem_cgroup *memcg;
 	struct page_cgroup *pc, *next;
 	struct mem_cgroup_per_zone *mz, *page_mz;
-	struct mem_cgroup_lazy_lru *mll;
 	unsigned long flags;
+	struct memcg_rcu_work *work;
 
-	mll = &get_cpu_var(memcg_lazy_lru);
-	next = mll->next;
-	mll->next = NULL;
-	mll->count = 0;
-	put_cpu_var(memcg_lazy_lru);
+	work = container_of(head, struct memcg_rcu_work, head);
+	next = work->list;
 
 	mz = NULL;
+	kfree(work);
 
 	local_irq_save(flags);
 	while (next) {
@@ -678,6 +682,27 @@ void __mem_cgroup_drop_lru(void)
 	return;
 }
 
+static int mem_cgroup_drop_lru_rcu(void)
+{
+	struct mem_cgroup_lazy_lru *mll;
+	struct memcg_rcu_work *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return 1;
+
+	INIT_RCU_HEAD(&work->head);
+
+	mll = &get_cpu_var(memcg_lazy_lru);
+	work->list = mll->next;
+	mll->next = NULL;
+	mll->count = 0;
+	put_cpu_var(memcg_lazy_lru);
+	call_rcu(&work->head, __mem_cgroup_drop_lru);
+
+	return 0;
+}
+
 static void mem_cgroup_drop_lru(struct page_cgroup *pc)
 {
 	int count;
@@ -690,14 +715,17 @@ static void mem_cgroup_drop_lru(struct p
 	put_cpu_var(memcg_lazy_lru);
 
 	if (count >= MEMCG_LRU_THRESH)
-		__mem_cgroup_drop_lru();
+		mem_cgroup_drop_lru_rcu();
 }
 
 static DEFINE_MUTEX(memcg_force_drain_mutex);
 
 static void mem_cgroup_local_force_drain(struct work_struct *work)
 {
-	__mem_cgroup_drop_lru();
+	int ret;
+	do {
+		ret = mem_cgroup_drop_lru_rcu();
+	} while (ret);
 }
 
 static void mem_cgroup_all_force_drain(struct mem_cgroup *memcg)
@@ -705,6 +733,7 @@ static void mem_cgroup_all_force_drain(s
 	mutex_lock(&memcg_force_drain_mutex);
 	schedule_on_each_cpu(mem_cgroup_local_force_drain);
 	mutex_unlock(&memcg_force_drain_mutex);
+	synchronize_rcu();
 }
 
 /*
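
For review, the ordering that mem_cgroup_all_force_drain() relies on,
restated outside the diff with comments (same calls as the last hunk
above, not additional code):

	mutex_lock(&memcg_force_drain_mutex);
	/* each CPU detaches its lazy list and queues it via call_rcu() */
	schedule_on_each_cpu(mem_cgroup_local_force_drain);
	mutex_unlock(&memcg_force_drain_mutex);
	/*
	 * One full grace period: any reader that could still see the
	 * queued page_cgroup objects has finished once this returns.
	 */
	synchronize_rcu();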