Message-Id: <20100311133123.ab10183c.nishimura@mxp.nes.nec.co.jp>
Date: Thu, 11 Mar 2010 13:31:23 +0900
From: Daisuke Nishimura <nishimura@....nes.nec.co.jp>
To: balbir@...ux.vnet.ibm.com
Cc: linux-mm@...ck.org, Andrea Righi <arighi@...eler.com>,
linux-kernel@...r.kernel.org,
Trond Myklebust <trond.myklebust@....uio.no>,
Suleiman Souhlal <suleiman@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
containers@...ts.linux-foundation.org,
Vivek Goyal <vgoyal@...hat.com>,
Daisuke Nishimura <nishimura@....nes.nec.co.jp>
Subject: Re: [PATCH mmotm 2.5/4] memcg: disable irq at page cgroup lock (Re:
[PATCH -mmotm 3/4] memcg: dirty pages accounting and limiting
infrastructure)
On Wed, 10 Mar 2010 09:26:24 +0530, Balbir Singh <balbir@...ux.vnet.ibm.com> wrote:
> * nishimura@....nes.nec.co.jp <nishimura@....nes.nec.co.jp> [2010-03-10 10:43:09]:
>
> > > Please please measure the performance overhead of this change.
> > >
> >
> > here.
> >
> > > > > > > > I made a patch below and measured the time(average of 10 times) of kernel build
> > > > > > > > on tmpfs(make -j8 on 8 CPU machine with 2.6.33 defconfig).
> > > > > > > >
> > > > > > > > <before>
> > > > > > > > - root cgroup: 190.47 sec
> > > > > > > > - child cgroup: 192.81 sec
> > > > > > > >
> > > > > > > > <after>
> > > > > > > > - root cgroup: 191.06 sec
> > > > > > > > - child cgroup: 193.06 sec
> > > > > > > >
> >
> > <after2(local_irq_save/restore)>
> > - root cgroup: 191.42 sec
> > - child cgroup: 193.55 sec
> >
> > hmm, I think it's within the error range, but by testing several times I can
> > see a tendency that it gets slower as I add more code. Using
> > local_irq_disable()/enable() everywhere except in mem_cgroup_update_file_mapped()
> > (it may be the only candidate to be called with irqs disabled in the future)
> > might be the better choice.
> >
>
> The error range would depend on things like the standard deviation and the
> number of repetitions. It might be good to keep the update_file_mapped change
> and see the impact. My concern is that on large systems the difference might
> be larger.
>
> --
> Three Cheers,
> Balbir
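As a side note on judging the error range, here is a minimal userspace sketch
(not part of the patch; the timing values below are placeholders rather than
measured data) of the mean and sample standard deviation Balbir mentions:

/* build with: gcc -O2 -o stats stats.c -lm */
#include <math.h>
#include <stdio.h>

static void mean_stddev(const double *t, int n, double *mean, double *sd)
{
	double sum = 0.0, sq = 0.0;
	int i;

	for (i = 0; i < n; i++)
		sum += t[i];
	*mean = sum / n;
	for (i = 0; i < n; i++)
		sq += (t[i] - *mean) * (t[i] - *mean);
	*sd = sqrt(sq / (n - 1));	/* sample standard deviation */
}

int main(void)
{
	/* placeholder timings; substitute the real 10-run samples */
	double before[10] = { 190.1, 190.5, 190.3, 190.8, 190.4,
			      190.6, 190.2, 190.7, 190.5, 190.6 };
	double mean, sd;

	mean_stddev(before, 10, &mean, &sd);
	printf("mean = %.2f sec, stddev = %.2f sec\n", mean, sd);
	return 0;
}

If the before/after means differ by much less than the per-run standard
deviation, the difference is within noise; more repetitions narrow that range.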
I made a patch (attached) using both local_irq_disable/enable and
local_irq_save/restore. local_irq_save/restore is used only in
mem_cgroup_update_file_mapped().

I also attached a histogram of 30 kernel-build runs in the root cgroup for
each version:

  before_root: no irq operations (original)
  after_root: local_irq_disable/enable everywhere
  after2_root: local_irq_save/restore everywhere
  after3_root: mixed version (attached)

Hmm, there seems to be a tendency of before < after < after3 < after2 in
build time. Should I replace the save/restore version with this mixed version?
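To illustrate why the save/restore variant is kept for
mem_cgroup_update_file_mapped(): a bare local_irq_enable() on unlock would
wrongly re-enable interrupts if the caller was already running with irqs
disabled. A hypothetical caller (illustration only, not code from this patch):

static void some_irq_off_path(struct page *page)
{
	unsigned long flags;

	local_irq_save(flags);	/* caller's own irq-off critical section */
	/*
	 * mem_cgroup_update_file_mapped() takes the page_cgroup lock with
	 * lock_page_cgroup_irqsave/unlock_page_cgroup_irqrestore, so it
	 * preserves the caller's irq-off state on unlock; the plain _irq
	 * variants would call local_irq_enable() here and break this
	 * critical section.
	 */
	mem_cgroup_update_file_mapped(page, 1);
	local_irq_restore(flags);
}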
Thanks,
Daisuke Nishimura.
===
include/linux/page_cgroup.h | 28 ++++++++++++++++++++++++++--
mm/memcontrol.c | 36 ++++++++++++++++++------------------
2 files changed, 44 insertions(+), 20 deletions(-)
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 30b0813..c0aca62 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -83,16 +83,40 @@ static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
return page_zonenum(pc->page);
}
-static inline void lock_page_cgroup(struct page_cgroup *pc)
+static inline void __lock_page_cgroup(struct page_cgroup *pc)
{
bit_spin_lock(PCG_LOCK, &pc->flags);
}
-static inline void unlock_page_cgroup(struct page_cgroup *pc)
+static inline void __unlock_page_cgroup(struct page_cgroup *pc)
{
bit_spin_unlock(PCG_LOCK, &pc->flags);
}
+#define lock_page_cgroup_irq(pc) \
+ do { \
+ local_irq_disable(); \
+ __lock_page_cgroup(pc); \
+ } while (0)
+
+#define unlock_page_cgroup_irq(pc) \
+ do { \
+ __unlock_page_cgroup(pc); \
+ local_irq_enable(); \
+ } while (0)
+
+#define lock_page_cgroup_irqsave(pc, flags) \
+ do { \
+ local_irq_save(flags); \
+ __lock_page_cgroup(pc); \
+ } while (0)
+
+#define unlock_page_cgroup_irqrestore(pc, flags) \
+ do { \
+ __unlock_page_cgroup(pc); \
+ local_irq_restore(flags); \
+ } while (0)
+
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 02ea959..11d483e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1354,12 +1354,13 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
{
struct mem_cgroup *mem;
struct page_cgroup *pc;
+ unsigned long flags;
pc = lookup_page_cgroup(page);
if (unlikely(!pc))
return;
- lock_page_cgroup(pc);
+ lock_page_cgroup_irqsave(pc, flags);
mem = pc->mem_cgroup;
if (!mem)
goto done;
@@ -1373,7 +1374,7 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], val);
done:
- unlock_page_cgroup(pc);
+ unlock_page_cgroup_irqrestore(pc, flags);
}
/*
@@ -1711,7 +1712,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
VM_BUG_ON(!PageLocked(page));
pc = lookup_page_cgroup(page);
- lock_page_cgroup(pc);
+ lock_page_cgroup_irq(pc);
if (PageCgroupUsed(pc)) {
mem = pc->mem_cgroup;
if (mem && !css_tryget(&mem->css))
@@ -1725,7 +1726,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
mem = NULL;
rcu_read_unlock();
}
- unlock_page_cgroup(pc);
+ unlock_page_cgroup_irq(pc);
return mem;
}
@@ -1742,9 +1743,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
if (!mem)
return;
- lock_page_cgroup(pc);
+ lock_page_cgroup_irq(pc);
if (unlikely(PageCgroupUsed(pc))) {
- unlock_page_cgroup(pc);
+ unlock_page_cgroup_irq(pc);
mem_cgroup_cancel_charge(mem);
return;
}
@@ -1774,7 +1775,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
mem_cgroup_charge_statistics(mem, pc, true);
- unlock_page_cgroup(pc);
+ unlock_page_cgroup_irq(pc);
/*
* "charge_statistics" updated event counter. Then, check it.
* Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -1844,12 +1845,12 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
{
int ret = -EINVAL;
- lock_page_cgroup(pc);
+ lock_page_cgroup_irq(pc);
if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
__mem_cgroup_move_account(pc, from, to, uncharge);
ret = 0;
}
- unlock_page_cgroup(pc);
+ unlock_page_cgroup_irq(pc);
/*
* check events
*/
@@ -1977,16 +1978,15 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
if (!(gfp_mask & __GFP_WAIT)) {
struct page_cgroup *pc;
-
pc = lookup_page_cgroup(page);
if (!pc)
return 0;
- lock_page_cgroup(pc);
+ lock_page_cgroup_irq(pc);
if (PageCgroupUsed(pc)) {
- unlock_page_cgroup(pc);
+ unlock_page_cgroup_irq(pc);
return 0;
}
- unlock_page_cgroup(pc);
+ unlock_page_cgroup_irq(pc);
}
if (unlikely(!mm && !mem))
@@ -2182,7 +2182,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
if (unlikely(!pc || !PageCgroupUsed(pc)))
return NULL;
- lock_page_cgroup(pc);
+ lock_page_cgroup_irq(pc);
mem = pc->mem_cgroup;
@@ -2221,7 +2221,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
*/
mz = page_cgroup_zoneinfo(pc);
- unlock_page_cgroup(pc);
+ unlock_page_cgroup_irq(pc);
memcg_check_events(mem, page);
/* at swapout, this memcg will be accessed to record to swap */
@@ -2231,7 +2231,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
return mem;
unlock_out:
- unlock_page_cgroup(pc);
+ unlock_page_cgroup_irq(pc);
return NULL;
}
@@ -2424,12 +2424,12 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
return 0;
pc = lookup_page_cgroup(page);
- lock_page_cgroup(pc);
+ lock_page_cgroup_irq(pc);
if (PageCgroupUsed(pc)) {
mem = pc->mem_cgroup;
css_get(&mem->css);
}
- unlock_page_cgroup(pc);
+ unlock_page_cgroup_irq(pc);
if (mem) {
ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
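A side note on the macro form above: the do { } while (0) wrapper makes each
lock/unlock pair parse as a single statement, so the helpers remain safe in
unbraced if/else (illustration only, not code from this patch):

	if (needs_lock)
		lock_page_cgroup_irq(pc);
	else
		pr_debug("lock already held\n");

Without the wrapper, the two statements inside the macro plus the trailing
semicolon would break the if/else association.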
[Attachment: "root_cgroup.bmp.gz" (application/octet-stream, 9426 bytes): histogram of the kernel-build runs]