Date:	Thu, 3 Sep 2009 09:34:23 +0900 (JST)
From:	"KAMEZAWA Hiroyuki" <kamezawa.hiroyu@...fujitsu.com>
To:	"Balbir Singh" <balbir@...ux.vnet.ibm.com>
Cc:	"KAMEZAWA Hiroyuki" <kamezawa.hiroyu@...fujitsu.com>,
	"linux-mm@...ck.org" <linux-mm@...ck.org>,
	"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
	"nishimura@....nes.nec.co.jp" <nishimura@....nes.nec.co.jp>,
	"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>
Subject: Re: [mmotm][PATCH 2/2 v2] memcg: reduce calls for soft limit excess

Balbir Singh wrote:
> On Wed, Sep 2, 2009 at 11:26 AM, KAMEZAWA Hiroyuki
> <kamezawa.hiroyu@...fujitsu.com> wrote:
>> In the charge/uncharge/reclaim paths, usage_in_excess is calculated
>> repeatedly, and each calculation takes res_counter's spin_lock.
>>
>
> I think the changelog needs to mention some of the refactoring you've
> done below as well, like the rename of new_usage_in_excess to excess.
>
Will do when I send out v3. (And I'll have to, anyway.)
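
For reference, the per-call cost being avoided looks roughly like this.
A sketch of res_counter_soft_limit_excess() as in the 2.6.31-era soft
limit series; the exact code may differ slightly:

        /*
         * kernel/res_counter.c (sketch): reading the soft limit excess
         * takes the counter's spinlock on every call, which is why the
         * patch computes it once and passes the value down.
         */
        unsigned long long res_counter_soft_limit_excess(struct res_counter *cnt)
        {
                unsigned long long excess;
                unsigned long flags;

                spin_lock_irqsave(&cnt->lock, flags);
                if (cnt->usage <= cnt->soft_limit)
                        excess = 0;
                else
                        excess = cnt->usage - cnt->soft_limit;
                spin_unlock_irqrestore(&cnt->lock, flags);
                return excess;
        }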

Bye,
-Kame
>
>
>> This patch removes unnecessary calls to res_counter_soft_limit_excess().
>>
>> Changelog:
>>  - fixed description.
>>  - fixed unsigned long to be unsigned long long (Thanks, Nishimura)
>>
>> Reviewed-by: Daisuke Nishimura <nishimura@....nes.nec.co.jp>
>> Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
>> ---
>>  mm/memcontrol.c |   31 +++++++++++++++----------------
>>  1 file changed, 15 insertions(+), 16 deletions(-)
>>
>> Index: mmotm-2.6.31-Aug27/mm/memcontrol.c
>> ===================================================================
>> --- mmotm-2.6.31-Aug27.orig/mm/memcontrol.c
>> +++ mmotm-2.6.31-Aug27/mm/memcontrol.c
>> @@ -313,7 +313,8 @@ soft_limit_tree_from_page(struct page *p
>>  static void
>>  __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
>>                                  struct mem_cgroup_per_zone *mz,
>> -                                struct mem_cgroup_tree_per_zone *mctz)
>> +                                struct mem_cgroup_tree_per_zone *mctz,
>> +                                unsigned long long new_usage_in_excess)
>>  {
>>          struct rb_node **p = &mctz->rb_root.rb_node;
>>          struct rb_node *parent = NULL;
>> @@ -322,7 +323,9 @@ __mem_cgroup_insert_exceeded(struct mem_
>>          if (mz->on_tree)
>>                  return;
>>
>> -        mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res);
>> +        mz->usage_in_excess = new_usage_in_excess;
>> +        if (!mz->usage_in_excess)
>> +                return;
>>          while (*p) {
>>                  parent = *p;
>>                  mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
>> @@ -382,7 +385,7 @@ static bool mem_cgroup_soft_limit_check(
>>
>>  static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
>>  {
>> -        unsigned long long new_usage_in_excess;
>> +        unsigned long long excess;
>>          struct mem_cgroup_per_zone *mz;
>>          struct mem_cgroup_tree_per_zone *mctz;
>>          int nid = page_to_nid(page);
>> @@ -395,25 +398,21 @@ static void mem_cgroup_update_tree(struc
>>           */
>>          for (; mem; mem = parent_mem_cgroup(mem)) {
>>                  mz = mem_cgroup_zoneinfo(mem, nid, zid);
>> -                new_usage_in_excess =
>> -                                res_counter_soft_limit_excess(&mem->res);
>> +                excess = res_counter_soft_limit_excess(&mem->res);
>>                  /*
>>                   * We have to update the tree if mz is on RB-tree or
>>                   * mem is over its softlimit.
>>                   */
>> -                if (new_usage_in_excess || mz->on_tree) {
>> +                if (excess || mz->on_tree) {
>>                          spin_lock(&mctz->lock);
>>                          /* if on-tree, remove it */
>>                          if (mz->on_tree)
>>                                  __mem_cgroup_remove_exceeded(mem, mz, mctz);
>>                          /*
>> -                         * if over soft limit, insert again. mz->usage_in_excess
>> -                         * will be updated properly.
>> +                         * Insert again. mz->usage_in_excess will be updated.
>> +                         * If excess is 0, no tree ops.
>>                           */
>> -                        if (new_usage_in_excess)
>> -                                __mem_cgroup_insert_exceeded(mem, mz, mctz);
>> -                        else
>> -                                mz->usage_in_excess = 0;
>> +                        __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
>>                          spin_unlock(&mctz->lock);
>>                  }
>>          }
>> @@ -2216,6 +2215,7 @@ unsigned long mem_cgroup_soft_limit_recl
>>          unsigned long reclaimed;
>>          int loop = 0;
>>          struct mem_cgroup_tree_per_zone *mctz;
>> +        unsigned long long excess;
>>
>>          if (order > 0)
>>                  return 0;
>> @@ -2260,9 +2260,8 @@ unsigned long mem_cgroup_soft_limit_recl
>>                                          __mem_cgroup_largest_soft_limit_node(mctz);
>>                          } while (next_mz == mz);
>>                  }
>> -                mz->usage_in_excess =
>> -                                res_counter_soft_limit_excess(&mz->mem->res);
>>                  __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
>> +                excess = res_counter_soft_limit_excess(&mz->mem->res);
>>                  /*
>>                   * One school of thought says that we should not add
>>                   * back the node to the tree if reclaim returns 0.
>> @@ -2271,8 +2270,8 @@ unsigned long mem_cgroup_soft_limit_recl
>>                   * memory to reclaim from. Consider this as a longer
>>                   * term TODO.
>>                   */
>> -                if (mz->usage_in_excess)
>> -                        __mem_cgroup_insert_exceeded(mz->mem, mz, mctz);
>> +                /* If excess == 0, no tree ops */
>> +                __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
>>                  spin_unlock(&mctz->lock);
>>                  css_put(&mz->mem->css);
>>                  loop++;
>
> OK.. so every time we call __mem_cgroup_insert_exceeded we save one
> res_counter operation.
>
> Looks good
>
> Acked-by: Balbir Singh <balbir@...ux.vnet.ibm.com>
>
> Balbir Singh
>
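
For reference, the RB-tree these helpers maintain is the per-zone soft
limit tree. Roughly, from the mmotm soft limit patches, trimmed to the
fields used above (names may differ in later kernels):

        struct mem_cgroup_tree_per_zone {
                struct rb_root rb_root;         /* sorted by usage_in_excess */
                spinlock_t lock;                /* the mctz->lock taken above */
        };

        struct mem_cgroup_per_zone {
                ...
                struct rb_node tree_node;       /* node in the tree above */
                unsigned long long usage_in_excess;     /* the RB-tree key */
                bool on_tree;                   /* guards double insert/remove */
                struct mem_cgroup *mem;         /* back pointer, i.e. mz->mem */
                ...
        };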


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
