[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <ff8069241ae388c64cbb2d7d8d51fe4e.squirrel@webmail-b.css.fujitsu.com>
Date: Thu, 3 Sep 2009 09:34:23 +0900 (JST)
From: "KAMEZAWA Hiroyuki" <kamezawa.hiroyu@...fujitsu.com>
To: "Balbir Singh" <balbir@...ux.vnet.ibm.com>
Cc: "KAMEZAWA Hiroyuki" <kamezawa.hiroyu@...fujitsu.com>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"nishimura@....nes.nec.co.jp" <nishimura@....nes.nec.co.jp>,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>
Subject: Re: [mmotm][PATCH 2/2 v2] memcg: reduce calls for soft limit excess
Balbir Singh wrote:
> On Wed, Sep 2, 2009 at 11:26 AM, KAMEZAWA
> Hiroyuki<kamezawa.hiroyu@...fujitsu.com> wrote:
>> In charge/uncharge/reclaim path, usage_in_excess is calculated
>> repeatedly and
>> it takes res_counter's spin_lock every time.
>>
>
> I think the changelog needs to mention some refactoring you've done
> below as well, like change new_charge_in_excess to excess.
>
will do when I send out v3. (and I'll have to do, anyway.)
Bye,
-Kame
>
>
>> This patch removes unnecessary calls for res_count_soft_limit_excess.
>>
>> Changelog:
>>  - fixed description.
>>  - fixed unsigned long to be unsigned long long (Thanks, Nishimura)
>>
>> Reviewed-by: Daisuke Nishimura <nishimura@....nes.nec.co.jp>
>> Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
>> ---
>>  mm/memcontrol.c |   31 +++++++++++++++----------------
>>  1 file changed, 15 insertions(+), 16 deletions(-)
>>
>> Index: mmotm-2.6.31-Aug27/mm/memcontrol.c
>> ===================================================================
>> --- mmotm-2.6.31-Aug27.orig/mm/memcontrol.c
>> +++ mmotm-2.6.31-Aug27/mm/memcontrol.c
>> @@ -313,7 +313,8 @@ soft_limit_tree_from_page(struct page *p
>>  static void
>>  __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
>>                                 struct mem_cgroup_per_zone *mz,
>> -                               struct mem_cgroup_tree_per_zone *mctz)
>> +                               struct mem_cgroup_tree_per_zone *mctz,
>> +                               unsigned long long new_usage_in_excess)
>>  {
>>        struct rb_node **p = &mctz->rb_root.rb_node;
>>        struct rb_node *parent = NULL;
>> @@ -322,7 +323,9 @@ __mem_cgroup_insert_exceeded(struct mem_
>>        if (mz->on_tree)
>>                return;
>>
>> -       mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res);
>> +       mz->usage_in_excess = new_usage_in_excess;
>> +       if (!mz->usage_in_excess)
>> +               return;
>>        while (*p) {
>>                parent = *p;
>>                mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
>> @@ -382,7 +385,7 @@ static bool mem_cgroup_soft_limit_check(
>>
>>  static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
>>  {
>> -       unsigned long long new_usage_in_excess;
>> +       unsigned long long excess;
>>        struct mem_cgroup_per_zone *mz;
>>        struct mem_cgroup_tree_per_zone *mctz;
>>        int nid = page_to_nid(page);
>> @@ -395,25 +398,21 @@ static void mem_cgroup_update_tree(struc
>>         */
>>        for (; mem; mem = parent_mem_cgroup(mem)) {
>>                mz = mem_cgroup_zoneinfo(mem, nid, zid);
>> -               new_usage_in_excess =
>> -                       res_counter_soft_limit_excess(&mem->res);
>> +               excess = res_counter_soft_limit_excess(&mem->res);
>>                /*
>>                 * We have to update the tree if mz is on RB-tree or
>>                 * mem is over its softlimit.
>>                 */
>> -               if (new_usage_in_excess || mz->on_tree) {
>> +               if (excess || mz->on_tree) {
>>                        spin_lock(&mctz->lock);
>>                        /* if on-tree, remove it */
>>                        if (mz->on_tree)
>>                                __mem_cgroup_remove_exceeded(mem, mz, mctz);
>>                        /*
>> -                        * if over soft limit, insert again. mz->usage_in_excess
>> -                        * will be updated properly.
>> +                        * Insert again. mz->usage_in_excess will be updated.
>> +                        * If excess is 0, no tree ops.
>>                         */
>> -                       if (new_usage_in_excess)
>> -                               __mem_cgroup_insert_exceeded(mem, mz, mctz);
>> -                       else
>> -                               mz->usage_in_excess = 0;
>> +                       __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
>>                        spin_unlock(&mctz->lock);
>>                }
>>        }
>> @@ -2216,6 +2215,7 @@ unsigned long mem_cgroup_soft_limit_recl
>>        unsigned long reclaimed;
>>        int loop = 0;
>>        struct mem_cgroup_tree_per_zone *mctz;
>> +       unsigned long long excess;
>>
>>        if (order > 0)
>>                return 0;
>> @@ -2260,9 +2260,8 @@ unsigned long mem_cgroup_soft_limit_recl
>>                                next_mz =
>>                                        __mem_cgroup_largest_soft_limit_node(mctz);
>>                        } while (next_mz == mz);
>>                }
>> -               mz->usage_in_excess =
>> -                       res_counter_soft_limit_excess(&mz->mem->res);
>>                __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
>> +               excess = res_counter_soft_limit_excess(&mz->mem->res);
>>                /*
>>                 * One school of thought says that we should not add
>>                 * back the node to the tree if reclaim returns 0.
>> @@ -2271,8 +2270,8 @@ unsigned long mem_cgroup_soft_limit_recl
>>                 * memory to reclaim from. Consider this as a longer
>>                 * term TODO.
>>                 */
>> -               if (mz->usage_in_excess)
>> -                       __mem_cgroup_insert_exceeded(mz->mem, mz, mctz);
>> +               /* If excess == 0, no tree ops */
>> +               __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
>>                spin_unlock(&mctz->lock);
>>                css_put(&mz->mem->css);
>>                loop++;
>
> OK.. so everytime we call __mem_cgroup_insert_exceeded we save one
> res_counter operation.
>
> Looks good
>
> Acked-by: Balbir Singh <balbir@...ux.vnet.ibm.com>
>
> Balbir Singh
>
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists