Message-ID: <20090406094351.GI7082@balbir.in.ibm.com>
Date:	Mon, 6 Apr 2009 15:13:51 +0530
From:	Balbir Singh <balbir@...ux.vnet.ibm.com>
To:	KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
Cc:	"linux-mm@...ck.org" <linux-mm@...ck.org>,
	"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
	"kosaki.motohiro@...fujitsu.com" <kosaki.motohiro@...fujitsu.com>
Subject: Re: [RFC][PATCH 3/9] soft limit update filter

* KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com> [2009-04-03 17:12:02]:

> No changes from v1.
> ==
> From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
> 
> Checking/updating soft limit information at every charge is overkill, so
> we need some filter.
> 
> This patch counts events in the memcg; when the event count exceeds a
> threshold, it updates the memcg's soft limit status and resets the event
> counter to 0.
> 
> The event counter is kept in the per-cpu statistics that are already in
> use, so in theory there is no significant overhead (extra cache misses, etc.).
> 
> Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
> ---
> Index: mmotm-2.6.29-Mar23/mm/memcontrol.c
> ===================================================================
> --- mmotm-2.6.29-Mar23.orig/mm/memcontrol.c
> +++ mmotm-2.6.29-Mar23/mm/memcontrol.c
> @@ -66,6 +66,7 @@ enum mem_cgroup_stat_index {
>  	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
>  	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
> 
> +	MEM_CGROUP_STAT_EVENTS,  /* sum of page-in/page-out for internal use */
>  	MEM_CGROUP_STAT_NSTATS,
>  };
> 
> @@ -105,6 +106,22 @@ static s64 mem_cgroup_local_usage(struct
>  	return ret;
>  }
> 
> +/* For internal use of per-cpu event counting. */
> +
> +static inline void
> +__mem_cgroup_stat_reset_safe(struct mem_cgroup_stat_cpu *stat,
> +		enum mem_cgroup_stat_index idx)
> +{
> +	stat->count[idx] = 0;
> +}

Why do we do this and why do we need a special event?

> +
> +static inline s64
> +__mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat,
> +			    enum mem_cgroup_stat_index idx)
> +{
> +	return stat->count[idx];
> +}
> +
>  /*
>   * per-zone information in memory controller.
>   */
> @@ -235,6 +252,8 @@ static void mem_cgroup_charge_statistics
>  	else
>  		__mem_cgroup_stat_add_safe(cpustat,
>  				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
> +	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_EVENTS, 1);
> +
>  	put_cpu();
>  }
> 
> @@ -897,9 +916,26 @@ static void record_last_oom(struct mem_c
>  	mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
>  }
> 
> +#define SOFTLIMIT_EVENTS_THRESH (1024) /* 1024 page-in/out events */
> +/*
> + * Returns true if the sum of page-in/page-out events since the last
> + * check exceeds SOFTLIMIT_EVENTS_THRESH. (The counter is per-cpu.)
> + */
>  static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
>  {
> -	return false;
> +	bool ret = false;
> +	int cpu = get_cpu();
> +	s64 val;
> +	struct mem_cgroup_stat_cpu *cpustat;
> +
> +	cpustat = &mem->stat.cpustat[cpu];
> +	val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_EVENTS);
> +	if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) {
> +		__mem_cgroup_stat_reset_safe(cpustat, MEM_CGROUP_STAT_EVENTS);
> +		ret = true;
> +	}
> +	put_cpu();
> +	return ret;
>  }
>

It is good to have the caller and the function in the same patch;
otherwise you'll get unused-function warnings. I think this function
can be simplified further:

1. Let's get rid of MEM_CGROUP_STAT_EVENTS
2. Let's rewrite mem_cgroup_soft_limit_check as

static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
{
	bool ret = false;
	int cpu = get_cpu();
	s64 pgin, pgout, val;
	struct mem_cgroup_stat_cpu *cpustat;

	cpustat = &mem->stat.cpustat[cpu];
	pgin = __mem_cgroup_stat_read_local(cpustat,
			MEM_CGROUP_STAT_PGPGIN_COUNT);
	pgout = __mem_cgroup_stat_read_local(cpustat,
			MEM_CGROUP_STAT_PGPGOUT_COUNT);
	val = pgin + pgout - mem->last_event_count;
	if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) {
		mem->last_event_count = pgin + pgout;
		ret = true;
	}
	put_cpu();
	return ret;
}

mem->last_event_count can either be atomic or protected by one of the
locks you intend to introduce. This avoids the overhead of bumping an
extra event counter on every call to mem_cgroup_charge_statistics().
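
For example, here is a minimal sketch of the atomic variant. It assumes
a new atomic64_t last_event_count field in struct mem_cgroup (the field
name is hypothetical, and atomic64_t availability depends on the
architecture at this point):

/* Sketch only: assumes atomic64_t last_event_count in struct mem_cgroup. */
static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
{
	int cpu = get_cpu();
	s64 pgin, pgout, total, old;
	struct mem_cgroup_stat_cpu *cpustat = &mem->stat.cpustat[cpu];

	pgin = __mem_cgroup_stat_read_local(cpustat,
			MEM_CGROUP_STAT_PGPGIN_COUNT);
	pgout = __mem_cgroup_stat_read_local(cpustat,
			MEM_CGROUP_STAT_PGPGOUT_COUNT);
	put_cpu();

	total = pgin + pgout;
	old = atomic64_read(&mem->last_event_count);
	if (likely(total - old <= SOFTLIMIT_EVENTS_THRESH))
		return false;
	/*
	 * cmpxchg ensures that only one of the CPUs crossing the
	 * threshold wins and triggers the soft limit update.
	 */
	return atomic64_cmpxchg(&mem->last_event_count, old, total) == old;
}

The cmpxchg keeps concurrent chargers from all resetting the baseline at
once, at the cost of a single atomic read on the common path.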


 
>  static void mem_cgroup_update_soft_limit(struct mem_cgroup *mem)
> 
> 

-- 
	Balbir
