Date:	Thu, 7 Aug 2014 15:36:14 +0200
From:	Michal Hocko <mhocko@...e.cz>
To:	Johannes Weiner <hannes@...xchg.org>
Cc:	Andrew Morton <akpm@...ux-foundation.org>,
	Tejun Heo <tj@...nel.org>, linux-mm@...ck.org,
	cgroups@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [patch 2/4] mm: memcontrol: add memory.current and memory.high
 to default hierarchy

On Mon 04-08-14 17:14:55, Johannes Weiner wrote:
[...]
> @@ -132,6 +137,19 @@ u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
>  u64 res_counter_uncharge_until(struct res_counter *counter,
>  			       struct res_counter *top,
>  			       unsigned long val);
> +
> +static inline unsigned long long res_counter_high(struct res_counter *cnt)

The soft limit uses res_counter_soft_limit_excess, which has quite a long
name, but at least those two should be consistent (the existing helper is
shown for comparison below the quoted hunk).
As a reply to this, I will post two helper patches which I have used to
make this and other res_counter operations easier.

> +{
> +	unsigned long long high = 0;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&cnt->lock, flags);
> +	if (cnt->usage > cnt->high)
> +		high = cnt->usage - cnt->high;
> +	spin_unlock_irqrestore(&cnt->lock, flags);
> +	return high;
> +}
> +
>  /**
>   * res_counter_margin - calculate chargeable space of a counter
>   * @cnt: the counter
> @@ -193,6 +211,17 @@ static inline void res_counter_reset_failcnt(struct res_counter *cnt)
>  	spin_unlock_irqrestore(&cnt->lock, flags);
>  }
>  
> +static inline int res_counter_set_high(struct res_counter *cnt,
> +				       unsigned long long high)
> +{
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&cnt->lock, flags);
> +	cnt->high = high;
> +	spin_unlock_irqrestore(&cnt->lock, flags);
> +	return 0;
> +}
> +
[...]
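
For comparison, res_counter_soft_limit_excess in include/linux/res_counter.h
currently looks roughly like this (reproduced from memory, so take it as a
sketch rather than an exact quote):

	/* How much usage exceeds the soft limit, or 0 if it is below it. */
	static inline unsigned long long
	res_counter_soft_limit_excess(struct res_counter *cnt)
	{
		unsigned long long excess;
		unsigned long flags;

		spin_lock_irqsave(&cnt->lock, flags);
		if (cnt->usage <= cnt->soft_limit)
			excess = 0;
		else
			excess = cnt->usage - cnt->soft_limit;
		spin_unlock_irqrestore(&cnt->lock, flags);
		return excess;
	}

A res_counter_high_excess (or similar) following that pattern would keep
the two names consistent.
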
> @@ -2541,16 +2541,16 @@ retry:
>  		goto done;
>  
>  	size = batch * PAGE_SIZE;
> -	if (!res_counter_charge(&memcg->res, size, &fail_res)) {
> +	if (!res_counter_charge(&memcg->res, size, &res)) {
>  		if (!do_swap_account)
>  			goto done_restock;
> -		if (!res_counter_charge(&memcg->memsw, size, &fail_res))
> +		if (!res_counter_charge(&memcg->memsw, size, &res))
>  			goto done_restock;
>  		res_counter_uncharge(&memcg->res, size);
> -		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
> +		mem_over_limit = mem_cgroup_from_res_counter(res, memsw);
>  		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
>  	} else
> -		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
> +		mem_over_limit = mem_cgroup_from_res_counter(res, res);
>  
>  	if (batch > nr_pages) {
>  		batch = nr_pages;
> @@ -2621,6 +2621,20 @@ bypass:
>  done_restock:
>  	if (batch > nr_pages)
>  		refill_stock(memcg, batch - nr_pages);
> +
> +	res = &memcg->res;
> +	while (res) {
> +		unsigned long long high = res_counter_high(res);
> +
> +		if (high) {
> +			unsigned long high_pages = high >> PAGE_SHIFT;
> +			struct mem_cgroup *memcg;
> +
> +			memcg = mem_cgroup_from_res_counter(res, res);
> +			mem_cgroup_reclaim(memcg, high_pages, gfp_mask, 0);
> +		}
> +		res = res->parent;
> +	}
>  done:
>  	return ret;
>  }

Why haven't you followed what we do for the hard limit here? In my
implementation I have the following:

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a37465fcd8ae..6a797c740ea5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2529,6 +2529,21 @@ static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
+static bool high_limit_excess(struct mem_cgroup *memcg,
+		struct mem_cgroup **memcg_over_limit)
+{
+	struct mem_cgroup *parent = memcg;
+
+	do {
+		if (res_counter_limit_excess(&parent->res, RES_HIGH_LIMIT)) {
+			*memcg_over_limit = parent;
+			return true;
+		}
+	} while ((parent = parent_mem_cgroup(parent)));
+
+	return false;
+}
+
 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 		      unsigned int nr_pages)
 {
@@ -2623,6 +2638,10 @@ bypass:
 	goto retry;
 
 done_restock:
+	/* Throttle charger a bit if it is above high limit. */
+	if (high_limit_excess(memcg, &mem_over_limit))
+		mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
+
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
 done:
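
Note that res_counter_limit_excess above is one of the helpers I mentioned
and does not exist yet; the RES_HIGH_LIMIT member name and the exact
signature below are only illustrative, the real patches follow in the
separate reply. A rough sketch of such a member-parameterized helper:

	/*
	 * Illustrative sketch only: return how much usage exceeds the
	 * limit selected by @member, or 0 if usage is below it.
	 */
	static inline unsigned long long
	res_counter_limit_excess(struct res_counter *cnt, int member)
	{
		unsigned long long limit, excess = 0;
		unsigned long flags;

		spin_lock_irqsave(&cnt->lock, flags);
		limit = (member == RES_HIGH_LIMIT) ? cnt->high : cnt->limit;
		if (cnt->usage > limit)
			excess = cnt->usage - limit;
		spin_unlock_irqrestore(&cnt->lock, flags);
		return excess;
	}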

-- 
Michal Hocko
SUSE Labs
