Date:	Wed, 15 Oct 2014 17:28:30 +0200
From:	Michal Hocko <mhocko@...e.cz>
To:	Johannes Weiner <hannes@...xchg.org>
Cc:	Andrew Morton <akpm@...ux-foundation.org>,
	Vladimir Davydov <vdavydov@...allels.com>, linux-mm@...ck.org,
	cgroups@...r.kernel.org, linux-kernel@...r.kernel.org
Subject:	Re: [patch 5/5] mm: memcontrol: remove synchronous stock draining
 code

On Tue 14-10-14 12:20:37, Johannes Weiner wrote:
> With charge reparenting, the last synchronous stock drainer is gone.
> 
> Signed-off-by: Johannes Weiner <hannes@...xchg.org>

Acked-by: Michal Hocko <mhocko@...e.cz>
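
Worth noting for anyone skimming: the trylock that used to gate
drain_all_stock_async() now gates drain_all_stock() itself, so
concurrent callers simply skip the drain instead of queueing behind it.
A minimal userspace sketch of that gating pattern (plain pthreads;
drain_all_caches() and its printf body are illustrative stand-ins, not
the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t drain_mutex = PTHREAD_MUTEX_INITIALIZER;

static void drain_all_caches(void)
{
	/* A drain is already in flight; adding more workers buys nothing. */
	if (pthread_mutex_trylock(&drain_mutex) != 0)
		return;

	printf("draining per-CPU caches\n");	/* stand-in for the real drain */

	pthread_mutex_unlock(&drain_mutex);
}

int main(void)
{
	drain_all_caches();	/* first caller does the work, others return early */
	return 0;
}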

> ---
>  mm/memcontrol.c | 46 ++++++----------------------------------------
>  1 file changed, 6 insertions(+), 40 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index ce3ed7cc5c30..ac7d6cefcc63 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -634,8 +634,6 @@ static void disarm_static_keys(struct mem_cgroup *memcg)
>  	disarm_kmem_keys(memcg);
>  }
>  
> -static void drain_all_stock_async(struct mem_cgroup *memcg);
> -
>  static struct mem_cgroup_per_zone *
>  mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
>  {
> @@ -2285,13 +2283,15 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>  
>  /*
>   * Drains all per-CPU charge caches for given root_memcg resp. subtree
> - * of the hierarchy under it. sync flag says whether we should block
> - * until the work is done.
> + * of the hierarchy under it.
>   */
> -static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
> +static void drain_all_stock(struct mem_cgroup *root_memcg)
>  {
>  	int cpu, curcpu;
>  
> +	/* If someone's already draining, avoid adding more workers. */
> +	if (!mutex_trylock(&percpu_charge_mutex))
> +		return;
>  	/* Notify other cpus that system-wide "drain" is running */
>  	get_online_cpus();
>  	curcpu = get_cpu();
> @@ -2312,41 +2312,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
>  		}
>  	}
>  	put_cpu();
> -
> -	if (!sync)
> -		goto out;
> -
> -	for_each_online_cpu(cpu) {
> -		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
> -		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
> -			flush_work(&stock->work);
> -	}
> -out:
>  	put_online_cpus();
> -}
> -
> -/*
> - * Tries to drain stocked charges in other cpus. This function is asynchronous
> - * and just put a work per cpu for draining localy on each cpu. Caller can
> - * expects some charges will be back later but cannot wait for it.
> - */
> -static void drain_all_stock_async(struct mem_cgroup *root_memcg)
> -{
> -	/*
> -	 * If someone calls draining, avoid adding more kworker runs.
> -	 */
> -	if (!mutex_trylock(&percpu_charge_mutex))
> -		return;
> -	drain_all_stock(root_memcg, false);
> -	mutex_unlock(&percpu_charge_mutex);
> -}
> -
> -/* This is a synchronous drain interface. */
> -static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
> -{
> -	/* called when force_empty is called */
> -	mutex_lock(&percpu_charge_mutex);
> -	drain_all_stock(root_memcg, true);
>  	mutex_unlock(&percpu_charge_mutex);
>  }
>  
> @@ -2455,7 +2421,7 @@ retry:
>  		goto retry;
>  
>  	if (!drained) {
> -		drain_all_stock_async(mem_over_limit);
> +		drain_all_stock(mem_over_limit);
>  		drained = true;
>  		goto retry;
>  	}
> -- 
> 2.1.2
> 
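
The last hunk keeps try_charge()'s one-shot retry: the drain is now
purely fire-and-forget, so the caller kicks it at most once and retries
rather than blocking on flush_work(). A self-contained sketch of that
caller pattern, with try_alloc() and kick_drain() as hypothetical
stand-ins for the charge attempt and the asynchronous drain:

#include <stdbool.h>
#include <stdio.h>

static int free_units;			/* hypothetical cached stock */

static bool try_alloc(void)		/* hypothetical charge attempt */
{
	if (free_units > 0) {
		free_units--;
		return true;
	}
	return false;
}

static void kick_drain(void)		/* hypothetical async drain trigger */
{
	free_units += 4;		/* pretend workers returned cached charges */
}

static bool charge_with_retry(void)
{
	bool drained = false;
retry:
	if (try_alloc())
		return true;
	if (!drained) {
		kick_drain();		/* fire and forget, like drain_all_stock() */
		drained = true;
		goto retry;
	}
	return false;
}

int main(void)
{
	printf("charged: %d\n", charge_with_retry());
	return 0;
}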

-- 
Michal Hocko
SUSE Labs