Date:	Mon, 12 Aug 2013 16:07:22 +1000
From:	Stephen Rothwell <sfr@...b.auug.org.au>
To:	Andrew Morton <akpm@...ux-foundation.org>
Cc:	linux-next@...r.kernel.org, linux-kernel@...r.kernel.org,
	Tejun Heo <tj@...nel.org>, Michal Hocko <mhocko@...e.cz>,
	Li Zefan <lizefan@...wei.com>
Subject: linux-next: manual merge of the akpm-current tree with the cgroup
 tree

Hi Andrew,

Today's linux-next merge of the akpm-current tree got a conflict in
mm/memcontrol.c between the conversion to cgroup_subsys_state based
APIs in the cgroup tree and the memcg soft limit reclaim rework in the
akpm-current tree.

I fixed it up (using Michal's email as a guide - see below) and can carry
the fix as necessary (no action is required).

-- 
Cheers,
Stephen Rothwell                    sfr@...b.auug.org.au

diff --cc mm/memcontrol.c
index 57f6d67,6f292b8..0000000
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@@ -1075,13 -966,30 +959,13 @@@ mem_cgroup_filter(struct mem_cgroup *me
   * helper function to be used by mem_cgroup_iter
   */
  static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
- 		struct mem_cgroup *last_visited)
+ 		struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond)
  {
 -	struct cgroup *prev_cgroup, *next_cgroup;
 -
 -	/*
 -	 * Root is not visited by cgroup iterators so it needs an
 -	 * explicit visit.
 -	 */
 -	if (!last_visited) {
 -		switch (mem_cgroup_filter(root, root, cond)) {
 -		case VISIT:
 -			return root;
 -		case SKIP:
 -			break;
 -		case SKIP_TREE:
 -			return NULL;
 -		}
 -	}
 +	struct cgroup_subsys_state *prev_css, *next_css;
  
 -	prev_cgroup = (last_visited == root || !last_visited) ? NULL
 -		: last_visited->css.cgroup;
 +	prev_css = last_visited ? &last_visited->css : NULL;
  skip_node:
 -	next_cgroup = cgroup_next_descendant_pre(
 -			prev_cgroup, root->css.cgroup);
 +	next_css = css_next_descendant_pre(prev_css, &root->css);
  
  	/*
  	 * Even if we found a group we have to make sure it is
@@@ -1090,14 -998,31 +974,34 @@@
  	 * last_visited css is safe to use because it is
  	 * protected by css_get and the tree walk is rcu safe.
  	 */
 -	if (next_cgroup) {
 -		struct mem_cgroup *mem = mem_cgroup_from_cont(
 -				next_cgroup);
 +	if (next_css) {
 +		struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
  
- 		if (css_tryget(&mem->css))
- 			return mem;
- 		else {
- 			prev_css = next_css;
+ 		switch (mem_cgroup_filter(mem, root, cond)) {
+ 		case SKIP:
++			prev_css = next_css;
  			goto skip_node;
+ 		case SKIP_TREE:
++			if (mem == root)
++				return NULL;
+ 			/*
+ 			 * cgroup_rightmost_descendant is not an optimal way to
+ 			 * skip through a subtree (especially for imbalanced
+ 			 * trees leaning to right) but that's what we have right
+ 			 * now. More effective solution would be traversing
+ 			 * right-up for first non-NULL without calling
+ 			 * cgroup_next_descendant_pre afterwards.
+ 			 */
 -			prev_cgroup = cgroup_rightmost_descendant(next_cgroup);
++			prev_css = css_rightmost_descendant(next_css);
+ 			goto skip_node;
+ 		case VISIT:
+ 			if (css_tryget(&mem->css))
+ 				return mem;
+ 			else {
++				prev_css = next_css;
+ 				goto skip_node;
+ 			}
+ 			break;
  		}
  	}
  
@@@ -4983,11 -4841,9 +4815,10 @@@ static int mem_cgroup_force_empty(struc
  	return 0;
  }
  
 -static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
 +static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
 +					unsigned int event)
  {
 -	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 +	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
- 	int ret;
  
  	if (mem_cgroup_is_root(memcg))
  		return -EINVAL;
@@@ -5136,8 -4991,8 +4963,8 @@@ static int memcg_update_kmem_limit(stru
  	 */
  	mutex_lock(&memcg_create_mutex);
  	mutex_lock(&set_limit_mutex);
- 	if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
+ 	if (!memcg->kmem_account_flags && val != RES_COUNTER_MAX) {
 -		if (cgroup_task_count(cont) || memcg_has_children(memcg)) {
 +		if (cgroup_task_count(css->cgroup) || memcg_has_children(memcg)) {
  			ret = -EBUSY;
  			goto out;
  		}
@@@ -6174,31 -6035,8 +5998,8 @@@ struct mem_cgroup *parent_mem_cgroup(st
  }
  EXPORT_SYMBOL(parent_mem_cgroup);
  
- static void __init mem_cgroup_soft_limit_tree_init(void)
- {
- 	struct mem_cgroup_tree_per_node *rtpn;
- 	struct mem_cgroup_tree_per_zone *rtpz;
- 	int tmp, node, zone;
- 
- 	for_each_node(node) {
- 		tmp = node;
- 		if (!node_state(node, N_NORMAL_MEMORY))
- 			tmp = -1;
- 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
- 		BUG_ON(!rtpn);
- 
- 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
- 
- 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
- 			rtpz = &rtpn->rb_tree_per_zone[zone];
- 			rtpz->rb_root = RB_ROOT;
- 			spin_lock_init(&rtpz->lock);
- 		}
- 	}
- }
- 
  static struct cgroup_subsys_state * __ref
 -mem_cgroup_css_alloc(struct cgroup *cont)
 +mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
  {
  	struct mem_cgroup *memcg;
  	long error = -ENOMEM;
@@@ -6295,14 -6135,19 +6097,21 @@@ static void mem_cgroup_invalidate_recla
  		mem_cgroup_iter_invalidate(root_mem_cgroup);
  }
  
 -static void mem_cgroup_css_offline(struct cgroup *cont)
 +static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
  {
 -	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 +	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 +
 +	kmem_cgroup_css_offline(memcg);
  
  	mem_cgroup_invalidate_reclaim_iterators(memcg);
  	mem_cgroup_reparent_charges(memcg);
+ 	if (memcg->soft_contributed) {
+ 		while ((memcg = parent_mem_cgroup(memcg)))
+ 			atomic_dec(&memcg->children_in_excess);
+ 
+ 		if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
+ 			atomic_dec(&root_mem_cgroup->children_in_excess);
+ 	}
  	mem_cgroup_destroy_all_caches(memcg);
  	vmpressure_cleanup(&memcg->vmpressure);
  }
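
The resolved __mem_cgroup_iter_next() above is a pre-order walk over the
css tree in which a filter can visit a node, skip just that node, or skip
its whole subtree (the SKIP_TREE case, via css_rightmost_descendant()).
The following standalone C sketch models that traversal pattern on a toy
tree. It is illustrative only, not kernel code: every name in it is
invented, and it omits the reference counting that the kernel version does
with css_tryget().

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;
	struct node *first_child;
	struct node *next_sibling;
};

enum filter_result { VISIT, SKIP, SKIP_TREE };
typedef enum filter_result (*filter_fn)(struct node *n);

/* pre-order successor of pos within the subtree rooted at root,
 * modelling css_next_descendant_pre() */
static struct node *next_descendant_pre(struct node *pos, struct node *root)
{
	if (!pos)
		return root;			/* the walk starts at the root */
	if (pos->first_child)
		return pos->first_child;	/* descend first */
	while (pos != root) {
		if (pos->next_sibling)		/* then go right, */
			return pos->next_sibling;
		pos = pos->parent;		/* climbing as needed */
	}
	return NULL;
}

/* deepest rightmost node below pos, modelling css_rightmost_descendant();
 * stepping here and then taking one pre-order step leaves pos's subtree,
 * which is how the SKIP_TREE case above skips a whole subtree */
static struct node *rightmost_descendant(struct node *pos)
{
	struct node *next;

	for (;;) {
		next = pos->first_child;
		if (!next)
			return pos;
		while (next->next_sibling)
			next = next->next_sibling;
		pos = next;
	}
}

/* the shape of the merged iterator: return the next node the filter
 * wants visited, or NULL when the walk is done (the kernel version
 * additionally pins the result with css_tryget() and retries) */
static struct node *iter_next(struct node *root, struct node *last,
			      filter_fn cond)
{
	struct node *prev = last, *next;

skip_node:
	next = next_descendant_pre(prev, root);
	if (!next)
		return NULL;

	switch (cond(next)) {
	case SKIP:
		prev = next;
		goto skip_node;
	case SKIP_TREE:
		if (next == root)
			return NULL;
		prev = rightmost_descendant(next);
		goto skip_node;
	case VISIT:
		return next;
	}
	return NULL;
}

static enum filter_result skip_b(struct node *n)
{
	return n->name[0] == 'B' ? SKIP_TREE : VISIT;
}

int main(void)
{
	/* root -> { A, B -> { C }, D }: skipping B's subtree prints root, A, D */
	struct node root = { "root", NULL, NULL, NULL };
	struct node a = { "A", &root, NULL, NULL };
	struct node b = { "B", &root, NULL, NULL };
	struct node c = { "C", &b, NULL, NULL };
	struct node d = { "D", &root, NULL, NULL };
	struct node *n = NULL;

	root.first_child = &a;
	a.next_sibling = &b;
	b.first_child = &c;
	b.next_sibling = &d;

	while ((n = iter_next(&root, n, skip_b)))
		printf("%s\n", n->name);
	return 0;
}

The comment kept in the resolution notes that going through the rightmost
descendant is not optimal for trees leaning to the right; the alternative
it suggests, climbing right-up directly to the first non-NULL sibling,
would look roughly like this in the same toy model (again an illustration,
not an existing kernel helper):

/* next node in pre-order after pos's entire subtree: the "right-up"
 * walk the comment suggests, avoiding the rightmost-descendant detour */
static struct node *skip_subtree(struct node *pos, struct node *root)
{
	while (pos != root) {
		if (pos->next_sibling)
			return pos->next_sibling;
		pos = pos->parent;
	}
	return NULL;
}

with the SKIP_TREE case feeding that result straight back into the filter
instead of detouring through rightmost_descendant() and another
next_descendant_pre() step.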
