Message-ID: <20181002173956.7e5ca5c3@canb.auug.org.au>
Date:   Tue, 2 Oct 2018 17:39:56 +1000
From:   Stephen Rothwell <sfr@...b.auug.org.au>
To:     Andrew Morton <akpm@...ux-foundation.org>,
        Matthew Wilcox <willy@...radead.org>
Cc:     Linux-Next Mailing List <linux-next@...r.kernel.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Johannes Weiner <hannes@...xchg.org>
Subject: linux-next: manual merge of the akpm-current tree with the xarray
 tree

Hi all,

Today's linux-next merge of the akpm-current tree got a conflict in:

  mm/workingset.c

between commit:

  3159f943aafd ("xarray: Replace exceptional entries")
  bb7bbd491985 ("mm: Convert workingset to XArray")

from the xarray tree and commit:

  2685383fce55 ("mm: workingset: don't drop refault information prematurely")
  544eea1cbede ("mm: workingset: tell cache transitions from workingset thrashing")

from the akpm-current tree.

I fixed it up (hopefully - see below) and can carry the fix as
necessary. This is now fixed as far as linux-next is concerned, but any
non-trivial conflicts should be mentioned to your upstream maintainer
when your tree is submitted for merging.  You may also want to consider
cooperating with the maintainer of the conflicting tree to minimise any
particularly complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc mm/workingset.c
index 5cfb29ec3fd9,c3a9bb797b6d..000000000000
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@@ -155,9 -167,8 +167,8 @@@
   * refault distance will immediately activate the refaulting page.
   */
  
 -#define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY + \
 +#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
- 			 NODES_SHIFT +				\
- 			 MEM_CGROUP_ID_SHIFT)
+ 			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
  #define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
  
  /*
@@@ -170,22 -181,28 +181,27 @@@
   */
  static unsigned int bucket_order __read_mostly;
  
- static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
+ static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
+ 			 bool workingset)
  {
  	eviction >>= bucket_order;
 +	eviction &= EVICTION_MASK;
  	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
  	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
+ 	eviction = (eviction << 1) | workingset;
 -	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
  
 -	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
 +	return xa_mk_value(eviction);
  }
  
  static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
- 			  unsigned long *evictionp)
+ 			  unsigned long *evictionp, bool *workingsetp)
  {
 -	unsigned long entry = (unsigned long)shadow;
 +	unsigned long entry = xa_to_value(shadow);
  	int memcgid, nid;
+ 	bool workingset;
  
 -	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
+ 	workingset = entry & 1;
+ 	entry >>= 1;
  	nid = entry & ((1UL << NODES_SHIFT) - 1);
  	entry >>= NODES_SHIFT;
  	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
@@@ -387,16 -415,16 +414,16 @@@ static unsigned long count_shadow_nodes
  	 * each, this will reclaim shadow entries when they consume
  	 * ~1.8% of available memory:
  	 *
 -	 * PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
 +	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
  	 */
- 	if (sc->memcg) {
- 		cache = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
- 						     LRU_ALL_FILE);
- 	} else {
- 		cache = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
- 			node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
- 	}
- 	max_nodes = cache >> (XA_CHUNK_SHIFT - 3);
+ #ifdef CONFIG_MEMCG
+ 	if (sc->memcg)
+ 		pages = page_counter_read(&sc->memcg->memory);
+ 	else
+ #endif
+ 		pages = node_present_pages(sc->nid);
+ 
 -	max_nodes = pages >> (RADIX_TREE_MAP_SHIFT - 3);
++	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);
  
  	if (!nodes)
  		return SHRINK_EMPTY;
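
For anyone reading along, here is a minimal userspace sketch of the packing
scheme the resolved pack_shadow()/unpack_shadow() follow: the eviction
counter, memcg id, node id and workingset bit are shifted into a single word
and tagged as an XArray value, then pulled back out in reverse order.  The
shift widths and the ex_* helpers below are stand-ins for the kernel's
MEM_CGROUP_ID_SHIFT, NODES_SHIFT, xa_mk_value() and xa_to_value(); the
bucket_order scaling of the eviction counter is omitted.  Illustrative only,
not the kernel code.

	/*
	 * Userspace sketch of the shadow-entry round trip.  The widths are
	 * example values, not the kernel's configuration, and ex_mk_value()
	 * mimics xa_mk_value() by shifting left one and setting bit 0.
	 */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define EX_MEMCG_ID_SHIFT 16	/* example width for the memcg id */
	#define EX_NODES_SHIFT	   6	/* example width for the node id  */

	static void *ex_mk_value(unsigned long v) { return (void *)((v << 1) | 1); }
	static unsigned long ex_to_value(void *p) { return (unsigned long)p >> 1; }

	static void *pack(int memcgid, int nid, unsigned long eviction, bool workingset)
	{
		eviction = (eviction << EX_MEMCG_ID_SHIFT) | memcgid;
		eviction = (eviction << EX_NODES_SHIFT) | nid;
		eviction = (eviction << 1) | workingset;
		return ex_mk_value(eviction);
	}

	static void unpack(void *shadow, int *memcgid, int *nid,
			   unsigned long *eviction, bool *workingset)
	{
		unsigned long entry = ex_to_value(shadow);

		/* Unpack in the reverse of the packing order. */
		*workingset = entry & 1;
		entry >>= 1;
		*nid = entry & ((1UL << EX_NODES_SHIFT) - 1);
		entry >>= EX_NODES_SHIFT;
		*memcgid = entry & ((1UL << EX_MEMCG_ID_SHIFT) - 1);
		entry >>= EX_MEMCG_ID_SHIFT;
		*eviction = entry;
	}

	int main(void)
	{
		int memcgid, nid;
		unsigned long eviction;
		bool ws;

		void *shadow = pack(42, 3, 123456, true);
		unpack(shadow, &memcgid, &nid, &eviction, &ws);
		assert(memcgid == 42 && nid == 3 && eviction == 123456 && ws);
		printf("memcg=%d nid=%d eviction=%lu workingset=%d\n",
		       memcgid, nid, eviction, ws);
		return 0;
	}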
