Date:   Wed, 1 May 2019 21:10:31 +1000
From:   Stephen Rothwell <sfr@...b.auug.org.au>
To:     Andrew Morton <akpm@...ux-foundation.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...e.hu>, "H. Peter Anvin" <hpa@...or.com>,
        Peter Zijlstra <peterz@...radead.org>
Cc:     Linux Next Mailing List <linux-next@...r.kernel.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Rick Edgecombe <rick.p.edgecombe@...el.com>,
        Roman Gushchin <guro@...com>
Subject: linux-next: manual merge of the akpm-current tree with the tip tree

Hi all,

Today's linux-next merge of the akpm-current tree got a conflict in:

  mm/vmalloc.c

between commit:

  bade3b4bdcdb ("mm/vmalloc.c: refactor __vunmap() to avoid duplicated call to find_vm_area()")

from the tip tree and commit:

  868b104d7379 ("mm/vmalloc: Add flag for freeing of special permsissions")

from the akpm-current tree.

I fixed it up (I made an attempt at a fix-up - see below) and can carry
the fix as necessary. This is now fixed as far as linux-next is
concerned, but any non-trivial conflicts should be mentioned to your
upstream maintainer when your tree is submitted for merging.  You may
also want to consider cooperating with the maintainer of the
conflicting tree to minimise any particularly complex conflicts.
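
For reference, the resolution below can be read as the small standalone C
sketch that follows. It is only meant to show how the tip tree's
vmap_area-based __remove_vm_area() and the akpm-current tree's
vm_remove_mappings() fit together after the fix-up; the stub types, flag
values and main() harness are invented for illustration, and the real
mm/vmalloc.c locking, KASAN and direct-map handling are elided.

/* Simplified, userspace-buildable sketch of the merged call flow.
 * Function names mirror the diff below; everything else is a stub. */
#include <stdio.h>

struct vm_struct { void *addr; unsigned int nr_pages; unsigned long flags; };
struct vmap_area  { struct vm_struct *vm; unsigned long flags; };

#define VM_VM_AREA           0x01UL	/* illustrative values only */
#define VM_FLUSH_RESET_PERMS 0x02UL

/* Tip tree refactor: unlink the vm_struct from an already looked-up
 * vmap_area, so __vunmap() needs only one find_vmap_area() call. */
static struct vm_struct *__remove_vm_area(struct vmap_area *va)
{
	struct vm_struct *vm = va->vm;

	va->vm = NULL;
	va->flags &= ~VM_VM_AREA;
	return vm;
}

/* akpm-current change, adapted by the fix-up to take the vmap_area
 * instead of calling remove_vm_area()/find_vm_area() a second time. */
static void vm_remove_mappings(struct vmap_area *va, int deallocate_pages)
{
	struct vm_struct *area = va->vm;

	__remove_vm_area(va);

	if (!(area->flags & VM_FLUSH_RESET_PERMS))
		return;

	/* In the real code: vm_unmap_aliases() and direct-map reset. */
	printf("flush aliases, reset direct map for %p (%u pages)\n",
	       area->addr, area->nr_pages);
	(void)deallocate_pages;
}

int main(void)
{
	struct vm_struct vm = { .addr = (void *)0x1000, .nr_pages = 4,
				.flags = VM_FLUSH_RESET_PERMS };
	struct vmap_area va = { .vm = &vm, .flags = VM_VM_AREA };

	/* __vunmap() looks the vmap_area up once and hands it to the
	 * helpers, which is the point of the refactor. */
	vm_remove_mappings(&va, 1);
	return 0;
}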

-- 
Cheers,
Stephen Rothwell

diff --cc mm/vmalloc.c
index e5e9e1fcac01,4a91acce4b5f..000000000000
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@@ -1490,94 -2103,16 +2110,83 @@@ static struct vm_struct *__remove_vm_ar
   */
  struct vm_struct *remove_vm_area(const void *addr)
  {
+ 	struct vm_struct *vm = NULL;
  	struct vmap_area *va;
  
- 	might_sleep();
- 
  	va = find_vmap_area((unsigned long)addr);
- 	if (va && va->flags & VM_VM_AREA) {
- 		struct vm_struct *vm = va->vm;
- 
- 		spin_lock(&vmap_area_lock);
- 		va->vm = NULL;
- 		va->flags &= ~VM_VM_AREA;
- 		va->flags |= VM_LAZY_FREE;
- 		spin_unlock(&vmap_area_lock);
- 
- 		kasan_free_shadow(vm);
- 		free_unmap_vmap_area(va);
+ 	if (va && va->flags & VM_VM_AREA)
+ 		vm = __remove_vm_area(va);
  
- 		return vm;
- 	}
- 	return NULL;
+ 	return vm;
  }
  
 +static inline void set_area_direct_map(const struct vm_struct *area,
 +				       int (*set_direct_map)(struct page *page))
 +{
 +	int i;
 +
 +	for (i = 0; i < area->nr_pages; i++)
 +		if (page_address(area->pages[i]))
 +			set_direct_map(area->pages[i]);
 +}
 +
 +/* Handle removing and resetting vm mappings related to the vm_struct. */
- static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
++static void vm_remove_mappings(struct vmap_area *va, int deallocate_pages)
 +{
++	struct vm_struct *area = va->vm;
 +	unsigned long addr = (unsigned long)area->addr;
 +	unsigned long start = ULONG_MAX, end = 0;
 +	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
 +	int i;
 +
 +	/*
 +	 * The below block can be removed when all architectures that have
 +	 * direct map permissions also have set_direct_map_() implementations.
 +	 * This is concerned with resetting the direct map of any vm alias with
 +	 * execute permissions, without leaving a RW+X window.
 +	 */
 +	if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
 +		set_memory_nx(addr, area->nr_pages);
 +		set_memory_rw(addr, area->nr_pages);
 +	}
 +
- 	remove_vm_area(area->addr);
++	__remove_vm_area(va);
 +
 +	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
 +	if (!flush_reset)
 +		return;
 +
 +	/*
 +	 * If not deallocating pages, just do the flush of the VM area and
 +	 * return.
 +	 */
 +	if (!deallocate_pages) {
 +		vm_unmap_aliases();
 +		return;
 +	}
 +
 +	/*
 +	 * If execution gets here, flush the vm mapping and reset the direct
 +	 * map. Find the start and end range of the direct mappings to make sure
 +	 * the vm_unmap_aliases() flush includes the direct map.
 +	 */
 +	for (i = 0; i < area->nr_pages; i++) {
 +		if (page_address(area->pages[i])) {
 +			start = min(addr, start);
 +			end = max(addr, end);
 +		}
 +	}
 +
 +	/*
 +	 * Set direct map to something invalid so that it won't be cached if
 +	 * there are any accesses after the TLB flush, then flush the TLB and
 +	 * reset the direct map permissions to the default.
 +	 */
 +	set_area_direct_map(area, set_direct_map_invalid_noflush);
 +	_vm_unmap_aliases(start, end, 1);
 +	set_area_direct_map(area, set_direct_map_default_noflush);
 +}
 +
  static void __vunmap(const void *addr, int deallocate_pages)
  {
  	struct vm_struct *area;
@@@ -1599,8 -2136,7 +2210,8 @@@
  	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
  	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
  
- 	vm_remove_mappings(area, deallocate_pages);
 -	__remove_vm_area(va);
++	vm_remove_mappings(va, deallocate_pages);
 +
  	if (deallocate_pages) {
  		int i;
  
