Date:   Thu, 4 Jul 2019 20:36:58 +1000
From:   Stephen Rothwell <sfr@...b.auug.org.au>
To:     Andrew Morton <akpm@...ux-foundation.org>,
        Jason Gunthorpe <jgg@...lanox.com>
Cc:     Linux Next Mailing List <linux-next@...r.kernel.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Dan Williams <dan.j.williams@...el.com>,
        Christoph Hellwig <hch@....de>
Subject: linux-next: manual merge of the akpm-current tree with the hmm tree

Hi all,

Today's linux-next merge of the akpm-current tree got a conflict in:

  kernel/memremap.c

between commit:

  514caf23a70f ("memremap: replace the altmap_valid field with a PGMAP_ALTMAP_VALID flag")

from the hmm tree and commit:

  a10a0f39cae6 ("mm/devm_memremap_pages: enable sub-section remap")

from the akpm-current tree.

I fixed it up (I think - see below) and can carry the fix as
necessary. This is now fixed as far as linux-next is concerned, but any
non-trivial conflicts should be mentioned to your upstream maintainer
when your tree is submitted for merging.  You may also want to consider
cooperating with the maintainer of the conflicting tree to minimise any
particularly complex conflicts.
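
For reference, the resolution below keeps the hmm tree's pgmap_altmap() accessor where akpm-current still open-coded the pgmap->altmap_valid test, and takes akpm-current's PHYS_PFN()/resource_size() based calculations in place of the old SECTION_SIZE alignment. A minimal sketch of what that accessor is assumed to look like, based on the commit subject above rather than the actual hmm tree code:

  /*
   * Illustrative sketch only (not copied from either tree): the accessor
   * assumed by the resolution below, returning the altmap only when the
   * new PGMAP_ALTMAP_VALID flag is set on the pgmap.
   */
  static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
  {
  	if (pgmap->flags & PGMAP_ALTMAP_VALID)
  		return &pgmap->altmap;
  	return NULL;
  }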

-- 
Cheers,
Stephen Rothwell

diff --cc kernel/memremap.c
index bea6f887adad,a0e5f6b91b04..000000000000
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@@ -11,39 -11,39 +11,37 @@@
  #include <linux/types.h>
  #include <linux/wait_bit.h>
  #include <linux/xarray.h>
 -#include <linux/hmm.h>
  
  static DEFINE_XARRAY(pgmap_array);
- #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
- #define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
  
 -#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
 -vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
 -		       unsigned long addr,
 -		       swp_entry_t entry,
 -		       unsigned int flags,
 -		       pmd_t *pmdp)
 +#ifdef CONFIG_DEV_PAGEMAP_OPS
 +DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 +EXPORT_SYMBOL(devmap_managed_key);
 +static atomic_t devmap_managed_enable;
 +
 +static void devmap_managed_enable_put(void *data)
  {
 -	struct page *page = device_private_entry_to_page(entry);
 -	struct hmm_devmem *devmem;
 +	if (atomic_dec_and_test(&devmap_managed_enable))
 +		static_branch_disable(&devmap_managed_key);
 +}
  
 -	devmem = container_of(page->pgmap, typeof(*devmem), pagemap);
 +static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
 +{
 +	if (!pgmap->ops || !pgmap->ops->page_free) {
 +		WARN(1, "Missing page_free method\n");
 +		return -EINVAL;
 +	}
  
 -	/*
 -	 * The page_fault() callback must migrate page back to system memory
 -	 * so that CPU can access it. This might fail for various reasons
 -	 * (device issue, device was unsafely unplugged, ...). When such
 -	 * error conditions happen, the callback must return VM_FAULT_SIGBUS.
 -	 *
 -	 * Note that because memory cgroup charges are accounted to the device
 -	 * memory, this should never fail because of memory restrictions (but
 -	 * allocation of regular system page might still fail because we are
 -	 * out of memory).
 -	 *
 -	 * There is a more in-depth description of what that callback can and
 -	 * cannot do, in include/linux/memremap.h
 -	 */
 -	return devmem->page_fault(vma, addr, page, flags, pmdp);
 +	if (atomic_inc_return(&devmap_managed_enable) == 1)
 +		static_branch_enable(&devmap_managed_key);
 +	return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
  }
 -#endif /* CONFIG_DEVICE_PRIVATE */
 +#else
 +static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
 +{
 +	return -EINVAL;
 +}
 +#endif /* CONFIG_DEV_PAGEMAP_OPS */
  
  static void pgmap_array_delete(struct resource *res)
  {
@@@ -54,8 -54,14 +52,8 @@@
  
  static unsigned long pfn_first(struct dev_pagemap *pgmap)
  {
- 	return (pgmap->res.start >> PAGE_SHIFT) +
 -	const struct resource *res = &pgmap->res;
 -	struct vmem_altmap *altmap = &pgmap->altmap;
 -	unsigned long pfn;
 -
 -	pfn = PHYS_PFN(res->start);
 -	if (pgmap->altmap_valid)
 -		pfn += vmem_altmap_offset(altmap);
 -	return pfn;
++	return (PHYS_PFN(pgmap->res.start)) +
 +		vmem_altmap_offset(pgmap_altmap(pgmap));
  }
  
  static unsigned long pfn_end(struct dev_pagemap *pgmap)
@@@ -101,28 -89,23 +99,23 @@@ static void devm_memremap_pages_release
  	unsigned long pfn;
  	int nid;
  
 -	pgmap->kill(pgmap->ref);
 +	dev_pagemap_kill(pgmap);
  	for_each_device_pfn(pfn, pgmap)
  		put_page(pfn_to_page(pfn));
 -	pgmap->cleanup(pgmap->ref);
 +	dev_pagemap_cleanup(pgmap);
  
  	/* pages are dead and unused, undo the arch mapping */
- 	align_start = res->start & ~(SECTION_SIZE - 1);
- 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
- 		- align_start;
- 
- 	nid = page_to_nid(pfn_to_page(align_start >> PAGE_SHIFT));
+ 	nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));
  
  	mem_hotplug_begin();
  	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
- 		pfn = align_start >> PAGE_SHIFT;
+ 		pfn = PHYS_PFN(res->start);
  		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
- 				align_size >> PAGE_SHIFT, NULL);
+ 				PHYS_PFN(resource_size(res)), NULL);
  	} else {
- 		arch_remove_memory(nid, align_start, align_size,
+ 		arch_remove_memory(nid, res->start, resource_size(res),
 -				pgmap->altmap_valid ? &pgmap->altmap : NULL);
 +				pgmap_altmap(pgmap));
- 		kasan_remove_zero_shadow(__va(align_start), align_size);
+ 		kasan_remove_zero_shadow(__va(res->start), resource_size(res));
  	}
  	mem_hotplug_done();
  
@@@ -173,64 -146,13 +165,59 @@@ void *devm_memremap_pages(struct devic
  	};
  	pgprot_t pgprot = PAGE_KERNEL;
  	int error, nid, is_ram;
 +	bool need_devmap_managed = true;
 +
 +	switch (pgmap->type) {
 +	case MEMORY_DEVICE_PRIVATE:
 +		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
 +			WARN(1, "Device private memory not supported\n");
 +			return ERR_PTR(-EINVAL);
 +		}
 +		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
 +			WARN(1, "Missing migrate_to_ram method\n");
 +			return ERR_PTR(-EINVAL);
 +		}
 +		break;
 +	case MEMORY_DEVICE_FS_DAX:
 +		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
 +		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
 +			WARN(1, "File system DAX not supported\n");
 +			return ERR_PTR(-EINVAL);
 +		}
 +		break;
 +	case MEMORY_DEVICE_DEVDAX:
 +	case MEMORY_DEVICE_PCI_P2PDMA:
 +		need_devmap_managed = false;
 +		break;
 +	default:
 +		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
 +		break;
 +	}
 +
 +	if (!pgmap->ref) {
 +		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
 +			return ERR_PTR(-EINVAL);
 +
 +		init_completion(&pgmap->done);
 +		error = percpu_ref_init(&pgmap->internal_ref,
 +				dev_pagemap_percpu_release, 0, GFP_KERNEL);
 +		if (error)
 +			return ERR_PTR(error);
 +		pgmap->ref = &pgmap->internal_ref;
 +	} else {
 +		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
 +			WARN(1, "Missing reference count teardown definition\n");
 +			return ERR_PTR(-EINVAL);
 +		}
 +	}
  
 -	if (!pgmap->ref || !pgmap->kill || !pgmap->cleanup) {
 -		WARN(1, "Missing reference count teardown definition\n");
 -		return ERR_PTR(-EINVAL);
 +	if (need_devmap_managed) {
 +		error = devmap_managed_enable_get(dev, pgmap);
 +		if (error)
 +			return ERR_PTR(error);
  	}
  
- 	align_start = res->start & ~(SECTION_SIZE - 1);
- 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
- 		- align_start;
- 	align_end = align_start + align_size - 1;
- 
- 	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
+ 	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
  	if (conflict_pgmap) {
  		dev_WARN(dev, "Conflicting mapping in same section\n");
  		put_dev_pagemap(conflict_pgmap);
