Message-ID: <20230616115856.3ce7682c@canb.auug.org.au>
Date:   Fri, 16 Jun 2023 11:58:56 +1000
From:   Stephen Rothwell <sfr@...b.auug.org.au>
To:     Jens Axboe <axboe@...nel.dk>,
        Andrew Morton <akpm@...ux-foundation.org>
Cc:     David Howells <dhowells@...hat.com>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Linux Next Mailing List <linux-next@...r.kernel.org>,
        "Vishal Moola (Oracle)" <vishal.moola@...il.com>
Subject: linux-next: manual merge of the block tree with the mm tree

Hi all,

Today's linux-next merge of the block tree got a conflict in:

  mm/gup.c

between commit:

  0f3f569eca46 ("mm/gup.c: reorganize try_get_folio()")

from the mm tree and commit:

  c8070b787519 ("mm: Don't pin ZERO_PAGE in pin_user_pages()")

from the block tree.

I fixed it up (I think - see below) and can carry the fix as necessary.
This is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc mm/gup.c
index ce14d4d28503,0814576b7366..000000000000
--- a/mm/gup.c
+++ b/mm/gup.c
@@@ -132,50 -127,62 +133,57 @@@ struct folio *try_grab_folio(struct pag
  	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
  		return NULL;
  
 +	folio = try_get_folio(page, refs);
 +
  	if (flags & FOLL_GET)
 -		return try_get_folio(page, refs);
 -	else if (flags & FOLL_PIN) {
 -		struct folio *folio;
 -
 -		/*
 -		 * Don't take a pin on the zero page - it's not going anywhere
 -		 * and it is used in a *lot* of places.
 -		 */
 -		if (is_zero_page(page))
 -			return page_folio(page);
 -
 -		/*
 -		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
 -		 * right zone, so fail and let the caller fall back to the slow
 -		 * path.
 -		 */
 -		if (unlikely((flags & FOLL_LONGTERM) &&
 -			     !is_longterm_pinnable_page(page)))
 -			return NULL;
 -
 -		/*
 -		 * CAUTION: Don't use compound_head() on the page before this
 -		 * point, the result won't be stable.
 -		 */
 -		folio = try_get_folio(page, refs);
 -		if (!folio)
 -			return NULL;
 -
 -		/*
 -		 * When pinning a large folio, use an exact count to track it.
 -		 *
 -		 * However, be sure to *also* increment the normal folio
 -		 * refcount field at least once, so that the folio really
 -		 * is pinned.  That's why the refcount from the earlier
 -		 * try_get_folio() is left intact.
 -		 */
 -		if (folio_test_large(folio))
 -			atomic_add(refs, &folio->_pincount);
 -		else
 -			folio_ref_add(folio,
 -					refs * (GUP_PIN_COUNTING_BIAS - 1));
 -		/*
 -		 * Adjust the pincount before re-checking the PTE for changes.
 -		 * This is essentially a smp_mb() and is paired with a memory
 -		 * barrier in page_try_share_anon_rmap().
 -		 */
 -		smp_mb__after_atomic();
 -
 -		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
 -
  		return folio;
 +
 +	/* FOLL_PIN is set */
 +	if (!folio)
 +		return NULL;
 +
++	/*
++	 * Don't take a pin on the zero page - it's not going anywhere
++	 * and it is used in a *lot* of places.
++	 */
++	if (is_zero_page(page))
++		return page_folio(page);
++
 +	/*
 +	 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
 +	 * right zone, so fail and let the caller fall back to the slow
 +	 * path.
 +	 */
 +	if (unlikely((flags & FOLL_LONGTERM) &&
 +		     !folio_is_longterm_pinnable(folio))) {
 +		if (!put_devmap_managed_page_refs(&folio->page, refs))
 +			folio_put_refs(folio, refs);
 +		return NULL;
  	}
  
 -	WARN_ON_ONCE(1);
 -	return NULL;
 +	/*
 +	 * When pinning a large folio, use an exact count to track it.
 +	 *
 +	 * However, be sure to *also* increment the normal folio
 +	 * refcount field at least once, so that the folio really
 +	 * is pinned.  That's why the refcount from the earlier
 +	 * try_get_folio() is left intact.
 +	 */
 +	if (folio_test_large(folio))
 +		atomic_add(refs, &folio->_pincount);
 +	else
 +		folio_ref_add(folio,
 +				refs * (GUP_PIN_COUNTING_BIAS - 1));
 +	/*
 +	 * Adjust the pincount before re-checking the PTE for changes.
 +	 * This is essentially a smp_mb() and is paired with a memory
 +	 * barrier in page_try_share_anon_rmap().
 +	 */
 +	smp_mb__after_atomic();
 +
 +	node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
 +
 +	return folio;
  }
  
  static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
@@@ -3250,9 -3193,13 +3300,12 @@@ EXPORT_SYMBOL(pin_user_pages_remote)
   *
   * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
   * see Documentation/core-api/pin_user_pages.rst for details.
+  *
+  * Note that if a zero_page is amongst the returned pages, it will not have
+  * pins in it and unpin_user_page*() will not remove pins from it.
   */
  long pin_user_pages(unsigned long start, unsigned long nr_pages,
 -		    unsigned int gup_flags, struct page **pages,
 -		    struct vm_area_struct **vmas)
 +		    unsigned int gup_flags, struct page **pages)
  {
  	int locked = 1;
  

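For anyone skimming the resolution rather than the combined diff, the merged
try_grab_folio() reads roughly as below: the mm-tree reorganization takes the
folio reference up front for both the FOLL_GET and FOLL_PIN cases, and the
block-tree zero-page exception is folded in ahead of the FOLL_LONGTERM check.
This is a sketch reconstructed from the hunk above, not a copy of the final
tree; the parameter list is filled in from the identifiers used in the body,
since the hunk header truncates the signature.

struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
{
	struct folio *folio;

	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
		return NULL;

	/* mm-tree reorganization: one try_get_folio() call serves both cases */
	folio = try_get_folio(page, refs);

	if (flags & FOLL_GET)
		return folio;

	/* FOLL_PIN is set */
	if (!folio)
		return NULL;

	/*
	 * block-tree change: don't take a pin on the zero page - it's
	 * not going anywhere and it is used in a *lot* of places.
	 */
	if (is_zero_page(page))
		return page_folio(page);

	/* the FOLL_LONGTERM fast path needs a longterm-pinnable folio */
	if (unlikely((flags & FOLL_LONGTERM) &&
		     !folio_is_longterm_pinnable(folio))) {
		if (!put_devmap_managed_page_refs(&folio->page, refs))
			folio_put_refs(folio, refs);
		return NULL;
	}

	/* exact pin count for large folios, biased refcount otherwise */
	if (folio_test_large(folio))
		atomic_add(refs, &folio->_pincount);
	else
		folio_ref_add(folio, refs * (GUP_PIN_COUNTING_BIAS - 1));

	/*
	 * Order the pincount/refcount update before the PTE re-check;
	 * pairs with the barrier in page_try_share_anon_rmap().
	 */
	smp_mb__after_atomic();

	node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
	return folio;
}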

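The second hunk documents the zero-page behaviour at the API boundary and
already reflects the four-argument pin_user_pages() signature (the vmas
parameter is gone). A minimal, hypothetical caller under that signature might
look like the following; the user address uaddr, the FOLL_WRITE flag, and the
fixed-size array are illustrative choices, and the mmap_lock handling follows
the usual GUP caller contract:

	struct page *pages[16];
	long npinned;

	/* pin_user_pages() expects the caller to hold mmap_lock */
	mmap_read_lock(current->mm);
	npinned = pin_user_pages(uaddr, ARRAY_SIZE(pages), FOLL_WRITE, pages);
	mmap_read_unlock(current->mm);

	if (npinned <= 0)
		return npinned ? npinned : -EFAULT;

	/* ... access the pinned pages ... */

	/*
	 * Release with unpin_user_pages().  Per the comment added above,
	 * if the zero page is among the returned pages it holds no pin,
	 * and unpin_user_page*() will not try to remove one from it.
	 */
	unpin_user_pages(pages, npinned);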