Date:   Tue, 6 Sep 2022 19:01:41 +1000
From:   Stephen Rothwell <sfr@...b.auug.org.au>
To:     Andrew Morton <akpm@...ux-foundation.org>
Cc:     Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Linux Next Mailing List <linux-next@...r.kernel.org>,
        Rolf Eike Beer <eb@...ix.com>,
        Steven Price <steven.price@....com>
Subject: linux-next: manual merge of the mm tree with Linus' tree

Hi all,

Today's linux-next merge of the mm tree got a conflict in:

  mm/pagewalk.c

between commit:

  8782fb61cc84 ("mm: pagewalk: Fix race between unmap and page walker")

from Linus' tree and commits:

  fa02fb928200 ("mm: pagewalk: make error checks more obvious")
  66c217081bd0 ("mm: pagewalk: allow walk_page_range_novma() without mm")

from the mm tree.

I fixed it up (I think - see below) and can carry the fix as
necessary. This is now fixed as far as linux-next is concerned, but any
non-trivial conflicts should be mentioned to your upstream maintainer
when your tree is submitted for merging.  You may also want to consider
cooperating with the maintainer of the conflicting tree to minimise any
particularly complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc mm/pagewalk.c
index fa7a3d21a751,418717eec09e..000000000000
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@@ -108,13 -104,16 +104,16 @@@ static int walk_pmd_range(pud_t *pud, u
  
  	pmd = pmd_offset(pud, addr);
  	do {
- again:
+ 		int err;
+ 
+  again:
  		next = pmd_addr_end(addr, end);
 -		if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
 +		if (pmd_none(*pmd)) {
- 			if (ops->pte_hole)
+ 			if (ops->pte_hole) {
  				err = ops->pte_hole(addr, next, depth, walk);
- 			if (err)
- 				break;
+ 				if (err)
+ 					return err;
+ 			}
  			continue;
  		}
  
@@@ -169,13 -168,16 +168,16 @@@ static int walk_pud_range(p4d_t *p4d, u
  
  	pud = pud_offset(p4d, addr);
  	do {
+ 		int err;
+ 
   again:
  		next = pud_addr_end(addr, end);
 -		if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
 +		if (pud_none(*pud)) {
- 			if (ops->pte_hole)
+ 			if (ops->pte_hole) {
  				err = ops->pte_hole(addr, next, depth, walk);
- 			if (err)
- 				break;
+ 				if (err)
+ 					return err;
+ 			}
  			continue;
  		}
  
@@@ -447,20 -456,17 +456,21 @@@ int walk_page_range(struct mm_struct *m
  
  	vma = find_vma(walk.mm, start);
  	do {
+ 		int err;
+ 
+ 		walk.vma = vma;
  		if (!vma) { /* after the last vma */
- 			walk.vma = NULL;
  			next = end;
 +			if (ops->pte_hole)
 +				err = ops->pte_hole(start, next, -1, &walk);
  		} else if (start < vma->vm_start) { /* outside vma */
  			walk.vma = NULL;
  			next = min(end, vma->vm_start);
 +			if (ops->pte_hole)
 +				err = ops->pte_hole(start, next, -1, &walk);
  		} else { /* inside vma */
- 			walk.vma = vma;
  			next = min(end, vma->vm_end);
- 			vma = vma->vm_next;
+ 			vma = find_vma(mm, vma->vm_end);
  
  			err = walk_page_test(start, next, &walk);
  			if (err > 0) {
@@@ -472,17 -478,26 +482,24 @@@
  				continue;
  			}
  			if (err < 0)
- 				break;
+ 				return err;
 -		}
 -		if (walk.vma || walk.ops->pte_hole) {
  			err = __walk_page_range(start, next, &walk);
+ 			if (err)
+ 				return err;
  		}
- 		if (err)
- 			break;
  	} while (start = next, start < end);
- 	return err;
+ 	return 0;
  }
  
- /*
+ /**
+  * walk_page_range_novma - walk a range of pagetables not backed by a vma
+  * @mm:		mm_struct representing the target process of page table walk
+  * @start:	start address of the virtual address range
+  * @end:	end address of the virtual address range
+  * @ops:	operation to call during the walk
+  * @pgd:	pgd to walk if different from mm->pgd
+  * @private:	private data for callbacks' usage
+  *
   * Similar to walk_page_range() but can walk any page tables even if they are
   * not backed by VMAs. Because 'unusual' entries may be walked this function
   * will also not lock the PTEs for the pte_entry() callback. This is useful for
@@@ -501,10 -518,11 +520,11 @@@ int walk_page_range_novma(struct mm_str
  		.no_vma		= true
  	};
  
- 	if (start >= end || !walk.mm)
+ 	if (start >= end || (!walk.mm && !walk.pgd))
  		return -EINVAL;
  
- 	mmap_assert_write_locked(walk.mm);
+ 	if (walk.mm)
 -		mmap_assert_locked(walk.mm);
++		mmap_assert_write_locked(walk.mm);
  
  	return walk_pgd_range(start, end, &walk);
  }
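
For anyone who finds the combined-diff notation above hard to read, here is
the hole handling in walk_pmd_range() as it comes out after the resolution,
reconstructed from the first hunk (walk_pud_range() resolves identically,
with pud in place of pmd). The simpler condition is the shape from Linus'
tree, where 8782fb61cc84 removed the "!walk->vma && !walk->no_vma" test,
while the per-iteration "err" and the early return come from the mm tree's
rework (fa02fb928200). This is a reconstruction for review, not a literal
copy of the resolved file:

	pmd = pmd_offset(pud, addr);
	do {
		int err;	/* mm tree: error status is per iteration now */

again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {	/* Linus' tree: vma/no_vma test gone */
			if (ops->pte_hole) {
				err = ops->pte_hole(addr, next, depth, walk);
				if (err)
					return err;	/* mm tree: fail fast */
			}
			continue;
		}
		/* remainder of the loop body is untouched by the conflict */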
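
The walk_page_range() loop resolves along the same lines: the inline
pte_hole() calls for the two hole cases come from Linus' tree, while hoisting
"walk.vma = vma;", the per-iteration "err", the early returns and the
find_vma() lookup (the mm tree side replaces vma->vm_next with a find_vma()
call) come from the mm tree. Again reconstructed from the hunks above, with
an unchanged branch body elided:

	vma = find_vma(walk.mm, start);
	do {
		int err;

		walk.vma = vma;
		if (!vma) { /* after the last vma */
			next = end;
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else { /* inside vma */
			next = min(end, vma->vm_end);
			vma = find_vma(mm, vma->vm_end);

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/* positive-return handling as before, elided */
				continue;
			}
			if (err < 0)
				return err;
			err = __walk_page_range(start, next, &walk);
			if (err)
				return err;
		}
	} while (start = next, start < end);
	return 0;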
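
Finally, the guard at the top of walk_page_range_novma() has to satisfy both
sides at once: 66c217081bd0 from the mm tree allows a walk with a pgd but no
mm, while 8782fb61cc84 from Linus' tree upgraded the lock assertion to a
write lock, so the resolution only asserts the write lock when an mm was
supplied:

	if (start >= end || (!walk.mm && !walk.pgd))
		return -EINVAL;

	if (walk.mm)
		mmap_assert_write_locked(walk.mm);

	return walk_pgd_range(start, end, &walk);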
