lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CA+G9fYsRcL1mcFhH47ek3YdGcK6sDSa+Gb0UoJzWkMcTsfxWdQ@mail.gmail.com>
Date:   Thu, 16 Jul 2020 12:07:52 +0530
From:   Naresh Kamboju <naresh.kamboju@...aro.org>
To:     Linus Torvalds <torvalds@...ux-foundation.org>
Cc:     "Kirill A. Shutemov" <kirill@...temov.name>,
        Joel Fernandes <joel@...lfernandes.org>,
        "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Linux-MM <linux-mm@...ck.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        William Kucharski <william.kucharski@...cle.com>
Subject: Re: [PATCHv2] mm: Fix warning in move_normal_pmd()

On Thu, 16 Jul 2020 at 04:49, Linus Torvalds
<torvalds@...ux-foundation.org> wrote:
>
> On Wed, Jul 15, 2020 at 4:04 PM Linus Torvalds
> <torvalds@...ux-foundation.org> wrote:
> >
> > It *might* be as simple as this incremental thing on top
>
> No, it needs to be
>
> +       if (*old_addr + *len < old->vm_end)
> +               return;
>
> in try_to_align_end(), of course.
>
> Now I'm going for a lie-down, because this cross-eyed thing isn't working.


Just want to double-check.
Here is the diff after those two patches were applied. Please correct me if
it is wrong.
This patch was applied on top of Linus' mainline master branch.
I am starting my test cycles.

---
 mm/mremap.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 73 insertions(+)

diff --git a/mm/mremap.c b/mm/mremap.c
index 6b153dc05fe4..4961c18d2008 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -254,6 +254,77 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,

  return true;
 }
+
+#define ADDR_BEFORE_PREV(addr, vma) \
+ ((vma)->vm_prev && (addr) < (vma)->vm_prev->vm_end)
+
+static inline void try_to_align_start(unsigned long *len,
+ struct vm_area_struct *old, unsigned long *old_addr,
+ struct vm_area_struct *new, unsigned long *new_addr)
+{
+ if (*old_addr > old->vm_start)
+ return;
+
+ if (ADDR_BEFORE_PREV(*old_addr & PMD_MASK, old))
+ return;
+
+ if (ADDR_BEFORE_PREV(*new_addr & PMD_MASK, new))
+ return;
+
+ /* Bingo! */
+ *len += *new_addr & ~PMD_MASK;
+ *old_addr &= PMD_MASK;
+ *new_addr &= PMD_MASK;
+}
+
+/*
+ * When aligning the end, avoid ALIGN() (which can overflow
+ * if the user space is the full address space, and overshoot
+ * the vm_start of the next vma).
+ *
+ * Align the upper limit down instead, and check that it's not
+ * in the same PMD as the end.
+ */
+#define ADDR_AFTER_NEXT(addr, vma) \
+ ((vma)->vm_next && (addr) > (PMD_MASK & (vma)->vm_next->vm_start))
+
+static inline void try_to_align_end(unsigned long *len,
+ struct vm_area_struct *old, unsigned long *old_addr,
+ struct vm_area_struct *new, unsigned long *new_addr)
+{
+ if (*old_addr < old->vm_end)
+ return;
+
+ if (ADDR_AFTER_NEXT(*old_addr + *len, old))
+ return;
+
+ if (ADDR_AFTER_NEXT(*new_addr + *len, new))
+ return;
+
+ /* Mutual alignment means this is same for new/old addr */
+ *len = ALIGN(*new_addr + *len, PMD_SIZE) - *new_addr;
+}
+
+/*
+ * The PMD move case is much more efficient, so if we have the
+ * mutually aligned case, try to see if we can extend the
+ * beginning and end to be aligned too.
+ *
+ * The pointer dereferences look bad, but with inlining, the
+ * compiler will sort it out.
+ */
+static inline void try_to_align_range(unsigned long *len,
+ struct vm_area_struct *old, unsigned long *old_addr,
+ struct vm_area_struct *new, unsigned long *new_addr)
+{
+ if ((*old_addr ^ *new_addr) & ~PMD_MASK)
+ return;
+
+ try_to_align_start(len, old, old_addr, new, new_addr);
+ try_to_align_end(len, old, old_addr, new, new_addr);
+}
+#else
+#define try_to_align_range(len,old,olda,new,newa) do { } while(0);
 #endif

 unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -272,6 +343,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
  old_addr, old_end);
  mmu_notifier_invalidate_range_start(&range);

+ try_to_align_range(&len, vma, &old_addr, new_vma, &new_addr);
+
  for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
  cond_resched();
  next = (old_addr + PMD_SIZE) & PMD_MASK;
-- 
2.27.0

>
>               Linus

- Naresh

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ