Message-ID: <20160829124233.GA40092@black.fi.intel.com>
Date: Mon, 29 Aug 2016 15:42:33 +0300
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To: Dmitry Vyukov <dvyukov@...gle.com>,
Ebru Akagunduz <ebru.akagunduz@...il.com>,
Andrea Arcangeli <aarcange@...hat.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Vlastimil Babka <vbabka@...e.cz>,
Mel Gorman <mgorman@...hsingularity.net>,
Johannes Weiner <hannes@...xchg.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
LKML <linux-kernel@...r.kernel.org>,
Vegard Nossum <vegard.nossum@...cle.com>,
Sasha Levin <levinsasha928@...il.com>,
Konstantin Khlebnikov <koct9i@...il.com>,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
Greg Thelen <gthelen@...gle.com>,
Suleiman Souhlal <suleiman@...gle.com>,
Hugh Dickins <hughd@...gle.com>,
David Rientjes <rientjes@...gle.com>,
syzkaller <syzkaller@...glegroups.com>,
Kostya Serebryany <kcc@...gle.com>,
Alexander Potapenko <glider@...gle.com>
Subject: Re: mm: use-after-free in collapse_huge_page
On Sun, Aug 28, 2016 at 12:42:21PM +0200, Dmitry Vyukov wrote:
> Hello,
>
> I've hit the following use-after-free in collapse_huge_page while
> running syzkaller fuzzer. It is in khugepaged, so not reproducible. On
> commit 61c04572de404e52a655a36752e696bbcb483cf5 (Aug 25).
>
> ==================================================================
> BUG: KASAN: use-after-free in collapse_huge_page+0x28b1/0x3500 at addr
> ffff88006c731388
> Read of size 8 by task khugepaged/1327
> CPU: 0 PID: 1327 Comm: khugepaged Not tainted 4.8.0-rc3+ #33
> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
> ffffffff884b8280 ffff88003c207920 ffffffff82d1b239 ffffffff89ec1520
> fffffbfff1097050 ffff88003e94c700 ffff88006c731300 ffff88006c7313c0
> 0000000000000000 ffff88003c207b88 ffff88003c207948 ffffffff817da1fc
> Call Trace:
> [<ffffffff817da82e>] __asan_report_load8_noabort+0x3e/0x40
> mm/kasan/report.c:322
> [<ffffffff817ff651>] collapse_huge_page+0x28b1/0x3500 mm/khugepaged.c:1004
Okay, I think the patch below should do the trick. Build tested only.
Andrea, Ebru, could you re-check whether it's reasonable?
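To make the race a bit more concrete, here is a rough userspace sketch of the
pattern the patch enforces (a minimal model; the registry/lookup_vma names are
illustrative, not kernel API): once the lock protecting the lookup is dropped
and re-taken, the object at that address has to be looked up again and the new
result used, because the pointer saved before the unlock may now point at
freed memory.
/* Userspace sketch of the stale-pointer pattern the patch closes; the
 * names (registry, lookup_vma, collapse) are illustrative, not kernel API. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
struct vma { unsigned long start, end; };
static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct vma *registry;	/* may be replaced while the lock is dropped */
static struct vma *lookup_vma(unsigned long addr)
{
	struct vma *v = registry;
	return (v && addr >= v->start && addr < v->end) ? v : NULL;
}
static void collapse(unsigned long addr)
{
	pthread_rwlock_rdlock(&mmap_lock);
	struct vma *vma = lookup_vma(addr);	/* lookup before dropping the lock */
	pthread_rwlock_unlock(&mmap_lock);	/* lock dropped: *vma may be freed */
	/* ... slow work without the lock (allocation, swap-in) ... */
	pthread_rwlock_rdlock(&mmap_lock);
	struct vma *cur = lookup_vma(addr);	/* revalidate: redo the lookup */
	if (!cur) {
		pthread_rwlock_unlock(&mmap_lock);
		return;
	}
	/* use 'cur', not the stale 'vma' saved before the unlock */
	printf("collapsing in [%lx, %lx)\n", cur->start, cur->end);
	(void)vma;
	pthread_rwlock_unlock(&mmap_lock);
}
int main(void)
{
	registry = malloc(sizeof(*registry));
	registry->start = 0x1000;
	registry->end = 0x200000;
	collapse(0x100000);
	free(registry);
	return 0;
}
That is what the extra **vmap argument below does: hugepage_vma_revalidate()
hands the re-found vma back to its callers, so collapse_huge_page() and
__collapse_huge_page_swapin() stop touching the vma pointer that predates the
unlock.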
From bc6c3589a3da75fabd6d440cce3c8ea23b69c2e5 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Date: Mon, 29 Aug 2016 15:32:50 +0300
Subject: [PATCH] khugepaged: fix use-after-free in collapse_huge_page()
hugepage_vma_revalidate() re-checks whether we should still try to
collapse small pages into a huge page after re-acquiring mmap_sem.
The problem Dmitry Vyukov reported[1] is that the vma found by
hugepage_vma_revalidate() can be suitable for huge pages, but not be the
same vma we had before dropping mmap_sem. Dereferencing the original vma
can then lead to a use-after-free.
Let's use the vma hugepage_vma_revalidate() found instead of assuming
it's the same one we had before the lock was dropped.
[1] http://lkml.kernel.org/r/CACT4Y+Z3gigBvhca9kRJFcjX0G70V_nRhbwKBU+yGoESBDKi9Q@mail.gmail.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Reported-by: Dmitry Vyukov <dvyukov@...gle.com>
---
mm/khugepaged.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 79c52d0061af..3c8253f160ca 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -838,7 +838,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
* value (scan code).
*/
-static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
+static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
+ struct vm_area_struct **vmap)
{
struct vm_area_struct *vma;
unsigned long hstart, hend;
@@ -846,7 +847,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
if (unlikely(khugepaged_test_exit(mm)))
return SCAN_ANY_PROCESS;
- vma = find_vma(mm, address);
+ *vmap = vma = find_vma(mm, address);
if (!vma)
return SCAN_VMA_NULL;
@@ -898,13 +899,13 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
if (ret & VM_FAULT_RETRY) {
down_read(&mm->mmap_sem);
- if (hugepage_vma_revalidate(mm, address)) {
+ if (hugepage_vma_revalidate(mm, address, &vma)) {
/* vma is no longer available, don't continue to swapin */
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
return false;
}
/* check if the pmd is still valid */
- if (mm_find_pmd(mm, address) != pmd)
+ if (mm_find_pmd(mm, address) != pmd || vma != fe.vma)
return false;
}
if (ret & VM_FAULT_ERROR) {
@@ -923,7 +924,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
static void collapse_huge_page(struct mm_struct *mm,
unsigned long address,
struct page **hpage,
- struct vm_area_struct *vma,
int node, int referenced)
{
pmd_t *pmd, _pmd;
@@ -933,6 +933,7 @@ static void collapse_huge_page(struct mm_struct *mm,
spinlock_t *pmd_ptl, *pte_ptl;
int isolated = 0, result = 0;
struct mem_cgroup *memcg;
+ struct vm_area_struct *vma;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
gfp_t gfp;
@@ -961,7 +962,7 @@ static void collapse_huge_page(struct mm_struct *mm,
}
down_read(&mm->mmap_sem);
- result = hugepage_vma_revalidate(mm, address);
+ result = hugepage_vma_revalidate(mm, address, &vma);
if (result) {
mem_cgroup_cancel_charge(new_page, memcg, true);
up_read(&mm->mmap_sem);
@@ -994,7 +995,7 @@ static void collapse_huge_page(struct mm_struct *mm,
* handled by the anon_vma lock + PG_lock.
*/
down_write(&mm->mmap_sem);
- result = hugepage_vma_revalidate(mm, address);
+ result = hugepage_vma_revalidate(mm, address, &vma);
if (result)
goto out;
/* check if the pmd is still valid */
@@ -1202,7 +1203,7 @@ out_unmap:
if (ret) {
node = khugepaged_find_target_node();
/* collapse_huge_page will return with the mmap_sem released */
- collapse_huge_page(mm, address, hpage, vma, node, referenced);
+ collapse_huge_page(mm, address, hpage, node, referenced);
}
out:
trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
--
Kirill A. Shutemov