Message-Id: <20221024113108.359729536@linuxfoundation.org>
Date: Mon, 24 Oct 2022 13:34:20 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Liu Shixin <liushixin2@...wei.com>,
Kefeng Wang <wangkefeng.wang@...wei.com>,
syzbot+193f9cee8638750b23cf@...kaller.appspotmail.com,
Liu Zixian <liuzixian4@...wei.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
David Hildenbrand <david@...hat.com>,
John Hubbard <jhubbard@...dia.com>,
Muchun Song <songmuchun@...edance.com>,
Sidhartha Kumar <sidhartha.kumar@...cle.com>,
Andrew Morton <akpm@...ux-foundation.org>
Subject: [PATCH 5.15 516/530] mm: hugetlb: fix UAF in hugetlb_handle_userfault

From: Liu Shixin <liushixin2@...wei.com>

commit 958f32ce832ba781ac20e11bb2d12a9352ea28fc upstream.
The vma_lock and hugetlb_fault_mutex are dropped before handling the
userfault and reacquired after handle_userfault() returns. However,
reacquiring the vma_lock can lead to a UAF [1,2] due to the following race:
hugetlb_fault
  hugetlb_no_page
    /* unlock vma_lock */
    hugetlb_handle_userfault
      handle_userfault
        /* unlock mm->mmap_lock */
                                          vm_mmap_pgoff
                                            do_mmap
                                              mmap_region
                                                munmap_vma_range
                                                  /* clean old vma */
        /* lock vma_lock again  <--- UAF */
      /* unlock vma_lock */
Since the vma_lock would be unlocked immediately after
hugetlb_handle_userfault() returns anyway, drop the unneeded lock and
unlock pair in hugetlb_handle_userfault() to fix the issue. A minimal
userspace sketch of the race pattern follows the reference links below.
[1] https://lore.kernel.org/linux-mm/000000000000d5e00a05e834962e@google.com/
[2] https://lore.kernel.org/linux-mm/20220921014457.1668-1-liuzixian4@huawei.com/
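
The race above is easiest to see outside the kernel. Below is a minimal
userspace C sketch of the same relock-after-free pattern, not kernel code:
the fake_vma type, fault_handler() and unmapper() are hypothetical
stand-ins for the vma with its vma_lock, the faulting path, and
munmap_vma_range() respectively. Thread A drops the object's lock to call
out, thread B frees the object in that window, and thread A then relocks
freed memory.

	/*
	 * Minimal userspace model (NOT kernel code) of the race above.
	 * "fake_vma" and both thread bodies are hypothetical stand-ins:
	 * the mutex plays the role of vma_lock, and unmapper() plays
	 * the role of munmap_vma_range() cleaning up the old vma.
	 */
	#include <pthread.h>
	#include <stdlib.h>
	#include <unistd.h>

	struct fake_vma {
		pthread_mutex_t lock;		/* stands in for vma_lock */
	};

	static struct fake_vma *vma;

	static void *fault_handler(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&vma->lock);
		/* ... fault handling that must call out, so drop the lock ... */
		pthread_mutex_unlock(&vma->lock);

		usleep(1000);			/* widen the race window */

		pthread_mutex_lock(&vma->lock);	/* UAF: vma may be freed by now */
		pthread_mutex_unlock(&vma->lock);
		return NULL;
	}

	static void *unmapper(void *arg)
	{
		(void)arg;
		/* plays munmap_vma_range(): destroys and frees the "vma" */
		pthread_mutex_destroy(&vma->lock);
		free(vma);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		vma = malloc(sizeof(*vma));
		pthread_mutex_init(&vma->lock, NULL);

		pthread_create(&a, NULL, fault_handler, NULL);
		pthread_create(&b, NULL, unmapper, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}

Built with gcc -pthread -fsanitize=address, the second pthread_mutex_lock()
in fault_handler() will typically trip an AddressSanitizer
heap-use-after-free report, mirroring the syzbot report in [1].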
Link: https://lkml.kernel.org/r/20220923042113.137273-1-liushixin2@huawei.com
Fixes: 1a1aad8a9b7b ("userfaultfd: hugetlbfs: add userfaultfd hugetlb hook")
Signed-off-by: Liu Shixin <liushixin2@...wei.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@...wei.com>
Reported-by: syzbot+193f9cee8638750b23cf@...kaller.appspotmail.com
Reported-by: Liu Zixian <liuzixian4@...wei.com>
Reviewed-by: Mike Kravetz <mike.kravetz@...cle.com>
Cc: David Hildenbrand <david@...hat.com>
Cc: John Hubbard <jhubbard@...dia.com>
Cc: Muchun Song <songmuchun@...edance.com>
Cc: Sidhartha Kumar <sidhartha.kumar@...cle.com>
Cc: <stable@...r.kernel.org> [4.14+]
Signed-off-by: Andrew Morton <akpm@...ux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
mm/hugetlb.c | 37 +++++++++++++++++--------------------
1 file changed, 17 insertions(+), 20 deletions(-)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4844,7 +4844,6 @@ static inline vm_fault_t hugetlb_handle_
 						  unsigned long haddr,
 						  unsigned long reason)
 {
-	vm_fault_t ret;
 	u32 hash;
 	struct vm_fault vmf = {
 		.vma = vma,
@@ -4861,18 +4860,14 @@ static inline vm_fault_t hugetlb_handle_
 	};
 
 	/*
-	 * hugetlb_fault_mutex and i_mmap_rwsem must be
-	 * dropped before handling userfault.  Reacquire
-	 * after handling fault to make calling code simpler.
+	 * vma_lock and hugetlb_fault_mutex must be dropped before handling
+	 * userfault. Also mmap_lock will be dropped during handling
+	 * userfault, any vma operation should be careful from here.
 	 */
 	hash = hugetlb_fault_mutex_hash(mapping, idx);
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 	i_mmap_unlock_read(mapping);
-	ret = handle_userfault(&vmf, reason);
-	i_mmap_lock_read(mapping);
-	mutex_lock(&hugetlb_fault_mutex_table[hash]);
-
-	return ret;
+	return handle_userfault(&vmf, reason);
 }
 
 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
@@ -4889,6 +4884,7 @@ static vm_fault_t hugetlb_no_page(struct
 	spinlock_t *ptl;
 	unsigned long haddr = address & huge_page_mask(h);
 	bool new_page, new_pagecache_page = false;
+	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -4898,7 +4894,7 @@ static vm_fault_t hugetlb_no_page(struct
 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
 			   current->pid);
-		return ret;
+		goto out;
 	}
 
 	/*
@@ -4915,12 +4911,10 @@ retry:
 	page = find_lock_page(mapping, idx);
 	if (!page) {
 		/* Check for page in userfault range */
-		if (userfaultfd_missing(vma)) {
-			ret = hugetlb_handle_userfault(vma, mapping, idx,
+		if (userfaultfd_missing(vma))
+			return hugetlb_handle_userfault(vma, mapping, idx,
 						       flags, haddr,
 						       VM_UFFD_MISSING);
-			goto out;
-		}
 
 		page = alloc_huge_page(vma, haddr, 0);
 		if (IS_ERR(page)) {
@@ -4980,10 +4974,9 @@ retry:
 		if (userfaultfd_minor(vma)) {
 			unlock_page(page);
 			put_page(page);
-			ret = hugetlb_handle_userfault(vma, mapping, idx,
+			return hugetlb_handle_userfault(vma, mapping, idx,
 						       flags, haddr,
 						       VM_UFFD_MINOR);
-			goto out;
 		}
 	}
 
@@ -5034,6 +5027,8 @@ retry:
 
 	unlock_page(page);
 out:
+	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+	i_mmap_unlock_read(mapping);
 	return ret;
 
 backout:
@@ -5131,10 +5126,12 @@ vm_fault_t hugetlb_fault(struct mm_struc
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	entry = huge_ptep_get(ptep);
-	if (huge_pte_none(entry)) {
-		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
-		goto out_mutex;
-	}
+	if (huge_pte_none(entry))
+		/*
+		 * hugetlb_no_page will drop vma lock and hugetlb fault
+		 * mutex internally, which make us return immediately.
+		 */
+		return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
 
 	ret = 0;
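
Structurally, the fix converts a lock-and-relock pattern into a lock
handoff: hugetlb_fault() still takes the fault mutex, but hugetlb_no_page()
and hugetlb_handle_userfault() now own releasing it on every return path,
so the userfault path never reacquires a lock on a vma that may already be
gone. The following self-contained userspace sketch shows that convention
with hypothetical names (fault, no_page, do_userfault); the real code is
the diff above.

	/*
	 * Userspace sketch of the lock-handoff convention the fix adopts.
	 * All names here (fault, no_page, do_userfault) are hypothetical
	 * stand-ins for hugetlb_fault(), hugetlb_no_page() and
	 * handle_userfault().
	 */
	#include <assert.h>
	#include <pthread.h>

	static pthread_mutex_t fault_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* runs with fault_mutex already dropped, like handle_userfault() */
	static int do_userfault(void)
	{
		return 0;
	}

	/*
	 * Like the patched hugetlb_no_page(): entered with fault_mutex
	 * held and responsible for releasing it on *every* return path.
	 */
	static int no_page(int uffd_missing)
	{
		if (uffd_missing) {
			pthread_mutex_unlock(&fault_mutex);
			/* tail call; never relock an object that may be gone */
			return do_userfault();
		}
		/* ... normal allocation path ("out:" label in the patch) ... */
		pthread_mutex_unlock(&fault_mutex);
		return 0;
	}

	/* like the patched hugetlb_fault(): lock, then hand off to callee */
	static int fault(int uffd_missing)
	{
		pthread_mutex_lock(&fault_mutex);
		return no_page(uffd_missing);	/* callee unlocks internally */
	}

	int main(void)
	{
		assert(fault(1) == 0);
		assert(fault(0) == 0);
		return 0;
	}

The cost of this convention is that every early return inside the callee
must remember to unlock, which is why the patch funnels the remaining
failure paths of hugetlb_no_page() through its out: label.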