Message-ID: <e7e5bcb1-362a-6868-45c9-a04756f069e7@redhat.com>
Date:   Fri, 29 Apr 2022 11:48:42 +0200
From:   David Hildenbrand <david@...hat.com>
To:     Miaohe Lin <linmiaohe@...wei.com>, akpm@...ux-foundation.org,
        mike.kravetz@...cle.com, naoya.horiguchi@....com
Cc:     ying.huang@...el.com, hch@....de, dhowells@...hat.com,
        cl@...ux.com, linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 4/4] mm/migration: fix potential pte_unmap on an not
 mapped pte

On 25.04.22 15:27, Miaohe Lin wrote:
> __migration_entry_wait and migration_entry_wait_on_locked assume the pte is
> always mapped by the caller. But this is not the case when they are called
> from migration_entry_wait_huge and follow_huge_pmd. Add a hugetlbfs variant
> that calls hugetlb_migration_entry_wait(ptep == NULL) to fix this issue.
> 
> Fixes: 30dad30922cc ("mm: migration: add migrate_entry_wait_huge()")
> Suggested-by: David Hildenbrand <david@...hat.com>
> Signed-off-by: Miaohe Lin <linmiaohe@...wei.com>
> ---
>  include/linux/swapops.h | 12 ++++++++----
>  mm/hugetlb.c            |  4 ++--
>  mm/migrate.c            | 23 +++++++++++++++++++----
>  3 files changed, 29 insertions(+), 10 deletions(-)
> 
> diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> index 30cded849ee4..862e5a2053b1 100644
> --- a/include/linux/swapops.h
> +++ b/include/linux/swapops.h
> @@ -244,8 +244,10 @@ extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
>  					spinlock_t *ptl);
>  extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
>  					unsigned long address);
> -extern void migration_entry_wait_huge(struct vm_area_struct *vma,
> -		struct mm_struct *mm, pte_t *pte);
> +#ifdef CONFIG_HUGETLB_PAGE
> +extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl);
> +extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
> +#endif
>  #else
>  static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
>  {
> @@ -271,8 +273,10 @@ static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
>  					spinlock_t *ptl) { }
>  static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
>  					 unsigned long address) { }
> -static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
> -		struct mm_struct *mm, pte_t *pte) { }
> +#ifdef CONFIG_HUGETLB_PAGE
> +static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { }
> +static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
> +#endif
>  static inline int is_writable_migration_entry(swp_entry_t entry)
>  {
>  	return 0;
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 098f81e8550d..994361ec75e0 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -5689,7 +5689,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
>  		 */
>  		entry = huge_ptep_get(ptep);
>  		if (unlikely(is_hugetlb_entry_migration(entry))) {
> -			migration_entry_wait_huge(vma, mm, ptep);
> +			migration_entry_wait_huge(vma, ptep);
>  			return 0;
>  		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
>  			return VM_FAULT_HWPOISON_LARGE |
> @@ -6907,7 +6907,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
>  	} else {
>  		if (is_hugetlb_entry_migration(pte)) {
>  			spin_unlock(ptl);
> -			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
> +			__migration_entry_wait_huge((pte_t *)pmd, ptl);
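
For anyone reading along in the archives: the mm/migrate.c hunk is trimmed
from the quote above. IIUC, the new helpers end up roughly like the following
(just a sketch from my side, assuming migration_entry_wait_on_locked() keeps
its current three-argument signature; see the actual patch for the real hunk):

#ifdef CONFIG_HUGETLB_PAGE
void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl)
{
	pte_t pte;

	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte))) {
		/* Raced with migration completing: nothing to wait for. */
		spin_unlock(ptl);
	} else {
		/*
		 * Pass ptep == NULL so that the wait path never tries to
		 * pte_unmap() a pte that was not mapped by the caller.
		 */
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
	}
}

void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);

	__migration_entry_wait_huge(pte, ptl);
}
#endif

The spin_lock() at the top of __migration_entry_wait_huge() is what pairs
with the spin_unlock() in the follow_huge_pmd() hunk above.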

The unlock + immediate relock looks a bit sub-optimal, but it was already
that way before your change.

Reviewed-by: David Hildenbrand <david@...hat.com>

-- 
Thanks,

David / dhildenb
