Message-ID: <CAA_GA1cBwQqaO-rj_4+MbmEgSsQ9H2fxAgs1T5X+Fze1shnkXQ@mail.gmail.com>
Date: Mon, 3 Dec 2012 10:29:47 +0800
From: Bob Liu <lliubbo@...il.com>
To: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Cc: Jiri Slaby <jslaby@...e.cz>, linux-mm@...ck.org,
LKML <linux-kernel@...r.kernel.org>,
David Rientjes <rientjes@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Andrea Arcangeli <aarcange@...hat.com>
Subject: Re: [PATCH 2/2] thp: avoid race on multiple parallel page faults to
the same page
On Fri, Nov 30, 2012 at 11:03 PM, Kirill A. Shutemov
<kirill.shutemov@...ux.intel.com> wrote:
> From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
>
> The pmd value is stable only with mm->page_table_lock taken. After taking
> the lock we need to check that nobody has modified the pmd before changing it.
>
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Reviewed-by: Bob Liu <lliubbo@...il.com>
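
For readers following along: the race being closed is the classic
check-then-act pattern. The pmd is first observed empty without the lock,
that observation can go stale before mm->page_table_lock is taken, so the
emptiness check has to be repeated under the lock and the caller has to be
able to undo its speculative work when it loses. A minimal user-space
analogy of the same discipline (plain C with pthreads, invented names,
not kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;      /* stands in for the pmd entry */

/* Analogue of set_huge_zero_page(): install only if still empty. */
static int install_if_empty(void *page)
{
        if (slot)               /* somebody else won the race */
                return 0;
        slot = page;
        return 1;
}

static void *fault_path(void *arg)
{
        void *page = malloc(64);        /* work done before taking the lock */
        int set;

        (void)arg;
        pthread_mutex_lock(&lock);
        set = install_if_empty(page);
        pthread_mutex_unlock(&lock);

        if (!set)
                free(page);     /* lost the race: undo the allocation */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, fault_path, NULL);
        pthread_create(&b, NULL, fault_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("slot installed exactly once: %p\n", slot);
        return 0;
}

Whichever thread takes the lock second sees the slot filled and frees its
own allocation, which is exactly the pte_free()/put_huge_zero_page()
cleanup the patch adds to do_huge_pmd_anonymous_page() below.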
> ---
> mm/huge_memory.c | 29 ++++++++++++++++++++++++-----
> 1 file changed, 24 insertions(+), 5 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 9d6f521..51cb8fe 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -770,17 +770,20 @@ static inline struct page *alloc_hugepage(int defrag)
> }
> #endif
>
> -static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
> +static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
> struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
> unsigned long zero_pfn)
> {
> pmd_t entry;
> + if (!pmd_none(*pmd))
> + return false;
> entry = pfn_pmd(zero_pfn, vma->vm_page_prot);
> entry = pmd_wrprotect(entry);
> entry = pmd_mkhuge(entry);
> set_pmd_at(mm, haddr, pmd, entry);
> pgtable_trans_huge_deposit(mm, pgtable);
> mm->nr_ptes++;
> + return true;
> }
>
> int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
> @@ -800,6 +803,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
> transparent_hugepage_use_zero_page()) {
> pgtable_t pgtable;
> unsigned long zero_pfn;
> + bool set;
> pgtable = pte_alloc_one(mm, haddr);
> if (unlikely(!pgtable))
> return VM_FAULT_OOM;
> @@ -810,9 +814,13 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
> goto out;
> }
> spin_lock(&mm->page_table_lock);
> - set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
> + set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
> zero_pfn);
> spin_unlock(&mm->page_table_lock);
> + if (!set) {
> + pte_free(mm, pgtable);
> + put_huge_zero_page();
> + }
> return 0;
> }
> page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
> @@ -1046,14 +1054,16 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
> */
> if (is_huge_zero_pmd(pmd)) {
> unsigned long zero_pfn;
> + bool set;
> /*
> * get_huge_zero_page() will never allocate a new page here,
> * since we already have a zero page to copy. It just takes a
> * reference.
> */
> zero_pfn = get_huge_zero_page();
> - set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
> + set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
> zero_pfn);
> + BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
> ret = 0;
> goto out_unlock;
> }
> @@ -1110,7 +1120,7 @@ unlock:
>
> static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
> struct vm_area_struct *vma, unsigned long address,
> - pmd_t *pmd, unsigned long haddr)
> + pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
> {
> pgtable_t pgtable;
> pmd_t _pmd;
> @@ -1139,6 +1149,9 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
> mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
>
> spin_lock(&mm->page_table_lock);
> + if (unlikely(!pmd_same(*pmd, orig_pmd)))
> + goto out_free_page;
> +
> pmdp_clear_flush(vma, haddr, pmd);
> /* leave pmd empty until pte is filled */
>
> @@ -1171,6 +1184,12 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
> ret |= VM_FAULT_WRITE;
> out:
> return ret;
> +out_free_page:
> + spin_unlock(&mm->page_table_lock);
> + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
> + mem_cgroup_uncharge_page(page);
> + put_page(page);
> + goto out;
> }
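
The wp-fault fallback applies the other flavor of the same rule: orig_pmd
was sampled before this function was entered, so after retaking the lock
it is revalidated with pmd_same(), and the speculatively prepared page
(plus its memcg charge and the mmu notifier range) is unwound on a
mismatch. A rough sketch of that snapshot-and-revalidate shape in the
same user-space terms (illustrative only, invented names):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long entry = 42;                 /* stands in for *pmd */

/* orig is the caller's earlier snapshot of entry. */
static int fallback(long orig)
{
        void *scratch = malloc(128);    /* expensive prep before the lock */

        pthread_mutex_lock(&lock);
        if (entry != orig) {            /* cf. !pmd_same(*pmd, orig_pmd) */
                pthread_mutex_unlock(&lock);
                free(scratch);          /* undo the speculative work */
                return -1;              /* caller retries the fault */
        }
        entry = 0;                      /* cf. pmdp_clear_flush() */
        pthread_mutex_unlock(&lock);
        free(scratch);
        return 0;
}

int main(void)
{
        long orig = entry;      /* snapshot, then lock, then revalidate */
        return fallback(orig) ? 1 : 0;
}

The difference from the first pattern is the predicate: here we need the
entry to still be exactly what we saw (pmd_same), not merely still empty
(pmd_none).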
>
> static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
> @@ -1317,7 +1336,7 @@ alloc:
> count_vm_event(THP_FAULT_FALLBACK);
> if (is_huge_zero_pmd(orig_pmd)) {
> ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
> - address, pmd, haddr);
> + address, pmd, orig_pmd, haddr);
> } else {
> ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
> pmd, orig_pmd, page, haddr);
> --
> 1.7.11.7
>
--
Regards,
--Bob