Message-Id: <20190924232459.214097-4-yuzhao@google.com>
Date: Tue, 24 Sep 2019 17:24:59 -0600
From: Yu Zhao <yuzhao@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Michal Hocko <mhocko@...e.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...hat.com>,
Namhyung Kim <namhyung@...nel.org>,
Vlastimil Babka <vbabka@...e.cz>,
Hugh Dickins <hughd@...gle.com>,
"Jérôme Glisse" <jglisse@...hat.com>,
Andrea Arcangeli <aarcange@...hat.com>,
"Aneesh Kumar K . V" <aneesh.kumar@...ux.ibm.com>,
David Rientjes <rientjes@...gle.com>,
Matthew Wilcox <willy@...radead.org>,
Lance Roy <ldr709@...il.com>,
Ralph Campbell <rcampbell@...dia.com>,
Jason Gunthorpe <jgg@...pe.ca>,
Dave Airlie <airlied@...hat.com>,
Thomas Hellstrom <thellstrom@...are.com>,
Souptick Joarder <jrdr.linux@...il.com>,
Mel Gorman <mgorman@...e.de>, Jan Kara <jack@...e.cz>,
Mike Kravetz <mike.kravetz@...cle.com>,
Huang Ying <ying.huang@...el.com>,
Aaron Lu <ziqian.lzq@...fin.com>,
Omar Sandoval <osandov@...com>,
Thomas Gleixner <tglx@...utronix.de>,
Vineeth Remanan Pillai <vpillai@...italocean.com>,
Daniel Jordan <daniel.m.jordan@...cle.com>,
Mike Rapoport <rppt@...ux.ibm.com>,
Joel Fernandes <joel@...lfernandes.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Duyck <alexander.h.duyck@...ux.intel.com>,
Pavel Tatashin <pavel.tatashin@...rosoft.com>,
David Hildenbrand <david@...hat.com>,
Juergen Gross <jgross@...e.com>,
Anthony Yznaga <anthony.yznaga@...cle.com>,
Johannes Weiner <hannes@...xchg.org>,
"Darrick J . Wong" <darrick.wong@...cle.com>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Yu Zhao <yuzhao@...gle.com>
Subject: [PATCH v3 4/4] mm: remove unnecessary smp_wmb() in __SetPageUptodate()
The smp_wmb()s added in the previous patch guarantee that user data
is visible before a page is exposed by set_pte_at(). So there is no
need for __SetPageUptodate() to have a built-in write barrier.
There are 13 __SetPageUptodate() call sites in total for the
non-hugetlb case. 12 of them reuse the smp_wmb()s added in the
previous patch. The one in shmem_mfill_atomic_pte() doesn't need an
explicit write barrier because of the shmem_add_to_page_cache() that
follows it.
Signed-off-by: Yu Zhao <yuzhao@...gle.com>
---
include/linux/page-flags.h | 6 +++++-
kernel/events/uprobes.c | 2 +-
mm/huge_memory.c | 11 +++--------
mm/khugepaged.c | 2 +-
mm/memory.c | 13 ++++---------
mm/migrate.c | 7 +------
mm/swapfile.c | 2 +-
mm/userfaultfd.c | 7 +------
8 files changed, 17 insertions(+), 33 deletions(-)
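Note (not part of the patch): a minimal sketch of the ordering
contract this series relies on at a typical anonymous-fault call
site. The sequence is illustrative only; the variable names are made
up and error handling is omitted:

        copy_user_highpage(page, src, addr, vma);  /* write user data */
        __SetPageUptodate(page);       /* now a plain __set_bit() */
        smp_wmb();                     /* commit data and flag ... */
        set_pte_at(mm, addr, ptep, entry); /* ... before exposing the
                                              page, e.g., to fast gup */

The explicit smp_wmb() pairs with the smp_rmb() in PageUptodate(), so
any thread that observes PG_uptodate also observes the page contents.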
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f91cb8898ff0..2481f9ad5f5b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -508,10 +508,14 @@ static inline int PageUptodate(struct page *page)
return ret;
}
+/*
+ * Only use this function when a write barrier follows it, e.g., an
+ * explicit smp_wmb(), or when the page will be added to the page
+ * cache or swap cache while locked.
+ */
static __always_inline void __SetPageUptodate(struct page *page)
{
VM_BUG_ON_PAGE(PageTail(page), page);
- smp_wmb();
__set_bit(PG_uptodate, &page->flags);
}
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 7069785e2e52..6ceae92afcc0 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -194,7 +194,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
ptep_clear_flush_notify(vma, addr, pvmw.pte);
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb();
set_pte_at_notify(mm, addr, pvmw.pte,
mk_pte(new_page, vma->vm_page_prot));
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 21d271a29d96..101e7bd61e8f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -580,11 +580,6 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
}
clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
- /*
- * The memory barrier inside __SetPageUptodate makes sure that
- * clear_huge_page writes become visible before the set_pmd_at()
- * write.
- */
__SetPageUptodate(page);
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
@@ -616,7 +611,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
mem_cgroup_commit_charge(page, memcg, false, true);
lru_cache_add_active_or_unevictable(page, vma);
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb();
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
@@ -1278,7 +1273,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
}
kfree(pages);
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb(); /* make pte visible before pmd */
pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
@@ -1427,7 +1422,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
page_add_new_anon_rmap(new_page, vma, haddr, true);
mem_cgroup_commit_charge(new_page, memcg, false, true);
lru_cache_add_active_or_unevictable(new_page, vma);
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb();
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index f2901edce6de..668918842712 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1074,7 +1074,7 @@ static void collapse_huge_page(struct mm_struct *mm,
count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
lru_cache_add_active_or_unevictable(new_page, vma);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb();
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache_pmd(vma, address, pmd);
diff --git a/mm/memory.c b/mm/memory.c
index 6dabbc3cd3b7..db001d919e60 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2367,7 +2367,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* mmu page tables (such as kvm shadow page tables), we want the
* new page to be mapped directly into the secondary page table.
*/
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb();
set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
update_mmu_cache(vma, vmf->address, vmf->pte);
@@ -2887,7 +2887,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
page_add_new_anon_rmap(page, vma, vmf->address, false);
mem_cgroup_commit_charge(page, memcg, false, false);
lru_cache_add_active_or_unevictable(page, vma);
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb();
} else {
do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
@@ -3006,11 +3006,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
false))
goto oom_free_page;
- /*
- * The memory barrier inside __SetPageUptodate makes sure that
- * preceeding stores to the page contents become visible before
- * the set_pte_at() write.
- */
__SetPageUptodate(page);
entry = mk_pte(page, vma->vm_page_prot);
@@ -3038,7 +3033,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
page_add_new_anon_rmap(page, vma, vmf->address, false);
mem_cgroup_commit_charge(page, memcg, false, false);
lru_cache_add_active_or_unevictable(page, vma);
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb();
setpte:
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
@@ -3303,7 +3298,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
page_add_new_anon_rmap(page, vma, vmf->address, false);
mem_cgroup_commit_charge(page, memcg, false, false);
lru_cache_add_active_or_unevictable(page, vma);
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb();
} else {
inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
diff --git a/mm/migrate.c b/mm/migrate.c
index 943d147ecc3e..dc0ab9fbe36e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2729,11 +2729,6 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
goto abort;
- /*
- * The memory barrier inside __SetPageUptodate makes sure that
- * preceding stores to the page contents become visible before
- * the set_pte_at() write.
- */
__SetPageUptodate(page);
if (is_zone_device_page(page)) {
@@ -2783,7 +2778,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
lru_cache_add_active_or_unevictable(page, vma);
get_page(page);
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb();
if (flush) {
flush_cache_page(vma, addr, pte_pfn(*ptep));
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5c5547053ee0..dc9f1b1ba1a6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1887,7 +1887,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
page_add_new_anon_rmap(page, vma, addr, false);
mem_cgroup_commit_charge(page, memcg, false, false);
lru_cache_add_active_or_unevictable(page, vma);
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb();
}
set_pte_at(vma->vm_mm, addr, pte,
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 4f92913242a1..34083680869e 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -58,11 +58,6 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
*pagep = NULL;
}
- /*
- * The memory barrier inside __SetPageUptodate makes sure that
- * preceeding stores to the page contents become visible before
- * the set_pte_at() write.
- */
__SetPageUptodate(page);
ret = -ENOMEM;
@@ -92,7 +87,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
mem_cgroup_commit_charge(page, memcg, false, false);
lru_cache_add_active_or_unevictable(page, dst_vma);
- /* commit non-atomic ops before exposing to fast gup */
+ /* commit non-atomic ops and user data */
smp_wmb();
set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
--
2.23.0.351.gc4317032e6-goog