[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20200331142927.1237-7-yezhenyu2@huawei.com>
Date: Tue, 31 Mar 2020 22:29:25 +0800
From: Zhenyu Ye <yezhenyu2@...wei.com>
To: <peterz@...radead.org>, <mark.rutland@....com>, <will@...nel.org>,
<catalin.marinas@....com>, <aneesh.kumar@...ux.ibm.com>,
<akpm@...ux-foundation.org>, <npiggin@...il.com>, <arnd@...db.de>,
<rostedt@...dmis.org>, <maz@...nel.org>, <suzuki.poulose@....com>,
<tglx@...utronix.de>, <yuzhao@...gle.com>, <Dave.Martin@....com>,
<steven.price@....com>, <broonie@...nel.org>,
<guohanjun@...wei.com>, <corbet@....net>, <vgupta@...opsys.com>,
<tony.luck@...el.com>
CC: <yezhenyu2@...wei.com>, <linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>, <linux-arch@...r.kernel.org>,
<linux-mm@...ck.org>, <arm@...nel.org>, <xiexiangyou@...wei.com>,
<prime.zeng@...ilicon.com>, <zhangshaokun@...ilicon.com>,
<kuhn.chenqun@...wei.com>
Subject: [RFC PATCH v5 6/8] mm: tlb: Pass struct mmu_gather to flush_hugetlb_tlb_range
Preparation for passing struct mmu_gather to
flush_tlb_range. This will be used by later patches in this series.
Signed-off-by: Zhenyu Ye <yezhenyu2@...wei.com>
---
arch/powerpc/include/asm/book3s/64/tlbflush.h | 3 ++-
mm/hugetlb.c | 17 ++++++++++++-----
2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 6445d179ac15..968f10ef3d51 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -57,7 +57,8 @@ static inline void flush_pmd_tlb_range(struct mmu_gather *tlb,
}
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
-static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+static inline void flush_hugetlb_tlb_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dd8737a94bec..f913ce0b4831 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4441,7 +4441,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
* ARCHes with special requirements for evicting HUGETLB backing TLB entries can
* implement this.
*/
-#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#define flush_hugetlb_tlb_range(tlb, vma, addr, end) \
+ flush_tlb_range(vma, addr, end)
#endif
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
@@ -4455,6 +4456,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long pages = 0;
bool shared_pmd = false;
struct mmu_notifier_range range;
+ struct mmu_gather tlb;
/*
* In the case of shared PMDs, the area to flush could be beyond
@@ -4520,10 +4522,15 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* and that page table be reused and filled with junk. If we actually
* did unshare a page of pmds, flush the range corresponding to the pud.
*/
- if (shared_pmd)
- flush_hugetlb_tlb_range(vma, range.start, range.end);
- else
- flush_hugetlb_tlb_range(vma, start, end);
+ if (shared_pmd) {
+ tlb_gather_mmu(&tlb, mm, range.start, range.end);
+ flush_hugetlb_tlb_range(&tlb, vma, range.start, range.end);
+ tlb_finish_mmu(&tlb, range.start, range.end);
+ } else {
+ tlb_gather_mmu(&tlb, mm, start, end);
+ flush_hugetlb_tlb_range(&tlb, vma, start, end);
+ tlb_finish_mmu(&tlb, start, end);
+ }
/*
* No need to call mmu_notifier_invalidate_range() we are downgrading
* page table protection not changing it to point to a new page.
--
2.19.1
Powered by blists - more mailing lists