Message-Id: <20180823084709.19717-3-npiggin@gmail.com>
Date: Thu, 23 Aug 2018 18:47:09 +1000
From: Nicholas Piggin <npiggin@...il.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Nicholas Piggin <npiggin@...il.com>, torvalds@...ux-foundation.org,
luto@...nel.org, x86@...nel.org, bp@...en8.de, will.deacon@....com,
riel@...riel.com, jannh@...gle.com, ascannell@...gle.com,
dave.hansen@...el.com, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, David Miller <davem@...emloft.net>,
Martin Schwidefsky <schwidefsky@...ibm.com>,
Michael Ellerman <mpe@...erman.id.au>,
linux-arch@...r.kernel.org
Subject: [RFC PATCH 2/2] mm: mmu_notifier fix for tlb_end_vma

The generic tlb_end_vma does not call the invalidate_range mmu notifier,
and it resets the mmu_gather range, which means the notifier won't be
called on part of the range when an unmap spans multiple vmas.
ARM64 seems to be the only arch that has mmu notifiers and uses the
generic tlb_end_vma. I have not actually tested it.
Signed-off-by: Nicholas Piggin <npiggin@...il.com>
---
include/asm-generic/tlb.h | 17 +++++++++++++----
mm/memory.c | 10 ----------
2 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e811ef7b8350..79cb76b89c26 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -15,6 +15,7 @@
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H
+#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
@@ -138,6 +139,16 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
}
}
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+ if (!tlb->end)
+ return;
+
+ tlb_flush(tlb);
+ mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
+ __tlb_reset_range(tlb);
+}
+
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
@@ -186,10 +197,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define __tlb_end_vma(tlb, vma) \
do { \
- if (!tlb->fullmm && tlb->end) { \
- tlb_flush(tlb); \
- __tlb_reset_range(tlb); \
- } \
+ if (!tlb->fullmm) \
+ tlb_flush_mmu_tlbonly(tlb); \
} while (0)
#ifndef tlb_end_vma
diff --git a/mm/memory.c b/mm/memory.c
index 7c58310734eb..c4a7b6cb17c8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -238,16 +238,6 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
__tlb_reset_range(tlb);
}
-static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- if (!tlb->end)
- return;
-
- tlb_flush(tlb);
- mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
- __tlb_reset_range(tlb);
-}
-
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
--
2.17.0