Date:   Tue,  8 Aug 2017 13:22:20 -0700
From:   Ashok Raj <ashok.raj@...el.com>
To:     linux-kernel@...r.kernel.org, Joerg Roedel <joro@...tes.org>
Cc:     Huang Ying <ying.huang@...el.com>, Ashok Raj <ashok.raj@...el.com>,
        Dave Hansen <dave.hansen@...el.com>,
        CQ Tang <cq.tang@...el.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        "H . Peter Anvin" <hpa@...or.com>,
        Andy Lutomirski <luto@...nel.org>,
        Rik van Riel <riel@...hat.com>,
        Kees Cook <keescook@...omium.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
        Michal Hocko <mhocko@...e.com>,
        "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
        Vegard Nossum <vegard.nossum@...cle.com>, x86@...nel.org,
        linux-mm@...ck.org, iommu@...ts-foundation.org,
        David Woodhouse <dwmw2@...radead.org>,
        Jean-Philippe Brucker <jean-philippe.brucker@....com>
Subject: [PATCH 3/4] mm: Add kernel MMU notifier to manage remote TLB

From: Huang Ying <ying.huang@...el.com>

Shared Virtual Memory (SVM) devices have TLBs that cache entries from
the CPU's page tables.  We need SVM device drivers to flush them at
the same time that we flush the CPU TLBs.  We can use the existing MMU
notifiers for userspace updates, but we lack a mechanism to get
notified when kernel page tables are updated.

To provide the same notification for the kernel address space, this
patch adds a kernel MMU notifier chain that is called whenever the CPU
TLB is flushed for the kernel address space.  The IOMMU SVM driver can
register on this chain to flush the device TLBs when necessary.
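
For illustration only (not part of this patch): a driver with SVM
support might consume the notification roughly as sketched below.  The
svm_* names are invented for the example; only
kernel_mmu_notifier_register()/_unregister(),
KERNEL_MMU_INVALIDATE_RANGE and struct kernel_mmu_address_range come
from this patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/mmu_notifier.h>

/* Placeholder: the driver would invalidate its device TLB here. */
static void svm_flush_dev_iotlb(unsigned long start, unsigned long end)
{
}

static int svm_kernel_mmu_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct kernel_mmu_address_range *range = data;

	/* KERNEL_MMU_INVALIDATE_RANGE is the only action defined so far. */
	if (action == KERNEL_MMU_INVALIDATE_RANGE)
		svm_flush_dev_iotlb(range->start, range->end);

	return NOTIFY_OK;
}

static struct notifier_block svm_kernel_mmu_nb = {
	.notifier_call = svm_kernel_mmu_notify,
};

static int __init svm_example_init(void)
{
	/* Start receiving invalidations for the kernel address space. */
	return kernel_mmu_notifier_register(&svm_kernel_mmu_nb);
}

static void __exit svm_example_exit(void)
{
	kernel_mmu_notifier_unregister(&svm_kernel_mmu_nb);
}

module_init(svm_example_init);
module_exit(svm_example_exit);
MODULE_LICENSE("GPL");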

To: linux-kernel@...r.kernel.org
To: Joerg Roedel <joro@...tes.org>
Cc: Ashok Raj <ashok.raj@...el.com>
Cc: Dave Hansen <dave.hansen@...el.com>
Cc: CQ Tang <cq.tang@...el.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Rik van Riel <riel@...hat.com>
Cc: Kees Cook <keescook@...omium.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Cc: Michal Hocko <mhocko@...e.com>
Cc: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
Cc: Vegard Nossum <vegard.nossum@...cle.com>
Cc: x86@...nel.org
Cc: linux-mm@...ck.org
Cc: iommu@...ts-foundation.org
Cc: David Woodhouse <dwmw2@...radead.org>
Cc: Jean-Philippe Brucker <jean-philippe.brucker@....com>
Signed-off-by: "Huang, Ying" <ying.huang@...el.com>
---
 arch/x86/include/asm/tlbflush.h |  1 +
 arch/x86/mm/tlb.c               |  1 +
 include/linux/mmu_notifier.h    | 33 +++++++++++++++++++++++++++++++++
 mm/mmu_notifier.c               | 25 +++++++++++++++++++++++++
 4 files changed, 60 insertions(+)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 50ea348..f5fd0b8 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -3,6 +3,7 @@
 
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 014d07a..6dea8e9 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -314,6 +314,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		info.end = end;
 		on_each_cpu(do_kernel_range_flush, &info, 1);
 	}
+	kernel_mmu_notifier_invalidate_range(start, end);
 }
 
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index c91b3bc..4a96089 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -418,6 +418,25 @@ extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
 				   void (*func)(struct rcu_head *rcu));
 extern void mmu_notifier_synchronize(void);
 
+struct kernel_mmu_address_range {
+	unsigned long start;
+	unsigned long end;
+};
+
+/*
+ * Before a virtual address range managed by the kernel (vmalloc/kmap)
+ * is reused, that is, remapped to new physical addresses, the kernel
+ * MMU notifier chain is called with KERNEL_MMU_INVALIDATE_RANGE and a
+ * struct kernel_mmu_address_range describing the range.  This is used
+ * to manage the remote (device) TLB.
+ */
+#define KERNEL_MMU_INVALIDATE_RANGE		1
+extern int kernel_mmu_notifier_register(struct notifier_block *nb);
+extern int kernel_mmu_notifier_unregister(struct notifier_block *nb);
+
+extern int kernel_mmu_notifier_invalidate_range(unsigned long start,
+						unsigned long end);
+
 #else /* CONFIG_MMU_NOTIFIER */
 
 static inline void mmu_notifier_release(struct mm_struct *mm)
@@ -479,6 +498,20 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 #define pudp_huge_clear_flush_notify pudp_huge_clear_flush
 #define set_pte_at_notify set_pte_at
 
+static inline int kernel_mmu_notifier_register(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int kernel_mmu_notifier_unregister(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline void kernel_mmu_notifier_invalidate_range(unsigned long start,
+							unsigned long end)
+{
+}
 #endif /* CONFIG_MMU_NOTIFIER */
 
 #endif /* _LINUX_MMU_NOTIFIER_H */
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 54ca545..a919038 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -400,3 +400,28 @@ void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
 	mmdrop(mm);
 }
 EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);
+
+static ATOMIC_NOTIFIER_HEAD(kernel_mmu_notifier_list);
+
+int kernel_mmu_notifier_register(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&kernel_mmu_notifier_list, nb);
+}
+
+int kernel_mmu_notifier_unregister(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&kernel_mmu_notifier_list, nb);
+}
+
+int kernel_mmu_notifier_invalidate_range(unsigned long start,
+					 unsigned long end)
+{
+	struct kernel_mmu_address_range range = {
+		.start	= start,
+		.end	= end,
+	};
+
+	return atomic_notifier_call_chain(&kernel_mmu_notifier_list,
+					  KERNEL_MMU_INVALIDATE_RANGE,
+					  &range);
+}
-- 
2.7.4
