Message-Id: <20180728215357.3249-10-riel@surriel.com>
Date:   Sat, 28 Jul 2018 17:53:56 -0400
From:   Rik van Riel <riel@...riel.com>
To:     linux-kernel@...r.kernel.org
Cc:     kernel-team@...com, peterz@...radead.org, luto@...nel.org,
        x86@...nel.org, vkuznets@...hat.com, mingo@...nel.org,
        efault@....de, dave.hansen@...el.com, will.deacon@....com,
        catalin.marinas@....com, benh@...nel.crashing.org,
        Rik van Riel <riel@...riel.com>
Subject: [PATCH 09/10] mm,x86: shoot down lazy TLB references at exit_mmap time

Shooting down lazy TLB references to an mm at exit_mmap time ensures
that no users of the mm_struct will be left anywhere in the system,
allowing it to be torn down and freed immediately.
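
In outline, the new arch hook checks whether any other CPU still has this
mm in its cpumask and, if so, makes every CPU in that mask run leave_mm()
before returning. For reference, a commented copy of the function added in
the diff below (the comments are editorial, not part of the patch itself):

	void lazy_tlb_exit_mmap(struct mm_struct *mm)
	{
		/* get_cpu() disables preemption so "cpu" stays valid. */
		int cpu = get_cpu();

		/*
		 * Is any CPU besides this one still tracked in the mm's
		 * cpumask?  If so, it may hold a lazy TLB reference.
		 */
		if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
			/* Run leave_mm() on every CPU in the mask, waiting
			 * for all of them to finish. */
			on_each_cpu_mask(mm_cpumask(mm), leave_mm, NULL, 1);

		put_cpu();
	}

leave_mm(), already defined in arch/x86/mm/tlb.c, switches a lazily-using
CPU over to init_mm, so once on_each_cpu_mask() has completed no CPU is
left referencing the dying mm.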

Signed-off-by: Rik van Riel <riel@...riel.com>
Suggested-by: Andy Lutomirski <luto@...nel.org>
Suggested-by: Peter Zijlstra <peterz@...radead.org>
---
 arch/x86/Kconfig                   |  1 +
 arch/x86/include/asm/mmu_context.h |  1 +
 arch/x86/include/asm/tlbflush.h    |  2 ++
 arch/x86/mm/tlb.c                  | 15 +++++++++++++++
 4 files changed, 19 insertions(+)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6d4774f203d0..ecdfc6933203 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -75,6 +75,7 @@ config X86
 	select ARCH_MIGHT_HAVE_ACPI_PDC		if ACPI
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_NO_ACTIVE_MM_REFCOUNTING
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_NUMA_BALANCING	if X86_64
 	select ARCH_USE_BUILTIN_BSWAP
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index eeeb9289c764..529bf7bc5f75 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -238,6 +238,7 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
 {
 	paravirt_arch_exit_mmap(mm);
 	ldt_arch_exit_mmap(mm);
+	lazy_tlb_exit_mmap(mm);
 }
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 511bf5fae8b8..3966a45367cd 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -538,6 +538,8 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 	native_flush_tlb_others(mask, info)
 #endif
 
+extern void lazy_tlb_exit_mmap(struct mm_struct *mm);
+
 extern void tlb_flush_remove_tables(struct mm_struct *mm);
 extern void tlb_flush_remove_tables_local(void *arg);
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ea4ef5ceaba2..7b1add904396 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -713,6 +713,21 @@ void tlb_flush_remove_tables(struct mm_struct *mm)
 	put_cpu();
 }
 
+/*
+ * At exit or execve time, all other threads of a process have disappeared,
+ * but other CPUs could still be referencing this mm in lazy TLB mode.
+ * Get rid of those references before releasing the mm.
+ */
+void lazy_tlb_exit_mmap(struct mm_struct *mm)
+{
+	int cpu = get_cpu();
+
+	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
+		on_each_cpu_mask(mm_cpumask(mm), leave_mm, NULL, 1);
+
+	put_cpu();
+}
+
 static void do_flush_tlb_all(void *info)
 {
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-- 
2.14.4
