Message-Id: <20220124174744.1054712-32-ardb@kernel.org>
Date: Mon, 24 Jan 2022 18:47:43 +0100
From: Ard Biesheuvel <ardb@...nel.org>
To: linux@...linux.org.uk, linux-arm-kernel@...ts.infradead.org
Cc: linux-hardening@...r.kernel.org, Ard Biesheuvel <ardb@...nel.org>,
	Nicolas Pitre <nico@...xnic.net>,
	Arnd Bergmann <arnd@...db.de>,
	Kees Cook <keescook@...omium.org>,
	Keith Packard <keithpac@...zon.com>,
	Linus Walleij <linus.walleij@...aro.org>,
	Nick Desaulniers <ndesaulniers@...gle.com>,
	Tony Lindgren <tony@...mide.com>,
	Marc Zyngier <maz@...nel.org>,
	Vladimir Murzin <vladimir.murzin@....com>,
	Jesse Taube <mr.bossman075@...il.com>
Subject: [PATCH v5 31/32] ARM: mm: prepare vmalloc_seq handling for use under SMP

Currently, the vmalloc_seq counter is only used to keep track of changes
in the vmalloc region on !SMP builds, which means there is no need to
deal with concurrency explicitly.

However, in a subsequent patch, we will wire up this same mechanism to
ensure that vmap'ed stacks are mapped by the active mm before switching
to a task, and there we need the changes to the page tables to be
visible to other CPUs as soon as they observe a change in the sequence
count.

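To illustrate the requirement, here is a stand-alone user-space sketch of
the same publish-with-release pattern, written with C11 atomics and
pthreads rather than the kernel's atomic_t helpers; the names (table, seq,
writer, reader) are invented for the example and do not appear in the
kernel sources. A reader that observes the bumped counter with acquire
ordering is guaranteed to also see the data stored before the release:

/* build: cc -std=c11 -pthread seq_example.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int table[4];		/* stands in for the vmalloc PGD entries */
static atomic_int seq;		/* stands in for vmalloc_seq */

static void *writer(void *arg)
{
	table[0] = 42;		/* update the shared data first ... */
	/* ... then publish it: release orders the table store before the bump */
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);
	return arg;
}

static void *reader(void *arg)
{
	/* acquire pairs with the writer's release ... */
	while (atomic_load_explicit(&seq, memory_order_acquire) == 0)
		;
	/* ... so once the new count is seen, the table update is visible too */
	printf("table[0] = %d\n", table[0]);	/* always prints 42 */
	return arg;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
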
Since LPAE needs none of this, fold a check against it into the
vmalloc_seq counter check after breaking it out into a separate static
inline helper.
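
As a further illustration of that fold, the minimal sketch below shows how
a constant configuration switch short-circuits the counter comparison so
the compiler can drop the check entirely; every identifier in it
(EXAMPLE_CONFIG_LPAE, example_mm, example_check_vmalloc_seq, ...) is a
placeholder invented for the example. The real helper is the
check_vmalloc_seq() added in the mmu_context.h hunk below.

/* Illustration only: placeholder names, not the kernel's identifiers. */
#include <stdatomic.h>
#include <stdio.h>

#define EXAMPLE_CONFIG_LPAE 0	/* flip to 1 to compile the check away */

struct example_mm { atomic_int vmalloc_seq; };

static struct example_mm example_init_mm;
static struct example_mm example_task_mm;

static void example_slow_sync(struct example_mm *mm)
{
	/* models __check_vmalloc_seq(): adopt init's count, published with release */
	atomic_store_explicit(&mm->vmalloc_seq,
			      atomic_load(&example_init_mm.vmalloc_seq),
			      memory_order_release);
}

static inline void example_check_vmalloc_seq(struct example_mm *mm)
{
	/* the constant short-circuits the &&, so the check folds away when set */
	if (!EXAMPLE_CONFIG_LPAE &&
	    atomic_load(&mm->vmalloc_seq) !=
	    atomic_load(&example_init_mm.vmalloc_seq))
		example_slow_sync(mm);
}

int main(void)
{
	atomic_fetch_add(&example_init_mm.vmalloc_seq, 1);	/* "vmalloc changed" */
	example_check_vmalloc_seq(&example_task_mm);		/* resyncs the task mm */
	printf("task seq = %d\n", atomic_load(&example_task_mm.vmalloc_seq));
	return 0;
}
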
Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
---
 arch/arm/include/asm/mmu.h         |  2 +-
 arch/arm/include/asm/mmu_context.h | 13 +++++++++++--
 arch/arm/mm/context.c              |  3 +--
 arch/arm/mm/ioremap.c              | 18 +++++++++++-------
 4 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 1592a4264488..e049723840d3 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -10,7 +10,7 @@ typedef struct {
 #else
 	int		switch_pending;
 #endif
-	unsigned int	vmalloc_seq;
+	atomic_t	vmalloc_seq;
 	unsigned long	sigpage;
 #ifdef CONFIG_VDSO
 	unsigned long	vdso;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 84e58956fcab..71a26986efb9 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -23,6 +23,16 @@
 
 void __check_vmalloc_seq(struct mm_struct *mm);
 
+#ifdef CONFIG_MMU
+static inline void check_vmalloc_seq(struct mm_struct *mm)
+{
+	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
+	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
+		     atomic_read(&init_mm.context.vmalloc_seq)))
+		__check_vmalloc_seq(mm);
+}
+#endif
+
 #ifdef CONFIG_CPU_HAS_ASID
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
@@ -52,8 +62,7 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
-		__check_vmalloc_seq(mm);
+	check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 48091870db89..4204ffa2d104 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -240,8 +240,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	unsigned int cpu = smp_processor_id();
 	u64 asid;
 
-	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
-		__check_vmalloc_seq(mm);
+	check_vmalloc_seq(mm);
 
 	/*
 	 * We cannot update the pgd and the ASID atomicly with classic
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 197f8eb3a775..aa08bcb72db9 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -117,16 +117,21 @@ EXPORT_SYMBOL(ioremap_page);
 
 void __check_vmalloc_seq(struct mm_struct *mm)
 {
-	unsigned int seq;
+	int seq;
 
 	do {
-		seq = init_mm.context.vmalloc_seq;
+		seq = atomic_read(&init_mm.context.vmalloc_seq);
 		memcpy(pgd_offset(mm, VMALLOC_START),
 		       pgd_offset_k(VMALLOC_START),
 		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
-		mm->context.vmalloc_seq = seq;
-	} while (seq != init_mm.context.vmalloc_seq);
+		/*
+		 * Use a store-release so that other CPUs that observe the
+		 * counter's new value are guaranteed to see the results of the
+		 * memcpy as well.
+		 */
+		atomic_set_release(&mm->context.vmalloc_seq, seq);
+	} while (seq != atomic_read(&init_mm.context.vmalloc_seq));
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -157,7 +162,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 			 * Note: this is still racy on SMP machines.
 			 */
 			pmd_clear(pmdp);
-			init_mm.context.vmalloc_seq++;
+			atomic_inc_return_release(&init_mm.context.vmalloc_seq);
 
 			/*
 			 * Free the page table, if there was one.
@@ -174,8 +179,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 	 * Ensure that the active_mm is up to date - we want to
 	 * catch any use-after-iounmap cases.
 	 */
-	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
-		__check_vmalloc_seq(current->active_mm);
+	check_vmalloc_seq(current->active_mm);
 
 	flush_tlb_kernel_range(virt, end);
 }
--
2.30.2