Message-Id: <20220223052223.1202152-5-junaids@google.com>
Date: Tue, 22 Feb 2022 21:21:40 -0800
From: Junaid Shahid <junaids@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: kvm@...r.kernel.org, pbonzini@...hat.com, jmattson@...gle.com,
pjt@...gle.com, oweisse@...gle.com, alexandre.chartre@...cle.com,
rppt@...ux.ibm.com, dave.hansen@...ux.intel.com,
peterz@...radead.org, tglx@...utronix.de, luto@...nel.org,
linux-mm@...ck.org
Subject: [RFC PATCH 04/47] mm: asi: ASI support in interrupts/exceptions

Add support for potentially switching address spaces from within
interrupts/exceptions/NMIs, etc. An interrupt does not automatically
switch to the unrestricted address space. If it needs to access memory
that is not mapped in the restricted address space, it can switch
explicitly using the normal asi_exit call.
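
As a rough illustration (hypothetical code, not part of this patch: the
handler, the device structure and the helper below are made up), such a
handler could look like this:

/*
 * Illustrative sketch only. The interrupt runs in whatever address
 * space it arrived in. If the handler needs data that is not mapped in
 * the restricted address space, it calls asi_exit() and simply carries
 * on; it never switches back itself -- that happens automatically on
 * return from the outermost interrupt if the restricted address space
 * is still the target.
 */
static irqreturn_t example_irq_handler(int irq, void *data)
{
	struct example_dev *dev = data;		/* hypothetical device */

	if (dev->needs_full_mapping) {		/* hypothetical flag */
		asi_exit();		/* drop to the unrestricted address space */
		example_process(dev);	/* hypothetical helper */
	}

	return IRQ_HANDLED;
}
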
On return from the outermost interrupt, if the target address space was
the restricted address space (e.g. we were in the critical code path
between ASI Enter and VM Enter), the restricted address space will be
automatically restored. Otherwise, execution will continue in the
unrestricted address space until the next explicit ASI Enter.

In order to keep track of when to restore the restricted address space,
an interrupt/exception nesting depth counter is maintained per task.
An alternative implementation that does not need this counter is also
possible, but the counter has the additional nice-to-have benefit of
allowing detection of whether we are currently executing inside an
exception context, which will be useful in a later patch.
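
To illustrate the intended flow (informal sketch, not code from this
patch): suppose the task has called asi_enter() and is on the critical
path towards VM Enter when an IRQ arrives, and the IRQ is in turn
interrupted by an NMI that needs memory not mapped in the restricted
address space:

  IRQ entry:  asi_intr_enter()   intr_nest_depth 0 -> 1
  NMI entry:  asi_intr_enter()   intr_nest_depth 1 -> 2
  NMI body:   asi_exit()         switch to the unrestricted address space
  NMI exit:   asi_intr_exit()    intr_nest_depth 2 -> 1, not outermost,
                                 so stay in the unrestricted address space
  IRQ exit:   asi_intr_exit()    intr_nest_depth 1 -> 0, outermost, so
                                 __asi_enter() restores the restricted
                                 address space before returning to the
                                 interrupted critical path
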
Signed-off-by: Junaid Shahid <junaids@...gle.com>
---
arch/x86/include/asm/asi.h | 35 ++++++++++++++++++++++++++++++++
arch/x86/include/asm/idtentry.h | 25 +++++++++++++++++++++--
arch/x86/include/asm/processor.h | 5 +++++
arch/x86/kernel/process.c | 2 ++
arch/x86/kernel/traps.c | 2 ++
arch/x86/mm/asi.c | 3 ++-
kernel/entry/common.c | 6 ++++++
7 files changed, 75 insertions(+), 3 deletions(-)
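
For reference, with the DEFINE_IDTENTRY_RAW change below, a raw IDT
entry point -- call it exc_example, a hypothetical name used purely for
illustration -- expands roughly as follows: the original handler body
becomes the inner __exc_example() and is bracketed by the nesting-depth
updates.

static __always_inline void __exc_example(struct pt_regs *regs);

__visible noinstr void exc_example(struct pt_regs *regs)
{
	asi_intr_enter();	/* bump intr_nest_depth */
	__exc_example(regs);	/* original handler body runs here */
	asi_intr_exit();	/* if outermost, re-enter the target ASI */
}

static __always_inline void __exc_example(struct pt_regs *regs)
{
	/* ... body originally supplied to DEFINE_IDTENTRY_RAW() ... */
}
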
diff --git a/arch/x86/include/asm/asi.h b/arch/x86/include/asm/asi.h
index 0a4af23ed0eb..7702332c62e8 100644
--- a/arch/x86/include/asm/asi.h
+++ b/arch/x86/include/asm/asi.h
@@ -4,6 +4,8 @@
#include <asm-generic/asi.h>
+#include <linux/sched.h>
+
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/cpufeature.h>
@@ -51,6 +53,11 @@ void asi_destroy(struct asi *asi);
void asi_enter(struct asi *asi);
void asi_exit(void);
+static inline void asi_init_thread_state(struct thread_struct *thread)
+{
+ thread->intr_nest_depth = 0;
+}
+
static inline void asi_set_target_unrestricted(void)
{
if (static_cpu_has(X86_FEATURE_ASI)) {
@@ -85,6 +92,34 @@ static inline bool asi_is_target_unrestricted(void)
#define static_asi_enabled() cpu_feature_enabled(X86_FEATURE_ASI)
+static inline void asi_intr_enter(void)
+{
+ if (static_cpu_has(X86_FEATURE_ASI)) {
+ current->thread.intr_nest_depth++;
+ barrier();
+ }
+}
+
+static inline void asi_intr_exit(void)
+{
+ void __asi_enter(void);
+
+ if (static_cpu_has(X86_FEATURE_ASI)) {
+ barrier();
+
+ if (--current->thread.intr_nest_depth == 0)
+ __asi_enter();
+ }
+}
+
+#else /* CONFIG_ADDRESS_SPACE_ISOLATION */
+
+static inline void asi_intr_enter(void) { }
+
+static inline void asi_intr_exit(void) { }
+
+static inline void asi_init_thread_state(struct thread_struct *thread) { }
+
#endif /* CONFIG_ADDRESS_SPACE_ISOLATION */
#endif
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 1345088e9902..ea5cdc90403d 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -10,6 +10,7 @@
#include <linux/hardirq.h>
#include <asm/irq_stack.h>
+#include <asm/asi.h>
/**
* DECLARE_IDTENTRY - Declare functions for simple IDT entry points
@@ -133,7 +134,16 @@ static __always_inline void __##func(struct pt_regs *regs, \
* is required before the enter/exit() helpers are invoked.
*/
#define DEFINE_IDTENTRY_RAW(func) \
-__visible noinstr void func(struct pt_regs *regs)
+static __always_inline void __##func(struct pt_regs *regs); \
+ \
+__visible noinstr void func(struct pt_regs *regs) \
+{ \
+ asi_intr_enter(); \
+ __##func (regs); \
+ asi_intr_exit(); \
+} \
+ \
+static __always_inline void __##func(struct pt_regs *regs)
/**
* DECLARE_IDTENTRY_RAW_ERRORCODE - Declare functions for raw IDT entry points
@@ -161,7 +171,18 @@ __visible noinstr void func(struct pt_regs *regs)
* is required before the enter/exit() helpers are invoked.
*/
#define DEFINE_IDTENTRY_RAW_ERRORCODE(func) \
-__visible noinstr void func(struct pt_regs *regs, unsigned long error_code)
+static __always_inline void __##func(struct pt_regs *regs, \
+ unsigned long error_code); \
+ \
+__visible noinstr void func(struct pt_regs *regs, unsigned long error_code)\
+{ \
+ asi_intr_enter(); \
+ __##func (regs, error_code); \
+ asi_intr_exit(); \
+} \
+ \
+static __always_inline void __##func(struct pt_regs *regs, \
+ unsigned long error_code)
/**
* DECLARE_IDTENTRY_IRQ - Declare functions for device interrupt IDT entry
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 355d38c0cf60..20116efd2756 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -519,6 +519,11 @@ struct thread_struct {
unsigned int iopl_warn:1;
unsigned int sig_on_uaccess_err:1;
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+ /* The nesting depth of exceptions/interrupts */
+ int intr_nest_depth;
+#endif
+
/*
* Protection Keys Register for Userspace. Loaded immediately on
* context switch. Store it in thread_struct to avoid a lookup in
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 04143a653a8a..c8d4a00a4de7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -90,6 +90,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
#ifdef CONFIG_VM86
dst->thread.vm86 = NULL;
#endif
+ asi_init_thread_state(&dst->thread);
+
/* Drop the copied pointer to current's fpstate */
dst->thread.fpu.fpstate = NULL;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c9d566dcf89a..acf675ddda96 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -61,6 +61,7 @@
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/vdso.h>
+#include <asm/asi.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
@@ -413,6 +414,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
}
#endif
+ asi_exit();
irqentry_nmi_enter(regs);
instrumentation_begin();
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
diff --git a/arch/x86/mm/asi.c b/arch/x86/mm/asi.c
index d274c86f89b7..2453124f221d 100644
--- a/arch/x86/mm/asi.c
+++ b/arch/x86/mm/asi.c
@@ -107,12 +107,13 @@ void asi_destroy(struct asi *asi)
}
EXPORT_SYMBOL_GPL(asi_destroy);
-static void __asi_enter(void)
+void __asi_enter(void)
{
u64 asi_cr3;
struct asi *target = this_cpu_read(asi_cpu_state.target_asi);
VM_BUG_ON(preemptible());
+ VM_BUG_ON(current->thread.intr_nest_depth != 0);
if (!target || target == this_cpu_read(asi_cpu_state.curr_asi))
return;
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index d5a61d565ad5..9064253085c7 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -9,6 +9,8 @@
#include "common.h"
+#include <asm/asi.h>
+
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@@ -321,6 +323,8 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
.exit_rcu = false,
};
+ asi_intr_enter();
+
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
return ret;
@@ -416,6 +420,7 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
instrumentation_end();
rcu_irq_exit();
lockdep_hardirqs_on(CALLER_ADDR0);
+ asi_intr_exit();
return;
}
@@ -438,6 +443,7 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
if (state.exit_rcu)
rcu_irq_exit();
}
+ asi_intr_exit();
}
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
--
2.35.1.473.g83b2b277ed-goog