Message-Id: <20200419203335.856333226@linutronix.de>
Date: Sun, 19 Apr 2020 22:31:39 +0200
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, Kees Cook <keescook@...omium.org>,
Paolo Bonzini <pbonzini@...hat.com>,
Thomas Lendacky <Thomas.Lendacky@....com>,
Juergen Gross <jgross@...e.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>
Subject: [patch 02/15] x86/cpu: Uninline CR4 accessors

cpu_tlbstate is exported because various TLB related functions need access
to it, but cpu_tlbstate is sensitive information which should only be
accessed by well contained kernel functions and not be directly exposed to
modules.

The various CR4 accessors require cpu_tlbstate as the CR4 shadow cache is
located there.

In preparation for unexporting cpu_tlbstate, create a builtin function for
manipulating CR4 and rework the various helpers to use it.

Export native_write_cr4() only when CONFIG_LKDTM=m.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
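
For illustration only, not part of the change: with this rework the irqsoff
helpers in tlbflush.h become thin wrappers which both end up in the new
builtin cr4_update_irqsoff(). The function example_flip_tsd() and the choice
of X86_CR4_TSD below are made up purely for the example:

#include <linux/lockdep.h>
#include <asm/tlbflush.h>

/* Hypothetical caller; interrupts must already be disabled */
static void example_flip_tsd(void)
{
	lockdep_assert_irqs_disabled();

	/* Wrapper, ends up in cr4_update_irqsoff(X86_CR4_TSD, 0) */
	cr4_set_bits_irqsoff(X86_CR4_TSD);

	/* Wrapper, ends up in cr4_update_irqsoff(0, X86_CR4_TSD) */
	cr4_clear_bits_irqsoff(X86_CR4_TSD);
}
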
arch/x86/include/asm/tlbflush.h | 36 +++++-------------------------------
arch/x86/kernel/cpu/common.c | 25 ++++++++++++++++++++++++-
arch/x86/kernel/process.c | 11 +++++++++++
3 files changed, 40 insertions(+), 32 deletions(-)
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -276,37 +276,25 @@ static inline bool nmi_uaccess_okay(void
#define nmi_uaccess_okay nmi_uaccess_okay
+void cr4_update_irqsoff(unsigned long set, unsigned long clear);
+unsigned long cr4_read_shadow(void);
+
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
-static inline void __cr4_set(unsigned long cr4)
-{
- lockdep_assert_irqs_disabled();
- this_cpu_write(cpu_tlbstate.cr4, cr4);
- __write_cr4(cr4);
-}
-
/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
- if ((cr4 | mask) != cr4)
- __cr4_set(cr4 | mask);
+ cr4_update_irqsoff(mask, 0);
}
/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
- if ((cr4 & ~mask) != cr4)
- __cr4_set(cr4 & ~mask);
+ cr4_update_irqsoff(0, mask);
}
/* Set in this cpu's CR4. */
@@ -329,20 +317,6 @@ static inline void cr4_clear_bits(unsign
local_irq_restore(flags);
}
-static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
-{
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
- __cr4_set(cr4 ^ mask);
-}
-
-/* Read the CR4 shadow. */
-static inline unsigned long cr4_read_shadow(void)
-{
- return this_cpu_read(cpu_tlbstate.cr4);
-}
-
/*
* Mark all other ASIDs as invalid, preserves the current.
*/
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -387,7 +387,30 @@ void native_write_cr4(unsigned long val)
bits_missing);
}
}
-EXPORT_SYMBOL(native_write_cr4);
+#if IS_MODULE(CONFIG_LKDTM)
+EXPORT_SYMBOL_GPL(native_write_cr4);
+#endif
+
+void cr4_update_irqsoff(unsigned long set, unsigned long clear)
+{
+ unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
+
+ lockdep_assert_irqs_disabled();
+
+ newval = (cr4 & ~clear) | set;
+ if (newval != cr4) {
+ this_cpu_write(cpu_tlbstate.cr4, newval);
+ __write_cr4(newval);
+ }
+}
+EXPORT_SYMBOL(cr4_update_irqsoff);
+
+/* Read the CR4 shadow. */
+unsigned long cr4_read_shadow(void)
+{
+ return this_cpu_read(cpu_tlbstate.cr4);
+}
+EXPORT_SYMBOL_GPL(cr4_read_shadow);
void cr4_init(void)
{
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -612,6 +612,17 @@ void speculation_ctrl_update_current(voi
preempt_enable();
}
+static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
+{
+ unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
+
+ newval = cr4 ^ mask;
+ if (newval != cr4) {
+ this_cpu_write(cpu_tlbstate.cr4, newval);
+ __write_cr4(newval);
+ }
+}
+
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
unsigned long tifp, tifn;
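
For illustration only, again not part of the change: once cpu_tlbstate is
unexported, a GPL module which so far peeked at
this_cpu_read(cpu_tlbstate.cr4) can use the newly exported cr4_read_shadow()
instead. example_smep_enabled() is a made-up helper:

#include <asm/tlbflush.h>
#include <asm/processor-flags.h>

/*
 * Hypothetical module helper: query the cached CR4 value of this CPU.
 * The shadow is per CPU, so the caller must keep preemption disabled.
 */
static bool example_smep_enabled(void)
{
	return cr4_read_shadow() & X86_CR4_SMEP;
}
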