[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20171127104923.14378-21-mingo@kernel.org>
Date: Mon, 27 Nov 2017 11:49:19 +0100
From: Ingo Molnar <mingo@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: Dave Hansen <dave.hansen@...ux.intel.com>,
Andy Lutomirski <luto@...capital.net>,
Thomas Gleixner <tglx@...utronix.de>,
"H . Peter Anvin" <hpa@...or.com>,
Peter Zijlstra <peterz@...radead.org>,
Borislav Petkov <bp@...en8.de>,
Linus Torvalds <torvalds@...ux-foundation.org>
Subject: [PATCH 20/24] x86/mm/kaiser: Simplify disabling of global pages
From: Thomas Gleixner <tglx@...utronix.de>
The current way of disabling global pages at compile time prevents boot
time disabling of Kaiser and creates unnecessary indirections.
Global pages can be suppressed by __supported_pte_mask as well. The shadow
mappings set PAGE_GLOBAL for the minimal kernel mappings which are required
for entry/exit. These mappings are set up manually so the filtering does not
take place.
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Brian Gerst <brgerst@...il.com>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Denys Vlasenko <dvlasenk@...hat.com>
Cc: Josh Poimboeuf <jpoimboe@...hat.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Rik van Riel <riel@...hat.com>
Cc: daniel.gruss@...k.tugraz.at
Cc: hughd@...gle.com
Cc: keescook@...gle.com
Cc: linux-mm@...ck.org
Cc: michael.schwarz@...k.tugraz.at
Cc: moritz.lipp@...k.tugraz.at
Cc: richard.fellner@...dent.tugraz.at
Link: http://lkml.kernel.org/r/20171126232414.393912629@linutronix.de
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
arch/x86/include/asm/pgtable_types.h | 16 +---------------
arch/x86/mm/init.c | 13 ++++++++++---
arch/x86/mm/pageattr.c | 16 ++++++++--------
3 files changed, 19 insertions(+), 26 deletions(-)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 503bb6999a75..289dde8a9eb3 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -191,23 +191,9 @@ enum page_cache_mode {
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_ACCESSED)
-/*
- * Disable global pages for anything using the default
- * __PAGE_KERNEL* macros.
- *
- * PGE will still be enabled and _PAGE_GLOBAL may still be used carefully
- * for a few selected kernel mappings which must be visible to userspace,
- * when KAISER is enabled, like the entry/exit code and data.
- */
-#ifdef CONFIG_KAISER
-#define __PAGE_KERNEL_GLOBAL 0
-#else
-#define __PAGE_KERNEL_GLOBAL _PAGE_GLOBAL
-#endif
-
#define __PAGE_KERNEL_EXEC \
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | \
- __PAGE_KERNEL_GLOBAL)
+ _PAGE_GLOBAL)
#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 9618e57d46cf..7c0126835f22 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -161,6 +161,13 @@ struct map_range {
static int page_size_mask;
+static void enable_global_pages(void)
+{
+#ifndef CONFIG_KAISER
+ __supported_pte_mask |= _PAGE_GLOBAL;
+#endif
+}
+
static void __init probe_page_size_mask(void)
{
/*
@@ -179,11 +186,11 @@ static void __init probe_page_size_mask(void)
cr4_set_bits_and_update_boot(X86_CR4_PSE);
/* Enable PGE if available */
+ __supported_pte_mask |= _PAGE_GLOBAL;
if (boot_cpu_has(X86_FEATURE_PGE)) {
cr4_set_bits_and_update_boot(X86_CR4_PGE);
- __supported_pte_mask |= _PAGE_GLOBAL;
- } else
- __supported_pte_mask &= ~_PAGE_GLOBAL;
+ enable_global_pages();
+ }
/* Enable 1 GB linear kernel mappings if available: */
if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1b3dbf3b3846..f09d8b362194 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -585,9 +585,9 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* for the ancient hardware that doesn't support it.
*/
if (pgprot_val(req_prot) & _PAGE_PRESENT)
- pgprot_val(req_prot) |= _PAGE_PSE | __PAGE_KERNEL_GLOBAL;
+ pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
else
- pgprot_val(req_prot) &= ~(_PAGE_PSE | __PAGE_KERNEL_GLOBAL);
+ pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
req_prot = canon_pgprot(req_prot);
@@ -705,9 +705,9 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
* for the ancient hardware that doesn't support it.
*/
if (pgprot_val(ref_prot) & _PAGE_PRESENT)
- pgprot_val(ref_prot) |= __PAGE_KERNEL_GLOBAL;
+ pgprot_val(ref_prot) |= _PAGE_GLOBAL;
else
- pgprot_val(ref_prot) &= ~__PAGE_KERNEL_GLOBAL;
+ pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;
/*
* Get the target pfn from the original entry:
@@ -938,9 +938,9 @@ static void populate_pte(struct cpa_data *cpa,
* support it.
*/
if (pgprot_val(pgprot) & _PAGE_PRESENT)
- pgprot_val(pgprot) |= __PAGE_KERNEL_GLOBAL;
+ pgprot_val(pgprot) |= _PAGE_GLOBAL;
else
- pgprot_val(pgprot) &= ~__PAGE_KERNEL_GLOBAL;
+ pgprot_val(pgprot) &= ~_PAGE_GLOBAL;
pgprot = canon_pgprot(pgprot);
@@ -1242,9 +1242,9 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
* support it.
*/
if (pgprot_val(new_prot) & _PAGE_PRESENT)
- pgprot_val(new_prot) |= __PAGE_KERNEL_GLOBAL;
+ pgprot_val(new_prot) |= _PAGE_GLOBAL;
else
- pgprot_val(new_prot) &= ~__PAGE_KERNEL_GLOBAL;
+ pgprot_val(new_prot) &= ~_PAGE_GLOBAL;
/*
* We need to keep the pfn from the existing PTE,
--
2.14.1
Powered by blists - more mailing lists