Message-Id: <20201012153850.26996-15-yu-cheng.yu@intel.com>
Date: Mon, 12 Oct 2020 08:38:38 -0700
From: Yu-cheng Yu <yu-cheng.yu@...el.com>
To: x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, linux-kernel@...r.kernel.org,
linux-doc@...r.kernel.org, linux-mm@...ck.org,
linux-arch@...r.kernel.org, linux-api@...r.kernel.org,
Arnd Bergmann <arnd@...db.de>,
Andy Lutomirski <luto@...nel.org>,
Balbir Singh <bsingharora@...il.com>,
Borislav Petkov <bp@...en8.de>,
Cyrill Gorcunov <gorcunov@...il.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Eugene Syromiatnikov <esyr@...hat.com>,
Florian Weimer <fweimer@...hat.com>,
"H.J. Lu" <hjl.tools@...il.com>, Jann Horn <jannh@...gle.com>,
Jonathan Corbet <corbet@....net>,
Kees Cook <keescook@...omium.org>,
Mike Kravetz <mike.kravetz@...cle.com>,
Nadav Amit <nadav.amit@...il.com>,
Oleg Nesterov <oleg@...hat.com>, Pavel Machek <pavel@....cz>,
Peter Zijlstra <peterz@...radead.org>,
Randy Dunlap <rdunlap@...radead.org>,
"Ravi V. Shankar" <ravi.v.shankar@...el.com>,
Vedvyas Shanbhogue <vedvyas.shanbhogue@...el.com>,
Dave Martin <Dave.Martin@....com>,
Weijiang Yang <weijiang.yang@...el.com>,
Pengfei Xu <pengfei.xu@...el.com>
Cc: Yu-cheng Yu <yu-cheng.yu@...el.com>
Subject: [PATCH v14 14/26] x86/mm: Update maybe_mkwrite() for shadow stack

Shadow stack memory is writable, but its VMA has VM_SHSTK instead of
VM_WRITE. Update maybe_mkwrite() and maybe_pmd_mkwrite() to handle the
shadow stack case.

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@...el.com>
---
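For reference, below is a minimal userspace sketch (not kernel code) of the
dispatch this patch creates in maybe_mkwrite(): VM_WRITE takes the normal
pte_mkwrite() path, and only otherwise is the arch helper consulted for
VM_SHSTK. The fake_* names, flag values, and bit layout are simplified
stand-ins invented for illustration; the real shadow stack PTE encoding
(read-only + dirty) is handled by pte_mkwrite_shstk() elsewhere in this
series.

#include <stdio.h>

#define VM_WRITE	0x00000002UL
#define VM_SHSTK	0x00800000UL	/* stand-in value; the real flag is arch-defined */

#define PTE_WRITE	0x1UL		/* stand-in for the hardware writable bit */
#define PTE_DIRTY	0x2UL		/* stand-in for the hardware dirty bit */

struct fake_vma {
	unsigned long vm_flags;
};

typedef unsigned long fake_pte_t;

/* Ordinary writable mapping: set the writable bit. */
static fake_pte_t fake_pte_mkwrite(fake_pte_t pte)
{
	return pte | PTE_WRITE;
}

/*
 * Shadow stack mapping: shadow stack pages are read-only + dirty to the
 * hardware, so this variant sets dirty and leaves the writable bit clear.
 */
static fake_pte_t fake_pte_mkwrite_shstk(fake_pte_t pte)
{
	return (pte & ~PTE_WRITE) | PTE_DIRTY;
}

/* Mirrors maybe_mkwrite() after this patch: VM_WRITE first, else the arch hook. */
static fake_pte_t fake_maybe_mkwrite(fake_pte_t pte, struct fake_vma *vma)
{
	if (vma->vm_flags & VM_WRITE)
		pte = fake_pte_mkwrite(pte);
	else if (vma->vm_flags & VM_SHSTK)
		pte = fake_pte_mkwrite_shstk(pte);
	return pte;
}

int main(void)
{
	struct fake_vma rw    = { .vm_flags = VM_WRITE };
	struct fake_vma shstk = { .vm_flags = VM_SHSTK };
	struct fake_vma none  = { .vm_flags = 0 };

	printf("rw:    %#lx\n", fake_maybe_mkwrite(0, &rw));	/* writable bit set */
	printf("shstk: %#lx\n", fake_maybe_mkwrite(0, &shstk));	/* dirty set, writable clear */
	printf("none:  %lx\n", fake_maybe_mkwrite(0, &none));	/* unchanged (0) */
	return 0;
}

The same structure applies to maybe_pmd_mkwrite()/arch_maybe_pmd_mkwrite()
for transparent huge pages.
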
arch/x86/Kconfig | 4 ++++
arch/x86/mm/pgtable.c | 18 ++++++++++++++++++
include/linux/mm.h | 2 ++
include/linux/pgtable.h | 24 ++++++++++++++++++++++++
mm/huge_memory.c | 2 ++
5 files changed, 50 insertions(+)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 415fcc869afc..7578327226e3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1935,6 +1935,9 @@ config AS_HAS_SHADOW_STACK
config X86_CET
def_bool n

+config ARCH_MAYBE_MKWRITE
+ def_bool n
+
config ARCH_HAS_SHADOW_STACK
def_bool n
@@ -1945,6 +1948,7 @@ config X86_SHADOW_STACK_USER
depends on AS_HAS_SHADOW_STACK
select ARCH_USES_HIGH_VMA_FLAGS
select X86_CET
+ select ARCH_MAYBE_MKWRITE
select ARCH_HAS_SHADOW_STACK
help
Shadow Stacks provides protection against program stack
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index dfd82f51ba66..a9666b64bc05 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -610,6 +610,24 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
}
#endif

+#ifdef CONFIG_ARCH_MAYBE_MKWRITE
+pte_t arch_maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_flags & VM_SHSTK))
+ pte = pte_mkwrite_shstk(pte);
+ return pte;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+pmd_t arch_maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_flags & VM_SHSTK))
+ pmd = pmd_mkwrite_shstk(pmd);
+ return pmd;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_ARCH_MAYBE_MKWRITE */
+
/**
* reserve_top_address - reserves a hole in the top of kernel address space
* @reserve - size of hole to reserve
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 12be96b061c7..4f6305106feb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -969,6 +969,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
pte = pte_mkwrite(pte);
+ else
+ pte = arch_maybe_mkwrite(pte, vma);
return pte;
}

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 90654cb63e9e..157f5e726896 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1356,6 +1356,30 @@ static inline bool arch_has_pfn_modify_check(void)
}
#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */

+#ifdef CONFIG_MMU
+#ifdef CONFIG_ARCH_MAYBE_MKWRITE
+pte_t arch_maybe_mkwrite(pte_t pte, struct vm_area_struct *vma);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+pmd_t arch_maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#else /* !CONFIG_ARCH_MAYBE_MKWRITE */
+static inline pte_t arch_maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ return pte;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t arch_maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+ return pmd;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#endif /* CONFIG_ARCH_MAYBE_MKWRITE */
+#endif /* CONFIG_MMU */
+
/*
* Architecture PAGE_KERNEL_* fallbacks
*
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index da397779a6d4..01252b00cd06 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -464,6 +464,8 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
pmd = pmd_mkwrite(pmd);
+ else
+ pmd = arch_maybe_pmd_mkwrite(pmd, vma);
return pmd;
}

--
2.21.0