Message-Id: <20201116144757.1920077-12-alexandre.chartre@oracle.com>
Date:   Mon, 16 Nov 2020 15:47:47 +0100
From:   Alexandre Chartre <alexandre.chartre@...cle.com>
To:     tglx@...utronix.de, mingo@...hat.com, bp@...en8.de, hpa@...or.com,
        x86@...nel.org, dave.hansen@...ux.intel.com, luto@...nel.org,
        peterz@...radead.org, linux-kernel@...r.kernel.org,
        thomas.lendacky@....com, jroedel@...e.de
Cc:     konrad.wilk@...cle.com, jan.setjeeilers@...cle.com,
        junaids@...gle.com, oweisse@...gle.com, rppt@...ux.vnet.ibm.com,
        graf@...zon.de, mgross@...ux.intel.com, kuzuno@...il.com,
        alexandre.chartre@...cle.com
Subject: [RFC][PATCH v2 11/21] x86/pti: Extend PTI user mappings

Extend PTI user mappings so that more kernel entry code can be executed
with the user page-table. To do so, we need to map the syscall and
interrupt entry code, the per-cpu offsets (__per_cpu_offset, which is
used by some of the entry code), the stack canary, and the PTI stack
(which is defined per task).
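
For reference, the boot-time mappings all funnel through the existing
PTI clone helpers. Below is a condensed sketch, illustrative only: the
wrapper function is hypothetical, and the real calls are spread across
pti_clone_user_shared() and pti_clone_entry_text() in the diff.

/*
 * Illustrative sketch, not a function in this patch: the new
 * boot-time ranges are cloned into the user page-table with the
 * existing PTI helpers, at PTE or PMD granularity as appropriate.
 */
static void __init pti_user_mappings_sketch(void)
{
	/* Syscall/interrupt entry code lives in the noinstr section. */
	pti_clone_init_pgtable((unsigned long)__noinstr_text_start,
			       (unsigned long)__noinstr_text_end,
			       PTI_CLONE_PMD);

	/* Per-cpu offsets, read early by the entry code. */
	pti_clone_init_pgtable((unsigned long)__per_cpu_offset,
			       (unsigned long)__per_cpu_offset +
					sizeof(__per_cpu_offset),
			       PTI_CLONE_PTE);
}

The stack canary (fixed_percpu_data) and the per-cpu TSS are mapped
per cpu with pti_clone_percpu_page(), while the per-task PTI stack is
mapped per mm at fork time (see the kernel/fork.c hunk).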

Signed-off-by: Alexandre Chartre <alexandre.chartre@...cle.com>
---
 arch/x86/entry/entry_64.S |  2 --
 arch/x86/mm/pti.c         | 19 +++++++++++++++++++
 kernel/fork.c             | 22 ++++++++++++++++++++++
 3 files changed, 41 insertions(+), 2 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 6e0b5b010e0b..458af12ed9a1 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -274,7 +274,6 @@ SYM_FUNC_END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-.pushsection .text, "ax"
 SYM_CODE_START(ret_from_fork)
 	UNWIND_HINT_REGS
 	movq	%rsp, %rdi			/* pt_regs */
@@ -284,7 +283,6 @@ SYM_CODE_START(ret_from_fork)
 	call	return_from_fork		/* returns with IRQs disabled */
 	jmp	swapgs_restore_regs_and_return_to_usermode
 SYM_CODE_END(ret_from_fork)
-.popsection
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 71ca245d7b38..e4c6cb4a4840 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -449,6 +449,7 @@ static void __init pti_clone_percpu_page(void *addr)
  */
 static void __init pti_clone_user_shared(void)
 {
+	unsigned long start, end;
 	unsigned int cpu;
 
 	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
@@ -465,7 +466,16 @@ static void __init pti_clone_user_shared(void)
 		 */
 		pti_clone_percpu_page(&per_cpu(cpu_tss_rw, cpu));
 
+		/*
+		 * Map fixed_percpu_data to get the stack canary.
+		 */
+		if (IS_ENABLED(CONFIG_STACKPROTECTOR))
+			pti_clone_percpu_page(&per_cpu(fixed_percpu_data, cpu));
 	}
+
+	start = (unsigned long)__per_cpu_offset;
+	end = start + sizeof(__per_cpu_offset);
+	pti_clone_init_pgtable(start, end, PTI_CLONE_PTE);
 }
 
 #else /* CONFIG_X86_64 */
@@ -505,6 +515,15 @@ static void pti_clone_entry_text(void)
 	pti_clone_init_pgtable((unsigned long) __entry_text_start,
 			       (unsigned long) __entry_text_end,
 			       PTI_CLONE_PMD);
+
+	/*
+	* Syscall and interrupt entry code (which is in the noinstr
+	* section) will be entered with the user page-table, so that
+	* code has to be mapped in.
+	*/
+	pti_clone_init_pgtable((unsigned long) __noinstr_text_start,
+			       (unsigned long) __noinstr_text_end,
+			       PTI_CLONE_PMD);
 }
 
 /*
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d266388d380..31cd77dbdba3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -999,6 +999,25 @@ static void mm_init_uprobes_state(struct mm_struct *mm)
 #endif
 }
 
+static void mm_map_task(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	unsigned long addr;
+
+	if (!tsk || !static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	/*
+	 * Map the task stack after the kernel stack into the user
+	 * address space, so that this stack can be used when entering
+	 * syscall or interrupt from user mode.
+	 */
+	BUG_ON(!task_stack_page(tsk));
+	addr = (unsigned long)task_top_of_kernel_stack(tsk);
+	pti_clone_pgtable(mm, addr, addr + KERNEL_STACK_SIZE, PTI_CLONE_PTE);
+#endif
+}
+
 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	struct user_namespace *user_ns)
 {
@@ -1043,6 +1062,8 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	if (init_new_context(p, mm))
 		goto fail_nocontext;
 
+	mm_map_task(mm, p);
+
 	mm->user_ns = get_user_ns(user_ns);
 	return mm;
 
@@ -1404,6 +1425,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 	vmacache_flush(tsk);
 
 	if (clone_flags & CLONE_VM) {
+		mm_map_task(oldmm, tsk);
 		mmget(oldmm);
 		mm = oldmm;
 		goto good_mm;
-- 
2.18.4
