Date:   Fri, 28 Apr 2023 17:51:00 +0800
From:   "Hou Wenlong" <houwenlong.hwl@...group.com>
To:     linux-kernel@...r.kernel.org
Cc:     "Thomas Garnier" <thgarnie@...omium.org>,
        "Lai Jiangshan" <jiangshan.ljs@...group.com>,
        "Kees Cook" <keescook@...omium.org>,
        "Hou Wenlong" <houwenlong.hwl@...group.com>,
        "Thomas Gleixner" <tglx@...utronix.de>,
        "Ingo Molnar" <mingo@...hat.com>, "Borislav Petkov" <bp@...en8.de>,
        "Dave Hansen" <dave.hansen@...ux.intel.com>, <x86@...nel.org>,
        "H. Peter Anvin" <hpa@...or.com>,
        "David Woodhouse" <dwmw@...zon.co.uk>,
        "Peter Zijlstra" <peterz@...radead.org>,
        "Brian Gerst" <brgerst@...il.com>,
        "Josh Poimboeuf" <jpoimboe@...nel.org>,
        "Sami Tolvanen" <samitolvanen@...gle.com>
Subject: [PATCH RFC 20/43] x86/percpu: Adapt percpu references relocation for PIE support

The original design of per-CPU reference relocation handles only the
relative references and ignores the absolute ones: per-CPU variables
are addressed relative to the segment base, and since the .data..percpu
ELF section is linked at virtual address zero, absolute references stay
valid even when KASLR relocates the kernel. The few relative
references, on the other hand, need to be relocated by a negative
offset.
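
As a rough illustration of that scheme, here is a user-space sketch
with made-up addresses (nothing below is kernel code; the numbers are
assumptions chosen for the example):

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          /* Made-up link-time numbers, for illustration only. */
          uintptr_t var   = 0x40;                  /* per-CPU var: section linked at 0 */
          uintptr_t insn  = 0xffffffff81000000ULL; /* referencing instruction */
          uintptr_t delta = 0x200000;              /* KASLR displacement */

          /* The absolute reference is just a segment offset; it is
           * the same wherever KASLR places the kernel. */
          printf("absolute reference: %#lx\n", (unsigned long)var);

          /* The instruction moves with the kernel but the zero-based
           * target does not, so the PC-relative displacement changes
           * by -delta: the negative adjustment recorded for such
           * sites in the relocs32neg table. */
          int64_t before = (int64_t)(var - insn);
          int64_t after  = (int64_t)(var - (insn + delta));
          printf("pc-relative changes by: %lld\n", (long long)(after - before));
          return 0;
  }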

However, this is not compatible with PIE, because under PIE almost all
per-CPU references become RIP-relative, and RIP-relative addressing can
only reach -2G ~ +2G. To allow the kernel to be placed below the top
2G, per-CPU relative references are not relocated; instead, the per-CPU
base is adjusted. Absolute references, in turn, are relocated like
those to normal variables. As a consequence, per-CPU references in the
.altinstr_replacement section no longer work, because
apply_alternatives() applies no fixups for per-CPU references; objtool
can catch such cases, though. Currently only call depth tracking uses
them, so disable it if X86_PIE is enabled.
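
To make the base adjustment concrete, here is a matching user-space
sketch (again with made-up symbol values, not kernel code) of what the
head_64.S hunk below computes: the GS base is biased by
__per_cpu_load - __per_cpu_start, so an unrelocated RIP-relative
reference still resolves through the segment to the per-CPU area:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          /* Made-up runtime values for the linker symbols. */
          uintptr_t per_cpu_start = 0xffffffff82800000ULL; /* no longer zero under PIE */
          uintptr_t per_cpu_load  = 0xffffffff82a00000ULL; /* initial per-CPU data */

          /* movabs $__per_cpu_load, %rdx; movabs $__per_cpu_start, %rax;
           * subq %rax, %rdx -- the boot-time GS base from the hunk below. */
          uintptr_t gs_base = per_cpu_load - per_cpu_start;

          /* A RIP-relative per-CPU reference resolves to the
           * variable's unrelocated address inside the image's
           * per-CPU section... */
          uintptr_t var = per_cpu_start + 0x40;

          /* ...and the segment-based access still lands on the right
           * copy: gs_base + var == per_cpu_load + 0x40. */
          printf("%#lx\n", (unsigned long)(gs_base + var));
          return 0;
  }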

Suggested-by: Lai Jiangshan <jiangshan.ljs@...group.com>
Signed-off-by: Hou Wenlong <houwenlong.hwl@...group.com>
Cc: Thomas Garnier <thgarnie@...omium.org>
Cc: Kees Cook <keescook@...omium.org>
---
 arch/x86/Kconfig          |  2 +-
 arch/x86/kernel/head_64.S | 10 ++++++++++
 arch/x86/tools/relocs.c   | 17 ++++++++++++++---
 3 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b26941ef50ee..715f0734d065 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2635,7 +2635,7 @@ config CPU_UNRET_ENTRY
 
 config CALL_DEPTH_TRACKING
 	bool "Mitigate RSB underflow with call depth tracking"
-	depends on CPU_SUP_INTEL && HAVE_CALL_THUNKS
+	depends on CPU_SUP_INTEL && HAVE_CALL_THUNKS && !X86_PIE
 	select HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
 	select CALL_THUNKS
 	default y
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 1eed50b7d1ac..94c5defec8cc 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -72,6 +72,11 @@ SYM_CODE_START_NOALIGN(startup_64)
 	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
 #elif defined(CONFIG_SMP)
 	movabs	$__per_cpu_load, %rdx
+#ifdef CONFIG_X86_PIE
+	movabs	$__per_cpu_start, %rax
+	subq	%rax, %rdx
+	movq	%rdx, __per_cpu_offset(%rip)
+#endif
 #else
 	xorl	%edx, %edx
 #endif
@@ -79,6 +84,11 @@ SYM_CODE_START_NOALIGN(startup_64)
 	shrq	$32,  %rdx
 	wrmsr
 
+#if defined(CONFIG_X86_PIE) && defined(CONFIG_SMP)
+	movq	__per_cpu_offset(%rip), %rdx
+	movq	%rdx, PER_CPU_VAR(this_cpu_off)
+#endif
+
 	pushq	%rsi
 	call	startup_64_setup_env
 	popq	%rsi
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 2925074b9a58..038e9c12fad3 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -848,6 +848,7 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
 
 	case R_X86_64_PC32:
 	case R_X86_64_PLT32:
+#ifndef CONFIG_X86_PIE
 		/*
 		 * PC relative relocations don't need to be adjusted unless
 		 * referencing a percpu symbol.
@@ -856,6 +857,7 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
 		 */
 		if (is_percpu_sym(sym, symname))
 			add_reloc(&relocs32neg, offset);
+#endif
 		break;
 
 	case R_X86_64_PC64:
@@ -871,10 +873,18 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
 	case R_X86_64_32S:
 	case R_X86_64_64:
 		/*
-		 * References to the percpu area don't need to be adjusted.
+		 * References to the percpu area don't need to be adjusted when
+		 * CONFIG_X86_PIE is not enabled.
 		 */
-		if (is_percpu_sym(sym, symname))
+		if (is_percpu_sym(sym, symname)) {
+#ifdef CONFIG_X86_PIE
+			if (r_type != R_X86_64_64)
+				die("Invalid absolute reference against per-CPU symbol %s\n",
+				    symname);
+			add_reloc(&relocs64, offset);
+#endif
 			break;
+		}
 
 		if (shn_abs) {
 			/*
@@ -1044,7 +1054,8 @@ static int cmp_relocs(const void *va, const void *vb)
 
 static void sort_relocs(struct relocs *r)
 {
-	qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
+	if (r->count)
+		qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
 }
 
 static int write32(uint32_t v, FILE *f)
-- 
2.31.1
