Message-Id: <20211223002209.1092165-4-alexandr.lobakin@intel.com>
Date:   Thu, 23 Dec 2021 01:21:57 +0100
From:   Alexander Lobakin <alexandr.lobakin@...el.com>
To:     linux-hardening@...r.kernel.org, x86@...nel.org
Cc:     Alexander Lobakin <alexandr.lobakin@...el.com>,
        Jesse Brandeburg <jesse.brandeburg@...el.com>,
        Kristen Carlson Accardi <kristen@...ux.intel.com>,
        Kees Cook <keescook@...omium.org>,
        Miklos Szeredi <miklos@...redi.hu>,
        Ard Biesheuvel <ardb@...nel.org>,
        Tony Luck <tony.luck@...el.com>,
        Bruce Schlobohm <bruce.schlobohm@...el.com>,
        Jessica Yu <jeyu@...nel.org>,
        kernel test robot <lkp@...el.com>,
        Miroslav Benes <mbenes@...e.cz>,
        Evgenii Shatokhin <eshatokhin@...tuozzo.com>,
        Jonathan Corbet <corbet@....net>,
        Masahiro Yamada <masahiroy@...nel.org>,
        Michal Marek <michal.lkml@...kovi.net>,
        Nick Desaulniers <ndesaulniers@...gle.com>,
        Herbert Xu <herbert@...dor.apana.org.au>,
        "David S. Miller" <davem@...emloft.net>,
        Thomas Gleixner <tglx@...utronix.de>,
        Will Deacon <will@...nel.org>, Ingo Molnar <mingo@...hat.com>,
        Borislav Petkov <bp@...en8.de>,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        "H. Peter Anvin" <hpa@...or.com>,
        Andy Lutomirski <luto@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Arnd Bergmann <arnd@...db.de>,
        Josh Poimboeuf <jpoimboe@...hat.com>,
        Nathan Chancellor <nathan@...nel.org>,
        Masami Hiramatsu <mhiramat@...nel.org>,
        Marios Pomonis <pomonis@...gle.com>,
        Sami Tolvanen <samitolvanen@...gle.com>,
        "H.J. Lu" <hjl.tools@...il.com>, Nicolas Pitre <nico@...xnic.net>,
        linux-kernel@...r.kernel.org, linux-kbuild@...r.kernel.org,
        linux-arch@...r.kernel.org, live-patching@...r.kernel.org,
        llvm@...ts.linux.dev
Subject: [PATCH v9 03/15] kallsyms: Hide layout

From: Kristen Carlson Accardi <kristen@...ux.intel.com>

This patch makes /proc/kallsyms display its symbols in a random
order rather than sorted by address, in order to hide the newly
randomized address layout.
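
For illustration only (not part of the patch), a tiny standalone C
sketch of the resulting behaviour; the symbol names and the
pre-shuffled index table are made up. Sequential read positions are
remapped through a shuffled index table, so the emitted order no
longer reveals the address-sorted layout:

  #include <stdio.h>

  /* Made-up stand-ins for the real symbol table. */
  static const char *syms[] = { "sym_a", "sym_b", "sym_c", "sym_d" };
  /* A pre-shuffled index table, as kallsyms_open() would build it. */
  static const int shuffled_pos[] = { 2, 0, 3, 1 };

  int main(void)
  {
      /* Sequential positions are remapped before lookup, like update_iter(). */
      for (int pos = 0; pos < 4; pos++)
          printf("%s\n", syms[shuffled_pos[pos]]);

      return 0;
  }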

alobakin:
Don't depend on FG-KASLR and always do this for unprivileged accesses,
as suggested by several folks.
Also, introduce and use a shuffle_array() macro which shuffles an
array using Fisher-Yates. We'll make use of it several more times
later on.
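
A minimal usage sketch of the new macro, assuming kernel context (the
init_shuffled_pos() helper below is hypothetical, not from this
patch): build an identity index table, then shuffle it in place the
same way kallsyms_open() does further down.

  #include <linux/types.h>	/* loff_t */
  #include <linux/random.h>	/* shuffle_array(), get_random_long() */

  static void init_shuffled_pos(loff_t *table, size_t nents)
  {
      size_t i;

      /* Identity mapping first (nents must be non-zero)... */
      for (i = 0; i < nents; i++)
          table[i] = i;

      /* ...then an in-place Fisher-Yates shuffle of the indices. */
      shuffle_array(table, nents);
  }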

Signed-off-by: Kristen Carlson Accardi <kristen@...ux.intel.com>
Reviewed-by: Tony Luck <tony.luck@...el.com>
Tested-by: Tony Luck <tony.luck@...el.com>
Reported-by: kernel test robot <lkp@...el.com> # swap.cocci
Suggested-by: Ard Biesheuvel <ardb@...nel.org> # always do that
Suggested-by: Josh Poimboeuf <jpoimboe@...hat.com> # always do that
Suggested-by: Peter Zijlstra <peterz@...radead.org> # always do that, macro
Co-developed-by: Alexander Lobakin <alexandr.lobakin@...el.com>
Signed-off-by: Alexander Lobakin <alexandr.lobakin@...el.com>
---
 include/linux/random.h | 16 ++++++++
 kernel/kallsyms.c      | 93 ++++++++++++++++++++++++++++++++++--------
 2 files changed, 93 insertions(+), 16 deletions(-)

diff --git a/include/linux/random.h b/include/linux/random.h
index f45b8be3e3c4..c859a698089c 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -110,6 +110,22 @@ declare_get_random_var_wait(long)
 
 unsigned long randomize_page(unsigned long start, unsigned long range);
 
+/**
+ * shuffle_array - use the Fisher-Yates algorithm to shuffle an array.
+ * @arr: pointer to the array
+ * @nents: the number of elements in the array
+ */
+#define shuffle_array(arr, nents) ({				\
+	typeof(&(arr)[0]) __arr = &(arr)[0];			\
+	size_t __i;						\
+								\
+	for (__i = (nents) - 1; __i > 0; __i--) {		\
+		size_t __j = get_random_long() % (__i + 1);	\
+								\
+		swap(__arr[__i], __arr[__j]);			\
+	}							\
+})
+
 /*
  * This is designed to be standalone for just prandom
  * users, but for now we include it from <linux/random.h>
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 3011bc33a5ba..5d41b993113f 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -574,13 +574,15 @@ struct kallsym_iter {
 	loff_t pos_mod_end;
 	loff_t pos_ftrace_mod_end;
 	loff_t pos_bpf_end;
+	loff_t pos_end;
 	unsigned long value;
 	unsigned int nameoff; /* If iterating in core kernel symbols. */
 	char type;
 	char name[KSYM_NAME_LEN];
 	char module_name[MODULE_NAME_LEN];
 	int exported;
-	int show_value;
+	bool show_layout;
+	loff_t shuffled_pos[];
 };
 
 int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
@@ -660,11 +662,19 @@ static int get_ksymbol_bpf(struct kallsym_iter *iter)
  */
 static int get_ksymbol_kprobe(struct kallsym_iter *iter)
 {
+	int ret;
+
 	strlcpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN);
 	iter->exported = 0;
-	return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end,
-				  &iter->value, &iter->type,
-				  iter->name) < 0 ? 0 : 1;
+	ret = kprobe_get_kallsym(iter->pos - iter->pos_bpf_end,
+				 &iter->value, &iter->type,
+				 iter->name);
+	if (ret < 0) {
+		iter->pos_end = iter->pos;
+		return 0;
+	}
+
+	return 1;
 }
 
 /* Returns space to next name. */
@@ -687,11 +697,12 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
 	iter->name[0] = '\0';
 	iter->nameoff = get_symbol_offset(new_pos);
 	iter->pos = new_pos;
-	if (new_pos == 0) {
+	if (iter->show_layout && new_pos == 0) {
 		iter->pos_arch_end = 0;
 		iter->pos_mod_end = 0;
 		iter->pos_ftrace_mod_end = 0;
 		iter->pos_bpf_end = 0;
+		iter->pos_end = 0;
 	}
 }
 
@@ -720,13 +731,23 @@ static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
 	    get_ksymbol_bpf(iter))
 		return 1;
 
-	return get_ksymbol_kprobe(iter);
+	if ((!iter->pos_end || iter->pos_end > pos) &&
+	    get_ksymbol_kprobe(iter))
+		return 1;
+
+	return 0;
 }
 
 /* Returns false if pos at or past end of file. */
 static int update_iter(struct kallsym_iter *iter, loff_t pos)
 {
-	/* Module symbols can be accessed randomly. */
+	if (!iter->show_layout) {
+		if (pos > iter->pos_end)
+			return 0;
+
+		pos = iter->shuffled_pos[pos];
+	}
+
 	if (pos >= kallsyms_num_syms)
 		return update_iter_mod(iter, pos);
 
@@ -769,7 +790,7 @@ static int s_show(struct seq_file *m, void *p)
 	if (!iter->name[0])
 		return 0;
 
-	value = iter->show_value ? (void *)iter->value : NULL;
+	value = iter->show_layout ? (void *)iter->value : NULL;
 
 	if (iter->module_name[0]) {
 		char type;
@@ -806,9 +827,10 @@ static inline int kallsyms_for_perf(void)
 }
 
 /*
- * We show kallsyms information even to normal users if we've enabled
- * kernel profiling and are explicitly not paranoid (so kptr_restrict
- * is clear, and sysctl_perf_event_paranoid isn't set).
+ * We show kallsyms information and display them sorted by address even
+ * to normal users if we've enabled kernel profiling and are explicitly
+ * not paranoid (so kptr_restrict is clear, and sysctl_perf_event_paranoid
+ * isn't set).
  *
  * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
  * block even that).
@@ -838,16 +860,54 @@ static int kallsyms_open(struct inode *inode, struct file *file)
 	 * using get_symbol_offset for every symbol.
 	 */
 	struct kallsym_iter *iter;
-	iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
-	if (!iter)
-		return -ENOMEM;
-	reset_iter(iter, 0);
+	/*
+	 * This fake iter is needed for the cases with unprivileged
+	 * access. We need to know the exact number of symbols to
+	 * randomize the display layout.
+	 */
+	struct kallsym_iter fake;
+	size_t size = sizeof(*iter);
+	loff_t pos;
+
+	fake.show_layout = true;
+	reset_iter(&fake, 0);
 
 	/*
 	 * Instead of checking this on every s_show() call, cache
 	 * the result here at open time.
 	 */
-	iter->show_value = kallsyms_show_value(file->f_cred);
+	fake.show_layout = kallsyms_show_value(file->f_cred);
+	if (fake.show_layout)
+		goto open;
+
+	for (pos = kallsyms_num_syms; update_iter_mod(&fake, pos); pos++)
+		;
+
+	size = struct_size(iter, shuffled_pos, fake.pos_end + 1);
+
+open:
+	iter = __seq_open_private(file, &kallsyms_op, size);
+	if (!iter)
+		return -ENOMEM;
+
+	iter->show_layout = fake.show_layout;
+	reset_iter(iter, 0);
+
+	if (iter->show_layout)
+		return 0;
+
+	/* Copy the bounds since they were already discovered above */
+	iter->pos_arch_end = fake.pos_arch_end;
+	iter->pos_mod_end = fake.pos_mod_end;
+	iter->pos_ftrace_mod_end = fake.pos_ftrace_mod_end;
+	iter->pos_bpf_end = fake.pos_bpf_end;
+	iter->pos_end = fake.pos_end;
+
+	for (pos = 0; pos <= iter->pos_end; pos++)
+		iter->shuffled_pos[pos] = pos;
+
+	shuffle_array(iter->shuffled_pos, iter->pos_end + 1);
+
 	return 0;
 }
 
@@ -858,6 +918,7 @@ const char *kdb_walk_kallsyms(loff_t *pos)
 	if (*pos == 0) {
 		memset(&kdb_walk_kallsyms_iter, 0,
 		       sizeof(kdb_walk_kallsyms_iter));
+		kdb_walk_kallsyms_iter.show_layout = true;
 		reset_iter(&kdb_walk_kallsyms_iter, 0);
 	}
 	while (1) {
-- 
2.33.1
