Message-Id: <20210708194638.128950-3-posk@google.com>
Date:   Thu,  8 Jul 2021 12:46:37 -0700
From:   Peter Oskolkov <posk@...k.io>
To:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        linux-kernel@...r.kernel.org, linux-api@...r.kernel.org
Cc:     Paul Turner <pjt@...gle.com>, Ben Segall <bsegall@...gle.com>,
        Peter Oskolkov <posk@...gle.com>,
        Peter Oskolkov <posk@...k.io>,
        Joel Fernandes <joel@...lfernandes.org>,
        Andrei Vagin <avagin@...gle.com>,
        Jim Newsome <jnewsome@...project.org>,
        Jann Horn <jannh@...gle.com>
Subject: [RFC PATCH 2/3 v0.2] sched/umcg: RFC: add userspace atomic helpers

Add helper functions to work atomically with 32/64-bit values in
userspace memory. There are some futex-related helpers in the tree,
but they are not exactly what UMCG needs; as I have not found anything
else I could reuse, I rolled my own.

At the moment only X86_64 is supported.

Note: the helpers should probably go into arch/ somewhere; I have
them in kernel/sched/umcg.h temporarily for convenience. Please
let me know where I should put them and how to name them.
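
For illustration, here is a sketch of how a caller might use these
helpers (the helper names match this patch; set_low_byte() and its
surrounding logic are made up for the example):

	/* Sketch only: set the low byte of a userspace u64, retrying on races. */
	static int set_low_byte(u64 __user *uaddr, u8 byte)
	{
		u64 prev;

		if (get_user(prev, uaddr))
			return -EFAULT;

		while (true) {
			int ret = umcg_cmpxchg_64_user(uaddr, &prev,
					(prev & ~0xffULL) | byte);

			if (!ret)
				return 0;
			if (ret != -EAGAIN)
				return ret;	/* -EFAULT */
			/* *prev has been updated to the current value; retry. */
		}
	}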

Signed-off-by: Peter Oskolkov <posk@...gle.com>
---
 kernel/sched/umcg.h | 294 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 294 insertions(+)
 create mode 100644 kernel/sched/umcg.h
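
Note: the atomic_stack_push_user()/atomic_stack_pop_user() helpers below
treat the first 8 bytes of each userspace node as the next pointer, and
lock the stack by setting the lowest bit of the head value while a pop
is in progress. An illustrative node layout (not a uapi definition
introduced by this patch):

	struct example_umcg_node {
		u64 next;	/* The first word links the stack. */
		/* User payload follows. */
	};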

diff --git a/kernel/sched/umcg.h b/kernel/sched/umcg.h
new file mode 100644
index 000000000000..aa8fb24964ed
--- /dev/null
+++ b/kernel/sched/umcg.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _KERNEL_SCHED_UMCG_H
+#define _KERNEL_SCHED_UMCG_H
+
+#ifdef CONFIG_UMCG
+#ifdef CONFIG_X86_64
+
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/umcg.h>
+
+#include <asm/asm.h>
+#include <linux/atomic.h>
+
+/* TODO: move atomic operations below into arch/ headers */
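+
+/*
+ * umcg_atomic_cmpxchg_32 - cmpxchg a 32-bit value in userspace memory.
+ *
+ * Return 0 on success or -EFAULT if the access faulted. On success,
+ * *uval holds the value found at *uaddr; the exchange happened iff
+ * that value equals oldval. Faults are reported, not resolved: the
+ * callers below disable pagefaults and call fix_pagefault() themselves.
+ */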
+static inline int umcg_atomic_cmpxchg_32(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
+{
+	int ret = 0;
+
+	if (!user_access_begin(uaddr, sizeof(u32)))
+		return -EFAULT;
+	asm volatile("\n"
+		"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		"2:\n"
+		"\t.section .fixup, \"ax\"\n"
+		"3:\tmov     %3, %0\n"
+		"\tjmp     2b\n"
+		"\t.previous\n"
+		_ASM_EXTABLE_UA(1b, 3b)
+		: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		: "i" (-EFAULT), "r" (newval), "1" (oldval)
+		: "memory"
+	);
+	user_access_end();
+	*uval = oldval;
+	return ret;
+}
+
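+/* The 64-bit counterpart of umcg_atomic_cmpxchg_32() above. */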
+static inline int umcg_atomic_cmpxchg_64(u64 *uval, u64 __user *uaddr,
+						u64 oldval, u64 newval)
+{
+	int ret = 0;
+
+	if (!user_access_begin(uaddr, sizeof(u64)))
+		return -EFAULT;
+	asm volatile("\n"
+		"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"
+		"2:\n"
+		"\t.section .fixup, \"ax\"\n"
+		"3:\tmov     %3, %0\n"
+		"\tjmp     2b\n"
+		"\t.previous\n"
+		_ASM_EXTABLE_UA(1b, 3b)
+		: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		: "i" (-EFAULT), "r" (newval), "1" (oldval)
+		: "memory"
+	);
+	user_access_end();
+	*uval = oldval;
+	return ret;
+}
+
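+/*
+ * fix_pagefault - fault in the page at @uaddr so that a retried access
+ * with pagefaults disabled can succeed.
+ *
+ * Return: 0 on success, a negative error if the fault cannot be resolved.
+ */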
+static inline int fix_pagefault(unsigned long uaddr, bool write_fault)
+{
+	struct mm_struct *mm = current->mm;
+	int ret;
+
+	mmap_read_lock(mm);
+	ret = fixup_user_fault(mm, uaddr, write_fault ? FAULT_FLAG_WRITE : 0,
+			NULL);
+	mmap_read_unlock(mm);
+
+	return ret < 0 ? ret : 0;
+}
+
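+/*
+ * umcg_get_user_32 - read a 32-bit value from userspace, resolving
+ * pagefaults and retrying as needed.
+ *
+ * Return: 0 on success, -EFAULT on error.
+ */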
+static inline int umcg_get_user_32(u32 __user *uaddr, u32 *val)
+{
+	while (true) {
+		int ret;
+		u32 out;
+
+		pagefault_disable();
+		ret = __get_user(out, uaddr);
+		pagefault_enable();
+
+		if (!ret) {
+			*val = out;
+			return 0;
+		}
+
+		if (WARN_ONCE(ret != -EFAULT, "Unexpected error"))
+			return -EFAULT;
+
+		ret = fix_pagefault((unsigned long)uaddr, false);
+		if (ret)
+			return -EFAULT;
+	}
+}
+
+/**
+ * umcg_cmpxchg_32_user - compare and exchange a 32-bit userspace value
+ * @uaddr: the userspace address of the value
+ * @prev:  in: the expected value; out: the value found at @uaddr
+ * @val:   the value to store if *@uaddr matches *@prev
+ *
+ * Return:
+ * 0       - OK
+ * -EFAULT - memory access error
+ * -EAGAIN - the expected value did not match; consult *@prev
+ */
+static inline int umcg_cmpxchg_32_user(u32 __user *uaddr, u32 *prev, u32 val)
+{
+	while (true) {
+		int ret;
+		u32 expected = *prev;
+
+		pagefault_disable();
+		ret = umcg_atomic_cmpxchg_32(prev, uaddr, expected, val);
+		pagefault_enable();
+
+		if (!ret)
+			return *prev == expected ? 0 : -EAGAIN;
+
+		if (WARN_ONCE(ret != -EFAULT, "Unexpected error"))
+			return -EFAULT;
+
+		ret = fix_pagefault((unsigned long)uaddr, true);
+		if (ret)
+			return -EFAULT;
+	}
+}
+
+/**
+ * umcg_cmpxchg_64_user - compare and exchange a 64-bit userspace value
+ * @uaddr: the userspace address of the value
+ * @prev:  in: the expected value; out: the value found at @uaddr
+ * @val:   the value to store if *@uaddr matches *@prev
+ *
+ * Return:
+ * 0       - OK
+ * -EFAULT - memory access error
+ * -EAGAIN - the expected value did not match; consult *@prev
+ */
+static inline int umcg_cmpxchg_64_user(u64 __user *uaddr, u64 *prev, u64 val)
+{
+	while (true) {
+		int ret;
+		u64 expected = *prev;
+
+		pagefault_disable();
+		ret = umcg_atomic_cmpxchg_64(prev, uaddr, expected, val);
+		pagefault_enable();
+
+		if (!ret)
+			return *prev == expected ? 0 : -EAGAIN;
+
+		if (WARN_ONCE(ret != -EFAULT, "Unexpected error"))
+			return -EFAULT;
+
+		ret = fix_pagefault((unsigned long)uaddr, true);
+		if (ret)
+			return -EFAULT;
+	}
+}
+
+/**
+ * atomic_stack_push_user - push a node onto the stack
+ * @head: a pointer to the head of the stack
+ * @node: a pointer to the node to push
+ *
+ * Push a node onto a singly-linked list (stack). Atomicity/correctness
+ * is guaranteed by locking the head via setting its lowest bit (assuming
+ * the pointer is suitably aligned).
+ *
+ * Return: 0 on success, -EFAULT on error.
+ */
+static inline int atomic_stack_push_user(u64 __user *head, u64 __user *node)
+{
+	while (true) {
+		int ret;
+		u64 first;
+
+		smp_mb();  /* Order the head read below against prior accesses. */
+		if (get_user(first, head))
+			return -EFAULT;
+
+		if (first & 1UL) {
+			cpu_relax();
+			continue;  /* The head is locked by a pop in progress. */
+		}
+
+		if (put_user(first, node))
+			return -EFAULT;
+		smp_mb();  /* Make the node's next pointer visible before publishing the node. */
+
+		ret = umcg_cmpxchg_64_user(head, &first, (u64)node);
+		if (!ret)
+			return 0;
+
+		if (ret == -EAGAIN) {
+			cpu_relax();
+			continue;
+		}
+
+		WARN_ONCE(ret != -EFAULT, "unexpected umcg_cmpxchg result");
+		return -EFAULT;
+	}
+}
+
+/**
+ * atomic_stack_pop_user - pop a node from the stack
+ * @head: a pointer to the head of the stack
+ * @value: a pointer to where to store the popped value
+ *
+ * Pop a node from a singly-linked list (stack). Atomicity/correctness
+ * is guaranteed by locking the head via setting its lowest bit (assuming
+ * the pointer is suitably aligned).
+ *
+ * Note: on success, a non-zero @value should be cast to (u64 __user *).
+ *
+ * Return: 0 on success, -EFAULT on error.
+ */
+static inline int atomic_stack_pop_user(u64 __user *head, u64 *value)
+{
+	while (true) {
+		int ret;
+		u64 next, first;
+
+		smp_mb();  /* Order the head read below against prior accesses. */
+		if (get_user(first, head))
+			return -EFAULT;
+
+		if (!first) {
+			*value = 0UL;
+			return 0;
+		}
+
+		if (first & 1UL) {
+			cpu_relax();
+			continue;  /* The head is locked by a pop in progress. */
+		}
+
+		ret = umcg_cmpxchg_64_user(head, &first, first | 1UL);
+		if (ret == -EAGAIN) {
+			cpu_relax();
+			continue;
+		}
+
+		if (ret)
+			return -EFAULT;
+
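+		/*
+		 * The head now has its lowest bit set: concurrent pushes
+		 * and pops will spin until we store @next below.
+		 */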
+		if (get_user(next, (u64 __user *)first))
+			return -EFAULT;
+
+		first |= 1UL;
+
+		ret = umcg_cmpxchg_64_user(head, &first, next);
+		if (ret)
+			return -EFAULT;
+
+		*value = first & ~1UL;
+		return 0;
+	}
+}
+#endif  /* CONFIG_X86_64 */
+#endif  /* CONFIG_UMCG */
+#endif  /* _KERNEL_SCHED_UMCG_H */
--
2.25.1
