Message-Id: <20210413162240.3131033-4-eric.dumazet@gmail.com>
Date: Tue, 13 Apr 2021 09:22:39 -0700
From: Eric Dumazet <eric.dumazet@...il.com>
To: Ingo Molnar <mingo@...nel.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Peter Zijlstra <peterz@...radead.org>
Cc: "Paul E . McKenney" <paulmck@...nel.org>,
Boqun Feng <boqun.feng@...il.com>,
Arjun Roy <arjunroy@...gle.com>,
linux-kernel <linux-kernel@...r.kernel.org>,
Eric Dumazet <edumazet@...gle.com>,
Eric Dumazet <eric.dumazet@...il.com>
Subject: [PATCH v2 3/3] rseq: optimise rseq_get_rseq_cs() and clear_rseq_cs()
From: Eric Dumazet <edumazet@...gle.com>

Commit ec9c82e03a74 ("rseq: uapi: Declare rseq_cs field as union,
update includes") introduced a performance regression on our servers.

Using copy_from_user() and clear_user() for 64bit values is
suboptimal; we can use the faster get_user() and put_user() instead.
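
To illustrate the difference (a minimal, hypothetical sketch, not part
of this patch; "struct foo" and the read_val_*() helpers are invented
for illustration): get_user()/put_user() on a fixed-size field
typically inlines to a single user access, whereas
copy_from_user()/clear_user() go through the generic, size-agnostic
copy paths.

/* Hypothetical sketch only -- not from this patch. */
#include <linux/types.h>
#include <linux/uaccess.h>

struct foo {
	u64 val;
};

static int read_val_copy(const struct foo __user *ufoo, u64 *res)
{
	/* Generic copy path: returns the number of bytes NOT copied. */
	if (copy_from_user(res, &ufoo->val, sizeof(*res)))
		return -EFAULT;
	return 0;
}

static int read_val_get(const struct foo __user *ufoo, u64 *res)
{
	/* Fixed-size accessor: returns 0 on success, -EFAULT on fault. */
	if (get_user(*res, &ufoo->val))
		return -EFAULT;
	return 0;
}

The patch below applies the same idea to the rseq_cs.ptr64 field.
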
32bit arches can be changed to use the ptr32 field,
since the padding field must always be zero.
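
For reference, this works because commit ec9c82e03a74 declared the
field as a union in include/uapi/linux/rseq.h along the following
lines (quoted approximately from memory and trimmed; see the header in
your tree for the authoritative definition):

	union {
		__u64 ptr64;
#ifdef __LP64__
		__u64 ptr;
#else
		struct {
#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
			__u32 padding;	/* Initialized to zero. */
			__u32 ptr32;
#else /* LITTLE */
			__u32 ptr32;
			__u32 padding;	/* Initialized to zero. */
#endif /* ENDIAN */
		} ptr;
#endif
	} rseq_cs;

With padding guaranteed to stay zero, reading or writing only ptr32 on
a 32bit kernel is equivalent to accessing the full 64bit ptr64.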

v2: incorporated ideas from Peter and Mathieu about making this
generic, since my initial patch only dealt with 64bit arches.
Signed-off-by: Eric Dumazet <edumazet@...gle.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: "Paul E. McKenney" <paulmck@...nel.org>
Cc: Boqun Feng <boqun.feng@...il.com>
Cc: Arjun Roy <arjunroy@...gle.com>
Cc: Ingo Molnar <mingo@...nel.org>
---
 kernel/rseq.c | 41 +++++++++++++++++++++++++++++++++--------
 1 file changed, 33 insertions(+), 8 deletions(-)

diff --git a/kernel/rseq.c b/kernel/rseq.c
index cfe01ab5253c1c424c0e8b25acbb6a8e1b41a5b6..f2eee3f7f5d330688c81cb2e57d47ca6b843873e 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -119,23 +119,46 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
 	return 0;
 }
 
+#ifdef CONFIG_64BIT
+static int rseq_get_cs_ptr(struct rseq_cs __user **uptrp,
+			   const struct rseq __user *rseq)
+{
+	u64 ptr;
+
+	if (get_user(ptr, &rseq->rseq_cs.ptr64))
+		return -EFAULT;
+	*uptrp = (struct rseq_cs __user *)ptr;
+	return 0;
+}
+#else
+static int rseq_get_cs_ptr(struct rseq_cs __user **uptrp,
+			   const struct rseq __user *rseq)
+{
+	u32 ptr;
+
+	if (get_user(ptr, &rseq->rseq_cs.ptr.ptr32))
+		return -EFAULT;
+	*uptrp = (struct rseq_cs __user *)ptr;
+	return 0;
+}
+#endif
+
 static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
 {
 	struct rseq_cs __user *urseq_cs;
-	u64 ptr;
 	u32 __user *usig;
 	u32 sig;
 	int ret;
 
-	if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))
+	if (rseq_get_cs_ptr(&urseq_cs, t->rseq))
 		return -EFAULT;
-	if (!ptr) {
+	if (!urseq_cs) {
 		memset(rseq_cs, 0, sizeof(*rseq_cs));
 		return 0;
 	}
-	if (ptr >= TASK_SIZE)
+	if ((unsigned long)urseq_cs >= TASK_SIZE)
 		return -EINVAL;
-	urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
+
 	if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
 		return -EFAULT;
 
@@ -211,9 +234,11 @@ static int clear_rseq_cs(struct task_struct *t)
 	 *
 	 * Set rseq_cs to NULL.
 	 */
-	if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64)))
-		return -EFAULT;
-	return 0;
+#ifdef CONFIG_64BIT
+	return put_user(0UL, &t->rseq->rseq_cs.ptr64);
+#else
+	return put_user(0UL, &t->rseq->rseq_cs.ptr.ptr32);
+#endif
 }
 
 /*
--
2.31.1.295.g9ea45b61b8-goog