Message-ID: <567941475.72456.1618332885342.JavaMail.zimbra@efficios.com>
Date: Tue, 13 Apr 2021 12:54:45 -0400 (EDT)
From: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
To: Eric Dumazet <eric.dumazet@...il.com>,
David Laight <David.Laight@...LAB.COM>
Cc: Ingo Molnar <mingo@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
paulmck <paulmck@...nel.org>, Boqun Feng <boqun.feng@...il.com>,
Arjun Roy <arjunroy@...gle.com>,
linux-kernel <linux-kernel@...r.kernel.org>,
Eric Dumazet <edumazet@...gle.com>
Subject: Re: [PATCH v2 3/3] rseq: optimise rseq_get_rseq_cs() and
clear_rseq_cs()
----- On Apr 13, 2021, at 12:22 PM, Eric Dumazet eric.dumazet@...il.com wrote:
> From: Eric Dumazet <edumazet@...gle.com>
>
> Commit ec9c82e03a74 ("rseq: uapi: Declare rseq_cs field as union,
> update includes") added regressions for our servers.
>
> Using copy_from_user() and clear_user() for 64bit values
> is suboptimal.
>
> We can use faster put_user() and get_user().
>
> 32bit arches can be changed to use the ptr32 field,
> since the padding field must always be zero.
>
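(For reference, sketching the relevant part of the rseq uapi union from the
accessors used in the patch below and the padding field mentioned above; the
real header also carries a big-endian variant that swaps the two 32-bit words:

	union {
		__u64 ptr64;		/* full 64-bit pointer slot */
		struct {
			__u32 ptr32;	/* pointer value, low-order word */
			__u32 padding;	/* must always stay zero */
		} ptr;
	} rseq_cs;

so on 32-bit, reading only ptr32 is valid precisely because the padding word
is guaranteed to be zero.)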
> v2: added ideas from Peter and Mathieu about making this
> generic, since my initial patch was only dealing with
> 64bit arches.
Ah, now I remember why reading and clearing the entire 64-bit field is
important: we don't want user-space processes to be able to use this change
in behavior to figure out whether they are running on a native 32-bit kernel
or in 32-bit compat mode on a 64-bit kernel.
So although I'm fine with making 64-bit kernels faster, we'll want to keep
updating the entire 64-bit ptr field on 32-bit kernels as well.
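Concretely, for the !CONFIG_64BIT branch of clear_rseq_cs(), something like
the following would keep that property while still avoiding clear_user().
This is just an untested sketch on my side; it assumes the padding word from
the changelog is reachable as rseq_cs.ptr.padding, next to the ptr.ptr32
accessor the patch already uses:

#else
	/* Clear both 32-bit words so the full 64-bit slot reads back as zero. */
	if (put_user(0U, &t->rseq->rseq_cs.ptr.ptr32))
		return -EFAULT;
	return put_user(0U, &t->rseq->rseq_cs.ptr.padding);
#endif

That way user space cannot tell a native 32-bit kernel apart from 32-bit
compat mode on a 64-bit kernel by watching how the padding word behaves.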
Thanks,
Mathieu
>
> Signed-off-by: Eric Dumazet <edumazet@...gle.com>
> Cc: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: "Paul E. McKenney" <paulmck@...nel.org>
> Cc: Boqun Feng <boqun.feng@...il.com>
> Cc: Arjun Roy <arjunroy@...gle.com>
> Cc: Ingo Molnar <mingo@...nel.org>
> ---
> kernel/rseq.c | 41 +++++++++++++++++++++++++++++++++--------
> 1 file changed, 33 insertions(+), 8 deletions(-)
>
> diff --git a/kernel/rseq.c b/kernel/rseq.c
> index cfe01ab5253c1c424c0e8b25acbb6a8e1b41a5b6..f2eee3f7f5d330688c81cb2e57d47ca6b843873e 100644
> --- a/kernel/rseq.c
> +++ b/kernel/rseq.c
> @@ -119,23 +119,46 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
> return 0;
> }
>
> +#ifdef CONFIG_64BIT
> +static int rseq_get_cs_ptr(struct rseq_cs __user **uptrp,
> + const struct rseq __user *rseq)
> +{
> + u64 ptr;
> +
> + if (get_user(ptr, &rseq->rseq_cs.ptr64))
> + return -EFAULT;
> + *uptrp = (struct rseq_cs __user *)ptr;
> + return 0;
> +}
> +#else
> +static int rseq_get_cs_ptr(struct rseq_cs __user **uptrp,
> + const struct rseq __user *rseq)
> +{
> + u32 ptr;
> +
> + if (get_user(ptr, &rseq->rseq_cs.ptr.ptr32))
> + return -EFAULT;
> + *uptrp = (struct rseq_cs __user *)ptr;
> + return 0;
> +}
> +#endif
> +
> static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
> {
> struct rseq_cs __user *urseq_cs;
> - u64 ptr;
> u32 __user *usig;
> u32 sig;
> int ret;
>
> - if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))
> + if (rseq_get_cs_ptr(&urseq_cs, t->rseq))
> return -EFAULT;
> - if (!ptr) {
> + if (!urseq_cs) {
> memset(rseq_cs, 0, sizeof(*rseq_cs));
> return 0;
> }
> - if (ptr >= TASK_SIZE)
> + if ((unsigned long)urseq_cs >= TASK_SIZE)
> return -EINVAL;
> - urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
> +
> if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
> return -EFAULT;
>
> @@ -211,9 +234,11 @@ static int clear_rseq_cs(struct task_struct *t)
> *
> * Set rseq_cs to NULL.
> */
> - if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64)))
> - return -EFAULT;
> - return 0;
> +#ifdef CONFIG_64BIT
> + return put_user(0UL, &t->rseq->rseq_cs.ptr64);
> +#else
> + return put_user(0UL, &t->rseq->rseq_cs.ptr.ptr32);
> +#endif
> }
>
> /*
> --
> 2.31.1.295.g9ea45b61b8-goog
--
Mathieu Desnoyers
EfficiOS Inc.
http://www.efficios.com