Message-ID: <0f22bf8f-6211-427d-6c3f-f23cae07971c@redhat.com>
Date:   Thu, 26 Aug 2021 15:03:43 +0800
From:   Jason Wang <jasowang@...hat.com>
To:     Thomas Gleixner <tglx@...utronix.de>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Daniel Bristot de Oliveira <bristot@...hat.com>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
        "Michael S. Tsirkin" <mst@...hat.com>,
        Juri Lelli <jlelli@...hat.com>
Cc:     LKML <linux-kernel@...r.kernel.org>,
        Al Viro <viro@...iv.linux.org.uk>,
        He Zhe <zhe.he@...driver.com>, Jens Axboe <axboe@...com>,
        Peter Zijlstra <peterz@...radead.org>
Subject: Re: [PATCH] eventfd: Make signal recursion protection a task bit


On 2021/7/29 7:01 PM, Thomas Gleixner wrote:
> The recursion protection for eventfd_signal() is based on a per CPU
> variable and relies on the !RT semantics of spin_lock_irqsave() for
> protecting this per CPU variable. On RT kernels spin_lock_irqsave() neither
> disables preemption nor interrupts which allows the spin lock held section
> to be preempted. If the preempting task invokes eventfd_signal() as well,
> then the recursion warning triggers.
>
> Paolo suggested to protect the per CPU variable with a local lock, but
> that's heavyweight and actually not necessary. The goal of this protection
> is to prevent the task stack from overflowing, which can be achieved with a
> per task recursion protection as well.
>
> Replace the per CPU variable with a per task bit similar to other recursion
> protection bits like task_struct::in_page_owner. This works on both !RT and
> RT kernels and removes as a side effect the extra per CPU storage.
>
> No functional change for !RT kernels.
>
> Reported-by: Daniel Bristot de Oliveira <bristot@...hat.com>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>


Acked-by: Jason Wang <jasowang@...hat.com>

Does anyone want to pick up this patch?


> ---
>   fs/aio.c                |    2 +-
>   fs/eventfd.c            |   12 +++++-------
>   include/linux/eventfd.h |   11 +++++------
>   include/linux/sched.h   |    4 ++++
>   4 files changed, 15 insertions(+), 14 deletions(-)
>
> --- a/fs/aio.c
> +++ b/fs/aio.c
> @@ -1695,7 +1695,7 @@ static int aio_poll_wake(struct wait_que
>   		list_del(&iocb->ki_list);
>   		iocb->ki_res.res = mangle_poll(mask);
>   		req->done = true;
> -		if (iocb->ki_eventfd && eventfd_signal_count()) {
> +		if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
>   			iocb = NULL;
>   			INIT_WORK(&req->work, aio_poll_put_work);
>   			schedule_work(&req->work);
> --- a/fs/eventfd.c
> +++ b/fs/eventfd.c
> @@ -25,8 +25,6 @@
>   #include <linux/idr.h>
>   #include <linux/uio.h>
>   
> -DEFINE_PER_CPU(int, eventfd_wake_count);
> -
>   static DEFINE_IDA(eventfd_ida);
>   
>   struct eventfd_ctx {
> @@ -67,21 +65,21 @@ struct eventfd_ctx {
>   	 * Deadlock or stack overflow issues can happen if we recurse here
>   	 * through waitqueue wakeup handlers. If the caller users potentially
>   	 * nested waitqueues with custom wakeup handlers, then it should
> -	 * check eventfd_signal_count() before calling this function. If
> -	 * it returns true, the eventfd_signal() call should be deferred to a
> +	 * check eventfd_signal_allowed() before calling this function. If
> +	 * it returns false, the eventfd_signal() call should be deferred to a
>   	 * safe context.
>   	 */
> -	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
> +	if (WARN_ON_ONCE(current->in_eventfd_signal))
>   		return 0;
>   
>   	spin_lock_irqsave(&ctx->wqh.lock, flags);
> -	this_cpu_inc(eventfd_wake_count);
> +	current->in_eventfd_signal = 1;
>   	if (ULLONG_MAX - ctx->count < n)
>   		n = ULLONG_MAX - ctx->count;
>   	ctx->count += n;
>   	if (waitqueue_active(&ctx->wqh))
>   		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
> -	this_cpu_dec(eventfd_wake_count);
> +	current->in_eventfd_signal = 0;
>   	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
>   
>   	return n;
> --- a/include/linux/eventfd.h
> +++ b/include/linux/eventfd.h
> @@ -14,6 +14,7 @@
>   #include <linux/err.h>
>   #include <linux/percpu-defs.h>
>   #include <linux/percpu.h>
> +#include <linux/sched.h>
>   
>   /*
>    * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
> @@ -43,11 +44,9 @@ int eventfd_ctx_remove_wait_queue(struct
>   				  __u64 *cnt);
>   void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
>   
> -DECLARE_PER_CPU(int, eventfd_wake_count);
> -
> -static inline bool eventfd_signal_count(void)
> +static inline bool eventfd_signal_allowed(void)
>   {
> -	return this_cpu_read(eventfd_wake_count);
> +	return !current->in_eventfd_signal;
>   }
>   
>   #else /* CONFIG_EVENTFD */
> @@ -78,9 +77,9 @@ static inline int eventfd_ctx_remove_wai
>   	return -ENOSYS;
>   }
>   
> -static inline bool eventfd_signal_count(void)
> +static inline bool eventfd_signal_allowed(void)
>   {
> -	return false;
> +	return true;
>   }
>   
>   static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -863,6 +863,10 @@ struct task_struct {
>   	/* Used by page_owner=on to detect recursion in page tracking. */
>   	unsigned			in_page_owner:1;
>   #endif
> +#ifdef CONFIG_EVENTFD
> +	/* Recursion prevention for eventfd_signal() */
> +	unsigned			in_eventfd_signal:1;
> +#endif
>   
>   	unsigned long			atomic_flags; /* Flags requiring atomic access. */
>   
>

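For reference, below is a minimal caller-side sketch of the pattern the renamed helper enables, modeled on the aio_poll_wake() hunk above: signal directly when allowed, otherwise defer to a workqueue. The my_ctx, my_notify and my_signal_work names are made up for illustration and are not part of the patch.

/*
 * Hypothetical caller-side sketch (not from the patch): defer the
 * eventfd_signal() call when the recursion protection says it is unsafe.
 */
#include <linux/eventfd.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_ctx {
	struct eventfd_ctx	*eventfd;
	struct work_struct	signal_work;
};

static void my_signal_work_fn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, signal_work);

	/* Workqueue context: no wakeup handler is on the stack here. */
	eventfd_signal(ctx->eventfd, 1);
}

/* Called from a (possibly nested) waitqueue wakeup handler. */
static void my_notify(struct my_ctx *ctx)
{
	if (eventfd_signal_allowed()) {
		/* Not currently inside eventfd_signal(): signal directly. */
		eventfd_signal(ctx->eventfd, 1);
	} else {
		/* Already inside eventfd_signal(): defer to a safe context. */
		INIT_WORK(&ctx->signal_work, my_signal_work_fn);
		schedule_work(&ctx->signal_work);
	}
}

With the per-task bit, this check keeps working unchanged on RT kernels, where the preempting task may itself be in the middle of eventfd_signal().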