Message-Id: <20200210122444.857728145@linuxfoundation.org>
Date: Mon, 10 Feb 2020 04:32:18 -0800
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Jens Axboe <axboe@...nel.dk>
Subject: [PATCH 5.5 225/367] eventfd: track eventfd_signal() recursion depth

From: Jens Axboe <axboe@...nel.dk>

commit b5e683d5cab8cd433b06ae178621f083cabd4f63 upstream.

eventfd use cases from aio and io_uring can deadlock due to circular
or recursive calling, when eventfd_signal() tries to grab the waitqueue
lock. On top of that, it's also possible to construct notification
chains that are deep enough that we could blow the stack.

Add a percpu counter that tracks the recursion depth, and warn if we
exceed it. The counter is also exposed so that users of eventfd_signal()
can do the right thing if it's non-zero in the context where it is
called.
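
As an illustration only (not part of this patch; the context struct,
work item and function names below are hypothetical), a user of
eventfd_signal() that may run inside a wakeup handler could use
eventfd_signal_count() to punt the signal to a workqueue:

#include <linux/eventfd.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct eventfd_ctx *evfd;
	/* assumed initialized with INIT_WORK(..., my_signal_workfn) */
	struct work_struct signal_work;
};

/* Runs in process context, where signalling cannot recurse. */
static void my_signal_workfn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx,
					  signal_work);

	eventfd_signal(ctx->evfd, 1);
}

static void my_post_event(struct my_ctx *ctx)
{
	/*
	 * Non-zero means eventfd_signal() is already running on this
	 * CPU (e.g. we were called from a wakeup handler), so calling
	 * it again would recurse and trip the WARN_ON_ONCE in
	 * eventfd_signal(); defer to a safe context instead.
	 */
	if (eventfd_signal_count())
		schedule_work(&ctx->signal_work);
	else
		eventfd_signal(ctx->evfd, 1);
}

Deferring like this keeps the wakeup handler shallow and avoids taking
ctx->wqh.lock recursively.
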
Cc: stable@...r.kernel.org # 4.19+
Signed-off-by: Jens Axboe <axboe@...nel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 fs/eventfd.c            |   15 +++++++++++++++
 include/linux/eventfd.h |   14 ++++++++++++++
 2 files changed, 29 insertions(+)

--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -24,6 +24,8 @@
 #include <linux/seq_file.h>
 #include <linux/idr.h>
 
+DEFINE_PER_CPU(int, eventfd_wake_count);
+
 static DEFINE_IDA(eventfd_ida);
 
 struct eventfd_ctx {
@@ -60,12 +62,25 @@ __u64 eventfd_signal(struct eventfd_ctx
 {
 	unsigned long flags;
 
+	/*
+	 * Deadlock or stack overflow issues can happen if we recurse here
+	 * through waitqueue wakeup handlers. If the caller uses potentially
+	 * nested waitqueues with custom wakeup handlers, then it should
+	 * check eventfd_signal_count() before calling this function. If
+	 * it returns true, the eventfd_signal() call should be deferred to a
+	 * safe context.
+	 */
+	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+		return 0;
+
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
+	this_cpu_inc(eventfd_wake_count);
 	if (ULLONG_MAX - ctx->count < n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+	this_cpu_dec(eventfd_wake_count);
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;

--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -12,6 +12,8 @@
 #include <linux/fcntl.h>
 #include <linux/wait.h>
 #include <linux/err.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -40,6 +42,13 @@ __u64 eventfd_signal(struct eventfd_ctx
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 
+DECLARE_PER_CPU(int, eventfd_wake_count);
+
+static inline bool eventfd_signal_count(void)
+{
+	return this_cpu_read(eventfd_wake_count);
+}
+
 #else /* CONFIG_EVENTFD */
 
 /*
@@ -68,6 +77,11 @@ static inline int eventfd_ctx_remove_wai
 	return -ENOSYS;
 }
 
+static inline bool eventfd_signal_count(void)
+{
+	return false;
+}
+
 #endif
 
 #endif /* _LINUX_EVENTFD_H */