Message-Id: <20210119045920.447-2-xieyongji@bytedance.com>
Date: Tue, 19 Jan 2021 12:59:10 +0800
From: Xie Yongji <xieyongji@...edance.com>
To: mst@...hat.com, jasowang@...hat.com, stefanha@...hat.com,
sgarzare@...hat.com, parav@...dia.com, bob.liu@...cle.com,
hch@...radead.org, rdunlap@...radead.org, willy@...radead.org,
viro@...iv.linux.org.uk, axboe@...nel.dk, bcrl@...ck.org,
corbet@....net
Cc: virtualization@...ts.linux-foundation.org, netdev@...r.kernel.org,
kvm@...r.kernel.org, linux-aio@...ck.org,
linux-fsdevel@...r.kernel.org
Subject: [RFC v3 01/11] eventfd: track eventfd_signal() recursion depth separately in different cases

Currently a single global per-CPU counter limits the recursion depth
of eventfd_signal() in order to avoid both deadlock and stack
overflow. But in the stack overflow case it should be OK to increase
the recursion depth if needed. So add a per-CPU counter to each
eventfd_ctx that catches the deadlock case (re-entering
eventfd_signal() on the same ctx), which makes it safe to raise the
global limit later.

Signed-off-by: Xie Yongji <xieyongji@...edance.com>
---
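Note: the deadlock case the per-ctx counter catches is re-entering
eventfd_signal() for the same ctx from its own wakeup path, while
ctx->wqh.lock is still held, roughly:

    eventfd_signal(ctx)
      spin_lock_irqsave(&ctx->wqh.lock, flags)
      wake_up_locked_poll(&ctx->wqh, EPOLLIN)
        -> wakeup callback runs under ctx->wqh.lock
           -> eventfd_signal(ctx)                 /* same ctx, same CPU */
              spin_lock_irqsave(&ctx->wqh.lock)   /* already held: deadlock */

Signalling a *different* ctx from such a callback only deepens the
stack, which is why the global limit (EVENTFD_WAKE_DEPTH, 0 here)
could later be raised without reintroducing the deadlock. A minimal
usage sketch follows the patch.
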
 fs/aio.c                |  3 ++-
 fs/eventfd.c            | 20 +++++++++++++++++++-
 include/linux/eventfd.h |  5 +----
 3 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index 1f32da13d39e..5d82903161f5 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1698,7 +1698,8 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
		list_del(&iocb->ki_list);
		iocb->ki_res.res = mangle_poll(mask);
		req->done = true;
-		if (iocb->ki_eventfd && eventfd_signal_count()) {
+		if (iocb->ki_eventfd &&
+		    eventfd_signal_count(iocb->ki_eventfd)) {
			iocb = NULL;
			INIT_WORK(&req->work, aio_poll_put_work);
			schedule_work(&req->work);
diff --git a/fs/eventfd.c b/fs/eventfd.c
index e265b6dd4f34..2df24f9bada3 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -25,6 +25,8 @@
#include <linux/idr.h>
#include <linux/uio.h>

+#define EVENTFD_WAKE_DEPTH 0
+
DEFINE_PER_CPU(int, eventfd_wake_count);

static DEFINE_IDA(eventfd_ida);
@@ -42,9 +44,17 @@ struct eventfd_ctx {
	 */
	__u64 count;
	unsigned int flags;
+	int __percpu *wake_count;
	int id;
};

+bool eventfd_signal_count(struct eventfd_ctx *ctx)
+{
+	return (this_cpu_read(*ctx->wake_count) ||
+		this_cpu_read(eventfd_wake_count) > EVENTFD_WAKE_DEPTH);
+}
+EXPORT_SYMBOL_GPL(eventfd_signal_count);
+
/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
@@ -71,17 +81,19 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
	 * it returns true, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
-	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+	if (WARN_ON_ONCE(eventfd_signal_count(ctx)))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	this_cpu_inc(eventfd_wake_count);
+	this_cpu_inc(*ctx->wake_count);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	this_cpu_dec(eventfd_wake_count);
+	this_cpu_dec(*ctx->wake_count);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
@@ -92,6 +104,7 @@ static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_simple_remove(&eventfd_ida, ctx->id);
+	free_percpu(ctx->wake_count);
	kfree(ctx);
}

@@ -423,6 +436,11 @@ static int do_eventfd(unsigned int count, int flags)

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
+	ctx->wake_count = alloc_percpu(int);
+	if (!ctx->wake_count) {
+		kfree(ctx);
+		return -ENOMEM;
+	}
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index fa0a524baed0..1a11ebbd74a9 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -45,10 +45,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);

DECLARE_PER_CPU(int, eventfd_wake_count);

-static inline bool eventfd_signal_count(void)
-{
-	return this_cpu_read(eventfd_wake_count);
-}
+bool eventfd_signal_count(struct eventfd_ctx *ctx);

#else /* CONFIG_EVENTFD */
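
As a minimal usage sketch (not part of this patch; my_obj,
my_wakeup_cb and my_deferred_signal are hypothetical names), a wakeup
callback could use the reworked eventfd_signal_count() to defer in
the same way the aio_poll_wake() hunk above does:

#include <linux/kernel.h>
#include <linux/eventfd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hypothetical consumer; assume INIT_WORK(&obj->work,
 * my_deferred_signal) was done at setup time. */
struct my_obj {
	struct eventfd_ctx *eventfd;
	struct wait_queue_entry wait;
	struct work_struct work;
};

/* Runs later in process context: no eventfd wakeup is on this stack,
 * so eventfd_signal_count() is false and the signal goes through. */
static void my_deferred_signal(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, work);

	eventfd_signal(obj->eventfd, 1);
}

/* May itself be invoked from inside eventfd_signal() via
 * wake_up_locked_poll(); signalling the same ctx directly here would
 * spin on the ctx->wqh.lock the outer call already holds. */
static int my_wakeup_cb(struct wait_queue_entry *wait, unsigned mode,
			int sync, void *key)
{
	struct my_obj *obj = container_of(wait, struct my_obj, wait);

	if (eventfd_signal_count(obj->eventfd)) {
		schedule_work(&obj->work);
		return 1;
	}
	eventfd_signal(obj->eventfd, 1);
	return 1;
}

By the time the worker runs, no eventfd wakeup is on its call stack,
so neither counter is elevated and the deferred signal cannot trip
the recursion check.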
--
2.11.0