Message-ID: <20190311195831.GA12807@lst.de>
Date: Mon, 11 Mar 2019 20:58:31 +0100
From: Christoph Hellwig <hch@....de>
To: Al Viro <viro@...IV.linux.org.uk>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>,
Eric Dumazet <eric.dumazet@...il.com>,
David Miller <davem@...emloft.net>,
Jason Baron <jbaron@...mai.com>, kgraul@...ux.ibm.com,
ktkhai@...tuozzo.com, kyeongdon.kim@....com,
Linux List Kernel Mailing <linux-kernel@...r.kernel.org>,
Netdev <netdev@...r.kernel.org>, pabeni@...hat.com,
syzkaller-bugs@...glegroups.com, xiyou.wangcong@...il.com,
Christoph Hellwig <hch@....de>,
zhengbin <zhengbin13@...wei.com>, bcrl@...ck.org,
linux-fsdevel@...r.kernel.org, linux-aio@...ck.org,
houtao1@...wei.com, yi.zhang@...wei.com
Subject: Re: [PATCH 4/8] Fix aio_poll() races

Where do we put the second iocb reference in case we return from
vfs_poll without ever being woken?
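
To make the lifetime question concrete, here is a standalone userspace
sketch of the two-reference scheme (fake_iocb, submit_poll and the
woken flag are all made-up names for illustration, this is not the
fs/aio.c code): the iocb starts with two references, one owned by the
submitter and one owned by the wakeup path, and if we come back from
vfs_poll without the wakeup ever firing, nothing drops reference #2:

/*
 * Standalone sketch, not kernel code: fake_iocb stands in for
 * struct aio_kiocb, and the two initial references model the
 * submitter's reference plus the wakeup/work path's reference.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_iocb {
	atomic_int refs;
};

static void fake_iocb_put(struct fake_iocb *iocb)
{
	/* free on the last put, like iocb_put() in fs/aio.c */
	if (atomic_fetch_sub(&iocb->refs, 1) == 1) {
		printf("iocb freed\n");
		free(iocb);
	}
}

static void submit_poll(bool woken)
{
	struct fake_iocb *iocb = malloc(sizeof(*iocb));

	atomic_init(&iocb->refs, 2);	/* ref #1: submitter, ref #2: wakeup */

	if (woken)
		fake_iocb_put(iocb);	/* the wakeup callback drops ref #2 */

	fake_iocb_put(iocb);		/* submitter drops ref #1 */
	/* if !woken, ref #2 is never dropped and the iocb leaks */
}

int main(void)
{
	submit_poll(true);	/* prints "iocb freed" */
	submit_poll(false);	/* nothing printed: the leak in question */
	return 0;
}
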
Also it seems like the completion code would still benefit from a
little helper, something like:

diff --git a/fs/aio.c b/fs/aio.c
index b2a5c7b3a1fe..8415e5e484ce 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1611,6 +1611,13 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
return 0;
}
+static void aio_poll_finish(struct aio_kiocb *iocb, __poll_t mask)
+{
+ list_del_init(&iocb->ki_list);
+ iocb->ki_res.res = mangle_poll(mask);
+ iocb->poll.done = true;
+}
+
static void aio_poll_complete_work(struct work_struct *work)
{
struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1635,9 +1642,7 @@ static void aio_poll_complete_work(struct work_struct *work)
spin_unlock_irq(&ctx->ctx_lock);
return;
}
- list_del_init(&iocb->ki_list);
- iocb->ki_res.res = mangle_poll(mask);
- req->done = true;
+ aio_poll_finish(iocb, mask);
spin_unlock_irq(&ctx->ctx_lock);
iocb_put(iocb);
@@ -1674,24 +1679,20 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
list_del_init(&req->wait.entry);
- if (mask) {
+ if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
/*
* Try to complete the iocb inline if we can. Use
* irqsave/irqrestore because not all filesystems (e.g. fuse)
* call this function with IRQs disabled and because IRQs
* have to be disabled before ctx_lock is obtained.
*/
- if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
- list_del(&iocb->ki_list);
- iocb->ki_res.res = mangle_poll(mask);
- req->done = true;
- spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
- iocb_put(iocb);
- return 1;
- }
+ aio_poll_finish(iocb, mask);
+ spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+ iocb_put(iocb);
+ } else {
+ schedule_work(&req->work);
}
- schedule_work(&req->work);
return 1;
}
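
The trylock-or-punt shape in that last hunk also stands on its own.
Here is a minimal userspace sketch of the same pattern, with
pthread_spin_trylock standing in for spin_trylock_irqsave and a plain
function call standing in for schedule_work (wake_cb, slow_path and
struct poll_state are invented names):

#include <pthread.h>
#include <stdio.h>

struct poll_state {
	pthread_spinlock_t lock;	/* stands in for ctx->ctx_lock */
	int done;
	int result;
};

/*
 * Slow path: runs later from a context that may block on the lock,
 * standing in for aio_poll_complete_work() run via schedule_work().
 */
static void slow_path(struct poll_state *st, int mask)
{
	pthread_spin_lock(&st->lock);
	st->result = mask;
	st->done = 1;
	pthread_spin_unlock(&st->lock);
}

/*
 * Wakeup callback: must not spin on the lock, so it completes inline
 * only if the lock happens to be free and punts to the slow path
 * otherwise.
 */
static void wake_cb(struct poll_state *st, int mask)
{
	if (mask && pthread_spin_trylock(&st->lock) == 0) {
		st->result = mask;
		st->done = 1;
		pthread_spin_unlock(&st->lock);
	} else {
		slow_path(st, mask);	/* kernel: schedule_work(&req->work) */
	}
}

int main(void)
{
	struct poll_state st = { .done = 0 };

	pthread_spin_init(&st.lock, PTHREAD_PROCESS_PRIVATE);
	wake_cb(&st, 1);		/* completes inline: lock was free */
	printf("done=%d result=%d\n", st.done, st.result);
	pthread_spin_destroy(&st.lock);
	return 0;
}

The kernel version additionally needs the irqsave variant because, as
the comment says, not everyone calls the wakeup with IRQs disabled; and
the trylock, as far as I can tell, is what avoids deadlocking against a
ctx_lock holder on the same CPU.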