[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <49B8A0B9.2060602@cosmosbay.com>
Date: Thu, 12 Mar 2009 06:42:17 +0100
From: Eric Dumazet <dada1@...mosbay.com>
To: Andrew Morton <akpm@...ux-foundation.org>
CC: Jeff Moyer <jmoyer@...hat.com>, Avi Kivity <avi@...hat.com>,
linux-aio <linux-aio@...ck.org>, zach.brown@...cle.com,
bcrl@...ck.org, linux-kernel@...r.kernel.org,
Davide Libenzi <davidel@...ilserver.org>,
Christoph Lameter <cl@...ux-foundation.org>
Subject: [PATCH] aio: fput() can be called from interrupt context
Eric Dumazet a écrit :
>> Path could be :
>>
>> 1) fput() changes so that calling it from interrupt context is possible
>> (Using a work queue to make sure __fput() is called from process context)
>>
>> 2) Changes aio to use fput() as is (and zap its internal work_queue and aio_fput_routine() stuff)
>>
>> 3) Once atomic_long_dec_and_test(&filp->f_count) only performed in fput(),
>> SLAB_DESTROY_BY_RCU for "struct file" get back :)
>>
Here is the second patch
Thank you
[PATCH] aio: cleanup, since fput() is IRQ safe
Once fput() is IRQ safe, we can clean up the aio code and delete its work_queue.
Signed-off-by: Eric Dumazet <dada1@...mosbay.com>
---
fs/aio.c | 52 ++--------------------------------------------------
1 files changed, 2 insertions(+), 50 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 8fa77e2..b0351a1 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -53,13 +53,6 @@ static struct kmem_cache *kioctx_cachep;
static struct workqueue_struct *aio_wq;
-/* Used for rare fput completion. */
-static void aio_fput_routine(struct work_struct *);
-static DECLARE_WORK(fput_work, aio_fput_routine);
-
-static DEFINE_SPINLOCK(fput_lock);
-static LIST_HEAD(fput_head);
-
static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);
@@ -469,15 +462,7 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
struct kiocb *req;
- /* Handle a potential starvation case -- should be exceedingly rare as
- * requests will be stuck on fput_head only if the aio_fput_routine is
- * delayed and the requests were the last user of the struct file.
- */
req = __aio_get_req(ctx);
- if (unlikely(NULL == req)) {
- aio_fput_routine(NULL);
- req = __aio_get_req(ctx);
- }
return req;
}
@@ -498,30 +483,6 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
wake_up(&ctx->wait);
}
-static void aio_fput_routine(struct work_struct *data)
-{
- spin_lock_irq(&fput_lock);
- while (likely(!list_empty(&fput_head))) {
- struct kiocb *req = list_kiocb(fput_head.next);
- struct kioctx *ctx = req->ki_ctx;
-
- list_del(&req->ki_list);
- spin_unlock_irq(&fput_lock);
-
- /* Complete the fput */
- __fput(req->ki_filp);
-
- /* Link the iocb into the context's free list */
- spin_lock_irq(&ctx->ctx_lock);
- really_put_req(ctx, req);
- spin_unlock_irq(&ctx->ctx_lock);
-
- put_ioctx(ctx);
- spin_lock_irq(&fput_lock);
- }
- spin_unlock_irq(&fput_lock);
-}
-
/* __aio_put_req
* Returns true if this put was the last user of the request.
*/
@@ -540,17 +501,8 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
req->ki_cancel = NULL;
req->ki_retry = NULL;
- /* Must be done under the lock to serialise against cancellation.
- * Call this aio_fput as it duplicates fput via the fput_work.
- */
- if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
- get_ioctx(ctx);
- spin_lock(&fput_lock);
- list_add(&req->ki_list, &fput_head);
- spin_unlock(&fput_lock);
- queue_work(aio_wq, &fput_work);
- } else
- really_put_req(ctx, req);
+ fput(req->ki_filp);
+ really_put_req(ctx, req);
return 1;
}
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists