[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20150304235739.17330.85116.stgit@notabene.brown>
Date: Thu, 05 Mar 2015 10:57:39 +1100
From: NeilBrown <neilb@...e.de>
To: Alexander Viro <viro@...iv.linux.org.uk>
Cc: linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 2/2] block_dev/DIO - cache one bio allocation when caching a
DIO.
When performing an O_DIRECT write to a block device, a 'struct bio' is
allocated from a mempool.
There is only one mempool for all block devices, so if a single block
device were blocked indefinitely, the mempool could in theory be
exhausted and other block devices would be affected.
When mdmon needs to update RAID metadata (see previous patch) it needs
to perform an O_DIRECT write to some block devices while another block
device (the array) is frozen. This could conceivably lead to a
deadlock.
Rather than allocate one mempool per block device (which would be an
effective solution), this patch effects a single-bio pool for each
'struct dio' that is being used by an mlockall(MCL_FUTURE) process.
'cache_bio' is added to 'struct dio' and placed at the end so that it
isn't zeroed out regularly.
When an allocation is needed, the cached bio is used if it is present
and large enough. When a bio is freed, it is placed here if appropriate.
Naturally it is freed when the file is closed.
All other allocations to serve O_DIRECT writes are further down the
stack and use mempools that cannot be exhausted by a frozen md array.
Signed-off-by: NeilBrown <neilb@...e.de>
---
fs/direct-io.c | 45 +++++++++++++++++++++++++++++++++++++++------
1 file changed, 39 insertions(+), 6 deletions(-)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index ece5e45933d2..554913e9cc30 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -139,12 +139,17 @@ struct dio {
struct page *pages[DIO_PAGES]; /* page buffer */
struct work_struct complete_work;/* deferred AIO completion */
};
+ struct bio *cache_bio;
} ____cacheline_aligned_in_smp;
static struct kmem_cache *dio_cache __read_mostly;
void dio_free(struct dio *dio)
{
+ if (dio->cache_bio) {
+ bio_put(dio->cache_bio);
+ dio->cache_bio = NULL;
+ }
kmem_cache_free(dio_cache, dio);
}
@@ -362,13 +367,24 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
struct block_device *bdev,
sector_t first_sector, int nr_vecs)
{
- struct bio *bio;
+ struct bio *bio = NULL;
+ if ((dio->flags & DIO_PERSISTENT_DIO) && dio->cache_bio) {
+ spin_lock_irq(&dio->bio_lock);
+ if (dio->cache_bio &&
+ dio->cache_bio->bi_max_vecs >= nr_vecs) {
+ bio = dio->cache_bio;
+ dio->cache_bio = NULL;
+ bio_reset(bio);
+ }
+ spin_unlock_irq(&dio->bio_lock);
+ }
/*
* bio_alloc() is guaranteed to return a bio when called with
* __GFP_WAIT and we request a valid number of vectors.
*/
- bio = bio_alloc(GFP_KERNEL, nr_vecs);
+ if (!bio)
+ bio = bio_alloc(GFP_KERNEL, nr_vecs);
bio->bi_bdev = bdev;
bio->bi_iter.bi_sector = first_sector;
@@ -480,7 +496,21 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
set_page_dirty_lock(page);
page_cache_release(page);
}
- bio_put(bio);
+ if (dio->flags & DIO_PERSISTENT_DIO) {
+ spin_lock_irq(&dio->bio_lock);
+ if (dio->cache_bio &&
+ dio->cache_bio->bi_max_vecs < bio->bi_max_vecs) {
+ bio_put(dio->cache_bio);
+ dio->cache_bio = NULL;
+ }
+ if (dio->cache_bio == NULL) {
+ dio->cache_bio = bio;
+ bio = NULL;
+ }
+ spin_unlock_irq(&dio->bio_lock);
+ }
+ if (bio)
+ bio_put(bio);
}
return uptodate ? 0 : -EIO;
}
@@ -1144,8 +1174,11 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
if (cmpxchg(&iocb->ki_filp->private_data, dio, NULL) != dio)
dio = NULL;
}
- if (!dio)
+ if (!dio) {
dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
+ if (dio)
+ dio->cache_bio = NULL;
+ }
retval = -ENOMEM;
if (!dio)
goto out;
@@ -1169,7 +1202,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
end - 1);
if (retval) {
mutex_unlock(&inode->i_mutex);
- kmem_cache_free(dio_cache, dio);
+ dio_free(dio);
goto out;
}
}
@@ -1205,7 +1238,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
* We grab i_mutex only for reads so we don't have
* to release it here
*/
- kmem_cache_free(dio_cache, dio);
+ dio_free(dio);
goto out;
}
}
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists