Message-Id: <1406150608-19351-2-git-send-email-mhalcrow@google.com>
Date: Wed, 23 Jul 2014 14:23:24 -0700
From: Michael Halcrow <mhalcrow@...gle.com>
To: linux-ext4@...r.kernel.org, linux-fsdevel@...r.kernel.org
Cc: zohar@...ux.vnet.ibm.com, mhalcrow@...gle.com,
herbert@...dor.apana.org.au, pavel@....cz, hch@...radead.org,
lczerner@...hat.com, tytso@....edu, tyhicks@...onical.com,
serge.hallyn@...onical.com
Subject: [PATCH 1/5] ext4: Adds callback support for bio read completion

Add callback support for bio read completion. This allows a data
transformation, such as encryption, to hook into read completion:
the callback runs before the page is marked up to date, and a zero
return value defers SetPageUptodate()/unlock_page() to work the
callback has queued.
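
As an illustration, a filesystem read path might use the new hooks
roughly as follows. The myfs_* names, the crypt context, and the
workqueue are hypothetical; only mark_buffer_async_read(),
submit_bh_cb(), bio->bi_cb_ctx, and the bio_completion_cb_t
signature come from this patch:

	/* Hypothetical per-bio context passed via cb_ctx. */
	struct myfs_crypt_ctx {
		struct work_struct work;
		struct page *page;
		struct workqueue_struct *wq;
	};

	/* bio_completion_cb_t: @err is nonzero when the read failed. */
	static int myfs_end_read_cb(struct bio *bio, int err)
	{
		struct myfs_crypt_ctx *ctx = bio->bi_cb_ctx;

		if (err)
			return 1;	/* let end_buffer_async_read() finish */
		queue_work(ctx->wq, &ctx->work);
		return 0;	/* page stays locked until the work item runs */
	}

	...
	mark_buffer_async_read(bh);
	submit_bh_cb(READ, bh, myfs_end_read_cb, ctx);
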
Signed-off-by: Michael Halcrow <mhalcrow@...gle.com>
---
 fs/buffer.c                 | 46 ++++++++++++++++++++++++++++++++++++++------
 include/linux/blk_types.h   |  4 ++++
 include/linux/buffer_head.h |  8 ++++++++
 3 files changed, 52 insertions(+), 6 deletions(-)
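
A note for reviewers (below the fold, not for the commit log): when
the completion callback returns zero, end_buffer_async_read() skips
both SetPageUptodate() and unlock_page(), so the work the callback
queued must finish the page itself. A hypothetical work handler,
continuing the sketch above (myfs_decrypt_page() stands in for the
real transform):

	static void myfs_read_work(struct work_struct *work)
	{
		struct myfs_crypt_ctx *ctx =
			container_of(work, struct myfs_crypt_ctx, work);
		struct page *page = ctx->page;

		if (myfs_decrypt_page(page) == 0)
			SetPageUptodate(page);
		unlock_page(page);
		kfree(ctx);
	}
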
diff --git a/fs/buffer.c b/fs/buffer.c
index eba6e4f..a5527c5 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -296,7 +296,7 @@ static void free_more_memory(void)
  * I/O completion handler for block_read_full_page() - pages
  * which come unlocked at the end of I/O.
  */
-static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
+void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
 	unsigned long flags;
 	struct buffer_head *first;
@@ -339,6 +339,13 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
 	local_irq_restore(flags);
 
+	if (bh->b_private) {
+		struct bio *bio = (struct bio *)bh->b_private;
+		BUG_ON(!bio->bi_cb);
+		if (!bio->bi_cb(bio, !(page_uptodate && !PageError(page))))
+			goto out;
+	}
+
 	/*
 	 * If none of the buffers had errors and they are all
 	 * uptodate then we can set the page uptodate.
@@ -346,6 +353,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	if (page_uptodate && !PageError(page))
 		SetPageUptodate(page);
 	unlock_page(page);
+out:
 	return;
 
 still_busy:
@@ -353,6 +361,7 @@ still_busy:
 	local_irq_restore(flags);
 	return;
 }
+EXPORT_SYMBOL_GPL(end_buffer_async_read);
 
 /*
  * Completion handler for block_write_full_page() - pages which are unlocked
@@ -431,11 +440,12 @@ EXPORT_SYMBOL(end_buffer_async_write);
  * PageLocked prevents anyone from starting writeback of a page which is
  * under read I/O (PageWriteback is only ever set against a locked page).
  */
-static void mark_buffer_async_read(struct buffer_head *bh)
+void mark_buffer_async_read(struct buffer_head *bh)
 {
 	bh->b_end_io = end_buffer_async_read;
 	set_buffer_async_read(bh);
 }
+EXPORT_SYMBOL_GPL(mark_buffer_async_read);
 
 static void mark_buffer_async_write_endio(struct buffer_head *bh,
 					  bh_end_io_t *handler)
@@ -1654,14 +1664,17 @@ static inline int block_size_bits(unsigned int blocksize)
 	return ilog2(blocksize);
 }
 
-static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
+struct buffer_head *create_page_buffers(struct page *page, struct inode *inode,
+					unsigned int b_state)
 {
 	BUG_ON(!PageLocked(page));
 
 	if (!page_has_buffers(page))
-		create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
+		create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits),
+				     b_state);
 	return page_buffers(page);
 }
+EXPORT_SYMBOL_GPL(create_page_buffers);
 
 /*
  * NOTE! All mapped/uptodate combinations are valid:
@@ -3009,7 +3022,8 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
 	}
 }
 
-int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
+int _submit_bh_cb(int rw, struct buffer_head *bh, unsigned long bio_flags,
+		  bio_completion_cb_t *cb, void *cb_ctx)
 {
 	struct bio *bio;
 	int ret = 0;
@@ -3043,6 +3057,8 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
 	bio->bi_end_io = end_bio_bh_io_sync;
 	bio->bi_private = bh;
+	bio->bi_cb = cb;
+	bio->bi_cb_ctx = cb_ctx;
 	bio->bi_flags |= bio_flags;
 
 	/* Take care of bh's that straddle the end of the device */
 	guard_bh_eod(rw, bio, bh);
@@ -3054,6 +3070,12 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
 		rw |= REQ_PRIO;
 
 	bio_get(bio);
+
+	if (bio->bi_cb) {
+		BUG_ON(bh->b_private);
+		bh->b_private = bio;
+	}
+
 	submit_bio(rw, bio);
 
 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
@@ -3062,14 +3084,26 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
 	bio_put(bio);
 	return ret;
 }
+
+int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
+{
+	return _submit_bh_cb(rw, bh, bio_flags, NULL, NULL);
+}
 EXPORT_SYMBOL_GPL(_submit_bh);
 
 int submit_bh(int rw, struct buffer_head *bh)
 {
-	return _submit_bh(rw, bh, 0);
+	return submit_bh_cb(rw, bh, NULL, NULL);
 }
 EXPORT_SYMBOL(submit_bh);
 
+int submit_bh_cb(int rw, struct buffer_head *bh, bio_completion_cb_t *cb,
+		 void *cb_ctx)
+{
+	return _submit_bh_cb(rw, bh, 0, cb, cb_ctx);
+}
+EXPORT_SYMBOL_GPL(submit_bh_cb);
+
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 66c2167..06102df 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -16,6 +16,7 @@ struct io_context;
 struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *, int);
 typedef void (bio_destructor_t) (struct bio *);
+typedef int (bio_completion_cb_t) (struct bio *, int);
 
 /*
  * was unsigned short, but we might as well be ready for > 64kB I/O pages
@@ -96,6 +97,9 @@ struct bio {
 	struct bio_set *bi_pool;
 
+	bio_completion_cb_t *bi_cb;	/* completion callback */
+	void *bi_cb_ctx;		/* callback context */
+
 	/*
 	 * We can inline a number of vecs at the end of the bio, to avoid
 	 * double allocations for a small number of bio_vecs. This member
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 324329c..24ea03a 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -160,7 +160,9 @@ void create_empty_buffers(struct page *, unsigned long,
 				unsigned long b_state);
 void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
 void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
+void end_buffer_async_read(struct buffer_head *bh, int uptodate);
 void end_buffer_async_write(struct buffer_head *bh, int uptodate);
+void mark_buffer_async_read(struct buffer_head *bh);
 
 /* Things to do with buffers at mapping->private_list */
 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
@@ -169,6 +171,8 @@ void invalidate_inode_buffers(struct inode *);
 int remove_inode_buffers(struct inode *inode);
 int sync_mapping_buffers(struct address_space *mapping);
 void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
+struct buffer_head *create_page_buffers(struct page *page, struct inode *inode,
+					unsigned int b_state);
 void mark_buffer_async_write(struct buffer_head *bh);
 void __wait_on_buffer(struct buffer_head *);
@@ -191,7 +195,11 @@ int sync_dirty_buffer(struct buffer_head *bh);
 int __sync_dirty_buffer(struct buffer_head *bh, int rw);
 void write_dirty_buffer(struct buffer_head *bh, int rw);
 int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags);
+int _submit_bh_cb(int rw, struct buffer_head *bh, unsigned long bio_flags,
+		  bio_completion_cb_t *cb, void *cb_ctx);
 int submit_bh(int, struct buffer_head *);
+int submit_bh_cb(int rw, struct buffer_head *bh, bio_completion_cb_t *cb,
+		 void *cb_ctx);
 void write_boundary_block(struct block_device *bdev,
 			sector_t bblock, unsigned blocksize);
 int bh_uptodate_or_lock(struct buffer_head *bh);
--
2.0.0.526.g5318336