Message-ID: <20240829093617.2396538-3-hsiangkao@linux.alibaba.com>
Date: Thu, 29 Aug 2024 17:36:16 +0800
From: Gao Xiang <hsiangkao@...ux.alibaba.com>
To: linux-erofs@...ts.ozlabs.org
Cc: LKML <linux-kernel@...r.kernel.org>,
Gao Xiang <hsiangkao@...ux.alibaba.com>
Subject: [PATCH RESEND 3/4] erofs: support compressed inodes for fileio

Use pseudo bios just like the previous fscache approach, since merged
bio_vecs can be filled properly with unified interfaces.

Signed-off-by: Gao Xiang <hsiangkao@...ux.alibaba.com>
---
 fs/erofs/fileio.c   | 25 +++++++++++++++++++++----
 fs/erofs/inode.c    |  6 ------
 fs/erofs/internal.h |  8 ++++++++
 fs/erofs/zdata.c    | 27 +++++++++++++++++----------
 4 files changed, 46 insertions(+), 20 deletions(-)
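
As a quick reference, the pseudo bio mentioned above is the one embedded
in struct erofs_fileio_rq from the file-backed mount support. A rough
sketch of that relationship (assumed layout for illustration, not
verbatim from the tree):

	/*
	 * Sketch only, not part of this patch: the pseudo bio is embedded
	 * in the file-backed request, so the request can be recovered from
	 * a bare bio pointer with container_of(), exactly as
	 * erofs_fileio_submit_bio() below does.
	 */
	struct erofs_fileio_rq {
		struct bio_vec bvecs[BIO_MAX_VECS];	/* backing vector (assumed) */
		struct bio bio;		/* pseudo bio handed out to callers */
		struct kiocb iocb;	/* async read on the backing file */
	};
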
diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
index 6191336b42b6..02b522ff7876 100644
--- a/fs/erofs/fileio.c
+++ b/fs/erofs/fileio.c
@@ -23,7 +23,6 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
container_of(iocb, struct erofs_fileio_rq, iocb);
struct folio_iter fi;

- DBG_BUGON(rq->bio.bi_end_io);
if (ret > 0) {
if (ret != rq->bio.bi_iter.bi_size) {
bio_advance(&rq->bio, ret);
@@ -31,9 +30,13 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
}
ret = 0;
}
- bio_for_each_folio_all(fi, &rq->bio) {
- DBG_BUGON(folio_test_uptodate(fi.folio));
- erofs_onlinefolio_end(fi.folio, ret);
+ if (rq->bio.bi_end_io) {
+ rq->bio.bi_end_io(&rq->bio);
+ } else {
+ bio_for_each_folio_all(fi, &rq->bio) {
+ DBG_BUGON(folio_test_uptodate(fi.folio));
+ erofs_onlinefolio_end(fi.folio, ret);
+ }
}
kfree(rq);
}
@@ -68,6 +71,20 @@ static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
return rq;
}

+struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev)
+{
+ struct erofs_fileio_rq *rq;
+
+ rq = erofs_fileio_rq_alloc(mdev);
+ return rq ? &rq->bio : NULL;
+}
+
+void erofs_fileio_submit_bio(struct bio *bio)
+{
+ return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq,
+ bio));
+}
+
static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
{
struct inode *inode = folio_inode(folio);
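
To illustrate the two wrappers, here is a hypothetical caller sketch
mirroring the zdata.c hunks below (error handling elided; "mdev" and
"cur" as in z_erofs_submit_queue()). The compressed path attaches
z_erofs_endio as ->bi_end_io, which erofs_fileio_ki_complete() above now
forwards to, while uncompressed file-backed reads keep the direct folio
iteration:

	struct bio *bio = erofs_fileio_bio_alloc(&mdev);

	if (bio) {
		bio->bi_end_io = z_erofs_endio;	/* forwarded on I/O completion */
		bio->bi_iter.bi_sector = cur >> 9;
		/* ... attach compressed pages, e.g. with bio_add_page() ... */
		erofs_fileio_submit_bio(bio);
	}
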
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 4a902e6e69a5..82259553d9f6 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -260,12 +260,6 @@ static int erofs_fill_inode(struct inode *inode)
mapping_set_large_folios(inode->i_mapping);
if (erofs_inode_is_data_compressed(vi->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
-#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
- if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb))) {
- err = -EOPNOTSUPP;
- goto out_unlock;
- }
-#endif
DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT,
erofs_info, inode->i_sb,
"EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 9bc4dcfd06d7..4efd578d7c62 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -489,6 +489,14 @@ static inline void z_erofs_exit_subsystem(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP */

+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev);
+void erofs_fileio_submit_bio(struct bio *bio);
+#else
+static inline struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
+static inline void erofs_fileio_submit_bio(struct bio *bio) {}
+#endif
+
#ifdef CONFIG_EROFS_FS_ONDEMAND
int erofs_fscache_register_fs(struct super_block *sb);
void erofs_fscache_unregister_fs(struct super_block *sb);
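
The !CONFIG_EROFS_FS_BACKED_BY_FILE stubs only exist to keep the new
call sites in zdata.c free of #ifdefs; they are unreachable at runtime
because every caller is guarded by erofs_is_fileio_mode(), which cannot
be true without that option. A rough sketch of the shape this relies on
(assumed for illustration, not verbatim from the tree):

	/*
	 * Assumed sketch: with the option off, IS_ENABLED() makes the
	 * whole check constant-false, so the fileio branches in the
	 * dispatch below are dead code and the no-op stubs compile out.
	 */
	static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi)
	{
		return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) &&
		       !sbi->sb->s_bdev;
	}
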
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 350612f32ac6..2271cb74ae3a 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1618,10 +1618,12 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
if (bio && (cur != last_pa ||
bio->bi_bdev != mdev.m_bdev)) {
io_retry:
- if (!erofs_is_fscache_mode(sb))
- submit_bio(bio);
- else
+ if (erofs_is_fileio_mode(EROFS_SB(sb)))
+ erofs_fileio_submit_bio(bio);
+ else if (erofs_is_fscache_mode(sb))
erofs_fscache_submit_bio(bio);
+ else
+ submit_bio(bio);

if (memstall) {
psi_memstall_leave(&pflags);
@@ -1637,10 +1639,13 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
}

if (!bio) {
- bio = erofs_is_fscache_mode(sb) ?
- erofs_fscache_bio_alloc(&mdev) :
- bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
- REQ_OP_READ, GFP_NOIO);
+ if (erofs_is_fileio_mode(EROFS_SB(sb)))
+ bio = erofs_fileio_bio_alloc(&mdev);
+ else if (erofs_is_fscache_mode(sb))
+ bio = erofs_fscache_bio_alloc(&mdev);
+ else
+ bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
+ REQ_OP_READ, GFP_NOIO);
bio->bi_end_io = z_erofs_endio;
bio->bi_iter.bi_sector = cur >> 9;
bio->bi_private = q[JQ_SUBMIT];
@@ -1667,10 +1672,12 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
} while (owned_head != Z_EROFS_PCLUSTER_TAIL);

if (bio) {
- if (!erofs_is_fscache_mode(sb))
- submit_bio(bio);
- else
+ if (erofs_is_fileio_mode(EROFS_SB(sb)))
+ erofs_fileio_submit_bio(bio);
+ else if (erofs_is_fscache_mode(sb))
erofs_fscache_submit_bio(bio);
+ else
+ submit_bio(bio);
if (memstall)
psi_memstall_leave(&pflags);
}
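
The same three-way dispatch now appears twice in z_erofs_submit_queue();
if more backends show up, it could be folded into a small helper, e.g.
(sketch only, not part of this patch):

	static void z_erofs_submit_backend_bio(struct super_block *sb,
					       struct bio *bio)
	{
		if (erofs_is_fileio_mode(EROFS_SB(sb)))
			erofs_fileio_submit_bio(bio);
		else if (erofs_is_fscache_mode(sb))
			erofs_fscache_submit_bio(bio);
		else
			submit_bio(bio);
	}
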
--
2.43.5