[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251117132537.227116-9-lihongbo22@huawei.com>
Date: Mon, 17 Nov 2025 13:25:35 +0000
From: Hongbo Li <lihongbo22@...wei.com>
To: <hsiangkao@...ux.alibaba.com>, <chao@...nel.org>, <brauner@...nel.org>,
<djwong@...nel.org>, <amir73il@...il.com>, <joannelkoong@...il.com>
CC: <lihongbo22@...wei.com>, <linux-fsdevel@...r.kernel.org>,
<linux-erofs@...ts.ozlabs.org>, <linux-kernel@...r.kernel.org>
Subject: [PATCH v9 08/10] erofs: support unencoded inodes for page cache share
Add inode page cache sharing support for unencoded files.
I conducted experiments in a container environment. The table below
shows the page cache usage after reading all files in two different
minor versions of the same container image:
+-------------------+------------------+-------------+---------------+
| Image | Page Cache Share | Memory (MB) | Memory |
| | | | Reduction (%) |
+-------------------+------------------+-------------+---------------+
| | No | 241 | - |
| redis +------------------+-------------+---------------+
| 7.2.4 & 7.2.5 | Yes | 163 | 33% |
+-------------------+------------------+-------------+---------------+
| | No | 872 | - |
| postgres +------------------+-------------+---------------+
| 16.1 & 16.2 | Yes | 630 | 28% |
+-------------------+------------------+-------------+---------------+
| | No | 2771 | - |
| tensorflow +------------------+-------------+---------------+
| 2.11.0 & 2.11.1 | Yes | 2340 | 16% |
+-------------------+------------------+-------------+---------------+
| | No | 926 | - |
| mysql +------------------+-------------+---------------+
| 8.0.11 & 8.0.12 | Yes | 735 | 21% |
+-------------------+------------------+-------------+---------------+
| | No | 390 | - |
| nginx +------------------+-------------+---------------+
| 7.2.4 & 7.2.5 | Yes | 219 | 44% |
+-------------------+------------------+-------------+---------------+
| tomcat | No | 924 | - |
| 10.1.25 & 10.1.26 +------------------+-------------+---------------+
| | Yes | 474 | 49% |
+-------------------+------------------+-------------+---------------+
Additionally, the table below shows the runtime memory usage of the
container:
+-------------------+------------------+-------------+---------------+
| Image | Page Cache Share | Memory (MB) | Memory |
| | | | Reduction (%) |
+-------------------+------------------+-------------+---------------+
| | No | 35 | - |
| redis +------------------+-------------+---------------+
| 7.2.4 & 7.2.5 | Yes | 28 | 20% |
+-------------------+------------------+-------------+---------------+
| | No | 149 | - |
| postgres +------------------+-------------+---------------+
| 16.1 & 16.2 | Yes | 95 | 37% |
+-------------------+------------------+-------------+---------------+
| | No | 1028 | - |
| tensorflow +------------------+-------------+---------------+
| 2.11.0 & 2.11.1 | Yes | 930 | 10% |
+-------------------+------------------+-------------+---------------+
| | No | 155 | - |
| mysql +------------------+-------------+---------------+
| 8.0.11 & 8.0.12 | Yes | 132 | 15% |
+-------------------+------------------+-------------+---------------+
| | No | 25 | - |
| nginx +------------------+-------------+---------------+
| 7.2.4 & 7.2.5 | Yes | 20 | 20% |
+-------------------+------------------+-------------+---------------+
| tomcat | No | 186 | - |
| 10.1.25 & 10.1.26 +------------------+-------------+---------------+
| | Yes | 98 | 48% |
+-------------------+------------------+-------------+---------------+
Co-developed-by: Hongzhen Luo <hongzhen@...ux.alibaba.com>
Signed-off-by: Hongzhen Luo <hongzhen@...ux.alibaba.com>
Signed-off-by: Hongbo Li <lihongbo22@...wei.com>
---
fs/erofs/data.c | 30 +++++++++++++++++++++++-------
fs/erofs/inode.c | 4 ++++
fs/erofs/internal.h | 17 +++++++++++++++++
fs/erofs/ishare.c | 31 +++++++++++++++++++++++++++++++
4 files changed, 75 insertions(+), 7 deletions(-)
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 71e23d91123d..862df0c7ceb7 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -269,6 +269,7 @@ void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty)
struct erofs_iomap_iter_ctx {
struct page *page;
void *base;
+ struct inode *realinode;
};
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
@@ -276,14 +277,15 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
{
struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
struct erofs_iomap_iter_ctx *ctx = iter->private;
- struct super_block *sb = inode->i_sb;
+ struct inode *realinode = ctx ? ctx->realinode : inode;
+ struct super_block *sb = realinode->i_sb;
struct erofs_map_blocks map;
struct erofs_map_dev mdev;
int ret;
map.m_la = offset;
map.m_llen = length;
- ret = erofs_map_blocks(inode, &map);
+ ret = erofs_map_blocks(realinode, &map);
if (ret < 0)
return ret;
@@ -296,7 +298,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
return 0;
}
- if (!(map.m_flags & EROFS_MAP_META) || !erofs_inode_in_metabox(inode)) {
+ if (!(map.m_flags & EROFS_MAP_META) || !erofs_inode_in_metabox(realinode)) {
mdev = (struct erofs_map_dev) {
.m_deviceid = map.m_deviceid,
.m_pa = map.m_pa,
@@ -322,7 +324,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
void *ptr;
ptr = erofs_read_metabuf(&buf, sb, map.m_pa,
- erofs_inode_in_metabox(inode));
+ erofs_inode_in_metabox(realinode));
if (IS_ERR(ptr))
return PTR_ERR(ptr);
iomap->inline_data = ptr;
@@ -379,30 +381,42 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
*/
static int erofs_read_folio(struct file *file, struct folio *folio)
{
+ struct inode *inode = folio_inode(folio);
struct iomap_read_folio_ctx read_ctx = {
.ops = &iomap_bio_read_ops,
.cur_folio = folio,
};
- struct erofs_iomap_iter_ctx iter_ctx = {};
+ struct erofs_iomap_iter_ctx iter_ctx = {
+ .realinode = erofs_ishare_iget(inode),
+ };
+ if (!iter_ctx.realinode)
+ return -EIO;
trace_erofs_read_folio(folio, true);
iomap_read_folio(&erofs_iomap_ops, &read_ctx, &iter_ctx);
+ erofs_ishare_iput(iter_ctx.realinode);
return 0;
}
static void erofs_readahead(struct readahead_control *rac)
{
+ struct inode *inode = rac->mapping->host;
struct iomap_read_folio_ctx read_ctx = {
.ops = &iomap_bio_read_ops,
.rac = rac,
};
- struct erofs_iomap_iter_ctx iter_ctx = {};
+ struct erofs_iomap_iter_ctx iter_ctx = {
+ .realinode = erofs_ishare_iget(inode),
+ };
+ if (!iter_ctx.realinode)
+ return;
trace_erofs_readahead(rac->mapping->host, readahead_index(rac),
readahead_count(rac), true);
iomap_readahead(&erofs_iomap_ops, &read_ctx, &iter_ctx);
+ erofs_ishare_iput(iter_ctx.realinode);
}
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
@@ -423,7 +437,9 @@ static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_sb->s_bdev) {
- struct erofs_iomap_iter_ctx iter_ctx = {};
+ struct erofs_iomap_iter_ctx iter_ctx = {
+ .realinode = inode,
+ };
return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
NULL, 0, &iter_ctx, 0);
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index cb780c095d28..3be4614d1add 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -215,6 +215,10 @@ static int erofs_fill_inode(struct inode *inode)
case S_IFREG:
inode->i_op = &erofs_generic_iops;
inode->i_fop = &erofs_file_fops;
+#ifdef CONFIG_EROFS_FS_PAGE_CACHE_SHARE
+ if (erofs_ishare_fill_inode(inode))
+ inode->i_fop = &erofs_ishare_fops;
+#endif
break;
case S_IFDIR:
inode->i_op = &erofs_dir_iops;
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 93ad34f2b488..37b536eebc3d 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -197,6 +197,19 @@ static inline bool erofs_is_fscache_mode(struct super_block *sb)
!erofs_is_fileio_mode(EROFS_SB(sb)) && !sb->s_bdev;
}
+#if defined(CONFIG_EROFS_FS_INODE_SHARE)
+static inline bool erofs_is_ishare_inode(struct inode *inode)
+{
+ /* we have assumed FS_ONDEMAND is excluded with FS_INODE_SHARE feature */
+ return inode->i_sb->s_type == &erofs_anon_fs_type;
+}
+#else
+static inline bool erofs_is_ishare_inode(struct inode *inode)
+{
+ return false;
+}
+#endif
+
enum {
EROFS_ZIP_CACHE_DISABLED,
EROFS_ZIP_CACHE_READAHEAD,
@@ -563,11 +576,15 @@ int erofs_ishare_init(struct super_block *sb);
void erofs_ishare_exit(struct super_block *sb);
bool erofs_ishare_fill_inode(struct inode *inode);
void erofs_ishare_free_inode(struct inode *inode);
+struct inode *erofs_ishare_iget(struct inode *inode);
+void erofs_ishare_iput(struct inode *realinode);
#else
static inline int erofs_ishare_init(struct super_block *sb) { return 0; }
static inline void erofs_ishare_exit(struct super_block *sb) {}
static inline bool erofs_ishare_fill_inode(struct inode *inode) { return false; }
static inline void erofs_ishare_free_inode(struct inode *inode) {}
+static inline struct inode *erofs_ishare_iget(struct inode *inode) { return inode; }
+static inline void erofs_ishare_iput(struct inode *realinode) {}
#endif // CONFIG_EROFS_FS_PAGE_CACHE_SHARE
long erofs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
diff --git a/fs/erofs/ishare.c b/fs/erofs/ishare.c
index f386efb260da..da735d69f21f 100644
--- a/fs/erofs/ishare.c
+++ b/fs/erofs/ishare.c
@@ -239,3 +239,34 @@ const struct file_operations erofs_ishare_fops = {
.get_unmapped_area = thp_get_unmapped_area,
.splice_read = filemap_splice_read,
};
+
+/*
+ * erofs_ishare_iget - find the backing inode.
+ */
+struct inode *erofs_ishare_iget(struct inode *inode)
+{
+ struct erofs_inode *vi, *vi_dedup;
+ struct inode *realinode;
+
+ if (!erofs_is_ishare_inode(inode))
+ return igrab(inode);
+
+ vi_dedup = EROFS_I(inode);
+ spin_lock(&vi_dedup->lock);
+ /* fall back to all backing inodes */
+ DBG_BUGON(list_empty(&vi_dedup->backing_head));
+ list_for_each_entry(vi, &vi_dedup->backing_head, backing_link) {
+ realinode = igrab(&vi->vfs_inode);
+ if (realinode)
+ break;
+ }
+ spin_unlock(&vi_dedup->lock);
+
+ DBG_BUGON(!realinode);
+ return realinode;
+}
+
+void erofs_ishare_iput(struct inode *realinode)
+{
+ iput(realinode);
+}
--
2.22.0
Powered by blists - more mailing lists