Message-ID: <20240903122950.eugl53tler4n52ao@AALNPWDAGOMEZ1.aal.scsc.local>
Date: Tue, 3 Sep 2024 14:29:50 +0200
From: Daniel Gomez <da.gomez@...sung.com>
To: <brauner@...nel.org>
CC: "Pankaj Raghav (Samsung)" <kernel@...kajraghav.com>,
<akpm@...ux-foundation.org>, <chandan.babu@...cle.com>,
<linux-fsdevel@...r.kernel.org>, <djwong@...nel.org>, <hare@...e.de>,
<gost.dev@...sung.com>, <linux-xfs@...r.kernel.org>, <hch@....de>,
<david@...morbit.com>, Zi Yan <ziy@...dia.com>,
<yang@...amperecomputing.com>, <linux-kernel@...r.kernel.org>,
<linux-mm@...ck.org>, <willy@...radead.org>, <john.g.garry@...cle.com>,
<cl@...amperecomputing.com>, <p.raghav@...sung.com>, <mcgrof@...nel.org>,
<ryan.roberts@....com>, Dave Chinner <dchinner@...hat.com>
Subject: Re: [PATCH v13 10/10] xfs: enable block size larger than page size
support.

On Thu, Aug 22, 2024 at 03:50:18PM +0200, Pankaj Raghav (Samsung) wrote:
> From: Pankaj Raghav <p.raghav@...sung.com>
>
> The page cache now has the ability to enforce a minimum order when
> allocating a folio, which is a prerequisite for supporting block
> sizes larger than the page size.
>
> Signed-off-by: Pankaj Raghav <p.raghav@...sung.com>
> Signed-off-by: Luis Chamberlain <mcgrof@...nel.org>
> Reviewed-by: Darrick J. Wong <djwong@...nel.org>
> Reviewed-by: Dave Chinner <dchinner@...hat.com>
> ---
> fs/xfs/libxfs/xfs_ialloc.c | 5 +++++
> fs/xfs/libxfs/xfs_shared.h | 3 +++
> fs/xfs/xfs_icache.c | 6 ++++--
> fs/xfs/xfs_mount.c | 1 -
> fs/xfs/xfs_super.c | 28 ++++++++++++++++++++--------
> include/linux/pagemap.h | 13 +++++++++++++
> 6 files changed, 45 insertions(+), 11 deletions(-)
>
> diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
> index 0af5b7a33d055..1921b689888b8 100644
> --- a/fs/xfs/libxfs/xfs_ialloc.c
> +++ b/fs/xfs/libxfs/xfs_ialloc.c
> @@ -3033,6 +3033,11 @@ xfs_ialloc_setup_geometry(
> igeo->ialloc_align = mp->m_dalign;
> else
> igeo->ialloc_align = 0;
> +
> + if (mp->m_sb.sb_blocksize > PAGE_SIZE)
> + igeo->min_folio_order = mp->m_sb.sb_blocklog - PAGE_SHIFT;
> + else
> + igeo->min_folio_order = 0;
> }
>
> /* Compute the location of the root directory inode that is laid out by mkfs. */
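
As a worked example of the order math above (a minimal userspace
sketch I wrote while reviewing, not kernel code; PAGE_SHIFT is
hardcoded to 12 here purely for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned int page_shift = 12;	/* 4k pages */
		unsigned int sb_blocklog = 16;	/* 64k blocks */
		unsigned int min_folio_order = 0;

		if (sb_blocklog > page_shift)
			min_folio_order = sb_blocklog - page_shift;

		/* 64k blocks on 4k pages -> order 4, i.e. 16-page folios */
		printf("min_folio_order = %u\n", min_folio_order);
		return 0;
	}

So every folio backing such a mapping is at least one filesystem block
in size, which is what the rest of the series relies on.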
> diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
> index 2f7413afbf46c..33b84a3a83ff6 100644
> --- a/fs/xfs/libxfs/xfs_shared.h
> +++ b/fs/xfs/libxfs/xfs_shared.h
> @@ -224,6 +224,9 @@ struct xfs_ino_geometry {
> /* precomputed value for di_flags2 */
> uint64_t new_diflags2;
>
> + /* minimum folio order of a page cache allocation */
> + unsigned int min_folio_order;
> +
> };
>
> #endif /* __XFS_SHARED_H__ */
> diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
> index cf629302d48e7..0fcf235e50235 100644
> --- a/fs/xfs/xfs_icache.c
> +++ b/fs/xfs/xfs_icache.c
> @@ -88,7 +88,8 @@ xfs_inode_alloc(
>
> /* VFS doesn't initialise i_mode! */
> VFS_I(ip)->i_mode = 0;
> - mapping_set_large_folios(VFS_I(ip)->i_mapping);
> + mapping_set_folio_min_order(VFS_I(ip)->i_mapping,
> + M_IGEO(mp)->min_folio_order);
>
> XFS_STATS_INC(mp, vn_active);
> ASSERT(atomic_read(&ip->i_pincount) == 0);
> @@ -325,7 +326,8 @@ xfs_reinit_inode(
> inode->i_uid = uid;
> inode->i_gid = gid;
> inode->i_state = state;
> - mapping_set_large_folios(inode->i_mapping);
> + mapping_set_folio_min_order(inode->i_mapping,
> + M_IGEO(mp)->min_folio_order);
> return error;
> }
>
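For readers skimming: as I read the earlier patches in this series,
mapping_set_large_folios() is just the min-order-0 case, so these two
hunks raise the floor rather than change behaviour for bs <= ps
filesystems. Conceptually (simplified, in terms of the
mapping_set_folio_order_range() helper introduced earlier in the
series):

	/* old: allow any folio order from 0 upwards */
	mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);

	/* new: never allocate a folio smaller than one fs block */
	mapping_set_folio_order_range(mapping, min_order, MAX_PAGECACHE_ORDER);
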
> diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
> index 3949f720b5354..c6933440f8066 100644
> --- a/fs/xfs/xfs_mount.c
> +++ b/fs/xfs/xfs_mount.c
> @@ -134,7 +134,6 @@ xfs_sb_validate_fsb_count(
> {
> uint64_t max_bytes;
>
> - ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
> ASSERT(sbp->sb_blocklog >= BBSHIFT);
>
> if (check_shl_overflow(nblocks, sbp->sb_blocklog, &max_bytes))
> diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
> index 210481b03fdb4..8cd76a01b543f 100644
> --- a/fs/xfs/xfs_super.c
> +++ b/fs/xfs/xfs_super.c
> @@ -1638,16 +1638,28 @@ xfs_fs_fill_super(
> goto out_free_sb;
> }
>
> - /*
> - * Until this is fixed only page-sized or smaller data blocks work.
> - */
> if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
> - xfs_warn(mp,
> - "File system with blocksize %d bytes. "
> - "Only pagesize (%ld) or less will currently work.",
> + size_t max_folio_size = mapping_max_folio_size_supported();
> +
> + if (!xfs_has_crc(mp)) {
> + xfs_warn(mp,
> +"V4 Filesystem with blocksize %d bytes. Only pagesize (%ld) or less is supported.",
> mp->m_sb.sb_blocksize, PAGE_SIZE);
> - error = -ENOSYS;
> - goto out_free_sb;
> + error = -ENOSYS;
> + goto out_free_sb;
> + }
> +
> + if (mp->m_sb.sb_blocksize > max_folio_size) {
> + xfs_warn(mp,
> +"block size (%u bytes) not supported; Only block size (%ld) or less is supported",

This small fix [1] is missing in the linux-next and vfs trees. Could
it be picked up?

[1] https://lore.kernel.org/all/Zs_vIaw8ESLN2TwY@casper.infradead.org/

> + mp->m_sb.sb_blocksize, max_folio_size);
> + error = -ENOSYS;
> + goto out_free_sb;
> + }
> +
> + xfs_warn(mp,
> +"EXPERIMENTAL: V5 Filesystem with Large Block Size (%d bytes) enabled.",
> + mp->m_sb.sb_blocksize);
> }
>
> /* Ensure this filesystem fits in the page cache limits */
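
The checks read well. For my own understanding I condensed the new
mount-time ladder into the following (a sketch only; the helper name
xfs_validate_blocksize() is made up and not part of this patch):

	/* Hypothetical condensed form of the checks above. */
	static int xfs_validate_blocksize(unsigned int blocksize, bool has_crc)
	{
		size_t max_folio_size = mapping_max_folio_size_supported();

		if (blocksize <= PAGE_SIZE)
			return 0;		/* always supported */
		if (!has_crc)
			return -ENOSYS;		/* V4: bs > ps not supported */
		if (blocksize > max_folio_size)
			return -ENOSYS;		/* beyond page cache limits */
		return 0;			/* experimental bs > ps */
	}
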
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index 4cc170949e9c0..55b254d951da7 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -374,6 +374,19 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
> #define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1)
> #define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
>
> +/*
> + * mapping_max_folio_size_supported() - Check the max folio size supported
> + *
> + * The filesystem should call this function at mount time if there is a
> + * requirement on the folio mapping size in the page cache.
> + */
> +static inline size_t mapping_max_folio_size_supported(void)
> +{
> + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
> + return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
> + return PAGE_SIZE;
> +}
> +
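To put numbers on mapping_max_folio_size_supported(): on a typical
x86_64 config with THP enabled, PAGE_SHIFT is 12 and, if I read
MAX_PAGECACHE_ORDER right, it resolves to the PMD order (9), so:

	/* Assuming PAGE_SHIFT == 12 and MAX_PAGECACHE_ORDER == 9: */
	size_t max = 1U << (12 + 9);	/* 2 MiB max folio size */

i.e. block sizes up to 2M would pass the new xfs_fs_fill_super()
check, and anything larger (or any bs > ps mount without
CONFIG_TRANSPARENT_HUGEPAGE) is rejected with -ENOSYS.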
> /*
> * mapping_set_folio_order_range() - Set the orders supported by a file.
> * @mapping: The address space of the file.
> --
> 2.44.1
>