Message-ID: <CAPjX3FcZ6TJZnHNf3sm00F49BVsDzQaZr5fJHMXRUXne3gLZ2w@mail.gmail.com>
Date: Wed, 5 Mar 2025 08:02:28 +0100
From: Daniel Vacek <neelx@...e.com>
To: Qu Wenruo <quwenruo.btrfs@....com>
Cc: Chris Mason <clm@...com>, Josef Bacik <josef@...icpanda.com>, David Sterba <dsterba@...e.com>,
linux-btrfs@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] btrfs/defrag: implement compression levels
On Tue, 4 Mar 2025 at 22:31, Qu Wenruo <quwenruo.btrfs@....com> wrote:
>
> The feature itself looks good to me.
>
> Although not sure if a blank commit message is fine for this case.
Sigh, that's a mistake. I'll send a v2 with a few sentences.
> On 2025/3/5 03:44, Daniel Vacek wrote:
> > Signed-off-by: Daniel Vacek <neelx@...e.com>
> > ---
> > fs/btrfs/btrfs_inode.h | 2 ++
> > fs/btrfs/defrag.c | 22 +++++++++++++++++-----
> > fs/btrfs/fs.h | 2 +-
> > fs/btrfs/inode.c | 10 +++++++---
> > include/uapi/linux/btrfs.h | 10 +++++++++-
> > 5 files changed, 36 insertions(+), 10 deletions(-)
> >
> > diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
> > index aa1f55cd81b79..5ee9da0054a74 100644
> > --- a/fs/btrfs/btrfs_inode.h
> > +++ b/fs/btrfs/btrfs_inode.h
> > @@ -145,6 +145,7 @@ struct btrfs_inode {
> > * different from prop_compress and takes precedence if set.
> > */
> > u8 defrag_compress;
> > + s8 defrag_compress_level;
> >
> > /*
> > * Lock for counters and all fields used to determine if the inode is in
> > diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
> > index 968dae9539482..03a0287a78ea0 100644
> > --- a/fs/btrfs/defrag.c
> > +++ b/fs/btrfs/defrag.c
> > @@ -1363,6 +1363,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
> > u64 last_byte;
> > bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS);
> > int compress_type = BTRFS_COMPRESS_ZLIB;
> > + int compress_level = 0;
> > int ret = 0;
> > u32 extent_thresh = range->extent_thresh;
> > pgoff_t start_index;
> > @@ -1376,10 +1377,19 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
> > return -EINVAL;
> >
> > if (do_compress) {
> > - if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
> > - return -EINVAL;
> > - if (range->compress_type)
> > - compress_type = range->compress_type;
> > + if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS_LEVEL) {
> > + if (range->compress.type >= BTRFS_NR_COMPRESS_TYPES)
> > + return -EINVAL;
> > + if (range->compress.type) {
> > + compress_type = range->compress.type;
> > + compress_level = range->compress.level;
> > + }
>
> I am not familiar with the compress level, but
> btrfs_compress_set_level() does extra clamping; maybe we want to do
> that here too?
The clamping is intentionally left for later. There's no need to do it
at this point, and the code stays simpler and compression type/method
agnostic.
> > + } else {
> > + if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
> > + return -EINVAL;
> > + if (range->compress_type)
> > + compress_type = range->compress_type;
> > + }
> > }
> >
> > if (extent_thresh == 0)
> > @@ -1430,8 +1440,10 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
> > btrfs_inode_unlock(BTRFS_I(inode), 0);
> > break;
> > }
> > - if (do_compress)
> > + if (do_compress) {
> > BTRFS_I(inode)->defrag_compress = compress_type;
> > + BTRFS_I(inode)->defrag_compress_level = compress_level;
> > + }
> > ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
> > cluster_end + 1 - cur, extent_thresh,
> > newer_than, do_compress, &sectors_defragged,
> > diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
> > index be6d5a24bd4e6..2dae7ffd37133 100644
> > --- a/fs/btrfs/fs.h
> > +++ b/fs/btrfs/fs.h
> > @@ -485,7 +485,7 @@ struct btrfs_fs_info {
> > u64 last_trans_log_full_commit;
> > unsigned long long mount_opt;
> >
> > - unsigned long compress_type:4;
> > + int compress_type;
> > int compress_level;
> > u32 commit_interval;
> > /*
> > diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
> > index fa04b027d53ac..156a9d4603391 100644
> > --- a/fs/btrfs/inode.c
> > +++ b/fs/btrfs/inode.c
> > @@ -925,6 +925,7 @@ static void compress_file_range(struct btrfs_work *work)
> > unsigned int poff;
> > int i;
> > int compress_type = fs_info->compress_type;
> > + int compress_level = fs_info->compress_level;
> >
> > inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
> >
> > @@ -1007,13 +1008,15 @@ static void compress_file_range(struct btrfs_work *work)
> > goto cleanup_and_bail_uncompressed;
> > }
> >
> > - if (inode->defrag_compress)
> > + if (inode->defrag_compress) {
> > compress_type = inode->defrag_compress;
> > - else if (inode->prop_compress)
> > + compress_level = inode->defrag_compress_level;
> > + } else if (inode->prop_compress) {
> > compress_type = inode->prop_compress;
> > + }
> >
> > /* Compression level is applied here. */
> > - ret = btrfs_compress_folios(compress_type, fs_info->compress_level,
> > + ret = btrfs_compress_folios(compress_type, compress_level,
> > mapping, start, folios, &nr_folios, &total_in,
> > &total_compressed);
> > if (ret)
> > diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
> > index d3b222d7af240..3540d33d6f50c 100644
> > --- a/include/uapi/linux/btrfs.h
> > +++ b/include/uapi/linux/btrfs.h
> > @@ -615,7 +615,9 @@ struct btrfs_ioctl_clone_range_args {
> > */
> > #define BTRFS_DEFRAG_RANGE_COMPRESS 1
> > #define BTRFS_DEFRAG_RANGE_START_IO 2
> > +#define BTRFS_DEFRAG_RANGE_COMPRESS_LEVEL 4
> > #define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \
> > + BTRFS_DEFRAG_RANGE_COMPRESS_LEVEL | \
> > BTRFS_DEFRAG_RANGE_START_IO)
> >
> > struct btrfs_ioctl_defrag_range_args {
> > @@ -643,7 +645,13 @@ struct btrfs_ioctl_defrag_range_args {
> > * for this defrag operation. If unspecified, zlib will
> > * be used
> > */
> > - __u32 compress_type;
> > + union {
> > + __u32 compress_type;
> > + struct {
> > + __u8 type;
> > + __s8 level;
> > + } compress;
> > + };
> >
> > /* spare for later */
> > __u32 unused[4];
>
> We have enough space left here; although u32 is overkill for
> compress_type, using the unused space for a new s8/s16/s32 member
> should be fine.
That is what I did originally, but after discussing it with Dave, he
suggested this solution.
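For context, a rough userspace sketch of how the union would be
consumed, assuming a uapi header that already carries this patch (the
zstd type value 3 and level 7 are just example inputs; error handling
omitted):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/btrfs.h>

    /* Hypothetical caller: recompress the file behind fd as zstd at an
     * explicit level via BTRFS_IOC_DEFRAG_RANGE. */
    static int defrag_with_level(int fd)
    {
            struct btrfs_ioctl_defrag_range_args args;

            memset(&args, 0, sizeof(args));
            args.len = (__u64)-1;                   /* whole file */
            args.flags = BTRFS_DEFRAG_RANGE_COMPRESS |
                         BTRFS_DEFRAG_RANGE_COMPRESS_LEVEL;
            args.compress.type = 3;                 /* zstd in the kernel's numbering */
            args.compress.level = 7;                /* requested level */

            return ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &args);
    }

Old binaries that only know compress_type keep working since the union
aliases the same four bytes and the new flag is opt-in.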
>
> Thanks,
> Qu