Message-ID: <d70e95b2-bf1b-4ce7-b57e-9442b484ddf0@gmx.com>
Date: Sun, 29 Jun 2025 14:53:52 +0930
From: Qu Wenruo <quwenruo.btrfs@....com>
To: George Hu <integral@...hlinux.org>, Chris Mason <clm@...com>,
Josef Bacik <josef@...icpanda.com>, linux-btrfs@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: David Sterba <dsterba@...e.com>
Subject: Re: [PATCH] btrfs: use in_range() macro in volumes.c
On 2025/6/29 14:34, George Hu wrote:
> Replace "if (start <= val && val < (start + len))" in volumes.c
> with in_range() macro to improve code readability.
From the in_range() comment:
* Decide for yourself
* which behaviour you want, or prove that start + len never overflow.
* Do not blindly replace one form with the other.
So please explain in the commit message why those conversions are safe.
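
To make the difference concrete, here is a minimal user-space sketch (not
kernel code; the helper names are made up for illustration). It assumes
in_range() boils down to the "(val - start) < len" form from
include/linux/minmax.h. When start + len wraps past U64_MAX, the two
forms disagree:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The open-coded pattern being replaced; start + len may wrap. */
static bool open_coded(uint64_t val, uint64_t start, uint64_t len)
{
	return start <= val && val < start + len;
}

/* The (val - start) < len form in_range() is built on; start + len is never computed. */
static bool wrap_safe(uint64_t val, uint64_t start, uint64_t len)
{
	return (val - start) < len;
}

int main(void)
{
	/* start + len wraps past U64_MAX, so the two checks give different answers. */
	uint64_t start = 0xffffffffffffff00ULL;
	uint64_t len   = 0x200ULL;
	uint64_t val   = 0xffffffffffffff80ULL;

	printf("open-coded: %d\n", open_coded(val, start, len)); /* prints 0 */
	printf("wrap-safe:  %d\n", wrap_safe(val, start, len));  /* prints 1 */
	return 0;
}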
>
> Signed-off-by: George Hu <integral@...hlinux.org>
> ---
> fs/btrfs/volumes.c | 18 +++++++-----------
> 1 file changed, 7 insertions(+), 11 deletions(-)
>
> diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
> index f475b4b7c457..c5479dce0cb2 100644
> --- a/fs/btrfs/volumes.c
> +++ b/fs/btrfs/volumes.c
> @@ -3198,7 +3198,7 @@ struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
> return ERR_PTR(-EINVAL);
> }
>
> - if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
E.g. in this case, the tree-checker should have already made sure that
map->start + map->chunk_len does not overflow, so this conversion should be safe.
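
If it helps the commit message, the following purely illustrative sketch
(not the actual tree-checker code; chunk_range_valid() is a made-up name)
shows the kind of up-front validation that rules the overflow out, so a
later in_range() conversion cannot change behaviour:

#include <stdbool.h>
#include <stdint.h>

/* Reject any chunk whose start + length would wrap, before any range test runs. */
static bool chunk_range_valid(uint64_t start, uint64_t len)
{
	uint64_t end;

	/* __builtin_add_overflow() (GCC/Clang) returns true if start + len wraps. */
	if (__builtin_add_overflow(start, len, &end))
		return false;

	/* A zero-length chunk would make any range test meaningless. */
	return len != 0;
}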
Thanks,
Qu
> + if (unlikely(!in_range(logical, map->start, map->chunk_len))) {
> btrfs_crit(fs_info,
> "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
> logical, logical + length, map->start,
> @@ -3841,7 +3841,7 @@ static bool chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_of
> else
> user_thresh_max = mult_perc(cache->length, bargs->usage_max);
>
> - if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
> + if (in_range(chunk_used, user_thresh_min, user_thresh_max))
> ret = false;
>
> btrfs_put_block_group(cache);
> @@ -6211,9 +6211,7 @@ struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
> if (i < sub_stripes)
> stripes[i].length -= stripe_offset;
>
> - if (stripe_index >= last_stripe &&
> - stripe_index <= (last_stripe +
> - sub_stripes - 1))
> + if (in_range(stripe_index, last_stripe, sub_stripes))
> stripes[i].length -= stripe_end_offset;
>
> if (i == sub_stripes - 1)
> @@ -7047,11 +7045,10 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
> map = btrfs_find_chunk_map(fs_info, logical, 1);
>
> /* already mapped? */
> - if (map && map->start <= logical && map->start + map->chunk_len > logical) {
> - btrfs_free_chunk_map(map);
> - return 0;
> - } else if (map) {
> + if (map) {
> btrfs_free_chunk_map(map);
> + if (in_range(logical, map->start, map->chunk_len))
> + return 0;
> }
>
> map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS);
> @@ -8239,8 +8236,7 @@ static void map_raid56_repair_block(struct btrfs_io_context *bioc,
> u64 stripe_start = bioc->full_stripe_logical +
> btrfs_stripe_nr_to_offset(i);
>
> - if (logical >= stripe_start &&
> - logical < stripe_start + BTRFS_STRIPE_LEN)
> + if (in_range(logical, stripe_start, BTRFS_STRIPE_LEN))
> break;
> }
> ASSERT(i < data_stripes, "i=%d data_stripes=%d", i, data_stripes);