[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <87d14908-dc9d-7ce4-c594-b28a9991efbb@gmx.com>
Date: Tue, 3 Jan 2023 13:47:43 +0800
From: Qu Wenruo <quwenruo.btrfs@....com>
To: zys.zljxml@...il.com, clm@...com, josef@...icpanda.com,
dsterba@...e.com
Cc: linux-btrfs@...r.kernel.org, linux-kernel@...r.kernel.org,
Yushan Zhou <katrinzhou@...cent.com>
Subject: Re: [PATCH v3] btrfs: use PAGE_{ALIGN, ALIGNED, ALIGN_DOWN} macro
On 2023/1/3 13:11, zys.zljxml@...il.com wrote:
> From: Yushan Zhou <katrinzhou@...cent.com>
>
> The header file linux/mm.h provides PAGE_ALIGN, PAGE_ALIGNED,
> PAGE_ALIGN_DOWN macros. Use these macros to make code more
> concise.
Is there any benefit from the change?
In fact, PAGE_ALIGN()/PAGE_ALIGNED() are just using the same
ALIGN()/IS_ALIGNED() macros.
Thus I don't think your change is useful, not to mention it's
going to introduce confusion and extra effort.
I'm completely fine with regular ALIGN()/IS_ALIGNED() usage with PAGE_SIZE.
Thanks,
Qu
>
> Signed-off-by: Yushan Zhou <katrinzhou@...cent.com>
> ---
> fs/btrfs/compression.c | 2 +-
> fs/btrfs/defrag.c | 2 +-
> fs/btrfs/inode.c | 5 ++---
> fs/btrfs/lzo.c | 2 +-
> fs/btrfs/relocation.c | 2 +-
> fs/btrfs/send.c | 4 ++--
> 6 files changed, 8 insertions(+), 9 deletions(-)
>
> diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
> index 5122ca79f7ea..4a5aeb8dd479 100644
> --- a/fs/btrfs/compression.c
> +++ b/fs/btrfs/compression.c
> @@ -1609,7 +1609,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
> index_end = end >> PAGE_SHIFT;
>
> /* Don't miss unaligned end */
> - if (!IS_ALIGNED(end, PAGE_SIZE))
> + if (!PAGE_ALIGNED(end))
> index_end++;
>
> curr_sample_pos = 0;
> diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
> index 0a3c261b69c9..130de66839c1 100644
> --- a/fs/btrfs/defrag.c
> +++ b/fs/btrfs/defrag.c
> @@ -997,7 +997,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
> }
>
> #define CLUSTER_SIZE (SZ_256K)
> -static_assert(IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
> +static_assert(PAGE_ALIGNED(CLUSTER_SIZE));
>
> /*
> * Defrag one contiguous target range.
> diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
> index 8bcad9940154..ff3b1ab6a696 100644
> --- a/fs/btrfs/inode.c
> +++ b/fs/btrfs/inode.c
> @@ -10993,9 +10993,8 @@ static int btrfs_add_swap_extent(struct swap_info_struct *sis,
> return 0;
>
> max_pages = sis->max - bsi->nr_pages;
> - first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
> - next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
> - PAGE_SIZE) >> PAGE_SHIFT;
> + first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
> + next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;
>
> if (first_ppage >= next_ppage)
> return 0;
> diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
> index d5e78cbc8fbc..71f6d8302d50 100644
> --- a/fs/btrfs/lzo.c
> +++ b/fs/btrfs/lzo.c
> @@ -280,7 +280,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
> }
>
> /* Check if we have reached page boundary */
> - if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
> + if (PAGE_ALIGNED(cur_in)) {
> put_page(page_in);
> page_in = NULL;
> }
> diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
> index 31ec4a7658ce..ef13a9d4e370 100644
> --- a/fs/btrfs/relocation.c
> +++ b/fs/btrfs/relocation.c
> @@ -2825,7 +2825,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
> *
> * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
> */
> - if (!IS_ALIGNED(i_size, PAGE_SIZE)) {
> + if (!PAGE_ALIGNED(i_size)) {
> struct address_space *mapping = inode->vfs_inode.i_mapping;
> struct btrfs_fs_info *fs_info = inode->root->fs_info;
> const u32 sectorsize = fs_info->sectorsize;
> diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
> index e65e6b6600a7..b4cbd74fefce 100644
> --- a/fs/btrfs/send.c
> +++ b/fs/btrfs/send.c
> @@ -5635,7 +5635,7 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
> * boundary in the send buffer. This means that there may be a gap
> * between the beginning of the command and the file data.
> */
> - data_offset = ALIGN(sctx->send_size, PAGE_SIZE);
> + data_offset = PAGE_ALIGN(sctx->send_size);
> if (data_offset > sctx->send_max_size ||
> sctx->send_max_size - data_offset < disk_num_bytes) {
> ret = -EOVERFLOW;
> @@ -5759,7 +5759,7 @@ static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path,
> sent += size;
> }
>
> - if (sctx->clean_page_cache && IS_ALIGNED(end, PAGE_SIZE)) {
> + if (sctx->clean_page_cache && PAGE_ALIGNED(end)) {
> /*
> * Always operate only on ranges that are a multiple of the page
> * size. This is not only to prevent zeroing parts of a page in
Powered by blists - more mailing lists