Message-ID: <20220929113002.wjoskqpvzamsxdht@quack3>
Date:   Thu, 29 Sep 2022 13:30:02 +0200
From:   Jan Kara <jack@...e.cz>
To:     Ojaswin Mujoo <ojaswin@...ux.ibm.com>
Cc:     linux-ext4@...r.kernel.org, Theodore Ts'o <tytso@....edu>,
        Ritesh Harjani <riteshh@...ux.ibm.com>,
        linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
        Andreas Dilger <adilger.kernel@...ger.ca>,
        Jan Kara <jack@...e.cz>, rookxu <brookxu.cn@...il.com>,
        Ritesh Harjani <ritesh.list@...il.com>
Subject: Re: [RFC v3 3/8] ext4: Refactor code in ext4_mb_normalize_request()
 and ext4_mb_use_preallocated()

On Tue 27-09-22 14:46:43, Ojaswin Mujoo wrote:
> Change some variable names to be more consistent and
> refactor some of the code to make it easier to read.
> 
> There are no functional changes in this patch.
> 
> Signed-off-by: Ojaswin Mujoo <ojaswin@...ux.ibm.com>
> Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@...il.com>

Looks good, although I have to say I don't find that renaming pa -> tmp_pa
makes the code any more readable. Anyway, feel free to add:

Reviewed-by: Jan Kara <jack@...e.cz>

								Honza

> ---
>  fs/ext4/mballoc.c | 97 ++++++++++++++++++++++++-----------------------
>  1 file changed, 49 insertions(+), 48 deletions(-)
> 
> diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
> index 8be6f8765a6f..84950df709bb 100644
> --- a/fs/ext4/mballoc.c
> +++ b/fs/ext4/mballoc.c
> @@ -4000,7 +4000,8 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
>  	loff_t orig_size __maybe_unused;
>  	ext4_lblk_t start;
>  	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
> -	struct ext4_prealloc_space *pa;
> +	struct ext4_prealloc_space *tmp_pa;
> +	ext4_lblk_t tmp_pa_start, tmp_pa_end;
>  
>  	/* do normalize only data requests, metadata requests
>  	   do not need preallocation */
> @@ -4103,56 +4104,53 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
>  
>  	/* check we don't cross already preallocated blocks */
>  	rcu_read_lock();
> -	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
> -		ext4_lblk_t pa_end;
> -
> -		if (pa->pa_deleted)
> +	list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
> +		if (tmp_pa->pa_deleted)
>  			continue;
> -		spin_lock(&pa->pa_lock);
> -		if (pa->pa_deleted) {
> -			spin_unlock(&pa->pa_lock);
> +		spin_lock(&tmp_pa->pa_lock);
> +		if (tmp_pa->pa_deleted) {
> +			spin_unlock(&tmp_pa->pa_lock);
>  			continue;
>  		}
>  
> -		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
> -						  pa->pa_len);
> +		tmp_pa_start = tmp_pa->pa_lstart;
> +		tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
>  
>  		/* PA must not overlap original request */
> -		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
> -			ac->ac_o_ex.fe_logical < pa->pa_lstart));
> +		BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
> +			ac->ac_o_ex.fe_logical < tmp_pa_start));
>  
>  		/* skip PAs this normalized request doesn't overlap with */
> -		if (pa->pa_lstart >= end || pa_end <= start) {
> -			spin_unlock(&pa->pa_lock);
> +		if (tmp_pa_start >= end || tmp_pa_end <= start) {
> +			spin_unlock(&tmp_pa->pa_lock);
>  			continue;
>  		}
> -		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
> +		BUG_ON(tmp_pa_start <= start && tmp_pa_end >= end);
>  
>  		/* adjust start or end to be adjacent to this pa */
> -		if (pa_end <= ac->ac_o_ex.fe_logical) {
> -			BUG_ON(pa_end < start);
> -			start = pa_end;
> -		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
> -			BUG_ON(pa->pa_lstart > end);
> -			end = pa->pa_lstart;
> +		if (tmp_pa_end <= ac->ac_o_ex.fe_logical) {
> +			BUG_ON(tmp_pa_end < start);
> +			start = tmp_pa_end;
> +		} else if (tmp_pa_start > ac->ac_o_ex.fe_logical) {
> +			BUG_ON(tmp_pa_start > end);
> +			end = tmp_pa_start;
>  		}
> -		spin_unlock(&pa->pa_lock);
> +		spin_unlock(&tmp_pa->pa_lock);
>  	}
>  	rcu_read_unlock();
>  	size = end - start;
>  
>  	/* XXX: extra loop to check we really don't overlap preallocations */
>  	rcu_read_lock();
> -	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
> -		ext4_lblk_t pa_end;
> +	list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
> +		spin_lock(&tmp_pa->pa_lock);
> +		if (tmp_pa->pa_deleted == 0) {
> +			tmp_pa_start = tmp_pa->pa_lstart;
> +			tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
>  
> -		spin_lock(&pa->pa_lock);
> -		if (pa->pa_deleted == 0) {
> -			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
> -							  pa->pa_len);
> -			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
> +			BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
>  		}
> -		spin_unlock(&pa->pa_lock);
> +		spin_unlock(&tmp_pa->pa_lock);
>  	}
>  	rcu_read_unlock();
>  
> @@ -4362,7 +4360,8 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
>  	int order, i;
>  	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
>  	struct ext4_locality_group *lg;
> -	struct ext4_prealloc_space *pa, *cpa = NULL;
> +	struct ext4_prealloc_space *tmp_pa, *cpa = NULL;
> +	ext4_lblk_t tmp_pa_start, tmp_pa_end;
>  	ext4_fsblk_t goal_block;
>  
>  	/* only data can be preallocated */
> @@ -4371,18 +4370,20 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
>  
>  	/* first, try per-file preallocation */
>  	rcu_read_lock();
> -	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
> +	list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
>  
>  		/* all fields in this condition don't change,
>  		 * so we can skip locking for them */
> -		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
> -		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
> -					       EXT4_C2B(sbi, pa->pa_len)))
> +		tmp_pa_start = tmp_pa->pa_lstart;
> +		tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
> +
> +		if (ac->ac_o_ex.fe_logical < tmp_pa_start ||
> +		    ac->ac_o_ex.fe_logical >= tmp_pa_end)
>  			continue;
>  
>  		/* non-extent files can't have physical blocks past 2^32 */
>  		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
> -		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
> +		    (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
>  		     EXT4_MAX_BLOCK_FILE_PHYS)) {
>  			/*
>  			 * Since PAs don't overlap, we won't find any
> @@ -4392,16 +4393,16 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
>  		}
>  
>  		/* found preallocated blocks, use them */
> -		spin_lock(&pa->pa_lock);
> -		if (pa->pa_deleted == 0 && pa->pa_free) {
> -			atomic_inc(&pa->pa_count);
> -			ext4_mb_use_inode_pa(ac, pa);
> -			spin_unlock(&pa->pa_lock);
> +		spin_lock(&tmp_pa->pa_lock);
> +		if (tmp_pa->pa_deleted == 0 && tmp_pa->pa_free) {
> +			atomic_inc(&tmp_pa->pa_count);
> +			ext4_mb_use_inode_pa(ac, tmp_pa);
> +			spin_unlock(&tmp_pa->pa_lock);
>  			ac->ac_criteria = 10;
>  			rcu_read_unlock();
>  			return true;
>  		}
> -		spin_unlock(&pa->pa_lock);
> +		spin_unlock(&tmp_pa->pa_lock);
>  	}
>  	rcu_read_unlock();
>  
> @@ -4425,16 +4426,16 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
>  	 */
>  	for (i = order; i < PREALLOC_TB_SIZE; i++) {
>  		rcu_read_lock();
> -		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
> +		list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
>  					pa_inode_list) {
> -			spin_lock(&pa->pa_lock);
> -			if (pa->pa_deleted == 0 &&
> -					pa->pa_free >= ac->ac_o_ex.fe_len) {
> +			spin_lock(&tmp_pa->pa_lock);
> +			if (tmp_pa->pa_deleted == 0 &&
> +					tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
>  
>  				cpa = ext4_mb_check_group_pa(goal_block,
> -								pa, cpa);
> +								tmp_pa, cpa);
>  			}
> -			spin_unlock(&pa->pa_lock);
> +			spin_unlock(&tmp_pa->pa_lock);
>  		}
>  		rcu_read_unlock();
>  	}
> -- 
> 2.31.1
> 
-- 
Jan Kara <jack@...e.com>
SUSE Labs, CR
