Message-ID: <Y4+q+vYuqqM0RKOT@casper.infradead.org>
Date:   Tue, 6 Dec 2022 20:50:02 +0000
From:   Matthew Wilcox <willy@...radead.org>
To:     "Vishal Moola (Oracle)" <vishal.moola@...il.com>
Cc:     linux-mm@...ck.org, tytso@....edu, linux-ext4@...r.kernel.org,
        linux-kernel@...r.kernel.org, akpm@...ux-foundation.org
Subject: Re: [PATCH mm-unstable] ext4: Convert mext_page_double_lock() to
 mext_folio_double_lock()

On Tue, Dec 06, 2022 at 12:41:15PM -0800, Vishal Moola (Oracle) wrote:
> Converts mext_page_double_lock() to use folios. This change saves
> 146 bytes of kernel text and removes 3 calls to compound_head().

I think it actually removes more than three ...

>  	flags = memalloc_nofs_save();
> -	page[0] = grab_cache_page_write_begin(mapping[0], index1);
> -	if (!page[0]) {
> +	folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
> +			mapping_gfp_mask(mapping[0]));

one

> +	if (!folio[0]) {
>  		memalloc_nofs_restore(flags);
>  		return -ENOMEM;
>  	}
>  
> -	page[1] = grab_cache_page_write_begin(mapping[1], index2);
> +	folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
> +			mapping_gfp_mask(mapping[1]));

two

>  	memalloc_nofs_restore(flags);
> -	if (!page[1]) {
> -		unlock_page(page[0]);
> -		put_page(page[0]);
> +	if (!folio[1]) {
> +		folio_unlock(folio[0]);
> +		folio_put(folio[0]);

four

>  		return -ENOMEM;
>  	}
>  	/*
> -	 * grab_cache_page_write_begin() may not wait on page's writeback if
> +	 * __filemap_get_folio() may not wait on folio's writeback if
>  	 * BDI not demand that. But it is reasonable to be very conservative
> -	 * here and explicitly wait on page's writeback
> +	 * here and explicitly wait on folio's writeback
>  	 */
> -	wait_on_page_writeback(page[0]);
> -	wait_on_page_writeback(page[1]);
> +	folio_wait_writeback(folio[0]);
> +	folio_wait_writeback(folio[1]);

six

>  	if (inode1 > inode2)
> -		swap(page[0], page[1]);
> +		swap(folio[0], folio[1]);
>  
>  	return 0;
>  }
> @@ -252,7 +255,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
>  		     int block_len_in_page, int unwritten, int *err)
>  {
>  	struct inode *orig_inode = file_inode(o_filp);
> -	struct page *pagep[2] = {NULL, NULL};
>  	struct folio *folio[2] = {NULL, NULL};
>  	handle_t *handle;
>  	ext4_lblk_t orig_blk_offset, donor_blk_offset;
> @@ -303,8 +305,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
>  
>  	replaced_size = data_size;
>  
> -	*err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
> -				     donor_page_offset, pagep);
> +	*err = mext_folio_double_lock(orig_inode, donor_inode, orig_page_offset,
> +				     donor_page_offset, folio);
>  	if (unlikely(*err < 0))
>  		goto stop_journal;
>  	/*
> @@ -314,8 +316,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
>  	 * hold page's lock, if it is still the case data copy is not
>  	 * necessary, just swap data blocks between orig and donor.
>  	 */
> -	folio[0] = page_folio(pagep[0]);
> -	folio[1] = page_folio(pagep[1]);

eight.

Three are inline, which makes sense for the 146 bytes, but we're also
removing out-of-line calls in addition to the inline ones.
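
(For anyone counting along: the compound_head() calls are hidden inside
the page-based wrappers themselves.  Roughly, and paraphrasing the mm
helpers rather than quoting the exact kernel source, each page API is a
shim over the folio one, with page_folio() supplying the
compound_head():

	/* sketch only, not verbatim kernel code */
	static inline void put_page(struct page *page)
	{
		folio_put(page_folio(page));		/* compound_head() */
	}

	void unlock_page(struct page *page)
	{
		folio_unlock(page_folio(page));		/* compound_head() */
	}

	void wait_on_page_writeback(struct page *page)
	{
		folio_wait_writeback(page_folio(page));	/* compound_head() */
	}

So every call site converted to take the folio directly drops one of
those hops, as do the two open-coded page_folio() calls removed at the
end of the hunk above.)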

Anyway, whether the description is updated or not, this looks good to me.

Reviewed-by: Matthew Wilcox (Oracle) <willy@...radead.org>
