Message-ID: <8cbfc1ff-f86d-f2cc-d37e-ef874f4600bc@gmx.com>
Date:   Fri, 17 Jun 2022 21:09:47 +0800
From:   Qu Wenruo <quwenruo.btrfs@....com>
To:     "Fabio M. De Francesco" <fmdefrancesco@...il.com>,
        Chris Mason <clm@...com>, Josef Bacik <josef@...icpanda.com>,
        David Sterba <dsterba@...e.com>,
        Nick Terrell <terrelln@...com>,
        Chris Down <chris@...isdown.name>,
        Filipe Manana <fdmanana@...e.com>, Qu Wenruo <wqu@...e.com>,
        Nikolay Borisov <nborisov@...e.com>,
        Gabriel Niebler <gniebler@...e.com>,
        Ira Weiny <ira.weiny@...el.com>, linux-btrfs@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: Re: [RFC PATCH v2 3/3] btrfs: Use kmap_local_page() on "in_page" in
 zlib_compress_pages()



On 2022/6/17 20:05, Fabio M. De Francesco wrote:
> The use of kmap() is being deprecated in favor of kmap_local_page(). With
> kmap_local_page(), the mapping is per thread, CPU local and not globally
> visible.
>
> Therefore, use kmap_local_page() / kunmap_local() on "in_page" in
> zlib_compress_pages() because in this function the mappings are per thread
> and are not visible in other contexts.
>
> Use an array-based stack to record the order of mappings and
> unmappings, in order to comply with the rules for nesting local
> mappings.

Could you expand a bit on the "rules of nesting local mappings" part?
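
For reference, the rule in question as I understand it: kmap_local_page()
mappings nest like a per-thread stack, so they have to be undone in the
reverse order they were taken. A minimal sketch, not taken from the patch
(copy_page_contents() is a made-up name, assuming the usual
linux/highmem.h helpers):

	/*
	 * Minimal sketch of the nesting rule: local mappings behave like
	 * a per-thread stack, so unmap in the reverse order of mapping.
	 */
	static void copy_page_contents(struct page *dst, struct page *src)
	{
		char *d = kmap_local_page(dst);
		char *s = kmap_local_page(src);

		memcpy(d, s, PAGE_SIZE);

		kunmap_local(s);	/* mapped last, unmapped first */
		kunmap_local(d);
	}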

>
> Tested with xfstests on a QEMU + KVM 32-bit VM with 4GB of RAM and
> HIGHMEM64G enabled. This patch passes 26/26 tests of the "compress"
> group.
>
> Suggested-by: Ira Weiny <ira.weiny@...el.com>
> Signed-off-by: Fabio M. De Francesco <fmdefrancesco@...il.com>
> ---
>   fs/btrfs/zlib.c | 65 ++++++++++++++++++++++++++++++++++++++++---------
>   1 file changed, 53 insertions(+), 12 deletions(-)
>
> diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
> index c7c69ce4a1a9..1f16014e8ff3 100644
> --- a/fs/btrfs/zlib.c
> +++ b/fs/btrfs/zlib.c
> @@ -99,6 +99,8 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
>   	int ret;
>   	char *data_in = NULL;
>   	char *cpage_out = NULL;
> +	char mstack[2];
> +	int sind = 0;
>   	int nr_pages = 0;
>   	struct page *in_page = NULL;
>   	struct page *out_page = NULL;
> @@ -126,6 +128,8 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
>   		ret = -ENOMEM;
>   		goto out;
>   	}
> +	mstack[sind] = 'A';
> +	sind++;
>   	cpage_out = kmap_local_page(out_page);
>   	pages[0] = out_page;
>   	nr_pages = 1;
> @@ -148,26 +152,32 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
>   				int i;
>
>   				for (i = 0; i < in_buf_pages; i++) {
> -					if (in_page) {
> -						kunmap(in_page);

I don't think we really need to keep @in_page mapped for that long.

We only need the input pages (pages from the inode's page cache) at the
point where we run out of input and refill the buffer.

So all we really need is: map the input page, copy its data into the
buffer, then unmap the page again.
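
I.e. something like the sketch below (untested; the helper name is made
up, the rest uses the same names as the quoted code):

	/* Untested sketch: keep the local mapping only across the memcpy. */
	static void copy_one_in_page(struct address_space *mapping, char *dst,
				     u64 start)
	{
		struct page *in_page = find_get_page(mapping,
						     start >> PAGE_SHIFT);
		char *data_in = kmap_local_page(in_page);

		memcpy(dst, data_in, PAGE_SIZE);
		kunmap_local(data_in);
		put_page(in_page);
	}

That way neither @data_in nor @in_page outlives a single copy, so the
cleanup at the out: label no longer needs to handle them.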

> +					if (data_in) {
> +						sind--;
> +						kunmap_local(data_in);
>   						put_page(in_page);
>   					}
>   					in_page = find_get_page(mapping,
>   								start >> PAGE_SHIFT);
> -					data_in = kmap(in_page);
> +					mstack[sind] = 'B';
> +					sind++;
> +					data_in = kmap_local_page(in_page);
>   					memcpy(workspace->buf + i * PAGE_SIZE,
>   					       data_in, PAGE_SIZE);
>   					start += PAGE_SIZE;
>   				}
>   				workspace->strm.next_in = workspace->buf;
>   			} else {

I think we can clean up the code here.

In fact the for loop can handle both cases; I don't see any special
reason for the two different paths. We can always go through
workspace->buf instead of switching between the buffer and a directly
mapped page.

I believe with that cleanup it becomes much simpler to convert to
kmap_local_page().
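
E.g. an untested sketch, reusing the copy_one_in_page() helper sketched
above and the workspace->buf_size that the existing code already uses:

	if (workspace->strm.avail_in == 0) {
		unsigned long bytes_left = len - workspace->strm.total_in;
		unsigned long copy_len = min(bytes_left,
					     (unsigned long)workspace->buf_size);
		int in_buf_pages = DIV_ROUND_UP(copy_len, PAGE_SIZE);
		int i;

		/* Copy the next chunk of input into the workspace buffer. */
		for (i = 0; i < in_buf_pages; i++) {
			copy_one_in_page(mapping,
					 workspace->buf + i * PAGE_SIZE, start);
			start += PAGE_SIZE;
		}
		workspace->strm.next_in = workspace->buf;
		workspace->strm.avail_in = copy_len;
	}

With that, the single-page special case, the long-lived @data_in /
@in_page and the mapping-order stack can all go away.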

I'm happy to help with this refactor if you don't feel confident enough
about this part of btrfs.

Thanks,
Qu

> -				if (in_page) {
> -					kunmap(in_page);
> +				if (data_in) {
> +					sind--;
> +					kunmap_local(data_in);
>   					put_page(in_page);
>   				}
>   				in_page = find_get_page(mapping,
>   							start >> PAGE_SHIFT);
> -				data_in = kmap(in_page);
> +				mstack[sind] = 'B';
> +				sind++;
> +				data_in = kmap_local_page(in_page);
>   				start += PAGE_SIZE;
>   				workspace->strm.next_in = data_in;
>   			}
> @@ -196,23 +206,39 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
>   		 * the stream end if required
>   		 */
>   		if (workspace->strm.avail_out == 0) {
> +			sind--;
> +			kunmap_local(data_in);
> +			data_in = NULL;
> +
> +			sind--;
>   			kunmap_local(cpage_out);
>   			cpage_out = NULL;
> +
>   			if (nr_pages == nr_dest_pages) {
>   				out_page = NULL;
> +				put_page(in_page);
>   				ret = -E2BIG;
>   				goto out;
>   			}
> +
>   			out_page = alloc_page(GFP_NOFS);
>   			if (out_page == NULL) {
> +				put_page(in_page);
>   				ret = -ENOMEM;
>   				goto out;
>   			}
> +
> +			mstack[sind] = 'A';
> +			sind++;
>   			cpage_out = kmap_local_page(out_page);
>   			pages[nr_pages] = out_page;
>   			nr_pages++;
>   			workspace->strm.avail_out = PAGE_SIZE;
>   			workspace->strm.next_out = cpage_out;
> +
> +			mstack[sind] = 'B';
> +			sind++;
> +			data_in = kmap_local_page(in_page);
>   		}
>   		/* we're all done */
>   		if (workspace->strm.total_in >= len)
> @@ -235,10 +261,16 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
>   			goto out;
>   		} else if (workspace->strm.avail_out == 0) {
>   			/* get another page for the stream end */
> +			sind--;
> +			kunmap_local(data_in);
> +			data_in = NULL;
> +
> +			sind--;
>   			kunmap_local(cpage_out);
>   			cpage_out = NULL;
>   			if (nr_pages == nr_dest_pages) {
>   				out_page = NULL;
> +				put_page(in_page);
>   				ret = -E2BIG;
>   				goto out;
>   			}
> @@ -247,11 +279,18 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
>   				ret = -ENOMEM;
>   				goto out;
>   			}
> +
> +			mstack[sind] = 'A';
> +			sind++;
>   			cpage_out = kmap_local_page(out_page);
>   			pages[nr_pages] = out_page;
>   			nr_pages++;
>   			workspace->strm.avail_out = PAGE_SIZE;
>   			workspace->strm.next_out = cpage_out;
> +
> +			mstack[sind] = 'B';
> +			sind++;
> +			data_in = kmap_local_page(in_page);
>   		}
>   	}
>   	zlib_deflateEnd(&workspace->strm);
> @@ -266,13 +305,15 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
>   	*total_in = workspace->strm.total_in;
>   out:
>   	*out_pages = nr_pages;
> -	if (cpage_out)
> -		kunmap_local(cpage_out);
> -
> -	if (in_page) {
> -		kunmap(in_page);
> -		put_page(in_page);
> +	while (--sind >= 0) {
> +		if (mstack[sind] == 'B') {
> +			kunmap_local(data_in);
> +			put_page(in_page);
> +		} else {
> +			kunmap_local(cpage_out);
> +		}
>   	}
> +
>   	return ret;
>   }
>
