Message-Id: <20220617120538.18091-4-fmdefrancesco@gmail.com>
Date: Fri, 17 Jun 2022 14:05:38 +0200
From: "Fabio M. De Francesco" <fmdefrancesco@...il.com>
To: Chris Mason <clm@...com>, Josef Bacik <josef@...icpanda.com>,
David Sterba <dsterba@...e.com>,
Nick Terrell <terrelln@...com>,
Chris Down <chris@...isdown.name>,
Filipe Manana <fdmanana@...e.com>, Qu Wenruo <wqu@...e.com>,
Nikolay Borisov <nborisov@...e.com>,
Gabriel Niebler <gniebler@...e.com>,
Ira Weiny <ira.weiny@...el.com>, linux-btrfs@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: "Fabio M. De Francesco" <fmdefrancesco@...il.com>
Subject: [RFC PATCH v2 3/3] btrfs: Use kmap_local_page() on "in_page" in zlib_compress_pages()
The use of kmap() is being deprecated in favor of kmap_local_page(). With
kmap_local_page(), the mapping is per thread, CPU local and not globally
visible.
Therefore, use kmap_local_page() / kunmap_local() on "in_page" in
zlib_compress_pages() because in this function the mappings are per thread
and are not visible in other contexts.
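As a minimal illustration of the conversion pattern (not the btrfs code itself; copy_from_page() is a hypothetical helper used only for this sketch):

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical helper, for illustration only: copy "len" bytes out of a
 * (possibly highmem) page through a short-lived, per-thread local mapping.
 */
static void copy_from_page(struct page *page, void *dst, size_t len)
{
	char *vaddr;

	vaddr = kmap_local_page(page);	/* replaces kmap(page) */
	memcpy(dst, vaddr, len);
	kunmap_local(vaddr);		/* takes the address, not the page */
}

The address returned by kmap_local_page() is only valid in the context that created it, so it must not be handed to other threads or stored across scheduling points.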
Use an array-based stack to keep track of the order of mappings and
unmappings, in order to comply with the rules for nesting local mappings.
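A simplified sketch of that rule follows (not the actual zlib_compress_pages() loop): nested local mappings must be released in the reverse order of their creation, and the small character stack added by this patch records that order.

#include <linux/highmem.h>

/* Simplified example, assuming two already-referenced pages. */
static void nested_local_mappings(struct page *out_page, struct page *in_page)
{
	char *cpage_out, *data_in;
	char mstack[2];
	int sind = 0;

	cpage_out = kmap_local_page(out_page);	/* first mapping */
	mstack[sind++] = 'A';
	data_in = kmap_local_page(in_page);	/* second, nested mapping */
	mstack[sind++] = 'B';

	/* ... use data_in and cpage_out here ... */

	/* Unmap strictly in reverse (LIFO) order: 'B' first, then 'A'. */
	while (--sind >= 0) {
		if (mstack[sind] == 'B')
			kunmap_local(data_in);
		else
			kunmap_local(cpage_out);
	}
}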
Tested with xfstests on a QEMU + KVM 32-bit VM with 4 GB of RAM and
HIGHMEM64G enabled. This patch passes 26/26 tests of the "compress" group.
Suggested-by: Ira Weiny <ira.weiny@...el.com>
Signed-off-by: Fabio M. De Francesco <fmdefrancesco@...il.com>
---
fs/btrfs/zlib.c | 65 ++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 53 insertions(+), 12 deletions(-)
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index c7c69ce4a1a9..1f16014e8ff3 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -99,6 +99,8 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
int ret;
char *data_in = NULL;
char *cpage_out = NULL;
+ char mstack[2];
+ int sind = 0;
int nr_pages = 0;
struct page *in_page = NULL;
struct page *out_page = NULL;
@@ -126,6 +128,8 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
ret = -ENOMEM;
goto out;
}
+ mstack[sind] = 'A';
+ sind++;
cpage_out = kmap_local_page(out_page);
pages[0] = out_page;
nr_pages = 1;
@@ -148,26 +152,32 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
int i;
for (i = 0; i < in_buf_pages; i++) {
- if (in_page) {
- kunmap(in_page);
+ if (data_in) {
+ sind--;
+ kunmap_local(data_in);
put_page(in_page);
}
in_page = find_get_page(mapping,
start >> PAGE_SHIFT);
- data_in = kmap(in_page);
+ mstack[sind] = 'B';
+ sind++;
+ data_in = kmap_local_page(in_page);
memcpy(workspace->buf + i * PAGE_SIZE,
data_in, PAGE_SIZE);
start += PAGE_SIZE;
}
workspace->strm.next_in = workspace->buf;
} else {
- if (in_page) {
- kunmap(in_page);
+ if (data_in) {
+ sind--;
+ kunmap_local(data_in);
put_page(in_page);
}
in_page = find_get_page(mapping,
start >> PAGE_SHIFT);
- data_in = kmap(in_page);
+ mstack[sind] = 'B';
+ sind++;
+ data_in = kmap_local_page(in_page);
start += PAGE_SIZE;
workspace->strm.next_in = data_in;
}
@@ -196,23 +206,39 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
* the stream end if required
*/
if (workspace->strm.avail_out == 0) {
+ sind--;
+ kunmap_local(data_in);
+ data_in = NULL;
+
+ sind--;
kunmap_local(cpage_out);
cpage_out = NULL;
+
if (nr_pages == nr_dest_pages) {
out_page = NULL;
+ put_page(in_page);
ret = -E2BIG;
goto out;
}
+
out_page = alloc_page(GFP_NOFS);
if (out_page == NULL) {
+ put_page(in_page);
ret = -ENOMEM;
goto out;
}
+
+ mstack[sind] = 'A';
+ sind++;
cpage_out = kmap_local_page(out_page);
pages[nr_pages] = out_page;
nr_pages++;
workspace->strm.avail_out = PAGE_SIZE;
workspace->strm.next_out = cpage_out;
+
+ mstack[sind] = 'B';
+ sind++;
+ data_in = kmap_local_page(in_page);
}
/* we're all done */
if (workspace->strm.total_in >= len)
@@ -235,10 +261,16 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
} else if (workspace->strm.avail_out == 0) {
/* get another page for the stream end */
+ sind--;
+ kunmap_local(data_in);
+ data_in = NULL;
+
+ sind--;
kunmap_local(cpage_out);
cpage_out = NULL;
if (nr_pages == nr_dest_pages) {
out_page = NULL;
+ put_page(in_page);
ret = -E2BIG;
goto out;
}
@@ -247,11 +279,18 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
ret = -ENOMEM;
goto out;
}
+
+ mstack[sind] = 'A';
+ sind++;
cpage_out = kmap_local_page(out_page);
pages[nr_pages] = out_page;
nr_pages++;
workspace->strm.avail_out = PAGE_SIZE;
workspace->strm.next_out = cpage_out;
+
+ mstack[sind] = 'B';
+ sind++;
+ data_in = kmap_local_page(in_page);
}
}
zlib_deflateEnd(&workspace->strm);
@@ -266,13 +305,15 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_in = workspace->strm.total_in;
out:
*out_pages = nr_pages;
- if (cpage_out)
- kunmap_local(cpage_out);
-
- if (in_page) {
- kunmap(in_page);
- put_page(in_page);
+ while (--sind >= 0) {
+ if (mstack[sind] == 'B') {
+ kunmap_local(data_in);
+ put_page(in_page);
+ } else {
+ kunmap_local(cpage_out);
+ }
}
+
return ret;
}
--
2.36.1