Message-Id: <20221020135545.586-1-nicememory@gmail.com>
Date: Thu, 20 Oct 2022 21:55:45 +0800
From: Jintao Yin <nicememory@...il.com>
To: phillip@...ashfs.org.uk
Cc: bagasdotme@...il.com, hsinyi@...omium.org,
linux-kernel@...r.kernel.org, marcmiltenberger@...il.com,
mirsad.todorovac@....unizg.hr, nicememory@...il.com,
regressions@...mhuis.info, regressions@...ts.linux.dev,
srw@...dewatkins.net
Subject: Re: BISECT result: 6.0.0-RC kernels trigger Firefox snap bug with 6.0.0-rc3 through 6.0.0-rc7

Hi all,

After reviewing the details of the page actor, I found that the tail
bytes may be written to a temporary buffer instead of the last used
page. The previous diff would therefore wrongly memzero a page that
does not actually contain the tail bytes.
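
To see why, consider how the special page actor hands out destination
buffers. Below is a standalone, simplified sketch of that fallback (the
*_sim names are invented for illustration; only the gap check mirrors
the logic in fs/squashfs/page_actor.c):

#include <stdio.h>

struct page_sim { int index; };

struct actor_sim {
	struct page_sim **page;	/* batch of page cache pages, in order */
	int pages;		/* entries in page[] */
	int next_page;		/* real pages handed out so far */
	int next_index;		/* page index expected next */
	char tmp[8];		/* stand-in for actor->tmp_buffer */
};

static void *next_buffer(struct actor_sim *a, int max_index)
{
	if (a->next_index > max_index)
		return NULL;
	if (a->next_page == a->pages ||
	    a->page[a->next_page]->index != a->next_index) {
		/* Expected page is missing from the batch, so this
		 * chunk is decompressed into the temp buffer. */
		a->next_index++;
		return a->tmp;
	}
	a->next_index++;
	return a->page[a->next_page++];	/* a real page was used */
}

int main(void)
{
	struct page_sim p0 = { 0 }, p1 = { 1 };	/* index 2 is missing */
	struct page_sim *batch[] = { &p0, &p1 };
	struct actor_sim a = { .page = batch, .pages = 2 };

	while (next_buffer(&a, 2) != NULL)
		;

	/* Only two of three chunks hit real pages; the tail (index 2)
	 * went to the temp buffer, so zeroing the tail of the last used
	 * page (index 1) would corrupt valid data. */
	printf("real pages used: %d\n", a.next_page);
	return 0;
}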

This diff fixes it by calculating the real index of the page the
trailing bytes land in, and by making squashfs_page_actor_free() return
the number of pages the actor actually used, so the caller can check
whether its last used page matches that index. Only if the last used
page really contains the tail bytes are its trailing bytes memzeroed.
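
As a worked example of the new index calculation (a standalone sketch;
the PAGE_SHIFT value and the numbers are assumptions for illustration
only):

#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)

int main(void)
{
	unsigned long start_index = 0;	/* first page index of the block */
	unsigned long res = 13000;	/* decompressed bytes in this block */
	unsigned long bytes = res % EX_PAGE_SIZE;	/* 712 trailing bytes */
	/* page index that really holds the trailing bytes: 0 + 3 = 3 */
	unsigned long bytes_index = start_index +
				    ((res - bytes) >> EX_PAGE_SHIFT);
	/* suppose the actor used only three pages, so the last used page
	 * has index 2 and the tail went to the temp buffer */
	unsigned long last_used_index = 2;

	if (bytes && last_used_index == bytes_index)
		printf("memzero trailing %lu bytes of the last used page\n",
		       EX_PAGE_SIZE - bytes);
	else
		printf("tail is in the temp buffer; skip the memzero\n");
	return 0;
}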

Please help test; any feedback is welcome.

Thanks,
Jintao

diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index e56510964b22..e1fafd10a850 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -506,8 +506,9 @@ static int squashfs_readahead_fragment(struct page **page,
 		squashfs_i(inode)->fragment_size);
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
 	unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
+	int res = buffer->error;
 
-	if (buffer->error)
+	if (res)
 		goto out;
 
 	expected += squashfs_i(inode)->fragment_offset;
@@ -529,7 +530,7 @@ static int squashfs_readahead_fragment(struct page **page,
 
 out:
 	squashfs_cache_put(buffer);
-	return buffer->error;
+	return res;
 }
 
 static void squashfs_readahead(struct readahead_control *ractl)
@@ -557,6 +558,7 @@ static void squashfs_readahead(struct readahead_control *ractl)
 		int res, bsize;
 		u64 block = 0;
 		unsigned int expected;
+		int nr_used_pages;
 
 		nr_pages = __readahead_batch(ractl, pages, max_pages);
 		if (!nr_pages)
@@ -593,18 +595,21 @@ static void squashfs_readahead(struct readahead_control *ractl)
 
 		res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
 
-		squashfs_page_actor_free(actor);
+		nr_used_pages = squashfs_page_actor_free(actor);
 
 		if (res == expected) {
 			int bytes;
+			pgoff_t bytes_index;
 
 			/* Last page (if present) may have trailing bytes not filled */
 			bytes = res % PAGE_SIZE;
-			if (pages[nr_pages - 1]->index == file_end && bytes)
-				memzero_page(pages[nr_pages - 1], bytes,
+			bytes_index = (index << shift) + ((res - bytes) >> PAGE_SHIFT);
+			if (bytes && nr_used_pages > 0 &&
+			    pages[nr_used_pages - 1]->index == bytes_index)
+				memzero_page(pages[nr_used_pages - 1], bytes,
 					     PAGE_SIZE - bytes);
 
-			for (i = 0; i < nr_pages; i++) {
+			for (i = 0; i < nr_used_pages; i++) {
 				flush_dcache_page(pages[i]);
 				SetPageUptodate(pages[i]);
 			}
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index f1ccad519e28..ee462ef380bf 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -26,14 +26,14 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
 	struct inode *inode = target_page->mapping->host;
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
 
-	int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
-	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
-	int start_index = target_page->index & ~mask;
-	int end_index = start_index | mask;
-	int i, n, pages, bytes, res = -ENOMEM;
+	pgoff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+	pgoff_t mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
+	pgoff_t start_index = target_page->index & ~mask;
+	pgoff_t end_index = start_index | mask;
+	int i, pages, used_pages, bytes, res = -ENOMEM;
+	pgoff_t n, bytes_index;
 	struct page **page;
 	struct squashfs_page_actor *actor;
-	void *pageaddr;
 
 	if (end_index > file_end)
 		end_index = file_end;
@@ -74,7 +74,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
 	/* Decompress directly into the page cache buffers */
 	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
 
-	squashfs_page_actor_free(actor);
+	used_pages = squashfs_page_actor_free(actor);
 
 	if (res < 0)
 		goto mark_errored;
@@ -86,16 +86,19 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
 
 	/* Last page (if present) may have trailing bytes not filled */
 	bytes = res % PAGE_SIZE;
-	if (page[pages - 1]->index == end_index && bytes) {
-		pageaddr = kmap_local_page(page[pages - 1]);
-		memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
-		kunmap_local(pageaddr);
+	bytes_index = start_index + ((res - bytes) >> PAGE_SHIFT);
+	if (used_pages > 0 && bytes &&
+	    page[used_pages - 1]->index == bytes_index) {
+		memzero_page(page[used_pages - 1], bytes,
+			     PAGE_SIZE - bytes);
 	}
 
 	/* Mark pages as uptodate, unlock and release */
 	for (i = 0; i < pages; i++) {
-		flush_dcache_page(page[i]);
-		SetPageUptodate(page[i]);
+		if (i < used_pages) {
+			flush_dcache_page(page[i]);
+			SetPageUptodate(page[i]);
+		}
 		unlock_page(page[i]);
 		if (page[i] != target_page)
 			put_page(page[i]);
@@ -112,8 +115,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
 	for (i = 0; i < pages; i++) {
 		if (page[i] == NULL || page[i] == target_page)
 			continue;
-		flush_dcache_page(page[i]);
-		SetPageError(page[i]);
+		if (i < used_pages) {
+			flush_dcache_page(page[i]);
+			SetPageError(page[i]);
+		}
 		unlock_page(page[i]);
 		put_page(page[i]);
 	}
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
index 95ffbb543d91..c2c5c3937ef9 100644
--- a/fs/squashfs/page_actor.h
+++ b/fs/squashfs/page_actor.h
@@ -29,10 +29,12 @@ extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
 extern struct squashfs_page_actor *squashfs_page_actor_init_special(
 				struct squashfs_sb_info *msblk,
 				struct page **page, int pages, int length);
-static inline void squashfs_page_actor_free(struct squashfs_page_actor *actor)
+static inline int squashfs_page_actor_free(struct squashfs_page_actor *actor)
 {
+	int res = actor->next_page;
 	kfree(actor->tmp_buffer);
 	kfree(actor);
+	return res;
 }
 static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
 {