Message-Id: <20200610201345.13273-20-willy@infradead.org>
Date: Wed, 10 Jun 2020 13:13:13 -0700
From: Matthew Wilcox <willy@...radead.org>
To: linux-fsdevel@...r.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: [PATCH v6 19/51] mm: Zero the head page, not the tail page
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Pass the head page to zero_user_segment(), not the tail page, and adjust
the byte offsets appropriately.  When the partially-truncated page is a
tail page of a THP, the zeroing has to be done through the head page, so
partial_start and top are each increased by the tail page's byte offset
within the compound page.
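As a worked example (assuming 4KiB base pages, so PAGE_SHIFT == 12, and
a THP whose head page sits at index 512): if the partial page at
start - 1 is the tail page at index 515, then diff = 515 - 512 = 3, and
both partial_start and top are increased by 3 << PAGE_SHIFT = 12288
bytes before zero_user_segment() is called on the head page.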
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
mm/shmem.c | 7 +++++++
mm/truncate.c | 7 +++++++
2 files changed, 14 insertions(+)
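The two hunks below open-code the same adjustment in shmem_undo_range()
and truncate_inode_pages_range().  As a minimal sketch of that shared
pattern (the helper name zero_head_adjust() is hypothetical and not part
of this patch), it amounts to:

	static struct page *zero_head_adjust(struct page *page, pgoff_t index,
			unsigned int *partial_start, unsigned int *top)
	{
		struct page *head = thp_head(page);

		if (head != page) {
			/* how many base pages into the THP this tail sits */
			unsigned int diff = index - head->index;

			/* rebase the byte offsets onto the head page */
			*partial_start += diff << PAGE_SHIFT;
			*top += diff << PAGE_SHIFT;
		}
		return head;
	}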
diff --git a/mm/shmem.c b/mm/shmem.c
index a05d129a45e9..55405d811cfd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -898,11 +898,18 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
struct page *page = NULL;
shmem_getpage(inode, start - 1, &page, SGP_READ);
if (page) {
+ struct page *head = thp_head(page);
unsigned int top = PAGE_SIZE;
if (start > end) {
top = partial_end;
partial_end = 0;
}
+ if (head != page) {
+ unsigned int diff = start - 1 - head->index;
+ partial_start += diff << PAGE_SHIFT;
+ top += diff << PAGE_SHIFT;
+ page = head;
+ }
zero_user_segment(page, partial_start, top);
set_page_dirty(page);
unlock_page(page);
diff --git a/mm/truncate.c b/mm/truncate.c
index dd9ebc1da356..152974888124 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -374,12 +374,19 @@ void truncate_inode_pages_range(struct address_space *mapping,
if (partial_start) {
struct page *page = find_lock_page(mapping, start - 1);
if (page) {
+ struct page *head = thp_head(page);
unsigned int top = PAGE_SIZE;
if (start > end) {
/* Truncation within a single page */
top = partial_end;
partial_end = 0;
}
+ if (head != page) {
+ unsigned int diff = start - 1 - head->index;
+ partial_start += diff << PAGE_SHIFT;
+ top += diff << PAGE_SHIFT;
+ page = head;
+ }
wait_on_page_writeback(page);
zero_user_segment(page, partial_start, top);
cleancache_invalidate_page(mapping, page);
--
2.26.2