Message-Id: <20221231150919.659533-3-agruenba@redhat.com>
Date: Sat, 31 Dec 2022 16:09:12 +0100
From: Andreas Gruenbacher <agruenba@...hat.com>
To: Christoph Hellwig <hch@...radead.org>,
"Darrick J . Wong" <djwong@...nel.org>,
Alexander Viro <viro@...iv.linux.org.uk>,
Matthew Wilcox <willy@...radead.org>
Cc: Andreas Gruenbacher <agruenba@...hat.com>,
linux-xfs@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-ext4@...r.kernel.org, cluster-devel@...hat.com
Subject: [PATCH v5 2/9] iomap/gfs2: Unlock and put folio in page_done handler

When an iomap defines a ->page_done() handler in its page_ops, delegate
unlocking the folio and putting the folio reference to that handler.
This allows us to fix a race between journaled data writes and folio
writeback in gfs2: before this change, gfs2_iomap_page_done() was called
after unlocking the folio, so writeback could start writing back the
folio's buffers before they could be marked for writing to the journal.
Also, try_to_free_buffers() could free the buffers before
gfs2_iomap_page_done() was done adding the buffers to the current
transaction. With this change, gfs2_iomap_page_done() adds the buffers
to the current transaction while the folio is still locked, so the
problems described above can no longer occur.

The only current user of ->page_done() is gfs2, so other filesystems are
not affected. To catch out any out-of-tree users, switch from a page to
a folio in ->page_done().

Signed-off-by: Andreas Gruenbacher <agruenba@...hat.com>
---
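
Note for any out-of-tree ->page_done() implementations: with this change
the handler is called with the folio still locked and still holding a
reference, and the handler must drop both itself. A minimal sketch of
what such a handler now looks like (the fs_foo_* names below are made up
for illustration and are not part of this patch):

static void fs_foo_page_done(struct inode *inode, loff_t pos,
                             unsigned copied, struct folio *folio)
{
        if (!folio) {
                /* The folio could not be obtained; only clean up. */
                fs_foo_write_cleanup(inode);
                return;
        }

        /* Per-write bookkeeping, done while the folio is still locked. */
        fs_foo_account_write(inode, pos, copied);

        /* The handler is now responsible for these two steps. */
        folio_unlock(folio);
        folio_put(folio);
}
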
fs/gfs2/bmap.c | 15 ++++++++++++---
fs/iomap/buffered-io.c | 8 ++++----
include/linux/iomap.h | 7 ++++---
3 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index e7537fd305dd..46206286ad42 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -968,14 +968,23 @@ static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
 }
 
 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
-                                 unsigned copied, struct page *page)
+                                 unsigned copied, struct folio *folio)
 {
         struct gfs2_trans *tr = current->journal_info;
         struct gfs2_inode *ip = GFS2_I(inode);
         struct gfs2_sbd *sdp = GFS2_SB(inode);
 
-        if (page && !gfs2_is_stuffed(ip))
-                gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+        if (!folio) {
+                gfs2_trans_end(sdp);
+                return;
+        }
+
+        if (!gfs2_is_stuffed(ip))
+                gfs2_page_add_databufs(ip, &folio->page, offset_in_page(pos),
+                                       copied);
+
+        folio_unlock(folio);
+        folio_put(folio);
 
         if (tr->tr_num_buf_new)
                 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index c30d150a9303..e13d5694e299 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -580,12 +580,12 @@ static void iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
 {
         const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
 
-        if (folio)
+        if (page_ops && page_ops->page_done) {
+                page_ops->page_done(iter->inode, pos, ret, folio);
+        } else if (folio) {
                 folio_unlock(folio);
-        if (page_ops && page_ops->page_done)
-                page_ops->page_done(iter->inode, pos, ret, &folio->page);
-        if (folio)
                 folio_put(folio);
+        }
 }
 
 static int iomap_write_begin_inline(const struct iomap_iter *iter,
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 0983dfc9a203..743e2a909162 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -131,13 +131,14 @@ static inline bool iomap_inline_data_valid(const struct iomap *iomap)
  * associated with them.
  *
  * When page_prepare succeeds, page_done will always be called to do any
- * cleanup work necessary. In that page_done call, @page will be NULL if the
- * associated page could not be obtained.
+ * cleanup work necessary. In that page_done call, @folio will be NULL if the
+ * associated folio could not be obtained. When folio is not NULL, page_done
+ * is responsible for unlocking and putting the folio.
  */
 struct iomap_page_ops {
         int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
         void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
-                        struct page *page);
+                        struct folio *folio);
 
         /*
          * Check that the cached iomap still maps correctly to the filesystem's
--
2.38.1