Message-ID: <20260126023055.405401-4-CFSworks@gmail.com>
Date: Sun, 25 Jan 2026 18:30:54 -0800
From: Sam Edwards <cfsworks@...il.com>
To: Xiubo Li <xiubli@...hat.com>,
Ilya Dryomov <idryomov@...il.com>
Cc: Viacheslav Dubeyko <Slava.Dubeyko@....com>,
Christian Brauner <brauner@...nel.org>,
Milind Changire <mchangir@...hat.com>,
Jeff Layton <jlayton@...nel.org>,
ceph-devel@...r.kernel.org,
linux-kernel@...r.kernel.org,
Sam Edwards <CFSworks@...il.com>
Subject: [PATCH v3 3/4] ceph: remove error return from ceph_process_folio_batch()

Following an earlier commit, ceph_process_folio_batch() no longer
returns errors, because the writeback loop cannot handle them.

The function already signals failure to lock any pages by leaving
`ceph_wbc.locked_pages == 0`, and the writeback loop has no way to
handle abandonment of a locked batch. Change the return type of
ceph_process_folio_batch() to `void` and remove the pathological goto
in the writeback loop.

The absence of a return code emphasizes that ceph_process_folio_batch()
is designed to be abort-free: once it commits a folio for writeback, it
will not later abandon that folio or propagate an error for it. Any
future change that needs "abort" logic must preserve this invariant by
cleaning up the locked folio array and resetting ceph_wbc.locked_pages
accordingly.
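
For illustration only (not part of the patch), here is a minimal sketch
of the caller-side pattern this relies on, using the names that appear
in the ceph_writepages_start() hunk below; the exact action taken when
nothing was locked (the goto target shown) is an assumption:

        ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
        ceph_shift_unused_folios_left(&ceph_wbc.fbatch);

        /* did we get anything? */
        if (!ceph_wbc.locked_pages)
                goto release_folios;    /* assumed: nothing locked, release and rescan */

The caller no longer consults a return value at all; an empty batch is
detected solely via ceph_wbc.locked_pages.
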
Signed-off-by: Sam Edwards <CFSworks@...il.com>
---
fs/ceph/addr.c | 17 +++++------------
1 file changed, 5 insertions(+), 12 deletions(-)

diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 39064893f35b..cdf11288d6b7 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1284,16 +1284,16 @@ static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
}
static
-int ceph_process_folio_batch(struct address_space *mapping,
- struct writeback_control *wbc,
- struct ceph_writeback_ctl *ceph_wbc)
+void ceph_process_folio_batch(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
{
struct inode *inode = mapping->host;
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct folio *folio = NULL;
unsigned i;
- int rc = 0;
+ int rc;
for (i = 0; can_next_page_be_processed(ceph_wbc, i); i++) {
folio = ceph_wbc->fbatch.folios[i];
@@ -1323,12 +1323,10 @@ int ceph_process_folio_batch(struct address_space *mapping,
rc = ceph_check_page_before_write(mapping, wbc,
ceph_wbc, folio);
if (rc == -ENODATA) {
- rc = 0;
folio_unlock(folio);
ceph_wbc->fbatch.folios[i] = NULL;
continue;
} else if (rc == -E2BIG) {
- rc = 0;
folio_unlock(folio);
ceph_wbc->fbatch.folios[i] = NULL;
break;
@@ -1370,7 +1368,6 @@ int ceph_process_folio_batch(struct address_space *mapping,
rc = move_dirty_folio_in_page_array(mapping, wbc, ceph_wbc,
folio);
if (rc) {
- rc = 0;
folio_redirty_for_writepage(wbc, folio);
folio_unlock(folio);
break;
@@ -1381,8 +1378,6 @@ int ceph_process_folio_batch(struct address_space *mapping,
}
ceph_wbc->processed_in_fbatch = i;
-
- return rc;
}
static inline
@@ -1686,10 +1681,8 @@ static int ceph_writepages_start(struct address_space *mapping,
break;
process_folio_batch:
- rc = ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
+ ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
ceph_shift_unused_folios_left(&ceph_wbc.fbatch);
- if (rc)
- goto release_folios;
/* did we get anything? */
if (!ceph_wbc.locked_pages)
--
2.52.0