Message-ID: <20251231024316.4643-3-CFSworks@gmail.com>
Date: Tue, 30 Dec 2025 18:43:13 -0800
From: Sam Edwards <cfsworks@...il.com>
To: Xiubo Li <xiubli@...hat.com>,
Ilya Dryomov <idryomov@...il.com>
Cc: Viacheslav Dubeyko <Slava.Dubeyko@....com>,
Christian Brauner <brauner@...nel.org>,
Milind Changire <mchangir@...hat.com>,
Jeff Layton <jlayton@...nel.org>,
ceph-devel@...r.kernel.org,
linux-kernel@...r.kernel.org,
Sam Edwards <CFSworks@...il.com>
Subject: [PATCH 2/5] ceph: Remove error return from ceph_process_folio_batch()

Following the previous patch, ceph_process_folio_batch() no longer
returns errors, since the writeback loop cannot handle them.

This function already indicates that it failed to lock any pages by
leaving `ceph_wbc.locked_pages == 0`, and the writeback loop has no
way to handle abandonment of an already-locked batch. Therefore,
change the return type of ceph_process_folio_batch() to `void` and
remove the pathological goto in the writeback loop. The lack of a
return code emphasizes that ceph_process_folio_batch() is designed to
be abort-free: once it commits a folio for writeback, it will not
later abandon it or propagate an error for that folio.

Signed-off-by: Sam Edwards <CFSworks@...il.com>
---
 fs/ceph/addr.c | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)

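For reviewers: below is a minimal userspace sketch (not part of the patch)
of the calling convention this change settles on. The struct, field, and
function names are illustrative stand-ins, not the real ceph_writeback_ctl
or folio APIs; the point is only that the batch processor returns void and
the caller decides how to proceed from how many folios ended up locked.

/*
 * Illustrative model only -- not the real ceph_writeback_ctl or folio
 * code.  It shows the contract the patch relies on: the batch
 * processor returns void and is abort-free, and the caller proceeds
 * (or not) based on how many folios ended up locked.
 */
#include <stdio.h>

struct wb_ctl {
	unsigned int locked_pages;	/* folios committed for writeback */
};

/* May stop early, but never un-commits a folio it has already locked. */
static void process_folio_batch(struct wb_ctl *ctl, unsigned int available)
{
	unsigned int i;

	for (i = 0; i < available; i++) {
		if (i == 5)	/* stand-in for a soft stop such as -E2BIG */
			break;
		ctl->locked_pages++;	/* this folio is now committed */
	}
}

int main(void)
{
	struct wb_ctl ctl = { .locked_pages = 0 };

	process_folio_batch(&ctl, 8);

	/* The caller keys off the count, not a return code. */
	if (!ctl.locked_pages) {
		printf("nothing to write back\n");
		return 0;
	}

	printf("submitting %u folios\n", ctl.locked_pages);
	return 0;
}

This mirrors how ceph_writepages_start() behaves after the patch: it calls
the batch processor, then checks ceph_wbc.locked_pages to decide whether
anything was committed, with no error return in between.
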
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 3462df35d245..2b722916fb9b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1283,16 +1283,16 @@ static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
 }
 
 static
-int ceph_process_folio_batch(struct address_space *mapping,
-			     struct writeback_control *wbc,
-			     struct ceph_writeback_ctl *ceph_wbc)
+void ceph_process_folio_batch(struct address_space *mapping,
+			      struct writeback_control *wbc,
+			      struct ceph_writeback_ctl *ceph_wbc)
 {
 	struct inode *inode = mapping->host;
 	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
 	struct ceph_client *cl = fsc->client;
 	struct folio *folio = NULL;
 	unsigned i;
-	int rc = 0;
+	int rc;
 
 	for (i = 0; can_next_page_be_processed(ceph_wbc, i); i++) {
 		folio = ceph_wbc->fbatch.folios[i];
@@ -1322,12 +1322,10 @@ int ceph_process_folio_batch(struct address_space *mapping,
 		rc = ceph_check_page_before_write(mapping, wbc,
 						  ceph_wbc, folio);
 		if (rc == -ENODATA) {
-			rc = 0;
 			folio_unlock(folio);
 			ceph_wbc->fbatch.folios[i] = NULL;
 			continue;
 		} else if (rc == -E2BIG) {
-			rc = 0;
 			folio_unlock(folio);
 			ceph_wbc->fbatch.folios[i] = NULL;
 			break;
@@ -1369,7 +1367,6 @@ int ceph_process_folio_batch(struct address_space *mapping,
 		rc = move_dirty_folio_in_page_array(mapping, wbc, ceph_wbc,
 						    folio);
 		if (rc) {
-			rc = 0;
 			folio_redirty_for_writepage(wbc, folio);
 			folio_unlock(folio);
 			break;
@@ -1380,8 +1377,6 @@ int ceph_process_folio_batch(struct address_space *mapping,
 	}
 
 	ceph_wbc->processed_in_fbatch = i;
-
-	return rc;
 }
 
 static inline
@@ -1685,10 +1680,8 @@ static int ceph_writepages_start(struct address_space *mapping,
 			break;
 
 process_folio_batch:
-		rc = ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
+		ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
 		ceph_shift_unused_folios_left(&ceph_wbc.fbatch);
-		if (rc)
-			goto release_folios;
 
 		/* did we get anything? */
 		if (!ceph_wbc.locked_pages)
--
2.51.2