[<prev] [next>] [day] [month] [year] [list]
Message-ID: <20250828120453.748058-1-max.kellermann@ionos.com>
Date: Thu, 28 Aug 2025 14:04:53 +0200
From: Max Kellermann <max.kellermann@...os.com>
To: Slava.Dubeyko@....com,
xiubli@...hat.com,
idryomov@...il.com,
amarkuze@...hat.com,
ceph-devel@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: Max Kellermann <max.kellermann@...os.com>
Subject: [PATCH] fs/ceph/addr: convert `op_idx`, `data_pages` back to local variables
These were local variables until commit f08068df4aa4 ("ceph: extend
ceph_writeback_ctl for ceph_writepages_start() refactoring"), but were
moved to the struct ceph_writeback_ctl for no obvious reason. Having
these in a struct means overhead, so let's move them back.
For the "allocate new pages array for next request" code block,
however, I decided to introduce a new local variable `old_pages`
instead of reusing `data_pages` for the reallocation, because that
reuse seemed confusing to me.
Signed-off-by: Max Kellermann <max.kellermann@...os.com>
---
fs/ceph/addr.c | 39 ++++++++++++++++++++-------------------
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 8b202d789e93..fc3192c79072 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -611,7 +611,6 @@ struct ceph_writeback_ctl
bool from_pool;
struct page **pages;
- struct page **data_pages;
};
/*
@@ -1051,7 +1050,6 @@ void ceph_init_writeback_ctl(struct address_space *mapping,
ceph_wbc->tag = PAGECACHE_TAG_DIRTY;
}
- ceph_wbc->op_idx = -1;
ceph_wbc->num_ops = 0;
ceph_wbc->offset = 0;
ceph_wbc->len = 0;
@@ -1060,7 +1058,6 @@ void ceph_init_writeback_ctl(struct address_space *mapping,
ceph_folio_batch_init(ceph_wbc);
ceph_wbc->pages = NULL;
- ceph_wbc->data_pages = NULL;
}
static inline
@@ -1417,10 +1414,12 @@ int ceph_submit_write(struct address_space *mapping,
struct ceph_vino vino = ceph_vino(inode);
struct ceph_osd_request *req = NULL;
struct page *page = NULL;
+ struct page **data_pages;
bool caching = ceph_is_cache_enabled(inode);
u64 offset;
u64 len;
unsigned i;
+ unsigned op_idx;
new_request:
offset = ceph_fscrypt_page_offset(ceph_wbc->pages[0]);
@@ -1481,8 +1480,8 @@ int ceph_submit_write(struct address_space *mapping,
/* Format the osd request message and submit the write */
len = 0;
- ceph_wbc->data_pages = ceph_wbc->pages;
- ceph_wbc->op_idx = 0;
+ data_pages = ceph_wbc->pages;
+ op_idx = 0;
for (i = 0; i < ceph_wbc->locked_pages; i++) {
u64 cur_offset;
@@ -1495,29 +1494,29 @@ int ceph_submit_write(struct address_space *mapping,
*/
if (offset + len != cur_offset) {
/* If it's full, stop here */
- if (ceph_wbc->op_idx + 1 == req->r_num_ops)
+ if (op_idx + 1 == req->r_num_ops)
break;
/* Kick off an fscache write with what we have so far. */
ceph_fscache_write_to_cache(inode, offset, len, caching);
/* Start a new extent */
- osd_req_op_extent_dup_last(req, ceph_wbc->op_idx,
+ osd_req_op_extent_dup_last(req, op_idx,
cur_offset - offset);
doutc(cl, "got pages at %llu~%llu\n", offset, len);
- osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
- ceph_wbc->data_pages,
+ osd_req_op_extent_osd_data_pages(req, op_idx,
+ data_pages,
len, 0,
ceph_wbc->from_pool,
false);
- osd_req_op_extent_update(req, ceph_wbc->op_idx, len);
+ osd_req_op_extent_update(req, op_idx, len);
len = 0;
offset = cur_offset;
- ceph_wbc->data_pages = ceph_wbc->pages + i;
- ceph_wbc->op_idx++;
+ data_pages = ceph_wbc->pages + i;
+ op_idx++;
}
set_page_writeback(page);
@@ -1555,25 +1554,27 @@ int ceph_submit_write(struct address_space *mapping,
offset, len);
}
- osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
- ceph_wbc->data_pages, len,
+ osd_req_op_extent_osd_data_pages(req, op_idx,
+ data_pages, len,
0, ceph_wbc->from_pool, false);
- osd_req_op_extent_update(req, ceph_wbc->op_idx, len);
+ osd_req_op_extent_update(req, op_idx, len);
- BUG_ON(ceph_wbc->op_idx + 1 != req->r_num_ops);
+ BUG_ON(op_idx + 1 != req->r_num_ops);
ceph_wbc->from_pool = false;
if (i < ceph_wbc->locked_pages) {
+ struct page **old_pages;
+
BUG_ON(ceph_wbc->num_ops <= req->r_num_ops);
ceph_wbc->num_ops -= req->r_num_ops;
ceph_wbc->locked_pages -= i;
/* allocate new pages array for next request */
- ceph_wbc->data_pages = ceph_wbc->pages;
+ old_pages = ceph_wbc->pages;
__ceph_allocate_page_array(ceph_wbc, ceph_wbc->locked_pages);
- memcpy(ceph_wbc->pages, ceph_wbc->data_pages + i,
+ memcpy(ceph_wbc->pages, old_pages + i,
ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
- memset(ceph_wbc->data_pages + i, 0,
+ memset(old_pages + i, 0,
ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
} else {
BUG_ON(ceph_wbc->num_ops != req->r_num_ops);
--
2.47.2
Powered by blists - more mailing lists