Message-Id: <20230414110821.21548-4-p.raghav@samsung.com>
Date: Fri, 14 Apr 2023 13:08:20 +0200
From: Pankaj Raghav <p.raghav@...sung.com>
To: brauner@...nel.org, willy@...radead.org, viro@...iv.linux.org.uk,
akpm@...ux-foundation.org
Cc: linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
mcgrof@...nel.org, gost.dev@...sung.com, hare@...e.de,
Pankaj Raghav <p.raghav@...sung.com>
Subject: [RFC 3/4] fs/buffer: add folio_create_empty_buffers helper

Folio version of create_empty_buffers(). This is required to convert
create_page_buffers() to create_folio_buffers() later in the series.
Unlike create_empty_buffers(), it operates directly on the folio and
therefore avoids several calls to compound_head().

Signed-off-by: Pankaj Raghav <p.raghav@...sung.com>
---
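For illustration only (not part of this patch): a rough sketch of how
create_page_buffers() could turn into create_folio_buffers() on top of
this helper. The name and shape below are an assumption on my side; the
actual conversion later in the series may look different.

	/*
	 * Illustrative sketch only, not part of this patch: attach empty
	 * buffers to a locked folio if it has none, then return the head
	 * of the buffer list, mirroring what create_page_buffers() does
	 * for pages.
	 */
	static struct buffer_head *create_folio_buffers(struct folio *folio,
			struct inode *inode, unsigned int b_state)
	{
		BUG_ON(!folio_test_locked(folio));

		if (!folio_buffers(folio))
			folio_create_empty_buffers(folio,
					1 << READ_ONCE(inode->i_blkbits), b_state);
		return folio_buffers(folio);
	}

As with create_page_buffers(), the caller is expected to hold the folio
lock.
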
 fs/buffer.c                 | 34 ++++++++++++++++++++++++++++++++++
 include/linux/buffer_head.h |  2 ++
 2 files changed, 36 insertions(+)

diff --git a/fs/buffer.c b/fs/buffer.c
index 0f9c2127543d..9e6a1a738fb5 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1645,6 +1645,40 @@ void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 }
 EXPORT_SYMBOL(block_invalidate_folio);
 
+/*
+ * We attach and possibly dirty the buffers atomically wrt
+ * block_dirty_folio() via private_lock. try_to_free_buffers
+ * is already excluded via the folio lock.
+ */
+void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
+				unsigned long b_state)
+{
+	struct buffer_head *bh, *head, *tail;
+
+	head = alloc_folio_buffers(folio, blocksize, true);
+	bh = head;
+	do {
+		bh->b_state |= b_state;
+		tail = bh;
+		bh = bh->b_this_page;
+	} while (bh);
+	tail->b_this_page = head;
+
+	spin_lock(&folio->mapping->private_lock);
+	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
+		bh = head;
+		do {
+			if (folio_test_dirty(folio))
+				set_buffer_dirty(bh);
+			if (folio_test_uptodate(folio))
+				set_buffer_uptodate(bh);
+			bh = bh->b_this_page;
+		} while (bh != head);
+	}
+	folio_attach_private(folio, head);
+	spin_unlock(&folio->mapping->private_lock);
+}
+EXPORT_SYMBOL(folio_create_empty_buffers);
 
 /*
  * We attach and possibly dirty the buffers atomically wrt
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index d5a2ef9b4cdf..8afa91cbb8e2 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -203,6 +203,8 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
 		bool retry);
 void create_empty_buffers(struct page *, unsigned long,
 			unsigned long b_state);
+void folio_create_empty_buffers(struct folio *, unsigned long,
+			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);
--
2.34.1
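
P.S. (illustration only, not part of the patch): converting an existing
call site is mostly mechanical. A minimal, hypothetical before/after
sketch, where "page" and "blocksize" stand for whatever the call site
already has and the page/folio lock is assumed to be held:

	/* before: attach buffer heads through the page interface */
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* after: operate on the folio directly, avoiding compound_head() */
	struct folio *folio = page_folio(page);

	if (!folio_buffers(folio))
		folio_create_empty_buffers(folio, blocksize, 0);

Callers that already have a folio can of course skip the page_folio()
step.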