Message-ID: <20241025204008.4076565-5-dhowells@redhat.com>
Date: Fri, 25 Oct 2024 21:39:31 +0100
From: David Howells <dhowells@...hat.com>
To: Christian Brauner <christian@...uner.io>,
Steve French <smfrench@...il.com>,
Matthew Wilcox <willy@...radead.org>
Cc: David Howells <dhowells@...hat.com>,
Jeff Layton <jlayton@...nel.org>,
Gao Xiang <hsiangkao@...ux.alibaba.com>,
Dominique Martinet <asmadeus@...ewreck.org>,
Marc Dionne <marc.dionne@...istor.com>,
Paulo Alcantara <pc@...guebit.com>,
Shyam Prasad N <sprasad@...rosoft.com>,
Tom Talpey <tom@...pey.com>,
Eric Van Hensbergen <ericvh@...nel.org>,
Ilya Dryomov <idryomov@...il.com>,
netfs@...ts.linux.dev,
linux-afs@...ts.infradead.org,
linux-cifs@...r.kernel.org,
linux-nfs@...r.kernel.org,
ceph-devel@...r.kernel.org,
v9fs@...ts.linux.dev,
linux-erofs@...ts.ozlabs.org,
linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 04/31] netfs: Provide and use folio_queue allocation and free functions
Provide and use folio_queue allocation and free functions to combine the
allocation, initialisation and stat (un)accounting steps that are repeated
in several places.
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Jeff Layton <jlayton@...nel.org>
cc: netfs@...ts.linux.dev
cc: linux-fsdevel@...r.kernel.org
---
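As an illustrative aside (not part of the patch itself), every caller converted below follows the same pattern; the GFP flags and error handling in this sketch mirror the hunks that follow:

	struct folio_queue *fq;

	fq = netfs_folioq_alloc(GFP_KERNEL);	/* was: kmalloc() + netfs_stat() + folioq_init() */
	if (!fq)
		return -ENOMEM;

	/* ... use fq as a rolling-buffer segment ... */

	netfs_folioq_free(fq);			/* was: netfs_stat_d() + kfree() */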
fs/netfs/buffered_read.c | 12 +++---------
fs/netfs/misc.c | 38 ++++++++++++++++++++++++++++++++++----
include/linux/netfs.h | 5 +++++
3 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 7ac34550c403..b5a7beb9d01b 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -131,11 +131,9 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
struct folio_queue *tail = rreq->buffer_tail, *new;
size_t added;
- new = kmalloc(sizeof(*new), GFP_NOFS);
+ new = netfs_folioq_alloc(GFP_NOFS);
if (!new)
return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(new);
new->prev = tail;
tail->next = new;
rreq->buffer_tail = new;
@@ -359,11 +357,9 @@ static int netfs_prime_buffer(struct netfs_io_request *rreq)
struct folio_batch put_batch;
size_t added;
- folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
+ folioq = netfs_folioq_alloc(GFP_KERNEL);
if (!folioq)
return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(folioq);
rreq->buffer = folioq;
rreq->buffer_tail = folioq;
rreq->submitted = rreq->start;
@@ -436,12 +432,10 @@ static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct fo
{
struct folio_queue *folioq;
- folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
+ folioq = netfs_folioq_alloc(GFP_KERNEL);
if (!folioq)
return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(folioq);
folioq_append(folioq, folio);
BUG_ON(folioq_folio(folioq, 0) != folio);
BUG_ON(folioq_folio_order(folioq, 0) != folio_order(folio));
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 78fe5796b2b2..6cd7e1ee7a14 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -8,6 +8,38 @@
#include <linux/swap.h>
#include "internal.h"
+/**
+ * netfs_folioq_alloc - Allocate a folio_queue struct
+ * @gfp: Allocation constraints
+ *
+ * Allocate, initialise and account the folio_queue struct.
+ */
+struct folio_queue *netfs_folioq_alloc(gfp_t gfp)
+{
+ struct folio_queue *fq;
+
+ fq = kmalloc(sizeof(*fq), gfp);
+ if (fq) {
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(fq);
+ }
+ return fq;
+}
+EXPORT_SYMBOL(netfs_folioq_alloc);
+
+/**
+ * netfs_folioq_free - Free a folio_queue struct
+ * @folioq: The object to free
+ *
+ * Free and unaccount the folio_queue struct.
+ */
+void netfs_folioq_free(struct folio_queue *folioq)
+{
+ netfs_stat_d(&netfs_n_folioq);
+ kfree(folioq);
+}
+EXPORT_SYMBOL(netfs_folioq_free);
+
/*
* Make sure there's space in the rolling queue.
*/
@@ -87,8 +119,7 @@ struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
if (next)
next->prev = NULL;
- netfs_stat_d(&netfs_n_folioq);
- kfree(head);
+ netfs_folioq_free(head);
wreq->buffer = next;
return next;
}
@@ -111,8 +142,7 @@ void netfs_clear_buffer(struct netfs_io_request *rreq)
folio_put(folio);
}
}
- netfs_stat_d(&netfs_n_folioq);
- kfree(p);
+ netfs_folioq_free(p);
}
}
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 5eaceef41e6c..b2fa569e875d 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -21,6 +21,7 @@
enum netfs_sreq_ref_trace;
typedef struct mempool_s mempool_t;
+struct folio_queue;
/**
* folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED]
@@ -454,6 +455,10 @@ void netfs_end_io_write(struct inode *inode);
int netfs_start_io_direct(struct inode *inode);
void netfs_end_io_direct(struct inode *inode);
+/* Miscellaneous APIs. */
+struct folio_queue *netfs_folioq_alloc(gfp_t gfp);
+void netfs_folioq_free(struct folio_queue *folioq);
+
/**
* netfs_inode - Get the netfs inode context from the inode
* @inode: The inode to query
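As a further hedged sketch (the example_* names are hypothetical, not from the patch): since both helpers are exported and declared in <linux/netfs.h>, code outside fs/netfs could pair them with folioq_append() in the same way netfs_create_singular_buffer() does above, assuming <linux/folio_queue.h> provides folioq_append() as used in the buffered_read.c hunk:

#include <linux/folio_queue.h>
#include <linux/netfs.h>

/* Hypothetical helper: wrap a single folio in a freshly allocated folio_queue. */
static struct folio_queue *example_wrap_folio(struct folio *folio)
{
	struct folio_queue *fq;

	fq = netfs_folioq_alloc(GFP_KERNEL);
	if (!fq)
		return NULL;
	folioq_append(fq, folio);
	return fq;
}

/* Hypothetical teardown: frees only the queue segment and undoes the stat
 * accounting; any folio references must be put separately, as
 * netfs_clear_buffer() does before calling netfs_folioq_free().
 */
static void example_unwrap_folio(struct folio_queue *fq)
{
	netfs_folioq_free(fq);
}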