Message-ID: <20240814203850.2240469-11-dhowells@redhat.com>
Date: Wed, 14 Aug 2024 21:38:30 +0100
From: David Howells <dhowells@...hat.com>
To: Christian Brauner <christian@...uner.io>,
Steve French <smfrench@...il.com>,
Matthew Wilcox <willy@...radead.org>
Cc: David Howells <dhowells@...hat.com>,
Jeff Layton <jlayton@...nel.org>,
Gao Xiang <hsiangkao@...ux.alibaba.com>,
Dominique Martinet <asmadeus@...ewreck.org>,
Marc Dionne <marc.dionne@...istor.com>,
Paulo Alcantara <pc@...guebit.com>,
Shyam Prasad N <sprasad@...rosoft.com>,
Tom Talpey <tom@...pey.com>,
Eric Van Hensbergen <ericvh@...nel.org>,
Ilya Dryomov <idryomov@...il.com>,
netfs@...ts.linux.dev,
linux-afs@...ts.infradead.org,
linux-cifs@...r.kernel.org,
linux-nfs@...r.kernel.org,
ceph-devel@...r.kernel.org,
v9fs@...ts.linux.dev,
linux-erofs@...ts.ozlabs.org,
linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 10/25] netfs: Set the request work function upon allocation

Set the work function in the netfs_io_request work_struct when we allocate
the request rather than doing it later. This reduces the number of places
where the function needs to be set in future code.

Signed-off-by: David Howells <dhowells@...hat.com>
cc: Jeff Layton <jlayton@...nel.org>
cc: netfs@...ts.linux.dev
cc: linux-fsdevel@...r.kernel.org
---
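
[Not part of the patch.] For reference, a minimal, compile-only sketch of
the allocation-time dispatch this patch introduces. It is illustrative
only: the enum is cut down to the origins named in the diff, the request
struct is reduced to the two fields involved, and init_request_worker() is
a hypothetical helper - the patch itself open-codes the equivalent if-chain
in netfs_alloc_request():

#include <linux/workqueue.h>

/* Cut-down stand-ins for the real netfs types (illustration only). */
enum netfs_io_origin {
	NETFS_READAHEAD,
	NETFS_READPAGE,
	NETFS_READ_FOR_WRITE,
	NETFS_DIO_READ,
	NETFS_WRITEBACK,	/* other write-side origins elided */
};

struct netfs_io_request {
	struct work_struct	work;
	enum netfs_io_origin	origin;
	/* many more fields in the real struct */
};

/* Collector workers, as named in the diff below. */
void netfs_rreq_work(struct work_struct *work);
void netfs_write_collection_worker(struct work_struct *work);

/* Pick the worker once, at allocation time, from the request origin. */
static void init_request_worker(struct netfs_io_request *rreq)
{
	switch (rreq->origin) {
	case NETFS_READAHEAD:
	case NETFS_READPAGE:
	case NETFS_READ_FOR_WRITE:
	case NETFS_DIO_READ:
		INIT_WORK(&rreq->work, netfs_rreq_work);
		break;
	default:
		INIT_WORK(&rreq->work, netfs_write_collection_worker);
		break;
	}
}

With the worker fixed at allocation, later paths can simply queue
rreq->work without first working out which collector the request needs.
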
 fs/netfs/internal.h    | 1 +
 fs/netfs/io.c          | 4 +---
 fs/netfs/objects.c     | 9 ++++++++-
 fs/netfs/write_issue.c | 1 -
 4 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 9e6e0e59d7e4..f2920b4ee726 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -29,6 +29,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 /*
  * io.c
  */
+void netfs_rreq_work(struct work_struct *work);
 int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
 
 /*
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index ce3e821b4e4f..8b9aaa99d787 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -422,7 +422,7 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
 	netfs_rreq_completed(rreq, was_async);
 }
 
-static void netfs_rreq_work(struct work_struct *work)
+void netfs_rreq_work(struct work_struct *work)
 {
 	struct netfs_io_request *rreq =
 		container_of(work, struct netfs_io_request, work);
@@ -734,8 +734,6 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 	// TODO: Use bounce buffer if requested
 	rreq->io_iter = rreq->iter;
 
-	INIT_WORK(&rreq->work, netfs_rreq_work);
-
 	/* Chop the read into slices according to what the cache and the netfs
 	 * want and submit each one.
 	 */
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 0294df70c3ff..d6e9785ce7a3 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -48,9 +48,16 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 	INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
 	INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
 	INIT_LIST_HEAD(&rreq->subrequests);
-	INIT_WORK(&rreq->work, NULL);
 	refcount_set(&rreq->ref, 1);
 
+	if (origin == NETFS_READAHEAD ||
+	    origin == NETFS_READPAGE ||
+	    origin == NETFS_READ_FOR_WRITE ||
+	    origin == NETFS_DIO_READ)
+		INIT_WORK(&rreq->work, netfs_rreq_work);
+	else
+		INIT_WORK(&rreq->work, netfs_write_collection_worker);
+
 	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 	if (file && file->f_flags & O_NONBLOCK)
 		__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 34e541afd79b..41db709ca1d3 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -109,7 +109,6 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
 
 	wreq->contiguity = wreq->start;
 	wreq->cleaned_to = wreq->start;
-	INIT_WORK(&wreq->work, netfs_write_collection_worker);
 
 	wreq->io_streams[0].stream_nr = 0;
 	wreq->io_streams[0].source = NETFS_UPLOAD_TO_SERVER;