Message-ID: <20250806203705.2560493-16-dhowells@redhat.com>
Date: Wed, 6 Aug 2025 21:36:36 +0100
From: David Howells <dhowells@...hat.com>
To: Steve French <sfrench@...ba.org>
Cc: David Howells <dhowells@...hat.com>,
Paulo Alcantara <pc@...guebit.org>,
Shyam Prasad N <sprasad@...rosoft.com>,
Tom Talpey <tom@...pey.com>,
Wang Zhaolong <wangzhaolong@...weicloud.com>,
Stefan Metzmacher <metze@...ba.org>,
Mina Almasry <almasrymina@...gle.com>,
linux-cifs@...r.kernel.org,
linux-kernel@...r.kernel.org,
netfs@...ts.linux.dev,
linux-fsdevel@...r.kernel.org
Subject: [RFC PATCH 15/31] cifs: Use netfs_alloc/free_folioq_buffer()

Use netfs_alloc/free_folioq_buffer() rather than having cifs open-code its
own versions.
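
For anyone not yet familiar with the netfs helpers, the call pattern this
patch converts to looks roughly like the sketch below.  This is an
illustrative fragment distilled from the hunks that follow, not a verbatim
excerpt, and it assumes the NULL first argument means "allocate folios
directly rather than drawing them from a mempool":

	struct folio_queue *buffer = NULL;
	size_t cur_size = 0;
	int rc;

	/* Grow *buffer until it covers at least size bytes; cur_size
	 * tracks how much space has been attached so far.
	 */
	rc = netfs_alloc_folioq_buffer(NULL, &buffer, &cur_size, size, GFP_NOFS);
	if (rc < 0)
		goto err_free;

	/* ... use the buffer ... */

	/* Put the folios the allocator attached and free the queue structs. */
	netfs_free_folioq_buffer(buffer);
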
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Steve French <sfrench@...ba.org>
cc: Paulo Alcantara <pc@...guebit.org>
cc: Shyam Prasad N <sprasad@...rosoft.com>
cc: Tom Talpey <tom@...pey.com> (RDMA, smbdirect)
cc: linux-cifs@...r.kernel.org
cc: netfs@...ts.linux.dev
cc: linux-fsdevel@...r.kernel.org
---
fs/smb/client/smb2ops.c | 73 ++++++-----------------------------------
1 file changed, 10 insertions(+), 63 deletions(-)

diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 0ad4a2a012a0..161cef316346 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -4446,61 +4446,6 @@ decrypt_message(struct TCP_Server_Info *server, int num_rqst,
return rc;
}
-/*
- * Clear a read buffer, discarding the folios which have the 1st mark set.
- */
-static void cifs_clear_folioq_buffer(struct folio_queue *buffer)
-{
- struct folio_queue *folioq;
-
- while ((folioq = buffer)) {
- for (int s = 0; s < folioq_count(folioq); s++)
- if (folioq_is_marked(folioq, s))
- folio_put(folioq_folio(folioq, s));
- buffer = folioq->next;
- kfree(folioq);
- }
-}
-
-/*
- * Allocate buffer space into a folio queue.
- */
-static struct folio_queue *cifs_alloc_folioq_buffer(ssize_t size)
-{
- struct folio_queue *buffer = NULL, *tail = NULL, *p;
- struct folio *folio;
- unsigned int slot;
-
- do {
- if (!tail || folioq_full(tail)) {
- p = kmalloc(sizeof(*p), GFP_NOFS);
- if (!p)
- goto nomem;
- folioq_init(p, 0);
- if (tail) {
- tail->next = p;
- p->prev = tail;
- } else {
- buffer = p;
- }
- tail = p;
- }
-
- folio = folio_alloc(GFP_KERNEL|__GFP_HIGHMEM, 0);
- if (!folio)
- goto nomem;
-
- slot = folioq_append_mark(tail, folio);
- size -= folioq_folio_size(tail, slot);
- } while (size > 0);
-
- return buffer;
-
-nomem:
- cifs_clear_folioq_buffer(buffer);
- return NULL;
-}
-
/*
* Copy data from an iterator to the folios in a folio queue buffer.
*/
@@ -4526,7 +4471,7 @@ void
smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
{
for (int i = 0; i < num_rqst; i++)
- cifs_clear_folioq_buffer(rqst[i].rq_buffer);
+ netfs_free_folioq_buffer(rqst[i].rq_buffer);
}
/*
@@ -4561,8 +4506,10 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
new->rq_nvec = old->rq_nvec;
if (size > 0) {
- buffer = cifs_alloc_folioq_buffer(size);
- if (!buffer)
+ size_t cur_size = 0;
+ rc = netfs_alloc_folioq_buffer(NULL, &buffer, &cur_size,
+ size, GFP_NOFS);
+ if (rc < 0)
goto err_free;
new->rq_buffer = buffer;
@@ -4894,7 +4841,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
}
free_pages:
- cifs_clear_folioq_buffer(dw->buffer);
+ netfs_free_folioq_buffer(dw->buffer);
cifs_small_buf_release(dw->buf);
kfree(dw);
}
@@ -4932,9 +4879,9 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct smb_message **smb,
dw->len = len;
len = round_up(dw->len, PAGE_SIZE);
- rc = -ENOMEM;
- dw->buffer = cifs_alloc_folioq_buffer(len);
- if (!dw->buffer)
+ size_t cur_size = 0;
+ rc = netfs_alloc_folioq_buffer(NULL, &dw->buffer, &cur_size, len, GFP_NOFS);
+ if (rc < 0)
goto discard_data;
iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, len);
@@ -4995,7 +4942,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct smb_message **smb,
}
free_pages:
- cifs_clear_folioq_buffer(dw->buffer);
+ netfs_free_folioq_buffer(dw->buffer);
free_dw:
kfree(dw);
return rc;
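
For comparison with the first hunk: the removed cifs_clear_folioq_buffer()
put only the folios carrying the first mark, and the removed allocator
attached every folio with folioq_append_mark().  The conversion assumes
netfs_alloc_folioq_buffer() marks the folios it attaches in the same way,
so that netfs_free_folioq_buffer() disposes of exactly those folios, along
the lines of this simplified sketch of the removed helper:

	struct folio_queue *next;

	for (; fq; fq = next) {
		/* Put each folio that the allocator marked on append. */
		for (int s = 0; s < folioq_count(fq); s++)
			if (folioq_is_marked(fq, s))
				folio_put(folioq_folio(fq, s));
		next = fq->next;
		kfree(fq);
	}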