[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230316152618.711970-3-dhowells@redhat.com>
Date: Thu, 16 Mar 2023 15:25:52 +0000
From: David Howells <dhowells@...hat.com>
To: Matthew Wilcox <willy@...radead.org>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: David Howells <dhowells@...hat.com>,
Al Viro <viro@...iv.linux.org.uk>,
Christoph Hellwig <hch@...radead.org>,
Jens Axboe <axboe@...nel.dk>, Jeff Layton <jlayton@...nel.org>,
Christian Brauner <brauner@...nel.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
netdev@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Bernard Metzler <bmt@...ich.ibm.com>,
Tom Talpey <tom@...pey.com>, linux-rdma@...r.kernel.org
Subject: [RFC PATCH 02/28] Add a special allocator for staging netfs protocol to MSG_SPLICE_PAGES
If a network protocol sendmsg() sees MSG_SPLICE_PAGES, it expects that the
iterator is of ITER_BVEC type and that all the pages can have refs taken on
them with get_page() and discarded with put_page(). Bits of network
filesystem protocol data, however, are typically contained in slab memory
for which the cleanup method is kfree(), not put_page(), so this doesn't
work.
Provide a simple allocator, zcopy_alloc(), that allocates a page at a time
per-cpu and sequentially breaks off pieces and hands them out with a ref as
it's asked for them. The caller disposes of the memory it was given by
calling put_page(). When a page is all parcelled out, it is abandoned by
the allocator and another page is obtained. The page will get cleaned up
when the last skbuff fragment is destroyed.
A helper function, zcopy_memdup(), is provided to call zcopy_alloc() and
copy the data it is given into it.
[!] I'm not sure this is the best way to do things. A better way might be
to make the network protocol look at the page and copy it if it's a
slab object rather than taking a ref on it.
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Bernard Metzler <bmt@...ich.ibm.com>
cc: Tom Talpey <tom@...pey.com>
cc: "David S. Miller" <davem@...emloft.net>
cc: Eric Dumazet <edumazet@...gle.com>
cc: Jakub Kicinski <kuba@...nel.org>
cc: Paolo Abeni <pabeni@...hat.com>
cc: Jens Axboe <axboe@...nel.dk>
cc: Matthew Wilcox <willy@...radead.org>
cc: linux-rdma@...r.kernel.org
cc: netdev@...r.kernel.org
---
include/linux/zcopy_alloc.h | 16 +++++
mm/Makefile | 2 +-
mm/zcopy_alloc.c | 129 ++++++++++++++++++++++++++++++++++++
3 files changed, 146 insertions(+), 1 deletion(-)
create mode 100644 include/linux/zcopy_alloc.h
create mode 100644 mm/zcopy_alloc.c
diff --git a/include/linux/zcopy_alloc.h b/include/linux/zcopy_alloc.h
new file mode 100644
index 000000000000..8eb205678073
--- /dev/null
+++ b/include/linux/zcopy_alloc.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Defs for zerocopy filler fragment allocator.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@...hat.com)
+ */
+
+#ifndef _LINUX_ZCOPY_ALLOC_H
+#define _LINUX_ZCOPY_ALLOC_H
+
+struct bio_vec;
+
+/* Allocate a fragment, describing it in *bvec; dispose of it with folio_put() */
+int zcopy_alloc(size_t size, struct bio_vec *bvec, gfp_t gfp);
+/* As zcopy_alloc(), but also copy size bytes from p into the fragment */
+int zcopy_memdup(size_t size, const void *p, struct bio_vec *bvec, gfp_t gfp);
+
+#endif /* _LINUX_ZCOPY_ALLOC_H */
diff --git a/mm/Makefile b/mm/Makefile
index 8e105e5b3e29..3848f43751ee 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -52,7 +52,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
util.o mmzone.o vmstat.o backing-dev.o \
mm_init.o percpu.o slab_common.o \
- compaction.o \
+ compaction.o zcopy_alloc.o \
interval_tree.o list_lru.o workingset.o \
debug.o gup.o mmap_lock.o $(mmu-y)
diff --git a/mm/zcopy_alloc.c b/mm/zcopy_alloc.c
new file mode 100644
index 000000000000..7b219392e829
--- /dev/null
+++ b/mm/zcopy_alloc.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Allocator for zerocopy filler fragments
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@...hat.com)
+ *
+ * Provide a facility whereby pieces of bufferage can be allocated for
+ * insertion into bio_vec arrays intended for zerocopying, allowing protocol
+ * stuff to be mixed in with data.
+ *
+ * Unlike objects allocated from the slab, the lifetime of these pieces of
+ * buffer are governed purely by the refcount of the page in which they reside.
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/zcopy_alloc.h>
+#include <linux/bvec.h>
+
+struct zcopy_alloc_info {
+ struct folio *folio; /* Page currently being allocated from */
+ struct folio *spare; /* Spare page */
+ unsigned int used; /* Amount of folio used */
+ spinlock_t lock; /* Allocation lock (needs bh-disable); NOTE(review): declared but never taken in this patch - confirm intent */
+};
+
+/* Per-CPU allocator state; accessed inside get_cpu_ptr()/put_cpu_ptr() */
+static struct zcopy_alloc_info __percpu *zcopy_alloc_info;
+
+/* Set up the per-CPU allocator state at boot; failure here is fatal. */
+static int __init zcopy_alloc_init(void)
+{
+ zcopy_alloc_info = alloc_percpu(struct zcopy_alloc_info);
+ if (!zcopy_alloc_info)
+ panic("Unable to set up zcopy_alloc allocator\n");
+ return 0;
+}
+subsys_initcall(zcopy_alloc_init);
+
+/**
+ * zcopy_alloc - Allocate some memory for use in zerocopy
+ * @size: The amount of memory (maximum 1/2 page).
+ * @bvec: Where to store the details of the memory
+ * @gfp: Allocation flags under which to make an allocation
+ *
+ * Allocate some memory for use with zerocopy where protocol bits have to be
+ * mixed in with spliced/zerocopied data. Unlike memory allocated from the
+ * slab, this memory's lifetime is purely dependent on the folio's refcount.
+ *
+ * The way it works is that a folio is allocated and pieces are broken off
+ * sequentially and given to the allocators with a ref until it no longer has
+ * enough spare space, at which point the allocator's ref is dropped and a new
+ * folio is allocated. The folio remains in existence until the last ref held
+ * by, say, a sk_buff is discarded and then the page is returned to the
+ * allocator.
+ *
+ * Returns 0 on success and -ENOMEM on allocation failure. If successful, the
+ * details of the allocated memory are placed in *%bvec.
+ *
+ * The allocated memory should be disposed of with folio_put().
+ */
+int zcopy_alloc(size_t size, struct bio_vec *bvec, gfp_t gfp)
+{
+ struct zcopy_alloc_info *info;
+ struct folio *folio, *spare = NULL;
+ size_t full_size = round_up(size, 8);
+
+ if (WARN_ON_ONCE(full_size > PAGE_SIZE / 2))
+ return -ENOMEM; /* Allocations are capped at half a page */
+
+try_again:
+ info = get_cpu_ptr(zcopy_alloc_info);
+
+ /* Abandon the current folio if it hasn't enough space left for this
+ * request; outstanding refs (e.g. held by skbuffs) keep it alive.
+ */
+ folio = info->folio;
+ if (folio && folio_size(folio) - info->used < full_size) {
+ folio_put(folio);
+ folio = info->folio = NULL;
+ }
+ /* Stash a folio allocated on a previous pass if there's a free slot */
+ if (spare && !info->spare) {
+ info->spare = spare;
+ spare = NULL;
+ }
+ /* Promote the spare folio to be the current allocation folio */
+ if (!folio && info->spare) {
+ folio = info->folio = info->spare;
+ info->spare = NULL;
+ info->used = 0;
+ }
+ if (folio) {
+ bvec_set_folio(bvec, folio, size, info->used);
+ info->used += full_size;
+ if (info->used < folio_size(folio))
+ folio_get(folio); /* Extra ref goes out with the bvec */
+ else
+ info->folio = NULL; /* Full: hand our last ref to the caller */
+ }
+
+ put_cpu_ptr(zcopy_alloc_info);
+ if (folio) {
+ if (spare)
+ folio_put(spare); /* Allocated but unneeded */
+ return 0;
+ }
+
+ /* Allocate a folio outside the percpu-pinned section and retry */
+ spare = folio_alloc(gfp, 0);
+ if (!spare)
+ return -ENOMEM;
+ goto try_again;
+}
+
+/**
+ * zcopy_memdup - Allocate some memory for use in zerocopy and fill it
+ * @size: The amount of memory to copy (maximum 1/2 page).
+ * @p: The source data to copy
+ * @bvec: Where to store the details of the memory
+ * @gfp: Allocation flags under which to make an allocation
+ *
+ * Returns 0 on success and -ENOMEM on allocation failure.  On success, the
+ * copy is described by *@bvec and should be disposed of with folio_put().
+ */
+int zcopy_memdup(size_t size, const void *p, struct bio_vec *bvec, gfp_t gfp)
+{
+ void *q;
+
+ if (zcopy_alloc(size, bvec, gfp) < 0)
+ return -ENOMEM;
+
+ /* The fragment fits within one order-0 folio, so one kmap suffices */
+ q = kmap_local_folio(page_folio(bvec->bv_page), bvec->bv_offset);
+ memcpy(q, p, size);
+ kunmap_local(q);
+ return 0;
+}
+EXPORT_SYMBOL(zcopy_memdup);
Powered by blists - more mailing lists