[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <b38f2c3af8c03ee4fc5f67f97b4412ecd8588924.1763725388.git.asml.silence@gmail.com>
Date: Sun, 23 Nov 2025 22:51:30 +0000
From: Pavel Begunkov <asml.silence@...il.com>
To: linux-block@...r.kernel.org,
io-uring@...r.kernel.org
Cc: Vishal Verma <vishal1.verma@...el.com>,
tushar.gohad@...el.com,
Keith Busch <kbusch@...nel.org>,
Jens Axboe <axboe@...nel.dk>,
Christoph Hellwig <hch@....de>,
Sagi Grimberg <sagi@...mberg.me>,
Alexander Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Sumit Semwal <sumit.semwal@...aro.org>,
Christian König <christian.koenig@....com>,
Pavel Begunkov <asml.silence@...il.com>,
linux-kernel@...r.kernel.org,
linux-nvme@...ts.infradead.org,
linux-fsdevel@...r.kernel.org,
linux-media@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
linaro-mm-sig@...ts.linaro.org,
David Wei <dw@...idwei.uk>
Subject: [RFC v2 10/11] io_uring/rsrc: add dmabuf-backed buffer registration
Add an ability to register a dmabuf backed io_uring buffer. It also
needs to know which device to use for attachment; for that it takes
target_fd and extracts the device through the new file op. Unlike normal
buffers, it also retains the target file so that any imports from
ineligible requests can be rejected in subsequent patches.
Suggested-by: Vishal Verma <vishal1.verma@...el.com>
Suggested-by: David Wei <dw@...idwei.uk>
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
io_uring/rsrc.c | 106 +++++++++++++++++++++++++++++++++++++++++++++++-
io_uring/rsrc.h | 1 +
2 files changed, 106 insertions(+), 1 deletion(-)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 691f9645d04c..7dfebf459dd0 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -10,6 +10,8 @@
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/io_uring/cmd.h>
+#include <linux/dma-buf.h>
+#include <linux/dma_token.h>
#include <uapi/linux/io_uring.h>
@@ -802,6 +804,106 @@ bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
return true;
}
+struct io_regbuf_dma {
+ struct dma_token *token;
+ struct file *target_file;
+ struct dma_buf *dmabuf;
+};
+
+static void io_release_reg_dmabuf(void *priv)
+{
+ struct io_regbuf_dma *db = priv;
+
+ dma_token_release(db->token);
+ dma_buf_put(db->dmabuf);
+ fput(db->target_file);
+ kfree(db);
+}
+
+static struct io_rsrc_node *io_register_dmabuf(struct io_ring_ctx *ctx,
+ struct io_uring_reg_buffer *rb,
+ struct iovec *iov)
+{
+ struct dma_token_params params = {};
+ struct io_rsrc_node *node = NULL;
+ struct io_mapped_ubuf *imu = NULL;
+ struct io_regbuf_dma *regbuf = NULL;
+ struct file *target_file = NULL;
+ struct dma_buf *dmabuf = NULL;
+ struct dma_token *token;
+ int ret;
+
+ if (iov->iov_base || iov->iov_len)
+ return ERR_PTR(-EFAULT);
+
+ node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
+ if (!node) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ imu = io_alloc_imu(ctx, 0);
+ if (!imu) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ regbuf = kzalloc(sizeof(*regbuf), GFP_KERNEL);
+ if (!regbuf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ target_file = fget(rb->target_fd);
+ if (!target_file) {
+ ret = -EBADF;
+ goto err;
+ }
+
+ dmabuf = dma_buf_get(rb->dmabuf_fd);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ dmabuf = NULL;
+ goto err;
+ }
+
+ params.dmabuf = dmabuf;
+ params.dir = DMA_BIDIRECTIONAL;
+ token = dma_token_create(target_file, ¶ms);
+ if (IS_ERR(token)) {
+ ret = PTR_ERR(token);
+ goto err;
+ }
+
+ regbuf->target_file = target_file;
+ regbuf->token = token;
+ regbuf->dmabuf = dmabuf;
+
+ imu->nr_bvecs = 1;
+ imu->ubuf = 0;
+ imu->len = dmabuf->size;
+ imu->folio_shift = 0;
+ imu->release = io_release_reg_dmabuf;
+ imu->priv = regbuf;
+ imu->flags = IO_IMU_F_DMA;
+ imu->dir = IO_IMU_DEST | IO_IMU_SOURCE;
+ refcount_set(&imu->refs, 1);
+ node->buf = imu;
+ return node;
+err:
+ if (regbuf)
+ kfree(regbuf);
+ if (imu)
+ io_free_imu(ctx, imu);
+ if (node)
+ io_cache_free(&ctx->node_cache, node);
+ if (target_file)
+ fput(target_file);
+ if (dmabuf)
+ dma_buf_put(dmabuf);
+ return ERR_PTR(ret);
+}
+
static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
struct io_uring_reg_buffer *rb,
struct iovec *iov,
@@ -817,7 +919,7 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
bool coalesced = false;
if (rb->dmabuf_fd != -1 || rb->target_fd != -1)
- return NULL;
+ return io_register_dmabuf(ctx, rb, iov);
if (!iov->iov_base)
return NULL;
@@ -1117,6 +1219,8 @@ static int io_import_fixed(int ddir, struct iov_iter *iter,
offset = buf_addr - imu->ubuf;
+ if (imu->flags & IO_IMU_F_DMA)
+ return -EOPNOTSUPP;
if (imu->flags & IO_IMU_F_KBUF)
return io_import_kbuf(ddir, iter, imu, len, offset);
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 7c1128a856ec..280d3988abf3 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -30,6 +30,7 @@ enum {
enum {
IO_IMU_F_KBUF = 1,
+ IO_IMU_F_DMA = 2,
};
struct io_mapped_ubuf {
--
2.52.0
Powered by blists - more mailing lists