[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <f57269489c4d6f670ab1f9de4d0764030d8d080c.1763725387.git.asml.silence@gmail.com>
Date: Sun, 23 Nov 2025 22:51:22 +0000
From: Pavel Begunkov <asml.silence@...il.com>
To: linux-block@...r.kernel.org,
io-uring@...r.kernel.org
Cc: Vishal Verma <vishal1.verma@...el.com>,
tushar.gohad@...el.com,
Keith Busch <kbusch@...nel.org>,
Jens Axboe <axboe@...nel.dk>,
Christoph Hellwig <hch@....de>,
Sagi Grimberg <sagi@...mberg.me>,
Alexander Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Sumit Semwal <sumit.semwal@...aro.org>,
Christian König <christian.koenig@....com>,
Pavel Begunkov <asml.silence@...il.com>,
linux-kernel@...r.kernel.org,
linux-nvme@...ts.infradead.org,
linux-fsdevel@...r.kernel.org,
linux-media@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
linaro-mm-sig@...ts.linaro.org
Subject: [RFC v2 02/11] iov_iter: introduce iter type for pre-registered dma
Introduce a new iterator type backed by a pre-mapped dmabuf represented
by struct dma_token. The token is specific to the file for which it was
created, and the user must avoid passing the token and the iterator to
any other file. This limitation will be softened in the future.
Suggested-by: Keith Busch <kbusch@...nel.org>
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
include/linux/uio.h | 10 ++++++++++
lib/iov_iter.c | 30 ++++++++++++++++++++++++------
2 files changed, 34 insertions(+), 6 deletions(-)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 5b127043a151..1b22594ca35b 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -29,6 +29,7 @@ enum iter_type {
ITER_FOLIOQ,
ITER_XARRAY,
ITER_DISCARD,
+ ITER_DMA_TOKEN,
};
#define ITER_SOURCE 1 // == WRITE
@@ -71,6 +72,7 @@ struct iov_iter {
const struct folio_queue *folioq;
struct xarray *xarray;
void __user *ubuf;
+ struct dma_token *dma_token;
};
size_t count;
};
@@ -155,6 +157,11 @@ static inline bool iov_iter_is_xarray(const struct iov_iter *i)
return iov_iter_type(i) == ITER_XARRAY;
}
+static inline bool iov_iter_is_dma_token(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_DMA_TOKEN;
+}
+
static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
return i->data_source ? WRITE : READ;
@@ -300,6 +307,9 @@ void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
unsigned int first_slot, unsigned int offset, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
loff_t start, size_t count);
+void iov_iter_dma_token(struct iov_iter *i, unsigned int direction,
+ struct dma_token *token,
+ loff_t off, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 2fe66a6b8789..26fa8f8f13c0 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -563,7 +563,8 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
{
if (unlikely(i->count < size))
size = i->count;
- if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
+ if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i)) ||
+ unlikely(iov_iter_is_dma_token(i))) {
i->iov_offset += size;
i->count -= size;
} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
@@ -619,7 +620,8 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
return;
}
unroll -= i->iov_offset;
- if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
+ if (iov_iter_is_xarray(i) || iter_is_ubuf(i) ||
+ iov_iter_is_dma_token(i)) {
BUG(); /* We should never go beyond the start of the specified
* range since we might then be straying into pages that
* aren't pinned.
@@ -763,6 +765,21 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
}
EXPORT_SYMBOL(iov_iter_xarray);
+/**
+ * iov_iter_dma_token - Initialise an iterator backed by a pre-mapped DMA token
+ * @i: The iterator to initialise.
+ * @direction: The direction of the transfer (ITER_SOURCE or ITER_DEST).
+ * @token: The dma_token describing the pre-registered mapping.
+ * @off: Offset into the mapped region at which to start.
+ * @count: The size of the I/O in bytes.
+ *
+ * Set up an I/O iterator of type ITER_DMA_TOKEN. The token is bound to the
+ * file it was created for; callers must not use the iterator with any other
+ * file.
+ */
+void iov_iter_dma_token(struct iov_iter *i, unsigned int direction,
+			struct dma_token *token,
+			loff_t off, size_t count)
+{
+	WARN_ON(direction & ~(READ | WRITE));
+	*i = (struct iov_iter){
+		.iter_type	= ITER_DMA_TOKEN,
+		.data_source	= direction,
+		.dma_token	= token,
+		/* was initialised twice (0, then off); keep only the real value */
+		.iov_offset	= off,
+		.count		= count,
+	};
+}
+
+
/**
* iov_iter_discard - Initialise an I/O iterator that discards data
* @i: The iterator to initialise.
@@ -829,7 +846,7 @@ static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
- if (likely(iter_is_ubuf(i))) {
+ if (likely(iter_is_ubuf(i)) || iov_iter_is_dma_token(i)) {
size_t size = i->count;
if (size)
return ((unsigned long)i->ubuf + i->iov_offset) | size;
@@ -860,7 +877,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
size_t size = i->count;
unsigned k;
- if (iter_is_ubuf(i))
+ if (iter_is_ubuf(i) || iov_iter_is_dma_token(i))
return 0;
if (WARN_ON(!iter_is_iovec(i)))
@@ -1457,11 +1474,12 @@ EXPORT_SYMBOL_GPL(import_ubuf);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
- !iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
+ !iter_is_ubuf(i) && !iov_iter_is_kvec(i) &&
+ !iov_iter_is_dma_token(i)))
return;
i->iov_offset = state->iov_offset;
i->count = state->count;
- if (iter_is_ubuf(i))
+ if (iter_is_ubuf(i) || iov_iter_is_dma_token(i))
return;
/*
* For the *vec iters, nr_segs + iov is constant - if we increment
--
2.52.0
Powered by blists - more mailing lists