Message-ID: <20230710223304.1174642-4-almasrymina@google.com>
Date: Mon, 10 Jul 2023 15:32:54 -0700
From: Mina Almasry <almasrymina@...gle.com>
To: linux-kernel@...r.kernel.org, linux-media@...r.kernel.org,
dri-devel@...ts.freedesktop.org, linaro-mm-sig@...ts.linaro.org,
netdev@...r.kernel.org, linux-arch@...r.kernel.org,
linux-kselftest@...r.kernel.org
Cc: Mina Almasry <almasrymina@...gle.com>,
Sumit Semwal <sumit.semwal@...aro.org>,
"Christian König" <christian.koenig@....com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
Ilias Apalodimas <ilias.apalodimas@...aro.org>,
Arnd Bergmann <arnd@...db.de>,
David Ahern <dsahern@...nel.org>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
Shuah Khan <shuah@...nel.org>, jgg@...pe.ca
Subject: [RFC PATCH 03/10] dma-buf: add support for NET_TX pages
Use the paged attachment mappings support to create NET_TX pages.
NET_TX pages can be used in the networking transmit path:

1. Create an iov_iter and an array of bio_vec entries to represent this
   dmabuf.
2. Initialize each bio_vec with the backing dmabuf pages (see the usage
   sketch below).
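
For illustration, the sketch below shows one way a transmit-path consumer
could read a byte range out of the dmabuf through that iterator. It is not
part of this patch: the helper name net_tx_copy_range() is made up, and
copy_from_iter() is only one possible reader of such a bvec-backed iter
(it also assumes the backing pages are CPU-addressable).

  /*
   * Hedged sketch, not part of this patch: consume the iov_iter that
   * dma_buf_pages_net_tx_init() builds over the dmabuf pages.
   */
  #include <linux/uio.h>
  #include <linux/dma-buf.h>

  static size_t net_tx_copy_range(struct dma_buf_pages *priv, void *dst,
                                  size_t offset, size_t len)
  {
          /* Work on a stack copy so the template iter stays untouched. */
          struct iov_iter iter = priv->net_tx.iter;

          /* Skip to the requested offset inside the dmabuf... */
          iov_iter_advance(&iter, offset);

          /* ...and copy out up to len bytes; returns bytes copied. */
          return copy_from_iter(dst, len, &iter);
  }

Working on a copy of priv->net_tx.iter keeps the iterator built at init
time valid across repeated sends.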
Signed-off-by: Mina Almasry <almasrymina@...gle.com>
---
drivers/dma-buf/dma-buf.c | 47 ++++++++++++++++++++++++++++++++++++
include/linux/dma-buf.h | 7 ++++++
include/uapi/linux/dma-buf.h | 1 +
3 files changed, 55 insertions(+)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index acb86bf406f4..3ca71297b9b4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1683,6 +1683,7 @@ static void dma_buf_pages_destroy(struct percpu_ref *ref)
}
const struct dma_buf_pages_type_ops net_rx_ops;
+const struct dma_buf_pages_type_ops net_tx_ops;
static long dma_buf_create_pages(struct file *file,
struct dma_buf_create_pages_info *create_info)
@@ -1799,6 +1800,9 @@ static long dma_buf_create_pages(struct file *file,
case DMA_BUF_PAGES_NET_RX:
priv->type_ops = &net_rx_ops;
break;
+ case DMA_BUF_PAGES_NET_TX:
+ priv->type_ops = &net_tx_ops;
+ break;
default:
err = -EINVAL;
goto out_put_new_file;
@@ -2140,3 +2144,46 @@ struct page *dma_buf_pages_net_rx_alloc(struct dma_buf_pages *priv)
percpu_ref_get(&priv->pgmap.ref);
return pg;
}
+
+/********************************
+ * dma_buf_pages_net_tx *
+ ********************************/
+
+static void dma_buf_pages_net_tx_release(struct dma_buf_pages *priv,
+ struct file *file)
+{
+ int i;
+ for (i = 0; i < priv->num_pages; i++)
+ put_page(&priv->pages[i]);
+}
+
+static int dma_buf_pages_net_tx_init(struct dma_buf_pages *priv,
+ struct file *file)
+{
+ int i;
+ priv->net_tx.tx_bv = kvmalloc_array(priv->num_pages,
+ sizeof(struct bio_vec), GFP_KERNEL);
+ if (!priv->net_tx.tx_bv)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->num_pages; i++) {
+ priv->net_tx.tx_bv[i].bv_page = &priv->pages[i];
+ priv->net_tx.tx_bv[i].bv_offset = 0;
+ priv->net_tx.tx_bv[i].bv_len = PAGE_SIZE;
+ }
+ percpu_ref_get_many(&priv->pgmap.ref, priv->num_pages);
+ iov_iter_bvec(&priv->net_tx.iter, WRITE, priv->net_tx.tx_bv,
+ priv->num_pages, priv->dmabuf->size);
+ return 0;
+}
+
+static void dma_buf_pages_net_tx_free(struct dma_buf_pages *priv)
+{
+ kvfree(priv->net_tx.tx_bv);
+}
+
+const struct dma_buf_pages_type_ops net_tx_ops = {
+ .dma_buf_pages_init = dma_buf_pages_net_tx_init,
+ .dma_buf_pages_release = dma_buf_pages_net_tx_release,
+ .dma_buf_pages_destroy = dma_buf_pages_net_tx_free,
+};
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index e8e66d6407d0..93228a2fec47 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>
+#include <linux/uio.h>
#include <linux/genalloc.h>
#include <linux/xarray.h>
#include <net/page_pool.h>
@@ -555,6 +556,11 @@ struct dma_buf_pages_type_ops {
struct page *page);
};
+struct dma_buf_pages_net_tx {
+ struct iov_iter iter;
+ struct bio_vec *tx_bv;
+};
+
struct dma_buf_pages_net_rx {
struct gen_pool *page_pool;
struct xarray bound_rxq_list;
@@ -579,6 +585,7 @@ struct dma_buf_pages {
union {
struct dma_buf_pages_net_rx net_rx;
+ struct dma_buf_pages_net_tx net_tx;
};
};
diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
index b392cef9d3c6..546f211a7556 100644
--- a/include/uapi/linux/dma-buf.h
+++ b/include/uapi/linux/dma-buf.h
@@ -187,6 +187,7 @@ struct dma_buf_create_pages_info {
};
#define DMA_BUF_PAGES_NET_RX (1 << 0)
+#define DMA_BUF_PAGES_NET_TX (1 << 1)
#define DMA_BUF_CREATE_PAGES _IOW(DMA_BUF_BASE, 4, struct dma_buf_create_pages_info)
--
2.41.0.390.g38632f3daf-goog