[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260209175317.1713406-3-zhipingz@meta.com>
Date: Mon, 9 Feb 2026 09:53:12 -0800
From: Zhiping Zhang <zhipingz@...a.com>
To: Jason Gunthorpe <jgg@...pe.ca>, Leon Romanovsky <leon@...nel.org>,
Bjorn
Helgaas <bhelgaas@...gle.com>, <linux-rdma@...r.kernel.org>,
<linux-pci@...r.kernel.org>, <netdev@...r.kernel.org>,
Keith Busch
<kbusch@...nel.org>, Yochai Cohen <yochai@...dia.com>,
Yishai Hadas
<yishaih@...dia.com>
CC: Bjorn Helgaas <helgaas@...nel.org>, Zhiping Zhang <zhipingz@...a.com>
Subject: [RFC 2/2] RDMA MLX5: get tph for p2p access when registering dmabuf mr
This patch adds a local function that checks for and retrieves TPH info, when
available, during dmabuf MR registration. Note that the DMAH workflow for CPU
still takes precedence in the process. Currently, it only works with the direct
st_mode. Compatibility with other st_modes will be added in the formal patch set.
Signed-off-by: Zhiping Zhang <zhipingz@...a.com>
---
drivers/infiniband/hw/mlx5/mr.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 325fa04cbe8a..c3eb5b24ef29 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -46,6 +46,8 @@
#include "data_direct.h"
#include "dmah.h"
+MODULE_IMPORT_NS("DMA_BUF");
+
enum {
MAX_PENDING_REG_MR = 8,
};
@@ -1623,6 +1625,32 @@ static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
.move_notify = mlx5_ib_dmabuf_invalidate_cb,
};
+/* Query TPH steering-tag info from the dmabuf exporter, if it supports it. */
+static void get_tph_mr_dmabuf(struct mlx5_ib_dev *dev, int fd, u16 *st_index,
+			      u8 *ph)
+{
+	int ret;
+	struct dma_buf *dmabuf;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR(dmabuf))
+		return;
+
+	if (!dmabuf->ops->get_tph)
+		goto end_dbuf_put;
+
+	ret = dmabuf->ops->get_tph(dmabuf, st_index, ph);
+	if (ret) {
+		/* Exporter failed; fall back to the no-TPH defaults. */
+		*st_index = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX;
+		*ph = MLX5_IB_NO_PH;
+		mlx5_ib_dbg(dev, "get_tph failed (%d)\n", ret);
+	}
+
+end_dbuf_put:
+	dma_buf_put(dmabuf);
+}
+
static struct ib_mr *
reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
u64 offset, u64 length, u64 virt_addr,
@@ -1662,6 +1690,8 @@ reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
ph = dmah->ph;
if (dmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
st_index = mdmah->st_index;
+ } else {
+ get_tph_mr_dmabuf(dev, fd, &st_index, &ph);
}
mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
--
2.47.3
Powered by blists - more mailing lists