Message-Id: <fcbbc3ba9414bfcdb775ad677ee63efbe7e74953.1699503619.git.matsuda-daisuke@fujitsu.com>
Date: Thu, 9 Nov 2023 14:44:51 +0900
From: Daisuke Matsuda <matsuda-daisuke@...itsu.com>
To: linux-rdma@...r.kernel.org, leon@...nel.org, jgg@...pe.ca,
zyjzyj2000@...il.com
Cc: linux-kernel@...r.kernel.org, rpearsonhpe@...il.com,
yangx.jy@...itsu.com, lizhijian@...itsu.com, y-goto@...itsu.com,
Daisuke Matsuda <matsuda-daisuke@...itsu.com>
Subject: [PATCH for-next v7 6/7] RDMA/rxe: Add support for Send/Recv/Write/Read with ODP
rxe_mr_copy() is used widely to copy data to/from a user MR. The requester
uses it to load the payloads of requesting packets; the responder uses it
to process Send, Write, and Read operations; and the completer uses it to
copy data from the response packets of Read and Atomic operations to a
user MR.
Allow these operations to be used with ODP by adding a subordinate function
rxe_odp_mr_copy(). It consists of the following steps (a condensed sketch
follows the list):
1. Check page presence and R/W permission.
2. If OK, just execute data copy to/from the pages and exit.
3. Otherwise, trigger page fault to map the pages.
4. Update the MR xarray using PFNs in umem_odp->pfn_list.
5. Execute data copy to/from the pages.
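The steps map onto the new code roughly as in the condensed sketch below
(error paths trimmed; the complete function is in the rxe_odp.c hunk of
this patch):

    spin_lock(&mr->page_list.xa_lock);
    /* step 1: are all pages present with the needed permission? */
    if (rxe_odp_check_pages(mr, iova, length, flags)) {
            spin_unlock(&mr->page_list.xa_lock);
            /* steps 3-4: fault in the pages; the fault handler fills
             * the MR xarray from umem_odp->pfn_list and returns with
             * umem_mutex held on success */
            err = rxe_odp_do_pagefault_and_lock(mr, iova, length, flags);
            if (err < 0)
                    return err;
            spin_lock(&mr->page_list.xa_lock);
            mutex_unlock(&umem_odp->umem_mutex);
    }
    /* steps 2/5: the copy itself reuses the non-ODP xarray path */
    err = rxe_mr_copy_xarray(mr, iova, addr, length, dir);
    spin_unlock(&mr->page_list.xa_lock);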
Signed-off-by: Daisuke Matsuda <matsuda-daisuke@...itsu.com>
---
drivers/infiniband/sw/rxe/rxe.c | 10 ++++
drivers/infiniband/sw/rxe/rxe_loc.h | 8 +++
drivers/infiniband/sw/rxe/rxe_mr.c | 9 +++-
drivers/infiniband/sw/rxe/rxe_odp.c | 77 +++++++++++++++++++++++++++++
4 files changed, 102 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index f2284d27229b..207a022156f0 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -79,6 +79,16 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
/* IB_ODP_SUPPORT_IMPLICIT is not supported right now. */
rxe->attr.odp_caps.general_caps |= IB_ODP_SUPPORT;
+
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_RECV;
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 4bda154a0248..eeaeff8a1398 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -192,6 +192,8 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
u64 iova, int access_flags, struct rxe_mr *mr);
+int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum rxe_mr_copy_dir dir);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int
rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
@@ -199,6 +201,12 @@ rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
{
return -EOPNOTSUPP;
}
+static inline int
+rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
+ int length, enum rxe_mr_copy_dir dir)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 384cb4ba1f2d..f0ce87c0fc7d 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -247,7 +247,12 @@ int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
void *va;
while (length) {
- page = xa_load(&mr->page_list, index);
+ if (mr->umem->is_odp)
+ page = xa_untag_pointer(xa_load(&mr->page_list,
+ index));
+ else
+ page = xa_load(&mr->page_list, index);
+
if (!page)
return -EFAULT;
@@ -319,7 +324,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
}
if (mr->umem->is_odp)
- return -EOPNOTSUPP;
+ return rxe_odp_mr_copy(mr, iova, addr, length, dir);
else
return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index c5e24901c141..5aa09b9c1095 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -177,3 +177,80 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
return err;
}
+
+/* The mr->page_list xarray spinlock must be held before calling this */
+static inline bool rxe_odp_check_pages(struct rxe_mr *mr, u64 iova,
+ int length, u32 flags)
+{
+ unsigned long upper = rxe_mr_iova_to_index(mr, iova + length - 1);
+ unsigned long lower = rxe_mr_iova_to_index(mr, iova);
+ bool need_fault = false;
+ void *page;
+ size_t perm = 0;
+
+ XA_STATE(xas, &mr->page_list, lower);
+
+ if (!(flags & RXE_PAGEFAULT_RDONLY))
+ perm = RXE_ODP_WRITABLE_BIT;
+
+ while (xas.xa_index <= upper) {
+ page = xas_load(&xas);
+
+ /* Check page presence and write permission */
+ if (!page || (perm && !(xa_pointer_tag(page) & perm))) {
+ need_fault = true;
+ break;
+ }
+ xas_next(&xas);
+ }
+
+ return need_fault;
+}
+
+int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum rxe_mr_copy_dir dir)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ u32 flags = 0;
+ int err;
+
+ if (unlikely(!mr->umem->is_odp))
+ return -EOPNOTSUPP;
+
+ switch (dir) {
+ case RXE_TO_MR_OBJ:
+ break;
+
+ case RXE_FROM_MR_OBJ:
+ flags = RXE_PAGEFAULT_RDONLY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock(&mr->page_list.xa_lock);
+
+ if (rxe_odp_check_pages(mr, iova, length, flags)) {
+ spin_unlock(&mr->page_list.xa_lock);
+
+ /* umem_mutex is locked on success */
+ err = rxe_odp_do_pagefault_and_lock(mr, iova, length, flags);
+ if (err < 0)
+ return err;
+
+ /*
+ * The spinlock is always taken under mutex_lock except for
+ * MR initialization, so taking it here while holding
+ * umem_mutex cannot deadlock.
+ */
+ spin_lock(&mr->page_list.xa_lock);
+ mutex_unlock(&umem_odp->umem_mutex);
+ }
+
+ err = rxe_mr_copy_xarray(mr, iova, addr, length, dir);
+
+ spin_unlock(&mr->page_list.xa_lock);
+
+ return err;
+}
--
2.39.1