Message-Id: <7441c59fcea601c03c70ec03b5d17a69032e51f8.1681882651.git.matsuda-daisuke@fujitsu.com>
Date: Wed, 19 Apr 2023 14:51:54 +0900
From: Daisuke Matsuda <matsuda-daisuke@...itsu.com>
To: linux-rdma@...r.kernel.org, leonro@...dia.com, jgg@...dia.com,
zyjzyj2000@...il.com
Cc: linux-kernel@...r.kernel.org, rpearsonhpe@...il.com,
yangx.jy@...itsu.com, lizhijian@...itsu.com,
Daisuke Matsuda <matsuda-daisuke@...itsu.com>
Subject: [PATCH for-next v4 2/8] RDMA/rxe: Always schedule works before accessing user MRs
Both the responder and the completer can sleep to handle page faults when
ODP is in use. This happens when they are about to access user MRs, so
work must always be scheduled in such cases.
Signed-off-by: Daisuke Matsuda <matsuda-daisuke@...itsu.com>
---
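For reviewers, below is a minimal user-space sketch of the intended
completer-side scheduling decision. It is illustrative only: the mask bits
are stand-in values rather than the kernel's RXE_PAYLOAD_MASK/RXE_ATMACK_MASK
definitions, and completer_must_sched() is a hypothetical helper that mirrors
the check added to rxe_comp_queue_pkt(), not part of the driver.

  /* Illustrative sketch only; mask values are stand-ins for the example. */
  #include <stdbool.h>
  #include <stdio.h>

  #define PAYLOAD_MASK (1u << 0) /* read response carrying payload */
  #define ATMACK_MASK  (1u << 1) /* atomic acknowledgement */

  /* The completer may fault on an ODP-enabled MR for these packet types,
   * so it must always run from task context instead of being invoked
   * directly; otherwise fall back to the old queue-length heuristic.
   */
  static bool completer_must_sched(unsigned int pkt_mask, unsigned int qlen)
  {
          if (pkt_mask & (PAYLOAD_MASK | ATMACK_MASK))
                  return true;
          return qlen > 1;
  }

  int main(void)
  {
          printf("%d\n", completer_must_sched(PAYLOAD_MASK, 1)); /* 1: always schedule */
          printf("%d\n", completer_must_sched(0, 1));            /* 0: may run inline */
          return 0;
  }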
drivers/infiniband/sw/rxe/rxe_comp.c | 12 ++++++++++--
drivers/infiniband/sw/rxe/rxe_loc.h | 4 ++--
drivers/infiniband/sw/rxe/rxe_recv.c | 4 ++--
drivers/infiniband/sw/rxe/rxe_resp.c | 14 +++++++++-----
4 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index db18ace74d2b..b71bd9cc00d0 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -126,13 +126,21 @@ void retransmit_timer(struct timer_list *t)
spin_unlock_bh(&qp->state_lock);
}
-void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
+void rxe_comp_queue_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
+ struct rxe_qp *qp = pkt->qp;
int must_sched;
skb_queue_tail(&qp->resp_pkts, skb);
- must_sched = skb_queue_len(&qp->resp_pkts) > 1;
+ /* Schedule the task if processing Read responses or Atomic acks.
+ * In these cases, the completer may sleep to access ODP-enabled MRs.
+ */
+ if (pkt->mask & (RXE_PAYLOAD_MASK | RXE_ATMACK_MASK))
+ must_sched = 1;
+ else
+ must_sched = skb_queue_len(&qp->resp_pkts) > 1;
+
if (must_sched != 0)
rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 804b15e929dd..bf28ac13c3f5 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -179,9 +179,9 @@ int rxe_icrc_init(struct rxe_dev *rxe);
int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt);
void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt);
-void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);
+void rxe_resp_queue_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb);
-void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);
+void rxe_comp_queue_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb);
static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 2f953cc74256..0d869615508a 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -181,9 +181,9 @@ static int hdr_check(struct rxe_pkt_info *pkt)
static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
if (pkt->mask & RXE_REQ_MASK)
- rxe_resp_queue_pkt(pkt->qp, skb);
+ rxe_resp_queue_pkt(pkt, skb);
else
- rxe_comp_queue_pkt(pkt->qp, skb);
+ rxe_comp_queue_pkt(pkt, skb);
}
static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 68f6cd188d8e..f915128ed32a 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -47,15 +47,19 @@ static char *resp_state_name[] = {
};
/* rxe_recv calls here to add a request packet to the input queue */
-void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
+void rxe_resp_queue_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
- int must_sched;
- struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ int must_sched = 1;
+ struct rxe_qp *qp = pkt->qp;
skb_queue_tail(&qp->req_pkts, skb);
- must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
- (skb_queue_len(&qp->req_pkts) > 1);
+ /* The responder can sleep to access an ODP-enabled MR. Always schedule
+ * tasks for non-zero-byte operations, RDMA Reads, and Atomics.
+ */
+ if ((skb_queue_len(&qp->req_pkts) == 1) && (payload_size(pkt) == 0)
+ && !(pkt->mask & RXE_READ_OR_ATOMIC_MASK))
+ must_sched = 0;
if (must_sched)
rxe_sched_task(&qp->resp.task);
--
2.39.1