Message-Id: <20230928150954.1684-8-aaptel@nvidia.com>
Date: Thu, 28 Sep 2023 15:09:41 +0000
From: Aurelien Aptel <aaptel@...dia.com>
To: linux-nvme@...ts.infradead.org,
netdev@...r.kernel.org,
sagi@...mberg.me,
hch@....de,
kbusch@...nel.org,
axboe@...com,
chaitanyak@...dia.com,
davem@...emloft.net,
kuba@...nel.org
Cc: Yoray Zack <yorayz@...dia.com>,
aaptel@...dia.com,
aurelien.aptel@...il.com,
smalin@...dia.com,
malin1024@...il.com,
ogerlitz@...dia.com,
borisp@...dia.com,
galshalom@...dia.com,
mgurtovoy@...dia.com
Subject: [PATCH v16 07/20] nvme-tcp: RX DDGST offload
From: Yoray Zack <yorayz@...dia.com>
Enable the RX side of DDGST offload when supported.

At the end of the capsule, check whether the HW-verified CRC flag is set
on every skb that carried it; if it is not, recalculate the DDGST in
software and verify it there.
Signed-off-by: Yoray Zack <yorayz@...dia.com>
Signed-off-by: Boris Pismenny <borisp@...dia.com>
Signed-off-by: Ben Ben-Ishay <benishay@...dia.com>
Signed-off-by: Or Gerlitz <ogerlitz@...dia.com>
Signed-off-by: Shai Malin <smalin@...dia.com>
Signed-off-by: Aurelien Aptel <aaptel@...dia.com>
Reviewed-by: Chaitanya Kulkarni <kch@...dia.com>
---
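Note for reviewers (not part of the commit message): the flow above
amounts to "AND the per-skb HW-verified flag across the capsule, and
fall back to a software CRC32C when it drops". Below is a minimal,
self-contained userspace sketch of that logic; fake_skb, hw_crc_ok and
crc32c_sw() are illustrative stand-ins for struct sk_buff,
skb_is_ulp_crc() and the queue's CRC32C ahash, not kernel APIs.

/* Minimal sketch of the RX DDGST fallback flow (illustrative only). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_skb {                      /* stand-in for struct sk_buff */
    const uint8_t *data;
    size_t len;
    bool hw_crc_ok;                    /* stand-in for skb_is_ulp_crc() */
};

/* Bitwise CRC32C, standing in for the queue's crypto_ahash digest. */
static uint32_t crc32c_sw(const uint8_t *buf, size_t len)
{
    uint32_t crc = 0xffffffff;
    while (len--) {
        crc ^= *buf++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
    }
    return ~crc;
}

int main(void)
{
    const uint8_t payload[] = "nvme-tcp capsule payload";
    struct fake_skb skbs[] = {
        { payload,      10,                   true  },
        { payload + 10, sizeof(payload) - 10, false }, /* HW skipped one */
    };
    /* Digest received on the wire; here computed from the same bytes. */
    uint32_t recv_ddgst = crc32c_sw(payload, sizeof(payload));
    bool ddgst_valid = true;           /* mirrors queue->ddp_ddgst_valid */

    /* As in nvme_tcp_ddp_ddgst_update(): one unverified skb makes the
     * whole capsule unverified. */
    for (size_t i = 0; i < sizeof(skbs) / sizeof(skbs[0]); i++)
        if (ddgst_valid)
            ddgst_valid = skbs[i].hw_crc_ok;

    if (ddgst_valid) {                 /* HW checked it, skip SW work */
        puts("HW verified DDGST, skipping SW check");
        return 0;
    }

    /* As in nvme_tcp_ddp_ddgst_recalc(): recompute over the payload
     * and compare with the digest received from the wire. */
    if (crc32c_sw(payload, sizeof(payload)) != recv_ddgst) {
        fprintf(stderr, "data digest mismatch\n");
        return 1;
    }
    puts("SW-recalculated DDGST matches");
    return 0;
}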
drivers/nvme/host/tcp.c | 84 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 79 insertions(+), 5 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 655f73dacc09..49975e8e7cde 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -126,6 +126,7 @@ enum nvme_tcp_queue_flags {
NVME_TCP_Q_LIVE = 1,
NVME_TCP_Q_POLLING = 2,
NVME_TCP_Q_OFF_DDP = 3,
+ NVME_TCP_Q_OFF_DDGST_RX = 4,
};
enum nvme_tcp_recv_state {
@@ -163,6 +164,7 @@ struct nvme_tcp_queue {
* is pending (ULP_DDP_RESYNC_PENDING).
*/
atomic64_t resync_tcp_seq;
+ bool ddp_ddgst_valid;
#endif
/* send state */
@@ -342,6 +344,33 @@ static struct net_device *nvme_tcp_get_ddp_netdev_with_limits(
return netdev;
}
+static inline bool nvme_tcp_ddp_ddgst_ok(struct nvme_tcp_queue *queue)
+{
+ return queue->ddp_ddgst_valid;
+}
+
+static inline void nvme_tcp_ddp_ddgst_update(struct nvme_tcp_queue *queue,
+ struct sk_buff *skb)
+{
+ if (queue->ddp_ddgst_valid)
+ queue->ddp_ddgst_valid = skb_is_ulp_crc(skb);
+}
+
+static void nvme_tcp_ddp_ddgst_recalc(struct ahash_request *hash,
+ struct request *rq,
+ __le32 *ddgst)
+{
+ struct nvme_tcp_request *req;
+
+ if (!rq)
+ return;
+
+ req = blk_mq_rq_to_pdu(rq);
+ ahash_request_set_crypt(hash, req->ddp.sg_table.sgl, (u8 *)ddgst,
+ req->data_len);
+ crypto_ahash_digest(hash);
+}
+
static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
@@ -412,6 +441,8 @@ static void nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
{
struct ulp_ddp_config config = {.type = ULP_DDP_NVME};
+ bool offload_ddgst_rx = test_bit(ULP_DDP_C_NVME_TCP_DDGST_RX_BIT,
+ queue->ctrl->ddp_netdev->ulp_ddp_caps.active);
int ret;
config.nvmeotcp.pfv = NVME_TCP_PFV_1_0;
@@ -431,6 +462,8 @@ static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
return ret;
set_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
+ if (queue->data_digest && offload_ddgst_rx)
+ set_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags);
return 0;
}
@@ -438,6 +471,7 @@ static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
static void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
{
clear_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
+ clear_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags);
ulp_ddp_sk_del(queue->ctrl->ddp_netdev, queue->sock->sk);
}
@@ -537,6 +571,20 @@ static void nvme_tcp_resync_response(struct nvme_tcp_queue *queue,
struct sk_buff *skb, unsigned int offset)
{}
+static inline bool nvme_tcp_ddp_ddgst_ok(struct nvme_tcp_queue *queue)
+{
+ return false;
+}
+
+static inline void nvme_tcp_ddp_ddgst_update(struct nvme_tcp_queue *queue,
+ struct sk_buff *skb)
+{}
+
+static void nvme_tcp_ddp_ddgst_recalc(struct ahash_request *hash,
+ struct request *rq,
+ __le32 *ddgst)
+{}
+
#endif
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
@@ -797,6 +845,9 @@ static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
queue->pdu_offset = 0;
queue->data_remaining = -1;
queue->ddgst_remaining = 0;
+#ifdef CONFIG_ULP_DDP
+ queue->ddp_ddgst_valid = true;
+#endif
}
static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
@@ -1064,6 +1115,10 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ if (queue->data_digest &&
+ test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
+ nvme_tcp_ddp_ddgst_update(queue, skb);
+
while (true) {
int recv_len, ret;
@@ -1092,7 +1147,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
recv_len = min_t(size_t, recv_len,
iov_iter_count(&req->iter));
- if (queue->data_digest)
+ if (queue->data_digest &&
+ !test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
ret = skb_copy_and_hash_datagram_iter(skb, *offset,
&req->iter, recv_len, queue->rcv_hash);
else
@@ -1134,8 +1190,11 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
char *ddgst = (char *)&queue->recv_ddgst;
size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
+ struct request *rq;
int ret;
+ if (test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
+ nvme_tcp_ddp_ddgst_update(queue, skb);
ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
if (unlikely(ret))
return ret;
@@ -1146,9 +1205,25 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
if (queue->ddgst_remaining)
return 0;
+ rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
+
+ if (test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags)) {
+ /*
+ * If HW successfully offloaded the digest
+ * verification, we can skip it
+ */
+ if (nvme_tcp_ddp_ddgst_ok(queue))
+ goto out;
+ /*
+ * Otherwise we have to recalculate and verify the
+ * digest with the software-fallback
+ */
+ nvme_tcp_ddp_ddgst_recalc(queue->rcv_hash, rq,
+ &queue->exp_ddgst);
+ }
+
if (queue->recv_ddgst != queue->exp_ddgst) {
- struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
@@ -1159,9 +1234,8 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
le32_to_cpu(queue->exp_ddgst));
}
+out:
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
nvme_tcp_end_request(rq, le16_to_cpu(req->status));
--
2.34.1