Message-ID: <bc6ec871-de51-477a-b27f-4d516e5bc3e1@grimberg.me>
Date: Tue, 28 Nov 2023 12:42:51 +0200
From: Sagi Grimberg <sagi@...mberg.me>
To: Aurelien Aptel <aaptel@...dia.com>, linux-nvme@...ts.infradead.org,
netdev@...r.kernel.org, hch@....de, kbusch@...nel.org, axboe@...com,
chaitanyak@...dia.com, davem@...emloft.net, kuba@...nel.org
Cc: Yoray Zack <yorayz@...dia.com>, aurelien.aptel@...il.com,
smalin@...dia.com, malin1024@...il.com, ogerlitz@...dia.com,
borisp@...dia.com, galshalom@...dia.com, mgurtovoy@...dia.com
Subject: Re: [PATCH v20 07/20] nvme-tcp: RX DDGST offload
On 11/22/23 15:48, Aurelien Aptel wrote:
> From: Yoray Zack <yorayz@...dia.com>
>
> Enable the RX side of DDGST offload when supported.
>
> At the end of the capsule, check whether the HW has set the ULP CRC bit
> on all of the capsule's skbs; if not, recalculate the DDGST in SW and
> verify it against the digest received on the wire.
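To spell out the flow for other reviewers, my reading of the
end-of-capsule path is roughly the following (a sketch only, using the
helpers added below plus the usual rcv_hash/exp_ddgst/recv_ddgst queue
fields -- not the exact patch code):

	/* In the DDGST receive path, once the full digest arrived: */
	if (test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags) &&
	    !nvme_tcp_ddp_ddgst_ok(queue)) {
		/*
		 * At least one skb of this capsule arrived without the
		 * HW CRC-ok bit, so the offloaded digest cannot be
		 * trusted: recompute it in SW over the DDP SGL.
		 * (rq is the request this capsule belongs to.)
		 */
		nvme_tcp_ddp_ddgst_recalc(queue->rcv_hash, rq,
					  &queue->exp_ddgst);
	}
	/* ...then compare queue->recv_ddgst against queue->exp_ddgst. */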
>
> Signed-off-by: Yoray Zack <yorayz@...dia.com>
> Signed-off-by: Boris Pismenny <borisp@...dia.com>
> Signed-off-by: Ben Ben-Ishay <benishay@...dia.com>
> Signed-off-by: Or Gerlitz <ogerlitz@...dia.com>
> Signed-off-by: Shai Malin <smalin@...dia.com>
> Signed-off-by: Aurelien Aptel <aaptel@...dia.com>
> Reviewed-by: Chaitanya Kulkarni <kch@...dia.com>
> ---
> drivers/nvme/host/tcp.c | 84 ++++++++++++++++++++++++++++++++++++++---
> 1 file changed, 79 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index 680d909eb3fb..5537f04a62fd 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -141,6 +141,7 @@ enum nvme_tcp_queue_flags {
> NVME_TCP_Q_LIVE = 1,
> NVME_TCP_Q_POLLING = 2,
> NVME_TCP_Q_OFF_DDP = 3,
> + NVME_TCP_Q_OFF_DDGST_RX = 4,
> };
>
> enum nvme_tcp_recv_state {
> @@ -178,6 +179,7 @@ struct nvme_tcp_queue {
> * is pending (ULP_DDP_RESYNC_PENDING).
> */
> atomic64_t resync_tcp_seq;
> + bool ddp_ddgst_valid;
> #endif
>
> /* send state */
> @@ -360,6 +362,33 @@ nvme_tcp_get_ddp_netdev_with_limits(struct nvme_tcp_ctrl *ctrl)
> return netdev;
> }
>
> +static inline bool nvme_tcp_ddp_ddgst_ok(struct nvme_tcp_queue *queue)
> +{
> + return queue->ddp_ddgst_valid;
> +}
> +
> +static inline void nvme_tcp_ddp_ddgst_update(struct nvme_tcp_queue *queue,
> + struct sk_buff *skb)
> +{
> + if (queue->ddp_ddgst_valid)
> + queue->ddp_ddgst_valid = skb_is_ulp_crc(skb);
> +}
> +
> +static void nvme_tcp_ddp_ddgst_recalc(struct ahash_request *hash,
> + struct request *rq,
> + __le32 *ddgst)
> +{
> + struct nvme_tcp_request *req;
> +
> + if (!rq)
> + return;
How is this even possible? And what happens down the road if this is
indeed a null rq?
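If the answer is "it can't happen", a loud failure would be preferable
to a silent return, e.g. (sketch):

	if (WARN_ON_ONCE(!rq))
		return;

Otherwise *ddgst is left untouched and the caller presumably goes on to
compare against stale data.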
> +
> + req = blk_mq_rq_to_pdu(rq);
> + ahash_request_set_crypt(hash, req->ddp.sg_table.sgl, (u8 *)ddgst,
> + req->data_len);
> + crypto_ahash_digest(hash);
> +}
> +
> static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
> static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
> static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
> @@ -430,6 +459,8 @@ static void nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
> static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
> {
> struct ulp_ddp_config config = {.type = ULP_DDP_NVME};
> + bool offload_ddgst_rx = ulp_ddp_is_cap_active(queue->ctrl->ddp_netdev,
> + ULP_DDP_CAP_NVME_TCP_DDGST_RX);
Not sure a local variable is needed here.
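If it is only tested once, the call could simply be inlined at the use
site, e.g. (sketch; the use site shown is hypothetical, the real one is
further down in this function):

	if (ulp_ddp_is_cap_active(queue->ctrl->ddp_netdev,
				  ULP_DDP_CAP_NVME_TCP_DDGST_RX))
		set_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags);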