Message-ID: <1d5adbe9-dcab-5eae-fff3-631b91c2da94@grimberg.me>
Date: Wed, 9 Aug 2023 10:35:28 +0300
From: Sagi Grimberg <sagi@...mberg.me>
To: Aurelien Aptel <aaptel@...dia.com>, linux-nvme@...ts.infradead.org,
netdev@...r.kernel.org, hch@....de, kbusch@...nel.org, axboe@...com,
chaitanyak@...dia.com, davem@...emloft.net, kuba@...nel.org
Cc: Boris Pismenny <borisp@...dia.com>, aurelien.aptel@...il.com,
smalin@...dia.com, malin1024@...il.com, ogerlitz@...dia.com,
yorayz@...dia.com, galshalom@...dia.com, mgurtovoy@...dia.com
Subject: Re: [PATCH v12 08/26] nvme-tcp: Add DDP data-path
On 7/12/23 19:14, Aurelien Aptel wrote:
> From: Boris Pismenny <borisp@...dia.com>
>
> Introduce the NVMe-TCP DDP data-path offload.
> Using this interface, the NIC hardware will scatter the TCP payload
> directly into the BIO pages, according to the command_id in the PDU.
> To maintain the correctness of the network stack, the driver is expected
> to construct SKBs that point to the BIO pages.
>
> The data-path interface contains two routines: setup/teardown.
> The setup provides the mapping from command_id to the request buffers,
> while the teardown removes this mapping.
>
> For efficiency, we introduce an asynchronous NVMe completion, which is
> split between NVMe-TCP and the NIC driver as follows:
> NVMe-TCP performs the NVMe-specific completion, while the NIC driver
> performs the generic blk_mq completion.
>
> Signed-off-by: Boris Pismenny <borisp@...dia.com>
> Signed-off-by: Ben Ben-Ishay <benishay@...dia.com>
> Signed-off-by: Or Gerlitz <ogerlitz@...dia.com>
> Signed-off-by: Yoray Zack <yorayz@...dia.com>
> Signed-off-by: Shai Malin <smalin@...dia.com>
> Signed-off-by: Aurelien Aptel <aaptel@...dia.com>
> Reviewed-by: Chaitanya Kulkarni <kch@...dia.com>
> ---
> drivers/nvme/host/tcp.c | 121 ++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 116 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index 7d3b0ac65c03..6057cd424a19 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -116,6 +116,13 @@ struct nvme_tcp_request {
> size_t offset;
> size_t data_sent;
> enum nvme_tcp_send_state state;
> +
> +#ifdef CONFIG_ULP_DDP
> + bool offloaded;
> + struct ulp_ddp_io ddp;
> + __le16 ddp_status;
> + union nvme_result result;
> +#endif
> };
>
> enum nvme_tcp_queue_flags {
> @@ -354,11 +361,75 @@ static inline bool is_netdev_ulp_offload_active(struct net_device *netdev,
> return false;
> }
>
> +static int nvme_tcp_req_map_ddp_sg(struct nvme_tcp_request *req, struct request *rq)
Why do you pass both req and rq? You can derive each from the other.
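i.e. one of the two arguments can go away, using the existing blk-mq
helpers (untested sketch):

	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	/* or, going the other way: */
	struct request *rq = blk_mq_rq_from_pdu(req);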
> +{
> + int ret;
> +
> + req->ddp.sg_table.sgl = req->ddp.first_sgl;
> + ret = sg_alloc_table_chained(&req->ddp.sg_table,
> + blk_rq_nr_phys_segments(rq),
> + req->ddp.sg_table.sgl, SG_CHUNK_SIZE);
> + if (ret)
> + return -ENOMEM;
> + req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);
General question: I'm assuming that the HCA knows how to deal with
a controller that sends C2HData in parts?
> + return 0;
> +}
> +
> static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
> +static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
> static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
> .resync_request = nvme_tcp_resync_request,
> + .ddp_teardown_done = nvme_tcp_ddp_teardown_done,
> };
>
> +static void nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
> + struct request *rq)
> +{
> + struct net_device *netdev = queue->ctrl->offloading_netdev;
> + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> + netdev->netdev_ops->ulp_ddp_ops->teardown(netdev, queue->sock->sk,
> + &req->ddp, rq);
> + sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
> +}
> +
> +static void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
> +{
> + struct request *rq = ddp_ctx;
> + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> + if (!nvme_try_complete_req(rq, req->ddp_status, req->result))
> + nvme_complete_rq(rq);
> +}
> +
> +static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue, u16 command_id,
> + struct request *rq)
I think you can use nvme_cid(rq) instead of passing the command_id.
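Something like (untested, assuming nvme_cid() from nvme.h is usable in
this context):

	req->ddp.command_id = nvme_cid(rq);

and then the command_id argument can be dropped from the signature.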
> +{
> + struct net_device *netdev = queue->ctrl->offloading_netdev;
> + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> + int ret;
> +
> + if (rq_data_dir(rq) != READ ||
> + queue->ctrl->offload_io_threshold > blk_rq_payload_bytes(rq))
> + return 0;
> +
> + req->ddp.command_id = command_id;
> + ret = nvme_tcp_req_map_ddp_sg(req, rq);
I don't see why nvme_tcp_req_map_ddp_sg() is not open-coded here; it's
the only call-site, and it does pretty much exactly what its name says.
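i.e. just fold it in (reshuffled from the code above, untested):

	req->ddp.sg_table.sgl = req->ddp.first_sgl;
	if (sg_alloc_table_chained(&req->ddp.sg_table,
				   blk_rq_nr_phys_segments(rq),
				   req->ddp.sg_table.sgl, SG_CHUNK_SIZE))
		return -ENOMEM;
	req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);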
> + if (ret)
> + return -ENOMEM;
> +
> + ret = netdev->netdev_ops->ulp_ddp_ops->setup(netdev, queue->sock->sk,
> + &req->ddp);
> + if (ret) {
> + sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
> + return ret;
> + }
> +
> + /* if successful, sg table is freed in nvme_tcp_teardown_ddp() */
> + req->offloaded = true;
> + return 0;
> +}
> +
> static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
> {
> struct net_device *netdev = queue->ctrl->offloading_netdev;
> @@ -491,6 +562,12 @@ static inline bool is_netdev_ulp_offload_active(struct net_device *netdev,
> return false;
> }
>
> +static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue, u16 command_id,
> + struct request *rq)
> +{
> + return 0;
> +}
> +
> static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
> {
> return -EOPNOTSUPP;
> @@ -778,6 +855,26 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
> queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
> }
>
> +static void nvme_tcp_complete_request(struct request *rq,
> + __le16 status,
> + union nvme_result result,
> + __u16 command_id)
> +{
> +#ifdef CONFIG_ULP_DDP
> + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> + if (req->offloaded) {
> + req->ddp_status = status;
> + req->result = result;
> + nvme_tcp_teardown_ddp(req->queue, rq);
> + return;
> + }
> +#endif
> +
> + if (!nvme_try_complete_req(rq, status, result))
> + nvme_complete_rq(rq);
> +}
> +
> static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
> struct nvme_completion *cqe)
> {
> @@ -797,10 +894,9 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
> if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
> req->status = cqe->status;
>
> - if (!nvme_try_complete_req(rq, req->status, cqe->result))
> - nvme_complete_rq(rq);
> + nvme_tcp_complete_request(rq, req->status, cqe->result,
> + cqe->command_id);
> queue->nr_cqe++;
> -
> return 0;
> }
>
> @@ -998,10 +1094,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
>
> static inline void nvme_tcp_end_request(struct request *rq, u16 status)
> {
> + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> + struct nvme_tcp_queue *queue = req->queue;
> + struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
> union nvme_result res = {};
>
> - if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
> - nvme_complete_rq(rq);
> + nvme_tcp_complete_request(rq, cpu_to_le16(status << 1), res,
> + pdu->command_id);
> }
>
> static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
> @@ -1308,6 +1407,15 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
> else
> msg.msg_flags |= MSG_EOR;
>
> + if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags)) {
> + ret = nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id,
> + blk_mq_rq_from_pdu(req));
> + WARN_ONCE(ret, "ddp setup failed (queue 0x%x, cid 0x%x, ret=%d)",
> + nvme_tcp_queue_id(queue),
> + pdu->cmd.common.command_id,
> + ret);
> + }
Any reason why this is done here, when sending the command PDU, and not
at setup time?
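e.g. something along these lines (untested, assuming the command_id is
already valid by the time nvme_tcp_setup_cmd_pdu() runs):

	/* in nvme_tcp_setup_cmd_pdu(), after the command is set up */
	if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags))
		nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id, rq);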
> +
> if (queue->hdr_digest && !req->offset)
> nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
>
> @@ -2739,6 +2847,9 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
> if (ret)
> return ret;
>
> +#ifdef CONFIG_ULP_DDP
> + req->offloaded = false;
> +#endif
> req->state = NVME_TCP_SEND_CMD_PDU;
> req->status = cpu_to_le16(NVME_SC_SUCCESS);
> req->offset = 0;