Message-ID: <5daf1d33-300b-6aad-3b28-6c4030d4c674@oracle.com>
Date:   Fri, 21 May 2021 13:36:28 -0500
From:   Himanshu Madhani <himanshu.madhani@...cle.com>
To:     Shai Malin <smalin@...vell.com>, netdev@...r.kernel.org,
        linux-nvme@...ts.infradead.org, davem@...emloft.net,
        kuba@...nel.org, sagi@...mberg.me, hch@....de, axboe@...com,
        kbusch@...nel.org
Cc:     aelior@...vell.com, mkalderon@...vell.com, okulkarni@...vell.com,
        pkushwaha@...vell.com, malin1024@...il.com
Subject: Re: [RFC PATCH v5 08/27] nvme-tcp-offload: Add Timeout and ASYNC
 Support



On 5/19/21 6:13 AM, Shai Malin wrote:
> In this patch, we add nvme-tcp-offload timeout support via
> nvme_tcp_ofld_timeout() and ASYNC event support via
> nvme_tcp_ofld_submit_async_event().
> 
> Acked-by: Igor Russkikh <irusskikh@...vell.com>
> Signed-off-by: Prabhakar Kushwaha <pkushwaha@...vell.com>
> Signed-off-by: Omkar Kulkarni <okulkarni@...vell.com>
> Signed-off-by: Michal Kalderon <mkalderon@...vell.com>
> Signed-off-by: Ariel Elior <aelior@...vell.com>
> Signed-off-by: Shai Malin <smalin@...vell.com>
> Reviewed-by: Hannes Reinecke <hare@...e.de>
> ---
>   drivers/nvme/host/tcp-offload.c | 85 ++++++++++++++++++++++++++++++++-
>   drivers/nvme/host/tcp-offload.h |  2 +
>   2 files changed, 86 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/nvme/host/tcp-offload.c b/drivers/nvme/host/tcp-offload.c
> index 276b8475ac85..01b4c43cdaa5 100644
> --- a/drivers/nvme/host/tcp-offload.c
> +++ b/drivers/nvme/host/tcp-offload.c
> @@ -133,6 +133,26 @@ void nvme_tcp_ofld_req_done(struct nvme_tcp_ofld_req *req,
>   		nvme_complete_rq(rq);
>   }
>   
> +/**
> + * nvme_tcp_ofld_async_req_done() - NVMeTCP Offload request done callback
> + * function for async request. Pointed to by nvme_tcp_ofld_req->done.
> + * Handles both NVME_TCP_F_DATA_SUCCESS flag and NVMe CQ.
> + * @req:	NVMeTCP offload request to complete.
> + * @result:     The nvme_result.
> + * @status:     The completion status.
> + *
> + * API function that allows the vendor specific offload driver to report request
> + * completions to the common offload layer.
> + */
> +void nvme_tcp_ofld_async_req_done(struct nvme_tcp_ofld_req *req,
> +				  union nvme_result *result, __le16 status)
> +{
> +	struct nvme_tcp_ofld_queue *queue = req->queue;
> +	struct nvme_tcp_ofld_ctrl *ctrl = queue->ctrl;
> +
> +	nvme_complete_async_event(&ctrl->nctrl, status, result);
> +}
> +
>   struct nvme_tcp_ofld_dev *
>   nvme_tcp_ofld_lookup_dev(struct nvme_tcp_ofld_ctrl *ctrl)
>   {
> @@ -733,7 +753,23 @@ void nvme_tcp_ofld_map_data(struct nvme_command *c, u32 data_len)
>   
>   static void nvme_tcp_ofld_submit_async_event(struct nvme_ctrl *arg)
>   {
> -	/* Placeholder - submit_async_event */
> +	struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(arg);
> +	struct nvme_tcp_ofld_queue *queue = &ctrl->queues[0];
> +	struct nvme_tcp_ofld_dev *dev = queue->dev;
> +	struct nvme_tcp_ofld_ops *ops = dev->ops;
> +
> +	ctrl->async_req.nvme_cmd.common.opcode = nvme_admin_async_event;
> +	ctrl->async_req.nvme_cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
> +	ctrl->async_req.nvme_cmd.common.flags |= NVME_CMD_SGL_METABUF;
> +
> +	nvme_tcp_ofld_set_sg_null(&ctrl->async_req.nvme_cmd);
> +
> +	ctrl->async_req.async = true;
> +	ctrl->async_req.queue = queue;
> +	ctrl->async_req.last = true;
> +	ctrl->async_req.done = nvme_tcp_ofld_async_req_done;
> +
> +	ops->send_req(&ctrl->async_req);
>   }
>   
>   static void
> @@ -1039,6 +1075,51 @@ static int nvme_tcp_ofld_poll(struct blk_mq_hw_ctx *hctx)
>   	return ops->poll_queue(queue);
>   }
>   
> +static void nvme_tcp_ofld_complete_timed_out(struct request *rq)
> +{
> +	struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq);
> +	struct nvme_ctrl *nctrl = &req->queue->ctrl->nctrl;
> +
> +	nvme_tcp_ofld_stop_queue(nctrl, nvme_tcp_ofld_qid(req->queue));
> +	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
> +		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
> +		blk_mq_complete_request(rq);
> +	}
> +}
> +
> +static enum blk_eh_timer_return nvme_tcp_ofld_timeout(struct request *rq, bool reserved)
> +{
> +	struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq);
> +	struct nvme_tcp_ofld_ctrl *ctrl = req->queue->ctrl;
> +
> +	dev_warn(ctrl->nctrl.device,
> +		 "queue %d: timeout request %#x type %d\n",
> +		 nvme_tcp_ofld_qid(req->queue), rq->tag, req->nvme_cmd.common.opcode);
> +
> +	if (ctrl->nctrl.state != NVME_CTRL_LIVE) {
> +		/*
> +		 * If we are resetting, connecting or deleting we should
> +		 * complete immediately because we may block controller
> +		 * teardown or setup sequence
> +		 * - ctrl disable/shutdown fabrics requests
> +		 * - connect requests
> +		 * - initialization admin requests
> +		 * - I/O requests that entered after unquiescing and
> +		 *   the controller stopped responding
> +		 *
> +		 * All other requests should be cancelled by the error
> +		 * recovery work, so it's fine that we fail it here.
> +		 */
> +		nvme_tcp_ofld_complete_timed_out(rq);
> +
> +		return BLK_EH_DONE;
> +	}
> +
> +	nvme_tcp_ofld_error_recovery(&ctrl->nctrl);
> +
> +	return BLK_EH_RESET_TIMER;
> +}
> +
>   static struct blk_mq_ops nvme_tcp_ofld_mq_ops = {
>   	.queue_rq	= nvme_tcp_ofld_queue_rq,
>   	.commit_rqs     = nvme_tcp_ofld_commit_rqs,
> @@ -1046,6 +1127,7 @@ static struct blk_mq_ops nvme_tcp_ofld_mq_ops = {
>   	.init_request	= nvme_tcp_ofld_init_request,
>   	.exit_request	= nvme_tcp_ofld_exit_request,
>   	.init_hctx	= nvme_tcp_ofld_init_hctx,
> +	.timeout	= nvme_tcp_ofld_timeout,
>   	.map_queues	= nvme_tcp_ofld_map_queues,
>   	.poll		= nvme_tcp_ofld_poll,
>   };
> @@ -1056,6 +1138,7 @@ static struct blk_mq_ops nvme_tcp_ofld_admin_mq_ops = {
>   	.init_request	= nvme_tcp_ofld_init_request,
>   	.exit_request	= nvme_tcp_ofld_exit_request,
>   	.init_hctx	= nvme_tcp_ofld_init_admin_hctx,
> +	.timeout	= nvme_tcp_ofld_timeout,
>   };
>   
>   static const struct nvme_ctrl_ops nvme_tcp_ofld_ctrl_ops = {
> diff --git a/drivers/nvme/host/tcp-offload.h b/drivers/nvme/host/tcp-offload.h
> index 2233d855aa10..f897b811c399 100644
> --- a/drivers/nvme/host/tcp-offload.h
> +++ b/drivers/nvme/host/tcp-offload.h
> @@ -117,6 +117,8 @@ struct nvme_tcp_ofld_ctrl {
>   	/* Connectivity params */
>   	struct nvme_tcp_ofld_ctrl_con_params conn_params;
>   
> +	struct nvme_tcp_ofld_req async_req;
> +
>   	/* Vendor specific driver context */
>   	void *private_data;
>   };
> 

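For context on how the ->done callbacks introduced here are meant to be
exercised, below is a rough sketch of a vendor driver's completion path
calling back into the common layer. This is purely illustrative and not
part of the series; vendor_handle_cqe() is a hypothetical name, and the
includes are my assumption of what such a file would need.

#include <linux/nvme.h>
#include "tcp-offload.h"

/*
 * Sketch of a vendor-side CQE handler: the common nvme-tcp-offload layer
 * installs ->done on each request (nvme_tcp_ofld_req_done() for regular
 * requests, nvme_tcp_ofld_async_req_done() for the AER slot set up in
 * nvme_tcp_ofld_submit_async_event()), so the vendor driver only forwards
 * the completion result and status it received from its hardware.
 */
static void vendor_handle_cqe(struct nvme_tcp_ofld_req *req,
			      struct nvme_completion *cqe)
{
	req->done(req, &cqe->result, cqe->status);
}
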
Reviewed-by: Himanshu Madhani <himanshu.madhani@...cle.com>

-- 
Himanshu Madhani                                Oracle Linux Engineering
