Date:   Tue, 4 May 2021 19:49:00 +0300
From:   Shai Malin <malin1024@...il.com>
To:     Hannes Reinecke <hare@...e.de>
Cc:     Shai Malin <smalin@...vell.com>, netdev@...r.kernel.org,
        linux-nvme@...ts.infradead.org, davem@...emloft.net,
        kuba@...nel.org, sagi@...mberg.me, hch@....de, axboe@...com,
        kbusch@...nel.org, Ariel Elior <aelior@...vell.com>,
        Michal Kalderon <mkalderon@...vell.com>, okulkarni@...vell.com,
        pkushwaha@...vell.com
Subject: Re: [RFC PATCH v4 15/27] nvme-tcp-offload: Add Timeout and ASYNC Support

On 5/1/21 7:45 PM, Hannes Reinecke wrote:
> On 4/29/21 9:09 PM, Shai Malin wrote:
> > In this patch, we present the nvme-tcp-offload timeout support
> > nvme_tcp_ofld_timeout() and ASYNC support
> > nvme_tcp_ofld_submit_async_event().
> >
> > Acked-by: Igor Russkikh <irusskikh@...vell.com>
> > Signed-off-by: Prabhakar Kushwaha <pkushwaha@...vell.com>
> > Signed-off-by: Omkar Kulkarni <okulkarni@...vell.com>
> > Signed-off-by: Michal Kalderon <mkalderon@...vell.com>
> > Signed-off-by: Ariel Elior <aelior@...vell.com>
> > Signed-off-by: Shai Malin <smalin@...vell.com>
> > ---
> >   drivers/nvme/host/tcp-offload.c | 85 ++++++++++++++++++++++++++++++++-
> >   drivers/nvme/host/tcp-offload.h |  2 +
> >   2 files changed, 86 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/nvme/host/tcp-offload.c b/drivers/nvme/host/tcp-offload.c
> > index 0cdf5a432208..1d62f921f109 100644
> > --- a/drivers/nvme/host/tcp-offload.c
> > +++ b/drivers/nvme/host/tcp-offload.c
> > @@ -133,6 +133,26 @@ void nvme_tcp_ofld_req_done(struct nvme_tcp_ofld_req *req,
> >               nvme_complete_rq(rq);
> >   }
> >
> > +/**
> > + * nvme_tcp_ofld_async_req_done() - NVMeTCP Offload request done callback
> > + * function for async request. Pointed to by nvme_tcp_ofld_req->done.
> > + * Handles both NVME_TCP_F_DATA_SUCCESS flag and NVMe CQ.
> > + * @req:     NVMeTCP offload request to complete.
> > + * @result:     The nvme_result.
> > + * @status:     The completion status.
> > + *
> > + * API function that allows the vendor specific offload driver to report request
> > + * completions to the common offload layer.
> > + */
> > +void nvme_tcp_ofld_async_req_done(struct nvme_tcp_ofld_req *req,
> > +                               union nvme_result *result, __le16 status)
> > +{
> > +     struct nvme_tcp_ofld_queue *queue = req->queue;
> > +     struct nvme_tcp_ofld_ctrl *ctrl = queue->ctrl;
> > +
> > +     nvme_complete_async_event(&ctrl->nctrl, status, result);
> > +}
> > +
> >   struct nvme_tcp_ofld_dev *
> >   nvme_tcp_ofld_lookup_dev(struct nvme_tcp_ofld_ctrl *ctrl)
> >   {
> > @@ -719,7 +739,23 @@ void nvme_tcp_ofld_map_data(struct nvme_command *c, u32 data_len)
> >
> >   static void nvme_tcp_ofld_submit_async_event(struct nvme_ctrl *arg)
> >   {
> > -     /* Placeholder - submit_async_event */
> > +     struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(arg);
> > +     struct nvme_tcp_ofld_queue *queue = &ctrl->queues[0];
> > +     struct nvme_tcp_ofld_dev *dev = queue->dev;
> > +     struct nvme_tcp_ofld_ops *ops = dev->ops;
> > +
> > +     ctrl->async_req.nvme_cmd.common.opcode = nvme_admin_async_event;
> > +     ctrl->async_req.nvme_cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
> > +     ctrl->async_req.nvme_cmd.common.flags |= NVME_CMD_SGL_METABUF;
> > +
> > +     nvme_tcp_ofld_set_sg_null(&ctrl->async_req.nvme_cmd);
> > +
> > +     ctrl->async_req.async = true;
> > +     ctrl->async_req.queue = queue;
> > +     ctrl->async_req.last = true;
> > +     ctrl->async_req.done = nvme_tcp_ofld_async_req_done;
> > +
> > +     ops->send_req(&ctrl->async_req);
> >   }
> >
> >   static void
> > @@ -1024,6 +1060,51 @@ static int nvme_tcp_ofld_poll(struct blk_mq_hw_ctx *hctx)
> >       return ops->poll_queue(queue);
> >   }
> >
> > +static void nvme_tcp_ofld_complete_timed_out(struct request *rq)
> > +{
> > +     struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq);
> > +     struct nvme_ctrl *nctrl = &req->queue->ctrl->nctrl;
> > +
> > +     nvme_tcp_ofld_stop_queue(nctrl, nvme_tcp_ofld_qid(req->queue));
> > +     if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
> > +             nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
> > +             blk_mq_complete_request(rq);
> > +     }
> > +}
> > +
> > +static enum blk_eh_timer_return nvme_tcp_ofld_timeout(struct request *rq, bool reserved)
> > +{
> > +     struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq);
> > +     struct nvme_tcp_ofld_ctrl *ctrl = req->queue->ctrl;
> > +
> > +     dev_warn(ctrl->nctrl.device,
> > +              "queue %d: timeout request %#x type %d\n",
> > +              nvme_tcp_ofld_qid(req->queue), rq->tag, req->nvme_cmd.common.opcode);
> > +
> > +     if (ctrl->nctrl.state != NVME_CTRL_LIVE) {
> > +             /*
> > +              * If we are resetting, connecting or deleting we should
> > +              * complete immediately because we may block controller
> > +              * teardown or setup sequence
> > +              * - ctrl disable/shutdown fabrics requests
> > +              * - connect requests
> > +              * - initialization admin requests
> > +              * - I/O requests that entered after unquiescing and
> > +              *   the controller stopped responding
> > +              *
> > +              * All other requests should be cancelled by the error
> > +              * recovery work, so it's fine that we fail it here.
> > +              */
> > +             nvme_tcp_ofld_complete_timed_out(rq);
> > +
> > +             return BLK_EH_DONE;
> > +     }
>
> And this particular error code has been causing _so_ _many_ issues
> during testing that I'd rather get rid of it altogether.
> But probably not your fault; you're just copying what tcp and rdma are doing.
>

I agree. We preferred to keep all the teardown/error flows similar to the tcp
and rdma design so that tcp-offload can stay aligned with any future changes
made to those drivers. Would you like us to do anything differently?
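
For reference, the flow we mirrored is the upstream nvme-tcp timeout handler.
A rough sketch of that pattern from memory of drivers/nvme/host/tcp.c
(details may differ in the current tree):

static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

	/*
	 * Outside LIVE state: complete the request directly so controller
	 * teardown/setup is not blocked waiting on it.
	 */
	if (ctrl->state != NVME_CTRL_LIVE) {
		nvme_tcp_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state: kick error recovery and re-arm the timer; the
	 * recovery work will complete (or cancel) the request.
	 */
	nvme_tcp_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}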

> > +
> > +     nvme_tcp_ofld_error_recovery(&ctrl->nctrl);
> > +
> > +     return BLK_EH_RESET_TIMER;
> > +}
> > +
> >   static struct blk_mq_ops nvme_tcp_ofld_mq_ops = {
> >       .queue_rq       = nvme_tcp_ofld_queue_rq,
> >       .commit_rqs     = nvme_tcp_ofld_commit_rqs,
> > @@ -1031,6 +1112,7 @@ static struct blk_mq_ops nvme_tcp_ofld_mq_ops = {
> >       .init_request   = nvme_tcp_ofld_init_request,
> >       .exit_request   = nvme_tcp_ofld_exit_request,
> >       .init_hctx      = nvme_tcp_ofld_init_hctx,
> > +     .timeout        = nvme_tcp_ofld_timeout,
> >       .map_queues     = nvme_tcp_ofld_map_queues,
> >       .poll           = nvme_tcp_ofld_poll,
> >   };
> > @@ -1041,6 +1123,7 @@ static struct blk_mq_ops nvme_tcp_ofld_admin_mq_ops = {
> >       .init_request   = nvme_tcp_ofld_init_request,
> >       .exit_request   = nvme_tcp_ofld_exit_request,
> >       .init_hctx      = nvme_tcp_ofld_init_admin_hctx,
> > +     .timeout        = nvme_tcp_ofld_timeout,
> >   };
> >
> >   static const struct nvme_ctrl_ops nvme_tcp_ofld_ctrl_ops = {
> > diff --git a/drivers/nvme/host/tcp-offload.h b/drivers/nvme/host/tcp-offload.h
> > index d82645fcf9da..275a7e2d9d8a 100644
> > --- a/drivers/nvme/host/tcp-offload.h
> > +++ b/drivers/nvme/host/tcp-offload.h
> > @@ -110,6 +110,8 @@ struct nvme_tcp_ofld_ctrl {
> >       /* Connectivity params */
> >       struct nvme_tcp_ofld_ctrl_con_params conn_params;
> >
> > +     struct nvme_tcp_ofld_req async_req;
> > +
> >       /* Vendor specific driver context */
> >       void *private_data;
> >   };
> >
> So:
>
> Reviewed-by: Hannes Reinecke <hare@...e.de>

Thanks.

>
> Cheers,
>
> Hannes
> --
> Dr. Hannes Reinecke                Kernel Storage Architect
> hare@...e.de                              +49 911 74053 688
> SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
> HRB 36809 (AG Nürnberg), Geschäftsführer: Felix Imendörffer
