[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <4937c9f8-fbc7-46d4-a14f-262a0244f1f0@gmail.com>
Date: Wed, 19 Mar 2025 15:47:20 -0700
From: James Smart <jsmart2021@...il.com>
To: Daniel Wagner <wagi@...nel.org>, James Smart <james.smart@...adcom.com>,
Christoph Hellwig <hch@....de>, Sagi Grimberg <sagi@...mberg.me>,
Chaitanya Kulkarni <kch@...dia.com>
Cc: Hannes Reinecke <hare@...e.de>, Keith Busch <kbusch@...nel.org>,
linux-nvme@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 10/18] nvmet-fcloop: allocate/free fcloop_lsreq
directly
On 3/18/2025 3:40 AM, Daniel Wagner wrote:
> fcloop depends on the host or the target to allocate the fcloop_lsreq
> object. This means that the lifetime of the fcloop_lsreq is tied to
> either the host or the target. Consequently, the host or the target must
> cooperate during shutdown.
>
> Unfortunately, this approach does not work well when the target forces a
> shutdown, as there are dependencies that are difficult to resolve in a
> clean way.
ok - although I'm guessing you'll be trading one set of problems for another.
>
> The simplest solution is to decouple the lifetime of the fcloop_lsreq
> object by managing them directly within fcloop. Since this is not a
> performance-critical path and only a small number of LS objects are used
> during setup and cleanup, it does not significantly impact performance
> to allocate them during normal operation.
ok
>
> Signed-off-by: Daniel Wagner <wagi@...nel.org>
> ---
> drivers/nvme/target/fcloop.c | 53 +++++++++++++++++++++++++++++---------------
> 1 file changed, 35 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> index 06f42da6a0335c53ae319133119d057aab12e07e..537fc6533a4cf5d39855cf850b82af739eeb3056 100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -342,6 +342,7 @@ fcloop_rport_lsrqst_work(struct work_struct *work)
> * callee may free memory containing tls_req.
> * do not reference lsreq after this.
> */
> + kfree(tls_req);
>
> spin_lock(&rport->lock);
> }
> @@ -353,10 +354,13 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
> struct nvme_fc_remote_port *remoteport,
> struct nvmefc_ls_req *lsreq)
> {
> - struct fcloop_lsreq *tls_req = lsreq->private;
> struct fcloop_rport *rport = remoteport->private;
> + struct fcloop_lsreq *tls_req;
> int ret = 0;
>
> + tls_req = kmalloc(sizeof(*tls_req), GFP_KERNEL);
> + if (!tls_req)
> + return -ENOMEM;
> tls_req->lsreq = lsreq;
> INIT_LIST_HEAD(&tls_req->ls_list);
>
> @@ -387,19 +391,23 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
> struct nvme_fc_remote_port *remoteport = tport->remoteport;
> struct fcloop_rport *rport;
>
> +
> + if (!remoteport) {
> + kfree(tls_req);
> + return -ECONNREFUSED;
> + }
> +
Don't do this - this is not a path the lldd would generate.
> memcpy(lsreq->rspaddr, lsrsp->rspbuf,
> ((lsreq->rsplen < lsrsp->rsplen) ?
> lsreq->rsplen : lsrsp->rsplen));
>
> lsrsp->done(lsrsp);
This done() call should always be made regardless of the remoteport
presence.
Instead, put the check here (after the done() call):
if (!remoteport) {
	kfree(tls_req);
	return 0;
}
>
> - if (remoteport) {
> - rport = remoteport->private;
> - spin_lock(&rport->lock);
> - list_add_tail(&tls_req->ls_list, &rport->ls_list);
> - spin_unlock(&rport->lock);
> - queue_work(nvmet_wq, &rport->ls_work);
> - }
> + rport = remoteport->private;
> + spin_lock(&rport->lock);
> + list_add_tail(&tls_req->ls_list, &rport->ls_list);
> + spin_unlock(&rport->lock);
> + queue_work(nvmet_wq, &rport->ls_work);
This is just an indentation style - whichever way works.
>
> return 0;
> }
> @@ -426,6 +434,7 @@ fcloop_tport_lsrqst_work(struct work_struct *work)
> * callee may free memory containing tls_req.
> * do not reference lsreq after this.
> */
> + kfree(tls_req);
>
> spin_lock(&tport->lock);
> }
> @@ -436,8 +445,8 @@ static int
> fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
> struct nvmefc_ls_req *lsreq)
> {
> - struct fcloop_lsreq *tls_req = lsreq->private;
> struct fcloop_tport *tport = targetport->private;
> + struct fcloop_lsreq *tls_req;
> int ret = 0;
>
> /*
> @@ -445,6 +454,10 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
> * hosthandle ignored as fcloop currently is
> * 1:1 tgtport vs remoteport
> */
> +
> + tls_req = kmalloc(sizeof(*tls_req), GFP_KERNEL);
> + if (!tls_req)
> + return -ENOMEM;
> tls_req->lsreq = lsreq;
> INIT_LIST_HEAD(&tls_req->ls_list);
>
> @@ -461,6 +474,9 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
> ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
> lsreq->rqstaddr, lsreq->rqstlen);
>
> + if (ret)
> + kfree(tls_req);
> +
> return ret;
> }
>
> @@ -475,18 +491,21 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
> struct nvmet_fc_target_port *targetport = rport->targetport;
> struct fcloop_tport *tport;
>
> + if (!targetport) {
> + kfree(tls_req);
> + return -ECONNREFUSED;
> + }
> +
Same here - don't do this - this is not a path the lldd would generate.
> memcpy(lsreq->rspaddr, lsrsp->rspbuf,
> ((lsreq->rsplen < lsrsp->rsplen) ?
> lsreq->rsplen : lsrsp->rsplen));
> lsrsp->done(lsrsp);
>
Same for this done() - it should always be made regardless of the
targetport presence.
Instead, put the check here (after the done() call):
if (!targetport) {
	kfree(tls_req);
	return 0;
}
> - if (targetport) {
> - tport = targetport->private;
> - spin_lock(&tport->lock);
> - list_add_tail(&tport->ls_list, &tls_req->ls_list);
> - spin_unlock(&tport->lock);
> - queue_work(nvmet_wq, &tport->ls_work);
> - }
> + tport = targetport->private;
> + spin_lock(&tport->lock);
> + list_add_tail(&tport->ls_list, &tls_req->ls_list);
> + spin_unlock(&tport->lock);
> + queue_work(nvmet_wq, &tport->ls_work);
>
> return 0;
> }
> @@ -1129,7 +1148,6 @@ static struct nvme_fc_port_template fctemplate = {
> /* sizes of additional private data for data structures */
> .local_priv_sz = sizeof(struct fcloop_lport_priv),
> .remote_priv_sz = sizeof(struct fcloop_rport),
> - .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
> .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
> };
>
> @@ -1152,7 +1170,6 @@ static struct nvmet_fc_target_template tgttemplate = {
> .target_features = 0,
> /* sizes of additional private data for data structures */
> .target_priv_sz = sizeof(struct fcloop_tport),
> - .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
> };
>
> static ssize_t
>
-- james
Powered by blists - more mailing lists