[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <fd877a93-8630-4180-a591-5916e18cda72@suse.de>
Date: Fri, 28 Feb 2025 08:11:11 +0100
From: Hannes Reinecke <hare@...e.de>
To: Daniel Wagner <wagi@...nel.org>, James Smart <james.smart@...adcom.com>,
Christoph Hellwig <hch@....de>, Sagi Grimberg <sagi@...mberg.me>,
Chaitanya Kulkarni <kch@...dia.com>
Cc: Keith Busch <kbusch@...nel.org>, linux-nvme@...ts.infradead.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 03/11] nvmet-fcloop: refactor fcloop_nport_alloc
On 2/26/25 19:45, Daniel Wagner wrote:
> There are many different operations done under the spin lock. Since the
> lport and nport are ref counted it's possible to only use for the list
> insert and lookup the spin lock.
>
... it's possible to use the spin lock only for the list insert and lookup.
> This allows us to untangle the setup steps into a more linear form which
> reduces the complexity of the functions.
>
> Signed-off-by: Daniel Wagner <wagi@...nel.org>
> ---
> drivers/nvme/target/fcloop.c | 156 +++++++++++++++++++++++--------------------
> 1 file changed, 84 insertions(+), 72 deletions(-)
>
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> index ca46830d46ecbaae21f3ee3e69aa7d52905abcae..de1963c34bd88d0335f70de569565740fd395a0a 100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -1038,6 +1038,8 @@ fcloop_nport_free(struct kref *ref)
> list_del(&nport->nport_list);
> spin_unlock_irqrestore(&fcloop_lock, flags);
>
> + if (nport->lport)
> + fcloop_lport_put(nport->lport);
> kfree(nport);
> }
>
> @@ -1206,33 +1208,63 @@ __wait_localport_unreg(struct fcloop_lport *lport)
> return ret;
> }
>
> +static struct fcloop_lport *
> +fcloop_lport_lookup(u64 node_name, u64 port_name)
> +{
> + struct fcloop_lport *lp, *lport = NULL;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&fcloop_lock, flags);
> + list_for_each_entry(lp, &fcloop_lports, lport_list) {
> + if (lp->localport->node_name != node_name ||
> + lp->localport->port_name != port_name)
> + continue;
> +
> + if (fcloop_lport_get(lp))
> + lport = lp;
> +
> + break;
> + }
> + spin_unlock_irqrestore(&fcloop_lock, flags);
> +
> + return lport;
> +}
> +
> +static struct fcloop_nport *
> +fcloop_nport_lookup(u64 node_name, u64 port_name)
> +{
> + struct fcloop_nport *np, *nport = NULL;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&fcloop_lock, flags);
> + list_for_each_entry(np, &fcloop_nports, nport_list) {
> + if (np->node_name != node_name ||
> + np->port_name != port_name)
> + continue;
> +
> + if (fcloop_nport_get(np))
> + nport = np;
> +
> + break;
> + }
> + spin_unlock_irqrestore(&fcloop_lock, flags);
> +
> + return nport;
> +}
>
> static ssize_t
> fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
> const char *buf, size_t count)
> {
> - struct fcloop_lport *tlport, *lport = NULL;
> + struct fcloop_lport *lport;
> u64 nodename, portname;
> - unsigned long flags;
> int ret;
>
> ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
> if (ret)
> return ret;
>
> - spin_lock_irqsave(&fcloop_lock, flags);
> -
> - list_for_each_entry(tlport, &fcloop_lports, lport_list) {
> - if (tlport->localport->node_name == nodename &&
> - tlport->localport->port_name == portname) {
> - if (!fcloop_lport_get(tlport))
> - break;
> - lport = tlport;
> - break;
> - }
> - }
> - spin_unlock_irqrestore(&fcloop_lock, flags);
> -
> + lport = fcloop_lport_lookup(nodename, portname);
> if (!lport)
> return -ENOENT;
>
> @@ -1245,9 +1277,9 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
> static struct fcloop_nport *
> fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
> {
> - struct fcloop_nport *newnport, *nport = NULL;
> - struct fcloop_lport *tmplport, *lport = NULL;
> struct fcloop_ctrl_options *opts;
> + struct fcloop_nport *nport;
> + struct fcloop_lport *lport;
> unsigned long flags;
> u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
> int ret;
> @@ -1261,79 +1293,59 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
> goto out_free_opts;
>
> /* everything there ? */
> - if ((opts->mask & opts_mask) != opts_mask) {
> - ret = -EINVAL;
> + if ((opts->mask & opts_mask) != opts_mask)
> goto out_free_opts;
> - }
>
> - newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
> - if (!newnport)
> + lport = fcloop_lport_lookup(opts->wwnn, opts->wwpn);
> + if (lport) {
> + /* invalid configuration */
> + fcloop_lport_put(lport);
> goto out_free_opts;
> + }
>
> - INIT_LIST_HEAD(&newnport->nport_list);
> - newnport->node_name = opts->wwnn;
> - newnport->port_name = opts->wwpn;
> - if (opts->mask & NVMF_OPT_ROLES)
> - newnport->port_role = opts->roles;
> - if (opts->mask & NVMF_OPT_FCADDR)
> - newnport->port_id = opts->fcaddr;
> - kref_init(&newnport->ref);
> + nport = fcloop_nport_lookup(opts->wwnn, opts->wwpn);
> + if (nport && ((remoteport && nport->rport) ||
> + (!remoteport && nport->tport))) {
> + /* invalid configuration */
> + goto out_put_nport;
> + }
>
> - spin_lock_irqsave(&fcloop_lock, flags);
> + if (!nport) {
> + nport = kzalloc(sizeof(*nport), GFP_KERNEL);
> + if (!nport)
> + goto out_free_opts;
>
> - list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
> - if (tmplport->localport->node_name == opts->wwnn &&
> - tmplport->localport->port_name == opts->wwpn)
> - goto out_invalid_opts;
> + INIT_LIST_HEAD(&nport->nport_list);
> + nport->node_name = opts->wwnn;
> + nport->port_name = opts->wwpn;
> + kref_init(&nport->ref);
>
> - if (tmplport->localport->node_name == opts->lpwwnn &&
> - tmplport->localport->port_name == opts->lpwwpn)
> - lport = tmplport;
> + spin_lock_irqsave(&fcloop_lock, flags);
> + list_add_tail(&nport->nport_list, &fcloop_nports);
> + spin_unlock_irqrestore(&fcloop_lock, flags);
Don't you need to check here if an 'nport' with the same node_name and
port_name is already present?
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@...e.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
Powered by blists - more mailing lists