Message-ID: <20241114014004.GE89669@linux.alibaba.com>
Date: Thu, 14 Nov 2024 09:40:04 +0800
From: Dust Li <dust.li@...ux.alibaba.com>
To: "D. Wythe" <alibuda@...ux.alibaba.com>, kgraul@...ux.ibm.com,
wenjia@...ux.ibm.com, jaka@...ux.ibm.com, wintera@...ux.ibm.com,
guwen@...ux.alibaba.com
Cc: kuba@...nel.org, davem@...emloft.net, netdev@...r.kernel.org,
linux-s390@...r.kernel.org, linux-rdma@...r.kernel.org,
tonylu@...ux.alibaba.com, pabeni@...hat.com, edumazet@...gle.com
Subject: Re: [PATCH net-next 1/3] net/smc: refactoring lgr pending lock
On 2024-11-13 15:14:03, D. Wythe wrote:
>This patch replaces the locking and unlocking of the lgr pending
>lock with a unified pair of inline helpers. Since the lifetime of
>the lgr pending lock falls within that of init_info, the lock state
>can be recorded on init_info. This allows multiple unlocks without
>triggering exceptions, which in turn
Since we already have lockdep, I am skeptical about the usefulness of
this feature.
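
FWIW, with CONFIG_PROVE_LOCKING and CONFIG_DEBUG_MUTEXES enabled, both
cases this helper tries to guard against are already reported by the
existing debug infrastructure. A minimal sketch (not from this patch,
just for illustration):

	/* recursive acquisition: lockdep (CONFIG_PROVE_LOCKING)
	 * reports "possible recursive locking detected" on the
	 * second mutex_lock()
	 */
	mutex_lock(&smc_client_lgr_pending);
	mutex_lock(&smc_client_lgr_pending);

	/* unbalanced release: CONFIG_DEBUG_MUTEXES warns when a
	 * task unlocks a mutex it does not hold
	 */
	mutex_unlock(&smc_client_lgr_pending);
	mutex_unlock(&smc_client_lgr_pending);
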
>makes it possible to narrow the scope of the lock in the future.
>
>Signed-off-by: D. Wythe <alibuda@...ux.alibaba.com>
>---
> net/smc/af_smc.c | 24 ++++++++++++------------
> net/smc/smc_core.h | 29 +++++++++++++++++++++++++++++
> 2 files changed, 41 insertions(+), 12 deletions(-)
>
>diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
>index 9d76e902fd77..19480d8affb0 100644
>--- a/net/smc/af_smc.c
>+++ b/net/smc/af_smc.c
>@@ -1251,10 +1251,10 @@ static int smc_connect_rdma(struct smc_sock *smc,
> if (reason_code)
> return reason_code;
>
>- mutex_lock(&smc_client_lgr_pending);
>+ smc_lgr_pending_lock(ini, &smc_client_lgr_pending);
> reason_code = smc_conn_create(smc, ini);
> if (reason_code) {
>- mutex_unlock(&smc_client_lgr_pending);
>+ smc_lgr_pending_unlock(ini);
> return reason_code;
> }
>
>@@ -1343,7 +1343,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
> if (reason_code)
> goto connect_abort;
> }
>- mutex_unlock(&smc_client_lgr_pending);
>+ smc_lgr_pending_unlock(ini);
>
> smc_copy_sock_settings_to_clc(smc);
> smc->connect_nonblock = 0;
>@@ -1353,7 +1353,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
> return 0;
> connect_abort:
> smc_conn_abort(smc, ini->first_contact_local);
>- mutex_unlock(&smc_client_lgr_pending);
>+ smc_lgr_pending_unlock(ini);
> smc->connect_nonblock = 0;
>
> return reason_code;
>@@ -1412,10 +1412,10 @@ static int smc_connect_ism(struct smc_sock *smc,
> ini->ism_peer_gid[ini->ism_selected].gid = ntohll(aclc->d0.gid);
>
> /* there is only one lgr role for SMC-D; use server lock */
>- mutex_lock(&smc_server_lgr_pending);
>+ smc_lgr_pending_lock(ini, &smc_server_lgr_pending);
> rc = smc_conn_create(smc, ini);
> if (rc) {
>- mutex_unlock(&smc_server_lgr_pending);
>+ smc_lgr_pending_unlock(ini);
> return rc;
> }
>
>@@ -1446,7 +1446,7 @@ static int smc_connect_ism(struct smc_sock *smc,
> aclc->hdr.version, eid, ini);
> if (rc)
> goto connect_abort;
>- mutex_unlock(&smc_server_lgr_pending);
>+ smc_lgr_pending_unlock(ini);
>
> smc_copy_sock_settings_to_clc(smc);
> smc->connect_nonblock = 0;
>@@ -1456,7 +1456,7 @@ static int smc_connect_ism(struct smc_sock *smc,
> return 0;
> connect_abort:
> smc_conn_abort(smc, ini->first_contact_local);
>- mutex_unlock(&smc_server_lgr_pending);
>+ smc_lgr_pending_unlock(ini);
> smc->connect_nonblock = 0;
>
> return rc;
>@@ -2478,7 +2478,7 @@ static void smc_listen_work(struct work_struct *work)
> if (rc)
> goto out_decl;
>
>- mutex_lock(&smc_server_lgr_pending);
>+ smc_lgr_pending_lock(ini, &smc_server_lgr_pending);
> smc_close_init(new_smc);
> smc_rx_init(new_smc);
> smc_tx_init(new_smc);
>@@ -2497,7 +2497,7 @@ static void smc_listen_work(struct work_struct *work)
>
> /* SMC-D does not need this lock any more */
> if (ini->is_smcd)
>- mutex_unlock(&smc_server_lgr_pending);
>+ smc_lgr_pending_unlock(ini);
>
> /* receive SMC Confirm CLC message */
> memset(buf, 0, sizeof(*buf));
>@@ -2528,7 +2528,7 @@ static void smc_listen_work(struct work_struct *work)
> ini->first_contact_local, ini);
> if (rc)
> goto out_unlock;
>- mutex_unlock(&smc_server_lgr_pending);
>+ smc_lgr_pending_unlock(ini);
> }
> smc_conn_save_peer_info(new_smc, cclc);
>
>@@ -2544,7 +2544,7 @@ static void smc_listen_work(struct work_struct *work)
> goto out_free;
>
> out_unlock:
>- mutex_unlock(&smc_server_lgr_pending);
>+ smc_lgr_pending_unlock(ini);
> out_decl:
> smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
> proposal_version);
>diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
>index 69b54ecd6503..5abe9438772c 100644
>--- a/net/smc/smc_core.h
>+++ b/net/smc/smc_core.h
>@@ -432,6 +432,8 @@ struct smc_init_info {
> u8 ism_offered_cnt; /* # of ISM devices offered */
> u8 ism_selected; /* index of selected ISM dev*/
> u8 smcd_version;
>+ /* mutex held during conn create */
>+ struct mutex *mutex;
> };
>
> /* Find the connection associated with the given alert token in the link group.
>@@ -600,6 +602,33 @@ int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);
> int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb);
> int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);
>
>+static inline void smc_lgr_pending_lock(struct smc_init_info *ini, struct mutex *lock)
>+{
>+ if (unlikely(ini->mutex))
>+ pr_warn_once("smc: lgr pending deadlock detected\n");
>+
>+ mutex_lock(lock);
>+ ini->mutex = lock;
>+}
>+
>+/* Record the lock state on the ini so that a second unlock does not
>+ * trigger an exception. This makes it possible to narrow the scope
>+ * of the lock in the future.
>+ */
>+static inline void smc_lgr_pending_unlock(struct smc_init_info *ini)
>+{
>+ /* temporary holder for the lock */
>+ struct mutex *lock;
>+
>+ /* already unlocked, just return */
>+ if (!ini->mutex)
>+ return;
>+
>+ lock = ini->mutex;
>+ ini->mutex = NULL;
>+ mutex_unlock(lock);
>+}
>+
> static inline struct smc_link_group *smc_get_lgr(struct smc_link *link)
> {
> return link->lgr;
>--
>2.45.0
>