[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1708412505-34470-9-git-send-email-alibuda@linux.alibaba.com>
Date: Tue, 20 Feb 2024 15:01:33 +0800
From: "D. Wythe" <alibuda@...ux.alibaba.com>
To: kgraul@...ux.ibm.com,
wenjia@...ux.ibm.com,
jaka@...ux.ibm.com,
wintera@...ux.ibm.com,
guwen@...ux.alibaba.com
Cc: kuba@...nel.org,
davem@...emloft.net,
netdev@...r.kernel.org,
linux-s390@...r.kernel.org,
linux-rdma@...r.kernel.org,
tonylu@...ux.alibaba.com,
pabeni@...hat.com,
edumazet@...gle.com
Subject: [RFC net-next 08/20] net/smc: optimize mutex_fback_rsn from mutex to spinlock
From: "D. Wythe" <alibuda@...ux.alibaba.com>
The region protected by mutex_fback_rsn is simple enough and has no
potential blocking points. This change allows us to invoke
smc_stat_fallback() in any context, typically in the context of
IRQ.
Signed-off-by: D. Wythe <alibuda@...ux.alibaba.com>
---
include/net/netns/smc.h | 2 +-
net/smc/af_smc.c | 4 ++--
net/smc/smc_stats.c | 6 +++---
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h
index fc752a5..99bde74 100644
--- a/include/net/netns/smc.h
+++ b/include/net/netns/smc.h
@@ -10,7 +10,7 @@ struct netns_smc {
/* per cpu counters for SMC */
struct smc_stats __percpu *smc_stats;
/* protect fback_rsn */
- struct mutex mutex_fback_rsn;
+ spinlock_t mutex_fback_rsn;
struct smc_stats_rsn *fback_rsn;
bool limit_smc_hs; /* constraint on handshake */
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 66306b7..1381ac1 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -769,7 +769,7 @@ static void smc_stat_fallback(struct smc_sock *smc)
{
struct net *net = sock_net(&smc->sk);
- mutex_lock(&net->smc.mutex_fback_rsn);
+ spin_lock_bh(&net->smc.mutex_fback_rsn);
if (smc->listen_smc) {
smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
net->smc.fback_rsn->srv_fback_cnt++;
@@ -777,7 +777,7 @@ static void smc_stat_fallback(struct smc_sock *smc)
smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
net->smc.fback_rsn->clnt_fback_cnt++;
}
- mutex_unlock(&net->smc.mutex_fback_rsn);
+ spin_unlock_bh(&net->smc.mutex_fback_rsn);
}
/* must be called under rcu read lock */
diff --git a/net/smc/smc_stats.c b/net/smc/smc_stats.c
index ca14c0f..64668e9 100644
--- a/net/smc/smc_stats.c
+++ b/net/smc/smc_stats.c
@@ -26,7 +26,7 @@ int smc_stats_init(struct net *net)
net->smc.smc_stats = alloc_percpu(struct smc_stats);
if (!net->smc.smc_stats)
goto err_stats;
- mutex_init(&net->smc.mutex_fback_rsn);
+ spin_lock_init(&net->smc.mutex_fback_rsn);
return 0;
err_stats:
@@ -387,7 +387,7 @@ int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
int snum = cb_ctx->pos[0];
bool is_srv = true;
- mutex_lock(&net->smc.mutex_fback_rsn);
+ spin_lock_bh(&net->smc.mutex_fback_rsn);
for (k = 0; k < SMC_MAX_FBACK_RSN_CNT; k++) {
if (k < snum)
continue;
@@ -406,7 +406,7 @@ int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
if (rc_clnt == -ENODATA && rc_srv == -ENODATA)
break;
}
- mutex_unlock(&net->smc.mutex_fback_rsn);
+ spin_unlock_bh(&net->smc.mutex_fback_rsn);
cb_ctx->pos[1] = skip_serv;
cb_ctx->pos[0] = k;
return skb->len;
--
1.8.3.1
Powered by blists - more mailing lists