Message-Id: <20201103102531.91710-4-kgraul@linux.ibm.com>
Date: Tue, 3 Nov 2020 11:25:19 +0100
From: Karsten Graul <kgraul@...ux.ibm.com>
To: davem@...emloft.net
Cc: netdev@...r.kernel.org, linux-s390@...r.kernel.org,
hca@...ux.ibm.com, raspl@...ux.ibm.com
Subject: [PATCH net-next v2 03/15] net/smc: Add connection counters for links
From: Guvenc Gulce <guvenc@...ux.ibm.com>
Add connection counters to the link structure and increase or
decrease them in the routines that assign, unregister, or switch
a connection's link.
Signed-off-by: Guvenc Gulce <guvenc@...ux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@...ux.ibm.com>
---
net/smc/smc_core.c | 16 ++++++++++++++--
net/smc/smc_core.h | 1 +
2 files changed, 15 insertions(+), 2 deletions(-)
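
Note (not part of the patch, added for illustration): the counting scheme is small
enough to sketch in plain C11, using <stdatomic.h> in place of the kernel's atomic_t
API. The sketch_* structures and functions below are hypothetical stand-ins that only
mirror the routines touched by this patch: the increment in smcr_lgr_conn_assign_link(),
the decrement in __smc_lgr_unregister_conn(), and the combined move done by the new
smc_switch_link_and_count() helper.

/* Userspace sketch of per-link connection counting; not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

struct sketch_link {
	atomic_int conn_cnt;		/* connections currently using this link */
};

struct sketch_conn {
	struct sketch_link *lnk;	/* link assigned to this connection */
};

/* counterpart of the increment in smcr_lgr_conn_assign_link() */
static void sketch_assign_link(struct sketch_conn *conn, struct sketch_link *lnk)
{
	conn->lnk = lnk;
	atomic_fetch_add(&lnk->conn_cnt, 1);
}

/* counterpart of the decrement in __smc_lgr_unregister_conn() */
static void sketch_unregister_conn(struct sketch_conn *conn)
{
	if (conn->lnk)
		atomic_fetch_sub(&conn->lnk->conn_cnt, 1);
	conn->lnk = NULL;
}

/* counterpart of smc_switch_link_and_count(): move the connection to
 * another link and keep both counters consistent
 */
static void sketch_switch_link_and_count(struct sketch_conn *conn,
					 struct sketch_link *to_lnk)
{
	atomic_fetch_sub(&conn->lnk->conn_cnt, 1);
	conn->lnk = to_lnk;
	atomic_fetch_add(&conn->lnk->conn_cnt, 1);
}

int main(void)
{
	struct sketch_link a = { 0 }, b = { 0 };
	struct sketch_conn c = { 0 };

	sketch_assign_link(&c, &a);
	sketch_switch_link_and_count(&c, &b);
	printf("link a: %d, link b: %d\n",
	       atomic_load(&a.conn_cnt), atomic_load(&b.conn_cnt));
	sketch_unregister_conn(&c);
	return 0;
}

Built with a C11 compiler (e.g. cc -std=c11 sketch.c), it prints "link a: 0, link b: 1",
i.e. the counter follows the connection when it is switched between links.
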
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 2b19863f7171..6e2077161267 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -139,6 +139,7 @@ static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
 	}
 	if (!conn->lnk)
 		return SMC_CLC_DECL_NOACTLINK;
+	atomic_inc(&conn->lnk->conn_cnt);
 	return 0;
 }
@@ -180,6 +181,8 @@ static void __smc_lgr_unregister_conn(struct smc_connection *conn)
 	struct smc_link_group *lgr = conn->lgr;
 
 	rb_erase(&conn->alert_node, &lgr->conns_all);
+	if (conn->lnk)
+		atomic_dec(&conn->lnk->conn_cnt);
 	lgr->conns_num--;
 	conn->alert_token_local = 0;
 	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
@@ -314,6 +317,7 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
 	lnk->smcibdev = ini->ib_dev;
 	lnk->ibport = ini->ib_port;
 	lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
+	atomic_set(&lnk->conn_cnt, 0);
 	smc_llc_link_set_uid(lnk);
 	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
 	if (!ini->ib_dev->initialized) {
@@ -526,6 +530,14 @@ static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
 	return rc;
 }
 
+static inline void smc_switch_link_and_count(struct smc_connection *conn,
+					      struct smc_link *to_lnk)
+{
+	atomic_dec(&conn->lnk->conn_cnt);
+	conn->lnk = to_lnk;
+	atomic_inc(&conn->lnk->conn_cnt);
+}
+
 struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
 				  struct smc_link *from_lnk, bool is_dev_err)
 {
@@ -574,7 +586,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
 		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
 		    smc->sk.sk_state == SMC_PROCESSABORT) {
 			spin_lock_bh(&conn->send_lock);
-			conn->lnk = to_lnk;
+			smc_switch_link_and_count(conn, to_lnk);
 			spin_unlock_bh(&conn->send_lock);
 			continue;
 		}
@@ -588,7 +600,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
 		}
 		/* avoid race with smcr_tx_sndbuf_nonempty() */
 		spin_lock_bh(&conn->send_lock);
-		conn->lnk = to_lnk;
+		smc_switch_link_and_count(conn, to_lnk);
 		rc = smc_switch_cursor(smc, pend, wr_buf);
 		spin_unlock_bh(&conn->send_lock);
 		sock_put(&smc->sk);
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 9aee54a6bcba..83a88a4635db 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -129,6 +129,7 @@ struct smc_link {
 	struct delayed_work	llc_testlink_wrk; /* testlink worker */
 	struct completion	llc_testlink_resp; /* wait for rx of testlink */
 	int			llc_testlink_time; /* testlink interval */
+	atomic_t		conn_cnt;
 };
 
 /* For now we just allow one parallel link per link group. The SMC protocol
--
2.17.1