Message-Id: <20200503123850.57261-11-kgraul@linux.ibm.com>
Date: Sun, 3 May 2020 14:38:49 +0200
From: Karsten Graul <kgraul@...ux.ibm.com>
To: davem@...emloft.net
Cc: netdev@...r.kernel.org, linux-s390@...r.kernel.org,
heiko.carstens@...ibm.com, raspl@...ux.ibm.com,
ubraun@...ux.ibm.com
Subject: [PATCH net-next v2 10/11] net/smc: delete link processing as SMC server
Add smc_llc_process_srv_delete_link() to process a DELETE_LINK request
as SMC server. When the request asks to delete ALL links, terminate the
whole link group. Otherwise, find the link to delete by its link_id,
send the DELETE_LINK request LLC message to the peer and wait for the
response. Whether or not a response is received, clear the deleted link
and update the link group state.
Signed-off-by: Karsten Graul <kgraul@...ux.ibm.com>
Reviewed-by: Ursula Braun <ubraun@...ux.ibm.com>
---
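Note (illustration only, not part of the change): the stand-alone program
below models the server-side DELETE_LINK decision flow described above.
Every identifier in it is a simplified stand-in rather than a real SMC
symbol; only the control flow mirrors the new
smc_llc_process_srv_delete_link().

#include <stdbool.h>
#include <stdio.h>

#define LINKS_PER_LGR_MAX 2	/* stand-in for SMC_LINKS_PER_LGR_MAX */

enum lgr_type { LGR_NONE, LGR_SINGLE, LGR_SYMMETRIC };

struct link { int link_id; bool active; };

struct lgr {
	struct link lnk[LINKS_PER_LGR_MAX];
	enum lgr_type type;
	bool terminated;
};

/* Server-side DELETE_LINK handling: either the whole link group is
 * terminated, or the link named by link_id is taken down and the link
 * group type is recomputed from the links that remain active.
 */
static void srv_delete_link(struct lgr *lgr, bool del_all, int link_id)
{
	struct link *lnk_del = NULL;
	int active = 0;
	int i;

	if (del_all) {			/* DELETE_LINK for ALL links */
		lgr->terminated = true;
		return;
	}
	for (i = 0; i < LINKS_PER_LGR_MAX; i++)
		if (lgr->lnk[i].link_id == link_id)
			lnk_del = &lgr->lnk[i];
	if (!lnk_del)
		return;			/* link already deleted */

	/* in the kernel, the DELETE_LINK request is sent to the peer here
	 * and the response is awaited; the link is cleared either way
	 */
	lnk_del->active = false;

	for (i = 0; i < LINKS_PER_LGR_MAX; i++)
		active += lgr->lnk[i].active;
	if (active == 1)
		lgr->type = LGR_SINGLE;
	else if (!active) {
		lgr->type = LGR_NONE;
		lgr->terminated = true;
	}
}

int main(void)
{
	struct lgr lgr = {
		.lnk = { { .link_id = 1, .active = true },
			 { .link_id = 2, .active = true } },
		.type = LGR_SYMMETRIC,
	};

	srv_delete_link(&lgr, false, 2);
	printf("type=%d terminated=%d\n", lgr.type, lgr.terminated);
	return 0;
}

Deleting link 2 in this model prints "type=1 terminated=0", i.e. the link
group drops to a single active link without being terminated.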
net/smc/smc_llc.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 72 insertions(+)
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index cd57b4fb1842..ac065f6d60dc 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -1187,6 +1187,76 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
 	kfree(qentry);
 }
 
+static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
+{
+	struct smc_llc_msg_del_link *del_llc;
+	struct smc_link *lnk, *lnk_del;
+	struct smc_llc_qentry *qentry;
+	int active_links;
+	int i;
+
+	mutex_lock(&lgr->llc_conf_mutex);
+	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
+	lnk = qentry->link;
+	del_llc = &qentry->msg.delete_link;
+
+	if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
+		/* delete entire lgr */
+		smc_lgr_terminate_sched(lgr);
+		goto out;
+	}
+	/* delete single link */
+	lnk_del = NULL;
+	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+		if (lgr->lnk[i].link_id == del_llc->link_num) {
+			lnk_del = &lgr->lnk[i];
+			break;
+		}
+	}
+	if (!lnk_del)
+		goto out; /* asymmetric link already deleted */
+
+	if (smc_link_downing(&lnk_del->state)) {
+		/* tbd: call smc_switch_conns(lgr, lnk_del, false); */
+		smc_wr_tx_wait_no_pending_sends(lnk_del);
+	}
+	if (!list_empty(&lgr->list)) {
+		/* qentry is either a request from peer (send it back to
+		 * initiate the DELETE_LINK processing), or a locally
+		 * enqueued DELETE_LINK request (forward it)
+		 */
+		if (!smc_llc_send_message(lnk, &qentry->msg)) {
+			struct smc_llc_msg_del_link *del_llc_resp;
+			struct smc_llc_qentry *qentry2;
+
+			qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME,
+					       SMC_LLC_DELETE_LINK);
+			if (!qentry2) {
+			} else {
+				del_llc_resp = &qentry2->msg.delete_link;
+				smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
+			}
+		}
+	}
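+	/* clear the deleted link whether or not a response was received */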
+	smcr_link_clear(lnk_del);
+
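+	/* update the link group state based on the remaining active links */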
+	active_links = smc_llc_active_link_count(lgr);
+	if (active_links == 1) {
+		lgr->type = SMC_LGR_SINGLE;
+	} else if (!active_links) {
+		lgr->type = SMC_LGR_NONE;
+		smc_lgr_terminate_sched(lgr);
+	}
+
+	if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
+		/* trigger setup of asymm alt link */
+		/* tbd: call smc_llc_srv_add_link_local(lnk); */
+	}
+out:
+	mutex_unlock(&lgr->llc_conf_mutex);
+	kfree(qentry);
+}
+
 static void smc_llc_delete_link_work(struct work_struct *work)
 {
 	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
@@ -1200,6 +1270,8 @@ static void smc_llc_delete_link_work(struct work_struct *work)
 
 	if (lgr->role == SMC_CLNT)
 		smc_llc_process_cli_delete_link(lgr);
+	else
+		smc_llc_process_srv_delete_link(lgr);
 out:
 	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
 }
--
2.17.1