Message-Id: <20201001162246.18508-6-snelson@pensando.io>
Date: Thu, 1 Oct 2020 09:22:43 -0700
From: Shannon Nelson <snelson@...sando.io>
To: netdev@...r.kernel.org, davem@...emloft.net
Cc: Shannon Nelson <snelson@...sando.io>
Subject: [PATCH net-next 5/8] ionic: disable all queue napi contexts on timeout

Some time ago we short-circuited the queue disables on a timeout
error so that we wouldn't have to wait on every queue when we
already know the requests will time out.  However, this meant we
were no longer properly stopping all of the interrupts and napi
contexts.  Change the queue disable path to always call
ionic_qcq_disable(), and give it an argument that tells it when
to skip the adminq request.

Signed-off-by: Shannon Nelson <snelson@...sando.io>
---
.../net/ethernet/pensando/ionic/ionic_lif.c | 47 +++++++++----------
1 file changed, 21 insertions(+), 26 deletions(-)
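
[ For reviewers: a minimal standalone sketch of the pattern this patch
  uses, with hypothetical stand-in names (qcq_disable, sw_quiesce,
  post_q_disable) rather than the real driver structures.  The point
  is that the software-side quiesce always runs, while the device
  request is skipped once a timeout has been seen, by feeding each
  call's return code into the next call's send_to_hw argument. ]

/* sketch.c - illustration only; build with: cc -o sketch sketch.c */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-in for masking the interrupt, synchronizing, napi_disable() */
static void sw_quiesce(int qi)
{
	printf("q%d: mask irq, synchronize, napi_disable\n", qi);
}

/* stand-in for posting the q_control disable through the adminq */
static int post_q_disable(int qi)
{
	printf("q%d: post q_control disable\n", qi);
	return 0;		/* would be -ETIMEDOUT with a dead device */
}

/* shape of the reworked disable: sw teardown always, hw request optional */
static int qcq_disable(int qi, bool send_to_hw)
{
	int err = 0;

	sw_quiesce(qi);
	if (send_to_hw)
		err = post_q_disable(qi);

	return err;
}

int main(void)
{
	int err = 0;
	int i;

	/* same carry pattern as the loops in ionic_txrx_disable() below */
	for (i = 0; i < 4; i++)
		err = qcq_disable(i, err != -ETIMEDOUT);

	return 0;
}
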
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index efffdfe18406..2b6cd60095b1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -264,10 +264,11 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
-static int ionic_qcq_disable(struct ionic_qcq *qcq)
+static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
{
struct ionic_queue *q;
struct ionic_lif *lif;
+ int err = 0;
struct ionic_admin_ctx ctx = {
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
@@ -294,13 +295,17 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq)
napi_disable(&qcq->napi);
}
- ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
- ctx.cmd.q_control.type = q->type;
- ctx.cmd.q_control.index = cpu_to_le32(q->index);
- dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
- ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ if (send_to_hw) {
+ ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
+ ctx.cmd.q_control.type = q->type;
+ ctx.cmd.q_control.index = cpu_to_le32(q->index);
+ dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
+ ctx.cmd.q_control.index, ctx.cmd.q_control.type);
- return ionic_adminq_post_wait(lif, &ctx);
+ err = ionic_adminq_post_wait(lif, &ctx);
+ }
+
+ return err;
}
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -1627,22 +1632,16 @@ static void ionic_lif_rss_deinit(struct ionic_lif *lif)
static void ionic_txrx_disable(struct ionic_lif *lif)
{
unsigned int i;
- int err;
+ int err = 0;
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- err = ionic_qcq_disable(lif->txqcqs[i]);
- if (err == -ETIMEDOUT)
- break;
- }
+ for (i = 0; i < lif->nxqs; i++)
+ err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- err = ionic_qcq_disable(lif->rxqcqs[i]);
- if (err == -ETIMEDOUT)
- break;
- }
+ for (i = 0; i < lif->nxqs; i++)
+ err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
}
}
@@ -1794,6 +1793,7 @@ static int ionic_txrx_init(struct ionic_lif *lif)
static int ionic_txrx_enable(struct ionic_lif *lif)
{
+ int derr = 0;
int i, err;
for (i = 0; i < lif->nxqs; i++) {
@@ -1810,8 +1810,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err = ionic_qcq_enable(lif->txqcqs[i]);
if (err) {
- if (err != -ETIMEDOUT)
- ionic_qcq_disable(lif->rxqcqs[i]);
+ derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
goto err_out;
}
}
@@ -1820,12 +1819,8 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err_out:
while (i--) {
- err = ionic_qcq_disable(lif->txqcqs[i]);
- if (err == -ETIMEDOUT)
- break;
- err = ionic_qcq_disable(lif->rxqcqs[i]);
- if (err == -ETIMEDOUT)
- break;
+ derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
}
return err;
--
2.17.1