Message-Id: <20210309165221.1735641-4-jwi@linux.ibm.com>
Date: Tue, 9 Mar 2021 17:52:20 +0100
From: Julian Wiedmann <jwi@...ux.ibm.com>
To: David Miller <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>
Cc: linux-netdev <netdev@...r.kernel.org>,
linux-s390 <linux-s390@...r.kernel.org>,
Heiko Carstens <hca@...ux.ibm.com>,
Karsten Graul <kgraul@...ux.ibm.com>,
Julian Wiedmann <jwi@...ux.ibm.com>
Subject: [PATCH net 3/4] s390/qeth: schedule TX NAPI on QAOB completion

When a QAOB notifies us that a pending TX buffer has been delivered, the
actual TX completion processing by qeth_tx_complete_pending_bufs()
is done within the context of a TX NAPI instance. We shouldn't rely on
this instance being scheduled by some other TX event, but just do it
ourselves.
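
In code terms, this means the QAOB handler itself kicks the queue's TX
NAPI instance once the buffer is back in the EMPTY state. A minimal
sketch of the pattern (taken from the hunk below, with the surrounding
handler code elided):

	/* On QAOB completion: release the buffer, then make sure the
	 * TX NAPI instance that owns this queue actually gets to run.
	 */
	queue = buffer->q;
	atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
	napi_schedule(&queue->napi);
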
qeth_qdio_handle_aob() is called from qeth_poll(), i.e. our main NAPI
instance. To avoid touching the TX queue's NAPI instance
before/after it is (un-)registered, reorder the code in qeth_open()
and qeth_stop() accordingly.
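
Condensed, the resulting ordering looks like this (a sketch of both
functions after this patch, with the per-queue loops and the non-IQD
path elided - see the full diff below):

	/* qeth_open(): bring up the TX queue NAPI instances first,
	 * then the main NAPI instance, which may now invoke
	 * qeth_qdio_handle_aob() and schedule a TX queue's NAPI.
	 */
	local_bh_disable();
	/* ... enable + kick each TX queue's NAPI instance ... */
	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	local_bh_enable();

	/* qeth_stop(): quiesce the main NAPI instance first, so that
	 * no further QAOB completion can schedule a TX queue's NAPI
	 * while that queue is being torn down.
	 */
	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));
	/* ... then disable + unregister each TX queue's NAPI instance ... */
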
Fixes: 0da9581ddb0f ("qeth: exploit asynchronous delivery of storage blocks")
Signed-off-by: Julian Wiedmann <jwi@...ux.ibm.com>
---
 drivers/s390/net/qeth_core_main.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3763cd6d14f8..d0a56afec028 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -470,6 +470,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
 	struct qaob *aob;
 	struct qeth_qdio_out_buffer *buffer;
 	enum iucv_tx_notify notification;
+	struct qeth_qdio_out_q *queue;
 	unsigned int i;
 
 	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
@@ -512,7 +513,9 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
 			buffer->is_header[i] = 0;
 		}
 
+		queue = buffer->q;
 		atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
+		napi_schedule(&queue->napi);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -7235,9 +7238,7 @@ int qeth_open(struct net_device *dev)
 	card->data.state = CH_STATE_UP;
 	netif_tx_start_all_queues(dev);
 
-	napi_enable(&card->napi);
 	local_bh_disable();
-	napi_schedule(&card->napi);
 	if (IS_IQD(card)) {
 		struct qeth_qdio_out_q *queue;
 		unsigned int i;
@@ -7249,8 +7250,12 @@ int qeth_open(struct net_device *dev)
 			napi_schedule(&queue->napi);
 		}
 	}
+
+	napi_enable(&card->napi);
+	napi_schedule(&card->napi);
 	/* kick-start the NAPI softirq: */
 	local_bh_enable();
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_open);
@@ -7260,6 +7265,11 @@ int qeth_stop(struct net_device *dev)
 	struct qeth_card *card = dev->ml_priv;
 
 	QETH_CARD_TEXT(card, 4, "qethstop");
+
+	napi_disable(&card->napi);
+	cancel_delayed_work_sync(&card->buffer_reclaim_work);
+	qdio_stop_irq(CARD_DDEV(card));
+
 	if (IS_IQD(card)) {
 		struct qeth_qdio_out_q *queue;
 		unsigned int i;
@@ -7280,10 +7290,6 @@ int qeth_stop(struct net_device *dev)
 		netif_tx_disable(dev);
 	}
 
-	napi_disable(&card->napi);
-	cancel_delayed_work_sync(&card->buffer_reclaim_work);
-	qdio_stop_irq(CARD_DDEV(card));
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_stop);
--
2.25.1