Message-Id: <20201001171136.46830-4-jwi@linux.ibm.com>
Date: Thu, 1 Oct 2020 19:11:32 +0200
From: Julian Wiedmann <jwi@...ux.ibm.com>
To: David Miller <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>
Cc: netdev <netdev@...r.kernel.org>,
linux-s390 <linux-s390@...r.kernel.org>,
Heiko Carstens <hca@...ux.ibm.com>,
Ursula Braun <ubraun@...ux.ibm.com>,
Karsten Graul <kgraul@...ux.ibm.com>,
Julian Wiedmann <jwi@...ux.ibm.com>
Subject: [PATCH net-next 3/7] s390/qeth: allow configuration of TX queues for OSA devices

For OSA devices that are _not_ configured in prio-queue mode, give users
the option of selecting the number of active TX queues.
This requires setting up the HW queues with a reasonable default QoS
value in the QIB's PQUE parm area.

As with the other device types, we bring up the device with a minimal
number of TX queues for compatibility reasons.

Signed-off-by: Julian Wiedmann <jwi@...ux.ibm.com>
---
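A quick usage sketch for reviewers, not part of the commit message. The
bus-ID 0.0.f5f0 and the netdev name eth0 below are made up, and the
priority_queueing attribute/value names are quoted from memory, so
double-check them before copy-pasting:

  # TX queue selection is only allowed with prio-queueing disabled;
  # with prio-queueing active, qeth_set_channels() now returns -EPERM
  # instead of the old -EOPNOTSUPP for non-VM OSA devices:
  echo no_prio_queueing > /sys/bus/ccwgroup/devices/0.0.f5f0/priority_queueing
  echo 1 > /sys/bus/ccwgroup/devices/0.0.f5f0/online

  # activate 2 of the HW TX queues; a tx count larger than
  # card->qdio.no_out_queues is still rejected with -EINVAL:
  ethtool -L eth0 tx 2
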
drivers/s390/net/qeth_core.h | 18 +++++++++++++++++
drivers/s390/net/qeth_core_main.c | 32 ++++++++++++++++++++-----------
drivers/s390/net/qeth_core_sys.c | 4 +++-
drivers/s390/net/qeth_ethtool.c | 8 ++++----
drivers/s390/net/qeth_l2_main.c | 5 +++--
drivers/s390/net/qeth_l3_main.c | 6 ++++--
6 files changed, 53 insertions(+), 20 deletions(-)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 1e1e7104dade..707a1634f621 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -278,6 +278,10 @@ struct qeth_hdr {
} hdr;
} __attribute__ ((packed));
+#define QETH_QIB_PQUE_ORDER_RR 0
+#define QETH_QIB_PQUE_UNITS_SBAL 2
+#define QETH_QIB_PQUE_PRIO_DEFAULT 4
+
struct qeth_qib_parms {
char pcit_magic[4];
u32 pcit_a;
@@ -287,6 +291,11 @@ struct qeth_qib_parms {
u32 blkt_total;
u32 blkt_inter_packet;
u32 blkt_inter_packet_jumbo;
+ char pque_magic[4];
+ u8 pque_order;
+ u8 pque_units;
+ u16 reserved;
+ u32 pque_priority[4];
};
/*TCP Segmentation Offload header*/
@@ -492,6 +501,7 @@ struct qeth_qdio_out_q {
struct qdio_outbuf_state *bufstates; /* convenience pointer */
struct qeth_out_q_stats stats;
spinlock_t lock;
+ unsigned int priority;
u8 next_buf_to_fill;
u8 max_elements;
u8 queue_no;
@@ -885,10 +895,18 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
+static inline bool qeth_uses_tx_prio_queueing(struct qeth_card *card)
+{
+ return card->qdio.do_prio_queueing != QETH_NO_PRIO_QUEUEING;
+}
+
static inline unsigned int qeth_tx_actual_queues(struct qeth_card *card)
{
struct qeth_priv *priv = netdev_priv(card->dev);
+ if (qeth_uses_tx_prio_queueing(card))
+ return min(card->dev->num_tx_queues, card->qdio.no_out_queues);
+
return min(priv->tx_wanted_queues, card->qdio.no_out_queues);
}
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 81f02a70680e..9e9c229e2780 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2683,6 +2683,7 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
+ queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
/* give outbound qeth_qdio_buffers their qdio_buffers */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
@@ -2746,6 +2747,9 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
static void qeth_fill_qib_parms(struct qeth_card *card,
struct qeth_qib_parms *parms)
{
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
+
parms->pcit_magic[0] = 'P';
parms->pcit_magic[1] = 'C';
parms->pcit_magic[2] = 'I';
@@ -2763,6 +2767,21 @@ static void qeth_fill_qib_parms(struct qeth_card *card,
parms->blkt_total = card->info.blkt.time_total;
parms->blkt_inter_packet = card->info.blkt.inter_packet;
parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
+
+ /* Prio-queueing implicitly uses the default priorities: */
+ if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
+ return;
+
+ parms->pque_magic[0] = 'P';
+ parms->pque_magic[1] = 'Q';
+ parms->pque_magic[2] = 'U';
+ parms->pque_magic[3] = 'E';
+ ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
+ parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
+ parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
+
+ qeth_for_each_output_queue(card, queue, i)
+ parms->pque_priority[i] = queue->priority;
}
static int qeth_qdio_activate(struct qeth_card *card)
@@ -5298,19 +5317,9 @@ static int qeth_set_online(struct qeth_card *card)
qeth_print_status_message(card);
- if (card->dev->reg_state != NETREG_REGISTERED) {
- struct qeth_priv *priv = netdev_priv(card->dev);
-
- if (IS_IQD(card))
- priv->tx_wanted_queues = QETH_IQD_MIN_TXQ;
- else if (IS_VM_NIC(card))
- priv->tx_wanted_queues = 1;
- else
- priv->tx_wanted_queues = card->dev->num_tx_queues;
-
+ if (card->dev->reg_state != NETREG_REGISTERED)
/* no need for locking / error handling at this early stage: */
qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
- }
rc = card->discipline->set_online(card, carrier_ok);
if (rc)
@@ -6236,6 +6245,7 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
priv = netdev_priv(dev);
priv->rx_copybreak = QETH_RX_COPYBREAK;
+ priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
dev->ml_priv = card;
dev->watchdog_timeo = QETH_TX_TIMEOUT;
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 74c70364edc1..7cc5649dfffe 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -164,9 +164,11 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
return sprintf(buf, "%s\n", "by skb-priority");
case QETH_PRIO_Q_ING_VLAN:
return sprintf(buf, "%s\n", "by VLAN headers");
- default:
+ case QETH_PRIO_Q_ING_FIXED:
return sprintf(buf, "always queue %i\n",
card->qdio.default_out_queue);
+ default:
+ return sprintf(buf, "disabled\n");
}
}
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index bc3ea0efb58b..b5caa723326e 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -220,6 +220,10 @@ static int qeth_set_channels(struct net_device *dev,
if (channels->tx_count > card->qdio.no_out_queues)
return -EINVAL;
+ /* Prio-queueing needs all TX queues: */
+ if (qeth_uses_tx_prio_queueing(card))
+ return -EPERM;
+
if (IS_IQD(card)) {
if (channels->tx_count < QETH_IQD_MIN_TXQ)
return -EINVAL;
@@ -230,10 +234,6 @@ static int qeth_set_channels(struct net_device *dev,
if (netif_running(dev) &&
channels->tx_count < dev->real_num_tx_queues)
return -EPERM;
- } else {
- /* OSA still uses the legacy prio-queue mechanism: */
- if (!IS_VM_NIC(card))
- return -EOPNOTSUPP;
}
rc = qeth_set_real_num_tx_queues(card, channels->tx_count);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 290389fc7e79..c0ceeddd1549 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -571,9 +571,10 @@ static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
return qeth_iqd_select_queue(dev, skb,
qeth_get_ether_cast_type(skb),
sb_dev);
+ if (qeth_uses_tx_prio_queueing(card))
+ return qeth_get_priority_queue(card, skb);
- return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
- qeth_get_priority_queue(card, skb);
+ return netdev_pick_tx(dev, skb, sb_dev);
}
static void qeth_l2_set_rx_mode(struct net_device *dev)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ea5f25857aff..803ccbcf3511 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1831,8 +1831,10 @@ static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct qeth_card *card = dev->ml_priv;
- return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
- qeth_get_priority_queue(card, skb);
+ if (qeth_uses_tx_prio_queueing(card))
+ return qeth_get_priority_queue(card, skb);
+
+ return netdev_pick_tx(dev, skb, sb_dev);
}
static const struct net_device_ops qeth_l3_netdev_ops = {
--
2.17.1