Message-Id: <20230130173145.475943-4-vladimir.oltean@nxp.com>
Date: Mon, 30 Jan 2023 19:31:33 +0200
From: Vladimir Oltean <vladimir.oltean@....com>
To: netdev@...r.kernel.org
Cc: "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Claudiu Manoil <claudiu.manoil@....com>,
Vinicius Costa Gomes <vinicius.gomes@...el.com>,
Kurt Kanzenbach <kurt@...utronix.de>,
Jacob Keller <jacob.e.keller@...el.com>,
Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>,
Jiri Pirko <jiri@...nulli.us>
Subject: [PATCH v4 net-next 03/15] net: enetc: recalculate num_real_tx_queues when XDP program attaches

Since the blamed net-next commit, enetc_setup_xdp_prog() no longer goes
through enetc_open(). As a result, enetc_num_stack_tx_queues(), the
function which was supposed to detect whether a BPF program is attached
(so that some TX queues can be cropped from network stack usage), no
longer gets called.
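For context, here is a minimal sketch of what that helper is assumed to
compute (its body is not part of this patch, so the code below is an
illustration, not a quote): the TX rings reserved for XDP, one per
possible CPU, are cropped from the count exposed to the stack whenever
a program is attached.

/* Assumed shape of enetc_num_stack_tx_queues(): the last
 * num_possible_cpus() TX rings are reserved for XDP_TX/XDP_REDIRECT
 * while a program is attached, so they must not be advertised as
 * stack TX queues.
 */
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
	int num = priv->num_tx_rings;

	if (priv->xdp_prog)
		num -= num_possible_cpus();

	return num;
}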
We can move the netif_set_real_num_rx_queues() call to enetc_alloc_msix()
(probe time), since the number of RX queues used by the stack is a
runtime invariant. We can do the same thing with
netif_set_real_num_tx_queues(), and let enetc_reconfigure_xdp_cb()
explicitly recalculate and change the number of stack TX queues when an
XDP program attaches or detaches.
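For clarity, the ordering the callback ends up with looks roughly like
this (a condensed, annotated sketch of the hunk below; the per-ring
updates the real callback also performs are omitted):

static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
{
	struct bpf_prog *old_prog, *prog = ctx;
	int err;

	old_prog = xchg(&priv->xdp_prog, prog);

	/* enetc_num_stack_tx_queues() looks at priv->xdp_prog, so the
	 * stack TX queue count can only be recomputed after the swap.
	 * If the core rejects the new count, put the old program back
	 * so the error path leaves the interface exactly as it was.
	 */
	err = netif_set_real_num_tx_queues(priv->ndev,
					   enetc_num_stack_tx_queues(priv));
	if (err) {
		xchg(&priv->xdp_prog, old_prog);
		return err;
	}

	/* Only now is it safe to drop the reference on the old program. */
	if (old_prog)
		bpf_prog_put(old_prog);

	/* per-ring xdp_prog pointer updates omitted from this sketch */
	return 0;
}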
Fixes: c33bfaf91c4c ("net: enetc: set up XDP program under enetc_reconfigure()")
Signed-off-by: Vladimir Oltean <vladimir.oltean@....com>
---
v2->v4: none
v1->v2: patch is new

 drivers/net/ethernet/freescale/enetc/enetc.c | 35 ++++++++++++--------
 1 file changed, 21 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 5d7eeb1b5a23..e18a6c834eb4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2454,7 +2454,6 @@ int enetc_open(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr_resource *tx_res, *rx_res;
- int num_stack_tx_queues;
bool extended;
int err;
@@ -2480,16 +2479,6 @@ int enetc_open(struct net_device *ndev)
goto err_alloc_rx;
}
- num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
-
- err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
- if (err)
- goto err_set_queues;
-
enetc_tx_onestep_tstamp_init(priv);
enetc_assign_tx_resources(priv, tx_res);
enetc_assign_rx_resources(priv, rx_res);
@@ -2498,8 +2487,6 @@ int enetc_open(struct net_device *ndev)
return 0;
-err_set_queues:
- enetc_free_rx_resources(rx_res, priv->num_rx_rings);
err_alloc_rx:
enetc_free_tx_resources(tx_res, priv->num_tx_rings);
err_alloc_tx:
@@ -2683,9 +2670,18 @@ EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);
static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
{
struct bpf_prog *old_prog, *prog = ctx;
- int i;
+ int num_stack_tx_queues;
+ int err, i;
old_prog = xchg(&priv->xdp_prog, prog);
+
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err) {
+ xchg(&priv->xdp_prog, old_prog);
+ return err;
+ }
+
if (old_prog)
bpf_prog_put(old_prog);
@@ -2906,6 +2902,7 @@ EXPORT_SYMBOL_GPL(enetc_ioctl);
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ int num_stack_tx_queues;
int first_xdp_tx_ring;
int i, n, err, nvec;
int v_tx_rings;
@@ -2982,6 +2979,16 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
}
}
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err)
+ goto fail;
+
+ err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings);
+ if (err)
+ goto fail;
+
first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
--
2.34.1