Message-ID: <20260122005113.2476634-5-kuba@kernel.org>
Date: Wed, 21 Jan 2026 16:51:11 -0800
From: Jakub Kicinski <kuba@...nel.org>
To: davem@...emloft.net
Cc: netdev@...r.kernel.org,
	edumazet@...gle.com,
	pabeni@...hat.com,
	andrew+netdev@...n.ch,
	horms@...nel.org,
	michael.chan@...adcom.com,
	sdf@...ichev.me,
	almasrymina@...gle.com,
	dtatulea@...dia.com,
	asml.silence@...il.com,
	dw@...idwei.uk,
	daniel@...earbox.net,
	Jakub Kicinski <kuba@...nel.org>
Subject: [PATCH net-next 4/6] net: use netdev_queue_config() for mp restart

We should follow the prepare/commit approach for queue configuration.
The qcfg struct should live in dev->cfg rather than directly in the
queue objects, so that we can clone and discard a pending config
easily.

Remove the qcfg from struct netdev_rx_queue, and switch the remaining
callers to netdev_queue_config(). netdev_queue_config() constructs the
qcfg on the fly from the device defaults and the state of the queue.
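
For illustration, a minimal caller-side sketch of the new flow
(mirroring the restart path in the diff below; error handling omitted):

	struct netdev_queue_config qcfg;

	/* construct the config on the fly: driver defaults, then MP overrides */
	netdev_queue_config(dev, rxq_idx, &qcfg);
	err = netdev_rx_queue_reconfig(dev, rxq_idx, &qcfg, &qcfg);

Passing the same qcfg as both old and new config reflects a plain
restart; __net_mp_open_rxq() / __net_mp_close_rxq() instead capture the
config before and after updating mp_params.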

ndo_default_qcfg becomes optional, since the mere presence of the
callback does not carry any meaningful semantics for us.

Signed-off-by: Jakub Kicinski <kuba@...nel.org>
---
 include/net/netdev_queues.h |  4 +++-
 net/core/dev.c              | 17 ---------------
 net/core/netdev_config.c    | 12 ++++++++---
 net/core/netdev_rx_queue.c  | 43 +++++++++++++++++++++----------------
 4 files changed, 37 insertions(+), 39 deletions(-)

diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index 725bf69ef86c..4b23c3697cc9 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -139,7 +139,9 @@ enum {
  * @ndo_queue_get_dma_dev: Get dma device for zero-copy operations to be used
  *			   for this queue. Return NULL on error.
  *
- * @ndo_default_qcfg:	Populate queue config struct with defaults. Optional.
+ * @ndo_default_qcfg:	(Optional) Populate queue config struct with defaults.
+ *			Queue config structs are passed to this helper before
+ *			the user-requested settings are applied.
  *
  * @supported_params:	Bitmask of supported parameters, see QCFG_*.
  *
diff --git a/net/core/dev.c b/net/core/dev.c
index 048ab4409a2c..2661b68f5be3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -11282,21 +11282,6 @@ static void netdev_free_phy_link_topology(struct net_device *dev)
 	}
 }
 
-static void init_rx_queue_cfgs(struct net_device *dev)
-{
-	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
-	struct netdev_rx_queue *rxq;
-	int i;
-
-	if (!qops || !qops->ndo_default_qcfg)
-		return;
-
-	for (i = 0; i < dev->num_rx_queues; i++) {
-		rxq = __netif_get_rx_queue(dev, i);
-		qops->ndo_default_qcfg(dev, &rxq->qcfg);
-	}
-}
-
 /**
  * register_netdevice() - register a network device
  * @dev: device to register
@@ -11342,8 +11327,6 @@ int register_netdevice(struct net_device *dev)
 	if (!dev->name_node)
 		goto out;
 
-	init_rx_queue_cfgs(dev);
-
 	/* Init, if this function is available */
 	if (dev->netdev_ops->ndo_init) {
 		ret = dev->netdev_ops->ndo_init(dev);
diff --git a/net/core/netdev_config.c b/net/core/netdev_config.c
index 562087bd30c8..48f763446506 100644
--- a/net/core/netdev_config.c
+++ b/net/core/netdev_config.c
@@ -22,11 +22,17 @@
 void netdev_queue_config(struct net_device *dev, int rxq_idx,
 			 struct netdev_queue_config *qcfg)
 {
-	struct netdev_queue_config *stored;
+	struct pp_memory_provider_params *mpp;
 
 	memset(qcfg, 0, sizeof(*qcfg));
 
-	stored = &__netif_get_rx_queue(dev, rxq_idx)->qcfg;
-	qcfg->rx_page_size = stored->rx_page_size;
+	/* Get defaults from the driver, in case user config not set */
+	if (dev->queue_mgmt_ops->ndo_default_qcfg)
+		dev->queue_mgmt_ops->ndo_default_qcfg(dev, qcfg);
+
+	/* Apply MP overrides */
+	mpp = &__netif_get_rx_queue(dev, rxq_idx)->mp_params;
+	if (mpp->rx_page_size)
+		qcfg->rx_page_size = mpp->rx_page_size;
 }
 EXPORT_SYMBOL(netdev_queue_config);
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index 03d7531abb3a..72374930699a 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -18,11 +18,13 @@ bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx)
 }
 EXPORT_SYMBOL(netif_rxq_has_unreadable_mp);
 
-int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
+static int netdev_rx_queue_reconfig(struct net_device *dev,
+				    unsigned int rxq_idx,
+				    struct netdev_queue_config *qcfg_old,
+				    struct netdev_queue_config *qcfg_new)
 {
 	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
 	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
-	struct netdev_queue_config qcfg;
 	void *new_mem, *old_mem;
 	int err;
 
@@ -30,18 +32,8 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
 		return -EOPNOTSUPP;
 
-	if (WARN_ON_ONCE(qops->supported_params && !qops->ndo_default_qcfg))
-		return -EINVAL;
-
 	netdev_assert_locked(dev);
 
-	memset(&qcfg, 0, sizeof(qcfg));
-	if (qops->ndo_default_qcfg)
-		qops->ndo_default_qcfg(dev, &qcfg);
-
-	if (rxq->mp_params.rx_page_size)
-		qcfg.rx_page_size = rxq->mp_params.rx_page_size;
-
 	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
 	if (!new_mem)
 		return -ENOMEM;
@@ -52,7 +44,7 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 		goto err_free_new_mem;
 	}
 
-	err = qops->ndo_queue_mem_alloc(dev, &qcfg, new_mem, rxq_idx);
+	err = qops->ndo_queue_mem_alloc(dev, qcfg_new, new_mem, rxq_idx);
 	if (err)
 		goto err_free_old_mem;
 
@@ -65,7 +57,7 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 		if (err)
 			goto err_free_new_queue_mem;
 
-		err = qops->ndo_queue_start(dev, &qcfg, new_mem, rxq_idx);
+		err = qops->ndo_queue_start(dev, qcfg_new, new_mem, rxq_idx);
 		if (err)
 			goto err_start_queue;
 	} else {
@@ -77,7 +69,6 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 	kvfree(old_mem);
 	kvfree(new_mem);
 
-	rxq->qcfg = qcfg;
 	return 0;
 
 err_start_queue:
@@ -88,7 +79,7 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 	 * WARN if we fail to recover the old rx queue, and at least free
 	 * old_mem so we don't also leak that.
 	 */
-	if (qops->ndo_queue_start(dev, &rxq->qcfg, old_mem, rxq_idx)) {
+	if (qops->ndo_queue_start(dev, qcfg_old, old_mem, rxq_idx)) {
 		WARN(1,
 		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
 		     rxq_idx);
@@ -106,6 +97,14 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 
 	return err;
 }
+
+int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
+{
+	struct netdev_queue_config qcfg;
+
+	netdev_queue_config(dev, rxq_idx, &qcfg);
+	return netdev_rx_queue_reconfig(dev, rxq_idx, &qcfg, &qcfg);
+}
 EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
 
 int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
@@ -113,6 +112,7 @@ int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
 		      struct netlink_ext_ack *extack)
 {
 	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
+	struct netdev_queue_config qcfg[2];
 	struct netdev_rx_queue *rxq;
 	int ret;
 
@@ -154,8 +154,11 @@ int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
 	}
 #endif
 
+	netdev_queue_config(dev, rxq_idx, &qcfg[0]);
 	rxq->mp_params = *p;
-	ret = netdev_rx_queue_restart(dev, rxq_idx);
+	netdev_queue_config(dev, rxq_idx, &qcfg[1]);
+
+	ret = netdev_rx_queue_reconfig(dev, rxq_idx, &qcfg[0], &qcfg[1]);
 	if (ret)
 		memset(&rxq->mp_params, 0, sizeof(rxq->mp_params));
 
@@ -176,6 +179,7 @@ int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
 void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
 			const struct pp_memory_provider_params *old_p)
 {
+	struct netdev_queue_config qcfg[2];
 	struct netdev_rx_queue *rxq;
 	int err;
 
@@ -195,8 +199,11 @@ void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
 			 rxq->mp_params.mp_priv != old_p->mp_priv))
 		return;
 
+	netdev_queue_config(dev, ifq_idx, &qcfg[0]);
 	memset(&rxq->mp_params, 0, sizeof(rxq->mp_params));
-	err = netdev_rx_queue_restart(dev, ifq_idx);
+	netdev_queue_config(dev, ifq_idx, &qcfg[1]);
+
+	err = netdev_rx_queue_reconfig(dev, ifq_idx, &qcfg[0], &qcfg[1]);
 	WARN_ON(err && err != -ENETDOWN);
 }
 
-- 
2.52.0

