Message-ID: <171234778911.5075.12956603794662346879.stgit@anambiarhost.jf.intel.com>
Date: Fri, 05 Apr 2024 13:09:49 -0700
From: Amritha Nambiar <amritha.nambiar@...el.com>
To: netdev@...r.kernel.org, kuba@...nel.org, davem@...emloft.net
Cc: edumazet@...gle.com, pabeni@...hat.com, ast@...nel.org, sdf@...gle.com,
 lorenzo@...nel.org, tariqt@...dia.com, daniel@...earbox.net,
 anthony.l.nguyen@...el.com, lucien.xin@...il.com, hawk@...nel.org,
 sridhar.samudrala@...el.com, amritha.nambiar@...el.com
Subject: [net-next,RFC PATCH 4/5] ice: Handle unused vectors dynamically

When queues are moved between vectors, some vectors may become
unused. These unused vectors need to be freed. When queues are
later assigned to a previously unused and freed vector, that
vector must be requested and set up again. Add the framework
functions for this.

Signed-off-by: Amritha Nambiar <amritha.nambiar@...el.com>
---
 drivers/net/ethernet/intel/ice/ice.h      |   12 +++
 drivers/net/ethernet/intel/ice/ice_lib.c  |  117 +++++++++++++++++++++++++++++
 drivers/net/ethernet/intel/ice/ice_lib.h  |    6 +
 drivers/net/ethernet/intel/ice/ice_main.c |   12 ---
 4 files changed, 136 insertions(+), 11 deletions(-)
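
For reviewers: a rough sketch of how a later patch in the series might
drive these helpers once queues have actually been moved between
vectors. The ice_vsi_refresh_vectors() wrapper and the was_freed flag
are hypothetical; ice_is_q_vector_unused(), ice_qvec_free(),
ice_qvec_prep() and ice_q_vector_ena() are the functions touched below.

static int ice_vsi_refresh_vectors(struct ice_vsi *vsi)
{
	int v_idx, err;

	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];

		if (ice_is_q_vector_unused(q_vector)) {
			/* No Tx/Rx rings left on this vector: disable its
			 * interrupt and release its IRQ.
			 */
			ice_qvec_free(vsi, q_vector);
		} else if (q_vector->was_freed) {	/* hypothetical flag */
			/* Queues were assigned to a previously freed vector:
			 * request the IRQ again and re-enable the interrupt.
			 */
			err = ice_qvec_prep(vsi, q_vector);
			if (err)
				return err;

			err = ice_q_vector_ena(vsi, q_vector);
			if (err)
				return err;
		}
	}

	return 0;
}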

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a2c91fa88e14..d7b67821dc21 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -1010,4 +1010,16 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
 
 extern const struct xdp_metadata_ops ice_xdp_md_ops;
 void ice_init_moderation(struct ice_q_vector *q_vector);
+void
+ice_irq_affinity_notify(struct irq_affinity_notify *notify,
+			const cpumask_t *mask);
+/**
+ * ice_irq_affinity_release - Callback for affinity notifier release
+ * @ref: internal core kernel usage
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * to inform the current notification subscriber that they will no longer
+ * receive notifications.
+ */
+static inline void ice_irq_affinity_release(struct kref __always_unused *ref) {}
 #endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 35389189af1b..419d9561bc2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -4248,3 +4248,120 @@ ice_q_vector_ena(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
 
 	return 0;
 }
+
+static void
+ice_qvec_release_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+	struct ice_hw *hw = &vsi->back->hw;
+	struct ice_rx_ring *rx_ring;
+	struct ice_tx_ring *tx_ring;
+
+	ice_write_intrl(q_vector, 0);
+
+	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+		ice_write_itr(&q_vector->rx, 0);
+		wr32(hw, QINT_RQCTL(vsi->rxq_map[rx_ring->q_index]), 0);
+	}
+
+	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+		ice_write_itr(&q_vector->tx, 0);
+		wr32(hw, QINT_TQCTL(vsi->txq_map[tx_ring->q_index]), 0);
+	}
+
+	/* Disable the interrupt by writing to the register */
+	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
+	ice_flush(hw);
+}
+
+/**
+ * ice_qvec_free - Free the MSI-X vector
+ * @vsi: the VSI that contains the queue vector
+ * @q_vector: queue vector
+ */
+static void __maybe_unused
+ice_qvec_free(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+	int irq_num = q_vector->irq.virq;
+	struct ice_pf *pf = vsi->back;
+
+	ice_qvec_release_msix(vsi, q_vector);
+
+#ifdef CONFIG_RFS_ACCEL
+	struct net_device *netdev = vsi->netdev;
+
+	if (netdev && netdev->rx_cpu_rmap)
+		irq_cpu_rmap_remove(netdev->rx_cpu_rmap, irq_num);
+#endif
+
+	/* clear the affinity notifier in the IRQ descriptor */
+	if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+		irq_set_affinity_notifier(irq_num, NULL);
+
+	/* clear the affinity_mask in the IRQ descriptor */
+	irq_set_affinity_hint(irq_num, NULL);
+
+	synchronize_irq(irq_num);
+	devm_free_irq(ice_pf_to_dev(pf), irq_num, q_vector);
+}
+
+/**
+ * ice_qvec_prep - Request and prepare a new MSI-X vector
+ * @vsi: the VSI that contains the queue vector
+ * @q_vector: queue vector
+ */
+static int __maybe_unused
+ice_qvec_prep(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+	struct ice_pf *pf = vsi->back;
+	struct device *dev;
+	int err, irq_num;
+
+	dev = ice_pf_to_dev(pf);
+	irq_num = q_vector->irq.virq;
+
+	err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
+			       q_vector->name, q_vector);
+	if (err) {
+		netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
+			   err);
+		return err;
+	}
+
+	/* register for affinity change notifications */
+	if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
+		struct irq_affinity_notify *affinity_notify;
+
+		affinity_notify = &q_vector->affinity_notify;
+		affinity_notify->notify = ice_irq_affinity_notify;
+		affinity_notify->release = ice_irq_affinity_release;
+		irq_set_affinity_notifier(irq_num, affinity_notify);
+	}
+
+	/* assign the mask for this irq */
+	irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+
+#ifdef CONFIG_RFS_ACCEL
+	struct net_device *netdev = vsi->netdev;
+
+	if (!netdev) {
+		err = -EINVAL;
+		goto free_q_irqs;
+	}
+
+	if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, irq_num)) {
+		err = -EINVAL;
+		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on irq %u: %pe\n",
+			   irq_num, ERR_PTR(err));
+		goto free_q_irqs;
+	}
+#endif
+	return 0;
+
+free_q_irqs:
+	if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+		irq_set_affinity_notifier(irq_num, NULL);
+	irq_set_affinity_hint(irq_num, NULL);
+	devm_free_irq(dev, irq_num, q_vector);
+
+	return err;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 00239c2efa92..66a9709ff612 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -164,4 +164,10 @@ void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector);
 void
 ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
 		     bool enable);
+static inline bool
+ice_is_q_vector_unused(struct ice_q_vector *q_vector)
+{
+	return (!q_vector->num_ring_tx && !q_vector->num_ring_rx);
+}
+
 #endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index cd2f467fe3a0..0884b53a0b01 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2476,7 +2476,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
  * This is a callback function used by the irq_set_affinity_notifier function
  * so that we may register to receive changes to the irq affinity masks.
  */
-static void
+void
 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
 			const cpumask_t *mask)
 {
@@ -2486,16 +2486,6 @@ ice_irq_affinity_notify(struct irq_affinity_notify *notify,
 	cpumask_copy(&q_vector->affinity_mask, mask);
 }
 
-/**
- * ice_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- */
-static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
-
 /**
  * ice_vsi_ena_irq - Enable IRQ for the given VSI
  * @vsi: the VSI being configured
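
For context on the notify/release pairing that this patch exports from
ice_main.c: the IRQ core holds the irq_affinity_notify structure via a
kref and calls ->release() when the notifier is replaced or cleared, so
even an empty release callback must be provided. A minimal, generic
sketch of the pattern (the "foo" names are illustrative, not from this
patch or the ice driver):

#include <linux/interrupt.h>

struct foo_vector {
	struct irq_affinity_notify affinity_notify;
	cpumask_t affinity_mask;
};

/* Called from a workqueue whenever the IRQ's affinity mask changes,
 * mirroring what ice_irq_affinity_notify() does.
 */
static void foo_affinity_notify(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	struct foo_vector *v = container_of(notify, struct foo_vector,
					    affinity_notify);

	cpumask_copy(&v->affinity_mask, mask);
}

/* Invoked by the IRQ core when the notifier is torn down; nothing to
 * free here, so an empty body is sufficient.
 */
static void foo_affinity_release(struct kref *ref) {}

static int foo_register_affinity_notifier(struct foo_vector *v, int irq)
{
	v->affinity_notify.notify = foo_affinity_notify;
	v->affinity_notify.release = foo_affinity_release;

	/* Pass NULL later to unregister, as ice_qvec_free() does. */
	return irq_set_affinity_notifier(irq, &v->affinity_notify);
}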

