Message-ID: <169266031496.10199.9655642930215889099.stgit@anambiarhost.jf.intel.com>
Date: Mon, 21 Aug 2023 16:25:15 -0700
From: Amritha Nambiar <amritha.nambiar@...el.com>
To: netdev@...r.kernel.org, kuba@...nel.org, davem@...emloft.net
Cc: sridhar.samudrala@...el.com, amritha.nambiar@...el.com
Subject: [net-next PATCH v2 1/9] net: Introduce new fields for napi and
 queue associations

Add a napi pointer to the netdev queue structures to track the napi
instance serving each queue. This establishes the queue<->napi mapping.

Introduce new napi fields 'napi_rxq_list' and 'napi_txq_list' to hold
the rx and tx queue sets associated with the napi, along with functions
to associate a queue with its napi and to handle removal. Together
these list the queue/queue-set serviced on the corresponding irq line
for each napi instance.

Signed-off-by: Amritha Nambiar <amritha.nambiar@...el.com>
---
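Note (not part of the patch): as an illustrative use of the new API, a
driver that pairs each rx/tx queue with a dedicated napi could record
the mapping at setup time roughly as below. The function and parameter
names here ('my_drv_map_queue_pair', 'qid') are hypothetical.

        /* Hypothetical sketch: associate queue pair 'qid' with its napi. */
        static int my_drv_map_queue_pair(struct napi_struct *napi,
                                         unsigned int qid)
        {
                int err;

                err = netif_napi_add_queue(napi, qid, QUEUE_RX);
                if (err)
                        return err;

                return netif_napi_add_queue(napi, qid, QUEUE_TX);
        }
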
 include/linux/netdevice.h     |   16 +++++++++++++
 include/net/netdev_rx_queue.h |    3 ++
 net/core/dev.c                |   52 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 71 insertions(+)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0896aaa91dd7..7645c0ba0995 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -346,6 +346,14 @@ struct gro_list {
  */
 #define GRO_HASH_BUCKETS	8
 
+/*
+ * napi queue container type
+ */
+enum q_type {
+	QUEUE_RX,
+	QUEUE_TX,
+};
+
 /*
  * Structure for NAPI scheduling similar to tasklet but with weighting
  */
@@ -380,6 +388,8 @@ struct napi_struct {
 	/* control-path-only fields follow */
 	struct list_head	dev_list;
 	struct hlist_node	napi_hash_node;
+	struct list_head	napi_rxq_list;
+	struct list_head	napi_txq_list;
 };
 
 enum {
@@ -655,6 +665,9 @@ struct netdev_queue {
 
 	unsigned long		state;
 
+	/* NAPI instance for the queue */
+	struct napi_struct      *napi;
+	struct list_head        q_list;
 #ifdef CONFIG_BQL
 	struct dql		dql;
 #endif
@@ -2609,6 +2622,9 @@ static inline void *netdev_priv(const struct net_device *dev)
  */
 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
 
+int netif_napi_add_queue(struct napi_struct *napi, unsigned int queue_index,
+			 enum q_type type);
+
 /* Default NAPI poll() weight
  * Device drivers are strongly advised to not use bigger value
  */
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index cdcafb30d437..66bda0dfe71c 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -21,6 +21,9 @@ struct netdev_rx_queue {
 #ifdef CONFIG_XDP_SOCKETS
 	struct xsk_buff_pool            *pool;
 #endif
+	struct list_head		q_list;
+	/* NAPI instance for the queue */
+	struct napi_struct		*napi;
 } ____cacheline_aligned_in_smp;
 
 /*
diff --git a/net/core/dev.c b/net/core/dev.c
index 17e6281e408c..ec4c469c9e1d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6391,6 +6391,42 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
 }
 EXPORT_SYMBOL(dev_set_threaded);
 
+/**
+ * netif_napi_add_queue - Associate queue with the napi
+ * @napi: NAPI context
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ *
+ * Add queue with its corresponding napi context
+ */
+int netif_napi_add_queue(struct napi_struct *napi, unsigned int queue_index,
+			 enum q_type type)
+{
+	struct net_device *dev = napi->dev;
+	struct netdev_rx_queue *rxq;
+	struct netdev_queue *txq;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (type) {
+	case QUEUE_RX:
+		rxq = __netif_get_rx_queue(dev, queue_index);
+		rxq->napi = napi;
+		list_add_rcu(&rxq->q_list, &napi->napi_rxq_list);
+		break;
+	case QUEUE_TX:
+		txq = netdev_get_tx_queue(dev, queue_index);
+		txq->napi = napi;
+		list_add_rcu(&txq->q_list, &napi->napi_txq_list);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(netif_napi_add_queue);
+
 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 			   int (*poll)(struct napi_struct *, int), int weight)
 {
@@ -6426,6 +6462,9 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 	 */
 	if (dev->threaded && napi_kthread_create(napi))
 		dev->threaded = 0;
+
+	INIT_LIST_HEAD(&napi->napi_rxq_list);
+	INIT_LIST_HEAD(&napi->napi_txq_list);
 }
 EXPORT_SYMBOL(netif_napi_add_weight);
 
@@ -6487,6 +6526,18 @@ static void flush_gro_hash(struct napi_struct *napi)
 	}
 }
 
+static void napi_del_queues(struct napi_struct *napi)
+{
+	struct netdev_rx_queue *rx_queue, *rxq;
+	struct netdev_queue *tx_queue, *txq;
+
+	list_for_each_entry_safe(rx_queue, rxq, &napi->napi_rxq_list, q_list)
+		list_del_rcu(&rx_queue->q_list);
+
+	list_for_each_entry_safe(tx_queue, txq, &napi->napi_txq_list, q_list)
+		list_del_rcu(&tx_queue->q_list);
+}
+
 /* Must be called in process context */
 void __netif_napi_del(struct napi_struct *napi)
 {
@@ -6504,6 +6555,7 @@ void __netif_napi_del(struct napi_struct *napi)
 		kthread_stop(napi->thread);
 		napi->thread = NULL;
 	}
+	napi_del_queues(napi);
 }
 EXPORT_SYMBOL(__netif_napi_del);
 


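Since the per-napi queue lists are maintained with list_add_rcu() and
list_del_rcu(), a reader would walk them under rcu_read_lock(). A
minimal sketch, not part of this patch, of dumping the rx queues
served by a napi (the function name is hypothetical):

        static void dump_napi_rx_queues(struct napi_struct *napi)
        {
                struct netdev_rx_queue *rxq;

                rcu_read_lock();
                list_for_each_entry_rcu(rxq, &napi->napi_rxq_list, q_list)
                        pr_info("napi %u serves rx queue %u\n",
                                napi->napi_id,
                                get_netdev_rx_queue_index(rxq));
                rcu_read_unlock();
        }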