Message-ID: <20240830162508.1009458-4-aleksander.lobakin@intel.com>
Date: Fri, 30 Aug 2024 18:25:02 +0200
From: Alexander Lobakin <aleksander.lobakin@...el.com>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>
Cc: Alexander Lobakin <aleksander.lobakin@...el.com>,
Lorenzo Bianconi <lorenzo@...nel.org>,
Daniel Xu <dxu@...uu.xyz>,
John Fastabend <john.fastabend@...il.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
Martin KaFai Lau <martin.lau@...ux.dev>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
bpf@...r.kernel.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH bpf-next 3/9] net: napi: add ability to create CPU-pinned threaded NAPI

From: Lorenzo Bianconi <lorenzo@...nel.org>

Add netif_napi_add_percpu() to pin a NAPI, when in threaded mode, to a
particular CPU. This means that if the NAPI is not threaded, it is run
as usual, but after switching to threaded mode, it will always be run
on the specified CPU.

It's not meant to be used in drivers, but might be useful when creating
percpu threaded NAPIs, for example, to replace percpu kthreads or
workers where a NAPI context is needed.

The already existing netif_napi_add*() variants are not affected in any
way.
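
For illustration, creating such percpu NAPIs could look roughly like
the sketch below. It is not part of this patch: the device, the poll
callback and the per-CPU storage are hypothetical, only
netif_napi_add_percpu() itself is introduced here.

#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU NAPI storage */
static DEFINE_PER_CPU(struct napi_struct, my_napi);

/* Hypothetical poll callback doing per-CPU work in NAPI context */
static int my_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... process up to @budget units of work here ... */

	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}

static void my_add_percpu_napis(struct net_device *dev)
{
	int cpu;

	/* One NAPI per possible CPU; in the threaded mode, the kthread
	 * serving the NAPI added for @cpu will be bound to @cpu.
	 */
	for_each_possible_cpu(cpu)
		netif_napi_add_percpu(dev, per_cpu_ptr(&my_napi, cpu),
				      my_poll, cpu);
}
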
Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@...el.com>
---
 include/linux/netdevice.h | 35 +++++++++++++++++++++++++++++++++--
 net/core/dev.c            | 18 +++++++++++++-----
 2 files changed, 46 insertions(+), 7 deletions(-)
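
A note on trying this out (below the "---" on purpose, so it doesn't
end up in the commit message): the pinning only takes effect once the
device is switched to the threaded mode, which is what spawns the
per-NAPI kthreads. A minimal sketch, assuming a device @dev whose NAPIs
were added via netif_napi_add_percpu() as above; dev_set_threaded() is
the existing helper for flipping the mode:

	int err;

	/* Creates one kthread per NAPI; those added with a valid
	 * thread_cpuid come up bound to that CPU.
	 */
	err = dev_set_threaded(dev, true);
	if (err)
		pr_err("enabling threaded NAPI failed: %d\n", err);

From userspace, the same switch is exposed via
/sys/class/net/<dev>/threaded.
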
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ca5f0dda733b..4d6fb0ccdea1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -377,6 +377,7 @@ struct napi_struct {
 	struct list_head dev_list;
 	struct hlist_node napi_hash_node;
 	int irq;
+	int thread_cpuid;
 };
 
 enum {
@@ -2619,8 +2620,18 @@ static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
  */
 #define NAPI_POLL_WEIGHT 64
 
-void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
-			   int (*poll)(struct napi_struct *, int), int weight);
+void netif_napi_add_weight_percpu(struct net_device *dev,
+				  struct napi_struct *napi,
+				  int (*poll)(struct napi_struct *, int),
+				  int weight, int thread_cpuid);
+
+static inline void netif_napi_add_weight(struct net_device *dev,
+					 struct napi_struct *napi,
+					 int (*poll)(struct napi_struct *, int),
+					 int weight)
+{
+	netif_napi_add_weight_percpu(dev, napi, poll, weight, -1);
+}
 
 /**
  * netif_napi_add() - initialize a NAPI context
@@ -2665,6 +2676,26 @@ static inline void netif_napi_add_tx(struct net_device *dev,
 	netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
 }
 
+/**
+ * netif_napi_add_percpu() - initialize a CPU-pinned threaded NAPI context
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
+ * @thread_cpuid: CPU which this NAPI will be pinned to
+ *
+ * Variant of netif_napi_add() which pins the NAPI to the specified CPU.
+ * No behavior change in the "standard" (non-threaded) mode, but in the
+ * threaded mode, this NAPI will always be run on the given CPU.
+ */
+static inline void netif_napi_add_percpu(struct net_device *dev,
+					 struct napi_struct *napi,
+					 int (*poll)(struct napi_struct *, int),
+					 int thread_cpuid)
+{
+	netif_napi_add_weight_percpu(dev, napi, poll, NAPI_POLL_WEIGHT,
+				     thread_cpuid);
+}
+
 /**
  * __netif_napi_del - remove a NAPI context
  * @napi: NAPI context
diff --git a/net/core/dev.c b/net/core/dev.c
index 98bb5f890b88..93ca3df8e9dd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1428,8 +1428,13 @@ static int napi_kthread_create(struct napi_struct *n)
 	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
 	 * warning and work with loadavg.
 	 */
-	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
-				n->dev->name, n->napi_id);
+	if (n->thread_cpuid >= 0)
+		n->thread = kthread_run_on_cpu(napi_threaded_poll, n,
+					       n->thread_cpuid, "napi/%s-%u",
+					       n->dev->name);
+	else
+		n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
+					n->dev->name, n->napi_id);
 	if (IS_ERR(n->thread)) {
 		err = PTR_ERR(n->thread);
 		pr_err("kthread_run failed with err %d\n", err);
@@ -6640,8 +6645,10 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
 }
 EXPORT_SYMBOL(netif_queue_set_napi);
 
-void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
-			   int (*poll)(struct napi_struct *, int), int weight)
+void netif_napi_add_weight_percpu(struct net_device *dev,
+				  struct napi_struct *napi,
+				  int (*poll)(struct napi_struct *, int),
+				  int weight, int thread_cpuid)
 {
 	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
 		return;
@@ -6664,6 +6671,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 	napi->poll_owner = -1;
 #endif
 	napi->list_owner = -1;
+	napi->thread_cpuid = thread_cpuid;
 	set_bit(NAPI_STATE_SCHED, &napi->state);
 	set_bit(NAPI_STATE_NPSVC, &napi->state);
 	list_add_rcu(&napi->dev_list, &dev->napi_list);
@@ -6677,7 +6685,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 	dev->threaded = false;
 	netif_napi_set_irq(napi, -1);
 }
-EXPORT_SYMBOL(netif_napi_add_weight);
+EXPORT_SYMBOL(netif_napi_add_weight_percpu);
 
 void napi_disable(struct napi_struct *n)
 {
--
2.46.0