Message-ID: <20241206001209.213168-2-ahmed.zaki@intel.com>
Date: Thu, 5 Dec 2024 17:12:08 -0700
From: Ahmed Zaki <ahmed.zaki@...el.com>
To: netdev@...r.kernel.org
Cc: Ahmed Zaki <ahmed.zaki@...el.com>,
Jakub Kicinski <kuba@...nel.org>
Subject: [PATCH RFC net-next 1/2] net: napi: add CPU affinity to napi->config
A common task for most drivers is to remember the user-set CPU
affinity of their IRQs. On each netdev reset, the driver must then
re-assign the user's settings to the IRQs.
Add a CPU affinity mask to napi->config. To delegate CPU affinity
management to the core, drivers must:
1 - add a persistent napi config: netif_napi_add_config()
2 - bind an IRQ to the napi instance: netif_napi_set_irq()
The core will then make sure the stored affinity is re-assigned to the
napi's IRQ on each reset, and will record user changes to the mask via
an IRQ affinity notifier (a usage sketch follows the "---" marker
below).
Suggested-by: Jakub Kicinski <kuba@...nel.org>
Signed-off-by: Ahmed Zaki <ahmed.zaki@...el.com>
---
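For illustration only, a minimal sketch of how a driver would opt in
to core-managed affinity; "my_queue", "my_queue_poll",
"my_queue_attach_napi" and the field layout are hypothetical and not
part of this patch:

#include <linux/netdevice.h>

struct my_queue {
	struct napi_struct napi;
	int irq;
};

static int my_queue_poll(struct napi_struct *napi, int budget)
{
	/* stub: a real driver would process RX/TX here and call
	 * napi_complete_done() when finished
	 */
	return 0;
}

static void my_queue_attach_napi(struct net_device *netdev,
				 struct my_queue *q, int idx)
{
	/* 1 - persistent config slot: the affinity mask is kept in
	 *     netdev->napi_config[idx] and survives netdev resets
	 */
	netif_napi_add_config(netdev, &q->napi, my_queue_poll, idx);

	/* 2 - bind the IRQ: with this patch the core re-applies the
	 *     stored mask and registers the affinity notifier
	 */
	netif_napi_set_irq(&q->napi, q->irq);
}

With this, the driver no longer re-applies the user's mask itself: the
notifier copies user changes into napi->config and
netif_napi_set_irq() restores them on the next reset.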
include/linux/netdevice.h | 22 ++++++++++++++++++++++
net/core/dev.c | 7 ++++++-
2 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ecc686409161..8660de791a1a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -350,6 +350,7 @@ struct napi_config {
u64 gro_flush_timeout;
u64 irq_suspend_timeout;
u32 defer_hard_irqs;
+ cpumask_t affinity_mask;
unsigned int napi_id;
};
@@ -393,6 +394,7 @@ struct napi_struct {
int irq;
int index;
struct napi_config *config;
+ struct irq_affinity_notify affinity_notify;
};
enum {
@@ -2666,10 +2668,30 @@ static inline void *netdev_priv(const struct net_device *dev)
void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
enum netdev_queue_type type,
struct napi_struct *napi);
+static inline void
+netif_napi_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct napi_struct *napi =
+ container_of(notify, struct napi_struct, affinity_notify);
+
+ if (napi->config)
+ cpumask_copy(&napi->config->affinity_mask, mask);
+}
+
+static inline void
+netif_napi_affinity_release(struct kref __always_unused *ref) {}
static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
{
napi->irq = irq;
+
+ if (irq > 0 && napi->config) {
+ napi->affinity_notify.notify = netif_napi_affinity_notify;
+ napi->affinity_notify.release = netif_napi_affinity_release;
+ irq_set_affinity_notifier(irq, &napi->affinity_notify);
+ irq_set_affinity(irq, &napi->config->affinity_mask);
+ }
}
/* Default NAPI poll() weight
diff --git a/net/core/dev.c b/net/core/dev.c
index 13d00fc10f55..d58779d57994 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6843,6 +6843,8 @@ void __netif_napi_del(struct napi_struct *napi)
return;
if (napi->config) {
+ if (napi->irq > 0)
+ irq_set_affinity_notifier(napi->irq, NULL);
napi->index = -1;
napi->config = NULL;
}
@@ -11184,7 +11186,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
{
struct net_device *dev;
size_t napi_config_sz;
- unsigned int maxqs;
+ unsigned int maxqs, i;
BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -11280,6 +11282,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT);
if (!dev->napi_config)
goto free_all;
+ for (i = 0; i < maxqs; i++)
+ cpumask_copy(&dev->napi_config[i].affinity_mask,
+ cpu_online_mask);
strscpy(dev->name, name);
dev->name_assign_type = name_assign_type;
--
2.47.0