Message-ID: <176833579866.510.10170669932125279922.tip-bot2@tip-bot2>
Date: Tue, 13 Jan 2026 20:23:18 -0000
From: "tip-bot2 for Imran Khan" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Imran Khan <imran.f.khan@...cle.com>, Thomas Gleixner <tglx@...nel.org>,
x86@...nel.org, linux-kernel@...r.kernel.org, maz@...nel.org
Subject: [tip: irq/core] genirq/cpuhotplug: Notify about affinity changes breaking the affinity mask
The following commit has been merged into the irq/core branch of tip:
Commit-ID: dd9f6d30c64001ca4dde973ac04d8d155e856743
Gitweb: https://git.kernel.org/tip/dd9f6d30c64001ca4dde973ac04d8d155e856743
Author: Imran Khan <imran.f.khan@...cle.com>
AuthorDate: Tue, 13 Jan 2026 22:37:27 +08:00
Committer: Thomas Gleixner <tglx@...nel.org>
CommitterDate: Tue, 13 Jan 2026 21:18:16 +01:00
genirq/cpuhotplug: Notify about affinity changes breaking the affinity mask
During CPU offlining, interrupts affine to the outgoing CPU are migrated to
other online CPUs. This can break the original affinity mask if the outgoing
CPU was the last online CPU in that mask. The change is not propagated to
irq_desc::affinity_notify, which leaves users of the affinity notifier
mechanism with stale information.
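
[ Editor's note: for context, users of this mechanism register a
  struct irq_affinity_notify with irq_set_affinity_notifier(); with this
  change their ->notify() callback also runs when a CPU hot-unplug breaks
  the affinity mask. A minimal consumer sketch, where the "my_dev" driver
  and all its names are hypothetical and only the notifier API is real:

#include <linux/interrupt.h>
#include <linux/kref.h>

struct my_dev {
	unsigned int irq;
	struct irq_affinity_notify affinity_notify;
};

static void my_dev_affinity_notify(struct irq_affinity_notify *notify,
				   const cpumask_t *mask)
{
	struct my_dev *dev = container_of(notify, struct my_dev,
					  affinity_notify);

	/* Re-target per-CPU resources to the new effective affinity */
	pr_debug("my_dev %p: IRQ %u now affine to CPU%u\n",
		 dev, notify->irq, cpumask_first(mask));
}

static void my_dev_affinity_release(struct kref *ref)
{
	/* Notifier is embedded in my_dev, nothing to free here */
}

static int my_dev_setup_notifier(struct my_dev *dev)
{
	dev->affinity_notify.notify = my_dev_affinity_notify;
	dev->affinity_notify.release = my_dev_affinity_release;

	return irq_set_affinity_notifier(dev->irq, &dev->affinity_notify);
}
]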
Avoid this by scheduling affinity change notification work for interrupts
that were affined to the CPU being offlined, if the new target CPU is not
part of the original affinity mask.
Since irq_set_affinity_locked() uses the same logic to schedule the affinity
change notification work, split that logic out into a dedicated helper
function and use it in both places.
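
[ Editor's note: the helper keeps the existing reference counting
  discipline around schedule_work(): a reference is taken for the work
  item before queueing, and if schedule_work() returns false the work was
  already pending and already owns a reference, so the extra one is
  dropped. A generic illustration of that pattern, with a hypothetical
  "foo" object:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	struct kref kref;
	struct work_struct work;
};

static void foo_release(struct kref *ref)
{
	kfree(container_of(ref, struct foo, kref));
}

/* The work handler drops its reference with kref_put() when done */
static void foo_queue_notify(struct foo *f)
{
	kref_get(&f->kref);		/* reference for the queued work */
	if (!schedule_work(&f->work))
		kref_put(&f->kref, foo_release);  /* already queued */
}
]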
[ tglx: Removed the EXPORT(), removed the !SMP stub, moved the prototype,
added a lockdep assert instead of a comment, fixed up coding style
and name space. Polished and clarified the change log ]
Signed-off-by: Imran Khan <imran.f.khan@...cle.com>
Signed-off-by: Thomas Gleixner <tglx@...nel.org>
Link: https://patch.msgid.link/20260113143727.1041265-1-imran.f.khan@oracle.com
---
 kernel/irq/cpuhotplug.c |  6 ++++--
 kernel/irq/internals.h  |  2 +-
 kernel/irq/manage.c     | 26 ++++++++++++++++++--------
 3 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 755346e..cd5689e 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -177,9 +177,11 @@ void irq_migrate_all_off_this_cpu(void)
 		bool affinity_broken;
 
 		desc = irq_to_desc(irq);
-		scoped_guard(raw_spinlock, &desc->lock)
+		scoped_guard(raw_spinlock, &desc->lock) {
 			affinity_broken = migrate_one_irq(desc);
-
+			if (affinity_broken && desc->affinity_notify)
+				irq_affinity_schedule_notify_work(desc);
+		}
 		if (affinity_broken) {
 			pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
 					     irq, smp_processor_id());
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 202c50f..9412e57 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -135,6 +135,7 @@ extern bool irq_can_set_affinity_usr(unsigned int irq);
 
 extern int irq_do_set_affinity(struct irq_data *data,
 			       const struct cpumask *dest, bool force);
+extern void irq_affinity_schedule_notify_work(struct irq_desc *desc);
 
 #ifdef CONFIG_SMP
 extern int irq_setup_affinity(struct irq_desc *desc);
@@ -142,7 +143,6 @@ extern int irq_setup_affinity(struct irq_desc *desc);
 static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
 #endif
 
-
 #define for_each_action_of_desc(desc, act)	\
 	for (act = desc->action; act; act = act->next)
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index dde1aa6..9927e08 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -347,6 +347,21 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
 	return true;
 }
 
+/**
+ * irq_affinity_schedule_notify_work - Schedule work to notify about affinity change
+ * @desc: Interrupt descriptor whose affinity changed
+ */
+void irq_affinity_schedule_notify_work(struct irq_desc *desc)
+{
+	lockdep_assert_held(&desc->lock);
+
+	kref_get(&desc->affinity_notify->kref);
+	if (!schedule_work(&desc->affinity_notify->work)) {
+		/* Work was already scheduled, drop our extra ref */
+		kref_put(&desc->affinity_notify->kref, desc->affinity_notify->release);
+	}
+}
+
 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 			    bool force)
 {
@@ -367,14 +382,9 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 		irq_copy_pending(desc, mask);
 	}
 
-	if (desc->affinity_notify) {
-		kref_get(&desc->affinity_notify->kref);
-		if (!schedule_work(&desc->affinity_notify->work)) {
-			/* Work was already scheduled, drop our extra ref */
-			kref_put(&desc->affinity_notify->kref,
-				 desc->affinity_notify->release);
-		}
-	}
+	if (desc->affinity_notify)
+		irq_affinity_schedule_notify_work(desc);
+
 	irqd_set(data, IRQD_AFFINITY_SET);
 
 	return ret;