Message-ID: <20240516190437.3545310-5-costa.shul@redhat.com>
Date: Thu, 16 May 2024 22:04:34 +0300
From: Costa Shulyupin <costa.shul@...hat.com>
To: longman@...hat.com,
pauld@...hat.com,
juri.lelli@...hat.com,
prarit@...hat.com,
vschneid@...hat.com,
Anna-Maria Behnsen <anna-maria@...utronix.de>,
Frederic Weisbecker <frederic@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Zefan Li <lizefan.x@...edance.com>,
Tejun Heo <tj@...nel.org>,
Johannes Weiner <hannes@...xchg.org>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Petr Mladek <pmladek@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Masahiro Yamada <masahiroy@...nel.org>,
Randy Dunlap <rdunlap@...radead.org>,
Yoann Congal <yoann.congal@...le.fr>,
"Gustavo A. R. Silva" <gustavoars@...nel.org>,
Nhat Pham <nphamcs@...il.com>,
Costa Shulyupin <costa.shul@...hat.com>,
linux-kernel@...r.kernel.org,
cgroups@...r.kernel.org
Subject: [PATCH v1 4/7] sched/isolation: Adjust affinity of managed irqs according to change of housekeeping cpumask
irq_affinity_adjust() is modeled on irq_affinity_online_cpu() and
irq_restore_affinity_of_irq(): when the housekeeping cpumask shrinks, it
moves managed IRQs whose affinity intersects the newly isolated CPUs
onto the remaining housekeeping CPUs.

Core test snippets, without the supporting test infrastructure:

1. Create a managed IRQ bound to a specific CPU:
/* Illustrative helpers: test_cpu and test_irq_cb are not part of the
 * patch; any nonzero CPU and a no-op handler are sufficient here. */
static unsigned int test_cpu = 1;

static irqreturn_t test_irq_cb(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int test_set_affinity(struct irq_data *data,
			     const struct cpumask *m, bool force)
{
	irq_data_update_effective_affinity(data, m);
	return 0;
}

static int make_test_irq(void)
{
	struct irq_affinity_desc a = {
		.mask = *cpumask_of(test_cpu),
		.is_managed = true,
	};
	static struct irq_chip test_chip = {
		.irq_set_affinity = test_set_affinity,
	};
	int test_irq = __irq_alloc_descs(-1, 1, 1, 0, THIS_MODULE, &a);

	irq_set_chip(test_irq, &test_chip);
	irq_set_status_flags(test_irq, IRQ_MOVE_PCNTXT);
	request_irq(test_irq, test_irq_cb, 0, "test_affinity", NULL);
	return test_irq;
}
2. Isolate the specified CPU (one possible command sequence is sketched
   after this list).
3. Verify that test_irq is no longer affine to test_cpu:
   cat /proc/irq/$test_irq/smp_affinity_list
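A minimal sketch of steps 2 and 3, assuming cgroup v2 is mounted at
/sys/fs/cgroup and $test_cpu/$test_irq hold the values used by the
snippet above (the partition name "isol" is arbitrary):

  mkdir /sys/fs/cgroup/isol
  echo $test_cpu > /sys/fs/cgroup/isol/cpuset.cpus
  echo isolated > /sys/fs/cgroup/isol/cpuset.cpus.partition
  cat /proc/irq/$test_irq/smp_affinity_list

With this patch, writing "isolated" updates the housekeeping cpumasks
and triggers irq_affinity_adjust(), so the last command is expected to
list only housekeeping CPUs, not $test_cpu.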
Signed-off-by: Costa Shulyupin <costa.shul@...hat.com>
---
kernel/cgroup/cpuset.c | 3 ++-
kernel/sched/isolation.c | 44 +++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 45 insertions(+), 2 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 9d01e8e0a3ed9..2e59a2983eb2e 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -225,7 +225,8 @@ static struct list_head remote_children;
 /*
  * The set of housekeeping flags to be updated for CPU isolation
  */
-#define HOUSEKEEPING_FLAGS (BIT(HK_TYPE_TIMER) | BIT(HK_TYPE_RCU))
+#define HOUSEKEEPING_FLAGS (BIT(HK_TYPE_TIMER) | BIT(HK_TYPE_RCU) \
+			    | BIT(HK_TYPE_MANAGED_IRQ))
 
 /*
  * Partition root states:
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 85a17d39d8bb0..b0503ed362fce 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -135,6 +135,43 @@ static void resettle_all_timers(cpumask_var_t enable_mask, cpumask_var_t disable
 	}
 }
 
+static int irq_affinity_adjust(cpumask_var_t disable_mask)
+{
+	unsigned int irq;
+	cpumask_var_t mask;
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	irq_lock_sparse();
+	for_each_active_irq(irq) {
+		struct irq_desc *desc = irq_to_desc(irq);
+
+		raw_spin_lock_irq(&desc->lock);
+		struct irq_data *data = irq_desc_get_irq_data(desc);
+
+		if (irqd_affinity_is_managed(data) &&
+		    cpumask_weight_and(disable_mask, irq_data_get_affinity_mask(data))) {
+
+			cpumask_and(mask, cpu_online_mask, irq_default_affinity);
+			cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ));
+			irq_set_affinity_locked(data, mask, true);
+			WARN_ON(cpumask_weight_and(irq_data_get_effective_affinity_mask(data),
+						   disable_mask));
+			WARN_ON(!cpumask_subset(irq_data_get_effective_affinity_mask(data),
+						cpu_online_mask));
+			WARN_ON(!cpumask_subset(irq_data_get_effective_affinity_mask(data),
+						housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)));
+		}
+		raw_spin_unlock_irq(&desc->lock);
+	}
+	irq_unlock_sparse();
+
+	free_cpumask_var(mask);
+
+	return 0;
+}
+
 /*
  * housekeeping_update - change housekeeping.cpumasks[type] and propagate the
  * change.
@@ -144,6 +181,8 @@ static void resettle_all_timers(cpumask_var_t enable_mask, cpumask_var_t disable
  */
 static int housekeeping_update(enum hk_type type, cpumask_var_t update)
 {
+	int err = 0;
+
 	struct {
 		struct cpumask changed;
 		struct cpumask enable;
@@ -171,11 +210,14 @@ static int housekeeping_update(enum hk_type type, cpumask_var_t update)
 		lockup_detector_reconfigure();
 #endif
 		break;
+	case HK_TYPE_MANAGED_IRQ:
+		err = irq_affinity_adjust(&masks->disable);
+		break;
 	default:
 		break;
 	}
 	kfree(masks);
-	return 0;
+	return err;
 }
 
 static int __init housekeeping_setup(char *str, unsigned long flags)
--
2.45.0