[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230530214550.864894-5-rrendec@redhat.com>
Date: Tue, 30 May 2023 17:45:49 -0400
From: Radu Rendec <rrendec@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: Marc Zyngier <maz@...nel.org>, Thomas Gleixner <tglx@...utronix.de>
Subject: [RFC PATCH 4/5] irq: Move SMP affinity write handler out of proc.c
This patch prepares the ground for setting the SMP affinity from sysfs.
The bulk of the code is identical for procfs and sysfs, except for the
cpumask parsing functions, where procfs requires the _user variants.
Summary of changes:
- irq_select_affinity_usr() and write_irq_affinity() are moved
from proc.c to irqdesc.c
- write_irq_affinity() is slightly modified to allow using the other
variant of cpumask parsing functions
- the definition of no_irq_affinity is moved from proc.c to manage.c
and available only when CONFIG_SMP is enabled
- the declaration of no_irq_affinity is available only when CONFIG_SMP
is enabled
Note that all existing use cases of no_irq_affinity were already
confined within CONFIG_SMP preprocessor conditionals.
Signed-off-by: Radu Rendec <rrendec@...hat.com>
---
include/linux/irq.h | 2 ++
kernel/irq/internals.h | 2 ++
kernel/irq/irqdesc.c | 67 +++++++++++++++++++++++++++++++++++++++
kernel/irq/manage.c | 2 ++
kernel/irq/proc.c | 72 +++---------------------------------------
5 files changed, 78 insertions(+), 67 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 7710f157e12de..0393fc02cfd46 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -639,7 +639,9 @@ static inline void irq_move_masked_irq(struct irq_data *data) { }
static inline void irq_force_complete_move(struct irq_desc *desc) { }
#endif
+#ifdef CONFIG_SMP
extern int no_irq_affinity;
+#endif
int irq_set_parent(int irq, int parent_irq);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index c75cd836155c9..381a0b4c1d381 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -147,6 +147,8 @@ extern int irq_do_set_affinity(struct irq_data *data,
#ifdef CONFIG_SMP
extern int irq_setup_affinity(struct irq_desc *desc);
+extern ssize_t write_irq_affinity(unsigned int irq, const char __user *buffer,
+ size_t count, bool is_list, bool is_user);
#else
static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
#endif
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index ec52b8b41002e..a46a76c29b8d1 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -133,6 +133,73 @@ EXPORT_SYMBOL_GPL(nr_irqs);
static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
+#ifdef CONFIG_SMP
+
+#ifndef CONFIG_AUTO_IRQ_AFFINITY
+static inline int irq_select_affinity_usr(unsigned int irq)
+{
+ /*
+ * If the interrupt is started up already then this fails. The
+ * interrupt is assigned to an online CPU already. There is no
+ * point to move it around randomly. Tell user space that the
+ * selected mask is bogus.
+ *
+ * If not then any change to the affinity is pointless because the
+ * startup code invokes irq_setup_affinity() which will select
+ * a online CPU anyway.
+ */
+ return -EINVAL;
+}
+#else
+/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
+static inline int irq_select_affinity_usr(unsigned int irq)
+{
+ return irq_select_affinity(irq);
+}
+#endif
+
+ssize_t write_irq_affinity(unsigned int irq, const char __user *buffer,
+ size_t count, bool is_list, bool is_user)
+{
+ cpumask_var_t mask;
+ int err;
+
+ if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
+ return -EIO;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (is_user)
+ err = is_list ? cpumask_parselist_user(buffer, count, mask) :
+ cpumask_parse_user(buffer, count, mask);
+ else
+ err = is_list ? cpulist_parse(buffer, mask) :
+ cpumask_parse(buffer, mask);
+ if (err)
+ goto free_cpumask;
+
+ /*
+ * Do not allow disabling IRQs completely - it's a too easy
+ * way to make the system unusable accidentally :-) At least
+ * one online CPU still has to be targeted.
+ */
+ if (!cpumask_intersects(mask, cpu_online_mask)) {
+ /*
+ * Special case for empty set - allow the architecture code
+ * to set default SMP affinity.
+ */
+ err = irq_select_affinity_usr(irq) ? -EINVAL : count;
+ } else {
+ err = irq_set_affinity(irq, mask) ?: count;
+ }
+
+free_cpumask:
+ free_cpumask_var(mask);
+ return err;
+}
+#endif
+
#ifdef CONFIG_SPARSE_IRQ
static void irq_kobj_release(struct kobject *kobj);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index eec9b94747439..91cee7270d221 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -143,6 +143,8 @@ EXPORT_SYMBOL(synchronize_irq);
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;
+int no_irq_affinity;
+
static bool __irq_can_set_affinity(struct irq_desc *desc)
{
if (!desc || !irqd_can_balance(&desc->irq_data) ||
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 623b8136e9af3..76f0dda1f26b8 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -100,7 +100,6 @@ static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
return 0;
}
-int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
return show_irq_affinity(AFFINITY, m);
@@ -111,81 +110,20 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
return show_irq_affinity(AFFINITY_LIST, m);
}
-#ifndef CONFIG_AUTO_IRQ_AFFINITY
-static inline int irq_select_affinity_usr(unsigned int irq)
-{
- /*
- * If the interrupt is started up already then this fails. The
- * interrupt is assigned to an online CPU already. There is no
- * point to move it around randomly. Tell user space that the
- * selected mask is bogus.
- *
- * If not then any change to the affinity is pointless because the
- * startup code invokes irq_setup_affinity() which will select
- * a online CPU anyway.
- */
- return -EINVAL;
-}
-#else
-/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
-static inline int irq_select_affinity_usr(unsigned int irq)
-{
- return irq_select_affinity(irq);
-}
-#endif
-
-static ssize_t write_irq_affinity(int type, struct file *file,
+static ssize_t irq_affinity_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
unsigned int irq = (int)(long)pde_data(file_inode(file));
- cpumask_var_t new_value;
- int err;
-
- if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
- return -EIO;
-
- if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
- return -ENOMEM;
-
- if (type)
- err = cpumask_parselist_user(buffer, count, new_value);
- else
- err = cpumask_parse_user(buffer, count, new_value);
- if (err)
- goto free_cpumask;
- /*
- * Do not allow disabling IRQs completely - it's a too easy
- * way to make the system unusable accidentally :-) At least
- * one online CPU still has to be targeted.
- */
- if (!cpumask_intersects(new_value, cpu_online_mask)) {
- /*
- * Special case for empty set - allow the architecture code
- * to set default SMP affinity.
- */
- err = irq_select_affinity_usr(irq) ? -EINVAL : count;
- } else {
- err = irq_set_affinity(irq, new_value);
- if (!err)
- err = count;
- }
-
-free_cpumask:
- free_cpumask_var(new_value);
- return err;
-}
-
-static ssize_t irq_affinity_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *pos)
-{
- return write_irq_affinity(0, file, buffer, count, pos);
+ return write_irq_affinity(irq, buffer, count, false, true);
}
static ssize_t irq_affinity_list_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
- return write_irq_affinity(1, file, buffer, count, pos);
+ unsigned int irq = (int)(long)pde_data(file_inode(file));
+
+ return write_irq_affinity(irq, buffer, count, true, true);
}
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
--
2.40.1
Powered by blists - more mailing lists