Message-Id: <1166018020.27217.805.camel@laptopd505.fenrus.org>
Date:	Wed, 13 Dec 2006 14:53:40 +0100
From:	Arjan van de Ven <arjan@...ux.intel.com>
To:	ebiederm@...ssion.com, mingo@...e.hu
Cc:	linux-kernel@...r.kernel.org
Subject: [patch] Add allowed_affinity to the irq_desc to make it possible
	to have restricted irqs

[due to a broken libata in current -git I've not been able to test this patch enough]


This patch adds an "allowed_affinity" mask to each interrupt, in addition to the
existing actual affinity mask. The new mask is also exported to userspace in the
same way as the actual affinity, so that irqbalance can find out about the
restriction and take it into account.
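
For illustration, a userspace consumer such as irqbalance could use the exported
mask roughly as in the sketch below. This is only a sketch, not part of the patch:
the helper names are mine, and it assumes the file prints a hex cpumask the same
way smp_affinity does and that the mask fits in a single unsigned long.

#include <stdio.h>
#include <stdlib.h>

/* read a hex cpumask from a /proc file; fall back to "all cpus" on error */
static unsigned long read_mask(const char *path)
{
	unsigned long mask = ~0UL;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%lx", &mask) != 1)
			mask = ~0UL;
		fclose(f);
	}
	return mask;
}

/* intersect the wanted mask with allowed_affinity before writing smp_affinity */
static void set_irq_affinity(int irq, unsigned long wanted)
{
	char path[64];
	unsigned long allowed, final;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/irq/%d/allowed_affinity", irq);
	allowed = read_mask(path);

	final = wanted & allowed;	/* never pick a forbidden cpu */
	if (!final)
		return;			/* nothing legal left to write */

	snprintf(path, sizeof(path), "/proc/irq/%d/smp_affinity", irq);
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "%lx\n", final);
		fclose(f);
	}
}

int main(int argc, char **argv)
{
	if (argc == 3)
		set_irq_affinity(atoi(argv[1]), strtoul(argv[2], NULL, 16));
	return 0;
}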

The purpose of this mask is to handle situations where interrupts just can't or
shouldn't go to all cpus; one example is the "per cpu" IRQs that powerpc and
others have. Another soon-to-come example is MSI-X devices that can generate a
different MSI interrupt for each cpu; in that case each such interrupt needs to
be strictly constrained to its designated cpu.
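
The patch itself adds no in-kernel setter helper, but an arch or MSI-X driver
could restrict an interrupt along the lines of the rough sketch below; the
function name is made up, and writing irq_desc[] directly under the descriptor
lock is only my assumption of how the field would be used.

#include <linux/irq.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>

static void restrict_irq_to_cpu(unsigned int irq, unsigned int cpu)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	/* only this cpu is ever a legal target for the interrupt */
	desc->allowed_affinity = cpumask_of_cpu(cpu);
	/* keep the current affinity inside the allowed set */
	cpus_and(desc->affinity, desc->affinity, desc->allowed_affinity);
	spin_unlock_irqrestore(&desc->lock, flags);
#endif
}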

Signed-off-by: Arjan van de Ven <arjan@...ux.intel.com>

---
 include/linux/irq.h |    2 ++
 kernel/irq/chip.c   |    1 +
 kernel/irq/handle.c |    3 ++-
 kernel/irq/manage.c |    5 ++++-
 kernel/irq/proc.c   |   27 +++++++++++++++++++++++++++
 5 files changed, 36 insertions(+), 2 deletions(-)

Index: linux-2.6/include/linux/irq.h
===================================================================
--- linux-2.6.orig/include/linux/irq.h
+++ linux-2.6/include/linux/irq.h
@@ -137,6 +137,7 @@ struct irq_chip {
  * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @lock:		locking for SMP
  * @affinity:		IRQ affinity on SMP
+ * @allowed_affinity:	The allowed affinity for this IRQ
  * @cpu:		cpu index useful for balancing
  * @pending_mask:	pending rebalanced interrupts
  * @dir:		/proc/irq/ procfs entry
@@ -160,6 +161,7 @@ struct irq_desc {
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
 	cpumask_t		affinity;
+	cpumask_t		allowed_affinity;
 	unsigned int		cpu;
 #endif
 #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
Index: linux-2.6/kernel/irq/chip.c
===================================================================
--- linux-2.6.orig/kernel/irq/chip.c
+++ linux-2.6/kernel/irq/chip.c
@@ -46,6 +46,7 @@ void dynamic_irq_init(unsigned int irq)
 	desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
 	desc->affinity = CPU_MASK_ALL;
+	desc->allowed_affinity = CPU_MASK_ALL;
 #endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
Index: linux-2.6/kernel/irq/handle.c
===================================================================
--- linux-2.6.orig/kernel/irq/handle.c
+++ linux-2.6/kernel/irq/handle.c
@@ -56,7 +56,8 @@ struct irq_desc irq_desc[NR_IRQS] __cach
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
 #ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
+		.affinity = CPU_MASK_ALL,
+		.allowed_affinity = CPU_MASK_ALL
 #endif
 	}
 };
Index: linux-2.6/kernel/irq/manage.c
===================================================================
--- linux-2.6.orig/kernel/irq/manage.c
+++ linux-2.6/kernel/irq/manage.c
@@ -278,8 +278,13 @@ int setup_irq(unsigned int irq, struct i
 
 	*p = new;
 #if defined(CONFIG_IRQ_PER_CPU)
-	if (new->flags & IRQF_PERCPU)
+	if (new->flags & IRQF_PERCPU) {
 		desc->status |= IRQ_PER_CPU;
+		/* don't allow affinity to be set for per cpu interrupts */
+#ifdef CONFIG_SMP
+		desc->allowed_affinity = CPU_MASK_NONE;
+#endif
+	}
 #endif
 	if (!shared) {
 		irq_chip_set_defaults(desc->chip);
Index: linux-2.6/kernel/irq/proc.c
===================================================================
--- linux-2.6.orig/kernel/irq/proc.c
+++ linux-2.6/kernel/irq/proc.c
@@ -47,6 +47,20 @@ static int irq_affinity_read_proc(char *
 	return len;
 }
 
+
+static int irq_affinity_read_allowed_proc(char *page, char **start, off_t off,
+				  int count, int *eof, void *data)
+{
+	int len = cpumask_scnprintf(page, count, irq_desc[(long)data].allowed_affinity);
+
+	if (count - len < 2)
+		return -EINVAL;
+	len += sprintf(page + len, "\n");
+	return len;
+}
+
+
+
 int no_irq_affinity;
 static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 				   unsigned long count, void *data)
@@ -62,6 +76,9 @@ static int irq_affinity_write_proc(struc
 	if (err)
 		return err;
 
+	/* mask off the allowed_affinity mask to only leave legal cpus */
+	cpus_and(new_value, new_value, irq_desc[irq].allowed_affinity);
+
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
@@ -141,6 +158,16 @@ void register_irq_proc(unsigned int irq)
 			entry->read_proc = irq_affinity_read_proc;
 			entry->write_proc = irq_affinity_write_proc;
 		}
+
+		/* create /proc/irq/<irq>/allowed_affinity */
+		entry = create_proc_entry("allowed_affinity", 0400, irq_desc[irq].dir);
+
+		if (entry) {
+			entry->nlink = 1;
+			entry->data = (void *)(long)irq;
+			entry->read_proc = irq_affinity_read_allowed_proc;
+		}
+
 	}
 #endif
 }
