Date:	Thu, 24 Jul 2008 12:50:44 +0200
From:	Sebastien Dugue <sebastien.dugue@...l.net>
To:	Linux-rt <linux-rt-users@...r.kernel.org>
Cc:	linux-ppc <linuxppc-dev@...abs.org>,
	linux-kernel <linux-kernel@...r.kernel.org>,
	Benjamin Herrenschmidt <benh@...nel.crashing.org>,
	Paul Mackerras <paulus@...ba.org>,
	Michael Ellerman <michael@...erman.id.au>,
	Tim Chavez <tinytim@...ibm.com>,
	Jean Pierre Dion <jean-pierre.dion@...l.net>,
	Gilles Carry <Gilles.Carry@....bull.net>
Subject: [PATCH 2/2][RT] powerpc - Make the irq reverse mapping radix tree
 lockless

From: Sebastien Dugue <sebastien.dugue@...l.net>
Date: Tue, 22 Jul 2008 11:56:41 +0200
Subject: [PATCH][RT] powerpc - Make the irq reverse mapping radix tree lockless

  The radix tree used by interrupt controllers for their irq reverse mapping
(currently only the XICS found on pSeries) has a complex locking scheme
dating back to before the advent of the concurrent radix tree on preempt-rt.

  Take advantage of this, and of the fact that the items stored in the tree
are pointers to elements of a static array (irq_map) which can never go away
under us, to simplify the locking.
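
  For illustration only (the actual code is in the irq_radix_revmap() hunk
below), the read side boils down to a bare radix_tree_lookup() whose result
needs no protection, since the returned pointer always references a slot of
the static irq_map[] array and therefore can never be freed under us:

	struct irq_map_entry *ptr;
	unsigned int virq = NO_IRQ;

	/* Lockless lookup: no read-side locking or RCU needed */
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
	if (ptr)
		virq = ptr - irq_map;	/* index into the static irq_map[] array */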

  Concurrency between readers and writers is handled by the intrinsic
properties of the concurrent radix tree. Concurrency between the tree
initialization, which is done asynchronously, and reader or writer accesses
is handled via an atomic variable (revmap_trees_allocated) which is set once
the trees have been initialized and is checked before any reader or writer
access, just as we used to check for tree.gfp_mask != 0 before.
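
  For illustration (the actual code is in the irq_late_init() and
irq_radix_revmap() hunks below), this is the usual "initialize, then publish
behind a memory barrier" idiom:

	/* init side (irq_late_init) */
	INIT_RADIX_TREE(&host->revmap_data.tree, GFP_ATOMIC);
	smp_mb();			/* make the tree init visible first */
	atomic_set(&revmap_trees_allocated, 1);

	/* reader/writer side */
	if (atomic_read(&revmap_trees_allocated) == 0)
		return irq_find_mapping(host, hwirq);	/* slow path fallback */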

Signed-off-by: Sebastien Dugue <sebastien.dugue@...l.net>
Cc: Benjamin Herrenschmidt <benh@...nel.crashing.org>
Cc: Paul Mackerras <paulus@...ba.org>

---
 arch/powerpc/kernel/irq.c |  102 ++++++++++++----------------------------------
 1 file changed, 27 insertions(+), 75 deletions(-)

Index: linux-2.6.25.8-rt7/arch/powerpc/kernel/irq.c
===================================================================
--- linux-2.6.25.8-rt7.orig/arch/powerpc/kernel/irq.c
+++ linux-2.6.25.8-rt7/arch/powerpc/kernel/irq.c
@@ -403,8 +403,7 @@ void do_softirq(void)
 
 static LIST_HEAD(irq_hosts);
 static DEFINE_RAW_SPINLOCK(irq_big_lock);
-static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
-static unsigned int irq_radix_writer;
+static atomic_t revmap_trees_allocated = ATOMIC_INIT(0);
 struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_host *irq_default_host;
@@ -547,57 +546,6 @@ void irq_set_virq_count(unsigned int cou
 		irq_virq_count = count;
 }
 
-/* radix tree not lockless safe ! we use a brlock-type mecanism
- * for now, until we can use a lockless radix tree
- */
-static void irq_radix_wrlock(unsigned long *flags)
-{
-	unsigned int cpu, ok;
-
-	spin_lock_irqsave(&irq_big_lock, *flags);
-	irq_radix_writer = 1;
-	smp_mb();
-	do {
-		barrier();
-		ok = 1;
-		for_each_possible_cpu(cpu) {
-			if (per_cpu(irq_radix_reader, cpu)) {
-				ok = 0;
-				break;
-			}
-		}
-		if (!ok)
-			cpu_relax();
-	} while(!ok);
-}
-
-static void irq_radix_wrunlock(unsigned long flags)
-{
-	smp_wmb();
-	irq_radix_writer = 0;
-	spin_unlock_irqrestore(&irq_big_lock, flags);
-}
-
-static void irq_radix_rdlock(unsigned long *flags)
-{
-	local_irq_save(*flags);
-	__get_cpu_var(irq_radix_reader) = 1;
-	smp_mb();
-	if (likely(irq_radix_writer == 0))
-		return;
-	__get_cpu_var(irq_radix_reader) = 0;
-	smp_wmb();
-	spin_lock(&irq_big_lock);
-	__get_cpu_var(irq_radix_reader) = 1;
-	spin_unlock(&irq_big_lock);
-}
-
-static void irq_radix_rdunlock(unsigned long flags)
-{
-	__get_cpu_var(irq_radix_reader) = 0;
-	local_irq_restore(flags);
-}
-
 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
 			    irq_hw_number_t hwirq)
 {
@@ -752,7 +700,6 @@ void irq_dispose_mapping(unsigned int vi
 {
 	struct irq_host *host;
 	irq_hw_number_t hwirq;
-	unsigned long flags;
 
 	if (virq == NO_IRQ)
 		return;
@@ -784,15 +731,20 @@ void irq_dispose_mapping(unsigned int vi
 		if (hwirq < host->revmap_data.linear.size)
 			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
 		break;
-	case IRQ_HOST_MAP_TREE:
+	case IRQ_HOST_MAP_TREE: {
+		DEFINE_RADIX_TREE_CONTEXT(ctx, &host->revmap_data.tree);
+
 		/* Check if radix tree allocated yet */
-		if (host->revmap_data.tree.gfp_mask == 0)
+		if (atomic_read(&revmap_trees_allocated) == 0)
 			break;
-		irq_radix_wrlock(&flags);
-		radix_tree_delete(&host->revmap_data.tree, hwirq);
-		irq_radix_wrunlock(flags);
+
+		radix_tree_lock(&ctx);
+		radix_tree_delete(ctx.tree, hwirq);
+		radix_tree_unlock(&ctx);
+
 		break;
 	}
+	}
 
 	/* Destroy map */
 	smp_mb();
@@ -845,22 +797,20 @@ unsigned int irq_radix_revmap(struct irq
 	struct radix_tree_root *tree;
 	struct irq_map_entry *ptr;
 	unsigned int virq;
-	unsigned long flags;
 
 	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
 
-	/* Check if the radix tree exist yet. We test the value of
-	 * the gfp_mask for that. Sneaky but saves another int in the
-	 * structure. If not, we fallback to slow mode
-	 */
-	tree = &host->revmap_data.tree;
-	if (tree->gfp_mask == 0)
+	/* Check if the radix tree exist yet. */
+	if (atomic_read(&revmap_trees_allocated) == 0)
 		return irq_find_mapping(host, hwirq);
 
-	/* Now try to resolve */
-	irq_radix_rdlock(&flags);
+	/*
+	 * Now try to resolve
+	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
+	 * as it's referencing an entry in the static irq_map table.
+	 */
+	tree = &host->revmap_data.tree;
 	ptr = radix_tree_lookup(tree, hwirq);
-	irq_radix_rdunlock(flags);
 
 	/* Found it, return */
 	if (ptr) {
@@ -871,9 +821,10 @@ unsigned int irq_radix_revmap(struct irq
 	/* If not there, try to insert it */
 	virq = irq_find_mapping(host, hwirq);
 	if (virq != NO_IRQ) {
-		irq_radix_wrlock(&flags);
-		radix_tree_insert(tree, hwirq, &irq_map[virq]);
-		irq_radix_wrunlock(flags);
+		DEFINE_RADIX_TREE_CONTEXT(ctx, tree);
+		radix_tree_lock(&ctx);
+		radix_tree_insert(ctx.tree, hwirq, &irq_map[virq]);
+		radix_tree_unlock(&ctx);
 	}
 	return virq;
 }
@@ -984,14 +935,15 @@ void irq_early_init(void)
 static int irq_late_init(void)
 {
 	struct irq_host *h;
-	unsigned long flags;
 
-	irq_radix_wrlock(&flags);
 	list_for_each_entry(h, &irq_hosts, link) {
 		if (h->revmap_type == IRQ_HOST_MAP_TREE)
 			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
 	}
-	irq_radix_wrunlock(flags);
+
+	/* Make sure the radix trees inits are visible before setting the flag */
+	smp_mb();
+	atomic_set(&revmap_trees_allocated, 1);
 
 	return 0;
 }
--
