Date:   Fri,  1 Sep 2017 13:03:58 +0800
From:   Chen Yu <yu.c.chen@...el.com>
To:     x86@...nel.org
Cc:     Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        "H. Peter Anvin" <hpa@...or.com>, Rui Zhang <rui.zhang@...el.com>,
        linux-kernel@...r.kernel.org, Chen Yu <yu.c.chen@...el.com>,
        "Rafael J. Wysocki" <rjw@...ysocki.net>,
        Len Brown <lenb@...nel.org>,
        Dan Williams <dan.j.williams@...el.com>
Subject: [PATCH 1/4][RFC v2] x86/apic: Extend the definition of vector_irq

Wrap the per-CPU vector_irq array in a structure and add a field to
record the number of vectors assigned per CPU. This prepares for the
vector spreading work.

No functional change.
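
As an illustration (not part of this patch), the follow-up spreading
work is expected to keep the new 'alloc' field in sync with the
descriptor table, roughly along these lines; the counter updates below
are a sketch only and do not appear in this series:

	/* Claim a vector on @cpu and account for it. */
	per_cpu(vector_irq, cpu).desc[vector] = irq_to_desc(irq);
	per_cpu(vector_irq, cpu).alloc++;

	/* Release the vector and drop the count again. */
	per_cpu(vector_irq, cpu).desc[vector] = VECTOR_UNUSED;
	per_cpu(vector_irq, cpu).alloc--;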

Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: "Rafael J. Wysocki" <rjw@...ysocki.net>
Cc: Len Brown <lenb@...nel.org>
Cc: Dan Williams <dan.j.williams@...el.com>
Signed-off-by: Chen Yu <yu.c.chen@...el.com>
---
 arch/x86/include/asm/hw_irq.h |  8 ++++++--
 arch/x86/kernel/apic/vector.c | 25 +++++++++++++------------
 arch/x86/kernel/irq.c         | 18 +++++++++---------
 arch/x86/kernel/irqinit.c     |  9 +++++----
 arch/x86/lguest/boot.c        |  2 +-
 5 files changed, 34 insertions(+), 28 deletions(-)

diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index d6dbafb..b2243fe 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -179,8 +179,12 @@ extern char irq_entries_start[];
 #define VECTOR_UNUSED		NULL
 #define VECTOR_RETRIGGERED	((void *)~0UL)
 
-typedef struct irq_desc* vector_irq_t[NR_VECTORS];
-DECLARE_PER_CPU(vector_irq_t, vector_irq);
+struct vector_irq_info {
+	struct irq_desc *desc[NR_VECTORS];
+	int alloc;
+};
+
+DECLARE_PER_CPU(struct vector_irq_info, vector_irq);
 
 #endif /* !ASSEMBLY_ */
 
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index b3af457..2ce1021 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -179,7 +179,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 			goto next;
 
 		for_each_cpu(new_cpu, vector_searchmask) {
-			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
+			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu).desc[vector]))
 				goto next;
 		}
 		/* Found one! */
@@ -189,7 +189,8 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 		if (d->cfg.vector)
 			cpumask_copy(d->old_domain, d->domain);
 		for_each_cpu(new_cpu, vector_searchmask)
-			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
+			per_cpu(vector_irq, new_cpu).desc[vector] = irq_to_desc(irq);
+
 		goto update;
 
 next_cpu:
@@ -268,7 +269,7 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 
 	vector = data->cfg.vector;
 	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
-		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
+		per_cpu(vector_irq, cpu).desc[vector] = VECTOR_UNUSED;
 
 	data->cfg.vector = 0;
 	cpumask_clear(data->domain);
@@ -285,9 +286,9 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 		     vector++) {
-			if (per_cpu(vector_irq, cpu)[vector] != desc)
+			if (per_cpu(vector_irq, cpu).desc[vector] != desc)
 				continue;
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
+			per_cpu(vector_irq, cpu).desc[vector] = VECTOR_UNUSED;
 			break;
 		}
 	}
@@ -481,17 +482,17 @@ static void __setup_vector_irq(int cpu)
 		if (!data || !cpumask_test_cpu(cpu, data->domain))
 			continue;
 		vector = data->cfg.vector;
-		per_cpu(vector_irq, cpu)[vector] = desc;
+		per_cpu(vector_irq, cpu).desc[vector] = desc;
 	}
 	/* Mark the free vectors */
 	for (vector = 0; vector < NR_VECTORS; ++vector) {
-		desc = per_cpu(vector_irq, cpu)[vector];
+		desc = per_cpu(vector_irq, cpu).desc[vector];
 		if (IS_ERR_OR_NULL(desc))
 			continue;
 
 		data = apic_chip_data(irq_desc_get_irq_data(desc));
 		if (!cpumask_test_cpu(cpu, data->domain))
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
+			per_cpu(vector_irq, cpu).desc[vector] = VECTOR_UNUSED;
 	}
 }
 
@@ -511,7 +512,7 @@ void setup_vector_irq(int cpu)
 	 * legacy vector to irq mapping:
 	 */
 	for (irq = 0; irq < nr_legacy_irqs(); irq++)
-		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);
+		per_cpu(vector_irq, cpu).desc[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);
 
 	__setup_vector_irq(cpu);
 }
@@ -596,7 +597,7 @@ asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
 		unsigned int irr;
 
 	retry:
-		desc = __this_cpu_read(vector_irq[vector]);
+		desc = __this_cpu_read(vector_irq.desc[vector]);
 		if (IS_ERR_OR_NULL(desc))
 			continue;
 
@@ -647,7 +648,7 @@ asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
 			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
 			goto unlock;
 		}
-		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+		__this_cpu_write(vector_irq.desc[vector], VECTOR_UNUSED);
 		cpumask_clear_cpu(me, data->old_domain);
 unlock:
 		raw_spin_unlock(&desc->lock);
@@ -781,7 +782,7 @@ void irq_force_complete_move(struct irq_desc *desc)
 	 * descriptor set in their vector array. Clean it up.
 	 */
 	for_each_cpu(cpu, data->old_domain)
-		per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;
+		per_cpu(vector_irq, cpu).desc[cfg->old_vector] = VECTOR_UNUSED;
 
 	/* Cleanup the left overs of the (half finished) move */
 	cpumask_clear(data->old_domain);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 4ed0aba..ae11e86 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -239,7 +239,7 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	/* entering_irq() tells RCU that we're not quiescent.  Check it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 
-	desc = __this_cpu_read(vector_irq[vector]);
+	desc = __this_cpu_read(vector_irq.desc[vector]);
 
 	if (!handle_irq(desc, regs)) {
 		ack_APIC_irq();
@@ -249,7 +249,7 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 					     __func__, smp_processor_id(),
 					     vector);
 		} else {
-			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+			__this_cpu_write(vector_irq.desc[vector], VECTOR_UNUSED);
 		}
 	}
 
@@ -375,7 +375,7 @@ int check_irq_vectors_for_cpu_disable(void)
 
 	this_count = 0;
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		desc = __this_cpu_read(vector_irq[vector]);
+		desc = __this_cpu_read(vector_irq.desc[vector]);
 		if (IS_ERR_OR_NULL(desc))
 			continue;
 		/*
@@ -433,7 +433,7 @@ int check_irq_vectors_for_cpu_disable(void)
 		for (vector = FIRST_EXTERNAL_VECTOR;
 		     vector < first_system_vector; vector++) {
 			if (!test_bit(vector, used_vectors) &&
-			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
+			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu).desc[vector])) {
 				if (++count == this_count)
 					return 0;
 			}
@@ -475,24 +475,24 @@ void fixup_irqs(void)
 	 * nothing else will touch it.
 	 */
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
+		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq.desc[vector])))
 			continue;
 
 		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
 		if (irr  & (1 << (vector % 32))) {
-			desc = __this_cpu_read(vector_irq[vector]);
+			desc = __this_cpu_read(vector_irq.desc[vector]);
 
 			raw_spin_lock(&desc->lock);
 			data = irq_desc_get_irq_data(desc);
 			chip = irq_data_get_irq_chip(data);
 			if (chip->irq_retrigger) {
 				chip->irq_retrigger(data);
-				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
+				__this_cpu_write(vector_irq.desc[vector], VECTOR_RETRIGGERED);
 			}
 			raw_spin_unlock(&desc->lock);
 		}
-		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
-			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+		if (__this_cpu_read(vector_irq.desc[vector]) != VECTOR_RETRIGGERED)
+			__this_cpu_write(vector_irq.desc[vector], VECTOR_UNUSED);
 	}
 }
 #endif
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index c7fd185..734b54f 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -51,8 +51,9 @@ static struct irqaction irq2 = {
 	.flags = IRQF_NO_THREAD,
 };
 
-DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
-	[0 ... NR_VECTORS - 1] = VECTOR_UNUSED,
+DEFINE_PER_CPU(struct vector_irq_info, vector_irq) = {
+	.desc[0 ... NR_VECTORS - 1] = VECTOR_UNUSED,
+	.alloc = 0,
 };
 
 int vector_used_by_percpu_irq(unsigned int vector)
@@ -60,7 +61,7 @@ int vector_used_by_percpu_irq(unsigned int vector)
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
+		if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu).desc[vector]))
 			return 1;
 	}
 
@@ -94,7 +95,7 @@ void __init init_IRQ(void)
 	 * irq's migrate etc.
 	 */
 	for (i = 0; i < nr_legacy_irqs(); i++)
-		per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
+		per_cpu(vector_irq, 0).desc[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
 
 	x86_init.irqs.intr_init();
 }
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 9947269..e80758a 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -846,7 +846,7 @@ static int lguest_setup_irq(unsigned int irq)
 
 	/* Some systems map "vectors" to interrupts weirdly.  Not us! */
 	desc = irq_to_desc(irq);
-	__this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], desc);
+	__this_cpu_write(vector_irq.desc[FIRST_EXTERNAL_VECTOR + irq], desc);
 	return 0;
 }
 
-- 
2.7.4
