Message-Id: <a5bcc82c5f6ff14aab37679a2e21cd0f14595ca7.1504235838.git.yu.c.chen@intel.com>
Date:   Fri,  1 Sep 2017 13:04:20 +0800
From:   Chen Yu <yu.c.chen@...el.com>
To:     x86@...nel.org
Cc:     Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        "H. Peter Anvin" <hpa@...or.com>, Rui Zhang <rui.zhang@...el.com>,
        linux-kernel@...r.kernel.org, Chen Yu <yu.c.chen@...el.com>,
        "Rafael J. Wysocki" <rjw@...ysocki.net>,
        Len Brown <lenb@...nel.org>,
        Dan Williams <dan.j.williams@...el.com>
Subject: [PATCH 2/4][RFC v2] x86/apic: Record the number of vectors assigned on a CPU

Track the number of vectors assigned on each CPU during the
vector allocation/free process. This prepares for the subsequent
vector spreading work, which needs to find the CPU with the
fewest vectors assigned.
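
For illustration only (not part of this patch): the spreading logic
could consult the new per-CPU counter roughly as follows to pick a
target CPU. The helper name find_least_loaded_cpu() is made up for
this sketch, and the caller is assumed to hold vector_lock:

	/*
	 * Hypothetical sketch: return the online CPU in @mask with
	 * the fewest vectors currently assigned, according to the
	 * per-CPU counter introduced below.
	 */
	static int find_least_loaded_cpu(const struct cpumask *mask)
	{
		unsigned int min_alloc = UINT_MAX;
		int cpu, best = -1;

		for_each_cpu_and(cpu, mask, cpu_online_mask) {
			unsigned int alloc = per_cpu(vector_irq, cpu).alloc;

			if (alloc < min_alloc) {
				min_alloc = alloc;
				best = cpu;
			}
		}
		return best;
	}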

Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: "Rafael J. Wysocki" <rjw@...ysocki.net>
Cc: Len Brown <lenb@...nel.org>
Cc: Dan Williams <dan.j.williams@...el.com>
Signed-off-by: Chen Yu <yu.c.chen@...el.com>
---
 arch/x86/include/asm/hw_irq.h |  8 ++++++++
 arch/x86/kernel/apic/vector.c | 45 ++++++++++++++++++++++++++++++++++++++++++-
 arch/x86/kernel/irq.c         |  5 ++++-
 arch/x86/kernel/irqinit.c     |  1 +
 arch/x86/lguest/boot.c        |  1 +
 5 files changed, 58 insertions(+), 2 deletions(-)
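
Note for readers without the rest of the series applied: the hunks
below assume the per-CPU vector table has already been converted
(presumably by an earlier patch in this series) from a bare array
into a container that carries the new counter, roughly like this
simplified sketch (not the literal definition):

	typedef struct {
		/* vector number -> irq descriptor; VECTOR_UNUSED if free */
		struct irq_desc *desc[NR_VECTORS];
		/* number of vectors currently assigned on this CPU */
		unsigned int alloc;
	} vector_irq_t;

	DECLARE_PER_CPU(vector_irq_t, vector_irq);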

diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index b2243fe..d1b3c61 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -151,6 +151,10 @@ extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
 extern void lock_vector_lock(void);
 extern void unlock_vector_lock(void);
 extern void setup_vector_irq(int cpu);
+extern void inc_vector_alloc(const struct cpumask *mask,
+			     int count);
+extern void dec_vector_alloc(const struct cpumask *mask,
+			     int count);
 #ifdef CONFIG_SMP
 extern void send_cleanup_vector(struct irq_cfg *);
 extern void irq_complete_move(struct irq_cfg *cfg);
@@ -163,6 +167,10 @@ extern void apic_ack_edge(struct irq_data *data);
 #else	/*  CONFIG_X86_LOCAL_APIC */
 static inline void lock_vector_lock(void) {}
 static inline void unlock_vector_lock(void) {}
+static inline void inc_vector_alloc(const struct cpumask *mask,
+				    int count) {}
+static inline void dec_vector_alloc(const struct cpumask *mask,
+				    int count) {}
 #endif	/* CONFIG_X86_LOCAL_APIC */
 
 /* Statistics */
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 2ce1021..4ff84c0 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -50,6 +50,36 @@ void unlock_vector_lock(void)
 	raw_spin_unlock(&vector_lock);
 }
 
+static void update_vectors_alloc(const struct cpumask *mask,
+				 int count, bool add)
+{
+	int cpu;
+
+	for_each_cpu(cpu, mask) {
+		int cur_alloc = per_cpu(vector_irq, cpu).alloc;
+
+		/* Update the number of vectors assigned on this CPU. */
+		if (add && (cur_alloc + count <= NR_VECTORS))
+			per_cpu(vector_irq, cpu).alloc += count;
+		else if (!add && cur_alloc >= count)
+			per_cpu(vector_irq, cpu).alloc -= count;
+		else
+			continue;
+	}
+}
+
+void inc_vector_alloc(const struct cpumask *mask,
+		      int count)
+{
+	update_vectors_alloc(mask, count, true);
+}
+
+void dec_vector_alloc(const struct cpumask *mask,
+		      int count)
+{
+	update_vectors_alloc(mask, count, false);
+}
+
 static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
 {
 	if (!irq_data)
@@ -191,6 +221,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 		for_each_cpu(new_cpu, vector_searchmask)
 			per_cpu(vector_irq, new_cpu).desc[vector] = irq_to_desc(irq);
 
+		inc_vector_alloc(vector_searchmask, 1);
 		goto update;
 
 next_cpu:
@@ -263,6 +294,7 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
 	struct irq_desc *desc;
 	int cpu, vector;
+	struct cpumask mask;
 
 	if (!data->cfg.vector)
 		return;
@@ -271,6 +303,9 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
 		per_cpu(vector_irq, cpu).desc[vector] = VECTOR_UNUSED;
 
+	cpumask_and(&mask, data->domain, cpu_online_mask);
+	dec_vector_alloc(&mask, 1);
+
 	data->cfg.vector = 0;
 	cpumask_clear(data->domain);
 
@@ -289,6 +324,7 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 			if (per_cpu(vector_irq, cpu).desc[vector] != desc)
 				continue;
 			per_cpu(vector_irq, cpu).desc[vector] = VECTOR_UNUSED;
+			dec_vector_alloc(cpumask_of(cpu), 1);
 			break;
 		}
 	}
@@ -483,6 +519,7 @@ static void __setup_vector_irq(int cpu)
 			continue;
 		vector = data->cfg.vector;
 		per_cpu(vector_irq, cpu).desc[vector] = desc;
+		inc_vector_alloc(cpumask_of(cpu), 1);
 	}
 	/* Mark the free vectors */
 	for (vector = 0; vector < NR_VECTORS; ++vector) {
@@ -491,8 +528,10 @@ static void __setup_vector_irq(int cpu)
 			continue;
 
 		data = apic_chip_data(irq_desc_get_irq_data(desc));
-		if (!cpumask_test_cpu(cpu, data->domain))
+		if (!cpumask_test_cpu(cpu, data->domain)) {
 			per_cpu(vector_irq, cpu).desc[vector] = VECTOR_UNUSED;
+			dec_vector_alloc(cpumask_of(cpu), 1);
+		}
 	}
 }
 
@@ -514,6 +553,7 @@ void setup_vector_irq(int cpu)
 	for (irq = 0; irq < nr_legacy_irqs(); irq++)
 		per_cpu(vector_irq, cpu).desc[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);
 
+	inc_vector_alloc(cpumask_of(cpu), nr_legacy_irqs());
 	__setup_vector_irq(cpu);
 }
 
@@ -649,6 +689,7 @@ asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
 			goto unlock;
 		}
 		__this_cpu_write(vector_irq.desc[vector], VECTOR_UNUSED);
+		dec_vector_alloc(cpumask_of(me), 1);
 		cpumask_clear_cpu(me, data->old_domain);
 unlock:
 		raw_spin_unlock(&desc->lock);
@@ -784,6 +825,8 @@ void irq_force_complete_move(struct irq_desc *desc)
 	for_each_cpu(cpu, data->old_domain)
 		per_cpu(vector_irq, cpu).desc[cfg->old_vector] = VECTOR_UNUSED;
 
+	dec_vector_alloc(data->old_domain, 1);
+
 	/* Cleanup the left overs of the (half finished) move */
 	cpumask_clear(data->old_domain);
 	data->move_in_progress = 0;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index ae11e86..67c01b8 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -250,6 +250,7 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 					     vector);
 		} else {
 			__this_cpu_write(vector_irq.desc[vector], VECTOR_UNUSED);
+			dec_vector_alloc(cpumask_of(smp_processor_id()), 1);
 		}
 	}
 
@@ -491,8 +492,10 @@ void fixup_irqs(void)
 			}
 			raw_spin_unlock(&desc->lock);
 		}
-		if (__this_cpu_read(vector_irq.desc[vector]) != VECTOR_RETRIGGERED)
+		if (__this_cpu_read(vector_irq.desc[vector]) != VECTOR_RETRIGGERED) {
 			__this_cpu_write(vector_irq.desc[vector], VECTOR_UNUSED);
+			dec_vector_alloc(cpumask_of(smp_processor_id()), 1);
+		}
 	}
 }
 #endif
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 734b54f..dd618c1 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -97,6 +97,7 @@ void __init init_IRQ(void)
 	for (i = 0; i < nr_legacy_irqs(); i++)
 		per_cpu(vector_irq, 0).desc[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
 
+	inc_vector_alloc(cpumask_of(0), nr_legacy_irqs());
 	x86_init.irqs.intr_init();
 }
 
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index e80758a..0696354 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -847,6 +847,7 @@ static int lguest_setup_irq(unsigned int irq)
 	/* Some systems map "vectors" to interrupts weirdly.  Not us! */
 	desc = irq_to_desc(irq);
 	__this_cpu_write(vector_irq.desc[FIRST_EXTERNAL_VECTOR + irq], desc);
+	inc_vector_alloc(cpumask_of(smp_processor_id()), 1);
 	return 0;
 }
 
-- 
2.7.4
