Message-ID: <tip-111abeba67e0dbdc26537429de9155e4f1d807d8@git.kernel.org>
Date: Sat, 16 Jan 2016 13:16:15 -0800
From: tip-bot for Jiang Liu <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: mingo@...nel.org, linux@...ck-us.net, jmmahler@...il.com,
joe.lawrence@...atus.com, hpa@...or.com, tglx@...utronix.de,
linux-kernel@...r.kernel.org, bp@...en8.de,
jiang.liu@...ux.intel.com
Subject: [tip:x86/urgent] x86/irq: Fix a race in x86_vector_free_irqs()
Commit-ID: 111abeba67e0dbdc26537429de9155e4f1d807d8
Gitweb: http://git.kernel.org/tip/111abeba67e0dbdc26537429de9155e4f1d807d8
Author: Jiang Liu <jiang.liu@...ux.intel.com>
AuthorDate: Thu, 31 Dec 2015 16:30:44 +0000
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitDate: Fri, 15 Jan 2016 13:43:58 +0100
x86/irq: Fix a race in x86_vector_free_irqs()
There's a race condition between

x86_vector_free_irqs()
{
	free_apic_chip_data(irq_data->chip_data);
	xxxxx	//irq_data->chip_data has been freed, but the pointer
		//hasn't been reset yet
	irq_domain_reset_irq_data(irq_data);
}

and

smp_irq_move_cleanup_interrupt()
{
	raw_spin_lock(&vector_lock);
	data = apic_chip_data(irq_desc_get_irq_data(desc));
	access data->xxxx // may access freed memory
	raw_spin_unlock(&desc->lock);
}

which may cause smp_irq_move_cleanup_interrupt() to access freed memory.

Call irq_domain_reset_irq_data(), which clears the pointer, while the
vector lock is held.
[ tglx: Free memory outside of lock held region. ]
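
Purely for illustration (not part of the patch): a minimal user-space
sketch of the ordering the fix establishes, i.e. clear the shared pointer
while holding the lock and free the memory only after dropping it, as the
tglx note above describes. The names (shared_data, free_side,
cleanup_side) are made up, and a pthread mutex stands in for the kernel's
raw spinlock; build with gcc -pthread.

/* Sketch only, not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct chip_data { int vector; };

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;
static struct chip_data *shared_data;	/* role of irq_data->chip_data */

/*
 * Racy order (what x86_vector_free_irqs() used to do):
 *	free(shared_data);	<- memory gone
 *	...window...		<- cleanup_side() can still load the stale pointer
 *	shared_data = NULL;
 *
 * Fixed order: reset the pointer with the lock held, free afterwards.
 */
static void free_side(void)
{
	struct chip_data *tmp;

	pthread_mutex_lock(&vector_lock);
	tmp = shared_data;
	shared_data = NULL;		/* ~ irq_domain_reset_irq_data() */
	pthread_mutex_unlock(&vector_lock);

	free(tmp);			/* ~ free_apic_chip_data(), outside the lock */
}

/*
 * The concurrent reader (~ smp_irq_move_cleanup_interrupt()) only ever
 * sees a valid pointer or NULL under the lock, never a freed one.
 */
static void *cleanup_side(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&vector_lock);
	if (shared_data)
		printf("vector = %d\n", shared_data->vector);
	pthread_mutex_unlock(&vector_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	shared_data = malloc(sizeof(*shared_data));
	shared_data->vector = 42;

	pthread_create(&t, NULL, cleanup_side, NULL);
	free_side();
	pthread_join(t, NULL);
	return 0;
}
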
Signed-off-by: Jiang Liu <jiang.liu@...ux.intel.com>
Tested-by: Borislav Petkov <bp@...en8.de>
Tested-by: Joe Lawrence <joe.lawrence@...atus.com>
Cc: Jeremiah Mahler <jmmahler@...il.com>
Cc: andy.shevchenko@...il.com
Cc: Guenter Roeck <linux@...ck-us.net>
Cc: stable@...r.kernel.org #4.3+
Link: http://lkml.kernel.org/r/1450880014-11741-3-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
arch/x86/kernel/apic/vector.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 908cb37..cf1e325 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -226,10 +226,8 @@ static int assign_irq_vector_policy(int irq, int node,
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
 	struct irq_desc *desc;
-	unsigned long flags;
 	int cpu, vector;
 
-	raw_spin_lock_irqsave(&vector_lock, flags);
 	BUG_ON(!data->cfg.vector);
 
 	vector = data->cfg.vector;
@@ -239,10 +237,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 	data->cfg.vector = 0;
 	cpumask_clear(data->domain);
 
-	if (likely(!data->move_in_progress)) {
-		raw_spin_unlock_irqrestore(&vector_lock, flags);
+	if (likely(!data->move_in_progress))
 		return;
-	}
 
 	desc = irq_to_desc(irq);
 	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
@@ -255,7 +251,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 		}
 	}
 	data->move_in_progress = 0;
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 void init_irq_alloc_info(struct irq_alloc_info *info,
@@ -276,19 +271,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
 static void x86_vector_free_irqs(struct irq_domain *domain,
 				 unsigned int virq, unsigned int nr_irqs)
 {
+	struct apic_chip_data *apic_data;
 	struct irq_data *irq_data;
+	unsigned long flags;
 	int i;
 
 	for (i = 0; i < nr_irqs; i++) {
 		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
 		if (irq_data && irq_data->chip_data) {
+			raw_spin_lock_irqsave(&vector_lock, flags);
 			clear_irq_vector(virq + i, irq_data->chip_data);
-			free_apic_chip_data(irq_data->chip_data);
+			apic_data = irq_data->chip_data;
+			irq_domain_reset_irq_data(irq_data);
+			raw_spin_unlock_irqrestore(&vector_lock, flags);
+			free_apic_chip_data(apic_data);
 #ifdef CONFIG_X86_IO_APIC
 			if (virq + i < nr_legacy_irqs())
 				legacy_irq_data[virq + i] = NULL;
 #endif
-			irq_domain_reset_irq_data(irq_data);
 		}
 	}
 }
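
For readability, this is how x86_vector_free_irqs() reads with the patch
applied, reassembled from the hunk above (unchanged context lines kept
verbatim):

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apic_data;
	struct irq_data *irq_data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			/* Reset the pointer under vector_lock ... */
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(virq + i, irq_data->chip_data);
			apic_data = irq_data->chip_data;
			irq_domain_reset_irq_data(irq_data);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			/* ... and free the memory outside the locked region. */
			free_apic_chip_data(apic_data);
#ifdef CONFIG_X86_IO_APIC
			if (virq + i < nr_legacy_irqs())
				legacy_irq_data[virq + i] = NULL;
#endif
		}
	}
}

Note that clear_irq_vector() no longer takes vector_lock itself; it now
relies on its caller holding the lock.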