Message-Id: <1416901802-24211-34-git-send-email-jiang.liu@linux.intel.com>
Date: Tue, 25 Nov 2014 15:49:57 +0800
From: Jiang Liu <jiang.liu@...ux.intel.com>
To: Bjorn Helgaas <bhelgaas@...gle.com>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>,
"Rafael J. Wysocki" <rjw@...ysocki.net>,
Randy Dunlap <rdunlap@...radead.org>,
Yinghai Lu <yinghai@...nel.org>,
Borislav Petkov <bp@...en8.de>, x86@...nel.org,
Joerg Roedel <joro@...tes.org>,
Jiang Liu <jiang.liu@...ux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Tony Luck <tony.luck@...el.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
linux-kernel@...r.kernel.org, linux-pci@...r.kernel.org,
linux-acpi@...r.kernel.org, iommu@...ts.linux-foundation.org
Subject: [Patch Part3 v4 33/38] x86, irq: Move check of cfg->move_in_progress into send_cleanup_vector()

Move check of cfg->move_in_progress into send_cleanup_vector() to
prepare for simplifying struct irq_cfg.

Signed-off-by: Jiang Liu <jiang.liu@...ux.intel.com>
Tested-by: Joerg Roedel <jroedel@...e.de>
---
 arch/x86/kernel/apic/vector.c       | 10 ++++++++--
 arch/x86/platform/uv/uv_irq.c       |  3 +--
 drivers/iommu/amd_iommu.c           |  3 +--
 drivers/iommu/intel_irq_remapping.c |  3 +--
 4 files changed, 11 insertions(+), 8 deletions(-)

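A note on the pattern, for reviewers: the hunks below are all one
refactoring, hoisting the cfg->move_in_progress guard out of the
callers and into the public entry point send_cleanup_vector(), while
the one caller that already does its own checking
(__irq_complete_move()) switches to the unguarded helper. Below is a
minimal stand-alone sketch of the shape of the change; it is plain C
outside the kernel, struct irq_cfg is pared down to the one field that
matters here, and the printf() merely stands in for the real cleanup
IPI:

#include <stdbool.h>
#include <stdio.h>

struct irq_cfg {
        bool move_in_progress;  /* a vector move is pending cleanup */
};

/* Internal helper: does the cleanup unconditionally.  Callers that
 * have already performed their own checks use this directly. */
static void __send_cleanup_vector(struct irq_cfg *cfg)
{
        printf("sending cleanup vector\n");  /* stand-in for the IPI */
        cfg->move_in_progress = false;
}

/* Public entry point: now owns the move_in_progress test, so callers
 * no longer need to inspect struct irq_cfg themselves. */
void send_cleanup_vector(struct irq_cfg *cfg)
{
        if (cfg->move_in_progress)
                __send_cleanup_vector(cfg);
}

int main(void)
{
        struct irq_cfg cfg = { .move_in_progress = true };

        send_cleanup_vector(&cfg);  /* performs the cleanup once */
        send_cleanup_vector(&cfg);  /* no-op: move already finished */
        return 0;
}

With the guard centralized, no call site can forget the check, and
move_in_progress can eventually become private to the vector
management code, which is the simplification the changelog refers to.
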
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 436a3400d9ac..a5ce2eef0528 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -495,7 +495,7 @@ static struct irq_chip lapic_controller = {
 };
 
 #ifdef CONFIG_SMP
-void send_cleanup_vector(struct irq_cfg *cfg)
+static void __send_cleanup_vector(struct irq_cfg *cfg)
 {
         cpumask_var_t cleanup_mask;
 
@@ -513,6 +513,12 @@ void send_cleanup_vector(struct irq_cfg *cfg)
         cfg->move_in_progress = 0;
 }
 
+void send_cleanup_vector(struct irq_cfg *cfg)
+{
+        if (cfg->move_in_progress)
+                __send_cleanup_vector(cfg);
+}
+
 asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 {
         unsigned vector, me;
@@ -583,7 +589,7 @@ static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
         me = smp_processor_id();
 
         if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-                send_cleanup_vector(cfg);
+                __send_cleanup_vector(cfg);
 }
 
 void irq_complete_move(struct irq_cfg *cfg)
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index c237ed34a498..b242e0aada19 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -63,8 +63,7 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
         ret = parent->chip->irq_set_affinity(parent, mask, force);
         if (ret >= 0) {
                 uv_program_mmr(cfg, data->chip_data);
-                if (cfg->move_in_progress)
-                        send_cleanup_vector(cfg);
+                send_cleanup_vector(cfg);
         }
 
         return ret;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2e8bbe5e164d..a5c59b434c77 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4331,8 +4331,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
          * at the new destination. So, time to cleanup the previous
          * vector allocation.
          */
-        if (cfg->move_in_progress)
-                send_cleanup_vector(cfg);
+        send_cleanup_vector(cfg);
 
         return IRQ_SET_MASK_OK_DONE;
 }
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index acff8493191e..f6da3b2651c0 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -997,8 +997,7 @@ intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
          * at the new destination. So, time to cleanup the previous
          * vector allocation.
          */
-        if (cfg->move_in_progress)
-                send_cleanup_vector(cfg);
+        send_cleanup_vector(cfg);
 
         return IRQ_SET_MASK_OK_DONE;
 }
--
1.7.10.4