Message-Id: <1414484803-10311-8-git-send-email-jiang.liu@linux.intel.com>
Date: Tue, 28 Oct 2014 16:26:25 +0800
From: Jiang Liu <jiang.liu@...ux.intel.com>
To: Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>,
"Rafael J. Wysocki" <rjw@...ysocki.net>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Randy Dunlap <rdunlap@...radead.org>,
Yinghai Lu <yinghai@...nel.org>,
Borislav Petkov <bp@...en8.de>,
Grant Likely <grant.likely@...aro.org>,
Marc Zyngier <marc.zyngier@....com>,
Yingjoe Chen <yingjoe.chen@...iatek.com>, x86@...nel.org,
Matthias Brugger <matthias.bgg@...il.com>,
Jiang Liu <jiang.liu@...ux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Tony Luck <tony.luck@...el.com>,
Joerg Roedel <joro@...tes.org>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
linux-kernel@...r.kernel.org, linux-pci@...r.kernel.org,
linux-acpi@...r.kernel.org, linux-arm-kernel@...ts.infradead.org
Subject: [Patch Part2 v3 07/24] x86, uv: Use new irqdomain interfaces to allocate/free IRQ

Use the new irqdomain interfaces to allocate and free IRQs, so that
GENERIC_IRQ_LEGACY_ALLOC_HWIRQ can be killed later.
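
A sketch of the conversion, using the calls from the hunks below
(trimmed, not a standalone compilable unit; a NULL domain argument
selects the default irqdomain, the x86 vector domain in this series):

	/* Old interfaces, backed by GENERIC_IRQ_LEGACY_ALLOC_HWIRQ: */
	irq = irq_alloc_hwirq(uv_blade_to_memory_nid(mmr_blade));
	if (!irq)		/* returns 0 on failure */
		return -EBUSY;
	...
	irq_free_hwirq(irq);

	/* New hierarchical irqdomain interfaces: */
	struct irq_alloc_info info;

	init_irq_alloc_info(&info, cpumask_of(cpu));
	irq = irq_domain_alloc_irqs(NULL, 1, uv_blade_to_memory_nid(mmr_blade),
				    &info);
	if (irq <= 0)		/* returns a virq > 0 on success */
		return -EBUSY;
	...
	irq_domain_free_irqs(irq, 1);
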
Signed-off-by: Jiang Liu <jiang.liu@...ux.intel.com>
---
 arch/x86/platform/uv/uv_irq.c | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)

diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 0ce673645432..474912d03f40 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -12,6 +12,7 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 
 #include <asm/apic.h>
 #include <asm/uv/uv_irq.h>
@@ -130,24 +131,14 @@ static int
 arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 		   unsigned long mmr_offset, int limit)
 {
-	const struct cpumask *eligible_cpu = cpumask_of(cpu);
 	struct irq_cfg *cfg = irq_cfg(irq);
 	unsigned long mmr_value;
 	struct uv_IO_APIC_route_entry *entry;
-	int mmr_pnode, err;
-	unsigned int dest;
+	int mmr_pnode;
 
 	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
 			sizeof(unsigned long));
 
-	err = assign_irq_vector(irq, cfg, eligible_cpu);
-	if (err != 0)
-		return err;
-
-	err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
-	if (err != 0)
-		return err;
-
 	if (limit == UV_AFFINITY_CPU)
 		irq_set_status_flags(irq, IRQ_NO_BALANCING);
 	else
@@ -164,7 +155,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	entry->polarity = 0;
 	entry->trigger = 0;
 	entry->mask = 0;
-	entry->dest = dest;
+	entry->dest = cfg->dest_apicid;
 
 	mmr_pnode = uv_blade_to_pnode(mmr_blade);
 	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -238,9 +229,13 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
 		 unsigned long mmr_offset, int limit)
 {
-	int ret, irq = irq_alloc_hwirq(uv_blade_to_memory_nid(mmr_blade));
+	int ret, irq;
+	struct irq_alloc_info info;
 
-	if (!irq)
+	init_irq_alloc_info(&info, cpumask_of(cpu));
+	irq = irq_domain_alloc_irqs(NULL, 1, uv_blade_to_memory_nid(mmr_blade),
+				    &info);
+	if (irq <= 0)
 		return -EBUSY;
 
 	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
@@ -248,7 +243,7 @@ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
 	if (ret == irq)
 		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
 	else
-		irq_free_hwirq(irq);
+		irq_domain_free_irqs(irq, 1);
 
 	return ret;
 }
@@ -283,6 +278,6 @@ void uv_teardown_irq(unsigned int irq)
 		n = n->rb_right;
 	}
 	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
-	irq_free_hwirq(irq);
+	irq_domain_free_irqs(irq, 1);
 }
 EXPORT_SYMBOL_GPL(uv_teardown_irq);
--
1.7.10.4