Message-Id: <1401262770-25343-6-git-send-email-tomasz.nowicki@linaro.org>
Date: Wed, 28 May 2014 09:39:30 +0200
From: Tomasz Nowicki <tomasz.nowicki@...aro.org>
To: rjw@...ysocki.net, lenb@...nel.org, tony.luck@...el.com,
bp@...en8.de, m.chehab@...sung.com, bp@...e.de
Cc: linux-edac@...r.kernel.org, x86@...nel.org,
linux-acpi@...r.kernel.org, linux-kernel@...r.kernel.org,
linaro-acpi@...ts.linaro.org,
Tomasz Nowicki <tomasz.nowicki@...aro.org>
Subject: [PATCH v2 5/5] acpi, apei, ghes: Factor out ioremap virtual memory for IRQ and NMI context.

GHES currently maps two pages with atomic ioremap. Now that NMI
notification is optional, there is no need to allocate an NMI page
for platforms without NMI support.

To make it possible to not use the second page, swap the existing
page order so that the IRQ context page comes first and the optional
NMI context page second. Then, let CONFIG_ACPI_APEI_NMI decide how
many pages need to be allocated. Finally, put in sanity checks to
avoid accessing unallocated memory.
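
The sanity check itself is not visible in the hunks below. A minimal
sketch of what the NMI-side map helper could look like under the new
page layout (the helper name ghes_ioremap_pfn_nmi() matches the
existing ghes.c code, but this exact body is illustrative only, not
an applied hunk):

static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
	unsigned long vaddr;

	/* The NMI page exists only when CONFIG_ACPI_APEI_NMI reserved
	 * it in ghes_ioremap_init(); catch stray NMI-path callers. */
	BUG_ON(!IS_ENABLED(CONFIG_ACPI_APEI_NMI));

	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}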
Signed-off-by: Tomasz Nowicki <tomasz.nowicki@...aro.org>
---
 arch/x86/kernel/acpi/apei.c |    6 ++++++
 drivers/acpi/apei/ghes.c    |   16 ++++++++--------
 include/acpi/apei.h         |    1 +
 3 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c
index 221a3a6..842d158 100644
--- a/arch/x86/kernel/acpi/apei.c
+++ b/arch/x86/kernel/acpi/apei.c
@@ -79,3 +79,9 @@ void arch_apei_nmi_oops_begin(void)
 {
 	oops_begin();
 }
+
+void arch_apei_flush_tlb_one(unsigned long addr)
+{
+	__flush_tlb_one(addr);
+}
+EXPORT_SYMBOL_GPL(arch_apei_flush_tlb_one);
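
This wrapper keeps the x86-only __flush_tlb_one() out of the generic
GHES code. For illustration only (not part of this series), an
architecture that lacks a single-entry kernel TLB flush primitive
could hypothetically back the same hook with its ranged flush:

void arch_apei_flush_tlb_one(unsigned long addr)
{
	/* Flush just the one remapped page; assumes the architecture
	 * provides the generic flush_tlb_kernel_range() helper. */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(arch_apei_flush_tlb_one);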
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 3c42629..55bf47f 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -113,12 +113,11 @@ static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
  */
 
 /*
- * Two virtual pages are used, one for NMI context, the other for
- * IRQ/PROCESS context
+ * Two virtual pages are used, one for IRQ/PROCESS context, the other for
+ * NMI context (optionally).
  */
-#define GHES_IOREMAP_PAGES 2
-#define GHES_IOREMAP_NMI_PAGE(base)	(base)
-#define GHES_IOREMAP_IRQ_PAGE(base)	((base) + PAGE_SIZE)
+#define GHES_IOREMAP_IRQ_PAGE(base)	(base)
+#define GHES_IOREMAP_NMI_PAGE(base)	((base) + PAGE_SIZE)
 
 /* virtual memory area for atomic ioremap */
 static struct vm_struct *ghes_ioremap_area;
@@ -155,7 +154,8 @@ static struct ghes_notify_setup ghes_notify_tab[];
 
 static int ghes_ioremap_init(void)
 {
-	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
+	ghes_ioremap_area = __get_vm_area(
+		PAGE_SIZE * (IS_ENABLED(CONFIG_ACPI_APEI_NMI) ? 2 : 1),
 		VM_IOREMAP, VMALLOC_START, VMALLOC_END);
 	if (!ghes_ioremap_area) {
 		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
@@ -199,7 +199,7 @@ static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
 
 	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
 	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
-	__flush_tlb_one(vaddr);
+	arch_apei_flush_tlb_one(vaddr);
 }
 
 static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
@@ -209,7 +209,7 @@ static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
 
 	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
 	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
-	__flush_tlb_one(vaddr);
+	arch_apei_flush_tlb_one(vaddr);
 }
 
 static int ghes_estatus_pool_init(void)
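
For reference, the unmap paths touched above pair with the map helpers
as follows in IRQ/PROCESS context. This is a condensed sketch: the
ghes_read_phys() name is hypothetical, while ghes_ioremap_pfn_irq(),
ghes_iounmap_irq() and the ghes_ioremap_lock_irq spinlock are the
existing ghes.c symbols:

static void ghes_read_phys(phys_addr_t paddr, void *buf, u32 len)
{
	void __iomem *vaddr;
	unsigned long flags;

	raw_spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
	vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
	/* assumes the read does not cross the single remapped page */
	memcpy_fromio(buf, vaddr + (paddr & ~PAGE_MASK), len);
	ghes_iounmap_irq(vaddr);	/* ends in arch_apei_flush_tlb_one() */
	raw_spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
}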
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index 348e1ea..ff2bb7e 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -51,6 +51,7 @@ int arch_apei_register_nmi(int (*nmi_handler)(unsigned int, struct pt_regs *),
			   const char *name);
 void arch_apei_unregister_nmi(const char *name);
 void arch_apei_nmi_oops_begin(void);
+void arch_apei_flush_tlb_one(unsigned long addr);
 #endif
 
 #endif
--
1.7.9.5