Message-ID: <lsq.1518323471.107345651@decadent.org.uk>
Date: Sun, 11 Feb 2018 04:31:11 +0000
From: Ben Hutchings <ben@...adent.org.uk>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
CC: akpm@...ux-foundation.org, "Tyler Baicar" <tbaicar@...eaurora.org>,
"Ingo Molnar" <mingo@...nel.org>, "Borislav Petkov" <bp@...e.de>,
"Fengguang Wu" <fengguang.wu@...el.com>,
"James Morse" <james.morse@....com>,
"Will Deacon" <will.deacon@....com>,
"Linus Torvalds" <torvalds@...ux-foundation.org>,
"Rafael J. Wysocki" <rafael.j.wysocki@...el.com>,
"Toshi Kani" <toshi.kani@....com>
Subject: [PATCH 3.16 060/136] ACPI / APEI: Replace ioremap_page_range()
with fixmap
3.16.54-rc1 review patch. If anyone has any objections, please let me know.
------------------
From: James Morse <james.morse@....com>
commit 4f89fa286f6729312e227e7c2d764e8e7b9d340e upstream.
Replace the use of ioremap_page_range() in ghes_io{re,un}map_pfn_{nmi,irq}()
with __set_fixmap(), as ioremap_page_range() may sleep to allocate a new
level of page table, even if it's passed an existing final address to
use in the mapping.
The GHES driver can only be enabled for architectures that select
HAVE_ACPI_APEI: Add fixmap entries to both x86 and arm64.
clear_fixmap() does the TLB invalidation in __set_fixmap() for arm64
and __set_pte_vaddr() for x86. In each case it's the same as the
respective arch_apei_flush_tlb_one().
Reported-by: Fengguang Wu <fengguang.wu@...el.com>
Suggested-by: Linus Torvalds <torvalds@...ux-foundation.org>
Signed-off-by: James Morse <james.morse@....com>
Reviewed-by: Borislav Petkov <bp@...e.de>
Tested-by: Tyler Baicar <tbaicar@...eaurora.org>
Tested-by: Toshi Kani <toshi.kani@....com>
[ For the arm64 bits: ]
Acked-by: Will Deacon <will.deacon@....com>
[ For the x86 bits: ]
Acked-by: Ingo Molnar <mingo@...nel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
[bwh: Backported to 3.16:
- Drop arm64 changes; ghes is x86-only here
- Don't use page or prot variables in ghes_ioremap_fn_{nmi,irq}()
- Adjust context]
Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -103,6 +103,12 @@ enum fixed_addresses {
#ifdef CONFIG_X86_INTEL_MID
FIX_LNW_VRTC,
#endif
+#ifdef CONFIG_ACPI_APEI_GHES
+ /* Used for GHES mapping from assorted contexts */
+ FIX_APEI_GHES_IRQ,
+ FIX_APEI_GHES_NMI,
+#endif
+
__end_of_permanent_fixed_addresses,
/*
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -49,6 +49,7 @@
#include <linux/aer.h>
#include <acpi/ghes.h>
+#include <asm/fixmap.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
#include <asm/nmi.h>
@@ -110,7 +111,7 @@ static DEFINE_RAW_SPINLOCK(ghes_nmi_lock
* Because the memory area used to transfer hardware error information
* from BIOS to Linux can be determined only in NMI, IRQ or timer
* handler, but general ioremap can not be used in atomic context, so
- * a special version of atomic ioremap is implemented for that.
+ * the fixmap is used instead.
*/
/*
@@ -124,8 +125,8 @@ static DEFINE_RAW_SPINLOCK(ghes_nmi_lock
/* virtual memory area for atomic ioremap */
static struct vm_struct *ghes_ioremap_area;
/*
- * These 2 spinlock is used to prevent atomic ioremap virtual memory
- * area from being mapped simultaneously.
+ * These 2 spinlocks are used to prevent the fixmap entries from being used
+ * simultaneously.
*/
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
@@ -165,44 +166,26 @@ static void ghes_ioremap_exit(void)
static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
- unsigned long vaddr;
+ __set_fixmap(FIX_APEI_GHES_NMI, pfn << PAGE_SHIFT, PAGE_KERNEL);
- vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
- ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
- pfn << PAGE_SHIFT, PAGE_KERNEL);
-
- return (void __iomem *)vaddr;
+ return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI);
}
static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
- unsigned long vaddr;
-
- vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
- ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
- pfn << PAGE_SHIFT, PAGE_KERNEL);
+ __set_fixmap(FIX_APEI_GHES_IRQ, pfn << PAGE_SHIFT, PAGE_KERNEL);
- return (void __iomem *)vaddr;
+ return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ);
}
-static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
+static void ghes_iounmap_nmi(void)
{
- unsigned long vaddr = (unsigned long __force)vaddr_ptr;
- void *base = ghes_ioremap_area->addr;
-
- BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
- unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
- __flush_tlb_one(vaddr);
+ clear_fixmap(FIX_APEI_GHES_NMI);
}
-static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
+static void ghes_iounmap_irq(void)
{
- unsigned long vaddr = (unsigned long __force)vaddr_ptr;
- void *base = ghes_ioremap_area->addr;
-
- BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
- unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
- __flush_tlb_one(vaddr);
+ clear_fixmap(FIX_APEI_GHES_IRQ);
}
static int ghes_estatus_pool_init(void)
@@ -341,10 +324,10 @@ static void ghes_copy_tofrom_phys(void *
paddr += trunk;
buffer += trunk;
if (in_nmi) {
- ghes_iounmap_nmi(vaddr);
+ ghes_iounmap_nmi();
raw_spin_unlock(&ghes_ioremap_lock_nmi);
} else {
- ghes_iounmap_irq(vaddr);
+ ghes_iounmap_irq();
spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
}
}