Message-Id: <20201029222652.084086429@linutronix.de>
Date: Thu, 29 Oct 2020 23:18:21 +0100
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: linux-arch@...r.kernel.org,
Linus Torvalds <torvalds@...uxfoundation.org>,
Peter Zijlstra <peterz@...radead.org>,
Paul McKenney <paulmck@...nel.org>,
David Airlie <airlied@...ux.ie>,
Daniel Vetter <daniel@...ll.ch>,
Ard Biesheuvel <ardb@...nel.org>,
Herbert Xu <herbert@...dor.apana.org.au>,
Christoph Hellwig <hch@....de>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Ingo Molnar <mingo@...nel.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
x86@...nel.org, Vineet Gupta <vgupta@...opsys.com>,
linux-snps-arc@...ts.infradead.org,
Russell King <linux@...linux.org.uk>,
Arnd Bergmann <arnd@...db.de>,
linux-arm-kernel@...ts.infradead.org, Guo Ren <guoren@...nel.org>,
linux-csky@...r.kernel.org, Michal Simek <monstr@...str.eu>,
Thomas Bogendoerfer <tsbogend@...ha.franken.de>,
linux-mips@...r.kernel.org, Nick Hu <nickhu@...estech.com>,
Greentime Hu <green.hu@...il.com>,
Vincent Chen <deanbo422@...il.com>,
Michael Ellerman <mpe@...erman.id.au>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
linuxppc-dev@...ts.ozlabs.org,
"David S. Miller" <davem@...emloft.net>,
sparclinux@...r.kernel.org, Chris Zankel <chris@...kel.net>,
Max Filippov <jcmvbkbc@...il.com>,
linux-xtensa@...ux-xtensa.org
Subject: [patch V2 15/18] io-mapping: Cleanup atomic iomap
Switch the atomic iomap implementation over to kmap_local and move the
preempt/pagefault mechanics into the generic code, similar to the
kmap_atomic variants.
Rename the x86 map function in preparation for a non-atomic variant.
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
V2: New patch to make review easier
---
arch/x86/include/asm/iomap.h | 9 +--------
arch/x86/mm/iomap_32.c | 6 ++----
include/linux/io-mapping.h | 8 ++++++--
3 files changed, 9 insertions(+), 14 deletions(-)
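[ For context, not part of the patch: a minimal sketch of how a driver
  typically uses the atomic io-mapping interface. The calling convention
  is unchanged by this change; only the place where preemption and
  pagefaults are disabled moves into the generic wrapper. The mapping
  setup, function name and register offset below are assumptions for
  illustration only. ]

#include <linux/io.h>
#include <linux/io-mapping.h>

/* Hypothetical caller: "iomap" is assumed to have been set up earlier
 * with io_mapping_create_wc(base, size). */
static void example_write_reg(struct io_mapping *iomap,
			      unsigned long offset, u32 val)
{
	void __iomem *vaddr;

	/* The generic wrapper now disables preemption and pagefaults
	 * before calling the renamed __iomap_local_pfn_prot(). */
	vaddr = io_mapping_map_atomic_wc(iomap, offset);
	writel(val, vaddr);
	/* Unmapping re-enables pagefaults and preemption in generic code. */
	io_mapping_unmap_atomic(vaddr);
}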
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -13,14 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
-
-static inline void iounmap_atomic(void __iomem *vaddr)
-{
- kunmap_local_indexed((void __force *)vaddr);
- pagefault_enable();
- preempt_enable();
-}
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,7 +44,7 @@ void iomap_free(resource_size_t base, un
}
EXPORT_SYMBOL_GPL(iomap_free);
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
/*
* For non-PAT systems, translate non-WB request to UC- just in
@@ -60,8 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsi
/* Filter out unsupported __PAGE_KERNEL* bits: */
pgprot_val(prot) &= __default_kernel_pte_mask;
- preempt_disable();
- pagefault_disable();
return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
}
-EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot);
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -69,13 +69,17 @@ io_mapping_map_atomic_wc(struct io_mappi
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- return iomap_atomic_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+ preempt_disable();
+ pagefault_disable();
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
- iounmap_atomic(vaddr);
+ kunmap_local_indexed((void __force *)vaddr);
+ pagefault_enable();
+ preempt_enable();
}
static inline void __iomem *
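[ Not part of the patch: a rough sketch of what the non-atomic variant
  the rename prepares for could look like, built on
  __iomap_local_pfn_prot(). The io_mapping_map_local_wc() /
  io_mapping_unmap_local() names and bodies below are assumptions for
  illustration, not the actual follow-up implementation. ]

static inline void __iomem *
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	/* No preempt/pagefault disable: kmap_local style mappings remain
	 * valid across preemption because they are preserved on context
	 * switch. */
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}

static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
}

The rename matters for exactly this reason: the x86 helper no longer
implies atomic context, so an atomic and a local wrapper can share it.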