Message-Id: <75718e9104633bad3c71e0c7a9e931864f59dee0.1665568707.git.christophe.leroy@csgroup.eu>
Date: Wed, 12 Oct 2022 12:09:41 +0200
From: Christophe Leroy <christophe.leroy@...roup.eu>
To: Baoquan He <bhe@...hat.com>
Cc: Christophe Leroy <christophe.leroy@...roup.eu>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, linux-arch@...r.kernel.org,
akpm@...ux-foundation.org, hch@...radead.org,
agordeev@...ux.ibm.com, wangkefeng.wang@...wei.com,
schnelle@...ux.ibm.com, David.Laight@...LAB.COM, shorne@...il.com,
Vineet Gupta <vgupta@...nel.org>,
linux-snps-arc@...ts.infradead.org
Subject: [RFC PATCH 5/8] arc: mm: Convert to GENERIC_IOREMAP
From: Baoquan He <bhe@...hat.com>
By taking the GENERIC_IOREMAP method, the generic ioremap_prot() and
iounmap() become visible and available to the arch. The arch only needs
to provide arch-specific hooks where special handling is needed in its
ioremap() or iounmap(). This change simplifies the implementation by
removing code duplicated from the generic ioremap() and iounmap(), and
keeps the functionality equivalent to before.
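For reference, the generic iounmap() consults the arch hook before
tearing the mapping down. A minimal sketch, modeled on mm/ioremap.c of
that era (the exact code used by this series may differ):

  void iounmap(volatile void __iomem *addr)
  {
          void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);

          /* let the arch veto the unmap, e.g. for fixed 1:1 regions */
          if (!iounmap_allowed(vaddr))
                  return;

          if (is_vmalloc_addr(vaddr))
                  vunmap(vaddr);
  }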
Here, provide an arc-specific iounmap_allowed() hook for arc's special
handling at iounmap() time, and turn arc's ioremap_prot() into a thin
wrapper around generic_ioremap_prot(). Meanwhile, keep arc's own
ioremap() definition, because arc needs special handling there compared
with the standard ioremap().
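Callers are not affected. A hypothetical driver sequence (foo_init_mmio(),
"res" and CTRL_REG are illustrative only, not part of this patch) keeps
behaving as before:

  static int foo_init_mmio(struct resource *res)
  {
          void __iomem *regs;

          regs = ioremap(res->start, resource_size(res));
          if (!regs)
                  return -ENOMEM;

          writel(1, regs + CTRL_REG);     /* same semantics as before */

          /* still a no-op when the region is in arc's h/w uncached space */
          iounmap(regs);
          return 0;
  }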
Signed-off-by: Baoquan He <bhe@...hat.com>
Cc: Vineet Gupta <vgupta@...nel.org>
Cc: linux-snps-arc@...ts.infradead.org
Signed-off-by: Christophe Leroy <christophe.leroy@...roup.eu>
---
arch/arc/Kconfig | 1 +
arch/arc/include/asm/io.h | 7 +++---
arch/arc/mm/ioremap.c | 46 +++------------------------------------
3 files changed, 8 insertions(+), 46 deletions(-)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 9e3653253ef2..a08d2abfaf61 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -26,6 +26,7 @@ config ARC
select GENERIC_PENDING_IRQ if SMP
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_IOREMAP
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 8f777d6441a5..53b0f1e4f276 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -21,8 +21,8 @@
#endif
extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
- unsigned long flags);
+#define ioremap ioremap
+#define ioremap_prot ioremap_prot
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
return (void __iomem *)port;
@@ -32,7 +32,8 @@ static inline void ioport_unmap(void __iomem *addr)
{
}
-extern void iounmap(const void __iomem *addr);
+bool iounmap_allowed(void *addr);
+#define iounmap_allowed iounmap_allowed
/*
* io{read,write}{16,32}be() macros
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 0ee75aca6e10..02b750abccee 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -25,13 +25,6 @@ static inline bool arc_uncached_addr_space(phys_addr_t paddr)
void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
- phys_addr_t end;
-
- /* Don't allow wraparound or zero size */
- end = paddr + size - 1;
- if (!size || (end < paddr))
- return NULL;
-
/*
* If the region is h/w uncached, MMU mapping can be elided as optim
* The cast to u32 is fine as this region can only be inside 4GB
@@ -54,52 +47,19 @@ EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
unsigned long flags)
{
- unsigned int off;
- unsigned long vaddr;
- struct vm_struct *area;
- phys_addr_t end;
pgprot_t prot = __pgprot(flags);
- /* Don't allow wraparound, zero size */
- end = paddr + size - 1;
- if ((!size) || (end < paddr))
- return NULL;
-
/* An early platform driver might end up here */
if (!slab_is_available())
return NULL;
/* force uncached */
- prot = pgprot_noncached(prot);
-
- /* Mappings have to be page-aligned */
- off = paddr & ~PAGE_MASK;
- paddr &= PAGE_MASK_PHYS;
- size = PAGE_ALIGN(end + 1) - paddr;
-
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
- area->phys_addr = paddr;
- vaddr = (unsigned long)area->addr;
- if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
- vunmap((void __force *)vaddr);
- return NULL;
- }
- return (void __iomem *)(off + (char __iomem *)vaddr);
+ return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
}
EXPORT_SYMBOL(ioremap_prot);
-
-void iounmap(const void __iomem *addr)
+bool iounmap_allowed(void *addr)
{
/* weird double cast to handle phys_addr_t > 32 bits */
- if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
- return;
-
- vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
+ return !arc_uncached_addr_space((phys_addr_t)(u32)addr);
}
-EXPORT_SYMBOL(iounmap);
--
2.37.1