Message-ID: <YqAjM9+4jjWXbQty@MiWiFi-R3L-srv>
Date: Wed, 8 Jun 2022 12:18:59 +0800
From: Baoquan He <bhe@...hat.com>
To: Kefeng Wang <wangkefeng.wang@...wei.com>
Cc: catalin.marinas@....com, will@...nel.org,
akpm@...ux-foundation.org, linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
hch@...radead.org, arnd@...db.de, anshuman.khandual@....com
Subject: Re: [PATCH v5 4/6] mm: ioremap: Add ioremap/iounmap_allowed()
On 06/07/22 at 08:50pm, Kefeng Wang wrote:
> Add special hooks for the architecture to verify the addr, size or prot
> when doing ioremap() or iounmap(), which makes the generic ioremap
> more useful.
>
> ioremap_allowed() returns a bool,
> - true means continue to remap
> - false means skip remap and return directly
> iounmap_allowed() returns a bool,
> - true means continue to vunmap
> - false means skip vunmap and return directly
>
> Meanwhile, only vunmap the address when it is in the vmalloc area, as
> the generic ioremap only returns vmalloc addresses.
LGTM,
Reviewed-by: Baoquan He <bhe@...hat.com>
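
One note for whoever wires this up on the arch side: the hooks are meant
to be provided by the arch's asm/io.h before it includes asm-generic/io.h,
so the #ifndef fallbacks in the patch are skipped. A rough sketch of what
such an override could look like (my own illustration, not taken from this
series; the PHYS_MASK / pfn_is_map_memory() checks are only examples of
possible rejection conditions):

	/* arch/xxx/include/asm/io.h (illustrative only) */
	#define ioremap_allowed ioremap_allowed
	static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
					   unsigned long prot)
	{
		unsigned long last_addr = phys_addr + size - 1;

		/* Reject wrap-around or addresses beyond the physical range */
		if (last_addr < phys_addr || (last_addr & ~PHYS_MASK))
			return false;

		/* Example policy: don't allow normal RAM to be remapped */
		if (pfn_is_map_memory(__phys_to_pfn(last_addr)))
			return false;

		return true;
	}

	#define iounmap_allowed iounmap_allowed
	static inline bool iounmap_allowed(void *addr)
	{
		/* nothing extra to check in this sketch */
		return true;
	}

	#include <asm-generic/io.h>
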
>
> Acked-by: Andrew Morton <akpm@...ux-foundation.org>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@...wei.com>
> ---
> include/asm-generic/io.h | 26 ++++++++++++++++++++++++++
> mm/ioremap.c | 11 ++++++++++-
> 2 files changed, 36 insertions(+), 1 deletion(-)
>
> diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
> index b76379628a02..db5b890eaff7 100644
> --- a/include/asm-generic/io.h
> +++ b/include/asm-generic/io.h
> @@ -964,6 +964,32 @@ static inline void iounmap(volatile void __iomem *addr)
> #elif defined(CONFIG_GENERIC_IOREMAP)
> #include <linux/pgtable.h>
>
> +/*
> + * Arch code can implement the following two hooks when using GENERIC_IOREMAP
> + * ioremap_allowed() return a bool,
> + * - true means continue to remap
> + * - false means skip remap and return directly
> + * iounmap_allowed() return a bool,
> + * - true means continue to vunmap
> + * - false means skip vunmap and return directly
> + */
> +#ifndef ioremap_allowed
> +#define ioremap_allowed ioremap_allowed
> +static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
> + unsigned long prot)
> +{
> + return true;
> +}
> +#endif
> +
> +#ifndef iounmap_allowed
> +#define iounmap_allowed iounmap_allowed
> +static inline bool iounmap_allowed(void *addr)
> +{
> + return true;
> +}
> +#endif
> +
> void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
> unsigned long prot);
> void iounmap(volatile void __iomem *addr);
> diff --git a/mm/ioremap.c b/mm/ioremap.c
> index e1d008e8f87f..8652426282cc 100644
> --- a/mm/ioremap.c
> +++ b/mm/ioremap.c
> @@ -28,6 +28,9 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
> phys_addr -= offset;
> size = PAGE_ALIGN(size + offset);
>
> + if (!ioremap_allowed(phys_addr, size, prot))
> + return NULL;
> +
> area = get_vm_area_caller(size, VM_IOREMAP,
> __builtin_return_address(0));
> if (!area)
> @@ -47,6 +50,12 @@ EXPORT_SYMBOL(ioremap_prot);
>
> void iounmap(volatile void __iomem *addr)
> {
> - vunmap((void *)((unsigned long)addr & PAGE_MASK));
> + void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);
> +
> + if (!iounmap_allowed(vaddr))
> + return;
> +
> + if (is_vmalloc_addr(vaddr))
> + vunmap(vaddr);
> }
> EXPORT_SYMBOL(iounmap);
> --
> 2.35.3
>
>