Message-ID: <20241016122424.1655560-2-rppt@kernel.org>
Date: Wed, 16 Oct 2024 15:24:17 +0300
From: Mike Rapoport <rppt@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>,
Luis Chamberlain <mcgrof@...nel.org>
Cc: Andreas Larsson <andreas@...sler.com>,
Andy Lutomirski <luto@...nel.org>,
Ard Biesheuvel <ardb@...nel.org>,
Arnd Bergmann <arnd@...db.de>,
Borislav Petkov <bp@...en8.de>,
Brian Cain <bcain@...cinc.com>,
Catalin Marinas <catalin.marinas@....com>,
Christoph Hellwig <hch@...radead.org>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Dinh Nguyen <dinguyen@...nel.org>,
Geert Uytterhoeven <geert@...ux-m68k.org>,
Guo Ren <guoren@...nel.org>,
Helge Deller <deller@....de>,
Huacai Chen <chenhuacai@...nel.org>,
Ingo Molnar <mingo@...hat.com>,
Johannes Berg <johannes@...solutions.net>,
John Paul Adrian Glaubitz <glaubitz@...sik.fu-berlin.de>,
Kent Overstreet <kent.overstreet@...ux.dev>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Mark Rutland <mark.rutland@....com>,
Masami Hiramatsu <mhiramat@...nel.org>,
Matt Turner <mattst88@...il.com>,
Max Filippov <jcmvbkbc@...il.com>,
Michael Ellerman <mpe@...erman.id.au>,
Michal Simek <monstr@...str.eu>,
Mike Rapoport <rppt@...nel.org>,
Oleg Nesterov <oleg@...hat.com>,
Palmer Dabbelt <palmer@...belt.com>,
Peter Zijlstra <peterz@...radead.org>,
Richard Weinberger <richard@....at>,
Russell King <linux@...linux.org.uk>,
Song Liu <song@...nel.org>,
Stafford Horne <shorne@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Thomas Bogendoerfer <tsbogend@...ha.franken.de>,
Thomas Gleixner <tglx@...utronix.de>,
Uladzislau Rezki <urezki@...il.com>,
Vineet Gupta <vgupta@...nel.org>,
Will Deacon <will@...nel.org>,
bpf@...r.kernel.org,
linux-alpha@...r.kernel.org,
linux-arch@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
linux-csky@...r.kernel.org,
linux-hexagon@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-m68k@...ts.linux-m68k.org,
linux-mips@...r.kernel.org,
linux-mm@...ck.org,
linux-modules@...r.kernel.org,
linux-openrisc@...r.kernel.org,
linux-parisc@...r.kernel.org,
linux-riscv@...ts.infradead.org,
linux-sh@...r.kernel.org,
linux-snps-arc@...ts.infradead.org,
linux-trace-kernel@...r.kernel.org,
linux-um@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org,
loongarch@...ts.linux.dev,
sparclinux@...r.kernel.org,
x86@...nel.org,
Christoph Hellwig <hch@....de>
Subject: [PATCH v6 1/8] mm: vmalloc: group declarations depending on CONFIG_MMU together
From: "Mike Rapoport (Microsoft)" <rppt@...nel.org>
Several declarations in include/linux/vmalloc.h depend on CONFIG_MMU but
are spread all over the file.
Group them all together to improve code readability.
No functional changes.
Signed-off-by: Mike Rapoport (Microsoft) <rppt@...nel.org>
Reviewed-by: Christoph Hellwig <hch@....de>
---
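A note for readers new to the pattern: the diff below consolidates every
MMU-dependent declaration into a single #ifdef CONFIG_MMU / #else / #endif
block, with the !CONFIG_MMU branch supplying zero/empty static inline stubs
so callers compile unchanged on both configurations. What follows is a
minimal, compilable sketch of that pattern, not part of the patch; the
demo_* names and the constants are hypothetical stand-ins for the real
vmalloc API.

/*
 * Illustration only: the grouping pattern applied to
 * include/linux/vmalloc.h, reduced to a standalone program.
 *
 * Build both ways to see the two branches compile:
 *   cc -DCONFIG_MMU demo.c -o demo && ./demo
 *   cc demo.c -o demo && ./demo
 */
#include <stdio.h>

#ifdef CONFIG_MMU
/* All MMU-dependent declarations live together in one block... */
#define DEMO_VMALLOC_TOTAL 4096UL	/* stand-in for VMALLOC_END - VMALLOC_START */

static unsigned long demo_vmalloc_nr_pages(void)
{
	return 42;	/* the kernel would count pages backing vmalloc areas */
}
#else /* !CONFIG_MMU */
/* ...and the !CONFIG_MMU branch keeps trivial stubs right next to them. */
#define DEMO_VMALLOC_TOTAL 0UL

static inline unsigned long demo_vmalloc_nr_pages(void)
{
	return 0;
}
#endif /* CONFIG_MMU */

int main(void)
{
	/* Callers need no #ifdef of their own on either configuration. */
	printf("total=%lu, pages=%lu\n",
	       DEMO_VMALLOC_TOTAL, demo_vmalloc_nr_pages());
	return 0;
}

Keeping the stubs adjacent to the real declarations is what makes the
single grouped block readable: both halves of the interface are visible in
one place.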
include/linux/vmalloc.h | 60 +++++++++++++++++------------------------
1 file changed, 24 insertions(+), 36 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index ad2ce7a6ab7a..27408f21e501 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -134,12 +134,6 @@ extern void vm_unmap_ram(const void *mem, unsigned int count);
 extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
 extern void vm_unmap_aliases(void);
 
-#ifdef CONFIG_MMU
-extern unsigned long vmalloc_nr_pages(void);
-#else
-static inline unsigned long vmalloc_nr_pages(void) { return 0; }
-#endif
-
 extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
 #define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))
 
@@ -266,12 +260,29 @@ static inline bool is_vm_area_hugepages(const void *addr)
 #endif
 }
 
+/* for /proc/kcore */
+long vread_iter(struct iov_iter *iter, const char *addr, size_t count);
+
+/*
+ * Internals. Don't use..
+ */
+__init void vm_area_add_early(struct vm_struct *vm);
+__init void vm_area_register_early(struct vm_struct *vm, size_t align);
+
+int register_vmap_purge_notifier(struct notifier_block *nb);
+int unregister_vmap_purge_notifier(struct notifier_block *nb);
+
 #ifdef CONFIG_MMU
+#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
+
+unsigned long vmalloc_nr_pages(void);
+
 int vm_area_map_pages(struct vm_struct *area, unsigned long start,
 		      unsigned long end, struct page **pages);
 void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
 			 unsigned long end);
 void vunmap_range(unsigned long addr, unsigned long end);
+
 static inline void set_vm_flush_reset_perms(void *addr)
 {
 	struct vm_struct *vm = find_vm_area(addr);
@@ -279,24 +290,14 @@ static inline void set_vm_flush_reset_perms(void *addr)
 	if (vm)
 		vm->flags |= VM_FLUSH_RESET_PERMS;
 }
+#else /* !CONFIG_MMU */
+#define VMALLOC_TOTAL 0UL
 
-#else
-static inline void set_vm_flush_reset_perms(void *addr)
-{
-}
-#endif
-
-/* for /proc/kcore */
-extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);
-
-/*
- * Internals. Don't use..
- */
-extern __init void vm_area_add_early(struct vm_struct *vm);
-extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+static inline unsigned long vmalloc_nr_pages(void) { return 0; }
+static inline void set_vm_flush_reset_perms(void *addr) {}
+#endif /* CONFIG_MMU */
 
-#ifdef CONFIG_SMP
-# ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && defined(CONFIG_SMP)
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
 				     size_t align);
@@ -311,22 +312,9 @@ pcpu_get_vm_areas(const unsigned long *offsets,
 	return NULL;
 }
 
-static inline void
-pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
-{
-}
-# endif
-#endif
-
-#ifdef CONFIG_MMU
-#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
-#else
-#define VMALLOC_TOTAL 0UL
+static inline void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) {}
 #endif
 
-int register_vmap_purge_notifier(struct notifier_block *nb);
-int unregister_vmap_purge_notifier(struct notifier_block *nb);
-
 #if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
 bool vmalloc_dump_obj(void *object);
 #else
--
2.43.0