Message-Id: <20240119073019.1528573-2-houtao@huaweicloud.com>
Date: Fri, 19 Jan 2024 15:30:17 +0800
From: Hou Tao <houtao@...weicloud.com>
To: x86@...nel.org,
bpf@...r.kernel.org
Cc: Dave Hansen <dave.hansen@...ux.intel.com>,
Andy Lutomirski <luto@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
"H . Peter Anvin" <hpa@...or.com>,
linux-kernel@...r.kernel.org,
xingwei lee <xrivendell7@...il.com>,
Jann Horn <jannh@...gle.com>,
houtao1@...wei.com
Subject: [PATCH bpf 1/3] x86/mm: Move is_vsyscall_vaddr() into mm_internal.h
From: Hou Tao <houtao1@...wei.com>
Move is_vsyscall_vaddr() into mm_internal.h to make it available for
copy_from_kernel_nofault_allowed() in arch/x86/mm/maccess.c.
Signed-off-by: Hou Tao <houtao1@...wei.com>
---
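Note for reviewers: the sketch below illustrates how the helper is
expected to be used by copy_from_kernel_nofault_allowed() in
arch/x86/mm/maccess.c. It is illustrative only; the actual check is
added by the next patch in the series, and the surrounding logic here
is just a placeholder:

	bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
	{
		unsigned long vaddr = (unsigned long)unsafe_src;

		/*
		 * The (legacy) vsyscall page is user-accessible despite
		 * living in the kernel half of the address space, so it
		 * must not be treated as a normal kernel address for
		 * nofault reads.
		 */
		if (is_vsyscall_vaddr(vaddr))
			return false;

		/* the existing range/canonicality checks would remain here */
		return true;
	}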
arch/x86/mm/fault.c | 11 ++---------
arch/x86/mm/mm_internal.h | 13 +++++++++++++
2 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 679b09cfe241c..69e007761d9a9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -38,6 +38,8 @@
#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

+#include "mm_internal.h"
+
/*
* Returns 0 if mmiotrace is disabled, or if the fault is not
* handled by mmiotrace:
@@ -798,15 +800,6 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
show_opcodes(regs, loglvl);
}

-/*
- * The (legacy) vsyscall page is the long page in the kernel portion
- * of the address space that has user-accessible permissions.
- */
-static bool is_vsyscall_vaddr(unsigned long vaddr)
-{
- return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
-}
-
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address, u32 pkey, int si_code)
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 3f37b5c80bb32..4ebf6051e1ed7 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -2,6 +2,10 @@
#ifndef __X86_MM_INTERNAL_H
#define __X86_MM_INTERNAL_H

+#include <uapi/asm/vsyscall.h>
+
+#include <asm/page_types.h>
+
void *alloc_low_pages(unsigned int num);
static inline void *alloc_low_page(void)
{
@@ -25,4 +29,13 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);

extern unsigned long tlb_single_page_flush_ceiling;

+/*
+ * The (legacy) vsyscall page is the long page in the kernel portion
+ * of the address space that has user-accessible permissions.
+ */
+static inline bool is_vsyscall_vaddr(unsigned long vaddr)
+{
+ return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
+}
+
#endif /* __X86_MM_INTERNAL_H */
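
As a quick sanity check of the moved helper (the constants below assume
x86-64, where VSYSCALL_ADDR is 0xffffffffff600000 and PAGE_SIZE is 4K):

	is_vsyscall_vaddr(0xffffffffff600000UL); /* page start -> true  */
	is_vsyscall_vaddr(0xffffffffff6007c0UL); /* mid-page   -> true  */
	is_vsyscall_vaddr(0xffffffffff601000UL); /* next page  -> false */

The mid-page case holds because 0xffffffffff6007c0 & PAGE_MASK ==
0xffffffffff600000, i.e. any address inside that single page is
recognized.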
--
2.29.2