Message-ID: <20240911131320.GA3448@redhat.com>
Date: Wed, 11 Sep 2024 15:13:20 +0200
From: Oleg Nesterov <oleg@...hat.com>
To: Sven Schnelle <svens@...ux.ibm.com>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: Michael Ellerman <mpe@...erman.id.au>,
Masami Hiramatsu <mhiramat@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>, Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
"Liang, Kan" <kan.liang@...ux.intel.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Andrii Nakryiko <andrii@...nel.org>, linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org
Subject: [PATCH -mm 1/3] Revert "uprobes: use vm_special_mapping close()
functionality"
This reverts commit 08e28de1160a712724268fd33d77b32f1bc84d1c.
A malicious application can munmap() its "[uprobes]" vma and in this case
xol_mapping.close == uprobe_clear_state() will free the memory which can
be used by another thread, or the same thread when it hits the uprobe bp
afterwards.
Signed-off-by: Oleg Nesterov <oleg@...hat.com>
---
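For illustration only (not part of the patch): a minimal, hypothetical
userspace sketch of the trigger described above. It assumes the process is
already traced by a uprobe, so a "[uprobes]" entry shows up in
/proc/self/maps; the program simply locates that vma and munmap()s it. With
the reverted commit in place, the munmap() would run the special mapping's
->close() callback and free the xol_area while other threads, or this one on
its next uprobe hit, may still be using it.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[512];
	unsigned long start, end;

	if (!maps)
		return 1;

	while (fgets(line, sizeof(line), maps)) {
		/* Look for the special "[uprobes]" XOL mapping. */
		if (!strstr(line, "[uprobes]"))
			continue;
		if (sscanf(line, "%lx-%lx", &start, &end) == 2)
			munmap((void *)start, end - start); /* would trigger ->close() */
		break;
	}
	fclose(maps);
	return 0;
}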
include/linux/uprobes.h | 1 +
kernel/events/uprobes.c | 36 +++++++++++++++++++-----------------
kernel/fork.c | 1 +
3 files changed, 21 insertions(+), 17 deletions(-)

diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 493dc95d912c..b503fafb7fb3 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -126,6 +126,7 @@ extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
extern bool uprobe_deny_signal(void);
extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
+extern void uprobe_clear_state(struct mm_struct *mm);
extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index c2d6b2d64de2..73cc47708679 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1482,22 +1482,6 @@ void * __weak arch_uprobe_trampoline(unsigned long *psize)
return &insn;
}
-/*
- * uprobe_clear_state - Free the area allocated for slots.
- */
-static void uprobe_clear_state(const struct vm_special_mapping *sm, struct vm_area_struct *vma)
-{
- struct xol_area *area = container_of(vma->vm_private_data, struct xol_area, xol_mapping);
-
- mutex_lock(&delayed_uprobe_lock);
- delayed_uprobe_remove(NULL, vma->vm_mm);
- mutex_unlock(&delayed_uprobe_lock);
-
- put_page(area->pages[0]);
- kfree(area->bitmap);
- kfree(area);
-}
-
static struct xol_area *__create_xol_area(unsigned long vaddr)
{
struct mm_struct *mm = current->mm;
@@ -1516,7 +1500,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
area->xol_mapping.name = "[uprobes]";
area->xol_mapping.fault = NULL;
- area->xol_mapping.close = uprobe_clear_state;
area->xol_mapping.pages = area->pages;
area->pages[0] = alloc_page(GFP_HIGHUSER);
if (!area->pages[0])
@@ -1562,6 +1545,25 @@ static struct xol_area *get_xol_area(void)
return area;
}
+/*
+ * uprobe_clear_state - Free the area allocated for slots.
+ */
+void uprobe_clear_state(struct mm_struct *mm)
+{
+ struct xol_area *area = mm->uprobes_state.xol_area;
+
+ mutex_lock(&delayed_uprobe_lock);
+ delayed_uprobe_remove(NULL, mm);
+ mutex_unlock(&delayed_uprobe_lock);
+
+ if (!area)
+ return;
+
+ put_page(area->pages[0]);
+ kfree(area->bitmap);
+ kfree(area);
+}
+
void uprobe_start_dup_mmap(void)
{
percpu_down_read(&dup_mmap_sem);
diff --git a/kernel/fork.c b/kernel/fork.c
index 61070248a7d3..3d590e51ce84 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1338,6 +1338,7 @@ static inline void __mmput(struct mm_struct *mm)
{
VM_BUG_ON(atomic_read(&mm->mm_users));
+ uprobe_clear_state(mm);
exit_aio(mm);
ksm_exit(mm);
khugepaged_exit(mm); /* must run before exit_mmap */
--
2.25.1.362.g51ebf55