[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20220418170104.GA16199@redhat.com>
Date: Mon, 18 Apr 2022 19:01:05 +0200
From: Oleg Nesterov <oleg@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: rjw@...ysocki.net, mingo@...nel.org, vincent.guittot@...aro.org,
dietmar.eggemann@....com, rostedt@...dmis.org, mgorman@...e.de,
ebiederm@...ssion.com, bigeasy@...utronix.de,
Will Deacon <will@...nel.org>, linux-kernel@...r.kernel.org,
tj@...nel.org, linux-pm@...r.kernel.org
Subject: Re: [PATCH 2/5] sched,ptrace: Fix ptrace_check_attach() vs PREEMPT_RT
On 04/15, Peter Zijlstra wrote:
>
> On Fri, Apr 15, 2022 at 12:57:56PM +0200, Oleg Nesterov wrote:
> > On 04/15, Oleg Nesterov wrote:
> > >
> > > OK, so far it seems that this patch needs a couple of simple fixes you
> > > pointed out, but before I send V2:
> > >
> > > - do you agree we can avoid JOBCTL_TRACED_FROZEN in 1-2 ?
> > >
> > > - will you agree if I change ptrace_freeze_traced() to rely
> > > on __state == TASK_TRACED rather than task_is_traced() ?
> > >
> >
> > Forgot to say, I think 1/5 needs some changes in any case...
> >
> > ptrace_resume() does wake_up_state(child, __TASK_TRACED) but doesn't
> > clear JOBCTL_TRACED. The "else" branch in ptrace_stop() leaks this flag
> > too.
>
> Urgh, yes, I seemed to have missed that one :-(
OK, I'll wait for the updated version and send V3 on top of it.
I think the fixes are simple. ptrace_resume() should simply remove the
minor "need_siglock" optimization and clear JOBCTL_TRACED. As for the
"else" branch in ptrace_stop(), perhaps 1/5 can introduce the trivial
static void clear_traced_jobctl_flags(unsigned long flags)
{
	spin_lock_irq(&current->sighand->siglock);
	current->jobctl &= ~flags;
	spin_unlock_irq(&current->sighand->siglock);
}
which can be reused by 2/5 to clear JOBCTL_TRACED_XXX. And, Peter, feel
free to join 1/5 and this patch if you think this makes any sense.
Meanwhile see V2 on top of the current version.
Oleg.
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index ec8b312f7506..1b5a57048e13 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -22,7 +22,8 @@ struct task_struct;
#define JOBCTL_STOPPED_BIT 24
#define JOBCTL_TRACED_BIT 25
-#define JOBCTL_TRACED_FROZEN_BIT 26
+#define JOBCTL_TRACED_XXX_BIT 26
+#define JOBCTL_TRACED_FROZEN_BIT 27
#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -35,6 +36,7 @@ struct task_struct;
#define JOBCTL_STOPPED (1UL << JOBCTL_STOPPED_BIT)
#define JOBCTL_TRACED (1UL << JOBCTL_TRACED_BIT)
+#define JOBCTL_TRACED_XXX (1UL << JOBCTL_TRACED_XXX_BIT)
#define JOBCTL_TRACED_FROZEN (1UL << JOBCTL_TRACED_FROZEN_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 626f96d275c7..ec104bae4e21 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -193,20 +193,24 @@ static bool looks_like_a_spurious_pid(struct task_struct *task)
*/
static bool ptrace_freeze_traced(struct task_struct *task)
{
+ unsigned long flags;
bool ret = false;
/* Lockless, nobody but us can set this flag */
if (task->jobctl & JOBCTL_LISTENING)
return ret;
+ if (!lock_task_sighand(task, &flags))
+ return ret;
- spin_lock_irq(&task->sighand->siglock);
- if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
+ if (READ_ONCE(task->__state) == TASK_TRACED &&
+ !looks_like_a_spurious_pid(task) &&
!__fatal_signal_pending(task)) {
task->jobctl |= JOBCTL_TRACED_FROZEN;
WRITE_ONCE(task->__state, __TASK_TRACED);
+ WARN_ON_ONCE(!task_is_traced(task));
ret = true;
}
- spin_unlock_irq(&task->sighand->siglock);
+ unlock_task_sighand(task, &flags);
return ret;
}
@@ -253,40 +257,42 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
*/
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
- int ret = -ESRCH;
-
+ int traced;
/*
* We take the read lock around doing both checks to close a
- * possible race where someone else was tracing our child and
- * detached between these two checks. After this locked check,
- * we are sure that this is our traced child and that can only
- * be changed by us so it's not changing right after this.
+ * possible race where someone else attaches or detaches our
+ * natural child.
*/
read_lock(&tasklist_lock);
- if (child->ptrace && child->parent == current) {
- WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
- /*
- * child->sighand can't be NULL, release_task()
- * does ptrace_unlink() before __exit_signal().
- */
- if (ignore_state || ptrace_freeze_traced(child))
- ret = 0;
- }
+ traced = child->ptrace && child->parent == current;
read_unlock(&tasklist_lock);
- if (!ret && !ignore_state) {
- if (!wait_task_inactive(child, __TASK_TRACED)) {
- /*
- * This can only happen if may_ptrace_stop() fails and
- * ptrace_stop() changes ->state back to TASK_RUNNING,
- * so we should not worry about leaking __TASK_TRACED.
- */
- WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
- ret = -ESRCH;
+ if (!traced)
+ return -ESRCH;
+
+ WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
+ if (ignore_state)
+ return 0;
+
+ if (!task_is_traced(child))
+ return -ESRCH;
+
+ for (;;) {
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ set_current_state(TASK_KILLABLE);
+ if (!(READ_ONCE(child->jobctl) & JOBCTL_TRACED_XXX)) {
+ __set_current_state(TASK_RUNNING);
+ break;
}
+ schedule();
}
- return ret;
+ if (!wait_task_inactive(child, TASK_TRACED) ||
+ !ptrace_freeze_traced(child))
+ return -ESRCH;
+
+ return 0;
}
static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
diff --git a/kernel/signal.c b/kernel/signal.c
index 0aea3f0a8002..c7a89904cc4a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2182,6 +2182,13 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
spin_unlock_irqrestore(&sighand->siglock, flags);
}
+static void clear_traced_xxx(void)
+{
+	spin_lock_irq(&current->sighand->siglock);
+	current->jobctl &= ~JOBCTL_TRACED_XXX;
+	spin_unlock_irq(&current->sighand->siglock);
+}
+
/*
* This must be called with current->sighand->siglock held.
*
@@ -2220,7 +2227,7 @@ static int ptrace_stop(int exit_code, int why, int clear_code,
* schedule() will not sleep if there is a pending signal that
* can awaken the task.
*/
- current->jobctl |= JOBCTL_TRACED;
+ current->jobctl |= JOBCTL_TRACED | JOBCTL_TRACED_XXX;
set_special_state(TASK_TRACED);
/*
@@ -2282,6 +2289,8 @@ static int ptrace_stop(int exit_code, int why, int clear_code,
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
+ clear_traced_xxx();
+ wake_up_state(current->parent, TASK_KILLABLE);
/*
* Don't want to allow preemption here, because
* sys_ptrace() needs this task to be inactive.
@@ -2297,8 +2306,12 @@ static int ptrace_stop(int exit_code, int why, int clear_code,
} else {
/*
* By the time we got the lock, our tracer went away.
- * Don't drop the lock yet, another tracer may come.
- *
+ * Don't drop the lock yet, another tracer may come,
+ * tasklist protects us from ptrace_freeze_traced().
+ */
+ __set_current_state(TASK_RUNNING);
+ clear_traced_xxx();
+ /*
* If @gstop_done, the ptracer went away between group stop
* completion and here. During detach, it would have set
* JOBCTL_STOP_PENDING on us and we'll re-enter
@@ -2307,13 +2320,11 @@ static int ptrace_stop(int exit_code, int why, int clear_code,
*/
if (gstop_done)
do_notify_parent_cldstop(current, false, why);
+ read_unlock(&tasklist_lock);
- /* tasklist protects us from ptrace_freeze_traced() */
- __set_current_state(TASK_RUNNING);
read_code = false;
if (clear_code)
exit_code = 0;
- read_unlock(&tasklist_lock);
}
/*
Powered by blists - more mailing lists