Message-ID: <Ykbapdep6CxfSIOf@linutronix.de>
Date: Fri, 1 Apr 2022 12:57:41 +0200
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: Thomas Gleixner <tglx@...utronix.de>
Cc: LKML <linux-kernel@...r.kernel.org>,
linux-rt-users@...r.kernel.org,
Steven Rostedt <rostedt@...dmis.org>
Subject: [ANNOUNCE] v5.17.1-rt17
Dear RT folks!
I'm pleased to announce the v5.17.1-rt17 patch set.
Changes since v5.17.1-rt16:
- Update the "ptrace: fix ptrace vs tasklist_lock race" patch:
  - The rework in v5.17-rc6-rt11 introduced a bug which could update
    the wrong task state during a ptrace operation.
  - wait_task_inactive() now waits for the expected task state and
    ensures that the task is not blocked on a sleeping lock. A short
    sketch of both changes follows this list.
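To illustrate: on PREEMPT_RT a task blocked on a sleeping lock parks its
real state in saved_state while __state holds the lock-wait state. What
follows is a minimal userspace sketch of the two corrected behaviours,
not the kernel code itself; the helper and field names mirror the
kernel's, but the state values and the harness are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bits; the real values live in include/linux/sched.h. */
#define TASK_TRACED      0x0008
#define TASK_RTLOCK_WAIT 0x1000

struct task {
	long __state;      /* state the scheduler acts on */
	long saved_state;  /* real state, parked while lock-blocked */
};

/* Fix #1: when the match is found in saved_state (task is blocked on
 * a sleeping lock), the new state must be written to saved_state.
 * The rt16 version wrote __state in that branch, clobbering the
 * lock-wait state during a ptrace operation. */
static bool task_state_match_and_set(struct task *tsk, long state,
				     long new_state)
{
	if (tsk->__state & state) {
		tsk->__state = new_state;
		return true;
	}
	if (tsk->saved_state & state) {
		tsk->saved_state = new_state;
		return true;
	}
	return false;
}

/* Fix #2: report *which* state matched. 1 = __state matches, -1 =
 * only saved_state matches (task still blocked on a lock), 0 = no
 * match. wait_task_inactive() keeps polling while the result is
 * negative instead of treating the task as already inactive. */
static int task_state_match_eq(const struct task *tsk, long state)
{
	if (tsk->__state == state)
		return 1;
	if (tsk->saved_state == state)
		return -1;
	return 0;
}

int main(void)
{
	struct task tsk = { .__state = TASK_RTLOCK_WAIT,
			    .saved_state = TASK_TRACED };

	/* Only saved_state matches: -1 tells the caller to wait. */
	printf("match_eq while lock-blocked: %d\n",
	       task_state_match_eq(&tsk, TASK_TRACED));

	/* The ptrace state change lands in saved_state; __state keeps
	 * the lock-wait state. */
	task_state_match_and_set(&tsk, TASK_TRACED, 0);
	printf("__state=%#lx saved_state=%#lx\n",
	       tsk.__state, tsk.saved_state);
	return 0;
}

The actual kernel changes are in the delta patch appended below.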
Known issues
- Valentin Schneider reported a few splats on ARM64, see
https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com
The delta patch against v5.17.1-rt16 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.17/incr/patch-5.17.1-rt16-rt17.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.17.1-rt17
The RT patch against v5.17.1 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.17/older/patch-5.17.1-rt17.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.17/older/patches-5.17.1-rt17.tar.xz
Sebastian
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3df4ab414f1ad..94ead66808a50 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2058,21 +2058,22 @@ static inline bool task_state_match_and(struct task_struct *tsk, long state)
return match;
}
-static inline bool __task_state_match_eq(struct task_struct *tsk, long state)
+static inline int __task_state_match_eq(struct task_struct *tsk, long state)
{
- bool match = false;
+ int match = 0;
if (READ_ONCE(tsk->__state) == state)
- match = true;
+ match = 1;
else if (tsk->saved_state == state)
- match = true;
+ match = -1;
+
return match;
}
-static inline bool task_state_match_eq(struct task_struct *tsk, long state)
+static inline int task_state_match_eq(struct task_struct *tsk, long state)
{
unsigned long flags;
- bool match;
+ int match;
raw_spin_lock_irqsave(&tsk->pi_lock, flags);
match = __task_state_match_eq(tsk, state);
@@ -2091,7 +2092,7 @@ static inline bool task_state_match_and_set(struct task_struct *tsk, long state,
WRITE_ONCE(tsk->__state, new_state);
match = true;
} else if (tsk->saved_state & state) {
- tsk->__state = new_state;
+ tsk->saved_state = new_state;
match = true;
}
raw_spin_unlock_irqrestore(&tsk->pi_lock, flags);
@@ -2123,12 +2124,12 @@ static inline bool task_state_match_and(struct task_struct *tsk, long state)
return READ_ONCE(tsk->__state) & state;
}
-static inline bool __task_state_match_eq(struct task_struct *tsk, long state)
+static inline int __task_state_match_eq(struct task_struct *tsk, long state)
{
return READ_ONCE(tsk->__state) == state;
}
-static inline bool task_state_match_eq(struct task_struct *tsk, long state)
+static inline int task_state_match_eq(struct task_struct *tsk, long state)
{
return __task_state_match_eq(tsk, state);
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 33ce5cd113d82..7cda920dc6939 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3261,6 +3261,8 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
struct rq *rq;
for (;;) {
+ int match_type = 0;
+
/*
* We do the initial early heuristics without holding
* any task-queue locks at all. We'll only try to get
@@ -3297,7 +3299,9 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
- if (!match_state || __task_state_match_eq(p, match_state))
+ if (match_state)
+ match_type = __task_state_match_eq(p, match_state);
+ if (!match_state || match_type)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &rf);
@@ -3327,7 +3331,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
* running right now), it's preempted, and we should
* yield - it could be a while.
*/
- if (unlikely(queued)) {
+ if (unlikely(queued || match_type < 0)) {
ktime_t to = NSEC_PER_SEC / HZ;
set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/localversion-rt b/localversion-rt
index 1199ebade17b4..1e584b47c987e 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt16
+-rt17