Message-ID: <20210602133040.334970485@infradead.org>
Date: Wed, 02 Jun 2021 15:12:27 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>
Cc: Borislav Petkov <bp@...en8.de>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>, Jens Axboe <axboe@...nel.dk>,
Alasdair Kergon <agk@...hat.com>,
Mike Snitzer <snitzer@...hat.com>, dm-devel@...hat.com,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Felipe Balbi <balbi@...nel.org>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Alexander Viro <viro@...iv.linux.org.uk>,
Tejun Heo <tj@...nel.org>, Zefan Li <lizefan.x@...edance.com>,
Johannes Weiner <hannes@...xchg.org>,
Jason Wessel <jason.wessel@...driver.com>,
Daniel Thompson <daniel.thompson@...aro.org>,
Douglas Anderson <dianders@...omium.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...hat.com>,
Namhyung Kim <namhyung@...nel.org>,
"Rafael J. Wysocki" <rjw@...ysocki.net>,
Pavel Machek <pavel@....cz>, Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
Oleg Nesterov <oleg@...hat.com>,
Davidlohr Bueso <dave@...olabs.net>,
"Paul E. McKenney" <paulmck@...nel.org>,
Josh Triplett <josh@...htriplett.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>,
Joel Fernandes <joel@...lfernandes.org>,
John Stultz <john.stultz@...aro.org>,
Stephen Boyd <sboyd@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Paolo Bonzini <pbonzini@...hat.com>,
linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
netdev@...r.kernel.org, linux-usb@...r.kernel.org,
linux-fsdevel@...r.kernel.org, cgroups@...r.kernel.org,
kgdb-bugreport@...ts.sourceforge.net,
linux-perf-users@...r.kernel.org, linux-pm@...r.kernel.org,
rcu@...r.kernel.org, linux-mm@...ck.org, kvm@...r.kernel.org
Subject: [PATCH 2/6] sched: Introduce task_is_running()

Replace a bunch of 'p->state == TASK_RUNNING' with a new helper:
task_is_running(p).

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
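Note (not for the changelog): the helper boils down to a single marked
load of ->state compared against TASK_RUNNING; wrapping the load in
READ_ONCE() documents that ->state can change concurrently and that one
racy read is intended. Below is a minimal, self-contained user-space
sketch of those semantics, illustration only; the simplified task_struct,
the READ_ONCE() stand-in and the state values are not the kernel
definitions.

	/*
	 * Stand-alone sketch of task_is_running() semantics
	 * (illustration only; kernel definitions differ).
	 */
	#include <stdio.h>

	#define TASK_RUNNING		0x0000
	#define TASK_INTERRUPTIBLE	0x0001

	/* crude stand-in for the kernel's single marked load */
	#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

	struct task_struct {
		long state;		/* simplified */
	};

	/* same shape as the new helper */
	#define task_is_running(task) \
		(READ_ONCE((task)->state) == TASK_RUNNING)

	int main(void)
	{
		struct task_struct p = { .state = TASK_INTERRUPTIBLE };

		printf("running? %d\n", task_is_running(&p));	/* 0 */
		p.state = TASK_RUNNING;
		printf("running? %d\n", task_is_running(&p));	/* 1 */
		return 0;
	}

A second, equally hypothetical snippet on the '!tsk->state' conversions
follows the diff.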
arch/x86/kernel/process.c | 4 ++--
block/blk-mq.c | 2 +-
include/linux/sched.h | 2 ++
kernel/locking/lockdep.c | 2 +-
kernel/rcu/tree_plugin.h | 2 +-
kernel/sched/core.c | 6 +++---
kernel/sched/stats.h | 2 +-
kernel/signal.c | 2 +-
kernel/softirq.c | 3 +--
mm/compaction.c | 2 +-
10 files changed, 14 insertions(+), 13 deletions(-)
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -931,7 +931,7 @@ unsigned long get_wchan(struct task_stru
unsigned long start, bottom, top, sp, fp, ip, ret = 0;
int count = 0;
- if (p == current || p->state == TASK_RUNNING)
+ if (p == current || task_is_running(p))
return 0;
if (!try_get_task_stack(p))
@@ -975,7 +975,7 @@ unsigned long get_wchan(struct task_stru
goto out;
}
fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
- } while (count++ < 16 && p->state != TASK_RUNNING);
+ } while (count++ < 16 && !task_is_running(p));
out:
put_task_stack(p);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3926,7 +3926,7 @@ int blk_poll(struct request_queue *q, bl
if (signal_pending_state(state, current))
__set_current_state(TASK_RUNNING);
- if (current->state == TASK_RUNNING)
+ if (task_is_running(current))
return 1;
if (ret < 0 || !spin)
break;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -113,6 +113,8 @@ struct task_group;
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
+#define task_is_running(task) (READ_ONCE((task)->state) == TASK_RUNNING)
+
#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -760,7 +760,7 @@ static void lockdep_print_held_locks(str
* It's not reliable to print a task's held locks if it's not sleeping
* and it's not the current task.
*/
- if (p->state == TASK_RUNNING && p != current)
+ if (p != current && task_is_running(p))
return;
for (i = 0; i < depth; i++) {
printk(" #%d: ", i);
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2768,7 +2768,7 @@ EXPORT_SYMBOL_GPL(rcu_bind_current_to_no
#ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
- return tsp && tsp->state == TASK_RUNNING && !tsp->on_cpu ? "!" : "";
+ return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
}
#else // #ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5989,7 +5989,7 @@ static inline void sched_submit_work(str
{
unsigned int task_flags;
- if (!tsk->state)
+ if (task_is_running(tsk))
return;
task_flags = tsk->flags;
@@ -7964,7 +7964,7 @@ int __sched yield_to(struct task_struct
if (curr->sched_class != p->sched_class)
goto out_unlock;
- if (task_running(p_rq, p) || p->state)
+ if (task_running(p_rq, p) || !task_is_running(p))
goto out_unlock;
yielded = curr->sched_class->yield_to_task(rq, p);
@@ -8167,7 +8167,7 @@ void sched_show_task(struct task_struct
pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
- if (p->state == TASK_RUNNING)
+ if (task_is_running(p))
pr_cont(" running task ");
#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -217,7 +217,7 @@ static inline void sched_info_depart(str
rq_sched_info_depart(rq, delta);
- if (t->state == TASK_RUNNING)
+ if (task_is_running(t))
sched_info_enqueue(rq, t);
}
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -4719,7 +4719,7 @@ void kdb_send_sig(struct task_struct *t,
}
new_t = kdb_prev_t != t;
kdb_prev_t = t;
- if (t->state != TASK_RUNNING && new_t) {
+ if (!task_is_running(t) && new_t) {
spin_unlock(&t->sighand->siglock);
kdb_printf("Process is not RUNNING, sending a signal from "
"kdb risks deadlock\n"
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -92,8 +92,7 @@ static bool ksoftirqd_running(unsigned l
if (pending & SOFTIRQ_NOW_MASK)
return false;
- return tsk && (tsk->state == TASK_RUNNING) &&
- !__kthread_should_park(tsk);
+ return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
}
#ifdef CONFIG_TRACE_IRQFLAGS
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1955,7 +1955,7 @@ static inline bool is_via_compact_memory
static bool kswapd_is_running(pg_data_t *pgdat)
{
- return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING);
+ return pgdat->kswapd && task_is_running(pgdat->kswapd);
}
/*
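On the sched/core.c hunks: the old code tested '!tsk->state' in
sched_submit_work() and 'p->state' in yield_to(), which relies on
TASK_RUNNING being 0, so the conversions are behaviour-preserving. A
trivial user-space check of that equivalence, illustration only:

	#include <assert.h>

	#define TASK_RUNNING		0x0000
	#define TASK_INTERRUPTIBLE	0x0001

	int main(void)
	{
		long state = TASK_INTERRUPTIBLE;

		/* "!state" and "state == TASK_RUNNING" always agree */
		assert((!state) == (state == TASK_RUNNING));
		state = TASK_RUNNING;
		assert((!state) == (state == TASK_RUNNING));
		return 0;
	}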