Message-Id: <1621390860-6350-1-git-send-email-huangzhaoyang@gmail.com>
Date: Wed, 19 May 2021 10:21:00 +0800
From: Huangzhaoyang <huangzhaoyang@...il.com>
To: Johannes Weiner <hannes@...xchg.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Zhaoyang Huang <zhaoyang.huang@...soc.com>,
Ziwei Dai <ziwei.dai@...soc.com>, Ke Wang <ke.wang@...soc.com>,
linux-kernel@...r.kernel.org
Cc: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>
Subject: [PATCH v4] psi: fix race between psi_trigger_create and psimon
From: Zhaoyang Huang <zhaoyang.huang@...soc.com>
A race was detected between psimon_new and psimon_old, as shown below, which
causes a panic by accessing the invalid psi_system->poll_wait->wait_queue_entry
and psi_system->poll_timer->entry->next. This change removes the race window
by initialising poll_wait and poll_timer in group_init(), which is executed
only once at the beginning.
psi_trigger_create            psimon_new               psimon_old
 init_waitqueue_head                                    finish_wait
                                                        spin_lock(lock_old)
 spin_lock_init(lock_new)
 wake_up_process(psimon_new)
                               finish_wait
                               spin_lock(lock_new)
                               list_del                 list_del
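
For illustration only (not part of the patch): a minimal userspace sketch of
the same pattern, with hypothetical names and pthreads standing in for the
kernel waitqueue/timer. The point is that synchronization objects shared with
a long-lived worker are initialised exactly once at setup time, while creating
a new trigger only wakes the worker and never re-initialises the lock that the
old worker may still be holding.

  /* build with: cc -pthread sketch.c */
  #include <pthread.h>
  #include <stdbool.h>

  struct group {
          pthread_mutex_t lock;   /* ~ poll_wait lock, initialised once */
          pthread_cond_t  wait;   /* ~ poll_wait, initialised once      */
          bool            wakeup; /* ~ poll_wakeup                      */
          bool            stop;
  };

  static void group_init(struct group *g)
  {
          /* done exactly once at setup, never per trigger */
          pthread_mutex_init(&g->lock, NULL);
          pthread_cond_init(&g->wait, NULL);
          g->wakeup = false;
          g->stop = false;
  }

  static void *worker(void *arg)          /* ~ psimon */
  {
          struct group *g = arg;

          pthread_mutex_lock(&g->lock);
          while (!g->stop) {
                  while (!g->wakeup && !g->stop)
                          pthread_cond_wait(&g->wait, &g->lock);
                  g->wakeup = false;
                  /* ... do polling work ... */
          }
          pthread_mutex_unlock(&g->lock);
          return NULL;
  }

  static void trigger_create(struct group *g)  /* ~ psi_trigger_create */
  {
          /* no re-init of g->lock / g->wait here: only wake the worker */
          pthread_mutex_lock(&g->lock);
          g->wakeup = true;
          pthread_cond_signal(&g->wait);
          pthread_mutex_unlock(&g->lock);
  }

  int main(void)
  {
          struct group g;
          pthread_t t;

          group_init(&g);
          pthread_create(&t, NULL, worker, &g);
          trigger_create(&g);

          pthread_mutex_lock(&g.lock);
          g.stop = true;
          pthread_cond_signal(&g.wait);
          pthread_mutex_unlock(&g.lock);
          pthread_join(t, NULL);
          return 0;
  }
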
Fixes: 461daba06bdc ("psi: eliminate kthread_worker from psi trigger scheduling mechanism")
Signed-off-by: Ziwei Dai <ziwei.dai@...soc.com>
Signed-off-by: Ke Wang <ke.wang@...soc.com>
Signed-off-by: Zhaoyang Huang <zhaoyang.huang@...soc.com>
---
v2: change del_timer_sync to del_timer in psi_trigger_destroy
v3: remove timer_setup from psi_trigger_create;
    protect del_timer by extending the critical section of mutex_lock
v4: amend fix information on comment
---
kernel/sched/psi.c | 29 +++++++++++++++++------------
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index cc25a3c..7b53217 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -182,6 +182,8 @@ struct psi_group psi_system = {
static void psi_avgs_work(struct work_struct *work);
+static void poll_timer_fn(struct timer_list *t);
+
static void group_init(struct psi_group *group)
{
int cpu;
@@ -201,6 +203,8 @@ static void group_init(struct psi_group *group)
memset(group->polling_total, 0, sizeof(group->polling_total));
group->polling_next_update = ULLONG_MAX;
group->polling_until = 0;
+ init_waitqueue_head(&group->poll_wait);
+ timer_setup(&group->poll_timer, poll_timer_fn, 0);
rcu_assign_pointer(group->poll_task, NULL);
}
@@ -1157,9 +1161,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
return ERR_CAST(task);
}
atomic_set(&group->poll_wakeup, 0);
- init_waitqueue_head(&group->poll_wait);
wake_up_process(task);
- timer_setup(&group->poll_timer, poll_timer_fn, 0);
rcu_assign_pointer(group->poll_task, task);
}
@@ -1214,16 +1216,8 @@ static void psi_trigger_destroy(struct kref *ref)
}
}
- mutex_unlock(&group->trigger_lock);
-
- /*
- * Wait for both *trigger_ptr from psi_trigger_replace and
- * poll_task RCUs to complete their read-side critical sections
- * before destroying the trigger and optionally the poll_task
- */
- synchronize_rcu();
/*
- * Destroy the kworker after releasing trigger_lock to prevent a
+ * Destroy psimon after releasing trigger_lock to prevent a
* deadlock while waiting for psi_poll_work to acquire trigger_lock
*/
if (task_to_destroy) {
@@ -1233,9 +1227,20 @@ static void psi_trigger_destroy(struct kref *ref)
* But it might have been already scheduled before
* that - deschedule it cleanly before destroying it.
*/
- del_timer_sync(&group->poll_timer);
+ del_timer(&group->poll_timer);
+ mutex_unlock(&group->trigger_lock);
kthread_stop(task_to_destroy);
+ } else {
+ mutex_unlock(&group->trigger_lock);
}
+
+ /*
+ * Wait for both *trigger_ptr from psi_trigger_replace and
+ * poll_task RCUs to complete their read-side critical sections
+ * before destroying the trigger and optionally the poll_task
+ */
+ synchronize_rcu();
+
kfree(t);
}
--
1.9.1