Message-ID: <CAJuCfpH_qNn-_hqdmpy0GUyfqrh2eKaWeOBnbkGyK1H6Ev-4fw@mail.gmail.com>
Date: Wed, 19 May 2021 10:41:54 -0700
From: Suren Baghdasaryan <surenb@...gle.com>
To: Huangzhaoyang <huangzhaoyang@...il.com>
Cc: Johannes Weiner <hannes@...xchg.org>,
Zhaoyang Huang <zhaoyang.huang@...soc.com>,
Ziwei Dai <ziwei.dai@...soc.com>, Ke Wang <ke.wang@...soc.com>,
LKML <linux-kernel@...r.kernel.org>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>
Subject: Re: [PATCH v4] psi: fix race between psi_trigger_create and psimon
On Tue, May 18, 2021 at 7:22 PM Huangzhaoyang <huangzhaoyang@...il.com> wrote:
>
> From: Zhaoyang Huang <zhaoyang.huang@...soc.com>
>
> A race between psimon_new and psimon_old, shown below, causes a panic by
> accessing an invalid psi_system->poll_wait->wait_queue_entry and
> psi_system->poll_timer->entry->next. This patch closes the race window by
> initialising poll_wait and poll_timer in group_init(), which runs only
> once, when the group is created.
>
> psi_trigger_create            psimon_new              psimon_old
>  init_waitqueue_head                                   finish_wait
>                                                        spin_lock(lock_old)
>  spin_lock_init(lock_new)
>  wake_up_process(psimon_new)
>
>                               finish_wait
>                               spin_lock(lock_new)
>                               list_del                 list_del
>
> Fixes: 461daba06bdc ("psi: eliminate kthread_worker from psi trigger scheduling mechanism")
>
> Signed-off-by: ziwei.dai <ziwei.dai@...soc.com>
> Signed-off-by: ke.wang <ke.wang@...soc.com>
> Signed-off-by: Zhaoyang Huang <zhaoyang.huang@...soc.com>
> ---
> v2: change del_timer_sync to del_timer in psi_trigger_destroy
> v3: remove timer_setup within psi_trigger_create
> protect del_timer by extending the critical section of mutex_lock
> v4: amend the Fixes information in the commit message
> ---
> kernel/sched/psi.c | 29 +++++++++++++++++------------
> 1 file changed, 17 insertions(+), 12 deletions(-)
>
> diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
> index cc25a3c..7b53217 100644
> --- a/kernel/sched/psi.c
> +++ b/kernel/sched/psi.c
> @@ -182,6 +182,8 @@ struct psi_group psi_system = {
>  
>  static void psi_avgs_work(struct work_struct *work);
>  
> +static void poll_timer_fn(struct timer_list *t);
> +
>  static void group_init(struct psi_group *group)
>  {
>          int cpu;
> @@ -201,6 +203,8 @@ static void group_init(struct psi_group *group)
>          memset(group->polling_total, 0, sizeof(group->polling_total));
>          group->polling_next_update = ULLONG_MAX;
>          group->polling_until = 0;
> +        init_waitqueue_head(&group->poll_wait);
> +        timer_setup(&group->poll_timer, poll_timer_fn, 0);
>          rcu_assign_pointer(group->poll_task, NULL);
>  }
>
> @@ -1157,9 +1161,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
>                          return ERR_CAST(task);
>                  }
>                  atomic_set(&group->poll_wakeup, 0);
> -                init_waitqueue_head(&group->poll_wait);
>                  wake_up_process(task);
> -                timer_setup(&group->poll_timer, poll_timer_fn, 0);
>                  rcu_assign_pointer(group->poll_task, task);
>          }
>
> @@ -1214,16 +1216,8 @@ static void psi_trigger_destroy(struct kref *ref)
>                  }
>          }
>  
> -        mutex_unlock(&group->trigger_lock);
> -
> -        /*
> -         * Wait for both *trigger_ptr from psi_trigger_replace and
> -         * poll_task RCUs to complete their read-side critical sections
> -         * before destroying the trigger and optionally the poll_task
> -         */
> -        synchronize_rcu();
>          /*
> -         * Destroy the kworker after releasing trigger_lock to prevent a
> +         * Destroy psimon after releasing trigger_lock to prevent a
>           * deadlock while waiting for psi_poll_work to acquire trigger_lock
>           */
>          if (task_to_destroy) {
> @@ -1233,9 +1227,20 @@ static void psi_trigger_destroy(struct kref *ref)
>                   * But it might have been already scheduled before
>                   * that - deschedule it cleanly before destroying it.
>                   */
> -                del_timer_sync(&group->poll_timer);
> +                del_timer(&group->poll_timer);
> +                mutex_unlock(&group->trigger_lock);
>                  kthread_stop(task_to_destroy);
> +        } else {
> +                mutex_unlock(&group->trigger_lock);
>          }
> +
> +        /*
> +         * Wait for both *trigger_ptr from psi_trigger_replace and
> +         * poll_task RCUs to complete their read-side critical sections
> +         * before destroying the trigger and optionally the poll_task
> +         */
> +        synchronize_rcu();
> +
>          kfree(t);
>  }
There is no need to move mutex_unlock() and synchronize_rcu() around in
psi_trigger_destroy(). The only change needed there is this:
@@ -1211,6 +1212,7 @@ static void psi_trigger_destroy(struct kref *ref)
                                         group->poll_task,
                                         lockdep_is_held(&group->trigger_lock));
                         rcu_assign_pointer(group->poll_task, NULL);
+                        del_timer(&group->poll_timer);
                 }
         }

@@ -1230,10 +1232,7 @@ static void psi_trigger_destroy(struct kref *ref)
                 /*
                  * After the RCU grace period has expired, the worker
                  * can no longer be found through group->poll_task.
-                 * But it might have been already scheduled before
-                 * that - deschedule it cleanly before destroying it.
                  */
-                del_timer_sync(&group->poll_timer);
                 kthread_stop(task_to_destroy);
         }
         kfree(t);
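
Roughly, with only that change applied, the tail of psi_trigger_destroy()
would read as below. This is a sketch assembled from the quoted hunks plus
the surrounding context of kernel/sched/psi.c as of 461daba06bdc; it is not
a compile-tested diff:

        if (group->poll_states == 0) {
                group->polling_until = 0;
                task_to_destroy = rcu_dereference_protected(
                                group->poll_task,
                                lockdep_is_held(&group->trigger_lock));
                rcu_assign_pointer(group->poll_task, NULL);
                /* deschedule the timer while trigger_lock is still held */
                del_timer(&group->poll_timer);
        }
    }

    mutex_unlock(&group->trigger_lock);

    /*
     * Wait for both *trigger_ptr from psi_trigger_replace and
     * poll_task RCUs to complete their read-side critical sections
     * before destroying the trigger and optionally the poll_task
     */
    synchronize_rcu();

    /*
     * Destroy the kworker after releasing trigger_lock to prevent a
     * deadlock while waiting for psi_poll_work to acquire trigger_lock
     */
    if (task_to_destroy) {
        /*
         * After the RCU grace period has expired, the worker
         * can no longer be found through group->poll_task.
         */
        kthread_stop(task_to_destroy);
    }
    kfree(t);
}

That is, the one-time init_waitqueue_head()/timer_setup() move into
group_init() as in the quoted hunks, the existing mutex_unlock() /
synchronize_rcu() / kthread_stop() ordering stays untouched, and only
del_timer() moves under trigger_lock, next to where group->poll_task is
cleared.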
>
> --
> 1.9.1
>