Message-ID: <772085e9-35f3-1c32-1b87-f101cbc5f8f7@redhat.com>
Date:   Tue, 18 Dec 2018 15:35:30 -0500
From:   Waiman Long <longman@...hat.com>
To:     Davidlohr Bueso <dave@...olabs.net>
Cc:     Peter Zijlstra <peterz@...radead.org>,
        Yongji Xie <elohimes@...il.com>, mingo@...hat.com,
        will.deacon@....com, linux-kernel@...r.kernel.org,
        Xie Yongji <xieyongji@...du.com>, zhangyu31@...du.com,
        liuqi16@...du.com, yuanlinsi01@...du.com, nixun@...du.com,
        lilin24@...du.com, andrea.parri@...rulasolutions.com
Subject: Re: [PATCH v4] sched/wake_q: Reduce reference counting for special
 users

On 12/18/2018 02:53 PM, Davidlohr Bueso wrote:
> Some users, specifically futexes and rwsems, required fixes
> that allowed the callers to be safe when wakeups occur before
> they are expected by wake_up_q(). Such scenarios also play
> games with reference counting, and until now were relying on
> wake_q to do it. With the wake_q_add() call being moved down,
> this can no longer be the case. As such we end up with a
> double task refcounting overhead; and these callers care
> enough about this (being rather core-ish).
>
> This patch introduces a wake_q_add_safe() call that serves
> callers that have already done the refcounting and for which the
> task is therefore 'safe' from wake_q's point of view (in that a
> reference is held throughout the entire queue/wakeup cycle). In
> the one case wake_q takes its own reference internally, in the
> other it consumes the caller's reference.
>
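To make that contract concrete, here is a minimal, illustrative sketch of the
two caller patterns the patch distinguishes. The wake_q calls are the ones from
this patch; the wrapper functions and variable names are hypothetical, added
only for illustration:

/* Illustrative sketch, not kernel code: the two refcounting patterns
 * for deferred wakeups after this patch. */

/* Pattern 1: caller holds no extra reference; wake_q takes its own. */
static void queue_wakeup_plain(struct wake_q_head *wake_q,
			       struct task_struct *p)
{
	/* wake_q_add() calls get_task_struct(p) itself when it queues p. */
	wake_q_add(wake_q, p);
}

/* Pattern 2: caller already holds a reference (e.g. from an earlier
 * get_task_struct()) and hands it over to the wake_q. */
static void queue_wakeup_safe(struct wake_q_head *wake_q,
			      struct task_struct *p)
{
	/* wake_q_add_safe() consumes the caller's reference: it is kept
	 * if p was queued here, or dropped via put_task_struct() if p
	 * was already queued by someone else. */
	wake_q_add_safe(wake_q, p);
}
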
> Signed-off-by: Davidlohr Bueso <dbueso@...e.de>
> ---
>
> - Changes from v3: fixed wake_q_add_safe. While the previous version
>  had been tested with a bootup, the failed cmpxchg path obviously
>  hadn't been exercised. Sorry about the noise.
>
> include/linux/sched/wake_q.h |  4 +--
> kernel/futex.c               |  3 +--
> kernel/locking/rwsem-xadd.c  |  4 +--
> kernel/sched/core.c          | 60 ++++++++++++++++++++++++++++++++------------
> 4 files changed, 48 insertions(+), 23 deletions(-)
>
> diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
> index 545f37138057..ad826d2a4557 100644
> --- a/include/linux/sched/wake_q.h
> +++ b/include/linux/sched/wake_q.h
> @@ -51,8 +51,8 @@ static inline void wake_q_init(struct wake_q_head *head)
>     head->lastp = &head->first;
> }
>
> -extern void wake_q_add(struct wake_q_head *head,
> -               struct task_struct *task);
> +extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
> +extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
> extern void wake_up_q(struct wake_q_head *head);
>
> #endif /* _LINUX_SCHED_WAKE_Q_H */
> diff --git a/kernel/futex.c b/kernel/futex.c
> index d14971f6ed3d..6218d98f649b 100644
> --- a/kernel/futex.c
> +++ b/kernel/futex.c
> @@ -1402,8 +1402,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
>      * Queue the task for later wakeup for after we've released
>      * the hb->lock. wake_q_add() grabs reference to p.
>      */
> -    wake_q_add(wake_q, p);
> -    put_task_struct(p);
> +    wake_q_add_safe(wake_q, p);
> }
>
> /*
> diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
> index 50d9af615dc4..fbe96341beee 100644
> --- a/kernel/locking/rwsem-xadd.c
> +++ b/kernel/locking/rwsem-xadd.c
> @@ -211,9 +211,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
>          * Ensure issuing the wakeup (either by us or someone else)
>          * after setting the reader waiter to nil.
>          */
> -        wake_q_add(wake_q, tsk);
> -        /* wake_q_add() already take the task ref */
> -        put_task_struct(tsk);
> +        wake_q_add_safe(wake_q, tsk);
>     }
>
>     adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index d740d7a3608d..be977df66a21 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -396,19 +396,7 @@ static bool set_nr_if_polling(struct task_struct *p)
> #endif
> #endif
>
> -/**
> - * wake_q_add() - queue a wakeup for 'later' waking.
> - * @head: the wake_q_head to add @task to
> - * @task: the task to queue for 'later' wakeup
> - *
> - * Queue a task for later wakeup, most likely by the wake_up_q() call in the
> - * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
> - * instantly.
> - *
> - * This function must be used as-if it were wake_up_process(); IOW the task
> - * must be ready to be woken at this location.
> - */
> -void wake_q_add(struct wake_q_head *head, struct task_struct *task)
> +static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
> {
>     struct wake_q_node *node = &task->wake_q;
>
> @@ -422,15 +410,55 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
>      */
>     smp_mb__before_atomic();
>     if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
> -        return;
> -
> -    get_task_struct(task);
> +        return false;
>
>     /*
>      * The head is context local, there can be no concurrency.
>      */
>     *head->lastp = node;
>     head->lastp = &node->next;
> +    return true;
> +}
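The NULL -> WAKE_Q_TAIL cmpxchg above is what makes __wake_q_add() a
claim-once operation: of all racing callers, exactly one swings node->next
from NULL and gets to enqueue the task (and, in wake_q_add(), to take the
reference). A runnable userspace analogue of that idiom, using C11 atomics
instead of the kernel primitives (names and the sentinel value are
illustrative, not kernel code), might look like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_TAIL ((void *)0x1)	/* stand-in for WAKE_Q_TAIL */

struct node {
	_Atomic(void *) next;
};

/* Returns true for exactly one claimant, no matter how many race. */
static bool claim_once(struct node *n)
{
	void *expected = NULL;

	/* In the spirit of cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL):
	 * only the thread that swings next away from NULL wins. */
	return atomic_compare_exchange_strong_explicit(&n->next, &expected,
						       QUEUE_TAIL,
						       memory_order_relaxed,
						       memory_order_relaxed);
}

int main(void)
{
	struct node n = { .next = NULL };

	printf("first claim:  %d\n", claim_once(&n));  /* 1: claimed */
	printf("second claim: %d\n", claim_once(&n));  /* 0: already claimed */
	return 0;
}
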
> +
> +/**
> + * wake_q_add() - queue a wakeup for 'later' waking.
> + * @head: the wake_q_head to add @task to
> + * @task: the task to queue for 'later' wakeup
> + *
> + * Queue a task for later wakeup, most likely by the wake_up_q() call in the
> + * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
> + * instantly.
> + *
> + * This function must be used as-if it were wake_up_process(); IOW the task
> + * must be ready to be woken at this location.
> + */
> +void wake_q_add(struct wake_q_head *head, struct task_struct *task)
> +{
> +    if (__wake_q_add(head, task))
> +        get_task_struct(task);
> +}
> +
> +/**
> + * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
> + * @head: the wake_q_head to add @task to
> + * @task: the task to queue for 'later' wakeup
> + *
> + * Queue a task for later wakeup, most likely by the wake_up_q() call in the
> + * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
> + * instantly.
> + *
> + * This function must be used as-if it were wake_up_process(); IOW the task
> + * must be ready to be woken at this location.
> + *
> + * This function is essentially a task-safe equivalent to wake_q_add(). Callers
> + * that already hold a reference to @task can call the 'safe' version and trust
> + * wake_q to do the right thing depending on whether or not the @task is
> + * already queued for wakeup.
> +void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
> +{
> +    if (!__wake_q_add(head, task))
> +        put_task_struct(task);
> }
>
> void wake_up_q(struct wake_q_head *head)
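For completeness, the overall shape of a wake_q user after this change, as a
hedged kernel-style sketch: the function, the lock, and taking the reference
right before queueing are placeholders for illustration (real callers such as
futex and rwsem obtained their reference earlier), while DEFINE_WAKE_Q(),
wake_q_add_safe(), and wake_up_q() are the real interfaces.

/* Kernel-style sketch of the deferred-wakeup pattern; 'my_lock' and
 * this function are hypothetical, not code from the patch. */
static void wake_one_waiter(spinlock_t *my_lock, struct task_struct *p)
{
	DEFINE_WAKE_Q(wake_q);

	spin_lock(my_lock);
	get_task_struct(p);		/* caller takes its own reference */
	wake_q_add_safe(&wake_q, p);	/* hand that reference to wake_q */
	spin_unlock(my_lock);

	/* Wakeups happen outside the lock; wake_up_q() drops the refs. */
	wake_up_q(&wake_q);
}
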

Acked-by: Waiman Long <longman@...hat.com>
