Message-ID: <CAJhGHyC+oyX6fqnR1-JPnNgPuDpikU6KYr1iXrj7BDZHnjPGTA@mail.gmail.com>
Date:   Tue, 13 Jul 2021 13:56:12 +0800
From:   Lai Jiangshan <jiangshanlai@...il.com>
To:     Tejun Heo <tj@...nel.org>
Cc:     Yang Yingliang <yangyingliang@...wei.com>,
        LKML <linux-kernel@...r.kernel.org>,
        Xu Qiang <xuqiang36@...wei.com>,
        Pavel Skripkin <paskripkin@...il.com>
Subject: Re: [PATCH v2] workqueue: fix UAF in pwq_unbound_release_workfn()

On Tue, Jul 13, 2021 at 1:12 AM Tejun Heo <tj@...nel.org> wrote:
>
> Hello, Yang.
>
> > +static void free_pwq(struct pool_workqueue *pwq)
> > +{
> > +     if (!pwq || --pwq->refcnt)
> > +             return;
> > +
> > +     put_unbound_pool(pwq->pool);
> > +     kmem_cache_free(pwq_cache, pwq);
> > +}
> > +
> > +static void free_wqattrs_ctx(struct apply_wqattrs_ctx *ctx)
> > +{
> > +     int node;
> > +
> > +     if (!ctx)
> > +             return;
> > +
> > +     for_each_node(node)
> > +             free_pwq(ctx->pwq_tbl[node]);
> > +     free_pwq(ctx->dfl_pwq);
> > +
> > +     free_workqueue_attrs(ctx->attrs);
> > +
> > +     kfree(ctx);
> > +}
>
> It bothers me that we're partially replicating the free path including pwq
> refcnting.

The duplicated code could be reduced by merging
apply_wqattrs_cleanup() into apply_wqattrs_commit().
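
Roughly, and only as an untested sketch based on the current bodies of
apply_wqattrs_commit() and apply_wqattrs_cleanup(), the merged function
could look like:

static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
{
	int node;

	/* all pwqs have been created successfully, let's install'em */
	mutex_lock(&ctx->wq->mutex);

	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);

	/* save the previous pwq and install the new one */
	for_each_node(node)
		ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
							  ctx->pwq_tbl[node]);

	/* @dfl_pwq might not have been used, ensure it's linked */
	link_pwq(ctx->dfl_pwq);
	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);

	mutex_unlock(&ctx->wq->mutex);

	/* absorbed from apply_wqattrs_cleanup(): put the old pwqs */
	for_each_node(node)
		put_pwq_unlocked(ctx->pwq_tbl[node]);
	put_pwq_unlocked(ctx->dfl_pwq);

	free_workqueue_attrs(ctx->attrs);
	kfree(ctx);
}

A caller that never reaches commit would still need a separate
teardown, of course.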

> Does something like the following work?

It works, since it adds a flush_scheduled_work() in
alloc_and_link_pwqs(). But I don't think it works for
workqueue_apply_unbound_cpumask(), where apply_wqattrs_commit()
may not be called.
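
For reference, the failure handling there looks roughly like this in
the current tree (abbreviated):

	list_for_each_entry(wq, &workqueues, list) {
		...
		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
		if (!ctx) {
			ret = -ENOMEM;
			break;
		}
		list_add_tail(&ctx->list, &ctxs);
	}

	list_for_each_entry_safe(ctx, n, &ctxs, list) {
		if (!ret)
			apply_wqattrs_commit(ctx);
		apply_wqattrs_cleanup(ctx);
	}

When apply_wqattrs_prepare() fails partway through, commit is never
called for the already-prepared ctxs, yet apply_wqattrs_cleanup() still
has to put their pwqs, and nothing on this path goes through
alloc_and_link_pwqs(), so a flush_scheduled_work() added there can't
cover it.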

If we want to reuse the current apply_wqattrs_cleanup(), I would prefer
something like this: (untested)

@@ -3680,15 +3676,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
                                                  unbound_release_work);
        struct workqueue_struct *wq = pwq->wq;
        struct worker_pool *pool = pwq->pool;
-       bool is_last;
+       bool is_last = false;

-       if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-               return;
+       /*
+        * When @pwq is not linked, it doesn't hold any reference to
+        * @wq, and @wq is invalid to access.
+        */
+       if (!list_empty(&pwq->pwqs_node)) {
+               if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+                       return;

-       mutex_lock(&wq->mutex);
-       list_del_rcu(&pwq->pwqs_node);
-       is_last = list_empty(&wq->pwqs);
-       mutex_unlock(&wq->mutex);
+               mutex_lock(&wq->mutex);
+               list_del_rcu(&pwq->pwqs_node);
+               is_last = list_empty(&wq->pwqs);
+               mutex_unlock(&wq->mutex);
+       }

        mutex_lock(&wq_pool_mutex);
        put_unbound_pool(pool);

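(Note the list_empty() test relies on init_pwq() initializing the node,
which it already does:

	INIT_LIST_HEAD(&pwq->pwqs_node);

so a pwq that was never linked via link_pwq() is distinguishable in the
release work function.)
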
>
> diff --git a/kernel/workqueue.c b/kernel/workqueue.c
> index 104e3ef04e33..0c0ab363edeb 100644
> --- a/kernel/workqueue.c
> +++ b/kernel/workqueue.c
> @@ -3693,7 +3693,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
>          * If we're the last pwq going away, @wq is already dead and no one
>          * is gonna access it anymore.  Schedule RCU free.
>          */
> -       if (is_last) {
> +       if (is_last && !list_empty(&wq->list)) {
>                 wq_unregister_lockdep(wq);
>                 call_rcu(&wq->rcu, rcu_free_wq);
>         }
> @@ -4199,6 +4199,10 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
>         }
>         put_online_cpus();
>
> +       if (ret) {
> +               flush_scheduled_work();
> +       }
> +
>         return ret;
>  }
>
> --
> tejun
