Date:	Wed, 20 Mar 2013 23:52:03 +0800
From:	Lai Jiangshan <eag0628@...il.com>
To:	Tejun Heo <tj@...nel.org>
Cc:	laijs@...fujitsu.com, axboe@...nel.dk, jack@...e.cz,
	fengguang.wu@...el.com, jmoyer@...hat.com, zab@...hat.com,
	linux-kernel@...r.kernel.org, herbert@...dor.apana.org.au,
	davem@...emloft.net, linux-crypto@...r.kernel.org
Subject: Re: [PATCH 08/10] workqueue: break init_and_link_pwq() into two
 functions and introduce alloc_unbound_pwq()

On Wed, Mar 20, 2013 at 8:00 AM, Tejun Heo <tj@...nel.org> wrote:
> Break init_and_link_pwq() into init_pwq() and link_pwq() and move
> unbound-workqueue specific handling into apply_workqueue_attrs().
> Also, factor out unbound pool and pool_workqueue allocation into
> alloc_unbound_pwq().
>
> This reorganization is to prepare for NUMA affinity and doesn't
> introduce any functional changes.
>
> Signed-off-by: Tejun Heo <tj@...nel.org>
> ---
>  kernel/workqueue.c | 75 +++++++++++++++++++++++++++++++++++++-----------------
>  1 file changed, 52 insertions(+), 23 deletions(-)
>
> diff --git a/kernel/workqueue.c b/kernel/workqueue.c
> index 3f820a5..bbbfc92 100644
> --- a/kernel/workqueue.c
> +++ b/kernel/workqueue.c
> @@ -3647,13 +3647,10 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
>         spin_unlock(&pwq->pool->lock);
>  }
>
> -static void init_and_link_pwq(struct pool_workqueue *pwq,
> -                             struct workqueue_struct *wq,
> -                             struct worker_pool *pool,
> -                             struct pool_workqueue **p_last_pwq)
> +/* initialize newly zalloced @pwq which is associated with @wq and @pool */
> +static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
> +                    struct worker_pool *pool)
>  {
> -       int node;
> -
>         BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
>
>         pwq->pool = pool;
> @@ -3663,9 +3660,16 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
>         INIT_LIST_HEAD(&pwq->delayed_works);
>         INIT_LIST_HEAD(&pwq->mayday_node);
>         INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
> +}
>
> -       mutex_lock(&wq->flush_mutex);
> -       spin_lock_irq(&pwq_lock);
> +/* sync @pwq with the current state of its associated wq and link it */
> +static void link_pwq(struct pool_workqueue *pwq,
> +                    struct pool_workqueue **p_last_pwq)
> +{
> +       struct workqueue_struct *wq = pwq->wq;
> +
> +       lockdep_assert_held(&wq->flush_mutex);
> +       lockdep_assert_held(&pwq_lock);
>
>         /*
>          * Set the matching work_color.  This is synchronized with
> @@ -3680,15 +3684,27 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
>
>         /* link in @pwq */
>         list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
> +}
>
> -       if (wq->flags & WQ_UNBOUND) {
> -               copy_workqueue_attrs(wq->unbound_attrs, pool->attrs);
> -               for_each_node(node)
> -                       rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
> +/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
> +static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
> +                                       const struct workqueue_attrs *attrs)
> +{
> +       struct worker_pool *pool;
> +       struct pool_workqueue *pwq;
> +
> +       pool = get_unbound_pool(attrs);
> +       if (!pool)
> +               return NULL;
> +
> +       pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);

This allocation is not NUMA-aware; you may want to use pool->node here.
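
For example, a minimal sketch of what I mean (assuming the pool's home
node is recorded in pool->node, and using kmem_cache_alloc_node() with
__GFP_ZERO as the node-aware equivalent of kmem_cache_zalloc()):

	/* allocate the pwq on the pool's home NUMA node */
	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL | __GFP_ZERO,
				    pool->node);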

> +       if (!pwq) {
> +               put_unbound_pool(pool);
> +               return NULL;
>         }
>
> -       spin_unlock_irq(&pwq_lock);
> -       mutex_unlock(&wq->flush_mutex);
> +       init_pwq(pwq, wq, pool);
> +       return pwq;
>  }
>
>  /**
> @@ -3709,7 +3725,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
>                           const struct workqueue_attrs *attrs)
>  {
>         struct pool_workqueue *pwq, *last_pwq;
> -       struct worker_pool *pool;
> +       int node;
>
>         /* only unbound workqueues can change attributes */
>         if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
> @@ -3719,17 +3735,22 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
>         if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
>                 return -EINVAL;
>
> -       pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
> +       pwq = alloc_unbound_pwq(wq, attrs);
>         if (!pwq)
>                 return -ENOMEM;
>
> -       pool = get_unbound_pool(attrs);
> -       if (!pool) {
> -               kmem_cache_free(pwq_cache, pwq);
> -               return -ENOMEM;
> -       }
> +       mutex_lock(&wq->flush_mutex);
> +       spin_lock_irq(&pwq_lock);
> +
> +       link_pwq(pwq, &last_pwq);
> +
> +       copy_workqueue_attrs(wq->unbound_attrs, pwq->pool->attrs);
> +       for_each_node(node)
> +               rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
> +
> +       spin_unlock_irq(&pwq_lock);
> +       mutex_unlock(&wq->flush_mutex);
>
> -       init_and_link_pwq(pwq, wq, pool, &last_pwq);
>         if (last_pwq) {
>                 spin_lock_irq(&last_pwq->pool->lock);
>                 put_pwq(last_pwq);
> @@ -3755,7 +3776,15 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
>                         struct worker_pool *cpu_pools =
>                                 per_cpu(cpu_worker_pools, cpu);
>
> -                       init_and_link_pwq(pwq, wq, &cpu_pools[highpri], NULL);
> +                       init_pwq(pwq, wq, &cpu_pools[highpri]);
> +
> +                       mutex_lock(&wq->flush_mutex);
> +                       spin_lock_irq(&pwq_lock);
> +
> +                       link_pwq(pwq, NULL);
> +
> +                       spin_unlock_irq(&pwq_lock);
> +                       mutex_unlock(&wq->flush_mutex);
>                 }
>                 return 0;
>         } else {
> --
> 1.8.1.4
>
