lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <533414C7.1050607@cn.fujitsu.com>
Date:	Thu, 27 Mar 2014 20:08:39 +0800
From:	Lai Jiangshan <laijs@...fujitsu.com>
To:	Lai Jiangshan <laijs@...fujitsu.com>
CC:	Tejun Heo <tj@...nel.org>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] workqueue: add __WQ_FREEZING and remove POOL_FREEZING

On 03/25/2014 05:56 PM, Lai Jiangshan wrote:
> freezing is not related to pools, but POOL_FREEZING adds a connection,
> and makes freeze_workqueues_begin() and thaw_workqueues() complicated.
> 
> Since freezing is a workqueue instance attribute, we introduce __WQ_FREEZING
> in wq->flags instead and remove POOL_FREEZING.
> 
> We set __WQ_FREEZING only when the workqueue is freezable (to simplify
> pwq_adjust_max_active()), and make freeze_workqueues_begin() and
> thaw_workqueues() quickly skip non-freezable wqs.
> 
> Changed from previous patches(requested by tj):
> 	1) added the WARN_ON_ONCE() back
> 	2) merged the two patches as one

Ping.

Hi, Tejun

You have reviewed this patch over several rounds.
I have applied all the requests from your comments (the last two are listed above).

I'm deeply sorry for responding so late.

Thanks,
Lai


> 
> Signed-off-by: Lai Jiangshan <laijs@...fujitsu.com>
> ---
>  include/linux/workqueue.h |    1 +
>  kernel/workqueue.c        |   43 ++++++++++++-------------------------------
>  2 files changed, 13 insertions(+), 31 deletions(-)
> 
> diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
> index 704f4f6..a45202b 100644
> --- a/include/linux/workqueue.h
> +++ b/include/linux/workqueue.h
> @@ -335,6 +335,7 @@ enum {
>  	 */
>  	WQ_POWER_EFFICIENT	= 1 << 7,
>  
> +	__WQ_FREEZING		= 1 << 15, /* internal: workqueue is freezing */
>  	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
>  	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
>  
> diff --git a/kernel/workqueue.c b/kernel/workqueue.c
> index 193e977..0c74979 100644
> --- a/kernel/workqueue.c
> +++ b/kernel/workqueue.c
> @@ -70,7 +70,6 @@ enum {
>  	 */
>  	POOL_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
>  	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
> -	POOL_FREEZING		= 1 << 3,	/* freeze in progress */
>  
>  	/* worker flags */
>  	WORKER_STARTED		= 1 << 0,	/* started */
> @@ -3632,9 +3631,6 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
>  	if (!pool || init_worker_pool(pool) < 0)
>  		goto fail;
>  
> -	if (workqueue_freezing)
> -		pool->flags |= POOL_FREEZING;
> -
>  	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
>  	copy_workqueue_attrs(pool->attrs, attrs);
>  
> @@ -3730,18 +3726,13 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
>  static void pwq_adjust_max_active(struct pool_workqueue *pwq)
>  {
>  	struct workqueue_struct *wq = pwq->wq;
> -	bool freezable = wq->flags & WQ_FREEZABLE;
>  
> -	/* for @wq->saved_max_active */
> +	/* for @wq->saved_max_active and @wq->flags */
>  	lockdep_assert_held(&wq->mutex);
>  
> -	/* fast exit for non-freezable wqs */
> -	if (!freezable && pwq->max_active == wq->saved_max_active)
> -		return;
> -
>  	spin_lock_irq(&pwq->pool->lock);
>  
> -	if (!freezable || !(pwq->pool->flags & POOL_FREEZING)) {
> +	if (!(wq->flags & __WQ_FREEZING)) {
>  		pwq->max_active = wq->saved_max_active;
>  
>  		while (!list_empty(&pwq->delayed_works) &&
> @@ -4250,6 +4241,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
>  	mutex_lock(&wq_pool_mutex);
>  
>  	mutex_lock(&wq->mutex);
> +	if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing)
> +		wq->flags |= __WQ_FREEZING;
>  	for_each_pwq(pwq, wq)
>  		pwq_adjust_max_active(pwq);
>  	mutex_unlock(&wq->mutex);
> @@ -4856,26 +4849,20 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
>   */
>  void freeze_workqueues_begin(void)
>  {
> -	struct worker_pool *pool;
>  	struct workqueue_struct *wq;
>  	struct pool_workqueue *pwq;
> -	int pi;
>  
>  	mutex_lock(&wq_pool_mutex);
>  
>  	WARN_ON_ONCE(workqueue_freezing);
>  	workqueue_freezing = true;
>  
> -	/* set FREEZING */
> -	for_each_pool(pool, pi) {
> -		spin_lock_irq(&pool->lock);
> -		WARN_ON_ONCE(pool->flags & POOL_FREEZING);
> -		pool->flags |= POOL_FREEZING;
> -		spin_unlock_irq(&pool->lock);
> -	}
> -
>  	list_for_each_entry(wq, &workqueues, list) {
> +		if (!(wq->flags & WQ_FREEZABLE))
> +			continue;
>  		mutex_lock(&wq->mutex);
> +		WARN_ON_ONCE(wq->flags & __WQ_FREEZING);
> +		wq->flags |= __WQ_FREEZING;
>  		for_each_pwq(pwq, wq)
>  			pwq_adjust_max_active(pwq);
>  		mutex_unlock(&wq->mutex);
> @@ -4943,25 +4930,19 @@ void thaw_workqueues(void)
>  {
>  	struct workqueue_struct *wq;
>  	struct pool_workqueue *pwq;
> -	struct worker_pool *pool;
> -	int pi;
>  
>  	mutex_lock(&wq_pool_mutex);
>  
>  	if (!workqueue_freezing)
>  		goto out_unlock;
>  
> -	/* clear FREEZING */
> -	for_each_pool(pool, pi) {
> -		spin_lock_irq(&pool->lock);
> -		WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
> -		pool->flags &= ~POOL_FREEZING;
> -		spin_unlock_irq(&pool->lock);
> -	}
> -
>  	/* restore max_active and repopulate worklist */
>  	list_for_each_entry(wq, &workqueues, list) {
> +		if (!(wq->flags & WQ_FREEZABLE))
> +			continue;
>  		mutex_lock(&wq->mutex);
> +		WARN_ON_ONCE(!(wq->flags & __WQ_FREEZING));
> +		wq->flags &= ~__WQ_FREEZING;
>  		for_each_pwq(pwq, wq)
>  			pwq_adjust_max_active(pwq);
>  		mutex_unlock(&wq->mutex);

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ