Message-ID: <39f4a1f0-7f84-ad84-c9f8-e45e63346190@huaweicloud.com>
Date:   Wed, 14 Sep 2022 18:21:10 +0800
From:   Yu Kuai <yukuai1@...weicloud.com>
To:     Yu Kuai <yukuai1@...weicloud.com>, song@...nel.org,
        logang@...tatee.com, guoqing.jiang@...ux.dev, pmenzel@...gen.mpg.de
Cc:     linux-raid@...r.kernel.org, linux-kernel@...r.kernel.org,
        yi.zhang@...wei.com, "yukuai (C)" <yukuai3@...wei.com>
Subject: Re: [PATCH v2 2/4] md/raid10: prevent unnecessary calls to wake_up()
 in fast path

On 2022/09/14 9:49, Yu Kuai wrote:
> From: Yu Kuai <yukuai3@...wei.com>
> 
> Currently, wake_up() is called unconditionally in the fast path, such as
> raid10_make_request(), which causes lock contention under high
> concurrency:
> 
> raid10_make_request
>   wake_up
>    __wake_up_common_lock
>     spin_lock_irqsave
> 
> Improve performance by only calling wake_up() if the waitqueue is not empty.
> 
Hi,

I'm replacing all the wake_up() calls here; currently I'm not quite sure
that's OK, since "conf->wait_barrier" is used for many purposes.

Perhaps I should just replace the hot path here (raid10_make_request()
and allow_barrier())?
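
A rough, untested sketch of what that hot-path-only variant would look
like, just to illustrate the question (all other call sites would keep
the plain wake_up()):

static void wake_up_barrier(struct r10conf *conf)
{
	/* Skip the waitqueue lock entirely when nobody is sleeping. */
	if (wq_has_sleeper(&conf->wait_barrier))
		wake_up(&conf->wait_barrier);
}

static void allow_barrier(struct r10conf *conf)
{
	if ((atomic_dec_and_test(&conf->nr_pending)) ||
			(conf->array_freeze_pending))
		wake_up_barrier(conf);
}

with raid10_make_request() calling wake_up_barrier(conf) instead of
wake_up(&conf->wait_barrier) at the end, and every other caller left
unchanged.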

Thanks,
Kuai

> Signed-off-by: Yu Kuai <yukuai3@...wei.com>
> ---
>   drivers/md/raid10.c | 26 ++++++++++++++++----------
>   1 file changed, 16 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
> index 56458a53043d..0edcd98461fe 100644
> --- a/drivers/md/raid10.c
> +++ b/drivers/md/raid10.c
> @@ -274,6 +274,12 @@ static void put_buf(struct r10bio *r10_bio)
>   	lower_barrier(conf);
>   }
>   
> +static void wake_up_barrier(struct r10conf *conf)
> +{
> +	if (wq_has_sleeper(&conf->wait_barrier))
> +		wake_up(&conf->wait_barrier);
> +}
> +
>   static void reschedule_retry(struct r10bio *r10_bio)
>   {
>   	unsigned long flags;
> @@ -286,7 +292,7 @@ static void reschedule_retry(struct r10bio *r10_bio)
>   	spin_unlock_irqrestore(&conf->device_lock, flags);
>   
>   	/* wake up frozen array... */
> -	wake_up(&conf->wait_barrier);
> +	wake_up_barrier(conf);
>   
>   	md_wakeup_thread(mddev->thread);
>   }
> @@ -884,7 +890,7 @@ static void flush_pending_writes(struct r10conf *conf)
>   		/* flush any pending bitmap writes to disk
>   		 * before proceeding w/ I/O */
>   		md_bitmap_unplug(conf->mddev->bitmap);
> -		wake_up(&conf->wait_barrier);
> +		wake_up_barrier(conf);
>   
>   		while (bio) { /* submit pending writes */
>   			struct bio *next = bio->bi_next;
> @@ -954,7 +960,7 @@ static void lower_barrier(struct r10conf *conf)
>   	spin_lock_irqsave(&conf->resync_lock, flags);
>   	conf->barrier--;
>   	spin_unlock_irqrestore(&conf->resync_lock, flags);
> -	wake_up(&conf->wait_barrier);
> +	wake_up_barrier(conf);
>   }
>   
>   static bool stop_waiting_barrier(struct r10conf *conf)
> @@ -1004,7 +1010,7 @@ static bool wait_barrier(struct r10conf *conf, bool nowait)
>   			conf->nr_waiting--;
>   		}
>   		if (!conf->nr_waiting)
> -			wake_up(&conf->wait_barrier);
> +			wake_up_barrier(conf);
>   	}
>   	/* Only increment nr_pending when we wait */
>   	if (ret)
> @@ -1017,7 +1023,7 @@ static void allow_barrier(struct r10conf *conf)
>   {
>   	if ((atomic_dec_and_test(&conf->nr_pending)) ||
>   			(conf->array_freeze_pending))
> -		wake_up(&conf->wait_barrier);
> +		wake_up_barrier(conf);
>   }
>   
>   static void freeze_array(struct r10conf *conf, int extra)
> @@ -1053,7 +1059,7 @@ static void unfreeze_array(struct r10conf *conf)
>   	spin_lock_irq(&conf->resync_lock);
>   	conf->barrier--;
>   	conf->nr_waiting--;
> -	wake_up(&conf->wait_barrier);
> +	wake_up_barrier(conf);
>   	spin_unlock_irq(&conf->resync_lock);
>   }
>   
> @@ -1078,7 +1084,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
>   		spin_lock_irq(&conf->device_lock);
>   		bio_list_merge(&conf->pending_bio_list, &plug->pending);
>   		spin_unlock_irq(&conf->device_lock);
> -		wake_up(&conf->wait_barrier);
> +		wake_up_barrier(conf);
>   		md_wakeup_thread(mddev->thread);
>   		kfree(plug);
>   		return;
> @@ -1087,7 +1093,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
>   	/* we aren't scheduling, so we can do the write-out directly. */
>   	bio = bio_list_get(&plug->pending);
>   	md_bitmap_unplug(mddev->bitmap);
> -	wake_up(&conf->wait_barrier);
> +	wake_up_barrier(conf);
>   
>   	while (bio) { /* submit pending writes */
>   		struct bio *next = bio->bi_next;
> @@ -1893,7 +1899,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
>   	__make_request(mddev, bio, sectors);
>   
>   	/* In case raid10d snuck in to freeze_array */
> -	wake_up(&conf->wait_barrier);
> +	wake_up_barrier(conf);
>   	return true;
>   }
>   
> @@ -3040,7 +3046,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
>   			 * In case freeze_array() is waiting for condition
>   			 * nr_pending == nr_queued + extra to be true.
>   			 */
> -			wake_up(&conf->wait_barrier);
> +			wake_up_barrier(conf);
>   			md_wakeup_thread(conf->mddev->thread);
>   		} else {
>   			if (test_bit(R10BIO_WriteError,
> 
