Message-ID: <x497ftxz2m6.fsf@segfault.boston.devel.redhat.com>
Date:	Tue, 31 Mar 2015 10:02:41 -0400
From:	Jeff Moyer <jmoyer@...hat.com>
To:	Ming Lei <ming.lei@...onical.com>
Cc:	Alexander Viro <viro@...iv.linux.org.uk>,
	linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] fs: direct-io: increase bio refcount as batch

Ming Lei <ming.lei@...onical.com> writes:

> Each bio is always submitted to the block device one at a time, so it
> isn't necessary to increase the bio refcount by one each time while
> holding dio->bio_lock.

This patch opens up a race where a completion event can come in before
the refcount for the dio has been incremented, resulting in the refcount
going negative.  I don't think that will actually cause problems, but it
is certainly ugly, and I doubt it was the intended design.
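
To make the ordering concrete, here is a minimal user-space sketch of
that interleaving (not kernel code; it only borrows the field names
from the patch and fakes the two contexts in a fixed order):

/*
 * Sketch of the race window opened by the patch as posted:
 * dio->refcount now starts at 0 and the per-bio increments are only
 * committed in the final dio_cleanup(), so a completion that runs in
 * between decrements a count that was never raised.
 */
#include <stdio.h>

int main(void)
{
	long refcount = 0;	/* patch drops the initial dio->refcount = 1 */
	long submitted_bio = 0;

	/* submitter: dio_bio_submit() hands two bios to the block layer */
	submitted_bio++;			/* bio #1 */
	submitted_bio++;			/* bio #2 */

	/* completion for bio #1 fires before the batched commit */
	refcount--;
	printf("after early completion: %ld\n", refcount);	/* prints -1 */

	/* submitter: dio_cleanup(dio, &sdio, true) commits the batch */
	refcount += submitted_bio + 1;
	printf("after batched commit:   %ld\n", refcount);	/* prints 2 */

	return 0;
}

The count only becomes consistent again once the batched increment
lands, which is presumably why the patch also changes refcount from
unsigned long to plain long.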

Before I dig into this any further, would you care to comment on why you
went down this path?  Did you see spinlock contention here?  And was
there a resultant performance improvement for some benchmark with the
patch applied?

Cheers,
Jeff

> Signed-off-by: Ming Lei <ming.lei@...onical.com>
> ---
>  fs/direct-io.c |   27 +++++++++++++++++----------
>  1 file changed, 17 insertions(+), 10 deletions(-)
>
> diff --git a/fs/direct-io.c b/fs/direct-io.c
> index 6fb00e3..57b8e73 100644
> --- a/fs/direct-io.c
> +++ b/fs/direct-io.c
> @@ -79,6 +79,8 @@ struct dio_submit {
>  	get_block_t *get_block;		/* block mapping function */
>  	dio_submit_t *submit_io;	/* IO submition function */
>  
> +	long	submitted_bio;
> +
>  	loff_t logical_offset_in_bio;	/* current first logical block in bio */
>  	sector_t final_block_in_bio;	/* current final block in bio + 1 */
>  	sector_t next_block_for_io;	/* next block to be put under IO,
> @@ -121,7 +123,7 @@ struct dio {
>  	int is_async;			/* is IO async ? */
>  	bool defer_completion;		/* defer AIO completion to workqueue? */
>  	int io_error;			/* IO error in completion path */
> -	unsigned long refcount;		/* direct_io_worker() and bios */
> +	long refcount;			/* direct_io_worker() and bios */
>  	struct bio *bio_list;		/* singly linked via bi_private */
>  	struct task_struct *waiter;	/* waiting task (NULL if none) */
>  
> @@ -383,14 +385,9 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
>  static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
>  {
>  	struct bio *bio = sdio->bio;
> -	unsigned long flags;
>  
>  	bio->bi_private = dio;
>  
> -	spin_lock_irqsave(&dio->bio_lock, flags);
> -	dio->refcount++;
> -	spin_unlock_irqrestore(&dio->bio_lock, flags);
> -
>  	if (dio->is_async && dio->rw == READ)
>  		bio_set_pages_dirty(bio);
>  
> @@ -403,15 +400,26 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
>  	sdio->bio = NULL;
>  	sdio->boundary = 0;
>  	sdio->logical_offset_in_bio = 0;
> +	sdio->submitted_bio++;
>  }
>  
>  /*
>   * Release any resources in case of a failure
>   */
> -static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
> +static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio,
> +		bool commit_refcount)
>  {
> +	unsigned long flags;
> +
>  	while (sdio->head < sdio->tail)
>  		page_cache_release(dio->pages[sdio->head++]);
> +
> +	if (!commit_refcount)
> +		return;
> +
> +	spin_lock_irqsave(&dio->bio_lock, flags);
> +	dio->refcount += (sdio->submitted_bio + 1);
> +	spin_unlock_irqrestore(&dio->bio_lock, flags);
>  }
>  
>  /*
> @@ -1215,7 +1223,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
>  	dio->i_size = i_size_read(inode);
>  
>  	spin_lock_init(&dio->bio_lock);
> -	dio->refcount = 1;
>  
>  	sdio.iter = iter;
>  	sdio.final_block_in_request =
> @@ -1234,7 +1241,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
>  
>  	retval = do_direct_IO(dio, &sdio, &map_bh);
>  	if (retval)
> -		dio_cleanup(dio, &sdio);
> +		dio_cleanup(dio, &sdio, false);
>  
>  	if (retval == -ENOTBLK) {
>  		/*
> @@ -1267,7 +1274,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
>  	 * It is possible that, we return short IO due to end of file.
>  	 * In that case, we need to release all the pages we got hold on.
>  	 */
> -	dio_cleanup(dio, &sdio);
> +	dio_cleanup(dio, &sdio, true);
>  
>  	/*
>  	 * All block lookups have been performed. For READ requests
--
