Message-Id: <20131202150422.f3c9db7e38f91f92d67c3149@canb.auug.org.au>
Date:	Mon, 2 Dec 2013 15:04:22 +1100
From:	Stephen Rothwell <sfr@...b.auug.org.au>
To:	Jens Axboe <axboe@...nel.dk>, Jaegeuk Kim <jaegeuk.kim@...sung.com>
Cc:	linux-next@...r.kernel.org, linux-kernel@...r.kernel.org,
	Kent Overstreet <kmo@...erainc.com>
Subject: linux-next: manual merge of the block tree with the f2fs tree

Hi Jens,

Today's linux-next merge of the block tree got conflicts in
fs/f2fs/data.c and fs/f2fs/segment.c between commits 0626804cd9a6 ("f2fs:
remove the own bi_private allocation") and 899d7625d85a ("f2fs: refactor
bio-related operations") from the f2fs tree and commits 2c30c71bd653
("block: Convert various code to bio_for_each_segment()") and  ("block:
Abstract out bvec iterator") from the block tree.
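
For context, the block-tree side of this is mostly mechanical: the
open-coded walk over bio->bi_io_vec in the end_io handlers becomes
bio_for_each_segment_all(), and bi_sector moves under bio->bi_iter.
The pattern change looks roughly like this (illustrative sketch only,
with made-up function names, not the actual patch):

/* Standalone sketch; in-tree code gets these via existing includes. */
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/prefetch.h>

/* Old style: walk bio->bi_io_vec backwards by hand. */
static void example_end_io_old(struct bio *bio, int err)
{
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		/* per-page completion work goes here */
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

/* New style: let bio_for_each_segment_all() do the walking. */
static void example_end_io_new(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		/* per-page completion work goes here */
		unlock_page(page);
	}

	bio_put(bio);
}

The f2fs refactoring rewrites the same handlers at the same time, which
is why both fs/f2fs/data.c and fs/f2fs/segment.c conflict.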

I fixed it up (hopefully - see below - the handling of sbi in
f2fs_write_end_io is not the best) and can carry the fix as necessary (no
action is required).
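
An alternative for the sbi problem would be to look it up once from the
first segment before the loop, on the assumption (untested, but the same
one the old code made by peeking at the last bvec) that every page in a
merged f2fs write bio belongs to the same superblock - a variant of the
handler in the diff below, something like:

static void f2fs_write_end_io(struct bio *bio, int err)
{
	/*
	 * Sketch only: assumes all pages in this bio come from the same
	 * f2fs instance, so one sbi lookup up front is enough instead of
	 * re-testing sbi on every iteration.
	 */
	struct f2fs_sb_info *sbi =
		F2FS_SB(bio->bi_io_vec[0].bv_page->mapping->host->i_sb);
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (err) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
			sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (bio->bi_private)
		complete(bio->bi_private);

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

The resolution below instead picks sbi up lazily from the first page the
loop sees, which keeps the change minimal; whether either assumption is
the right one is for the f2fs folks to confirm.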

-- 
Cheers,
Stephen Rothwell                    sfr@...b.auug.org.au

diff --cc fs/f2fs/data.c
index 53e3bbbba7ed,a2c8de8ba6ce..b62a99cf6561
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@@ -25,205 -25,6 +25,201 @@@
  #include <trace/events/f2fs.h>
  
  /*
 + * Low-level block read/write IO operations.
 + */
 +static struct bio *__bio_alloc(struct block_device *bdev, int npages)
 +{
 +	struct bio *bio;
 +
 +	/* No failure on bio allocation */
 +	bio = bio_alloc(GFP_NOIO, npages);
 +	bio->bi_bdev = bdev;
 +	bio->bi_private = NULL;
 +	return bio;
 +}
 +
 +static void f2fs_read_end_io(struct bio *bio, int err)
 +{
- 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
++	struct bio_vec *bvec;
++	int i;
 +
- 	do {
++	bio_for_each_segment_all(bvec, bio, i) {
 +		struct page *page = bvec->bv_page;
 +
- 		if (--bvec >= bio->bi_io_vec)
- 			prefetchw(&bvec->bv_page->flags);
- 
- 		if (uptodate) {
++		if (!err) {
 +			SetPageUptodate(page);
 +		} else {
 +			ClearPageUptodate(page);
 +			SetPageError(page);
 +		}
 +		unlock_page(page);
- 	} while (bvec >= bio->bi_io_vec);
++	}
 +
 +	bio_put(bio);
 +}
 +
 +static void f2fs_write_end_io(struct bio *bio, int err)
 +{
- 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- 	struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);
++	struct f2fs_sb_info *sbi = NULL;
++	struct bio_vec *bvec;
++	int i;
 +
- 	do {
++	bio_for_each_segment_all(bvec, bio, i) {
 +		struct page *page = bvec->bv_page;
 +
- 		if (--bvec >= bio->bi_io_vec)
- 			prefetchw(&bvec->bv_page->flags);
- 
- 		if (!uptodate) {
++		if (!sbi)
++			sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);
++		if (err) {
 +			SetPageError(page);
 +			set_bit(AS_EIO, &page->mapping->flags);
 +			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
 +			sbi->sb->s_flags |= MS_RDONLY;
 +		}
 +		end_page_writeback(page);
 +		dec_page_count(sbi, F2FS_WRITEBACK);
- 	} while (bvec >= bio->bi_io_vec);
++	}
 +
 +	if (bio->bi_private)
 +		complete(bio->bi_private);
 +
 +	if (!get_pages(sbi, F2FS_WRITEBACK) &&
 +			!list_empty(&sbi->cp_wait.task_list))
 +		wake_up(&sbi->cp_wait);
 +
 +	bio_put(bio);
 +}
 +
 +static void __submit_merged_bio(struct f2fs_sb_info *sbi,
 +				struct f2fs_bio_info *io,
 +				enum page_type type, bool sync, int rw)
 +{
 +	enum page_type btype = PAGE_TYPE_OF_BIO(type);
 +
 +	if (!io->bio)
 +		return;
 +
 +	if (btype == META)
 +		rw |= REQ_META;
 +
 +	if (is_read_io(rw)) {
 +		if (sync)
 +			rw |= READ_SYNC;
 +		submit_bio(rw, io->bio);
 +		trace_f2fs_submit_read_bio(sbi->sb, rw, type, io->bio);
 +		io->bio = NULL;
 +		return;
 +	}
 +
 +	if (sync)
 +		rw |= WRITE_SYNC;
 +	if (type >= META_FLUSH)
 +		rw |= WRITE_FLUSH_FUA;
 +
 +	/*
 +	 * META_FLUSH is only from the checkpoint procedure, and we should wait
 +	 * this metadata bio for FS consistency.
 +	 */
 +	if (type == META_FLUSH) {
 +		DECLARE_COMPLETION_ONSTACK(wait);
 +		io->bio->bi_private = &wait;
 +		submit_bio(rw, io->bio);
 +		wait_for_completion(&wait);
 +	} else {
 +		submit_bio(rw, io->bio);
 +	}
 +	trace_f2fs_submit_write_bio(sbi->sb, rw, btype, io->bio);
 +	io->bio = NULL;
 +}
 +
 +void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
 +				enum page_type type, bool sync, int rw)
 +{
 +	enum page_type btype = PAGE_TYPE_OF_BIO(type);
 +	struct f2fs_bio_info *io;
 +
 +	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
 +
 +	mutex_lock(&io->io_mutex);
 +	__submit_merged_bio(sbi, io, type, sync, rw);
 +	mutex_unlock(&io->io_mutex);
 +}
 +
 +/*
 + * Fill the locked page with data located in the block address.
 + * Return unlocked page.
 + */
 +int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
 +					block_t blk_addr, int rw)
 +{
 +	struct block_device *bdev = sbi->sb->s_bdev;
 +	struct bio *bio;
 +
 +	trace_f2fs_submit_page_bio(page, blk_addr, rw);
 +
 +	/* Allocate a new bio */
 +	bio = __bio_alloc(bdev, 1);
 +
 +	/* Initialize the bio */
- 	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
++	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
 +	bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io : f2fs_write_end_io;
 +
 +	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
 +		bio_put(bio);
 +		f2fs_put_page(page, 1);
 +		return -EFAULT;
 +	}
 +
 +	submit_bio(rw, bio);
 +	return 0;
 +}
 +
 +void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
 +			block_t blk_addr, enum page_type type, int rw)
 +{
 +	enum page_type btype = PAGE_TYPE_OF_BIO(type);
 +	struct block_device *bdev = sbi->sb->s_bdev;
 +	struct f2fs_bio_info *io;
 +	int bio_blocks;
 +
 +	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
 +
 +	verify_block_addr(sbi, blk_addr);
 +
 +	mutex_lock(&io->io_mutex);
 +
 +	if (!is_read_io(rw))
 +		inc_page_count(sbi, F2FS_WRITEBACK);
 +
 +	if (io->bio && io->last_block_in_bio != blk_addr - 1)
 +		__submit_merged_bio(sbi, io, type, true, rw);
 +alloc_new:
 +	if (io->bio == NULL) {
 +		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
 +		io->bio = __bio_alloc(bdev, bio_blocks);
- 		io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
++		io->bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
 +		io->bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io :
 +							f2fs_write_end_io;
 +		/*
 +		 * The end_io will be assigned at the submission phase.
 +		 * Until then, let bio_add_page() merge consecutive IOs as much
 +		 * as possible.
 +		 */
 +	}
 +
 +	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
 +							PAGE_CACHE_SIZE) {
 +		__submit_merged_bio(sbi, io, type, true, rw);
 +		io->bio = NULL;
 +		goto alloc_new;
 +	}
 +
 +	io->last_block_in_bio = blk_addr;
 +
 +	mutex_unlock(&io->io_mutex);
 +	trace_f2fs_submit_page_mbio(page, rw, type, blk_addr);
 +}
 +
 +/*
   * Lock ordering for the change of data block address:
   * ->data_page
   *  ->node_page
