Date:	Wed,  1 Apr 2009 22:44:25 +0900
From:	Tejun Heo <tj@...nel.org>
To:	axboe@...nel.dk, bharrosh@...asas.com,
	linux-kernel@...r.kernel.org, fujita.tomonori@....ntt.co.jp
Cc:	Tejun Heo <tj@...nel.org>
Subject: [PATCH 10/17] bio: use bio_create_from_sgl() in bio_map_user_iov()

Impact: cleanup

bio_map_user_iov() differs from the other three operations (user-copy,
kern-map, kern-copy) in how it acquires the pages to map, so it still
has to get the pages itself; the bio creation, however, can use
bio_create_from_sgl().  Build an sgl from the mapped pages and create
the bio from it.  The code will be simplified further by upcoming
changes to bio_create_from_sgl().

Signed-off-by: Tejun Heo <tj@...nel.org>
---
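For reference, the new flow is: pin the user pages with
get_user_pages_fast(), describe each page fragment with sg_set_page(),
and hand the finished table to bio_create_from_sgl() (introduced
earlier in this series; its signature below matches the call added by
this patch).  A minimal sketch of that pattern for a single user
buffer follows.  map_one_user_segment() is a hypothetical helper for
illustration only; the real function in the diff walks a full iovec
and also releases any pages that did not make it into the bio.

/*
 * Illustration only -- not part of this patch.  Assumes
 * <linux/mm.h>, <linux/scatterlist.h>, <linux/bio.h> and the
 * bio_create_from_sgl() helper from earlier in this series.
 */
static struct bio *map_one_user_segment(struct request_queue *q,
					unsigned long uaddr,
					unsigned long len,
					int rw, gfp_t gfp)
{
	unsigned long offset = offset_in_page(uaddr);
	int nr_pages = PFN_UP(uaddr + len) - PFN_DOWN(uaddr);
	struct scatterlist *sgl;
	struct page **pages;
	struct bio *bio = ERR_PTR(-ENOMEM);
	int i, ret = 0;

	sgl = kmalloc(nr_pages * sizeof(sgl[0]), gfp);
	pages = kcalloc(nr_pages, sizeof(pages[0]), gfp);
	if (!sgl || !pages)
		goto out;
	sg_init_table(sgl, nr_pages);

	/* pin the user pages backing [uaddr, uaddr + len) */
	ret = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages);
	if (ret < nr_pages) {
		bio = ERR_PTR(-EFAULT);
		goto out_release;
	}

	/* one sg entry per page fragment of the buffer */
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = min(PAGE_SIZE - offset, len);

		sg_set_page(&sgl[i], pages[i], bytes, offset);
		len -= bytes;
		offset = 0;
	}

	/* on success the bio holds the page references until unmap */
	bio = bio_create_from_sgl(q, sgl, nr_pages, nr_pages, rw, gfp);

out_release:
	if (IS_ERR(bio))
		for (i = 0; i < ret; i++)
			page_cache_release(pages[i]);
out:
	kfree(pages);
	kfree(sgl);
	return bio;
}
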
 fs/bio.c |   79 ++++++++++++++++++++++++++------------------------------------
 1 files changed, 33 insertions(+), 46 deletions(-)

diff --git a/fs/bio.c b/fs/bio.c
index 4540afc..1ca8b16 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1059,13 +1059,13 @@ struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *md,
 struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
 			     struct iovec *iov, int count, int rw, gfp_t gfp)
 {
-	int i, j;
 	size_t tot_len = 0;
 	int nr_pages = 0;
+	int nents = 0;
+	struct scatterlist *sgl;
 	struct page **pages;
 	struct bio *bio;
-	int cur_page = 0;
-	int ret, offset;
+	int i, ret;
 
 	for (i = 0; i < count; i++) {
 		unsigned long uaddr = (unsigned long)iov[i].iov_base;
@@ -1088,70 +1088,57 @@ struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
 	if (!nr_pages || tot_len & q->dma_pad_mask)
 		return ERR_PTR(-EINVAL);
 
-	bio = bio_kmalloc(gfp, nr_pages);
-	if (!bio)
+	sgl = kmalloc(nr_pages * sizeof(sgl[0]), gfp);
+	if (!sgl)
 		return ERR_PTR(-ENOMEM);
+	sg_init_table(sgl, nr_pages);
 
 	ret = -ENOMEM;
-	pages = kcalloc(nr_pages, sizeof(struct page *), gfp);
+	pages = kcalloc(nr_pages, sizeof(pages[0]), gfp);
 	if (!pages)
-		goto out;
+		goto err_sgl;
 
 	for (i = 0; i < count; i++) {
 		unsigned long uaddr = (unsigned long)iov[i].iov_base;
 		unsigned long len = iov[i].iov_len;
-		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		unsigned long start = uaddr >> PAGE_SHIFT;
-		const int local_nr_pages = end - start;
-		const int page_limit = cur_page + local_nr_pages;
-		
+		unsigned long offset = offset_in_page(uaddr);
+		int local_nr_pages = PFN_UP(uaddr + len) - PFN_DOWN(uaddr);
+
 		ret = get_user_pages_fast(uaddr, local_nr_pages, rw == READ,
-					  &pages[cur_page]);
+					  &pages[nents]);
 		if (ret < local_nr_pages) {
 			ret = -EFAULT;
-			goto out_unmap;
+			goto err_pages;
 		}
 
-		offset = uaddr & ~PAGE_MASK;
-		for (j = cur_page; j < page_limit; j++) {
-			unsigned int bytes = PAGE_SIZE - offset;
+		while (len) {
+			unsigned int bytes = min(PAGE_SIZE - offset, len);
 
-			if (len <= 0)
-				break;
-			
-			if (bytes > len)
-				bytes = len;
-
-			/*
-			 * sorry...
-			 */
-			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
-					    bytes)
-				break;
+			sg_set_page(&sgl[nents], pages[nents], bytes, offset);
 
+			nents++;
 			len -= bytes;
 			offset = 0;
 		}
-
-		cur_page = j;
-		/*
-		 * release the pages we didn't map into the bio, if any
-		 */
-		while (j < page_limit)
-			page_cache_release(pages[j++]);
 	}
+	BUG_ON(nents != nr_pages);
 
-	kfree(pages);
+	bio = bio_create_from_sgl(q, sgl, nents, nr_pages, rw, gfp);
+	if (IS_ERR(bio)) {
+		ret = PTR_ERR(bio);
+		goto err_pages;
+	}
 
-	/*
-	 * set data direction, and check if mapped pages need bouncing
-	 */
-	if (rw == WRITE)
-		bio->bi_rw |= (1 << BIO_RW);
+	/* release the pages we didn't map into the bio, if any */
+	for (i = bio->bi_vcnt; i < nr_pages; i++)
+		page_cache_release(pages[i]);
 
 	bio->bi_bdev = bdev;
 	bio->bi_flags |= (1 << BIO_USER_MAPPED);
 
+	kfree(sgl);
+	kfree(pages);
+
 	/*
 	 * subtle -- if __bio_map_user() ended up bouncing a bio,
 	 * it would normally disappear when its bi_end_io is run.
@@ -1162,15 +1149,15 @@ struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
 
 	return bio;
 
- out_unmap:
+err_pages:
 	for (i = 0; i < nr_pages; i++) {
-		if(!pages[i])
+		if (!pages[i])
 			break;
 		page_cache_release(pages[i]);
 	}
- out:
 	kfree(pages);
-	bio_put(bio);
+err_sgl:
+	kfree(sgl);
 	return ERR_PTR(ret);
 }
 
-- 
1.6.0.2
