Message-Id: <1435901011-5015-1-git-send-email-yuanhan.liu@linux.intel.com>
Date:	Fri,  3 Jul 2015 13:23:31 +0800
From:	Yuanhan Liu <yuanhan.liu@linux.intel.com>
To:	linux-kernel@vger.kernel.org
Cc:	Yuanhan Liu <yuanhan.liu@linux.intel.com>,
	Dan Williams <dan.j.williams@intel.com>,
	Shaohua Li <shli@fb.com>
Subject: [PATCH] async_tx: replace page_address with kmap_atomic

A page might live in highmem, in which case page_address() returns NULL
unless the page happens to be mapped already. Map the buffers with
kmap_atomic() instead so the virtual addresses are always valid.

The strictly nested kmap_atomic()/kunmap_atomic() ordering described in
Documentation/vm/highmem.txt is followed: mappings are released in the
reverse order they were taken.
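
To illustrate the nesting rule (a minimal sketch written for this note,
not code from the patch; the helper name is made up and len is assumed
to be at most PAGE_SIZE):

	static void xor_page_into(struct page *dst, struct page *src, size_t len)
	{
		u8 *d = kmap_atomic(dst);	/* temporary mapping, valid for highmem */
		u8 *s = kmap_atomic(src);	/* nested inside the dst mapping */
		size_t i;

		/* No sleeping between kmap_atomic() and kunmap_atomic(). */
		for (i = 0; i < len; i++)
			d[i] ^= s[i];

		/* Unmap strictly in reverse (LIFO) order of the maps. */
		kunmap_atomic(s);
		kunmap_atomic(d);
	}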

CC: Dan Williams <dan.j.williams@intel.com>
CC: Shaohua Li <shli@fb.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
---
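A note for reviewers: several hunks map with an offset, e.g.
"srcs[i] = kmap_atomic(blocks[i]) + offset;", and later pass that same
offset pointer back to kunmap_atomic(). This is safe: kunmap_atomic()
accepts any address within the mapped page (the implementation masks the
address down to the page boundary), so the pattern is equivalent to
unmapping from the start of the page. A minimal illustration, again not
taken from the diff:

	u8 *buf = kmap_atomic(page) + offset;	/* map, then point into the page */
	memset(buf, 0, len);			/* use the mapped region */
	kunmap_atomic(buf);			/* an in-page pointer is accepted */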
 crypto/async_tx/async_pq.c          | 18 +++++++++++++-----
 crypto/async_tx/async_raid6_recov.c | 31 ++++++++++++++++++++++++-------
 crypto/async_tx/async_xor.c         | 17 ++++++++++++++---
 3 files changed, 51 insertions(+), 15 deletions(-)
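
For configurations without CONFIG_HIGHMEM the change should be close to
a no-op: there kmap_atomic() amounts to page_address() with pagefaults
and preemption disabled, roughly (paraphrased from the generic
include/linux/highmem.h of this era, not part of this patch):

	static inline void *kmap_atomic(struct page *page)
	{
		preempt_disable();
		pagefault_disable();
		return page_address(page);
	}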

diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 5d355e0..a408b7e 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -136,7 +136,7 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
 			srcs[i] = (void*)raid6_empty_zero_page;
 		} else {
-			srcs[i] = page_address(blocks[i]) + offset;
+			srcs[i] = kmap_atomic(blocks[i]) + offset;
 			if (i < disks - 2) {
 				stop = i;
 				if (start == -1)
@@ -150,6 +150,12 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
 	} else
 		raid6_call.gen_syndrome(disks, len, srcs);
+
+	for (i = disks; i--; ) {
+		if (blocks[i])
+			kunmap_atomic(srcs[i]);
+	}
+
 	async_tx_sync_epilog(submit);
 }
 
@@ -395,14 +401,15 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		 */
 		tx = NULL;
 		*pqres = 0;
+		s = kmap_atomic(spare) + offset;
 		if (p_src) {
 			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
 					  NULL, NULL, scribble);
 			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
 			async_tx_quiesce(&tx);
-			p = page_address(p_src) + offset;
-			s = page_address(spare) + offset;
+			p = kmap_atomic(p_src) + offset;
 			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
+			kunmap_atomic(p);
 		}
 
 		if (q_src) {
@@ -411,10 +418,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
 			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
 			async_tx_quiesce(&tx);
-			q = page_address(q_src) + offset;
-			s = page_address(spare) + offset;
+			q = kmap_atomic(q_src) + offset;
 			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
+			kunmap_atomic(q);
 		}
+		kunmap_atomic(s);
 
 		/* restore P, Q and submit */
 		P(blocks, disks) = p_src;
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 934a849..abcacb0 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -80,9 +80,9 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 	async_tx_quiesce(&submit->depend_tx);
 	amul = raid6_gfmul[coef[0]];
 	bmul = raid6_gfmul[coef[1]];
-	a = page_address(srcs[0]);
-	b = page_address(srcs[1]);
-	c = page_address(dest);
+	a = kmap_atomic(srcs[0]);
+	b = kmap_atomic(srcs[1]);
+	c = kmap_atomic(dest);
 
 	while (len--) {
 		ax    = amul[*a++];
@@ -90,6 +90,10 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 		*c++ = ax ^ bx;
 	}
 
+	kunmap_atomic(c);
+	kunmap_atomic(b);
+	kunmap_atomic(a);
+
 	return NULL;
 }
 
@@ -147,12 +151,15 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 	 */
 	async_tx_quiesce(&submit->depend_tx);
 	qmul  = raid6_gfmul[coef];
-	d = page_address(dest);
-	s = page_address(src);
+	d = kmap_atomic(dest);
+	s = kmap_atomic(src);
 
 	while (len--)
 		*d++ = qmul[*s++];
 
+	kunmap_atomic(s);
+	kunmap_atomic(d);
+
 	return NULL;
 }
 
@@ -372,10 +379,15 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 			if (blocks[i] == NULL)
 				ptrs[i] = (void *) raid6_empty_zero_page;
 			else
-				ptrs[i] = page_address(blocks[i]);
+				ptrs[i] = kmap_atomic(blocks[i]);
 
 		raid6_2data_recov(disks, bytes, faila, failb, ptrs);
 
+		for (i = disks; i--; ) {
+			if (blocks[i])
+				kunmap_atomic(ptrs[i]);
+		}
+
 		async_tx_sync_epilog(submit);
 
 		return NULL;
@@ -448,10 +460,15 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 			if (blocks[i] == NULL)
 				ptrs[i] = (void*)raid6_empty_zero_page;
 			else
-				ptrs[i] = page_address(blocks[i]);
+				ptrs[i] = kmap_atomic(blocks[i]);
 
 		raid6_datap_recov(disks, bytes, faila, ptrs);
 
+		for (i = disks; i--; ) {
+			if (blocks[i])
+				kunmap_atomic(ptrs[i]);
+		}
+
 		async_tx_sync_epilog(submit);
 
 		return NULL;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index e1bce26..e26a0cd 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -30,6 +30,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
+#include <linux/highmem.h>
 
 /* do_async_xor - dma map the pages and perform the xor with an engine */
 static __async_inline struct dma_async_tx_descriptor *
@@ -127,10 +128,10 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
 		if (src_list[i])
-			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+			srcs[xor_src_cnt++] = kmap_atomic(src_list[i]) + offset;
 	src_cnt = xor_src_cnt;
 	/* set destination address */
-	dest_buf = page_address(dest) + offset;
+	dest_buf = kmap_atomic(dest) + offset;
 
 	if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
 		memset(dest_buf, 0, len);
@@ -144,6 +145,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		src_cnt -= xor_src_cnt;
 		src_off += xor_src_cnt;
 	}
+	kunmap_atomic(dest_buf);
+	for (i = src_off; i--; )
+		kunmap_atomic(srcs[i]);
 
 	async_tx_sync_epilog(submit);
 }
@@ -235,7 +239,14 @@ EXPORT_SYMBOL_GPL(async_xor);
 
 static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 {
-	return !memchr_inv(page_address(p) + offset, 0, len);
+	void *addr;
+	int ret;
+
+	addr = kmap_atomic(p);
+	ret = !memchr_inv(addr + offset, 0, len);
+	kunmap_atomic(addr);
+
+	return ret;
 }
 
 static inline struct dma_chan *
-- 
1.9.0
