Message-Id: <1442939270-9651-1-git-send-email-vkuznets@redhat.com>
Date:	Tue, 22 Sep 2015 18:27:50 +0200
From:	Vitaly Kuznetsov <vkuznets@...hat.com>
To:	linux-scsi@...r.kernel.org
Cc:	linux-kernel@...r.kernel.org, devel@...uxdriverproject.org,
	"K. Y. Srinivasan" <kys@...rosoft.com>,
	Haiyang Zhang <haiyangz@...rosoft.com>,
	"James E.J. Bottomley" <JBottomley@...n.com>,
	Radim Krčmář <rkrcmar@...hat.com>
Subject: [PATCH] storvsc: get rid of homegrown copy_{to,from}_bounce_buffer()

The storvsc driver needs to ensure there are no 'holes' in the presented
sg list (all segments in the middle of the list must be exactly
PAGE_SIZE). When a hole is detected, the driver creates a 'bounce sgl'
without holes and copies the data over with its own homegrown
copy_{to,from}_bounce_buffer() functions. The SCSI layer already
provides scsi_sg_copy_{to,from}_buffer() for linearizing an sg list; the
only difference is that these existing helpers copy into a flat buffer
instead of building a new scatterlist, and we can cope with that.

Reported-by: Radim Krčmář <rkrcmar@...hat.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
---
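For reference, a minimal sketch (not part of the patch) of the
flat-buffer bounce path the driver is switched to below; the
bounce_example_* helper names are illustrative only, while
scsi_sg_copy_{to,from}_buffer() are the real SCSI-layer helpers that
gather/scatter a command's sg list into/out of a flat buffer:

#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>

/* Allocate a flat bounce buffer; for writes, gather the sg list into it. */
static void *bounce_example_prepare(struct scsi_cmnd *scmnd,
				    unsigned int len, bool is_write)
{
	void *buf = kmalloc(len, GFP_ATOMIC);

	if (!buf)
		return NULL;
	if (is_write)
		scsi_sg_copy_to_buffer(scmnd, buf, len);
	return buf;
}

/* On completion: for reads, scatter the flat buffer back, then free it. */
static void bounce_example_complete(struct scsi_cmnd *scmnd, void *buf,
				    unsigned int len, bool is_read)
{
	if (is_read)
		scsi_sg_copy_from_buffer(scmnd, buf, len);
	kfree(buf);
}
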
 drivers/scsi/storvsc_drv.c | 281 ++++++---------------------------------------
 1 file changed, 36 insertions(+), 245 deletions(-)

diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 40c43ae..31e8c67 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -393,8 +393,8 @@ static void storvsc_on_channel_callback(void *context);
 struct storvsc_cmd_request {
 	struct scsi_cmnd *cmd;
 
-	unsigned int bounce_sgl_count;
-	struct scatterlist *bounce_sgl;
+	unsigned int bounce_len;
+	void *bounce_buf;
 
 	struct hv_device *device;
 
@@ -586,21 +586,6 @@ get_in_err:
 
 }
 
-static void destroy_bounce_buffer(struct scatterlist *sgl,
-				  unsigned int sg_count)
-{
-	int i;
-	struct page *page_buf;
-
-	for (i = 0; i < sg_count; i++) {
-		page_buf = sg_page((&sgl[i]));
-		if (page_buf != NULL)
-			__free_page(page_buf);
-	}
-
-	kfree(sgl);
-}
-
 static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
 {
 	int i;
@@ -628,199 +613,6 @@ static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
 	return -1;
 }
 
-static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
-						unsigned int sg_count,
-						unsigned int len,
-						int write)
-{
-	int i;
-	int num_pages;
-	struct scatterlist *bounce_sgl;
-	struct page *page_buf;
-	unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
-
-	num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
-
-	bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
-	if (!bounce_sgl)
-		return NULL;
-
-	sg_init_table(bounce_sgl, num_pages);
-	for (i = 0; i < num_pages; i++) {
-		page_buf = alloc_page(GFP_ATOMIC);
-		if (!page_buf)
-			goto cleanup;
-		sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
-	}
-
-	return bounce_sgl;
-
-cleanup:
-	destroy_bounce_buffer(bounce_sgl, num_pages);
-	return NULL;
-}
-
-/* Assume the original sgl has enough room */
-static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
-					    struct scatterlist *bounce_sgl,
-					    unsigned int orig_sgl_count,
-					    unsigned int bounce_sgl_count)
-{
-	int i;
-	int j = 0;
-	unsigned long src, dest;
-	unsigned int srclen, destlen, copylen;
-	unsigned int total_copied = 0;
-	unsigned long bounce_addr = 0;
-	unsigned long dest_addr = 0;
-	unsigned long flags;
-	struct scatterlist *cur_dest_sgl;
-	struct scatterlist *cur_src_sgl;
-
-	local_irq_save(flags);
-	cur_dest_sgl = orig_sgl;
-	cur_src_sgl = bounce_sgl;
-	for (i = 0; i < orig_sgl_count; i++) {
-		dest_addr = (unsigned long)
-				kmap_atomic(sg_page(cur_dest_sgl)) +
-				cur_dest_sgl->offset;
-		dest = dest_addr;
-		destlen = cur_dest_sgl->length;
-
-		if (bounce_addr == 0)
-			bounce_addr = (unsigned long)kmap_atomic(
-							sg_page(cur_src_sgl));
-
-		while (destlen) {
-			src = bounce_addr + cur_src_sgl->offset;
-			srclen = cur_src_sgl->length - cur_src_sgl->offset;
-
-			copylen = min(srclen, destlen);
-			memcpy((void *)dest, (void *)src, copylen);
-
-			total_copied += copylen;
-			cur_src_sgl->offset += copylen;
-			destlen -= copylen;
-			dest += copylen;
-
-			if (cur_src_sgl->offset == cur_src_sgl->length) {
-				/* full */
-				kunmap_atomic((void *)bounce_addr);
-				j++;
-
-				/*
-				 * It is possible that the number of elements
-				 * in the bounce buffer may not be equal to
-				 * the number of elements in the original
-				 * scatter list. Handle this correctly.
-				 */
-
-				if (j == bounce_sgl_count) {
-					/*
-					 * We are done; cleanup and return.
-					 */
-					kunmap_atomic((void *)(dest_addr -
-						cur_dest_sgl->offset));
-					local_irq_restore(flags);
-					return total_copied;
-				}
-
-				/* if we need to use another bounce buffer */
-				if (destlen || i != orig_sgl_count - 1) {
-					cur_src_sgl = sg_next(cur_src_sgl);
-					bounce_addr = (unsigned long)
-							kmap_atomic(
-							sg_page(cur_src_sgl));
-				}
-			} else if (destlen == 0 && i == orig_sgl_count - 1) {
-				/* unmap the last bounce that is < PAGE_SIZE */
-				kunmap_atomic((void *)bounce_addr);
-			}
-		}
-
-		kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset));
-		cur_dest_sgl = sg_next(cur_dest_sgl);
-	}
-
-	local_irq_restore(flags);
-
-	return total_copied;
-}
-
-/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
-static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
-					  struct scatterlist *bounce_sgl,
-					  unsigned int orig_sgl_count)
-{
-	int i;
-	int j = 0;
-	unsigned long src, dest;
-	unsigned int srclen, destlen, copylen;
-	unsigned int total_copied = 0;
-	unsigned long bounce_addr = 0;
-	unsigned long src_addr = 0;
-	unsigned long flags;
-	struct scatterlist *cur_src_sgl;
-	struct scatterlist *cur_dest_sgl;
-
-	local_irq_save(flags);
-
-	cur_src_sgl = orig_sgl;
-	cur_dest_sgl = bounce_sgl;
-
-	for (i = 0; i < orig_sgl_count; i++) {
-		src_addr = (unsigned long)
-				kmap_atomic(sg_page(cur_src_sgl)) +
-				cur_src_sgl->offset;
-		src = src_addr;
-		srclen = cur_src_sgl->length;
-
-		if (bounce_addr == 0)
-			bounce_addr = (unsigned long)
-					kmap_atomic(sg_page(cur_dest_sgl));
-
-		while (srclen) {
-			/* assume bounce offset always == 0 */
-			dest = bounce_addr + cur_dest_sgl->length;
-			destlen = PAGE_SIZE - cur_dest_sgl->length;
-
-			copylen = min(srclen, destlen);
-			memcpy((void *)dest, (void *)src, copylen);
-
-			total_copied += copylen;
-			cur_dest_sgl->length += copylen;
-			srclen -= copylen;
-			src += copylen;
-
-			if (cur_dest_sgl->length == PAGE_SIZE) {
-				/* full..move to next entry */
-				kunmap_atomic((void *)bounce_addr);
-				bounce_addr = 0;
-				j++;
-			}
-
-			/* if we need to use another bounce buffer */
-			if (srclen && bounce_addr == 0) {
-				cur_dest_sgl = sg_next(cur_dest_sgl);
-				bounce_addr = (unsigned long)
-						kmap_atomic(
-						sg_page(cur_dest_sgl));
-			}
-
-		}
-
-		kunmap_atomic((void *)(src_addr - cur_src_sgl->offset));
-		cur_src_sgl = sg_next(cur_src_sgl);
-	}
-
-	if (bounce_addr)
-		kunmap_atomic((void *)bounce_addr);
-
-	local_irq_restore(flags);
-
-	return total_copied;
-}
-
 static void handle_sc_creation(struct vmbus_channel *new_sc)
 {
 	struct hv_device *device = new_sc->primary_channel->device_obj;
@@ -1171,14 +963,11 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
 	host = stor_dev->host;
 
 	vm_srb = &cmd_request->vstor_packet.vm_srb;
-	if (cmd_request->bounce_sgl_count) {
+	if (cmd_request->bounce_len) {
 		if (vm_srb->data_in == READ_TYPE)
-			copy_from_bounce_buffer(scsi_sglist(scmnd),
-					cmd_request->bounce_sgl,
-					scsi_sg_count(scmnd),
-					cmd_request->bounce_sgl_count);
-		destroy_bounce_buffer(cmd_request->bounce_sgl,
-					cmd_request->bounce_sgl_count);
+			scsi_sg_copy_from_buffer(scmnd, cmd_request->bounce_buf,
+						 cmd_request->bounce_len);
+		kfree(cmd_request->bounce_buf);
 	}
 
 	scmnd->result = vm_srb->scsi_status;
@@ -1694,48 +1483,51 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	if (sg_count) {
 		/* check if we need to bounce the sgl */
 		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
-			cmd_request->bounce_sgl =
-				create_bounce_buffer(sgl, sg_count,
-						     length,
-						     vm_srb->data_in);
-			if (!cmd_request->bounce_sgl)
+			cmd_request->bounce_buf = kmalloc(length, GFP_ATOMIC);
+			if (!cmd_request->bounce_buf)
 				return SCSI_MLQUEUE_HOST_BUSY;
 
-			cmd_request->bounce_sgl_count =
-				ALIGN(length, PAGE_SIZE) >> PAGE_SHIFT;
+			cmd_request->bounce_len = length;
 
 			if (vm_srb->data_in == WRITE_TYPE)
-				copy_to_bounce_buffer(sgl,
-					cmd_request->bounce_sgl, sg_count);
+				scsi_sg_copy_to_buffer(scmnd,
+						       cmd_request->bounce_buf,
+						       cmd_request->bounce_len);
 
-			sgl = cmd_request->bounce_sgl;
-			sg_count = cmd_request->bounce_sgl_count;
+			sg_count = ALIGN(length, PAGE_SIZE) >> PAGE_SHIFT;
 		}
 
-
 		if (sg_count > MAX_PAGE_BUFFER_COUNT) {
-
 			payload_sz = (sg_count * sizeof(void *) +
 				      sizeof(struct vmbus_packet_mpb_array));
 			payload = kmalloc(payload_sz, GFP_ATOMIC);
 			if (!payload) {
-				if (cmd_request->bounce_sgl_count)
-					destroy_bounce_buffer(
-					cmd_request->bounce_sgl,
-					cmd_request->bounce_sgl_count);
-
-					return SCSI_MLQUEUE_DEVICE_BUSY;
+				if (cmd_request->bounce_len)
+					kfree(cmd_request->bounce_buf);
+				return SCSI_MLQUEUE_DEVICE_BUSY;
 			}
 		}
 
 		payload->range.len = length;
-		payload->range.offset = sgl[0].offset;
 
-		cur_sgl = sgl;
-		for (i = 0; i < sg_count; i++) {
-			payload->range.pfn_array[i] =
-				page_to_pfn(sg_page((cur_sgl)));
-			cur_sgl = sg_next(cur_sgl);
+		if (!cmd_request->bounce_len) {
+			payload->range.offset = sgl[0].offset;
+
+			cur_sgl = sgl;
+			for (i = 0; i < sg_count; i++) {
+				payload->range.pfn_array[i] =
+					page_to_pfn(sg_page((cur_sgl)));
+				cur_sgl = sg_next(cur_sgl);
+			}
+		} else {
+			payload->range.offset =	(unsigned long)
+				cmd_request->bounce_buf & ~PAGE_MASK;
+
+			for (i = 0; i < sg_count; i++)
+				payload->range.pfn_array[i] =
+					virt_to_phys(cmd_request->bounce_buf +
+						     i * PAGE_SIZE)
+					>> PAGE_SHIFT;
 		}
 
 	} else if (scsi_sglist(scmnd)) {
@@ -1755,9 +1547,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	if (ret == -EAGAIN) {
 		/* no more space */
 
-		if (cmd_request->bounce_sgl_count)
-			destroy_bounce_buffer(cmd_request->bounce_sgl,
-					cmd_request->bounce_sgl_count);
+		if (cmd_request->bounce_len)
+			kfree(cmd_request->bounce_buf);
 
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 	}
-- 
2.4.3
