Date:	Fri, 11 May 2012 16:49:56 -0500
From:	Seth Jennings <sjenning@...ux.vnet.ibm.com>
To:	Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
CC:	Minchan Kim <minchan@...nel.org>,
	Dan Magenheimer <dan.magenheimer@...cle.com>,
	Nitin Gupta <ngupta@...are.org>,
	Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
	Andrew Morton <akpm@...ux-foundation.org>,
	linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH 3/4] zsmalloc use zs_handle instead of void *

On 05/11/2012 02:29 PM, Konrad Rzeszutek Wilk wrote:

>> At least, zram, the other primary user, has a similar mess, though
>> it's not as severe as zcache's: zram->table[index].handle sometimes
>> holds a real (void *) handle and sometimes a (struct page *).
> 
> Yikes. Yeah that needs to be fixed.
> 
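To spell out the ambiguity Minchan is describing: today the same table
field is handed to zsmalloc on one path and silently treated as a
struct page * on the other. Paraphrasing the existing
__zram_reset_device() loop (illustrative only, not part of the patch):

	void *handle = zram->table[index].handle;

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
		__free_page(handle);		/* actually a struct page * */
	else
		zs_free(zram->mem_pool, handle);	/* a zsmalloc handle */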


How about this (untested)?  The changes to zram_bvec_write() are a little
hard to make out in this format.  A couple of checkpatch fixes (two
split-line strings rejoined) and the removal of the now-unused variable
store_offset are mixed in as well. If this patch looks good, I'll break it
up for official submission after I test.
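
For reference, this is the access discipline the union is meant to
enforce. The helper name read_table_entry() below is made up purely to
illustrate the idea and is not part of the patch; it only combines calls
the driver already makes (kmap_atomic/kunmap_atomic for whole pages,
zs_map_object/zs_unmap_object for zsmalloc handles):

/* Illustrative sketch only: pick the union member based on the
 * ZRAM_UNCOMPRESSED flag, never by casting one type to the other.
 */
static void read_table_entry(struct zram *zram, u32 index,
			     unsigned char *dst, size_t len)
{
	unsigned char *cmem;

	if (zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
		/* incompressible: entry holds a whole struct page */
		cmem = kmap_atomic(zram->table[index].page);
		memcpy(dst, cmem, len);
		kunmap_atomic(cmem);
	} else {
		/* compressible: entry holds an opaque zsmalloc handle */
		cmem = zs_map_object(zram->mem_pool,
				     zram->table[index].handle);
		memcpy(dst, cmem, len);
		zs_unmap_object(zram->mem_pool,
				zram->table[index].handle);
	}
}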

diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index fbe8ac9..10dcd99 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -81,7 +81,10 @@ enum zram_pageflags {
 
 /* Allocated for each disk page */
 struct table {
-	void *handle;
+	union {
+		void *handle; /* compressible */
+		struct page *page; /* incompressible */
+	};
 	u16 size;	/* object size (excluding header) */
 	u8 count;	/* object ref count (not yet used) */
 	u8 flags;
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 685d612..d49deca 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -150,7 +150,7 @@ static void zram_free_page(struct zram *zram, size_t index)
 	}
 
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-		__free_page(handle);
+		__free_page(zram->table[index].page);
 		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
 		zram_stat_dec(&zram->stats.pages_expand);
 		goto out;
@@ -189,7 +189,7 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
 	unsigned char *user_mem, *cmem;
 
 	user_mem = kmap_atomic(page);
-	cmem = kmap_atomic(zram->table[index].handle);
+	cmem = kmap_atomic(zram->table[index].page);
 
 	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
 	kunmap_atomic(cmem);
@@ -315,7 +315,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			   int offset)
 {
 	int ret;
-	u32 store_offset;
 	size_t clen;
 	void *handle;
 	struct zobj_header *zheader;
@@ -390,31 +389,38 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		clen = PAGE_SIZE;
 		page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
 		if (unlikely(!page_store)) {
-			pr_info("Error allocating memory for "
-				"incompressible page: %u\n", index);
+			pr_info("Error allocating memory for incompressible page: %u\n", index);
 			ret = -ENOMEM;
 			goto out;
 		}
 
-		store_offset = 0;
-		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
-		zram_stat_inc(&zram->stats.pages_expand);
-		handle = page_store;
 		src = kmap_atomic(page);
 		cmem = kmap_atomic(page_store);
-		goto memstore;
-	}
+		memcpy(cmem, src, clen);
+		kunmap_atomic(cmem);
+		kunmap_atomic(src);
 
-	handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
-	if (!handle) {
-		pr_info("Error allocating memory for compressed "
-			"page: %u, size=%zu\n", index, clen);
-		ret = -ENOMEM;
-		goto out;
+		zram->table[index].page = page_store;
+		zram->table[index].size = PAGE_SIZE;
+
+		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
+		zram_stat_inc(&zram->stats.pages_expand);
+	} else {
+		handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
+		if (!handle) {
+			pr_info("Error allocating memory for compressed page: %u, size=%zu\n", index, clen);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		zram->table[index].handle = handle;
+		zram->table[index].size = clen;
+
+		cmem = zs_map_object(zram->mem_pool, handle);
+		memcpy(cmem, src, clen);
+		zs_unmap_object(zram->mem_pool, handle);
 	}
-	cmem = zs_map_object(zram->mem_pool, handle);
 
-memstore:
 #if 0
 	/* Back-reference needed for memory defragmentation */
 	if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
@@ -424,18 +430,6 @@ memstore:
 	}
 #endif
 
-	memcpy(cmem, src, clen);
-
-	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-		kunmap_atomic(cmem);
-		kunmap_atomic(src);
-	} else {
-		zs_unmap_object(zram->mem_pool, handle);
-	}
-
-	zram->table[index].handle = handle;
-	zram->table[index].size = clen;
-
 	/* Update stats */
 	zram_stat64_add(zram, &zram->stats.compr_size, clen);
 	zram_stat_inc(&zram->stats.pages_stored);
@@ -580,6 +574,8 @@ error:
 void __zram_reset_device(struct zram *zram)
 {
 	size_t index;
+	void *handle;
+	struct page *page;
 
 	zram->init_done = 0;
 
@@ -592,14 +588,17 @@ void __zram_reset_device(struct zram *zram)
 
 	/* Free all pages that are still in this zram device */
 	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
-		void *handle = zram->table[index].handle;
-		if (!handle)
-			continue;
-
-		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
-			__free_page(handle);
-		else
+		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+			page = zram->table[index].page;
+			if (!page)
+				continue;
+			__free_page(page);
+		} else {
+			handle = zram->table[index].handle;
+			if (!handle)
+				continue;
 			zs_free(zram->mem_pool, handle);
+		}
 	}
 
 	vfree(zram->table);
