Message-Id: <1354175106-30679-1-git-send-email-ngupta@vflare.org>
Date: Wed, 28 Nov 2012 23:45:06 -0800
From: Nitin Gupta <ngupta@...are.org>
To: Greg KH <greg@...ah.com>
Cc: Jerome Marchand <jmarchan@...hat.com>,
Minchan Kim <minchan.kim@...il.com>,
Seth Jennings <sjenning@...ux.vnet.ibm.com>,
Dan Carpenter <dan.carpenter@...cle.com>,
Sam Hansen <solid.se7en@...il.com>, Tomas M <tomas@...x.org>,
Mihail Kasadjikov <hamer.mk@...il.com>,
Linux Driver Project <devel@...uxdriverproject.org>,
linux-kernel <linux-kernel@...r.kernel.org>
Subject: [PATCH v2] zram: Fix use-after-free bug in disk write case
Changelog v2 vs v1:
- Changelog message now correctly explains the problem
Fixes a bug introduced by commit c8f2f0db1 ("zram: Fix handling
of incompressible pages") which caused a freed buffer to be used
when a partial (non-PAGE_SIZE) write request is received and the
data is found to be incompressible.
Fixes bug 50081:
https://bugzilla.kernel.org/show_bug.cgi?id=50081
Signed-off-by: Nitin Gupta <ngupta@...are.org>
Reported-by: Mihail Kasadjikov <hamer.mk@...il.com>
Reported-by: Tomas M <tomas@...x.org>
Reviewed-by: Minchan Kim <minchan@...nel.org>
---
drivers/staging/zram/zram_drv.c | 39 ++++++++++++++++++++++++---------------
1 file changed, 24 insertions(+), 15 deletions(-)
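
Note for reviewers: below is a minimal userspace sketch (illustrative
names and a libc stand-in for the LZO call, not the actual driver code)
of the buffer-lifetime rule this patch enforces for the partial-write
path: the bounce buffer holding the read-modify-write data must stay
allocated until it has been copied into its final destination, and is
now freed only once, at the common exit path.

/*
 * Minimal userspace sketch of the lifetime rule this patch enforces.
 * Names (bounce buffer, compress stub) are illustrative only; this is
 * not the zram driver code.
 */
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Stand-in for lzo1x_1_compress(): pretend the data is incompressible. */
static int compress(const char *in, size_t in_len, char *out, size_t *out_len)
{
	if (*out_len < in_len)
		return -1;
	memcpy(out, in, in_len);
	*out_len = in_len;		/* clen == PAGE_SIZE: incompressible */
	return 0;
}

static int partial_write(const char *data, size_t offset, size_t len)
{
	char workbuf[2 * PAGE_SIZE];
	size_t clen = sizeof(workbuf);
	char *uncmem, *cmem;
	const char *src;
	int ret = 0;

	uncmem = calloc(1, PAGE_SIZE);	/* bounce buffer for partial I/O */
	if (!uncmem)
		return -1;

	memcpy(uncmem + offset, data, len);

	ret = compress(uncmem, PAGE_SIZE, workbuf, &clen);
	/*
	 * Do NOT free uncmem here: if the page turned out to be
	 * incompressible, src must point back at the uncompressed data.
	 */
	src = (ret == 0 && clen < PAGE_SIZE) ? workbuf : uncmem;

	cmem = malloc(clen);
	if (!cmem) {
		ret = -1;
		goto out;
	}
	memcpy(cmem, src, clen);	/* uncmem is still valid here */
	free(cmem);
out:
	free(uncmem);			/* single exit point, as in the patch */
	return ret;
}

int main(void)
{
	const char buf[512] = "partial sector";

	return partial_write(buf, 1024, sizeof(buf)) ? 1 : 0;
}
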
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index fb4a7c9..f2a73bd 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -265,7 +265,7 @@ out_cleanup:
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset)
{
- int ret;
+ int ret = 0;
size_t clen;
unsigned long handle;
struct page *page;
@@ -286,10 +286,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
ret = zram_decompress_page(zram, uncmem, index);
- if (ret) {
- kfree(uncmem);
+ if (ret)
goto out;
- }
}
/*
@@ -302,16 +300,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
user_mem = kmap_atomic(page);
- if (is_partial_io(bvec))
+ if (is_partial_io(bvec)) {
memcpy(uncmem + offset, user_mem + bvec->bv_offset,
bvec->bv_len);
- else
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ } else {
uncmem = user_mem;
+ }
if (page_zero_filled(uncmem)) {
- kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
+ if (!is_partial_io(bvec))
+ kunmap_atomic(user_mem);
zram_stat_inc(&zram->stats.pages_zero);
zram_set_flag(zram, index, ZRAM_ZERO);
ret = 0;
@@ -321,9 +321,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
zram->compress_workmem);
- kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
+ if (!is_partial_io(bvec)) {
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ uncmem = NULL;
+ }
if (unlikely(ret != LZO_E_OK)) {
pr_err("Compression failed! err=%d\n", ret);
@@ -332,8 +334,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (unlikely(clen > max_zpage_size)) {
zram_stat_inc(&zram->stats.bad_compress);
- src = uncmem;
clen = PAGE_SIZE;
+ src = NULL;
+ if (is_partial_io(bvec))
+ src = uncmem;
}
handle = zs_malloc(zram->mem_pool, clen);
@@ -345,7 +349,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
}
cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+ src = kmap_atomic(page);
memcpy(cmem, src, clen);
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+ kunmap_atomic(src);
zs_unmap_object(zram->mem_pool, handle);
@@ -358,9 +366,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (clen <= PAGE_SIZE / 2)
zram_stat_inc(&zram->stats.good_compress);
- return 0;
-
out:
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+
if (ret)
zram_stat64_inc(zram, &zram->stats.failed_writes);
return ret;
--
1.7.10.4