lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Mon, 29 Oct 2012 10:14:20 -0700
From:	Nitin Gupta <ngupta@...are.org>
To:	Sergey Senozhatsky <sergey.senozhatsky@...il.com>
CC:	Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
	Fengguang Wu <fengguang.wu@...el.com>,
	linux-kernel <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH 1/2] zram: factor-out zram_decompress_page() function

On 10/27/2012 09:00 AM, Sergey Senozhatsky wrote:
>    zram: factor-out zram_decompress_page() function
>
>    zram_bvec_read() shared decompress functionality with zram_read_before_write() function.
>    Factor-out and make commonly used zram_decompress_page() function, which also simplified
>    error handling in zram_bvec_read().
>
>    Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
>
> ---
>
>   drivers/staging/zram/zram_drv.c | 115 +++++++++++++++++-----------------------
>   1 file changed, 50 insertions(+), 65 deletions(-)
>
> diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
> index 6edefde..7585467 100644
> --- a/drivers/staging/zram/zram_drv.c
> +++ b/drivers/staging/zram/zram_drv.c
> @@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
>   	return bvec->bv_len != PAGE_SIZE;
>   }
>
> -static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> -			  u32 index, int offset, struct bio *bio)
> +static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>   {
> -	int ret;
> -	size_t clen;
> -	struct page *page;
> -	unsigned char *user_mem, *cmem, *uncmem = NULL;
> -
> -	page = bvec->bv_page;
> -
> -	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
> -		handle_zero_page(bvec);
> -		return 0;
> -	}
> +	int ret = LZO_E_OK;
> +	size_t clen = PAGE_SIZE;
> +	unsigned char *cmem;
> +	unsigned long handle = zram->table[index].handle;
>
> -	/* Requested page is not present in compressed area */
> -	if (unlikely(!zram->table[index].handle)) {
> -		pr_debug("Read before write: sector=%lu, size=%u",
> -			 (ulong)(bio->bi_sector), bio->bi_size);
> -		handle_zero_page(bvec);
> +	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
> +		memset(mem, 0, PAGE_SIZE);
>   		return 0;
>   	}
>
> -	if (is_partial_io(bvec)) {
> -		/* Use  a temporary buffer to decompress the page */
> -		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
> -		if (!uncmem) {
> -			pr_info("Error allocating temp memory!\n");
> -			return -ENOMEM;
> -		}
> -	}
> -
> -	user_mem = kmap_atomic(page);
> -	if (!is_partial_io(bvec))
> -		uncmem = user_mem;
> -	clen = PAGE_SIZE;
> -
> -	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
> -				ZS_MM_RO);
> -
> -	if (zram->table[index].size == PAGE_SIZE) {
> -		memcpy(uncmem, cmem, PAGE_SIZE);
> -		ret = LZO_E_OK;
> -	} else {
> +	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
> +	if (zram->table[index].size == PAGE_SIZE)
> +		memcpy(mem, cmem, PAGE_SIZE);
> +	else
>   		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
> -				    uncmem, &clen);
> -	}
> -
> -	if (is_partial_io(bvec)) {
> -		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
> -		       bvec->bv_len);
> -		kfree(uncmem);
> -	}
> -
> -	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
> -	kunmap_atomic(user_mem);
> +						mem, &clen);
> +	zs_unmap_object(zram->mem_pool, handle);
>
>   	/* Should NEVER happen. Return bio error if it does. */
>   	if (unlikely(ret != LZO_E_OK)) {
> @@ -247,36 +210,58 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
>   		return ret;
>   	}
>
> -	flush_dcache_page(page);
> -
>   	return 0;
>   }
>
> -static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
> +static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> +			  u32 index, int offset, struct bio *bio)
>   {
>   	int ret;
> -	size_t clen = PAGE_SIZE;
> -	unsigned char *cmem;
> -	unsigned long handle = zram->table[index].handle;
> +	struct page *page;
> +	unsigned char *user_mem, *uncmem = NULL;
>
> -	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
> -		memset(mem, 0, PAGE_SIZE);
> +	page = bvec->bv_page;
> +
> +	if (unlikely(!zram->table[index].handle) ||
> +			zram_test_flag(zram, index, ZRAM_ZERO)) {
> +		pr_debug("Read before write: sector=%lu, size=%u",
> +			 (ulong)(bio->bi_sector), bio->bi_size);


"Read before write" message is not valid in the case where the ZRAM_ZERO flag is set.
It's true only in the !handle case.

Otherwise, the patch looks good to me.

On a side note, zram still contains a known use-after-free bug reported 
by Fengguang Wu (CC'ed) which happens in the "partial I/O" i.e. non 
PAGE_SIZE'ed I/O case which is fixed by the following patch.

Please let me know if you can include the following patch when you 
resend this patch series; otherwise I can send it myself, or wait for 
your series to be merged and then send it separately.

======
zram: Fix use-after-free in partial I/O case

When the compressed size of a page exceeds a threshold, the page is 
stored as-is, i.e. in uncompressed form. In the partial I/O (i.e. 
non-PAGE_SIZE'ed I/O) case, however, the uncompressed memory was being 
freed before it could be copied into the zsmalloc pool, resulting in a 
use-after-free bug.

Signed-off-by: Nitin Gupta <ngupta@...are.org>
---

diff --git a/drivers/staging/zram/zram_drv.c 
b/drivers/staging/zram/zram_drv.c
index 7585467..635736b 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -288,10 +288,8 @@ static int zram_bvec_write(struct zram *zram, 
struct bio_vec *bvec, u32 index,
  			goto out;
  		}
  		ret = zram_decompress_page(zram, uncmem, index);
-		if (ret) {
-			kfree(uncmem);
+		if (ret)
  			goto out;
-		}
  	}

  	/*
@@ -312,8 +310,6 @@ static int zram_bvec_write(struct zram *zram, struct 
bio_vec *bvec, u32 index,

  	if (page_zero_filled(uncmem)) {
  		kunmap_atomic(user_mem);
-		if (is_partial_io(bvec))
-			kfree(uncmem);
  		zram_stat_inc(&zram->stats.pages_zero);
  		zram_set_flag(zram, index, ZRAM_ZERO);
  		ret = 0;
@@ -324,8 +320,6 @@ static int zram_bvec_write(struct zram *zram, struct 
bio_vec *bvec, u32 index,
  			       zram->compress_workmem);

  	kunmap_atomic(user_mem);
-	if (is_partial_io(bvec))
-			kfree(uncmem);

  	if (unlikely(ret != LZO_E_OK)) {
  		pr_err("Compression failed! err=%d\n", ret);
@@ -360,11 +354,15 @@ static int zram_bvec_write(struct zram *zram, 
struct bio_vec *bvec, u32 index,
  	if (clen <= PAGE_SIZE / 2)
  		zram_stat_inc(&zram->stats.good_compress);

-	return 0;
+	ret = 0;

  out:
  	if (ret)
  		zram_stat64_inc(zram, &zram->stats.failed_writes);
+
+	if (is_partial_io(bvec))
+		kfree(uncmem);
+
  	return ret;
  }


BTW, I could not trigger this partial I/O case, so please let me know if 
you hit any issue during your testing.

There is another sparse warning to be fixed: zram_reset_device should be 
static.

Thanks,
Nitin

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ