Message-Id: <1439474123-11279-5-git-send-email-sergey.senozhatsky@gmail.com>
Date: Thu, 13 Aug 2015 22:55:23 +0900
From: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
To: Minchan Kim <minchan@...nel.org>,
Joonsoo Kim <iamjoonsoo.kim@....com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org,
Sergey Senozhatsky <sergey.senozhatsky.work@...il.com>,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>
Subject: [RFC][PATCH 4/4] zram: enable zlib backend support
Update the zram and zcomp read (decompress) paths to provide a
zstrm to backends that set the ZCOMP_NEED_READ_ZSTRM bit in their
flags, so the ZLIB compression backend can now be enabled in Kconfig.
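
For readers skimming the series, here is a minimal user-space sketch of the
dispatch idea, not the kernel code itself: the read path grabs a per-stream
workspace only for backends that set ZCOMP_NEED_READ_ZSTRM, and the
decompress wrapper forwards zstrm->private (or NULL) to the backend. All
names below (demo_backend, demo_decompress(), the memcpy stand-ins) are
illustrative only; the flag and the zcomp_decompress_begin()/end() helpers
are assumed from the earlier patches in this series.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define ZCOMP_NEED_READ_ZSTRM	(1 << 0)	/* backend wants a zstrm on reads */

struct demo_strm {
	void *private;		/* per-stream workspace, e.g. a zlib z_stream */
};

struct demo_backend {
	const char *name;
	unsigned int flags;
	int (*decompress)(const unsigned char *src, size_t src_len,
			  unsigned char *dst, void *private);
};

/* LZO/LZ4-style backend: stateless, ignores 'private' */
static int stateless_decompress(const unsigned char *src, size_t src_len,
				unsigned char *dst, void *private)
{
	(void)private;
	memcpy(dst, src, src_len);	/* stand-in for the real algorithm */
	return 0;
}

/* zlib-style backend: refuses to run without its per-stream workspace */
static int stateful_decompress(const unsigned char *src, size_t src_len,
			       unsigned char *dst, void *private)
{
	if (!private)
		return -1;		/* would be an error code in the kernel */
	memcpy(dst, src, src_len);	/* stand-in for zlib inflate */
	return 0;
}

/*
 * Mirrors the zcomp_decompress() hunk below: pass the workspace only when
 * the caller was handed a stream, i.e. only for NEED_READ_ZSTRM backends.
 */
static int demo_decompress(const struct demo_backend *b,
			   struct demo_strm *zstrm,
			   const unsigned char *src, size_t src_len,
			   unsigned char *dst)
{
	void *private = zstrm ? zstrm->private : NULL;

	return b->decompress(src, src_len, dst, private);
}

int main(void)
{
	const struct demo_backend backends[] = {
		{ "lzo",  0,                     stateless_decompress },
		{ "zlib", ZCOMP_NEED_READ_ZSTRM, stateful_decompress  },
	};
	unsigned char src[] = "data", dst[16];
	char workspace[64];
	struct demo_strm strm = { .private = workspace };
	size_t i;

	for (i = 0; i < sizeof(backends) / sizeof(backends[0]); i++) {
		const struct demo_backend *b = &backends[i];
		/* the read path takes a stream only when the flag is set */
		struct demo_strm *zstrm =
			(b->flags & ZCOMP_NEED_READ_ZSTRM) ? &strm : NULL;

		if (demo_decompress(b, zstrm, src, sizeof(src), dst))
			return 1;
		printf("%s: %s\n", b->name, dst);
	}
	return 0;
}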
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
---
drivers/block/zram/Kconfig | 14 +++++++++++++-
drivers/block/zram/zcomp.c | 7 ++++++-
drivers/block/zram/zram_drv.c | 23 +++++++++++++++++++----
3 files changed, 38 insertions(+), 6 deletions(-)
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 386ba3d..1858762 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -23,4 +23,16 @@ config ZRAM_LZ4_COMPRESS
default n
help
This option enables LZ4 compression algorithm support. Compression
- algorithm can be changed using `comp_algorithm' device attribute.
\ No newline at end of file
+ algorithm can be changed using `comp_algorithm' device attribute.
+
+config ZRAM_ZLIB_COMPRESS
+ bool "Enable ZLIB algorithm support"
+ depends on ZRAM
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
+ default n
+ help
+ This option enables ZLIB compression algorithm support. ZLIB gives
+ a significantly better compression ratio, at a price of significantly
+ worse performance. Compression algorithm can be changed using
+ `comp_algorithm' device attribute.
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index a0cef0b..8f24820 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -323,7 +323,12 @@ int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
const unsigned char *src,
size_t src_len, unsigned char *dst)
{
- return comp->backend->decompress(src, src_len, dst, NULL);
+ void *private = NULL;
+
+ if (unlikely(zstrm))
+ private = zstrm->private;
+
+ return comp->backend->decompress(src, src_len, dst, private);
}
void zcomp_destroy(struct zcomp *comp)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0aec4ce..5c52400 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -560,7 +560,8 @@ static void zram_free_page(struct zram *zram, size_t index)
zram_set_obj_size(meta, index, 0);
}
-static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+static int zram_decompress_page(struct zram *zram, struct zcomp_strm *zstrm,
+ char *mem, u32 index)
{
int ret = 0;
unsigned char *cmem;
@@ -582,7 +583,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
if (size == PAGE_SIZE)
copy_page(mem, cmem);
else
- ret = zcomp_decompress(zram->comp, NULL, cmem, size, mem);
+ ret = zcomp_decompress(zram->comp, zstrm, cmem, size, mem);
zs_unmap_object(meta->mem_pool, handle);
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
@@ -602,6 +603,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
struct page *page;
unsigned char *user_mem, *uncmem = NULL;
struct zram_meta *meta = zram->meta;
+ void *dzstrm;
page = bvec->bv_page;
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
@@ -617,6 +619,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
/* Use a temporary buffer to decompress the page */
uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+ dzstrm = zcomp_decompress_begin(zram->comp);
user_mem = kmap_atomic(page);
if (!is_partial_io(bvec))
uncmem = user_mem;
@@ -627,7 +630,11 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
goto out_cleanup;
}
- ret = zram_decompress_page(zram, uncmem, index);
+ ret = zram_decompress_page(zram, dzstrm, uncmem, index);
+
+ zcomp_decompress_end(zram->comp, dzstrm);
+ dzstrm = NULL;
+
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret))
goto out_cleanup;
@@ -638,10 +645,13 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
flush_dcache_page(page);
ret = 0;
+
out_cleanup:
kunmap_atomic(user_mem);
+ zcomp_decompress_end(zram->comp, dzstrm);
if (is_partial_io(bvec))
kfree(uncmem);
+
return ret;
}
@@ -659,6 +669,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
page = bvec->bv_page;
if (is_partial_io(bvec)) {
+ void *dzstrm;
+
/*
* This is a partial IO. We need to read the full page
* before to write the changes.
@@ -668,7 +680,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = -ENOMEM;
goto out;
}
- ret = zram_decompress_page(zram, uncmem, index);
+
+ dzstrm = zcomp_decompress_begin(zram->comp);
+ ret = zram_decompress_page(zram, dzstrm, uncmem, index);
+ zcomp_decompress_end(zram->comp, dzstrm);
if (ret)
goto out;
}
--
2.5.0
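
For completeness: with CONFIG_ZRAM_ZLIB_COMPRESS=y, the new backend is
selected at runtime the same way as lzo and lz4, by writing its name to the
device's comp_algorithm attribute (e.g. /sys/block/zram0/comp_algorithm)
before setting disksize. This assumes the backend registers under the name
"zlib", as the earlier patches in this series suggest.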