Message-ID: <Y2LPUlircv6a74mJ@google.com>
Date:   Wed, 2 Nov 2022 13:13:06 -0700
From:   Minchan Kim <minchan@...nel.org>
To:     Sergey Senozhatsky <senozhatsky@...omium.org>
Cc:     Andrew Morton <akpm@...ux-foundation.org>,
        Nitin Gupta <ngupta@...are.org>, linux-kernel@...r.kernel.org,
        linux-mm@...ck.org
Subject: Re: [PATCHv4 1/9] zram: Preparation for multi-zcomp support

On Tue, Oct 18, 2022 at 01:55:25PM +0900, Sergey Senozhatsky wrote:
> The patch turns the compression stream and compressor algorithm
> name members of struct zram into arrays, so that we can support
> multiple compression streams (added in the following patches).
> 
> The patch uses a rather explicit API for compressor selection:
> 
> - Get primary (default) compression stream
> 	zcomp_stream_get(zram->comps[ZRAM_PRIMARY_ZCOMP])
> - Get secondary compression stream
> 	zcomp_stream_get(zram->comps[ZRAM_SECONDARY_ZCOMP])
> 
> We use a similar API for compression stream put().
> 
> At this point we always have just one compression stream,
> since CONFIG_ZRAM_MULTI_COMP is not yet defined.
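
For reference, a minimal sketch of how the get/put pairing described
above looks from a caller's side (read_block() is a made-up helper for
illustration, not a function from this series):

static int read_block(struct zram *zram, void *src, unsigned int size,
		      void *dst)
{
	struct zcomp_strm *zstrm;
	int ret;

	/* Primary (default) compressor; ZRAM_SECONDARY_ZCOMP selects the second slot. */
	zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_ZCOMP]);
	ret = zcomp_decompress(zstrm, src, size, dst);
	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_ZCOMP]);

	return ret;
}
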
> 
> Signed-off-by: Sergey Senozhatsky <senozhatsky@...omium.org>
> ---
>  drivers/block/zram/zcomp.c    |  6 +--
>  drivers/block/zram/zcomp.h    |  2 +-
>  drivers/block/zram/zram_drv.c | 87 ++++++++++++++++++++++++-----------
>  drivers/block/zram/zram_drv.h | 14 +++++-
>  4 files changed, 77 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
> index 0916de952e09..55af4efd7983 100644
> --- a/drivers/block/zram/zcomp.c
> +++ b/drivers/block/zram/zcomp.c
> @@ -206,7 +206,7 @@ void zcomp_destroy(struct zcomp *comp)
>   * case of allocation error, or any other error potentially
>   * returned by zcomp_init().
>   */
> -struct zcomp *zcomp_create(const char *compress)
> +struct zcomp *zcomp_create(const char *alg)
>  {
>  	struct zcomp *comp;
>  	int error;
> @@ -216,14 +216,14 @@ struct zcomp *zcomp_create(const char *compress)
>  	 * is not loaded yet. We must do it here, otherwise we are about to
>  	 * call /sbin/modprobe under CPU hot-plug lock.
>  	 */
> -	if (!zcomp_available_algorithm(compress))
> +	if (!zcomp_available_algorithm(alg))
>  		return ERR_PTR(-EINVAL);
>  
>  	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
>  	if (!comp)
>  		return ERR_PTR(-ENOMEM);
>  
> -	comp->name = compress;
> +	comp->name = alg;
>  	error = zcomp_init(comp);
>  	if (error) {
>  		kfree(comp);
> diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
> index 40f6420f4b2e..cdefdef93da8 100644
> --- a/drivers/block/zram/zcomp.h
> +++ b/drivers/block/zram/zcomp.h
> @@ -27,7 +27,7 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
>  ssize_t zcomp_available_show(const char *comp, char *buf);
>  bool zcomp_available_algorithm(const char *comp);
>  
> -struct zcomp *zcomp_create(const char *comp);
> +struct zcomp *zcomp_create(const char *alg);
>  void zcomp_destroy(struct zcomp *comp);
>  
>  struct zcomp_strm *zcomp_stream_get(struct zcomp *comp);
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 966aab902d19..770ea3489eb6 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -1007,36 +1007,53 @@ static ssize_t comp_algorithm_show(struct device *dev,
>  	struct zram *zram = dev_to_zram(dev);
>  
>  	down_read(&zram->init_lock);
> -	sz = zcomp_available_show(zram->compressor, buf);
> +	sz = zcomp_available_show(zram->comp_algs[ZRAM_PRIMARY_ZCOMP], buf);
>  	up_read(&zram->init_lock);
>  
>  	return sz;
>  }
>  
> +static void comp_algorithm_set(struct zram *zram, u32 idx, const char *alg)
> +{
> +	/* Do not kfree() algs that we didn't allocate, IOW the default ones */
> +	if (zram->comp_algs[idx] != default_compressor)
> +		kfree(zram->comp_algs[idx]);
> +	zram->comp_algs[idx] = alg;
> +}
> +
>  static ssize_t comp_algorithm_store(struct device *dev,
>  		struct device_attribute *attr, const char *buf, size_t len)
>  {
>  	struct zram *zram = dev_to_zram(dev);
> -	char compressor[ARRAY_SIZE(zram->compressor)];
> +	char *compressor;
>  	size_t sz;
>  
> -	strscpy(compressor, buf, sizeof(compressor));
> +	sz = strlen(buf);
> +	if (sz >= CRYPTO_MAX_ALG_NAME)
> +		return -E2BIG;
> +
> +	compressor = kstrdup(buf, GFP_KERNEL);
> +	if (!compressor)
> +		return -ENOMEM;
> +
>  	/* ignore trailing newline */
> -	sz = strlen(compressor);
>  	if (sz > 0 && compressor[sz - 1] == '\n')
>  		compressor[sz - 1] = 0x00;
>  
> -	if (!zcomp_available_algorithm(compressor))
> +	if (!zcomp_available_algorithm(compressor)) {
> +		kfree(compressor);
>  		return -EINVAL;
> +	}
>  
>  	down_write(&zram->init_lock);
>  	if (init_done(zram)) {
>  		up_write(&zram->init_lock);
> +		kfree(compressor);
>  		pr_info("Can't change algorithm for initialized device\n");
>  		return -EBUSY;
>  	}
>  
> -	strcpy(zram->compressor, compressor);
> +	comp_algorithm_set(zram, ZRAM_PRIMARY_ZCOMP, compressor);
>  	up_write(&zram->init_lock);
>  	return len;
>  }
> @@ -1284,7 +1301,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
>  	size = zram_get_obj_size(zram, index);
>  
>  	if (size != PAGE_SIZE)
> -		zstrm = zcomp_stream_get(zram->comp);
> +		zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_ZCOMP]);
>  
>  	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
>  	if (size == PAGE_SIZE) {
> @@ -1296,7 +1313,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
>  		dst = kmap_atomic(page);
>  		ret = zcomp_decompress(zstrm, src, size, dst);
>  		kunmap_atomic(dst);
> -		zcomp_stream_put(zram->comp);
> +		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_ZCOMP]);
>  	}
>  	zs_unmap_object(zram->mem_pool, handle);
>  	zram_slot_unlock(zram, index);
> @@ -1363,13 +1380,13 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
>  	kunmap_atomic(mem);
>  
>  compress_again:
> -	zstrm = zcomp_stream_get(zram->comp);
> +	zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_ZCOMP]);
>  	src = kmap_atomic(page);
>  	ret = zcomp_compress(zstrm, src, &comp_len);
>  	kunmap_atomic(src);
>  
>  	if (unlikely(ret)) {
> -		zcomp_stream_put(zram->comp);
> +		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_ZCOMP]);
>  		pr_err("Compression failed! err=%d\n", ret);
>  		zs_free(zram->mem_pool, handle);
>  		return ret;
> @@ -1397,7 +1414,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
>  				__GFP_HIGHMEM |
>  				__GFP_MOVABLE);
>  	if (IS_ERR((void *)handle)) {
> -		zcomp_stream_put(zram->comp);
> +		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_ZCOMP]);
>  		atomic64_inc(&zram->stats.writestall);
>  		handle = zs_malloc(zram->mem_pool, comp_len,
>  				GFP_NOIO | __GFP_HIGHMEM |
> @@ -1414,14 +1431,14 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
>  		 * zstrm buffer back. It is necessary that the dereferencing
>  		 * of the zstrm variable below occurs correctly.
>  		 */
> -		zstrm = zcomp_stream_get(zram->comp);
> +		zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_ZCOMP]);
>  	}
>  
>  	alloced_pages = zs_get_total_pages(zram->mem_pool);
>  	update_used_max(zram, alloced_pages);
>  
>  	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
> -		zcomp_stream_put(zram->comp);
> +		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_ZCOMP]);
>  		zs_free(zram->mem_pool, handle);
>  		return -ENOMEM;
>  	}
> @@ -1435,7 +1452,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
>  	if (comp_len == PAGE_SIZE)
>  		kunmap_atomic(src);
>  
> -	zcomp_stream_put(zram->comp);
> +	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_ZCOMP]);
>  	zs_unmap_object(zram->mem_pool, handle);
>  	atomic64_add(comp_len, &zram->stats.compr_data_size);
>  out:
> @@ -1710,6 +1727,20 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
>  	return ret;
>  }
>  
> +static void zram_destroy_comps(struct zram *zram)
> +{
> +	u32 idx;
> +
> +	for (idx = 0; idx < ZRAM_MAX_ZCOMPS; idx++) {
> +		struct zcomp *comp = zram->comps[idx];
> +
> +		zram->comps[idx] = NULL;
> +		if (IS_ERR_OR_NULL(comp))

nit:

Why don't you use just a NULL check? I don't see any place where an
error value is stored in zram->comps (maybe in a later patch? I'll
keep checking).
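
For context, the distinction only matters if some setup path stores an
ERR_PTR value in the array instead of resetting it to NULL on failure.
A hypothetical sketch of the two options (zram_setup_comps() is an
assumed name, not code from this series):

static int zram_setup_comps(struct zram *zram)
{
	u32 idx;

	for (idx = 0; idx < ZRAM_MAX_ZCOMPS; idx++) {
		struct zcomp *comp;

		/* Secondary slots may have no algorithm configured. */
		if (!zram->comp_algs[idx])
			continue;

		comp = zcomp_create(zram->comp_algs[idx]);
		/*
		 * If the ERR_PTR were left in comps[idx] here, teardown
		 * would indeed need IS_ERR_OR_NULL(); resetting the slot
		 * to NULL on failure makes a plain NULL check sufficient.
		 */
		if (IS_ERR(comp)) {
			zram->comps[idx] = NULL;
			return PTR_ERR(comp);
		}
		zram->comps[idx] = comp;
	}
	return 0;
}
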
