Date:	Tue, 05 Jun 2012 15:19:08 -0500
From:	Seth Jennings <sjenning@...ux.vnet.ibm.com>
To:	Sasha Levin <levinsasha928@...il.com>
CC:	gregkh@...uxfoundation.org, dan.magenheimer@...cle.com,
	konrad.wilk@...cle.com, devel@...verdev.osuosl.org,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH] zcache: don't limit number of pools per client

On 06/05/2012 06:05 AM, Sasha Levin wrote:

> Currently the number of pools each client can use is limited to 16. This is
> an arbitrary limit which isn't really required by the current implementation.


Might want to add something like "This places an arbitrary limit on the number of mounted filesystems that can use cleancache."

> 
> This patch removes that limit and uses IDR to do sparse mapping of pools
> in each client.

> 

> Signed-off-by: Sasha Levin <levinsasha928@...il.com>
> ---
>  drivers/staging/zcache/zcache-main.c |   41 +++++++++++++++++++--------------
>  1 files changed, 24 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
> index 784c796..b068bd8 100644
> --- a/drivers/staging/zcache/zcache-main.c
> +++ b/drivers/staging/zcache/zcache-main.c


Need to #include <linux/idr.h>, yes?
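
(i.e. just adding, alongside the other includes at the top of zcache-main.c:

	#include <linux/idr.h>

so we don't rely on it being pulled in indirectly.)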

> @@ -53,15 +53,13 @@
>  	(__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
>  #endif
> 
> -#define MAX_POOLS_PER_CLIENT 16
> -
>  #define MAX_CLIENTS 16
>  #define LOCAL_CLIENT ((uint16_t)-1)
> 
>  MODULE_LICENSE("GPL");
> 
>  struct zcache_client {
> -	struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
> +	struct idr tmem_pools;
>  	struct zs_pool *zspool;
>  	bool allocated;
>  	atomic_t refcount;
> @@ -949,11 +947,9 @@ static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
>  			goto out;
>  		atomic_inc(&cli->refcount);
>  	}
> -	if (poolid < MAX_POOLS_PER_CLIENT) {
> -		pool = cli->tmem_pools[poolid];
> -		if (pool != NULL)
> -			atomic_inc(&pool->refcount);
> -	}
> +	pool = idr_find(&cli->tmem_pools, poolid);
> +	if (pool != NULL)
> +		atomic_inc(&pool->refcount);


This is called on the main path, so it needs to be fast.  There is so much
contention elsewhere in the stack I don't think it'll be an issue. It looks
like idr_find() is fast, even though it contains a loop.  Just needs to be
considered.
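
FWIW, the loop in question is just the walk down the idr_layer tree --
roughly this (paraphrasing lib/idr.c from memory, not the exact code):

	/* idr_find(), approximately: descend one idr_layer per iteration */
	p = rcu_dereference_raw(idp->top);
	n = (p->layer + 1) * IDR_BITS;
	while (n > 0 && p) {
		n -= IDR_BITS;
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;

so it's bounded by the tree depth (a few iterations even for large ids),
not by the number of pools, and it doesn't take locks or allocate.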

>  out:
>  	return pool;
>  }
> @@ -987,6 +983,7 @@ int zcache_new_client(uint16_t cli_id)
>  	cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
>  	if (cli->zspool == NULL)
>  		goto out;
> +	idr_init(&cli->tmem_pools);
>  #endif
>  	ret = 0;
>  out:
> @@ -1673,10 +1670,10 @@ static int zcache_destroy_pool(int cli_id, int pool_id)
>  	if (cli == NULL)
>  		goto out;
>  	atomic_inc(&cli->refcount);
> -	pool = cli->tmem_pools[pool_id];
> +	pool = idr_find(&cli->tmem_pools, pool_id);
>  	if (pool == NULL)
>  		goto out;
> -	cli->tmem_pools[pool_id] = NULL;
> +	idr_remove(&cli->tmem_pools, pool_id);
>  	/* wait for pool activity on other cpus to quiesce */
>  	while (atomic_read(&pool->refcount) != 0)
>  		;
> @@ -1696,6 +1693,7 @@ static int zcache_new_pool(uint16_t cli_id, uint32_t flags)
>  	int poolid = -1;
>  	struct tmem_pool *pool;
>  	struct zcache_client *cli = NULL;
> +	int r;
> 
>  	if (cli_id == LOCAL_CLIENT)
>  		cli = &zcache_host;
> @@ -1710,20 +1708,29 @@ static int zcache_new_pool(uint16_t cli_id, uint32_t flags)
>  		goto out;
>  	}
> 
> -	for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
> -		if (cli->tmem_pools[poolid] == NULL)
> -			break;
> -	if (poolid >= MAX_POOLS_PER_CLIENT) {
> -		pr_info("zcache: pool creation failed: max exceeded\n");
> +retry:
> +	r = idr_pre_get(&cli->tmem_pools, GFP_ATOMIC);

> +	if (r != 1) {
> +		kfree(pool);
> +		pr_info("zcache: pool creation failed: out of memory\n");
> +		goto out;
> +	}
> +	r = idr_get_new(&cli->tmem_pools, pool, &poolid);
> +	switch (r) {
> +	case 0:
> +		break;
> +	case -EAGAIN:
> +		goto retry;
> +	default:
> +		pr_info("zcache: pool creation failed: error %d\n", r);
>  		kfree(pool);
> -		poolid = -1;
>  		goto out;
>  	}
> +


how about:
=====
do {
	r = idr_pre_get(&cli->tmem_pools, GFP_ATOMIC);
	if (r != 1) {
		kfree(pool);
		pr_info("zcache: pool creation failed: out of memory\n");
		goto out;
	}
	r = idr_get_new(&cli->tmem_pools, pool, &poolid);
} while (r == -EAGAIN);

if (r) {
	pr_info("zcache: pool creation failed: error %d\n", r);
	kfree(pool);
	goto out;
}
=====
so we can lose the label/goto.

Also, do we want GFP_ATOMIC?  Why not GFP_KERNEL?
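
(If this path is only ever reached from process context -- it looks like
it's hit at pool creation time, e.g. cleancache init_fs at mount or
frontswap init at swapon -- then something like

	r = idr_pre_get(&cli->tmem_pools, GFP_KERNEL);

would let idr_pre_get() sleep and reclaim instead of failing under memory
pressure.  That's assuming nothing in the call chain holds a spinlock
here; I haven't checked.)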

>  	atomic_set(&pool->refcount, 0);
>  	pool->client = cli;
>  	pool->pool_id = poolid;
>  	tmem_new_pool(pool, flags);
> -	cli->tmem_pools[poolid] = pool;
>  	pr_info("zcache: created %s tmem pool, id=%d, client=%d\n",
>  		flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
>  		poolid, cli_id);


Thanks,
Seth
