Message-ID: <20150810085302.GB600@swordfish>
Date:	Mon, 10 Aug 2015 17:53:53 +0900
From:	Sergey Senozhatsky <sergey.senozhatsky.work@...il.com>
To:	Minchan Kim <minchan@...nel.org>
Cc:	Andrew Morton <akpm@...ux-foundation.org>, gioh.kim@....com,
	Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
	linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [RFC zsmalloc 2/4] zsmalloc: squeeze inuse into page->mapping

On (08/10/15 16:12), Minchan Kim wrote:
[..]
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 491491a..75fefba 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -35,7 +35,7 @@
>   *		metadata.
>   *	page->lru: links together first pages of various zspages.
>   *		Basically forming list of zspages in a fullness group.
> - *	page->mapping: class index and fullness group of the zspage
> + *	page->mapping: override by struct zs_meta
>   *
>   * Usage of struct page flags:
>   *	PG_private: identifies the first component page
> @@ -132,6 +132,14 @@
>  /* each chunk includes extra space to keep handle */
>  #define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
>  
> +#define CLASS_IDX_BITS	8
> +#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
> +#define FULLNESS_BITS	2
> +#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
> +#define INUSE_BITS	11
> +#define INUSE_MASK	((1 << INUSE_BITS) - 1)
> +#define ETC_BITS	((sizeof(unsigned long) * 8) - CLASS_IDX_BITS \
> +				- FULLNESS_BITS - INUSE_BITS)
>  /*
>   * On systems with 4K page size, this gives 255 size classes! There is a
>   * trader-off here:
> @@ -145,16 +153,15 @@
>   *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
>   *  (reason above)
>   */
> -#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
> +#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_IDX_BITS)
>  
>  /*
>   * We do not maintain any list for completely empty or full pages
> + * Don't reorder.
>   */
>  enum fullness_group {
> -	ZS_ALMOST_FULL,
> +	ZS_ALMOST_FULL = 0,
>  	ZS_ALMOST_EMPTY,
> -	_ZS_NR_FULLNESS_GROUPS,
> -
>  	ZS_EMPTY,
>  	ZS_FULL
>  };
> @@ -198,7 +205,7 @@ static const int fullness_threshold_frac = 4;
>  
>  struct size_class {
>  	spinlock_t lock;
> -	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
> +	struct page *fullness_list[ZS_EMPTY];
>  	/*
>  	 * Size of objects stored in this class. Must be multiple
>  	 * of ZS_ALIGN.
> @@ -259,13 +266,16 @@ struct zs_pool {
>  };
>  
>  /*
> - * A zspage's class index and fullness group
> - * are encoded in its (first)page->mapping
> + * In this implementation, a zspage's class index, fullness group,
> + * inuse object count are encoded in its (first)page->mapping
> + * sizeof(struct zs_meta) should be equal to sizeof(unsigned long).
>   */
> -#define CLASS_IDX_BITS	28
> -#define FULLNESS_BITS	4
> -#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
> -#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
> +struct zs_meta {
> +	unsigned long class_idx:CLASS_IDX_BITS;
> +	unsigned long fullness:FULLNESS_BITS;
> +	unsigned long inuse:INUSE_BITS;
> +	unsigned long etc:ETC_BITS;
> +};
>  
>  struct mapping_area {
>  #ifdef CONFIG_PGTABLE_MAPPING
> @@ -403,26 +413,51 @@ static int is_last_page(struct page *page)
>  	return PagePrivate2(page);
>  }
>  
> +static int get_inuse_obj(struct page *page)
> +{
> +	struct zs_meta *m;
> +
> +	BUG_ON(!is_first_page(page));
> +
> +	m = (struct zs_meta *)&page->mapping;
> +
> +	return m->inuse;
> +}

A side note: I think there are too many BUG_ONs in zsmalloc. Some of
them are clearly unnecessary, because the same checks are already
performed several lines above. For example:

get_fullness_group()
	BUG_ON(!is_first_page(page))
	get_inuse_obj()
		BUG_ON(!is_first_page(page))


fix_fullness_group()
	BUG_ON(!is_first_page(page))
	get_zspage_mapping()
		BUG_ON(!is_first_page(page))


and so on.
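
one way to avoid the duplication (just a sketch of what I mean, not a tested
patch) would be to keep the check only in the innermost helper, and perhaps
demote it to VM_BUG_ON() so it compiles away without CONFIG_DEBUG_VM:

static int get_inuse_obj(struct page *page)
{
        struct zs_meta *m;

        /* single sanity check; callers rely on it */
        VM_BUG_ON(!is_first_page(page));

        m = (struct zs_meta *)&page->mapping;
        return m->inuse;
}

/* hypothetical caller: no duplicated BUG_ON(!is_first_page()) here */
static bool zspage_is_empty(struct page *first_page)
{
        return get_inuse_obj(first_page) == 0;
}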


> +static void set_inuse_obj(struct page *page, int inc)
> +{
> +	struct zs_meta *m;
> +
> +	BUG_ON(!is_first_page(page));
> +
> +	m = (struct zs_meta *)&page->mapping;
> +	m->inuse += inc;
> +}
> +

{get,set}_zspage_inuse() ?
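
i.e. something along these lines (hypothetical names only, bodies as in the
patch):

static int get_zspage_inuse(struct page *first_page)
{
        struct zs_meta *m;

        BUG_ON(!is_first_page(first_page));

        m = (struct zs_meta *)&first_page->mapping;
        return m->inuse;
}

static void set_zspage_inuse(struct page *first_page, int inc)
{
        struct zs_meta *m;

        BUG_ON(!is_first_page(first_page));

        m = (struct zs_meta *)&first_page->mapping;
        m->inuse += inc;
}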


[..]

> +	BUILD_BUG_ON(sizeof(unsigned long) * 8 < (CLASS_IDX_BITS + \
> +			FULLNESS_BITS + INUSE_BITS + ETC_BITS));

is this BUILD_BUG_ON overwritten in the next patch?
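
If the intent is simply to guarantee that the metadata still fits in
page->mapping (the comment above says sizeof(struct zs_meta) should equal
sizeof(unsigned long), and 8 + 2 + 11 = 21 bits fit even on 32-bit), maybe a
check on the struct size itself would survive the next patch unchanged, e.g.
(untested):

        BUILD_BUG_ON(sizeof(struct zs_meta) != sizeof(unsigned long));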

	-ss