Message-Id: <20220913154321.45954-1-sj@kernel.org>
Date:   Tue, 13 Sep 2022 15:43:21 +0000
From:   SeongJae Park <sj@...nel.org>
To:     Xin Hao <xhao@...ux.alibaba.com>
Cc:     sj@...nel.org, akpm@...ux-foundation.org, damon@...ts.linux.dev,
        linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] mm/damon: simplify scheme create in lru_sort.c

Hi Xin,

On Tue, 13 Sep 2022 23:22:45 +0800 Xin Hao <xhao@...ux.alibaba.com> wrote:

> damon_lru_sort_new_hot_scheme() and damon_lru_sort_new_cold_scheme()
> have much in common, so combine them into a single function and handle
> only their differences there.
> 
> Signed-off-by: Xin Hao <xhao@...ux.alibaba.com>
> ---
>  mm/damon/lru_sort.c | 82 +++++++++++++++++----------------------------
>  1 file changed, 30 insertions(+), 52 deletions(-)
> 
> diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
> index 8415e18fcf0e..62063ed43224 100644
> --- a/mm/damon/lru_sort.c
> +++ b/mm/damon/lru_sort.c
> @@ -257,15 +257,15 @@ module_param(nr_cold_quota_exceeds, ulong, 0400);
>  static struct damon_ctx *ctx;
>  static struct damon_target *target;
> 
> -/* Create a DAMON-based operation scheme for hot memory regions */
> -static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
> +static inline struct damos *damon_lru_sort_new_scheme(unsigned int thres,
> +						      enum damos_action action)
>  {
>  	struct damos_access_pattern pattern = {
>  		/* Find regions having PAGE_SIZE or larger size */
>  		.min_sz_region = PAGE_SIZE,
>  		.max_sz_region = ULONG_MAX,
>  		/* and accessed for more than the threshold */
> -		.min_nr_accesses = hot_thres,
> +		.min_nr_accesses = 0,
>  		.max_nr_accesses = UINT_MAX,
>  		/* no matter its age */
>  		.min_age_region = 0,
> @@ -292,60 +292,38 @@ static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
>  		.weight_age = 0,
>  	};
> 
> -	return damon_new_scheme(
> -			&pattern,
> -			/* prioritize those on LRU lists, as soon as found */
> -			DAMOS_LRU_PRIO,
> -			/* under the quota. */
> -			&quota,
> -			/* (De)activate this according to the watermarks. */
> -			&wmarks);
> +	switch (action) {
> +	case DAMOS_LRU_PRIO:
> +		pattern.min_nr_accesses = thres;
> +		break;
> +	case DAMOS_LRU_DEPRIO:
> +		pattern.min_age_region = thres;
> +		quota.weight_nr_accesses = 0;
> +		quota.weight_age = 1;
> +		break;

I would like to do this from damon_lru_sort_new_{hot,cold}_scheme() instead.
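For example, something like below, though this is just a rough sketch of the
idea and not even compile-tested:

static struct damos *damon_lru_sort_new_scheme(
		struct damos_access_pattern *pattern, enum damos_action action)
{
	struct damos_watermarks wmarks = {
		.metric = DAMOS_WMARK_FREE_MEM_RATE,
		.interval = wmarks_interval,
		.high = wmarks_high,
		.mid = wmarks_mid,
		.low = wmarks_low,
	};
	struct damos_quota quota = {
		/*
		 * Do not try LRU-lists sorting for more than half of
		 * quota_ms milliseconds within quota_reset_interval_ms.
		 */
		.ms = quota_ms / 2,
		.sz = 0,
		.reset_interval = quota_reset_interval_ms,
		/*
		 * Within the quota, prioritize hotter regions for LRU_PRIO
		 * and colder regions for LRU_DEPRIO.
		 */
		.weight_sz = 0,
		.weight_nr_accesses = action == DAMOS_LRU_PRIO ? 1 : 0,
		.weight_age = action == DAMOS_LRU_PRIO ? 0 : 1,
	};

	return damon_new_scheme(pattern, action,
				/* under the quota. */
				&quota,
				/* (De)activate this according to the watermarks. */
				&wmarks);
}

/* Create a DAMON-based operation scheme for hot memory regions */
static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
{
	struct damos_access_pattern pattern = {
		/* Find regions having PAGE_SIZE or larger size */
		.min_sz_region = PAGE_SIZE,
		.max_sz_region = ULONG_MAX,
		/* and accessed for more than the threshold */
		.min_nr_accesses = hot_thres,
		.max_nr_accesses = UINT_MAX,
		/* no matter its age */
		.min_age_region = 0,
		.max_age_region = UINT_MAX,
	};

	return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_PRIO);
}

damon_lru_sort_new_cold_scheme() would look almost the same, but with
.min_nr_accesses/.max_nr_accesses set to 0, .min_age_region set to cold_thres,
and DAMOS_LRU_DEPRIO passed as the action.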

> +	default:
> +		return NULL;
> +	}
> +
> +	return damon_new_scheme(&pattern,
> +				/* mark those as not accessed, as soon as found */
> +				action,
> +				/* under the quota. */
> +				&quota,
> +				/* (De)activate this according to the watermarks. */
> +				&wmarks);
> +}
> +
> +/* Create a DAMON-based operation scheme for hot memory regions */
> +static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
> +{
> +	return damon_lru_sort_new_scheme(hot_thres, DAMOS_LRU_PRIO);
>  }
> 
>  /* Create a DAMON-based operation scheme for cold memory regions */
>  static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
>  {
> -	struct damos_access_pattern pattern = {
> -		/* Find regions having PAGE_SIZE or larger size */
> -		.min_sz_region = PAGE_SIZE,
> -		.max_sz_region = ULONG_MAX,
> -		/* and not accessed at all */
> -		.min_nr_accesses = 0,
> -		.max_nr_accesses = 0,
> -		/* for min_age or more micro-seconds */
> -		.min_age_region = cold_thres,
> -		.max_age_region = UINT_MAX,
> -	};
> -	struct damos_watermarks wmarks = {
> -		.metric = DAMOS_WMARK_FREE_MEM_RATE,
> -		.interval = wmarks_interval,
> -		.high = wmarks_high,
> -		.mid = wmarks_mid,
> -		.low = wmarks_low,
> -	};
> -	struct damos_quota quota = {
> -		/*
> -		 * Do not try LRU-lists sorting of cold pages for more than
> -		 * half of quota_ms milliseconds within
> -		 * quota_reset_interval_ms.
> -		 */
> -		.ms = quota_ms / 2,
> -		.sz = 0,
> -		.reset_interval = quota_reset_interval_ms,
> -		/* Within the quota, mark colder regions not accessed first. */
> -		.weight_sz = 0,
> -		.weight_nr_accesses = 0,
> -		.weight_age = 1,
> -	};
> -
> -	return damon_new_scheme(
> -			&pattern,
> -			/* mark those as not accessed, as soon as found */
> -			DAMOS_LRU_DEPRIO,
> -			/* under the quota. */
> -			&quota,
> -			/* (De)activate this according to the watermarks. */
> -			&wmarks);
> +	return damon_lru_sort_new_scheme(cold_thres, DAMOS_LRU_DEPRIO);
>  }
> 
>  static int damon_lru_sort_apply_parameters(void)
> --
> 2.31.0

And, yes, LRU_SORT is a mess.  I'm also trying to make some cleanups like this:
https://git.kernel.org/pub/scm/linux/kernel/git/sj/linux.git/commit/?h=damon/next&id=366451cd82e71ff0227caa0c6b7b6f9ae2659c29

I'd like to apply your patch and then rebase my cleanups on it, but that might
not be a quick job.  Could I ask you to wait until my cleanups are done?  I
will post the patches within this week.


Thanks,
SJ
