lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <Z39K4cGuXbQs9Pwu@MiWiFi-R3L-srv>
Date: Thu, 9 Jan 2025 12:04:49 +0800
From: Baoquan He <bhe@...hat.com>
To: Kairui Song <kasong@...cent.com>
Cc: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
	Chris Li <chrisl@...nel.org>, Barry Song <v-songbaohua@...o.com>,
	Ryan Roberts <ryan.roberts@....com>,
	Hugh Dickins <hughd@...gle.com>,
	Yosry Ahmed <yosryahmed@...gle.com>,
	"Huang, Ying" <ying.huang@...ux.alibaba.com>,
	Nhat Pham <nphamcs@...il.com>, Johannes Weiner <hannes@...xchg.org>,
	Kalesh Singh <kaleshsingh@...gle.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 01/13] mm, swap: minor clean up for swap entry
 allocation

On 12/31/24 at 01:46am, Kairui Song wrote:
> From: Kairui Song <kasong@...cent.com>
> 
> Direct reclaim can skip the whole folio after reclaiming a set of
> folio-based slots. Also simplify the code for allocation, reducing
> indentation.
> 
> Signed-off-by: Kairui Song <kasong@...cent.com>
> ---
>  mm/swapfile.c | 59 +++++++++++++++++++++++++--------------------------
>  1 file changed, 29 insertions(+), 30 deletions(-)

This actually can be split into two patches. Anyway,

Reviewed-by: Baoquan He <bhe@...hat.com>

> 
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index b0a9071cfe1d..f8002f110104 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -604,23 +604,28 @@ static bool cluster_reclaim_range(struct swap_info_struct *si,
>  				  unsigned long start, unsigned long end)
>  {
>  	unsigned char *map = si->swap_map;
> -	unsigned long offset;
> +	unsigned long offset = start;
> +	int nr_reclaim;
>  
>  	spin_unlock(&ci->lock);
>  	spin_unlock(&si->lock);
>  
> -	for (offset = start; offset < end; offset++) {
> +	do {
>  		switch (READ_ONCE(map[offset])) {
>  		case 0:
> -			continue;
> +			offset++;
> +			break;
>  		case SWAP_HAS_CACHE:
> -			if (__try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT) > 0)
> -				continue;
> -			goto out;
> +			nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
> +			if (nr_reclaim > 0)
> +				offset += nr_reclaim;
> +			else
> +				goto out;
> +			break;
>  		default:
>  			goto out;
>  		}
> -	}
> +	} while (offset < end);
>  out:
>  	spin_lock(&si->lock);
>  	spin_lock(&ci->lock);
> @@ -838,35 +843,30 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
>  							 &found, order, usage);
>  			frags++;
>  			if (found)
> -				break;
> +				goto done;
>  		}
>  
> -		if (!found) {
> +		/*
> +		 * Nonfull clusters are moved to frag tail if we reached
> +		 * here, count them too, don't over scan the frag list.
> +		 */
> +		while (frags < si->frag_cluster_nr[order]) {
> +			ci = list_first_entry(&si->frag_clusters[order],
> +					      struct swap_cluster_info, list);
>  			/*
> -			 * Nonfull clusters are moved to frag tail if we reached
> -			 * here, count them too, don't over scan the frag list.
> +			 * Rotate the frag list to iterate, they were all failing
> +			 * high order allocation or moved here due to per-CPU usage,
> +			 * this help keeping usable cluster ahead.
>  			 */
> -			while (frags < si->frag_cluster_nr[order]) {
> -				ci = list_first_entry(&si->frag_clusters[order],
> -						      struct swap_cluster_info, list);
> -				/*
> -				 * Rotate the frag list to iterate, they were all failing
> -				 * high order allocation or moved here due to per-CPU usage,
> -				 * this help keeping usable cluster ahead.
> -				 */
> -				list_move_tail(&ci->list, &si->frag_clusters[order]);
> -				offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
> -								 &found, order, usage);
> -				frags++;
> -				if (found)
> -					break;
> -			}
> +			list_move_tail(&ci->list, &si->frag_clusters[order]);
> +			offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
> +							 &found, order, usage);
> +			frags++;
> +			if (found)
> +				goto done;
>  		}
>  	}
>  
> -	if (found)
> -		goto done;
> -
>  	if (!list_empty(&si->discard_clusters)) {
>  		/*
>  		 * we don't have free cluster but have some clusters in
> @@ -904,7 +904,6 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
>  				goto done;
>  		}
>  	}
> -
>  done:
>  	cluster->next[order] = offset;
>  	return found;
> -- 
> 2.47.1
> 
> 


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ