Message-ID: <18a6bed3-112b-4107-9aaa-6963b6b0bc08@linux.alibaba.com>
Date: Tue, 30 Jul 2024 16:15:18 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: Barry Song <21cnbao@...il.com>, akpm@...ux-foundation.org,
 linux-mm@...ck.org
Cc: chrisl@...nel.org, david@...hat.com, hannes@...xchg.org,
 hughd@...gle.com, kaleshsingh@...gle.com, kasong@...cent.com,
 linux-kernel@...r.kernel.org, mhocko@...e.com, minchan@...nel.org,
 nphamcs@...il.com, ryan.roberts@....com, senozhatsky@...omium.org,
 shakeel.butt@...ux.dev, shy828301@...il.com, surenb@...gle.com,
 v-songbaohua@...o.com, willy@...radead.org, xiang@...nel.org,
 ying.huang@...el.com, yosryahmed@...gle.com
Subject: Re: [PATCH 1/1] mm: swap: add nr argument in swapcache_prepare and
 swapcache_clear to support large folios



On 2024/7/30 15:13, Barry Song wrote:
> From: Barry Song <v-songbaohua@...o.com>
> 
> Right now, swapcache_prepare() and swapcache_clear() support only one
> entry. To support large folios, we need to handle multiple swap entries.
> 
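For illustration, a rough caller-side sketch of the batched interface; everything here except swapcache_prepare()/swapcache_clear() and their new nr argument is hypothetical, and all nr entries are assumed to lie within a single swap cluster (as the VM_WARN_ON added below requires):

/*
 * Hypothetical large-folio swapin path, for illustration only.
 * example_read_folio_from_swap() is a made-up helper.
 */
static int example_swapin_large_folio(struct swap_info_struct *si,
				      swp_entry_t entry,
				      struct folio *folio)
{
	int nr = folio_nr_pages(folio);
	int err;

	/* Pin all nr contiguous swap slots of the folio in one call. */
	err = swapcache_prepare(entry, nr);
	if (err)
		return err;	/* e.g. -EEXIST if another task raced */

	err = example_read_folio_from_swap(si, entry, folio);
	if (err)
		/* Drop every SWAP_HAS_CACHE pin taken above in one call. */
		swapcache_clear(si, entry, nr);

	return err;
}
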
> To optimize stack usage, we iterate twice in __swap_duplicate(): the
> first time to verify that all entries are valid, and the second time
> to apply the modifications to the entries.
> 
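A minimal sketch of that validate-then-commit shape, heavily simplified from the real __swap_duplicate() in the diff below (the two example_slot_* helpers are hypothetical): because the first pass writes nothing, it can bail out at any entry without leaving partial state behind, so no per-entry undo array is needed on the stack.

/* Illustrative pattern only; the real logic is in the diff below. */
static int example_duplicate_batch(unsigned char *map, unsigned long offset,
				   int nr, unsigned char usage)
{
	int i;

	/* Pass 1: validate every slot before modifying anything. */
	for (i = 0; i < nr; i++) {
		if (!example_slot_can_take(map[offset + i], usage))
			return -ENOENT;
	}

	/* Pass 2: all checks passed, now apply the updates. */
	for (i = 0; i < nr; i++)
		map[offset + i] = example_slot_apply(map[offset + i], usage);

	return 0;
}
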
> Currently, we're using nr=1 for the existing users.
> 
> Reviewed-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
> Signed-off-by: Barry Song <v-songbaohua@...o.com>

Thanks Barry. I tested it with my shmem swap patches, and it works well. So:
Tested-by: Baolin Wang <baolin.wang@...ux.alibaba.com>

> ---
>   include/linux/swap.h |   4 +-
>   mm/memory.c          |   6 +--
>   mm/swap.h            |   5 ++-
>   mm/swap_state.c      |   2 +-
>   mm/swapfile.c        | 101 +++++++++++++++++++++++++------------------
>   5 files changed, 68 insertions(+), 50 deletions(-)
> 
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index ba7ea95d1c57..5b920fa2315b 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -480,7 +480,7 @@ extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order);
>   extern int add_swap_count_continuation(swp_entry_t, gfp_t);
>   extern void swap_shmem_alloc(swp_entry_t);
>   extern int swap_duplicate(swp_entry_t);
> -extern int swapcache_prepare(swp_entry_t);
> +extern int swapcache_prepare(swp_entry_t entry, int nr);
>   extern void swap_free_nr(swp_entry_t entry, int nr_pages);
>   extern void swapcache_free_entries(swp_entry_t *entries, int n);
>   extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
> @@ -554,7 +554,7 @@ static inline int swap_duplicate(swp_entry_t swp)
>   	return 0;
>   }
>   
> -static inline int swapcache_prepare(swp_entry_t swp)
> +static inline int swapcache_prepare(swp_entry_t swp, int nr)
>   {
>   	return 0;
>   }
> diff --git a/mm/memory.c b/mm/memory.c
> index 833d2cad6eb2..b8675617a5e3 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4081,7 +4081,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>   			 * reusing the same entry. It's undetectable as
>   			 * pte_same() returns true due to entry reuse.
>   			 */
> -			if (swapcache_prepare(entry)) {
> +			if (swapcache_prepare(entry, 1)) {
>   				/* Relax a bit to prevent rapid repeated page faults */
>   				schedule_timeout_uninterruptible(1);
>   				goto out;
> @@ -4387,7 +4387,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>   out:
>   	/* Clear the swap cache pin for direct swapin after PTL unlock */
>   	if (need_clear_cache)
> -		swapcache_clear(si, entry);
> +		swapcache_clear(si, entry, 1);
>   	if (si)
>   		put_swap_device(si);
>   	return ret;
> @@ -4403,7 +4403,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>   		folio_put(swapcache);
>   	}
>   	if (need_clear_cache)
> -		swapcache_clear(si, entry);
> +		swapcache_clear(si, entry, 1);
>   	if (si)
>   		put_swap_device(si);
>   	return ret;
> diff --git a/mm/swap.h b/mm/swap.h
> index baa1fa946b34..7c6330561d84 100644
> --- a/mm/swap.h
> +++ b/mm/swap.h
> @@ -59,7 +59,7 @@ void __delete_from_swap_cache(struct folio *folio,
>   void delete_from_swap_cache(struct folio *folio);
>   void clear_shadow_from_swap_cache(int type, unsigned long begin,
>   				  unsigned long end);
> -void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
> +void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
>   struct folio *swap_cache_get_folio(swp_entry_t entry,
>   		struct vm_area_struct *vma, unsigned long addr);
>   struct folio *filemap_get_incore_folio(struct address_space *mapping,
> @@ -120,7 +120,7 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
>   	return 0;
>   }
>   
> -static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
> +static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
>   {
>   }
>   
> @@ -172,4 +172,5 @@ static inline unsigned int folio_swap_flags(struct folio *folio)
>   	return 0;
>   }
>   #endif /* CONFIG_SWAP */
> +
>   #endif /* _MM_SWAP_H */
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index a1726e49a5eb..b06f2a054f5a 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -477,7 +477,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
>   		/*
>   		 * Swap entry may have been freed since our caller observed it.
>   		 */
> -		err = swapcache_prepare(entry);
> +		err = swapcache_prepare(entry, 1);
>   		if (!err)
>   			break;
>   
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 5f73a8553371..757d38a86f56 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -3363,7 +3363,7 @@ void si_swapinfo(struct sysinfo *val)
>   }
>   
>   /*
> - * Verify that a swap entry is valid and increment its swap map count.
> + * Verify that nr swap entries are valid and increment their swap map counts.
>    *
>    * Returns error code in following case.
>    * - success -> 0
> @@ -3373,60 +3373,77 @@ void si_swapinfo(struct sysinfo *val)
>    * - swap-cache reference is requested but the entry is not used. -> ENOENT
>    * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
>    */
> -static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
> +static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
>   {
>   	struct swap_info_struct *p;
>   	struct swap_cluster_info *ci;
>   	unsigned long offset;
>   	unsigned char count;
>   	unsigned char has_cache;
> -	int err;
> +	int err, i;
>   
>   	p = swp_swap_info(entry);
>   
>   	offset = swp_offset(entry);
> +	VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
>   	ci = lock_cluster_or_swap_info(p, offset);
>   
> -	count = p->swap_map[offset];
> +	err = 0;
> +	for (i = 0; i < nr; i++) {
> +		count = p->swap_map[offset + i];
>   
> -	/*
> -	 * swapin_readahead() doesn't check if a swap entry is valid, so the
> -	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
> -	 */
> -	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
> -		err = -ENOENT;
> -		goto unlock_out;
> -	}
> +		/*
> +		 * swapin_readahead() doesn't check if a swap entry is valid, so the
> +		 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
> +		 */
> +		if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
> +			err = -ENOENT;
> +			goto unlock_out;
> +		}
>   
> -	has_cache = count & SWAP_HAS_CACHE;
> -	count &= ~SWAP_HAS_CACHE;
> -	err = 0;
> +		has_cache = count & SWAP_HAS_CACHE;
> +		count &= ~SWAP_HAS_CACHE;
>   
> -	if (usage == SWAP_HAS_CACHE) {
> +		if (usage == SWAP_HAS_CACHE) {
> +			/* set SWAP_HAS_CACHE if there is no cache and entry is used */
> +			if (!has_cache && count)
> +				continue;
> +			else if (has_cache)		/* someone else added cache */
> +				err = -EEXIST;
> +			else				/* no users remaining */
> +				err = -ENOENT;
>   
> -		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
> -		if (!has_cache && count)
> -			has_cache = SWAP_HAS_CACHE;
> -		else if (has_cache)		/* someone else added cache */
> -			err = -EEXIST;
> -		else				/* no users remaining */
> -			err = -ENOENT;
> +		} else if (count || has_cache) {
>   
> -	} else if (count || has_cache) {
> +			if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
> +				continue;
> +			else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
> +				err = -EINVAL;
> +			else if (swap_count_continued(p, offset + i, count))
> +				continue;
> +			else
> +				err = -ENOMEM;
> +		} else
> +			err = -ENOENT;			/* unused swap entry */
>   
> -		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
> +		if (err)
> +			goto unlock_out;
> +	}
> +
> +	for (i = 0; i < nr; i++) {
> +		count = p->swap_map[offset + i];
> +		has_cache = count & SWAP_HAS_CACHE;
> +		count &= ~SWAP_HAS_CACHE;
> +
> +		if (usage == SWAP_HAS_CACHE)
> +			has_cache = SWAP_HAS_CACHE;
> +		else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
>   			count += usage;
> -		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
> -			err = -EINVAL;
> -		else if (swap_count_continued(p, offset, count))
> -			count = COUNT_CONTINUED;
>   		else
> -			err = -ENOMEM;
> -	} else
> -		err = -ENOENT;			/* unused swap entry */
> +			count = COUNT_CONTINUED;
>   
> -	if (!err)
> -		WRITE_ONCE(p->swap_map[offset], count | has_cache);
> +		WRITE_ONCE(p->swap_map[offset + i], count | has_cache);
> +	}
>   
>   unlock_out:
>   	unlock_cluster_or_swap_info(p, ci);
> @@ -3439,7 +3456,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
>    */
>   void swap_shmem_alloc(swp_entry_t entry)
>   {
> -	__swap_duplicate(entry, SWAP_MAP_SHMEM);
> +	__swap_duplicate(entry, SWAP_MAP_SHMEM, 1);
>   }
>   
>   /*
> @@ -3453,29 +3470,29 @@ int swap_duplicate(swp_entry_t entry)
>   {
>   	int err = 0;
>   
> -	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
> +	while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM)
>   		err = add_swap_count_continuation(entry, GFP_ATOMIC);
>   	return err;
>   }
>   
>   /*
> - * @entry: swap entry for which we allocate swap cache.
> + * @entry: first swap entry from which we allocate nr swap cache.
>    *
> - * Called when allocating swap cache for existing swap entry,
> + * Called when allocating swap cache for existing swap entries,
>    * This can return error codes. Returns 0 at success.
>    * -EEXIST means there is a swap cache.
>    * Note: return code is different from swap_duplicate().
>    */
> -int swapcache_prepare(swp_entry_t entry)
> +int swapcache_prepare(swp_entry_t entry, int nr)
>   {
> -	return __swap_duplicate(entry, SWAP_HAS_CACHE);
> +	return __swap_duplicate(entry, SWAP_HAS_CACHE, nr);
>   }
>   
> -void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
> +void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
>   {
>   	unsigned long offset = swp_offset(entry);
>   
> -	cluster_swap_free_nr(si, offset, 1, SWAP_HAS_CACHE);
> +	cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE);
>   }
>   
>   struct swap_info_struct *swp_swap_info(swp_entry_t entry)
