Message-ID: <8624268dd3a211e656b3fb179c46c1742fe80790.camel@linux.intel.com>
Date: Fri, 14 Mar 2025 14:09:48 -0700
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: Kemeng Shi <shikemeng@...weicloud.com>, akpm@...ux-foundation.org
Cc: kasong@...cent.com, linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 4/9] mm: swap: remove unneeded VM_BUG_ON(*map != SWAP_HAS_CACHE) in swap_entry_range_free()

On Fri, 2025-03-14 at 05:05 +0800, Kemeng Shi wrote:
> As all callers of swap_entry_range_free() have already ensured slots to
> be freed are marked as SWAP_HAS_CACHE while holding the cluster lock,
> the BUG_ON check can be safely removed. After this, the function
> swap_entry_range_free() could drop any kind of last flag, so rename it to
> swap_entries_free() and update its comment accordingly.
>
> This is a preparation to use swap_entries_free() to drop last 1 and
Probably clearer to say
	drop the last ref count
instead of "drop last 1".
> SWAP_MAP_SHMEM flag.
>
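For reference, the caller-side invariant the changelog relies on is the
pattern in swap_entries_put_nr(), condensed here as a sketch (see the
hunk below for the real code):

	ci = lock_cluster(si, offset);
	/* every slot is set to SWAP_HAS_CACHE under the cluster lock */
	for (i = 0; i < nr; i++)
		WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
	if (!has_cache)
		/* so *map == SWAP_HAS_CACHE is guaranteed when freeing */
		swap_entries_free(si, ci, entry, nr);
	unlock_cluster(ci);

which is why the removed VM_BUG_ON could never fire.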
> Signed-off-by: Kemeng Shi <shikemeng@...weicloud.com>
> ---
> mm/swapfile.c | 21 ++++++++++-----------
> 1 file changed, 10 insertions(+), 11 deletions(-)
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index ba37b9bff586..14b7b37996ff 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -52,9 +52,9 @@
> static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
> unsigned char);
> static void free_swap_count_continuations(struct swap_info_struct *);
> -static void swap_entry_range_free(struct swap_info_struct *si,
> - struct swap_cluster_info *ci,
> - swp_entry_t entry, unsigned int nr_pages);
> +static void swap_entries_free(struct swap_info_struct *si,
> + struct swap_cluster_info *ci,
> + swp_entry_t entry, unsigned int nr_pages);
> static void swap_range_alloc(struct swap_info_struct *si,
> unsigned int nr_entries);
> static bool folio_swapcache_freeable(struct folio *folio);
> @@ -1511,7 +1511,7 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
> for (i = 0; i < nr; i++)
> WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
> if (!has_cache)
> - swap_entry_range_free(si, ci, entry, nr);
> + swap_entries_free(si, ci, entry, nr);
> unlock_cluster(ci);
>
> return has_cache;
> @@ -1530,12 +1530,12 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
> }
>
> /*
> - * Drop the last HAS_CACHE flag of swap entries, caller have to
> - * ensure all entries belong to the same cgroup.
> + * Drop the last flag(1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM) of swap entries,
> + * caller have to ensure all entries belong to the same cgroup.
It would be nice to modify the above comment to say:
	all entries belong to the same cgroup and cluster.
Otherwise,
Reviewed-by: Tim Chen <tim.c.chen@...ux.intel.com>
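As an aside for readers of the thread, the three "last flag" states the
new comment lists are single-byte swap_map encodings. A toy user-space
sketch, not kernel code (the two macro values here match what
include/linux/swap.h defines):

	#include <stdio.h>

	#define SWAP_HAS_CACHE	0x40	/* only the swap cache holds the entry */
	#define SWAP_MAP_SHMEM	0xbf	/* entry owned exclusively by shmem */

	int main(void)
	{
		/* the states swap_entries_free() may now clear to 0 */
		unsigned char last[] = { 1, SWAP_HAS_CACHE, SWAP_MAP_SHMEM };

		for (unsigned int i = 0; i < sizeof(last); i++)
			printf("swap_map 0x%02x -> 0x00\n", last[i]);
		return 0;
	}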
>   */
> -static void swap_entry_range_free(struct swap_info_struct *si,
> - struct swap_cluster_info *ci,
> - swp_entry_t entry, unsigned int nr_pages)
> +static void swap_entries_free(struct swap_info_struct *si,
> + struct swap_cluster_info *ci,
> + swp_entry_t entry, unsigned int nr_pages)
> {
> unsigned long offset = swp_offset(entry);
> unsigned char *map = si->swap_map + offset;
> @@ -1545,7 +1545,6 @@ static void swap_entry_range_free(struct swap_info_struct *si,
> VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1));
>
> do {
> - VM_BUG_ON(*map != SWAP_HAS_CACHE);
> *map = 0;
> } while (++map < map_end);
>
> @@ -1605,7 +1604,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
>
> ci = lock_cluster(si, offset);
> if (swap_only_has_cache(si, offset, size))
> - swap_entry_range_free(si, ci, entry, size);
> + swap_entries_free(si, ci, entry, size);
> else
> for (int i = 0; i < size; i++, entry.val++)
> swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE);