[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <4edca674b2460b18748251b984b722803a9706f2.camel@linux.intel.com>
Date: Fri, 14 Mar 2025 13:59:07 -0700
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: Kemeng Shi <shikemeng@...weicloud.com>, akpm@...ux-foundation.org
Cc: kasong@...cent.com, linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 3/9] mm: swap: use __swap_entry_free() to free swap
entry in swap_entry_put_locked()
On Fri, 2025-03-14 at 05:05 +0800, Kemeng Shi wrote:
> In swap_entry_put_locked(), we will set slot to SWAP_HAS_CACHE before
> using swap_entry_range_free to do actual swap entry freeing. This
> introduces an unnecessary intermediate state.
> By using __swap_entry_free() in swap_entry_put_locked(), we can eliminate
> the need to set slot to SWAP_HAS_CACHE.
> This change would make the behavior of swap_entry_put_locked() more
> consistent with other put() operations, which do the actual free work
> after putting the last reference.
>
> Signed-off-by: Kemeng Shi <shikemeng@...weicloud.com>
Reviewed-by: Tim Chen <tim.c.chen@...ux.intel.com>
> ---
> mm/swapfile.c | 28 ++++++++++++----------------
> 1 file changed, 12 insertions(+), 16 deletions(-)
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 7c886f9dd6f9..ba37b9bff586 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1367,9 +1367,11 @@ static inline void __swap_entries_free(struct swap_info_struct *si,
> }
>
> static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
> - unsigned long offset,
> + struct swap_cluster_info *ci,
> + swp_entry_t entry,
> unsigned char usage)
> {
> + unsigned long offset = swp_offset(entry);
> unsigned char count;
> unsigned char has_cache;
>
> @@ -1398,10 +1400,9 @@ static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
> }
>
> usage = count | has_cache;
> - if (usage)
> - WRITE_ONCE(si->swap_map[offset], usage);
> - else
> - WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
> + WRITE_ONCE(si->swap_map[offset], usage);
> + if (!usage)
> + __swap_entries_free(si, ci, entry, 1);
>
> return usage;
> }
> @@ -1480,9 +1481,7 @@ static unsigned char swap_entry_put(struct swap_info_struct *si,
> unsigned char usage;
>
> ci = lock_cluster(si, offset);
> - usage = swap_entry_put_locked(si, offset, 1);
> - if (!usage)
> - swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
> + usage = swap_entry_put_locked(si, ci, entry, 1);
> unlock_cluster(ci);
>
> return usage;
> @@ -1562,8 +1561,8 @@ static void cluster_swap_free_nr(struct swap_info_struct *si,
>
> ci = lock_cluster(si, offset);
> do {
> - if (!swap_entry_put_locked(si, offset, usage))
> - swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
> + swap_entry_put_locked(si, ci, swp_entry(si->type, offset),
> + usage);
> } while (++offset < end);
> unlock_cluster(ci);
> }
> @@ -1607,12 +1606,9 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
> ci = lock_cluster(si, offset);
> if (swap_only_has_cache(si, offset, size))
> swap_entry_range_free(si, ci, entry, size);
> - else {
> - for (int i = 0; i < size; i++, entry.val++) {
> - if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
> - swap_entry_range_free(si, ci, entry, 1);
> - }
> - }
> + else
> + for (int i = 0; i < size; i++, entry.val++)
> + swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE);
> unlock_cluster(ci);
> }
>
Powered by blists - more mailing lists