Message-ID: <Z7VFELM19eiDUzOJ@MiWiFi-R3L-srv>
Date: Wed, 19 Feb 2025 10:42:24 +0800
From: Baoquan He <bhe@...hat.com>
To: Kairui Song <kasong@...cent.com>
Cc: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
Chris Li <chrisl@...nel.org>, Barry Song <v-songbaohua@...o.com>,
Hugh Dickins <hughd@...gle.com>,
Yosry Ahmed <yosryahmed@...gle.com>,
"Huang, Ying" <ying.huang@...ux.alibaba.com>,
Nhat Pham <nphamcs@...il.com>, Johannes Weiner <hannes@...xchg.org>,
Kalesh Singh <kaleshsingh@...gle.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/7] mm, swap: drop the flag TTRS_DIRECT
On 02/15/25 at 01:57am, Kairui Song wrote:
> From: Kairui Song <kasong@...cent.com>
>
> This flag was a temporary measure to let the allocator bypass the
> slot cache during freeing, so that reclaiming one slot would free it
> immediately.
>
> But slot cache usage on freeing has since been removed, so this flag
> no longer has any effect.
>
> Signed-off-by: Kairui Song <kasong@...cent.com>
> ---
> mm/swapfile.c | 23 +++--------------------
> 1 file changed, 3 insertions(+), 20 deletions(-)
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index c77ffee4af86..449e388a6fec 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
Reviewed-by: Baoquan He <bhe@...hat.com>
> @@ -158,8 +158,6 @@ static long swap_usage_in_pages(struct swap_info_struct *si)
> #define TTRS_UNMAPPED 0x2
> /* Reclaim the swap entry if swap is getting full */
> #define TTRS_FULL 0x4
> -/* Reclaim directly, bypass the slot cache and don't touch device lock */
> -#define TTRS_DIRECT 0x8
>
> static bool swap_only_has_cache(struct swap_info_struct *si,
> unsigned long offset, int nr_pages)
> @@ -257,23 +255,8 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
> if (!need_reclaim)
> goto out_unlock;
>
> - if (!(flags & TTRS_DIRECT)) {
> - /* Free through slot cache */
> - delete_from_swap_cache(folio);
> - folio_set_dirty(folio);
> - ret = nr_pages;
> - goto out_unlock;
> - }
> -
> - xa_lock_irq(&address_space->i_pages);
> - __delete_from_swap_cache(folio, entry, NULL);
> - xa_unlock_irq(&address_space->i_pages);
> - folio_ref_sub(folio, nr_pages);
> + delete_from_swap_cache(folio);
> folio_set_dirty(folio);
> -
> - ci = lock_cluster(si, offset);
> - swap_entry_range_free(si, ci, entry, nr_pages);
> - unlock_cluster(ci);
> ret = nr_pages;
> out_unlock:
> folio_unlock(folio);
> @@ -707,7 +690,7 @@ static bool cluster_reclaim_range(struct swap_info_struct *si,
> offset++;
> break;
> case SWAP_HAS_CACHE:
> - nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
> + nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
> if (nr_reclaim > 0)
> offset += nr_reclaim;
> else
> @@ -860,7 +843,7 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
> if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
> spin_unlock(&ci->lock);
> nr_reclaim = __try_to_reclaim_swap(si, offset,
> - TTRS_ANYWAY | TTRS_DIRECT);
> + TTRS_ANYWAY);
> spin_lock(&ci->lock);
> if (nr_reclaim) {
> offset += abs(nr_reclaim);
> --
> 2.48.1
>
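As a side note for readers following along, below is a minimal
userspace sketch (not kernel code; the helpers are illustrative
stand-ins, not the real mm/swapfile.c functions) of why dropping
TTRS_DIRECT is safe: with the slot cache no longer used on free, both
former branches of __try_to_reclaim_swap() collapse into the same
delete-from-swap-cache path, so the flag distinguishes nothing.

/*
 * Toy model only. TTRS_* values mirror the patch; the two functions
 * are hypothetical stand-ins for the kernel helpers.
 */
#include <stdio.h>

#define TTRS_ANYWAY   0x1
#define TTRS_UNMAPPED 0x2
#define TTRS_FULL     0x4
/* TTRS_DIRECT (0x8) is gone: no caller needs to bypass a slot cache */

static int delete_from_swap_cache(int nr_pages)
{
	/* stand-in for the real folio/swap-cache removal */
	printf("reclaimed %d page(s) via swap cache removal\n", nr_pages);
	return nr_pages;
}

static int try_to_reclaim_swap(unsigned int flags, int nr_pages)
{
	/*
	 * Previously:
	 *   if (!(flags & TTRS_DIRECT)) { free through slot cache }
	 *   else { direct free, holding the cluster lock }
	 * Now there is a single path, since freeing never goes
	 * through the slot cache anymore.
	 */
	(void)flags;
	return delete_from_swap_cache(nr_pages);
}

int main(void)
{
	/* both former TTRS_DIRECT callers now pass TTRS_ANYWAY alone */
	return try_to_reclaim_swap(TTRS_ANYWAY, 1) > 0 ? 0 : 1;
}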