Message-ID: <877csdpfcq.fsf@yhuang6-desk2.ccr.corp.intel.com>
Date: Fri, 09 Jun 2023 09:57:25 +0800
From: "Huang, Ying" <ying.huang@...el.com>
To: Suren Baghdasaryan <surenb@...gle.com>
Cc: akpm@...ux-foundation.org, willy@...radead.org, hannes@...xchg.org,
mhocko@...e.com, josef@...icpanda.com, jack@...e.cz,
ldufour@...ux.ibm.com, laurent.dufour@...ibm.com,
michel@...pinasse.org, liam.howlett@...cle.com, jglisse@...gle.com,
vbabka@...e.cz, minchan@...gle.com, dave@...olabs.net,
punit.agrawal@...edance.com, lstoakes@...il.com, hdanton@...a.com,
apopple@...dia.com, peterx@...hat.com, david@...hat.com,
yuzhao@...gle.com, dhowells@...hat.com, hughd@...gle.com,
viro@...iv.linux.org.uk, brauner@...nel.org,
pasha.tatashin@...een.com, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
kernel-team@...roid.com, Ming Lei <ming.lei@...hat.com>
Subject: Re: [PATCH v2 1/6] swap: remove remnants of polling from
read_swap_cache_async

+ Ming Lei for confirmation.

Suren Baghdasaryan <surenb@...gle.com> writes:

> Commit [1] introduced IO polling support during swapin to reduce
> swap read latency for block devices that can be polled. However, a
> later commit [2] removed polling support. Therefore it seems safe to
> remove the do_poll parameter from read_swap_cache_async() and to
> always call swap_readpage() with synchronous=false, leaving the wait
> for IO completion to folio_lock_or_retry().
>
> [1] commit 23955622ff8d ("swap: add block io poll in swapin path")
> [2] commit 9650b453a3d4 ("block: ignore RWF_HIPRI hint for sync dio")
>
> Suggested-by: Huang Ying <ying.huang@...el.com>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>

Looks good to me! Thanks!

Reviewed-by: "Huang, Ying" <ying.huang@...el.com>
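
A note for anyone following along: after this change, every swap read
that goes through read_swap_cache_async() is submitted asynchronously,
and a caller that needs the data waits on the page lock. A minimal
sketch of the resulting calling pattern (hypothetical caller, not part
of this patch; assumes a valid swp_entry_t entry, its vma, and the
faulting address addr are in scope):

	struct swap_iocb *splug = NULL;
	struct page *page;

	/* do_poll is gone: the read is always submitted async */
	page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
				     vma, addr, &splug);
	swap_read_unplug(splug);	/* submit any batched I/O */
	if (page) {
		lock_page(page);	/* blocks until the read completes */
		/* ... use the page ... */
		unlock_page(page);
		put_page(page);
	}
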
> ---
>  mm/madvise.c    |  4 ++--
>  mm/swap.h       |  1 -
>  mm/swap_state.c | 12 +++++-------
>  3 files changed, 7 insertions(+), 10 deletions(-)
>
> diff --git a/mm/madvise.c b/mm/madvise.c
> index b5ffbaf616f5..b1e8adf1234e 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -215,7 +215,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
>  			continue;
>  
>  		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
> -					     vma, index, false, &splug);
> +					     vma, index, &splug);
>  		if (page)
>  			put_page(page);
>  	}
> @@ -252,7 +252,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
>  		rcu_read_unlock();
>  
>  		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
> -					     NULL, 0, false, &splug);
> +					     NULL, 0, &splug);
>  		if (page)
>  			put_page(page);
>  
> diff --git a/mm/swap.h b/mm/swap.h
> index 7c033d793f15..8a3c7a0ace4f 100644
> --- a/mm/swap.h
> +++ b/mm/swap.h
> @@ -46,7 +46,6 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
>  struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
>  				   struct vm_area_struct *vma,
>  				   unsigned long addr,
> -				   bool do_poll,
>  				   struct swap_iocb **plug);
>  struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
>  				     struct vm_area_struct *vma,
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index b76a65ac28b3..a3839de71f3f 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -517,15 +517,14 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
>   */
>  struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
>  				   struct vm_area_struct *vma,
> -				   unsigned long addr, bool do_poll,
> -				   struct swap_iocb **plug)
> +				   unsigned long addr, struct swap_iocb **plug)
>  {
>  	bool page_was_allocated;
>  	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
>  			vma, addr, &page_was_allocated);
>  
>  	if (page_was_allocated)
> -		swap_readpage(retpage, do_poll, plug);
> +		swap_readpage(retpage, false, plug);
>  
>  	return retpage;
>  }
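
With this hunk, all reads issued through the swap cache now pass
synchronous=false. If I read the tree correctly, the one remaining
synchronous swap_readpage() user is the SWP_SYNCHRONOUS_IO fast path
in do_swap_page(), which bypasses the swap cache entirely. Roughly
(sketch from mm/memory.c, unchanged by this patch):

	if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
	    __swap_count(entry) == 1) {
		/* skip the swap cache and read into a private page */
		/* ... */
		swap_readpage(page, true, NULL);
	}
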
> @@ -620,7 +619,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
>  	struct swap_info_struct *si = swp_swap_info(entry);
>  	struct blk_plug plug;
>  	struct swap_iocb *splug = NULL;
> -	bool do_poll = true, page_allocated;
> +	bool page_allocated;
>  	struct vm_area_struct *vma = vmf->vma;
>  	unsigned long addr = vmf->address;
>  
> @@ -628,7 +627,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
>  	if (!mask)
>  		goto skip;
>  
> -	do_poll = false;
>  	/* Read a page_cluster sized and aligned cluster around offset. */
>  	start_offset = offset & ~mask;
>  	end_offset = offset | mask;
> @@ -660,7 +658,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
>  	lru_add_drain();	/* Push any new pages onto the LRU now */
>  skip:
>  	/* The page was likely read above, so no need for plugging here */
> -	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
> +	return read_swap_cache_async(entry, gfp_mask, vma, addr, NULL);
>  }
>  
>  int init_swap_address_space(unsigned int type, unsigned long nr_pages)
> @@ -825,7 +823,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
>  skip:
>  	/* The page was likely read above, so no need for plugging here */
>  	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
> -				     ra_info.win == 1, NULL);
> +				     NULL);
>  }
>  
>  /**
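
And to spell out where the wait ends up: on the fault path, the task
blocks on the folio lock in do_swap_page() after the read has been
submitted. Roughly (sketch from mm/memory.c as of this series' base,
unchanged by this patch):

	/* the swap read was submitted asynchronously above */
	locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags);
	if (!locked) {
		ret |= VM_FAULT_RETRY;
		goto out_release;
	}
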