Message-ID: <87legp6rax.fsf@yhuang6-desk2.ccr.corp.intel.com>
Date:   Mon, 12 Jun 2023 16:03:18 +0800
From:   "Huang, Ying" <ying.huang@...el.com>
To:     Hugh Dickins <hughd@...gle.com>
Cc:     Andrew Morton <akpm@...ux-foundation.org>,
        Mike Kravetz <mike.kravetz@...cle.com>,
        Mike Rapoport <rppt@...nel.org>,
        "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
        Matthew Wilcox <willy@...radead.org>,
        David Hildenbrand <david@...hat.com>,
        Suren Baghdasaryan <surenb@...gle.com>,
        Qi Zheng <zhengqi.arch@...edance.com>,
        Yang Shi <shy828301@...il.com>,
        Mel Gorman <mgorman@...hsingularity.net>,
        Peter Xu <peterx@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Will Deacon <will@...nel.org>, Yu Zhao <yuzhao@...gle.com>,
        Alistair Popple <apopple@...dia.com>,
        Ralph Campbell <rcampbell@...dia.com>,
        Ira Weiny <ira.weiny@...el.com>,
        Steven Price <steven.price@....com>,
        SeongJae Park <sj@...nel.org>,
        Lorenzo Stoakes <lstoakes@...il.com>,
        Naoya Horiguchi <naoya.horiguchi@....com>,
        Christophe Leroy <christophe.leroy@...roup.eu>,
        Zack Rusin <zackr@...are.com>, Jason Gunthorpe <jgg@...pe.ca>,
        Axel Rasmussen <axelrasmussen@...gle.com>,
        Anshuman Khandual <anshuman.khandual@....com>,
        Pasha Tatashin <pasha.tatashin@...een.com>,
        Miaohe Lin <linmiaohe@...wei.com>,
        Minchan Kim <minchan@...nel.org>,
        Christoph Hellwig <hch@...radead.org>,
        Song Liu <song@...nel.org>,
        Thomas Hellstrom <thomas.hellstrom@...ux.intel.com>,
        Ryan Roberts <ryan.roberts@....com>,
        linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH v2 31/32] mm/swap: swap_vma_readahead() do the
 pte_offset_map()

Hi, Hugh,

Sorry for the late reply.

Hugh Dickins <hughd@...gle.com> writes:

> swap_vma_readahead() has been proceeding in an unconventional way, its
> preliminary swap_ra_info() doing the pte_offset_map() and pte_unmap(),
> then relying on that pte pointer even after the pte_unmap() - in its
> CONFIG_64BIT case (I think !CONFIG_HIGHPTE was intended; whereas 32-bit
> copied ptes to stack while they were mapped, but had to limit how many).
>
> Though it would be difficult to construct a failing testcase, accessing
> page table after pte_unmap() will become bad practice, even on 64-bit:
> an rcu_read_unlock() in pte_unmap() will allow page table to be freed.
>
> Move relevant definitions from include/linux/swap.h to mm/swap_state.c,
> nothing else used them.  Delete the CONFIG_64BIT distinction and buffer,
> delete all reference to ptes from swap_ra_info(), use pte_offset_map()
> repeatedly in swap_vma_readahead(), breaking from the loop if it fails.
>
> (Will the repeated "map" and "unmap" show up as a slowdown anywhere?
> If so, maybe modify __read_swap_cache_async() to do the pte_unmap()
> only when it does not find the page already in the swapcache.)
>
> Use ptep_get_lockless(), mainly for its READ_ONCE().  Correctly advance
> the address passed down to each call of __read_swap_cache_async().
>
> Signed-off-by: Hugh Dickins <hughd@...gle.com>
> ---
>  include/linux/swap.h | 19 -------------------
>  mm/swap_state.c      | 45 +++++++++++++++++++++++---------------------
>  2 files changed, 24 insertions(+), 40 deletions(-)
>
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 3c69cb653cb9..1b9f2d92fc10 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -337,25 +337,6 @@ struct swap_info_struct {
>  					   */
>  };
>  
> -#ifdef CONFIG_64BIT
> -#define SWAP_RA_ORDER_CEILING	5
> -#else
> -/* Avoid stack overflow, because we need to save part of page table */
> -#define SWAP_RA_ORDER_CEILING	3
> -#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
> -#endif
> -
> -struct vma_swap_readahead {
> -	unsigned short win;
> -	unsigned short offset;
> -	unsigned short nr_pte;
> -#ifdef CONFIG_64BIT
> -	pte_t *ptes;
> -#else
> -	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
> -#endif
> -};
> -
>  static inline swp_entry_t folio_swap_entry(struct folio *folio)
>  {
>  	swp_entry_t entry = { .val = page_private(&folio->page) };
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index b76a65ac28b3..a43b41975da2 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -698,6 +698,14 @@ void exit_swap_address_space(unsigned int type)
>  	swapper_spaces[type] = NULL;
>  }
>  
> +#define SWAP_RA_ORDER_CEILING	5
> +
> +struct vma_swap_readahead {
> +	unsigned short win;
> +	unsigned short offset;
> +	unsigned short nr_pte;
> +};
> +

Because we don't deal with PTEs in struct vma_swap_readahead anymore, it
appears simpler to record addresses directly, for example,

struct vma_swap_readahead {
	unsigned long start;
	unsigned long end;
};

And we can make ra_info.win the return value of swap_ra_info() instead.
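
Roughly something like the following (completely untested, just to
illustrate the idea; "start"/"end" are the field names from the example
above, the other identifiers are the existing locals in swap_ra_info()):

static unsigned int swap_ra_info(struct vm_fault *vmf,
				 struct vma_swap_readahead *ra_info)
{
	...
	if (win == 1)
		return 1;
	...
	/* record the readahead window as addresses, not offset/nr_pte */
	ra_info->start = faddr - ((fpfn - start) << PAGE_SHIFT);
	ra_info->end = ra_info->start + ((end - start) << PAGE_SHIFT);
	return win;
}

and then swap_vma_readahead() could simply do

	if (swap_ra_info(vmf, &ra_info) == 1)
		goto skip;

	blk_start_plug(&plug);
	for (addr = ra_info.start; addr < ra_info.end; addr += PAGE_SIZE) {
		...
	}

which also gets rid of the offset/nr_pte arithmetic there.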

Anyway, that can be a separate cleanup patch on top of this one.

For the patch itself, feel free to add,

Reviewed-by: "Huang, Ying" <ying.huang@...el.com>

>  static void swap_ra_info(struct vm_fault *vmf,
>  			 struct vma_swap_readahead *ra_info)
>  {
> @@ -705,11 +713,7 @@ static void swap_ra_info(struct vm_fault *vmf,
>  	unsigned long ra_val;
>  	unsigned long faddr, pfn, fpfn, lpfn, rpfn;
>  	unsigned long start, end;
> -	pte_t *pte, *orig_pte;
>  	unsigned int max_win, hits, prev_win, win;
> -#ifndef CONFIG_64BIT
> -	pte_t *tpte;
> -#endif
>  
>  	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
>  			     SWAP_RA_ORDER_CEILING);
> @@ -728,12 +732,9 @@ static void swap_ra_info(struct vm_fault *vmf,
>  					       max_win, prev_win);
>  	atomic_long_set(&vma->swap_readahead_info,
>  			SWAP_RA_VAL(faddr, win, 0));
> -
>  	if (win == 1)
>  		return;
>  
> -	/* Copy the PTEs because the page table may be unmapped */
> -	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
>  	if (fpfn == pfn + 1) {
>  		lpfn = fpfn;
>  		rpfn = fpfn + win;
> @@ -753,15 +754,6 @@ static void swap_ra_info(struct vm_fault *vmf,
>  
>  	ra_info->nr_pte = end - start;
>  	ra_info->offset = fpfn - start;
> -	pte -= ra_info->offset;
> -#ifdef CONFIG_64BIT
> -	ra_info->ptes = pte;
> -#else
> -	tpte = ra_info->ptes;
> -	for (pfn = start; pfn != end; pfn++)
> -		*tpte++ = *pte++;
> -#endif
> -	pte_unmap(orig_pte);
>  }
>  
>  /**
> @@ -785,7 +777,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
>  	struct swap_iocb *splug = NULL;
>  	struct vm_area_struct *vma = vmf->vma;
>  	struct page *page;
> -	pte_t *pte, pentry;
> +	pte_t *pte = NULL, pentry;
> +	unsigned long addr;
>  	swp_entry_t entry;
>  	unsigned int i;
>  	bool page_allocated;
> @@ -797,17 +790,25 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
>  	if (ra_info.win == 1)
>  		goto skip;
>  
> +	addr = vmf->address - (ra_info.offset * PAGE_SIZE);
> +
>  	blk_start_plug(&plug);
> -	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
> -	     i++, pte++) {
> -		pentry = *pte;
> +	for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
> +		if (!pte++) {
> +			pte = pte_offset_map(vmf->pmd, addr);
> +			if (!pte)
> +				break;
> +		}
> +		pentry = ptep_get_lockless(pte);
>  		if (!is_swap_pte(pentry))
>  			continue;
>  		entry = pte_to_swp_entry(pentry);
>  		if (unlikely(non_swap_entry(entry)))
>  			continue;
> +		pte_unmap(pte);
> +		pte = NULL;
>  		page = __read_swap_cache_async(entry, gfp_mask, vma,
> -					       vmf->address, &page_allocated);
> +					       addr, &page_allocated);
>  		if (!page)
>  			continue;
>  		if (page_allocated) {
> @@ -819,6 +820,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
>  		}
>  		put_page(page);
>  	}
> +	if (pte)
> +		pte_unmap(pte);
>  	blk_finish_plug(&plug);
>  	swap_read_unplug(splug);
>  	lru_add_drain();
