Message-ID: <ZBGStjiZsZ3vqx9e@kernel.org>
Date: Wed, 15 Mar 2023 11:41:10 +0200
From: Mike Rapoport <rppt@...nel.org>
To: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Cc: linux-arch@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Richard Henderson <richard.henderson@...aro.org>,
Ivan Kokshaysky <ink@...assic.park.msu.ru>,
Matt Turner <mattst88@...il.com>, linux-alpha@...r.kernel.org
Subject: Re: [PATCH v4 06/36] alpha: Implement the new page table range API

On Wed, Mar 15, 2023 at 05:14:14AM +0000, Matthew Wilcox (Oracle) wrote:
> Add PFN_PTE_SHIFT, update_mmu_cache_range() and flush_icache_pages().
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
> Cc: Richard Henderson <richard.henderson@...aro.org>
> Cc: Ivan Kokshaysky <ink@...assic.park.msu.ru>
> Cc: Matt Turner <mattst88@...il.com>
> Cc: linux-alpha@...r.kernel.org

Acked-by: Mike Rapoport (IBM) <rppt@...nel.org>

> ---
> arch/alpha/include/asm/cacheflush.h | 10 ++++++++++
> arch/alpha/include/asm/pgtable.h | 9 +++++++--
> 2 files changed, 17 insertions(+), 2 deletions(-)
>
> diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
> index 9945ff483eaf..3956460e69e2 100644
> --- a/arch/alpha/include/asm/cacheflush.h
> +++ b/arch/alpha/include/asm/cacheflush.h
> @@ -57,6 +57,16 @@ extern void flush_icache_user_page(struct vm_area_struct *vma,
> #define flush_icache_page(vma, page) \
> flush_icache_user_page((vma), (page), 0, 0)
>
> +/*
> + * Both implementations of flush_icache_user_page flush the entire
> + * address space, so one call, no matter how many pages.
> + */
> +static inline void flush_icache_pages(struct vm_area_struct *vma,
> + struct page *page, unsigned int nr)
> +{
> + flush_icache_user_page(vma, page, 0, 0);
> +}
> +
> #include <asm-generic/cacheflush.h>
>
> #endif /* _ALPHA_CACHEFLUSH_H */
> diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
> index ba43cb841d19..6c24c408b8e9 100644
> --- a/arch/alpha/include/asm/pgtable.h
> +++ b/arch/alpha/include/asm/pgtable.h
> @@ -26,7 +26,6 @@ struct vm_area_struct;
> * hook is made available.
> */
> #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
> -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
>
> /* PMD_SHIFT determines the size of the area a second-level page table can map */
> #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
> @@ -189,7 +188,8 @@ extern unsigned long __zero_page(void);
> * and a page entry and page directory to the page they refer to.
> */
> #define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT)
> -#define pte_pfn(pte) (pte_val(pte) >> 32)
> +#define PFN_PTE_SHIFT 32
> +#define pte_pfn(pte) (pte_val(pte) >> PFN_PTE_SHIFT)
>
> #define pte_page(pte) pfn_to_page(pte_pfn(pte))
> #define mk_pte(page, pgprot) \
> @@ -303,6 +303,11 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
> {
> }
>
> +static inline void update_mmu_cache_range(struct vm_area_struct *vma,
> + unsigned long address, pte_t *ptep, unsigned int nr)
> +{
> +}
> +
> /*
> * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
> * are !pte_none() && !pte_present().
> --
> 2.39.2
>
>
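For context, not part of the patch: with PFN_PTE_SHIFT defined, a generic
batched set_ptes() can advance the PFN encoded in the PTE by one page per
iteration, which is why the per-arch set_pte_at() definition can be dropped
here, and why a single flush_icache_pages()/update_mmu_cache_range() call per
batch is enough on alpha. A minimal sketch of such a loop, simplified and not
necessarily identical to the generic helper added elsewhere in this series:

	static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte, unsigned int nr)
	{
		for (;;) {
			set_pte(ptep, pte);
			if (--nr == 0)
				break;
			ptep++;
			/* Step the PFN encoded in the PTE to the next page. */
			pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
		}
	}

	/* With a batched helper, set_pte_at() is just the nr == 1 case. */
	#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
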
--
Sincerely yours,
Mike.