Message-ID: <ZLQIvPpKvjWppc59@arm.com>
Date: Sun, 16 Jul 2023 08:11:56 -0700
From: Catalin Marinas <catalin.marinas@....com>
To: Yicong Yang <yangyicong@...wei.com>
Cc: akpm@...ux-foundation.org, linux-mm@...ck.org,
linux-arm-kernel@...ts.infradead.org, x86@...nel.org,
mark.rutland@....com, ryan.roberts@....com, will@...nel.org,
anshuman.khandual@....com, linux-doc@...r.kernel.org,
corbet@....net, peterz@...radead.org, arnd@...db.de,
punit.agrawal@...edance.com, linux-kernel@...r.kernel.org,
darren@...amperecomputing.com, yangyicong@...ilicon.com,
huzhanyuan@...o.com, lipeifeng@...o.com, zhangshiming@...o.com,
guojian@...o.com, realmz6@...il.com, linux-mips@...r.kernel.org,
openrisc@...ts.librecores.org, linuxppc-dev@...ts.ozlabs.org,
linux-riscv@...ts.infradead.org, linux-s390@...r.kernel.org,
Barry Song <21cnbao@...il.com>, wangkefeng.wang@...wei.com,
xhao@...ux.alibaba.com, prime.zeng@...ilicon.com,
Jonathan.Cameron@...wei.com, Barry Song <v-songbaohua@...o.com>,
Nadav Amit <namit@...are.com>, Mel Gorman <mgorman@...e.de>
Subject: Re: [PATCH v10 4/4] arm64: support batched/deferred tlb shootdown
during page reclamation/migration
On Mon, Jul 10, 2023 at 04:39:14PM +0800, Yicong Yang wrote:
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 7856c3a3e35a..f0ce8208c57f 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -96,6 +96,7 @@ config ARM64
> select ARCH_SUPPORTS_NUMA_BALANCING
> select ARCH_SUPPORTS_PAGE_TABLE_CHECK
> select ARCH_SUPPORTS_PER_VMA_LOCK
> + select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if EXPERT
I don't want EXPERT to turn on a feature that's not otherwise
selectable by the user. This would lead to different performance
behaviour depending solely on whether EXPERT is set. Just select it
unconditionally.
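I.e. just:

	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH

(the same line as above, minus the "if EXPERT"). Whether deferral is
actually worth doing remains a runtime decision in
arch_tlbbatch_should_defer() below anyway.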
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index 412a3b9a3c25..4bb9cec62e26 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -254,17 +254,23 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
> dsb(ish);
> }
>
> -static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
> - unsigned long uaddr)
> +static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
> + unsigned long uaddr)
> {
> unsigned long addr;
>
> dsb(ishst);
> - addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
> + addr = __TLBI_VADDR(uaddr, ASID(mm));
> __tlbi(vale1is, addr);
> __tlbi_user(vale1is, addr);
> }
>
> +static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
> + unsigned long uaddr)
> +{
> + return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
> +}
> +
> static inline void flush_tlb_page(struct vm_area_struct *vma,
> unsigned long uaddr)
> {
> @@ -272,6 +278,42 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
> dsb(ish);
> }
>
> +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
If it's selected unconditionally, we won't need this #ifdef here.
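FWIW, the reason for the mm-based __flush_tlb_page_nosync() above is,
I assume, so that the batched path can queue a per-page TLBI without
having a vma at hand. IIRC the arch_tlbbatch_add_pending()
implementation later in this hunk is roughly (a sketch from memory,
not the exact code):

	static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
						     struct mm_struct *mm,
						     unsigned long uaddr)
	{
		/*
		 * Queue the nosync TLBI for this page now; the completing
		 * dsb(ish) is deferred until arch_tlbbatch_flush().
		 */
		__flush_tlb_page_nosync(mm, uaddr);
	}

So dropping the #ifdef only means these helpers are always built,
which is fine once the Kconfig select is unconditional.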
> +
> +static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
> +{
> +#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
> + /*
> + * TLB flush deferral is not required on systems, which are affected with
"affected by" and drop the comma before "which".
--
Catalin