Message-ID: <CAOnJCULmsJtd8+vLGjUdLtdyK+6HG40c7jQBbUqckWT75uGFog@mail.gmail.com>
Date:   Thu, 3 Nov 2022 00:00:56 -0700
From:   Atish Patra <atishp@...shpatra.org>
To:     Anup Patel <apatel@...tanamicro.com>
Cc:     Palmer Dabbelt <palmer@...belt.com>,
        Paul Walmsley <paul.walmsley@...ive.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Marc Zyngier <maz@...nel.org>,
        Daniel Lezcano <daniel.lezcano@...aro.org>,
        Alistair Francis <Alistair.Francis@....com>,
        Anup Patel <anup@...infault.org>,
        linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v10 6/7] RISC-V: Use IPIs for remote TLB flush when possible

On Tue, Nov 1, 2022 at 7:34 AM Anup Patel <apatel@...tanamicro.com> wrote:
>
> If we have a specialized interrupt controller (such as the AIA IMSIC)
> that allows supervisor mode to inject IPIs directly, without any
> assistance from M-mode or HS-mode, then we can use it to do remote
> TLB flushes from supervisor mode instead of going through the SBI
> RFENCE calls.
>
> This patch extends the remote TLB flush functions to use supervisor
> mode IPIs whenever direct supervisor mode IPIs are supported by the
> interrupt controller.
>
> Signed-off-by: Anup Patel <apatel@...tanamicro.com>
> ---
>  arch/riscv/mm/tlbflush.c | 93 +++++++++++++++++++++++++++++++++-------
>  1 file changed, 78 insertions(+), 15 deletions(-)
>
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 37ed760d007c..27a7db8eb2c4 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -23,14 +23,62 @@ static inline void local_flush_tlb_page_asid(unsigned long addr,
>                         : "memory");
>  }
>
> +static inline void local_flush_tlb_range(unsigned long start,
> +               unsigned long size, unsigned long stride)
> +{
> +       if (size <= stride)
> +               local_flush_tlb_page(start);
> +       else
> +               local_flush_tlb_all();
> +}
> +
> +static inline void local_flush_tlb_range_asid(unsigned long start,
> +               unsigned long size, unsigned long stride, unsigned long asid)
> +{
> +       if (size <= stride)
> +               local_flush_tlb_page_asid(start, asid);
> +       else
> +               local_flush_tlb_all_asid(asid);
> +}
> +
> +static void __ipi_flush_tlb_all(void *info)
> +{
> +       local_flush_tlb_all();
> +}
> +
>  void flush_tlb_all(void)
>  {
> -       sbi_remote_sfence_vma(NULL, 0, -1);
> +       if (riscv_use_ipi_for_rfence())
> +               on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
> +       else
> +               sbi_remote_sfence_vma(NULL, 0, -1);
> +}
> +
> +struct flush_tlb_range_data {
> +       unsigned long asid;
> +       unsigned long start;
> +       unsigned long size;
> +       unsigned long stride;
> +};
> +
> +static void __ipi_flush_tlb_range_asid(void *info)
> +{
> +       struct flush_tlb_range_data *d = info;
> +
> +       local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
> +}
> +
> +static void __ipi_flush_tlb_range(void *info)
> +{
> +       struct flush_tlb_range_data *d = info;
> +
> +       local_flush_tlb_range(d->start, d->size, d->stride);
>  }
>
> -static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
> -                                 unsigned long size, unsigned long stride)
> +static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
> +                             unsigned long size, unsigned long stride)
>  {
> +       struct flush_tlb_range_data ftd;
>         struct cpumask *cmask = mm_cpumask(mm);
>         unsigned int cpuid;
>         bool broadcast;
> @@ -45,19 +93,34 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
>                 unsigned long asid = atomic_long_read(&mm->context.id);
>
>                 if (broadcast) {
> -                       sbi_remote_sfence_vma_asid(cmask, start, size, asid);
> -               } else if (size <= stride) {
> -                       local_flush_tlb_page_asid(start, asid);
> +                       if (riscv_use_ipi_for_rfence()) {
> +                               ftd.asid = asid;
> +                               ftd.start = start;
> +                               ftd.size = size;
> +                               ftd.stride = stride;
> +                               on_each_cpu_mask(cmask,
> +                                                __ipi_flush_tlb_range_asid,
> +                                                &ftd, 1);
> +                       } else
> +                               sbi_remote_sfence_vma_asid(cmask,
> +                                                          start, size, asid);
>                 } else {
> -                       local_flush_tlb_all_asid(asid);
> +                       local_flush_tlb_range_asid(start, size, stride, asid);
>                 }
>         } else {
>                 if (broadcast) {
> -                       sbi_remote_sfence_vma(cmask, start, size);
> -               } else if (size <= stride) {
> -                       local_flush_tlb_page(start);
> +                       if (riscv_use_ipi_for_rfence()) {
> +                               ftd.asid = 0;
> +                               ftd.start = start;
> +                               ftd.size = size;
> +                               ftd.stride = stride;
> +                               on_each_cpu_mask(cmask,
> +                                                __ipi_flush_tlb_range,
> +                                                &ftd, 1);
> +                       } else
> +                               sbi_remote_sfence_vma(cmask, start, size);
>                 } else {
> -                       local_flush_tlb_all();
> +                       local_flush_tlb_range(start, size, stride);
>                 }
>         }
>
> @@ -66,23 +129,23 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
>
>  void flush_tlb_mm(struct mm_struct *mm)
>  {
> -       __sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
> +       __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
>  }
>
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
>  {
> -       __sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
> +       __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
>  }
>
>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>                      unsigned long end)
>  {
> -       __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
> +       __flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
>  }
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
>                         unsigned long end)
>  {
> -       __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
> +       __flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
>  }
>  #endif
> --
> 2.34.1
>
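
To summarize the new dispatch path: __flush_tlb_range() now packs the
range into a struct flush_tlb_range_data and broadcasts it with
on_each_cpu_mask() when riscv_use_ipi_for_rfence() reports that the
interrupt controller supports direct S-mode IPIs, falling back to the
SBI RFENCE ecalls otherwise. A minimal userspace sketch of that
dispatch shape, for anyone skimming the diff (the IPI and SBI back
ends below are stand-in stubs I wrote for illustration, not kernel
APIs):

#include <stdbool.h>
#include <stdio.h>

struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

/* Stand-in for riscv_use_ipi_for_rfence(): true when the interrupt
 * controller (e.g. AIA IMSIC) lets S-mode send IPIs directly. */
static bool use_ipi = true;

/* Simulated per-CPU IPI handler (__ipi_flush_tlb_range in the patch). */
static void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_range_data *d = info;

	printf("IPI flush: start=%#lx size=%#lx stride=%#lx\n",
	       d->start, d->size, d->stride);
}

/* Simulated SBI RFENCE ecall (sbi_remote_sfence_vma in the patch). */
static void sbi_flush(unsigned long start, unsigned long size)
{
	printf("SBI flush: start=%#lx size=%#lx\n", start, size);
}

/* The core pattern: pack the range into a struct and hand it to the
 * IPI path when available, otherwise fall back to the SBI call. */
static void flush_tlb_range_demo(unsigned long start, unsigned long size,
				 unsigned long stride)
{
	struct flush_tlb_range_data ftd = {
		.start = start, .size = size, .stride = stride,
	};

	if (use_ipi)
		ipi_flush_tlb_range(&ftd);  /* on_each_cpu_mask() in-kernel */
	else
		sbi_flush(start, size);
}

int main(void)
{
	flush_tlb_range_demo(0x10000, 0x1000, 0x1000);
	use_ipi = false;
	flush_tlb_range_demo(0x10000, 0x1000, 0x1000);
	return 0;
}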

LGTM.

Reviewed-by: Atish Patra <atishp@...osinc.com>

-- 
Regards,
Atish
