Message-ID: <CALC8CXdigEV4KWOv9-WOknxt8RD29mamBAUkw=5dDN_zeC7v6w@mail.gmail.com>
Date: Thu, 2 Oct 2025 12:29:02 -0400
From: ChaosEsque Team <chaosesqueteam@...il.com>
To: Vivian Wang <wangruikang@...as.ac.cn>
Cc: Paul Walmsley <paul.walmsley@...ive.com>, Palmer Dabbelt <palmer@...belt.com>,
Albert Ou <aou@...s.berkeley.edu>, Alexandre Ghiti <alex@...ti.fr>, Yury Norov <yury.norov@...il.com>,
Rasmus Villemoes <linux@...musvillemoes.dk>, Vivian Wang <uwu@...m.page>,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/6] riscv: pgtable: Convert to use_alternative_unlikely
I see you have a woman's name "Vivian": so am I to assume you are one
of the beloved "trannies" linus threw all the MAMALLLLEEE hackers (who
helped him from 1993-2003...2008) for? Or are China-wumans just more
competent than whites?
On Wed, Aug 20, 2025 at 9:51 AM Vivian Wang <wangruikang@...as.ac.cn> wrote:
>
> Use use_alternative_unlikely() to check for RISCV_ISA_EXT_SVVPTC,
> replacing the use of asm goto with ALTERNATIVE.
>
> The "unlikely" variant is used to match the behavior of the original
> implementation using ALTERNATIVE("nop", "j %l[svvptc]", ...).
>
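For reference, I'd guess use_alternative_unlikely() is roughly a
statement-expression wrapper around the same asm goto the call sites used
to open-code. A sketch only (the real helper is introduced earlier in this
series and its actual definition may well differ; the names below are
mine):

  /* Sketch, not the actual patch 1/6 definition. */
  #define use_alternative_unlikely(vendor_id, patch_id)                \
  ({                                                                   \
          __label__ _yes;                                              \
          bool _have_ext = true;                                       \
                                                                       \
          asm goto(ALTERNATIVE("nop", "j %l[_yes]", vendor_id,         \
                               patch_id, 1)                            \
                   : : : : _yes);                                      \
          _have_ext = false;                                           \
  _yes:                                                                \
          _have_ext;                                                   \
  })

The upshot is that callers get an ordinary if () condition instead of a
goto label, which is what the hunks below switch over to.
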
> Signed-off-by: Vivian Wang <wangruikang@...as.ac.cn>
> ---
> arch/riscv/include/asm/pgtable.h | 15 +++++++--------
> arch/riscv/mm/pgtable.c | 22 ++++++++++------------
> 2 files changed, 17 insertions(+), 20 deletions(-)
>
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index 91697fbf1f9013005800f713797e4b6b1fc8d312..81eb386da837f064c7372530e2f2227575a703d3 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -495,8 +495,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
> struct vm_area_struct *vma, unsigned long address,
> pte_t *ptep, unsigned int nr)
> {
> - asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
> - : : : : svvptc);
> + /*
> + * Svvptc guarantees that the new valid pte will be visible within
> + * a bounded timeframe, so when the uarch does not cache invalid
> + * entries, we don't have to do anything.
> + */
> + if (use_alternative_unlikely(0, RISCV_ISA_EXT_SVVPTC))
> + return;
>
> /*
> * The kernel assumes that TLBs don't cache invalid entries, but
> @@ -508,12 +513,6 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
> while (nr--)
> local_flush_tlb_page(address + nr * PAGE_SIZE);
>
> -svvptc:;
> - /*
> - * Svvptc guarantees that the new valid pte will be visible within
> - * a bounded timeframe, so when the uarch does not cache invalid
> - * entries, we don't have to do anything.
> - */
> }
> #define update_mmu_cache(vma, addr, ptep) \
> update_mmu_cache_range(NULL, vma, addr, ptep, 1)
> diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
> index 8b6c0a112a8db4e91de54c3bd3bd527a605a6197..e0c414fa0d433fdc39c80ec390c467ca59a9a334 100644
> --- a/arch/riscv/mm/pgtable.c
> +++ b/arch/riscv/mm/pgtable.c
> @@ -9,8 +9,16 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
> unsigned long address, pte_t *ptep,
> pte_t entry, int dirty)
> {
> - asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
> - : : : : svvptc);
> + if (use_alternative_unlikely(0, RISCV_ISA_EXT_SVVPTC)) {
> + if (!pte_same(ptep_get(ptep), entry)) {
> + __set_pte_at(vma->vm_mm, ptep, entry);
> + /* Here only not svadu is impacted */
> + flush_tlb_page(vma, address);
> + return true;
> + }
> +
> + return false;
> + }
>
> if (!pte_same(ptep_get(ptep), entry))
> __set_pte_at(vma->vm_mm, ptep, entry);
> @@ -19,16 +27,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
> * the case that the PTE changed and the spurious fault case.
> */
> return true;
> -
> -svvptc:
> - if (!pte_same(ptep_get(ptep), entry)) {
> - __set_pte_at(vma->vm_mm, ptep, entry);
> - /* Here only not svadu is impacted */
> - flush_tlb_page(vma, address);
> - return true;
> - }
> -
> - return false;
> }
>
> int ptep_test_and_clear_young(struct vm_area_struct *vma,
>
> --
> 2.50.1
>
>