Message-ID: <YxcHP3WYNp0/lWS6@hirez.programming.kicks-ass.net>
Date: Tue, 6 Sep 2022 10:39:27 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: "Kirill A. Shutemov" <kirill@...temov.name>
Cc: Bharata B Rao <bharata@....com>, ananth.narayan@....com,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Andy Lutomirski <luto@...nel.org>, x86@...nel.org,
Kostya Serebryany <kcc@...gle.com>,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
Andrey Konovalov <andreyknvl@...il.com>,
Alexander Potapenko <glider@...gle.com>,
Taras Madan <tarasmadan@...gle.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
"H . J . Lu" <hjl.tools@...il.com>,
Andi Kleen <ak@...ux.intel.com>,
Rick Edgecombe <rick.p.edgecombe@...el.com>,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCHv8 00/11] Linear Address Masking enabling

On Mon, Sep 05, 2022 at 07:47:08PM +0300, Kirill A. Shutemov wrote:
> Fair enough. How about this?
>
> diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
> index 803241dfc473..1a03c65a9c0f 100644
> --- a/arch/x86/include/asm/uaccess.h
> +++ b/arch/x86/include/asm/uaccess.h
> @@ -22,6 +22,8 @@ static inline bool pagefault_disabled(void);
> #endif
>
> #ifdef CONFIG_X86_64
> +DECLARE_STATIC_KEY_FALSE(tagged_addr_key);
> +
> /*
> * Mask out tag bits from the address.
> *
> @@ -30,8 +32,10 @@ static inline bool pagefault_disabled(void);
> */
> #define untagged_addr(mm, addr) ({ \
> u64 __addr = (__force u64)(addr); \
> - s64 sign = (s64)__addr >> 63; \
> - __addr &= (mm)->context.untag_mask | sign; \
> + if (static_branch_unlikely(&tagged_addr_key)) { \
> + s64 sign = (s64)__addr >> 63; \
> + __addr &= (mm)->context.untag_mask | sign; \
> + } \
> (__force __typeof__(addr))__addr; \
> })
>
> diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
> index 337f80a0862f..63194bf43c9a 100644
> --- a/arch/x86/kernel/process_64.c
> +++ b/arch/x86/kernel/process_64.c
> @@ -742,6 +742,9 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
> }
> #endif
>
> +DEFINE_STATIC_KEY_FALSE(tagged_addr_key);

So here you use the false-unlikely combination, which seems suboptimal
in this case; I was thinking the false-likely case would generate
better code (see the comment in linux/jump_label.h).
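
Something like the below is what I had in mind -- a completely
untested sketch, reusing the tagged_addr_key and context.untag_mask
from your patch, not a final hunk:

	/*
	 * DEFINE_STATIC_KEY_FALSE() paired with static_branch_likely():
	 * while the key is off (no process uses LAM), this compiles to
	 * a JMP over the inline untag sequence; once
	 * prctl_enable_tagged_addr() flips the key, the JMP is patched
	 * to a NOP and the hot path falls straight through into the
	 * untagging code.
	 *
	 * With static_branch_unlikely() the enabled case instead takes
	 * a JMP to an out-of-line block plus a jump back, which is the
	 * wrong trade-off once LAM is actually in use.
	 */
	#define untagged_addr(mm, addr)	({				\
		u64 __addr = (__force u64)(addr);			\
		if (static_branch_likely(&tagged_addr_key)) {		\
			s64 sign = (s64)__addr >> 63;			\
			__addr &= (mm)->context.untag_mask | sign;	\
		}							\
		(__force __typeof__(addr))__addr;			\
	})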
> +EXPORT_SYMBOL_GPL(tagged_addr_key);
> +
> static void enable_lam_func(void *mm)
> {
> struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
> @@ -813,6 +816,7 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
> }
>
> on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true);
> + static_branch_enable(&tagged_addr_key);
> out:
> mutex_unlock(&mm->context.lock);
> mmap_write_unlock(mm);

Aside from the one nit above, this looks about right.