Message-Id: <E03A9B6B-8DB3-4777-A458-E6DC85E2EF9E@gmail.com>
Date: Mon, 30 Dec 2024 21:24:13 +0200
From: Nadav Amit <nadav.amit@...il.com>
To: Rik van Riel <riel@...riel.com>
Cc: the arch/x86 maintainers <x86@...nel.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
kernel-team@...a.com,
Dave Hansen <dave.hansen@...ux.intel.com>,
luto@...nel.org,
peterz@...radead.org,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
"H. Peter Anvin" <hpa@...or.com>,
Andrew Morton <akpm@...ux-foundation.org>,
zhengqi.arch@...edance.com,
"open list:MEMORY MANAGEMENT" <linux-mm@...ck.org>
Subject: Re: [PATCH 09/12] x86/mm: enable broadcast TLB invalidation for
multi-threaded processes
> On 30 Dec 2024, at 19:53, Rik van Riel <riel@...riel.com> wrote:
>
> Use broadcast TLB invalidation, using the INVLPGB instruction, on AMD EPYC 3
> and newer CPUs.
>
> In order to not exhaust PCID space, and keep TLB flushes local for single
> threaded processes, we only hand out broadcast ASIDs to processes active on
> 3 or more CPUs, and gradually increase the threshold as broadcast ASID space
> is depleted.
>
> Signed-off-by: Rik van Riel <riel@...riel.com>
> ---
>
[snip]
> --- a/arch/x86/include/asm/mmu_context.h
> +++ b/arch/x86/include/asm/mmu_context.h
> @@ -139,6 +139,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
> #define enter_lazy_tlb enter_lazy_tlb
> extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
>
> +extern void destroy_context_free_broadcast_asid(struct mm_struct *mm);
> +
> /*
> * Init a new mm. Used on mm copies, like at fork()
> * and on mm's that are brand-new, like at execve().
> @@ -161,6 +163,13 @@ static inline int init_new_context(struct task_struct *tsk,
> mm->context.execute_only_pkey = -1;
> }
> #endif
> +
> +#ifdef CONFIG_CPU_SUP_AMD
> + INIT_LIST_HEAD(&mm->context.broadcast_asid_list);
> + mm->context.broadcast_asid = 0;
> + mm->context.asid_transition = false;
> +#endif
> +
> mm_reset_untag_mask(mm);
> init_new_context_ldt(mm);
> return 0;
> @@ -170,6 +179,9 @@ static inline int init_new_context(struct task_struct *tsk,
> static inline void destroy_context(struct mm_struct *mm)
> {
> destroy_context_ldt(mm);
> +#ifdef CONFIG_CPU_SUP_AMD
> + destroy_context_free_broadcast_asid(mm);
> +#endif
This ifdef’ry is not great. I think it’s better to have entire functions inside
the #ifdef than to sprinkle #ifdefs within the code.
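For instance, destroy_context() could stay clean if the !CONFIG_CPU_SUP_AMD
case gets a stub in the header, something like (untested):

#ifdef CONFIG_CPU_SUP_AMD
extern void destroy_context_free_broadcast_asid(struct mm_struct *mm);
#else
static inline void destroy_context_free_broadcast_asid(struct mm_struct *mm) { }
#endif

static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
	destroy_context_free_broadcast_asid(mm);
}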
> }
>
> extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
> diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
> index 20074f17fbcd..5e9956af98d1 100644
> --- a/arch/x86/include/asm/tlbflush.h
> +++ b/arch/x86/include/asm/tlbflush.h
> @@ -65,6 +65,23 @@ static inline void cr4_clear_bits(unsigned long mask)
> */
> #define TLB_NR_DYN_ASIDS 6
>
> +#ifdef CONFIG_CPU_SUP_AMD
> +#define is_dyn_asid(asid) (asid) < TLB_NR_DYN_ASIDS
> +#define is_broadcast_asid(asid) (asid) >= TLB_NR_DYN_ASIDS
> +#define in_asid_transition(info) (info->mm && info->mm->context.asid_transition)
> +#define mm_broadcast_asid(mm) (mm->context.broadcast_asid)
> +#else
> +#define is_dyn_asid(asid) true
> +#define is_broadcast_asid(asid) false
> +#define in_asid_transition(info) false
> +#define mm_broadcast_asid(mm) 0
I don’t see a reason why these should be #defines instead of inline functions.
Arguably, inline functions are better due to type-checking, etc. For instance,
is_dyn_asid() is missing parentheses around the whole expression to be safe.
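Something along these lines, perhaps (untested):

#ifdef CONFIG_CPU_SUP_AMD
static inline bool is_dyn_asid(u16 asid)
{
	return asid < TLB_NR_DYN_ASIDS;
}

static inline bool is_broadcast_asid(u16 asid)
{
	return asid >= TLB_NR_DYN_ASIDS;
}

static inline u16 mm_broadcast_asid(struct mm_struct *mm)
{
	return mm->context.broadcast_asid;
}
#else
static inline bool is_dyn_asid(u16 asid) { return true; }
static inline bool is_broadcast_asid(u16 asid) { return false; }
static inline u16 mm_broadcast_asid(struct mm_struct *mm) { return 0; }
#endif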
> +
> +inline bool needs_broadcast_asid_reload(struct mm_struct *next, u16 prev_asid)
> +{
> + return false;
> +}
> +#endif
> +
> struct tlb_context {
> u64 ctx_id;
> u64 tlb_gen;
> diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
> index 64f1679c37e1..eb83391385ce 100644
> --- a/arch/x86/mm/tlb.c
> +++ b/arch/x86/mm/tlb.c
> @@ -74,13 +74,15 @@
> * use different names for each of them:
> *
> * ASID - [0, TLB_NR_DYN_ASIDS-1]
> - * the canonical identifier for an mm
> + * the canonical identifier for an mm, dynamically allocated on each CPU
> + * [TLB_NR_DYN_ASIDS, MAX_ASID_AVAILABLE-1]
> + * the canonical, global identifier for an mm, identical across all CPUs
> *
> - * kPCID - [1, TLB_NR_DYN_ASIDS]
> + * kPCID - [1, MAX_ASID_AVAILABLE]
> * the value we write into the PCID part of CR3; corresponds to the
> * ASID+1, because PCID 0 is special.
> *
> - * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
> + * uPCID - [2048 + 1, 2048 + MAX_ASID_AVAILABLE]
> * for KPTI each mm has two address spaces and thus needs two
> * PCID values, but we can still do with a single ASID denomination
> * for each mm. Corresponds to kPCID + 2048.
> @@ -225,6 +227,18 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
> return;
> }
>
> + /*
> + * TLB consistency for this ASID is maintained with INVLPGB;
> + * TLB flushes happen even while the process isn't running.
> + */
> +#ifdef CONFIG_CPU_SUP_AMD
I’m pretty sure IS_ENABLED() can be used here.
> + if (static_cpu_has(X86_FEATURE_INVLPGB) && mm_broadcast_asid(next)) {
> + *new_asid = mm_broadcast_asid(next);
Isn’t there a risk of a race changing broadcast_asid between the two reads?
Maybe also use READ_ONCE(), since the value is modified asynchronously?
> + *need_flush = false;
> + return;
> + }
> +#endif
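Combining that with the IS_ENABLED() point above, maybe something like this
(untested; it relies on mm_broadcast_asid() being 0 for !CONFIG_CPU_SUP_AMD so
the dead code gets dropped):

	if (IS_ENABLED(CONFIG_CPU_SUP_AMD) &&
	    static_cpu_has(X86_FEATURE_INVLPGB)) {
		/*
		 * Read the ASID once; use_broadcast_asid() can change it
		 * asynchronously, so mm_broadcast_asid() would use
		 * READ_ONCE() internally.
		 */
		u16 broadcast_asid = mm_broadcast_asid(next);

		if (broadcast_asid) {
			*new_asid = broadcast_asid;
			*need_flush = false;
			return;
		}
	}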
> +
> if (this_cpu_read(cpu_tlbstate.invalidate_other))
> clear_asid_other();
>
> @@ -251,6 +265,245 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
> *need_flush = true;
> }
>
> +#ifdef CONFIG_CPU_SUP_AMD
> +/*
> + * Logic for AMD INVLPGB support.
> + */
> +static DEFINE_RAW_SPINLOCK(broadcast_asid_lock);
> +static u16 last_broadcast_asid = TLB_NR_DYN_ASIDS;
> +static DECLARE_BITMAP(broadcast_asid_used, MAX_ASID_AVAILABLE) = { 0 };
> +static LIST_HEAD(broadcast_asid_list);
> +static int broadcast_asid_available = MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1;
Presumably some of these data structures are shared, and some are accessed
frequently together. Wouldn’t it make more sense to put them inside a struct
(or two) and make it cacheline aligned?
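Roughly something like this, perhaps (untested, names made up):

struct broadcast_asid_alloc {
	raw_spinlock_t		lock;
	u16			last_asid;
	int			available;
	struct list_head	list;
	DECLARE_BITMAP(used, MAX_ASID_AVAILABLE);
} ____cacheline_aligned_in_smp;

static struct broadcast_asid_alloc broadcast_asids = {
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(broadcast_asids.lock),
	.last_asid	= TLB_NR_DYN_ASIDS,
	.available	= MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1,
	.list		= LIST_HEAD_INIT(broadcast_asids.list),
};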
> +
> +static void reset_broadcast_asid_space(void)
> +{
> + mm_context_t *context;
> +
> + lockdep_assert_held(&broadcast_asid_lock);
> +
> + /*
> + * Flush once when we wrap around the ASID space, so we won't need
> + * to flush every time we allocate an ASID for broadcast flushing.
> + */
> + invlpgb_flush_all_nonglobals();
> + tlbsync();
> +
> + /*
> + * Leave the currently used broadcast ASIDs set in the bitmap, since
> + * those cannot be reused before the next wraparound and flush.
> + */
> + bitmap_clear(broadcast_asid_used, 0, MAX_ASID_AVAILABLE);
> + list_for_each_entry(context, &broadcast_asid_list, broadcast_asid_list)
> + __set_bit(context->broadcast_asid, broadcast_asid_used);
> +
> + last_broadcast_asid = TLB_NR_DYN_ASIDS;
> +}
> +
> +static u16 get_broadcast_asid(void)
> +{
> + lockdep_assert_held(&broadcast_asid_lock);
> +
> + do {
> + u16 start = last_broadcast_asid;
> + u16 asid = find_next_zero_bit(broadcast_asid_used, MAX_ASID_AVAILABLE, start);
> +
> + if (asid >= MAX_ASID_AVAILABLE) {
> + reset_broadcast_asid_space();
> + continue;
> + }
> +
> + /* Try claiming this broadcast ASID. */
> + if (!test_and_set_bit(asid, broadcast_asid_used)) {
IIUC, broadcast_asid_used is always protected by broadcast_asid_lock.
So why test_and_set_bit()?
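With the lock held (and find_next_zero_bit() having just found a clear bit
under that same lock), a plain __set_bit() should be enough, e.g.:

		/* broadcast_asid_lock serializes all updates to the bitmap. */
		__set_bit(asid, broadcast_asid_used);
		last_broadcast_asid = asid;
		return asid;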
> + last_broadcast_asid = asid;
> + return asid;
> + }
> + } while (1);
> +}
> +
> +/*
> + * Returns true if the mm is transitioning from a CPU-local ASID to a broadcast
> + * (INVLPGB) ASID, or the other way around.
> + */
> +static bool needs_broadcast_asid_reload(struct mm_struct *next, u16 prev_asid)
> +{
> + u16 broadcast_asid = mm_broadcast_asid(next);
> +
> + if (broadcast_asid && prev_asid != broadcast_asid)
> + return true;
> +
> + if (!broadcast_asid && is_broadcast_asid(prev_asid))
> + return true;
> +
> + return false;
> +}
> +
> +void destroy_context_free_broadcast_asid(struct mm_struct *mm)
> +{
> + if (!mm->context.broadcast_asid)
mm_broadcast_asid()?
> + return;
> +
> + guard(raw_spinlock_irqsave)(&broadcast_asid_lock);
> + mm->context.broadcast_asid = 0;
> + list_del(&mm->context.broadcast_asid_list);
> + broadcast_asid_available++;
> +}
> +
> +static bool mm_active_cpus_exceeds(struct mm_struct *mm, int threshold)
> +{
> + int count = 0;
> + int cpu;
> +
> + if (cpumask_weight(mm_cpumask(mm)) <= threshold)
> + return false;
> +
> + for_each_cpu(cpu, mm_cpumask(mm)) {
> + /* Skip the CPUs that aren't really running this process. */
> + if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
> + continue;
> +
> + if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
> + continue;
> +
> + if (++count > threshold)
> + return true;
> + }
> + return false;
> +}
> +
> +/*
> + * Assign a broadcast ASID to the current process, protecting against
> + * races between multiple threads in the process.
> + */
> +static void use_broadcast_asid(struct mm_struct *mm)
> +{
> + guard(raw_spinlock_irqsave)(&broadcast_asid_lock);
> +
> + /* This process is already using broadcast TLB invalidation. */
> + if (mm->context.broadcast_asid)
> + return;
> +
> + mm->context.broadcast_asid = get_broadcast_asid();
This is read without the lock, so do you want WRITE_ONCE() here?
> + mm->context.asid_transition = true;
And what about asid_transition? Presumably it also needs WRITE_ONCE(). But more
important than this theoretical compiler optimization: is there some assumed
ordering between this and setting broadcast_asid?
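If the intent is that anyone who observes broadcast_asid also observes
asid_transition, something like this might be needed (untested sketch):

	u16 asid = get_broadcast_asid();

	/* Publish the transition flag before the ASID itself... */
	WRITE_ONCE(mm->context.asid_transition, true);
	/* ...paired with READ_ONCE()/acquire on the reader side. */
	smp_store_release(&mm->context.broadcast_asid, asid);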
> + list_add(&mm->context.broadcast_asid_list, &broadcast_asid_list);
> + broadcast_asid_available--;
> +}
> +
> +/*
> + * Figure out whether to assign a broadcast (global) ASID to a process.
> + * We vary the threshold by how empty or full broadcast ASID space is.
> + * 1/4 full: >= 4 active threads
> + * 1/2 full: >= 8 active threads
> + * 3/4 full: >= 16 active threads
> + * 7/8 full: >= 32 active threads
> + * etc
> + *
> + * This way we should never exhaust the broadcast ASID space, even on very
> + * large systems, and the processes with the largest number of active
> + * threads should be able to use broadcast TLB invalidation.
> + */
> +#define HALFFULL_THRESHOLD 8
> +static bool meets_broadcast_asid_threshold(struct mm_struct *mm)
> +{
> + int avail = broadcast_asid_available;
> + int threshold = HALFFULL_THRESHOLD;
> +
> + if (!avail)
> + return false;
> +
> + if (avail > MAX_ASID_AVAILABLE * 3 / 4) {
> + threshold = HALFFULL_THRESHOLD / 4;
> + } else if (avail > MAX_ASID_AVAILABLE / 2) {
> + threshold = HALFFULL_THRESHOLD / 2;
> + } else if (avail < MAX_ASID_AVAILABLE / 3) {
> + do {
> + avail *= 2;
> + threshold *= 2;
> + } while ((avail + threshold) < MAX_ASID_AVAILABLE / 2);
> + }
> +
> + return mm_active_cpus_exceeds(mm, threshold);
> +}
> +
> +static void count_tlb_flush(struct mm_struct *mm)
> +{
> + if (!static_cpu_has(X86_FEATURE_INVLPGB))
> + return;
> +
> + /* Check every once in a while. */
> + if ((current->pid & 0x1f) != (jiffies & 0x1f))
> + return;
> +
> + if (meets_broadcast_asid_threshold(mm))
> + use_broadcast_asid(mm);
> +}
I don’t think count_tlb_flush() is a name that reflects what this function
does.
> +
> +static void finish_asid_transition(struct flush_tlb_info *info)
> +{
> + struct mm_struct *mm = info->mm;
> + int bc_asid = mm_broadcast_asid(mm);
> + int cpu;
> +
> + if (!mm->context.asid_transition)
is_asid_transition()?
> + return;
> +
> + for_each_cpu(cpu, mm_cpumask(mm)) {
> + if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) != mm)
> + continue;
> +
> + /*
> + * If at least one CPU is not using the broadcast ASID yet,
> + * send a TLB flush IPI. The IPI should cause stragglers
> + * to transition soon.
> + */
> + if (per_cpu(cpu_tlbstate.loaded_mm_asid, cpu) != bc_asid) {
> + flush_tlb_multi(mm_cpumask(info->mm), info);
> + return;
> + }
> + }
> +
> + /* All the CPUs running this process are using the broadcast ASID. */
> + mm->context.asid_transition = 0;
> +}
> +
> +static void broadcast_tlb_flush(struct flush_tlb_info *info)
> +{
> + bool pmd = info->stride_shift == PMD_SHIFT;
> + unsigned long maxnr = invlpgb_count_max;
> + unsigned long asid = info->mm->context.broadcast_asid;
> + unsigned long addr = info->start;
> + unsigned long nr;
> +
> + /* Flushing multiple pages at once is not supported with 1GB pages. */
> + if (info->stride_shift > PMD_SHIFT)
> + maxnr = 1;
> +
> + if (info->end == TLB_FLUSH_ALL) {
> + invlpgb_flush_single_pcid(kern_pcid(asid));
> + /* Do any CPUs supporting INVLPGB need PTI? */
> + if (static_cpu_has(X86_FEATURE_PTI))
> + invlpgb_flush_single_pcid(user_pcid(asid));
> + } else do {
I couldn’t find any use of “else do” in the kernel. Might it be confusing?
> + /*
> + * Calculate how many pages can be flushed at once; if the
> + * remainder of the range is less than one page, flush one.
> + */
> + nr = min(maxnr, (info->end - addr) >> info->stride_shift);
> + nr = max(nr, 1);
> +
> + invlpgb_flush_user_nr(kern_pcid(asid), addr, nr, pmd);
> + /* Do any CPUs supporting INVLPGB need PTI? */
> + if (static_cpu_has(X86_FEATURE_PTI))
> + invlpgb_flush_user_nr(user_pcid(asid), addr, nr, pmd);
> + addr += nr << info->stride_shift;
> + } while (addr < info->end);
> +
> + finish_asid_transition(info);
> +
> + /* Wait for the INVLPGBs kicked off above to finish. */
> + tlbsync();
> +}
> +#endif /* CONFIG_CPU_SUP_AMD */
> +
> /*
> * Given an ASID, flush the corresponding user ASID. We can delay this
> * until the next time we switch to it.
> @@ -556,8 +809,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
> */
> if (prev == next) {
> /* Not actually switching mm's */
> - VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
> - next->context.ctx_id);
> + if (is_dyn_asid(prev_asid))
> + VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
> + next->context.ctx_id);
Why not add the condition to the VM_WARN_ON() and avoid the nesting?
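i.e., something like:

		VM_WARN_ON(is_dyn_asid(prev_asid) &&
			   this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
				next->context.ctx_id);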
>
> /*
> * If this races with another thread that enables lam, 'new_lam'
> @@ -573,6 +827,23 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
> !cpumask_test_cpu(cpu, mm_cpumask(next))))
> cpumask_set_cpu(cpu, mm_cpumask(next));
>
> + /*
> + * Check if the current mm is transitioning to a new ASID.
> + */
> + if (needs_broadcast_asid_reload(next, prev_asid)) {
> + next_tlb_gen = atomic64_read(&next->context.tlb_gen);
> +
> + choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
> + goto reload_tlb;
> + }
> +
> + /*
> + * Broadcast TLB invalidation keeps this PCID up to date
> + * all the time.
> + */
> + if (is_broadcast_asid(prev_asid))
> + return;
> +
> /*
> * If the CPU is not in lazy TLB mode, we are just switching
> * from one thread in a process to another thread in the same
> @@ -626,8 +897,10 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
> barrier();
> }
>
> +reload_tlb:
> new_lam = mm_lam_cr3_mask(next);
> if (need_flush) {
> + VM_BUG_ON(is_broadcast_asid(new_asid));
> this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
> this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
> load_new_mm_cr3(next->pgd, new_asid, new_lam, true);
> @@ -746,7 +1019,7 @@ static void flush_tlb_func(void *info)
> const struct flush_tlb_info *f = info;
> struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
> u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
> - u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
> + u64 local_tlb_gen;
> bool local = smp_processor_id() == f->initiating_cpu;
> unsigned long nr_invalidate = 0;
> u64 mm_tlb_gen;
> @@ -769,6 +1042,16 @@ static void flush_tlb_func(void *info)
> if (unlikely(loaded_mm == &init_mm))
> return;
>
> + /* Reload the ASID if transitioning into or out of a broadcast ASID */
> + if (needs_broadcast_asid_reload(loaded_mm, loaded_mm_asid)) {
> + switch_mm_irqs_off(NULL, loaded_mm, NULL);
> + loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
> + }
> +
> + /* Broadcast ASIDs are always kept up to date with INVLPGB. */
> + if (is_broadcast_asid(loaded_mm_asid))
> + return;
> +
> VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
> loaded_mm->context.ctx_id);
>
> @@ -786,6 +1069,8 @@ static void flush_tlb_func(void *info)
> return;
> }
>
> + local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
> +
> if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
> f->new_tlb_gen <= local_tlb_gen)) {
> /*
> @@ -953,7 +1238,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
> * up on the new contents of what used to be page tables, while
> * doing a speculative memory access.
> */
> - if (info->freed_tables)
> + if (info->freed_tables || in_asid_transition(info))
> on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
> else
> on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
> @@ -1026,14 +1311,18 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
> bool freed_tables)
> {
> struct flush_tlb_info *info;
> + unsigned long threshold = tlb_single_page_flush_ceiling;
> u64 new_tlb_gen;
> int cpu;
>
> + if (static_cpu_has(X86_FEATURE_INVLPGB))
> + threshold *= invlpgb_count_max;
I know it doesn’t really impact performance, but it bothers me to see such
calculations happening unnecessarily every time...
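E.g., the scaling could be done once when INVLPGB is detected, something like
this (hypothetical sketch; it ignores later debugfs updates to the ceiling):

static void __init invlpgb_adjust_flush_ceiling(void)
{
	/*
	 * Hypothetical: scale the page-flush ceiling once at boot instead
	 * of multiplying on every flush_tlb_mm_range() call.
	 */
	if (boot_cpu_has(X86_FEATURE_INVLPGB))
		tlb_single_page_flush_ceiling *= invlpgb_count_max;
}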