Message-ID: <20250520010350.1740223-9-riel@surriel.com>
Date: Mon, 19 May 2025 21:02:33 -0400
From: Rik van Riel <riel@...riel.com>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org,
x86@...nel.org,
kernel-team@...a.com,
dave.hansen@...ux.intel.com,
luto@...nel.org,
peterz@...radead.org,
tglx@...utronix.de,
mingo@...hat.com,
bp@...en8.de,
hpa@...or.com,
nadav.amit@...il.com,
Rik van Riel <riel@...com>,
Rik van Riel <riel@...riel.com>
Subject: [RFC v2 8/9] x86/mm: use RAR for kernel TLB flushes
From: Rik van Riel <riel@...com>
Use Intel RAR for kernel TLB flushes when enabled.
Pass in PCID 0 to smp_call_rar_many() to flush the specified addresses,
regardless of which PCID they might be cached under on any destination CPU.
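As a simplified sketch, the kernel range flush added in this patch boils
down to the following (function names are the ones introduced by this
series; the sending CPU is not targeted by the RAR request and flushes
its own TLB separately, as rar_kernel_range_flush() does below):

  #include <asm/rar.h>
  #include <asm/tlbflush.h>

  static void rar_kernel_range_flush_sketch(unsigned long start,
					     unsigned long end)
  {
  	unsigned long addr;

  	/* Mirror the patch: hold preemption off across the RAR request. */
  	guard(preempt)();

  	/*
  	 * PCID 0 asks the remote CPUs to flush these addresses no matter
  	 * which PCID they are cached under.
  	 */
  	smp_call_rar_many(cpu_online_mask, 0, start, end);

  	/* The sending CPU flushes its own TLB entries locally. */
  	for (addr = start; addr < end; addr += PAGE_SIZE)
  		flush_tlb_one_kernel(addr);
  }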
Signed-off-by: Rik van Riel <riel@...riel.com>
---
arch/x86/mm/rar.c | 4 ++--
arch/x86/mm/tlb.c | 38 ++++++++++++++++++++++++++++++++++++++
2 files changed, 40 insertions(+), 2 deletions(-)
diff --git a/arch/x86/mm/rar.c b/arch/x86/mm/rar.c
index 16dc9b889cbd..9a18c926ea7b 100644
--- a/arch/x86/mm/rar.c
+++ b/arch/x86/mm/rar.c
@@ -142,8 +142,8 @@ void smp_call_rar_many(const struct cpumask *mask, u16 pcid,
* send smp call function interrupt to this cpu and as such deadlocks
* can't happen.
*/
- WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
- && !oops_in_progress && !early_boot_irqs_disabled);
+ if (cpu_online(this_cpu) && !oops_in_progress && !early_boot_irqs_disabled)
+ lockdep_assert_irqs_enabled();
/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
cpu = cpumask_first_and(mask, cpu_online_mask);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index f5761e8be77f..35489df811dc 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -21,6 +21,7 @@
#include <asm/apic.h>
#include <asm/msr.h>
#include <asm/perf_event.h>
+#include <asm/rar.h>
#include <asm/tlb.h>
#include "mm_internal.h"
@@ -1446,6 +1447,18 @@ static void do_flush_tlb_all(void *info)
__flush_tlb_all();
}
+static void rar_full_flush(const cpumask_t *cpumask)
+{
+ guard(preempt)();
+ smp_call_rar_many(cpumask, 0, 0, TLB_FLUSH_ALL);
+ invpcid_flush_all();
+}
+
+static void rar_flush_all(void)
+{
+ rar_full_flush(cpu_online_mask);
+}
+
void flush_tlb_all(void)
{
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
@@ -1453,6 +1466,8 @@ void flush_tlb_all(void)
/* First try (faster) hardware-assisted TLB invalidation. */
if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
invlpgb_flush_all();
+ else if (cpu_feature_enabled(X86_FEATURE_RAR))
+ rar_flush_all();
else
/* Fall back to the IPI-based invalidation. */
on_each_cpu(do_flush_tlb_all, NULL, 1);
@@ -1482,15 +1497,36 @@ static void do_kernel_range_flush(void *info)
struct flush_tlb_info *f = info;
unsigned long addr;
+ /*
+ * With PTI, kernel TLB entries in all PCIDs need to be flushed.
+ * With RAR, the PCID space becomes so large that we might as well flush it all.
+ *
+ * Either of the two by itself still works with targeted flushes.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_RAR) &&
+ cpu_feature_enabled(X86_FEATURE_PTI)) {
+ invpcid_flush_all();
+ return;
+ }
+
/* flush range by one by one 'invlpg' */
for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
flush_tlb_one_kernel(addr);
}
+static void rar_kernel_range_flush(struct flush_tlb_info *info)
+{
+ guard(preempt)();
+ smp_call_rar_many(cpu_online_mask, 0, info->start, info->end);
+ do_kernel_range_flush(info);
+}
+
static void kernel_tlb_flush_all(struct flush_tlb_info *info)
{
if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
invlpgb_flush_all();
+ else if (cpu_feature_enabled(X86_FEATURE_RAR))
+ rar_flush_all();
else
on_each_cpu(do_flush_tlb_all, NULL, 1);
}
@@ -1499,6 +1535,8 @@ static void kernel_tlb_flush_range(struct flush_tlb_info *info)
{
if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
invlpgb_kernel_range_flush(info);
+ else if (cpu_feature_enabled(X86_FEATURE_RAR))
+ rar_kernel_range_flush(info);
else
on_each_cpu(do_kernel_range_flush, info, 1);
}
--
2.49.0