Message-ID: <2385d4ed-48d5-4d50-ae95-dbeb23432b71@intel.com>
Date: Wed, 21 May 2025 09:38:56 -0700
From: Dave Hansen <dave.hansen@...el.com>
To: Rik van Riel <riel@...riel.com>, linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, x86@...nel.org, kernel-team@...a.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
tglx@...utronix.de, mingo@...hat.com, bp@...en8.de, hpa@...or.com,
nadav.amit@...il.com, Yu-cheng Yu <yu-cheng.yu@...el.com>
Subject: Re: [RFC v2 7/9] x86/mm: Introduce Remote Action Request
On 5/19/25 18:02, Rik van Riel wrote:
> From: Yu-cheng Yu <yu-cheng.yu@...el.com>
>
> Remote Action Request (RAR) is a TLB flushing broadcast facility.
> To start a TLB flush, the initiator CPU creates a RAR payload and
> sends a command to the APIC. The receiving CPUs automatically flush
> TLBs as specified in the payload without the kernel's involvement.
>
> [ riel: add pcid parameter to smp_call_rar_many so other mms can be flushed ]
>
> Signed-off-by: Yu-cheng Yu <yu-cheng.yu@...el.com>
> Signed-off-by: Rik van Riel <riel@...riel.com>
> ---
> arch/x86/include/asm/rar.h | 69 +++++++++++++
> arch/x86/kernel/cpu/common.c | 4 +
> arch/x86/mm/Makefile | 1 +
> arch/x86/mm/rar.c | 195 +++++++++++++++++++++++++++++++++++
> 4 files changed, 269 insertions(+)
> create mode 100644 arch/x86/include/asm/rar.h
> create mode 100644 arch/x86/mm/rar.c
>
> diff --git a/arch/x86/include/asm/rar.h b/arch/x86/include/asm/rar.h
> new file mode 100644
> index 000000000000..78c039e40e81
> --- /dev/null
> +++ b/arch/x86/include/asm/rar.h
> @@ -0,0 +1,69 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_X86_RAR_H
> +#define _ASM_X86_RAR_H
> +
> +/*
> + * RAR payload types
> + */
> +#define RAR_TYPE_INVPG 0
> +#define RAR_TYPE_INVPG_NO_CR3 1
> +#define RAR_TYPE_INVPCID 2
> +#define RAR_TYPE_INVEPT 3
> +#define RAR_TYPE_INVVPID 4
> +#define RAR_TYPE_WRMSR 5
> +
> +/*
> + * Subtypes for RAR_TYPE_INVLPG
> + */
> +#define RAR_INVPG_ADDR 0 /* address specific */
> +#define RAR_INVPG_ALL 2 /* all, include global */
> +#define RAR_INVPG_ALL_NO_GLOBAL 3 /* all, exclude global */
> +
> +/*
> + * Subtypes for RAR_TYPE_INVPCID
> + */
> +#define RAR_INVPCID_ADDR 0 /* address specific */
> +#define RAR_INVPCID_PCID 1 /* all of PCID */
> +#define RAR_INVPCID_ALL 2 /* all, include global */
> +#define RAR_INVPCID_ALL_NO_GLOBAL 3 /* all, exclude global */
> +
> +/*
> + * Page size for RAR_TYPE_INVLPG
> + */
> +#define RAR_INVLPG_PAGE_SIZE_4K 0
> +#define RAR_INVLPG_PAGE_SIZE_2M 1
> +#define RAR_INVLPG_PAGE_SIZE_1G 2
> +
> +/*
> + * Max number of pages per payload
> + */
> +#define RAR_INVLPG_MAX_PAGES 63
> +
> +struct rar_payload {
> + u64 for_sw : 8;
> + u64 type : 8;
> + u64 must_be_zero_1 : 16;
> + u64 subtype : 3;
> + u64 page_size : 2;
> + u64 num_pages : 6;
> + u64 must_be_zero_2 : 21;
> +
> + u64 must_be_zero_3;
> +
> + /*
> + * Starting address
> + */
> + u64 initiator_cr3;
> + u64 linear_address;
> +
> + /*
> + * Padding
> + */
> + u64 padding[4];
> +};
> +
> +void rar_cpu_init(void);
> +void smp_call_rar_many(const struct cpumask *mask, u16 pcid,
> + unsigned long start, unsigned long end);
> +
> +#endif /* _ASM_X86_RAR_H */
> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
> index dd662c42f510..b1e1b9afb2ac 100644
> --- a/arch/x86/kernel/cpu/common.c
> +++ b/arch/x86/kernel/cpu/common.c
> @@ -71,6 +71,7 @@
> #include <asm/tdx.h>
> #include <asm/posted_intr.h>
> #include <asm/runtime-const.h>
> +#include <asm/rar.h>
>
> #include "cpu.h"
>
> @@ -2438,6 +2439,9 @@ void cpu_init(void)
> if (is_uv_system())
> uv_cpu_init();
>
> + if (cpu_feature_enabled(X86_FEATURE_RAR))
> + rar_cpu_init();
> +
> load_fixmap_gdt(cpu);
> }
>
> diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
> index 5b9908f13dcf..f36fc99e8b10 100644
> --- a/arch/x86/mm/Makefile
> +++ b/arch/x86/mm/Makefile
> @@ -52,6 +52,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
> obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
> obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
> obj-$(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION) += pti.o
> +obj-$(CONFIG_BROADCAST_TLB_FLUSH) += rar.o
>
> obj-$(CONFIG_X86_MEM_ENCRYPT) += mem_encrypt.o
> obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o
> diff --git a/arch/x86/mm/rar.c b/arch/x86/mm/rar.c
> new file mode 100644
> index 000000000000..16dc9b889cbd
> --- /dev/null
> +++ b/arch/x86/mm/rar.c
> @@ -0,0 +1,195 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * RAR TLB shootdown
> + */
> +#include <linux/sched.h>
> +#include <linux/bug.h>
> +#include <asm/current.h>
> +#include <asm/io.h>
> +#include <asm/sync_bitops.h>
> +#include <asm/rar.h>
> +#include <asm/tlbflush.h>
> +
> +static DEFINE_PER_CPU(struct cpumask, rar_cpu_mask);
> +
> +#define RAR_ACTION_OK 0x00
> +#define RAR_ACTION_START 0x01
> +#define RAR_ACTION_ACKED 0x02
> +#define RAR_ACTION_FAIL 0x80
These don't match up with the names that ended up in the public
documentation. Could we realign them, please?
> +#define RAR_MAX_PAYLOADS 32UL
> +
> +static unsigned long rar_in_use = ~(RAR_MAX_PAYLOADS - 1);
> +static struct rar_payload rar_payload[RAR_MAX_PAYLOADS] __page_aligned_bss;
> +static DEFINE_PER_CPU_ALIGNED(u8[RAR_MAX_PAYLOADS], rar_action);
At some point, there needs to be a description of the data structures.
For instance, there's nothing architecturally requiring all CPUs to
share a payload table. But this implementation chooses to have them
share. We need a discussion somewhere of those design decisions.
One thing that also needs discussion: 'rar_in_use' isn't really about
RAR itself. It's a bitmap of which payload slots are allocated.
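Maybe something like this above the declarations (wording is just a
sketch):

/*
 * A single, global table of RAR payloads is shared by all CPUs.
 * Nothing in the architecture requires sharing; it just keeps the
 * memory footprint down. 'rar_in_use' is a bitmap of which slots
 * in that table are allocated.
 */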
> +static unsigned long get_payload(void)
> +{
This is more like "allocate a payload slot" than a "get payload"
operation, IMNHO.
> + while (1) {
> + unsigned long bit;
> +
> + /*
> + * Find a free bit and confirm it with
> + * test_and_set_bit() below.
> + */
> + bit = ffz(READ_ONCE(rar_in_use));
> +
> + if (bit >= RAR_MAX_PAYLOADS)
> + continue;
> +
> + if (!test_and_set_bit((long)bit, &rar_in_use))
> + return bit;
> + }
> +}
This also serves as a kind of spinlock to wait for a payload slot to
become free.
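Maybe rename it and spell that out, something like (untested, name is
just a suggestion):

/*
 * Spin until a payload slot can be allocated. This doubles as
 * the "lock" that throttles initiators when all slots are busy.
 */
static unsigned long rar_alloc_payload_slot(void)
{
	while (1) {
		unsigned long bit = ffz(READ_ONCE(rar_in_use));

		if (bit >= RAR_MAX_PAYLOADS)
			continue;

		if (!test_and_set_bit(bit, &rar_in_use))
			return bit;
	}
}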
> +static void free_payload(unsigned long idx)
> +{
> + clear_bit(idx, &rar_in_use);
> +}
> +
> +static void set_payload(unsigned long idx, u16 pcid, unsigned long start,
> + uint32_t pages)
> +{
> + struct rar_payload *p = &rar_payload[idx];
I'd _probably_ just pass the 'struct rar_payload *' instead of an index.
It's harder to screw up a pointer.
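Something like (untested, body otherwise unchanged):

static void set_payload(struct rar_payload *p, u16 pcid,
			unsigned long start, uint32_t pages)

with the caller doing:

	set_payload(&rar_payload[idx], pcid, start, pages);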
> + p->must_be_zero_1 = 0;
> + p->must_be_zero_2 = 0;
> + p->must_be_zero_3 = 0;
> + p->page_size = RAR_INVLPG_PAGE_SIZE_4K;
> + p->type = RAR_TYPE_INVPCID;
> + p->num_pages = pages;
> + p->initiator_cr3 = pcid;
> + p->linear_address = start;
> +
> + if (pcid) {
> + /* RAR invalidation of the mapping of a specific process. */
> + if (pages >= RAR_INVLPG_MAX_PAGES)
> + p->subtype = RAR_INVPCID_PCID;
> + else
> + p->subtype = RAR_INVPCID_ADDR;
> + } else {
> + /*
> + * Unfortunately RAR_INVPCID_ADDR excludes global translations.
> + * Always do a full flush for kernel invalidations.
> + */
> + p->subtype = RAR_INVPCID_ALL;
> + }
> +
> + smp_wmb();
> +}
The barrier could use a comment too.
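Maybe:

	/*
	 * Make sure the payload is visible to other CPUs before
	 * their action vectors are armed and the RAR IPI is sent.
	 */
	smp_wmb();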
> +static void set_action_entry(unsigned long idx, int target_cpu)
Just trying to read this, I think we should probably drop the 'idx'
nomenclature and call these "payload_nr"s or something more descriptive.
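e.g.:

static void set_action_entry(unsigned long payload_nr, int target_cpu)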
> +{
> + u8 *bitmap = per_cpu(rar_action, target_cpu);
> +
> + WRITE_ONCE(bitmap[idx], RAR_ACTION_START);
> +}
Maybe a comment like this for set_action_entry() would be helpful:
/*
* Given a remote CPU, "arm" its action vector to ensure it
* handles payload number 'idx' when it receives the RAR signal.
* The remote CPU will overwrite RAR_ACTION_START when it handles
* the request.
*/
> +static void wait_for_done(unsigned long idx, int target_cpu)
> +{
> + u8 status;
> + u8 *rar_actions = per_cpu(rar_action, target_cpu);
> +
> + status = READ_ONCE(rar_actions[idx]);
> +
> + while ((status != RAR_ACTION_OK) && (status != RAR_ACTION_FAIL)) {
Should this be:
while (status == RAR_ACTION_START) {
...
? That would more clearly link it to set_action_entry() and would also
be shorter.
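I.e., something like (untested):

	u8 *rar_actions = per_cpu(rar_action, target_cpu);

	while (READ_ONCE(rar_actions[idx]) == RAR_ACTION_START)
		cpu_relax();

	WARN_ON_ONCE(READ_ONCE(rar_actions[idx]) == RAR_ACTION_FAIL);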
> + cpu_relax();
> + status = READ_ONCE(rar_actions[idx]);
> + }
> +
> + WARN_ON_ONCE(rar_actions[idx] == RAR_ACTION_FAIL);
> +}
> +
> +void rar_cpu_init(void)
> +{
> + u64 r;
> + u8 *bitmap;
> + int this_cpu = smp_processor_id();
> +
> + cpumask_clear(&per_cpu(rar_cpu_mask, this_cpu));
> +
> + rdmsrl(MSR_IA32_RAR_INFO, r);
> + pr_info_once("RAR: support %lld payloads\n", r >> 32);
Doesn't this need to get coordinated or checked against RAR_MAX_PAYLOADS?
It might also be nice to use one of the mask functions for this. It's
nice when you see a spec say "37:32" and then see the code actually use
a GENMASK(37, 32) somewhere to match it.
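Maybe something like (untested, and assuming the payload count really
is bits 37:32):

#include <linux/bitfield.h>

#define RAR_INFO_MAX_PAYLOADS	GENMASK_ULL(37, 32)

	u64 max_payloads;

	rdmsrl(MSR_IA32_RAR_INFO, r);
	max_payloads = FIELD_GET(RAR_INFO_MAX_PAYLOADS, r);
	pr_info_once("RAR: support %llu payloads\n", max_payloads);
	WARN_ON_ONCE(max_payloads < RAR_MAX_PAYLOADS);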
> + bitmap = (u8 *)per_cpu(rar_action, this_cpu);
> + memset(bitmap, 0, RAR_MAX_PAYLOADS);
> + wrmsrl(MSR_IA32_RAR_ACT_VEC, (u64)virt_to_phys(bitmap));
> + wrmsrl(MSR_IA32_RAR_PAYLOAD_BASE, (u64)virt_to_phys(rar_payload));
please vertically align the virt_to_phys() ^
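i.e.:

	wrmsrl(MSR_IA32_RAR_ACT_VEC,      (u64)virt_to_phys(bitmap));
	wrmsrl(MSR_IA32_RAR_PAYLOAD_BASE, (u64)virt_to_phys(rar_payload));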
> +
> + r = RAR_CTRL_ENABLE | RAR_CTRL_IGNORE_IF;
Setting RAR_CTRL_IGNORE_IF is probably worth a _little_ discussion in
the changelog.
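At a minimum, a comment at the site would help, maybe something like
(assuming IGNORE_IF does what its name implies):

	/*
	 * IGNORE_IF: handle incoming RAR requests even while the
	 * receiving CPU has interrupts disabled.
	 */
	r = RAR_CTRL_ENABLE | RAR_CTRL_IGNORE_IF;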
> + // reserved bits!!! r |= (RAR_VECTOR & 0xff);
Is this just some cruft from testing?
> + wrmsrl(MSR_IA32_RAR_CTRL, r);
> +}
> +
> +/*
> + * This is a modified version of smp_call_function_many() of kernel/smp.c,
> + * without a function pointer, because the RAR handler is the ucode.
> + */
It doesn't look _that_ much like smp_call_function_many(). I don't see
much that can be consolidated.
> +void smp_call_rar_many(const struct cpumask *mask, u16 pcid,
> + unsigned long start, unsigned long end)
> +{
> + unsigned long pages = (end - start + PAGE_SIZE) / PAGE_SIZE;
> + int cpu, next_cpu, this_cpu = smp_processor_id();
> + cpumask_t *dest_mask;
> + unsigned long idx;
> +
> + if (pages > RAR_INVLPG_MAX_PAGES || end == TLB_FLUSH_ALL)
> + pages = RAR_INVLPG_MAX_PAGES;
> +
> + /*
> + * Can deadlock when called with interrupts disabled.
> + * We allow cpu's that are not yet online though, as no one else can
Nit: at some point all of the "we's" need to be excised and moved over
to imperative voice.
> + * send smp call function interrupt to this cpu and as such deadlocks
> + * can't happen.
> + */
> + WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
> + && !oops_in_progress && !early_boot_irqs_disabled);
> +
> + /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
> + cpu = cpumask_first_and(mask, cpu_online_mask);
> + if (cpu == this_cpu)
> + cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
> +
> + /* No online cpus? We're done. */
> + if (cpu >= nr_cpu_ids)
> + return;
This little idiom _is_ in smp_call_function_many_cond(). I wonder if it
can be refactored out.
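Maybe a little shared helper, something like (untested, name made up):

/* Find the first online CPU in 'mask' other than 'this_cpu'. */
static int first_other_online_cpu(const struct cpumask *mask, int this_cpu)
{
	int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	return cpu;	/* >= nr_cpu_ids when there is none */
}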
> + /* Do we have another CPU which isn't us? */
> + next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
> + if (next_cpu == this_cpu)
> + next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
> +
> + /* Fastpath: do that cpu by itself. */
> + if (next_cpu >= nr_cpu_ids) {
> + idx = get_payload();
> + set_payload(idx, pcid, start, pages);
> + set_action_entry(idx, cpu);
> + arch_send_rar_single_ipi(cpu);
> + wait_for_done(idx, cpu);
> + free_payload(idx);
> + return;
> + }
FWIW, I'm not sure this is that much of a fast path. I wouldn't be
shocked if _some_ hardware has a much faster way of IPI'ing a single CPU
versus a bunch. But I think arch_send_rar_single_ipi() and
arch_send_rar_ipi_mask() end up frobbing the hardware in pretty similar
ways.
I'd probably just axe this in the name of simplification unless there
are numbers behind it.
> + dest_mask = this_cpu_ptr(&rar_cpu_mask);
> + cpumask_and(dest_mask, mask, cpu_online_mask);
> + cpumask_clear_cpu(this_cpu, dest_mask);
> +
> + /* Some callers race with other cpus changing the passed mask */
> + if (unlikely(!cpumask_weight(dest_mask)))
> + return;
> +
> + idx = get_payload();
> + set_payload(idx, pcid, start, pages);
> +
> + for_each_cpu(cpu, dest_mask)
> + set_action_entry(idx, cpu);
> +
> + /* Send a message to all CPUs in the map */
> + arch_send_rar_ipi_mask(dest_mask);
> +
> + for_each_cpu(cpu, dest_mask)
> + wait_for_done(idx, cpu);
Naming nit: Let's give wait_for_done() a more RAR-specific name. It'll
make it clear that this is a RAR operation and not something generic.
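Maybe something like (name is just a strawman):

static void rar_wait_for_completion(unsigned long payload_nr, int target_cpu)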
> + free_payload(idx);
> +}
> +EXPORT_SYMBOL(smp_call_rar_many);