Message-ID: <d95d73d2-c728-4da2-b68e-d2e39a518ae6@oracle.com>
Date: Tue, 4 Jun 2024 14:47:27 -0700
From: ross.philipson@...cle.com
To: Jarkko Sakkinen <jarkko@...nel.org>, linux-kernel@...r.kernel.org,
x86@...nel.org, linux-integrity@...r.kernel.org,
linux-doc@...r.kernel.org, linux-crypto@...r.kernel.org,
kexec@...ts.infradead.org, linux-efi@...r.kernel.org,
iommu@...ts.linux-foundation.org
Cc: dpsmith@...rtussolutions.com, tglx@...utronix.de, mingo@...hat.com,
bp@...en8.de, hpa@...or.com, dave.hansen@...ux.intel.com,
ardb@...nel.org, mjg59@...f.ucam.org,
James.Bottomley@...senpartnership.com, peterhuewe@....de, jgg@...pe.ca,
luto@...capital.net, nivedita@...m.mit.edu,
herbert@...dor.apana.org.au, davem@...emloft.net, corbet@....net,
ebiederm@...ssion.com, dwmw2@...radead.org, baolu.lu@...ux.intel.com,
kanth.ghatraju@...cle.com, andrew.cooper3@...rix.com,
trenchboot-devel@...glegroups.com, ross.philipson@...cle.com
Subject: Re: [PATCH v9 10/19] x86: Secure Launch SMP bringup support
On 6/4/24 1:05 PM, Jarkko Sakkinen wrote:
> On Fri May 31, 2024 at 4:03 AM EEST, Ross Philipson wrote:
>> On Intel, the APs are left in a well documented state after TXT performs
>> the late launch. Specifically they cannot have #INIT asserted on them so
>> a standard startup via INIT/SIPI/SIPI cannot be performed. Instead the
>> early SL stub code uses MONITOR and MWAIT to park the APs. The realmode/init.c
>> code updates the jump address for the waiting APs with the location of the
>> Secure Launch entry point in the RM piggy after it is loaded and fixed up.
>> When the APs are woken by a write to their monitor, they jump to the Secure
>> Launch entry point in the RM piggy, which mimics what the real mode code
>> would do and then jumps to the standard RM piggy protected mode entry point.
>>
>> Signed-off-by: Ross Philipson <ross.philipson@...cle.com>
>> ---
>> arch/x86/include/asm/realmode.h | 3 ++
>> arch/x86/kernel/smpboot.c | 58 +++++++++++++++++++++++++++-
>> arch/x86/realmode/init.c | 3 ++
>> arch/x86/realmode/rm/header.S | 3 ++
>> arch/x86/realmode/rm/trampoline_64.S | 32 +++++++++++++++
>> 5 files changed, 97 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
>> index 87e5482acd0d..339b48e2543d 100644
>> --- a/arch/x86/include/asm/realmode.h
>> +++ b/arch/x86/include/asm/realmode.h
>> @@ -38,6 +38,9 @@ struct real_mode_header {
>> #ifdef CONFIG_X86_64
>> u32 machine_real_restart_seg;
>> #endif
>> +#ifdef CONFIG_SECURE_LAUNCH
>> + u32 sl_trampoline_start32;
>> +#endif
>> };
>>
>> /* This must match data at realmode/rm/trampoline_{32,64}.S */
>> diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
>> index 0c35207320cb..adb521221d6c 100644
>> --- a/arch/x86/kernel/smpboot.c
>> +++ b/arch/x86/kernel/smpboot.c
>> @@ -60,6 +60,7 @@
>> #include <linux/stackprotector.h>
>> #include <linux/cpuhotplug.h>
>> #include <linux/mc146818rtc.h>
>> +#include <linux/slaunch.h>
>>
>> #include <asm/acpi.h>
>> #include <asm/cacheinfo.h>
>> @@ -868,6 +869,56 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
>> return 0;
>> }
>>
>> +#ifdef CONFIG_SECURE_LAUNCH
>> +
>> +static bool slaunch_is_txt_launch(void)
>> +{
>> + if ((slaunch_get_flags() & (SL_FLAG_ACTIVE|SL_FLAG_ARCH_TXT)) ==
>> + (SL_FLAG_ACTIVE | SL_FLAG_ARCH_TXT))
>> + return true;
>> +
>> + return false;
>> +}
>
> static inline bool slaunch_is_txt_launch(void)
> {
> 	u32 mask = SL_FLAG_ACTIVE | SL_FLAG_ARCH_TXT;
>
> 	return (slaunch_get_flags() & mask) == mask;
> }
Actually I think I can take your suggested change and move this function
into the main slaunch header, since this check is done in other places as
well. Later I can add others along the same lines, e.g.
slaunch_is_skinit_launch(), roughly as sketched below. Thanks.
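
Something like this is what I have in mind for the header (a sketch only;
I'm assuming the SKINIT arch flag ends up named SL_FLAG_ARCH_SKINIT):

static inline bool slaunch_is_txt_launch(void)
{
	u32 mask = SL_FLAG_ACTIVE | SL_FLAG_ARCH_TXT;

	/* Both the active and TXT arch flags must be set. */
	return (slaunch_get_flags() & mask) == mask;
}

static inline bool slaunch_is_skinit_launch(void)
{
	u32 mask = SL_FLAG_ACTIVE | SL_FLAG_ARCH_SKINIT;

	/* Same check, but for an AMD SKINIT launch. */
	return (slaunch_get_flags() & mask) == mask;
}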
>
>
>> +
>> +/*
>> + * TXT AP startup is quite different than normal. The APs cannot have #INIT
>> + * asserted on them or receive SIPIs. The early Secure Launch code has parked
>> + * the APs using monitor/mwait. This will wake the APs by writing the monitor
>> + * and have them jump to the protected mode code in the rmpiggy where the rest
>> + * of the SMP boot of the AP will proceed normally.
>> + */
>> +static void slaunch_wakeup_cpu_from_txt(int cpu, int apicid)
>> +{
>> + struct sl_ap_wake_info *ap_wake_info;
>> + struct sl_ap_stack_and_monitor *stack_monitor = NULL;
>
> struct sl_ap_stack_and_monitor *stack_monitor; /* note: no initialization */
> struct sl_ap_wake_info *ap_wake_info;
Will fix.
>
>
>> +
>> + ap_wake_info = slaunch_get_ap_wake_info();
>> +
>> + stack_monitor = (struct sl_ap_stack_and_monitor *)__va(ap_wake_info->ap_wake_block +
>> + ap_wake_info->ap_stacks_offset);
>> +
>> + for (unsigned int i = TXT_MAX_CPUS - 1; i >= 0; i--) {
>> + if (stack_monitor[i].apicid == apicid) {
>> + /* Write the monitor */
>
> I'd remove this comment.
Sure.
Ross
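
For reference, the loop would then end up as just the sketch below (note I
switched to a signed counter; with an unsigned i the i >= 0 test can never
be false):

	for (int i = TXT_MAX_CPUS - 1; i >= 0; i--) {
		if (stack_monitor[i].apicid == apicid) {
			stack_monitor[i].monitor = 1;
			break;
		}
	}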
>
>> + stack_monitor[i].monitor = 1;
>> + break;
>> + }
>> + }
>> +}
>> +
>> +#else
>> +
>> +static inline bool slaunch_is_txt_launch(void)
>> +{
>> + return false;
>> +}
>> +
>> +static inline void slaunch_wakeup_cpu_from_txt(int cpu, int apicid)
>> +{
>> +}
>> +
>> +#endif /* !CONFIG_SECURE_LAUNCH */
>> +
>> /*
>> * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
>> * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
>> @@ -877,7 +928,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
>> static int do_boot_cpu(u32 apicid, int cpu, struct task_struct *idle)
>> {
>> unsigned long start_ip = real_mode_header->trampoline_start;
>> - int ret;
>> + int ret = 0;
>>
>> #ifdef CONFIG_X86_64
>> /* If 64-bit wakeup method exists, use the 64-bit mode trampoline IP */
>> @@ -922,12 +973,15 @@ static int do_boot_cpu(u32 apicid, int cpu, struct task_struct *idle)
>>
>> /*
>> * Wake up a CPU in difference cases:
>> + * - Intel TXT DRTM launch uses its own method to wake the APs
>> * - Use a method from the APIC driver if one defined, with wakeup
>> * straight to 64-bit mode preferred over wakeup to RM.
>> * Otherwise,
>> * - Use an INIT boot APIC message
>> */
>> - if (apic->wakeup_secondary_cpu_64)
>> + if (slaunch_is_txt_launch())
>> + slaunch_wakeup_cpu_from_txt(cpu, apicid);
>> + else if (apic->wakeup_secondary_cpu_64)
>> ret = apic->wakeup_secondary_cpu_64(apicid, start_ip);
>> else if (apic->wakeup_secondary_cpu)
>> ret = apic->wakeup_secondary_cpu(apicid, start_ip);
>> diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
>> index f9bc444a3064..d95776cb30d3 100644
>> --- a/arch/x86/realmode/init.c
>> +++ b/arch/x86/realmode/init.c
>> @@ -4,6 +4,7 @@
>> #include <linux/memblock.h>
>> #include <linux/cc_platform.h>
>> #include <linux/pgtable.h>
>> +#include <linux/slaunch.h>
>>
>> #include <asm/set_memory.h>
>> #include <asm/realmode.h>
>> @@ -210,6 +211,8 @@ void __init init_real_mode(void)
>>
>> setup_real_mode();
>> set_real_mode_permissions();
>> +
>> + slaunch_fixup_jump_vector();
>> }
>>
>> static int __init do_init_real_mode(void)
>> diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
>> index 2eb62be6d256..3b5cbcbbfc90 100644
>> --- a/arch/x86/realmode/rm/header.S
>> +++ b/arch/x86/realmode/rm/header.S
>> @@ -37,6 +37,9 @@ SYM_DATA_START(real_mode_header)
>> #ifdef CONFIG_X86_64
>> .long __KERNEL32_CS
>> #endif
>> +#ifdef CONFIG_SECURE_LAUNCH
>> + .long pa_sl_trampoline_start32
>> +#endif
>> SYM_DATA_END(real_mode_header)
>>
>> /* End signature, used to verify integrity */
>> diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
>> index 14d9c7daf90f..b0ce6205d7ea 100644
>> --- a/arch/x86/realmode/rm/trampoline_64.S
>> +++ b/arch/x86/realmode/rm/trampoline_64.S
>> @@ -122,6 +122,38 @@ SYM_CODE_END(sev_es_trampoline_start)
>>
>> .section ".text32","ax"
>> .code32
>> +#ifdef CONFIG_SECURE_LAUNCH
>> + .balign 4
>> +SYM_CODE_START(sl_trampoline_start32)
>> + /*
>> + * The early secure launch stub AP wakeup code has taken care of all
>> + * the vagaries of launching out of TXT. This bit just mimics what the
>> + * 16b entry code does and jumps off to the real startup_32.
>> + */
>> + cli
>> + wbinvd
>> +
>> + /*
>> + * The %ebx provided is not terribly useful since it is the physical
>> + * address of tb_trampoline_start and not the base of the image.
>> + * Use pa_real_mode_base, which is fixed up, to get a run time
>> + * base register to use for offsets to location that do not have
>> + * pa_ symbols.
>> + */
>> + movl $pa_real_mode_base, %ebx
>> +
>> + LOCK_AND_LOAD_REALMODE_ESP lock_pa=1
>> +
>> + lgdt tr_gdt(%ebx)
>> + lidt tr_idt(%ebx)
>> +
>> + movw $__KERNEL_DS, %dx # Data segment descriptor
>> +
>> + /* Jump to where the 16b code would have jumped */
>> + ljmpl $__KERNEL32_CS, $pa_startup_32
>> +SYM_CODE_END(sl_trampoline_start32)
>> +#endif
>> +
>> .balign 4
>> SYM_CODE_START(startup_32)
>> movl %edx, %ss
>
> BR, Jarkko
>