[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <5061dfee-636c-6b68-8f33-5f32e5bfa093@amd.com>
Date: Wed, 8 Mar 2023 17:18:13 +0100
From: "Gupta, Pankaj" <pankaj.gupta@....com>
To: Tianyu Lan <ltykernel@...il.com>, luto@...nel.org,
tglx@...utronix.de, mingo@...hat.com, bp@...en8.de,
dave.hansen@...ux.intel.com, x86@...nel.org, hpa@...or.com,
seanjc@...gle.com, pbonzini@...hat.com, jgross@...e.com,
tiala@...rosoft.com, kirill@...temov.name,
jiangshan.ljs@...group.com, peterz@...radead.org,
ashish.kalra@....com, srutherford@...gle.com,
akpm@...ux-foundation.org, anshuman.khandual@....com,
pawan.kumar.gupta@...ux.intel.com, adrian.hunter@...el.com,
daniel.sneddon@...ux.intel.com, alexander.shishkin@...ux.intel.com,
sandipan.das@....com, ray.huang@....com, brijesh.singh@....com,
michael.roth@....com, thomas.lendacky@....com,
venu.busireddy@...cle.com, sterritt@...gle.com,
tony.luck@...el.com, samitolvanen@...gle.com, fenghua.yu@...el.com
Cc: linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
linux-hyperv@...r.kernel.org, linux-arch@...r.kernel.org
Subject: Re: [RFC PATCH V3 13/16] x86/sev: Add Check of #HV event in path
On 3/1/2023 12:11 PM, Gupta, Pankaj wrote:
> On 1/22/2023 3:46 AM, Tianyu Lan wrote:
>> From: Tianyu Lan <tiala@...rosoft.com>
>>
>> Add check_hv_pending() and check_hv_pending_after_irq() to
>> check queued #HV event when irq is disabled.
>>
>> Signed-off-by: Tianyu Lan <tiala@...rosoft.com>
>> ---
>> arch/x86/entry/entry_64.S | 18 +++++++++++++++
>> arch/x86/include/asm/irqflags.h | 10 +++++++++
>> arch/x86/kernel/sev.c | 39 +++++++++++++++++++++++++++++++++
>> 3 files changed, 67 insertions(+)
>>
>> diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
>> index 6baec7653f19..aec8dc4443d1 100644
>> --- a/arch/x86/entry/entry_64.S
>> +++ b/arch/x86/entry/entry_64.S
>> @@ -1064,6 +1064,15 @@ SYM_CODE_END(paranoid_entry)
>> * R15 - old SPEC_CTRL
>> */
>> SYM_CODE_START_LOCAL(paranoid_exit)
>> +#ifdef CONFIG_AMD_MEM_ENCRYPT
>> + /*
>> + * If a #HV was delivered during execution and interrupts were
>> + * disabled, then check if it can be handled before the iret
>> + * (which may re-enable interrupts).
>> + */
>> + mov %rsp, %rdi
>> + call check_hv_pending
>> +#endif
>> UNWIND_HINT_REGS
>> /*
>> @@ -1188,6 +1197,15 @@ SYM_CODE_START(error_entry)
>> SYM_CODE_END(error_entry)
>> SYM_CODE_START_LOCAL(error_return)
>> +#ifdef CONFIG_AMD_MEM_ENCRYPT
>> + /*
>> + * If a #HV was delivered during execution and interrupts were
>> + * disabled, then check if it can be handled before the iret
>> + * (which may re-enable interrupts).
>> + */
>> + mov %rsp, %rdi
>> + call check_hv_pending
>> +#endif
>> UNWIND_HINT_REGS
>> DEBUG_ENTRY_ASSERT_IRQS_OFF
>> testb $3, CS(%rsp)
>> diff --git a/arch/x86/include/asm/irqflags.h
>> b/arch/x86/include/asm/irqflags.h
>> index 7793e52d6237..fe46e59168dd 100644
>> --- a/arch/x86/include/asm/irqflags.h
>> +++ b/arch/x86/include/asm/irqflags.h
>> @@ -14,6 +14,10 @@
>> /*
>> * Interrupt control:
>> */
>> +#ifdef CONFIG_AMD_MEM_ENCRYPT
>> +void check_hv_pending(struct pt_regs *regs);
>> +void check_hv_pending_irq_enable(void);
>> +#endif
>> /* Declaration required for gcc < 4.9 to prevent
>> -Werror=missing-prototypes */
>> extern inline unsigned long native_save_fl(void);
>> @@ -43,12 +47,18 @@ static __always_inline void native_irq_disable(void)
>> static __always_inline void native_irq_enable(void)
>> {
>> asm volatile("sti": : :"memory");
>> +#ifdef CONFIG_AMD_MEM_ENCRYPT
>> + check_hv_pending_irq_enable();
>> +#endif
>> }
>> static inline __cpuidle void native_safe_halt(void)
>> {
>> mds_idle_clear_cpu_buffers();
>> asm volatile("sti; hlt": : :"memory");
>> +#ifdef CONFIG_AMD_MEM_ENCRYPT
>> + check_hv_pending_irq_enable();
>> +#endif
>> }
>> static inline __cpuidle void native_halt(void)
>> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
>> index a8862a2eff67..fe5e5e41433d 100644
>> --- a/arch/x86/kernel/sev.c
>> +++ b/arch/x86/kernel/sev.c
>> @@ -179,6 +179,45 @@ void noinstr __sev_es_ist_enter(struct pt_regs
>> *regs)
>> this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
>> }
>> +static void do_exc_hv(struct pt_regs *regs)
>> +{
>> + /* Handle #HV exception. */
>> +}
>> +
>> +void check_hv_pending(struct pt_regs *regs)
>> +{
>> + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
>> + return;
>> +
>> + if ((regs->flags & X86_EFLAGS_IF) == 0)
>> + return;
>
> Will this return and prevent the guest from executing NMIs
> while irqs are disabled?
I think we need to handle NMIs even when irqs are disabled.
Since we reset "no_further_signal" in hv_raw_handle_exception()
and return from check_hv_pending() when irqs are disabled,
couldn't this result in the loss or delay of an NMI event?
Thanks,
Pankaj
Powered by blists - more mailing lists