Message-ID: <202004071122.379AB7D@keescook>
Date: Tue, 7 Apr 2020 11:25:00 -0700
From: Kees Cook <keescook@...omium.org>
To: Balbir Singh <sblbir@...zon.com>
Cc: tglx@...utronix.de, linux-kernel@...r.kernel.org,
jpoimboe@...hat.com, tony.luck@...el.com, benh@...nel.crashing.org,
x86@...nel.org, dave.hansen@...el.com
Subject: Re: [PATCH v2 2/4] arch/x86: Refactor tlbflush and l1d flush
On Mon, Apr 06, 2020 at 01:19:44PM +1000, Balbir Singh wrote:
> Refactor the existing assembly bits into smaller helper functions
> and also abstract L1D_FLUSH into a helper function. Use these
> functions in kvm for L1D flushing.
>
> Signed-off-by: Balbir Singh <sblbir@...zon.com>
> ---
>  arch/x86/include/asm/cacheflush.h |  3 ++
>  arch/x86/kernel/l1d_flush.c       | 49 +++++++++++++++++++++++++++++++
>  arch/x86/kvm/vmx/vmx.c            | 31 ++++---------------
>  3 files changed, 57 insertions(+), 26 deletions(-)
>
> diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
> index 6419a4cef0e8..66a46db7aadd 100644
> --- a/arch/x86/include/asm/cacheflush.h
> +++ b/arch/x86/include/asm/cacheflush.h
> @@ -10,5 +10,8 @@
> void clflush_cache_range(void *addr, unsigned int size);
> void *alloc_l1d_flush_pages(void);
> void cleanup_l1d_flush_pages(void *l1d_flush_pages);
> +void populate_tlb_with_flush_pages(void *l1d_flush_pages);
> +void flush_l1d_cache_sw(void *l1d_flush_pages);
> +int flush_l1d_cache_hw(void);
>
> #endif /* _ASM_X86_CACHEFLUSH_H */
> diff --git a/arch/x86/kernel/l1d_flush.c b/arch/x86/kernel/l1d_flush.c
> index 05f375c33423..60499f773046 100644
> --- a/arch/x86/kernel/l1d_flush.c
> +++ b/arch/x86/kernel/l1d_flush.c
> @@ -34,3 +34,52 @@ void cleanup_l1d_flush_pages(void *l1d_flush_pages)
> 	free_pages((unsigned long)l1d_flush_pages, L1D_CACHE_ORDER);
> }
> EXPORT_SYMBOL_GPL(cleanup_l1d_flush_pages);
> +
> +void populate_tlb_with_flush_pages(void *l1d_flush_pages)
> +{
> +	int size = PAGE_SIZE << L1D_CACHE_ORDER;
> +
> +	asm volatile(
> +		/* First ensure the pages are in the TLB */
> +		"xorl %%eax, %%eax\n"
> +		".Lpopulate_tlb:\n\t"
> +		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
> +		"addl $4096, %%eax\n\t"
> +		"cmpl %%eax, %[size]\n\t"
> +		"jne .Lpopulate_tlb\n\t"
> +		"xorl %%eax, %%eax\n\t"
> +		"cpuid\n\t"
> +		:: [flush_pages] "r" (l1d_flush_pages),
> +		   [size] "r" (size)
> +		: "eax", "ebx", "ecx", "edx");
> +}
> +EXPORT_SYMBOL_GPL(populate_tlb_with_flush_pages);
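For anyone reading along, that loop is roughly the following C (only a
sketch with a made-up name, not something from the patch): read one
byte from every 4096-byte page so each page's translation gets pulled
into the TLB, then execute CPUID purely as a serializing instruction.

static void populate_tlb_sketch(const char *flush_pages, int size)
{
	int offset;

	/* One read per page loads that page's mapping into the TLB. */
	for (offset = 0; offset < size; offset += 4096)
		(void)*(volatile const char *)&flush_pages[offset];
	/* The asm version then runs CPUID to serialize execution. */
}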
> +
> +int flush_l1d_cache_hw(void)
> +{
> +	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
> +		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
> +		return 1;
> +	}
> +	return 0;
> +}
This return value is backwards from the kernel's normal use of "int". I
would expect 0 to mean "success" and non-zero to mean "failure". How
about:
int flush_l1d_cache_hw(void)
{
	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
		return 0;
	}
	return -ENOTSUPP;
}
> +EXPORT_SYMBOL_GPL(flush_l1d_cache_hw);
> +
> +void flush_l1d_cache_sw(void *l1d_flush_pages)
> +{
> +	int size = PAGE_SIZE << L1D_CACHE_ORDER;
> +
> +	asm volatile(
> +		/* Fill the cache */
> +		"xorl %%eax, %%eax\n"
> +		".Lfill_cache:\n"
> +		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
> +		"addl $64, %%eax\n\t"
> +		"cmpl %%eax, %[size]\n\t"
> +		"jne .Lfill_cache\n\t"
> +		"lfence\n"
> +		:: [flush_pages] "r" (l1d_flush_pages),
> +		   [size] "r" (size)
> +		: "eax", "ecx");
> +}
> +EXPORT_SYMBOL_GPL(flush_l1d_cache_sw);
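And the software fill, again only as a rough sketch: one read per
64-byte cache line across the whole buffer, which displaces whatever
the L1D held before, followed by LFENCE so later instructions can't
start until the loads have completed.

static void fill_l1d_sketch(const char *flush_pages, int size)
{
	int offset;

	/* One read per 64-byte line displaces the old L1D contents. */
	for (offset = 0; offset < size; offset += 64)
		(void)*(volatile const char *)&flush_pages[offset];
	/* The asm version ends with LFENCE to order the loads. */
}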
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 209e63798435..29dc5a5bb6ab 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -5956,8 +5956,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu,
> */
> static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
> {
> -	int size = PAGE_SIZE << L1D_CACHE_ORDER;
> -
> 	/*
> 	 * This code is only executed when the flush mode is 'cond' or
> 	 * 'always'
> @@ -5986,32 +5984,13 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
> 
> 	vcpu->stat.l1d_flush++;
> 
> -	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
> -		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
> +	if (flush_l1d_cache_hw())
> 		return;
> -	}
Then this becomes:
	if (flush_l1d_cache_hw() == 0)
		return;
(Or change it to a "bool", use true/false, and leave the above call
as-is.)
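The "bool" version would be roughly (untested sketch):

bool flush_l1d_cache_hw(void)
{
	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
		return true;
	}
	return false;
}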
Either way:
Reviewed-by: Kees Cook <keescook@...omium.org>
-Kees
>
> -	asm volatile(
> -		/* First ensure the pages are in the TLB */
> -		"xorl %%eax, %%eax\n"
> -		".Lpopulate_tlb:\n\t"
> -		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
> -		"addl $4096, %%eax\n\t"
> -		"cmpl %%eax, %[size]\n\t"
> -		"jne .Lpopulate_tlb\n\t"
> -		"xorl %%eax, %%eax\n\t"
> -		"cpuid\n\t"
> -		/* Now fill the cache */
> -		"xorl %%eax, %%eax\n"
> -		".Lfill_cache:\n"
> -		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
> -		"addl $64, %%eax\n\t"
> -		"cmpl %%eax, %[size]\n\t"
> -		"jne .Lfill_cache\n\t"
> -		"lfence\n"
> -		:: [flush_pages] "r" (vmx_l1d_flush_pages),
> -		   [size] "r" (size)
> -		: "eax", "ebx", "ecx", "edx");
> +	preempt_disable();
> +	populate_tlb_with_flush_pages(vmx_l1d_flush_pages);
> +	flush_l1d_cache_sw(vmx_l1d_flush_pages);
> +	preempt_enable();
> }
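Not a blocker, just a thought: I assume the preempt_disable()/enable()
pair is there so the task can't migrate to another CPU between
populating the TLB and filling the cache. A one-line comment would help
future readers, something like:

	/* Both phases must run on the same CPU for the flush to be useful. */
	preempt_disable();
	populate_tlb_with_flush_pages(vmx_l1d_flush_pages);
	flush_l1d_cache_sw(vmx_l1d_flush_pages);
	preempt_enable();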
>
> static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
> --
> 2.17.1
>
--
Kees Cook