Date:   Tue, 7 Apr 2020 11:21:11 -0700
From:   Kees Cook <keescook@...omium.org>
To:     Balbir Singh <sblbir@...zon.com>
Cc:     tglx@...utronix.de, linux-kernel@...r.kernel.org,
        jpoimboe@...hat.com, tony.luck@...el.com, benh@...nel.crashing.org,
        x86@...nel.org, dave.hansen@...el.com
Subject: Re: [PATCH v2 1/4] arch/x86/kvm: Refactor l1d flush lifecycle management

On Mon, Apr 06, 2020 at 01:19:43PM +1000, Balbir Singh wrote:
> Split out the allocation and free routines so they can be reused in a
> follow-up set of patches (for L1D flushing).
> 
> Signed-off-by: Balbir Singh <sblbir@...zon.com>

Reviewed-by: Kees Cook <keescook@...omium.org>
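
For anyone wanting to reuse these helpers outside of KVM, here is a
minimal sketch of the intended calling pattern (the caller below is
hypothetical; only alloc_l1d_flush_pages() and cleanup_l1d_flush_pages()
come from this patch):

	#include <linux/errno.h>
	#include <asm/cacheflush.h>

	/* Hypothetical user of the helpers split out by this patch. */
	static void *my_l1d_flush_pages;

	static int my_feature_setup(void)
	{
		/*
		 * Allocates 1 << L1D_CACHE_ORDER pages, each filled with
		 * a distinct pattern so KSM cannot merge them.
		 */
		my_l1d_flush_pages = alloc_l1d_flush_pages();
		if (!my_l1d_flush_pages)
			return -ENOMEM;
		return 0;
	}

	static void my_feature_teardown(void)
	{
		if (my_l1d_flush_pages) {
			cleanup_l1d_flush_pages(my_l1d_flush_pages);
			my_l1d_flush_pages = NULL;
		}
	}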

-Kees

> ---
>  arch/x86/include/asm/cacheflush.h |  3 +++
>  arch/x86/kernel/Makefile          |  1 +
>  arch/x86/kernel/l1d_flush.c       | 36 +++++++++++++++++++++++++++++++
>  arch/x86/kvm/vmx/vmx.c            | 25 +++------------------
>  4 files changed, 43 insertions(+), 22 deletions(-)
>  create mode 100644 arch/x86/kernel/l1d_flush.c
> 
> diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
> index 63feaf2a5f93..6419a4cef0e8 100644
> --- a/arch/x86/include/asm/cacheflush.h
> +++ b/arch/x86/include/asm/cacheflush.h
> @@ -6,6 +6,9 @@
>  #include <asm-generic/cacheflush.h>
>  #include <asm/special_insns.h>
>  
> +#define L1D_CACHE_ORDER 4
>  void clflush_cache_range(void *addr, unsigned int size);
> +void *alloc_l1d_flush_pages(void);
> +void cleanup_l1d_flush_pages(void *l1d_flush_pages);
>  
>  #endif /* _ASM_X86_CACHEFLUSH_H */
> diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
> index d6d61c4455fa..48f443e6c2de 100644
> --- a/arch/x86/kernel/Makefile
> +++ b/arch/x86/kernel/Makefile
> @@ -160,3 +160,4 @@ ifeq ($(CONFIG_X86_64),y)
>  endif
>  
>  obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT)	+= ima_arch.o
> +obj-y						+= l1d_flush.o
> diff --git a/arch/x86/kernel/l1d_flush.c b/arch/x86/kernel/l1d_flush.c
> new file mode 100644
> index 000000000000..05f375c33423
> --- /dev/null
> +++ b/arch/x86/kernel/l1d_flush.c
> @@ -0,0 +1,36 @@
> +#include <linux/mm.h>
> +#include <asm/cacheflush.h>
> +
> +void *alloc_l1d_flush_pages(void)
> +{
> +	struct page *page;
> +	void *l1d_flush_pages = NULL;
> +	int i;
> +
> +	/*
> +	 * This allocation for l1d_flush_pages is not tied to a VM/task's
> +	 * lifetime and so should not be charged to a memcg.
> +	 */
> +	page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
> +	if (!page)
> +		return NULL;
> +	l1d_flush_pages = page_address(page);
> +
> +	/*
> +	 * Initialize each page with a different pattern in
> +	 * order to protect against KSM in the nested
> +	 * virtualization case.
> +	 */
> +	for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
> +		memset(l1d_flush_pages + i * PAGE_SIZE, i + 1,
> +				PAGE_SIZE);
> +	}
> +	return l1d_flush_pages;
> +}
> +EXPORT_SYMBOL_GPL(alloc_l1d_flush_pages);
> +
> +void cleanup_l1d_flush_pages(void *l1d_flush_pages)
> +{
> +	free_pages((unsigned long)l1d_flush_pages, L1D_CACHE_ORDER);
> +}
> +EXPORT_SYMBOL_GPL(cleanup_l1d_flush_pages);
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 9eaccf92d616..209e63798435 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -203,14 +203,10 @@ static const struct {
>  	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
>  };
>  
> -#define L1D_CACHE_ORDER 4
>  static void *vmx_l1d_flush_pages;
>  
>  static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
>  {
> -	struct page *page;
> -	unsigned int i;
> -
>  	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
>  		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
>  		return 0;
> @@ -253,24 +249,9 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
>  
>  	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
>  	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
> -		/*
> -		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
> -		 * lifetime and so should not be charged to a memcg.
> -		 */
> -		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
> -		if (!page)
> +		vmx_l1d_flush_pages = alloc_l1d_flush_pages();
> +		if (!vmx_l1d_flush_pages)
>  			return -ENOMEM;
> -		vmx_l1d_flush_pages = page_address(page);
> -
> -		/*
> -		 * Initialize each page with a different pattern in
> -		 * order to protect against KSM in the nested
> -		 * virtualization case.
> -		 */
> -		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
> -			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
> -			       PAGE_SIZE);
> -		}
>  	}
>  
>  	l1tf_vmx_mitigation = l1tf;
> @@ -7992,7 +7973,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
>  static void vmx_cleanup_l1d_flush(void)
>  {
>  	if (vmx_l1d_flush_pages) {
> -		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
> +		cleanup_l1d_flush_pages(vmx_l1d_flush_pages);
>  		vmx_l1d_flush_pages = NULL;
>  	}
>  	/* Restore state so sysfs ignores VMX */
> -- 
> 2.17.1
> 

-- 
Kees Cook
