Message-ID: <20180109142242.GD18661@char.us.oracle.com>
Date:   Tue, 9 Jan 2018 09:22:42 -0500
From:   Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
To:     Paolo Bonzini <pbonzini@...hat.com>
Cc:     linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
        rkrcmar@...hat.com, liran.alon@...cle.com, jmattson@...gle.com,
        aliguori@...zon.com, thomas.lendacky@....com, dwmw@...zon.co.uk,
        bp@...en8.de, x86@...nel.org
Subject: Re: [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and
 MSR_IA32_PRED_CMD down to guest

On Tue, Jan 09, 2018 at 01:03:08PM +0100, Paolo Bonzini wrote:
> Direct access to MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD is important
> for performance.  Allow load/store of MSR_IA32_SPEC_CTRL, restore guest
> IBRS on VM entry and set it to 0 on VM exit (because Linux does not use
> it yet).
> 
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
>  arch/x86/kvm/svm.c | 42 ++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 42 insertions(+)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 31ace8d7774a..934a21e02e03 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -183,6 +183,8 @@ struct vcpu_svm {
>  		u64 gs_base;
>  	} host;
>  
> +	u64 spec_ctrl;
> +
>  	u32 *msrpm;
>  
>  	ulong nmi_iret_rip;
> @@ -248,6 +250,8 @@ struct amd_svm_iommu_ir {
>  	{ .index = MSR_CSTAR,				.always = true  },
>  	{ .index = MSR_SYSCALL_MASK,			.always = true  },
>  #endif
> +	{ .index = MSR_IA32_SPEC_CTRL,			.always = true  },
> +	{ .index = MSR_IA32_PRED_CMD,			.always = true  },
>  	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
>  	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
>  	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
> @@ -283,6 +287,8 @@ struct amd_svm_iommu_ir {
>  /* enable/disable Virtual GIF */
>  static int vgif = true;
>  module_param(vgif, int, 0444);
> + 
> +static bool __read_mostly have_spec_ctrl;
>  
>  static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
>  static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
> @@ -1135,6 +1141,17 @@ static __init int svm_hardware_setup(void)
>  			pr_info("Virtual GIF supported\n");
>  	}
>  
> +	/*
> +	 * FIXME: this is only needed until SPEC_CTRL is supported
> +	 * by upstream Linux in cpufeatures, then it can be replaced
> +	 * with static_cpu_has.
> +	 */
> +	have_spec_ctrl = cpu_has_spec_ctrl();
> +	if (have_spec_ctrl)
> +		pr_info("kvm: SPEC_CTRL available\n");
> +	else
> +		pr_info("kvm: SPEC_CTRL not available\n");

Perhaps just

	pr_info("kvm: SPEC_CTRL %s available\n", have_spec_ctrl ? "" : "not");

> +
>  	return 0;
>  
>  err:
> @@ -3599,6 +3616,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  	case MSR_VM_CR:
>  		msr_info->data = svm->nested.vm_cr_msr;
>  		break;
> +	case MSR_IA32_SPEC_CTRL:
> +		msr_info->data = svm->spec_ctrl;
> +		break;
>  	case MSR_IA32_UCODE_REV:
>  		msr_info->data = 0x01000065;
>  		break;
> @@ -3754,6 +3774,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  	case MSR_VM_IGNNE:
>  		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
>  		break;
> +	case MSR_IA32_SPEC_CTRL:
> +		svm->spec_ctrl = data;
> +		break;
>  	case MSR_IA32_APICBASE:
>  		if (kvm_vcpu_apicv_active(vcpu))
>  			avic_update_vapic_bar(to_svm(vcpu), data);
> @@ -4942,6 +4965,13 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  
>  	local_irq_enable();
>  
> +	/*
> +	 * MSR_IA32_SPEC_CTRL is restored after the last indirect branch
> +	 * before vmentry.
> +	 */
> +	if (have_spec_ctrl && svm->spec_ctrl != 0)
> +		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
> +
>  	asm volatile (
>  		"push %%" _ASM_BP "; \n\t"
>  		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
> @@ -5015,6 +5045,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  #endif
>  		);
>  
> +	if (have_spec_ctrl) {
> +		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
> +		if (svm->spec_ctrl != 0)

Perhaps just:

	if (svm->spec_ctrl) ?

And above too?
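
Applied to the vmentry path earlier in the patch, that would read:

	if (have_spec_ctrl && svm->spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);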
> +			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
> +	}
> +	/*
> +	 * Speculative execution past the above wrmsrl might encounter
> +	 * an indirect branch and use guest-controlled contents of the
> +	 * indirect branch predictor; block it.
> +	 */
> +	asm("lfence");

Don't you want this to be part of the if () .. else part?

Meaning:

	if (have_spec_ctrl && svm->spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
	else
		asm("lfence");

But ... I am missing something - AMD doesn't expose MSR 0x48
(SPEC_CTRL); they expose only 0x49 (PRED_CMD).

That is, only IBPB is needed on AMD? (I haven't actually seen any
official docs from AMD.)
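
If so, a minimal illustrative sketch of the IBPB side (not from this
patch: have_ibpb is a hypothetical flag mirroring have_spec_ctrl, and
PRED_CMD_IBPB names bit 0 of MSR 0x49):

	/*
	 * MSR_IA32_PRED_CMD is write-only: writing PRED_CMD_IBPB (bit 0)
	 * flushes the indirect branch predictor, and there is no state
	 * to read back or restore afterwards.
	 */
	if (have_ibpb)
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);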

> +
>  #ifdef CONFIG_X86_64
>  	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
>  #else
> -- 
> 1.8.3.1
> 
> 
