lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Sun, 25 Nov 2018 23:38:53 +0200
From:   Liran Alon <liran.alon@...cle.com>
To:     Paolo Bonzini <pbonzini@...hat.com>
Cc:     linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
        Jim Mattson <jmattson@...gle.com>
Subject: Re: [PATCH] KVM: x86: Trace changes to active TSC offset regardless
 if vCPU in guest-mode



> On 25 Nov 2018, at 19:53, Paolo Bonzini <pbonzini@...hat.com> wrote:
> 
> For some reason, kvm_x86_ops->write_l1_tsc_offset() skipped trace
> of change to active TSC offset in case vCPU is in guest-mode.
> This patch changes write_l1_tsc_offset() behavior to trace any change
> to active TSC offset to aid debugging.  The VMX code is changed to
> look more similar to SVM, which is in my opinion nicer.
> 
> Based on a patch by Liran Alon.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>

I would have applied this refactoring change on top of my original version of this patch; that would have been easier to read and review.
But I guess it’s a matter of taste…
Anyway, code looks correct to me. Therefore:
Reviewed-by: Liran Alon <liran.alon@...cle.com>

> ---
> 	Untested still, but throwing it out because it seems pretty
> 	obvious...
> 
> arch/x86/kvm/svm.c |  9 +++++----
> arch/x86/kvm/vmx.c | 34 +++++++++++++++++-----------------
> 2 files changed, 22 insertions(+), 21 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index a24733aade4c..0d1a74069a9e 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1456,10 +1456,11 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
> 		g_tsc_offset = svm->vmcb->control.tsc_offset -
> 			       svm->nested.hsave->control.tsc_offset;
> 		svm->nested.hsave->control.tsc_offset = offset;
> -	} else
> -		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
> -					   svm->vmcb->control.tsc_offset,
> -					   offset);
> +	}
> +
> +	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
> +				   svm->vmcb->control.tsc_offset - g_tsc_offset,
> +				   offset);
> 
> 	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 764c23dc444f..e7d3f7d35355 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -3466,24 +3466,24 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
> 
> static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
> {
> -	u64 active_offset = offset;
> -	if (is_guest_mode(vcpu)) {
> -		/*
> -		 * We're here if L1 chose not to trap WRMSR to TSC. According
> -		 * to the spec, this should set L1's TSC; The offset that L1
> -		 * set for L2 remains unchanged, and still needs to be added
> -		 * to the newly set TSC to get L2's TSC.
> -		 */
> -		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
> -		if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING))
> -			active_offset += vmcs12->tsc_offset;
> -	} else {
> -		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
> -					   vmcs_read64(TSC_OFFSET), offset);
> -	}
> +	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
> +	u64 g_tsc_offset = 0;
> +
> +	/*
> +	 * We're here if L1 chose not to trap WRMSR to TSC. According
> +	 * to the spec, this should set L1's TSC; The offset that L1
> +	 * set for L2 remains unchanged, and still needs to be added
> +	 * to the newly set TSC to get L2's TSC.
> +	 */
> +	if (is_guest_mode(vcpu) &&
> +	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
> +		g_tsc_offset = vmcs12->tsc_offset;
> 
> -	vmcs_write64(TSC_OFFSET, active_offset);
> -	return active_offset;
> +	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
> +				   vcpu->arch.tsc_offset - g_tsc_offset,
> +				   offset);
> +	vmcs_write64(TSC_OFFSET, offset + g_tsc_offset);
> +	return offset + g_tsc_offset;
> }
> 
> /*
> -- 
> 1.8.3.1
> 



Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ