Message-ID: <330cd5ec-813b-8835-2178-32e14190b9e6@redhat.com>
Date: Mon, 11 Mar 2019 16:21:56 +0100
From: Paolo Bonzini <pbonzini@...hat.com>
To: Xiaoyao Li <xiaoyao.li@...ux.intel.com>
Cc: linux-kernel <linux-kernel@...r.kernel.org>, x86 <x86@...nel.org>,
kvm@...r.kernel.org
Subject: Re: [PATCH v4 17/17] kvm: vmx: Emulate TEST_CTL MSR
On 11/03/19 16:10, Xiaoyao Li wrote:
> On Mon, 2019-03-11 at 14:31 +0100, Paolo Bonzini wrote:
>> On 09/03/19 03:31, Xiaoyao Li wrote:
>>> Hi, Paolo,
>>>
>>> Do you have any comments on this patch?
>>>
>>> We are preparing v5 patches for split lock detection; if you have any
>>> comments about this one, please let us know.
>>
>> No, my only comment is that it should be placed _before_ the other two
>> for bisectability. I think I have already sent that small remark.
>>
>> Thanks,
>>
>> Paolo
>
> I cannot find the small remark you sent before. Maybe I missed something.
> But I'm confused why it should be placed _before_ the other two. This patch
> just uses the vmx->core_capability that is defined in the previous patch.
Because otherwise the guest can see core_capability != 0 and will #GP
when trying to use split lock detection.
But you are right, this patch must be the last. Instead,
kvm_get_core_capability() should always return 0 until the previous
patch. Then in this patch you add the rdmsr and boot_cpu_has() tests.
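For the record, a minimal sketch of that staging (the helper and define
names follow the quoted series; the exact MSR index is an assumption):

	/*
	 * Sketch of the staging suggested above.  In the earlier patch
	 * this helper would simply "return 0;" so the guest never sees
	 * CORE_CAP_SPLIT_LOCK_DETECT; only the final patch adds the
	 * boot_cpu_has()/rdmsr checks below, so the guest is never told
	 * about a capability the host cannot back.
	 */
	static u64 kvm_get_core_capability(void)
	{
		u64 data = 0;

		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
			rdmsrl(MSR_IA32_CORE_CAPABILITY, data);

		return data & CORE_CAP_SPLIT_LOCK_DETECT;
	}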
Paolo
>>>> + if (!(vmx->core_capability & CORE_CAP_SPLIT_LOCK_DETECT))
>>>> + return 1;
>>>> +
>>>> + if (data & ~TEST_CTL_ENABLE_SPLIT_LOCK_DETECT)
>>>> + return 1;
>>>> + vmx->msr_test_ctl = data;
>>>> + break;
>>>> case MSR_EFER:
>>>> ret = kvm_set_msr_common(vcpu, msr_info);
>>>> break;
>>>> @@ -4108,6 +4122,9 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
>>>>
>>>> vmx->arch_capabilities = kvm_get_arch_capabilities();
>>>>
>>>> + /* disable AC split lock by default */
>>>> + vmx->msr_test_ctl = 0;
>>>> +
>>>> vm_exit_controls_init(vmx, vmx_vmexit_ctrl());
>>>>
>>>> /* 22.2.1, 20.8.1 */
>>>> @@ -4145,6 +4162,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
>>>>
>>>> vmx->rmode.vm86_active = 0;
>>>> vmx->spec_ctrl = 0;
>>>> + vmx->msr_test_ctl = 0;
>>>>
>>>> vcpu->arch.microcode_version = 0x100000000ULL;
>>>> vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
>>>> @@ -6344,6 +6362,21 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
>>>> msrs[i].host, false);
>>>> }
>>>>
>>>> +static void atomic_switch_msr_test_ctl(struct vcpu_vmx *vmx)
>>>> +{
>>>> + u64 host_msr_test_ctl;
>>>> +
>>>> + if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
>>>> + return;
>>>> +
>>>> + rdmsrl(MSR_TEST_CTL, host_msr_test_ctl);
>>>> + if (host_msr_test_ctl == vmx->msr_test_ctl)
>>>> + clear_atomic_switch_msr(vmx, MSR_TEST_CTL);
>>>> + else
>>>> + add_atomic_switch_msr(vmx, MSR_TEST_CTL, vmx->msr_test_ctl,
>>>> + host_msr_test_ctl, false);
>>>> +}
>>>> +
>>>> static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
>>>> {
>>>> vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
>>>> @@ -6585,6 +6618,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
>>>>
>>>> atomic_switch_perf_msrs(vmx);
>>>>
>>>> + atomic_switch_msr_test_ctl(vmx);
>>>> +
>>>> vmx_update_hv_timer(vcpu);
>>>>
>>>> /*
>>>> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
>>>> index cc22379991f3..e8831609c6c3 100644
>>>> --- a/arch/x86/kvm/vmx/vmx.h
>>>> +++ b/arch/x86/kvm/vmx/vmx.h
>>>> @@ -191,6 +191,7 @@ struct vcpu_vmx {
>>>> u64 msr_guest_kernel_gs_base;
>>>> #endif
>>>>
>>>> + u64 msr_test_ctl;
>>>> u64 core_capability;
>>>> u64 arch_capabilities;
>>>> u64 spec_ctrl;
>>
>>
>