Message-ID: <ZL410xRbInlQMc5y@chao-email>
Date: Mon, 24 Jul 2023 16:26:59 +0800
From: Chao Gao <chao.gao@...el.com>
To: Yang Weijiang <weijiang.yang@...el.com>
CC: <seanjc@...gle.com>, <pbonzini@...hat.com>, <peterz@...radead.org>,
<john.allen@....com>, <kvm@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <rick.p.edgecombe@...el.com>,
<binbin.wu@...ux.intel.com>
Subject: Re: [PATCH v4 10/20] KVM:x86: Make guest supervisor states as
non-XSAVE managed
On Thu, Jul 20, 2023 at 11:03:42PM -0400, Yang Weijiang wrote:
>+static void kvm_save_cet_supervisor_ssp(struct kvm_vcpu *vcpu)
>+{
>+ preempt_disable();
what's the purpose of disabling preemption?
>+ if (unlikely(guest_can_use(vcpu, X86_FEATURE_SHSTK))) {
>+ rdmsrl(MSR_IA32_PL0_SSP, vcpu->arch.cet_s_ssp[0]);
>+ rdmsrl(MSR_IA32_PL1_SSP, vcpu->arch.cet_s_ssp[1]);
>+ rdmsrl(MSR_IA32_PL2_SSP, vcpu->arch.cet_s_ssp[2]);
>+ /*
>+ * Omit reset to host PL{1,2}_SSP because Linux will never use
>+ * these MSRs.
>+ */
>+ wrmsrl(MSR_IA32_PL0_SSP, 0);
You don't need to reset the MSR: the current host kernel doesn't enable SSS,
so leaving the guest value in the MSR won't affect host behavior.
>+ }
>+ preempt_enable();
>+}
>+
>+static void kvm_reload_cet_supervisor_ssp(struct kvm_vcpu *vcpu)
>+{
>+ preempt_disable();
>+ if (unlikely(guest_can_use(vcpu, X86_FEATURE_SHSTK))) {
>+ wrmsrl(MSR_IA32_PL0_SSP, vcpu->arch.cet_s_ssp[0]);
>+ wrmsrl(MSR_IA32_PL1_SSP, vcpu->arch.cet_s_ssp[1]);
>+ wrmsrl(MSR_IA32_PL2_SSP, vcpu->arch.cet_s_ssp[2]);
>+ }
>+ preempt_enable();
>+}
Saving/loading PLx_SSP in kvm_sched_in/out() and in the VCPU_RUN ioctl is
sub-optimal. How about:
1. expose kvm_save/reload_cet_supervisor_ssp()
2. reload guest PLx_SSP in {vmx,svm}_prepare_switch_to_guest()
3. save guest PLx_SSP in vmx_prepare_switch_to_host() and
svm_prepare_host_switch()?
This way, the existing svm/vmx->guest_state_loaded flag can eliminate a lot of
unnecessary PLx_SSP MSR accesses (rough sketch below).
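
Something like this untested sketch on the VMX side (the function and field
names other than kvm_save/reload_cet_supervisor_ssp() already exist in vmx.c;
"..." marks existing code left unchanged):

void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->guest_state_loaded)
		return;
	...
	/* reload guest PLx_SSP once per host->guest state switch */
	kvm_reload_cet_supervisor_ssp(vcpu);
	vmx->guest_state_loaded = true;
}

static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
{
	if (!vmx->guest_state_loaded)
		return;
	...
	/* save guest PLx_SSP once per guest->host state switch */
	kvm_save_cet_supervisor_ssp(&vmx->vcpu);
	vmx->guest_state_loaded = false;
}

The SVM side would mirror this in svm_prepare_switch_to_guest() and
svm_prepare_host_switch() using svm->guest_state_loaded.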
>+
> int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
> {
> struct kvm_queued_exception *ex = &vcpu->arch.exception;
>@@ -11222,6 +11249,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
> kvm_sigset_activate(vcpu);
> kvm_run->flags = 0;
> kvm_load_guest_fpu(vcpu);
>+ kvm_reload_cet_supervisor_ssp(vcpu);
>
> kvm_vcpu_srcu_read_lock(vcpu);
> if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
>@@ -11310,6 +11338,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
> r = vcpu_run(vcpu);
>
> out:
>+ kvm_save_cet_supervisor_ssp(vcpu);
> kvm_put_guest_fpu(vcpu);
> if (kvm_run->kvm_valid_regs)
> store_regs(vcpu);
>@@ -12398,9 +12427,17 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
> pmu->need_cleanup = true;
> kvm_make_request(KVM_REQ_PMU, vcpu);
> }
>+
>+ kvm_reload_cet_supervisor_ssp(vcpu);
>+
> static_call(kvm_x86_sched_in)(vcpu, cpu);
> }
>
>+void kvm_arch_sched_out(struct kvm_vcpu *vcpu, int cpu)
>+{
the meaning of @cpu isn't clear; it isn't used here, and ...
>+ kvm_save_cet_supervisor_ssp(vcpu);
>+}
>+
> void kvm_arch_free_vm(struct kvm *kvm)
> {
> kfree(to_kvm_hv(kvm)->hv_pa_pg);
>diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>index d90331f16db1..b3032a5f0641 100644
>--- a/include/linux/kvm_host.h
>+++ b/include/linux/kvm_host.h
>@@ -1423,6 +1423,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
> int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
>
> void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
>+void kvm_arch_sched_out(struct kvm_vcpu *vcpu, int cpu);
>
> void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
> void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
>diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>index 66c1447d3c7f..42f28e8905e1 100644
>--- a/virt/kvm/kvm_main.c
>+++ b/virt/kvm/kvm_main.c
>@@ -5885,6 +5885,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
> {
> struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
>
>+ kvm_arch_sched_out(vcpu, 0);
always passing 0 looks problematic.
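
If @cpu is meant to stay unused, maybe just drop the parameter entirely
(my suggestion, not part of this patch):

	void kvm_arch_sched_out(struct kvm_vcpu *vcpu);

and have kvm_sched_out() call kvm_arch_sched_out(vcpu) directly.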
> if (current->on_rq) {
> WRITE_ONCE(vcpu->preempted, true);
> WRITE_ONCE(vcpu->ready, true);
>--
>2.27.0
>