Message-ID: <20240609154945.55332-19-nsaenz@amazon.com>
Date: Sun, 9 Jun 2024 15:49:47 +0000
From: Nicolas Saenz Julienne <nsaenz@...zon.com>
To: <linux-kernel@...r.kernel.org>, <kvm@...r.kernel.org>
CC: <pbonzini@...hat.com>, <seanjc@...gle.com>, <vkuznets@...hat.com>,
<linux-doc@...r.kernel.org>, <linux-hyperv@...r.kernel.org>,
<linux-arch@...r.kernel.org>, <linux-trace-kernel@...r.kernel.org>,
<graf@...zon.de>, <dwmw2@...radead.org>, <paul@...zon.com>,
<nsaenz@...zon.com>, <mlevitsk@...hat.com>, <jgowans@...zon.com>,
<corbet@....net>, <decui@...rosoft.com>, <tglx@...utronix.de>,
<mingo@...hat.com>, <bp@...en8.de>, <dave.hansen@...ux.intel.com>,
<x86@...nel.org>, <amoorthy@...gle.com>
Subject: [PATCH 18/18] KVM: x86: hyper-v: Handle VSM hcalls in user-space

Let user-space handle all hypercalls that fall under the AccessVsm
partition privilege flag. That is:
- HvCallModifyVtlProtectionMask
- HvCallEnablePartitionVtl
- HvCallEnableVpVtl
- HvCallVtlCall
- HvCallVtlReturn
All of these are VTL-aware and therefore need to be handled in user-space.
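
To illustrate the expected flow, here is a minimal user-space sketch of how a
VMM might complete these exits. The exit layout is the existing
KVM_EXIT_HYPERV / KVM_EXIT_HYPERV_HCALL uAPI (the XMM argument handling seen
in the diff below is omitted); the handle_*() helpers are hypothetical, and
the hypercall/status constants simply mirror the TLFS values used in the
patch:

  #include <linux/kvm.h>
  #include <linux/types.h>

  #define HVCALL_MODIFY_VTL_PROTECTION_MASK 0x000c
  #define HVCALL_ENABLE_PARTITION_VTL       0x000d
  #define HVCALL_ENABLE_VP_VTL              0x000f
  #define HVCALL_VTL_CALL                   0x0011
  #define HVCALL_VTL_RETURN                 0x0012
  #define HV_STATUS_INVALID_HYPERCALL_CODE  2

  /* Hypothetical VMM helpers, declared only to keep the sketch complete. */
  extern void handle_vtl_switch(__u16 code);
  extern __u64 handle_vtl_protections(struct kvm_run *run);
  extern __u64 handle_vtl_enable(__u16 code, struct kvm_run *run);

  static void complete_vsm_hcall(struct kvm_run *run)
  {
          /* The hypercall call code sits in the low 16 bits of the input. */
          __u16 code = run->hyperv.u.hcall.input & 0xffff;
          __u64 status = HV_STATUS_INVALID_HYPERCALL_CODE;

          switch (code) {
          case HVCALL_VTL_CALL:
          case HVCALL_VTL_RETURN:
                  /* Switch VTLs; KVM doesn't write a result for these two. */
                  handle_vtl_switch(code);
                  return;
          case HVCALL_MODIFY_VTL_PROTECTION_MASK:
                  status = handle_vtl_protections(run);
                  break;
          case HVCALL_ENABLE_PARTITION_VTL:
          case HVCALL_ENABLE_VP_VTL:
                  status = handle_vtl_enable(code, run);
                  break;
          }

          /* KVM copies the result into the guest's registers on re-entry. */
          run->hyperv.u.hcall.result = status;
  }
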
Additionally, select KVM_GENERIC_MEMORY_ATTRIBUTES when CONFIG_KVM_HYPERV is
enabled, as it is needed to implement VTL memory protections.
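
As a sketch of how the memory attributes tie in, user-space could service the
page-protection side of HvCallModifyVtlProtectionMask with the existing
KVM_SET_MEMORY_ATTRIBUTES ioctl. The attribute flag below is only a
placeholder (name and value made up for the example) for whatever
access-restriction attributes the series defines, and which KVM VM object
stands in for the lower VTL is left entirely to the VMM:

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /*
   * Placeholder; the real access-restriction attribute names/values are
   * defined by the KVM_GENERIC_MEMORY_ATTRIBUTES support selected here.
   */
  #define KVM_MEMORY_ATTRIBUTE_NO_ACCESS  (1ULL << 0)

  /* Revoke a lower VTL's access to the GPA range [gpa, gpa + len). */
  static int restrict_vtl_range(int vm_fd, __u64 gpa, __u64 len)
  {
          struct kvm_memory_attributes attrs = {
                  .address    = gpa,
                  .size       = len,
                  .attributes = KVM_MEMORY_ATTRIBUTE_NO_ACCESS,
          };

          return ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
  }
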
Signed-off-by: Nicolas Saenz Julienne <nsaenz@...zon.com>
---
Documentation/virt/kvm/api.rst | 23 +++++++++++++++++++++++
arch/x86/kvm/Kconfig | 1 +
arch/x86/kvm/hyperv.c | 29 +++++++++++++++++++++++++----
include/asm-generic/hyperv-tlfs.h | 6 +++++-
4 files changed, 54 insertions(+), 5 deletions(-)
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 6d3bc5092ea63..77af2ccf49a30 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -8969,3 +8969,26 @@ HvCallGetVpIndexFromApicId. Currently, it is only used in conjunction with
HV_ACCESS_VSM, and immediately exits to userspace with KVM_EXIT_HYPERV_HCALL as
the reason. Userspace is expected to complete the hypercall before resuming
execution.
+
+10.4 HV_ACCESS_VSM
+------------------
+
+:Location: CPUID.40000003H:EBX[bit 16]
+
+This CPUID bit indicates that KVM supports HvCallModifyVtlProtectionMask,
+HvCallEnablePartitionVtl, HvCallEnableVpVtl, HvCallVtlCall, and
+HvCallVtlReturn. Additionally, as a prerequisite to implementing Hyper-V
+VSM, it also identifies the availability of HvTranslateVirtualAddress,
+as well as the VTL-aware aspects of HvCallSendSyntheticClusterIpi and
+HvCallSendSyntheticClusterIpiEx.
+
+All these hypercalls immediately exit with KVM_EXIT_HYPERV_HCALL as the reason.
+Userspace is expected to complete the hypercall before resuming execution.
+Note that both IPI hypercalls will only exit to userspace if the request is
+VTL-aware, which will only happen if HV_ACCESS_VSM is exposed to the guest.
+
+Access restriction memory attributes (4.141) are available to simplify
+HvCallModifyVtlProtectionMask's implementation.
+
+Finally, this CPUID bit also indicates that KVM_MP_STATE_HV_INACTIVE_VTL is
+available.
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index fec95a7702703..8d851fe3b8c25 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -157,6 +157,7 @@ config KVM_SMM
config KVM_HYPERV
bool "Support for Microsoft Hyper-V emulation"
depends on KVM
+ select KVM_GENERIC_MEMORY_ATTRIBUTES
default y
help
Provides KVM support for emulating Microsoft Hyper-V. This allows KVM
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index dd64f41dc835d..1158c59a92790 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -2388,7 +2388,12 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
}
}
-static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
+static inline bool kvm_hv_is_vtl_call_return(u16 code)
+{
+ return code == HVCALL_VTL_CALL || code == HVCALL_VTL_RETURN;
+}
+
+static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u16 code, u64 result)
{
u32 tlb_lock_count = 0;
int ret;
@@ -2400,9 +2405,12 @@ static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
result = HV_STATUS_INVALID_HYPERCALL_INPUT;
trace_kvm_hv_hypercall_done(result);
- kvm_hv_hypercall_set_result(vcpu, result);
++vcpu->stat.hypercalls;
+ /* VTL call and return don't set a hcall result */
+ if (!kvm_hv_is_vtl_call_return(code))
+ kvm_hv_hypercall_set_result(vcpu, result);
+
ret = kvm_skip_emulated_instruction(vcpu);
if (tlb_lock_count)
@@ -2459,7 +2467,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
kvm_hv_write_xmm(vcpu->run->hyperv.u.hcall.xmm);
}
- return kvm_hv_hypercall_complete(vcpu, result);
+ return kvm_hv_hypercall_complete(vcpu, code, result);
}
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
@@ -2513,6 +2521,7 @@ static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
case HVCALL_SEND_IPI_EX:
case HVCALL_GET_VP_REGISTERS:
case HVCALL_SET_VP_REGISTERS:
+ case HVCALL_MODIFY_VTL_PROTECTION_MASK:
case HVCALL_TRANSLATE_VIRTUAL_ADDRESS:
return true;
}
@@ -2552,6 +2561,12 @@ static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
*/
return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
+ case HVCALL_MODIFY_VTL_PROTECTION_MASK:
+ case HVCALL_ENABLE_PARTITION_VTL:
+ case HVCALL_ENABLE_VP_VTL:
+ case HVCALL_VTL_CALL:
+ case HVCALL_VTL_RETURN:
+ return hv_vcpu->cpuid_cache.features_ebx & HV_ACCESS_VSM;
case HVCALL_GET_VP_REGISTERS:
case HVCALL_SET_VP_REGISTERS:
return hv_vcpu->cpuid_cache.features_ebx &
@@ -2744,6 +2759,11 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
break;
}
goto hypercall_userspace_exit;
+ case HVCALL_MODIFY_VTL_PROTECTION_MASK:
+ case HVCALL_ENABLE_PARTITION_VTL:
+ case HVCALL_ENABLE_VP_VTL:
+ case HVCALL_VTL_CALL:
+ case HVCALL_VTL_RETURN:
case HVCALL_GET_VP_REGISTERS:
case HVCALL_SET_VP_REGISTERS:
case HVCALL_TRANSLATE_VIRTUAL_ADDRESS:
@@ -2765,7 +2785,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
}
hypercall_complete:
- return kvm_hv_hypercall_complete(vcpu, ret);
+ return kvm_hv_hypercall_complete(vcpu, hc.code, ret);
hypercall_userspace_exit:
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
@@ -2921,6 +2941,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
ent->ebx |= HV_POST_MESSAGES;
ent->ebx |= HV_SIGNAL_EVENTS;
ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS;
+ ent->ebx |= HV_ACCESS_VSM;
ent->ebx |= HV_ACCESS_VP_REGISTERS;
ent->ebx |= HV_START_VIRTUAL_PROCESSOR;
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index e24b88ec4ec00..6b12e5818292c 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -149,9 +149,13 @@ union hv_reference_tsc_msr {
/* Declare the various hypercall operations. */
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
-#define HVCALL_ENABLE_VP_VTL 0x000f
#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
#define HVCALL_SEND_IPI 0x000b
+#define HVCALL_MODIFY_VTL_PROTECTION_MASK 0x000c
+#define HVCALL_ENABLE_PARTITION_VTL 0x000d
+#define HVCALL_ENABLE_VP_VTL 0x000f
+#define HVCALL_VTL_CALL 0x0011
+#define HVCALL_VTL_RETURN 0x0012
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
#define HVCALL_SEND_IPI_EX 0x0015
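
As an aside on the new 10.4 section above: HV_ACCESS_VSM lives at
CPUID.40000003H:EBX[bit 16] and is now reported by KVM_GET_SUPPORTED_HV_CPUID,
so a VMM could probe it roughly as follows before exposing it with
KVM_SET_CPUID2 (sketch only; struct kvm_cpuid2 and the ioctl are existing
uAPI, the helper name is made up, error handling is trimmed):

  #include <stdbool.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  #define HYPERV_CPUID_FEATURES 0x40000003
  #define HV_ACCESS_VSM         (1u << 16)  /* CPUID.40000003H:EBX[bit 16] */

  static bool kvm_reports_access_vsm(int vcpu_fd)
  {
          int i, nent = 64;  /* comfortably covers the Hyper-V leaves */
          struct kvm_cpuid2 *cpuid;
          bool vsm = false;

          cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
          if (!cpuid)
                  return false;
          cpuid->nent = nent;

          if (!ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid))
                  for (i = 0; i < cpuid->nent; i++)
                          if (cpuid->entries[i].function == HYPERV_CPUID_FEATURES)
                                  vsm = cpuid->entries[i].ebx & HV_ACCESS_VSM;

          free(cpuid);
          return vsm;
  }
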
--
2.40.1