Message-Id: <20220405070403.124326505@linuxfoundation.org>
Date: Tue, 5 Apr 2022 09:30:39 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Vitaly Kuznetsov <vkuznets@...hat.com>,
Paolo Bonzini <pbonzini@...hat.com>
Subject: [PATCH 5.15 777/913] KVM: x86: hyper-v: HVCALL_SEND_IPI_EX is an XMM fast hypercall
From: Vitaly Kuznetsov <vkuznets@...hat.com>
commit 47d3e5cdfe607ec6883eb0faa7acf05b8cb3f92a upstream.
It has been proven in practice that at least Windows Server 2019 tries
to use HVCALL_SEND_IPI_EX in 'XMM fast' mode when it has more than 64
vCPUs and needs to send an IPI to a vCPU with an index > 63. As with the
other XMM fast hypercalls (HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}{,_EX}),
this information is missing from the TLFS as of version 6.0b. Currently,
KVM rejects the call with HV_STATUS_INVALID_HYPERCALL_INPUT and Windows
crashes.
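
For illustration, here is a stand-alone C sketch of the 'XMM fast' input
layout that this patch teaches KVM to accept: the vector in the low 32 bits
of the first GPR parameter, vp_set.format in the second, the valid bank
mask in the low half of XMM0, and the sparse banks packed two per XMM
register starting from the high half of XMM0. The names xmm128 and
MAX_XMM_REGS are simplified stand-ins for the kernel's sse128_t and
HV_HYPERCALL_MAX_XMM_REGISTERS; this models the unpacking, it is not the
KVM code itself:

  #include <stdint.h>
  #include <stdio.h>

  #define MAX_XMM_REGS 6  /* stand-in for HV_HYPERCALL_MAX_XMM_REGISTERS */

  typedef struct { uint64_t lo, hi; } xmm128;  /* stand-in for sse128_t */

  int main(void)
  {
          /* GPR/XMM state a guest might set up for the fast call. */
          uint64_t ingpa = 0xfe;   /* low 32 bits carry the vector */
          uint64_t outgpa = 0;     /* becomes vp_set.format */
          xmm128 xmm[MAX_XMM_REGS] = {
                  { .lo = 0x3, .hi = 0xff }, /* bank mask; bank 0 */
                  { .lo = 0xf0 },            /* bank 1 */
          };
          uint64_t sparse_banks[2 * MAX_XMM_REGS - 1];
          uint32_t vector = (uint32_t)ingpa;
          int i, banks = __builtin_popcountll(xmm[0].lo);

          /*
           * Mirrors the patch's bounds check: the low half of XMM0 is
           * already consumed, so at most 2 * MAX_XMM_REGS - 1 banks fit.
           */
          if (banks > 2 * MAX_XMM_REGS - 1)
                  return 1;

          /* Each XMM register holds two sparse banks. */
          for (i = 0; i < banks; i++)
                  sparse_banks[i] = (i % 2) ? xmm[(i + 1) / 2].lo
                                            : xmm[i / 2].hi;

          printf("vector=%u format=%llu\n", vector,
                 (unsigned long long)outgpa);
          for (i = 0; i < banks; i++)
                  printf("bank[%d]=%#llx\n", i,
                         (unsigned long long)sparse_banks[i]);
          return 0;
  }

Built with gcc, this prints vector=254, bank[0]=0xff and bank[1]=0xf0 --
the same even/odd hi/lo unpacking the hunk below performs.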
Note: HVCALL_SEND_IPI is a 'standard' fast hypercall (not 'XMM fast'), as
all of its parameters fit into RDX:R8; KVM already handles that case
correctly.
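
As a quick sanity check on that claim, a simplified stand-in for the TLFS
struct hv_send_ipi layout (u32 vector, u32 reserved, u64 cpu_mask -- the
field names here are assumptions based on the TLFS, not a copy of the
kernel header) is exactly 16 bytes, i.e. the two 64-bit registers a
standard fast hypercall provides:

  #include <stdint.h>

  struct send_ipi {        /* simplified hv_send_ipi layout */
          uint32_t vector;
          uint32_t reserved;
          uint64_t cpu_mask;
  };

  /* 16 bytes total: fits in RDX:R8, no XMM registers needed. */
  _Static_assert(sizeof(struct send_ipi) == 2 * sizeof(uint64_t),
                 "HVCALL_SEND_IPI input fits in RDX:R8");

  int main(void) { return 0; }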
Cc: stable@...r.kernel.org # 5.14.x: 3244867af8c0: KVM: x86: Ignore sparse banks size for an "all CPUs", non-sparse IPI req
Cc: stable@...r.kernel.org # 5.14.x
Fixes: d8f5537a8816 ("KVM: hyper-v: Advertise support for fast XMM hypercalls")
Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
Message-Id: <20220222154642.684285-5-vkuznets@...hat.com>
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
arch/x86/kvm/hyperv.c | 52 ++++++++++++++++++++++++++++++++------------------
1 file changed, 34 insertions(+), 18 deletions(-)
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1889,6 +1889,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vc
int sparse_banks_len;
u32 vector;
bool all_cpus;
+ int i;
if (hc->code == HVCALL_SEND_IPI) {
if (!hc->fast) {
@@ -1909,9 +1910,15 @@ static u64 kvm_hv_send_ipi(struct kvm_vc
trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
} else {
- if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
- sizeof(send_ipi_ex))))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (!hc->fast) {
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
+ sizeof(send_ipi_ex))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ } else {
+ send_ipi_ex.vector = (u32)hc->ingpa;
+ send_ipi_ex.vp_set.format = hc->outgpa;
+ send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]);
+ }
trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
send_ipi_ex.vp_set.format,
@@ -1919,8 +1926,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vc
vector = send_ipi_ex.vector;
valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
- sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
- sizeof(sparse_banks[0]);
+ sparse_banks_len = bitmap_weight(&valid_bank_mask, 64);
all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
@@ -1930,12 +1936,27 @@ static u64 kvm_hv_send_ipi(struct kvm_vc
if (!sparse_banks_len)
goto ret_success;
- if (kvm_read_guest(kvm,
- hc->ingpa + offsetof(struct hv_send_ipi_ex,
- vp_set.bank_contents),
- sparse_banks,
- sparse_banks_len))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (!hc->fast) {
+ if (kvm_read_guest(kvm,
+ hc->ingpa + offsetof(struct hv_send_ipi_ex,
+ vp_set.bank_contents),
+ sparse_banks,
+ sparse_banks_len * sizeof(sparse_banks[0])))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ } else {
+ /*
+ * The lower half of XMM0 is already consumed, each XMM holds
+ * two sparse banks.
+ */
+ if (sparse_banks_len > (2 * HV_HYPERCALL_MAX_XMM_REGISTERS - 1))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ for (i = 0; i < sparse_banks_len; i++) {
+ if (i % 2)
+ sparse_banks[i] = sse128_lo(hc->xmm[(i + 1) / 2]);
+ else
+ sparse_banks[i] = sse128_hi(hc->xmm[i / 2]);
+ }
+ }
}
check_and_send_ipi:
@@ -2097,6 +2118,7 @@ static bool is_xmm_fast_hypercall(struct
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+ case HVCALL_SEND_IPI_EX:
return true;
}
@@ -2264,14 +2286,8 @@ int kvm_hv_hypercall(struct kvm_vcpu *vc
ret = kvm_hv_flush_tlb(vcpu, &hc);
break;
case HVCALL_SEND_IPI:
- if (unlikely(hc.rep)) {
- ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
- break;
- }
- ret = kvm_hv_send_ipi(vcpu, &hc);
- break;
case HVCALL_SEND_IPI_EX:
- if (unlikely(hc.fast || hc.rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}