Message-ID: <87pmtsog4c.fsf@vitty.brq.redhat.com>
Date: Wed, 01 Sep 2021 10:27:15 +0200
From: Vitaly Kuznetsov <vkuznets@...hat.com>
To: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>
Cc: Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org,
syzbot+200c08e88ae818f849ce@...kaller.appspotmail.com
Subject: Re: [PATCH 1/3] Revert "KVM: x86: mmu: Add guest physical address
check in translate_gpa()"

Sean Christopherson <seanjc@...gle.com> writes:

> Revert a misguided illegal GPA check when "translating" a non-nested GPA.
> The check is woefully incomplete as it does not fill in @exception as
> expected by all callers, which leads to KVM attempting to inject a bogus
> exception, potentially exposing kernel stack information in the process.
>
> WARNING: CPU: 0 PID: 8469 at arch/x86/kvm/x86.c:525 exception_type+0x98/0xb0 arch/x86/kvm/x86.c:525
> CPU: 1 PID: 8469 Comm: syz-executor531 Not tainted 5.14.0-rc7-syzkaller #0
> RIP: 0010:exception_type+0x98/0xb0 arch/x86/kvm/x86.c:525
> Call Trace:
> x86_emulate_instruction+0xef6/0x1460 arch/x86/kvm/x86.c:7853
> kvm_mmu_page_fault+0x2f0/0x1810 arch/x86/kvm/mmu/mmu.c:5199
> handle_ept_misconfig+0xdf/0x3e0 arch/x86/kvm/vmx/vmx.c:5336
> __vmx_handle_exit arch/x86/kvm/vmx/vmx.c:6021 [inline]
> vmx_handle_exit+0x336/0x1800 arch/x86/kvm/vmx/vmx.c:6038
> vcpu_enter_guest+0x2a1c/0x4430 arch/x86/kvm/x86.c:9712
> vcpu_run arch/x86/kvm/x86.c:9779 [inline]
> kvm_arch_vcpu_ioctl_run+0x47d/0x1b20 arch/x86/kvm/x86.c:10010
> kvm_vcpu_ioctl+0x49e/0xe50 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3652
>
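
To spell out the failure mode for anyone following along from the
syzkaller report: callers typically keep an uninitialized
'struct x86_exception' (often on the stack) and expect the translation
path to fill in *all* of it on failure. Rough sketch only -- names and
layout approximate, not verbatim KVM code:

    struct x86_exception ex;    /* caller's stack, never zeroed */

    if (translate_gpa(vcpu, gpa, access, &ex) == UNMAPPED_GVA) {
            /*
             * The reverted check only did
             *     ex.error_code |= PFERR_RSVD_MASK;
             * so ex.vector, ex.address, etc. still hold whatever was on
             * the stack, and that garbage vector is what later reaches
             * exception_type() and trips the WARN above.
             */
            /* ... hand 'ex' to the exception injection path ... */
    }
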
> The bug has escaped notice because practically speaking the GPA check is
> useless. The GPA check in question only comes into play when KVM is
> walking guest page tables (or "translating" CR3), and KVM already handles
> illegal GPA checks by setting reserved bits in rsvd_bits_mask for each
> PxE, or in the case of CR3 for loading PTDPTRs, manually checks for an
> illegal CR3. This particular failure doesn't hit the existing reserved
> bits checks because syzbot sets guest.MAXPHYADDR=1, and IA32 architecture
> simply doesn't allow for such an absurd MAXPHADDR, e.g. 32-bit paging
"MAXPHYADDR"
> doesn't define any reserved PA bits checks, which KVM emulates by only
> incorporating the reserved PA bits into the "high" bits, i.e. bits 63:32.
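
In other words, with legacy 32-bit paging the PTEs themselves are only
32 bits wide, so a mask that only covers bits 63:32 can never match.
Rough illustration of the idea (not the actual reset_rsvds_bits_mask()
code; 'high_bits_rsvd' is just a placeholder for the mask KVM builds):

    u64 high_bits_rsvd = ...;   /* covers bits 63:32 at most */
    u64 pte = (u64)pte32;       /* legacy PTEs zero-extend, so bits 63:32 == 0 */

    if (pte & high_bits_rsvd)   /* never true, even with guest.MAXPHYADDR == 1 */
            /* flag a reserved-bits #PF */;
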
>
> Simply remove the bogus check. There is zero meaningful value and no
> architectural justification for supporting guest.MAXPHYADDR < 32, and
> properly filling the exception would introduce non-trivial complexity.
>
> This reverts commit ec7771ab471ba6a945350353617e2e3385d0e013.
>
> Fixes: ec7771ab471b ("KVM: x86: mmu: Add guest physical address check in translate_gpa()")
> Cc: stable@...r.kernel.org
> Reported-by: syzbot+200c08e88ae818f849ce@...kaller.appspotmail.com
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
> arch/x86/kvm/mmu/mmu.c | 6 ------
> 1 file changed, 6 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 4853c033e6ce..4b7908187d05 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -334,12 +334,6 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
> static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
> struct x86_exception *exception)
> {
> - /* Check if guest physical address doesn't exceed guest maximum */
> - if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
> - exception->error_code |= PFERR_RSVD_MASK;
> - return UNMAPPED_GVA;
> - }
> -
> return gpa;
> }

Reviewed-by: Vitaly Kuznetsov <vkuznets@...hat.com>

I'm wondering, however, whether it would also make sense to forbid
setting a nonsensical MAXPHYADDR in the first place, something like
this (compile-tested only):

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index fe03bd978761..42e71ac8ff31 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -73,25 +73,6 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
return NULL;
}
-static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
-{
- struct kvm_cpuid_entry2 *best;
-
- /*
- * The existing code assumes virtual address is 48-bit or 57-bit in the
- * canonical address checks; exit if it is ever changed.
- */
- best = cpuid_entry2_find(entries, nent, 0x80000008, 0);
- if (best) {
- int vaddr_bits = (best->eax & 0xff00) >> 8;
-
- if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
- return -EINVAL;
- }
-
- return 0;
-}
-
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -208,20 +189,48 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_mmu_after_set_cpuid(vcpu);
}
-int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
+static int __cpuid_query_maxphyaddr(struct kvm_cpuid_entry2 *entries, int nent)
{
struct kvm_cpuid_entry2 *best;
- best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+ best = cpuid_entry2_find(entries, nent, 0x80000000, 0);
if (!best || best->eax < 0x80000008)
goto not_found;
- best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ best = cpuid_entry2_find(entries, nent, 0x80000008, 0);
if (best)
return best->eax & 0xff;
not_found:
return 36;
}
+int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+ return __cpuid_query_maxphyaddr(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
+}
+
+static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ /* guest.MAXPHYADDR < 32 is completely nonsensical */
+ if (__cpuid_query_maxphyaddr(entries, nent) < 32)
+ return -EINVAL;
+
+ /*
+ * The existing code assumes virtual address is 48-bit or 57-bit in the
+ * canonical address checks; exit if it is ever changed.
+ */
+ best = cpuid_entry2_find(entries, nent, 0x80000008, 0);
+ if (best) {
+ int vaddr_bits = (best->eax & 0xff00) >> 8;
+
+ if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
* This "raw" version returns the reserved GPA bits without any adjustments for
* encryption technologies that usurp bits. The raw mask should be used if and
--
Vitaly