Message-Id: <20191211204753.242298-11-pomonis@google.com>
Date: Wed, 11 Dec 2019 12:47:50 -0800
From: Marios Pomonis <pomonis@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>, rkrcmar@...hat.com,
Sean Christopherson <sean.j.christopherson@...el.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>
Cc: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
"H. Peter Anvin" <hpa@...or.com>, x86@...nel.org,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Nick Finco <nifi@...gle.com>, Andrew Honig <ahonig@...gle.com>,
Marios Pomonis <pomonis@...gle.com>, stable@...r.kernel.org
Subject: [PATCH v2 10/13] KVM: x86: Protect memory accesses from
 Spectre-v1/L1TF attacks in vmx.c

This fixes Spectre-v1/L1TF vulnerabilities in
vmx_read_guest_seg_selector(), vmx_read_guest_seg_base(),
vmx_read_guest_seg_limit() and vmx_read_guest_seg_ar(). These
functions compute an index into the segment cache array from the
(attacker-influenced) segment value, so a mispredicted bounds check
could let them speculatively load out-of-bounds memory.

Fix this by clamping the index with array_index_nospec() before it is
used to dereference the array.
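For reference, this follows the usual Spectre-v1 hardening pattern
from <linux/nospec.h>: clamp the untrusted index before the dependent
load, so that even a speculatively executed access stays in bounds.
A minimal userspace sketch of the idea follows; index_nospec() and
seg_cache_entry below are simplified stand-ins for illustration, not
the kernel implementation:

  #include <stddef.h>
  #include <stdio.h>

  /*
   * Conceptual stand-in for the kernel's array_index_nospec():
   * returns index if it is in bounds, 0 otherwise.  The real helper
   * builds the mask with arch-specific code that is guaranteed
   * branchless; this C ternary is illustrative only and may compile
   * to a (speculatable) branch.
   */
  static size_t index_nospec(size_t index, size_t size)
  {
          size_t mask = (index < size) ? (size_t)-1 : 0;

          return index & mask;
  }

  /* Hypothetical miniature of vmx->segment_cache.seg[]. */
  struct seg_cache_entry {
          unsigned short selector;
  };

  int main(void)
  {
          struct seg_cache_entry seg[8] = { { 0x10 } };
          size_t attacker_seg = 1000;     /* attacker-influenced, out of bounds */

          /*
           * seg[attacker_seg] would be rejected architecturally by a
           * bounds check, but could still be loaded under speculation;
           * clamping first makes the speculative access harmless
           * (the index becomes 0).
           */
          size_t index = index_nospec(attacker_seg, 8);

          printf("selector = 0x%x\n", seg[index].selector);
          return 0;
  }

In the patch itself, ARRAY_SIZE(vmx->segment_cache.seg) supplies the
bound, so the clamp stays in sync with the array if the segment cache
layout ever changes.
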
Fixes: 2fb92db1ec08 ("KVM: VMX: Cache vmcs segment fields")
Signed-off-by: Nick Finco <nifi@...gle.com>
Signed-off-by: Marios Pomonis <pomonis@...gle.com>
Reviewed-by: Andrew Honig <ahonig@...gle.com>
Cc: stable@...r.kernel.org
---
arch/x86/kvm/vmx/vmx.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d39475e2d44e..82b25f1812aa 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -753,7 +753,9 @@ static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
 
 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
 {
-	u16 *p = &vmx->segment_cache.seg[seg].selector;
+	size_t size = ARRAY_SIZE(vmx->segment_cache.seg);
+	size_t index = array_index_nospec(seg, size);
+	u16 *p = &vmx->segment_cache.seg[index].selector;
 
 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
 		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
@@ -762,7 +764,9 @@ static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
 
 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
 {
-	ulong *p = &vmx->segment_cache.seg[seg].base;
+	size_t size = ARRAY_SIZE(vmx->segment_cache.seg);
+	size_t index = array_index_nospec(seg, size);
+	ulong *p = &vmx->segment_cache.seg[index].base;
 
 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
 		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
@@ -771,7 +775,9 @@ static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
 
 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
 {
-	u32 *p = &vmx->segment_cache.seg[seg].limit;
+	size_t size = ARRAY_SIZE(vmx->segment_cache.seg);
+	size_t index = array_index_nospec(seg, size);
+	u32 *p = &vmx->segment_cache.seg[index].limit;
 
 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
 		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
@@ -780,7 +786,9 @@ static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
 
 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
 {
-	u32 *p = &vmx->segment_cache.seg[seg].ar;
+	size_t size = ARRAY_SIZE(vmx->segment_cache.seg);
+	size_t index = array_index_nospec(seg, size);
+	u32 *p = &vmx->segment_cache.seg[index].ar;
 
 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
 		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
--
2.24.0.525.g8f36a354ae-goog