Message-ID: <f471cac2-89fd-2d61-04fa-2edf6ec438e5@redhat.com>
Date: Sat, 26 Sep 2020 00:22:16 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Alexander Graf <graf@...zon.com>, kvm list <kvm@...r.kernel.org>
Cc: Aaron Lewis <aaronlewis@...gle.com>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Jonathan Corbet <corbet@....net>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
KarimAllah Raslan <karahmed@...zon.de>,
Dan Carpenter <dan.carpenter@...cle.com>,
Maxim Levitsky <mlevitsk@...hat.com>,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v8 5/8] KVM: x86: SVM: Prevent MSR passthrough when MSR
access is denied

On 25/09/20 16:34, Alexander Graf wrote:
> We will introduce the concept of MSRs that may not be handled in kernel
> space soon. Some MSRs are directly passed through to the guest, effectively
> making them handled by KVM from user space's point of view.
>
> This patch introduces all logic required to ensure that MSRs that
> user space wants trapped are not marked as direct access for guests.
>
> Signed-off-by: Alexander Graf <graf@...zon.com>
>
> ---
>
> v7 -> v8:
>
> - s/KVM_MSR_ALLOW/KVM_MSR_FILTER/g
> ---

Ok, just some cosmetic fixes on top:

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index bb9f438e9e62..692110f2ac6f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -553,7 +553,7 @@ static int svm_cpu_init(int cpu)
 
 }
 
-static int direct_access_msr_idx(u32 msr)
+static int direct_access_msr_slot(u32 msr)
 {
 	u32 i;
 
@@ -561,33 +561,33 @@ static int direct_access_msr_idx(u32 msr)
 		if (direct_access_msrs[i].index == msr)
 			return i;
 
-	return -EINVAL;
+	return -ENOENT;
 }
 
 static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
 				     int write)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	int idx = direct_access_msr_idx(msr);
+	int slot = direct_access_msr_slot(msr);
 
-	if (idx == -EINVAL)
+	if (slot == -ENOENT)
 		return;
 
 	/* Set the shadow bitmaps to the desired intercept states */
 	if (read)
-		set_bit(idx, svm->shadow_msr_intercept.read);
+		set_bit(slot, svm->shadow_msr_intercept.read);
 	else
-		clear_bit(idx, svm->shadow_msr_intercept.read);
+		clear_bit(slot, svm->shadow_msr_intercept.read);
 
 	if (write)
-		set_bit(idx, svm->shadow_msr_intercept.write);
+		set_bit(slot, svm->shadow_msr_intercept.write);
 	else
-		clear_bit(idx, svm->shadow_msr_intercept.write);
+		clear_bit(slot, svm->shadow_msr_intercept.write);
 }
 
 static bool valid_msr_intercept(u32 index)
 {
-	return direct_access_msr_idx(index) != -EINVAL;
+	return direct_access_msr_slot(index) != -ENOENT;
 }
 
 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
@@ -609,7 +609,7 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
 	return !!test_bit(bit_write, &tmp);
 }
 
-static void set_msr_interception_nosync(struct kvm_vcpu *vcpu, u32 *msrpm,
+static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
 					u32 msr, int read, int write)
 {
 	u8 bit_read, bit_write;
@@ -646,7 +646,7 @@ static void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
 				 int read, int write)
 {
 	set_shadow_msr_intercept(vcpu, msr, read, write);
-	set_msr_interception_nosync(vcpu, msrpm, msr, read, write);
+	set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
 }
 
 static u32 *svm_vcpu_alloc_msrpm(void)
@@ -694,7 +694,7 @@ static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
 		u32 read = test_bit(i, svm->shadow_msr_intercept.read);
 		u32 write = test_bit(i, svm->shadow_msr_intercept.write);
 
-		set_msr_interception_nosync(vcpu, svm->msrpm, msr, read, write);
+		set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
 	}
 }
 
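
As a side note for anyone skimming the archive: the pattern these helpers implement is easier to see outside the kernel. Below is only an illustrative user-space sketch, not the real KVM code; the MSR list, the filter check and the bitmaps are simplified stand-ins. The idea from the commit message is that set_shadow_msr_intercept() records the passthrough state KVM itself wants, set_msr_interception_bitmap() computes the effective state by letting the userspace filter veto passthrough, and svm_msr_filter_changed() replays the shadow state whenever the filter is updated.

/*
 * Stand-alone sketch (not kernel code) of the shadow-intercept pattern.
 * read/write == true means "give the guest direct access", matching the
 * convention of set_msr_interception() in the patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_DIRECT_ACCESS_MSRS 3

static const uint32_t direct_access_msrs[NUM_DIRECT_ACCESS_MSRS] = {
	0x00000174,	/* MSR_IA32_SYSENTER_CS */
	0xc0000080,	/* MSR_EFER */
	0xc0000102,	/* MSR_KERNEL_GS_BASE */
};

/* Shadow state: what KVM itself would like, ignoring the filter. */
static bool shadow_read[NUM_DIRECT_ACCESS_MSRS];
static bool shadow_write[NUM_DIRECT_ACCESS_MSRS];

/* Effective state: what would actually be programmed into the bitmap. */
static bool passthrough_read[NUM_DIRECT_ACCESS_MSRS];
static bool passthrough_write[NUM_DIRECT_ACCESS_MSRS];

/* Stand-in for the userspace filter: false = userspace wants a trap. */
static bool msr_allowed_by_filter(uint32_t msr)
{
	return msr != 0xc0000080;	/* pretend EFER is denied */
}

static int direct_access_msr_slot(uint32_t msr)
{
	for (int i = 0; i < NUM_DIRECT_ACCESS_MSRS; i++)
		if (direct_access_msrs[i] == msr)
			return i;
	return -1;			/* -ENOENT in the real code */
}

/* Counterpart of set_msr_interception_bitmap(): apply the filter veto. */
static void set_msr_passthrough_bitmap(uint32_t msr, bool read, bool write)
{
	int slot = direct_access_msr_slot(msr);
	bool allowed = msr_allowed_by_filter(msr);

	if (slot < 0)
		return;

	passthrough_read[slot]  = read && allowed;
	passthrough_write[slot] = write && allowed;
}

/* Counterpart of set_msr_interception(): remember the wish, then apply it. */
static void set_msr_passthrough(uint32_t msr, bool read, bool write)
{
	int slot = direct_access_msr_slot(msr);

	if (slot < 0)
		return;

	shadow_read[slot] = read;	/* set_shadow_msr_intercept() */
	shadow_write[slot] = write;
	set_msr_passthrough_bitmap(msr, read, write);
}

/* Counterpart of svm_msr_filter_changed(): replay the shadow state. */
static void msr_filter_changed(void)
{
	for (int i = 0; i < NUM_DIRECT_ACCESS_MSRS; i++)
		set_msr_passthrough_bitmap(direct_access_msrs[i],
					   shadow_read[i], shadow_write[i]);
}

int main(void)
{
	/* KVM would like EFER and KERNEL_GS_BASE to be passed through ... */
	set_msr_passthrough(0xc0000080, true, true);
	set_msr_passthrough(0xc0000102, true, true);

	/* ... but after any filter update the denied MSR keeps trapping. */
	msr_filter_changed();
	for (int i = 0; i < NUM_DIRECT_ACCESS_MSRS; i++)
		printf("msr %#x: read passthrough %d, write passthrough %d\n",
		       (unsigned)direct_access_msrs[i],
		       passthrough_read[i], passthrough_write[i]);
	return 0;
}

Running it shows the denied MSR staying intercepted even though KVM asked for passthrough, which is the invariant the patch is about.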