Message-ID: <20250529234013.3826933-12-seanjc@google.com>
Date: Thu, 29 May 2025 16:39:56 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Borislav Petkov <bp@...en8.de>, Xin Li <xin@...or.com>, Chao Gao <chao.gao@...el.com>,
Dapeng Mi <dapeng1.mi@...ux.intel.com>
Subject: [PATCH 11/28] KVM: SVM: Add helpers for accessing MSR bitmap that
don't rely on offsets

Add macro-built helpers for testing, setting, and clearing MSRPM entries
without relying on precomputed offsets. This sets the stage for eventually
removing general KVM use of precomputed offsets, which are quite confusing
and rather inefficient for the vast majority of KVM's usage.

Outside of merging L0 and L1 bitmaps for nested SVM, using u32-indexed
offsets and accesses is at best unnecessary, and at worst introduces extra
operations to retrieve the individual bit from within the offset u32 value.
And simply calling them "offsets" is very confusing, as the "unit" of the
offset isn't immediately obvious.
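
Purely for illustration (not part of the patch), the two access patterns
for testing the write-intercept bit of an MSR look roughly like:

	/* offset-based: compute a u32 "offset", then a bit within that u32 */
	offset    = svm_msrpm_offset(msr);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];
	intercepted = test_bit(bit_write, &tmp);

	/* bit-number-based: the new helper hides the math entirely */
	intercepted = svm_test_msr_bitmap_write(msrpm, msr);
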
Use the new helpers in set_msr_interception_bitmap() and
msr_write_intercepted() to verify the math and operations, but keep the
existing offset-based logic in set_msr_interception_bitmap() to sanity check
the "clear" and "set" operations. Manipulating MSR interceptions isn't a
hot path and no kernel release is ever expected to contain this specific
version of set_msr_interception_bitmap() (it will be removed entirely in
the near future).

Add compile-time asserts to verify the bit number calculations, and also
to provide a simple demonstration of the layout (SVM and VMX use the same
concept of a bitmap, but with different layouts).
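
As a worked example (again, illustration only, not in the patch), MSR_EFER
(0xc0000080) falls in range 1, so with 2048 bytes per range and 2 bits per
MSR its bit number works out to:

	SVM_MSRPM_BIT_NR(1, MSR_EFER)
		= 1 * 2048 * 8 + (0xc0000080 - 0xc0000000) * 2
		= 16384 + 256
		= 16640

i.e. the read bit sits at bit 16640 and the write bit at bit 16641, which
is the relationship the new static_asserts spot-check for a couple of MSRs
in each range.
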
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/kvm/svm/svm.c | 30 ++++++++++++++--------------
arch/x86/kvm/svm/svm.h | 44 ++++++++++++++++++++++++++++++++++++++++++
2 files changed, 58 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d97711bdbfc9..76d074440bcc 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -806,11 +806,6 @@ static bool valid_msr_intercept(u32 index)
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
- u8 bit_write;
- unsigned long tmp;
- u32 offset;
- u32 *msrpm;
-
/*
* For non-nested case:
* If the L01 MSR bitmap does not intercept the MSR, then we need to
@@ -820,17 +815,10 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
- to_svm(vcpu)->msrpm;
+ void *msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
+ to_svm(vcpu)->msrpm;
- offset = svm_msrpm_offset(msr);
- bit_write = 2 * (msr & 0x0f) + 1;
- tmp = msrpm[offset];
-
- if (KVM_BUG_ON(offset == MSR_INVALID, vcpu->kvm))
- return false;
-
- return test_bit(bit_write, &tmp);
+ return svm_test_msr_bitmap_write(msrpm, msr);
}
static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
@@ -865,7 +853,17 @@ static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
read ? __clear_bit(bit_read, &tmp) : __set_bit(bit_read, &tmp);
write ? __clear_bit(bit_write, &tmp) : __set_bit(bit_write, &tmp);
- msrpm[offset] = tmp;
+ if (read)
+ svm_clear_msr_bitmap_read((void *)msrpm, msr);
+ else
+ svm_set_msr_bitmap_read((void *)msrpm, msr);
+
+ if (write)
+ svm_clear_msr_bitmap_write((void *)msrpm, msr);
+ else
+ svm_set_msr_bitmap_write((void *)msrpm, msr);
+
+ WARN_ON_ONCE(msrpm[offset] != (u32)tmp);
svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
svm->nested.force_msr_bitmap_recalc = true;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 47a36a9a7fe5..e432cd7a7889 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -628,6 +628,50 @@ static_assert(SVM_MSRS_PER_RANGE == 8192);
#define SVM_MSRPM_RANGE_1_BASE_MSR 0xc0000000
#define SVM_MSRPM_RANGE_2_BASE_MSR 0xc0010000
+#define SVM_MSRPM_FIRST_MSR(range_nr) \
+ (SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR)
+#define SVM_MSRPM_LAST_MSR(range_nr) \
+ (SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR + SVM_MSRS_PER_RANGE - 1)
+
+#define SVM_MSRPM_BIT_NR(range_nr, msr) \
+ (range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE + \
+ (msr - SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR) * SVM_BITS_PER_MSR)
+
+#define SVM_MSRPM_SANITY_CHECK_BITS(range_nr) \
+static_assert(SVM_MSRPM_BIT_NR(range_nr, SVM_MSRPM_FIRST_MSR(range_nr) + 1) == \
+ range_nr * 2048 * 8 + 2); \
+static_assert(SVM_MSRPM_BIT_NR(range_nr, SVM_MSRPM_FIRST_MSR(range_nr) + 7) == \
+ range_nr * 2048 * 8 + 14);
+
+SVM_MSRPM_SANITY_CHECK_BITS(0);
+SVM_MSRPM_SANITY_CHECK_BITS(1);
+SVM_MSRPM_SANITY_CHECK_BITS(2);
+
+#define SVM_BUILD_MSR_BITMAP_CASE(bitmap, range_nr, msr, bitop, bit_rw) \
+ case SVM_MSRPM_FIRST_MSR(range_nr) ... SVM_MSRPM_LAST_MSR(range_nr): \
+ return bitop##_bit(SVM_MSRPM_BIT_NR(range_nr, msr) + bit_rw, bitmap);
+
+#define __BUILD_SVM_MSR_BITMAP_HELPER(rtype, action, bitop, access, bit_rw) \
+static inline rtype svm_##action##_msr_bitmap_##access(unsigned long *bitmap, \
+ u32 msr) \
+{ \
+ switch (msr) { \
+ SVM_BUILD_MSR_BITMAP_CASE(bitmap, 0, msr, bitop, bit_rw) \
+ SVM_BUILD_MSR_BITMAP_CASE(bitmap, 1, msr, bitop, bit_rw) \
+ SVM_BUILD_MSR_BITMAP_CASE(bitmap, 2, msr, bitop, bit_rw) \
+ default: \
+ return (rtype)true; \
+ } \
+ \
+}
+#define BUILD_SVM_MSR_BITMAP_HELPERS(ret_type, action, bitop) \
+ __BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, read, 0) \
+ __BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 1)
+
+BUILD_SVM_MSR_BITMAP_HELPERS(bool, test, test)
+BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear)
+BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)
+
#define MSR_INVALID 0xffffffffU
#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)
--
2.49.0.1204.g71687c7c1d-goog