[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250610225737.156318-30-seanjc@google.com>
Date: Tue, 10 Jun 2025 15:57:34 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Chao Gao <chao.gao@...el.com>, Borislav Petkov <bp@...en8.de>, Xin Li <xin@...or.com>,
Dapeng Mi <dapeng1.mi@...ux.intel.com>, Francesco Lavra <francescolavra.fl@...il.com>,
Manali Shukla <Manali.Shukla@....com>
Subject: [PATCH v2 29/32] KVM: nSVM: Merge MSRPM in 64-bit chunks on 64-bit kernels
When merging L0 and L1 MSRPMs as part of nested VMRUN emulation, access
the bitmaps in "unsigned long" chunks, i.e. use 8-byte accesses on
64-bit kernels instead of arbitrarily working in 4-byte chunks.
Opportunistically rename local variables in nested_svm_merge_msrpm() to
more precisely/accurately reflect their purpose ("offset" in particular is
extremely ambiguous).
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/kvm/svm/nested.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 7ca45361ced3..749f7b866ac8 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -196,6 +196,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
*/
static int nested_svm_msrpm_merge_offsets[6] __ro_after_init;
static int nested_svm_nr_msrpm_merge_offsets __ro_after_init;
+typedef unsigned long nsvm_msrpm_merge_t;
int __init nested_svm_init_msrpm_merge_offsets(void)
{
@@ -230,10 +231,10 @@ int __init nested_svm_init_msrpm_merge_offsets(void)
return -EIO;
/*
- * Merging is done in 32-bit chunks to reduce the number of
- * accesses to L1's bitmap.
+ * Merging is done in chunks to reduce the number of accesses
+ * to L1's bitmap.
*/
- offset = bit_nr / BITS_PER_BYTE / sizeof(u32);
+ offset = bit_nr / BITS_PER_BYTE / sizeof(nsvm_msrpm_merge_t);
for (j = 0; j < nested_svm_nr_msrpm_merge_offsets; j++) {
if (nested_svm_msrpm_merge_offsets[j] == offset)
@@ -261,8 +262,8 @@ int __init nested_svm_init_msrpm_merge_offsets(void)
static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- u32 *msrpm02 = svm->nested.msrpm;
- u32 *msrpm01 = svm->msrpm;
+ nsvm_msrpm_merge_t *msrpm02 = svm->nested.msrpm;
+ nsvm_msrpm_merge_t *msrpm01 = svm->msrpm;
int i;
/*
@@ -289,15 +290,15 @@ static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu)
for (i = 0; i < nested_svm_nr_msrpm_merge_offsets; i++) {
const int p = nested_svm_msrpm_merge_offsets[i];
- u32 value;
- u64 offset;
+ nsvm_msrpm_merge_t l1_val;
+ gpa_t gpa;
- offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
+ gpa = svm->nested.ctl.msrpm_base_pa + (p * sizeof(l1_val));
- if (kvm_vcpu_read_guest(vcpu, offset, &value, 4))
+ if (kvm_vcpu_read_guest(vcpu, gpa, &l1_val, sizeof(l1_val)))
return false;
- msrpm02[p] = msrpm01[p] | value;
+ msrpm02[p] = msrpm01[p] | l1_val;
}
svm->nested.force_msr_bitmap_recalc = false;
--
2.50.0.rc0.642.g800a2b2222-goog
Powered by blists - more mailing lists