Message-ID: <20250930070356.30695-5-jgross@suse.com>
Date: Tue, 30 Sep 2025 09:03:48 +0200
From: Juergen Gross <jgross@...e.com>
To: linux-kernel@...r.kernel.org,
	x86@...nel.org,
	linux-hyperv@...r.kernel.org,
	kvm@...r.kernel.org
Cc: xin@...or.com,
	Juergen Gross <jgross@...e.com>,
	"K. Y. Srinivasan" <kys@...rosoft.com>,
	Haiyang Zhang <haiyangz@...rosoft.com>,
	Wei Liu <wei.liu@...nel.org>,
	Dexuan Cui <decui@...rosoft.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	Borislav Petkov <bp@...en8.de>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	"H. Peter Anvin" <hpa@...or.com>,
	Paolo Bonzini <pbonzini@...hat.com>,
	Vitaly Kuznetsov <vkuznets@...hat.com>,
	Sean Christopherson <seanjc@...gle.com>,
	Boris Ostrovsky <boris.ostrovsky@...cle.com>,
	xen-devel@...ts.xenproject.org
Subject: [PATCH v2 04/12] x86/msr: Minimize usage of native_*() msr access functions

In order to prepare for some MSR access function reorg work, switch
most users of native_{read|write}_msr[_safe]() to the more generic
rdmsr*()/wrmsr*() variants.

For now this will have some intermediate performance impact with
paravirtualization configured when running on bare metal, but this
is a prereq change for the planned direct inlining of the rdmsr/wrmsr
instructions with this configuration.

The main reason for this switch is the planned move of the MSR trace
function invocation from the native_*() functions to the generic
rdmsr*()/wrmsr*() variants. Without this switch the users of the
native_*() functions would lose the related tracing entries.

Note that the Xen related MSR access functions will not be switched,
as these will be handled after the move of the trace hooks.
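
As an illustration (not part of the patch), the conversion pattern used
in the hunks below looks roughly like the following minimal sketch;
MSR_EFER and the function name are stand-ins chosen for this example
only:

  #include <asm/msr.h>

  static void msr_conversion_example(void)
  {
  	u64 val;

  	/* Plain accessors: return value vs. output argument. */
  	val = native_read_msr(MSR_EFER);	/* old */
  	rdmsrq(MSR_EFER, val);			/* new */

  	native_write_msr(MSR_EFER, val);	/* old */
  	wrmsrq(MSR_EFER, val);			/* new */

  	/* _safe variants: both old and new return an error code. */
  	if (native_read_msr_safe(MSR_EFER, &val))	/* old */
  		return;
  	if (rdmsrq_safe(MSR_EFER, &val))		/* new */
  		return;

  	native_write_msr_safe(MSR_EFER, val);	/* old */
  	wrmsrq_safe(MSR_EFER, val);		/* new */
  }
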
Signed-off-by: Juergen Gross <jgross@...e.com>
Acked-by: Sean Christopherson <seanjc@...gle.com>
Acked-by: Wei Liu <wei.liu@...nel.org>
---
 arch/x86/hyperv/ivm.c      |  2 +-
 arch/x86/kernel/kvmclock.c |  2 +-
 arch/x86/kvm/svm/svm.c     | 16 ++++++++--------
 arch/x86/xen/pmu.c         |  4 ++--
 4 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index ade6c665c97e..202ed01dc151 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -327,7 +327,7 @@ int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu)
 	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
 	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
 
-	vmsa->efer = native_read_msr(MSR_EFER);
+	rdmsrq(MSR_EFER, vmsa->efer);
 
 	vmsa->cr4 = native_read_cr4();
 	vmsa->cr3 = __native_read_cr3();
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index ca0a49eeac4a..b6cd45cce5fe 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -196,7 +196,7 @@ static void kvm_setup_secondary_clock(void)
 void kvmclock_disable(void)
 {
 	if (msr_kvm_system_time)
-		native_write_msr(msr_kvm_system_time, 0);
+		wrmsrq(msr_kvm_system_time, 0);
 }
 
 static void __init kvmclock_init_mem(void)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1bfebe40854f..105d5c2aae46 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -393,12 +393,12 @@ static void svm_init_erratum_383(void)
 		return;
 
 	/* Use _safe variants to not break nested virtualization */
-	if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val))
+	if (rdmsrq_safe(MSR_AMD64_DC_CFG, &val))
 		return;
 
 	val |= (1ULL << 47);
 
-	native_write_msr_safe(MSR_AMD64_DC_CFG, val);
+	wrmsrq_safe(MSR_AMD64_DC_CFG, val);
 
 	erratum_383_found = true;
 }
@@ -558,9 +558,9 @@ static int svm_enable_virtualization_cpu(void)
 		u64 len, status = 0;
 		int err;
 
-		err = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &len);
+		err = rdmsrq_safe(MSR_AMD64_OSVW_ID_LENGTH, &len);
 		if (!err)
-			err = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &status);
+			err = rdmsrq_safe(MSR_AMD64_OSVW_STATUS, &status);
 
 		if (err)
 			osvw_status = osvw_len = 0;
@@ -2032,7 +2032,7 @@ static bool is_erratum_383(void)
 	if (!erratum_383_found)
 		return false;
 
-	if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value))
+	if (rdmsrq_safe(MSR_IA32_MC0_STATUS, &value))
 		return false;
 
 	/* Bit 62 may or may not be set for this mce */
@@ -2043,11 +2043,11 @@ static bool is_erratum_383(void)
 
 	/* Clear MCi_STATUS registers */
 	for (i = 0; i < 6; ++i)
-		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0);
+		wrmsrq_safe(MSR_IA32_MCx_STATUS(i), 0);
 
-	if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) {
+	if (!rdmsrq_safe(MSR_IA32_MCG_STATUS, &value)) {
 		value &= ~(1ULL << 2);
-		native_write_msr_safe(MSR_IA32_MCG_STATUS, value);
+		wrmsrq_safe(MSR_IA32_MCG_STATUS, value);
 	}
 
 	/* Flush tlb to evict multi-match entries */
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 8f89ce0b67e3..d49a3bdc448b 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -323,7 +323,7 @@ static u64 xen_amd_read_pmc(int counter)
 		u64 val;
 
 		msr = amd_counters_base + (counter * amd_msr_step);
-		native_read_msr_safe(msr, &val);
+		rdmsrq_safe(msr, &val);
 		return val;
 	}
 
@@ -349,7 +349,7 @@ static u64 xen_intel_read_pmc(int counter)
 		else
 			msr = MSR_IA32_PERFCTR0 + counter;
 
-		native_read_msr_safe(msr, &val);
+		rdmsrq_safe(msr, &val);
 		return val;
 	}
 
--
2.51.0