Message-ID: <20250331082251.3171276-2-xin@zytor.com>
Date: Mon, 31 Mar 2025 01:22:37 -0700
From: "Xin Li (Intel)" <xin@...or.com>
To: linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
linux-hyperv@...r.kernel.org, virtualization@...ts.linux.dev,
linux-edac@...r.kernel.org, kvm@...r.kernel.org,
xen-devel@...ts.xenproject.org, linux-ide@...r.kernel.org,
linux-pm@...r.kernel.org, bpf@...r.kernel.org, llvm@...ts.linux.dev
Cc: tglx@...utronix.de, mingo@...hat.com, bp@...en8.de,
dave.hansen@...ux.intel.com, x86@...nel.org, hpa@...or.com,
jgross@...e.com, andrew.cooper3@...rix.com, peterz@...radead.org,
acme@...nel.org, namhyung@...nel.org, mark.rutland@....com,
alexander.shishkin@...ux.intel.com, jolsa@...nel.org,
irogers@...gle.com, adrian.hunter@...el.com, kan.liang@...ux.intel.com,
wei.liu@...nel.org, ajay.kaher@...adcom.com,
alexey.amakhalov@...adcom.com, bcm-kernel-feedback-list@...adcom.com,
tony.luck@...el.com, pbonzini@...hat.com, vkuznets@...hat.com,
seanjc@...gle.com, luto@...nel.org, boris.ostrovsky@...cle.com,
kys@...rosoft.com, haiyangz@...rosoft.com, decui@...rosoft.com
Subject: [RFC PATCH v1 01/15] x86/msr: Replace __wrmsr() with native_wrmsrl()
__wrmsr() is the lowest-level primitive MSR write API, and its direct
use is NOT preferred. Use its wrapper function native_wrmsrl()
instead.

No functional change intended.
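
For context, native_wrmsrl() is a thin wrapper around __wrmsr() that
takes a single 64-bit value and splits it into the EDX:EAX halves the
WRMSR instruction expects. A minimal sketch of the relationship,
paraphrased from arch/x86/include/asm/msr.h (the exact definition may
differ between kernel versions):

	/*
	 * Sketch only: native_wrmsrl() splits the 64-bit value into the
	 * low (EAX) and high (EDX) halves that __wrmsr() takes.
	 */
	static inline void native_wrmsrl(u32 msr, u64 val)
	{
		__wrmsr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
	}

Converting callers to native_wrmsrl() keeps the 64-bit composition in
one place instead of open-coding low/high splits at every call site.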
Signed-off-by: Xin Li (Intel) <xin@...or.com>
---
arch/x86/events/amd/brs.c | 2 +-
arch/x86/include/asm/apic.h | 2 +-
arch/x86/include/asm/msr.h | 6 ++++--
arch/x86/kernel/cpu/mce/core.c | 2 +-
arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 6 +++---
5 files changed, 10 insertions(+), 8 deletions(-)
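
The conversion below is mechanical: where a caller already had the full
64-bit value, the high argument simply drops out; where it passed
separate 32-bit halves (as at the MSR_IA32_PQR_ASSOC sites), the halves
are recombined. An illustrative before/after with a hypothetical call
site, not part of this patch:

	__wrmsr(msr, low, high);                    /* before: split halves */
	native_wrmsrl(msr, (u64)high << 32 | low);  /* after: one u64 value */
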
diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index ec3427463382..4a47f3c108de 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -44,7 +44,7 @@ static inline unsigned int brs_to(int idx)
static __always_inline void set_debug_extn_cfg(u64 val)
{
/* bits[4:3] must always be set to 11b */
- __wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32);
+ native_wrmsrl(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3);
}
static __always_inline u64 get_debug_extn_cfg(void)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index c903d358405d..3345a819c859 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -214,7 +214,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
static inline void native_apic_msr_eoi(void)
{
- __wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+ native_wrmsrl(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK);
}
static inline u32 native_apic_msr_read(u32 reg)
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9397a319d165..27ea8793705d 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -144,10 +144,12 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
- __wrmsr(msr, low, high);
+ u64 val = (u64)high << 32 | low;
+
+ native_wrmsrl(msr, val);
if (tracepoint_enabled(write_msr))
- do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
+ do_trace_write_msr(msr, val, 0);
}
/* Can be uninlined because referenced by paravirt */
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 1f14c3308b6b..0eaeaba12df2 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1306,7 +1306,7 @@ static noinstr bool mce_check_crashing_cpu(void)
}
if (mcgstatus & MCG_STATUS_RIPV) {
- __wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
+ native_wrmsrl(MSR_IA32_MCG_STATUS, 0);
return true;
}
}
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 01fa7890b43f..55536120c8d1 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -481,7 +481,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* cache.
*/
saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
- __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ native_wrmsrl(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
closid_p = this_cpu_read(pqr_state.cur_closid);
rmid_p = this_cpu_read(pqr_state.cur_rmid);
mem_r = plr->kmem;
@@ -493,7 +493,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* pseudo-locked followed by reading of kernel memory to load it
* into the cache.
*/
- __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
+ native_wrmsrl(MSR_IA32_PQR_ASSOC, (u64)plr->closid << 32 | rmid_p);
/*
* Cache was flushed earlier. Now access kernel memory to read it
@@ -530,7 +530,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* Critical section end: restore closid with capacity bitmask that
* does not overlap with pseudo-locked region.
*/
- __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
+ native_wrmsrl(MSR_IA32_PQR_ASSOC, (u64)closid_p << 32 | rmid_p);
/* Re-enable the hardware prefetcher(s) */
wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
--
2.49.0