Message-Id: <20220926141849.21805-2-jgross@suse.com>
Date: Mon, 26 Sep 2022 16:18:47 +0200
From: Juergen Gross <jgross@...e.com>
To: xen-devel@...ts.xenproject.org, x86@...nel.org,
linux-kernel@...r.kernel.org
Cc: Juergen Gross <jgross@...e.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [PATCH 1/3] xen/pv: allow pmu msr accesses to cause GP

Today pmu_msr_read() and pmu_msr_write() fall back to the safe variants
of read/write MSR in case the MSR access isn't emulated via Xen. Allow
the caller to select the potentially faulting variant by passing NULL
for the error pointer.
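
To illustrate the new calling convention (this sketch is not part of
the patch, and example_read() is just a made-up helper name):

	/* Illustration only, not part of this patch. */
	static uint64_t example_read(unsigned int msr)
	{
		uint64_t val = 0;
		int err;

		/*
		 * Safe variant: a non-emulated MSR is read via
		 * native_read_msr_safe() and err reports any failure.
		 */
		if (pmu_msr_read(msr, &val, &err) && !err)
			return val;

		/*
		 * Faulting variant: a non-emulated MSR is read via
		 * native_read_msr() and may raise #GP.
		 */
		if (pmu_msr_read(msr, &val, NULL))
			return val;

		return 0;
	}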
Remove one level of indentation by restructuring the code a little bit.

Signed-off-by: Juergen Gross <jgross@...e.com>
---
 arch/x86/xen/pmu.c | 44 ++++++++++++++++++++++++++------------------
 1 file changed, 26 insertions(+), 18 deletions(-)

diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 21ecbe754cb2..34b4144f6041 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -293,22 +293,24 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
 {
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
-		if (is_amd_pmu_msr(msr)) {
-			if (!xen_amd_pmu_emulate(msr, val, 1))
-				*val = native_read_msr_safe(msr, err);
-			return true;
+		if (!is_amd_pmu_msr(msr))
+			return false;
+		if (!xen_amd_pmu_emulate(msr, val, 1)) {
+			*val = err ? native_read_msr_safe(msr, err)
+				   : native_read_msr(msr);
 		}
+		return true;
 	} else {
 		int type, index;
 
-		if (is_intel_pmu_msr(msr, &type, &index)) {
-			if (!xen_intel_pmu_emulate(msr, val, type, index, 1))
-				*val = native_read_msr_safe(msr, err);
-			return true;
+		if (!is_intel_pmu_msr(msr, &type, &index))
+			return false;
+		if (!xen_intel_pmu_emulate(msr, val, type, index, 1)) {
+			*val = err ? native_read_msr_safe(msr, err)
+				   : native_read_msr(msr);
 		}
+		return true;
 	}
-
-	return false;
 }
 
 bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
@@ -316,22 +318,28 @@ bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
 	uint64_t val = ((uint64_t)high << 32) | low;
 
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
-		if (is_amd_pmu_msr(msr)) {
-			if (!xen_amd_pmu_emulate(msr, &val, 0))
+		if (!is_amd_pmu_msr(msr))
+			return false;
+		if (!xen_amd_pmu_emulate(msr, &val, 0)) {
+			if (err)
 				*err = native_write_msr_safe(msr, low, high);
-			return true;
+			else
+				native_write_msr(msr, low, high);
 		}
+		return true;
 	} else {
 		int type, index;
 
-		if (is_intel_pmu_msr(msr, &type, &index)) {
-			if (!xen_intel_pmu_emulate(msr, &val, type, index, 0))
+		if (!is_intel_pmu_msr(msr, &type, &index))
+			return false;
+		if (!xen_intel_pmu_emulate(msr, &val, type, index, 0)) {
+			if (err)
 				*err = native_write_msr_safe(msr, low, high);
-			return true;
+			else
+				native_write_msr(msr, low, high);
 		}
+		return true;
 	}
-
-	return false;
 }
 
 static unsigned long long xen_amd_read_pmc(int counter)
--
2.35.3