lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250304085152.51092-21-darwi@linutronix.de>
Date: Tue,  4 Mar 2025 09:51:31 +0100
From: "Ahmed S. Darwish" <darwi@...utronix.de>
To: Borislav Petkov <bp@...en8.de>,
	Ingo Molnar <mingo@...hat.com>,
	Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Thomas Gleixner <tglx@...utronix.de>,
	John Ogness <john.ogness@...utronix.de>,
	"H. Peter Anvin" <hpa@...or.com>,
	Andrew Cooper <andrew.cooper3@...rix.com>,
	x86@...nel.org,
	x86-cpuid@...ts.linux.dev,
	LKML <linux-kernel@...r.kernel.org>,
	"Ahmed S. Darwish" <darwi@...utronix.de>
Subject: [PATCH v1 20/40] x86: treewide: Introduce x86_vendor_amd_or_hygon()

The pattern of checking whether an x86 vendor is AMD or HYGON (or neither) is
pretty common across the x86 tree.

Introduce an x86_vendor_amd_or_hygon() inline helper in <asm/processor.h>, and
use it across the x86 tree.

Signed-off-by: Ahmed S. Darwish <darwi@...utronix.de>
---
 arch/x86/events/amd/uncore.c       |  3 +--
 arch/x86/events/rapl.c             |  3 +--
 arch/x86/include/asm/processor.h   |  5 +++++
 arch/x86/kernel/amd_nb.c           |  9 +++------
 arch/x86/kernel/cpu/bugs.c         | 12 ++++--------
 arch/x86/kernel/cpu/cacheinfo.c    |  7 ++-----
 arch/x86/kernel/cpu/mce/core.c     |  4 ++--
 arch/x86/kernel/cpu/mce/severity.c |  3 +--
 arch/x86/kernel/cpu/mtrr/cleanup.c |  3 +--
 arch/x86/kernel/smpboot.c          |  3 +--
 arch/x86/kvm/svm/svm.c             |  3 +--
 arch/x86/pci/amd_bus.c             |  3 +--
 arch/x86/xen/enlighten.c           | 15 +++++----------
 arch/x86/xen/pmu.c                 |  3 +--
 14 files changed, 29 insertions(+), 47 deletions(-)

diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 49c26ce2b115..5141c0375990 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -1023,8 +1023,7 @@ static int __init amd_uncore_init(void)
 	int ret = -ENODEV;
 	int i;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
-	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+	if (!x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor))
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 6941f4811bec..999ea90059ae 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -123,8 +123,7 @@ static struct perf_pmu_events_attr event_attr_##v = {				\
  *	     them as die-scope.
  */
 #define rapl_pkg_pmu_is_pkg_scope()				\
-	(boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||	\
-	 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+	x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor)
 
 struct rapl_pmu {
 	raw_spinlock_t		lock;
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d5d9a071cddc..0f586c638e87 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -203,6 +203,11 @@ struct cpuinfo_x86 {
 
 #define X86_VENDOR_UNKNOWN	0xff
 
+static inline bool x86_vendor_amd_or_hygon(u8 vendor)
+{
+	return (vendor == X86_VENDOR_AMD || vendor == X86_VENDOR_HYGON);
+}
+
 /*
  * capabilities of CPUs
  */
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 11fac09e3a8c..bac8d3b6f12b 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -127,8 +127,7 @@ bool __init early_is_amd_nb(u32 device)
 	const struct pci_device_id *id;
 	u32 vendor = device & 0xffff;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
-	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+	if (!x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor))
 		return false;
 
 	if (cpu_feature_enabled(X86_FEATURE_ZEN))
@@ -147,8 +146,7 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
 	u64 base, msr;
 	unsigned int segn_busn_bits;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
-	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+	if (!x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor))
 		return NULL;
 
 	/* assume all cpus from fam10h have mmconfig */
@@ -320,8 +318,7 @@ static __init void fix_erratum_688(void)
 
 static __init int init_amd_nbs(void)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
-	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+	if (!x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor))
 		return 0;
 
 	amd_cache_northbridges();
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index a5d0998d7604..b0dc4e96f4bc 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1081,8 +1081,7 @@ static void __init retbleed_select_mitigation(void)
 
 do_cmd_auto:
 	case RETBLEED_CMD_AUTO:
-		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		if (x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor)) {
 			if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
 				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
 			else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
@@ -1106,8 +1105,7 @@ static void __init retbleed_select_mitigation(void)
 
 		x86_return_thunk = retbleed_return_thunk;
 
-		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
-		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+		if (!x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor))
 			pr_err(RETBLEED_UNTRAIN_MSG);
 
 		mitigate_smt = true;
@@ -1872,8 +1870,7 @@ static void __init spectre_v2_select_mitigation(void)
 	 */
 	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
 	    boot_cpu_has(X86_FEATURE_IBPB) &&
-	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+	    x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor)) {
 
 		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
 			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
@@ -2903,8 +2900,7 @@ static ssize_t retbleed_show_state(char *buf)
 {
 	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
 	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
-		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
-		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+		if (!x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor))
 			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
 
 		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 74a2949ff872..0024d126c385 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -633,8 +633,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 	union _cpuid4_leaf_eax	cache_eax;
 	int 			i = -1;
 
-	if (c->x86_vendor == X86_VENDOR_AMD ||
-	    c->x86_vendor == X86_VENDOR_HYGON)
+	if (x86_vendor_amd_or_hygon(c->x86_vendor))
 		op = 0x8000001d;
 	else
 		op = 4;
@@ -907,11 +906,9 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
 	int index_msb, i;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86_vendor == X86_VENDOR_AMD ||
-	    c->x86_vendor == X86_VENDOR_HYGON) {
+	if (x86_vendor_amd_or_hygon(c->x86_vendor))
 		if (__cache_amd_cpumap_setup(cpu, index, id4))
 			return;
-	}
 
 	ci = this_cpu_ci->info_list + index;
 	num_threads_sharing = 1 + id4->eax.split.num_threads_sharing;
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 0dc00c9894c7..135d7b8f3e55 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -227,7 +227,7 @@ static void print_mce(struct mce_hw_err *err)
 
 	__print_mce(err);
 
-	if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
+	if (!x86_vendor_amd_or_hygon(m->cpuvendor))
 		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
 
@@ -2060,7 +2060,7 @@ static bool __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
  */
 static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
 {
-	if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
+	if (x86_vendor_amd_or_hygon(c->x86_vendor)) {
 		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
 		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
 		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
index dac4d64dfb2a..a3f2f1c236bc 100644
--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -413,8 +413,7 @@ static noinstr int mce_severity_intel(struct mce *m, struct pt_regs *regs, char
 
 int noinstr mce_severity(struct mce *m, struct pt_regs *regs, char **msg, bool is_excp)
 {
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+	if (x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor))
 		return mce_severity_amd(m, regs, msg, is_excp);
 	else
 		return mce_severity_intel(m, regs, msg, is_excp);
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 18cf79d6e2c5..236d7e3b4e55 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -820,8 +820,7 @@ int __init amd_special_default_mtrr(void)
 {
 	u32 l, h;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
-	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+	if (!x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor))
 		return 0;
 	if (boot_cpu_data.x86 < 0xf)
 		return 0;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e199465dc9e1..5ba8424cf4e6 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1250,8 +1250,7 @@ static inline void mwait_play_dead(void)
 	unsigned int highest_subcstate = 0;
 	int i;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+	if (x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor))
 		return;
 	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index a713c803a3a3..8c88f3c0c2cd 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -523,8 +523,7 @@ static bool __kvm_is_svm_supported(void)
 	int cpu = smp_processor_id();
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86_vendor != X86_VENDOR_AMD &&
-	    c->x86_vendor != X86_VENDOR_HYGON) {
+	if (!x86_vendor_amd_or_hygon(c->x86_vendor)) {
 		pr_err("CPU %d isn't AMD or Hygon\n", cpu);
 		return false;
 	}
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 631512f7ec85..43033d54080a 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -399,8 +399,7 @@ static int __init pci_io_ecs_init(void)
 
 static int __init amd_postcore_init(void)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
-	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+	if (!x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor))
 		return 0;
 
 	early_root_info_init();
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 43dcd8c7badc..13df4917d7d8 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -82,11 +82,9 @@ void xen_hypercall_setfunc(void)
 	if (static_call_query(xen_hypercall) != xen_hypercall_hvm)
 		return;
 
-	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
-		static_call_update(xen_hypercall, xen_hypercall_amd);
-	else
-		static_call_update(xen_hypercall, xen_hypercall_intel);
+	static_call_update(xen_hypercall,
+			   x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor) ?
+			   xen_hypercall_amd : xen_hypercall_intel);
 }
 
 /*
@@ -118,11 +116,8 @@ noinstr void *__xen_hypercall_setfunc(void)
 	if (!boot_cpu_has(X86_FEATURE_CPUID))
 		xen_get_vendor();
 
-	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
-		func = xen_hypercall_amd;
-	else
-		func = xen_hypercall_intel;
+	func = x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor) ?
+		xen_hypercall_amd : xen_hypercall_intel;
 
 	static_call_update_early(xen_hypercall, func);
 
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index f06987b0efc3..af5cb19b5990 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -130,8 +130,7 @@ static inline uint32_t get_fam15h_addr(u32 addr)
 
 static inline bool is_amd_pmu_msr(unsigned int msr)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
-	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+	if (!x86_vendor_amd_or_hygon(boot_cpu_data.x86_vendor))
 		return false;
 
 	if ((msr >= MSR_F15H_PERF_CTL &&
-- 
2.48.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ