Message-Id: <20240222183956.CA91A140@davehans-spike.ostc.intel.com>
Date: Thu, 22 Feb 2024 10:39:56 -0800
From: Dave Hansen <dave.hansen@...ux.intel.com>
To: linux-kernel@...r.kernel.org
Cc: kirill.shutemov@...ux.intel.com, pbonzini@...hat.com, tglx@...utronix.de, x86@...nel.org, bp@...en8.de, Dave Hansen <dave.hansen@...ux.intel.com>
Subject: [RFC][PATCH 23/34] x86/cpu: Move cache alignment configuration to global struct


From: Dave Hansen <dave.hansen@...ux.intel.com>

It is a tale as old as time: x86_cache_alignment is calculated and
stored per-CPU despite its value being the same system-wide.  Move it
to the global configuration structure.

The other values moved into x86_config have received _new_ wrappers.
But this one already had a wrapper: cache_line_size().  Just repoint
that existing wrapper at the global structure.
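
Because callers only ever see the wrapper, they need no changes.  A
minimal sketch of a hypothetical consumer (alloc_line_sized() is made
up for illustration and is not part of this series):

	#include <linux/kernel.h>	/* ALIGN() */
	#include <linux/slab.h>		/* kmalloc() */
	#include <asm/processor.h>	/* cache_line_size() on x86 */

	/* Round a buffer size up to a whole number of cache lines. */
	static void *alloc_line_sized(size_t len, gfp_t gfp)
	{
		return kmalloc(ALIGN(len, cache_line_size()), gfp);
	}

Whether the value behind cache_line_size() lives in boot_cpu_data or
x86_config is invisible at call sites like this, which is the point of
keeping the existing wrapper.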

Signed-off-by: Dave Hansen <dave.hansen@...ux.intel.com>
---

 b/arch/x86/include/asm/processor.h |    4 ++--
 b/arch/x86/kernel/cpu/common.c     |    4 ++--
 b/arch/x86/kernel/cpu/proc.c       |    2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff -puN arch/x86/include/asm/processor.h~x86_config-x86_cache_alignment arch/x86/include/asm/processor.h
--- a/arch/x86/include/asm/processor.h~x86_config-x86_cache_alignment	2024-02-22 10:08:59.700894617 -0800
+++ b/arch/x86/include/asm/processor.h	2024-02-22 10:08:59.708894931 -0800
@@ -137,7 +137,6 @@ struct cpuinfo_x86 {
 	struct cpuinfo_topology	topo;
 	/* in KB - valid for CPUS which support this call: */
 	unsigned int		x86_cache_size;
-	int			x86_cache_alignment;	/* In bytes */
 	/* Cache QoS architectural values, valid only on the BSP: */
 	int			x86_cache_max_rmid;	/* max index */
 	int			x86_cache_occ_scale;	/* scale to bytes */
@@ -191,6 +190,7 @@ struct x86_sys_config {
 	u8	phys_bits;
 	u8	virt_bits;
 	u16	clflush_size;
+	int	cache_alignment; /* in bytes */
 };
 
 extern struct x86_sys_config x86_config;
@@ -229,7 +229,7 @@ DECLARE_PER_CPU_READ_MOSTLY(struct cpuin
 
 extern const struct seq_operations cpuinfo_op;
 
-#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
+#define cache_line_size()	(x86_config.cache_alignment)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
 
diff -puN arch/x86/kernel/cpu/common.c~x86_config-x86_cache_alignment arch/x86/kernel/cpu/common.c
--- a/arch/x86/kernel/cpu/common.c~x86_config-x86_cache_alignment	2024-02-22 10:08:59.704894774 -0800
+++ b/arch/x86/kernel/cpu/common.c	2024-02-22 10:08:59.708894931 -0800
@@ -1140,9 +1140,9 @@ void get_cpu_address_sizes(struct cpuinf
 
 	c->x86_cache_bits = x86_config.phys_bits;
 
-	c->x86_cache_alignment = x86_clflush_size();
+	x86_config.cache_alignment = x86_clflush_size();
 	if (bsp_addr_config.cache_align_mult)
-		c->x86_cache_alignment *= bsp_addr_config.cache_align_mult;
+		x86_config.cache_alignment *= bsp_addr_config.cache_align_mult;
 
 	/* Do this last to avoid affecting ->x86_cache_bits. */
 	x86_config.phys_bits -= bsp_addr_config.phys_addr_reduction_bits;
diff -puN arch/x86/kernel/cpu/proc.c~x86_config-x86_cache_alignment arch/x86/kernel/cpu/proc.c
--- a/arch/x86/kernel/cpu/proc.c~x86_config-x86_cache_alignment	2024-02-22 10:08:59.704894774 -0800
+++ b/arch/x86/kernel/cpu/proc.c	2024-02-22 10:08:59.708894931 -0800
@@ -131,7 +131,7 @@ static int show_cpuinfo(struct seq_file
 		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
 #endif
 	seq_printf(m, "clflush size\t: %u\n", x86_clflush_size());
-	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
+	seq_printf(m, "cache_alignment\t: %d\n", x86_config.cache_alignment);
 	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
 		   x86_phys_bits(), x86_virt_bits());
 
_
