lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date: Thu, 22 Feb 2024 10:39:35 -0800
From: Dave Hansen <dave.hansen@...ux.intel.com>
To: linux-kernel@...r.kernel.org
Cc: kirill.shutemov@...ux.intel.com,pbonzini@...hat.com,tglx@...utronix.de,x86@...nel.org,bp@...en8.de,Dave Hansen <dave.hansen@...ux.intel.com>
Subject: [RFC][PATCH 07/34] x86/mm: Introduce virtual address space limit helper


From: Dave Hansen <dave.hansen@...ux.intel.com>

This uses the same logic and approach that were used for the physical
address limits with x86_phys_bits(), and extends them to the virtual
address space.

Introduce a system-wide helper, x86_virt_bits(), for users to query
the size of the virtual address space.

Signed-off-by: Dave Hansen <dave.hansen@...ux.intel.com>
---

 b/arch/x86/events/amd/brs.c        |    2 +-
 b/arch/x86/events/amd/lbr.c        |    2 +-
 b/arch/x86/events/intel/pt.c       |    4 ++--
 b/arch/x86/include/asm/processor.h |    5 +++++
 b/arch/x86/kernel/cpu/proc.c       |    2 +-
 b/arch/x86/mm/maccess.c            |    4 ++--
 6 files changed, 12 insertions(+), 7 deletions(-)

diff -puN arch/x86/events/amd/brs.c~x86_virt_bits-func arch/x86/events/amd/brs.c
--- a/arch/x86/events/amd/brs.c~x86_virt_bits-func	2024-02-22 10:08:51.528573793 -0800
+++ b/arch/x86/events/amd/brs.c	2024-02-22 10:08:51.536574107 -0800
@@ -285,7 +285,7 @@ void amd_brs_drain(void)
 	struct perf_branch_entry *br = cpuc->lbr_entries;
 	union amd_debug_extn_cfg cfg;
 	u32 i, nr = 0, num, tos, start;
-	u32 shift = 64 - boot_cpu_data.x86_virt_bits;
+	u32 shift = 64 - x86_virt_bits();
 
 	/*
 	 * BRS event forced on PMC0,
diff -puN arch/x86/events/amd/lbr.c~x86_virt_bits-func arch/x86/events/amd/lbr.c
--- a/arch/x86/events/amd/lbr.c~x86_virt_bits-func	2024-02-22 10:08:51.528573793 -0800
+++ b/arch/x86/events/amd/lbr.c	2024-02-22 10:08:51.536574107 -0800
@@ -89,7 +89,7 @@ static __always_inline u64 amd_pmu_lbr_g
 
 static __always_inline u64 sign_ext_branch_ip(u64 ip)
 {
-	u32 shift = 64 - boot_cpu_data.x86_virt_bits;
+	u32 shift = 64 - x86_virt_bits();
 
 	return (u64)(((s64)ip << shift) >> shift);
 }
diff -puN arch/x86/events/intel/pt.c~x86_virt_bits-func arch/x86/events/intel/pt.c
--- a/arch/x86/events/intel/pt.c~x86_virt_bits-func	2024-02-22 10:08:51.528573793 -0800
+++ b/arch/x86/events/intel/pt.c	2024-02-22 10:08:51.536574107 -0800
@@ -1453,8 +1453,8 @@ static void pt_event_addr_filters_sync(s
 			 * canonical addresses does not affect the result of the
 			 * address filter.
 			 */
-			msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
-			msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
+			msr_a = clamp_to_ge_canonical_addr(a, x86_virt_bits());
+			msr_b = clamp_to_le_canonical_addr(b, x86_virt_bits());
 			if (msr_b < msr_a)
 				msr_a = msr_b = 0;
 		}
diff -puN arch/x86/include/asm/processor.h~x86_virt_bits-func arch/x86/include/asm/processor.h
--- a/arch/x86/include/asm/processor.h~x86_virt_bits-func	2024-02-22 10:08:51.532573950 -0800
+++ b/arch/x86/include/asm/processor.h	2024-02-22 10:08:51.536574107 -0800
@@ -772,4 +772,9 @@ static inline u8 x86_phys_bits(void)
 	return boot_cpu_data.x86_phys_bits;
 }
 
+static inline u8 x86_virt_bits(void)
+{
+	return boot_cpu_data.x86_virt_bits;
+}
+
 #endif /* _ASM_X86_PROCESSOR_H */
diff -puN arch/x86/kernel/cpu/proc.c~x86_virt_bits-func arch/x86/kernel/cpu/proc.c
--- a/arch/x86/kernel/cpu/proc.c~x86_virt_bits-func	2024-02-22 10:08:51.532573950 -0800
+++ b/arch/x86/kernel/cpu/proc.c	2024-02-22 10:08:51.536574107 -0800
@@ -133,7 +133,7 @@ static int show_cpuinfo(struct seq_file
 	seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
 	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
 	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
-		   x86_phys_bits(), c->x86_virt_bits);
+		   x86_phys_bits(), x86_virt_bits());
 
 	seq_puts(m, "power management:");
 	for (i = 0; i < 32; i++) {
diff -puN arch/x86/mm/maccess.c~x86_virt_bits-func arch/x86/mm/maccess.c
--- a/arch/x86/mm/maccess.c~x86_virt_bits-func	2024-02-22 10:08:51.536574107 -0800
+++ b/arch/x86/mm/maccess.c	2024-02-22 10:08:51.536574107 -0800
@@ -20,10 +20,10 @@ bool copy_from_kernel_nofault_allowed(co
 	 * is initialized.  Needed for instruction decoding in early
 	 * exception handlers.
 	 */
-	if (!boot_cpu_data.x86_virt_bits)
+	if (!x86_virt_bits())
 		return true;
 
-	return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
+	return __is_canonical_address(vaddr, x86_virt_bits());
 }
 #else
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
_

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ