Message-ID: <Y16osqK3kbCz8Sf3@zn.tnic>
Date:   Sun, 30 Oct 2022 17:39:14 +0100
From:   Borislav Petkov <bp@...en8.de>
To:     Juergen Gross <jgross@...e.com>
Cc:     linux-kernel@...r.kernel.org, x86@...nel.org,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        "H. Peter Anvin" <hpa@...or.com>
Subject: Re: [PATCH v4 16/16] x86/mtrr: simplify mtrr_ops initialization

On Sun, Oct 30, 2022 at 04:05:29PM +0100, Juergen Gross wrote:
> As the specific ops variables are available for X86_32 only, this
> would require adding an "#ifdef CONFIG_X86_32" around the code block
> doing the assignments. Otherwise the build would fail.

Well, it looks like my compiler is smart enough to eliminate all that
dead code; see the diff below.

I've added the asm markers "#begin" and "#end" and the resulting asm
looks like this:

# arch/x86/kernel/cpu/mtrr/mtrr.c:666:          asm volatile("#begin");
        call    __sanitizer_cov_trace_pc        #
#APP
# 666 "arch/x86/kernel/cpu/mtrr/mtrr.c" 1
        #begin
# 0 "" 2
# arch/x86/kernel/cpu/mtrr/mtrr.c:693:          asm volatile("#end");
# 693 "arch/x86/kernel/cpu/mtrr/mtrr.c" 1
        #end
# 0 "" 2
# arch/x86/kernel/cpu/mtrr/mtrr.c:630:  phys_addr = 32;
#NO_APP

which basically says that everything between lines 666 and 693 has
been eliminated.

I have the suspicion, though, that clang might not be that smart.

Lemme test it a bit.
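
For reference, here's a minimal standalone sketch of the same marker
check outside the kernel tree (feature_enabled(), vendor_ops and
cur_ops are made-up stand-ins, not the real kernel symbols). Build
with "gcc -O2 -S markers.c" and check whether anything - in
particular the reference to the deliberately undefined vendor_ops -
ends up between "#begin" and "#end" in markers.s. If the dead code is
eliminated, "gcc -O2 markers.c" should also link despite the missing
definition:

/* markers.c - sketch of the "#begin"/"#end" trick above; not kernel
 * code.  vendor_ops is declared but never defined, mimicking the
 * 32-bit-only mtrr_ops variables on a 64-bit build.
 */
#include <stdio.h>

struct ops {
        int (*setup)(void);
};

/* Declaration only - no definition anywhere in the program. */
extern const struct ops vendor_ops;

static const struct ops *cur_ops;

/* Stand-in for cpu_feature_enabled(): compile-time false here. */
static inline int feature_enabled(void)
{
        return 0;
}

static void pick_ops(void)
{
        asm volatile("#begin");
        if (feature_enabled())
                cur_ops = &vendor_ops;  /* must be eliminated to link */
        asm volatile("#end");
}

int main(void)
{
        pick_ops();
        printf("cur_ops = %p\n", (void *)cur_ops);
        return 0;
}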

---

diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 7ba68356c0ff..d499c83b2ad7 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -663,25 +663,26 @@ void __init mtrr_bp_init(void)
 			phys_addr = 32;
 		}
 	} else {
+		asm volatile("#begin");
 		switch (boot_cpu_data.x86_vendor) {
 		case X86_VENDOR_AMD:
 			if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
 				/* Pre-Athlon (K6) AMD CPU MTRRs */
-				mtrr_if = vendor_mtrr_ops(amd_mtrr_ops);
+				mtrr_if = &amd_mtrr_ops;
 				size_or_mask = SIZE_OR_MASK_BITS(32);
 				size_and_mask = 0;
 			}
 			break;
 		case X86_VENDOR_CENTAUR:
 			if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
-				mtrr_if = vendor_mtrr_ops(centaur_mtrr_ops);
+				mtrr_if = &centaur_mtrr_ops;
 				size_or_mask = SIZE_OR_MASK_BITS(32);
 				size_and_mask = 0;
 			}
 			break;
 		case X86_VENDOR_CYRIX:
 			if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
-				mtrr_if = vendor_mtrr_ops(cyrix_mtrr_ops);
+				mtrr_if = &cyrix_mtrr_ops;
 				size_or_mask = SIZE_OR_MASK_BITS(32);
 				size_and_mask = 0;
 			}
@@ -689,6 +690,7 @@ void __init mtrr_bp_init(void)
 		default:
 			break;
 		}
+		asm volatile("#end");
 	}
 
 	if (mtrr_if) {
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 7a7387356192..02eb5871492d 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -68,11 +68,6 @@ void mtrr_wrmsr(unsigned, unsigned, unsigned);
 extern const struct mtrr_ops amd_mtrr_ops;
 extern const struct mtrr_ops cyrix_mtrr_ops;
 extern const struct mtrr_ops centaur_mtrr_ops;
-#ifdef CONFIG_X86_64
-#define vendor_mtrr_ops(x) NULL
-#else
-#define vendor_mtrr_ops(x) &(x)
-#endif
 
 extern int changed_by_mtrr_cleanup;
 extern int mtrr_cleanup(unsigned address_bits);


-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
