Date:	Wed, 28 Mar 2007 16:30:59 +0200
From:	Andi Kleen <ak@...e.de>
To:	Michal Piotrowski <michal.k.k.piotrowski@...il.com>
Cc:	Linus Torvalds <torvalds@...ux-foundation.org>,
	Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
	Andrew Morton <akpm@...ux-foundation.org>
Subject: Re: Linux 2.6.21-rc5

On Tuesday 27 March 2007 20:53, Michal Piotrowski wrote:
> Linus Torvalds wrote:
> > There's various fixes here, ranging from some architecture updates (ia64, 
> > ARM, MIPS, SH, Sparc64) to KVM, networking and network drivers.
> > 
> > And random one-liners.
> > 
> 
> I found this in mm snapshot
> http://www.ussg.iu.edu/hypermail/linux/kernel/0703.2/1367.html
> it's in mainline too.
> 
> Andi, any progress with this bug?

Can you test this patch please? 

-Andi

i386/x86-64: Convert NMI reservation to be global

It doesn't make much sense to keep this reservation state per CPU, because
all the services that use NMIs run on all CPUs. So make it global.

This also fixes a warning about an unprotected use of smp_processor_id()
on preemptible kernels.

Signed-off-by: Andi Kleen <ak@...e.de>
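
For reference, here is a minimal userspace sketch of the reservation scheme
the patch converts to: a single global owner bitmask shared by all CPUs, with
atomic bit operations doing the coordination. GCC's __atomic builtins stand in
for the kernel's test_and_set_bit()/clear_bit(), and reserve_bit()/release_bit()
are just illustrative names, not the kernel entry points (those are
reserve_perfctr_nmi() etc., as in the diff below). The per-CPU variant had to
look up the current CPU's copy of the mask (hence the smp_processor_id()
warning on preemptible kernels); a plain global needs no CPU lookup at all.

/* Userspace illustration only -- not the kernel implementation.    */
/* One global bitmask; an atomic RMW gives exactly one winner per bit. */
#include <stdio.h>

static unsigned long perfctr_nmi_owner;		/* shared by all CPUs */

/* Reserve a counter bit; returns 1 on success, 0 if already owned. */
static int reserve_bit(unsigned int counter)
{
	unsigned long mask = 1UL << counter;

	return !(__atomic_fetch_or(&perfctr_nmi_owner, mask,
				   __ATOMIC_SEQ_CST) & mask);
}

/* Release a previously reserved counter bit. */
static void release_bit(unsigned int counter)
{
	__atomic_fetch_and(&perfctr_nmi_owner, ~(1UL << counter),
			   __ATOMIC_SEQ_CST);
}

int main(void)
{
	printf("%d\n", reserve_bit(0));		/* 1: first reservation wins */
	printf("%d\n", reserve_bit(0));		/* 0: bit already taken      */
	release_bit(0);
	printf("%d\n", reserve_bit(0));		/* 1: free again             */
	return 0;
}

Because the read-modify-write is atomic, two CPUs racing to reserve the same
counter see exactly one of them succeed, which is all the coordination the
comment above the owner masks asks for.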

Index: linux/arch/i386/kernel/nmi.c
===================================================================
--- linux.orig/arch/i386/kernel/nmi.c
+++ linux/arch/i386/kernel/nmi.c
@@ -41,8 +41,8 @@ int nmi_watchdog_enabled;
  *   different subsystems this reservation system just tries to coordinate
  *   things a little
  */
-static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
-static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
+static unsigned long perfctr_nmi_owner;
+static unsigned long evntsel_nmi_owner[3];
 
 static cpumask_t backtrace_mask = CPU_MASK_NONE;
 
@@ -124,7 +124,7 @@ int avail_to_resrv_perfctr_nmi_bit(unsig
 {
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	return (!test_bit(counter, &perfctr_nmi_owner));
 }
 
 /* checks the an msr for availability */
@@ -135,7 +135,7 @@ int avail_to_resrv_perfctr_nmi(unsigned 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	return (!test_bit(counter, &perfctr_nmi_owner));
 }
 
 int reserve_perfctr_nmi(unsigned int msr)
@@ -145,7 +145,7 @@ int reserve_perfctr_nmi(unsigned int msr
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+	if (!test_and_set_bit(counter, &perfctr_nmi_owner))
 		return 1;
 	return 0;
 }
@@ -157,7 +157,7 @@ void release_perfctr_nmi(unsigned int ms
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+	clear_bit(counter, &perfctr_nmi_owner);
 }
 
 int reserve_evntsel_nmi(unsigned int msr)
@@ -167,7 +167,7 @@ int reserve_evntsel_nmi(unsigned int msr
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
+	if (!test_and_set_bit(counter, &evntsel_nmi_owner[0]))
 		return 1;
 	return 0;
 }
@@ -179,7 +179,7 @@ void release_evntsel_nmi(unsigned int ms
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
+	clear_bit(counter, &evntsel_nmi_owner[0]);
 }
 
 static __cpuinit inline int nmi_known_cpu(void)
Index: linux/arch/x86_64/kernel/nmi.c
===================================================================
--- linux.orig/arch/x86_64/kernel/nmi.c
+++ linux/arch/x86_64/kernel/nmi.c
@@ -39,8 +39,8 @@ int panic_on_unrecovered_nmi;
  *   different subsystems this reservation system just tries to coordinate
  *   things a little
  */
-static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
-static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
+static unsigned perfctr_nmi_owner;
+static unsigned evntsel_nmi_owner[2];
 
 static cpumask_t backtrace_mask = CPU_MASK_NONE;
 
@@ -110,7 +110,7 @@ int avail_to_resrv_perfctr_nmi_bit(unsig
 {
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	return (!test_bit(counter, &perfctr_nmi_owner));
 }
 
 /* checks the an msr for availability */
@@ -121,7 +121,7 @@ int avail_to_resrv_perfctr_nmi(unsigned 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	return (!test_bit(counter, &perfctr_nmi_owner));
 }
 
 int reserve_perfctr_nmi(unsigned int msr)
@@ -131,7 +131,7 @@ int reserve_perfctr_nmi(unsigned int msr
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+	if (!test_and_set_bit(counter, &perfctr_nmi_owner))
 		return 1;
 	return 0;
 }
@@ -143,7 +143,7 @@ void release_perfctr_nmi(unsigned int ms
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+	clear_bit(counter, &perfctr_nmi_owner);
 }
 
 int reserve_evntsel_nmi(unsigned int msr)
@@ -153,7 +153,7 @@ int reserve_evntsel_nmi(unsigned int msr
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
+	if (!test_and_set_bit(counter, &evntsel_nmi_owner))
 		return 1;
 	return 0;
 }
@@ -165,7 +165,7 @@ void release_evntsel_nmi(unsigned int ms
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
+	clear_bit(counter, &evntsel_nmi_owner);
 }
 
 static __cpuinit inline int nmi_known_cpu(void)