Message-Id: <11979320551036-git-send-email-gcosta@redhat.com>
Date: Mon, 17 Dec 2007 20:52:41 -0200
From: Glauber de Oliveira Costa <gcosta@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: akpm@...ux-foundation.org, glommer@...il.com, tglx@...utronix.de,
mingo@...e.hu, ehabkost@...hat.com, jeremy@...p.org,
avi@...ranet.com, anthony@...emonkey.ws,
virtualization@...ts.linux-foundation.org, rusty@...tcorp.com.au,
ak@...e.de, chrisw@...s-sol.org, rostedt@...dmis.org,
hpa@...or.com, zach@...are.com, roland@...hat.com,
Glauber de Oliveira Costa <gcosta@...hat.com>
Subject: [PATCH 18/21] unify prefetch operations

This patch moves the prefetch() and prefetchw() functions to processor.h,
unifying the 32-bit and 64-bit versions.
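For context, callers use these helpers to hint at upcoming memory accesses.
A minimal sketch (illustrative only, not part of this patch; struct node and
sum_list() below are made up) of prefetching the next node while walking a
singly linked list:

	struct node {
		struct node *next;
		int payload;
	};

	static int sum_list(struct node *head)
	{
		int sum = 0;
		struct node *p;

		for (p = head; p; p = p->next) {
			/*
			 * Start fetching the next node while we work on the
			 * current one.  Where the CPU lacks the required
			 * feature, the alternative falls back to BASE_PREFETCH
			 * (a NOP on 32-bit, prefetcht0 on 64-bit).
			 */
			if (p->next)
				prefetch(p->next);
			sum += p->payload;
		}
		return sum;
	}
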
Signed-off-by: Glauber de Oliveira Costa <gcosta@...hat.com>
---
include/asm-x86/processor.h | 30 ++++++++++++++++++++++++++++++
include/asm-x86/processor_32.h | 25 -------------------------
include/asm-x86/processor_64.h | 8 --------
3 files changed, 30 insertions(+), 33 deletions(-)
Index: linux-2.6-x86/include/asm-x86/processor.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/processor.h
+++ linux-2.6-x86/include/asm-x86/processor.h
@@ -592,6 +592,36 @@ extern char ignore_fpu_irq;
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
+#ifdef CONFIG_X86_32
+#define BASE_PREFETCH ASM_NOP4
+#define ARCH_HAS_PREFETCH
+#else
+#define BASE_PREFETCH "prefetcht0 (%1)"
+#endif
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+/* It's not worth caring about 3dnow! prefetches for the K6
+ because they are microcoded there and very slow.
+ However, we currently don't do prefetches for pre-XP Athlons;
+ that should be fixed. */
+static inline void prefetch(const void *x)
+{
+ alternative_input(BASE_PREFETCH,
+ "prefetchnta (%1)",
+ X86_FEATURE_XMM,
+ "r" (x));
+}
+
+/* 3dnow! prefetch to get an exclusive cache line. Useful for
+ spinlocks to avoid one state transition in the cache coherency protocol. */
+static inline void prefetchw(const void *x)
+{
+ alternative_input(BASE_PREFETCH,
+ "prefetchw (%1)",
+ X86_FEATURE_3DNOW,
+ "r" (x));
+}
+
#define spin_lock_prefetch(x) prefetchw(x)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
Index: linux-2.6-x86/include/asm-x86/processor_32.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/processor_32.h
+++ linux-2.6-x86/include/asm-x86/processor_32.h
@@ -228,29 +228,4 @@ extern unsigned long thread_saved_pc(str
#define ASM_NOP_MAX 8
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth to care about 3dnow! prefetches for the K6
- because they are microcoded there and very slow.
- However we don't do prefetches for pre XP Athlons currently
- That should be fixed. */
-static inline void prefetch(const void *x)
-{
- alternative_input(ASM_NOP4,
- "prefetchnta (%1)",
- X86_FEATURE_XMM,
- "r" (x));
-}
-
-#define ARCH_HAS_PREFETCH
-
-/* 3dnow! prefetch to get an exclusive cache line. Useful for
- spinlocks to avoid one state transition in the cache coherency protocol. */
-static inline void prefetchw(const void *x)
-{
- alternative_input(ASM_NOP4,
- "prefetchw (%1)",
- X86_FEATURE_3DNOW,
- "r" (x));
-}
-
#endif /* __ASM_I386_PROCESSOR_H */
Index: linux-2.6-x86/include/asm-x86/processor_64.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/processor_64.h
+++ linux-2.6-x86/include/asm-x86/processor_64.h
@@ -124,12 +124,4 @@ DECLARE_PER_CPU(struct orig_ist, orig_is
#define ASM_NOP_MAX 8
-static inline void prefetchw(void *x)
-{
- alternative_input("prefetcht0 (%1)",
- "prefetchw (%1)",
- X86_FEATURE_3DNOW,
- "r" (x));
-}
-
#endif /* __ASM_X86_64_PROCESSOR_H */
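
As a usage note (again just an illustration, not part of the patch; struct
counter and bump() below are made up), prefetchw() is intended for data that
is about to be written, so the cache line can be requested in an exclusive
state up front instead of first being read shared and then upgraded; this is
why spin_lock_prefetch() maps to it:

	struct counter {
		long value;
	};

	static void bump(struct counter *c)
	{
		/*
		 * We know we will store to c->value shortly; prefetchw()
		 * asks for the line in an exclusive state now, saving one
		 * coherency-state transition on CPUs that support it.
		 */
		prefetchw(&c->value);

		/* ... other work can overlap with the prefetch ... */

		c->value++;
	}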
--