Message-ID: <159405969791.19216.14520281863577841635.stgit@djiang5-desk3.ch.intel.com>
Date: Mon, 06 Jul 2020 11:21:37 -0700
From: Dave Jiang <dave.jiang@...el.com>
To: vkoul@...nel.org, tglx@...utronix.de, mingo@...hat.com,
bp@...en8.de
Cc: Tony Luck <tony.luck@...el.com>, dmaengine@...r.kernel.org,
linux-kernel@...r.kernel.org, x86@...nel.org,
dan.j.williams@...el.com, ashok.raj@...el.com,
fenghua.yu@...el.com, tony.luck@...el.com, jing.lin@...el.com
Subject: [PATCH v3 2/6] x86/asm: move the raw asm in iosubmit_cmds512() to
special_insns.h

The MOVDIR64B instruction can be used by wrappers other than
iosubmit_cmds512(). Move the core asm code to special_insns.h and have
iosubmit_cmds512() call the new movdir64b() helper.

Signed-off-by: Dave Jiang <dave.jiang@...el.com>
Reviewed-by: Tony Luck <tony.luck@...el.com>
---
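Not part of the patch, just for illustration: once the helper lives in
special_insns.h, another wrapper can call it directly instead of
open-coding the opcode bytes. A minimal sketch; the struct and function
names below are hypothetical, not from this series:

#include <linux/types.h>
#include <linux/io.h>
#include <asm/special_insns.h>

/* Hypothetical 64-byte command descriptor built in regular memory. */
struct demo_desc {
	u8 bytes[64];
};

/*
 * Hypothetical wrapper: push one descriptor to a device portal as a
 * single 64-byte write via the shared movdir64b() helper. The
 * destination must be 64-byte aligned; the source has no alignment
 * requirement.
 */
static void demo_submit_desc(void __iomem *portal,
			     const struct demo_desc *desc)
{
	/* Mirrors iosubmit_cmds512(): the __iomem pointer is passed through. */
	movdir64b(portal, desc);
}
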
 arch/x86/include/asm/io.h            | 17 +++--------------
 arch/x86/include/asm/special_insns.h | 17 +++++++++++++++++
 2 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e1aa17a468a8..d726459d08e5 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -401,7 +401,7 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr,
 
 /**
  * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
- * @__dst: destination, in MMIO space (must be 512-bit aligned)
+ * @dst: destination, in MMIO space (must be 512-bit aligned)
  * @src: source
  * @count: number of 512 bits quantities to submit
  *
@@ -412,25 +412,14 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr,
  * Warning: Do not use this helper unless your driver has checked that the CPU
  * instruction is supported on the platform.
  */
-static inline void iosubmit_cmds512(void __iomem *__dst, const void *src,
+static inline void iosubmit_cmds512(void __iomem *dst, const void *src,
 				    size_t count)
 {
-	/*
-	 * Note that this isn't an "on-stack copy", just definition of "dst"
-	 * as a pointer to 64-bytes of stuff that is going to be overwritten.
-	 * In the MOVDIR64B case that may be needed as you can use the
-	 * MOVDIR64B instruction to copy arbitrary memory around. This trick
-	 * lets the compiler know how much gets clobbered.
-	 */
-	volatile struct { char _[64]; } *dst = __dst;
 	const u8 *from = src;
 	const u8 *end = from + count * 64;
 
 	while (from < end) {
-		/* MOVDIR64B [rdx], rax */
-		asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
-			     : "=m" (dst)
-			     : "d" (from), "a" (dst));
+		movdir64b(dst, from);
 		from += 64;
 	}
 }
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index eb8e781c4353..fb28caec9aa0 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -234,6 +234,23 @@ static inline void clwb(volatile void *__p)
 
 #define nop() asm volatile ("nop")
 
+static inline void movdir64b(void *__dst, const void *src)
+{
+	/*
+	 * Note that this isn't an "on-stack copy", just definition of "dst"
+	 * as a pointer to 64-bytes of stuff that is going to be overwritten.
+	 * In the MOVDIR64B case that may be needed as you can use the
+	 * MOVDIR64B instruction to copy arbitrary memory around. This trick
+	 * lets the compiler know how much gets clobbered.
+	 */
+	volatile struct { char _[64]; } *dst = __dst;
+
+	/* MOVDIR64B [rdx], rax */
+	asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+		     : "=m" (dst)
+		     : "d" (src), "a" (dst));
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_SPECIAL_INSNS_H */
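
Illustration only, not part of the patch: the kernel-doc above keeps the
warning that a driver must check for the instruction before using
iosubmit_cmds512(). A rough sketch of such a guard; the function and
parameter names here are made up:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/cpufeature.h>

/*
 * Hypothetical submission path: refuse to touch the portal unless the
 * CPU advertises MOVDIR64B, per the kernel-doc warning.
 */
static int demo_submit(void __iomem *portal, const void *descs, size_t count)
{
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B))
		return -EOPNOTSUPP;

	/* count is in 512-bit (64-byte) units; portal must be 64-byte aligned. */
	iosubmit_cmds512(portal, descs, count);
	return 0;
}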