Message-Id: <1560897679-228028-18-git-send-email-fenghua.yu@intel.com>
Date: Tue, 18 Jun 2019 15:41:19 -0700
From: Fenghua Yu <fenghua.yu@...el.com>
To: "Thomas Gleixner" <tglx@...utronix.de>,
"Ingo Molnar" <mingo@...hat.com>, "Borislav Petkov" <bp@...en8.de>,
"H Peter Anvin" <hpa@...or.com>,
"Peter Zijlstra" <peterz@...radead.org>,
"Andrew Morton" <akpm@...ux-foundation.org>,
"Dave Hansen" <dave.hansen@...el.com>,
"Paolo Bonzini" <pbonzini@...hat.com>,
"Radim Krcmar" <rkrcmar@...hat.com>,
"Christopherson Sean J" <sean.j.christopherson@...el.com>,
"Ashok Raj" <ashok.raj@...el.com>,
"Tony Luck" <tony.luck@...el.com>,
"Dan Williams" <dan.j.williams@...el.com>,
"Xiaoyao Li " <xiaoyao.li@...el.com>,
"Sai Praneeth Prakhya" <sai.praneeth.prakhya@...el.com>,
"Ravi V Shankar" <ravi.v.shankar@...el.com>
Cc: "linux-kernel" <linux-kernel@...r.kernel.org>,
"x86" <x86@...nel.org>, kvm@...r.kernel.org,
Fenghua Yu <fenghua.yu@...el.com>
Subject: [PATCH v9 17/17] x86/split_lock: Warn on unaligned address in atomic bit operations
An atomic bit operation operates on one bit within a single unsigned long
location in a bitmap. In 64-bit mode, the location is at:

  base address of the bitmap + (bit offset in the bitmap / 64) * 8

If the base address is not aligned to unsigned long, every unsigned long
location operated on by the atomic operation is also unaligned, and a
split lock occurs whenever such a location crosses two cache lines.
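A minimal user-space sketch (illustration only, not part of the patch;
the base address and bit number below are made up) of how the operated-on
word is derived and when it straddles a 64-byte cache line:

#include <stdint.h>
#include <stdio.h>

static uintptr_t bit_word_addr(uintptr_t base, unsigned long nr)
{
	/* base address of the bitmap + (bit offset / 64) * 8 */
	return base + (nr / 64) * 8;
}

int main(void)
{
	/* Hypothetical bitmap base 62 bytes into a 64-byte cache line. */
	uintptr_t base = 0x1000 + 62;
	uintptr_t word = bit_word_addr(base, 0);

	/* The 8-byte word covers bytes 62..69 and crosses the line. */
	printf("word at %#lx crosses a cache line: %d\n",
	       (unsigned long)word,
	       (word / 64) != ((word + 7) / 64));
	return 0;
}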
So checking the alignment of the base address proactively audits potential
split lock issues in the atomic bit operations. Whether a real split lock
is hit depends on the bit offset.

After analyzing the warning, a kernel developer can fix the potential
split lock issue by aligning the base address to unsigned long instead of
waiting for a real split lock to happen.
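A hypothetical caller that the new WARN_ON_ONCE() is meant to flag (the
buffer and cast below are made up for illustration, not taken from real
kernel code):

#include <linux/bitops.h>

/* A raw byte buffer gives no unsigned long alignment guarantee at +1. */
static char buf[16];

static void mark_busy(int nr)
{
	/*
	 * The cast hides the misalignment from the type system; the
	 * WARN_ON_ONCE() added to set_bit() catches it at run time.
	 */
	set_bit(nr, (unsigned long *)(buf + 1));
}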
With allyesconfig on 5.2-rc1, this patch increases the vmlinux size by
0.2% and the bzImage size by 0.3%.
Suggested-by: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Fenghua Yu <fenghua.yu@...el.com>
---
FYI: with this patch applied, I haven't seen any warning triggered during
boot and limited run time tests on a few platforms.
arch/x86/include/asm/bitops.h | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 8e790ec219a5..44d7a353d6fd 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -14,6 +14,7 @@
#endif
#include <linux/compiler.h>
+#include <linux/bug.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>
@@ -67,6 +68,8 @@
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)));
+
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
: CONST_MASK_ADDR(nr, addr)
@@ -105,6 +108,8 @@ static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)));
+
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
: CONST_MASK_ADDR(nr, addr)
@@ -137,6 +142,9 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
bool negative;
+
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)));
+
asm volatile(LOCK_PREFIX "andb %2,%1"
CC_SET(s)
: CC_OUT(s) (negative), WBYTE_ADDR(addr)
@@ -186,6 +194,8 @@ static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
*/
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)));
+
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "xorb %1,%0"
: CONST_MASK_ADDR(nr, addr)
@@ -206,6 +216,8 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
*/
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)));
+
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
@@ -252,6 +264,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
*/
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)));
+
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}
@@ -305,6 +319,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
*/
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)));
+
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
--
2.19.1