Message-Id: <20220724122517.1019187-11-guoren@kernel.org>
Date:   Sun, 24 Jul 2022 08:25:17 -0400
From:   guoren@...nel.org
To:     palmer@...osinc.com, heiko@...ech.de, hch@...radead.org,
        arnd@...db.de, peterz@...radead.org, will@...nel.org,
        boqun.feng@...il.com, longman@...hat.com, mingo@...hat.com,
        philipp.tomsich@...ll.eu, cmuellner@...ux.com,
        linux-kernel@...r.kernel.org, David.Laight@...LAB.COM
Cc:     linux-riscv@...ts.infradead.org, linux-csky@...r.kernel.org,
        Guo Ren <guoren@...ux.alibaba.com>, Guo Ren <guoren@...nel.org>
Subject: [PATCH V8 10/10] csky: Add qspinlock support

From: Guo Ren <guoren@...ux.alibaba.com>

Enable qspinlock, subject to the requirements noted in commit
a8ad07e5240c9 ("asm-generic: qspinlock: Indicate the use of mixed-size
atomics").

C-SKY implements all of its atomic operations with "ldex/stex", and it
gives "ldex/stex" a strong forward-progress guarantee: once ldex has
pulled the cache line into L1, other cores are blocked from snooping
that address for several cycles, so the paired stex is very likely to
succeed.
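
As an illustration (not part of the patch), every ldex/stex-based
atomic boils down to a retry loop of the shape sketched below; the
forward-progress guarantee means the stex almost always succeeds on
the first pass, which bounds the number of retries even under
contention. ldex()/stex() here are hypothetical C wrappers around the
instructions, not real kernel helpers:

	/*
	 * Hypothetical sketch of an ldex/stex read-modify-write loop.
	 * ldex(p) loads *p and takes the line exclusively into L1;
	 * stex(p, v) stores v and returns nonzero only if exclusivity
	 * was held since the matching ldex.
	 */
	static inline u32 llsc_xchg32(u32 *p, u32 newval)
	{
		u32 old;

		do {
			old = ldex(p);		/* load-exclusive */
		} while (!stex(p, newval));	/* retry if line was lost */

		return old;
	}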

Signed-off-by: Guo Ren <guoren@...ux.alibaba.com>
Signed-off-by: Guo Ren <guoren@...nel.org>
---
 arch/csky/Kconfig               | 16 ++++++++++++++++
 arch/csky/include/asm/Kbuild    |  2 ++
 arch/csky/include/asm/cmpxchg.h | 20 ++++++++++++++++++++
 3 files changed, 38 insertions(+)

diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index dfdb436b6078..09f7d1f06bca 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -354,6 +354,22 @@ config HAVE_EFFICIENT_UNALIGNED_STRING_OPS
 	  Say Y here to enable EFFICIENT_UNALIGNED_STRING_OPS. Some CPU models could
 	  deal with unaligned access by hardware.
 
+choice
+	prompt "C-SKY spinlock type"
+	default CSKY_TICKET_SPINLOCKS
+
+config CSKY_TICKET_SPINLOCKS
+	bool "Using ticket spinlock"
+
+config CSKY_QUEUED_SPINLOCKS
+	bool "Using queued spinlock"
+	depends on SMP
+	select ARCH_USE_QUEUED_SPINLOCKS
+	help
+	  Say Y here only if your micro-architecture gives LL/SC a strong
+	  forward-progress guarantee; otherwise, stay with ticket spinlocks.
+endchoice
+
 endmenu
 
 source "arch/csky/Kconfig.platforms"
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 103207a58f97..b70b14de904f 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -3,10 +3,12 @@ generic-y += asm-offsets.h
 generic-y += extable.h
 generic-y += gpio.h
 generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
 generic-y += spinlock.h
 generic-y += spinlock_types.h
 generic-y += qrwlock.h
 generic-y += qrwlock_types.h
+generic-y += qspinlock.h
 generic-y += parport.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index 5b8faccd65e4..5f693fadb56c 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -15,6 +15,26 @@ extern void __bad_xchg(void);
 	__typeof__(*(ptr)) __ret;				\
 	unsigned long tmp;					\
 	switch (size) {						\
+	case 2: {						\
+		u32 ret;					\
+		u32 shift = ((ulong)__ptr & 2) ? 16 : 0;	\
+		u32 mask = 0xffff << shift;			\
+		__ptr = (__typeof__(ptr))((ulong)__ptr & ~2);	\
+		__asm__ __volatile__ (				\
+			"1:	ldex.w %0, (%4)\n"		\
+			"	and    %1, %0, %2\n"		\
+			"	or     %1, %1, %3\n"		\
+			"	stex.w %1, (%4)\n"		\
+			"	bez    %1, 1b\n"		\
+			: "=&r" (ret), "=&r" (tmp)		\
+			: "r" (~mask),				\
+			  "r" ((u32)__new << shif),		\
+			  "r" (__ptr)				\
+			: "memory");				\
+		__ret = (__typeof__(*(ptr)))			\
+			((ret & mask) >> shif);			\
+		break;						\
+	}							\
 	case 4:							\
 		asm volatile (					\
 		"1:	ldex.w		%0, (%3) \n"		\
-- 
2.36.1
