Message-Id: <20241222033917.1754495-3-guoren@kernel.org>
Date: Sat, 21 Dec 2024 22:39:16 -0500
From: guoren@...nel.org
To: paul.walmsley@...ive.com,
palmer@...belt.com,
guoren@...nel.org,
bjorn@...osinc.com,
conor@...nel.org,
leobras@...hat.com,
peterz@...radead.org,
parri.andrea@...il.com,
will@...nel.org,
longman@...hat.com,
boqun.feng@...il.com,
arnd@...db.de,
alexghiti@...osinc.com,
ajones@...tanamicro.com,
rkrcmar@...tanamicro.com
Cc: linux-riscv@...ts.infradead.org,
linux-kernel@...r.kernel.org,
Guo Ren <guoren@...ux.alibaba.com>
Subject: [PATCH 2/3] RISC-V: paravirt: Add pvqspinlock frontend
From: Guo Ren <guoren@...ux.alibaba.com>
Add an unfair, virtualization-friendly qspinlock frontend that halts
the virtual CPU rather than spinning.

Use static_call to switch between:

  native_queued_spin_lock_slowpath()	__pv_queued_spin_lock_slowpath()
  native_queued_spin_unlock()		__pv_queued_spin_unlock()

Add the pv_wait & pv_kick implementations.
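
The boot-time switch relies on the kernel's static_call machinery.
Condensed from the hunks below (a sketch of the pattern, not a
standalone file), it works roughly like this:

  /* Default to the native slowpath; rewritten once at boot. */
  DEFINE_STATIC_CALL(pv_queued_spin_lock_slowpath,
                     native_queued_spin_lock_slowpath);

  static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
  {
          /* A direct call that static_call_update() can re-point later. */
          static_call(pv_queued_spin_lock_slowpath)(lock, val);
  }

  /* At boot, once the SBI PVLOCK extension is detected: */
  static_call_update(pv_queued_spin_lock_slowpath,
                     __pv_queued_spin_lock_slowpath);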
Reviewed-by: Leonardo Bras <leobras@...hat.com>
Signed-off-by: Guo Ren <guoren@...ux.alibaba.com>
Signed-off-by: Guo Ren <guoren@...nel.org>
---
arch/riscv/Kconfig | 12 ++++
arch/riscv/include/asm/Kbuild | 1 -
arch/riscv/include/asm/qspinlock.h | 35 +++++++++++
arch/riscv/include/asm/qspinlock_paravirt.h | 28 +++++++++
arch/riscv/kernel/Makefile | 1 +
arch/riscv/kernel/qspinlock_paravirt.c | 67 +++++++++++++++++++++
arch/riscv/kernel/setup.c | 4 ++
7 files changed, 147 insertions(+), 1 deletion(-)
create mode 100644 arch/riscv/include/asm/qspinlock.h
create mode 100644 arch/riscv/include/asm/qspinlock_paravirt.h
create mode 100644 arch/riscv/kernel/qspinlock_paravirt.c
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index d4a7ca0388c0..e241ac39ecd6 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -1071,6 +1071,18 @@ config PARAVIRT_TIME_ACCOUNTING
 
 	  If in doubt, say N here.
 
+config PARAVIRT_SPINLOCKS
+	bool "Paravirtualization layer for spinlocks"
+	depends on QUEUED_SPINLOCKS
+	default y
+	help
+	  Paravirtualized spinlocks allow an unfair qspinlock to replace the
+	  test-and-set kvm-guest virt spinlock implementation with something
+	  virtualization-friendly, for example, halting the virtual CPU rather
+	  than spinning.
+
+	  If you are unsure how to answer this question, answer Y.
+
 config RELOCATABLE
 	bool "Build a relocatable kernel"
 	depends on MMU && 64BIT && !XIP_KERNEL
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index de13d5a234f8..c726330d2b9f 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -12,6 +12,5 @@ generic-y += spinlock_types.h
 generic-y += ticket_spinlock.h
 generic-y += qrwlock.h
 generic-y += qrwlock_types.h
-generic-y += qspinlock.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
diff --git a/arch/riscv/include/asm/qspinlock.h b/arch/riscv/include/asm/qspinlock.h
new file mode 100644
index 000000000000..1d9f32334ff1
--- /dev/null
+++ b/arch/riscv/include/asm/qspinlock.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c), 2024 Alibaba
+ * Authors:
+ * Guo Ren <guoren@...ux.alibaba.com>
+ */
+
+#ifndef _ASM_RISCV_QSPINLOCK_H
+#define _ASM_RISCV_QSPINLOCK_H
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#include <asm/qspinlock_paravirt.h>
+
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD (1 << 15)
+
+void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void __pv_init_lock_hash(void);
+void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	static_call(pv_queued_spin_lock_slowpath)(lock, val);
+}
+
+#define queued_spin_unlock queued_spin_unlock
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	static_call(pv_queued_spin_unlock)(lock);
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_RISCV_QSPINLOCK_H */
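
For context, the uncontended fast path stays in <asm-generic/qspinlock.h>
(included at the bottom of the header above); only the contended case
reaches the static_call. Roughly (paraphrasing asm-generic, not part of
this patch):

  static __always_inline void queued_spin_lock(struct qspinlock *lock)
  {
          int val = 0;

          /* Uncontended: 0 -> _Q_LOCKED_VAL with acquire semantics. */
          if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
                  return;

          /* Contended: dispatched via static_call to the native or pv slowpath. */
          queued_spin_lock_slowpath(lock, val);
  }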
diff --git a/arch/riscv/include/asm/qspinlock_paravirt.h b/arch/riscv/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000000..a365203dd782
--- /dev/null
+++ b/arch/riscv/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c), 2024 Alibaba Cloud
+ * Authors:
+ * Guo Ren <guoren@...ux.alibaba.com>
+ */
+
+#ifndef _ASM_RISCV_QSPINLOCK_PARAVIRT_H
+#define _ASM_RISCV_QSPINLOCK_PARAVIRT_H
+
+void pv_wait(u8 *ptr, u8 val);
+void pv_kick(int cpu);
+
+void dummy_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void dummy_queued_spin_unlock(struct qspinlock *lock);
+
+DECLARE_STATIC_CALL(pv_queued_spin_lock_slowpath, dummy_queued_spin_lock_slowpath);
+DECLARE_STATIC_CALL(pv_queued_spin_unlock, dummy_queued_spin_unlock);
+
+void __init pv_qspinlock_init(void);
+
+void __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);
+
+bool pv_is_native_spin_unlock(void);
+
+void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+#endif /* _ASM_RISCV_QSPINLOCK_PARAVIRT_H */
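
Note: the dummy_* prototypes above exist only to give DECLARE_STATIC_CALL
a typed function to key off; they are never defined. A minimal sketch of
the pattern (dummy_fn, my_call and real_fn are made-up names for
illustration):

  void dummy_fn(int x);                   /* type carrier, no body */
  DECLARE_STATIC_CALL(my_call, dummy_fn); /* header side */
  DEFINE_STATIC_CALL(my_call, real_fn);   /* in exactly one .c file */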
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 063d1faf5a53..79f823e0e57d 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -123,3 +123,4 @@ obj-$(CONFIG_COMPAT) += compat_vdso/
 obj-$(CONFIG_64BIT)		+= pi/
 obj-$(CONFIG_ACPI)		+= acpi.o
 obj-$(CONFIG_ACPI_NUMA)	+= acpi_numa.o
+obj-$(CONFIG_PARAVIRT_SPINLOCKS) += qspinlock_paravirt.o
diff --git a/arch/riscv/kernel/qspinlock_paravirt.c b/arch/riscv/kernel/qspinlock_paravirt.c
new file mode 100644
index 000000000000..4ec4765f57f3
--- /dev/null
+++ b/arch/riscv/kernel/qspinlock_paravirt.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c), 2024 Alibaba Cloud
+ * Authors:
+ * Guo Ren <guoren@...ux.alibaba.com>
+ */
+
+#include <linux/static_call.h>
+#include <asm/qspinlock_paravirt.h>
+#include <asm/sbi.h>
+
+void pv_kick(int cpu)
+{
+	sbi_ecall(SBI_EXT_PVLOCK, SBI_EXT_PVLOCK_KICK_CPU,
+		  cpuid_to_hartid_map(cpu), 0, 0, 0, 0, 0);
+}
+
+void pv_wait(u8 *ptr, u8 val)
+{
+	unsigned long flags;
+
+	/* Halting is not safe in NMI context; fall back to spinning. */
+	if (in_nmi())
+		return;
+
+	local_irq_save(flags);
+	/* Don't halt if the lock word already changed under us. */
+	if (READ_ONCE(*ptr) != val)
+		goto out;
+
+	/* Sleep in WFI until an interrupt (e.g. a kick) arrives. */
+	wait_for_interrupt();
+out:
+	local_irq_restore(flags);
+}
+
+static void native_queued_spin_unlock(struct qspinlock *lock)
+{
+	smp_store_release(&lock->locked, 0);
+}
+
+DEFINE_STATIC_CALL(pv_queued_spin_lock_slowpath, native_queued_spin_lock_slowpath);
+EXPORT_STATIC_CALL(pv_queued_spin_lock_slowpath);
+
+DEFINE_STATIC_CALL(pv_queued_spin_unlock, native_queued_spin_unlock);
+EXPORT_STATIC_CALL(pv_queued_spin_unlock);
+
+void __init pv_qspinlock_init(void)
+{
+	if (num_possible_cpus() == 1)
+		return;
+
+	if (!sbi_probe_extension(SBI_EXT_PVLOCK))
+		return;
+
+	pr_info("PV qspinlocks enabled\n");
+	__pv_init_lock_hash();
+
+	static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
+	static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
+}
+
+bool pv_is_native_spin_unlock(void)
+{
+	return static_call_query(pv_queued_spin_unlock) == native_queued_spin_unlock;
+}
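
For reviewers unfamiliar with the generic side: kernel/locking/
qspinlock_paravirt.h drives these two hooks roughly as sketched below
(heavily simplified; the real code also manages the MCS queue and a
lock-to-node hash table):

  /* Waiter: spin up to SPIN_THRESHOLD times, then halt the vCPU. */
  for (;;) {
          int loop;

          for (loop = SPIN_THRESHOLD; loop; loop--) {
                  if (trylock_clear_pending(lock))
                          return;         /* got the lock */
                  cpu_relax();
          }
          /* Mark the lock so unlock takes the slowpath, then sleep. */
          smp_store_mb(lock->locked, _Q_SLOW_VAL);
          pv_wait(&lock->locked, _Q_SLOW_VAL);
  }

  /* Unlocker: release the lock, then wake the halted waiter. */
  smp_store_release(&lock->locked, 0);
  pv_kick(node->cpu);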
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 45010e71df86..8b51ff5c7300 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -278,6 +278,10 @@ static void __init riscv_spinlock_init(void)
pr_err("Queued spinlock without Zabha or Ziccrse");
else
pr_info("Queued spinlock %s: enabled\n", using_ext);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+ pv_qspinlock_init();
+#endif
}
extern void __init init_rt_signal_env(void);
--
2.40.1