Message-Id: <1538987374-51217-3-git-send-email-yi.y.sun@linux.intel.com>
Date: Mon, 8 Oct 2018 16:29:34 +0800
From: Yi Sun <yi.y.sun@...ux.intel.com>
To: linux-kernel@...r.kernel.org
Cc: x86@...nel.org, tglx@...utronix.de, jgross@...e.com,
chao.p.peng@...el.com, chao.gao@...el.com,
isaku.yamahata@...el.com, michael.h.kelley@...rosoft.com,
tianyu.lan@...rosoft.com, Yi Sun <yi.y.sun@...ux.intel.com>,
"K. Y. Srinivasan" <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Stephen Hemminger <sthemmin@...rosoft.com>,
"Michael Kelley (EOSG)" <Michael.H.Kelley@...rosoft.com>
Subject: [PATCH v4 2/2] locking/pvqspinlock, hv: Enable PV qspinlock for Hyper-V
Follow the PV spinlock mechanism to implement the callback functions
that allow the CPU idling and kicking operations on Hyper-V.
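For context, this is roughly how the generic pv qspinlock slowpath in
kernel/locking/qspinlock_paravirt.h drives the two callbacks (a
simplified sketch for illustration, not part of this patch):

    /* Waiter side: instead of spinning, block the vCPU until kicked. */
    pv_wait(&lock->locked, _Q_SLOW_VAL);   /* -> hv_qlock_wait() */

    /* Unlocker side: wake the vCPU parked at the head of the queue. */
    pv_kick(node->cpu);                    /* -> hv_qlock_kick(): IPI */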
Cc: "K. Y. Srinivasan" <kys@...rosoft.com>
Cc: Haiyang Zhang <haiyangz@...rosoft.com>
Cc: Stephen Hemminger <sthemmin@...rosoft.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Michael Kelley (EOSG) <Michael.H.Kelley@...rosoft.com>
Cc: Juergen Gross <jgross@...e.com>
Signed-off-by: Yi Sun <yi.y.sun@...ux.intel.com>
---
v3->v4:
- To avoid a possible hang (if hv_qlock_wait() can be interrupted,
the kick IPI may be delivered and handled before
HV_X64_MSR_GUEST_IDLE is read, after which the vCPU idles with no
wakeup pending), disable interrupts before the READ_ONCE, then
restore them after reading HV_X64_MSR_GUEST_IDLE.
(suggested by Juergen Gross)
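
For illustration, a minimal sketch of the window the v3 shape left
open (reconstructed from the note above; assumed v3 ordering, not
quoted from the v3 posting):

    /* v3-style wait, racy if it can be interrupted: */
    if (READ_ONCE(*byte) != val)
        return;
    /*
     * An interrupt here can run the kick IPI handler before the
     * vCPU goes idle ...
     */
    rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
    /* ... so the vCPU then idles with no wakeup pending: hang. */

With interrupts disabled across that window, the kick IPI stays
pending until the MSR read parks the vCPU, so the idle state is
exited immediately instead of never.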
---
Documentation/admin-guide/kernel-parameters.txt | 5 ++
arch/x86/hyperv/Makefile | 4 ++
arch/x86/hyperv/hv_spinlock.c | 85 +++++++++++++++++++++++++
arch/x86/include/asm/mshyperv.h | 1 +
arch/x86/kernel/cpu/mshyperv.c | 14 ++++
5 files changed, 109 insertions(+)
create mode 100644 arch/x86/hyperv/hv_spinlock.c
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 92eb1f4..0fc8448 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1385,6 +1385,11 @@
hvc_iucv_allow= [S390] Comma-separated list of z/VM user IDs.
If specified, z/VM IUCV HVC accepts connections
from listed z/VM user IDs only.
+
+ hv_nopvspin [X86,HYPER_V]
+ Disables the qspinlock slowpath using Hyper-V PV
+ optimizations.
+
keep_bootcon [KNL]
Do not unregister boot console at start. This is only
useful for debugging when something happens in the window
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
index b21ee65..1c11f94 100644
--- a/arch/x86/hyperv/Makefile
+++ b/arch/x86/hyperv/Makefile
@@ -1,2 +1,6 @@
obj-y := hv_init.o mmu.o nested.o
obj-$(CONFIG_X86_64) += hv_apic.o
+
+ifdef CONFIG_X86_64
+obj-$(CONFIG_PARAVIRT_SPINLOCKS) += hv_spinlock.o
+endif
diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
new file mode 100644
index 0000000..b628343
--- /dev/null
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Hyper-V specific spinlock code.
+ *
+ * Copyright (C) 2018, Intel, Inc.
+ *
+ * Author : Yi Sun <yi.y.sun@...el.com>
+ */
+
+#define pr_fmt(fmt) "Hyper-V: " fmt
+
+#include <linux/spinlock.h>
+
+#include <asm/mshyperv.h>
+#include <asm/hyperv-tlfs.h>
+#include <asm/paravirt.h>
+#include <asm/qspinlock.h>
+#include <asm/apic.h>
+
+static bool __initdata hv_pvspin = true;
+
+static void hv_qlock_kick(int cpu)
+{
+ apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
+}
+
+static void hv_qlock_wait(u8 *byte, u8 val)
+{
+ unsigned long msr_val;
+ unsigned long flags;
+
+ if (in_nmi())
+ return;
+
+ local_irq_save(flags);
+
+ if (READ_ONCE(*byte) != val)
+ goto out;
+
+ /*
+ * Reading the HV_X64_MSR_GUEST_IDLE MSR triggers the guest's
+ * transition to the idle power state, which can be exited
+ * by an IPI even when the IF flag is cleared.
+ */
+ rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
+
+out:
+ local_irq_restore(flags);
+}
+
+/*
+ * Hyper-V does not yet provide a way to tell whether a vCPU has
+ * been preempted, so always report it as not preempted.
+ */
+bool hv_vcpu_is_preempted(int vcpu)
+{
+ return false;
+}
+PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);
+
+void __init hv_init_spinlocks(void)
+{
+ if (!hv_pvspin ||
+ !apic ||
+ !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
+ !(ms_hyperv.features & HV_X64_MSR_GUEST_IDLE_AVAILABLE)) {
+ pr_info("PV spinlocks disabled\n");
+ return;
+ }
+ pr_info("PV spinlocks enabled\n");
+
+ __pv_init_lock_hash();
+ pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+ pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+ pv_lock_ops.wait = hv_qlock_wait;
+ pv_lock_ops.kick = hv_qlock_kick;
+ pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
+}
+
+static __init int hv_parse_nopvspin(char *arg)
+{
+ hv_pvspin = false;
+ return 0;
+}
+early_param("hv_nopvspin", hv_parse_nopvspin);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index f377044..759cfd2 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -351,6 +351,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
#ifdef CONFIG_X86_64
void hv_apic_init(void);
+void __init hv_init_spinlocks(void);
#else
static inline void hv_apic_init(void) {}
#endif
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index ad12733..a5cc219 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -199,6 +199,16 @@ static unsigned long hv_get_tsc_khz(void)
return freq / 1000;
}
+#if defined(CONFIG_SMP) && IS_ENABLED(CONFIG_HYPERV)
+static void __init hv_smp_prepare_boot_cpu(void)
+{
+ native_smp_prepare_boot_cpu();
+#if defined(CONFIG_X86_64) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+ hv_init_spinlocks();
+#endif
+}
+#endif
+
static void __init ms_hyperv_init_platform(void)
{
int hv_host_info_eax;
@@ -303,6 +313,10 @@ static void __init ms_hyperv_init_platform(void)
if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE)
alloc_intr_gate(HYPERV_STIMER0_VECTOR,
hv_stimer0_callback_vector);
+
+#if defined(CONFIG_SMP)
+ smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
+#endif
#endif
}
--
1.9.1