Message-Id: <1329423541-11677-3-git-send-email-emunson@mgebm.net>
Date: Thu, 16 Feb 2012 15:18:59 -0500
From: Eric B Munson <emunson@...bm.net>
To: avi@...hat.com
Cc: Eric B Munson <emunson@...bm.net>, mingo@...hat.com, hpa@...or.com,
ryanh@...ux.vnet.ibm.com, aliguori@...ibm.com, mtosatti@...hat.com,
kvm@...r.kernel.org, linux-arch@...r.kernel.org, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 2/4 V14] Add functions to check if the host has stopped the vm

When a host stops or suspends a VM, it sets a flag to indicate this. The
watchdog will use these functions to determine whether a softlockup is real or
the result of a suspended VM.
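For reference, a guest watchdog path could consume the new helper roughly as
follows. This is an illustrative sketch only, not part of this patch (the real
hookup into the softlockup detector comes later in this series); the names
check_softlockup(), softlockup_thresh, and report_softlockup() are made-up
stand-ins for the watchdog's own bookkeeping:

  #include <linux/jiffies.h>
  #include <linux/kvm_para.h>

  /* Stand-ins for the real watchdog state (illustration only). */
  static unsigned long softlockup_thresh = 20 * HZ;

  static void report_softlockup(void)
  {
          /* pr_emerg() + stack dump in the real detector */
  }

  static void check_softlockup(unsigned long touch_ts, unsigned long now)
  {
          /*
           * If the host stopped or suspended this guest since the watchdog
           * was last touched, the apparent stall is just lost wall-clock
           * time, not a real lockup.  Reading the flag also clears it, so
           * the next sample period starts clean.
           */
          if (kvm_check_and_clear_guest_paused())
                  return;

          if (time_after(now, touch_ts + softlockup_thresh))
                  report_softlockup();
  }

On kernels or architectures without kvmclock support, the stubs added by this
patch simply return false, so the check above is a no-op and the watchdog
behaves exactly as it does today.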
Signed-off-by: Eric B Munson <emunson@...bm.net>
asm-generic changes Acked-by: Arnd Bergmann <arnd@...db.de>
Cc: mingo@...hat.com
Cc: hpa@...or.com
Cc: ryanh@...ux.vnet.ibm.com
Cc: aliguori@...ibm.com
Cc: mtosatti@...hat.com
Cc: kvm@...r.kernel.org
Cc: linux-arch@...r.kernel.org
Cc: x86@...nel.org
Cc: linux-kernel@...r.kernel.org
---
Changes from V11:
 Re-add the missing asm-generic stub for check_and_clear_guest_stopped()

Changes from V6:
 Use __this_cpu_and when clearing the PVCLOCK_GUEST_STOPPED flag

Changes from V5:
 Collapse generic stubs into this patch
 check_and_clear_guest_stopped() takes no args and uses __get_cpu_var()
 Include individual definitions in ia64, s390, and powerpc

 arch/ia64/include/asm/kvm_para.h    |    5 +++++
 arch/powerpc/include/asm/kvm_para.h |    5 +++++
 arch/s390/include/asm/kvm_para.h    |    5 +++++
 arch/x86/include/asm/kvm_para.h     |    8 ++++++++
 arch/x86/kernel/kvmclock.c          |   21 +++++++++++++++++++++
 include/asm-generic/kvm_para.h      |   14 ++++++++++++++
 6 files changed, 58 insertions(+), 0 deletions(-)
create mode 100644 include/asm-generic/kvm_para.h
diff --git a/arch/ia64/include/asm/kvm_para.h b/arch/ia64/include/asm/kvm_para.h
index 1588aee..2019cb9 100644
--- a/arch/ia64/include/asm/kvm_para.h
+++ b/arch/ia64/include/asm/kvm_para.h
@@ -26,6 +26,11 @@ static inline unsigned int kvm_arch_para_features(void)
return 0;
}
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
#endif
#endif
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 7b754e7..c18916b 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -206,6 +206,11 @@ static inline unsigned int kvm_arch_para_features(void)
return r;
}
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
#endif /* __KERNEL__ */
#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h
index 6964db2..a988329 100644
--- a/arch/s390/include/asm/kvm_para.h
+++ b/arch/s390/include/asm/kvm_para.h
@@ -149,6 +149,11 @@ static inline unsigned int kvm_arch_para_features(void)
return 0;
}
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
#endif
#endif /* __S390_KVM_PARA_H */
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 734c376..99c4bbe 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -95,6 +95,14 @@ struct kvm_vcpu_pv_apf_data {
extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);
+#ifdef CONFIG_KVM_CLOCK
+bool kvm_check_and_clear_guest_paused(void);
+#else
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_CLOCK */
/* This instruction is vmcall. On non-VT architectures, it will generate a
* trap that we will then rewrite to the appropriate instruction.
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index ca4e735..8562f77 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -22,6 +22,7 @@
#include <asm/msr.h>
#include <asm/apic.h>
#include <linux/percpu.h>
+#include <linux/hardirq.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
@@ -114,6 +115,26 @@ static void kvm_get_preset_lpj(void)
preset_lpj = lpj;
}
+bool kvm_check_and_clear_guest_paused(void)
+{
+ bool ret = false;
+ struct pvclock_vcpu_time_info *src;
+
+ /*
+ * per_cpu() is safe here because this function is only called from
+ * timer functions where preemption is already disabled.
+ */
+ WARN_ON(!in_atomic());
+ src = &__get_cpu_var(hv_clock);
+ if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
+ __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
+ ret = true;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_check_and_clear_guest_paused);
+
static struct clocksource kvm_clock = {
.name = "kvm-clock",
.read = kvm_clock_get_cycles,
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
new file mode 100644
index 0000000..05ef7e7
--- /dev/null
+++ b/include/asm-generic/kvm_para.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_GENERIC_KVM_PARA_H
+#define _ASM_GENERIC_KVM_PARA_H
+
+
+/*
+ * Fallback for architectures that do not implement
+ * kvm_check_and_clear_guest_paused(): returning false means the soft
+ * lockup watchdog is never suppressed on those architectures.
+ */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
+#endif
--
1.7.5.4