Date:	Mon, 3 Mar 2014 13:24:03 -0500
From:	"Li, Bin (Bin)" <bin.bl.li@...atel-lucent.com>
To:	<kvm@...r.kernel.org>
CC:	Neel Jatania <neel.jatania@...atel-lucent.com>,
	<linux-kernel@...r.kernel.org>, Avi Kivity <avi@...hat.com>,
	Srivatsa Vaddagiri <vatsa@...ux.vnet.ibm.com>,
	Peter Zijlstra <a.p.zijlstra@...llo.nl>,
	Mike Galbraith <efault@....de>,
	Chris Wright <chrisw@...s-sol.org>, <ttracy@...hat.com>,
	"Nakajima, Jun" <jun.nakajima@...el.com>, <riel@...hat.com>
Subject: Enhancement for PLE handler in KVM

Hello, all.

The PLE (pause-loop exiting) handler attempts to select an alternate 
vCPU to schedule.  In some cases the wrong vCPU is scheduled and 
performance suffers.

This patch allows the guest OS to signal, via a hypercall, that it is 
entering or leaving a critical section.  Using this information in the 
PLE handler allows a more intelligent vCPU scheduling decision to be 
made.  The patch only changes PLE behaviour if the new hypercall 
mechanism is used; if it is not used, the existing PLE algorithm 
continues to determine the next vCPU.  A guest-side usage sketch 
follows.
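
For illustration only, a minimal guest-side sketch of marking a 
critical section with these hypercalls.  It assumes the standard x86 
kvm_para_available()/kvm_hypercall0() helpers from <linux/kvm_para.h> 
and the two hypercall numbers added by this patch; the wrapper names 
guest_lock_section_begin()/guest_lock_section_end() are hypothetical, 
not part of the patch.

#include <linux/kvm_para.h>	/* kvm_para_available(), kvm_hypercall0(), KVM_HC_* */

/* Hypothetical guest-side wrappers: tell the host that this vCPU is
 * entering/leaving a critical section so the PLE handler can prefer it. */
static inline void guest_lock_section_begin(void)
{
	if (kvm_para_available())
		kvm_hypercall0(KVM_HC_LOCK_GET);
}

static inline void guest_lock_section_end(void)
{
	if (kvm_para_available())
		kvm_hypercall0(KVM_HC_LOCK_RELEASE);
}

/* Example: bracket a latency-sensitive critical section in the guest. */
static void guest_critical_work(void)
{
	guest_lock_section_begin();
	/* ... hold locks / run the time-critical code ... */
	guest_lock_section_end();
}

In practice kvm_para_available() would be checked once at init time; 
it is repeated here only to keep the sketch self-contained.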

Benefits of the patch:
  - Guest OS real-time performance is significantly improved when the 
hypercalls are used to mark entering and leaving guest OS kernel state.
  - Guest OS system clock jitter measured on an Intel E5-2620 is 
reduced from 400ms down to 6ms.
  - The guest OS system clock is set to a 2ms clock interrupt.  Jitter 
is measured as the difference between the TSC value (rdtsc()) read in 
the clock interrupt handler and the expected TSC value; a short sketch 
of this measurement follows the list.
  - Details of the test report are attached for reference.
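
For reference, a minimal sketch of that jitter measurement (an 
illustration only, not taken from the attached test log; the 
rdtsc-based read, the tsc_per_tick conversion and the function and 
variable names are assumptions):

#include <stdint.h>

static uint64_t expected_tsc;      /* expected TSC value at the next tick      */
static uint64_t tsc_per_tick;      /* TSC cycles per 2ms tick, e.g. tsc_khz*2  */
static uint64_t max_jitter_cycles; /* worst-case deviation observed so far     */

static inline uint64_t read_tsc(void)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Called from the 2ms periodic clock interrupt handler. */
void tick_jitter_sample(void)
{
	uint64_t now = read_tsc();

	if (expected_tsc) {
		int64_t jitter = (int64_t)(now - expected_tsc);

		if (jitter < 0)
			jitter = -jitter;
		if ((uint64_t)jitter > max_jitter_cycles)
			max_jitter_cycles = jitter;
	}
	expected_tsc = now + tsc_per_tick;	/* expectation for the next tick */
}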

Patch details:

From 77edfa193a4e29ab357ec3b1e097f8469d418507 Mon Sep 17 00:00:00 2001
From: Bin BL LI <bin.bl.li@...atel-lucent.com>
Date: Mon, 3 Mar 2014 11:23:35 -0500
Subject: [PATCH] Initial commit

---
 arch/x86/kvm/x86.c            |    7 +++++++
 include/linux/kvm_host.h      |   16 ++++++++++++++++
 include/uapi/linux/kvm_para.h |    2 ++
 virt/kvm/kvm_main.c           |   14 +++++++++++++-
 4 files changed, 38 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 39c28f0..e735de3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5582,6 +5582,7 @@ void kvm_arch_exit(void)
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.halt_exits;
+	kvm_vcpu_set_holding_lock(vcpu, false);
 	if (irqchip_in_kernel(vcpu->kvm)) {
 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 		return 1;
@@ -5708,6 +5709,12 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
 		ret = 0;
 		break;
+	case KVM_HC_LOCK_GET:
+		kvm_vcpu_set_holding_lock(vcpu, true);
+		break;
+	case KVM_HC_LOCK_RELEASE:
+		kvm_vcpu_set_holding_lock(vcpu, false);
+		break;
 	default:
 		ret = -KVM_ENOSYS;
 		break;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b8e9a43..f24892e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -266,6 +266,7 @@ struct kvm_vcpu {
 		bool in_spin_loop;
 		bool dy_eligible;
 	} spin_loop;
+	bool holding_lock;
 #endif
 	bool preempted;
 	struct kvm_vcpu_arch arch;
@@ -403,6 +404,10 @@ struct kvm {
 #endif
 	long tlbs_dirty;
 	struct list_head devices;
+
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+	bool using_lock_flag;
+#endif
 };
 
 #define kvm_err(fmt, ...) \
@@ -1076,6 +1081,13 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
 	vcpu->spin_loop.dy_eligible = val;
 }
 
+static inline void kvm_vcpu_set_holding_lock(struct kvm_vcpu *vcpu, bool val)
+{
+	if (!vcpu->kvm->using_lock_flag)
+		vcpu->kvm->using_lock_flag = true;
+	vcpu->holding_lock = val;
+}
+
 #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 
 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
@@ -1085,6 +1097,10 @@ static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
 {
 }
+
+static inline void kvm_vcpu_set_holding_lock(struct kvm_vcpu *vcpu, bool val)
+{
+}
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
 
diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h
index 2841f86..2c563a1 100644
--- a/include/uapi/linux/kvm_para.h
+++ b/include/uapi/linux/kvm_para.h
@@ -20,6 +20,8 @@
 #define KVM_HC_FEATURES			3
 #define KVM_HC_PPC_MAP_MAGIC_PAGE	4
 #define KVM_HC_KICK_CPU			5
+#define KVM_HC_LOCK_GET			6
+#define KVM_HC_LOCK_RELEASE		7
 
 /*
  * hypercalls use architecture specific
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 03a0381..c3a5046 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -232,6 +232,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 
 	kvm_vcpu_set_in_spin_loop(vcpu, false);
 	kvm_vcpu_set_dy_eligible(vcpu, false);
+	kvm_vcpu_set_holding_lock(vcpu, false);
 	vcpu->preempted = false;
 
 	r = kvm_arch_vcpu_init(vcpu);
@@ -502,6 +503,10 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	list_add(&kvm->vm_list, &vm_list);
 	spin_unlock(&kvm_lock);
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+	kvm->using_lock_flag = false;
+#endif
+
 	return kvm;
 
 out_err:
@@ -1762,9 +1767,16 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 	bool eligible;
 
-	eligible = !vcpu->spin_loop.in_spin_loop ||
+	if (!vcpu->kvm->using_lock_flag)
+	{
+		eligible = !vcpu->spin_loop.in_spin_loop ||
 		    (vcpu->spin_loop.in_spin_loop &&
 		     vcpu->spin_loop.dy_eligible);
+	}
+	else
+	{
+		eligible = vcpu->holding_lock; /* if holding any lock, yield to it */
+	}
 
 	if (vcpu->spin_loop.in_spin_loop)
 		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
-- 
1.7.1




Regards
Bin


View attachment "tst.log" of type "text/plain" (12661 bytes)
