Message-ID: <ZzU8qY92Q2QNtuyg@google.com>
Date: Wed, 13 Nov 2024 15:56:25 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, kvm@...r.kernel.org, 
	michael.christie@...cle.com, Tejun Heo <tj@...nel.org>, 
	Luca Boccassi <bluca@...ian.org>
Subject: Re: [PATCH] KVM: x86: switch hugepage recovery thread to vhost_task

On Fri, Nov 08, 2024, Paolo Bonzini wrote:
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 8e853a5fc867..d5af4f8c5a6a 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -7281,7 +7281,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
>  			kvm_mmu_zap_all_fast(kvm);
>  			mutex_unlock(&kvm->slots_lock);
>  
> -			wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
> +			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
>  		}
>  		mutex_unlock(&kvm_lock);
>  	}
> @@ -7427,7 +7427,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
>  		mutex_lock(&kvm_lock);
>  
>  		list_for_each_entry(kvm, &vm_list, vm_list)
> -			wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
> +			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
>  
>  		mutex_unlock(&kvm_lock);
>  	}
> @@ -7530,62 +7530,65 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
>  	srcu_read_unlock(&kvm->srcu, rcu_idx);
>  }
>  
> -static long get_nx_huge_page_recovery_timeout(u64 start_time)
> +#define NX_HUGE_PAGE_DISABLED (-1)

I don't see any point in using -1.  That's more legal (though still impossible
and absurd) than a deadline of '0'.  And it's somewhat confusing because KVM
uses -1 for the default nx_huge_pages value to indicate "enable the NX huge page
mitigation if the CPU is vulnerable to L1TF", not "disable the mitigation".

> +static u64 get_nx_huge_page_recovery_next(void)
>  {
>  	bool enabled;
>  	uint period;
>  
>  	enabled = calc_nx_huge_pages_recovery_period(&period);
>  
> -	return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
> -		       : MAX_SCHEDULE_TIMEOUT;
> +	return enabled ? get_jiffies_64() + msecs_to_jiffies(period)
> +		: NX_HUGE_PAGE_DISABLED;

Please align the '?' and ':' to show that they are related paths of the ternary
operator.  Moot point if we go with a literal '0'.
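E.g. the quoted return, with the operators aligned:

	return enabled ? get_jiffies_64() + msecs_to_jiffies(period)
		       : NX_HUGE_PAGE_DISABLED;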

>  }
>  
> -static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data)
> +static void kvm_nx_huge_page_recovery_worker_kill(void *data)
>  {
> -	u64 start_time;
> +}
> +
> +static bool kvm_nx_huge_page_recovery_worker(void *data)
> +{
> +	struct kvm *kvm = data;
>  	long remaining_time;
>  
> -	while (true) {
> -		start_time = get_jiffies_64();
> -		remaining_time = get_nx_huge_page_recovery_timeout(start_time);
> +	if (kvm->arch.nx_huge_page_next == NX_HUGE_PAGE_DISABLED)
> +		return false;

The "next" concept is broken.  Once KVM sees NX_HUGE_PAGE_DISABLED for a given VM,
KVM will never re-evaluate nx_huge_page_next.  Similarly, if the recovery period
and/or ratio changes, KVM won't recompute the "next" time until the current timeout
has expired.

I fiddled around with various ideas, but I don't see a better solution than something
along the lines of KVM's request system, e.g. set a bool to indicate the params
changed, and sprinkle smp_{r,w}mb() barriers to ensure the vhost task sees the
new params.
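
Roughly, the pairing would be (flattened from the fixup below; the param-setter
side publishes the new values before raising the flag, and the worker re-reads
them only after observing the flag):

	/* param-setter side: module params are already updated at this point */
	smp_wmb();
	WRITE_ONCE(kvm->arch.nx_huge_page_params_changed, true);
	vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);

	/* worker side */
	if (READ_ONCE(kvm->arch.nx_huge_page_params_changed)) {
		smp_rmb();
		WRITE_ONCE(kvm->arch.nx_huge_page_params_changed, false);
		kvm->arch.nx_huge_page_deadline = get_nx_huge_page_recovery_deadline();
	}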

FWIW, I also found "next" to be confusing.  How about "deadline"? KVM uses that
terminology for the APIC timer, i.e. it's familiar, intuitive, and accurate(ish).

Something like this as fixup?  (comments would be nice)

---
 arch/x86/include/asm/kvm_host.h |  3 ++-
 arch/x86/kvm/mmu/mmu.c          | 34 +++++++++++++++++++++------------
 2 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 72f3bcfc54d7..e9fb8b9a9c2b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1444,7 +1444,8 @@ struct kvm_arch {
 
 	struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter;
 	struct vhost_task *nx_huge_page_recovery_thread;
-	u64 nx_huge_page_next;
+	u64 nx_huge_page_deadline;
+	bool nx_huge_page_params_changed;
 
 #ifdef CONFIG_X86_64
 	/* The number of TDP MMU pages across all roots. */
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d0c2d9d2588f..acfa14d4248b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7102,6 +7102,13 @@ static void mmu_destroy_caches(void)
 	kmem_cache_destroy(mmu_page_header_cache);
 }
 
+static void mmu_wake_nx_huge_page_task(struct kvm *kvm)
+{
+	smp_wmb();
+	WRITE_ONCE(kvm->arch.nx_huge_page_params_changed, true);
+	vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+}
+
 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
 {
 	if (nx_hugepage_mitigation_hard_disabled)
@@ -7162,7 +7169,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 			kvm_mmu_zap_all_fast(kvm);
 			mutex_unlock(&kvm->slots_lock);
 
-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+			mmu_wake_nx_huge_page_task(kvm);
 		}
 		mutex_unlock(&kvm_lock);
 	}
@@ -7291,7 +7298,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
 		mutex_lock(&kvm_lock);
 
 		list_for_each_entry(kvm, &vm_list, vm_list)
-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+			mmu_wake_nx_huge_page_task(kvm);
 
 		mutex_unlock(&kvm_lock);
 	}
@@ -7394,17 +7401,14 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
 	srcu_read_unlock(&kvm->srcu, rcu_idx);
 }
 
-#define NX_HUGE_PAGE_DISABLED (-1)
-
-static u64 get_nx_huge_page_recovery_next(void)
+static u64 get_nx_huge_page_recovery_deadline(void)
 {
 	bool enabled;
 	uint period;
 
 	enabled = calc_nx_huge_pages_recovery_period(&period);
 
-	return enabled ? get_jiffies_64() + msecs_to_jiffies(period)
-		: NX_HUGE_PAGE_DISABLED;
+	return enabled ? get_jiffies_64() + msecs_to_jiffies(period) : 0;
 }
 
 static void kvm_nx_huge_page_recovery_worker_kill(void *data)
@@ -7416,10 +7420,16 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
 	struct kvm *kvm = data;
 	long remaining_time;
 
-	if (kvm->arch.nx_huge_page_next == NX_HUGE_PAGE_DISABLED)
+	if (READ_ONCE(kvm->arch.nx_huge_page_params_changed)) {
+		smp_rmb();
+		WRITE_ONCE(kvm->arch.nx_huge_page_params_changed, false);
+		kvm->arch.nx_huge_page_deadline = get_nx_huge_page_recovery_deadline();
+	}
+
+	if (!kvm->arch.nx_huge_page_deadline)
 		return false;
 
-	remaining_time = kvm->arch.nx_huge_page_next - get_jiffies_64();
+	remaining_time = kvm->arch.nx_huge_page_deadline - get_jiffies_64();
 	if (remaining_time > 0) {
 		schedule_timeout(remaining_time);
 		/* check for signals and come back */
@@ -7428,7 +7438,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
 
 	__set_current_state(TASK_RUNNING);
 	kvm_recover_nx_huge_pages(kvm);
-	kvm->arch.nx_huge_page_next = get_nx_huge_page_recovery_next();
+	kvm->arch.nx_huge_page_deadline = get_nx_huge_page_recovery_deadline();
 	return true;
 }
 
@@ -7437,11 +7447,11 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
 	if (nx_hugepage_mitigation_hard_disabled)
 		return 0;
 
-	kvm->arch.nx_huge_page_next = get_nx_huge_page_recovery_next();
+	WRITE_ONCE(kvm->arch.nx_huge_page_params_changed, true);
 	kvm->arch.nx_huge_page_recovery_thread = vhost_task_create(
 		kvm_nx_huge_page_recovery_worker, kvm_nx_huge_page_recovery_worker_kill,
 		kvm, "kvm-nx-lpage-recovery");
-	
+
 	if (!kvm->arch.nx_huge_page_recovery_thread)
 		return -ENOMEM;
 

base-commit: 922a5630cd31e4414f964aa64f45a5884f40188c
--
