Message-ID: <2a30773b-7c2b-2d95-6da7-ba6c2f5e66a4@google.com>
Date: Tue, 23 Nov 2021 14:04:06 -0800
From: Junaid Shahid <junaids@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>
Cc: Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] KVM: x86/mmu: Handle "default" period when selectively
waking kthread
On 11/19/21 5:57 PM, Sean Christopherson wrote:
> Account for the '0' being a default, "let KVM choose" period, when
> determining whether or not the recovery worker needs to be awakened in
> response to userspace reducing the period. Failure to do so results in
> the worker not being awakened properly, e.g. when changing the period
> from '0' to any small-ish value.
>
> Fixes: 4dfe4f40d845 ("kvm: x86: mmu: Make NX huge page recovery period configurable")
> Cc: stable@...r.kernel.org
> Cc: Junaid Shahid <junaids@...gle.com>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
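
To make the failure mode concrete: before this patch, old_period was read
back as the raw '0', so the "old_period > new_period" check could never be
true when moving from the default to any explicit value. Below is a minimal
userspace sketch of the two decision paths, assuming the default ratio of 60;
effective_period() is a stand-in for calc_nx_huge_pages_recovery_period(),
not kernel code.

#include <stdio.h>

/* Mirrors the helper's default-period handling for demonstration. */
static unsigned int effective_period(unsigned int ratio, unsigned int period_ms)
{
	if (!period_ms && ratio) {
		/* Keep the effective period at one second or more. */
		if (ratio > 3600u)
			ratio = 3600u;
		period_ms = 60 * 60 * 1000 / ratio;
	}
	return period_ms;
}

int main(void)
{
	/* kvm's default ratio is 60; move the period from '0' to 500 ms. */
	unsigned int ratio = 60, old_raw = 0, new_raw = 500;

	/* Old check compared raw values: 0 > 500 is false, so no wakeup. */
	printf("old logic wakes worker: %d\n", old_raw > new_raw);

	/* Fixed check compares effective values: 60000 > 500, so wakeup. */
	printf("new logic wakes worker: %d\n",
	       effective_period(ratio, old_raw) >
	       effective_period(ratio, new_raw));

	return 0;
}
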
> ---
> arch/x86/kvm/mmu/mmu.c | 48 +++++++++++++++++++++++++++++-------------
> 1 file changed, 33 insertions(+), 15 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 8f0035517450..db7e1ad4d046 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6165,23 +6165,46 @@ void kvm_mmu_module_exit(void)
> mmu_audit_disable();
> }
>
> +/*
> + * Calculate the effective recovery period, accounting for '0' meaning "let KVM
> + * select a period of ~1 hour per page". Returns true if recovery is enabled.
> + */
> +static bool calc_nx_huge_pages_recovery_period(uint *period)
> +{
> + /*
> + * Use READ_ONCE to get the params, this may be called outside of the
> + * param setters, e.g. by the kthread to compute its next timeout.
> + */
> + bool enabled = READ_ONCE(nx_huge_pages);
> + uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
> +
> + if (!enabled || !ratio)
> + return false;
> +
> + *period = READ_ONCE(nx_huge_pages_recovery_period_ms);
> + if (!*period) {
> + /* Make sure the period is not less than one second. */
> + ratio = min(ratio, 3600u);
> + *period = 60 * 60 * 1000 / ratio;
> + }
> + return true;
> +}
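
Worth spelling out the default-path arithmetic here: with the default
nx_huge_pages_recovery_ratio of 60, this computes 60 * 60 * 1000 / 60 =
60000 ms, i.e. the worker wakes once a minute and zaps 1/ratio of the pages
per run, so a full pass takes ~1 hour. The min() clamp keeps a huge ratio
(> 3600) from driving the period below one second (3600000 / 3600 = 1000 ms).
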
> +
> static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
> {
> bool was_recovery_enabled, is_recovery_enabled;
> uint old_period, new_period;
> int err;
>
> - was_recovery_enabled = nx_huge_pages_recovery_ratio;
> - old_period = nx_huge_pages_recovery_period_ms;
> + was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
>
> err = param_set_uint(val, kp);
> if (err)
> return err;
>
> - is_recovery_enabled = nx_huge_pages_recovery_ratio;
> - new_period = nx_huge_pages_recovery_period_ms;
> + is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
>
> - if (READ_ONCE(nx_huge_pages) && is_recovery_enabled &&
> + if (is_recovery_enabled &&
> (!was_recovery_enabled || old_period > new_period)) {
> struct kvm *kvm;
>
> @@ -6245,18 +6268,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
>
> static long get_nx_lpage_recovery_timeout(u64 start_time)
> {
> - uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
> - uint period = READ_ONCE(nx_huge_pages_recovery_period_ms);
> + bool enabled;
> + uint period;
>
> - if (!period && ratio) {
> - /* Make sure the period is not less than one second. */
> - ratio = min(ratio, 3600u);
> - period = 60 * 60 * 1000 / ratio;
> - }
> + enabled = calc_nx_huge_pages_recovery_period(&period);
>
> - return READ_ONCE(nx_huge_pages) && ratio
> - ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
> - : MAX_SCHEDULE_TIMEOUT;
> + return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
> + : MAX_SCHEDULE_TIMEOUT;
> }
>
> static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
>
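For anyone wanting to reproduce: the trigger is a runtime write of a small
value to /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms while it
still holds its default of '0'. Without this fix the worker keeps sleeping on
its previously computed timeout instead of being kicked.
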
Reviewed-by: Junaid Shahid <junaids@...gle.com>