Message-ID: <20200520090158.4x4lkbssm7ncirn7@liuwe-devbox-debian-v2.j3c5onc20sse1dnehy4noqpfcg.zx.internal.cloudapp.net>
Date: Wed, 20 May 2020 09:01:58 +0000
From: Wei Liu <wei.liu@...nel.org>
To: Sunil Muthuswamy <sunilmut@...rosoft.com>
Cc: KY Srinivasan <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Stephen Hemminger <sthemmin@...rosoft.com>,
Wei Liu <liuwe@...rosoft.com>,
Michael Kelley <mikelley@...rosoft.com>,
Tianyu Lan <Tianyu.Lan@...rosoft.com>,
"linux-hyperv@...r.kernel.org" <linux-hyperv@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Wei Liu <wei.liu@...nel.org>
Subject: Re: [PATCH] x86/Hyper-V: Support for free page reporting
On Tue, May 19, 2020 at 06:37:57PM +0000, Sunil Muthuswamy wrote:
> Linux now has support for free page reporting (36e66c554b5c) in
> virtualized environments. When virtually backed VMs are configured,
> Hyper-V advertises a cold memory discard capability where supported.
> This patch hooks into the free page reporting infrastructure and
> uses the Hyper-V cold memory discard hint hypercall to report free
> pages back to the host.
>
> Signed-off-by: Sunil Muthuswamy <sunilmut@...rosoft.com>
> ---
> First patch mail bounced back. Sending it again with the email
> addresses fixed.
> ---
> arch/x86/hyperv/hv_init.c | 24 ++++++++
> arch/x86/kernel/cpu/mshyperv.c | 6 +-
> drivers/hv/hv_balloon.c | 93 +++++++++++++++++++++++++++++++
> include/asm-generic/hyperv-tlfs.h | 29 ++++++++++
> include/asm-generic/mshyperv.h | 2 +
> 5 files changed, 152 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
> index 624f5d9b0f79..925e2f7eb82c 100644
> --- a/arch/x86/hyperv/hv_init.c
> +++ b/arch/x86/hyperv/hv_init.c
> @@ -506,3 +506,27 @@ bool hv_is_hibernation_supported(void)
> return acpi_sleep_state_supported(ACPI_STATE_S4);
> }
> EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
> +
> +u64 hv_query_ext_cap(void)
> +{
> + u64 *cap;
> + unsigned long flags;
> + u64 ext_cap = 0;
> +
> + /*
> + * Querying extended capabilities is an extended hypercall. Check that the
> + * partition supports extended hypercalls first.
> + */
> + if (!(ms_hyperv.b_features & HV_ENABLE_EXTENDED_HYPERCALLS))
> + return 0;
> +
> + local_irq_save(flags);
> + cap = *(u64 **)this_cpu_ptr(hyperv_pcpu_input_arg);
The cast here is not strictly needed.
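That is, assuming the per-cpu input_arg slot holds a plain void *, the
implicit conversion to u64 * should be enough, something like (modulo
sparse annotations):

	cap = *this_cpu_ptr(hyperv_pcpu_input_arg);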
> + if (hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL, cap) ==
> + HV_STATUS_SUCCESS)
You're using the input page as the output parameter. Ideally we would
introduce a hyperv_pcpu_output_arg page, but that would waste one page
per CPU just for this one call.
So for now I think this setup is fine, but I would like to add the
following comment.
/*
* Repurpose the input_arg page to accept output from Hyper-V for
* now because this is the only call that needs output from the
* hypervisor. It should be fixed properly by introducing an
* output_arg page once we have more places that require output.
*/
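For the record, a rough sketch of what a dedicated output page could
look like, mirroring the input page allocation (the names here are
illustrative, this is not an existing API):

	void * __percpu *hyperv_pcpu_output_arg;

	static int hv_cpu_init_output_arg(unsigned int cpu)
	{
		void **output_arg;

		output_arg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
		*output_arg = (void *)__get_free_page(GFP_KERNEL);
		return *output_arg ? 0 : -ENOMEM;
	}

But again, not worth burning one page per CPU for a single call today.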
> + ext_cap = *cap;
> +
> + local_irq_restore(flags);
> + return ext_cap;
> +}
> +EXPORT_SYMBOL_GPL(hv_query_ext_cap);
> diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
> index ebf34c7bc8bc..2de3f692c8bf 100644
> --- a/arch/x86/kernel/cpu/mshyperv.c
> +++ b/arch/x86/kernel/cpu/mshyperv.c
> @@ -224,11 +224,13 @@ static void __init ms_hyperv_init_platform(void)
> * Extract the features and hints
> */
> ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
> + ms_hyperv.b_features = cpuid_ebx(HYPERV_CPUID_FEATURES);
> ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
> ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
>
> - pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n",
> - ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features);
> + pr_info("Hyper-V: features 0x%x, additional features: 0x%x, hints 0x%x, misc 0x%x\n",
> + ms_hyperv.features, ms_hyperv.b_features, ms_hyperv.hints,
> + ms_hyperv.misc_features);
>
> ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
> ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);
> diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
> index 32e3bc0aa665..77be31094556 100644
> --- a/drivers/hv/hv_balloon.c
> +++ b/drivers/hv/hv_balloon.c
> @@ -21,6 +21,7 @@
> #include <linux/memory.h>
> #include <linux/notifier.h>
> #include <linux/percpu_counter.h>
> +#include <linux/page_reporting.h>
>
> #include <linux/hyperv.h>
> #include <asm/hyperv-tlfs.h>
> @@ -563,6 +564,10 @@ struct hv_dynmem_device {
> * The negotiated version agreed by host.
> */
> __u32 version;
> +
> +#ifdef CONFIG_PAGE_REPORTING
> + struct page_reporting_dev_info pr_dev_info;
> +#endif
> };
>
> static struct hv_dynmem_device dm_device;
> @@ -1565,6 +1570,83 @@ static void balloon_onchannelcallback(void *context)
>
> }
>
> +#ifdef CONFIG_PAGE_REPORTING
> +static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
> + struct scatterlist *sgl, unsigned int nents)
> +{
> + unsigned long flags;
> + struct hv_memory_hint *hint;
> + int i;
> + u64 status;
> + struct scatterlist *sg;
> +
> + WARN_ON(nents > HV_MAX_GPA_PAGE_RANGES);
Should we return -ENOSPC here?
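Something like this, as a sketch:

	if (WARN_ON(nents > HV_MAX_GPA_PAGE_RANGES))
		return -ENOSPC;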
> + local_irq_save(flags);
> + hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg);
> + if (!hint) {
> + local_irq_restore(flags);
> + return -ENOSPC;
> + }
> +
> + hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
> + hint->reserved = 0;
> + for (i = 0, sg = sgl; sg; sg = sg_next(sg), i++) {
> + int order;
> + union hv_gpa_page_range *range;
> +
Unfortunately I can't find the semantics of this hypercall in TLFS 6, so
I have a few questions here.
> + order = get_order(sg->length);
> + range = &hint->ranges[i];
> + range->address_space = 0;
I guess this means all address spaces?
> + range->page.largepage = 1;
What effect does this have? What if the page is a 4k page?
> + range->page.additional_pages = (1ull << (order - 9)) - 1;
What is 9 here? Is there a macro named *ORDER that you can use?
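If 9 is the 4K-pages-per-large-page order (21 - 12 for 2MB large
pages), a named constant would make that explicit. A sketch, assuming
that is what the magic number means (the macro below is made up, not
an existing define):

	/* A 2MB large page covers 2^(21 - 12) = 512 4K pages. */
	#define HV_LARGE_PAGE_ORDER	(21 - HV_HYP_PAGE_SHIFT)

	range->page.additional_pages =
		(1ull << (order - HV_LARGE_PAGE_ORDER)) - 1;

Also note this underflows when order < 9, which ties back to the 4k
page question above.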
Wei.