Message-Id: <20250226200612.2062-5-mhklinux@outlook.com>
Date: Wed, 26 Feb 2025 12:06:09 -0800
From: mhkelley58@...il.com
To: kys@...rosoft.com,
haiyangz@...rosoft.com,
wei.liu@...nel.org,
decui@...rosoft.com,
tglx@...utronix.de,
mingo@...hat.com,
bp@...en8.de,
dave.hansen@...ux.intel.com,
hpa@...or.com,
lpieralisi@...nel.org,
kw@...ux.com,
manivannan.sadhasivam@...aro.org,
robh@...nel.org,
bhelgaas@...gle.com,
arnd@...db.de
Cc: x86@...nel.org,
linux-hyperv@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-pci@...r.kernel.org,
linux-arch@...r.kernel.org
Subject: [PATCH 4/7] x86/hyperv: Use hv_hvcall_*() to set up hypercall arguments -- part 2
From: Michael Kelley <mhklinux@...look.com>

Update hypercall call sites to use the new hv_hvcall_*() functions to
set up hypercall arguments. Since these functions zero the fixed
portion of the input memory, remove the now-redundant memset() calls
and explicit zeroing of input fields.
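
For reference, the helpers behave roughly as sketched below. This is
illustrative only: the signature follows the call sites in this patch,
and the body is a guess at the semantics described above (zero the
fixed portion, return the per-page batch size); the real implementation
added earlier in this series may differ in detail:

    /*
     * Sketch, not the actual implementation: return the per-cpu
     * hypercall input page via *inputarg with the fixed-size portion
     * zeroed, and compute how many variable-size array elements fit
     * in the remainder of the page.
     */
    static int hv_hvcall_in_array(void *inputarg, size_t fixed_size,
                                  size_t element_size)
    {
            void **ptr = inputarg;

            *ptr = *this_cpu_ptr(hyperv_pcpu_input_arg);
            if (*ptr)
                    memset(*ptr, 0, fixed_size);
            return (HV_HYP_PAGE_SIZE - fixed_size) / element_size;
    }
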
For hv_mark_gpa_visibility(), use the computed batch_size instead of
HV_MAX_MODIFY_GPA_REP_COUNT. Also update the associated
gpa_page_list[] field to be a flexible array member, which is more
consistent with the other array arguments to hypercalls. Due to the
interaction with the caller, hv_vtom_set_host_visibility(),
HV_MAX_MODIFY_GPA_REP_COUNT cannot be completely eliminated without
further restructuring, but that's for another patch set.
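
As a sanity check (assuming the usual 4 KiB hypercall input page and
the packed 16-byte fixed portion of struct hv_gpa_range_for_visibility),
the computed batch_size matches the old constant:

    batch_size = (HV_HYP_PAGE_SIZE - sizeof(struct hv_gpa_range_for_visibility))
                    / sizeof(input->gpa_page_list[0]);
               = (4096 - 16) / 8
               = 510;   /* same value as HV_MAX_MODIFY_GPA_REP_COUNT */
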
Similarly, for the nested flush functions, update gpa_list[] to be a
flexible array member. Again, separate restructuring would be required
to completely eliminate the need for HV_MAX_FLUSH_REP_COUNT.
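
The same arithmetic applies here (again assuming a 4 KiB input page;
each union hv_gpa_page_range entry is 8 bytes):

    batch_size = (HV_HYP_PAGE_SIZE - sizeof(struct hv_guest_mapping_flush_list))
                    / sizeof(flush->gpa_list[0]);
               = (4096 - 16) / 8
               = 510;   /* same value as HV_MAX_FLUSH_REP_COUNT */
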
Finally, hyperv_flush_tlb_others_ex() requires special handling
because its variable-size input consists of two arrays -- one for the
hv_vp_set banks and one for the gva list. The batch_size computed by
hv_hvcall_in_array() is adjusted downward to account for the number of
entries in the hv_vp_set.
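
Concretely, since bank_contents[] and gva_list[] entries are both u64,
each populated bank displaces exactly one gva slot. A sketch of the
resulting computation (restating what the diff below does):

    /* from hv_hvcall_in_array(): */
    max_gvas = (HV_HYP_PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);

    /* each hv_vp_set bank entry consumes one gva_list[] slot */
    max_gvas -= (nr_bank * sizeof(flush->hv_vp_set.bank_contents[0])) /
                sizeof(flush->gva_list[0]);     /* i.e., max_gvas -= nr_bank */
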
Signed-off-by: Michael Kelley <mhklinux@...look.com>
---
arch/x86/hyperv/ivm.c | 18 +++++++++---------
arch/x86/hyperv/mmu.c | 17 +++--------------
arch/x86/hyperv/nested.c | 14 +++++---------
include/hyperv/hvgdk_mini.h | 4 ++--
4 files changed, 19 insertions(+), 34 deletions(-)
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index ec7880271cf9..1e4f65aef09b 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -465,30 +465,30 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
{
struct hv_gpa_range_for_visibility *input;
u64 hv_status;
+ int batch_size;
unsigned long flags;
/* no-op if partition isolation is not enabled */
if (!hv_is_isolation_supported())
return 0;
- if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
- pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
- HV_MAX_MODIFY_GPA_REP_COUNT);
+ local_irq_save(flags);
+ batch_size = hv_hvcall_in_array(&input, sizeof(*input),
+ sizeof(input->gpa_page_list[0]));
+ if (unlikely(!input)) {
+ local_irq_restore(flags);
return -EINVAL;
}
- local_irq_save(flags);
- input = *this_cpu_ptr(hyperv_pcpu_input_arg);
-
- if (unlikely(!input)) {
+ if (count > batch_size) {
+ pr_err("Hyper-V: GPA count:%d exceeds supported:%u\n", count,
+ batch_size);
local_irq_restore(flags);
return -EINVAL;
}
input->partition_id = HV_PARTITION_ID_SELF;
input->host_visibility = visibility;
- input->reserved0 = 0;
- input->reserved1 = 0;
memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
hv_status = hv_do_rep_hypercall(
HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 1f7c3082a36d..ab9db23247c1 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -72,7 +72,7 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
local_irq_save(flags);
- flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ max_gvas = hv_hvcall_in_array(&flush, sizeof(*flush), sizeof(flush->gva_list[0]));
if (unlikely(!flush)) {
local_irq_restore(flags);
@@ -86,13 +86,10 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
*/
flush->address_space = virt_to_phys(info->mm->pgd);
flush->address_space &= CR3_ADDR_MASK;
- flush->flags = 0;
} else {
- flush->address_space = 0;
flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
}
- flush->processor_mask = 0;
if (cpumask_equal(cpus, cpu_present_mask)) {
flush->flags |= HV_FLUSH_ALL_PROCESSORS;
} else {
@@ -139,8 +136,6 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
* We can flush not more than max_gvas with one hypercall. Flush the
* whole address space if we were asked to do more.
*/
- max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
-
if (info->end == TLB_FLUSH_ALL) {
flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
@@ -179,7 +174,7 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
return HV_STATUS_INVALID_PARAMETER;
- flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ max_gvas = hv_hvcall_in_array(&flush, sizeof(*flush), sizeof(flush->gva_list[0]));
if (info->mm) {
/*
@@ -188,14 +183,10 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
*/
flush->address_space = virt_to_phys(info->mm->pgd);
flush->address_space &= CR3_ADDR_MASK;
- flush->flags = 0;
} else {
- flush->address_space = 0;
flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
}
- flush->hv_vp_set.valid_bank_mask = 0;
-
flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
nr_bank = cpumask_to_vpset_skip(&flush->hv_vp_set, cpus,
info->freed_tables ? NULL : cpu_is_lazy);
@@ -206,9 +197,7 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
* We can flush not more than max_gvas with one hypercall. Flush the
* whole address space if we were asked to do more.
*/
- max_gvas =
- (PAGE_SIZE - sizeof(*flush) - nr_bank *
- sizeof(flush->hv_vp_set.bank_contents[0])) /
+ max_gvas -= (nr_bank * sizeof(flush->hv_vp_set.bank_contents[0])) /
sizeof(flush->gva_list[0]);
if (info->end == TLB_FLUSH_ALL) {
diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c
index 1083dc8646f9..88c39ac8d0aa 100644
--- a/arch/x86/hyperv/nested.c
+++ b/arch/x86/hyperv/nested.c
@@ -29,15 +29,13 @@ int hyperv_flush_guest_mapping(u64 as)
local_irq_save(flags);
- flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
-
+ hv_hvcall_in(&flush, sizeof(*flush));
if (unlikely(!flush)) {
local_irq_restore(flags);
goto fault;
}
flush->address_space = as;
- flush->flags = 0;
status = hv_do_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE,
flush, NULL);
@@ -90,25 +88,23 @@ int hyperv_flush_guest_mapping_range(u64 as,
u64 status;
unsigned long flags;
int ret = -ENOTSUPP;
- int gpa_n = 0;
+ int batch_size, gpa_n = 0;
if (!hv_hypercall_pg || !fill_flush_list_func)
goto fault;
local_irq_save(flags);
- flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
-
+ batch_size = hv_hvcall_in_array(&flush, sizeof(*flush),
+ sizeof(flush->gpa_list[0]));
if (unlikely(!flush)) {
local_irq_restore(flags);
goto fault;
}
flush->address_space = as;
- flush->flags = 0;
-
gpa_n = fill_flush_list_func(flush, data);
- if (gpa_n < 0) {
+ if (gpa_n < 0 || gpa_n > batch_size) {
local_irq_restore(flags);
goto fault;
}
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
index 58895883f636..70e5d7ee40c8 100644
--- a/include/hyperv/hvgdk_mini.h
+++ b/include/hyperv/hvgdk_mini.h
@@ -533,7 +533,7 @@ union hv_gpa_page_range {
struct hv_guest_mapping_flush_list {
u64 address_space;
u64 flags;
- union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
+ union hv_gpa_page_range gpa_list[];
};
struct hv_tlb_flush { /* HV_INPUT_FLUSH_VIRTUAL_ADDRESS_LIST */
@@ -1218,7 +1218,7 @@ struct hv_gpa_range_for_visibility {
u32 host_visibility : 2;
u32 reserved0 : 30;
u32 reserved1;
- u64 gpa_page_list[HV_MAX_MODIFY_GPA_REP_COUNT];
+ u64 gpa_page_list[];
} __packed;
#if defined(CONFIG_X86)
--
2.25.1