[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID:
<175916178500.55038.7514040046563468369.stgit@skinsburskii-cloud-desktop.internal.cloudapp.net>
Date: Mon, 29 Sep 2025 16:03:05 +0000
From: Stanislav Kinsburskii <skinsburskii@...ux.microsoft.com>
To: kys@...rosoft.com, haiyangz@...rosoft.com, wei.liu@...nel.org,
decui@...rosoft.com
Cc: linux-hyperv@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH v2 1/4] Drivers: hv: Refactor and rename memory region
handling functions
Simplify and unify memory region management to improve code clarity and
reliability. Consolidate pinning and invalidation logic, adopt consistent
naming, and remove redundant checks to reduce complexity.
Enhance documentation and update call sites for maintainability.
Signed-off-by: Stanislav Kinsburskii <skinsburskii@...ux.microsoft.com>
---
drivers/hv/mshv_root_main.c | 78 +++++++++++++++++++------------------------
1 file changed, 35 insertions(+), 43 deletions(-)
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
index a1c8c3bc79bf1..8cb309dce6258 100644
--- a/drivers/hv/mshv_root_main.c
+++ b/drivers/hv/mshv_root_main.c
@@ -1120,8 +1120,8 @@ mshv_region_map(struct mshv_mem_region *region)
}
static void
-mshv_region_evict_pages(struct mshv_mem_region *region,
- u64 page_offset, u64 page_count)
+mshv_region_invalidate_pages(struct mshv_mem_region *region,
+ u64 page_offset, u64 page_count)
{
if (region->flags.range_pinned)
unpin_user_pages(region->pages + page_offset, page_count);
@@ -1131,29 +1131,24 @@ mshv_region_evict_pages(struct mshv_mem_region *region,
}
static void
-mshv_region_evict(struct mshv_mem_region *region)
+mshv_region_invalidate(struct mshv_mem_region *region)
{
- mshv_region_evict_pages(region, 0, region->nr_pages);
+ mshv_region_invalidate_pages(region, 0, region->nr_pages);
}
static int
-mshv_region_populate_pages(struct mshv_mem_region *region,
- u64 page_offset, u64 page_count)
+mshv_region_pin(struct mshv_mem_region *region)
{
u64 done_count, nr_pages;
struct page **pages;
__u64 userspace_addr;
int ret;
- if (page_offset + page_count > region->nr_pages)
- return -EINVAL;
-
- for (done_count = 0; done_count < page_count; done_count += ret) {
- pages = region->pages + page_offset + done_count;
+ for (done_count = 0; done_count < region->nr_pages; done_count += ret) {
+ pages = region->pages + done_count;
userspace_addr = region->start_uaddr +
- (page_offset + done_count) *
- HV_HYP_PAGE_SIZE;
- nr_pages = min(page_count - done_count,
+ done_count * HV_HYP_PAGE_SIZE;
+ nr_pages = min(region->nr_pages - done_count,
MSHV_PIN_PAGES_BATCH_SIZE);
/*
@@ -1164,34 +1159,23 @@ mshv_region_populate_pages(struct mshv_mem_region *region,
* with the FOLL_LONGTERM flag does a large temporary
* allocation of contiguous memory.
*/
- if (region->flags.range_pinned)
- ret = pin_user_pages_fast(userspace_addr,
- nr_pages,
- FOLL_WRITE | FOLL_LONGTERM,
- pages);
- else
- ret = -EOPNOTSUPP;
-
+ ret = pin_user_pages_fast(userspace_addr, nr_pages,
+ FOLL_WRITE | FOLL_LONGTERM,
+ pages);
if (ret < 0)
goto release_pages;
}
- if (PageHuge(region->pages[page_offset]))
+ if (PageHuge(region->pages[0]))
region->flags.large_pages = true;
return 0;
release_pages:
- mshv_region_evict_pages(region, page_offset, done_count);
+ mshv_region_invalidate_pages(region, 0, done_count);
return ret;
}
-static int
-mshv_region_populate(struct mshv_mem_region *region)
-{
- return mshv_region_populate_pages(region, 0, region->nr_pages);
-}
-
static struct mshv_mem_region *
mshv_partition_region_by_gfn(struct mshv_partition *partition, u64 gfn)
{
@@ -1264,17 +1248,25 @@ static int mshv_partition_create_region(struct mshv_partition *partition,
return 0;
}
-/*
- * Map guest ram. if snp, make sure to release that from the host first
- * Side Effects: In case of failure, pages are unpinned when feasible.
+/**
+ * mshv_prepare_pinned_region - Pin and map a memory region
+ * @region: Pointer to the memory region structure
+ *
+ * This function processes memory regions that are explicitly marked as pinned.
+ * Pinned regions are preallocated, mapped upfront, and do not rely on fault-based
+ * population. The function ensures the region is properly pinned, handles
+ * encryption requirements for SNP partitions if applicable, maps the region,
+ * and performs the necessary sharing or invalidation operations based on the
+ * mapping result.
+ *
+ * Return: 0 on success, negative error code on failure.
*/
-static int
-mshv_partition_mem_region_map(struct mshv_mem_region *region)
+static int mshv_prepare_pinned_region(struct mshv_mem_region *region)
{
struct mshv_partition *partition = region->partition;
int ret;
- ret = mshv_region_populate(region);
+ ret = mshv_region_pin(region);
if (ret) {
pt_err(partition, "Failed to populate memory region: %d\n",
ret);
@@ -1294,7 +1286,7 @@ mshv_partition_mem_region_map(struct mshv_mem_region *region)
pt_err(partition,
"Failed to unshare memory region (guest_pfn: %llu): %d\n",
region->start_gfn, ret);
- goto evict_region;
+ goto invalidate_region;
}
}
@@ -1304,7 +1296,7 @@ mshv_partition_mem_region_map(struct mshv_mem_region *region)
shrc = mshv_partition_region_share(region);
if (!shrc)
- goto evict_region;
+ goto invalidate_region;
pt_err(partition,
"Failed to share memory region (guest_pfn: %llu): %d\n",
@@ -1318,8 +1310,8 @@ mshv_partition_mem_region_map(struct mshv_mem_region *region)
return 0;
-evict_region:
- mshv_region_evict(region);
+invalidate_region:
+ mshv_region_invalidate(region);
err_out:
return ret;
}
@@ -1368,7 +1360,7 @@ mshv_map_user_memory(struct mshv_partition *partition,
ret = hv_call_map_mmio_pages(partition->pt_id, mem.guest_pfn,
mmio_pfn, HVPFN_DOWN(mem.size));
else
- ret = mshv_partition_mem_region_map(region);
+ ret = mshv_prepare_pinned_region(region);
if (ret)
goto errout;
@@ -1413,7 +1405,7 @@ mshv_unmap_user_memory(struct mshv_partition *partition,
hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn,
region->nr_pages, unmap_flags);
- mshv_region_evict(region);
+ mshv_region_invalidate(region);
vfree(region);
return 0;
@@ -1827,7 +1819,7 @@ static void destroy_partition(struct mshv_partition *partition)
}
}
- mshv_region_evict(region);
+ mshv_region_invalidate(region);
vfree(region);
}
Powered by blists - more mailing lists