Message-ID: <aNcPYJzETLZHCNpW@skinsburskii.localdomain>
Date: Fri, 26 Sep 2025 15:10:40 -0700
From: Stanislav Kinsburskii <skinsburskii@...ux.microsoft.com>
To: Nuno Das Neves <nunodasneves@...ux.microsoft.com>
Cc: kys@...rosoft.com, haiyangz@...rosoft.com, wei.liu@...nel.org,
decui@...rosoft.com, linux-hyperv@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/3] Drivers: hv: Centralize guest memory region
destruction in helper

On Fri, Sep 26, 2025 at 11:15:54AM -0700, Nuno Das Neves wrote:
> On 9/24/2025 2:31 PM, Stanislav Kinsburskii wrote:
<snip>
> > + /*
> > + * Unmap only the mapped pages to optimize performance,
> > + * especially for large memory regions.
> > + */
> > + for (page_offset = 0; page_offset < region->nr_pages; page_offset += page_count) {
> > + page_count = 1;
> > + if (!region->pages[page_offset])
> > + continue;
> I mentioned it above, but can this even happen in the current code (i.e. without
> movable pages)?
>
No.
> Also, has the impact of this change been measured? I understand the logic behind
> the change - there could be large unmapped sequences within the region so we might
> be able to skip a lot of reps of the unmap hypercall, but the region could also be
> very fragmented and this method might cause *more* reps in that case, right?
>
I see your point. Indeed, we should also bound each unmap batch by the
maximum number of pages the hypercall accepts per invocation.
I'll make this change, thanks.
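
Something along these lines (a rough sketch only; UNMAP_BATCH_MAX_PAGES
is a placeholder for whatever per-hypercall rep limit applies, not an
existing define). It still skips fully unmapped runs, but no single
batch can exceed the hypercall limit:

	for (page_offset = 0; page_offset < region->nr_pages;
	     page_offset += page_count) {
		page_count = 1;
		if (!region->pages[page_offset])
			continue;

		/*
		 * Extend the batch over contiguous mapped pages, but
		 * never past the per-hypercall rep limit.
		 */
		while (page_count < region->nr_pages - page_offset &&
		       page_count < UNMAP_BATCH_MAX_PAGES &&
		       region->pages[page_offset + page_count])
			page_count++;

		/* ignore unmap failures; the process may be exiting */
		hv_call_unmap_gpa_pages(partition->pt_id,
					region->start_gfn + page_offset,
					page_count, unmap_flags);
	}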
> Either way, this change belongs in a separate patch.
Fair enough.

Thanks,
Stanislav
> > +
> > + for (; page_count < region->nr_pages - page_offset; page_count++) {
> > + if (!region->pages[page_offset + page_count])
> > + break;
> > + }
> > +
> > + /* ignore unmap failures and continue as process may be exiting */
> > + hv_call_unmap_gpa_pages(partition->pt_id,
> > + region->start_gfn + page_offset,
> > + page_count, unmap_flags);
> > + }
> > +
> > + mshv_region_evict(region);
> > +
> > + vfree(region);
> > +}
> > +
> > /* Called for unmapping both the guest ram and the mmio space */
> > static long
> > mshv_unmap_user_memory(struct mshv_partition *partition,
> > struct mshv_user_mem_region mem)
> > {
> > struct mshv_mem_region *region;
> > - u32 unmap_flags = 0;
> >
> > if (!(mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP)))
> > return -EINVAL;
> > @@ -1407,18 +1453,7 @@ mshv_unmap_user_memory(struct mshv_partition *partition,
> > region->nr_pages != HVPFN_DOWN(mem.size))
> > return -EINVAL;
> >
> > - hlist_del(&region->hnode);
> > -
> > - if (region->flags.large_pages)
> > - unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
> > -
> > - /* ignore unmap failures and continue as process may be exiting */
> > - hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn,
> > - region->nr_pages, unmap_flags);
> > -
> > - mshv_region_evict(region);
> > -
> > - vfree(region);
> > + mshv_partition_destroy_region(region);
> > return 0;
> > }
> >
> > @@ -1754,8 +1789,8 @@ static void destroy_partition(struct mshv_partition *partition)
> > {
> > struct mshv_vp *vp;
> > struct mshv_mem_region *region;
> > - int i, ret;
> > struct hlist_node *n;
> > + int i;
> >
> > if (refcount_read(&partition->pt_ref_count)) {
> > pt_err(partition,
> > @@ -1815,25 +1850,9 @@ static void destroy_partition(struct mshv_partition *partition)
> >
> > remove_partition(partition);
> >
> > - /* Remove regions, regain access to the memory and unpin the pages */
> > hlist_for_each_entry_safe(region, n, &partition->pt_mem_regions,
> > - hnode) {
> > - hlist_del(&region->hnode);
> > -
> > - if (mshv_partition_encrypted(partition)) {
> > - ret = mshv_partition_region_share(region);
> > - if (ret) {
> > - pt_err(partition,
> > - "Failed to regain access to memory, unpinning user pages will fail and crash the host error: %d\n",
> > - ret);
> > - return;
> > - }
> > - }
> > -
> > - mshv_region_evict(region);
> > -
> > - vfree(region);
> > - }
> > + hnode)
> > + mshv_partition_destroy_region(region);
> >
> > /* Withdraw and free all pages we deposited */
> > hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id);
> >
> >
>