Message-ID: <1ffd72f1-7345-1d31-ea6f-77bec83cb570@linux.dev>
Date: Tue, 19 Sep 2023 14:48:54 +0800
From: Muchun Song <muchun.song@...ux.dev>
To: Mike Kravetz <mike.kravetz@...cle.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: Muchun Song <songmuchun@...edance.com>,
Joao Martins <joao.m.martins@...cle.com>,
Oscar Salvador <osalvador@...e.de>,
David Hildenbrand <david@...hat.com>,
Miaohe Lin <linmiaohe@...wei.com>,
David Rientjes <rientjes@...gle.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Naoya Horiguchi <naoya.horiguchi@...ux.dev>,
Barry Song <21cnbao@...il.com>, Michal Hocko <mhocko@...e.com>,
Matthew Wilcox <willy@...radead.org>,
Xiongchun Duan <duanxiongchun@...edance.com>,
Andrew Morton <akpm@...ux-foundation.org>
Subject: Re: [PATCH v4 8/8] hugetlb: batch TLB flushes when restoring vmemmap
On 2023/9/19 07:02, Mike Kravetz wrote:
> Update the internal hugetlb restore vmemmap code path such that TLB
> flushing can be batched. Use the existing mechanism of passing the
> VMEMMAP_REMAP_NO_TLB_FLUSH flag to indicate flushing should not be
> performed for individual pages. The routine hugetlb_vmemmap_restore_folios
> is the only user of this new mechanism, and it will perform a global
> flush after all vmemmap is restored.
>
> Signed-off-by: Joao Martins <joao.m.martins@...cle.com>
> Signed-off-by: Mike Kravetz <mike.kravetz@...cle.com>
> ---
> mm/hugetlb_vmemmap.c | 39 ++++++++++++++++++++++++---------------
> 1 file changed, 24 insertions(+), 15 deletions(-)
>
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index a6c356acb1fc..ae2229f19158 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -460,18 +460,19 @@ static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
> * @end: end address of the vmemmap virtual address range that we want to
> * remap.
> * @reuse: reuse address.
> + * @flags: modify behavior for bulk operations
Please keep the comment consistent with vmemmap_remap_split(), which says:
"@flags: modifications to vmemmap_remap_walk flags".
Thanks.
> *
> * Return: %0 on success, negative error code otherwise.
> */
> static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
> - unsigned long reuse)
> + unsigned long reuse, unsigned long flags)
> {
> LIST_HEAD(vmemmap_pages);
> struct vmemmap_remap_walk walk = {
> .remap_pte = vmemmap_restore_pte,
> .reuse_addr = reuse,
> .vmemmap_pages = &vmemmap_pages,
> - .flags = 0,
> + .flags = flags,
> };
>
> /* See the comment in the vmemmap_remap_free(). */
> @@ -493,17 +494,7 @@ EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
> static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
> core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
>
> -/**
> - * hugetlb_vmemmap_restore - restore previously optimized (by
> - * hugetlb_vmemmap_optimize()) vmemmap pages which
> - * will be reallocated and remapped.
> - * @h: struct hstate.
> - * @head: the head page whose vmemmap pages will be restored.
> - *
> - * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
> - * negative error code otherwise.
> - */
> -int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
> +static int __hugetlb_vmemmap_restore(const struct hstate *h, struct page *head, unsigned long flags)
> {
> int ret;
> unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
> @@ -524,7 +515,7 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
> * When a HugeTLB page is freed to the buddy allocator, previously
> * discarded vmemmap pages must be allocated and remapping.
> */
> - ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse);
> + ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, flags);
> if (!ret) {
> ClearHPageVmemmapOptimized(head);
> static_branch_dec(&hugetlb_optimize_vmemmap_key);
> @@ -533,6 +524,21 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
> return ret;
> }
>
> +/**
> + * hugetlb_vmemmap_restore - restore previously optimized (by
> + * hugetlb_vmemmap_optimize()) vmemmap pages which
> + * will be reallocated and remapped.
> + * @h: struct hstate.
> + * @head: the head page whose vmemmap pages will be restored.
> + *
> + * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
> + * negative error code otherwise.
> + */
> +int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
> +{
> + return __hugetlb_vmemmap_restore(h, head, 0);
> +}
> +
> /**
> * hugetlb_vmemmap_restore_folios - restore vmemmap for every folio on the list.
> * @h: struct hstate.
> @@ -557,7 +563,8 @@ int hugetlb_vmemmap_restore_folios(const struct hstate *h,
> num_restored = 0;
> list_for_each_entry(folio, folio_list, lru) {
> if (folio_test_hugetlb_vmemmap_optimized(folio)) {
> - t_ret = hugetlb_vmemmap_restore(h, &folio->page);
> + t_ret = __hugetlb_vmemmap_restore(h, &folio->page,
> + VMEMMAP_REMAP_NO_TLB_FLUSH);
> if (t_ret)
> ret = t_ret;
> else
> @@ -565,6 +572,8 @@ int hugetlb_vmemmap_restore_folios(const struct hstate *h,
> }
> }
>
> + flush_tlb_all();
> +
> if (*restored)
> *restored = num_restored;
> return ret;
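
FWIW, for anyone skimming the thread, the batched flow this patch ends
up with is roughly the following (a sketch only, error handling elided):

	list_for_each_entry(folio, folio_list, lru) {
		if (folio_test_hugetlb_vmemmap_optimized(folio))
			__hugetlb_vmemmap_restore(h, &folio->page,
						  VMEMMAP_REMAP_NO_TLB_FLUSH);
	}
	/* One global TLB flush replaces the per-folio flushes. */
	flush_tlb_all();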