Message-ID: <79f4d305-bddb-9042-d15f-a683fd6003d9@linux.dev>
Date: Wed, 30 Aug 2023 15:20:27 +0800
From: Muchun Song <muchun.song@...ux.dev>
To: Mike Kravetz <mike.kravetz@...cle.com>
Cc: Muchun Song <songmuchun@...edance.com>,
Joao Martins <joao.m.martins@...cle.com>,
Oscar Salvador <osalvador@...e.de>,
David Hildenbrand <david@...hat.com>,
Miaohe Lin <linmiaohe@...wei.com>,
David Rientjes <rientjes@...gle.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Naoya Horiguchi <naoya.horiguchi@...ux.dev>,
Barry Song <song.bao.hua@...ilicon.com>,
Michal Hocko <mhocko@...e.com>,
Matthew Wilcox <willy@...radead.org>,
Xiongchun Duan <duanxiongchun@...edance.com>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH 08/12] hugetlb: batch freeing of vmemmap pages
On 2023/8/26 03:04, Mike Kravetz wrote:
> Now that batching of hugetlb vmemmap optimization processing is possible,
> batch the freeing of vmemmap pages. When freeing vmemmap pages for a
> hugetlb page, we add them to a list that is freed after the entire batch
> has been processed.
>
> This enhances the ability to return contiguous ranges of memory to the
> low level allocators.
>
> Signed-off-by: Mike Kravetz <mike.kravetz@...cle.com>
> ---
> mm/hugetlb_vmemmap.c | 56 ++++++++++++++++++++++++++++++++------------
> 1 file changed, 41 insertions(+), 15 deletions(-)
>
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index d5e6b6c76dce..e390170c0887 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -305,11 +305,14 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
> * @end: end address of the vmemmap virtual address range that we want to
> * remap.
> * @reuse: reuse address.
> + * @bulk_pages: list to deposit vmemmap pages to be freed in bulk operations
> + * or NULL in non-bulk case;
I'd like to rename bulk_pages to vmemmap_pages. Always add the vmemmap
pages to this list and let the callers (hugetlb_vmemmap_optimize and
hugetlb_vmemmap_optimize_folios) free them. That would be clearer to me.
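Roughly, the shape I have in mind is the following (just a sketch, not
final code; it assumes the function's local LIST_HEAD is renamed, e.g.
to freed_pages, so the parameter can take the vmemmap_pages name):

static int vmemmap_remap_free(unsigned long start, unsigned long end,
			      unsigned long reuse,
			      struct list_head *vmemmap_pages)
{
	...
	mmap_read_unlock(&init_mm);

	/*
	 * Always splice the freed pages onto the caller's list; the
	 * caller decides when to call free_vmemmap_page_list().
	 */
	list_splice(&freed_pages, vmemmap_pages);

	return ret;
}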
> *
> * Return: %0 on success, negative error code otherwise.
> */
> static int vmemmap_remap_free(unsigned long start, unsigned long end,
> - unsigned long reuse)
> + unsigned long reuse,
> + struct list_head *bulk_pages)
> {
> int ret;
> LIST_HEAD(vmemmap_pages);
> @@ -372,7 +375,14 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
> }
> mmap_read_unlock(&init_mm);
>
> - free_vmemmap_page_list(&vmemmap_pages);
> + /*
> + * if performing bulk operation, do not free pages here.
> + * rather add them to the bulk list
> + */
> + if (!bulk_pages)
> + free_vmemmap_page_list(&vmemmap_pages);
> + else
> + list_splice(&vmemmap_pages, bulk_pages);
Here, always splice vmemmap_pages onto the caller's list (no NULL
special case), as sketched above.
>
> return ret;
> }
> @@ -546,17 +556,9 @@ static bool vmemmap_should_optimize(const struct hstate *h, const struct page *h
> return true;
> }
>
> -/**
> - * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
> - * @h: struct hstate.
> - * @head: the head page whose vmemmap pages will be optimized.
> - *
> - * This function only tries to optimize @head's vmemmap pages and does not
> - * guarantee that the optimization will succeed after it returns. The caller
> - * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
> - * have been optimized.
> - */
> -void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
> +static void __hugetlb_vmemmap_optimize(const struct hstate *h,
> + struct page *head,
> + struct list_head *bulk_pages)
Also rename this parameter to struct list_head *vmemmap_pages.
> {
> unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
> unsigned long vmemmap_reuse;
> @@ -575,18 +577,42 @@ void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
> * to the page which @vmemmap_reuse is mapped to, then free the pages
> * which the range [@vmemmap_start, @vmemmap_end] is mapped to.
> */
> - if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
> + if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse, bulk_pages))
> static_branch_dec(&hugetlb_optimize_vmemmap_key);
> else
> SetHPageVmemmapOptimized(head);
> }
>
> +/**
> + * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
> + * @h: struct hstate.
> + * @head: the head page whose vmemmap pages will be optimized.
> + *
> + * This function only tries to optimize @head's vmemmap pages and does not
> + * guarantee that the optimization will succeed after it returns. The caller
> + * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
> + * have been optimized.
> + */
> +void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
> +{
> + __hugetlb_vmemmap_optimize(h, head, NULL);
Use free_vmemmap_page_list() to free the vmemmap pages here.
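A rough sketch of what I mean, keeping the helper from this patch:

void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
	LIST_HEAD(vmemmap_pages);

	__hugetlb_vmemmap_optimize(h, head, &vmemmap_pages);
	/* Non-batch case: free what we collected right away. */
	free_vmemmap_page_list(&vmemmap_pages);
}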
> +}
> +
> +void hugetlb_vmemmap_optimize_bulk(const struct hstate *h, struct page *head,
> + struct list_head *bulk_pages)
> +{
> + __hugetlb_vmemmap_optimize(h, head, bulk_pages);
> +}
> +
> void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
> {
> struct folio *folio;
> + LIST_HEAD(vmemmap_pages);
>
> list_for_each_entry(folio, folio_list, lru)
> - hugetlb_vmemmap_optimize(h, &folio->page);
> + hugetlb_vmemmap_optimize_bulk(h, &folio->page, &vmemmap_pages);
Directly use __hugetlb_vmemmap_optimize and delete
hugetlb_vmemmap_optimize_bulk.
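i.e. something like:

void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
{
	struct folio *folio;
	LIST_HEAD(vmemmap_pages);

	/* Collect every folio's vmemmap pages on one list ... */
	list_for_each_entry(folio, folio_list, lru)
		__hugetlb_vmemmap_optimize(h, &folio->page, &vmemmap_pages);

	/* ... and free the whole batch in one go. */
	free_vmemmap_page_list(&vmemmap_pages);
}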
In the future, we could rename hugetlb_vmemmap_optimize to
hugetlb_vmemmap_optimize_folio; then both function names would be
consistent, e.g.:
1) hugetlb_vmemmap_optimize_folio(): used to free one folio's vmemmap
pages.
2) hugetlb_vmemmap_optimize_folios(): used to free multiple folios'
vmemmap pages.
Thanks.
> +
> + free_vmemmap_page_list(&vmemmap_pages);
> }
>
> static struct ctl_table hugetlb_vmemmap_sysctls[] = {