Message-ID: <2ae5ec3f-4a96-4819-af65-5f04df0c2ebd@linux.dev>
Date: Tue, 16 Jan 2024 15:02:39 +0800
From: Muchun Song <muchun.song@...ux.dev>
To: ligang.bdlg@...edance.com, Gang Li <gang.li@...ux.dev>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
David Hildenbrand <david@...hat.com>, David Rientjes <rientjes@...gle.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Tim Chen <tim.c.chen@...ux.intel.com>
Subject: Re: [PATCH v3 2/7] hugetlb: split hugetlb_hstate_alloc_pages
On 2024/1/2 21:12, Gang Li wrote:
> 1G and 2M huge pages have different allocation and initialization logic,
> which leads to subtle differences in parallelization. Therefore, it is
> appropriate to split hugetlb_hstate_alloc_pages into gigantic and
> non-gigantic.
>
> This patch has no functional changes.
>
> Signed-off-by: Gang Li <gang.li@...ux.dev>
> ---
> mm/hugetlb.c | 86 +++++++++++++++++++++++++++-------------------------
> 1 file changed, 45 insertions(+), 41 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 2606135ec55e6..92448e747991d 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -3509,6 +3509,47 @@ static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, st
> }
> }
>
> +static unsigned long __init hugetlb_hstate_alloc_pages_gigantic(struct hstate *h)
The name is a bit too long, how about hugetlb_gigantic_pages_alloc_boot?
> +{
> + unsigned long i;
> +
> + for (i = 0; i < h->max_huge_pages; ++i) {
> + /*
> + * gigantic pages not added to list as they are not
> + * added to pools now.
> + */
> + if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
> + break;
> + cond_resched();
> + }
> +
> + return i;
> +}
> +
> +static unsigned long __init hugetlb_hstate_alloc_pages_non_gigantic(struct hstate *h)
hugetlb_pages_alloc_boot?
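I.e., just a sketch of the two suggested signatures (only the names change,
the bodies stay as in your patch):

	static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h);
	static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h);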
> +{
> + unsigned long i;
> + struct folio *folio;
> + LIST_HEAD(folio_list);
> + nodemask_t node_alloc_noretry;
> +
> + /* Bit mask controlling how hard we retry per-node allocations.*/
> + nodes_clear(node_alloc_noretry);
> +
> + for (i = 0; i < h->max_huge_pages; ++i) {
> + folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
> + &node_alloc_noretry);
> + if (!folio)
> + break;
> + list_add(&folio->lru, &folio_list);
> + cond_resched();
> + }
> +
> + prep_and_add_allocated_folios(h, &folio_list);
> +
> + return i;
> +}
> +
> /*
> * NOTE: this routine is called in different contexts for gigantic and
> * non-gigantic pages.
> @@ -3522,10 +3563,7 @@ static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, st
> */
> static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
> {
> - unsigned long i;
> - struct folio *folio;
> - LIST_HEAD(folio_list);
> - nodemask_t *node_alloc_noretry;
> + unsigned long allocated;
>
> /* skip gigantic hugepages allocation if hugetlb_cma enabled */
> if (hstate_is_gigantic(h) && hugetlb_cma_size) {
> @@ -3539,46 +3577,12 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
>
> /* below will do all node balanced alloc */
> if (!hstate_is_gigantic(h)) {
It is unnecessary to reverse the condition. It reads a little simpler like the following:
if (hstate_is_gigantic(h))
/* gigantic pages */
else
/* normal pages */
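E.g., with the renamed helpers suggested above, this hunk could read:

	if (hstate_is_gigantic(h))
		allocated = hugetlb_gigantic_pages_alloc_boot(h);
	else
		allocated = hugetlb_pages_alloc_boot(h);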
> - /*
> - * Bit mask controlling how hard we retry per-node allocations.
> - * Ignore errors as lower level routines can deal with
> - * node_alloc_noretry == NULL. If this kmalloc fails at boot
> - * time, we are likely in bigger trouble.
> - */
> - node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
> - GFP_KERNEL);
> + allocated = hugetlb_hstate_alloc_pages_non_gigantic(h);
> } else {
> - /* allocations done at boot time */
> - node_alloc_noretry = NULL;
> - }
> -
> - /* bit mask controlling how hard we retry per-node allocations */
> - if (node_alloc_noretry)
> - nodes_clear(*node_alloc_noretry);
> -
> - for (i = 0; i < h->max_huge_pages; ++i) {
> - if (hstate_is_gigantic(h)) {
> - /*
> - * gigantic pages not added to list as they are not
> - * added to pools now.
> - */
> - if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
> - break;
> - } else {
> - folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
> - node_alloc_noretry);
> - if (!folio)
> - break;
> - list_add(&folio->lru, &folio_list);
> - }
> - cond_resched();
> + allocated = hugetlb_hstate_alloc_pages_gigantic(h);
> }
>
> - /* list will be empty if hstate_is_gigantic */
> - prep_and_add_allocated_folios(h, &folio_list);
> -
> - hugetlb_hstate_alloc_pages_report(i, h);
> - kfree(node_alloc_noretry);
> + hugetlb_hstate_alloc_pages_report(allocated, h);
> }
>
> static void __init hugetlb_init_hstates(void)