Message-ID: <73111be3-3077-4821-8c2f-9c5564cb2bb7@linux.dev>
Date: Wed, 10 Jan 2024 18:19:49 +0800
From: Muchun Song <muchun.song@...ux.dev>
To: Gang Li <gang.li@...ux.dev>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
ligang.bdlg@...edance.com, David Hildenbrand <david@...hat.com>,
David Rientjes <rientjes@...gle.com>, Mike Kravetz
<mike.kravetz@...cle.com>, Andrew Morton <akpm@...ux-foundation.org>,
Tim Chen <tim.c.chen@...ux.intel.com>
Subject: Re: [PATCH v3 1/7] hugetlb: code clean for hugetlb_hstate_alloc_pages
On 2024/1/2 21:12, Gang Li wrote:
> The readability of `hugetlb_hstate_alloc_pages` is poor. By cleaning up
> the code, its readability can be improved, which facilitates future
> modifications.
>
> This patch extracts two functions to reduce the complexity of
> `hugetlb_hstate_alloc_pages` and introduces no functional changes:
>
> - hugetlb_hstate_alloc_pages_node_specific() handles the node-specific
>   allocations: it iterates through each online node and performs the
>   allocation if necessary.
> - hugetlb_hstate_alloc_pages_report() reports errors during allocation
>   and updates h->max_huge_pages accordingly.
>
> Signed-off-by: Gang Li <gang.li@...ux.dev>
> ---
> mm/hugetlb.c | 46 +++++++++++++++++++++++++++++-----------------
> 1 file changed, 29 insertions(+), 17 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index ed1581b670d42..2606135ec55e6 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -3482,6 +3482,33 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
> h->max_huge_pages_node[nid] = i;
> }
>
> +static bool __init hugetlb_hstate_alloc_pages_node_specific(struct hstate *h)
I'd like to rename this to hugetlb_hstate_alloc_pages_specific_nodes.
Otherwise, LGTM.
Reviewed-by: Muchun Song <muchun.song@...ux.dev>
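
For reference, the rename would be mechanical: only the definition above
and its single call site further down in hugetlb_hstate_alloc_pages()
change, e.g. (a sketch of the suggestion, untested):

	static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)

	/* and at the call site: */
	if (hugetlb_hstate_alloc_pages_specific_nodes(h))
		return;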
> +{
> + int i;
> + bool node_specific_alloc = false;
> +
> + for_each_online_node(i) {
> + if (h->max_huge_pages_node[i] > 0) {
> + hugetlb_hstate_alloc_pages_onenode(h, i);
> + node_specific_alloc = true;
> + }
> + }
> +
> + return node_specific_alloc;
> +}
> +
> +static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, struct hstate *h)
> +{
> + if (allocated < h->max_huge_pages) {
> + char buf[32];
> +
> + string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
> + pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
> + h->max_huge_pages, buf, allocated);
> + h->max_huge_pages = allocated;
> + }
> +}
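
A side note for readers: string_get_size() with STRING_UNITS_2 renders
the size in binary units ("2.00 MiB", "1.00 GiB", ...), so the warning
would look roughly like the line below (the counts are made up for
illustration; only the format string comes from the patch):

	HugeTLB: allocating 512 of page size 2.00 MiB failed. Only allocated 256 hugepages.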
> +
> /*
> * NOTE: this routine is called in different contexts for gigantic and
> * non-gigantic pages.
> @@ -3499,7 +3526,6 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
> struct folio *folio;
> LIST_HEAD(folio_list);
> nodemask_t *node_alloc_noretry;
> - bool node_specific_alloc = false;
>
> /* skip gigantic hugepages allocation if hugetlb_cma enabled */
> if (hstate_is_gigantic(h) && hugetlb_cma_size) {
> @@ -3508,14 +3534,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
> }
>
> /* do node specific alloc */
> - for_each_online_node(i) {
> - if (h->max_huge_pages_node[i] > 0) {
> - hugetlb_hstate_alloc_pages_onenode(h, i);
> - node_specific_alloc = true;
> - }
> - }
> -
> - if (node_specific_alloc)
> + if (hugetlb_hstate_alloc_pages_node_specific(h))
> return;
>
> /* below will do all node balanced alloc */
> @@ -3558,14 +3577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
> /* list will be empty if hstate_is_gigantic */
> prep_and_add_allocated_folios(h, &folio_list);
>
> - if (i < h->max_huge_pages) {
> - char buf[32];
> -
> - string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
> - pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
> - h->max_huge_pages, buf, i);
> - h->max_huge_pages = i;
> - }
> + hugetlb_hstate_alloc_pages_report(i, h);
> kfree(node_alloc_noretry);
> }
>