Message-ID: <20200722084913.GO32539@MiWiFi-R3L-srv>
Date: Wed, 22 Jul 2020 16:49:13 +0800
From: Baoquan He <bhe@...hat.com>
To: Mike Kravetz <mike.kravetz@...cle.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
akpm@...ux-foundation.org
Subject: Re: [PATCH 5/5] mm/hugetlb.c: warn if expected count of huge
 pages adjustment is not achieved
Hi Mike,
On 07/20/20 at 05:38pm, Mike Kravetz wrote:
> > diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> > index 467894d8332a..1dfb5d9e4e06 100644
> > --- a/mm/hugetlb.c
> > +++ b/mm/hugetlb.c
> > @@ -2661,7 +2661,7 @@ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
> >  static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
> >  			      nodemask_t *nodes_allowed)
> >  {
> > -	unsigned long min_count, ret;
> > +	unsigned long min_count, ret, old_max;
> >  	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
> >
> >  	/*
> > @@ -2723,6 +2723,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
> >  	 * pool might be one hugepage larger than it needs to be, but
> >  	 * within all the constraints specified by the sysctls.
> >  	 */
> > +	old_max = persistent_huge_pages(h);
> >  	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
> >  		if (!adjust_pool_surplus(h, nodes_allowed, -1))
> >  			break;
> > @@ -2779,6 +2780,16 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
> >  	}
> >  out:
> >  	h->max_huge_pages = persistent_huge_pages(h);
> > +	if (count != h->max_huge_pages) {
> > +		char buf[32];
> > +
> > +		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
> > +		pr_warn("HugeTLB: %s %lu of page size %s failed. Only %s %lu hugepages.\n",
> > +			count > old_max ? "increasing" : "decreasing",
> > +			abs(count - old_max), buf,
> > +			count > old_max ? "increased" : "decreased",
> > +			abs(old_max - h->max_huge_pages));
> > +	}
> >  	spin_unlock(&hugetlb_lock);
>
> I would prefer if we drop the lock before logging the message. That would
> involve grabbing the value of h->max_huge_pages before dropping the lock.
Does the change below look OK to you? It moves the message logging
after the lock is dropped. If so, I will repost with updated patches.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6a9b7556ce5b..b5aa32a13569 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2661,7 +2661,7 @@ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 			      nodemask_t *nodes_allowed)
 {
-	unsigned long min_count, ret, old_max;
+	unsigned long min_count, ret, old_max, new_max;
 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);

 	/*
@@ -2780,7 +2780,10 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 	}
 out:
 	h->max_huge_pages = persistent_huge_pages(h);
-	if (count != h->max_huge_pages) {
+	new_max = h->max_huge_pages;
+	spin_unlock(&hugetlb_lock);
+
+	if (count != new_max) {
 		char buf[32];

 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
@@ -2788,9 +2791,8 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 			count > old_max ? "increasing" : "decreasing",
 			abs(count - old_max), buf,
 			count > old_max ? "increased" : "decreased",
-			abs(old_max - h->max_huge_pages));
+			abs(old_max - new_max));
 	}
-	spin_unlock(&hugetlb_lock);

 	NODEMASK_FREE(node_alloc_noretry);
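
Putting the two hunks together, the tail of set_max_huge_pages() would
end up roughly like the sketch below. This is only a sketch for
readability, not the literal patch: the requested/achieved locals are
illustrative names, and the deltas are computed with explicit
conditionals rather than abs(), because abs() on an unsigned long
subtraction would wrap rather than negate. That adjustment is not part
of the posted hunks.

out:
	h->max_huge_pages = persistent_huge_pages(h);
	new_max = h->max_huge_pages;
	spin_unlock(&hugetlb_lock);

	if (count != new_max) {
		char buf[32];
		/*
		 * count, old_max and new_max are all unsigned long, so
		 * abs(a - b) would wrap when a < b; subtract the
		 * smaller value from the larger one instead.
		 */
		unsigned long requested = count > old_max ?
				count - old_max : old_max - count;
		unsigned long achieved = new_max > old_max ?
				new_max - old_max : old_max - new_max;

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2,
				buf, sizeof(buf));
		/* The lock is already dropped, so logging here is fine. */
		pr_warn("HugeTLB: %s %lu of page size %s failed. Only %s %lu hugepages.\n",
			count > old_max ? "increasing" : "decreasing",
			requested, buf,
			count > old_max ? "increased" : "decreased",
			achieved);
	}

	NODEMASK_FREE(node_alloc_noretry);

	return 0;

For example, an attempt to grow a 2 MB pool from 256 to 512 pages that
only reaches 384 would log something like:
"HugeTLB: increasing 256 of page size 2.00 MiB failed. Only increased
128 hugepages."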